[Vertex AI] Replace "enum" computed properties with static let (fir…
andrewheard authored and MojtabaHs committed Oct 17, 2024
1 parent 341db0c commit ae53600
Showing 4 changed files with 40 additions and 104 deletions.
12 changes: 3 additions & 9 deletions FirebaseVertexAI/Sources/FunctionCalling.swift
@@ -84,22 +84,16 @@ public struct FunctionCallingConfig {
     /// The default behavior for function calling.
     ///
     /// The model calls functions to answer queries at its discretion.
-    public static var auto: Mode {
-      return self.init(kind: .auto)
-    }
+    public static let auto = Mode(kind: .auto)
 
     /// The model always predicts a provided function call to answer every query.
-    public static var any: Mode {
-      return self.init(kind: .any)
-    }
+    public static let any = Mode(kind: .any)
 
     /// The model will never predict a function call to answer a query.
     ///
     /// > Note: This can also be achieved by not passing any ``FunctionDeclaration`` tools
     /// > when instantiating the model.
-    public static var none: Mode {
-      return self.init(kind: .none)
-    }
+    public static let none = Mode(kind: .none)
 
     let rawValue: String
   }
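For context, a minimal, self-contained sketch of the struct-backed "enum" pattern this commit touches. The `Kind` raw values and the initializer here are assumptions for illustration, not the SDK's internals. The change matters because a computed `static var` constructs a fresh value on every access, while a `static let` is initialized once (lazily and atomically) and then shared:

```swift
// Illustrative sketch only; `Kind`'s raw values and the initializer are
// assumptions, not copied from the SDK.
public struct Mode: Hashable, Sendable {
  enum Kind: String {
    case auto = "AUTO"
    case any = "ANY"
    case none = "NONE"
  }

  // Before: re-evaluated on every access, constructing a new Mode each time.
  // public static var auto: Mode { return self.init(kind: .auto) }

  // After: a stored constant, initialized once on first use.
  public static let auto = Mode(kind: .auto)
  public static let any = Mode(kind: .any)
  public static let none = Mode(kind: .none)

  let rawValue: String

  init(kind: Kind) {
    rawValue = kind.rawValue
  }
}
```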
62 changes: 17 additions & 45 deletions FirebaseVertexAI/Sources/GenerateContentResponse.swift
@@ -159,62 +159,43 @@ public struct FinishReason: DecodableProtoEnum, Hashable, Sendable {
   }
 
   /// Natural stop point of the model or provided stop sequence.
-  public static var stop: FinishReason {
-    return self.init(kind: .stop)
-  }
+  public static let stop = FinishReason(kind: .stop)
 
   /// The maximum number of tokens as specified in the request was reached.
-  public static var maxTokens: FinishReason {
-    return self.init(kind: .maxTokens)
-  }
+  public static let maxTokens = FinishReason(kind: .maxTokens)
 
   /// The token generation was stopped because the response was flagged for safety reasons.
   ///
   /// > NOTE: When streaming, the ``CandidateResponse/content`` will be empty if content filters
   /// > blocked the output.
-  public static var safety: FinishReason {
-    return self.init(kind: .safety)
-  }
+  public static let safety = FinishReason(kind: .safety)
 
   /// The token generation was stopped because the response was flagged for unauthorized citations.
-  public static var recitation: FinishReason {
-    return self.init(kind: .recitation)
-  }
+  public static let recitation = FinishReason(kind: .recitation)
 
   /// All other reasons that stopped token generation.
-  public static var other: FinishReason {
-    return self.init(kind: .other)
-  }
+  public static let other = FinishReason(kind: .other)
 
   /// Token generation was stopped because the response contained forbidden terms.
-  public static var blocklist: FinishReason {
-    return self.init(kind: .blocklist)
-  }
+  public static let blocklist = FinishReason(kind: .blocklist)
 
   /// Token generation was stopped because the response contained potentially prohibited content.
-  public static var prohibitedContent: FinishReason {
-    return self.init(kind: .prohibitedContent)
-  }
+  public static let prohibitedContent = FinishReason(kind: .prohibitedContent)
 
   /// Token generation was stopped because of Sensitive Personally Identifiable Information (SPII).
-  public static var spii: FinishReason {
-    return self.init(kind: .spii)
-  }
+  public static let spii = FinishReason(kind: .spii)
 
   /// Token generation was stopped because the function call generated by the model was invalid.
-  public static var malformedFunctionCall: FinishReason {
-    return self.init(kind: .malformedFunctionCall)
-  }
+  public static let malformedFunctionCall = FinishReason(kind: .malformedFunctionCall)
 
   /// Returns the raw string representation of the `FinishReason` value.
   ///
   /// > Note: This value directly corresponds to the values in the [REST
   /// > API](https://cloud.google.com/vertex-ai/docs/reference/rest/v1beta1/GenerateContentResponse#FinishReason).
   public let rawValue: String
 
-  var unrecognizedValueMessageCode: VertexLog.MessageCode {
-    .generateContentResponseUnrecognizedFinishReason
-  }
+  static let unrecognizedValueMessageCode =
+    VertexLog.MessageCode.generateContentResponseUnrecognizedFinishReason
 }
 
 /// A metadata struct containing any feedback the model had on the prompt it was provided.
@@ -230,34 +211,25 @@ public struct PromptFeedback: Sendable {
     }
 
     /// The prompt was blocked because it was deemed unsafe.
-    public static var safety: BlockReason {
-      return self.init(kind: .safety)
-    }
+    public static let safety = BlockReason(kind: .safety)
 
     /// All other block reasons.
-    public static var other: BlockReason {
-      return self.init(kind: .other)
-    }
+    public static let other = BlockReason(kind: .other)
 
     /// The prompt was blocked because it contained terms from the terminology blocklist.
-    public static var blocklist: BlockReason {
-      return self.init(kind: .blocklist)
-    }
+    public static let blocklist = BlockReason(kind: .blocklist)
 
     /// The prompt was blocked due to prohibited content.
-    public static var prohibitedContent: BlockReason {
-      return self.init(kind: .prohibitedContent)
-    }
+    public static let prohibitedContent = BlockReason(kind: .prohibitedContent)
 
     /// Returns the raw string representation of the `BlockReason` value.
     ///
     /// > Note: This value directly corresponds to the values in the [REST
    /// > API](https://cloud.google.com/vertex-ai/docs/reference/rest/v1beta1/GenerateContentResponse#BlockedReason).
     public let rawValue: String
 
-    var unrecognizedValueMessageCode: VertexLog.MessageCode {
-      .generateContentResponseUnrecognizedBlockReason
-    }
+    static let unrecognizedValueMessageCode =
+      VertexLog.MessageCode.generateContentResponseUnrecognizedBlockReason
   }
 
   /// The reason a prompt was blocked, if it was blocked.
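A hypothetical call site, assuming only the public API shown in this diff: because `FinishReason` is a struct rather than a Swift enum, a `switch` over it is not exhaustive and needs a `default` clause, while `case .stop` pattern matching still works because the type is `Equatable` (via `Hashable`). The move to `static let` is source-compatible with this usage:

```swift
// Hypothetical consumer code; `describe` is not part of the SDK.
func describe(_ reason: FinishReason) -> String {
  switch reason {
  case .stop:
    return "Natural stop point or stop sequence."
  case .maxTokens:
    return "The request's token limit was reached."
  case .safety, .recitation, .blocklist, .prohibitedContent, .spii:
    return "Generation was stopped by a content filter."
  case .malformedFunctionCall:
    return "The model produced an invalid function call."
  default:
    return "Unrecognized finish reason: \(reason.rawValue)"
  }
}
```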
@@ -47,7 +47,7 @@ protocol ProtoEnum {
 /// provided when conforming to this type.
 protocol DecodableProtoEnum: ProtoEnum, Decodable {
   /// Returns the ``VertexLog/MessageCode`` associated with unrecognized (unknown) enum values.
-  var unrecognizedValueMessageCode: VertexLog.MessageCode { get }
+  static var unrecognizedValueMessageCode: VertexLog.MessageCode { get }
 
   /// Creates a new instance by decoding from the given decoder.
   ///
@@ -91,7 +91,7 @@ extension DecodableProtoEnum {
 
     if Kind(rawValue: rawValue) == nil {
       VertexLog.error(
-        code: unrecognizedValueMessageCode,
+        code: Self.unrecognizedValueMessageCode,
         """
         Unrecognized \(Self.self) with value "\(rawValue)":
         - Check for updates to the SDK as support for "\(rawValue)" may have been added; see \
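A compressed, self-contained sketch of the protocol change (names simplified and the logger stubbed out; this is an assumption-laden illustration, not the SDK's code): making the requirement `static` lets conformers satisfy it with the stored `static let` constants introduced throughout this commit, and the decoding initializer reads it via `Self.` without involving any instance state.

```swift
import Foundation

// Stand-in for VertexLog.MessageCode in this sketch.
enum LogCode { case unrecognizedEnumValue }

protocol DecodableProtoEnumSketch: Decodable {
  associatedtype Kind: RawRepresentable where Kind.RawValue == String
  // A conformer's `static let` satisfies this `static var { get }` requirement.
  static var unrecognizedValueMessageCode: LogCode { get }
  var rawValue: String { get }
  init(rawValue: String)
}

extension DecodableProtoEnumSketch {
  init(from decoder: Decoder) throws {
    let rawValue = try decoder.singleValueContainer().decode(String.self)
    if Kind(rawValue: rawValue) == nil {
      // Accessed statically; no initialized `self` is needed here.
      print("[\(Self.unrecognizedValueMessageCode)] Unrecognized \(Self.self): \(rawValue)")
    }
    self.init(rawValue: rawValue)
  }
}

struct Sketchy: DecodableProtoEnumSketch {
  enum Kind: String { case known = "KNOWN" }
  static let unrecognizedValueMessageCode = LogCode.unrecognizedEnumValue
  let rawValue: String
  init(rawValue: String) { self.rawValue = rawValue }
}
```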
66 changes: 18 additions & 48 deletions FirebaseVertexAI/Sources/Safety.swift
@@ -52,36 +52,27 @@ public struct SafetyRating: Equatable, Hashable, Sendable {
     /// The probability is zero or close to zero.
     ///
     /// For benign content, the probability across all categories will be this value.
-    public static var negligible: HarmProbability {
-      return self.init(kind: .negligible)
-    }
+    public static let negligible = HarmProbability(kind: .negligible)
 
     /// The probability is small but non-zero.
-    public static var low: HarmProbability {
-      return self.init(kind: .low)
-    }
+    public static let low = HarmProbability(kind: .low)
 
     /// The probability is moderate.
-    public static var medium: HarmProbability {
-      return self.init(kind: .medium)
-    }
+    public static let medium = HarmProbability(kind: .medium)
 
     /// The probability is high.
     ///
     /// The content described is very likely harmful.
-    public static var high: HarmProbability {
-      return self.init(kind: .high)
-    }
+    public static let high = HarmProbability(kind: .high)
 
     /// Returns the raw string representation of the `HarmProbability` value.
     ///
     /// > Note: This value directly corresponds to the values in the [REST
     /// > API](https://cloud.google.com/vertex-ai/docs/reference/rest/v1beta1/GenerateContentResponse#SafetyRating).
     public let rawValue: String
 
-    var unrecognizedValueMessageCode: VertexLog.MessageCode {
-      .generateContentResponseUnrecognizedHarmProbability
-    }
+    static let unrecognizedValueMessageCode =
+      VertexLog.MessageCode.generateContentResponseUnrecognizedHarmProbability
   }
 }
 
@@ -100,29 +91,19 @@ public struct SafetySetting {
     }
 
     /// Content with `.negligible` will be allowed.
-    public static var blockLowAndAbove: HarmBlockThreshold {
-      return self.init(kind: .blockLowAndAbove)
-    }
+    public static let blockLowAndAbove = HarmBlockThreshold(kind: .blockLowAndAbove)
 
     /// Content with `.negligible` and `.low` will be allowed.
-    public static var blockMediumAndAbove: HarmBlockThreshold {
-      return self.init(kind: .blockMediumAndAbove)
-    }
+    public static let blockMediumAndAbove = HarmBlockThreshold(kind: .blockMediumAndAbove)
 
     /// Content with `.negligible`, `.low`, and `.medium` will be allowed.
-    public static var blockOnlyHigh: HarmBlockThreshold {
-      return self.init(kind: .blockOnlyHigh)
-    }
+    public static let blockOnlyHigh = HarmBlockThreshold(kind: .blockOnlyHigh)
 
     /// All content will be allowed.
-    public static var blockNone: HarmBlockThreshold {
-      return self.init(kind: .blockNone)
-    }
+    public static let blockNone = HarmBlockThreshold(kind: .blockNone)
 
     /// Turn off the safety filter.
-    public static var off: HarmBlockThreshold {
-      return self.init(kind: .off)
-    }
+    public static let off = HarmBlockThreshold(kind: .off)
 
     let rawValue: String
   }
@@ -156,39 +137,28 @@ public struct HarmCategory: CodableProtoEnum, Hashable, Sendable {
   }
 
   /// Harassment content.
-  public static var harassment: HarmCategory {
-    return self.init(kind: .harassment)
-  }
+  public static let harassment = HarmCategory(kind: .harassment)
 
   /// Negative or harmful comments targeting identity and/or protected attributes.
-  public static var hateSpeech: HarmCategory {
-    return self.init(kind: .hateSpeech)
-  }
+  public static let hateSpeech = HarmCategory(kind: .hateSpeech)
 
   /// Contains references to sexual acts or other lewd content.
-  public static var sexuallyExplicit: HarmCategory {
-    return self.init(kind: .sexuallyExplicit)
-  }
+  public static let sexuallyExplicit = HarmCategory(kind: .sexuallyExplicit)
 
   /// Promotes or enables access to harmful goods, services, or activities.
-  public static var dangerousContent: HarmCategory {
-    return self.init(kind: .dangerousContent)
-  }
+  public static let dangerousContent = HarmCategory(kind: .dangerousContent)
 
   /// Content that may be used to harm civic integrity.
-  public static var civicIntegrity: HarmCategory {
-    return self.init(kind: .civicIntegrity)
-  }
+  public static let civicIntegrity = HarmCategory(kind: .civicIntegrity)
 
   /// Returns the raw string representation of the `HarmCategory` value.
   ///
   /// > Note: This value directly corresponds to the values in the
   /// > [REST API](https://cloud.google.com/vertex-ai/docs/reference/rest/v1beta1/HarmCategory).
   public let rawValue: String
 
-  var unrecognizedValueMessageCode: VertexLog.MessageCode {
-    .generateContentResponseUnrecognizedHarmCategory
-  }
+  static let unrecognizedValueMessageCode =
+    VertexLog.MessageCode.generateContentResponseUnrecognizedHarmCategory
 }
 
 // MARK: - Codable Conformances
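Finally, a hypothetical call site for the Safety.swift constants; the `SafetySetting(harmCategory:threshold:)` signature is assumed from the surrounding API style and may not match the SDK exactly. With `static let`, repeated references such as `.blockMediumAndAbove` now resolve to one shared instance rather than a new value per access:

```swift
// Hypothetical configuration; the initializer signature is an assumption.
let safetySettings = [
  SafetySetting(harmCategory: .harassment, threshold: .blockMediumAndAbove),
  SafetySetting(harmCategory: .hateSpeech, threshold: .blockMediumAndAbove),
  SafetySetting(harmCategory: .dangerousContent, threshold: .blockOnlyHigh),
  SafetySetting(harmCategory: .civicIntegrity, threshold: .off),
]
```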
