diff --git a/FirebaseVertexAI/Sources/FunctionCalling.swift b/FirebaseVertexAI/Sources/FunctionCalling.swift
index 9f62eff253e2..4cda35aa4daf 100644
--- a/FirebaseVertexAI/Sources/FunctionCalling.swift
+++ b/FirebaseVertexAI/Sources/FunctionCalling.swift
@@ -84,22 +84,16 @@ public struct FunctionCallingConfig {
     /// The default behavior for function calling.
     ///
     /// The model calls functions to answer queries at its discretion.
-    public static var auto: Mode {
-      return self.init(kind: .auto)
-    }
+    public static let auto = Mode(kind: .auto)
 
     /// The model always predicts a provided function call to answer every query.
-    public static var any: Mode {
-      return self.init(kind: .any)
-    }
+    public static let any = Mode(kind: .any)
 
     /// The model will never predict a function call to answer a query.
     ///
     /// > Note: This can also be achieved by not passing any ``FunctionDeclaration`` tools
     /// > when instantiating the model.
-    public static var none: Mode {
-      return self.init(kind: .none)
-    }
+    public static let none = Mode(kind: .none)
 
     let rawValue: String
   }
diff --git a/FirebaseVertexAI/Sources/GenerateContentResponse.swift b/FirebaseVertexAI/Sources/GenerateContentResponse.swift
index 94b45cb45b1e..07491765f90b 100644
--- a/FirebaseVertexAI/Sources/GenerateContentResponse.swift
+++ b/FirebaseVertexAI/Sources/GenerateContentResponse.swift
@@ -159,52 +159,34 @@ public struct FinishReason: DecodableProtoEnum, Hashable, Sendable {
   }
 
   /// Natural stop point of the model or provided stop sequence.
-  public static var stop: FinishReason {
-    return self.init(kind: .stop)
-  }
+  public static let stop = FinishReason(kind: .stop)
 
   /// The maximum number of tokens as specified in the request was reached.
-  public static var maxTokens: FinishReason {
-    return self.init(kind: .maxTokens)
-  }
+  public static let maxTokens = FinishReason(kind: .maxTokens)
 
   /// The token generation was stopped because the response was flagged for safety reasons.
   ///
   /// > NOTE: When streaming, the ``CandidateResponse/content`` will be empty if content filters
   /// > blocked the output.
-  public static var safety: FinishReason {
-    return self.init(kind: .safety)
-  }
+  public static let safety = FinishReason(kind: .safety)
 
   /// The token generation was stopped because the response was flagged for unauthorized citations.
-  public static var recitation: FinishReason {
-    return self.init(kind: .recitation)
-  }
+  public static let recitation = FinishReason(kind: .recitation)
 
   /// All other reasons that stopped token generation.
-  public static var other: FinishReason {
-    return self.init(kind: .other)
-  }
+  public static let other = FinishReason(kind: .other)
 
   /// Token generation was stopped because the response contained forbidden terms.
-  public static var blocklist: FinishReason {
-    return self.init(kind: .blocklist)
-  }
+  public static let blocklist = FinishReason(kind: .blocklist)
 
   /// Token generation was stopped because the response contained potentially prohibited content.
-  public static var prohibitedContent: FinishReason {
-    return self.init(kind: .prohibitedContent)
-  }
+  public static let prohibitedContent = FinishReason(kind: .prohibitedContent)
 
   /// Token generation was stopped because of Sensitive Personally Identifiable Information (SPII).
-  public static var spii: FinishReason {
-    return self.init(kind: .spii)
-  }
+  public static let spii = FinishReason(kind: .spii)
 
   /// Token generation was stopped because the function call generated by the model was invalid.
-  public static var malformedFunctionCall: FinishReason {
-    return self.init(kind: .malformedFunctionCall)
-  }
+  public static let malformedFunctionCall = FinishReason(kind: .malformedFunctionCall)
 
   /// Returns the raw string representation of the `FinishReason` value.
   ///
@@ -212,9 +194,8 @@ public struct FinishReason: DecodableProtoEnum, Hashable, Sendable {
   /// > API](https://cloud.google.com/vertex-ai/docs/reference/rest/v1beta1/GenerateContentResponse#FinishReason).
   public let rawValue: String
 
-  var unrecognizedValueMessageCode: VertexLog.MessageCode {
-    .generateContentResponseUnrecognizedFinishReason
-  }
+  static let unrecognizedValueMessageCode =
+    VertexLog.MessageCode.generateContentResponseUnrecognizedFinishReason
 }
 
 /// A metadata struct containing any feedback the model had on the prompt it was provided.
@@ -230,24 +211,16 @@ public struct PromptFeedback: Sendable {
     }
 
     /// The prompt was blocked because it was deemed unsafe.
-    public static var safety: BlockReason {
-      return self.init(kind: .safety)
-    }
+    public static let safety = BlockReason(kind: .safety)
 
     /// All other block reasons.
-    public static var other: BlockReason {
-      return self.init(kind: .other)
-    }
+    public static let other = BlockReason(kind: .other)
 
     /// The prompt was blocked because it contained terms from the terminology blocklist.
-    public static var blocklist: BlockReason {
-      return self.init(kind: .blocklist)
-    }
+    public static let blocklist = BlockReason(kind: .blocklist)
 
     /// The prompt was blocked due to prohibited content.
-    public static var prohibitedContent: BlockReason {
-      return self.init(kind: .prohibitedContent)
-    }
+    public static let prohibitedContent = BlockReason(kind: .prohibitedContent)
 
     /// Returns the raw string representation of the `BlockReason` value.
     ///
@@ -255,9 +228,8 @@ public struct PromptFeedback: Sendable {
     /// > API](https://cloud.google.com/vertex-ai/docs/reference/rest/v1beta1/GenerateContentResponse#BlockedReason).
     public let rawValue: String
 
-    var unrecognizedValueMessageCode: VertexLog.MessageCode {
-      .generateContentResponseUnrecognizedBlockReason
-    }
+    static let unrecognizedValueMessageCode =
+      VertexLog.MessageCode.generateContentResponseUnrecognizedBlockReason
   }
 
   /// The reason a prompt was blocked, if it was blocked.
diff --git a/FirebaseVertexAI/Sources/Protocols/Internal/CodableProtoEnum.swift b/FirebaseVertexAI/Sources/Protocols/Internal/CodableProtoEnum.swift
index f73271a1acc4..4392157a8978 100644
--- a/FirebaseVertexAI/Sources/Protocols/Internal/CodableProtoEnum.swift
+++ b/FirebaseVertexAI/Sources/Protocols/Internal/CodableProtoEnum.swift
@@ -47,7 +47,7 @@ protocol ProtoEnum {
 /// provided when conforming to this type.
 protocol DecodableProtoEnum: ProtoEnum, Decodable {
   /// Returns the ``VertexLog/MessageCode`` associated with unrecognized (unknown) enum values.
-  var unrecognizedValueMessageCode: VertexLog.MessageCode { get }
+  static var unrecognizedValueMessageCode: VertexLog.MessageCode { get }
 
   /// Creates a new instance by decoding from the given decoder.
   ///
@@ -91,7 +91,7 @@ extension DecodableProtoEnum {
 
     if Kind(rawValue: rawValue) == nil {
       VertexLog.error(
-        code: unrecognizedValueMessageCode,
+        code: Self.unrecognizedValueMessageCode,
         """
         Unrecognized \(Self.self) with value "\(rawValue)":
         - Check for updates to the SDK as support for "\(rawValue)" may have been added; see \
diff --git a/FirebaseVertexAI/Sources/Safety.swift b/FirebaseVertexAI/Sources/Safety.swift
index 4e93a94bf456..d810613aecbc 100644
--- a/FirebaseVertexAI/Sources/Safety.swift
+++ b/FirebaseVertexAI/Sources/Safety.swift
@@ -52,26 +52,18 @@ public struct SafetyRating: Equatable, Hashable, Sendable {
     /// The probability is zero or close to zero.
     ///
     /// For benign content, the probability across all categories will be this value.
-    public static var negligible: HarmProbability {
-      return self.init(kind: .negligible)
-    }
+    public static let negligible = HarmProbability(kind: .negligible)
 
     /// The probability is small but non-zero.
-    public static var low: HarmProbability {
-      return self.init(kind: .low)
-    }
+    public static let low = HarmProbability(kind: .low)
 
     /// The probability is moderate.
-    public static var medium: HarmProbability {
-      return self.init(kind: .medium)
-    }
+    public static let medium = HarmProbability(kind: .medium)
 
     /// The probability is high.
     ///
     /// The content described is very likely harmful.
-    public static var high: HarmProbability {
-      return self.init(kind: .high)
-    }
+    public static let high = HarmProbability(kind: .high)
 
     /// Returns the raw string representation of the `HarmProbability` value.
     ///
@@ -79,9 +71,8 @@ public struct SafetyRating: Equatable, Hashable, Sendable {
     /// > API](https://cloud.google.com/vertex-ai/docs/reference/rest/v1beta1/GenerateContentResponse#SafetyRating).
     public let rawValue: String
 
-    var unrecognizedValueMessageCode: VertexLog.MessageCode {
-      .generateContentResponseUnrecognizedHarmProbability
-    }
+    static let unrecognizedValueMessageCode =
+      VertexLog.MessageCode.generateContentResponseUnrecognizedHarmProbability
   }
 }
 
@@ -100,29 +91,19 @@ public struct SafetySetting {
     }
 
     /// Content with `.negligible` will be allowed.
-    public static var blockLowAndAbove: HarmBlockThreshold {
-      return self.init(kind: .blockLowAndAbove)
-    }
+    public static let blockLowAndAbove = HarmBlockThreshold(kind: .blockLowAndAbove)
 
     /// Content with `.negligible` and `.low` will be allowed.
-    public static var blockMediumAndAbove: HarmBlockThreshold {
-      return self.init(kind: .blockMediumAndAbove)
-    }
+    public static let blockMediumAndAbove = HarmBlockThreshold(kind: .blockMediumAndAbove)
 
     /// Content with `.negligible`, `.low`, and `.medium` will be allowed.
-    public static var blockOnlyHigh: HarmBlockThreshold {
-      return self.init(kind: .blockOnlyHigh)
-    }
+    public static let blockOnlyHigh = HarmBlockThreshold(kind: .blockOnlyHigh)
 
     /// All content will be allowed.
-    public static var blockNone: HarmBlockThreshold {
-      return self.init(kind: .blockNone)
-    }
+    public static let blockNone = HarmBlockThreshold(kind: .blockNone)
 
     /// Turn off the safety filter.
-    public static var off: HarmBlockThreshold {
-      return self.init(kind: .off)
-    }
+    public static let off = HarmBlockThreshold(kind: .off)
 
     let rawValue: String
   }
@@ -156,29 +137,19 @@ public struct HarmCategory: CodableProtoEnum, Hashable, Sendable {
   }
 
   /// Harassment content.
-  public static var harassment: HarmCategory {
-    return self.init(kind: .harassment)
-  }
+  public static let harassment = HarmCategory(kind: .harassment)
 
   /// Negative or harmful comments targeting identity and/or protected attributes.
-  public static var hateSpeech: HarmCategory {
-    return self.init(kind: .hateSpeech)
-  }
+  public static let hateSpeech = HarmCategory(kind: .hateSpeech)
 
   /// Contains references to sexual acts or other lewd content.
-  public static var sexuallyExplicit: HarmCategory {
-    return self.init(kind: .sexuallyExplicit)
-  }
+  public static let sexuallyExplicit = HarmCategory(kind: .sexuallyExplicit)
 
   /// Promotes or enables access to harmful goods, services, or activities.
-  public static var dangerousContent: HarmCategory {
-    return self.init(kind: .dangerousContent)
-  }
+  public static let dangerousContent = HarmCategory(kind: .dangerousContent)
 
   /// Content that may be used to harm civic integrity.
-  public static var civicIntegrity: HarmCategory {
-    return self.init(kind: .civicIntegrity)
-  }
+  public static let civicIntegrity = HarmCategory(kind: .civicIntegrity)
 
   /// Returns the raw string representation of the `HarmCategory` value.
   ///
@@ -186,9 +157,8 @@ public struct HarmCategory: CodableProtoEnum, Hashable, Sendable {
   /// > [REST API](https://cloud.google.com/vertex-ai/docs/reference/rest/v1beta1/HarmCategory).
   public let rawValue: String
 
-  var unrecognizedValueMessageCode: VertexLog.MessageCode {
-    .generateContentResponseUnrecognizedHarmCategory
-  }
+  static let unrecognizedValueMessageCode =
+    VertexLog.MessageCode.generateContentResponseUnrecognizedHarmCategory
 }
 
 // MARK: - Codable Conformances
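For reviewers, a minimal call-site sketch (hypothetical usage, not part of this diff): because each value keeps its name and type, code that already references the constants, including via implicit member syntax like `.auto` or `.blockMediumAndAbove`, should compile unchanged; only the declarations move from computed `static var` properties to stored `static let` constants.

```swift
import FirebaseVertexAI

// Hypothetical consumer code, assumed for illustration only.
// `FinishReason` remains Equatable, so the static constants can be compared directly.
func summarize(finishReason: FinishReason?) -> String {
  if finishReason == FinishReason.stop {
    return "Natural stop point."
  } else if finishReason == FinishReason.maxTokens {
    return "Token limit reached."
  } else if let finishReason {
    return "Stopped early: \(finishReason.rawValue)"
  } else {
    return "Generation still in progress."
  }
}

// Implicit member syntax keeps working wherever the type is known.
let mode: FunctionCallingConfig.Mode = .auto
let threshold: SafetySetting.HarmBlockThreshold = .blockMediumAndAbove
let category: HarmCategory = .dangerousContent
```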