
Commit 53e3b7c

Add docs, test and changelog entry

1 parent e1e9796 · commit 53e3b7c

File tree

3 files changed: +30 −2 lines changed

- FirebaseVertexAI/CHANGELOG.md
- FirebaseVertexAI/Sources/Safety.swift
- FirebaseVertexAI/Tests/Unit/GenerativeModelTests.swift


FirebaseVertexAI/CHANGELOG.md

Lines changed: 3 additions & 0 deletions
```diff
@@ -58,6 +58,9 @@
   `totalBillableCharacters` counts, where applicable. (#13813)
 - [added] Added a new `HarmCategory` `.civicIntegrity` for filtering content
   that may be used to harm civic integrity. (#13728)
+- [added] Added `probabilityScore`, `severity` and `severityScore` in
+  `SafetyRating` to provide more fine-grained detail on blocked responses.
+  (#13875)
 - [added] Added a new `HarmBlockThreshold` `.off`, which turns off the safety
   filter. (#13863)
 - [added] Added new `FinishReason` values `.blocklist`, `.prohibitedContent`,
```
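
For context, a minimal sketch of how the safety controls mentioned in these changelog entries might be configured. The model name, the `VertexAI.vertexAI()` entry point, and the `generativeModel(modelName:safetySettings:)` / `SafetySetting(harmCategory:threshold:)` signatures are assumptions based on the SDK's public API, not taken from this diff:

```swift
// A hedged sketch, not taken from this commit: configure the `.civicIntegrity`
// category (#13728) and the `.off` threshold (#13863) noted in the changelog.
// Assumes FirebaseApp.configure() has already been called.
import FirebaseVertexAI

func makeModelWithCustomSafetySettings() -> GenerativeModel {
  let safetySettings = [
    // Block only high-probability civic-integrity harms.
    SafetySetting(harmCategory: .civicIntegrity, threshold: .blockOnlyHigh),
    // Turn the safety filter off entirely for this category.
    SafetySetting(harmCategory: .dangerousContent, threshold: .off),
  ]
  // Model name is a placeholder; check the SDK docs for current model identifiers.
  return VertexAI.vertexAI().generativeModel(
    modelName: "gemini-1.5-flash",
    safetySettings: safetySettings
  )
}
```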

FirebaseVertexAI/Sources/Safety.swift

Lines changed: 23 additions & 1 deletion
```diff
@@ -26,17 +26,34 @@ public struct SafetyRating: Equatable, Hashable, Sendable {
 
   /// The model-generated probability that the content falls under the specified harm ``category``.
   ///
-  /// See ``HarmProbability`` for a list of possible values.
+  /// See ``HarmProbability`` for a list of possible values. This is a discretized representation
+  /// of the ``probabilityScore``.
   ///
   /// > Important: This does not indicate the severity of harm for a piece of content.
   public let probability: HarmProbability
 
+  /// The confidence score that the response is associated with the corresponding harm ``category``.
+  ///
+  /// The probability safety score is a confidence score between 0.0 and 1.0, rounded to one decimal
+  /// place; it is discretized into a ``HarmProbability`` in ``probability``. See [probability
+  /// scores](https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/configure-safety-filters#comparison_of_probability_scores_and_severity_scores)
+  /// in the Google Cloud documentation for more details.
   public let probabilityScore: Float
 
+  /// The severity reflects the magnitude of how harmful a model response might be.
+  ///
+  /// See ``HarmSeverity`` for a list of possible values. This is a discretized representation of
+  /// the ``severityScore``.
   public let severity: HarmSeverity
 
+  /// The severity score is the magnitude of how harmful a model response might be.
+  ///
+  /// The severity score ranges from 0.0 to 1.0, rounded to one decimal place; it is discretized
+  /// into a ``HarmSeverity`` in ``severity``. See [severity scores](https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/configure-safety-filters#comparison_of_probability_scores_and_severity_scores)
+  /// in the Google Cloud documentation for more details.
   public let severityScore: Float
 
+  /// If true, the response was blocked.
   public let blocked: Bool
 
   /// Initializes a new `SafetyRating` instance with the given category and probability.
@@ -92,6 +109,7 @@ public struct SafetyRating: Equatable, Hashable, Sendable {
       VertexLog.MessageCode.generateContentResponseUnrecognizedHarmProbability
     }
 
+  /// The magnitude of how harmful a model response might be for the respective ``HarmCategory``.
   public struct HarmSeverity: DecodableProtoEnum, Hashable, Sendable {
     enum Kind: String {
       case negligible = "HARM_SEVERITY_NEGLIGIBLE"
@@ -100,12 +118,16 @@ public struct SafetyRating: Equatable, Hashable, Sendable {
       case high = "HARM_SEVERITY_HIGH"
     }
 
+    /// Negligible level of harm severity.
     public static let negligible = HarmSeverity(kind: .negligible)
 
+    /// Low level of harm severity.
     public static let low = HarmSeverity(kind: .low)
 
+    /// Medium level of harm severity.
     public static let medium = HarmSeverity(kind: .medium)
 
+    /// High level of harm severity.
     public static let high = HarmSeverity(kind: .high)
 
     /// Returns the raw string representation of the `HarmSeverity` value.
```
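
With the new fields, a caller can log the raw 0.0–1.0 scores alongside their discretized counterparts. Below is a minimal sketch of such a helper; `logSafetyRatings` is an illustrative name, not part of the SDK:

```swift
// A hedged sketch: print each SafetyRating with both its discretized values
// (`probability`, `severity`) and the raw scores added in this commit.
import FirebaseVertexAI

func logSafetyRatings(_ ratings: [SafetyRating]) {
  for rating in ratings {
    print("\(rating.category): probability \(rating.probability) "
      + "(score: \(rating.probabilityScore)), severity \(rating.severity) "
      + "(score: \(rating.severityScore)), blocked: \(rating.blocked)")
    // Flag ratings that were blocked or rated at the highest severity level.
    if rating.blocked || rating.severity == .high {
      print("Content was blocked or rated high severity for \(rating.category).")
    }
  }
}
```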

FirebaseVertexAI/Tests/Unit/GenerativeModelTests.swift

Lines changed: 4 additions & 1 deletion
```diff
@@ -885,8 +885,11 @@ final class GenerativeModelTests: XCTestCase {
       for try await _ in stream {
         XCTFail("Content shouldn't be shown, this shouldn't happen.")
       }
-    } catch let GenerateContentError.responseStoppedEarly(reason, _) {
+    } catch let GenerateContentError.responseStoppedEarly(reason, response) {
       XCTAssertEqual(reason, .safety)
+      let candidate = try XCTUnwrap(response.candidates.first)
+      XCTAssertEqual(candidate.finishReason, reason)
+      XCTAssertTrue(candidate.safetyRatings.contains { $0.blocked })
       return
     }
 
```
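
Application code can follow the same pattern the test exercises: catch the early-stop error and use the new `blocked` flag to report which categories triggered blocking. A hedged sketch, where the function name and prompt handling are placeholders:

```swift
// A hedged sketch mirroring the test above: stream content and, if generation
// stops early for safety, report the blocked harm categories. Names here are
// placeholders, not SDK API.
import FirebaseVertexAI

func streamAndReportBlocking(model: GenerativeModel, prompt: String) async {
  do {
    for try await chunk in try model.generateContentStream(prompt) {
      print(chunk.text ?? "")
    }
  } catch let GenerateContentError.responseStoppedEarly(reason, response) {
    guard reason == .safety, let candidate = response.candidates.first else { return }
    // `blocked` is one of the SafetyRating fields surfaced by this change.
    let blockedCategories = candidate.safetyRatings.filter { $0.blocked }.map { $0.category }
    print("Generation stopped early (\(reason)); blocked categories: \(blockedCategories)")
  } catch {
    print("Unexpected error: \(error)")
  }
}
```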

0 commit comments
