[Vertex AI] Test SDK with v1 API instead of v1beta #862
Annotations
4 errors
Run IntegrationTests: FirebaseVertexAI/Tests/TestApp/Tests/Integration/IntegrationTests.swift#L245
testCountTokens_image_fileData_requiresUserAuth_wrongUser_permissionDenied, failed - Expected to throw an error, got response: CountTokensResponse(totalTokens: 266, totalBillableCharacters: Optional(35))
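All three failures follow the same pattern: a request referencing a Cloud Storage file that security rules allow only one specific user to read is expected to be rejected when a different user is signed in, but the v1 endpoint returned a normal response. As a rough illustration only, a wrong-user countTokens check might be shaped like the sketch below; the FileDataPart / TextPart / ModelContent names match the SDK's public surface, while the model name, Storage URI, credentials, and setup are placeholders and not the test app's actual fixtures or helpers.

```swift
import FirebaseAuth
import FirebaseVertexAI
import XCTest

final class WrongUserPermissionSketch: XCTestCase {
  // Placeholder Storage object that security rules allow only one specific
  // user to read; the real test app uses its own fixture path.
  let gatedImageURI = "gs://<project>.appspot.com/vertexai/user-auth/image.png"

  func testCountTokens_wrongUser_expectPermissionDenied() async throws {
    // Assumes FirebaseApp.configure() has already run in the test app.
    // Sign in as a user the Storage rules do NOT authorize for this file.
    _ = try await Auth.auth().signIn(withEmail: "wrong-user@example.com", password: "...")

    // Placeholder model name; the PR under test switches the SDK to the v1 API.
    let model = VertexAI.vertexAI().generativeModel(modelName: "gemini-1.5-flash")
    let prompt = ModelContent(role: "user", parts: [
      FileDataPart(uri: gatedImageURI, mimeType: "image/png"),
      TextPart("What color is the image?"),
    ])

    do {
      let response = try await model.countTokens([prompt])
      XCTFail("Expected to throw an error, got response: \(response)")
    } catch {
      // Expected path: the backend rejects the request for the wrong user
      // (permission denied), so reaching here is a pass.
    }
  }
}
```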
Run IntegrationTests: FirebaseVertexAI/Tests/TestApp/Tests/Integration/IntegrationTests.swift#L95
testGenerateContent_image_fileData_requiresUserAuth_wrongUser_permissionDenied, failed - Expected to throw an error, got response: GenerateContentResponse(candidates: [FirebaseVertexAI.Candidate(content: FirebaseVertexAI.ModelContent(role: Optional("model"), internalParts: [FirebaseVertexAI.ModelContent.InternalPart.text("The color is pink. \n")]), safetyRatings: [FirebaseVertexAI.SafetyRating(category: FirebaseVertexAI.HarmCategory(rawValue: "HARM_CATEGORY_HATE_SPEECH"), probability: FirebaseVertexAI.SafetyRating.HarmProbability(rawValue: "NEGLIGIBLE"), probabilityScore: 0.05493164, severity: FirebaseVertexAI.SafetyRating.HarmSeverity(rawValue: "HARM_SEVERITY_NEGLIGIBLE"), severityScore: 0.1953125, blocked: false), FirebaseVertexAI.SafetyRating(category: FirebaseVertexAI.HarmCategory(rawValue: "HARM_CATEGORY_DANGEROUS_CONTENT"), probability: FirebaseVertexAI.SafetyRating.HarmProbability(rawValue: "NEGLIGIBLE"), probabilityScore: 0.056640625, severity: FirebaseVertexAI.SafetyRating.HarmSeverity(rawValue: "HARM_SEVERITY_NEGLIGIBLE"), severityScore: 0.08154297, blocked: false), FirebaseVertexAI.SafetyRating(category: FirebaseVertexAI.HarmCategory(rawValue: "HARM_CATEGORY_HARASSMENT"), probability: FirebaseVertexAI.SafetyRating.HarmProbability(rawValue: "NEGLIGIBLE"), probabilityScore: 0.057373047, severity: FirebaseVertexAI.SafetyRating.HarmSeverity(rawValue: "HARM_SEVERITY_NEGLIGIBLE"), severityScore: 0.075683594, blocked: false), FirebaseVertexAI.SafetyRating(category: FirebaseVertexAI.HarmCategory(rawValue: "HARM_CATEGORY_SEXUALLY_EXPLICIT"), probability: FirebaseVertexAI.SafetyRating.HarmProbability(rawValue: "NEGLIGIBLE"), probabilityScore: 0.048828125, severity: FirebaseVertexAI.SafetyRating.HarmSeverity(rawValue: "HARM_SEVERITY_NEGLIGIBLE"), severityScore: 0.095214844, blocked: false)], finishReason: Optional(FirebaseVertexAI.FinishReason(rawValue: "STOP")), citationMetadata: nil)], promptFeedback: nil, usageMetadata: Optional(FirebaseVertexAI.GenerateContentResponse.UsageMetadata(promptTokenCount: 271, candidatesTokenCount: 7, totalTokenCount: 278)))
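The non-streaming generateContent case has the same shape; only the call and the failure message differ. A minimal sketch under the same assumptions as above, with `model` and `prompt` standing in for the wrong-user GenerativeModel and gated-file ModelContent built there:

```swift
import FirebaseVertexAI
import XCTest

// Hypothetical helper mirroring the countTokens sketch above.
func assertGenerateContentDenied(model: GenerativeModel, prompt: ModelContent) async {
  do {
    let response = try await model.generateContent([prompt])
    XCTFail("Expected to throw an error, got response: \(response)")
  } catch {
    // Expected: the backend rejects the unauthorized user's request.
  }
}
```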
Run IntegrationTests: FirebaseVertexAI/Tests/TestApp/Tests/Integration/IntegrationTests.swift#L160
testGenerateContentStream_image_fileData_requiresUserAuth_wrongUser_permissionDenied, failed - Expected to throw an error, got response(s): [FirebaseVertexAI.GenerateContentResponse(candidates: [FirebaseVertexAI.Candidate(content: FirebaseVertexAI.ModelContent(role: Optional("model"), internalParts: [FirebaseVertexAI.ModelContent.InternalPart.text("The")]), safetyRatings: [], finishReason: nil, citationMetadata: nil)], promptFeedback: nil, usageMetadata: Optional(FirebaseVertexAI.GenerateContentResponse.UsageMetadata(promptTokenCount: 271, candidatesTokenCount: 1, totalTokenCount: 272))), FirebaseVertexAI.GenerateContentResponse(candidates: [FirebaseVertexAI.Candidate(content: FirebaseVertexAI.ModelContent(role: Optional("model"), internalParts: [FirebaseVertexAI.ModelContent.InternalPart.text(" color is pink. \n")]), safetyRatings: [FirebaseVertexAI.SafetyRating(category: FirebaseVertexAI.HarmCategory(rawValue: "HARM_CATEGORY_HATE_SPEECH"), probability: FirebaseVertexAI.SafetyRating.HarmProbability(rawValue: "NEGLIGIBLE"), probabilityScore: 0.05493164, severity: FirebaseVertexAI.SafetyRating.HarmSeverity(rawValue: "HARM_SEVERITY_NEGLIGIBLE"), severityScore: 0.1953125, blocked: false), FirebaseVertexAI.SafetyRating(category: FirebaseVertexAI.HarmCategory(rawValue: "HARM_CATEGORY_DANGEROUS_CONTENT"), probability: FirebaseVertexAI.SafetyRating.HarmProbability(rawValue: "NEGLIGIBLE"), probabilityScore: 0.056640625, severity: FirebaseVertexAI.SafetyRating.HarmSeverity(rawValue: "HARM_SEVERITY_NEGLIGIBLE"), severityScore: 0.08154297, blocked: false), FirebaseVertexAI.SafetyRating(category: FirebaseVertexAI.HarmCategory(rawValue: "HARM_CATEGORY_HARASSMENT"), probability: FirebaseVertexAI.SafetyRating.HarmProbability(rawValue: "NEGLIGIBLE"), probabilityScore: 0.057373047, severity: FirebaseVertexAI.SafetyRating.HarmSeverity(rawValue: "HARM_SEVERITY_NEGLIGIBLE"), severityScore: 0.075683594, blocked: false), FirebaseVertexAI.SafetyRating(category: FirebaseVertexAI.HarmCategory(rawValue: "HARM_CATEGORY_SEXUALLY_EXPLICIT"), probability: FirebaseVertexAI.SafetyRating.HarmProbability(rawValue: "NEGLIGIBLE"), probabilityScore: 0.048828125, severity: FirebaseVertexAI.SafetyRating.HarmSeverity(rawValue: "HARM_SEVERITY_NEGLIGIBLE"), severityScore: 0.095214844, blocked: false)], finishReason: Optional(FirebaseVertexAI.FinishReason(rawValue: "STOP")), citationMetadata: nil)], promptFeedback: nil, usageMetadata: Optional(FirebaseVertexAI.GenerateContentResponse.UsageMetadata(promptTokenCount: 271, candidatesTokenCount: 7, totalTokenCount: 278)))]
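The streaming case differs only in that the error is expected to surface while iterating the stream; if iteration completes without throwing, the collected responses are reported, which is what the annotation above shows. A sketch under the same assumptions, again with `model` and `prompt` as hypothetical stand-ins:

```swift
import FirebaseVertexAI
import XCTest

// Hypothetical streaming counterpart: iterate the stream and fail if it
// finishes without throwing.
func assertGenerateContentStreamDenied(model: GenerativeModel, prompt: ModelContent) async {
  var responses = [GenerateContentResponse]()
  do {
    let stream = try model.generateContentStream([prompt])
    for try await response in stream {
      responses.append(response)
    }
    XCTFail("Expected to throw an error, got response(s): \(responses)")
  } catch {
    // Expected: permission denied before or during streaming.
  }
}
```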
Run IntegrationTests
Process completed with exit code 1.