{
"canonicalName": "Cloud Natural Language",
"revision": "20200801",
"version_module": true,
"protocol": "rest",
"baseUrl": "https://language.googleapis.com/",
"kind": "discovery#restDescription",
"version": "v1beta2",
"discoveryVersion": "v1",
"servicePath": "",
"description": "Provides natural language understanding technologies, such as sentiment analysis, entity recognition, entity sentiment analysis, and other text annotations, to developers.",
"rootUrl": "https://language.googleapis.com/",
"name": "language",
"id": "language:v1beta2",
"documentationLink": "https://cloud.google.com/natural-language/",
"title": "Cloud Natural Language API",
"batchPath": "batch",
"parameters": {
"upload_protocol": {
"location": "query",
"description": "Upload protocol for media (e.g. \"raw\", \"multipart\").",
"type": "string"
},
"uploadType": {
"description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").",
"location": "query",
"type": "string"
},
"prettyPrint": {
"description": "Returns response with indentations and line breaks.",
"type": "boolean",
"location": "query",
"default": "true"
},
"access_token": {
"type": "string",
"description": "OAuth access token.",
"location": "query"
},
"oauth_token": {
"type": "string",
"description": "OAuth 2.0 token for the current user.",
"location": "query"
},
"quotaUser": {
"description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.",
"location": "query",
"type": "string"
},
"callback": {
"description": "JSONP",
"type": "string",
"location": "query"
},
"$.xgafv": {
"enum": [
"1",
"2"
],
"description": "V1 error format.",
"location": "query",
"type": "string",
"enumDescriptions": [
"v1 error format",
"v2 error format"
]
},
"fields": {
"location": "query",
"description": "Selector specifying which fields to include in a partial response.",
"type": "string"
},
"key": {
"location": "query",
"description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.",
"type": "string"
},
"alt": {
"enum": [
"json",
"media",
"proto"
],
"type": "string",
"description": "Data format for response.",
"default": "json",
"location": "query",
"enumDescriptions": [
"Responses with Content-Type of application/json",
"Media download with context-dependent Content-Type",
"Responses with Content-Type of application/x-protobuf"
]
}
},
"icons": {
"x32": "http://www.google.com/images/icons/product/search-32.gif",
"x16": "http://www.google.com/images/icons/product/search-16.gif"
},
"ownerDomain": "google.com",
"basePath": "",
"resources": {
"documents": {
"methods": {
"analyzeEntities": {
"flatPath": "v1beta2/documents:analyzeEntities",
"parameters": {},
"scopes": [
"https://www.googleapis.com/auth/cloud-language",
"https://www.googleapis.com/auth/cloud-platform"
],
"description": "Finds named entities (currently proper names and common nouns) in the text along with entity types, salience, mentions for each entity, and other properties.",
"response": {
"$ref": "AnalyzeEntitiesResponse"
},
"httpMethod": "POST",
"request": {
"$ref": "AnalyzeEntitiesRequest"
},
"parameterOrder": [],
"path": "v1beta2/documents:analyzeEntities",
"id": "language.documents.analyzeEntities"
},
"analyzeEntitySentiment": {
"parameterOrder": [],
"httpMethod": "POST",
"scopes": [
"https://www.googleapis.com/auth/cloud-language",
"https://www.googleapis.com/auth/cloud-platform"
],
"flatPath": "v1beta2/documents:analyzeEntitySentiment",
"response": {
"$ref": "AnalyzeEntitySentimentResponse"
},
"description": "Finds entities, similar to AnalyzeEntities in the text and analyzes sentiment associated with each entity and its mentions.",
"parameters": {},
"path": "v1beta2/documents:analyzeEntitySentiment",
"id": "language.documents.analyzeEntitySentiment",
"request": {
"$ref": "AnalyzeEntitySentimentRequest"
}
},
"analyzeSyntax": {
"response": {
"$ref": "AnalyzeSyntaxResponse"
},
"id": "language.documents.analyzeSyntax",
"path": "v1beta2/documents:analyzeSyntax",
"parameters": {},
"description": "Analyzes the syntax of the text and provides sentence boundaries and tokenization along with part of speech tags, dependency trees, and other properties.",
"scopes": [
"https://www.googleapis.com/auth/cloud-language",
"https://www.googleapis.com/auth/cloud-platform"
],
"parameterOrder": [],
"flatPath": "v1beta2/documents:analyzeSyntax",
"request": {
"$ref": "AnalyzeSyntaxRequest"
},
"httpMethod": "POST"
},
"classifyText": {
"id": "language.documents.classifyText",
"path": "v1beta2/documents:classifyText",
"scopes": [
"https://www.googleapis.com/auth/cloud-language",
"https://www.googleapis.com/auth/cloud-platform"
],
"response": {
"$ref": "ClassifyTextResponse"
},
"parameterOrder": [],
"parameters": {},
"description": "Classifies a document into categories.",
"flatPath": "v1beta2/documents:classifyText",
"httpMethod": "POST",
"request": {
"$ref": "ClassifyTextRequest"
}
},
"annotateText": {
"response": {
"$ref": "AnnotateTextResponse"
},
"request": {
"$ref": "AnnotateTextRequest"
},
"description": "A convenience method that provides all syntax, sentiment, entity, and classification features in one call.",
"id": "language.documents.annotateText",
"flatPath": "v1beta2/documents:annotateText",
"parameters": {},
"path": "v1beta2/documents:annotateText",
"parameterOrder": [],
"scopes": [
"https://www.googleapis.com/auth/cloud-language",
"https://www.googleapis.com/auth/cloud-platform"
],
"httpMethod": "POST"
},
"analyzeSentiment": {
"parameterOrder": [],
"httpMethod": "POST",
"request": {
"$ref": "AnalyzeSentimentRequest"
},
"path": "v1beta2/documents:analyzeSentiment",
"response": {
"$ref": "AnalyzeSentimentResponse"
},
"parameters": {},
"id": "language.documents.analyzeSentiment",
"flatPath": "v1beta2/documents:analyzeSentiment",
"scopes": [
"https://www.googleapis.com/auth/cloud-language",
"https://www.googleapis.com/auth/cloud-platform"
],
"description": "Analyzes the sentiment of the provided text."
}
}
}
},
"fullyEncodeReservedExpansion": true,
"mtlsRootUrl": "https://language.mtls.googleapis.com/",
"auth": {
"oauth2": {
"scopes": {
"https://www.googleapis.com/auth/cloud-language": {
"description": "Apply machine learning models to reveal the structure and meaning of text"
},
"https://www.googleapis.com/auth/cloud-platform": {
"description": "View and manage your data across Google Cloud Platform services"
}
}
}
},
"ownerName": "Google",
"schemas": {
"TextSpan": {
"type": "object",
"description": "Represents an output piece of text.",
"properties": {
"content": {
"type": "string",
"description": "The content of the output text."
},
"beginOffset": {
"description": "The API calculates the beginning offset of the content in the original document according to the EncodingType specified in the API request.",
"type": "integer",
"format": "int32"
}
},
"id": "TextSpan"
},
"Token": {
"properties": {
"dependencyEdge": {
"description": "Dependency tree parse for this token.",
"$ref": "DependencyEdge"
},
"text": {
"$ref": "TextSpan",
"description": "The token text."
},
"partOfSpeech": {
"description": "Parts of speech tag for this token.",
"$ref": "PartOfSpeech"
},
"lemma": {
"type": "string",
"description": "[Lemma](https://en.wikipedia.org/wiki/Lemma_%28morphology%29) of the token."
}
},
"description": "Represents the smallest syntactic building block of the text.",
"type": "object",
"id": "Token"
},
"EntityMention": {
"id": "EntityMention",
"type": "object",
"description": "Represents a mention for an entity in the text. Currently, proper noun mentions are supported.",
"properties": {
"type": {
"type": "string",
"description": "The type of the entity mention.",
"enum": [
"TYPE_UNKNOWN",
"PROPER",
"COMMON"
],
"enumDescriptions": [
"Unknown",
"Proper name",
"Common noun (or noun compound)"
]
},
"text": {
"description": "The mention text.",
"$ref": "TextSpan"
},
"sentiment": {
"description": "For calls to AnalyzeEntitySentiment or if AnnotateTextRequest.Features.extract_entity_sentiment is set to true, this field will contain the sentiment expressed for this mention of the entity in the provided document.",
"$ref": "Sentiment"
}
}
},
"AnalyzeEntitiesResponse": {
"description": "The entity analysis response message.",
"id": "AnalyzeEntitiesResponse",
"properties": {
"language": {
"type": "string",
"description": "The language of the text, which will be the same as the language specified in the request or, if not specified, the automatically-detected language. See Document.language field for more details."
},
"entities": {
"description": "The recognized entities in the input document.",
"type": "array",
"items": {
"$ref": "Entity"
}
}
},
"type": "object"
},
"AnalyzeSentimentResponse": {
"properties": {
"sentences": {
"description": "The sentiment for all the sentences in the document.",
"type": "array",
"items": {
"$ref": "Sentence"
}
},
"documentSentiment": {
"description": "The overall sentiment of the input document.",
"$ref": "Sentiment"
},
"language": {
"type": "string",
"description": "The language of the text, which will be the same as the language specified in the request or, if not specified, the automatically-detected language. See Document.language field for more details."
}
},
"type": "object",
"id": "AnalyzeSentimentResponse",
"description": "The sentiment analysis response message."
},
"AnalyzeEntitiesRequest": {
"id": "AnalyzeEntitiesRequest",
"description": "The entity analysis request message.",
"type": "object",
"properties": {
"encodingType": {
"enum": [
"NONE",
"UTF8",
"UTF16",
"UTF32"
],
"type": "string",
"enumDescriptions": [
"If `EncodingType` is not specified, encoding-dependent information (such as `begin_offset`) will be set at `-1`.",
"Encoding-dependent information (such as `begin_offset`) is calculated based on the UTF-8 encoding of the input. C++ and Go are examples of languages that use this encoding natively.",
"Encoding-dependent information (such as `begin_offset`) is calculated based on the UTF-16 encoding of the input. Java and JavaScript are examples of languages that use this encoding natively.",
"Encoding-dependent information (such as `begin_offset`) is calculated based on the UTF-32 encoding of the input. Python is an example of a language that uses this encoding natively."
],
"description": "The encoding type used by the API to calculate offsets."
},
"document": {
"$ref": "Document",
"description": "Required. Input document."
}
}
},
"AnalyzeSentimentRequest": {
"description": "The sentiment analysis request message.",
"id": "AnalyzeSentimentRequest",
"properties": {
"document": {
"$ref": "Document",
"description": "Required. Input document."
},
"encodingType": {
"enum": [
"NONE",
"UTF8",
"UTF16",
"UTF32"
],
"description": "The encoding type used by the API to calculate sentence offsets for the sentence sentiment.",
"enumDescriptions": [
"If `EncodingType` is not specified, encoding-dependent information (such as `begin_offset`) will be set at `-1`.",
"Encoding-dependent information (such as `begin_offset`) is calculated based on the UTF-8 encoding of the input. C++ and Go are examples of languages that use this encoding natively.",
"Encoding-dependent information (such as `begin_offset`) is calculated based on the UTF-16 encoding of the input. Java and JavaScript are examples of languages that use this encoding natively.",
"Encoding-dependent information (such as `begin_offset`) is calculated based on the UTF-32 encoding of the input. Python is an example of a language that uses this encoding natively."
],
"type": "string"
}
},
"type": "object"
},
"ClassifyTextResponse": {
"description": "The document classification response message.",
"id": "ClassifyTextResponse",
"properties": {
"categories": {
"items": {
"$ref": "ClassificationCategory"
},
"type": "array",
"description": "Categories representing the input document."
}
},
"type": "object"
},
"Document": {
"id": "Document",
"description": "################################################################ # Represents the input to API methods.",
"type": "object",
"properties": {
"type": {
"type": "string",
"enumDescriptions": [
"The content type is not specified.",
"Plain text",
"HTML"
],
"enum": [
"TYPE_UNSPECIFIED",
"PLAIN_TEXT",
"HTML"
],
"description": "Required. If the type is not set or is `TYPE_UNSPECIFIED`, returns an `INVALID_ARGUMENT` error."
},
"language": {
"type": "string",
"description": "The language of the document (if not specified, the language is automatically detected). Both ISO and BCP-47 language codes are accepted. [Language Support](https://cloud.google.com/natural-language/docs/languages) lists currently supported languages for each API method. If the language (either specified by the caller or automatically detected) is not supported by the called API method, an `INVALID_ARGUMENT` error is returned."
},
"content": {
"description": "The content of the input in string format. Cloud audit logging exempt since it is based on user data.",
"type": "string"
},
"referenceWebUri": {
"type": "string",
"description": "The web URI where the document comes from. This URI is not used for fetching the content, but as a hint for analyzing the document."
},
"boilerplateHandling": {
"enum": [
"BOILERPLATE_HANDLING_UNSPECIFIED",
"SKIP_BOILERPLATE",
"KEEP_BOILERPLATE"
],
"type": "string",
"description": "Indicates how detected boilerplate(e.g. advertisements, copyright declarations, banners) should be handled for this document. If not specified, boilerplate will be treated the same as content.",
"enumDescriptions": [
"The boilerplate handling is not specified.",
"Do not analyze detected boilerplate. Reference web URI is required for detecting boilerplate.",
"Treat boilerplate the same as content."
]
},
"gcsContentUri": {
"type": "string",
"description": "The Google Cloud Storage URI where the file content is located. This URI must be of the form: gs://bucket_name/object_name. For more details, see https://cloud.google.com/storage/docs/reference-uris. NOTE: Cloud Storage object versioning is not supported."
}
}
},
"AnnotateTextRequest": {
"properties": {
"document": {
"$ref": "Document",
"description": "Required. Input document."
},
"features": {
"$ref": "Features",
"description": "Required. The enabled features."
},
"encodingType": {
"enum": [
"NONE",
"UTF8",
"UTF16",
"UTF32"
],
"description": "The encoding type used by the API to calculate offsets.",
"enumDescriptions": [
"If `EncodingType` is not specified, encoding-dependent information (such as `begin_offset`) will be set at `-1`.",
"Encoding-dependent information (such as `begin_offset`) is calculated based on the UTF-8 encoding of the input. C++ and Go are examples of languages that use this encoding natively.",
"Encoding-dependent information (such as `begin_offset`) is calculated based on the UTF-16 encoding of the input. Java and JavaScript are examples of languages that use this encoding natively.",
"Encoding-dependent information (such as `begin_offset`) is calculated based on the UTF-32 encoding of the input. Python is an example of a language that uses this encoding natively."
],
"type": "string"
}
},
"description": "The request message for the text annotation API, which can perform multiple analysis types (sentiment, entities, and syntax) in one call.",
"type": "object",
"id": "AnnotateTextRequest"
},
"ClassificationCategory": {
"description": "Represents a category returned from the text classifier.",
"id": "ClassificationCategory",
"type": "object",
"properties": {
"confidence": {
"format": "float",
"type": "number",
"description": "The classifier's confidence of the category. Number represents how certain the classifier is that this category represents the given text."
},
"name": {
"description": "The name of the category representing the document, from the [predefined taxonomy](https://cloud.google.com/natural-language/docs/categories).",
"type": "string"
}
}
},
"Entity": {
"type": "object",
"id": "Entity",
"description": "Represents a phrase in the text that is a known entity, such as a person, an organization, or location. The API associates information, such as salience and mentions, with entities.",
"properties": {
"name": {
"type": "string",
"description": "The representative name for the entity."
},
"type": {
"type": "string",
"enum": [
"UNKNOWN",
"PERSON",
"LOCATION",
"ORGANIZATION",
"EVENT",
"WORK_OF_ART",
"CONSUMER_GOOD",
"OTHER",
"PHONE_NUMBER",
"ADDRESS",
"DATE",
"NUMBER",
"PRICE"
],
"description": "The entity type.",
"enumDescriptions": [
"Unknown",
"Person",
"Location",
"Organization",
"Event",
"Artwork",
"Consumer product",
"Other types of entities",
"Phone number The metadata lists the phone number, formatted according to local convention, plus whichever additional elements appear in the text: * `number` - the actual number, broken down into sections as per local convention * `national_prefix` - country code, if detected * `area_code` - region or area code, if detected * `extension` - phone extension (to be dialed after connection), if detected",
"Address The metadata identifies the street number and locality plus whichever additional elements appear in the text: * `street_number` - street number * `locality` - city or town * `street_name` - street/route name, if detected * `postal_code` - postal code, if detected * `country` - country, if detected\u003c * `broad_region` - administrative area, such as the state, if detected * `narrow_region` - smaller administrative area, such as county, if detected * `sublocality` - used in Asian addresses to demark a district within a city, if detected",
"Date The metadata identifies the components of the date: * `year` - four digit year, if detected * `month` - two digit month number, if detected * `day` - two digit day number, if detected",
"Number The metadata is the number itself.",
"Price The metadata identifies the `value` and `currency`."
]
},
"salience": {
"description": "The salience score associated with the entity in the [0, 1.0] range. The salience score for an entity provides information about the importance or centrality of that entity to the entire document text. Scores closer to 0 are less salient, while scores closer to 1.0 are highly salient.",
"type": "number",
"format": "float"
},
"sentiment": {
"description": "For calls to AnalyzeEntitySentiment or if AnnotateTextRequest.Features.extract_entity_sentiment is set to true, this field will contain the aggregate sentiment expressed for this entity in the provided document.",
"$ref": "Sentiment"
},
"metadata": {
"type": "object",
"description": "Metadata associated with the entity. For most entity types, the metadata is a Wikipedia URL (`wikipedia_url`) and Knowledge Graph MID (`mid`), if they are available. For the metadata associated with other entity types, see the Type table below.",
"additionalProperties": {
"type": "string"
}
},
"mentions": {
"items": {
"$ref": "EntityMention"
},
"description": "The mentions of this entity in the input document. The API currently supports proper noun mentions.",
"type": "array"
}
}
},
"Status": {
"description": "The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors).",
"id": "Status",
"type": "object",
"properties": {
"message": {
"description": "A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.",
"type": "string"
},
"details": {
"description": "A list of messages that carry the error details. There is a common set of message types for APIs to use.",
"type": "array",
"items": {
"type": "object",
"additionalProperties": {
"description": "Properties of the object. Contains field @type with type URL.",
"type": "any"
}
}
},
"code": {
"type": "integer",
"description": "The status code, which should be an enum value of google.rpc.Code.",
"format": "int32"
}
}
},
"AnalyzeEntitySentimentRequest": {
"type": "object",
"properties": {
"document": {
"$ref": "Document",
"description": "Required. Input document."
},
"encodingType": {
"description": "The encoding type used by the API to calculate offsets.",
"enum": [
"NONE",
"UTF8",
"UTF16",
"UTF32"
],
"enumDescriptions": [
"If `EncodingType` is not specified, encoding-dependent information (such as `begin_offset`) will be set at `-1`.",
"Encoding-dependent information (such as `begin_offset`) is calculated based on the UTF-8 encoding of the input. C++ and Go are examples of languages that use this encoding natively.",
"Encoding-dependent information (such as `begin_offset`) is calculated based on the UTF-16 encoding of the input. Java and JavaScript are examples of languages that use this encoding natively.",
"Encoding-dependent information (such as `begin_offset`) is calculated based on the UTF-32 encoding of the input. Python is an example of a language that uses this encoding natively."
],
"type": "string"
}
},
"id": "AnalyzeEntitySentimentRequest",
"description": "The entity-level sentiment analysis request message."
},
"AnalyzeSyntaxRequest": {
"properties": {
"encodingType": {
"enum": [
"NONE",
"UTF8",
"UTF16",
"UTF32"
],
"description": "The encoding type used by the API to calculate offsets.",
"enumDescriptions": [
"If `EncodingType` is not specified, encoding-dependent information (such as `begin_offset`) will be set at `-1`.",
"Encoding-dependent information (such as `begin_offset`) is calculated based on the UTF-8 encoding of the input. C++ and Go are examples of languages that use this encoding natively.",
"Encoding-dependent information (such as `begin_offset`) is calculated based on the UTF-16 encoding of the input. Java and JavaScript are examples of languages that use this encoding natively.",
"Encoding-dependent information (such as `begin_offset`) is calculated based on the UTF-32 encoding of the input. Python is an example of a language that uses this encoding natively."
],
"type": "string"
},
"document": {
"description": "Required. Input document.",
"$ref": "Document"
}
},
"description": "The syntax analysis request message.",
"id": "AnalyzeSyntaxRequest",
"type": "object"
},
"AnnotateTextResponse": {
"properties": {
"tokens": {
"description": "Tokens, along with their syntactic information, in the input document. Populated if the user enables AnnotateTextRequest.Features.extract_syntax.",
"type": "array",
"items": {
"$ref": "Token"
}
},
"language": {
"type": "string",
"description": "The language of the text, which will be the same as the language specified in the request or, if not specified, the automatically-detected language. See Document.language field for more details."
},
"categories": {
"description": "Categories identified in the input document.",
"items": {
"$ref": "ClassificationCategory"
},
"type": "array"
},
"documentSentiment": {
"description": "The overall sentiment for the document. Populated if the user enables AnnotateTextRequest.Features.extract_document_sentiment.",
"$ref": "Sentiment"
},
"sentences": {
"type": "array",
"description": "Sentences in the input document. Populated if the user enables AnnotateTextRequest.Features.extract_syntax.",
"items": {
"$ref": "Sentence"
}
},
"entities": {
"items": {
"$ref": "Entity"
},
"type": "array",
"description": "Entities, along with their semantic information, in the input document. Populated if the user enables AnnotateTextRequest.Features.extract_entities."
}
},
"type": "object",
"id": "AnnotateTextResponse",
"description": "The text annotations response message."
},
"PartOfSpeech": {
"type": "object",
"id": "PartOfSpeech",
"description": "Represents part of speech information for a token.",
"properties": {
"reciprocity": {
"enumDescriptions": [
"Reciprocity is not applicable in the analyzed language or is not predicted.",
"Reciprocal",
"Non-reciprocal"
],
"description": "The grammatical reciprocity.",
"type": "string",
"enum": [
"RECIPROCITY_UNKNOWN",
"RECIPROCAL",
"NON_RECIPROCAL"
]
},
"form": {
"type": "string",
"enumDescriptions": [
"Form is not applicable in the analyzed language or is not predicted.",
"Adnomial",
"Auxiliary",
"Complementizer",
"Final ending",
"Gerund",
"Realis",
"Irrealis",
"Short form",
"Long form",
"Order form",
"Specific form"
],
"description": "The grammatical form.",
"enum": [
"FORM_UNKNOWN",
"ADNOMIAL",
"AUXILIARY",
"COMPLEMENTIZER",
"FINAL_ENDING",
"GERUND",
"REALIS",
"IRREALIS",
"SHORT",
"LONG",
"ORDER",
"SPECIFIC"
]
},
"tag": {
"type": "string",
"enum": [
"UNKNOWN",
"ADJ",
"ADP",
"ADV",
"CONJ",
"DET",
"NOUN",
"NUM",
"PRON",
"PRT",
"PUNCT",
"VERB",
"X",
"AFFIX"
],
"description": "The part of speech tag.",
"enumDescriptions": [
"Unknown",
"Adjective",
"Adposition (preposition and postposition)",
"Adverb",
"Conjunction",
"Determiner",
"Noun (common and proper)",
"Cardinal number",
"Pronoun",
"Particle or other function word",
"Punctuation",
"Verb (all tenses and modes)",
"Other: foreign words, typos, abbreviations",
"Affix"
]
},
"mood": {
"enum": [
"MOOD_UNKNOWN",
"CONDITIONAL_MOOD",
"IMPERATIVE",
"INDICATIVE",
"INTERROGATIVE",
"JUSSIVE",
"SUBJUNCTIVE"
],
"type": "string",
"description": "The grammatical mood.",
"enumDescriptions": [
"Mood is not applicable in the analyzed language or is not predicted.",
"Conditional",
"Imperative",
"Indicative",
"Interrogative",
"Jussive",
"Subjunctive"
]
},
"tense": {
"enumDescriptions": [
"Tense is not applicable in the analyzed language or is not predicted.",
"Conditional",
"Future",
"Past",
"Present",
"Imperfect",
"Pluperfect"
],
"type": "string",
"enum": [
"TENSE_UNKNOWN",
"CONDITIONAL_TENSE",
"FUTURE",
"PAST",
"PRESENT",
"IMPERFECT",
"PLUPERFECT"
],
"description": "The grammatical tense."
},
"person": {
"description": "The grammatical person.",
"enum": [
"PERSON_UNKNOWN",
"FIRST",
"SECOND",
"THIRD",
"REFLEXIVE_PERSON"
],
"type": "string",
"enumDescriptions": [
"Person is not applicable in the analyzed language or is not predicted.",
"First",
"Second",
"Third",
"Reflexive"
]
},
"voice": {
"description": "The grammatical voice.",
"enumDescriptions": [
"Voice is not applicable in the analyzed language or is not predicted.",
"Active",
"Causative",
"Passive"
],
"type": "string",
"enum": [
"VOICE_UNKNOWN",
"ACTIVE",
"CAUSATIVE",
"PASSIVE"
]
},
"gender": {
"type": "string",
"description": "The grammatical gender.",
"enum": [
"GENDER_UNKNOWN",
"FEMININE",
"MASCULINE",
"NEUTER"
],
"enumDescriptions": [
"Gender is not applicable in the analyzed language or is not predicted.",
"Feminine",
"Masculine",
"Neuter"
]
},
"proper": {
"description": "The grammatical properness.",
"type": "string",
"enum": [
"PROPER_UNKNOWN",
"PROPER",
"NOT_PROPER"
],
"enumDescriptions": [
"Proper is not applicable in the analyzed language or is not predicted.",
"Proper",
"Not proper"
]
},
"number": {
"type": "string",
"enumDescriptions": [
"Number is not applicable in the analyzed language or is not predicted.",
"Singular",
"Plural",
"Dual"
],
"description": "The grammatical number.",
"enum": [
"NUMBER_UNKNOWN",
"SINGULAR",
"PLURAL",
"DUAL"
]
},
"case": {
"enumDescriptions": [
"Case is not applicable in the analyzed language or is not predicted.",
"Accusative",
"Adverbial",
"Complementive",
"Dative",
"Genitive",
"Instrumental",
"Locative",
"Nominative",
"Oblique",
"Partitive",
"Prepositional",
"Reflexive",
"Relative",
"Vocative"
],
"description": "The grammatical case.",
"type": "string",
"enum": [
"CASE_UNKNOWN",
"ACCUSATIVE",
"ADVERBIAL",
"COMPLEMENTIVE",
"DATIVE",
"GENITIVE",
"INSTRUMENTAL",
"LOCATIVE",
"NOMINATIVE",
"OBLIQUE",
"PARTITIVE",
"PREPOSITIONAL",
"REFLEXIVE_CASE",
"RELATIVE_CASE",
"VOCATIVE"
]
},
"aspect": {
"enumDescriptions": [
"Aspect is not applicable in the analyzed language or is not predicted.",
"Perfective",
"Imperfective",
"Progressive"
],
"enum": [
"ASPECT_UNKNOWN",
"PERFECTIVE",
"IMPERFECTIVE",
"PROGRESSIVE"
],
"description": "The grammatical aspect.",
"type": "string"
}
}
},
"Sentence": {
"description": "Represents a sentence in the input document.",
"id": "Sentence",
"type": "object",
"properties": {
"text": {
"description": "The sentence text.",
"$ref": "TextSpan"
},
"sentiment": {
"description": "For calls to AnalyzeSentiment or if AnnotateTextRequest.Features.extract_document_sentiment is set to true, this field will contain the sentiment for the sentence.",
"$ref": "Sentiment"
}
}
},
"Sentiment": {
"properties": {
"magnitude": {
"type": "number",
"description": "A non-negative number in the [0, +inf) range, which represents the absolute magnitude of sentiment regardless of score (positive or negative).",
"format": "float"
},
"score": {
"format": "float",
"type": "number",
"description": "Sentiment score between -1.0 (negative sentiment) and 1.0 (positive sentiment)."
}
},
"type": "object",
"description": "Represents the feeling associated with the entire text or entities in the text. Next ID: 6",
"id": "Sentiment"
},
"AnalyzeSyntaxResponse": {
"id": "AnalyzeSyntaxResponse",
"properties": {
"sentences": {
"items": {
"$ref": "Sentence"
},
"description": "Sentences in the input document.",
"type": "array"
},
"language": {
"type": "string",
"description": "The language of the text, which will be the same as the language specified in the request or, if not specified, the automatically-detected language. See Document.language field for more details."
},
"tokens": {
"description": "Tokens, along with their syntactic information, in the input document.",
"items": {
"$ref": "Token"
},
"type": "array"
}
},
"description": "The syntax analysis response message.",
"type": "object"
},
"DependencyEdge": {
"description": "Represents dependency parse tree information for a token.",
"type": "object",
"id": "DependencyEdge",
"properties": {
"label": {
"type": "string",
"enum": [
"UNKNOWN",
"ABBREV",
"ACOMP",
"ADVCL",
"ADVMOD",
"AMOD",
"APPOS",
"ATTR",
"AUX",
"AUXPASS",
"CC",
"CCOMP",
"CONJ",
"CSUBJ",
"CSUBJPASS",
"DEP",
"DET",
"DISCOURSE",
"DOBJ",
"EXPL",
"GOESWITH",
"IOBJ",
"MARK",
"MWE",
"MWV",
"NEG",
"NN",
"NPADVMOD",
"NSUBJ",
"NSUBJPASS",
"NUM",
"NUMBER",
"P",
"PARATAXIS",
"PARTMOD",
"PCOMP",
"POBJ",
"POSS",
"POSTNEG",
"PRECOMP",
"PRECONJ",
"PREDET",
"PREF",
"PREP",
"PRONL",
"PRT",
"PS",
"QUANTMOD",
"RCMOD",
"RCMODREL",
"RDROP",
"REF",
"REMNANT",
"REPARANDUM",
"ROOT",
"SNUM",
"SUFF",
"TMOD",
"TOPIC",
"VMOD",
"VOCATIVE",
"XCOMP",
"SUFFIX",
"TITLE",
"ADVPHMOD",
"AUXCAUS",
"AUXVV",
"DTMOD",
"FOREIGN",
"KW",
"LIST",
"NOMC",
"NOMCSUBJ",
"NOMCSUBJPASS",
"NUMC",
"COP",
"DISLOCATED",
"ASP",
"GMOD",
"GOBJ",
"INFMOD",
"MES",
"NCOMP"
],
"description": "The parse label for the token.",
"enumDescriptions": [
"Unknown",
"Abbreviation modifier",
"Adjectival complement",
"Adverbial clause modifier",
"Adverbial modifier",
"Adjectival modifier of an NP",
"Appositional modifier of an NP",
"Attribute dependent of a copular verb",
"Auxiliary (non-main) verb",
"Passive auxiliary",
"Coordinating conjunction",
"Clausal complement of a verb or adjective",
"Conjunct",
"Clausal subject",
"Clausal passive subject",
"Dependency (unable to determine)",
"Determiner",
"Discourse",
"Direct object",
"Expletive",
"Goes with (part of a word in a text not well edited)",
"Indirect object",
"Marker (word introducing a subordinate clause)",
"Multi-word expression",
"Multi-word verbal expression",
"Negation modifier",
"Noun compound modifier",
"Noun phrase used as an adverbial modifier",
"Nominal subject",
"Passive nominal subject",
"Numeric modifier of a noun",
"Element of compound number",
"Punctuation mark",
"Parataxis relation",
"Participial modifier",
"The complement of a preposition is a clause",
"Object of a preposition",
"Possession modifier",
"Postverbal negative particle",
"Predicate complement",
"Preconjunt",
"Predeterminer",
"Prefix",
"Prepositional modifier",
"The relationship between a verb and verbal morpheme",
"Particle",
"Associative or possessive marker",
"Quantifier phrase modifier",
"Relative clause modifier",
"Complementizer in relative clause",
"Ellipsis without a preceding predicate",
"Referent",
"Remnant",
"Reparandum",
"Root",
"Suffix specifying a unit of number",
"Suffix",
"Temporal modifier",
"Topic marker",
"Clause headed by an infinite form of the verb that modifies a noun",
"Vocative",
"Open clausal complement",
"Name suffix",
"Name title",
"Adverbial phrase modifier",
"Causative auxiliary",
"Helper auxiliary",
"Rentaishi (Prenominal modifier)",
"Foreign words",
"Keyword",
"List for chains of comparable items",
"Nominalized clause",
"Nominalized clausal subject",
"Nominalized clausal passive",
"Compound of numeric modifier",
"Copula",
"Dislocated relation (for fronted/topicalized elements)",
"Aspect marker",
"Genitive modifier",
"Genitive object",
"Infinitival modifier",
"Measure",
"Nominal complement of a noun"
]
},
"headTokenIndex": {
"type": "integer",
"description": "Represents the head of this token in the dependency tree. This is the index of the token which has an arc going to this token. The index is the position of the token in the array of tokens returned by the API method. If this token is a root token, then the `head_token_index` is its own index.",
"format": "int32"
}
}
},
"Features": {
"type": "object",
"id": "Features",
"properties": {
"classifyText": {
"description": "Classify the full document into categories. If this is true, the API will use the default model which classifies into a [predefined taxonomy](https://cloud.google.com/natural-language/docs/categories).",
"type": "boolean"
},
"extractEntitySentiment": {
"type": "boolean",
"description": "Extract entities and their associated sentiment."
},
"extractSyntax": {
"description": "Extract syntax information.",
"type": "boolean"
},
"extractDocumentSentiment": {
"description": "Extract document-level sentiment.",
"type": "boolean"
},
"extractEntities": {
"description": "Extract entities.",
"type": "boolean"
}
},
"description": "All available features for sentiment, syntax, and semantic analysis. Setting each one to true will enable that specific analysis for the input. Next ID: 10"
},
"ClassifyTextRequest": {
"properties": {
"document": {
"description": "Required. Input document.",
"$ref": "Document"
}
},
"type": "object",
"id": "ClassifyTextRequest",
"description": "The document classification request message."
},
"AnalyzeEntitySentimentResponse": {
"id": "AnalyzeEntitySentimentResponse",
"description": "The entity-level sentiment analysis response message.",
"properties": {
"entities": {
"type": "array",
"items": {
"$ref": "Entity"
},
"description": "The recognized entities in the input document with associated sentiments."
},
"language": {
"description": "The language of the text, which will be the same as the language specified in the request or, if not specified, the automatically-detected language. See Document.language field for more details.",
"type": "string"
}
},
"type": "object"
}
}
}
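
A minimal usage sketch (not part of the discovery document itself): the following Python call exercises the analyzeSentiment method defined above, assembling the URL from "baseUrl" and the method "path" and following the AnalyzeSentimentRequest/AnalyzeSentimentResponse schemas. The API key value and the sample text are placeholders; authentication could equally use an OAuth 2.0 token as described in the "auth" section.

import json
import urllib.request

# Endpoint assembled from "baseUrl" and the analyzeSentiment "path" above.
API_KEY = "YOUR_API_KEY"  # placeholder; corresponds to the "key" query parameter
url = ("https://language.googleapis.com/v1beta2/documents:analyzeSentiment"
       f"?key={API_KEY}")

# Body follows the AnalyzeSentimentRequest schema: a Document plus encodingType.
body = {
    "document": {"type": "PLAIN_TEXT", "content": "I love the new release."},
    "encodingType": "UTF8",
}

req = urllib.request.Request(
    url,
    data=json.dumps(body).encode("utf-8"),
    headers={"Content-Type": "application/json"},
    method="POST",
)
with urllib.request.urlopen(req) as resp:
    result = json.load(resp)  # AnalyzeSentimentResponse

# documentSentiment carries score (-1.0 to 1.0) and magnitude ([0, +inf)).
print(result["documentSentiment"])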