# Copyright 2017-2022 John Snow Labs
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains classes for XlmRoBertaForTokenClassification."""
from sparknlp.common import *
class XlmRoBertaForTokenClassification(AnnotatorModel,
                                       HasCaseSensitiveProperties,
                                       HasBatchedAnnotate,
                                       HasEngine,
                                       HasMaxSentenceLengthLimit):
"""XlmRoBertaForTokenClassification can load XLM-RoBERTa Models with a token
classification head on top (a linear layer on top of the hidden-states
output) e.g. for Named-Entity-Recognition (NER) tasks.
Pretrained models can be loaded with :meth:`.pretrained` of the companion
object:
>>> token_classifier = XlmRoBertaForTokenClassification.pretrained() \\
... .setInputCols(["token", "document"]) \\
... .setOutputCol("label")
The default model is ``"mpnet_base_token_classifier"``, if no
name is provided.
For available pretrained models please see the `Models Hub
<https://sparknlp.org/models?task=Named+Entity+Recognition>`__.
To see which models are compatible and how to import them see
`Import Transformers into Spark NLP 🚀
<https://github.com/JohnSnowLabs/spark-nlp/discussions/5669>`_.
    ====================== ======================
    Input Annotation types Output Annotation type
    ====================== ======================
    ``DOCUMENT, TOKEN``    ``NAMED_ENTITY``
    ====================== ======================

    Parameters
    ----------
    batchSize
        Batch size. Larger values allow faster processing but require more
        memory, by default 8
    caseSensitive
        Whether token matching is case sensitive, by default True
    configProtoBytes
        ConfigProto from tensorflow, serialized into byte array.
    maxSentenceLength
        Max sentence length to process, by default 128

    Examples
    --------
>>> import sparknlp
>>> from sparknlp.base import *
>>> from sparknlp.annotator import *
>>> from pyspark.ml import Pipeline
>>> documentAssembler = DocumentAssembler() \\
... .setInputCol("text") \\
... .setOutputCol("document")
>>> tokenizer = Tokenizer() \\
... .setInputCols(["document"]) \\
... .setOutputCol("token")
>>> tokenClassifier = XlmRoBertaForTokenClassification.pretrained() \\
... .setInputCols(["token", "document"]) \\
... .setOutputCol("label") \\
... .setCaseSensitive(True)
>>> pipeline = Pipeline().setStages([
... documentAssembler,
... tokenizer,
... tokenClassifier
... ])
    >>> data = spark.createDataFrame([["John Lennon was born in London and lived in Paris. My name is Sarah and I live in London"]]).toDF("text")
>>> result = pipeline.fit(data).transform(data)
>>> result.select("label.result").show(truncate=False)
+------------------------------------------------------------------------------------+
|result |
+------------------------------------------------------------------------------------+
|[B-PER, I-PER, O, O, O, B-LOC, O, O, O, B-LOC, O, O, O, O, B-PER, O, O, O, O, B-LOC]|
+------------------------------------------------------------------------------------+
"""
name = "XlmRoBertaForTokenClassification"
inputAnnotatorTypes = [AnnotatorType.DOCUMENT, AnnotatorType.TOKEN]
outputAnnotatorType = AnnotatorType.NAMED_ENTITY
configProtoBytes = Param(Params._dummy(),
"configProtoBytes",
"ConfigProto from tensorflow, serialized into byte array. Get with config_proto.SerializeToString()",
TypeConverters.toListInt)

    def getClasses(self):
"""
Returns labels used to train this model
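
        Examples
        --------
        A minimal sketch, assuming a pretrained model has been downloaded;
        the returned labels depend on the model, e.g. IOB tags such as
        ``B-PER`` or ``O`` for a model trained on CoNLL-2003:

        >>> token_classifier = XlmRoBertaForTokenClassification.pretrained()
        >>> print(token_classifier.getClasses())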
"""
return self._call_java("getClasses")

    def setConfigProtoBytes(self, b):
"""Sets configProto from tensorflow, serialized into byte array.
Parameters
----------
b : List[int]
ConfigProto from tensorflow, serialized into byte array
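
        Examples
        --------
        A minimal sketch, assuming a TensorFlow installation; in TensorFlow
        2.x ``ConfigProto`` is exposed through the v1 compatibility API:

        >>> import tensorflow as tf
        >>> config = tf.compat.v1.ConfigProto()
        >>> config.gpu_options.allow_growth = True
        >>> token_classifier.setConfigProtoBytes(list(config.SerializeToString()))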
"""
return self._set(configProtoBytes=b)

    @keyword_only
    def __init__(self, classname="com.johnsnowlabs.nlp.annotators.classifier.dl.XlmRoBertaForTokenClassification",
                 java_model=None):
super(XlmRoBertaForTokenClassification, self).__init__(
classname=classname,
java_model=java_model
)
self._setDefault(
batchSize=8,
maxSentenceLength=128,
caseSensitive=True
)

    @staticmethod
    def loadSavedModel(folder, spark_session):
"""Loads a locally saved model.
Parameters
----------
folder : str
Folder of the saved model
spark_session : pyspark.sql.SparkSession
The current SparkSession

        Returns
        -------
XlmRoBertaForTokenClassification
The restored model
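
        Examples
        --------
        A minimal sketch; ``"/tmp/xlm_roberta_for_token_classification"`` is a
        hypothetical folder holding a model exported as described in `Import
        Transformers into Spark NLP 🚀
        <https://github.com/JohnSnowLabs/spark-nlp/discussions/5669>`_:

        >>> token_classifier = XlmRoBertaForTokenClassification.loadSavedModel(
        ...     "/tmp/xlm_roberta_for_token_classification",
        ...     spark
        ... )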
"""
from sparknlp.internal import _XlmRoBertaTokenClassifierLoader
jModel = _XlmRoBertaTokenClassifierLoader(folder, spark_session._jsparkSession)._java_obj
return XlmRoBertaForTokenClassification(java_model=jModel)

    @staticmethod
    def pretrained(name="xlm_roberta_base_token_classifier_conll03", lang="en", remote_loc=None):
"""Downloads and loads a pretrained model.
Parameters
----------
name : str, optional
Name of the pretrained model, by default
"mpnet_base_token_classifier"
lang : str, optional
Language of the pretrained model, by default "en"
remote_loc : str, optional
Optional remote address of the resource, by default None. Will use
Spark NLPs repositories otherwise.
Returns
-------
XlmRoBertaForTokenClassification
The restored model
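
        Examples
        --------
        A minimal sketch; the model name passed here is the CoNLL-2003 variant
        from the Models Hub and is shown only for illustration:

        >>> token_classifier = XlmRoBertaForTokenClassification \\
        ...     .pretrained("xlm_roberta_base_token_classifier_conll03", "en") \\
        ...     .setInputCols(["token", "document"]) \\
        ...     .setOutputCol("label")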
"""
from sparknlp.pretrained import ResourceDownloader
return ResourceDownloader.downloadModel(XlmRoBertaForTokenClassification, name, lang, remote_loc)