- Are there any resources, apart from the NLTK cookbook and NLP with Python, that I can use?
You can consider using spaCy
to train a custom NER model on your own data. Here is an example, adapted from this thread, that updates an existing model on a small training set so it learns to detect a new entity label, ANIMAL
. The code has been fixed and updated (spaCy v3 API) for easier reading.
import random
import spacy
from spacy.training import Example
LABEL = 'ANIMAL'
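# each training example pairs a text with character-offset entity spans: (start, end, label)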
TRAIN_DATA = [
("Horses are too tall and they pretend to care about your feelings", {'entities': [(0, 6, LABEL)]}),
("Do they bite?", {'entities': []}),
("horses are too tall and they pretend to care about your feelings", {'entities': [(0, 6, LABEL)]}),
("horses pretend to care about your feelings", {'entities': [(0, 6, LABEL)]}),
("they pretend to care about your feelings, those horses", {'entities': [(48, 54, LABEL)]}),
("horses?", {'entities': [(0, 6, LABEL)]})
]
nlp = spacy.load('en_core_web_sm') # load existing spaCy model
ner = nlp.get_pipe('ner')
ner.add_label(LABEL)
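# create an optimizer; nlp.update will adjust the existing model's weights in place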
optimizer = nlp.create_optimizer()
# get names of other pipes to disable them during training
other_pipes = [pipe for pipe in nlp.pipe_names if pipe != "ner"]
with nlp.disable_pipes(*other_pipes):  # only train NER
    for itn in range(20):
        random.shuffle(TRAIN_DATA)
        losses = {}
        for text, annotations in TRAIN_DATA:
            doc = nlp.make_doc(text)
            example = Example.from_dict(doc, annotations)
            nlp.update([example], drop=0.35, sgd=optimizer, losses=losses)
        print(losses)
# test the trained model
test_text = 'Do you like horses?'
doc = nlp(test_text)
print("Entities in '%s'" % test_text)
for ent in doc.ents:
    print(ent.label_, " -- ", ent.text)
Here is the output (per-iteration NER losses, then the entities detected in the test sentence):
{'ner': 9.60289144264557}
{'ner': 8.875474230820478}
{'ner': 6.370401408220459}
{'ner': 6.687456469517201}
...
{'ner': 1.3796682589133492e-05}
{'ner': 1.7709562613218738e-05}
Entities in 'Do you like horses?'
ANIMAL -- horses
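If the results look good, you will probably want to persist the updated pipeline rather than retrain it on every run. A minimal sketch, assuming nlp is still the trained pipeline from the snippet above (the directory name animal_ner_model is just a placeholder):
nlp.to_disk('animal_ner_model')   # save the updated pipeline to a directory

# later, in a fresh session, load it back like any other spaCy model
import spacy
nlp_custom = spacy.load('animal_ner_model')
doc = nlp_custom('I saw two horses near the fence')
for ent in doc.ents:
    print(ent.label_, ' -- ', ent.text)
spacy.load accepts a path to a saved pipeline directory as well as a package name, so the reloaded model can be used exactly like en_core_web_sm.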