kuldeep0204 committed
Commit 5f7f9c2 · verified · 1 parent: c9ec5ea

Create nlp_pipeline.py

Files changed (1): nlp_pipeline.py (+66, -0)
nlp_pipeline.py ADDED
@@ -0,0 +1,66 @@
# nlp_pipeline.py
from transformers import pipeline
from sentence_transformers import SentenceTransformer

# Load lighter/CPU-friendly models for the HF Space (device=-1 forces CPU)
SUMMARIZER = pipeline("summarization", model="sshleifer/distilbart-cnn-12-6", device=-1)
# NER model (token-classification); "simple" aggregation merges subword
# tokens into whole entity spans
NER = pipeline("ner", model="dbmdz/bert-large-cased-finetuned-conll03-english", aggregation_strategy="simple", device=-1)
EMBED_MODEL = SentenceTransformer("sentence-transformers/all-MiniLM-L6-v2")  # small & fast
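# Note: all three models are downloaded from the Hugging Face Hub on first
# use and then cached, so the first request in a fresh Space can be slow.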

def summarize(text, max_length=120):
    # Short texts fit in a single pass
    if len(text) < 800:
        s = SUMMARIZER(text, max_length=max_length, min_length=40, do_sample=False)
        return s[0]["summary_text"]
    # Naive character-based chunking: 700 characters stays well under the
    # model's 1024-token input limit for typical English text
    parts = []
    chunk_size = 700
    for i in range(0, len(text), chunk_size):
        chunk = text[i:i + chunk_size]
        parts.append(SUMMARIZER(chunk, max_length=60, min_length=20, do_sample=False)[0]["summary_text"])
    return " ".join(parts)

def extract_entities(text):
    # Note: inputs longer than the NER model's 512-token limit are not chunked here
    ner = NER(text)
    # With aggregation, each item looks like {'entity_group', 'score', 'word'};
    # fall back to 'entity' if aggregation is unavailable
    grouped = {}
    for ent in ner:
        key = ent.get("entity_group") or ent.get("entity")
        grouped.setdefault(key, []).append({"text": ent["word"], "score": float(ent["score"])})
    return grouped
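# Illustrative return shape (hypothetical example values, for readability):
#   {"PER": [{"text": "Ada Lovelace", "score": 0.998}],
#    "LOC": [{"text": "London", "score": 0.995}]}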

def embed_text(text):
    # Unit-normalized MiniLM embedding, returned as a NumPy array
    return EMBED_MODEL.encode(text, convert_to_numpy=True, normalize_embeddings=True)
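# Because normalize_embeddings=True yields unit vectors, cosine similarity
# between two texts reduces to a plain dot product, e.g. (hypothetical usage):
#   sim = float(embed_text(text_a).dot(embed_text(text_b)))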

def get_sentence_provenance(sentences, entities):
    # Map each entity string to the sentences that contain it (case-insensitive)
    prov = {}
    for t in entities:
        prov[t] = []
        for s in sentences:
            if t.lower() in s.lower():
                prov[t].append(s)
    return prov
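# Caveat: plain substring matching can over-match short entity strings
# (e.g. "US" also matches inside "USAGE"); acceptable as a first pass.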

def process_document(doc):
    text = doc["text"]
    summary = summarize(text)
    entities_grouped = extract_entities(text)
    # Flatten to a set of unique entity strings
    entity_texts = set()
    for v in entities_grouped.values():
        for item in v:
            entity_texts.add(item["text"])
    provenance = get_sentence_provenance(doc["sentences"], entity_texts)
    embedding = embed_text(summary)  # index the summary embedding for compactness
    tags = []  # optional: simple tags from the most frequent NER labels
    return {
        "summary": summary,
        "entities": entities_grouped,
        "entity_texts": list(entity_texts),
        "provenance": provenance,
        "embedding": embedding,
        "tags": tags,
    }
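
A minimal usage sketch, not part of the commit: process_document expects a dict with "text" and pre-split "sentences", so the regex splitter below is a hypothetical stand-in for whatever the Space does upstream, and the file name and demo text are invented for illustration.

# example_usage.py (hypothetical, not part of this commit)
import re
from nlp_pipeline import process_document

text = "Ada Lovelace collaborated with Charles Babbage on the Analytical Engine in London. Her notes contain what many consider the first published algorithm."
doc = {
    "text": text,
    "sentences": re.split(r"(?<=[.!?])\s+", text),  # naive stand-in splitter
}
result = process_document(doc)
print(result["summary"])
print(result["entity_texts"])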