关于 challenge:基于知识图谱的电影知识问答系统——训练 TF-IDF 向量算法和朴素贝叶斯分类器,在 Neo4j 中查询
# 基于知识图谱的电影知识问答系统:训练 TF-IDF 向量算法和朴素贝叶斯分类器、在 Neo4j 中查询

## 1. 项目介绍

- 训练 TF-IDF 向量算法和朴素贝叶斯分类器,预测用户文本所属的问题类别
- 使用分词库解析用户文本词性,提取关键词
- 结合关键词与问题类别,在 Neo4j 中查询问题的答案
- 通过 Flask 对外提供 RESTful API
- 前端交互与答案展示

## 2. 项目实操教学

### 2.1 数据集简介

```json
{
  "introduction_by_movie": [
    "nm简介",
    "nm剧情简介",
    "nm的内容是什么",
    "nm讲了什么",
    "nm讲了什么故事",
    "nm演了什么",
    "nm的故事梗概是什么",
    "nm的剧情简介是什么",
    "nm的内容简介是什么",
    "nm的剧情介绍是什么",
    "nm的情节是什么",
    "nm的主要情节是什么"
  ],
  "rating_by_movie": [
    "nm的评分是多少",
    "nm得了多少分",
    "nm的评分有多少",
    "nm的评分",
    "nm得分是多少",
    "nm的分数是",
    "nm电影分数是多少",
    "nm电影评分",
    "nm评分",
    "nm的分数是多少",
    "nm这部电影的评分是多少"
  ],
  "release_date_by_movie": [
    "nm上映时间",
    "nm定档时间",
    "nm的上映时间是什么时候",
    "nm的首映时间是什么时候",
    "nm什么时候上映",
    "nm什么时候首映",
    "最早什么时候能看到nm",
    "nm什么时候在影院上线",
    "什么时候可以在影院看到nm",
    "nm什么时候在影院放映",
    "nm什么时候首播"
  ],
```

(数据集节选,完整文件还包含其他问题类别。)

### 2.2 用户词典

```
Forrest Gump nm
Kill Bill: Vol. 1 nm
英雄 nm
Miami Vice nm
Indiana Jones and the Temple of Doom nm
卧虎藏龙 nm
Pirates of the Caribbean: At World's End nm
Kill Bill: Vol. 2 nm
The Matrix Reloaded nm
The Matrix Revolutions nm
Harry Potter and the Chamber of Secrets nm
Harry Potter and the Prisoner of Azkaban nm
Harry Potter and the Goblet of Fire nm
Harry Potter and the Order of the Phoenix nm
The Last Emperor nm
Harry Potter and the Half-Blood Prince nm
花样年华 nm
2046 nm
Lethal Weapon 4 nm
Hannibal Rising nm
TMNT nm
무사 nm
Anna and the King nm
满城尽带黄金甲 nm
```

### 2.3 环境依赖

- jieba
- neo4j
- python-dotenv
- scikit-learn
- flask
- flask-cors
- gunicorn

### 2.4 部分代码展示

```python
import os

from neo4j import GraphDatabase


class Database:
    """Neo4j database access layer.

    Opens one driver/session pair for the lifetime of the object and
    exposes simple single-value / multi-value query helpers.
    """

    def __init__(self):
        # Connection settings come from the environment (see the .env file);
        # a missing variable raises KeyError early, which is intentional.
        uri = os.environ["DATABASE_URI"]
        user = os.environ["DATABASE_USER"]
        password = os.environ["DATABASE_PASSWORD"]
        try:
            self._driver = GraphDatabase.driver(uri, auth=(user, password))
            self._session = self._driver.session()
        except Exception as e:
            raise Exception("数据库连接失败") from e

    def close(self):
        """Close the session and the underlying driver."""
        try:
            self._session.close()
            self._driver.close()
        except Exception as e:
            raise Exception("数据库断开失败") from e

    def find_one(self, query: str, **parameters):
        """Run *query* and return the first value of the first record,
        or None when the query matches nothing."""
        result = self._session.run(query, parameters).single()
        return result.value() if result else None

    def find_many(self, query: str, **parameters):
        """Run *query* and return the first value of every record as a list."""
        return self._session.run(query, parameters).value()


if __name__ == "__main__":
    import dotenv

    dotenv.load_dotenv()
    database = Database()
    # NOTE: the relationship type must be written ':BELONGS_TO';
    # '[BELONGS_TO]' would declare a variable and match ANY relationship type.
    genres = database.find_many(
        """
        MATCH (m:Movie)-[:BELONGS_TO]->(g:Genre)
        WHERE m.name = $movie_name
        RETURN g.name
        """,
        movie_name="卧虎藏龙",
    )
    database.close()
    print(genres)
```

```python
import json
import os

import jieba
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB

TRAIN_DATASET_PATH = os.path.join("data", "train.json")

jieba.setLogLevel("ERROR")


def normalize(sentence: str):
    """Tokenize *sentence* with jieba and join tokens with spaces,
    the whitespace-delimited form TfidfVectorizer expects."""
    return " ".join(jieba.cut(sentence))


class BaseClassifier:
    """Low-level classifier: TF-IDF vectorization + multinomial naive Bayes."""

    def __init__(self):
        self._vectorizer = TfidfVectorizer()
        # Small smoothing factor: the training set is tiny, so heavy
        # smoothing would wash out the discriminative keywords.
        self._classifier = MultinomialNB(alpha=0.01)

    def _train(self, x: list, y: list):
        """Fit the vectorizer on *x* and train the classifier against *y*."""
        X = self._vectorizer.fit_transform(x).toarray()
        self._classifier.fit(X, y)

    def _predict(self, x: list):
        """Vectorize *x* with the already-fitted vocabulary and predict labels."""
        X = self._vectorizer.transform(x).toarray()
        return self._classifier.predict(X)


class Classifier(BaseClassifier):
    """Question classifier.

    Assigns a question to one of the known categories based on the
    keywords that appear in it. Trains itself from the JSON dataset
    at construction time.
    """

    def __init__(self):
        super().__init__()
        questions, labels = Classifier._read_train_dataset()
        self._train(questions, labels)

    def classify(self, sentence: str):
        """Return the predicted category label for *sentence*."""
        question = normalize(sentence)
        return self._predict([question])[0]

    @staticmethod
    def _read_train_dataset():
        """Load the {label: [sentence, ...]} dataset and flatten it into
        parallel (questions, labels) lists, tokenizing each sentence."""
        with open(TRAIN_DATASET_PATH, "r", encoding="utf-8") as fr:
            train_dataset: dict[str, list[str]] = json.load(fr)
        questions = []
        labels = []
        for label, sentences in train_dataset.items():
            questions.extend(normalize(sentence) for sentence in sentences)
            labels.extend(label for _ in sentences)
        return questions, labels


if __name__ == "__main__":
    classifier = Classifier()
    while True:
        sentence = input("请输入问题:").strip()
        label = classifier.classify(sentence)
        print(f"问题分类:{label}")
```

### 2.5 运行项目

在 backend 目录下添加环境变量文件 .env。 ...