# Manually reproduce what AutoModel / AutoTokenizer do for ChatGLM3-6B:
# load the config, build the model skeleton, construct the tokenizer from
# its tokenizer_config.json, then run a short chat session.
import json

from chatglm import ChatGLMForConditionalGeneration
from chatglm import ChatGLMTokenizer
from transformers import AutoConfig

pretrained_model_name_or_path = "../ZhipuAI/chatglm3-6b"

config, kwargs = AutoConfig.from_pretrained(
    pretrained_model_name_or_path,
    return_unused_kwargs=True,
    trust_remote_code=True,
    code_revision=None,
    _commit_hash=None,
)
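
# Added check (not in the original script): return_unused_kwargs=True makes
# the call also hand back whatever kwargs the config class did not consume;
# with only loader-level flags passed above this should be an empty dict.
print(kwargs)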

# Build the model skeleton from the config alone; at this point its weights
# are randomly initialized (the from_pretrained call below replaces them).
glm = ChatGLMForConditionalGeneration(config)
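
# Added check (not in the original script): confirm the skeleton was
# materialized by counting parameters; chatglm3-6b should land around 6e9.
n_params = sum(p.numel() for p in glm.parameters())
print(f"{n_params / 1e9:.2f}B parameters")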

# Rebuild the tokenizer by hand from tokenizer_config.json, mirroring what
# AutoTokenizer.from_pretrained does internally.
tokenizer_config_file = "./chatglm/tokenizer_config.json"
if tokenizer_config_file is not None:
    with open(tokenizer_config_file, encoding="utf-8") as tokenizer_config_handle:
        init_kwargs = json.load(tokenizer_config_handle)
    # Drop entries that describe the tokenizer rather than configure it.
    init_kwargs.pop("tokenizer_class", None)
    init_kwargs.pop("tokenizer_file", None)
    saved_init_inputs = init_kwargs.pop("init_inputs", ())
    init_inputs = saved_init_inputs
    # Point the constructor at the local SentencePiece vocabulary and clear
    # file paths that do not apply to this local setup.
    init_kwargs["vocab_file"] = "./chatglm/tokenizer.model"
    init_kwargs["added_tokens_file"] = None
    init_kwargs["special_tokens_map_file"] = None
    init_kwargs["tokenizer_file"] = None
    init_kwargs["name_or_path"] = pretrained_model_name_or_path
    tokenizer = ChatGLMTokenizer(*init_inputs, **init_kwargs)
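
# For comparison (assuming the checkpoint directory ships its custom
# tokenizer code), the stock loader collapses all of the above into one call:
# from transformers import AutoTokenizer
# tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, trust_remote_code=True)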

# from_pretrained is a classmethod, so this builds a fresh model carrying the
# pretrained weights; cast it to fp16 and move it to the GPU.
glm = ChatGLMForConditionalGeneration.from_pretrained(
    pretrained_model_name_or_path, config=config
).half().cuda()
glm = glm.eval()
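
# Rough VRAM budget (an added note): 6e9 parameters at 2 bytes each in fp16
# is about 12 GB of weights, before activations and the KV cache.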

response, history = glm.chat(tokenizer, "colin", history=[])
print(response)
response, history = glm.chat(tokenizer, "你好", history=history)  # "Hello"
print(response)
# response, history = glm.chat(tokenizer, "你是一个心理学专家,请问晚上睡不着应该怎么办", history=history)
# ("You are a psychology expert; what should I do if I cannot sleep at night?")
# print(response)
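
# ChatGLM checkpoints typically also expose stream_chat, which yields the
# growing response as tokens arrive (a sketch, assuming the same
# (response, history) convention as chat):
# for response, history in glm.stream_chat(tokenizer, "你好", history=[]):
#     pass  # response is the cumulative reply so far
# print(response)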

# Scratch plotting experiments (unused; `ron` is not defined in this file):
# import plotly_express as px
# px.imshow(ron)
# gapminder = px.data.gapminder()
# gapminder2007 = gapminder.query('year == 2007')
# px.scatter(gapminder2007, x='gdpPercap', y='lifeExp')

# Alternative: download the checkpoint with modelscope and let AutoModel /
# AutoTokenizer perform the same loading steps automatically.
# from modelscope import AutoTokenizer, AutoModel, snapshot_download
# model_dir = snapshot_download("ZhipuAI/chatglm3-6b", cache_dir="./chatglm", revision="v1.0.0")
# model = AutoModel.from_pretrained(model_dir, trust_remote_code=True).half().cuda()
# tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
# model = model.eval()
# response, history = model.chat(tokenizer, "colin", history=[])
# print(response)
# response, history = model.chat(tokenizer, "你好", history=history)  # "Hello"
# print(response)
# # response, history = model.chat(tokenizer, "你是一个心理学专家,请问晚上睡不着应该怎么办", history=history)
# # print(response)