Witllm/demo.py

import json
import torch
from chatglm import ChatGLMForConditionalGeneration
from chatglm import ChatGLMTokenizer
from tools import show
from transformers import AutoConfig
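# Fix the RNG seeds so sampled generations are reproducible across runs.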
seed = 1234
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
pretrained_model_name_or_path = "../ZhipuAI/chatglm3-6b"
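# Load the model config from the local checkout; trust_remote_code permits the
# repo's custom ChatGLM modeling code, and return_unused_kwargs also hands back
# any kwargs the config class did not consume.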
config, kwargs = AutoConfig.from_pretrained(
    pretrained_model_name_or_path,
    return_unused_kwargs=True,
    trust_remote_code=True,
    code_revision=None,
    _commit_hash=None,
)
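# Build an uninitialized model skeleton from the config; the pretrained
# weights are attached below via from_pretrained.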
glm = ChatGLMForConditionalGeneration(config)
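# Rebuild the tokenizer by hand from its JSON config instead of going through
# AutoTokenizer, pointing it at the local SentencePiece vocab file.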
tokenizer_config_file = "./chatglm/tokenizer_config.json"
if tokenizer_config_file is not None:
    with open(tokenizer_config_file, encoding="utf-8") as tokenizer_config_handle:
        init_kwargs = json.load(tokenizer_config_handle)
    init_kwargs.pop("tokenizer_class", None)
    init_kwargs.pop("tokenizer_file", None)
    saved_init_inputs = init_kwargs.pop("init_inputs", ())
    init_inputs = saved_init_inputs
init_kwargs["vocab_file"] = "./chatglm/tokenizer.model"
init_kwargs["added_tokens_file"] = None
init_kwargs["special_tokens_map_file"] = None
init_kwargs["tokenizer_file"] = None
init_kwargs["name_or_path"] = pretrained_model_name_or_path
tokenizer = ChatGLMTokenizer(*init_inputs, **init_kwargs)
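# from_pretrained is a classmethod, so this returns a new model loaded with the
# checkpoint weights (replacing the skeleton above), cast to fp16 on the GPU.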
glm = glm.from_pretrained(pretrained_model_name_or_path).half().cuda()
glm = glm.eval()
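# Smoke test: the canned greeting should reproduce ChatGLM3-6B's stock reply.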
query = "你好"
response, history = glm.chat(tokenizer, query, history=[])
print(response)
if response[1:] != " 你好👋!我是人工智能助手 ChatGLM3-6B很高兴见到你欢迎问我任何问题":
    raise ValueError(f"unexpected response: {response!r}")
# query = "colin"
# response, history = glm.chat(tokenizer, query, history=history)
# print(response)
# if response[1:] != " Hello! How can I assist you today":
#     raise ValueError(f"unexpected response: {response!r}")
# response, history = glm.chat(tokenizer, "你是一个心理学专家,请问晚上睡不着应该怎么办", history=history)
# print(response)
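# Scratch plotly experiments (left disabled; `ron` is not defined here).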
# import plotly_express as px
# px.imshow(ron)
# gapminder = px.data.gapminder()
# gapminder2007 = gapminder.query('year == 2007')
# px.scatter(gapminder2007, x='gdpPercap', y='lifeExp')
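# Alternative path: fetch the checkpoint via ModelScope and load the model and
# tokenizer with trust_remote_code instead of the manual setup above.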
# from modelscope import AutoTokenizer, AutoModel, snapshot_download
# model_dir = snapshot_download("ZhipuAI/chatglm3-6b", cache_dir="./chatglm", revision="v1.0.0")
# model = AutoModel.from_pretrained(model_dir, trust_remote_code=True).half().cuda()
# tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
# model = model.eval()
# response, history = model.chat(tokenizer, "colin", history=[])
# print(response)
# response, history = model.chat(tokenizer, "你好", history=history)
# print(response)
# # response, history = model.chat(tokenizer, "你是一个心理学专家,请问晚上睡不着应该怎么办", history=history)
# # print(response)