Hi, I am having trouble using Llama 2 for task solving. The error report looks like this:
```
Loop Round 0
Traceback (most recent call last):
  File "/home/likai/AgentVerse/agentverse_command/main_tasksolving_cli.py", line 35, in <module>
    cli_main()
  File "/home/likai/AgentVerse/agentverse_command/main_tasksolving_cli.py", line 31, in cli_main
    agentversepipeline.run()
  File "/home/likai/AgentVerse/agentverse/tasksolving.py", line 66, in run
    result, advice, previous_plan, logs, success = asyncio.run(
  File "/home/likai/.conda/envs/agentverse/lib/python3.9/asyncio/runners.py", line 44, in run
    return loop.run_until_complete(main)
  File "/home/likai/.conda/envs/agentverse/lib/python3.9/asyncio/base_events.py", line 642, in run_until_complete
    return future.result()
  File "/home/likai/AgentVerse/agentverse/environments/tasksolving_env/basic.py", line 54, in step
    agents = await self.rule.role_assign(
  File "/home/likai/AgentVerse/agentverse/environments/tasksolving_env/rules/base.py", line 82, in role_assign
    agents = await self.role_assigner.astep(
  File "/home/likai/AgentVerse/agentverse/environments/tasksolving_env/rules/role_assigner/role_description.py", line 31, in astep
    roles = await role_assigner.astep(advice, task_description, len(group_members))
  File "/home/likai/AgentVerse/agentverse/agents/tasksolving_agent/role_assigner.py", line 34, in astep
    prepend_prompt, append_prompt, prompt_token = self.get_all_prompts(
  File "/home/likai/AgentVerse/agentverse/agents/base.py", line 68, in get_all_prompts
    num_prepend_prompt_token = count_string_tokens(
  File "/home/likai/AgentVerse/agentverse/llms/utils/token_counter.py", line 11, in count_string_tokens
    return len(tiktoken.encoding_for_model(model).encode(prompt))
  File "/home/likai/.conda/envs/agentverse/lib/python3.9/site-packages/tiktoken/model.py", line 97, in encoding_for_model
    return get_encoding(encoding_name_for_model(model_name))
  File "/home/likai/.conda/envs/agentverse/lib/python3.9/site-packages/tiktoken/model.py", line 84, in encoding_name_for_model
    raise KeyError(
KeyError: 'Could not automatically map llama-2-7b-chat-hf to a tokeniser. Please use `tiktoken.get_encoding` to explicitly get the tokeniser you expect.'
```
Could you let me know whether this happens because local LLM support is still under development, or whether I have made a mistake in how I am using local LLMs? Thank you!
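
For reference, the failure reproduces outside AgentVerse with tiktoken alone, since tiktoken only maps OpenAI model names:

```python
import tiktoken

tiktoken.encoding_for_model("llama-2-7b-chat-hf")  # raises the same KeyError
```

One workaround I am considering is patching `count_string_tokens` in `agentverse/llms/utils/token_counter.py` to fall back to an explicit encoding when the model name is unknown. This is only a sketch: I am guessing the exact signature from the traceback, and `cl100k_base` is an OpenAI tokenizer, so the count would only be approximate for Llama 2.

```python
import tiktoken


def count_string_tokens(prompt: str, model: str) -> int:
    """Count tokens, falling back when tiktoken does not know the model."""
    try:
        encoding = tiktoken.encoding_for_model(model)
    except KeyError:
        # tiktoken cannot map local models such as llama-2-7b-chat-hf,
        # so use an explicit encoding, as the error message suggests.
        # NOTE: cl100k_base is an OpenAI tokenizer; for Llama 2 this only
        # approximates the true token count.
        encoding = tiktoken.get_encoding("cl100k_base")
    return len(encoding.encode(prompt))
```

Would a fallback like this be the right approach, or is there a supported configuration for local models that I am missing?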