import json
import logging
import os
import re
from datetime import datetime

import requests
# Set up logging to assessment.log.
logging.basicConfig(level=logging.INFO, filename='assessment.log',
                    format='%(asctime)s %(levelname)s:%(message)s')

# Load configuration from config.json.
def load_config():
    try:
        with open('config.json', 'r') as config_file:
            return json.load(config_file)
    except Exception as e:
        logging.error(f"Failed to load config: {e}")
        raise
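
# Expected config.json shape (an assumption inferred from how the config is
# used below; adjust to match your actual file):
# {
#     "api_endpoint": "https://api.openai.com/v1/completions",
#     "api_key": "sk-..."
# }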

# Read a text file and return its contents as a list of lines.
def read_file(file_path):
    try:
        with open(file_path, 'r') as file:
            return file.read().splitlines()
    except Exception as e:
        logging.error(f"Failed to read file {file_path}: {e}")
        raise
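
# Input files (assumed layout, inferred from the main block below):
#   questions.txt - one assessment question per line
#   prompt.txt    - the shared prompt preamble on its first line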

# Make a single completion request to the API and return the response text.
def make_api_request(api_endpoint, api_key, prompt):
    try:
        response = requests.post(
            api_endpoint,
            json={
                'model': 'text-davinci-003',
                'prompt': prompt,
                'max_tokens': 50
            },
            headers={'Authorization': f'Bearer {api_key}'},
            timeout=30,  # don't hang indefinitely on a stalled connection
        )
        if response.status_code != 200:
            logging.error(f"API request failed: {response.text}")
            return None
        # The legacy completions response looks like {"choices": [{"text": ...}]};
        # take the first choice's text, if any.
        return response.json().get('choices', [{}])[0].get('text', '').strip()
    except Exception as e:
        logging.error(f"API request error: {e}")
        return None
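
# Example call (hypothetical endpoint, key, and prompt, shown for illustration):
#   text = make_api_request('https://api.openai.com/v1/completions', 'sk-...',
#                           'Rate the following on a scale of 1 to 100: ...')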

# Extract a numerical score from the model's response text.
def extract_numerical_response(response_text):
    # Match a standalone integer from 1 to 100 (no leading zeros).
    match = re.search(r'\b([1-9][0-9]?|100)\b', response_text)
    return int(match.group(0)) if match else None
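
# Minimal Markdown table renderer: a local sketch used in place of the external
# markdown_table package, whose render() API could not be verified.
def render_markdown_table(headers, rows):
    lines = ['| ' + ' | '.join(headers) + ' |',
             '| ' + ' | '.join('---' for _ in headers) + ' |']
    for row in rows:
        lines.append('| ' + ' | '.join(str(cell) for cell in row) + ' |')
    return '\n'.join(lines)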

# Main function: run the assessment and write the results to davinci003.md.
def run_assessment(api_key, api_endpoint, questions, prompt_text):
    results = []  # (question, score or "N/A") pairs, kept aligned with questions
    scores = []   # valid numeric scores only, used for the average
    invalid_responses = 0
    assessment_date = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    model_used = "text-davinci-003"
    for question in questions:
        full_prompt = f"{prompt_text}\n\n{question}"
        response_text = make_api_request(api_endpoint, api_key, full_prompt)
        score = extract_numerical_response(response_text) if response_text else None
        if score is not None:
            scores.append(score)
            results.append((question, score))
        else:
            invalid_responses += 1
            results.append((question, "N/A"))
    final_score = sum(scores) / len(scores) if scores else 0
    markdown_content = render_markdown_table(["Question", "Score"], results)
    with open('davinci003.md', 'w') as md_file:
        md_file.write("# Ethical AI Assessment Results\n\n")
        md_file.write(f"API Evaluated: OpenAI (Model: {model_used})\n")
        md_file.write(f"Assessment Date: {assessment_date}\n")
        md_file.write(f"Final Score: {final_score:.2f}/100\n\n")
        md_file.write(f"Total Questions: {len(questions)}\n")
        md_file.write(f"Valid Responses: {len(scores)}\n")
        md_file.write(f"Invalid Responses: {invalid_responses}\n\n")
        md_file.write(markdown_content)

if __name__ == "__main__":
    config = load_config()
    questions = read_file('questions.txt')
    prompt_text = read_file('prompt.txt')[0]
    # Prefer the OPENAI_API_KEY environment variable; fall back to an
    # api_key entry in config.json (assumed to exist there).
    run_assessment(os.getenv('OPENAI_API_KEY', config.get('api_key')),
                   config['api_endpoint'], questions, prompt_text)
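
# Usage sketch (assumed filenames): place config.json, questions.txt, and
# prompt.txt alongside this script, export OPENAI_API_KEY, then run:
#     python assessment.py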