# Import the required modules
import numpy as np

import cpp_nn_py


def test_ann_mlp_ga():
    # Create an instance of the ANN_MLP_GA_float class
    mlp_ga = cpp_nn_py.ANN_MLP_GA_float([3, 12, 4, 2], 4041, 130, 10, 1, False)
    # Set the network name
    mlp_ga.SetName("float_test")
    print("Network name set to 'float_test'")
    # Set mixed mode
    mlp_ga.SetMixed(True)
    print("Mixed mode set:", mlp_ga.GetMixed())
    # Create the population
    mlp_ga.CreatePopulation(True)
    # Feedforward example (random inputs for illustration)
    inputs = np.array([0.6, 0.88, 0.86], dtype=np.float32)
    outputs = np.zeros(2, dtype=np.float32)
    mlp_ga.feedforward(inputs, outputs, 0, False)
    print("Feedforward outputs:", outputs)
    # Serialize the model
    mlp_ga.Serialize("model_float.hd5")
    print("Model serialized.")
    # Deserialize the model
    mlp_ga.Deserialize("model_float.hd5")
    print("Model deserialized.")
    # Run feedforward again to verify deserialization
    mlp_ga.feedforward(inputs, outputs, 0, False)
    print("Feedforward outputs after deserialization:", outputs)


def test_ann_mlp_ga_double():
    # Create an instance of the ANN_MLP_GA_double class
    mlp_ga = cpp_nn_py.ANN_MLP_GA_double([3, 4, 2], 4041, 130, 3, 1, False)
    # Set the network name
    mlp_ga.SetName("double_test")
    print("Network name set to 'double_test'")
    # Set mixed mode
    mlp_ga.SetMixed(True)
    print("Mixed mode set:", mlp_ga.GetMixed())
    # Create the population
    mlp_ga.CreatePopulation(True)
    # Feedforward example (random inputs for illustration)
    inputs = np.array([0.6, 0.88, 0.86], dtype=np.float64)
    outputs = np.zeros(2, dtype=np.float64)
    mlp_ga.feedforward(inputs, outputs, 0, False)
    print("Feedforward outputs:", outputs)
    # Serialize the model
    mlp_ga.Serialize("model_double.hd5")
    print("Model serialized.")
    # Deserialize the model
    mlp_ga.Deserialize("model_double.hd5")
    print("Model deserialized.")
    # Run feedforward again to verify deserialization
    mlp_ga.feedforward(inputs, outputs, 0, False)
    print("Feedforward outputs after deserialization:", outputs)


if __name__ == "__main__":
    test_ann_mlp_ga()
    test_ann_mlp_ga_double()
'''
/*************************/
/* update_locales.py */
/* Version 1.0 */
/* 2024/01/18 */
/*************************/
'''
import argparse
import json
import logging
from openai import OpenAI
import os
import re
logger = logging.getLogger()
logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter('%(message)s'))
logger.addHandler(handler)


def get_openai_token() -> str:
    # Retrieve the API token (here: from the conventional OPENAI_API_KEY
    # environment variable; adapt to your own secret management)
    return os.environ["OPENAI_API_KEY"]


def read_input_file(source_file: str, ext: str) -> str:
    # Dispatch on the file extension
    if ext == "json":
        with open(source_file, "r", encoding="utf-8") as f:
            data = json.load(f)
        logger.info("Successfully loaded JSON file")
        # Dump JSON as a string for the translation prompt
        return json.dumps(data, ensure_ascii=False, indent=2)
    elif ext == "html":
        with open(source_file, "r", encoding="utf-8") as f:
            data = f.read()
        logger.info("Successfully loaded HTML file")
        return data
    elif ext == "md":
        with open(source_file, "r", encoding="utf-8") as f:
            data = f.read()
        logger.info("Successfully loaded Markdown file")
        return data
    else:
        raise ValueError("Unsupported file type. Only .json, .html,"
                         " or .md are accepted.")


def get_output_file(lang: str, ext: str) -> str:
    # Placeholder output directory: customize for your project layout
    OUTPUTDIR = "."
    absolute_output_dir = os.path.abspath(OUTPUTDIR)
    output_path = os.path.join(absolute_output_dir, f"{lang}.{ext}")
    return output_path


def clean_json_response(response: str) -> str:
    """
    Clean potential markdown formatting from the JSON response.
    """
    # Remove potential markdown code block formatting
    json_pattern = r"```(?:json)?\s*([\s\S]*?)```"
    match = re.search(json_pattern, response)
    if match:
        response = match.group(1)
    return response.strip()
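
# Example (illustrative): a fenced model reply such as
#   '```json\n{"greeting": "Ciao"}\n```'
# is reduced by clean_json_response() to '{"greeting": "Ciao"}';
# replies without a fenced block simply pass through stripped.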


def prepare_prompt(content: str, content_type: str, target_lang: str):
    # Adjust the system prompt based on the content type
    system_prompt = "You are a professional language translator, "\
        "specializing in technical and business content localization for "\
        f"{target_lang}.\n"\
        "Your task is to translate text accurately, adapting to the "\
        "context and tone.\n"
    if content_type.lower() == "json":
        system_prompt += " Return ONLY the translated JSON without any "\
            "markdown formatting or explanation."
    # "md" files arrive with their bare extension, so match it here too
    elif content_type.lower() in ["html", "md", "markdown"]:
        system_prompt += " Return ONLY the translated content as proper "\
            f"{content_type} without any additional text or explanation."
    # Adjust the user prompt based on the content type
    user_prompt = f"Translate the following {content_type} content from "\
        f"English to {target_lang}. Ensure formatting remains intact.\n"\
        f"Content to translate:\n{content}"
    return system_prompt, user_prompt
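
# Example (illustrative): prepare_prompt('{"title": "Home"}', "json", "it")
# returns a (system_prompt, user_prompt) pair: the system prompt asks for a
# JSON-only reply, and the user prompt embeds the content to translate.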


def generate_translation(content: str, content_type: str,
                         target_lang: str, model: str,
                         api_token: str) -> str:
    client = OpenAI(api_key=api_token)
    system_prompt, user_prompt = prepare_prompt(
        content, content_type, target_lang)
    try:
        logger.info(f"Using model: {model} for translation")
        response = client.chat.completions.create(
            model=model,
            messages=[{"role": "system", "content": system_prompt},
                      {"role": "user", "content": user_prompt}],
            # Lower temperature for more consistent translations
            # (the o1 family only supports the default temperature)
            **({"temperature": 0.3}
               if model not in ("o1", "o1-mini") else {}))
        # Get the response content and clean it
        translation_text = response.choices[0].message.content
        # Parse the cleaned JSON text
        if content_type.lower() == "json":
            cleaned_json_text = clean_json_response(translation_text)
            try:
                cleaned_data = json.loads(cleaned_json_text)
                return json.dumps(cleaned_data, ensure_ascii=False, indent=2)
            except json.JSONDecodeError as e:
                logger.error(f"Failed to parse JSON response: {e}")
                logger.debug(f"Problematic JSON content: {cleaned_json_text}")
                raise
        else:
            return translation_text
    except Exception as e:
        logger.error(f"Translation generation failed: {e}")
        raise


def main():
    parser = argparse.ArgumentParser(
        description='Generate translations for multiple languages')
    parser.add_argument('source', help='Source file')
    parser.add_argument('target_langs',
                        help='Comma-separated list of '
                             'target languages (e.g., it,ja)')
    parser.add_argument('--dump_json', action='store_true',
                        help='Dump output as JSON')
    default = 'gpt-4o-mini'
    parser.add_argument('--model',
                        choices=['gpt-4o-mini', 'chatgpt-4o-latest',
                                 'o1-mini', 'o1'], default=default,
                        help=f'Model to use (default: {default})')
    args = parser.parse_args()
    try:
        content_type = os.path.splitext(args.source)[1].lower().lstrip(".")
        content = read_input_file(args.source, content_type)
    except Exception as e:
        logger.error(f"Failed to load {args.source}: {e}")
        raise
    # Parse target languages from the command-line argument
    target_languages = args.target_langs.split(',')
    # Generate translations for each target language
    for lang in target_languages:
        try:
            logger.info(f"Starting translation for {lang}")
            translations = generate_translation(
                content, content_type, lang,
                args.model, get_openai_token())
            # Save translations to file
            output_path = get_output_file(lang, content_type)
            with open(output_path, "w", encoding='utf-8') as f:
                f.write(translations)
            if content_type in {"html", "md"} and args.dump_json:
                # Output the input contents as valid JSON
                json_output_path = output_path + ".en.json"
                json_content = {"content_type": content_type,
                                "content": content}
                with open(json_output_path, "w", encoding="utf-8") as json_f:
                    json.dump(json_content, json_f,
                              ensure_ascii=False, indent=2)
                # Output the translated contents as valid JSON
                json_output_path = output_path + ".json"
                json_content = {"content_type": content_type,
                                "content": translations}
                with open(json_output_path, "w", encoding="utf-8") as json_f:
                    json.dump(json_content, json_f,
                              ensure_ascii=False, indent=2)
            logger.info(f"Successfully generated translations for {lang}")
        except Exception as e:
            logger.error(f"Failed to process translations for {lang}: {e}")


if __name__ == "__main__":
    main()
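
# Example usage (illustrative file names; set the output directory in
# get_output_file() and the OPENAI_API_KEY environment variable first):
#   python update_locales.py en.json it,ja
#   python update_locales.py index.md fr,de --dump_json --model o1-mini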