
Commit

update ci
Signed-off-by: intellinjun <jun.lin@intel.com>
intellinjun committed May 29, 2024
1 parent 235cd03 commit 03a2ab5
Showing 3 changed files with 8 additions and 1 deletion.
1 change: 1 addition & 0 deletions neural_speed/__init__.py
@@ -223,6 +223,7 @@ def init(self,
 
     def init_from_bin(self, model_type, model_path, **generate_kwargs):
         if self.module is None:
+            model_type = model_maps.get(model_type, model_type)
             self.module = _import_package(model_type)
         self.model = self.module.Model()
         if model_type=="whisper":
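As context for the line added above: dict.get(key, default) returns the mapped value when the key is present and the fallback otherwise, so model_maps normalizes known aliases before _import_package runs, while unknown names pass through unchanged. A minimal sketch of that lookup, using hypothetical alias entries rather than the project's actual model_maps table:

# Minimal sketch of the alias lookup added above; these model_maps
# entries are hypothetical, not neural_speed's real table.
model_maps = {"llama2": "llama", "gpt_neox": "gptneox"}

def resolve_model_type(model_type: str) -> str:
    # Return the canonical name for a known alias; unknown names fall
    # back to themselves via the second argument of dict.get.
    return model_maps.get(model_type, model_type)

assert resolve_model_type("llama2") == "llama"      # alias is mapped
assert resolve_model_type("whisper") == "whisper"   # unknown name passes through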
4 changes: 4 additions & 0 deletions tests/model-test/calculate_percentiles.py
@@ -37,6 +37,10 @@ def parse_output_file_acc(file_path):
     with open(file_path, 'r', encoding='UTF-8', errors='ignore') as file:
         for line in file:
+            accuracy_match = re.search(r"\|\s+\|\s+\|none\s+\|\s+0\|acc\s+\|\d\.\d+\|\±\s+\|\d\.\d+\|", line)
+            if accuracy_match:
+                accuracy[0]=float(re.search(r"\d+\.\d+", accuracy_match.group()).group())*100
+                continue
             accuracy_match = re.search(r"\|\s+\|\s+\|none\s+\|\s+0\|acc\s+\|\s+\d\.\d+\|\±\s+\|\d\.\d+\|", line)
             if accuracy_match:
                 accuracy[0]=float(re.search(r"\d+\.\d+", accuracy_match.group()).group())*100
                 continue
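To see why a second pattern was needed: the existing regex insists on whitespace between the acc column separator and the value (\|\s+\d\.\d+), so it misses rows where the number sits flush against the pipe; the other pattern covers that layout. A small sketch with made-up lm-eval-style table rows (not real log output):

import re

# Illustrative accuracy rows; the numbers are invented for the demo.
rows = [
    "|      |       |none  |     0|acc   | 0.7523|±  |0.0060|",  # space before the value
    "|      |       |none  |     0|acc   |0.7523|±  |0.0060|",   # value flush against '|'
]

old_pat = r"\|\s+\|\s+\|none\s+\|\s+0\|acc\s+\|\s+\d\.\d+\|\±\s+\|\d\.\d+\|"
new_pat = r"\|\s+\|\s+\|none\s+\|\s+0\|acc\s+\|\d\.\d+\|\±\s+\|\d\.\d+\|"

for row in rows:
    print(bool(re.search(old_pat, row)), bool(re.search(new_pat, row)))
# Prints:
# True False   <- only the old pattern matches the spaced layout
# False True   <- only the new pattern matches the flush layout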
4 changes: 3 additions & 1 deletion tests/model-test/cpp_graph_inference.sh
@@ -468,8 +468,10 @@ function main() {
         chmod 777 ${WORKSPACE}/${logs_file}
         if [[ ${input} == "1024" && ${cores_per_instance} == "32" ]]; then
             echo "-------- Accuracy start--------"
-            if [[ "${model}" == "llama"* || "${model}" == "gptj-6b" ]]; then
+            if [[ "${model}" == "llama"* || "${model}" == "gptj-6b" || "${model}" == "mistral-7b" ]]; then
                 OMP_NUM_THREADS=56 numactl -l -C 0-55 python ./scripts/cal_acc.py --model_name ${model_path} --init_from_bin ${model}-${precision}.bin --batch_size 8 --tasks lambada_openai 2>&1 | tee -a ${WORKSPACE}/${logs_file}
+            elif [[ "${model}" == *"gptq" ]]; then
+                OMP_NUM_THREADS=56 numactl -l -C 0-55 python ./scripts/cal_acc.py --model_name ${model_path} --use_gptq --tasks lambada_openai 2>&1 | tee -a ${WORKSPACE}/${logs_file}
             else
                 OMP_NUM_THREADS=56 numactl -l -C 0-55 python ./scripts/cal_acc.py --model_name ${model_path} --init_from_bin ${model}-${precision}.bin --tasks lambada_openai --batch_size 1 2>&1 | tee -a ${WORKSPACE}/${logs_file}
             fi
