diff --git a/.github/workflows/black.yml b/.github/workflows/black.yml
index df65bb5..e9a96b3 100644
--- a/.github/workflows/black.yml
+++ b/.github/workflows/black.yml
@@ -1,33 +1,10 @@
-# This is a basic workflow to help you get started with Actions
+name: Lint
 
-name: PyLinter-Black
+on: [pull_request]
 
-# Controls when the workflow will run
-on:
-  # Triggers the workflow on push or pull request events but only for the main branch
-  push:
-    branches: [main, DEV]
-  pull_request:
-    branches: [main, DEV]
-
-  # Allows you to run this workflow manually from the Actions tab
-  workflow_dispatch:
-
-# A workflow run is made up of one or more jobs that can run sequentially or in parallel
 jobs:
-  # This workflow contains a single job called "build"
-  build:
-    # The type of runner that the job will run on
+  lint:
     runs-on: ubuntu-latest
-
-    # Steps represent a sequence of tasks that will be executed as part of the job
     steps:
-      # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
-      - uses: actions/checkout@v3
-
-      - name: Black Code Formatter
-        # You may pin to the exact commit or the version.
-        # uses: lgeiger/black-action@4379f39aa4b6a3bb1cceb46a7665b9c26647d82d
-        uses: lgeiger/black-action@v1.0.1
-        with:
-          args: ". --check"
+      - uses: actions/checkout@v2
+      - uses: psf/black@stable
\ No newline at end of file
diff --git a/.github/workflows/isort.yml b/.github/workflows/isort.yml
new file mode 100644
index 0000000..7d6ce4f
--- /dev/null
+++ b/.github/workflows/isort.yml
@@ -0,0 +1,13 @@
+name: Run isort
+on:
+  - push
+
+jobs:
+  build:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+      - uses: isort/isort-action@v1.0.0
+        with:
+          requirements-files: "requirements.txt requirements-test.txt"
+          configuration: "--check-only --diff --profile black"
\ No newline at end of file
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 0000000..eb4cf24
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,20 @@
+# See https://pre-commit.com for more information
+# See https://pre-commit.com/hooks.html for more hooks
+repos:
+-   repo: https://github.com/pre-commit/pre-commit-hooks
+    rev: v4.4.0
+    hooks:
+    -   id: trailing-whitespace
+    -   id: check-yaml
+    -   id: check-json
+-   repo: https://github.com/psf/black
+    rev: 22.12.0
+    hooks:
+    -   id: black
+        language_version: python3.9
+-   repo: https://github.com/pycqa/isort
+    rev: 5.12.0
+    hooks:
+    -   id: isort
+        name: isort (python)
+        args: ["--profile", "black", "--filter-files"]
\ No newline at end of file
diff --git a/Pipfile b/Pipfile
index 5db5b39..8390d39 100644
--- a/Pipfile
+++ b/Pipfile
@@ -13,14 +13,12 @@ scikit-learn = "*"
 torchvision = "*"
 pandas = "*"
 psycopg2-binary = "*"
-<<<<<<< HEAD
 aiofiles = "*"
-=======
 minio = "*"
 ipywidgets = "*"
->>>>>>> 2a36b7d23d3e3f03ad041b5f0381232a0aff7e99
 
 [dev-packages]
+pre-commit = "*"
 
 [requires]
 python_version = "3.9"
diff --git a/README.md b/README.md
index e7f174b..130082e 100644
Binary files a/README.md and b/README.md differ
diff --git a/backend/PaintTransformer/inference.py b/backend/PaintTransformer/inference.py
index 9127a50..6e48acb 100644
--- a/backend/PaintTransformer/inference.py
+++ b/backend/PaintTransformer/inference.py
@@ -1,13 +1,14 @@
-import numpy as np
 import math
-import torch.nn.functional as F
+import os
+
+import numpy as np
 import torch
+import torch.nn.functional as F
 from PIL import Image
-import os
 
+from .utils.miscellaneous import *
 from .utils.morphology import *
 from .utils.network import *
-from .utils.miscellaneous import *
 
 
 def get_path_from_current_file(path):
diff --git a/backend/PaintTransformer/inference_only_final.py b/backend/PaintTransformer/inference_only_final.py
index 58d4d9d..11fef0b 100644
--- a/backend/PaintTransformer/inference_only_final.py
+++ b/backend/PaintTransformer/inference_only_final.py
@@ -1,23 +1,25 @@
 import math
 import os
+from argparse import ArgumentParser
 
 import numpy as np
 import PaintTransformer.morphology as morphology
 import PaintTransformer.network as network
+import PIL
 import torch
 import torch.nn.functional as F
 from PIL import Image
-import PIL
-from argparse import ArgumentParser
 
 idx = 0
 
+
 def save_img(img, output_path):
     result = Image.fromarray(
         (img.data.cpu().numpy().transpose((1, 2, 0)) * 255).astype(np.uint8)
     )
     result.save(output_path)
 
+
 def tensor_to_pil_image(image_tensor):
     pil_image = Image.fromarray(
         (image_tensor.data.cpu().numpy().transpose((1, 2, 0)) * 255).astype(np.uint8)
@@ -562,21 +564,26 @@ def partial_render(this_canvas, patch_coord_y, patch_coord_x):
     return cur_canvas
 
 
-def read_img(img, img_type='RGB', l=None):
+def read_img(img, img_type="RGB", l=None):
     img = img.convert(img_type)
     if l is not None:
         original_w, original_h = img.size
         if original_w > original_h:
-            img = img.resize((l, int(l/original_w*original_h)), resample=Image.NEAREST)
+            img = img.resize(
+                (l, int(l / original_w * original_h)), resample=Image.NEAREST
+            )
         else:
-            img = img.resize((int(l/original_h*original_w), l), resample=Image.NEAREST)
+            img = img.resize(
+                (int(l / original_h * original_w), l), resample=Image.NEAREST
+            )
     img = np.array(img)
     if img.ndim == 2:
         img = np.expand_dims(img, axis=-1)
     img = img.transpose((2, 0, 1))
-    img = torch.from_numpy(img).unsqueeze(0).float() / 255.
+    img = torch.from_numpy(img).unsqueeze(0).float() / 255.0
     return img
 
+
 def read_img_file(img_path, img_type="RGB", h=None, w=None):
     img = Image.open(img_path).convert(img_type)
     if h is not None and w is not None:
@@ -627,9 +634,10 @@ def crop(img, h, w):
 def init(stroke_num: int, model_path: str):
     device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
     model = prepare_infer_model(model_path, stroke_num, device)
-    meta_brushes = make_meta_brushes(device, mode="small") 
+    meta_brushes = make_meta_brushes(device, mode="small")
     return model, meta_brushes, device
 
+
 def inference(
     model,
     device,
@@ -637,17 +645,17 @@
     image,
     stroke_num: int,
     patch_size: int,
-    K: int=None,
-    resize_l: int=None,
-    serial: bool=False,
+    K: int = None,
+    resize_l: int = None,
+    serial: bool = False,
 ):
     print(image)
     with torch.no_grad():
-        frame_dir ='/output'
+        frame_dir = "/output"
         frame_list = []
         original_img = read_img(image, "RGB", resize_l).to(device)
         original_h, original_w = original_img.shape[-2:]
-        if K==None:
+        if K == None:
             K = max(math.ceil(math.log2(max(original_h, original_w) / patch_size)), 0)
         original_img_pad_size = patch_size * (2**K)
         original_img_pad = pad(
@@ -705,7 +713,7 @@
             decision=decision,
             meta_brushes=meta_brushes,
             cur_canvas=final_result,
-            frame_dir='/output',
+            frame_dir="/output",
             has_border=False,
             original_h=original_h,
             original_w=original_w,
@@ -823,8 +831,12 @@ def make_meta_brushes(device: torch, mode: str = "large"):
     Returns:
         torch: meta_brushes
     """
-    brush_L_vertical = read_img_file(f"PaintTransformer/brush/brush_{mode}_vertical.png", "L")
-    brush_L_horizontal = read_img_file(f"PaintTransformer/brush/brush_{mode}_horizontal.png", "L")
+    brush_L_vertical = read_img_file(
+        f"PaintTransformer/brush/brush_{mode}_vertical.png", "L"
+    )
+    brush_L_horizontal = read_img_file(
+        f"PaintTransformer/brush/brush_{mode}_horizontal.png", "L"
+    )
     return torch.cat([brush_L_vertical, brush_L_horizontal], dim=0).to(device)
 
 
@@ -868,19 +880,26 @@ def make_path(input_path, output_dir):
     parser.add_argument("--input", dest="input", type=str, default="iu")
     args = parser.parse_args()
 
+    output_dir_name = (
+        os.path.splitext(args.input)[0]
+        + "_resize_"
+        + "{:04d}".format(args.resize_l)
+        + "_K_"
+        + "{:01d}".format(args.K)
+    )
-    output_dir_name = os.path.splitext(args.input)[0] + '_resize_' + '{:04d}'.format(args.resize_l) + '_K_' + '{:01d}'.format(args.K)
-
-    output_dir_root = os.path.join('output', output_dir_name)
+    output_dir_root = os.path.join("output", output_dir_name)
 
-    input_path = os.path.join('input', args.input)
+    input_path = os.path.join("input", args.input)
 
     resize_l = args.resize_l
     K = args.K
 
     stroke_num = 8
     patch_size = 32
 
-    model, meta_brushes, device = init(stroke_num, model_path="PaintTransformer/model.pth")
+    model, meta_brushes, device = init(
+        stroke_num, model_path="PaintTransformer/model.pth"
+    )
 
     inference(
         model,
@@ -889,7 +908,7 @@ def make_path(input_path, output_dir):
         stroke_num=stroke_num,
         patch_size=32,
         K=K,
-        need_animation=True, # whether need intermediate results for animation.
-        resize_l=resize_l, # resize original input to this size. (max(w, h) = resize_l)
-        serial=True, # if need animation, serial must be True.
+        need_animation=True,  # whether need intermediate results for animation.
+        resize_l=resize_l,  # resize original input to this size. (max(w, h) = resize_l)
+        serial=True,  # if need animation, serial must be True.
     )
diff --git a/backend/PaintTransformer/utils/miscellaneous.py b/backend/PaintTransformer/utils/miscellaneous.py
index b8469f6..c9ad743 100644
--- a/backend/PaintTransformer/utils/miscellaneous.py
+++ b/backend/PaintTransformer/utils/miscellaneous.py
@@ -1,7 +1,8 @@
 import os
-from PIL import Image
-import torch
+
 import numpy as np
+import torch
+from PIL import Image
 
 
 def set_image(img, img_type="RGB", l=None):
diff --git a/backend/api/api_v1/game/game.py b/backend/api/api_v1/game/game.py
index fe94494..716663b 100644
--- a/backend/api/api_v1/game/game.py
+++ b/backend/api/api_v1/game/game.py
@@ -1,6 +1,5 @@
 from fastapi import APIRouter, HTTPException
-from fastapi.responses import StreamingResponse, FileResponse
-
+from fastapi.responses import FileResponse, StreamingResponse
 
 router = APIRouter()
 
diff --git a/backend/api/api_v1/inference/inference.py b/backend/api/api_v1/inference/inference.py
index 54de7cd..bbcd1ef 100644
--- a/backend/api/api_v1/inference/inference.py
+++ b/backend/api/api_v1/inference/inference.py
@@ -1,12 +1,18 @@
-import aiofiles
 from io import BytesIO
 
-from PIL import Image
-from utils import predict_by_img, from_image_to_bytes, from_image_to_str, save_img
-from fastapi import APIRouter, HTTPException, BackgroundTasks
-from fastapi import FastAPI, File, UploadFile, Response
+import aiofiles
+from fastapi import (
+    APIRouter,
+    BackgroundTasks,
+    FastAPI,
+    File,
+    HTTPException,
+    Response,
+    UploadFile,
+)
 from fastapi.responses import FileResponse
-
+from PIL import Image
+from utils import from_image_to_bytes, from_image_to_str, predict_by_img, save_img
 
 router = APIRouter()
 
diff --git a/backend/app_only_final.py b/backend/app_only_final.py
index ac465cd..755d22b 100644
--- a/backend/app_only_final.py
+++ b/backend/app_only_final.py
@@ -1,12 +1,11 @@
 # app.py
-from fastapi import FastAPI
-from fastapi import File
-from fastapi import FastAPI
-from PaintTransformer.inference_only_final import init, inference
-from PIL import Image
 import base64
 import io
 
+from fastapi import FastAPI, File
+from PaintTransformer.inference_only_final import inference, init
+from PIL import Image
+
 # Create a FastAPI instance
 app = FastAPI()
diff --git a/backend/batch_inference_process/inference_only_final_process.py b/backend/batch_inference_process/inference_only_final_process.py
index 5c38398..8952c81 100644
--- a/backend/batch_inference_process/inference_only_final_process.py
+++ b/backend/batch_inference_process/inference_only_final_process.py
@@ -1,8 +1,9 @@
-from PIL import Image
-import requests
 import base64
 import io
 
+import requests
+from PIL import Image
+
 
 def from_image_to_bytes(img):
     """
diff --git a/backend/inference_main.py b/backend/inference_main.py
index fa8c010..7fb5796 100644
--- a/backend/inference_main.py
+++ b/backend/inference_main.py
@@ -1,10 +1,9 @@
+import os
 import sys
 
 from PaintTransformer.inference import *
 from PIL import Image
-import os
-
 
 resize_l = 1024
 K = 5
 stroke_num = 8
diff --git a/backend/main.py b/backend/main.py
index 8b806ef..25c651a 100644
--- a/backend/main.py
+++ b/backend/main.py
@@ -1,10 +1,8 @@
 # main.py
+from api.api_v1 import api_router
 from fastapi import FastAPI
 from fastapi.middleware.cors import CORSMiddleware
-from api.api_v1 import api_router
-
-
 
 app = FastAPI(
     docs_url="/api/docs", redoc_url="/api/redoc", openapi_url="/api/openapi.json"
 )
diff --git a/backend/storage_minio/dummy_data_to_storage.py b/backend/storage_minio/dummy_data_to_storage.py
index ef3e0d2..9fee6fb 100644
--- a/backend/storage_minio/dummy_data_to_storage.py
+++ b/backend/storage_minio/dummy_data_to_storage.py
@@ -1,6 +1,7 @@
-from minio import Minio
-from glob import glob
 import os
+from glob import glob
+
+from minio import Minio
 
 BUCKET_NAME = "savepaint-bucket"
 
diff --git a/backend/utils.py b/backend/utils.py
index 4fe687a..181d391 100644
--- a/backend/utils.py
+++ b/backend/utils.py
@@ -1,16 +1,15 @@
+import base64
+import io
 import os
-import cv2
-import time
 import random
-import base64
+import time
+
 import aiofiles
-import io
-from PIL import Image
+import cv2
 import numpy as np
-
-from PaintTransformer.inference import init, inference
+from PaintTransformer.inference import inference, init
 from PaintTransformer.inference_only_final import inference as inference_by_img
-
+from PIL import Image
 
 model_path = "PaintTransformer/model.pth"  # main.py 기준으로 경로 설정해야 함
 
diff --git a/database/docker-compose.yaml b/database/docker-compose.yaml
index 4eef52d..96334ed 100644
--- a/database/docker-compose.yaml
+++ b/database/docker-compose.yaml
@@ -12,7 +12,7 @@ services:
       POSTGRES_DB: mydatabase
     volumes:
       - "./init/:/docker-entrypoint-initdb.d/"
-      
+
 # data-info-to-db:
 #   build:
 #     context: .
diff --git a/database/dummy_dataset_to_db.py b/database/dummy_dataset_to_db.py
index e763c61..a566a6c 100644
--- a/database/dummy_dataset_to_db.py
+++ b/database/dummy_dataset_to_db.py
@@ -1,9 +1,10 @@
+import os
 import time
-import psycopg2
 from argparse import ArgumentParser
 from glob import glob
-import os
+
 import pandas as pd
+import psycopg2
 from PIL import Image
 
 
diff --git a/vue-project/src/components/ExampleView.vue b/vue-project/src/components/ExampleView.vue
index 18f203e..f049b67 100644
--- a/vue-project/src/components/ExampleView.vue
+++ b/vue-project/src/components/ExampleView.vue
@@ -1,7 +1,7 @@
diff --git a/vue-project/src/views/BackUp.vue b/vue-project/src/views/BackUp.vue
index 4f29f58..b6ba9fe 100644
--- a/vue-project/src/views/BackUp.vue
+++ b/vue-project/src/views/BackUp.vue
@@ -87,14 +87,14 @@ game_status: 시작 전 0 각 문제 마다 +1(총 9) -> 게임이 끝날 때 10
       this.paintImg=response['paint_img']
       this.answer=response['answer']
       this.result=response['result']
-      
+
       this.$store.commit('setOrigin',response['origin_img'])
       this.$store.commit('setPaint',response['paint_img'])
       this.$store.commit('setAnswer',response['answer'])
       this.$store.commit('setResult',response['result'])
-      
+
     },
-    
+
     enter(){
       if (this.text===this.answer[this.game_status-1]){
         this.text=''
@@ -110,7 +110,7 @@ game_status: 시작 전 0 각 문제 마다 +1(총 9) -> 게임이 끝날 때 10
       this.game_status+=1
       // this.getGame()
     },
-    
+
   }
 }
 
diff --git a/vue-project/src/views/GameView.vue b/vue-project/src/views/GameView.vue
index c111822..0ac333f 100644
--- a/vue-project/src/views/GameView.vue
+++ b/vue-project/src/views/GameView.vue
@@ -100,14 +100,14 @@ game_status: 시작 전 0 각 문제 마다 +1(총 9) -> 게임이 끝날 때 10
       this.paintImg=response['paint_img']
       this.answer=response['answer']
       this.result=response['result']
-      
+
       this.$store.commit('setOrigin',response['origin_img'])
       this.$store.commit('setPaint',response['paint_img'])
       this.$store.commit('setAnswer',response['answer'])
       this.$store.commit('setResult',response['result'])
-      
+
     },
-    
+
     enter(){
       if (this.text===this.answer[this.game_status-1]){
         this.text=''
@@ -125,7 +125,7 @@ game_status: 시작 전 0 각 문제 마다 +1(총 9) -> 게임이 끝날 때 10
       this.img_timer.value=100
       this.total_timer.value=100
     },
-    
+
   }
 }
 
diff --git a/vue-project/src/views/SelectView.vue b/vue-project/src/views/SelectView.vue
index c3141af..be2d3f2 100644
--- a/vue-project/src/views/SelectView.vue
+++ b/vue-project/src/views/SelectView.vue
@@ -28,7 +28,7 @@
        Game start
-      
+
@@ -37,7 +37,7 @@