
Feat/GitHub action #11

Open · wants to merge 17 commits into base: DEV
33 changes: 5 additions & 28 deletions .github/workflows/black.yml
@@ -1,33 +1,10 @@
-# This is a basic workflow to help you get started with Actions
+name: Lint

-name: PyLinter-Black
+on: [pull_request]

-# Controls when the workflow will run
-on:
-  # Triggers the workflow on push or pull request events but only for the main branch
-  push:
-    branches: [main, DEV]
-  pull_request:
-    branches: [main, DEV]

-  # Allows you to run this workflow manually from the Actions tab
-  workflow_dispatch:

-# A workflow run is made up of one or more jobs that can run sequentially or in parallel
jobs:
-  # This workflow contains a single job called "build"
-  build:
-    # The type of runner that the job will run on
+  lint:
    runs-on: ubuntu-latest

-    # Steps represent a sequence of tasks that will be executed as part of the job
    steps:
-      # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
-      - uses: actions/checkout@v3

-      - name: Black Code Formatter
-        # You may pin to the exact commit or the version.
-        # uses: lgeiger/black-action@4379f39aa4b6a3bb1cceb46a7665b9c26647d82d
-        uses: lgeiger/black-action@v1.0.1
-        with:
-          args: ". --check"
+      - uses: actions/checkout@v2
+      - uses: psf/black@stable
13 changes: 13 additions & 0 deletions .github/workflows/isort.yml
@@ -0,0 +1,13 @@
name: Run isort
on:
  - push

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: isort/isort-action@v1.0.0
        with:
          requirements-files: "requirements.txt requirements-test.txt"
          configuration: "--check-only --diff --profile black"
20 changes: 20 additions & 0 deletions .pre-commit-config.yaml
@@ -0,0 +1,20 @@
# See https://pre-commit.com for more information
# See https://pre-commit.com/hooks.html for more hooks
repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.4.0
    hooks:
      - id: trailing-whitespace
      - id: check-yaml
      - id: check-json
  - repo: https://github.com/psf/black
    rev: 22.12.0
    hooks:
      - id: black
        language_version: python3.9
  - repo: https://github.com/pycqa/isort
    rev: 5.12.0
    hooks:
      - id: isort
        name: isort (python)
        args: ["--profile", "black", "--filter-files"]
4 changes: 1 addition & 3 deletions Pipfile
@@ -13,14 +13,12 @@ scikit-learn = "*"
torchvision = "*"
pandas = "*"
psycopg2-binary = "*"
-<<<<<<< HEAD
aiofiles = "*"
-=======
minio = "*"
ipywidgets = "*"
->>>>>>> 2a36b7d23d3e3f03ad041b5f0381232a0aff7e99

[dev-packages]
+pre-commit = "*"

[requires]
python_version = "3.9"
Binary file modified README.md
9 changes: 5 additions & 4 deletions backend/PaintTransformer/inference.py
@@ -1,13 +1,14 @@
-import numpy as np
import math
-import torch.nn.functional as F
+import os
+
+import numpy as np
import torch
+import torch.nn.functional as F
from PIL import Image
-import os

+from .utils.miscellaneous import *
from .utils.morphology import *
from .utils.network import *
-from .utils.miscellaneous import *


def get_path_from_current_file(path):
65 changes: 42 additions & 23 deletions backend/PaintTransformer/inference_only_final.py
@@ -1,23 +1,25 @@
import math
import os
+from argparse import ArgumentParser

import numpy as np
import PaintTransformer.morphology as morphology
import PaintTransformer.network as network
+import PIL
import torch
import torch.nn.functional as F
from PIL import Image
-import PIL
-from argparse import ArgumentParser

idx = 0


def save_img(img, output_path):
    result = Image.fromarray(
        (img.data.cpu().numpy().transpose((1, 2, 0)) * 255).astype(np.uint8)
    )
    result.save(output_path)


def tensor_to_pil_image(image_tensor):
    pil_image = Image.fromarray(
        (image_tensor.data.cpu().numpy().transpose((1, 2, 0)) * 255).astype(np.uint8)
@@ -562,21 +564,26 @@ def partial_render(this_canvas, patch_coord_y, patch_coord_x):
    return cur_canvas


-def read_img(img, img_type='RGB', l=None):
+def read_img(img, img_type="RGB", l=None):
    img = img.convert(img_type)
    if l is not None:
        original_w, original_h = img.size
        if original_w > original_h:
-            img = img.resize((l, int(l/original_w*original_h)), resample=Image.NEAREST)
+            img = img.resize(
+                (l, int(l / original_w * original_h)), resample=Image.NEAREST
+            )
        else:
-            img = img.resize((int(l/original_h*original_w), l), resample=Image.NEAREST)
+            img = img.resize(
+                (int(l / original_h * original_w), l), resample=Image.NEAREST
+            )
    img = np.array(img)
    if img.ndim == 2:
        img = np.expand_dims(img, axis=-1)
    img = img.transpose((2, 0, 1))
-    img = torch.from_numpy(img).unsqueeze(0).float() / 255.
+    img = torch.from_numpy(img).unsqueeze(0).float() / 255.0
    return img


def read_img_file(img_path, img_type="RGB", h=None, w=None):
    img = Image.open(img_path).convert(img_type)
    if h is not None and w is not None:
@@ -627,27 +634,28 @@ def crop(img, h, w):
def init(stroke_num: int, model_path: str):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = prepare_infer_model(model_path, stroke_num, device)
-    meta_brushes = make_meta_brushes(device, mode="small")
+    meta_brushes = make_meta_brushes(device, mode="small")
    return model, meta_brushes, device


def inference(
    model,
    device,
    meta_brushes,
    image,
    stroke_num: int,
    patch_size: int,
-    K: int=None,
-    resize_l: int=None,
-    serial: bool=False,
+    K: int = None,
+    resize_l: int = None,
+    serial: bool = False,
):
+    print(image)
    with torch.no_grad():
-        frame_dir ='/output'
+        frame_dir = "/output"
        frame_list = []
        original_img = read_img(image, "RGB", resize_l).to(device)
        original_h, original_w = original_img.shape[-2:]
-        if K==None:
+        if K == None:
            K = max(math.ceil(math.log2(max(original_h, original_w) / patch_size)), 0)
        original_img_pad_size = patch_size * (2**K)
        original_img_pad = pad(
@@ -705,7 +713,7 @@ def inference(
            decision=decision,
            meta_brushes=meta_brushes,
            cur_canvas=final_result,
-            frame_dir='/output',
+            frame_dir="/output",
            has_border=False,
            original_h=original_h,
            original_w=original_w,
@@ -823,8 +831,12 @@ def make_meta_brushes(device: torch, mode: str = "large"):
    Returns:
        torch: meta_brushes
    """
-    brush_L_vertical = read_img_file(f"PaintTransformer/brush/brush_{mode}_vertical.png", "L")
-    brush_L_horizontal = read_img_file(f"PaintTransformer/brush/brush_{mode}_horizontal.png", "L")
+    brush_L_vertical = read_img_file(
+        f"PaintTransformer/brush/brush_{mode}_vertical.png", "L"
+    )
+    brush_L_horizontal = read_img_file(
+        f"PaintTransformer/brush/brush_{mode}_horizontal.png", "L"
+    )
    return torch.cat([brush_L_vertical, brush_L_horizontal], dim=0).to(device)


@@ -868,19 +880,26 @@ def make_path(input_path, output_dir):
    parser.add_argument("--input", dest="input", type=str, default="iu")
    args = parser.parse_args()

+    output_dir_name = (
+        os.path.splitext(args.input)[0]
+        + "_resize_"
+        + "{:04d}".format(args.resize_l)
+        + "_K_"
+        + "{:01d}".format(args.K)
+    )

-    output_dir_name = os.path.splitext(args.input)[0] + '_resize_' + '{:04d}'.format(args.resize_l) + '_K_' + '{:01d}'.format(args.K)

-    output_dir_root = os.path.join('output', output_dir_name)
+    output_dir_root = os.path.join("output", output_dir_name)

-    input_path = os.path.join('input', args.input)
+    input_path = os.path.join("input", args.input)
    resize_l = args.resize_l
    K = args.K

    stroke_num = 8
    patch_size = 32

-    model, meta_brushes, device = init(stroke_num, model_path="PaintTransformer/model.pth")
+    model, meta_brushes, device = init(
+        stroke_num, model_path="PaintTransformer/model.pth"
+    )

    inference(
        model,
@@ -889,7 +908,7 @@ def make_path(input_path, output_dir):
        stroke_num=stroke_num,
        patch_size=32,
        K=K,
-        need_animation=True, # whether need intermediate results for animation.
-        resize_l=resize_l, # resize original input to this size. (max(w, h) = resize_l)
-        serial=True, # if need animation, serial must be True.
+        need_animation=True,  # whether need intermediate results for animation.
+        resize_l=resize_l,  # resize original input to this size. (max(w, h) = resize_l)
+        serial=True,  # if need animation, serial must be True.
    )
5 changes: 3 additions & 2 deletions backend/PaintTransformer/utils/miscellaneous.py
@@ -1,7 +1,8 @@
import os
-from PIL import Image
-import torch
+
import numpy as np
+import torch
+from PIL import Image


def set_image(img, img_type="RGB", l=None):
3 changes: 1 addition & 2 deletions backend/api/api_v1/game/game.py
@@ -1,6 +1,5 @@
from fastapi import APIRouter, HTTPException
-from fastapi.responses import StreamingResponse, FileResponse
-
+from fastapi.responses import FileResponse, StreamingResponse

router = APIRouter()

18 changes: 12 additions & 6 deletions backend/api/api_v1/inference/inference.py
@@ -1,12 +1,18 @@
-import aiofiles
from io import BytesIO
-from PIL import Image

-from utils import predict_by_img, from_image_to_bytes, from_image_to_str, save_img
-from fastapi import APIRouter, HTTPException, BackgroundTasks
-from fastapi import FastAPI, File, UploadFile, Response
+import aiofiles
+from fastapi import (
+    APIRouter,
+    BackgroundTasks,
+    FastAPI,
+    File,
+    HTTPException,
+    Response,
+    UploadFile,
+)
from fastapi.responses import FileResponse

+from PIL import Image
+from utils import from_image_to_bytes, from_image_to_str, predict_by_img, save_img

router = APIRouter()

9 changes: 4 additions & 5 deletions backend/app_only_final.py
@@ -1,12 +1,11 @@
# app.py
-from fastapi import FastAPI
-from fastapi import File
-from fastapi import FastAPI
-from PaintTransformer.inference_only_final import init, inference
-from PIL import Image
import base64
import io

+from fastapi import FastAPI, File
+from PaintTransformer.inference_only_final import inference, init
+from PIL import Image

# Create a FastAPI instance
app = FastAPI()

@@ -1,8 +1,9 @@
-from PIL import Image
-import requests
import base64
import io

+import requests
+from PIL import Image


def from_image_to_bytes(img):
    """
3 changes: 1 addition & 2 deletions backend/inference_main.py
@@ -1,10 +1,9 @@
+import os
import sys

from PaintTransformer.inference import *
from PIL import Image

-import os
-
resize_l = 1024
K = 5
stroke_num = 8
4 changes: 1 addition & 3 deletions backend/main.py
@@ -1,10 +1,8 @@
# main.py
+from api.api_v1 import api_router
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
-
-from api.api_v1 import api_router
-

app = FastAPI(
    docs_url="/api/docs", redoc_url="/api/redoc", openapi_url="/api/openapi.json"
)
5 changes: 3 additions & 2 deletions backend/storage_minio/dummy_data_to_storage.py
@@ -1,6 +1,7 @@
-from minio import Minio
-from glob import glob
import os
+from glob import glob
+
+from minio import Minio

BUCKET_NAME = "savepaint-bucket"
