commit
aecddf273a
50
README.md
50
README.md
|
@ -1,6 +1,53 @@
|
|||
## PyTorch Implementation of [AnimeGANv2](https://github.com/TachibanaYoshino/AnimeGANv2)
|
||||
|
||||
|
||||
**Torch Hub Usage**
|
||||
|
||||
You can load AnimeGANv2 via `torch.hub`:
|
||||
|
||||
```python
|
||||
import torch
|
||||
model = torch.hub.load('bryandlee/animegan2-pytorch', 'generator').eval()
|
||||
# convert your image into tensor here
|
||||
out = model(img_tensor)
|
||||
```
|
||||
|
||||
You can load with various configs (more details in [the torch docs](https://pytorch.org/docs/stable/hub.html)):
|
||||
```python
|
||||
model = torch.hub.load(
|
||||
"bryandlee/animegan2-pytorch",
|
||||
"generator",
|
||||
pretrained=True, # or give URL to a pretrained model
|
||||
device="cuda", # or "cpu" if you don't have a GPU
|
||||
progress=True, # show progress
|
||||
)
|
||||
```
|
||||
|
||||
Currently, the following `pretrained` shorthands are available:
|
||||
```python
|
||||
model = torch.hub.load("bryandlee/animegan2-pytorch", "generator", pretrained="celeba_distill")
|
||||
model = torch.hub.load("bryandlee/animegan2-pytorch", "generator", pretrained="face_paint_512_v1")
|
||||
model = torch.hub.load("bryandlee/animegan2-pytorch", "generator", pretrained="face_paint_512_v2")
|
||||
model = torch.hub.load("bryandlee/animegan2-pytorch", "generator", pretrained="paprika")
|
||||
```
|
||||
|
||||
You can also load the `face2paint` utility function. First, install dependencies:
|
||||
|
||||
```
|
||||
pip install torchvision Pillow numpy
|
||||
```
|
||||
|
||||
Then, import the function using `torch.hub`:
|
||||
```python
|
||||
face2paint = torch.hub.load(
|
||||
'bryandlee/animegan2-pytorch', 'face2paint',
|
||||
size=512, device="cpu"
|
||||
)
|
||||
|
||||
img = Image.open(...).convert("RGB")
|
||||
out = face2paint(model, img)
|
||||
```
|
||||
|
||||
**Updates**
|
||||
|
||||
* `2021-10-17` Add weights for [face portrait v2](https://github.com/bryandlee/animegan2-pytorch#additional-models)
|
||||
|
@ -9,7 +56,6 @@
|
|||
See demo: [![Hugging Face Spaces](https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue)](https://huggingface.co/spaces/akhaliq/AnimeGANv2)
|
||||
|
||||
|
||||
|
||||
**Weight Conversion from the Original Repo (Requires TensorFlow 1.x)**
|
||||
```
|
||||
git clone https://github.com/TachibanaYoshino/AnimeGANv2
|
||||
|
@ -86,5 +132,3 @@ Trained on <b>512x512</b> face images. Compared to v1, `🔻beautify` `🔺robus
|
|||
|
||||
|
||||
</details>
|
||||
|
||||
|
||||
|
|
63
hubconf.py
Normal file
63
hubconf.py
Normal file
|
@ -0,0 +1,63 @@
|
|||
import torch
|
||||
|
||||
def generator(pretrained=True, device="cpu", progress=True, check_hash=True):
    """Create an AnimeGANv2 generator, optionally loading pretrained weights.

    Args:
        pretrained: ``True`` to load the default ``"face_paint_512_v2"``
            weights, ``False`` (or any non-string falsy value) for a randomly
            initialized model, or a string that is either one of the known
            shorthand names below or a direct URL to a checkpoint file.
        device: device spec the model is moved to (e.g. ``"cpu"``, ``"cuda"``).
        progress: whether to show a download progress bar.
        check_hash: verify the downloaded checkpoint's hash
            (passed through to ``torch.hub.load_state_dict_from_url``).

    Returns:
        A ``model.Generator`` instance on ``device``.
    """
    # Deferred import: hubconf.py is loaded by torch.hub before the repo
    # checkout is guaranteed to be on sys.path at module-import time.
    from model import Generator

    release_url = "https://github.com/bryandlee/animegan2-pytorch/raw/main/weights"
    # Map shorthand names to their released checkpoint URLs.
    known = {
        name: f"{release_url}/{name}.pt"
        for name in [
            'celeba_distill', 'face_paint_512_v1', 'face_paint_512_v2', 'paprika'
        ]
    }

    device = torch.device(device)
    model = Generator().to(device)

    # Use isinstance() for the type check (was `type(pretrained) == str`).
    if isinstance(pretrained, str):
        # Look if a known name is passed, otherwise assume it's a URL
        ckpt_url = known.get(pretrained, pretrained)
        pretrained = True
    else:
        ckpt_url = known.get('face_paint_512_v2')

    if pretrained is True:
        state_dict = torch.hub.load_state_dict_from_url(
            ckpt_url,
            map_location=device,
            progress=progress,
            check_hash=check_hash,
        )
        model.load_state_dict(state_dict)

    return model
|
||||
|
||||
|
||||
def face2paint(device="cpu", size=512):
    """Build a helper that stylizes a PIL face image with a generator.

    Args:
        device: default device the inner function moves inputs to.
        size: default square side length input images are resized to.

    Returns:
        A function ``face2paint(model, img, size=..., side_by_side=...,
        device=...)`` that center-crops ``img`` to a square, resizes it,
        runs ``model`` on it, and returns the result as a PIL image
        (optionally shown next to the input).
    """
    # Deferred imports so hubconf.py itself has no hard PIL/torchvision
    # dependency unless this entry point is actually requested.
    from PIL import Image
    from torchvision.transforms.functional import to_tensor, to_pil_image

    def face2paint(
        model: torch.nn.Module,
        img: Image.Image,
        size: int = size,
        side_by_side: bool = True,
        device: str = device,
    ) -> Image.Image:
        # Center-crop to the largest inscribed square, then resize.
        w, h = img.size
        s = min(w, h)
        img = img.crop(((w - s) // 2, (h - s) // 2, (w + s) // 2, (h + s) // 2))
        img = img.resize((size, size), Image.LANCZOS)

        with torch.no_grad():
            # Scale [0, 1] tensor values to the [-1, 1] range the generator
            # expects. Renamed from `input` to avoid shadowing the builtin.
            img_tensor = to_tensor(img).unsqueeze(0) * 2 - 1
            output = model(img_tensor.to(device)).cpu()[0]

            if side_by_side:
                # Concatenate input and output along width for comparison.
                output = torch.cat([img_tensor[0], output], dim=2)

            # Map [-1, 1] back to [0, 1] and clamp stragglers.
            output = (output * 0.5 + 0.5).clip(0, 1)

        return to_pil_image(output)

    return face2paint
|
BIN
weights/celeba_distill.pt
Normal file
BIN
weights/celeba_distill.pt
Normal file
Binary file not shown.
BIN
weights/face_paint_512_v1.pt
Normal file
BIN
weights/face_paint_512_v1.pt
Normal file
Binary file not shown.
BIN
weights/face_paint_512_v2.pt
Normal file
BIN
weights/face_paint_512_v2.pt
Normal file
Binary file not shown.
BIN
weights/paprika.pt
Normal file
BIN
weights/paprika.pt
Normal file
Binary file not shown.
Loading…
Reference in a new issue