Merge pull request #4 from guardrails-ai/fix-pyproject-toml
Fix pyproject.toml
dtam authored Aug 29, 2024
2 parents a4225ee + 966c5d4 commit cd19305
Showing 3 changed files with 6 additions and 8 deletions.
3 changes: 1 addition & 2 deletions inference/download_model.py
@@ -37,8 +37,7 @@
 )
 def download_model(model_name, model_revision, force_download=False):
     from huggingface_hub import snapshot_download
-    from transformers import AutoTokenizer, AutoModelForCausalLM
-    import json
+    from transformers import AutoTokenizer
     import os
 
     volume.reload()
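
Because this hunk adds no replacement usage, the dropped AutoModelForCausalLM and json imports were evidently dead code: the download step needs only snapshot_download plus the tokenizer. A minimal sketch of that pattern (the fetch helper and its exact arguments are illustrative, not the repository's actual code):

    from huggingface_hub import snapshot_download
    from transformers import AutoTokenizer

    def fetch(model_name, model_revision, force_download=False):
        # Pull the full model snapshot into the local cache/volume.
        snapshot_download(
            repo_id=model_name,
            revision=model_revision,
            force_download=force_download,
        )
        # Instantiating the tokenizer confirms the tokenizer files landed.
        AutoTokenizer.from_pretrained(model_name, revision=model_revision)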
3 changes: 0 additions & 3 deletions inference/serving-non-optimized-fastapi.py
@@ -53,9 +53,6 @@ def load(self):
 
     @modal.method()
     def generate(self, chat):
-        import torch
-        from torch.nn.functional import softmax
-
         tokenizer = self.tokenizer
         model = self.model
 
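
With zero additions in this hunk, torch and softmax were unused imports inside generate. A hypothetical stand-alone version of such a method, assuming the surviving body simply tokenizes the chat and delegates sampling to model.generate (the real method lives on a Modal class and may differ):

    def generate(tokenizer, model, chat):
        # tokenizer/model are transformers objects held on the serving class.
        # Render the chat messages into token ids via the model's chat template.
        input_ids = tokenizer.apply_chat_template(chat, return_tensors="pt")
        # model.generate samples internally, so no manual softmax is required.
        output_ids = model.generate(input_ids, max_new_tokens=64)
        return tokenizer.decode(output_ids[0], skip_special_tokens=True)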
8 changes: 5 additions & 3 deletions pyproject.toml
@@ -1,5 +1,5 @@
 [project]
-name = "llamaguard-7b
+name = "llamaguard-7b"
 version = "0.0.1"
 description = "Template repo for Guardrails Hub validators."
 authors = [
@@ -29,5 +29,7 @@ testpaths = [
 [tool.pyright]
 include = ["validator"]
 
-[tool.setuptools.packages.find]
-include = ["validator", "inference", "validator.*", "inference.*"]
+[tool.setuptools]
+packages = [
+    "validator"
+]
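
The unterminated string on the name line would have made pyproject.toml unparsable, breaking any pip install of the package; the second change swaps setuptools' find-directive for an explicit package list, so only validator ships and inference is no longer included. A quick parse check with the standard-library tomllib (Python 3.11+; purely illustrative):

    import tomllib

    # Before the fix this raised tomllib.TOMLDecodeError on the name line.
    with open("pyproject.toml", "rb") as f:
        config = tomllib.load(f)

    print(config["project"]["name"])                 # llamaguard-7b
    print(config["tool"]["setuptools"]["packages"])  # ['validator']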
