diff --git a/catalog.json b/catalog.json index a848bfe..85fa4df 100644 --- a/catalog.json +++ b/catalog.json @@ -917,5 +917,47 @@ } ] } + }, + { + "_descriptorVersion": "0.0.1", + "datePublished": "2024-07-23T21:29:44.000Z", + "name": "Llama 3.1 8B Instruct", + "description": "Llama 3.1 is a dense Transformer with 8B, 70B, or 405B parameters and a context window of up to 128K tokens trained by Meta.", + "author": { + "name": "Meta AI", + "url": "https://ai.meta.com", + "blurb": "Pushing the boundaries of AI through research, infrastructure and product innovation." + }, + "numParameters": "8B", + "resources": { + "canonicalUrl": "https://huggingface.co/meta-llama/Meta-Llama-3.1-8B-Instruct", + "paperUrl": "https://ai.meta.com/research/publications/the-llama-3-herd-of-models/", + "downloadUrl": "https://huggingface.co/lmstudio-community/Meta-Llama-3.1-8B-Instruct-GGUF" + }, + "trainedFor": "chat", + "arch": "llama", + "files": { + "highlighted": { + "economical": { + "name": "Meta-Llama-3.1-8B-Instruct-Q4_K_M.gguf" + } + }, + "all": [ + { + "name": "Meta-Llama-3.1-8B-Instruct-Q4_K_M.gguf", + "url": "https://huggingface.co/lmstudio-community/Meta-Llama-3.1-8B-Instruct-GGUF/resolve/main/Meta-Llama-3.1-8B-Instruct-Q4_K_M.gguf", + "sizeBytes": 4920734656, + "quantization": "Q4_K_M", + "format": "gguf", + "sha256checksum": "2a4ca64e02e7126436cfdb066dd7311f2486eb487191910d3d000fde13826a4d", + "publisher": { + "name": "lmstudio-community", + "socialUrl": "https://huggingface.co/lmstudio-community" + }, + "repository": "lmstudio-community/Meta-Llama-3.1-8B-Instruct-GGUF", + "repositoryUrl": "https://huggingface.co/lmstudio-community/Meta-Llama-3.1-8B-Instruct-GGUF" + } + ] + } } ] \ No newline at end of file