Hudson/pythia:70m-q8_0
194 Downloads · Updated 11 months ago

A set of EleutherAI models trained on the Pile.
pythia:70m-q8_0
model · bfb20e9a71e7 · 77MB
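Assuming this listing is served from the Ollama registry, the blob above can be pulled and run locally with the stock Ollama CLI, using the namespace and tag exactly as shown in the page title:

```
ollama run Hudson/pythia:70m-q8_0
```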
Metadata

general.architecture                    gptneox
general.file_type                       Q8_0
gptneox.attention.head_count            8
gptneox.attention.layer_norm_epsilon    1e-05
gptneox.block_count                     6
gptneox.context_length                  2048
gptneox.embedding_length                512
gptneox.feed_forward_length             2048
gptneox.rope.dimension_count            16
gptneox.use_parallel_residual           true
tokenizer.ggml.bos_token_id             0
tokenizer.ggml.eos_token_id             0
tokenizer.ggml.merges                   [Ġ Ġ, Ġ t, Ġ a, h e, i n, ...]
tokenizer.ggml.model                    gpt2
tokenizer.ggml.pre                      olmo
tokenizer.ggml.token_type               [3, 3, 1, 1, 1, ...]
tokenizer.ggml.tokens                   [<|endoftext|>, <|padding|>, !, ", #, ...]
tokenizer.ggml.unknown_token_id         0
mradermacher.convert_type               hf
mradermacher.quantize_version           2
mradermacher.quantized_at               2024-09-08T03:57:23+02:00
mradermacher.quantized_by               mradermacher
mradermacher.quantized_on               backup1
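The "70m" in the tag can be sanity-checked against this metadata. Below is a back-of-the-envelope count, using embedding_length=512, feed_forward_length=2048, and block_count=6 from above plus the vocabulary size 50304 visible in the tensor shapes below; it assumes untied input/output embeddings, which the separate token_embd.weight and output.weight tensors confirm.

```python
# Rough parameter count for this GPT-NeoX model, derived from the
# metadata above and the tensor shapes in the table below.
d, ffn, layers, vocab = 512, 2048, 6, 50304

embed = d * vocab                      # token_embd.weight
head = d * vocab                       # output.weight (untied)
per_block = (
    (d * 3 * d + 3 * d)                # attn_qkv weight + bias
    + (d * d + d)                      # attn_output weight + bias
    + (d * ffn + ffn)                  # ffn_up weight + bias
    + (ffn * d + d)                    # ffn_down weight + bias
    + 2 * 2 * d                        # attn_norm + ffn_norm, each weight + bias
)
total = embed + head + layers * per_block + 2 * d  # + output_norm
print(f"{total:,}")  # 70,426,624 -> the "70m" in the tag
```

At Q8_0 (34 bytes per 32-weight block, about 1.06 bytes per weight) this also lands close to the 77MB blob size listed above; the handful of F32 norm vectors barely move the total.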
Tensor

Name                        Type   Shape
token_embd.weight           Q8_0   [512, 50304]

blk.0
blk.0.attn_norm.bias        F32    [512]
blk.0.attn_norm.weight      F32    [512]
blk.0.attn_output.bias      F32    [512]
blk.0.attn_output.weight    Q8_0   [512, 512]
blk.0.attn_qkv.bias         F32    [1536]
blk.0.attn_qkv.weight       Q8_0   [512, 1536]
blk.0.ffn_down.bias         F32    [512]
blk.0.ffn_down.weight       Q8_0   [2048, 512]
blk.0.ffn_norm.bias         F32    [512]
blk.0.ffn_norm.weight       F32    [512]
blk.0.ffn_up.bias           F32    [2048]
blk.0.ffn_up.weight         Q8_0   [512, 2048]

blk.1
blk.1.attn_norm.bias        F32    [512]
blk.1.attn_norm.weight      F32    [512]
blk.1.attn_output.bias      F32    [512]
blk.1.attn_output.weight    Q8_0   [512, 512]
blk.1.attn_qkv.bias         F32    [1536]
blk.1.attn_qkv.weight       Q8_0   [512, 1536]
blk.1.ffn_down.bias         F32    [512]
blk.1.ffn_down.weight       Q8_0   [2048, 512]
blk.1.ffn_norm.bias         F32    [512]
blk.1.ffn_norm.weight       F32    [512]
blk.1.ffn_up.bias           F32    [2048]
blk.1.ffn_up.weight         Q8_0   [512, 2048]

blk.2
blk.2.attn_norm.bias        F32    [512]
blk.2.attn_norm.weight      F32    [512]
blk.2.attn_output.bias      F32    [512]
blk.2.attn_output.weight    Q8_0   [512, 512]
blk.2.attn_qkv.bias         F32    [1536]
blk.2.attn_qkv.weight       Q8_0   [512, 1536]
blk.2.ffn_down.bias         F32    [512]
blk.2.ffn_down.weight       Q8_0   [2048, 512]
blk.2.ffn_norm.bias         F32    [512]
blk.2.ffn_norm.weight       F32    [512]
blk.2.ffn_up.bias           F32    [2048]
blk.2.ffn_up.weight         Q8_0   [512, 2048]

blk.3
blk.3.attn_norm.bias        F32    [512]
blk.3.attn_norm.weight      F32    [512]
blk.3.attn_output.bias      F32    [512]
blk.3.attn_output.weight    Q8_0   [512, 512]
blk.3.attn_qkv.bias         F32    [1536]
blk.3.attn_qkv.weight       Q8_0   [512, 1536]
blk.3.ffn_down.bias         F32    [512]
blk.3.ffn_down.weight       Q8_0   [2048, 512]
blk.3.ffn_norm.bias         F32    [512]
blk.3.ffn_norm.weight       F32    [512]
blk.3.ffn_up.bias           F32    [2048]
blk.3.ffn_up.weight         Q8_0   [512, 2048]

blk.4
blk.4.attn_norm.bias        F32    [512]
blk.4.attn_norm.weight      F32    [512]
blk.4.attn_output.bias      F32    [512]
blk.4.attn_output.weight    Q8_0   [512, 512]
blk.4.attn_qkv.bias         F32    [1536]
blk.4.attn_qkv.weight       Q8_0   [512, 1536]
blk.4.ffn_down.bias         F32    [512]
blk.4.ffn_down.weight       Q8_0   [2048, 512]
blk.4.ffn_norm.bias         F32    [512]
blk.4.ffn_norm.weight       F32    [512]
blk.4.ffn_up.bias           F32    [2048]
blk.4.ffn_up.weight         Q8_0   [512, 2048]

blk.5
blk.5.attn_norm.bias        F32    [512]
blk.5.attn_norm.weight      F32    [512]
blk.5.attn_output.bias      F32    [512]
blk.5.attn_output.weight    Q8_0   [512, 512]
blk.5.attn_qkv.bias         F32    [1536]
blk.5.attn_qkv.weight       Q8_0   [512, 1536]
blk.5.ffn_down.bias         F32    [512]
blk.5.ffn_down.weight       Q8_0   [2048, 512]
blk.5.ffn_norm.bias         F32    [512]
blk.5.ffn_norm.weight       F32    [512]
blk.5.ffn_up.bias           F32    [2048]
blk.5.ffn_up.weight         Q8_0   [512, 2048]

output.weight               Q8_0   [512, 50304]
output_norm.bias            F32    [512]
output_norm.weight          F32    [512]
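The tables above can be reproduced programmatically. The gguf Python package that ships with llama.cpp reads this file format directly; the sketch below is minimal, and the blob path is hypothetical (Ollama keys downloaded layers by digest, and bfb20e9a71e7 is only the truncated digest shown above).

```python
# Dump the metadata keys and the tensor name/type/shape listing,
# mirroring the Metadata and Tensor tables above.
# Requires: pip install gguf
from gguf import GGUFReader

# Hypothetical path — substitute the full digest of the model blob.
reader = GGUFReader("blobs/sha256-bfb20e9a71e7...")

for name in reader.fields:   # metadata keys, e.g. general.architecture
    print(name)

for t in reader.tensors:     # one row per tensor
    print(t.name, t.tensor_type.name, list(t.shape))
```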