Hudson/pythia:14m-q8_0
194 Downloads · Updated 11 months ago
A set of EleutherAI models trained on the Pile.
pythia:14m-q8_0
model   d7824bd3f55f · 17MB
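
The sections below reproduce the GGUF metadata and tensor listing for this checkpoint. As a minimal sketch (not part of the model page itself), the same information can be dumped locally with the gguf Python package from the llama.cpp project, assuming the model has already been pulled, e.g. with ollama pull hudson/pythia:14m-q8_0; the file name in the snippet is a placeholder.

    # Minimal sketch: list GGUF metadata keys and tensors with the gguf
    # package (pip install gguf). The path is a placeholder; on a default
    # Ollama install the raw blob sits under ~/.ollama/models/blobs/.
    from gguf import GGUFReader

    reader = GGUFReader("pythia-14m-q8_0.gguf")

    # Metadata keys, e.g. general.architecture, gptneox.block_count, ...
    for key in reader.fields:
        print(key)

    # Tensor name, quantization type, and shape, matching the table below.
    for tensor in reader.tensors:
        print(tensor.name, tensor.tensor_type.name, tensor.shape.tolist())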
Metadata

general.architecture                  gptneox
general.file_type                     Q8_0
gptneox.attention.head_count          4
gptneox.attention.layer_norm_epsilon  1e-05
gptneox.block_count                   6
gptneox.context_length                2048
gptneox.embedding_length              128
gptneox.feed_forward_length           512
gptneox.rope.dimension_count          8
gptneox.use_parallel_residual         true
tokenizer.ggml.bos_token_id           0
tokenizer.ggml.eos_token_id           0
tokenizer.ggml.merges                 [Ġ Ġ, Ġ t, Ġ a, h e, i n, ...]
tokenizer.ggml.model                  gpt2
tokenizer.ggml.pre                    olmo
tokenizer.ggml.token_type             [3, 3, 1, 1, 1, ...]
tokenizer.ggml.tokens                 [<|endoftext|>, <|padding|>, !, ", #, ...]
tokenizer.ggml.unknown_token_id       0
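
These hyperparameters account for the "14m" in the tag. A back-of-the-envelope sketch using only the values above, plus the 50,304-token vocabulary visible in the tensor shapes below, and assuming the standard GPT-NeoX layer layout (fused QKV projection, biases on every linear layer, two LayerNorms per block, untied output head):

    # Rough parameter count for pythia:14m-q8_0 from the metadata above.
    vocab  = 50304  # rows of token_embd.weight / output.weight
    d      = 128    # gptneox.embedding_length
    ffn    = 512    # gptneox.feed_forward_length
    layers = 6      # gptneox.block_count

    per_block = (
        (d * 3 * d + 3 * d)   # attn_qkv weight + bias
        + (d * d + d)         # attn_output weight + bias
        + (d * ffn + ffn)     # ffn_up weight + bias
        + (ffn * d + d)       # ffn_down weight + bias
        + 2 * (d + d)         # attn_norm and ffn_norm, weight + bias each
    )

    # embeddings + transformer blocks + final norm + untied output head
    total = vocab * d + layers * per_block + 2 * d + vocab * d
    print(f"{total:,}")  # 14,067,712 -> roughly 14M parameters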
Tensor

Name                      Type  Shape
token_embd.weight         Q8_0  [128, 50304]

blk.0
blk.0.attn_norm.bias      F32   [128]
blk.0.attn_norm.weight    F32   [128]
blk.0.attn_output.bias    F32   [128]
blk.0.attn_output.weight  Q8_0  [128, 128]
blk.0.attn_qkv.bias       F32   [384]
blk.0.attn_qkv.weight     Q8_0  [128, 384]
blk.0.ffn_down.bias       F32   [128]
blk.0.ffn_down.weight     Q8_0  [512, 128]
blk.0.ffn_norm.bias       F32   [128]
blk.0.ffn_norm.weight     F32   [128]
blk.0.ffn_up.bias         F32   [512]
blk.0.ffn_up.weight       Q8_0  [128, 512]

blk.1
blk.1.attn_norm.bias      F32   [128]
blk.1.attn_norm.weight    F32   [128]
blk.1.attn_output.bias    F32   [128]
blk.1.attn_output.weight  Q8_0  [128, 128]
blk.1.attn_qkv.bias       F32   [384]
blk.1.attn_qkv.weight     Q8_0  [128, 384]
blk.1.ffn_down.bias       F32   [128]
blk.1.ffn_down.weight     Q8_0  [512, 128]
blk.1.ffn_norm.bias       F32   [128]
blk.1.ffn_norm.weight     F32   [128]
blk.1.ffn_up.bias         F32   [512]
blk.1.ffn_up.weight       Q8_0  [128, 512]

blk.2
blk.2.attn_norm.bias      F32   [128]
blk.2.attn_norm.weight    F32   [128]
blk.2.attn_output.bias    F32   [128]
blk.2.attn_output.weight  Q8_0  [128, 128]
blk.2.attn_qkv.bias       F32   [384]
blk.2.attn_qkv.weight     Q8_0  [128, 384]
blk.2.ffn_down.bias       F32   [128]
blk.2.ffn_down.weight     Q8_0  [512, 128]
blk.2.ffn_norm.bias       F32   [128]
blk.2.ffn_norm.weight     F32   [128]
blk.2.ffn_up.bias         F32   [512]
blk.2.ffn_up.weight       Q8_0  [128, 512]

blk.3
blk.3.attn_norm.bias      F32   [128]
blk.3.attn_norm.weight    F32   [128]
blk.3.attn_output.bias    F32   [128]
blk.3.attn_output.weight  Q8_0  [128, 128]
blk.3.attn_qkv.bias       F32   [384]
blk.3.attn_qkv.weight     Q8_0  [128, 384]
blk.3.ffn_down.bias       F32   [128]
blk.3.ffn_down.weight     Q8_0  [512, 128]
blk.3.ffn_norm.bias       F32   [128]
blk.3.ffn_norm.weight     F32   [128]
blk.3.ffn_up.bias         F32   [512]
blk.3.ffn_up.weight       Q8_0  [128, 512]

blk.4
blk.4.attn_norm.bias      F32   [128]
blk.4.attn_norm.weight    F32   [128]
blk.4.attn_output.bias    F32   [128]
blk.4.attn_output.weight  Q8_0  [128, 128]
blk.4.attn_qkv.bias       F32   [384]
blk.4.attn_qkv.weight     Q8_0  [128, 384]
blk.4.ffn_down.bias       F32   [128]
blk.4.ffn_down.weight     Q8_0  [512, 128]
blk.4.ffn_norm.bias       F32   [128]
blk.4.ffn_norm.weight     F32   [128]
blk.4.ffn_up.bias         F32   [512]
blk.4.ffn_up.weight       Q8_0  [128, 512]

blk.5
blk.5.attn_norm.bias      F32   [128]
blk.5.attn_norm.weight    F32   [128]
blk.5.attn_output.bias    F32   [128]
blk.5.attn_output.weight  Q8_0  [128, 128]
blk.5.attn_qkv.bias       F32   [384]
blk.5.attn_qkv.weight     Q8_0  [128, 384]
blk.5.ffn_down.bias       F32   [128]
blk.5.ffn_down.weight     Q8_0  [512, 128]
blk.5.ffn_norm.bias       F32   [128]
blk.5.ffn_norm.weight     F32   [128]
blk.5.ffn_up.bias         F32   [512]
blk.5.ffn_up.weight       Q8_0  [128, 512]

output.weight             Q8_0  [128, 50304]
output_norm.bias          F32   [128]
output_norm.weight        F32   [128]
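
The quantization split above is the one typically produced by llama.cpp: the large matrices are stored as Q8_0 (each block of 32 weights held as 32 int8 values plus one fp16 scale, roughly 1.06 bytes per weight), while the small bias and norm vectors stay in F32. Roughly 14M weights at that rate, plus the embedded tokenizer data, is consistent with the 17MB blob size shown above.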