snowflake-arctic-embed:22m
838K Downloads · Updated 1 year ago

A suite of text embedding models by Snowflake, optimized for performance.
Capability: embedding
Sizes: 22m · 33m · 110m · 137m · 335m
snowflake-arctic-embed:22m / model · a83b0493f894 · 46MB
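For reference, a minimal sketch of requesting one embedding from this tag, assuming a local Ollama server on its default port (11434) and that the model has been pulled with `ollama pull snowflake-arctic-embed:22m`; the request uses Ollama's documented `/api/embeddings` endpoint, and the prompt text is just an example:

```python
# Minimal sketch: request one embedding from a local Ollama server.
# Assumes the server is listening on the default http://localhost:11434
# and the model was pulled with `ollama pull snowflake-arctic-embed:22m`.
import json
import urllib.request

payload = json.dumps({
    "model": "snowflake-arctic-embed:22m",
    "prompt": "Snowflake Arctic embed is a family of text embedding models.",
}).encode()

req = urllib.request.Request(
    "http://localhost:11434/api/embeddings",
    data=payload,
    headers={"Content-Type": "application/json"},
)
with urllib.request.urlopen(req) as resp:
    embedding = json.load(resp)["embedding"]

print(len(embedding))  # 384 floats; see bert.embedding_length below
```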
Metadata

general.architecture               bert
general.file_type                  F16
bert.attention.causal              false
bert.attention.head_count          12
bert.attention.layer_norm_epsilon  1e-12
bert.block_count                   6
bert.context_length                512
bert.embedding_length              384
bert.feed_forward_length           1536
bert.pooling_type                  CLS
tokenizer.ggml.cls_token_id        101
tokenizer.ggml.mask_token_id       103
tokenizer.ggml.model               bert
tokenizer.ggml.padding_token_id    0
tokenizer.ggml.seperator_token_id  102
tokenizer.ggml.token_type          [3, 1, 1, 1, 1, ...]
tokenizer.ggml.token_type_count    2
tokenizer.ggml.tokens              [[PAD], [unused0], [unused1], [unused2], [unused3], ...]
tokenizer.ggml.unknown_token_id    100
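Reading the metadata above: `bert.attention.causal = false` marks this as a bidirectional BERT-style encoder, `bert.context_length = 512` caps inputs at 512 tokens, `bert.pooling_type = CLS` means the CLS token's final hidden state becomes the sentence embedding, and `bert.embedding_length = 384` sets the output dimension. A sketch of the typical retrieval use, ranking documents by cosine similarity over those 384-dimensional vectors; the `embed` helper here is a hypothetical wrapper around the same endpoint as above, and the documents and query are made-up examples:

```python
# Illustrative retrieval sketch: rank documents against a query by the
# cosine similarity of their 384-dimensional embeddings.
import json
import math
import urllib.request

def embed(text: str) -> list[float]:
    # Hypothetical helper wrapping Ollama's /api/embeddings endpoint.
    req = urllib.request.Request(
        "http://localhost:11434/api/embeddings",
        data=json.dumps({"model": "snowflake-arctic-embed:22m",
                         "prompt": text}).encode(),
        headers={"Content-Type": "application/json"},
    )
    with urllib.request.urlopen(req) as resp:
        return json.load(resp)["embedding"]

def cosine(a: list[float], b: list[float]) -> float:
    dot = sum(x * y for x, y in zip(a, b))
    na = math.sqrt(sum(x * x for x in a))
    nb = math.sqrt(sum(x * x for x in b))
    return dot / (na * nb)

docs = [
    "Snowflake is a cloud data warehousing platform.",
    "Polar bears roam the Arctic sea ice.",
]
q = embed("cloud data warehouse vendors")
for doc in sorted(docs, key=lambda d: cosine(q, embed(d)), reverse=True):
    print(doc)  # the Snowflake sentence should rank first
```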
Tensor

Name                            Type  Shape
token_embd.weight               F16   [384, 30522]

blk.0
blk.0.attn_k.bias               F32   [384]
blk.0.attn_k.weight             F16   [384, 384]
blk.0.attn_output.bias          F32   [384]
blk.0.attn_output.weight        F16   [384, 384]
blk.0.attn_output_norm.bias     F32   [384]
blk.0.attn_output_norm.weight   F32   [384]
blk.0.attn_q.bias               F32   [384]
blk.0.attn_q.weight             F16   [384, 384]
blk.0.attn_v.bias               F32   [384]
blk.0.attn_v.weight             F16   [384, 384]
blk.0.ffn_down.bias             F32   [384]
blk.0.ffn_down.weight           F16   [1536, 384]
blk.0.ffn_up.bias               F32   [1536]
blk.0.ffn_up.weight             F16   [384, 1536]
blk.0.layer_output_norm.bias    F32   [384]
blk.0.layer_output_norm.weight  F32   [384]

blk.1
blk.1.attn_k.bias               F32   [384]
blk.1.attn_k.weight             F16   [384, 384]
blk.1.attn_output.bias          F32   [384]
blk.1.attn_output.weight        F16   [384, 384]
blk.1.attn_output_norm.bias     F32   [384]
blk.1.attn_output_norm.weight   F32   [384]
blk.1.attn_q.bias               F32   [384]
blk.1.attn_q.weight             F16   [384, 384]
blk.1.attn_v.bias               F32   [384]
blk.1.attn_v.weight             F16   [384, 384]
blk.1.ffn_down.bias             F32   [384]
blk.1.ffn_down.weight           F16   [1536, 384]
blk.1.ffn_up.bias               F32   [1536]
blk.1.ffn_up.weight             F16   [384, 1536]
blk.1.layer_output_norm.bias    F32   [384]
blk.1.layer_output_norm.weight  F32   [384]

blk.2
blk.2.attn_k.bias               F32   [384]
blk.2.attn_k.weight             F16   [384, 384]
blk.2.attn_output.bias          F32   [384]
blk.2.attn_output.weight        F16   [384, 384]
blk.2.attn_output_norm.bias     F32   [384]
blk.2.attn_output_norm.weight   F32   [384]
blk.2.attn_q.bias               F32   [384]
blk.2.attn_q.weight             F16   [384, 384]
blk.2.attn_v.bias               F32   [384]
blk.2.attn_v.weight             F16   [384, 384]
blk.2.ffn_down.bias             F32   [384]
blk.2.ffn_down.weight           F16   [1536, 384]
blk.2.ffn_up.bias               F32   [1536]
blk.2.ffn_up.weight             F16   [384, 1536]
blk.2.layer_output_norm.bias    F32   [384]
blk.2.layer_output_norm.weight  F32   [384]

blk.3
blk.3.attn_k.bias               F32   [384]
blk.3.attn_k.weight             F16   [384, 384]
blk.3.attn_output.bias          F32   [384]
blk.3.attn_output.weight        F16   [384, 384]
blk.3.attn_output_norm.bias     F32   [384]
blk.3.attn_output_norm.weight   F32   [384]
blk.3.attn_q.bias               F32   [384]
blk.3.attn_q.weight             F16   [384, 384]
blk.3.attn_v.bias               F32   [384]
blk.3.attn_v.weight             F16   [384, 384]
blk.3.ffn_down.bias             F32   [384]
blk.3.ffn_down.weight           F16   [1536, 384]
blk.3.ffn_up.bias               F32   [1536]
blk.3.ffn_up.weight             F16   [384, 1536]
blk.3.layer_output_norm.bias    F32   [384]
blk.3.layer_output_norm.weight  F32   [384]

blk.4
blk.4.attn_k.bias               F32   [384]
blk.4.attn_k.weight             F16   [384, 384]
blk.4.attn_output.bias          F32   [384]
blk.4.attn_output.weight        F16   [384, 384]
blk.4.attn_output_norm.bias     F32   [384]
blk.4.attn_output_norm.weight   F32   [384]
blk.4.attn_q.bias               F32   [384]
blk.4.attn_q.weight             F16   [384, 384]
blk.4.attn_v.bias               F32   [384]
blk.4.attn_v.weight             F16   [384, 384]
blk.4.ffn_down.bias             F32   [384]
blk.4.ffn_down.weight           F16   [1536, 384]
blk.4.ffn_up.bias               F32   [1536]
blk.4.ffn_up.weight             F16   [384, 1536]
blk.4.layer_output_norm.bias    F32   [384]
blk.4.layer_output_norm.weight  F32   [384]

blk.5
blk.5.attn_k.bias               F32   [384]
blk.5.attn_k.weight             F16   [384, 384]
blk.5.attn_output.bias          F32   [384]
blk.5.attn_output.weight        F16   [384, 384]
blk.5.attn_output_norm.bias     F32   [384]
blk.5.attn_output_norm.weight   F32   [384]
blk.5.attn_q.bias               F32   [384]
blk.5.attn_q.weight             F16   [384, 384]
blk.5.attn_v.bias               F32   [384]
blk.5.attn_v.weight             F16   [384, 384]
blk.5.ffn_down.bias             F32   [384]
blk.5.ffn_down.weight           F16   [1536, 384]
blk.5.ffn_up.bias               F32   [1536]
blk.5.ffn_up.weight             F16   [384, 1536]
blk.5.layer_output_norm.bias    F32   [384]
blk.5.layer_output_norm.weight  F32   [384]

position_embd.weight            F16   [384, 512]
token_embd_norm.bias            F32   [384]
token_embd_norm.weight          F32   [384]
token_types.weight              F32   [384, 2]
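As a back-of-the-envelope check (my arithmetic, not official tooling), summing the element counts of the tensor shapes listed above reproduces both the `22m` size tag and, at two bytes per F16 parameter, roughly the 46MB file size:

```python
# Sanity check: total parameter count from the tensor shapes above.

def numel(shape: list[int]) -> int:
    n = 1
    for d in shape:
        n *= d
    return n

EMB, FFN, CTX, VOCAB, BLOCKS = 384, 1536, 512, 30522, 6

per_block = (
    3 * (numel([EMB, EMB]) + EMB)   # attn_q/k/v weights + biases
    + numel([EMB, EMB]) + EMB       # attn_output weight + bias
    + 2 * EMB                       # attn_output_norm weight + bias
    + numel([EMB, FFN]) + FFN       # ffn_up weight + bias
    + numel([FFN, EMB]) + EMB       # ffn_down weight + bias
    + 2 * EMB                       # layer_output_norm weight + bias
)

total = (
    numel([EMB, VOCAB])             # token_embd.weight
    + numel([EMB, CTX])             # position_embd.weight
    + numel([EMB, 2])               # token_types.weight
    + 2 * EMB                       # token_embd_norm weight + bias
    + BLOCKS * per_block
)

print(f"{total:,} parameters")              # 22,565,376 -> the "22m" tag
print(f"~{total * 2 / 1e6:.0f} MB at F16")  # ~45 MB; the F32 bias/norm
                                            # tensors and file headers account
                                            # for the rest of the listed 46MB
```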