Paper: Resolving Interference When Merging Models (arXiv:2306.01708)
Use this model with llama.cpp.

Install via brew (macOS/Linux):

brew install llama.cpp

Install via winget (Windows):

winget install llama.cpp

# Start a local OpenAI-compatible server with a web UI:
llama-server -hf QuantFactory/sepctrum-ties-sqlcoder-8b-GGUF
# Run inference directly in the terminal:
llama-cli -hf QuantFactory/sepctrum-ties-sqlcoder-8b-GGUF

Use a pre-built binary:

# Download a pre-built binary from:
# https://github.com/ggerganov/llama.cpp/releases
# Start a local OpenAI-compatible server with a web UI:
./llama-server -hf QuantFactory/sepctrum-ties-sqlcoder-8b-GGUF
# Run inference directly in the terminal:
./llama-cli -hf QuantFactory/sepctrum-ties-sqlcoder-8b-GGUF

Build from source:

git clone https://github.com/ggerganov/llama.cpp.git
cd llama.cpp
cmake -B build
cmake --build build -j --target llama-server llama-cli
# Start a local OpenAI-compatible server with a web UI:
./build/bin/llama-server -hf QuantFactory/sepctrum-ties-sqlcoder-8b-GGUF
# Run inference directly in the terminal:
./build/bin/llama-cli -hf QuantFactory/sepctrum-ties-sqlcoder-8b-GGUF

Run with Docker Model Runner:

docker model run hf.co/QuantFactory/sepctrum-ties-sqlcoder-8b-GGUF

This is a quantized version of arcee-ai/sepctrum-ties-sqlcoder-8b, created using llama.cpp.
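Once llama-server is running, it exposes an OpenAI-compatible HTTP API (on port 8080 by default), so any OpenAI-style client can talk to it. The prompt below is only an illustration:

curl http://localhost:8080/v1/chat/completions \
  -H "Content-Type: application/json" \
  -d '{"messages": [{"role": "user", "content": "Write a SQL query that returns the number of orders per customer."}]}'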
This is a merge of pre-trained language models created using mergekit.
This model was merged using the TIES merge method, with meta-llama/Meta-Llama-3-8B-Instruct as the base.
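TIES-Merging (the paper linked above) resolves interference between fine-tunes in three steps: trim each task vector (fine-tune minus base) down to its highest-magnitude entries, elect a majority sign for each parameter, and average only the values that agree with the elected sign. Below is a minimal single-tensor sketch of the idea, assuming PyTorch; the function name and simplifications are illustrative, not mergekit's actual API:

import torch

def ties_merge(base, tuned, density=0.75, lam=1.0):
    # Task vectors: what each fine-tune changed relative to the base weights.
    deltas = [t - base for t in tuned]
    # 1) Trim: keep only the top-`density` fraction of entries by magnitude.
    trimmed = []
    for d in deltas:
        k = max(1, int(density * d.numel()))
        thresh = d.abs().flatten().topk(k).values.min()
        trimmed.append(torch.where(d.abs() >= thresh, d, torch.zeros_like(d)))
    stacked = torch.stack(trimmed)
    # 2) Elect sign: the sign of the summed task vectors wins each entry.
    elected = stacked.sum(dim=0).sign()
    # 3) Disjoint merge: average only entries that agree with the elected sign.
    agree = (stacked.sign() == elected) & (stacked != 0)
    merged = (stacked * agree).sum(dim=0) / agree.sum(dim=0).clamp(min=1)
    return base + lam * merged

mergekit applies the same procedure tensor by tensor, additionally scaling each model's task vector by the per-layer weight values and honoring the density and normalize options from the config below.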
The following models were included in the merge:

defog/llama-3-sqlcoder-8b
The following YAML configuration was used to produce this model. Each value list has 32 entries, one per transformer layer of the 8B model, and the two models' weight vectors appear to be complementary (summing to 1 at every layer): roughly, each projection is either left as the base model's weights or moved halfway toward sqlcoder's. The trailing filter-less value entries set the default weight for any parameters the filters don't match.
merge_method: ties
base_model: meta-llama/Meta-Llama-3-8B-Instruct
models:
  - model: defog/llama-3-sqlcoder-8b
    parameters:
      weight:
        - filter: mlp.down_proj
          value: [0, 0, 0, 0, 0, 0, 0, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0, 0.5, 0, 0, 0, 0, 0, 0.5, 0.5, 0.5, 0.5, 0.5, 0, 0, 0]
        - filter: mlp.gate_proj
          value: [0, 0, 0, 0, 0, 0, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.5, 0.5]
        - filter: mlp.up_proj
          value: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.5, 0.5, 0, 0, 0, 0, 0.5, 0, 0.5, 0, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]
        - filter: self_attn.k_proj
          value: [0.5, 0.5, 0.5, 0, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0, 0.5, 0.5, 0.5, 0.5, 0.5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.5, 0]
        - filter: self_attn.o_proj
          value: [0.5, 0.5, 0.5, 0.5, 0.5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.5, 0.5, 0.5, 0.5, 0.5, 0, 0.5, 0, 0, 0, 0.5, 0.5, 0.5, 0.5, 0.5, 0]
        - filter: self_attn.q_proj
          value: [0, 0, 0.5, 0.5, 0.5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0, 0.5, 0.5, 0.5, 0.5, 0.5]
        - filter: self_attn.v_proj
          value: [0.5, 0, 0.5, 0, 0, 0.5, 0, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0, 0, 0, 0, 0.5, 0.5, 0, 0, 0, 0, 0.5, 0, 0, 0.5, 0, 0, 0.5, 0.5]
        - value: [0]
      density: 0.75
  - model: meta-llama/Meta-Llama-3-8B-Instruct
    parameters:
      weight:
        - filter: mlp.down_proj
          value: [1, 1, 1, 1, 1, 1, 1, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 1, 0.5, 1, 1, 1, 1, 1, 0.5, 0.5, 0.5, 0.5, 0.5, 1, 1, 1]
        - filter: mlp.gate_proj
          value: [1, 1, 1, 1, 1, 1, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0.5, 0.5]
        - filter: mlp.up_proj
          value: [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0.5, 0.5, 1, 1, 1, 1, 0.5, 1, 0.5, 1, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]
        - filter: self_attn.k_proj
          value: [0.5, 0.5, 0.5, 1, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 1, 0.5, 0.5, 0.5, 0.5, 0.5, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0.5, 1]
        - filter: self_attn.o_proj
          value: [0.5, 0.5, 0.5, 0.5, 0.5, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0.5, 0.5, 0.5, 0.5, 0.5, 1, 0.5, 1, 1, 1, 0.5, 0.5, 0.5, 0.5, 0.5, 1]
        - filter: self_attn.q_proj
          value: [1, 1, 0.5, 0.5, 0.5, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 1, 0.5, 0.5, 0.5, 0.5, 0.5]
        - filter: self_attn.v_proj
          value: [0.5, 1, 0.5, 1, 1, 0.5, 1, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 1, 1, 1, 1, 0.5, 0.5, 1, 1, 1, 1, 0.5, 1, 1, 0.5, 1, 1, 0.5, 0.5]
        - value: [1]
      density: 1.0
parameters: {normalize: true, int8_mask: true}
dtype: bfloat16
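For reference, a merge like this is typically reproduced by saving the config to a file and running mergekit's mergekit-yaml entry point; the filename and output directory below are placeholders:

pip install mergekit
mergekit-yaml ties-config.yaml ./merged-model --cuda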
The repository provides quantized GGUF files at 2-bit, 3-bit, 4-bit, 5-bit, 6-bit, and 8-bit precision.
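llama.cpp's -hf flag accepts an optional quantization tag after a colon to pin one of these files; the tag below (Q4_K_M, a common 4-bit variant) is an assumption about this repo's file naming, not verified against its file list:

llama-cli -hf QuantFactory/sepctrum-ties-sqlcoder-8b-GGUF:Q4_K_M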