GLM 4.1V 9B MLX 4bit
Model overview
This model was converted from THUDM/GLM-4.1V-9B-Thinking to the MLX format and supports vision-language understanding and generation tasks.
Model highlights
MLX format: converted to MLX for running on Apple silicon devices.
4-bit quantization: the weights are quantized to 4 bits to cut memory use (a rough estimate follows this list).
Vision-language capability: supports image understanding and image-grounded text generation.
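For a rough sense of what 4-bit quantization buys, here is a back-of-envelope estimate of the weight footprint, assuming MLX's default affine quantization (group size 64 with an fp16 scale and bias per group); activations, the KV cache, and any layers left unquantized come on top of this:

params = 9e9                                 # ~9 billion parameters
weights_gb = params * 4 / 8 / 1e9            # 4 bits per weight
overhead_gb = params / 64 * (2 + 2) / 1e9    # fp16 scale + bias per 64-weight group
print(f"{weights_gb + overhead_gb:.1f} GB")  # ≈ 5.1 GB of weights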
Model capabilities
Vision-language understanding
Image captioning
Visual question answering
Multimodal reasoning
Use cases
Image captioning (content generation): generate a detailed description of an input image.
Visual question answering (intelligent Q&A): answer questions about the content of an image.
🚀 Rainnighttram/GLM-4.1V-9B-MLX-4bit
This repository, Rainnighttram/GLM-4.1V-9B-MLX-4bit, contains THUDM/GLM-4.1V-9B-Thinking converted to the MLX format with mlx-lm version 0.26.0.
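For reference, a re-conversion from the base model would look roughly like the mlx-lm convert invocation below. This is a sketch only: the exact flags depend on the installed mlx-lm version, and, as the notes below explain, the converted files may still need manual adjustment before they load.

mlx_lm.convert --hf-path THUDM/GLM-4.1V-9B-Thinking --mlx-path GLM-4.1V-9B-MLX-4bit -q --q-bits 4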
🚀 Quick Start
Notes
This is not the official repository for this model, so no official support is available for it. To load the model you will need to patch the MLX-VLM package by hand; at the moment, converting and loading the model can be error-prone and confusing.
Install dependencies
pip install mlx-lm mlx-vlm mlx torchvision
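The conversion above was done with mlx-lm 0.26.0, so it can help to confirm which versions actually got installed. A quick check using only the Python standard library:

python -c "from importlib.metadata import version; print(version('mlx-lm'), version('mlx-vlm'), version('mlx'))"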
Set up the model files
Create a glm4v package for mlx-vlm inside its "models" directory (the snippet after these commands shows how to locate that directory):
mkdir glm4v
cd glm4v
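The "models" directory referred to here is the one inside the installed mlx_vlm package, not a folder in your own project. Assuming a regular pip install, the following one-liner prints its location so you know where to create the glm4v folder:

python -c "import mlx_vlm.models as m, os; print(os.path.dirname(m.__file__))"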
Create the required model files
__init__.py
nano __init__.py
# In file: mlx_vlm/models/glm4v/__init__.py
from .glm4v import Model, ModelConfig
from .language import LanguageModel, TextConfig
from .vision import VisionModel, VisionConfig
# save and exit
language.py
nano language.py
# In file: mlx_vlm/models/glm4v/language.py
import inspect
from dataclasses import dataclass
from typing import Any, Optional, Dict, List, Tuple

import mlx.core as mx
import mlx.nn as nn

from ..base import (
    create_attention_mask,
    scaled_dot_product_attention,
)


# Define the complete output class with all optional attributes the generator
# might check for.
@dataclass
class CausalLMOutput:
    logits: mx.array
    cross_attention_states: Optional[Tuple] = None
    encoder_outputs: Optional[Tuple] = None
    hidden_states: Optional[Tuple] = None
    attentions: Optional[Tuple] = None


@dataclass
class TextConfig:
    model_type: str
    hidden_size: int
    num_hidden_layers: int
    intermediate_size: int
    num_attention_heads: int
    attention_bias: bool
    rms_norm_eps: float
    vocab_size: int
    num_key_value_heads: int
    partial_rotary_factor: float
    rope_theta: float
    rope_traditional: bool = True
    max_position_embeddings: int = 65536

    @classmethod
    def from_dict(cls, params):
        # Keep only the keys that this dataclass actually declares.
        return cls(
            **{
                k: v
                for k, v in params.items()
                if k in inspect.signature(cls).parameters
            }
        )


class Glm4MLP(nn.Module):
    def __init__(self, args: TextConfig):
        super().__init__()
        # Fused gate/up projection; the output is split in half in __call__.
        self.gate_up_proj = nn.QuantizedLinear(
            args.hidden_size, 2 * args.intermediate_size, bias=False
        )
        self.down_proj = nn.QuantizedLinear(
            args.intermediate_size, args.hidden_size, bias=False
        )

    def __call__(self, x) -> mx.array:
        x = self.gate_up_proj(x)
        gate, up_states = mx.split(x, 2, axis=-1)
        return self.down_proj(nn.silu(gate) * up_states)


class Glm4Attention(nn.Module):
    def __init__(self, args: TextConfig):
        super().__init__()
        self.head_dim = args.hidden_size // args.num_attention_heads
        self.n_heads = args.num_attention_heads
        self.n_kv_heads = args.num_key_value_heads
        self.scale = self.head_dim ** -0.5
        bias = args.attention_bias

        q_out = args.num_attention_heads * self.head_dim
        kv_out = args.num_key_value_heads * self.head_dim

        self.q_proj = nn.QuantizedLinear(args.hidden_size, q_out, bias=bias)
        self.k_proj = nn.QuantizedLinear(args.hidden_size, kv_out, bias=bias)
        self.v_proj = nn.QuantizedLinear(args.hidden_size, kv_out, bias=bias)
        self.o_proj = nn.QuantizedLinear(q_out, args.hidden_size, bias=False)

        # Partial RoPE: only a fraction of each head dimension is rotated.
        self.rope = nn.RoPE(
            dims=int(self.head_dim * args.partial_rotary_factor),
            base=args.rope_theta,
            traditional=args.rope_traditional,
        )

    def __call__(
        self, x: mx.array, mask: Optional[mx.array] = None, cache: Optional[Any] = None
    ) -> mx.array:
        B, L, D = x.shape

        queries, keys, values = self.q_proj(x), self.k_proj(x), self.v_proj(x)

        queries = queries.reshape(B, L, self.n_heads, -1).transpose(0, 2, 1, 3)
        keys = keys.reshape(B, L, self.n_kv_heads, -1).transpose(0, 2, 1, 3)
        values = values.reshape(B, L, self.n_kv_heads, -1).transpose(0, 2, 1, 3)

        if cache is not None:
            queries = self.rope(queries, offset=cache.offset)
            keys = self.rope(keys, offset=cache.offset)
            keys, values = cache.update_and_fetch(keys, values)
        else:
            queries = self.rope(queries)
            keys = self.rope(keys)

        output = scaled_dot_product_attention(
            queries, keys, values, cache=cache, scale=self.scale, mask=mask
        )
        output = output.transpose(0, 2, 1, 3).reshape(B, L, -1)
        return self.o_proj(output)


class Glm4DecoderLayer(nn.Module):
    def __init__(self, args: TextConfig):
        super().__init__()
        self.self_attn = Glm4Attention(args=args)
        self.mlp = Glm4MLP(args)
        self.input_layernorm = nn.RMSNorm(args.hidden_size, eps=args.rms_norm_eps)
        self.post_attention_layernorm = nn.RMSNorm(
            args.hidden_size, eps=args.rms_norm_eps
        )
        # GLM-4 style extra norms applied to the sub-layer outputs before the
        # residual additions.
        self.post_self_attn_layernorm = nn.RMSNorm(
            args.hidden_size, eps=args.rms_norm_eps
        )
        self.post_mlp_layernorm = nn.RMSNorm(args.hidden_size, eps=args.rms_norm_eps)

    def __call__(
        self, x: mx.array, mask: Optional[mx.array] = None, cache: Optional[Any] = None
    ) -> mx.array:
        x = x + self.post_self_attn_layernorm(
            self.self_attn(self.input_layernorm(x), mask, cache)
        )
        residual = x
        x = (
            self.post_mlp_layernorm(self.mlp(self.post_attention_layernorm(x)))
            + residual
        )
        return x


class Glm4Model(nn.Module):
    def __init__(self, args: TextConfig):
        super().__init__()
        self.embed_tokens = nn.QuantizedEmbedding(args.vocab_size, args.hidden_size)
        self.layers = [
            Glm4DecoderLayer(args=args) for _ in range(args.num_hidden_layers)
        ]
        self.norm = nn.RMSNorm(args.hidden_size, eps=args.rms_norm_eps)

    def __call__(
        self,
        inputs: mx.array,
        mask: Optional[mx.array] = None,
        cache: Optional[Any] = None,
        inputs_embeds: Optional[mx.array] = None,
    ):
        # Use precomputed embeddings when provided; otherwise embed token ids.
        if inputs_embeds is not None:
            h = inputs_embeds
        else:
            h = self.embed_tokens(inputs)

        if mask is None:
            mask = create_attention_mask(h, cache)

        if cache is None:
            cache = [None] * len(self.layers)

        for layer, c in zip(self.layers, cache):
            h = layer(h, mask, cache=c)

        return self.norm(h)


class LanguageModel(nn.Module):
    def __init__(self, config: TextConfig):
        super().__init__()
        self.config = config
        self.model_type = config.model_type
        self.model = Glm4Model(config)
        self.lm_head = nn.QuantizedLinear(
            config.hidden_size, config.vocab_size, bias=False
        )

    def __call__(
        self,
        inputs: mx.array,
        inputs_embeds: Optional[mx.array] = None,
        mask: Optional[mx.array] = None,
        cache=None,
    ):
        out = self.model(inputs, inputs_embeds=inputs_embeds, mask=mask, cache=cache)
        out = self.lm_head(out)
        # Return a consistent output object so the generator can always read
        # .logits (and the other optional fields it may probe).
        return CausalLMOutput(logits=out)

    @property
    def layers(self):
        return self.model.layers
# save and exit
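Assuming language.py was saved under mlx_vlm/models/glm4v/ as above, a quick smoke test confirms that the file imports and that TextConfig.from_dict behaves as intended (unknown keys are silently dropped). The numeric values here are placeholders for illustration, not the model's actual configuration:

from mlx_vlm.models.glm4v.language import TextConfig

params = {
    "model_type": "glm4v_text",     # placeholder values, for illustration only
    "hidden_size": 4096,
    "num_hidden_layers": 40,
    "intermediate_size": 13696,
    "num_attention_heads": 32,
    "attention_bias": True,
    "rms_norm_eps": 1e-5,
    "vocab_size": 151552,
    "num_key_value_heads": 2,
    "partial_rotary_factor": 0.5,
    "rope_theta": 10000.0,
    "unexpected_key": "ignored",    # unknown keys are filtered out by from_dict
}
cfg = TextConfig.from_dict(params)
print(cfg.hidden_size, cfg.max_position_embeddings)  # 4096 65536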
vision.py
nano vision.py
# In file: mlx_vlm/models/glm4v/vision.py
# This file provides the vision tower: it must define the VisionModel and
# VisionConfig classes that __init__.py imports above.
# save and exit
glm4v.py
nano glm4v.py
# In file: mlx_vlm/models/glm4v/glm4v.py
# This file is the entry point: it must define the top-level Model and
# ModelConfig classes that __init__.py imports, wiring the language and
# vision components together.
# save and exit
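Once the weights are available and the glm4v module above is fully in place (including the vision and top-level model files), generation can be attempted from the command line. A sketch using mlx-vlm's generate entry point; flag names can differ between mlx-vlm versions (python -m mlx_vlm.generate --help lists what your install accepts), and the image path is a placeholder:

python -m mlx_vlm.generate \
  --model Rainnighttram/GLM-4.1V-9B-MLX-4bit \
  --image ./example.jpg \
  --prompt "Describe this image in detail." \
  --max-tokens 512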
📄 License
This project is released under the MIT license.

Attribute | Details
---|---
Model type | Text generation
Base model | THUDM/GLM-4.1V-9B-Thinking
Library name | mlx
Tags | inference, mlx