inferencer.py: 2 additions & 2 deletions
@@ -7,8 +7,8 @@
 from PIL import Image
 import torch

-from data.data_utils import pil_img2rgb
-from modeling.bagel.qwen2_navit import NaiveCache
+from .data.data_utils import pil_img2rgb
+from .modeling.bagel.qwen2_navit import NaiveCache

modeling/bagel/bagel.py: 1 addition & 1 deletion
@@ -13,7 +13,7 @@
 from transformers.configuration_utils import PretrainedConfig
 from transformers.modeling_utils import PreTrainedModel

-from data.data_utils import (
+from ...data.data_utils import (
     create_sparse_mask,
     get_flattened_position_ids_extrapolate,
     get_flattened_position_ids_interpolate,
modeling/bagel/qwen2_navit.py: 2 additions & 2 deletions
@@ -22,7 +22,7 @@
 from transformers.utils import ModelOutput

 from flash_attn import flash_attn_varlen_func
-from modeling.qwen2.modeling_qwen2 import (
+from ..qwen2.modeling_qwen2 import (
     Qwen2Attention,
     Qwen2MLP,
     Qwen2PreTrainedModel,
@@ -31,7 +31,7 @@
     apply_rotary_pos_emb,
 )

-from modeling.qwen2.configuration_qwen2 import Qwen2Config as _Qwen2Config
+from ..qwen2.configuration_qwen2 import Qwen2Config as _Qwen2Config


 torch._dynamo.config.cache_size_limit = 512
modeling/bagel/siglip_navit.py: 2 additions & 2 deletions
@@ -13,8 +13,8 @@
 from torch import nn

 from transformers.activations import ACT2FN
-from modeling.siglip.configuration_siglip import SiglipVisionConfig as _SiglipVisionConfig
-from modeling.siglip.modeling_siglip import SiglipAttention, SiglipPreTrainedModel
+from ..siglip.configuration_siglip import SiglipVisionConfig as _SiglipVisionConfig
+from ..siglip.modeling_siglip import SiglipAttention, SiglipPreTrainedModel
 from flash_attn import flash_attn_varlen_func
nodes.py: 7 additions & 7 deletions
@@ -22,15 +22,15 @@
 from accelerate import infer_auto_device_map, load_checkpoint_and_dispatch, init_empty_weights
 from safetensors.torch import load_file

-from data.transforms import ImageTransform
-from data.data_utils import pil_img2rgb, add_special_tokens
-from modeling.bagel import (
+from .data.transforms import ImageTransform
+from .data.data_utils import pil_img2rgb, add_special_tokens
+from .modeling.bagel import (
     BagelConfig, Bagel, Qwen2Config, Qwen2ForCausalLM, SiglipVisionConfig, SiglipVisionModel
 )
-from modeling.qwen2 import Qwen2Tokenizer
-from modeling.bagel.qwen2_navit import NaiveCache
-from modeling.autoencoder import load_ae
-from inferencer import InterleaveInferencer
+from .modeling.qwen2 import Qwen2Tokenizer
+from .modeling.bagel.qwen2_navit import NaiveCache
+from .modeling.autoencoder import load_ae
+from .inferencer import InterleaveInferencer


 class LoadBAGELModel:
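
Taken together, these hunks switch every intra-repo import from absolute to relative, so the modules only resolve when the repository is imported as a package rather than run from its own root. A minimal sketch of the intended usage, assuming the repo directory sits on the import path under a hypothetical package name ComfyUI_BAGEL (the real directory name may differ):

    # Hypothetical consumer, e.g. ComfyUI loading this repo as a custom-node package.
    # "ComfyUI_BAGEL" is an assumed package name, i.e. the directory the repo lives in.
    from ComfyUI_BAGEL.inferencer import InterleaveInferencer
    from ComfyUI_BAGEL.nodes import LoadBAGELModel

    # Running a module directly (e.g. `python inferencer.py`) would now fail with
    # "attempted relative import with no known parent package"; run it as a module
    # from one level above instead:
    #   python -m ComfyUI_BAGEL.inferencer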