push:
paths:
- '.github/workflows/python-type-check.yml'
- - 'pyrightconfig.json'
+ - 'ty.toml'
- '**.py'
- '**/requirements*.txt'
+ # - 'pyrightconfig.json'
pull_request:
paths:
- '.github/workflows/python-type-check.yml'
- - 'pyrightconfig.json'
+ - 'ty.toml'
- '**.py'
- '**/requirements*.txt'
+ # - 'pyrightconfig.json'
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref && github.ref || github.run_id }}
jobs:
python-type-check:
- runs-on: ubuntu-latest
- name: pyright type-check
+ runs-on: ubuntu-slim
+ name: python type-check
steps:
- name: Check out source repository
uses: actions/checkout@v6
uses: actions/setup-python@v6
with:
python-version: "3.11"
- pip-install: -r requirements/requirements-all.txt
- - name: Type-check with Pyright
- uses: jakebailey/pyright-action@v2
- with:
- version: 1.1.382
- level: warning
- warnings: true
+ pip-install: -r requirements/requirements-all.txt ty==0.0.24
+ # - name: Type-check with Pyright
+ # uses: jakebailey/pyright-action@v2
+ # with:
+ # version: 1.1.382
+ # level: warning
+ # warnings: true
+ - name: Type-check with ty
+ run: |
+ ty check --output-format=github
from gguf.vocab import MistralTokenizerType, MistralVocab
try:
- from mistral_common.tokens.tokenizers.base import TokenizerVersion # pyright: ignore[reportMissingImports]
- from mistral_common.tokens.tokenizers.multimodal import DATASET_MEAN as _MISTRAL_COMMON_DATASET_MEAN, DATASET_STD as _MISTRAL_COMMON_DATASET_STD # pyright: ignore[reportMissingImports]
- from mistral_common.tokens.tokenizers.tekken import Tekkenizer # pyright: ignore[reportMissingImports]
- from mistral_common.tokens.tokenizers.sentencepiece import ( # pyright: ignore[reportMissingImports]
+ from mistral_common.tokens.tokenizers.base import TokenizerVersion # type: ignore[import-not-found]
+ from mistral_common.tokens.tokenizers.multimodal import DATASET_MEAN as _MISTRAL_COMMON_DATASET_MEAN, DATASET_STD as _MISTRAL_COMMON_DATASET_STD # type: ignore[import-not-found]
+ from mistral_common.tokens.tokenizers.tekken import Tekkenizer # type: ignore[import-not-found]
+ from mistral_common.tokens.tokenizers.sentencepiece import ( # type: ignore[import-not-found]
SentencePieceTokenizer,
)
_MISTRAL_COMMON_DATASET_STD = (0.26862954, 0.26130258, 0.27577711)
_mistral_common_installed = False
- TokenizerVersion = None
- Tekkenizer = None
- SentencePieceTokenizer = None
+ TokenizerVersion: Any = None
+ Tekkenizer: Any = None
+ SentencePieceTokenizer: Any = None
_mistral_import_error_msg = (
"Mistral format requires `mistral-common` to be installed. Please run "
"`pip install mistral-common[image,audio]` to install it."
if weight_map is None or not isinstance(weight_map, dict):
raise ValueError(f"Can't load 'weight_map' from {index_name!r}")
tensor_names_from_index.update(weight_map.keys())
- part_dict: dict[str, None] = dict.fromkeys(weight_map.values(), None)
+ part_dict: dict[str, None] = dict.fromkeys(weight_map.values(), None) # ty: ignore[invalid-assignment]
part_names = sorted(part_dict.keys())
else:
weight_map = {}
logger.error(f'Error: Missing {tokenizer_path}')
sys.exit(1)
- sentencepiece_model = model.ModelProto() # pyright: ignore[reportAttributeAccessIssue]
+ sentencepiece_model = model.ModelProto() # pyright: ignore[reportAttributeAccessIssue] # ty: ignore[unresolved-attribute]
sentencepiece_model.ParseFromString(open(tokenizer_path, "rb").read())
add_prefix = sentencepiece_model.normalizer_spec.add_dummy_prefix
vocab_size = max(self.hparams.get("vocab_size", 0), tokenizer.vocab_size)
else:
- sentencepiece_model = model.ModelProto() # pyright: ignore[reportAttributeAccessIssue]
+ sentencepiece_model = model.ModelProto() # pyright: ignore[reportAttributeAccessIssue] # ty: ignore[unresolved-attribute]
sentencepiece_model.ParseFromString(open(tokenizer_path, "rb").read())
assert sentencepiece_model.trainer_spec.model_type == 1 # UNIGRAM
if not tokenizer_path.is_file():
raise FileNotFoundError(f"File not found: {tokenizer_path}")
- sentencepiece_model = model.ModelProto() # pyright: ignore[reportAttributeAccessIssue]
+ sentencepiece_model = model.ModelProto() # pyright: ignore[reportAttributeAccessIssue] # ty: ignore[unresolved-attribute]
sentencepiece_model.ParseFromString(open(tokenizer_path, "rb").read())
# some models like Pile-T5 family use BPE tokenizer instead of Unigram
if not tokenizer_path.is_file():
raise FileNotFoundError(f"File not found: {tokenizer_path}")
- sentencepiece_model = model.ModelProto() # pyright: ignore[reportAttributeAccessIssue]
+ sentencepiece_model = model.ModelProto() # pyright: ignore[reportAttributeAccessIssue] # ty: ignore[unresolved-attribute]
sentencepiece_model.ParseFromString(open(tokenizer_path, "rb").read())
# some models like Pile-T5 family use BPE tokenizer instead of Unigram
kwargs = {}
if func is torch.Tensor.numpy:
+ assert len(args)
return args[0].numpy()
return cls._wrap_fn(func)(*args, **kwargs)
(n_dims, name_len, dtype) = struct.unpack('<3I', data[offset:offset + 12])
assert n_dims >= 0 and n_dims <= 4, f'Invalid tensor dimensions {n_dims}'
assert name_len < 4096, 'Absurd tensor name length'
- quant = gguf.GGML_QUANT_SIZES.get(dtype)
+ self.dtype = gguf.GGMLQuantizationType(dtype)
+ quant = gguf.GGML_QUANT_SIZES.get(self.dtype)
assert quant is not None, 'Unknown tensor type'
(blksize, tysize) = quant
offset += 12
- self.dtype= gguf.GGMLQuantizationType(dtype)
self.dims = struct.unpack(f'<{n_dims}I', data[offset:offset + (4 * n_dims)])
offset += 4 * n_dims
self.name = bytes(data[offset:offset + name_len])
kwargs = {}
if func is torch.permute:
+ assert len(args)
return type(args[0]).permute(*args, **kwargs)
elif func is torch.reshape:
+ assert len(args)
return type(args[0]).reshape(*args, **kwargs)
elif func is torch.stack:
+ assert len(args)
assert isinstance(args[0], Sequence)
dim = kwargs.get("dim", 0)
assert dim == 0
torch.stack([b._lora_B for b in args[0]], dim),
)
elif func is torch.cat:
+ assert len(args)
assert isinstance(args[0], Sequence)
dim = kwargs.get("dim", 0)
assert dim == 0
logger.error(f"Model {hparams['architectures'][0]} is not supported")
sys.exit(1)
- class LoraModel(model_class):
+ class LoraModel(model_class): # ty: ignore[unsupported-base]
model_arch = model_class.model_arch
lora_alpha: float
return f'({result})?' if min_items == 0 else result
def _generate_min_max_int(min_value: Optional[int], max_value: Optional[int], out: list, decimals_left: int = 16, top_level: bool = True):
- has_min = min_value != None
- has_max = max_value != None
-
def digit_range(from_char: str, to_char: str):
out.append("[")
if from_char == to_char:
out.append(to_str[i])
out.append("]")
- if has_min and has_max:
+ if min_value is not None and max_value is not None:
if min_value < 0 and max_value < 0:
out.append("\"-\" (")
_generate_min_max_int(-max_value, -min_value, out, decimals_left, top_level=True)
less_decimals = max(decimals_left - 1, 1)
- if has_min:
+ if min_value is not None:
if min_value < 0:
out.append("\"-\" (")
_generate_min_max_int(None, -min_value, out, decimals_left, top_level=False)
more_digits(length - 1, less_decimals)
return
- if has_max:
+ if max_value is not None:
if max_value >= 0:
if top_level:
out.append("\"-\" [1-9] ")
print("Using SentenceTransformer to apply all numbered layers")
model = SentenceTransformer(model_path)
tokenizer = model.tokenizer
- config = model[0].auto_model.config # type: ignore
+ config = model[0].auto_model.config
else:
tokenizer = AutoTokenizer.from_pretrained(model_path)
config = AutoConfig.from_pretrained(model_path, trust_remote_code=True)
print(f"Model file: {type(model).__module__}")
# Verify the model is using the correct sliding window
- if hasattr(model.config, 'sliding_window'): # type: ignore
- print(f"Model's sliding_window: {model.config.sliding_window}") # type: ignore
+ if hasattr(model.config, 'sliding_window'):
+ print(f"Model's sliding_window: {model.config.sliding_window}")
else:
print("Model config does not have sliding_window attribute")
device = next(model.parameters()).device
else:
# For SentenceTransformer, get device from the underlying model
- device = next(model[0].auto_model.parameters()).device # type: ignore
+ device = next(model[0].auto_model.parameters()).device
model_name = os.path.basename(model_path)
print(f"{token_id:6d} -> '{token_str}'")
print(f"Embeddings shape (after all SentenceTransformer layers): {all_embeddings.shape}")
- print(f"Embedding dimension: {all_embeddings.shape[1] if len(all_embeddings.shape) > 1 else all_embeddings.shape[0]}") # type: ignore
+ print(f"Embedding dimension: {all_embeddings.shape[1] if len(all_embeddings.shape) > 1 else all_embeddings.shape[0]}")
else:
# Standard approach: use base model output only
encoded = tokenizer(
print(f"Embedding dimension: {all_embeddings.shape[1]}")
if len(all_embeddings.shape) == 1:
- n_embd = all_embeddings.shape[0] # type: ignore
+ n_embd = all_embeddings.shape[0]
n_embd_count = 1
all_embeddings = all_embeddings.reshape(1, -1)
else:
- n_embd = all_embeddings.shape[1] # type: ignore
- n_embd_count = all_embeddings.shape[0] # type: ignore
+ n_embd = all_embeddings.shape[1]
+ n_embd_count = all_embeddings.shape[0]
print()
import argparse
import sys
-from common import compare_tokens # type: ignore
+from common import compare_tokens # type: ignore[import-not-found]
def parse_arguments():
from copy import copy
from enum import Enum
from inspect import getdoc, isclass
-from typing import TYPE_CHECKING, Any, Callable, List, Optional, Union, get_args, get_origin, get_type_hints
+from typing import TYPE_CHECKING, Any, Callable, Optional, Union, get_args, get_origin, get_type_hints
from docstring_parser import parse
from pydantic import BaseModel, create_model
# Assert that the parameter has a type annotation
if param.annotation == inspect.Parameter.empty:
- raise TypeError(f"Parameter '{param.name}' in function '{func.__name__}' lacks a type annotation")
+ raise TypeError(f"""Parameter '{param.name}' in function '{getattr(func, "__name__", "")}' lacks a type annotation""")
# Find the parameter's description in the docstring
param_doc = next((d for d in docstring.params if d.arg_name == param.name), None)
# Assert that the parameter has a description
if not param_doc or not param_doc.description:
raise ValueError(
- f"Parameter '{param.name}' in function '{func.__name__}' lacks a description in the docstring")
+ f"""Parameter '{param.name}' in function '{getattr(func, "__name__", "")}' lacks a description in the docstring""")
# Add parameter details to the schema
param_docs.append((param.name, param_doc))
dynamic_fields[param.name] = (
param.annotation if param.annotation != inspect.Parameter.empty else str, default_value)
# Creating the dynamic model
- dynamic_model = create_model(f"{func.__name__}", **dynamic_fields)
+    dynamic_model = create_model(f"{getattr(func, '__name__', '')}", **dynamic_fields)
for name, param_doc in param_docs:
dynamic_model.model_fields[name].description = param_doc.description
if items != {}:
array = {"properties": items}
array_type = convert_dictionary_to_pydantic_model(array, f"{model_name}_{field_name}_items")
- fields[field_name] = (List[array_type], ...)
+ fields[field_name] = (list[array_type], ...) # ty: ignore[invalid-type-form]
else:
fields[field_name] = (list, ...)
elif field_type == "object":
else:
raise ValueError("Invalid GGUF metadata value type or value")
- return kv_data
+ return bytes(kv_data)
@staticmethod
def format_n_bytes_to_str(num: int) -> str:
if isinstance(meta_noop, tuple):
dtype, shape = meta_noop
assert callable(shape)
- res = cls.meta_with_dtype_and_shape(dtype, shape(res.shape))
+ res = cls.meta_with_dtype_and_shape(dtype, shape(res.shape)) # ty: ignore[call-top-callable]
else:
res = cls.meta_with_dtype_and_shape(meta_noop, res.shape)
def __init_subclass__(cls, qtype: GGMLQuantizationType) -> None:
cls.qtype = qtype
cls.block_size, cls.type_size = GGML_QUANT_SIZES[qtype]
- cls.__quantize_lazy = LazyNumpyTensor._wrap_fn(
+ cls.__quantize_lazy: Any = LazyNumpyTensor._wrap_fn(
cls.__quantize_array,
meta_noop=(np.uint8, cls.__shape_to_bytes)
)
- cls.__dequantize_lazy = LazyNumpyTensor._wrap_fn(
+ cls.__dequantize_lazy: Any = LazyNumpyTensor._wrap_fn(
cls.__dequantize_array,
meta_noop=(np.float32, cls.__shape_from_bytes)
)
try:
from sentencepiece import SentencePieceProcessor
except ImportError:
- SentencePieceProcessor = None
+ SentencePieceProcessor: Any = None
try:
- from mistral_common.tokens.tokenizers.mistral import MistralTokenizer # pyright: ignore[reportMissingImports]
- from mistral_common.tokens.tokenizers.tekken import Tekkenizer # pyright: ignore[reportMissingImports]
- from mistral_common.tokens.tokenizers.utils import ( # pyright: ignore[reportMissingImports]
+ from mistral_common.tokens.tokenizers.mistral import MistralTokenizer # type: ignore[import-not-found]
+ from mistral_common.tokens.tokenizers.tekken import Tekkenizer # type: ignore[import-not-found]
+ from mistral_common.tokens.tokenizers.utils import ( # type: ignore[import-not-found]
_filter_valid_tokenizer_files,
)
- from mistral_common.tokens.tokenizers.sentencepiece import ( # pyright: ignore[reportMissingImports]
+ from mistral_common.tokens.tokenizers.sentencepiece import ( # type: ignore[import-not-found]
SentencePieceTokenizer,
)
except ImportError:
_mistral_common_installed = False
- MistralTokenizer = None
- Tekkenizer = None
- SentencePieceTokenizer = None
- _filter_valid_tokenizer_files = None
+ MistralTokenizer: Any = None
+ Tekkenizer: Any = None
+ SentencePieceTokenizer: Any = None
+ _filter_valid_tokenizer_files: Any = None
else:
_mistral_common_installed = True
try:
- from mistral_common.tokens.tokenizers.utils import ( # pyright: ignore[reportMissingImports]
+ from mistral_common.tokens.tokenizers.utils import ( # type: ignore[import-not-found]
get_one_valid_tokenizer_file,
)
except ImportError:
# We still want the conversion to work with older mistral-common versions.
- get_one_valid_tokenizer_file = None
+ get_one_valid_tokenizer_file: Any = None
import gguf
tokenizer_file_path = base_path / tokenizer_file
- self.tokenizer = MistralTokenizer.from_file(
+ self.tokenizer: Any = MistralTokenizer.from_file(
tokenizer_file_path
).instruct_tokenizer.tokenizer
self.tokenizer_type = (
{
- "extraPaths": ["gguf-py", "examples/model-conversion/scripts"],
+ "extraPaths": ["gguf-py", "examples/model-conversion/scripts", "examples/model-conversion/scripts/utils"],
"pythonVersion": "3.9",
"pythonPlatform": "All",
"reportUnusedImport": "warning",
sys.exit(1)
+assert isinstance(hexsha8_baseline, str)
name_baseline = bench_data.get_commit_name(hexsha8_baseline)
hexsha8_compare = name_compare = None
parser.print_help()
sys.exit(1)
+assert isinstance(hexsha8_compare, str)
name_compare = bench_data.get_commit_name(hexsha8_compare)
# Get tool-specific configuration
if not self.isReadOnly():
selection = QTextEdit.ExtraSelection()
line_color = QColorConstants.Yellow.lighter(160)
- selection.format.setBackground(line_color) # pyright: ignore[reportAttributeAccessIssue]
- selection.format.setProperty(QTextFormat.Property.FullWidthSelection, True) # pyright: ignore[reportAttributeAccessIssue]
- selection.cursor = self.textCursor() # pyright: ignore[reportAttributeAccessIssue]
- selection.cursor.clearSelection() # pyright: ignore[reportAttributeAccessIssue]
+ selection.format.setBackground(line_color) # pyright: ignore[reportAttributeAccessIssue] # ty: ignore[unresolved-attribute]
+ selection.format.setProperty(QTextFormat.Property.FullWidthSelection, True) # pyright: ignore[reportAttributeAccessIssue] # ty: ignore[unresolved-attribute]
+ selection.cursor = self.textCursor() # pyright: ignore[reportAttributeAccessIssue] # ty: ignore[unresolved-attribute]
+ selection.cursor.clearSelection() # pyright: ignore[reportAttributeAccessIssue] # ty: ignore[unresolved-attribute]
extra_selections.append(selection)
self.setExtraSelections(extra_selections)
)
extra = QTextEdit.ExtraSelection()
- extra.format.setBackground(color.lighter(160)) # pyright: ignore[reportAttributeAccessIssue]
- extra.cursor = cursor # pyright: ignore[reportAttributeAccessIssue]
+ extra.format.setBackground(color.lighter(160)) # pyright: ignore[reportAttributeAccessIssue] # ty: ignore[unresolved-attribute]
+ extra.cursor = cursor # pyright: ignore[reportAttributeAccessIssue] # ty: ignore[unresolved-attribute]
self.setExtraSelections(self.extraSelections() + [extra])
cursor.select(QTextCursor.SelectionType.LineUnderCursor)
extra = QTextEdit.ExtraSelection()
- extra.format.setBackground(color.lighter(160)) # pyright: ignore[reportAttributeAccessIssue]
- extra.cursor = cursor # pyright: ignore[reportAttributeAccessIssue]
+ extra.format.setBackground(color.lighter(160)) # pyright: ignore[reportAttributeAccessIssue] # ty: ignore[unresolved-attribute]
+ extra.cursor = cursor # pyright: ignore[reportAttributeAccessIssue] # ty: ignore[unresolved-attribute]
self.setExtraSelections(self.extraSelections() + [extra])
ensure_ascii=ensure_ascii,
)
)
- env.globals["strftime_now"] = lambda format: datetime.now().strftime(format)
- env.globals["raise_exception"] = raise_exception
+ env.globals["strftime_now"] = lambda format: datetime.now().strftime(format) # ty: ignore[invalid-assignment]
+ env.globals["raise_exception"] = raise_exception # ty: ignore[invalid-assignment]
try:
template = env.from_string(template_str)
output = template.render(context)
data: list[dict] = []
+ assert isinstance(prompts, list)
for i, p in enumerate(prompts):
if seed_offset >= 0:
random.seed(3 * (seed_offset + 1000 * i) + 1)
import unicodedata
from pathlib import Path
-from typing import Any, Iterator, cast
-from typing_extensions import Buffer
+from typing import Any, Iterator
import cffi
from transformers import AutoTokenizer, PreTrainedTokenizer
while num < 0 and len(self.text_buff) < (16 << 20):
self.text_buff = self.ffi.new("uint8_t[]", -2 * num)
num = self.lib.llama_detokenize(self.model, self.token_ids, len(ids), self.text_buff, len(self.text_buff), remove_special, unparse_special)
- return str(cast(Buffer, self.ffi.buffer(self.text_buff, num)), encoding="utf-8", errors="replace") # replace errors with '\uFFFD'
+ return str(self.ffi.buffer(self.text_buff, num), encoding="utf-8", errors="replace") # replace errors with '\uFFFD' # pyright: ignore[reportArgumentType]
class Tokenizer:
decode_errors = 0
MAX_ERRORS = 10
- logger.info("%s: %s" % (generator.__qualname__, "ini"))
+ logger.info("%s: %s" % (getattr(generator, "__qualname__", ""), "ini"))
for text in generator:
# print(repr(text), text.encode())
# print(repr(text), hex(ord(text[0])), text.encode())
break
t_total = time.perf_counter() - t_start
- logger.info(f"{generator.__qualname__}: end, {t_encode1=:.3f} {t_encode2=:.3f} {t_decode1=:.3f} {t_decode2=:.3f} {t_total=:.3f}")
+ logger.info(f"{getattr(generator, '__qualname__', '')}: end, {t_encode1=:.3f} {t_encode2=:.3f} {t_decode1=:.3f} {t_decode2=:.3f} {t_total=:.3f}")
def main(argv: list[str] | None = None):
}
server_process = subprocess.Popen(
args,
- **pkwargs) # pyright: ignore[reportArgumentType, reportCallIssue]
+ **pkwargs) # pyright: ignore[reportArgumentType, reportCallIssue] # ty: ignore[no-matching-overload]
def server_log(in_stream, out_stream):
for line in iter(in_stream.readline, b''):
from utils import *
from enum import Enum
+from typing import TypedDict
server: ServerProcess
NORMAL = "normal"
STREAMED = "streamed"
-TEST_TOOL = {
- "type":"function",
- "function": {
- "name": "test",
- "description": "",
- "parameters": {
- "type": "object",
- "properties": {
- "success": {"type": "boolean", "const": True},
+class ToolParameters(TypedDict):
+ type: str
+ properties: dict[str, dict]
+ required: list[str]
+
+class ToolFunction(TypedDict):
+ name: str
+ description: str
+ parameters: ToolParameters
+
+class ToolDefinition(TypedDict):
+ type: str
+ function: ToolFunction
+
+TEST_TOOL = ToolDefinition(
+ type = "function",
+ function = ToolFunction(
+ name = "test",
+ description = "",
+ parameters = ToolParameters(
+ type = "object",
+ properties = {
+ "success": {
+ "type": "boolean",
+ "const": True,
+ },
},
- "required": ["success"]
- }
- }
-}
-
-PYTHON_TOOL = {
- "type": "function",
- "function": {
- "name": "python",
- "description": "Runs code in an ipython interpreter and returns the result of the execution after 60 seconds.",
- "parameters": {
- "type": "object",
- "properties": {
+ required = ["success"],
+ ),
+ ),
+)
+
+PYTHON_TOOL = ToolDefinition(
+ type = "function",
+ function = ToolFunction(
+ name = "python",
+ description = "Runs code in an ipython interpreter and returns the result of the execution after 60 seconds.",
+ parameters = ToolParameters(
+ type = "object",
+ properties = {
"code": {
"type": "string",
- "description": "The code to run in the ipython interpreter."
- }
+ "description": "The code to run in the ipython interpreter.",
+ },
+ },
+ required = ["code"],
+ ),
+ ),
+)
+
+WEATHER_TOOL = ToolDefinition(
+ type = "function",
+ function = ToolFunction(
+ name = "get_current_weather",
+ description = "Get the current weather in a given location",
+ parameters = ToolParameters(
+ type = "object",
+ properties = {
+ "location": {
+ "type": "string",
+ "description": "The city and country/state, e.g. 'San Francisco, CA', or 'Paris, France'",
+ },
},
- "required": ["code"]
- }
- }
-}
-
-WEATHER_TOOL = {
- "type":"function",
- "function":{
- "name":"get_current_weather",
- "description":"Get the current weather in a given location",
- "parameters":{
- "type":"object",
- "properties":{
- "location":{
- "type":"string",
- "description":"The city and country/state, e.g. 'San Francisco, CA', or 'Paris, France'"
- }
- },
- "required":["location"]
- }
- }
-}
+ required = ["location"],
+ ),
+ ),
+)
def do_test_completion_with_required_tool_tiny(server: ServerProcess, tool: dict, argument_key: str | None, n_predict, **kwargs):
body = server.make_any_request("POST", "/v1/chat/completions", data={
--- /dev/null
+[environment]
+extra-paths = ["./gguf-py", "./examples/model-conversion/scripts", "./tools/server/tests"]
+python-version = "3.10"
+
+[rules]
+deprecated = "warn"
+
+[src]
+exclude = [
+ "./tools/mtmd/legacy-models/**",
+]
+
+[[overrides]]
+include = [
+ "./tools/server/tests/**",
+]
+
+[overrides.rules]
+unresolved-reference = "ignore"
+unresolved-import = "ignore"
+unresolved-attribute = "ignore"
+
+[[overrides]]
+include = [
+ "./examples/pydantic_models_to_grammar.py",
+]
+
+[overrides.rules]
+unsupported-operator = "ignore"
+non-subscriptable = "ignore"