Odoo 13 source code - 004: models_1

# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.


"""
    Object Relational Mapping module:
     * Hierarchical structure
     * Constraints consistency and validation
     * Object metadata depends on its status
     * Optimised processing by complex query (multiple actions at once)
     * Default field values
     * Permissions optimisation
     * Persistent object: DB postgresql
     * Data conversion
     * Multi-level caching system
     * Two different inheritance mechanisms
     * Rich set of field types:
          - classical (varchar, integer, boolean, ...)
          - relational (one2many, many2one, many2many)
          - functional

"""

import collections
import datetime
import dateutil
import fnmatch
import functools
import itertools
import io
import logging
import operator
import pytz
import re
import uuid
from collections import defaultdict, OrderedDict
from collections.abc import MutableMapping
from contextlib import closing
from inspect import getmembers, currentframe
from operator import attrgetter, itemgetter

import babel.dates
import dateutil.relativedelta
import psycopg2
from lxml import etree
from lxml.builder import E
from psycopg2.extensions import AsIs

import odoo
from . import SUPERUSER_ID
from . import api
from . import tools
from .exceptions import AccessError, MissingError, ValidationError, UserError
from .osv.query import Query
from .tools import frozendict, lazy_classproperty, lazy_property, ormcache, \
                   Collector, LastOrderedSet, OrderedSet, IterableGenerator, \
                   groupby
from .tools.config import config
from .tools.func import frame_codeinfo
from .tools.misc import CountingStream, clean_context, DEFAULT_SERVER_DATETIME_FORMAT, DEFAULT_SERVER_DATE_FORMAT, get_lang
from .tools.safe_eval import safe_eval
from .tools.translate import _
from .tools import date_utils

_logger = logging.getLogger(__name__)
_schema = logging.getLogger(__name__ + '.schema')
_unlink = logging.getLogger(__name__ + '.unlink')

regex_order = re.compile(r'^(\s*([a-z0-9:_]+|"[a-z0-9:_]+")(\s+(desc|asc))?\s*(,|$))+(?<!,)$', re.I)
regex_object_name = re.compile(r'^[a-z0-9_.]+$')
regex_pg_name = re.compile(r'^[a-z_][a-z0-9_$]*$', re.I)
regex_field_agg = re.compile(r'(\w+)(?::(\w+)(?:\((\w+)\))?)?')
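
# Annotation: a quick sketch of what regex_field_agg accepts (the field/aggregate
# specs used by read_group); the field names below are hypothetical:
#   regex_field_agg.match('amount').groups()             -> ('amount', None, None)
#   regex_field_agg.match('amount:sum').groups()         -> ('amount', 'sum', None)
#   regex_field_agg.match('total:sum(amount)').groups()  -> ('total', 'sum', 'amount')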

AUTOINIT_RECALCULATE_STORED_FIELDS = 1000

def check_object_name(name):
    """ Check if the given name is a valid model name.
        检查给出的名称是否是一个有效的模型名称.
        The _name attribute in osv and osv_memory object is subject to
        some restrictions. This function returns True or False whether
        the given name is allowed or not.

        TODO: this is an approximation. The goal in this approximation
        is to disallow uppercase characters (in some places, we quote
        table/column names and in other not, which leads to this kind
        of errors:

            psycopg2.ProgrammingError: relation "xxx" does not exist).

        The same restriction should apply to both osv and osv_memory
        objects for consistency.

    """
    if regex_object_name.match(name) is None:
        return False
    return True
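
# Annotation: examples of names this check accepts/rejects:
#   check_object_name('res.partner')   -> True
#   check_object_name('sale_order_2')  -> True
#   check_object_name('Res.Partner')   -> False  (uppercase is disallowed)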

def raise_on_invalid_object_name(name):
    if not check_object_name(name):
        msg = "The _name attribute %s is not valid." % name
        raise ValueError(msg)

def check_pg_name(name):
    """ Check whether the given name is a valid PostgreSQL identifier name. 检查PG名称是否合法"""
    if not regex_pg_name.match(name):
        raise ValidationError("Invalid characters in table name %r" % name)
    # PostgreSQL limits identifiers to 63 characters
    if len(name) > 63:
        raise ValidationError("Table name %r is too long" % name)

# match private methods, to prevent their remote invocation
regex_private = re.compile(r'^(_.*|init)$')

def check_method_name(name):
    """ Raise an ``AccessError`` if ``name`` is a private method name.
    如果方法名称是一个私有的, 就报错.
    """
    if regex_private.match(name):
        raise AccessError(_('Private methods (such as %s) cannot be called remotely.') % (name,))

def same_name(f, g):
    """ Test whether functions ``f`` and ``g`` are identical or have the same name """
    return f == g or getattr(f, '__name__', 0) == getattr(g, '__name__', 1)

def fix_import_export_id_paths(fieldname):
    """
    Fixes the id fields in import and exports, and splits field paths
    on '/'.

    :param str fieldname: name of the field to import/export
    :return: split field name
    :rtype: list of str
    """
    fixed_db_id = re.sub(r'([^/])\.id', r'\1/.id', fieldname)
    fixed_external_id = re.sub(r'([^/]):id', r'\1/id', fixed_db_id)
    return fixed_external_id.split('/')
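
# Annotation: how the helper splits import/export column names (hypothetical
# field names):
#   fix_import_export_id_paths('partner_id/name')  -> ['partner_id', 'name']
#   fix_import_export_id_paths('partner_id.id')    -> ['partner_id', '.id']
#   fix_import_export_id_paths('partner_id:id')    -> ['partner_id', 'id']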

def trigger_tree_merge(node1, node2):
    """ Merge two trigger trees. 合并两个触发器树 """
    for key, val in node2.items():
        if key is None:
            node1.setdefault(None, set())
            node1[None].update(val)
        else:
            node1.setdefault(key, {})
            trigger_tree_merge(node1[key], node2[key])
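
# Annotation: a minimal illustration with hypothetical field objects f1..f4;
# a trigger tree maps None to a set of fields (to recompute) and fields to
# subtrees (to traverse):
#   t1 = {None: {f1}, partner_field: {None: {f2}}}
#   t2 = {None: {f3}, partner_field: {None: {f4}}}
#   trigger_tree_merge(t1, t2)
#   # t1 is now {None: {f1, f3}, partner_field: {None: {f2, f4}}}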


class MetaModel(api.Meta):
    """ The metaclass of all model classes.
        Its main purpose is to register the models per module.
        所有模型类的元类, 其主要目的是注册每个模块的模型。
    """

    module_to_models = defaultdict(list)

    def __init__(self, name, bases, attrs):
        # a class with _register = False is not registered itself; the flag is
        # reset so that its subclasses do get registered
        if not self._register:
            self._register = True
            super(MetaModel, self).__init__(name, bases, attrs)
            return

        if not hasattr(self, '_module'):
            self._module = self._get_addon_name(self.__module__)

        # Remember which models to instantiate for this module.
        if not self._custom:
            self.module_to_models[self._module].append(self)

        # check for new-api conversion error: a trailing comma after a field
        # definition turns it into a 1-element tuple
        for key, val in attrs.items():
            if type(val) is tuple and len(val) == 1 and isinstance(val[0], Field):
                _logger.error("Trailing comma after field definition: %s.%s", self, key)
            if isinstance(val, Field):
                val.args = dict(val.args, _module=self._module)

    def _get_addon_name(self, full_name):
        # The (OpenERP) module name can be in the ``odoo.addons`` namespace
        # or not. For instance, module ``sale`` can be imported as
        # ``odoo.addons.sale`` (the right way) or ``sale`` (for backward
        # compatibility).
        module_parts = full_name.split('.')
        if len(module_parts) > 2 and module_parts[:2] == ['odoo', 'addons']:
            addon_name = full_name.split('.')[2]
        else:
            addon_name = full_name.split('.')[0]
        return addon_name
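
    # Annotation: examples of the two accepted forms:
    #   _get_addon_name('odoo.addons.sale.models.sale_order')  -> 'sale'
    #   _get_addon_name('sale')                                -> 'sale'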


class NewId(object):
    """ Pseudo-ids for new records, encapsulating an optional origin id (actual
        record id) and an optional reference (any value).
    """
    __slots__ = ['origin', 'ref']

    def __init__(self, origin=None, ref=None):
        self.origin = origin
        self.ref = ref

    def __bool__(self):
        return False

    def __eq__(self, other):
        return isinstance(other, NewId) and (
            (self.origin and other.origin and self.origin == other.origin)
            or (self.ref and other.ref and self.ref == other.ref)
        )

    def __hash__(self):
        return hash(self.origin or self.ref or id(self))

    def __repr__(self):
        return (
            "<NewId origin=%r>" % self.origin if self.origin else
            "<NewId ref=%r>" % self.ref if self.ref else
            "<NewId 0x%x>" % id(self)
        )


def origin_ids(ids):
    """ Return an iterator over the origin ids corresponding to ``ids``.
        Actual ids are returned as is, and ids without origin are not returned.
    """
    return ((id_ or id_.origin) for id_ in ids if (id_ or getattr(id_, "origin", None)))
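
# Annotation: a small sketch of NewId/origin_ids semantics:
#   list(origin_ids([42, NewId(origin=7), NewId()]))  -> [42, 7]
# real ids pass through, NewId(origin=7) yields its origin id 7, and the
# bare NewId (no origin) is dropped.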


IdType = (int, str, NewId)


# maximum number of prefetched records
PREFETCH_MAX = 1000

# special columns automatically created by the ORM (the "magic" fields)
LOG_ACCESS_COLUMNS = ['create_uid', 'create_date', 'write_uid', 'write_date']
MAGIC_COLUMNS = ['id'] + LOG_ACCESS_COLUMNS  # plus the automatic 'id' column

# valid SQL aggregation functions
VALID_AGGREGATE_FUNCTIONS = {
    'array_agg', 'count', 'count_distinct',
    'bool_and', 'bool_or', 'max', 'min', 'avg', 'sum',
}


class BaseModel(MetaModel('DummyModel', (object,), {'_register': False})):
    """Base class for Odoo models.  #Odoo 的基类

    Odoo models are created by inheriting one of the following:
    创建odoo模型的3种继承类型

    *   :class:`Model` for regular database-persisted models, 1),常规模型

    *   :class:`TransientModel` for temporary data, stored in the database but
        automatically vacuumed every so often, 2),瞬态模型

    *   :class:`AbstractModel` for abstract super classes meant to be shared by
        multiple inheriting models, 3),抽象模型

    The system automatically instantiates every model once per database. Those
    instances represent the available models on each database, and depend on
    which modules are installed on that database. The actual class of each
    instance is built from the Python classes that create and inherit from the
    corresponding model.
    系统在每个数据库自动实例化每个模型一次。那些实例来自于每个数据库上的可用模型,并取决于
    该数据库上安装哪些模块。每个的实际类实例是从相应的模型生成。

    Every model instance is a "recordset", i.e., an ordered collection of
    records of the model. Recordsets are returned by methods like
    :meth:`~.browse`, :meth:`~.search`, or field accesses. Records have no
    explicit representation: a record is represented as a recordset of one
    record. 每个模型就是一个数据表

    To create a class that should not be instantiated,创建不应实例化的类
    the :attr:`~odoo.models.BaseModel._register` attribute may be set to False.
    """

    _auto = False  # whether the database table should be created automatically
    """Whether a database table should be created (default: ``True``).
    If set to ``False``, override :meth:`~odoo.models.BaseModel.init`
    to create the database table.

    .. tip:: To create a model without any table, inherit
            from :class:`~odoo.models.AbstractModel`.
    """
    _register = False           #: not visible in ORM registry
    _abstract = True            #: whether model is abstract
    _transient = False          #: whether model is transient (records are periodically vacuumed)

    _name = None                #: the model name (in dot-notation, module namespace)
    _description = None         #: the model's informal name
    _custom = False             #: should be True for custom models only

    _inherit = None
    """Python-inherited models:  Python的继承模块, 带名称和不带名称分别为1)类继承和2)原型继承.
        1),类继承,没有_name或者_name和父类保持一直,是对父类的扩展,数据是添加在当前父类中, 数据库中的反应是同一张表格.
        2),原型继承,复制父类模型,并创建一个新模型,并不改变父类的夫模型,数据库中的反应是创建了一个新表格

    :type: str or list(str)

    .. note::

        * If :attr:`._name` is set, name(s) of parent models to inherit from
        * If :attr:`._name` is unset, name of a single model to extend in-place
    """ 
    # 3) delegation inheritance: maps parent models to many2one fields; the
    # inherited fields are not duplicated, their values remain stored on the
    # linked parent records, so the model's own table only holds foreign keys
    _inherits = {}
    """dictionary {'parent_model': 'm2o_field'} mapping the _name of the parent business
    objects to the names of the corresponding foreign key fields to use::

      _inherits = {
          'a.model': 'a_field_id',
          'b.model': 'b_field_id'
      }

    implements composition-based inheritance: the new model exposes all
    the fields of the inherited models but stores none of them:
    the values themselves remain stored on the linked record.

    .. warning::

      if multiple fields with the same name are defined in the
      :attr:`~odoo.models.Model._inherits`-ed models, the inherited field will
      correspond to the last one (in the inherits list order).
    """
    _table = None               #: SQL table name used by model if :attr:`_auto` (derived from _name by default)
    _sequence = None            #: SQL sequence to use for ID field
    _sql_constraints = []       #: SQL constraints [(name, sql_def, message)]

    _rec_name = None            #: field to use for labeling records (its value becomes the display name), default: ``name``
    _order = 'id'               #: default order field for searching results
    _parent_name = 'parent_id'  #: the many2one field used as parent field
    _parent_store = False
    """set to True to compute parent_path field.

    Alongside a :attr:`~.parent_path` field, sets up an indexed storage
    of the tree structure of records, to enable faster hierarchical queries
    on the records of the current model using the ``child_of`` and
    ``parent_of`` domain operators.
    """
    _date_name = 'date'         #: field to use for default calendar view
    _fold_name = 'fold'         #: field to determine folded groups in kanban views

    _needaction = False         # whether the model supports "need actions" (Old API)
    _translate = True           # False disables translations export for this model (Old API)
    _check_company_auto = False
    """On write and create, call ``_check_company`` to ensure companies
    consistency on the relational fields having ``check_company=True``
    as attribute.
    """

    # default values for _transient_vacuum()
    _transient_check_count = 0
    _transient_max_count = lazy_classproperty(lambda _: config.get('osv_memory_count_limit'))  # configurable in odoo.conf
    _transient_max_hours = lazy_classproperty(lambda _: config.get('osv_memory_age_limit'))    # configurable in odoo.conf

    CONCURRENCY_CHECK_FIELD = '__last_update'

    @api.model
    def view_init(self, fields_list):
        """ Override this method to do specific things when a form view is
        opened. This method is invoked by :meth:`~default_get`.
        """
        pass

    def _reflect(self):
        """ Reflect the model and its fields in the models 'ir.model' and
        'ir.model.fields'. Also create entries in 'ir.model.data' if the key
        'module' is passed to the context.
        在模型"ir.model"和'ir.model.fields'中映射模型及其字段。
        也可以在"ir.model.data"中创建条目, 如果"module"关键字由上下文传递过来
        """
        self.env['ir.model']._reflect_model(self)
        self.env['ir.model.fields']._reflect_model(self)
        self.env['ir.model.fields.selection']._reflect_model(self)
        self.env['ir.model.constraint']._reflect_model(self)
        self.invalidate_cache()

    @api.model
    def _add_field(self, name, field):
        """ Add the given ``field`` under the given ``name`` in the class """
        cls = type(self)
        # add field as an attribute and in cls._fields (for reflection)
        if not isinstance(getattr(cls, name, field), Field):
            _logger.warning("In model %r, field %r overriding existing value", cls._name, name)
        setattr(cls, name, field)
        cls._fields[name] = field

        # basic setup of field
        field.setup_base(self, name)

    @api.model
    def _pop_field(self, name):
        """ Remove the field with the given ``name`` from the model.
            This method should only be used for manual fields.
        """
        cls = type(self)
        field = cls._fields.pop(name, None)
        if hasattr(cls, name):
            delattr(cls, name)
        return field

    @api.model
    def _add_magic_fields(self):
        """ Introduce magic fields on the current class

        * id is a "normal" field (with a specific getter)
        * create_uid, create_date, write_uid and write_date have become
          "normal" fields
        * $CONCURRENCY_CHECK_FIELD is a computed field with its computing
          method defined dynamically. Uses ``str(datetime.datetime.utcnow())``
          to get the same structure as the previous
          ``(now() at time zone 'UTC')::timestamp``::

              # select (now() at time zone 'UTC')::timestamp;
                        timezone
              ----------------------------
               2013-06-18 08:30:37.292809

              >>> str(datetime.datetime.utcnow())
              '2013-06-18 08:31:32.821177'
        """
        def add(name, field):
            """ add ``field`` with the given ``name`` if it does not exist yet """
            if name not in self._fields:
                self._add_field(name, field)

        # cyclic import
        from . import fields

        # this field 'id' must override any other column or field
        self._add_field('id', fields.Id(automatic=True))

        add('display_name', fields.Char(string='Display Name', automatic=True,
            compute='_compute_display_name'))

        # this is where the magic fields are actually added, guarded by _log_access;
        # set _log_access = False on a model to prevent the ORM from adding them
        if self._log_access:
            add('create_uid', fields.Many2one(
                'res.users', string='Created by', automatic=True, readonly=True))
            add('create_date', fields.Datetime(
                string='Created on', automatic=True, readonly=True))
            add('write_uid', fields.Many2one(
                'res.users', string='Last Updated by', automatic=True, readonly=True))
            add('write_date', fields.Datetime(
                string='Last Updated on', automatic=True, readonly=True))
            last_modified_name = 'compute_concurrency_field_with_access'
        else:
            last_modified_name = 'compute_concurrency_field'

        # this field must override any other column or field
        self._add_field(self.CONCURRENCY_CHECK_FIELD, fields.Datetime(
            string='Last Modified on', compute=last_modified_name,
            compute_sudo=False, automatic=True))

    def compute_concurrency_field(self):
        for record in self:
            record[self.CONCURRENCY_CHECK_FIELD] = odoo.fields.Datetime.now()

    @api.depends('create_date', 'write_date')
    def compute_concurrency_field_with_access(self):
        for record in self:
            record[self.CONCURRENCY_CHECK_FIELD] = \
                record.write_date or record.create_date or odoo.fields.Datetime.now()

    #
    # Goal: try to apply inheritance at the instantiation level and
    #       put objects in the pool var
    #
    @classmethod
    def _build_model(cls, pool, cr):
        """ Instantiate a given model in the registry. 在注册表中实例化给定模型

        This method creates or extends a "registry" class for the given model.
        This "registry" class carries inferred model metadata, and inherits (in
        the Python sense) from all classes that define the model, and possibly
        other registry classes.

        """

        # In the simplest case, the model's registry class inherits from cls and
        # the other classes that define the model in a flat hierarchy. The
        # registry contains the instance ``model`` (on the left). Its class,
        # ``ModelClass``, carries inferred metadata that is shared between all
        # the model's instances for this registry only.
        #
        #   class A1(Model):                          Model
        #       _name = 'a'                           / | \
        #                                            A3 A2 A1
        #   class A2(Model):                          \ | /
        #       _inherit = 'a'                      ModelClass
        #                                             /   \
        #   class A3(Model):                      model   recordset
        #       _inherit = 'a'
        #
        # When a model is extended by '_inherit', its base classes are modified
        # to include the current class and the other inherited model classes.
        # Note that we actually inherit from other ``ModelClass``, so that
        # extensions to an inherited model are immediately visible in the
        # current model class, like in the following example:
        #
        #   class A1(Model):
        #       _name = 'a'                           Model
        #                                            / / \ \
        #   class B1(Model):                        / A2 A1 \
        #       _name = 'b'                        /   \ /   \
        #                                         B2  ModelA  B1
        #   class B2(Model):                       \    |    /
        #       _name = 'b'                         \   |   /
        #       _inherit = ['a', 'b']                \  |  /
        #                                             ModelB
        #   class A2(Model):
        #       _inherit = 'a'

        # warn if the deprecated _constraints attribute is used;
        # @api.constrains should be used on methods instead
        if getattr(cls, '_constraints', None):
            _logger.warning("Model attribute '_constraints' is no longer supported, "
                            "please use @api.constrains on methods instead.")

        # Keep links to non-inherited constraints in cls; this is useful
        # for instance when exporting translations.
        cls._local_sql_constraints = cls.__dict__.get('_sql_constraints', [])

        # determine inherited models
        parents = cls._inherit
        parents = [parents] if isinstance(parents, str) else (parents or [])

        # determine the model's name
        name = cls._name or (len(parents) == 1 and parents[0]) or cls.__name__

        # all models except 'base' implicitly inherit from 'base'
        if name != 'base':
            parents = list(parents) + ['base']

        # create or retrieve the model's class
        if name in parents:
            if name not in pool:
                raise TypeError("Model %r does not exist in registry." % name)
            ModelClass = pool[name]
            ModelClass._build_model_check_base(cls)
            check_parent = ModelClass._build_model_check_parent
        else:
            ModelClass = type(name, (BaseModel,), {
                '_name': name,
                '_register': False,
                '_original_module': cls._module,
                '_inherit_children': OrderedSet(),      # names of children models
                '_inherits_children': set(),            # names of children models
                '_fields': OrderedDict(),               # populated in _setup_base()
            })
            check_parent = cls._build_model_check_parent

        # determine all the classes the model should inherit from
        bases = LastOrderedSet([cls])
        for parent in parents:
            if parent not in pool:
                raise TypeError("Model %r inherits from non-existing model %r." % (name, parent))
            parent_class = pool[parent]
            if parent == name:
                for base in parent_class.__bases__:
                    bases.add(base)
            else:
                check_parent(cls, parent_class)
                bases.add(parent_class)
                parent_class._inherit_children.add(name)
        ModelClass.__bases__ = tuple(bases)

        # determine the attributes of the model's class
        ModelClass._build_model_attributes(pool)

        check_pg_name(ModelClass._table)

        # Transience: transient models must have log_access turned on
        if ModelClass._transient:
            assert ModelClass._log_access, \
                "TransientModels must have log_access turned on, " \
                "in order to implement their access rights policy"

        # link the class to the registry, and update the registry
        ModelClass.pool = pool
        pool[name] = ModelClass

        # backward compatibility: instantiate the model, and initialize it
        model = object.__new__(ModelClass)
        model.__init__(pool, cr)

        return ModelClass

    @classmethod
    def _build_model_check_base(model_class, cls):
        """ Check whether ``model_class`` can be extended with ``cls``. """
        if model_class._abstract and not cls._abstract:
            msg = ("%s transforms the abstract model %r into a non-abstract model. "
                   "That class should either inherit from AbstractModel, or set a different '_name'.")
            raise TypeError(msg % (cls, model_class._name))
        if model_class._transient != cls._transient:
            if model_class._transient:
                msg = ("%s transforms the transient model %r into a non-transient model. "
                       "That class should either inherit from TransientModel, or set a different '_name'.")
            else:
                msg = ("%s transforms the model %r into a transient model. "
                       "That class should either inherit from Model, or set a different '_name'.")
            raise TypeError(msg % (cls, model_class._name))

    @classmethod
    def _build_model_check_parent(model_class, cls, parent_class):
        """ Check whether ``model_class`` can inherit from ``parent_class``. """
        if model_class._abstract and not parent_class._abstract:
            msg = ("In %s, the abstract model %r cannot inherit from the non-abstract model %r.")
            raise TypeError(msg % (cls, model_class._name, parent_class._name))

    @classmethod
    def _build_model_attributes(cls, pool):
        """ Initialize base model attributes. 初始化base模块的属性"""
        cls._description = cls._name
        cls._table = cls._name.replace('.', '_')
        cls._sequence = None
        cls._log_access = cls._auto
        cls._inherits = {}
        cls._sql_constraints = {}

        for base in reversed(cls.__bases__):
            if not getattr(base, 'pool', None):
                # the following attributes are not taken from model classes
                parents = [base._inherit] if base._inherit and isinstance(base._inherit, str) else (base._inherit or [])
                if cls._name not in parents and not base._description:
                    _logger.warning("The model %s has no _description", cls._name)
                cls._description = base._description or cls._description
                cls._table = base._table or cls._table
                cls._sequence = base._sequence or cls._sequence
                cls._log_access = getattr(base, '_log_access', cls._log_access)

            cls._inherits.update(base._inherits)

            for cons in base._sql_constraints:
                cls._sql_constraints[cons[0]] = cons

        cls._sequence = cls._sequence or (cls._table + '_id_seq')
        cls._sql_constraints = list(cls._sql_constraints.values())

        # update _inherits_children of parent models
        for parent_name in cls._inherits:
            pool[parent_name]._inherits_children.add(cls._name)

        # recompute attributes of _inherit_children models
        for child_name in cls._inherit_children:
            child_class = pool[child_name]
            child_class._build_model_attributes(pool)
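
    # Annotation: for a hypothetical model with _name = 'my.model' and no
    # explicit overrides, this derives:
    #   _description = 'my.model'
    #   _table       = 'my_model'
    #   _sequence    = 'my_model_id_seq'
    #   _log_access  = _auto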

    @classmethod
    def _init_constraints_onchanges(cls):
        # store list of sql constraint qualified names
        for (key, _, _) in cls._sql_constraints:
            cls.pool._sql_constraints.add(cls._table + '_' + key)

        # reset properties memoized on cls
        cls._constraint_methods = BaseModel._constraint_methods
        cls._onchange_methods = BaseModel._onchange_methods

    @property
    def _constraint_methods(self):
        """ Return a list of methods implementing Python constraints. """
        def is_constraint(func):
            return callable(func) and hasattr(func, '_constrains')

        cls = type(self)
        methods = []
        for attr, func in getmembers(cls, is_constraint):
            for name in func._constrains:
                field = cls._fields.get(name)
                if not field:
                    _logger.warning("method %s.%s: @constrains parameter %r is not a field name", cls._name, attr, name)
                elif not (field.store or field.inverse or field.inherited):
                    _logger.warning("method %s.%s: @constrains parameter %r is not writeable", cls._name, attr, name)
            methods.append(func)

        # optimization: memoize result on cls, it will not be recomputed
        cls._constraint_methods = methods
        return methods

    @property
    def _onchange_methods(self):
        """ Return a dictionary mapping field names to onchange methods.
        将字典映射字段名称返回到更改方法
        """
        def is_onchange(func):
            return callable(func) and hasattr(func, '_onchange')

        # collect onchange methods on the model's class
        cls = type(self)
        methods = defaultdict(list)
        for attr, func in getmembers(cls, is_onchange):
            for name in func._onchange:
                if name not in cls._fields:
                    _logger.warning("@onchange%r parameters must be field names", func._onchange)
                methods[name].append(func)

        # add onchange methods to implement "change_default" on fields
        def onchange_default(field, self):
            value = field.convert_to_write(self[field.name], self)
            condition = "%s=%s" % (field.name, value)
            defaults = self.env['ir.default'].get_model_defaults(self._name, condition)
            self.update(defaults)

        for name, field in cls._fields.items():
            if field.change_default:
                methods[name].append(functools.partial(onchange_default, field))

        # optimization: memoize result on cls, it will not be recomputed
        cls._onchange_methods = methods
        return methods

    def __new__(cls):
        # In the past, this method was registering the model class in the
        # server. This job is now done entirely by the metaclass MetaModel.
        return None

    def __init__(self, pool, cr):
        """ Deprecated method to initialize the model.
        废弃的方法
        """
        pass

    @api.model
    @ormcache()
    # check whether the backing table is an ordinary table ('r' = regular table)
    def _is_an_ordinary_table(self):
        return tools.table_kind(self.env.cr, self._table) == 'r'

    def __ensure_xml_id(self, skip=False):
        """ Create missing external ids for records in ``self``, and return an
            iterator of pairs ``(record, xmlid)`` for the records in ``self``.

        :rtype: Iterable[Model, str | None]
        """
        if skip:
            return ((record, None) for record in self)

        if not self:
            return iter([])

        if not self._is_an_ordinary_table():
            raise Exception(
                "You can not export the column ID of model %s, because the "
                "table %s is not an ordinary table."
                % (self._name, self._table))

        modname = '__export__'

        cr = self.env.cr
        cr.execute("""
            SELECT res_id, module, name
            FROM ir_model_data
            WHERE model = %s AND res_id in %s
        """, (self._name, tuple(self.ids)))
        xids = {
            res_id: (module, name)
            for res_id, module, name in cr.fetchall()
        }
        def to_xid(record_id):
            (module, name) = xids[record_id]
            return ('%s.%s' % (module, name)) if module else name

        # create missing xml ids
        missing = self.filtered(lambda r: r.id not in xids)
        if not missing:
            return (
                (record, to_xid(record.id))
                for record in self
            )

        xids.update(
            (r.id, (modname, '%s_%s_%s' % (
                r._table,
                r.id,
                uuid.uuid4().hex[:8],
            )))
            for r in missing
        )
        fields = ['module', 'model', 'name', 'res_id']
        cr.copy_from(io.StringIO(
            u'\n'.join(
                u"%s\t%s\t%s\t%d" % (
                    modname,
                    record._name,
                    xids[record.id][1],
                    record.id,
                )
                for record in missing
            )),
            table='ir_model_data',
            columns=fields,
        )
        self.env['ir.model.data'].invalidate_cache(fnames=fields)

        return (
            (record, to_xid(record.id))
            for record in self
        )

    def _export_rows(self, fields, *, _is_toplevel_call=True):
        """ Export fields of the records in ``self``.

            :param fields: list of lists of fields to traverse
            :param bool _is_toplevel_call:
                used when recursing, avoid using when calling from outside
            :return: list of lists of corresponding values
        """
        import_compatible = self.env.context.get('import_compat', True)
        lines = []

        def splittor(rs):
            """ Splits the self recordset in batches of 1000 (to avoid
            entire-recordset-prefetch-effects) & removes the previous batch
            from the cache after it's been iterated in full
            """
            for idx in range(0, len(rs), 1000):
                sub = rs[idx:idx+1000]
                for rec in sub:
                    yield rec
                rs.invalidate_cache(ids=sub.ids)
        if not _is_toplevel_call:
            splittor = lambda rs: rs

        # memory stable but ends up prefetching 275 fields (???)
        for record in splittor(self):
            # main line of record, initially empty
            current = [''] * len(fields)
            lines.append(current)

            # list of primary fields followed by secondary field(s)
            primary_done = []

            # process column by column
            for i, path in enumerate(fields):
                if not path:
                    continue

                name = path[0]
                if name in primary_done:
                    continue

                if name == '.id':
                    current[i] = str(record.id)
                elif name == 'id':
                    current[i] = (record._name, record.id)
                else:
                    field = record._fields[name]
                    value = record[name]

                    # this part could be simpler, but it has to be done this way
                    # in order to reproduce the former behavior
                    if not isinstance(value, BaseModel):
                        current[i] = field.convert_to_export(value, record)
                    else:
                        primary_done.append(name)

                        # in import_compat mode, m2m should always be exported as
                        # a comma-separated list of xids in a single cell
                        if import_compatible and field.type == 'many2many' and len(path) > 1 and path[1] == 'id':
                            xml_ids = [xid for _, xid in value.__ensure_xml_id()]
                            current[i] = ','.join(xml_ids) or False
                            continue

                        # recursively export the fields that follow name; use
                        # 'display_name' where no subfield is exported
                        fields2 = [(p[1:] or ['display_name'] if p and p[0] == name else [])
                                   for p in fields]
                        lines2 = value._export_rows(fields2, _is_toplevel_call=False)
                        if lines2:
                            # merge first line with record's main line
                            for j, val in enumerate(lines2[0]):
                                if val or isinstance(val, bool):
                                    current[j] = val
                            # append the other lines at the end
                            lines += lines2[1:]
                        else:
                            current[i] = False

        # if any xid should be exported, only do so at toplevel
        if _is_toplevel_call and any(f[-1] == 'id' for f in fields):
            bymodels = collections.defaultdict(set)
            xidmap = collections.defaultdict(list)
            # collect all the tuples in "lines" (along with their coordinates)
            for i, line in enumerate(lines):
                for j, cell in enumerate(line):
                    if type(cell) is tuple:
                        bymodels[cell[0]].add(cell[1])
                        xidmap[cell].append((i, j))
            # for each model, xid-export everything and inject in matrix
            for model, ids in bymodels.items():
                for record, xid in self.env[model].browse(ids).__ensure_xml_id():
                    for i, j in xidmap.pop((record._name, record.id)):
                        lines[i][j] = xid
            assert not xidmap, "failed to export xids for %s" % ', '.join('%s:%s' % it for it in xidmap.items())

        return lines

    # backward compatibility
    __export_rows = _export_rows

    def export_data(self, fields_to_export):
        """ Export fields for selected objects

            :param fields_to_export: list of fields
            :param raw_data: True to return value in native Python type
            :rtype: dictionary with a *datas* matrix

            This method is used when exporting data via client menu
        """
        fields_to_export = [fix_import_export_id_paths(f) for f in fields_to_export]
        return {'datas': self._export_rows(fields_to_export)}

    @api.model
    def load(self, fields, data):
        """
        Attempts to load the data matrix, and returns a list of ids (or
        ``False`` if there was an error and no id could be generated) and a
        list of messages.

        The ids are those of the records created and saved (in database), in
        the same order they were extracted from the file. They can be passed
        directly to :meth:`~read`

        :param fields: list of fields to import, at the same index as the corresponding data
        :type fields: list(str)
        :param data: row-major matrix of data to import
        :type data: list(list(str))
        :returns: {ids: list(int)|False, messages: [Message][, lastrow: int]}
        """
        self.flush()

        # determine values of mode, current_module and noupdate
        mode = self._context.get('mode', 'init')
        current_module = self._context.get('module', '__import__')
        noupdate = self._context.get('noupdate', False)
        # add current module in context for the conversion of xml ids
        self = self.with_context(_import_current_module=current_module)

        cr = self._cr
        cr.execute('SAVEPOINT model_load')

        fields = [fix_import_export_id_paths(f) for f in fields]
        fg = self.fields_get()

        ids = []
        messages = []
        ModelData = self.env['ir.model.data']

        # list of (xid, vals, info) for records to be created in batch
        batch = []
        batch_xml_ids = set()

        def flush(xml_id=None):
            if not batch:
                return
            if xml_id and xml_id not in batch_xml_ids:
                return

            data_list = [
                dict(xml_id=xid, values=vals, info=info, noupdate=noupdate)
                for xid, vals, info in batch
            ]
            batch.clear()
            batch_xml_ids.clear()

            try:
                cr.execute('SAVEPOINT model_load_save')
            except psycopg2.InternalError as e:
                # broken transaction, exit and hope the source error was
                # already logged
                if not any(message['type'] == 'error' for message in messages):
                    info = data_list[0]['info']
                    messages.append(dict(info, type='error', message=u"Unknown database error: '%s'" % e))
                return

            # try to create in batch
            try:
                with cr.savepoint():
                    recs = self._load_records(data_list, mode == 'update')
                    ids.extend(recs.ids)
                return
            except Exception:
                pass

            # try again, this time record by record
            for rec_data in data_list:
                try:
                    with cr.savepoint():
                        rec = self._load_records([rec_data], mode == 'update')
                        ids.append(rec.id)
                except psycopg2.Warning as e:
                    info = rec_data['info']
                    messages.append(dict(info, type='warning', message=str(e)))
                except psycopg2.Error as e:
                    info = rec_data['info']
                    messages.append(dict(info, type='error', **PGERROR_TO_OE[e.pgcode](self, fg, info, e)))
                    # Failed to write, log to messages, rollback savepoint (to
                    # avoid broken transaction) and keep going
                except Exception as e:
                    _logger.exception("Error while loading record")
                    info = rec_data['info']
                    message = (_(u'Unknown error during import:') + u' %s: %s' % (type(e), e))
                    moreinfo = _('Resolve other errors first')
                    messages.append(dict(info, type='error', message=message, moreinfo=moreinfo))
                    # Failed for some reason, perhaps due to invalid data supplied,
                    # rollback savepoint and keep going

        # make 'flush' available to the methods below, in the case where XMLID
        # resolution fails, for instance
        flush_self = self.with_context(import_flush=flush)

        # TODO: break load's API instead of smuggling via context?
        limit = self._context.get('_import_limit')
        if limit is None:
            limit = float('inf')
        extracted = flush_self._extract_records(fields, data, log=messages.append, limit=limit)

        converted = flush_self._convert_records(extracted, log=messages.append)

        info = {'rows': {'to': -1}}
        for id, xid, record, info in converted:
            if xid:
                xid = xid if '.' in xid else "%s.%s" % (current_module, xid)
                batch_xml_ids.update(ModelData._generate_xmlids(xid, self))
            elif id:
                record['id'] = id
            batch.append((xid, record, info))

        flush()
        if any(message['type'] == 'error' for message in messages):
            cr.execute('ROLLBACK TO SAVEPOINT model_load')
            ids = False
            # cancel all changes done to the registry/ormcache
            self.pool.reset_changes()

        nextrow = info['rows']['to'] + 1
        if nextrow < limit:
            nextrow = 0
        return {
            'ids': ids,
            'messages': messages,
            'nextrow': nextrow,
        }
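
    # Annotation: a hedged usage sketch (hypothetical data; `env` is an
    # environment on some database):
    #   result = env['res.partner'].load(
    #       ['id', 'name'],
    #       [['__import__.partner_a', 'Partner A'],
    #        ['__import__.partner_b', 'Partner B']])
    #   # -> {'ids': [id_a, id_b], 'messages': [], 'nextrow': 0}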

    def _add_fake_fields(self, fields):
        from odoo.fields import Char, Integer
        fields[None] = Char('rec_name')
        fields['id'] = Char('External ID')
        fields['.id'] = Integer('Database ID')
        return fields

    def _extract_records(self, fields_, data, log=lambda a: None, limit=float('inf')):
        """ Generates record dicts from the data sequence.

        The result is a generator of dicts mapping field names to raw
        (unconverted, unvalidated) values.

        For relational fields, if sub-fields were provided the value will be
        a list of sub-records

        The following sub-fields may be set on the record (by key):
        * None is the name_get for the record (to use with name_create/name_search)
        * "id" is the External ID for the record
        * ".id" is the Database ID for the record
        """
        fields = dict(self._fields)
        # Fake fields to avoid special cases in extractor
        fields = self._add_fake_fields(fields)
        # m2o fields can't be on multiple lines so exclude them from the
        # is_relational field rows filter, but special-case it later on to
        # be handled with relational fields (as it can have subfields)
        is_relational = lambda field: fields[field].relational
        get_o2m_values = itemgetter_tuple([
            index
            for index, fnames in enumerate(fields_)
            if fields[fnames[0]].type == 'one2many'
        ])
        get_nono2m_values = itemgetter_tuple([
            index
            for index, fnames in enumerate(fields_)
            if fields[fnames[0]].type != 'one2many'
        ])
        # Checks if the provided row has any non-empty one2many fields
        def only_o2m_values(row):
            return any(get_o2m_values(row)) and not any(get_nono2m_values(row))

        index = 0
        while index < len(data) and index < limit:
            row = data[index]

            # copy non-relational fields to record dict
            record = {fnames[0]: value
                      for fnames, value in zip(fields_, row)
                      if not is_relational(fnames[0])}

            # Get all following rows which have relational values attached to
            # the current record (no non-relational values)
            record_span = itertools.takewhile(
                only_o2m_values, itertools.islice(data, index + 1, None))
            # stitch record row back on for relational fields
            record_span = list(itertools.chain([row], record_span))
            for relfield in set(fnames[0] for fnames in fields_ if is_relational(fnames[0])):
                comodel = self.env[fields[relfield].comodel_name]

                # get only cells for this sub-field, should be strictly
                # non-empty, field path [None] is for name_get field
                indices, subfields = zip(*((index, fnames[1:] or [None])
                                           for index, fnames in enumerate(fields_)
                                           if fnames[0] == relfield))

                # return all rows which have at least one value for the
                # subfields of relfield
                relfield_data = [it for it in map(itemgetter_tuple(indices), record_span) if any(it)]
                record[relfield] = [
                    subrecord
                    for subrecord, _subinfo in comodel._extract_records(subfields, relfield_data, log=log)
                ]

            yield record, {'rows': {
                'from': index,
                'to': index + len(record_span) - 1,
            }}
            index += len(record_span)
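
    # Annotation: a worked example with a hypothetical one2many field `line_ids`;
    # rows that only carry o2m values are folded into the preceding record:
    #   fields_ = [['name'], ['line_ids', 'name']]
    #   data    = [['SO1', 'line a'],
    #              ['',    'line b']]   # continuation row: only o2m cells set
    # yields a single record
    #   {'name': 'SO1', 'line_ids': [{'name': 'line a'}, {'name': 'line b'}]}
    # with info {'rows': {'from': 0, 'to': 1}}.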

    @api.model
    def _convert_records(self, records, log=lambda a: None):
        """ Converts records from the source iterable (recursive dicts of
        strings) into forms which can be written to the database (via
        self.create or (ir.model.data)._update)

        :returns: a list of triplets of (id, xid, record)
        :rtype: list((int|None, str|None, dict))
        """
        field_names = {name: field.string for name, field in self._fields.items()}
        if self.env.lang:
            field_names.update(self.env['ir.translation'].get_field_string(self._name))

        convert = self.env['ir.fields.converter'].for_model(self)

        def _log(base, record, field, exception):
            type = 'warning' if isinstance(exception, Warning) else 'error'
            # logs the logical (not human-readable) field name for automated
            # processing of response, but injects human readable in message
            exc_vals = dict(base, record=record, field=field_names[field])
            record = dict(base, type=type, record=record, field=field,
                          message=str(exception.args[0]) % exc_vals)
            if len(exception.args) > 1 and exception.args[1]:
                record.update(exception.args[1])
            log(record)

        stream = CountingStream(records)
        for record, extras in stream:
            # xid
            xid = record.get('id', False)
            # dbid
            dbid = False
            if '.id' in record:
                try:
                    dbid = int(record['.id'])
                except ValueError:
                    # in case of overridden id column
                    dbid = record['.id']
                if not self.search([('id', '=', dbid)]):
                    log(dict(extras,
                        type='error',
                        record=stream.index,
                        field='.id',
                        message=_(u"Unknown database identifier '%s'") % dbid))
                    dbid = False

            converted = convert(record, functools.partial(_log, extras, stream.index))

            yield dbid, xid, converted, dict(extras, record=stream.index)

    def _validate_fields(self, field_names):
        field_names = set(field_names)
        for check in self._constraint_methods:
            if not field_names.isdisjoint(check._constrains):
                check(self)

    @api.model
    def default_get(self, fields_list):
        """ default_get(fields) -> default_values

        Return default values for the fields in ``fields_list``. Default
        values are determined by the context, user defaults, and the model
        itself.
        返回"fields_list"中字段的默认值。默认
        值由上下文、用户默认值和模型本身决定.

        :param fields_list: a list of field names
        :return: a dictionary mapping each field name to its corresponding
            default value, if it has one.

        """
        # trigger view init hook
        self.view_init(fields_list)

        defaults = {}
        parent_fields = defaultdict(list)
        ir_defaults = self.env['ir.default'].get_model_defaults(self._name)

        for name in fields_list:
            # 1. look up context
            key = 'default_' + name
            if key in self._context:
                defaults[name] = self._context[key]
                continue

            # 2. look up ir.default
            if name in ir_defaults:
                defaults[name] = ir_defaults[name]
                continue

            field = self._fields.get(name)

            # 3. look up field.default
            if field and field.default:
                defaults[name] = field.default(self)
                continue

            # 4. delegate to parent model
            if field and field.inherited:
                field = field.related_field
                parent_fields[field.model_name].append(field.name)

        # convert default values to the right format
        #
        # we explicitly avoid using _convert_to_write() for x2many fields,
        # because the latter leaves values like [(4, 2), (4, 3)], which are not
        # supported by the web client as default values; stepping through the
        # cache allows to normalize such a list to [(6, 0, [2, 3])], which is
        # properly supported by the web client
        for fname, value in defaults.items():
            if fname in self._fields:
                field = self._fields[fname]
                value = field.convert_to_cache(value, self, validate=False)
                defaults[fname] = field.convert_to_write(value, self)

        # add default values for inherited fields
        for model, names in parent_fields.items():
            defaults.update(self.env[model].default_get(names))

        return defaults
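
    # Annotation: the lookup order above, illustrated with a hypothetical
    # integer field `priority`:
    #   1. context:    model.with_context(default_priority=3).default_get(['priority'])
    #   2. ir.default: a user/company default stored via the ir.default model
    #   3. field:      priority = fields.Integer(default=1)
    #   4. inherited:  delegated to the parent model for _inherits fields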

    @api.model
    def fields_get_keys(self):
        return list(self._fields)

    @api.model
    def _rec_name_fallback(self):
        # if self._rec_name is set, it belongs to self._fields
        return self._rec_name or 'id'

    #
    # Override this method if you need a window title that depends on the context
    @api.model
    def view_header_get(self, view_id=None, view_type='form'):
        return False

    @api.model
    def user_has_groups(self, groups):
        """Return true if the user is member of at least one of the groups in
        ``groups``, and is not a member of any of the groups in ``groups``
        preceded by ``!``. Typically used to resolve ``groups`` attribute in
        view and model definitions.

        :param str groups: comma-separated list of fully-qualified group
            external IDs, e.g., ``base.group_user,base.group_system``,
            optionally preceded by ``!``
        :return: True if the current user is a member of one of the given groups
            not preceded by ``!`` and is not member of any of the groups
            preceded by ``!``
            (the system uses group-membership checks like this to decide, for
            instance, whether a user is an internal user)
        """
        from odoo.http import request
        user = self.env.user

        has_groups = []
        not_has_groups = []
        for group_ext_id in groups.split(','):
            group_ext_id = group_ext_id.strip()
            if group_ext_id[0] == '!':
                not_has_groups.append(group_ext_id[1:])
            else:
                has_groups.append(group_ext_id)

        for group_ext_id in not_has_groups:
            if group_ext_id == 'base.group_no_one':
                # check: the group_no_one is effective in debug mode only
                if user.has_group(group_ext_id) and request and request.session.debug:
                    return False
            else:
                if user.has_group(group_ext_id):
                    return False

        for group_ext_id in has_groups:
            if group_ext_id == 'base.group_no_one':
                # check: the group_no_one is effective in debug mode only
                if user.has_group(group_ext_id) and request and request.session.debug:
                    return True
            else:
                if user.has_group(group_ext_id):
                    return True

        return not has_groups
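
    # Annotation: a usage sketch with standard base groups:
    #   self.user_has_groups('base.group_user,!base.group_portal')
    # is True for internal users that are not portal users.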

    @api.model
    def _get_default_form_view(self):  # form view
        """ Generates a default single-line form view using all fields
        of the current model.

        :returns: a form view as an lxml document
        :rtype: etree._Element
        """
        group = E.group(col="4")
        for fname, field in self._fields.items():
            if field.automatic:
                continue
            elif field.type in ('one2many', 'many2many', 'text', 'html'):
                group.append(E.newline())
                group.append(E.field(name=fname, colspan="4"))
                group.append(E.newline())
            else:
                group.append(E.field(name=fname))
        group.append(E.separator())
        return E.form(E.sheet(group, string=self._description))
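
    # Annotation: for a hypothetical model with fields `name` (Char) and
    # `notes` (Text), the generated arch looks roughly like:
    #   <form>
    #     <sheet string="...">
    #       <group col="4">
    #         <field name="name"/>
    #         <newline/><field name="notes" colspan="4"/><newline/>
    #         <separator/>
    #       </group>
    #     </sheet>
    #   </form>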

    @api.model
    def _get_default_search_view(self):  # search view
        """ Generates a single-field search view, based on _rec_name.

        :returns: a search view as an lxml document
        :rtype: etree._Element
        """
        element = E.field(name=self._rec_name_fallback())
        return E.search(element, string=self._description)

    @api.model
    def _get_default_tree_view(self):  # tree (list) view
        """ Generates a single-field tree view, based on _rec_name.

        :returns: a tree view as an lxml document
        :rtype: etree._Element
        """
        element = E.field(name=self._rec_name_fallback())
        return E.tree(element, string=self._description)

    @api.model
    def _get_default_pivot_view(self):  # pivot view
        """ Generates an empty pivot view.

        :returns: a pivot view as an lxml document
        :rtype: etree._Element
        """
        return E.pivot(string=self._description)

    @api.model
    def _get_default_kanban_view(self):  # kanban view
        """ Generates a single-field kanban view, based on _rec_name.

        :returns: a kanban view as an lxml document
        :rtype: etree._Element
        """

        field = E.field(name=self._rec_name_fallback())
        content_div = E.div(field, {'class': "o_kanban_card_content"})
        card_div = E.div(content_div, {'t-attf-class': "oe_kanban_card oe_kanban_global_click"})
        kanban_box = E.t(card_div, {'t-name': "kanban-box"})
        templates = E.templates(kanban_box)
        return E.kanban(templates, string=self._description)

    @api.model
    def _get_default_graph_view(self):  # graph view
        """ Generates a single-field graph view, based on _rec_name.

        :returns: a graph view as an lxml document
        :rtype: etree._Element
        """
        element = E.field(name=self._rec_name_fallback())
        return E.graph(element, string=self._description)

    @api.model
    def _get_default_calendar_view(self):  # calendar view
        """ Generates a default calendar view by trying to infer
        calendar fields from a number of pre-set attribute names

        :returns: a calendar view
        :rtype: etree._Element
        """
        def set_first_of(seq, in_, to):
            """Sets the first value of ``seq`` also found in ``in_`` to
            the ``to`` attribute of the ``view`` being closed over.

            Returns whether it's found a suitable value (and set it on
            the attribute) or not
            """
            for item in seq:
                if item in in_:
                    view.set(to, item)
                    return True
            return False

        view = E.calendar(string=self._description)
        view.append(E.field(name=self._rec_name_fallback()))

        if self._date_name not in self._fields:
            for dt in ['date', 'date_start', 'x_date', 'x_date_start']:
                if dt in self._fields:
                    self._date_name = dt
                    break
            else:
                raise UserError(_("Insufficient fields for Calendar View!"))
        view.set('date_start', self._date_name)

        set_first_of(["user_id", "partner_id", "x_user_id", "x_partner_id"],
                     self._fields, 'color')

        if not set_first_of(["date_stop", "date_end", "x_date_stop", "x_date_end"],
                            self._fields, 'date_stop'):
            if not set_first_of(["date_delay", "planned_hours", "x_date_delay", "x_planned_hours"],
                                self._fields, 'date_delay'):
                raise UserError(_("Insufficient fields to generate a Calendar View for %s, missing a date_stop or a date_delay") % self._name)

        return view
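
    # Sketch of the inference on a hypothetical model with fields
    # 'date_start', 'date_end' and 'user_id' (and _rec_name 'name'):
    #
    #   <calendar string="..." date_start="date_start" color="user_id"
    #             date_stop="date_end">
    #     <field name="name"/>
    #   </calendar>
    #
    # If neither a stop-date nor a delay field can be inferred, a UserError
    # is raised instead.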

    @api.model
    def load_views(self, views, options=None):  # load the requested views
        """ Returns the fields_views of given views, along with the fields of
            the current model, and optionally its filters for the given action.

        :param views: list of [view_id, view_type]
        :param options['toolbar']: True to include contextual actions when loading fields_views
        :param options['load_filters']: True to return the model's filters
        :param options['action_id']: id of the action to get the filters
        :return: dictionary with fields_views, fields and optionally filters
        """
        options = options or {}
        result = {}

        toolbar = options.get('toolbar')
        result['fields_views'] = {
            v_type: self.fields_view_get(v_id, v_type if v_type != 'list' else 'tree',
                                         toolbar=toolbar if v_type != 'search' else False)
            for [v_id, v_type] in views
        }
        result['fields'] = self.fields_get()

        if options.get('load_filters'):
            result['filters'] = self.env['ir.filters'].get_filters(self._name, options.get('action_id'))

        return result
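
    # Typical call from the web client (a sketch; the action id is made up):
    #
    #   >>> env['res.partner'].load_views(
    #   ...     [[False, 'list'], [False, 'form'], [False, 'search']],
    #   ...     {'toolbar': True, 'load_filters': True, 'action_id': 42})
    #   {'fields_views': {'list': {...}, 'form': {...}, 'search': {...}},
    #    'fields': {...}, 'filters': [...]}
    #
    # Note the mapping above: the client-side type 'list' is requested as the
    # internal type 'tree', and the toolbar is never computed for search views.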

    @api.model
    def _fields_view_get(self, view_id=None, view_type='form', toolbar=False, submenu=False):
        View = self.env['ir.ui.view']
        result = {
            'model': self._name,
            'field_parent': False,
        }

        # try to find a view_id if none provided
        if not view_id:
            # <view_type>_view_ref in context can be used to override the default view
            view_ref_key = view_type + '_view_ref'
            view_ref = self._context.get(view_ref_key)
            if view_ref:
                if '.' in view_ref:
                    module, view_ref = view_ref.split('.', 1)
                    query = "SELECT res_id FROM ir_model_data WHERE model='ir.ui.view' AND module=%s AND name=%s"
                    self._cr.execute(query, (module, view_ref))
                    view_ref_res = self._cr.fetchone()
                    if view_ref_res:
                        view_id = view_ref_res[0]
                else:
                    _logger.warning('%r requires a fully-qualified external id (got: %r for model %s). '
                        'Please use the complete `module.view_id` form instead.', view_ref_key, view_ref,
                        self._name)

            if not view_id:
                # otherwise try to find the lowest priority matching ir.ui.view
                view_id = View.default_view(self._name, view_type)

        if view_id:
            # read the view with inherited views applied
            root_view = View.browse(view_id).read_combined(['id', 'name', 'field_parent', 'type', 'model', 'arch'])
            result['arch'] = root_view['arch']
            result['name'] = root_view['name']
            result['type'] = root_view['type']
            result['view_id'] = root_view['id']
            result['field_parent'] = root_view['field_parent']
            result['base_model'] = root_view['model']
        else:
            # fallback on default views methods if no ir.ui.view could be found
            try:
                arch_etree = getattr(self, '_get_default_%s_view' % view_type)()
                result['arch'] = etree.tostring(arch_etree, encoding='unicode')
                result['type'] = view_type
                result['name'] = 'default'
            except AttributeError:
                raise UserError(_("No default view of type '%s' could be found !") % view_type)
        return result
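
    # The `<view_type>_view_ref` context key handled above lets a caller force
    # a specific view, e.g. (hypothetical external id):
    #
    #   >>> env['res.partner'].with_context(
    #   ...     form_view_ref='my_module.partner_form')._fields_view_get()
    #
    # The reference must be fully qualified ('module.xml_id'); a bare xml_id
    # is ignored with a warning, and the lowest-priority matching ir.ui.view
    # is resolved instead.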

    @api.model
    def fields_view_get(self, view_id=None, view_type='form', toolbar=False, submenu=False):
        """ fields_view_get([view_id | view_type='form'])

        Get the detailed composition of the requested view like fields, model, view architecture

        :param int view_id: id of the view or None
        :param str view_type: type of the view to return if view_id is None ('form', 'tree', ...)
        :param bool toolbar: true to include contextual actions
        :param submenu: deprecated
        :return: composition of the requested view (including inherited views and extensions)
        :rtype: dict
        :raise AttributeError:
                * if the inherited view has an unknown position other than 'before', 'after', 'inside', 'replace'
                * if some tag other than 'position' is found in the parent view
        :raise Invalid ArchitectureError: if there is a view type other than form, tree, calendar, search, etc. defined on the structure
        """
        View = self.env['ir.ui.view']

        # Get the view arch and all other attributes describing the composition of the view
        result = self._fields_view_get(view_id=view_id, view_type=view_type, toolbar=toolbar, submenu=submenu)

        # Override context for postprocessing
        if view_id and result.get('base_model', self._name) != self._name:
            View = View.with_context(base_model_name=result['base_model'])

        # Apply post processing, groups and modifiers etc...
        xarch, xfields = View.postprocess_and_fields(self._name, etree.fromstring(result['arch']), view_id)
        result['arch'] = xarch
        result['fields'] = xfields

        # Add related action information if asked
        if toolbar:
            vt = 'list' if view_type == 'tree' else view_type
            bindings = self.env['ir.actions.actions'].get_bindings(self._name)
            resreport = [action
                         for action in bindings['report']
                         if vt in (action.get('binding_view_types') or vt).split(',')]
            resaction = [action
                         for action in bindings['action']
                         if vt in (action.get('binding_view_types') or vt).split(',')]

            result['toolbar'] = {
                'print': resreport,
                'action': resaction,
            }
        return result

    def get_formview_id(self, access_uid=None):
        """ Return an view id to open the document ``self`` with. This method is
            meant to be overridden in addons that want to give specific view ids
            for example.

            Optional access_uid holds the user that would access the form view
            id different from the current environment user.
        """
        return False

    def get_formview_action(self, access_uid=None):
        """ Return an action to open the document ``self``. This method is meant
            to be overridden in addons that want to give specific view ids for
            example.

        An optional access_uid holds the user that will access the document
        that could be different from the current user. """
        view_id = self.sudo().get_formview_id(access_uid=access_uid)
        return {
            'type': 'ir.actions.act_window',
            'res_model': self._name,
            'view_type': 'form',
            'view_mode': 'form',
            'views': [(view_id, 'form')],
            'target': 'current',
            'res_id': self.id,
            'context': dict(self._context),
        }

    def get_access_action(self, access_uid=None):
        """ Return an action to open the document. This method is meant to be
        overridden in addons that want to give specific access to the document.
        By default it opens the formview of the document.

        An optional access_uid holds the user that will access the document
        that could be different from the current user.
        """
        return self[0].get_formview_action(access_uid=access_uid)

    @api.model
    def search_count(self, args):
        """ search_count(args) -> int

        Returns the number of records in the current model matching :ref:`the
        provided domain <reference/orm/domains>`.
        """
        res = self.search(args, count=True)
        return res if isinstance(res, int) else len(res)

    @api.model
    @api.returns('self',
        upgrade=lambda self, value, args, offset=0, limit=None, order=None, count=False: value if count else self.browse(value),
        downgrade=lambda self, value, args, offset=0, limit=None, order=None, count=False: value if count else value.ids)
    def search(self, args, offset=0, limit=None, order=None, count=False):
        """ search(args[, offset=0][, limit=None][, order=None][, count=False])
        The search view's parameters and their default values come from here.
        Searches for records based on the ``args``
        :ref:`search domain <reference/orm/domains>`.

        :param args: :ref:`A search domain <reference/orm/domains>`. Use an empty
                     list to match all records.
        :param int offset: number of results to ignore (default: none)
        :param int limit: maximum number of records to return (default: all)
        :param str order: sort string
        :param bool count: if True, only counts and returns the number of matching records (default: False)
        :returns: at most ``limit`` records matching the search criteria

        :raise AccessError: * if user tries to bypass access rules for read on the requested object.
        """
        res = self._search(args, offset=offset, limit=limit, order=order, count=count)
        return res if count else self.browse(res)
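
    # Usage sketch (records and count are hypothetical):
    #
    #   >>> env['res.partner'].search([('name', 'ilike', 'agro')],
    #   ...                           limit=5, order='name desc')
    #   res.partner(12, 7)
    #   >>> env['res.partner'].search([], count=True)  # == search_count([])
    #   53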

    #
    # display_name, name_get, name_create, name_search
    #

    @api.depends(lambda self: (self._rec_name,) if self._rec_name else ())
    def _compute_display_name(self):
        """Compute the value of the `display_name` field.

        In general `display_name` is equal to calling `name_get()[0][1]`.

        In that case, it is recommended to use `display_name` to uniformize the
        code and to potentially take advantage of prefetch when applicable.

        However some models might override this method. For them, the behavior
        might differ, and it is important to select which of `display_name` or
        `name_get()[0][1]` to call depending on the desired result.
        """
        names = dict(self.name_get())
        for record in self:
            record.display_name = names.get(record.id, False)

    def name_get(self):
        """ name_get() -> [(id, name), ...]

        Returns a textual representation for the records in ``self``.
        By default this is the value of the ``display_name`` field.

        :return: list of pairs ``(id, text_repr)`` for each records
        :rtype: list(tuple)
        """
        result = []
        name = self._rec_name
        if name in self._fields:
            convert = self._fields[name].convert_to_display_name
            for record in self:
                result.append((record.id, convert(record[name], record)))
        else:
            for record in self:
                result.append((record.id, "%s,%s" % (record._name, record.id)))

        return result
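
    # Example with hypothetical records: when _rec_name resolves to a 'name'
    # field this returns pairs such as [(7, 'Agrolait'), (18, 'China Export')];
    # without a usable _rec_name it falls back to technical labels such as
    # [(5, 'some.model,5')].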

    @api.model
    def name_create(self, name):
        """ name_create(name) -> record

        Create a new record by calling :meth:`~.create` with only one value
        provided: the display name of the new record.

        The new record will be initialized with any default values
        applicable to this model, or provided through the context. The usual
        behavior of :meth:`~.create` applies.

        :param name: display name of the record to create
        :rtype: tuple
        :return: the :meth:`~.name_get` pair value of the created record
        """
        if self._rec_name:
            record = self.create({self._rec_name: name})
            return record.name_get()[0]
        else:
            _logger.warning("Cannot execute name_create, no _rec_name defined on %s", self._name)
            return False

    @api.model
    def name_search(self, name='', args=None, operator='ilike', limit=100):
        """ name_search(name='', args=None, operator='ilike', limit=100) -> records

        Search for records that have a display name matching the given
        ``name`` pattern when compared with the given ``operator``, while also
        matching the optional search domain (``args``).

        This is used for example to provide suggestions based on a partial
        value for a relational field. It may sometimes be seen as the inverse
        function of :meth:`~.name_get`, but it is not guaranteed to be.

        This method is equivalent to calling :meth:`~.search` with a search
        domain based on ``display_name`` and then :meth:`~.name_get` on the
        result of the search.

        :param str name: the name pattern to match
        :param list args: optional search domain (see :meth:`~.search` for
                          syntax), specifying further restrictions
        :param str operator: domain operator for matching ``name``, such as
                             ``'like'`` or ``'='``.
        :param int limit: optional max number of records to return
        :rtype: list
        :return: list of pairs ``(id, text_repr)`` for all matching records.
        """
        return self._name_search(name, args, operator, limit=limit)

    @api.model
    def _name_search(self, name='', args=None, operator='ilike', limit=100, name_get_uid=None):
        # private implementation of name_search, allows passing a dedicated user
        # for the name_get part to solve some access rights issues
        args = list(args or [])
        # optimize out the default criterion of ``ilike ''`` that matches everything
        if not self._rec_name:
            _logger.warning("Cannot execute name_search, no _rec_name defined on %s", self._name)
        elif not (name == '' and operator == 'ilike'):
            args += [(self._rec_name, operator, name)]
        access_rights_uid = name_get_uid or self._uid
        ids = self._search(args, limit=limit, access_rights_uid=access_rights_uid)
        recs = self.browse(ids)
        return lazy_name_get(recs.with_user(access_rights_uid))
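
    # Sketch: the call below extends the domain with ('name', 'ilike', 'agro')
    # (assuming _rec_name == 'name') and returns name_get()-style pairs, e.g.
    #
    #   >>> env['res.partner'].name_search('agro', limit=8)
    #   [(7, 'Agrolait'), (12, 'Agrolait, Michel Fletcher')]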

    @api.model
    def _add_missing_default_values(self, values):
        # avoid overriding inherited values when parent is set
        avoid_models = {
            parent_model
            for parent_model, parent_field in self._inherits.items()
            if parent_field in values
        }

        # compute missing fields
        missing_defaults = {
            name
            for name, field in self._fields.items()
            if name not in values
            if not (field.inherited and field.related_field.model_name in avoid_models)
        }

        if not missing_defaults:
            return values

        # override defaults with the provided values, never allow the other way around
        defaults = self.default_get(list(missing_defaults))
        for name, value in defaults.items():
            if self._fields[name].type == 'many2many' and value and isinstance(value[0], int):
                # convert a list of ids into a list of commands
                defaults[name] = [(6, 0, value)]
            elif self._fields[name].type == 'one2many' and value and isinstance(value[0], dict):
                # convert a list of dicts into a list of commands
                defaults[name] = [(0, 0, x) for x in value]
        defaults.update(values)
        return defaults
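
    # Sketch of the x2many conversions above, with hypothetical defaults:
    # a many2many default of [1, 2] becomes the single 'replace' command
    # [(6, 0, [1, 2])], and a one2many default of [{'name': 'x'}] becomes the
    # 'create' commands [(0, 0, {'name': 'x'})], so that create() can process
    # the defaults exactly like client-provided values.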

    @classmethod
    def clear_caches(cls):
        """ Clear the caches

        This clears the caches associated to methods decorated with
        ``tools.ormcache`` or ``tools.ormcache_multi``.
        """
        cls.pool._clear_cache()

    @api.model
    def _read_group_fill_results(self, domain, groupby, remaining_groupbys,
                                 aggregated_fields, count_field,
                                 read_group_result, read_group_order=None):
        """Helper method for filling in empty groups for all possible values of
           the field being grouped by"""
        field = self._fields[groupby]
        if not field.group_expand:
            return read_group_result

        # field.group_expand is the name of a method that returns the groups
        # that we want to display for this field, in the form of a recordset or
        # a list of values (depending on the type of the field). This is useful
        # to implement kanban views for instance, where some columns should be
        # displayed even if they don't contain any record.

        # determine all groups that should be returned
        values = [line[groupby] for line in read_group_result if line[groupby]]

        if field.relational:
            # groups is a recordset; determine order on groups' model
            groups = self.env[field.comodel_name].browse([value[0] for value in values])
            order = groups._order
            if read_group_order == groupby + ' desc':
                order = tools.reverse_order(order)
            groups = getattr(self, field.group_expand)(groups, domain, order)
            groups = groups.sudo()
            values = lazy_name_get(groups)
            value2key = lambda value: value and value[0]

        else:
            # groups is a list of values
            values = getattr(self, field.group_expand)(values, domain, None)
            if read_group_order == groupby + ' desc':
                values.reverse()
            value2key = lambda value: value

        # Merge the current results (list of dicts) with all groups. Determine
        # the global order of results groups, which is supposed to be in the
        # same order as read_group_result (in the case of a many2one field).
        result = OrderedDict((value2key(value), {}) for value in values)

        # fill in results from read_group_result
        for line in read_group_result:
            key = value2key(line[groupby])
            if not result.get(key):
                result[key] = line
            else:
                result[key][count_field] = line[count_field]

        # fill in missing results from all groups
        for value in values:
            key = value2key(value)
            if not result[key]:
                line = dict.fromkeys(aggregated_fields, False)
                line[groupby] = value
                line[groupby + '_count'] = 0
                line['__domain'] = [(groupby, '=', key)] + domain
                if remaining_groupbys:
                    line['__context'] = {'group_by': remaining_groupbys}
                result[key] = line

        # add folding information if present
        if field.relational and groups._fold_name in groups._fields:
            fold = {group.id: group[groups._fold_name]
                    for group in groups.browse([key for key in result if key])}
            for key, line in result.items():
                line['__fold'] = fold.get(key, False)

        return list(result.values())

    @api.model
    def _read_group_fill_temporal(self, data, groupby, aggregated_fields, annotated_groupbys,
                                  interval=dateutil.relativedelta.relativedelta(months=1)):
        """Helper method for filling date/datetime 'holes' in a result set.

        We are in a use case where data are grouped by a date field (typically
        months but it could be any other interval) and displayed in a chart.

        Assume we group records by month, and we only have data for August,
        September and December. By default, plotting the result gives something
        like:
                                                ___
                                      ___      |   |
                                     |   |     |   |
                                     |   | ___ |   |
                                     |   ||   ||   |
                                     |___||___||___|
                                      Aug  Sep  Dec

        The problem is that December data follows immediately September data,
        which is misleading for the user. Adding explicit zeroes for missing data
        gives something like:
                                                     ___
                                 ___                |   |
                                |   |               |   |
                                |   | ___           |   |
                                |   ||   |          |   |
                                |___||___| ___  ___ |___|
                                 Aug  Sep  Oct  Nov  Dec

        :param list data: the data containing groups
        :param list groupby: name of the first group by
        :param list aggregated_fields: list of aggregated fields in the query
        :param relativedelta interval: interval between two temporal groups,
                expressed as a relativedelta (one month by default)
        :rtype: list
        :return: the input data with the missing temporal groups filled in
        """
        first_a_gby = annotated_groupbys[0]
        if not data:
            return data
        if first_a_gby['type'] not in ('date', 'datetime'):
            return data
        interval = first_a_gby['interval']
        groupby_name = groupby[0]

        # existing non null datetimes
        existing = [d[groupby_name] for d in data if d[groupby_name]]

        if len(existing) < 2:
            return data

        # assumption: existing data is sorted by field 'groupby_name'
        first, last = existing[0], existing[-1]

        empty_item = {'id': False, (groupby_name.split(':')[0] + '_count'): 0}
        empty_item.update({key: False for key in aggregated_fields})
        empty_item.update({key: False for key in [group['groupby'] for group in annotated_groupbys[1:]]})

        grouped_data = collections.defaultdict(list)
        for d in data:
            grouped_data[d[groupby_name]].append(d)

        result = []

        for dt in date_utils.date_range(first, last, interval):
            result.extend(grouped_data[dt] or [dict(empty_item, **{groupby_name: dt})])

        if False in grouped_data:
            result.extend(grouped_data[False])

        return result
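
    # This filling is opt-in through the context (a sketch; model and field
    # names are hypothetical):
    #
    #   >>> env['sale.order'].with_context(fill_temporal=True).read_group(
    #   ...     [], ['amount_total'], ['date_order:month'])
    #
    # also returns zero-valued groups for the empty months between the first
    # and last months that actually contain records.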

    @api.model
    def _read_group_prepare(self, orderby, aggregated_fields, annotated_groupbys, query):
        """
        Prepares the GROUP BY and ORDER BY terms for the read_group method.
        Adds the missing JOIN clause to the query if the order should be
        computed against a m2o field.
        :param orderby: the orderby definition in the form "%(field)s %(order)s"
        :param aggregated_fields: list of aggregated fields in the query
        :param annotated_groupbys: list of dictionaries returned by _read_group_process_groupby
                These dictionaries contain the qualified name of each groupby
                (fully qualified SQL name for the corresponding field),
                and the (non raw) field name.
        :param osv.Query query: the query under construction
        :return: (groupby_terms, orderby_terms)
        """
        orderby_terms = []
        groupby_terms = [gb['qualified_field'] for gb in annotated_groupbys]
        if not orderby:
            return groupby_terms, orderby_terms

        self._check_qorder(orderby)

        # when a field is grouped as 'foo:bar', both orderby='foo' and
        # orderby='foo:bar' generate the clause 'ORDER BY "foo:bar"'
        groupby_fields = {
            gb[key]: gb['groupby']
            for gb in annotated_groupbys
            for key in ('field', 'groupby')
        }
        for order_part in orderby.split(','):
            order_split = order_part.split()
            order_field = order_split[0]
            if order_field == 'id' or order_field in groupby_fields:
                if self._fields[order_field.split(':')[0]].type == 'many2one':
                    order_clause = self._generate_order_by(order_part, query).replace('ORDER BY ', '')
                    if order_clause:
                        orderby_terms.append(order_clause)
                        groupby_terms += [order_term.split()[0] for order_term in order_clause.split(',')]
                else:
                    order_split[0] = '"%s"' % groupby_fields.get(order_field, order_field)
                    orderby_terms.append(' '.join(order_split))
            elif order_field in aggregated_fields:
                order_split[0] = '"%s"' % order_field
                orderby_terms.append(' '.join(order_split))
            elif order_field not in self._fields:
                raise ValueError("Invalid field %r on model %r" % (order_field, self._name))
            else:
                # Cannot order by a field that will not appear in the results (needs to be grouped or aggregated)
                _logger.warning('%s: read_group order by `%s` ignored, cannot sort on empty columns (not grouped/aggregated)',
                                self._name, order_part)

        return groupby_terms, orderby_terms

    @api.model
    def _read_group_process_groupby(self, gb, query):
        """
            Helper method to collect important information about groupbys: raw
            field name, type, time information, qualified name, ...
        """
        split = gb.split(':')
        field = self._fields.get(split[0])
        if not field:
            raise ValueError("Invalid field %r on model %r" % (split[0], self._name))
        field_type = field.type
        gb_function = split[1] if len(split) == 2 else None
        temporal = field_type in ('date', 'datetime')
        tz_convert = field_type == 'datetime' and self._context.get('tz') in pytz.all_timezones
        qualified_field = self._inherits_join_calc(self._table, split[0], query)
        if temporal:
            display_formats = {
                # Careful with week/year formats:
                #  - yyyy (lower) must always be used, *except* for week+year formats
                #  - YYYY (upper) must always be used for week+year format
                #         e.g. 2006-01-01 is W52 2005 in some locales (de_DE),
                #                         and W1 2006 for others
                #
                # Mixing both formats, e.g. 'MMM YYYY' would yield wrong results,
                # such as 2006-01-01 being formatted as "January 2005" in some locales.
                # Cfr: http://babel.pocoo.org/en/latest/dates.html#date-fields
                'hour': 'hh:00 dd MMM',
                'day': 'dd MMM yyyy', # yyyy = normal year
                'week': "'W'w YYYY",  # w YYYY = ISO week-year
                'month': 'MMMM yyyy',
                'quarter': 'QQQ yyyy',
                'year': 'yyyy',
            }
            time_intervals = {
                'hour': dateutil.relativedelta.relativedelta(hours=1),
                'day': dateutil.relativedelta.relativedelta(days=1),
                'week': datetime.timedelta(days=7),
                'month': dateutil.relativedelta.relativedelta(months=1),
                'quarter': dateutil.relativedelta.relativedelta(months=3),
                'year': dateutil.relativedelta.relativedelta(years=1)
            }
            if tz_convert:
                qualified_field = "timezone('%s', timezone('UTC',%s))" % (self._context.get('tz', 'UTC'), qualified_field)
            qualified_field = "date_trunc('%s', %s::timestamp)" % (gb_function or 'month', qualified_field)
        if field_type == 'boolean':
            qualified_field = "coalesce(%s,false)" % qualified_field
        return {
            'field': split[0],
            'groupby': gb,
            'type': field_type,
            'display_format': display_formats[gb_function or 'month'] if temporal else None,
            'interval': time_intervals[gb_function or 'month'] if temporal else None,
            'tz_convert': tz_convert,
            'qualified_field': qualified_field,
        }
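
    # For example, the groupby spec 'create_date:week' with a timezone in the
    # context would yield roughly (a sketch):
    #
    #   {'field': 'create_date', 'groupby': 'create_date:week',
    #    'type': 'datetime', 'display_format': "'W'w YYYY",
    #    'interval': datetime.timedelta(days=7), 'tz_convert': True,
    #    'qualified_field': "date_trunc('week', timezone('Europe/Brussels',"
    #                       " timezone('UTC', ...))::timestamp)"}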

    @api.model
    def _read_group_prepare_data(self, key, value, groupby_dict):
        """
            Helper method to sanitize the data received by read_group. The None
            values are converted to False, and the date/datetime are formatted,
            and corrected according to the timezones.
        """
        value = False if value is None else value
        gb = groupby_dict.get(key)
        if gb and gb['type'] in ('date', 'datetime') and value:
            if isinstance(value, str):
                dt_format = DEFAULT_SERVER_DATETIME_FORMAT if gb['type'] == 'datetime' else DEFAULT_SERVER_DATE_FORMAT
                value = datetime.datetime.strptime(value, dt_format)
            if gb['tz_convert']:
                value = pytz.timezone(self._context['tz']).localize(value)
        return value

    @api.model
    def _read_group_format_result(self, data, annotated_groupbys, groupby, domain):
        """
            Helper method to format the data contained in the dictionary data by 
            adding the domain corresponding to its values, the groupbys in the 
            context and by properly formatting the date/datetime values.

        :param data: a single group
        :param annotated_groupbys: expanded grouping metainformation
        :param groupby: original grouping metainformation
        :param domain: original domain for read_group
        """

        sections = []
        for gb in annotated_groupbys:
            ftype = gb['type']
            value = data[gb['groupby']]

            # full domain for this groupby spec
            d = None
            if value:
                if ftype == 'many2one':
                    value = value[0]
                elif ftype in ('date', 'datetime'):
                    locale = get_lang(self.env).code
                    fmt = DEFAULT_SERVER_DATETIME_FORMAT if ftype == 'datetime' else DEFAULT_SERVER_DATE_FORMAT
                    tzinfo = None
                    range_start = value
                    range_end = value + gb['interval']
                    # value from postgres is in local tz (so range is
                    # considered in local tz e.g. "day" is [00:00, 00:00[
                    # local rather than UTC which could be [11:00, 11:00[
                    # local) but domain and raw value should be in UTC
                    if gb['tz_convert']:
                        tzinfo = range_start.tzinfo
                        range_start = range_start.astimezone(pytz.utc)
                        range_end = range_end.astimezone(pytz.utc)

                    range_start = range_start.strftime(fmt)
                    range_end = range_end.strftime(fmt)
                    if ftype == 'datetime':
                        label = babel.dates.format_datetime(
                            value, format=gb['display_format'],
                            tzinfo=tzinfo, locale=locale
                        )
                    else:
                        label = babel.dates.format_date(
                            value, format=gb['display_format'],
                            locale=locale
                        )
                    data[gb['groupby']] = ('%s/%s' % (range_start, range_end), label)
                    d = [
                        '&',
                        (gb['field'], '>=', range_start),
                        (gb['field'], '<', range_end),
                    ]

            if d is None:
                d = [(gb['field'], '=', value)]
            sections.append(d)
        sections.append(domain)

        data['__domain'] = expression.AND(sections)
        if len(groupby) - len(annotated_groupbys) >= 1:
            data['__context'] = { 'group_by': groupby[len(annotated_groupbys):]}
        del data['id']
        return data

    @api.model
    def read_group(self, domain, fields, groupby, offset=0, limit=None, orderby=False, lazy=True):
        """Get the list of records in list view grouped by the given ``groupby`` fields.

        :param list domain: :ref:`A search domain <reference/orm/domains>`. Use an empty
                     list to match all records.
        :param list fields: list of fields present in the list view specified on the object.
                Each element is either 'field' (field name, using the default aggregation),
                or 'field:agg' (aggregate field with aggregation function 'agg'),
                or 'name:agg(field)' (aggregate field with 'agg' and return it as 'name').
                The possible aggregation functions are the ones provided by PostgreSQL
                (https://www.postgresql.org/docs/current/static/functions-aggregate.html)
                and 'count_distinct', with the expected meaning.
        :param list groupby: list of groupby descriptions by which the records will be grouped.  
                A groupby description is either a field (then it will be grouped by that field)
                or a string 'field:groupby_function'.  Right now, the only functions supported
                are 'day', 'week', 'month', 'quarter' or 'year', and they only make sense for 
                date/datetime fields.
        :param int offset: optional number of records to skip
        :param int limit: optional max number of records to return
        :param list orderby: optional ``order by`` specification, for
                             overriding the natural sort ordering of the
                             groups, see also :py:meth:`~osv.osv.osv.search`
                             (supported only for many2one fields currently)
        :param bool lazy: if true, the results are only grouped by the first groupby and the 
                remaining groupbys are put in the __context key.  If false, all the groupbys are
                done in one call.
        :return: list of dictionaries(one dictionary for each record) containing:

                    * the values of fields grouped by the fields in ``groupby`` argument
                    * __domain: list of tuples specifying the search criteria
                    * __context: dictionary with argument like ``groupby``
        :rtype: [{'field_name_1': value, ...}, ...]
        :raise AccessError: * if user has no read rights on the requested object
                            * if user tries to bypass access rules for read on the requested object
        """
        result = self._read_group_raw(domain, fields, groupby, offset=offset, limit=limit, orderby=orderby, lazy=lazy)

        groupby = [groupby] if isinstance(groupby, str) else list(OrderedSet(groupby))
        dt = [
            f for f in groupby
            if self._fields[f.split(':')[0]].type in ('date', 'datetime')    # e.g. 'date:month'
        ]

        # iterate on all results and replace the "full" date/datetime value
        # (range, label) by just the formatted label, in-place
        for group in result:
            for df in dt:
                # could group on a date(time) field which is empty in some
                # records, in which case as with m2o the _raw value will be
                # `False` instead of a (value, label) pair. In that case,
                # leave the `False` value alone
                if group.get(df):
                    group[df] = group[df][1]
        return result
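
    # Usage sketch (model, fields and figures are hypothetical):
    #
    #   >>> env['sale.order'].read_group(
    #   ...     [('state', '=', 'sale')], ['amount_total:sum'], ['partner_id'])
    #   [{'partner_id': (7, 'Agrolait'), 'partner_id_count': 2,
    #     'amount_total': 1250.0, '__domain': [...]},
    #    ...]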

    @api.model
    def _read_group_raw(self, domain, fields, groupby, offset=0, limit=None, orderby=False, lazy=True):
        self.check_access_rights('read')
        query = self._where_calc(domain)
        fields = fields or [f.name for f in self._fields.values() if f.store]

        groupby = [groupby] if isinstance(groupby, str) else list(OrderedSet(groupby))
        groupby_list = groupby[:1] if lazy else groupby
        annotated_groupbys = [self._read_group_process_groupby(gb, query) for gb in groupby_list]
        groupby_fields = [g['field'] for g in annotated_groupbys]
        order = orderby or ','.join([g for g in groupby_list])
        groupby_dict = {gb['groupby']: gb for gb in annotated_groupbys}

        self._apply_ir_rules(query, 'read')
        for gb in groupby_fields:
            assert gb in self._fields, "Unknown field %r in 'groupby'" % gb
            gb_field = self._fields[gb].base_field
            assert gb_field.store and gb_field.column_type, "Fields in 'groupby' must be regular database-persisted fields (no function or related fields), or function fields with store=True"

        aggregated_fields = []
        select_terms = []
        fnames = []                     # list of fields to flush

        for fspec in fields:
            if fspec == 'sequence':
                continue
            if fspec == '__count':
                # the web client sometimes adds this pseudo-field in the list
                continue

            match = regex_field_agg.match(fspec)
            if not match:
                raise UserError(_("Invalid field specification %r.") % fspec)

            name, func, fname = match.groups()
            if func:
                # we have either 'name:func' or 'name:func(fname)'
                fname = fname or name
                field = self._fields.get(fname)
                if not field:
                    raise ValueError("Invalid field %r on model %r" % (fname, self._name))
                if not (field.base_field.store and field.base_field.column_type):
                    raise UserError(_("Cannot aggregate field %r.") % fname)
                if func not in VALID_AGGREGATE_FUNCTIONS:
                    raise UserError(_("Invalid aggregation function %r.") % func)
            else:
                # we have 'name', retrieve the aggregator on the field
                field = self._fields.get(name)
                if not field:
                    raise ValueError("Invalid field %r on model %r" % (name, self._name))
                if not (field.base_field.store and
                        field.base_field.column_type and field.group_operator):
                    continue
                func, fname = field.group_operator, name

            fnames.append(fname)

            if fname in groupby_fields:
                continue
            if name in aggregated_fields:
                raise UserError(_("Output name %r is used twice.") % name)
            aggregated_fields.append(name)

            expr = self._inherits_join_calc(self._table, fname, query)
            if func.lower() == 'count_distinct':
                term = 'COUNT(DISTINCT %s) AS "%s"' % (expr, name)
            else:
                term = '%s(%s) AS "%s"' % (func, expr, name)
            select_terms.append(term)

        for gb in annotated_groupbys:
            select_terms.append('%s as "%s" ' % (gb['qualified_field'], gb['groupby']))

        self._flush_search(domain, fields=fnames + groupby_fields)

        groupby_terms, orderby_terms = self._read_group_prepare(order, aggregated_fields, annotated_groupbys, query)
        from_clause, where_clause, where_clause_params = query.get_sql()
        if lazy and (len(groupby_fields) >= 2 or not self._context.get('group_by_no_leaf')):
            count_field = groupby_fields[0] if len(groupby_fields) >= 1 else '_'
        else:
            count_field = '_'
        count_field += '_count'

        prefix_terms = lambda prefix, terms: (prefix + " " + ",".join(terms)) if terms else ''
        prefix_term = lambda prefix, term: ('%s %s' % (prefix, term)) if term else ''

        query = """
            SELECT min("%(table)s".id) AS id, count("%(table)s".id) AS "%(count_field)s" %(extra_fields)s
            FROM %(from)s
            %(where)s
            %(groupby)s
            %(orderby)s
            %(limit)s
            %(offset)s
        """ % {
            'table': self._table,
            'count_field': count_field,
            'extra_fields': prefix_terms(',', select_terms),
            'from': from_clause,
            'where': prefix_term('WHERE', where_clause),
            'groupby': prefix_terms('GROUP BY', groupby_terms),
            'orderby': prefix_terms('ORDER BY', orderby_terms),
            'limit': prefix_term('LIMIT', int(limit) if limit else None),
            'offset': prefix_term('OFFSET', int(offset) if limit else None),
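            # NB: upstream quirk, OFFSET is only emitted when a LIMIT is given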
        }
        self._cr.execute(query, where_clause_params)
        fetched_data = self._cr.dictfetchall()

        if not groupby_fields:
            return fetched_data

        self._read_group_resolve_many2one_fields(fetched_data, annotated_groupbys)

        data = [{k: self._read_group_prepare_data(k, v, groupby_dict) for k, v in r.items()} for r in fetched_data]

        if self.env.context.get('fill_temporal') and data:
            data = self._read_group_fill_temporal(data, groupby, aggregated_fields,
                                                  annotated_groupbys)

        result = [self._read_group_format_result(d, annotated_groupbys, groupby, domain) for d in data]

        if lazy:
            # Right now, read_group only fills results in lazy mode (by default).
            # If you need to have the empty groups in 'eager' mode, then the
            # method _read_group_fill_results needs to be completely reimplemented
            # in a sane way.
            result = self._read_group_fill_results(
                domain, groupby_fields[0], groupby[len(annotated_groupbys):],
                aggregated_fields, count_field, result, read_group_order=order,
            )
        return result

    def _read_group_resolve_many2one_fields(self, data, fields):
        many2onefields = {field['field'] for field in fields if field['type'] == 'many2one'}
        for field in many2onefields:
            ids_set = {d[field] for d in data if d[field]}
            m2o_records = self.env[self._fields[field].comodel_name].browse(ids_set)
            data_dict = dict(lazy_name_get(m2o_records.sudo()))
            for d in data:
                d[field] = (d[field], data_dict[d[field]]) if d[field] else False

    def _inherits_join_add(self, current_model, parent_model_name, query):
        """
        Add missing table SELECT and JOIN clause to ``query`` for reaching the parent table (no duplicates)
        :param current_model: current model object
        :param parent_model_name: name of the parent model for which the clauses should be added
        :param query: query object on which the JOIN should be added
        """
        inherits_field = current_model._inherits[parent_model_name]
        parent_model = self.env[parent_model_name]
        parent_alias, parent_alias_statement = query.add_join((current_model._table, parent_model._table, inherits_field, 'id', inherits_field), implicit=True)
        return parent_alias

    @api.model
    def _inherits_join_calc(self, alias, fname, query, implicit=True, outer=False):
        """
        Adds missing table select and join clause(s) to ``query`` for reaching
        the field coming from an '_inherits' parent table (no duplicates).

        :param alias: name of the initial SQL alias
        :param fname: name of inherited field to reach
        :param query: query object on which the JOIN should be added
        :return: qualified name of field, to be used in SELECT clause
        """
        # INVARIANT: alias is the SQL alias of model._table in query
        model, field = self, self._fields[fname]
        while field.inherited:
            # retrieve the parent model where field is inherited from
            parent_model = self.env[field.related_field.model_name]
            parent_fname = field.related[0]
            # JOIN parent_model._table AS parent_alias ON alias.parent_fname = parent_alias.id
            parent_alias, _ = query.add_join(
                (alias, parent_model._table, parent_fname, 'id', parent_fname),
                implicit=implicit, outer=outer,
            )
            model, alias, field = parent_model, parent_alias, field.related_field
        # handle the case where the field is translated
        if field.translate is True:
            return model._generate_translated_field(alias, fname, query)
        else:
            return '"%s"."%s"' % (alias, fname)

    def _parent_store_compute(self):
        """ Compute parent_path field from scratch. """
        if not self._parent_store:
            return

        # Each record is associated to a string 'parent_path', that represents
        # the path from the record's root node to the record. The path is made
        # of the node ids suffixed with a slash (see example below). The nodes
        # in the subtree of record are the ones where 'parent_path' starts with
        # the 'parent_path' of record.
        #
        #               a                 node | id | parent_path
        #              / \                  a  | 42 | 42/
        #            ...  b                 b  | 63 | 42/63/
        #                / \                c  | 84 | 42/63/84/
        #               c   d               d  | 85 | 42/63/85/
        #
        # Note: the final '/' is necessary to match subtrees correctly: '42/63'
        # is a prefix of '42/630', but '42/63/' is not a prefix of '42/630/'.
        _logger.info('Computing parent_path for table %s...', self._table)
        query = """
            WITH RECURSIVE __parent_store_compute(id, parent_path) AS (
                SELECT row.id, concat(row.id, '/')
                FROM {table} row
                WHERE row.{parent} IS NULL
            UNION
                SELECT row.id, concat(comp.parent_path, row.id, '/')
                FROM {table} row, __parent_store_compute comp
                WHERE row.{parent} = comp.id
            )
            UPDATE {table} row SET parent_path = comp.parent_path
            FROM __parent_store_compute comp
            WHERE row.id = comp.id
        """.format(table=self._table, parent=self._parent_name)
        self.env.cr.execute(query)
        self.invalidate_cache(['parent_path'])
        return True
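
    # The materialized path turns 'child_of' searches into a simple prefix
    # match (a sketch): the subtree of record 63 in the example above becomes
    #
    #   SELECT id FROM table WHERE parent_path LIKE '42/63/%'
    #
    # instead of a recursive walk over the parent field.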

    def _check_removed_columns(self, log=False):
        # iterate on the database columns to drop the NOT NULL constraints of
        # fields which were required but have been removed (or will be added by
        # another module)
        cr = self._cr
        cols = [name for name, field in self._fields.items()
                     if field.store and field.column_type]
        cr.execute("SELECT a.attname, a.attnotnull"
                   "  FROM pg_class c, pg_attribute a"
                   " WHERE c.relname=%s"
                   "   AND c.oid=a.attrelid"
                   "   AND a.attisdropped=%s"
                   "   AND pg_catalog.format_type(a.atttypid, a.atttypmod) NOT IN ('cid', 'tid', 'oid', 'xid')"
                   "   AND a.attname NOT IN %s", (self._table, False, tuple(cols))),

        for row in cr.dictfetchall():
            if log:
                _logger.debug("column %s is in the table %s but not in the corresponding object %s",
                              row['attname'], self._table, self._name)
            if row['attnotnull']:
                tools.drop_not_null(cr, self._table, row['attname'])

    def _init_column(self, column_name):
        """ Initialize the value of the given column for existing rows. """
        # get the default value; ideally, we should use default_get(), but it
        # fails due to ir.default not being ready
        field = self._fields[column_name]
        if field.default:
            value = field.default(self)
            value = field.convert_to_write(value, self)
            value = field.convert_to_column(value, self)
        else:
            value = None
        # Write value if non-NULL, except for booleans for which False means
        # the same as NULL - this saves us an expensive query on large tables.
        necessary = (value is not None) if field.type != 'boolean' else value
        if necessary:
            _logger.debug("Table '%s': setting default value of new column %s to %r",
                          self._table, column_name, value)
            query = 'UPDATE "%s" SET "%s"=%s WHERE "%s" IS NULL' % (
                self._table, column_name, field.column_format, column_name)
            self._cr.execute(query, (value,))

    @ormcache()
    def _table_has_rows(self):
        """ Return whether the model's table has rows. This method should only
            be used when updating the database schema (:meth:`~._auto_init`).
        """
        self.env.cr.execute('SELECT 1 FROM "%s" LIMIT 1' % self._table)
        return self.env.cr.rowcount

    def _auto_init(self):
        """ Initialize the database schema of ``self``: 初始化"self"的数据库架构
            - create the corresponding table,
            - create/update the necessary columns/tables for fields,
            - initialize new columns on existing rows,
            - add the SQL constraints given on the model,
            - add the indexes on indexed fields,

            Also prepare post-init stuff to:
            - add foreign key constraints,
            - reflect models, fields, relations and constraints,
            - mark fields to recompute on existing records.

            Note: you should not override this method. Instead, you can modify
            the model's database schema by overriding method :meth:`~.init`,
            which is called right after this one.
        """
        raise_on_invalid_object_name(self._name)

        # This prevents anything called by this method (in particular default
        # values) from prefetching a field for which the corresponding column
        # has not been added in database yet!
        self = self.with_context(prefetch_fields=False)

        self.pool.post_init(self._reflect)

        cr = self._cr
        update_custom_fields = self._context.get('update_custom_fields', False)
        must_create_table = not tools.table_exists(cr, self._table)
        parent_path_compute = False

        if self._auto:
            if must_create_table:
                tools.create_model_table(cr, self._table, self._description)

            if self._parent_store:
                if not tools.column_exists(cr, self._table, 'parent_path'):
                    self._create_parent_columns()
                    parent_path_compute = True

            self._check_removed_columns(log=False)

            # update the database schema for fields
            columns = tools.table_columns(cr, self._table)
            fields_to_compute = []

            for field in self._fields.values():
                if not field.store:
                    continue
                if field.manual and not update_custom_fields:
                    continue            # don't update custom fields
                new = field.update_db(self, columns)
                if new and field.compute:
                    fields_to_compute.append(field.name)

            if fields_to_compute:
                @self.pool.post_init
                def mark_fields_to_compute():
                    recs = self.with_context(active_test=False).search([])
                    for field in fields_to_compute:
                        _logger.info("Storing computed values of %s", field)
                        self.env.add_to_compute(recs._fields[field], recs)

        if self._auto:
            self._add_sql_constraints()

        if must_create_table:
            self._execute_sql()

        if parent_path_compute:
            self._parent_store_compute()

    def init(self):
        """ This method is called after :meth:`~._auto_init`, and may be
            overridden to create or modify a model's database schema.
        """
        pass

    def _create_parent_columns(self):
        tools.create_column(self._cr, self._table, 'parent_path', 'VARCHAR')
        if 'parent_path' not in self._fields:
            _logger.error("add a field parent_path on model %s: parent_path = fields.Char(index=True)", self._name)
        elif not self._fields['parent_path'].index:
            _logger.error('parent_path field on model %s must be indexed! Add index=True to the field definition)', self._name)

    def _add_sql_constraints(self):
        """ Modify this model's database table constraints so they match the
        ones given in ``_sql_constraints``.
        """
        cr = self._cr
        foreign_key_re = re.compile(r'\s*foreign\s+key\b.*', re.I)

        def process(key, definition):
            conname = '%s_%s' % (self._table, key)
            current_definition = tools.constraint_definition(cr, self._table, conname)
            if not current_definition:
                # constraint does not exist
                tools.add_constraint(cr, self._table, conname, definition)
            elif current_definition != definition:
                # constraint exists but its definition may have changed
                tools.drop_constraint(cr, self._table, conname)
                tools.add_constraint(cr, self._table, conname, definition)

        for (key, definition, _) in self._sql_constraints:
            if foreign_key_re.match(definition):
                self.pool.post_init(process, key, definition)
            else:
                process(key, definition)
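
    # Declaring constraints on a model (a minimal sketch; model name made up):
    #
    #   class Course(models.Model):
    #       _name = 'x.course'
    #       _sql_constraints = [
    #           ('name_uniq', 'unique(name)', "Course name must be unique!"),
    #       ]
    #
    # makes this method create (or drop and re-create, if the definition has
    # changed) a table constraint named 'x_course_name_uniq'.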

    def _execute_sql(self):
        """ Execute the SQL code from the _sql attribute (if any). 执行SQL"""
        if hasattr(self, "_sql"):
            self._cr.execute(self._sql)

    #
    # Update objects that use this one to update their _inherits fields
    #

    @api.model
    def _add_inherited_fields(self):
        """ Determine inherited fields. """
        # determine candidate inherited fields
        fields = {}
        for parent_model, parent_field in self._inherits.items():
            parent = self.env[parent_model]
            for name, field in parent._fields.items():
                # inherited fields are implemented as related fields, with the
                # following specific properties:
                #  - reading inherited fields should not bypass access rights
                #  - copy inherited fields iff their original field is copied
                fields[name] = field.new(
                    inherited=True,
                    inherited_field=field,
                    related=(parent_field, name),
                    related_sudo=False,
                    copy=field.copy,
                    readonly=field.readonly,
                )

        # add inherited fields that are not redefined locally
        for name, field in fields.items():
            if name not in self._fields:
                self._add_field(name, field)
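
    # Delegation inheritance in a nutshell (hypothetical model):
    #
    #   class Employee(models.Model):
    #       _name = 'x.employee'
    #       _inherits = {'res.partner': 'partner_id'}
    #       partner_id = fields.Many2one('res.partner', required=True,
    #                                    ondelete='cascade')
    #
    # Every res.partner field (e.g. 'name') not redefined on x.employee is
    # added here as a related field through partner_id, readable and writable
    # from the employee record without bypassing access rights.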

    @api.model
    def _inherits_check(self):
        for table, field_name in self._inherits.items():
            field = self._fields.get(field_name)
            if not field:
                _logger.info('Missing many2one field definition for _inherits reference "%s" in "%s", using default one.', field_name, self._name)
                from .fields import Many2one
                field = Many2one(table, string="Automatically created field to link to parent %s" % table, required=True, ondelete="cascade")
                self._add_field(field_name, field)
            elif not field.required or field.ondelete.lower() not in ("cascade", "restrict"):
                _logger.warning('Field definition for _inherits reference "%s" in "%s" must be marked as "required" with ondelete="cascade" or "restrict", forcing it to required + cascade.', field_name, self._name)
                field.required = True
                field.ondelete = "cascade"
            field.delegate = True

        # reflect fields with delegate=True in dictionary self._inherits
        for field in self._fields.values():
            if field.type == 'many2one' and not field.related and field.delegate:
                if not field.required:
                    _logger.warning("Field %s with delegate=True must be required.", field)
                    field.required = True
                if field.ondelete.lower() not in ('cascade', 'restrict'):
                    field.ondelete = 'cascade'
                self._inherits[field.comodel_name] = field.name

    @api.model
    def _prepare_setup(self):
        """ Prepare the setup of the model. """
        cls = type(self)
        cls._setup_done = False
        # a model's base structure depends on its mro (without registry classes)
        cls._model_cache_key = tuple(c for c in cls.mro() if getattr(c, 'pool', None) is None)

    @api.model
    def _setup_base(self):
        """ Determine the inherited and custom fields of the model. """
        cls = type(self)
        if cls._setup_done:
            return

        # 1. determine the proper fields of the model: the fields defined on the
        # class and magic fields, not the inherited or custom ones
        cls0 = cls.pool.model_cache.get(cls._model_cache_key)
        if cls0 and cls0._model_cache_key == cls._model_cache_key:
            # cls0 is either a model class from another registry, or cls itself.
            # The point is that it has the same base classes. We retrieve stuff
            # from cls0 to optimize the setup of cls. cls0 is guaranteed to be
            # properly set up: registries are loaded under a global lock,
            # therefore two registries are never set up at the same time.

            # remove fields that are not proper to cls
            for name in OrderedSet(cls._fields) - cls0._proper_fields:
                delattr(cls, name)
                cls._fields.pop(name, None)
            # collect proper fields on cls0, and add them on cls
            for name in cls0._proper_fields:
                field = cls0._fields[name]
                # regular fields are shared, while related fields are set up from scratch
                if not field.related:
                    self._add_field(name, field)
                else:
                    self._add_field(name, field.new(**field.args))
            cls._proper_fields = OrderedSet(cls._fields)

        else:
            # retrieve fields from parent classes, and duplicate them on cls to
            # avoid clashes with inheritance between different models
            for name in cls._fields:
                delattr(cls, name)
            cls._fields = OrderedDict()
            for name, field in sorted(getmembers(cls, Field.__instancecheck__), key=lambda f: f[1]._sequence):
                # do not retrieve magic, custom and inherited fields
                if not any(field.args.get(k) for k in ('automatic', 'manual', 'inherited')):
                    self._add_field(name, field.new())
            self._add_magic_fields()
            cls._proper_fields = OrderedSet(cls._fields)

        cls.pool.model_cache[cls._model_cache_key] = cls

        # 2. add manual fields
        if self.pool._init_modules:
            self.env['ir.model.fields']._add_manual_fields(self)

        # 3. make sure that parent models determine their own fields, then add
        # inherited fields to cls
        self._inherits_check()
        for parent in self._inherits:
            self.env[parent]._setup_base()
        self._add_inherited_fields()

        # 4. initialize more field metadata
        cls._field_computed = {}            # fields computed with the same method
        cls._field_inverses = Collector()   # inverse fields for related fields

        cls._setup_done = True

        # 5. determine and validate rec_name
        if cls._rec_name:
            assert cls._rec_name in cls._fields, \
                "Invalid rec_name %s for model %s" % (cls._rec_name, cls._name)
        elif 'name' in cls._fields:
            cls._rec_name = 'name'
        elif 'x_name' in cls._fields:
            cls._rec_name = 'x_name'

    @api.model
    def _setup_fields(self):
        """ Setup the fields, except for recomputation triggers. """
        cls = type(self)

        # set up fields
        bad_fields = []
        for name, field in cls._fields.items():
            try:
                field.setup_full(self)
            except Exception:
                if not self.pool.loaded and field.base_field.manual:
                    # Something went wrong while setting up a manual field.
                    # This can happen with related fields using another manual
                    # many2one field that has not been loaded, because the
                    # comodel does not exist yet. This can also be a manual
                    # function field depending on fields that are not loaded yet.
                    bad_fields.append(name)
                    continue
                raise

        for name in bad_fields:
            del cls._fields[name]
            delattr(cls, name)

        # fix up _rec_name
        if 'x_name' in bad_fields and cls._rec_name == 'x_name':
            cls._rec_name = None
            field = cls._fields['display_name']
            field.depends = tuple(name for name in field.depends if name != 'x_name')

        # map each field to the fields computed with the same method
        groups = defaultdict(list)
        for field in cls._fields.values():
            if field.compute:
                cls._field_computed[field] = group = groups[field.compute]
                group.append(field)
        for fields in groups.values():
            compute_sudo = fields[0].compute_sudo
            if not all(field.compute_sudo == compute_sudo for field in fields):
                _logger.warning("%s: inconsistent 'compute_sudo' for computed fields: %s",
                                self._name, ", ".join(field.name for field in fields))

    @api.model
    def _setup_complete(self):
        """ Setup recomputation triggers, and complete the model setup. """
        cls = type(self)

        # The triggers of a field F is a tree that contains the fields that
        # depend on F, together with the fields to inverse to find out which
        # records to recompute.
        #
        # For instance, assume that G depends on F, H depends on X.F, I depends
        # on W.X.F, and J depends on Y.F. The triggers of F will be the tree:
        #
        #                              [G]
        #                            X/   \Y
        #                          [H]     [J]
        #                        W/
        #                      [I]
        #
        # This tree provides perfect support for the trigger mechanism:
        # when F is modified on records,
        #  - mark G to recompute on records,
        #  - mark H to recompute on inverse(X, records),
        #  - mark I to recompute on inverse(W, inverse(X, records)),
        #  - mark J to recompute on inverse(Y, records).
        cls._field_triggers = cls.pool.field_triggers

        # register constraints and onchange methods
        cls._init_constraints_onchanges()
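
    # Concrete sketch (hypothetical models) of the trigger tree above, where
    # F is 'amount' on a line and the dependency path goes through the
    # one2many 'line_ids':
    #
    #     from odoo import api, fields, models
    #
    #     class XOrderLine(models.Model):
    #         _name = 'x.order.line'
    #         order_id = fields.Many2one('x.order')
    #         amount = fields.Float()
    #
    #     class XOrder(models.Model):
    #         _name = 'x.order'
    #         line_ids = fields.One2many('x.order.line', 'order_id')
    #         total = fields.Float(compute='_compute_total')
    #
    #         @api.depends('line_ids.amount')   # i.e. total depends on X.F
    #         def _compute_total(self):
    #             for order in self:
    #                 order.total = sum(order.line_ids.mapped('amount'))
    #
    # Writing line.amount marks 'total' to recompute on inverse(line_ids,
    # lines), i.e. on the lines' orders (found through order_id).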

    @api.model
    def fields_get(self, allfields=None, attributes=None):
        """ fields_get([fields][, attributes])

        Return the definition of each field.

        The returned value is a dictionary (indexed by field name) of
        dictionaries. The _inherits'd fields are included. The string, help,
        and selection (if present) attributes are translated.

        :param allfields: list of fields to document, all if empty or not provided
        :param attributes: list of description attributes to return for each field, all if empty or not provided
        """
        has_access = functools.partial(self.check_access_rights, raise_exception=False)
        readonly = not (has_access('write') or has_access('create'))

        res = {}
        for fname, field in self._fields.items():
            if allfields and fname not in allfields:
                continue
            if field.groups and not self.env.su and not self.user_has_groups(field.groups):
                continue

            description = field.get_description(self.env)
            if readonly:
                description['readonly'] = True
                description['states'] = {}
            if attributes:
                description = {key: val
                               for key, val in description.items()
                               if key in attributes}
            res[fname] = description

        return res
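
    # Usage sketch, given an Environment ``env`` (returned values are
    # illustrative):
    #
    #     info = env['res.partner'].fields_get(
    #         allfields=['name', 'email'],
    #         attributes=['type', 'string', 'required'])
    #     # info['email'] -> {'type': 'char', 'string': 'Email', 'required': False}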

    @api.model
    def get_empty_list_help(self, help):
        """ Generic method giving the help message displayed when having
            no result to display in a list or kanban view. By default it returns
            the help given in parameter that is generally the help message
            defined in the action.
        """
        return help

    @api.model
    def check_field_access_rights(self, operation, fields):
        """
        Check the user's access rights on the given fields. This raises an
        AccessError if the user does not have the rights. Otherwise it
        returns the fields (as-is if ``fields`` is not falsy, or the
        readable/writable fields if it is falsy).
        """
        if self.env.su:
            return fields or list(self._fields)

        def valid(fname):
            """ determine whether user has access to field ``fname`` """
            field = self._fields.get(fname)
            if field and field.groups:
                return self.user_has_groups(field.groups)
            else:
                return True

        if not fields:
            fields = [name for name in self._fields if valid(name)]
        else:
            invalid_fields = {name for name in fields if not valid(name)}
            if invalid_fields:
                _logger.info('Access Denied by ACLs for operation: %s, uid: %s, model: %s, fields: %s',
                             operation, self._uid, self._name, ', '.join(invalid_fields))

                description = self.env['ir.model']._get(self._name).name
                if not self.env.user.has_group('base.group_no_one'):
                    raise AccessError(
                        _('You do not have enough rights to access the fields "%(fields)s" on %(document_kind)s (%(document_model)s). '\
                          'Please contact your system administrator.\n\n(Operation: %(operation)s)') % {
                        'fields': ','.join(list(invalid_fields)),
                        'document_kind': description,
                        'document_model': self._name,
                        'operation': operation,
                    })

                def format_groups(field):
                    anyof = self.env['res.groups']
                    noneof = self.env['res.groups']
                    for g in field.groups.split(','):
                        if g.startswith('!'):
                            noneof |= self.env.ref(g[1:])
                        else:
                            anyof |= self.env.ref(g)
                    strs = []
                    if anyof:
                        strs.append(_("allowed for groups %s") % ', '.join(
                            anyof.sorted(lambda g: g.id)
                                 .mapped(lambda g: repr(g.display_name))
                        ))
                    if noneof:
                        strs.append(_("forbidden for groups %s") % ', '.join(
                            noneof.sorted(lambda g: g.id)
                                  .mapped(lambda g: repr(g.display_name))
                        ))
                    return '; '.join(strs)

                raise AccessError(_("""The requested operation can not be completed due to security restrictions.

Document type: %(document_kind)s (%(document_model)s)
Operation: %(operation)s
User: %(user)s
Fields:
%(fields_list)s""") % {
                    'document_model': self._name,
                    'document_kind': description or self._name,
                    'operation': operation,
                    'user': self._uid,
                    'fields_list': '\n'.join(
                        '- %s (%s)' % (f, format_groups(self._fields[f]))
                        for f in sorted(invalid_fields)
                    )
                })

        return fields
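
    # Usage sketch (hypothetical model with a groups-restricted field):
    #
    #     from odoo import fields, models
    #
    #     class XPlan(models.Model):
    #         _name = 'x.plan'
    #         margin = fields.Float(groups='base.group_system')
    #
    #     # as a user outside base.group_system:
    #     env['x.plan'].check_field_access_rights('read', None)
    #     # -> every field name except 'margin'
    #     env['x.plan'].check_field_access_rights('read', ['margin'])
    #     # -> raises AccessError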

    def read(self, fields=None, load='_classic_read'):
        """ read([fields])

        Reads the requested fields for the records in ``self``, low-level/RPC
        method. In Python code, prefer :meth:`~.browse`.

        :param fields: list of field names to return (default is all fields)
        :return: a list of dictionaries mapping field names to their values,
                 with one dictionary per record
        :raise AccessError: if user has no read rights on some of the given
                records
        """
        fields = self.check_field_access_rights('read', fields)

        # fetch stored fields from the database to the cache
        stored_fields = set()
        for name in fields:
            field = self._fields.get(name)
            if not field:
                raise ValueError("Invalid field %r on model %r" % (name, self._name))
            if field.store:
                stored_fields.add(name)
            elif field.compute:
                # optimization: prefetch direct field dependencies
                for dotname in field.depends:
                    f = self._fields[dotname.split('.')[0]]
                    if f.prefetch and (not f.groups or self.user_has_groups(f.groups)):
                        stored_fields.add(f.name)
        self._read(stored_fields)

        # retrieve results from records; this takes values from the cache and
        # computes remaining fields
        data = [(record, {'id': record._ids[0]}) for record in self]
        use_name_get = (load == '_classic_read')
        for name in fields:
            convert = self._fields[name].convert_to_read
            for record, vals in data:
                # missing records have their vals empty
                if not vals:
                    continue
                try:
                    vals[name] = convert(record[name], record, use_name_get)
                except MissingError:
                    vals.clear()
        result = [vals for record, vals in data if vals]

        return result
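
    # Usage sketch (ids and values illustrative):
    #
    #     rows = env['res.partner'].browse([1, 2]).read(['name'])
    #     # -> [{'id': 1, 'name': 'A'}, {'id': 2, 'name': 'B'}]
    #
    # With the default load='_classic_read', many2one values come back as
    # (id, display_name) pairs; any other load value yields bare ids.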

    def _fetch_field(self, field):
        """ Read from the database in order to fetch ``field`` (:class:`Field`
            instance) for ``self`` in cache.
        """
        self.check_field_access_rights('read', [field.name])
        # determine which fields can be prefetched
        if self._context.get('prefetch_fields', True) and field.prefetch:
            fnames = [
                name
                for name, f in self._fields.items()
                # select fields that can be prefetched
                if f.prefetch
                # discard fields with groups that the user may not access
                if not (f.groups and not self.user_has_groups(f.groups))
                # discard fields that must be recomputed
                if not (f.compute and self.env.records_to_compute(f))
            ]
            if field.name not in fnames:
                fnames.append(field.name)
                self = self - self.env.records_to_compute(field)
        else:
            fnames = [field.name]
        self._read(fnames)
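
    # e.g. browsing with prefetching disabled fetches a single field at a
    # time instead of the whole prefetch set:
    #
    #     env['res.partner'].with_context(prefetch_fields=False).browse(1).name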

    def _read(self, fields):
        """ Read the given fields of the records in ``self`` from the database,
            and store them in cache. Access errors are also stored in cache.
            Skip fields that are not stored.

            :param fields: list of field names to read; fields stored as
                columns of model ``self`` are guaranteed to be read, while
                inherited fields (columns of parent models) may be skipped
        """
        if not self:
            return
        self.check_access_rights('read')

        # if a read() follows a write(), we must flush updates, as read() will
        # fetch from the database and overwrite the cache (`test_update_with_id`)
        self.flush(fields, self)

        field_names = []
        inherited_field_names = []
        for name in fields:
            field = self._fields.get(name)
            if field:
                if field.store:
                    field_names.append(name)
                elif field.base_field.store:
                    inherited_field_names.append(name)
            else:
                _logger.warning("%s.read() with unknown field '%s'", self._name, name)

        env = self.env
        cr, user, context, su = env.args

        # make a query object for selecting ids, and apply security rules to it
        param_ids = object()
        query = Query(['"%s"' % self._table], ['"%s".id IN %%s' % self._table], [param_ids])
        self._apply_ir_rules(query, 'read')

        # determine the fields that are stored as columns in tables; ignore 'id'
        fields_pre = [
            field
            for field in (self._fields[name] for name in field_names + inherited_field_names)
            if field.name != 'id'
            if field.base_field.store and field.base_field.column_type
            if not (field.inherited and callable(field.base_field.translate))
        ]

        # the query may involve several tables: we need fully-qualified names
        def qualify(field):
            col = field.name
            res = self._inherits_join_calc(self._table, field.name, query)
            if field.type == 'binary' and (context.get('bin_size') or context.get('bin_size_' + col)):
                # PG 9.2 introduces conflicting pg_size_pretty(numeric) -> need ::cast
                res = 'pg_size_pretty(length(%s)::bigint)' % res
            return '%s as "%s"' % (res, col)

        # selected fields are: 'id' followed by fields_pre
        qual_names = [qualify(field) for field in [self._fields['id']] + fields_pre]

        # determine the actual query to execute
        from_clause, where_clause, params = query.get_sql()
        query_str = "SELECT %s FROM %s WHERE %s" % (",".join(qual_names), from_clause, where_clause)

        # fetch one list of record values per field
        param_pos = params.index(param_ids)

        result = []
        for sub_ids in cr.split_for_in_conditions(self.ids):
            params[param_pos] = tuple(sub_ids)
            cr.execute(query_str, params)
            result += cr.fetchall()

        fetched = self.browse()
        if result:
            cols = zip(*result)
            ids = next(cols)
            fetched = self.browse(ids)

            for field in fields_pre:
                values = next(cols)
                if context.get('lang') and not field.inherited and callable(field.translate):
                    translate = field.get_trans_func(fetched)
                    values = list(values)
                    for index in range(len(ids)):
                        values[index] = translate(ids[index], values[index])

                # store values in cache
                self.env.cache.update(fetched, field, values)

            # determine the fields that must be processed now;
            # for the sake of simplicity, we ignore inherited fields
            for name in field_names:
                field = self._fields[name]
                if not field.column_type:
                    field.read(fetched)
                if field.deprecated:
                    _logger.warning('Field %s is deprecated: %s', field, field.deprecated)

        # possibly raise exception for the records that could not be read
        missing = self - fetched
        if missing:
            extras = fetched - self
            if extras:
                raise AccessError(
                    _("Database fetch misses ids ({}) and has extra ids ({}), may be caused by a type incoherence in a previous request").format(
                        missing._ids, extras._ids,
                    ))
            # the missing records that still exist in database are forbidden by record rules
            forbidden = missing.exists()
            if forbidden:
                raise self.env['ir.rule']._make_access_error('read', forbidden)

    def get_metadata(self):
        """Return some metadata about the given records.

        :return: list of ownership dictionaries for each requested record
        :rtype: list of dictionaries with the following keys:

            * id: object id
            * create_uid: user who created the record
            * create_date: date when the record was created
            * write_uid: last user who changed the record
            * write_date: date of the last change to the record
            * xmlid: XML ID to use to refer to this record (if there is one), in format ``module.name``
            * noupdate: A boolean telling if the record will be updated or not
        """

        IrModelData = self.env['ir.model.data'].sudo()
        if self._log_access:
            res = self.sudo().read(LOG_ACCESS_COLUMNS)
        else:
            res = [{'id': x} for x in self.ids]
        xml_data = dict((x['res_id'], x) for x in IrModelData.search_read([('model', '=', self._name),
                                                                           ('res_id', 'in', self.ids)],
                                                                          ['res_id', 'noupdate', 'module', 'name'],
                                                                          order='id',
                                                                          limit=1))
        for r in res:
            value = xml_data.get(r['id'], {})
            r['xmlid'] = '%(module)s.%(name)s' % value if value else False
            r['noupdate'] = value.get('noupdate', False)
        return res
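
    # Usage sketch (values illustrative):
    #
    #     env['res.partner'].browse(1).get_metadata()
    #     # -> [{'id': 1,
    #     #      'create_uid': (1, 'OdooBot'), 'create_date': '2019-10-01 00:00:00',
    #     #      'write_uid': (1, 'OdooBot'), 'write_date': '2019-10-01 00:00:00',
    #     #      'xmlid': 'base.main_partner', 'noupdate': False}]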

    def get_base_url(self):
        """
        Return the root URL for the given record.

        By default, it returns the ``web.base.url`` ir.config_parameter, but
        models may override it.

        :return: the base url for this record
        :rtype: string

        """
        self.ensure_one()
        return self.env['ir.config_parameter'].sudo().get_param('web.base.url')

    def _check_concurrency(self):
        if not (self._log_access and self._context.get(self.CONCURRENCY_CHECK_FIELD)):
            return
        check_clause = "(id = %s AND %s < COALESCE(write_date, create_date, (now() at time zone 'UTC'))::timestamp)"
        for sub_ids in self._cr.split_for_in_conditions(self.ids):
            nclauses = 0
            params = []
            for id in sub_ids:
                id_ref = "%s,%s" % (self._name, id)
                update_date = self._context[self.CONCURRENCY_CHECK_FIELD].pop(id_ref, None)  # optimistic locking
                if update_date:  # optimistic locking
                    nclauses += 1
                    params.extend([id, update_date])
            if not nclauses:
                continue
            query = "SELECT id FROM %s WHERE %s" % (self._table, " OR ".join([check_clause] * nclauses))
            self._cr.execute(query, tuple(params))
            res = self._cr.fetchone()
            if res:
                # mention the first one only to keep the error message readable
                raise ValidationError(_('A document was modified since you last viewed it (%s:%d)') % (self._description, res[0]))
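
    # Sketch: clients opt into this optimistic-locking check by passing the
    # last-known write date in the context under CONCURRENCY_CHECK_FIELD
    # ('__last_update'); the timestamp is illustrative:
    #
    #     rec.with_context(__last_update={
    #         '%s,%d' % (rec._name, rec.id): '2019-10-01 00:00:00',
    #     })._check_concurrency()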

    def _check_company(self, fnames=None):
        """ Check the companies of the values of the given field names. """
        if fnames is None:
            fnames = self._fields

        regular_fields = []
        property_fields = []
        for name in fnames:
            field = self._fields[name]
            if field.relational and field.check_company and \
                    'company_id' in self.env[field.comodel_name]:
                if not field.company_dependent:
                    regular_fields.append(name)
                else:
                    property_fields.append(name)

        if not (regular_fields or property_fields):
            return

        inconsistent_fields = set()
        inconsistent_recs = self.browse()
        for record in self:
            company = record.company_id if record._name != 'res.company' else record
            # The first part of the check verifies that all records linked via relation fields are compatible
            # with the company of the origin document, i.e. `self.account_id.company_id == self.company_id`
            for name in regular_fields:
                # Special case with `res.users` since a user can belong to multiple companies.
                if record[name]._name == 'res.users' and record[name].company_ids:
                    if not (company <= record[name].company_ids):
                        inconsistent_fields.add(name)
                        inconsistent_recs |= record
                elif not (record[name].company_id <= company):
                    inconsistent_fields.add(name)
                    inconsistent_recs |= record
            # The second part of the check (for property / company-dependent fields) verifies that the records
            # linked via those relation fields are compatible with the company that owns the property value, i.e.
            # the company for which the value is being assigned, i.e:
            #      `self.property_account_payable_id.company_id == self.env.context['force_company']`
            if self.env.context.get('force_company'):
                company = self.env['res.company'].browse(self.env.context['force_company'])
            else:
                company = self.env.company
            for name in property_fields:
                # Special case with `res.users` since a user can belong to multiple companies.
                if record[name]._name == 'res.users' and record[name].company_ids:
                    if not (company <= record[name].company_ids):
                        inconsistent_fields.add(name)
                        inconsistent_recs |= record
                elif not (record[name].company_id <= company):
                    inconsistent_fields.add(name)
                    inconsistent_recs |= record

        if inconsistent_fields:
            message = _("""Some records are incompatible with the company of the %(document_descr)s.

Incompatibilities:
Fields: %(fields)s
Record ids: %(records)s
""")
            raise UserError(message % {
                'document_descr': self.env['ir.model']._get(self._name).name,
                'fields': ', '.join(sorted(inconsistent_fields)),
                'records': ', '.join([str(a) for a in inconsistent_recs.ids[:6]]),
            })
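
    # Sketch (hypothetical models): a relational field opts into this check
    # with check_company=True:
    #
    #     from odoo import fields, models
    #
    #     class XInvoice(models.Model):
    #         _name = 'x.invoice'
    #         company_id = fields.Many2one('res.company', required=True)
    #         account_id = fields.Many2one('account.account', check_company=True)
    #
    #     # _check_company(['account_id']) raises UserError when
    #     # invoice.account_id.company_id differs from invoice.company_id.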

    @api.model
    def check_access_rights(self, operation, raise_exception=True):
        """ Verifies that the operation given by ``operation`` is allowed for
            the current user according to the access rights.
        """
        return self.env['ir.model.access'].check(self._name, operation, raise_exception)

    def check_access_rule(self, operation):
        """ Verifies that the operation given by ``operation`` is allowed for
            the current user according to ir.rules.

           :param operation: one of ``write``, ``unlink``
           :raise UserError: * if current ir.rules do not permit this operation.
           :return: None if the operation is allowed
        """
        if self.env.su:
            return

        # SQL alternative if computing in memory is too slow for large datasets
        # invalid = self - self._filter_access_rules(operation)
        invalid = self - self._filter_access_rules_python(operation)
        if not invalid:
            return

        forbidden = invalid.exists()
        if forbidden:
            # the invalid records are (partially) hidden by access rules
            if self.is_transient():
                raise AccessError(_('For this kind of document, you may only access records you created yourself.\n\n(Document type: %s)') % (self._description,))

            raise self.env['ir.rule']._make_access_error(operation, forbidden)

        # If we get here, the invalid records are not in the database.
        if operation in ('read', 'unlink'):
            # No need to warn about deleting an already deleted record.
            # And no error when reading a record that was deleted, to prevent spurious
            # errors for non-transactional search/read sequences coming from clients.
            return
        _logger.info('Failed operation on deleted record(s): %s, uid: %s, model: %s', operation, self._uid, self._name)
        raise MissingError(
            _('Missing document(s)') + ':' + _('One of the documents you are trying to access has been deleted, please try again after refreshing.')
            + '\n\n({} {}, {} {}, {} {}, {} {})'.format(
                _('Document type:'), self._name, _('Operation:'), operation,
                _('Records:'), invalid.ids[:6], _('User:'), self._uid,
            )
        )
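
    # Usage sketch: the standard guard pair applied before write()/unlink():
    #
    #     records.check_access_rights('write')   # ACLs (ir.model.access)
    #     records.check_access_rule('write')     # record rules (ir.rule)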

    def _filter_access_rules(self, operation):
        """ Return the subset of ``self`` for which ``operation`` is allowed. """
        if self.env.su:
            return self

        if not self._ids:
            return self

        if self.is_transient():
            # There is only one implicit access rule for transient models: owner only!
            # This is ok to hardcode because we assert that TransientModels always
            # have log_access enabled so that the create_uid column is always there.
            # And even with _inherits, these fields are always present in the local
            # table too, so no need for JOINs.
            query = "SELECT id FROM {} WHERE id IN %s AND create_uid=%s".format(self._table)
            self._cr.execute(query, (tuple(self.ids), self._uid))
            return self.browse([row[0] for row in self._cr.fetchall()])

        where_clause, where_params, tables = self.env['ir.rule'].domain_get(self._name, operation)
        if not where_clause:
            return self

        # determine ids in database that satisfy ir.rules
        # TODO: we should add a flush here, based on domain's arguments
        valid_ids = set()
        query = "SELECT {}.id FROM {} WHERE {}.id IN %s AND {}".format(
            self._table, ",".join(tables), self._table, " AND ".join(where_clause),
        )
        self._flush_search([])
        for sub_ids in self._cr.split_for_in_conditions(self.ids):
            self._cr.execute(query, [sub_ids] + where_params)
            valid_ids.update(row[0] for row in self._cr.fetchall())

        # return new ids without origin and ids with origin in valid_ids
        return self.browse([
            it
            for it in self._ids
            if not (it or it.origin) or (it or it.origin) in valid_ids
        ])

    def _filter_access_rules_python(self, operation):
        dom = self.env['ir.rule']._compute_domain(self._name, operation)
        return self.sudo().filtered_domain(dom or [])

    def unlink(self):
        """ unlink()

        Deletes the records of the current set

        :raise AccessError: * if user has no unlink rights on the requested object
                            * if user tries to bypass access rules for unlink on the requested object
        :raise UserError: if the record is default property for other records

        """
        if not self:
            return True

        self.check_access_rights('unlink')
        self._check_concurrency()

        # mark fields that depend on 'self' to recompute them after 'self' has
        # been deleted (like updating a sum of lines after deleting one line)
        self.flush()
        self.modified(self._fields)

        # Check if the records are used as default properties.
        refs = ['%s,%s' % (self._name, i) for i in self.ids]
        if self.env['ir.property'].search([('res_id', '=', False), ('value_reference', 'in', refs)]):
            raise UserError(_('Unable to delete this document because it is used as a default property'))

        # Delete the records' properties.
        with self.env.norecompute():
            self.check_access_rule('unlink')
            self.env['ir.property'].search([('res_id', 'in', refs)]).sudo().unlink()

            cr = self._cr
            Data = self.env['ir.model.data'].sudo().with_context({})
            Defaults = self.env['ir.default'].sudo()
            Attachment = self.env['ir.attachment'].sudo()
            ir_model_data_unlink = Data
            ir_attachment_unlink = Attachment

            # TOFIX: this avoids an infinite loop when trying to recompute a
            # field, which triggers the recomputation of another field using the
            # same compute function, which then triggers again the computation
            # of those two fields
            for field in self._fields.values():
                self.env.remove_to_compute(field, self)

            for sub_ids in cr.split_for_in_conditions(self.ids):
                query = "DELETE FROM %s WHERE id IN %%s" % self._table
                cr.execute(query, (sub_ids,))

                # Removing the ir_model_data reference if the record being deleted
                # is a record created by xml/csv file, as these are not connected
                # with real database foreign keys, and would be dangling references.
                #
                # Note: the following steps are performed as superuser to avoid
                # access rights restrictions, and with no context to avoid possible
                # side-effects during admin calls.
                data = Data.search([('model', '=', self._name), ('res_id', 'in', sub_ids)])
                if data:
                    ir_model_data_unlink |= data

                # For the same reason, remove the defaults having some of the
                # records as value
                Defaults.discard_records(self.browse(sub_ids))

                # For the same reason, remove the relevant records in ir_attachment
                # (the search is performed with sql as the search method of
                # ir_attachment is overridden to hide attachments of deleted
                # records)
                query = 'SELECT id FROM ir_attachment WHERE res_model=%s AND res_id IN %s'
                cr.execute(query, (self._name, sub_ids))
                attachments = Attachment.browse([row[0] for row in cr.fetchall()])
                if attachments:
                    ir_attachment_unlink |= attachments.sudo()

            # invalidate the *whole* cache, since the orm does not handle all
            # changes made in the database, like cascading delete!
            self.invalidate_cache()
            if ir_model_data_unlink:
                ir_model_data_unlink.unlink()
            if ir_attachment_unlink:
                ir_attachment_unlink.unlink()
            # DLE P93: flush after the unlink, for recompute fields depending on
            # the modified of the unlink
            self.flush()

        # auditing: deletions are infrequent and leave no trace in the database
        _unlink.info('User #%s deleted %s records with IDs: %r', self._uid, self._name, self.ids)

        return True

 
