彻底解决ODOO时区问题

环境

odoo版本:13

部署方式:docker-compose

数据库(PostgreSQL)版本:12

linux:Ubuntu 18

方案

系统默认的datetime字段修改(create_date、write_date......)

比较暴力:直接打猴子补丁(monkey patch),改写 ORM 底层的 _write 和 _create 方法

# Target timezone for every timestamp written by these patches.
# Read from the odoo server config file (key 'timezone'); defaults to Asia/Shanghai.
TIMEZONE = odoo.tools.config.get('timezone', 'Asia/Shanghai')
def _write(self, vals):
    """Monkey patch of ``BaseModel._write`` (Odoo 13).

    Identical to the upstream implementation except that the automatic
    ``write_date`` log column is filled with
    ``now() AT TIME ZONE <TIMEZONE>`` (local wall-clock time) instead of
    the upstream ``now() AT TIME ZONE 'UTC'``.

    :param vals: field name -> value mapping of stored columns to update
    :return: True
    :raise MissingError: if one of the records was deleted concurrently
    """
    # low-level implementation of write()
    if not self:
        return True

    write_uid = vals.get('write_uid')
    self._check_concurrency()
    cr = self._cr

    # determine records that require updating parent_path
    parent_records = self._parent_store_update_prepare(vals)

    # determine SQL values
    columns = []  # list of (column_name, format, value)

    for name, val in vals.items():
        if self._log_access and name in LOG_ACCESS_COLUMNS and not val:
            continue
        field = self._fields[name]
        assert field.store

        if field.deprecated:
            _logger.warning('Field %s is deprecated: %s', field, field.deprecated)

        assert field.column_type
        columns.append((name, field.column_format, val))

    # if self.write_uid and write_uid == 1:
    #     if self.env['ir.model.fields'].search_count([('model', '=', self._name),
    #                                                  ('name', '=', 'write_uid')]):
    #         vals = dict(vals)
    #         del vals['write_uid']
    #         if 'write_date' in vals:
    #             del vals['write_date']
    # if self._log_access and write_uid != 1:
    # Auto-fill the log columns only when the caller did not supply them.
    if self._log_access:
        if not vals.get('write_uid'):
            columns.append(('write_uid', '%s', self._uid))
        if not vals.get('write_date'):
            # AsIs injects raw SQL, so the timestamp is computed server-side
            # in the configured local timezone (the only change vs upstream,
            # which uses "at time zone 'UTC'").
            columns.append(('write_date', '%s', AsIs(f"(now() at time zone '{TIMEZONE}')")))

    # update columns
    if columns:
        query = 'UPDATE "%s" SET %s WHERE id IN %%s' % (
            self._table, ','.join('"%s"=%s' % (column[0], column[1]) for column in columns),
        )
        params = [column[2] for column in columns]
        for sub_ids in cr.split_for_in_conditions(set(self.ids)):
            cr.execute(query, params + [sub_ids])
            if cr.rowcount != len(sub_ids):
                raise MissingError(
                    _(
                        'One of the records you are trying to modify has already been deleted (Document type: %s).') % self._description
                    + '\n\n({} {}, {} {})'.format(_('Records:'), sub_ids[:6], _('User:'), self._uid)
                )

    # update parent_path
    if parent_records:
        parent_records._parent_store_update()

    return True
# NOTE(review): BM is presumably odoo.models.BaseModel, aliased elsewhere
# in this patch module — confirm.
BM._write = _write

@api.model
def _create(self, data_list):
    """ Create records from the stored field values in ``data_list``.

    Monkey patch of ``BaseModel._create`` (Odoo 13): identical to the
    upstream implementation except that the automatic ``create_date``
    and ``write_date`` log columns are stored as local wall-clock time
    (``now() AT TIME ZONE <TIMEZONE>``) instead of UTC.

    :param data_list: list of dicts with keys 'stored', 'inherited',
        'protected' (and 'record' filled in below), as prepared by create()
    :return: recordset of the newly created records
    """
    assert data_list
    cr = self.env.cr
    quote = '"{}"'.format

    # insert rows
    ids = []                        # ids of created records
    other_fields = set()            # non-column fields
    translated_fields = set()       # translated fields

    # column names, formats and values (for common fields)
    columns0 = [('id', "nextval(%s)", self._sequence)]
    if self._log_access:
        # The AsIs timestamps below are the only change vs upstream:
        # they evaluate server-side in the configured local timezone
        # instead of "at time zone 'UTC'".
        columns0.append(('create_uid', "%s", self._uid))
        columns0.append(('create_date', "%s", AsIs(f"(now() at time zone '{TIMEZONE}')")))
        columns0.append(('write_uid', "%s", self._uid))
        columns0.append(('write_date', "%s", AsIs(f"(now() at time zone '{TIMEZONE}')")))

    for data in data_list:
        # determine column values
        stored = data['stored']
        columns = [column for column in columns0 if column[0] not in stored]
        for name, val in sorted(stored.items()):
            field = self._fields[name]
            assert field.store

            if field.column_type:
                col_val = field.convert_to_column(val, self, stored)
                columns.append((name, field.column_format, col_val))
                if field.translate is True:
                    translated_fields.add(field)
            else:
                other_fields.add(field)

        # Insert rows one by one
        # - as records don't all specify the same columns, code building batch-insert query
        #   was very complex
        # - and the gains were low, so not worth spending so much complexity
        #
        # It also seems that we have to be careful with INSERTs in batch, because they have the
        # same problem as SELECTs:
        # If we inject a lot of data in a single query, we fall into pathological perfs in
        # terms of SQL parser and the execution of the query itself.
        # In SELECT queries, we inject max 1000 ids (integers) when we can, because we know
        # that this limit is well managed by PostgreSQL.
        # In INSERT queries, we inject integers (small) and larger data (TEXT blocks for
        # example).
        #
        # The problem then becomes: how to "estimate" the right size of the batch to have
        # good performance?
        #
        # This requires extensive testing, and it was prefered not to introduce INSERTs in
        # batch, to avoid regressions as much as possible.
        #
        # That said, we haven't closed the door completely.
        query = "INSERT INTO {} ({}) VALUES ({}) RETURNING id".format(
            quote(self._table),
            ", ".join(quote(name) for name, fmt, val in columns),
            ", ".join(fmt for name, fmt, val in columns),
        )
        params = [val for name, fmt, val in columns]
        cr.execute(query, params)
        ids.append(cr.fetchone()[0])

    # put the new records in cache, and update inverse fields, for many2one
    #
    # cachetoclear is an optimization to avoid modified()'s cost until other_fields are processed
    cachetoclear = []
    records = self.browse(ids)
    inverses_update = defaultdict(list)     # {(field, value): ids}
    for data, record in zip(data_list, records):
        data['record'] = record
        # DLE P104: test_inherit.py, test_50_search_one2many
        vals = dict({k: v for d in data['inherited'].values() for k, v in d.items()}, **data['stored'])
        set_vals = list(vals) + LOG_ACCESS_COLUMNS + [self.CONCURRENCY_CHECK_FIELD, 'id', 'parent_path']
        for field in self._fields.values():
            if field.type in ('one2many', 'many2many'):
                self.env.cache.set(record, field, ())
            elif field.related and not field.column_type:
                self.env.cache.set(record, field, field.convert_to_cache(None, record))
            # DLE P123: `test_adv_activity`, `test_message_assignation_inbox`, `test_message_log`, `test_create_mail_simple`, ...
            # Set `mail.message.parent_id` to False in cache so it doesn't do the useless SELECT when computing the modified of `child_ids`
            # in other words, if `parent_id` is not set, no other message `child_ids` are impacted.
            # + avoid the fetch of fields which are False. e.g. if a boolean field is not passed in vals and as no default set in the field attributes,
            # then we know it can be set to False in the cache in the case of a create.
            elif field.name not in set_vals and not field.compute:
                self.env.cache.set(record, field, field.convert_to_cache(None, record))
        for fname, value in vals.items():
            field = self._fields[fname]
            if field.type in ('one2many', 'many2many'):
                cachetoclear.append((record, field))
            else:
                cache_value = field.convert_to_cache(value, record)
                self.env.cache.set(record, field, cache_value)
                if field.type in ('many2one', 'many2one_reference') and record._field_inverses[field]:
                    inverses_update[(field, cache_value)].append(record.id)

    for (field, value), record_ids in inverses_update.items():
        field._update_inverses(self.browse(record_ids), value)

    # update parent_path
    records._parent_store_create()

    # protect fields being written against recomputation
    protected = [(data['protected'], data['record']) for data in data_list]
    with self.env.protecting(protected):
        # mark computed fields as todo
        records.modified(self._fields, create=True)

        if other_fields:
            # discard default values from context for other fields
            others = records.with_context(clean_context(self._context))
            for field in sorted(other_fields, key=attrgetter('_sequence')):
                field.create([
                    (other, data['stored'][field.name])
                    for other, data in zip(others, data_list)
                    if field.name in data['stored']
                ])

            # mark fields to recompute
            records.modified([field.name for field in other_fields], create=True)

        # if value in cache has not been updated by other_fields, remove it
        for record, field in cachetoclear:
            if self.env.cache.contains(record, field) and not self.env.cache.get(record, field):
                self.env.cache.remove(record, field)

    # check Python constraints for stored fields
    records._validate_fields(name for data in data_list for name in data['stored'])
    records.check_access_rule('create')

    # add translations
    if self.env.lang and self.env.lang != 'en_US':
        Translations = self.env['ir.translation']
        for field in translated_fields:
            tname = "%s,%s" % (field.model_name, field.name)
            for data in data_list:
                if field.name in data['stored']:
                    record = data['record']
                    val = data['stored'][field.name]
                    Translations._set_ids(tname, 'model', self.env.lang, record.ids, val, val)

    return records
# NOTE(review): BM is presumably odoo.models.BaseModel, aliased elsewhere
# in this patch module — confirm.
BM._create = _create

sql中的“at time zone 'UTC'”

既然会执行sql,那么。。。直接改写execute方法啊!!!遇到UTC直接替换config配置的时区~

def execute(self, query, params=None, log_exceptions=None):
    """Monkey patch of ``odoo.sql_db.Cursor.execute``.

    Identical to the upstream implementation except that any
    ``AT TIME ZONE 'UTC'`` clause in the query text is rewritten to use
    the timezone from the server config (``TIMEZONE``) before execution,
    so server-side timestamp conversions yield local time.

    :param query: SQL text to execute
    :param params: tuple/list/dict of query parameters, or falsy
    :param log_exceptions: override for the cursor's default
        exception-logging behaviour
    :return: the underlying psycopg2 cursor's execute() result
    :raise ValueError: if params is neither tuple, list nor dict
    """
    import re  # local import keeps this patch self-contained

    # Only rewrite explicit timezone-conversion clauses.  The previous
    # blanket query.replace('UTC'/'utc', 'Asia/Shanghai') corrupted any
    # query whose identifiers or string literals merely contained the
    # substring "utc" (e.g. a column named "utc_offset" or user data),
    # and hard-coded the zone instead of honouring the TIMEZONE config.
    if 'utc' in query.lower():
        query = re.sub(r"(?i)at\s+time\s+zone\s+'utc'",
                       "at time zone '%s'" % TIMEZONE, query)
    if params and not isinstance(params, (tuple, list, dict)):
        # psycopg2's TypeError is not clear if you mess up the params
        raise ValueError("SQL query parameters should be a tuple, list or dict; got %r" % (params,))

    if self.sql_log:
        encoding = psycopg2.extensions.encodings[self.connection.encoding]
        _logger.debug("query: %s", self._obj.mogrify(query, params).decode(encoding, 'replace'))
    now = time.time()
    try:
        params = params or None
        res = self._obj.execute(query, params)
    except Exception as e:
        if self._default_log_exceptions if log_exceptions is None else log_exceptions:
            _logger.error("bad query: %s\nERROR: %s", tools.ustr(self._obj.query or query), e)
        raise

    # simple query count is always computed
    self.sql_log_count += 1
    delay = (time.time() - now)
    if hasattr(threading.current_thread(), 'query_count'):
        threading.current_thread().query_count += 1
        threading.current_thread().query_time += delay

    # advanced stats only if sql_log is enabled
    if self.sql_log:
        delay *= 1E6

        query_lower = self._obj.query.decode().lower()
        res_from = re_from.match(query_lower)
        if res_from:
            self.sql_from_log.setdefault(res_from.group(1), [0, 0])
            self.sql_from_log[res_from.group(1)][0] += 1
            self.sql_from_log[res_from.group(1)][1] += delay
        res_into = re_into.match(query_lower)
        if res_into:
            self.sql_into_log.setdefault(res_into.group(1), [0, 0])
            self.sql_into_log[res_into.group(1)][0] += 1
            self.sql_into_log[res_into.group(1)][1] += delay
    return res
# NOTE(review): `cr` here is presumably the odoo.sql_db.Cursor class,
# aliased elsewhere in this patch module — patching a single cursor
# instance would only affect that one cursor. Confirm.
cr.execute = execute

orm取时间方法(fields.Date.now()...)

from odoo import fields as FD, api

# tzinfo object for the configured local timezone.
TZ = pytz.timezone(TIMEZONE)
@staticmethod
def now(*args):
    """Replacement for ``fields.Datetime.now``: current local wall-clock
    time as a naive datetime, truncated to whole seconds.

    Equivalent to the original strftime/strptime round-trip, without the
    string-formatting detour.
    """
    return datetime.now(tz=TZ).replace(microsecond=0, tzinfo=None)

@staticmethod
def today(*args):
    """Replacement for ``fields.Datetime.today``: local midnight of the
    current day as a naive datetime.

    Equivalent to the original strftime/strptime round-trip, without the
    string-formatting detour.
    """
    return datetime.now(tz=TZ).replace(hour=0, minute=0, second=0,
                                       microsecond=0, tzinfo=None)

@staticmethod
def date_today(*args):
    """Replacement for ``fields.Date.today``: current date in the
    configured local timezone.

    Equivalent to the original strftime/strptime round-trip, without the
    string-formatting detour.
    """
    return datetime.now(tz=TZ).date()

# Install the replacements on the ORM field classes, so every call to
# fields.Datetime.now()/today() and fields.Date.today() across the code
# base returns local time instead of UTC.
FD.Datetime.now = now
FD.Datetime.today = today
FD.Date.today = date_today

数据库时区修改

定时任务!这个最头大。。。

def cron_thread(self, number):
    """Monkey patch of the server's ``cron_thread`` worker loop.

    Same polling loop as upstream, but dispatches jobs through the
    timezone-aware ``IrCron._acquire_job`` from san_tools instead of the
    stock ``odoo.addons.base.models.ir_cron.ir_cron``.

    :param number: index of this cron worker thread (staggers the sleep)
    """
    # from odoo.addons.base.models.ir_cron import ir_cron
    from san_tools.models.timezone import IrCron
    while True:
        time.sleep(SLEEP_INTERVAL + number)  # Steve Reich timing style
        registries = odoo.modules.registry.Registry.registries
        _logger.debug('cron%d polling for jobs', number)
        for db_name, registry in registries.items():
            if registry.ready:
                thread = threading.currentThread()
                thread.start_time = time.time()
                try:
                    IrCron._acquire_job(db_name)
                except Exception:
                    _logger.warning('cron%d encountered an Exception:', number, exc_info=True)
                thread.start_time = None

# NOTE(review): TS is presumably odoo.service.server.ThreadedServer,
# aliased elsewhere in this patch module — confirm.
TS.cron_thread = cron_thread

class IrCron(models.Model):
    """Copy of the Odoo 13 ``ir.cron`` job-acquisition machinery.

    The classmethods below reproduce the upstream implementation; the
    "at time zone 'UTC'" comparisons in the SQL are rewritten at runtime
    by the patched ``Cursor.execute``, and ``fields.Datetime.now()`` is
    the patched local-time version, so nextcall/lastcall are handled in
    the configured local timezone.
    """
    _inherit = 'ir.cron'

    @classmethod
    def _acquire_job(cls, db_name):
        """ Try to process all cron jobs.

        This selects in database all the jobs that should be processed. It then
        tries to lock each of them and, if it succeeds, run the cron job (if it
        doesn't succeed, it means the job was already locked to be taken care
        of by another thread) and return.

        This method hides most exceptions related to the database's version, the
        modules' state, and such.
        """
        try:
            cls._process_jobs(db_name)
        except BadVersion:
            _logger.warning('Skipping database %s as its base version is not %s.', db_name, BASE_VERSION)
        except BadModuleState:
            _logger.warning('Skipping database %s because of modules to install/upgrade/remove.', db_name)
        except psycopg2.ProgrammingError as e:
            if e.pgcode == '42P01':
                # Class 42 — Syntax Error or Access Rule Violation; 42P01: undefined_table
                # The table ir_cron does not exist; this is probably not an OpenERP database.
                _logger.warning('Tried to poll an undefined table on database %s.', db_name)
            else:
                raise
        except Exception:
            _logger.warning('Exception in cron:', exc_info=True)

    @classmethod
    def _process_jobs(cls, db_name):
        """ Try to process all cron jobs.

        This selects in database all the jobs that should be processed. It then
        tries to lock each of them and, if it succeeds, run the cron job (if it
        doesn't succeed, it means the job was already locked to be taken care
        of by another thread) and return.

        :raise BadVersion: if the version is different from the worker's
        :raise BadModuleState: if modules are to install/upgrade/remove
        """
        db = odoo.sql_db.db_connect(db_name)
        threading.current_thread().dbname = db_name
        try:
            with db.cursor() as cr:
                # Make sure the database has the same version as the code of
                # base and that no module must be installed/upgraded/removed
                cr.execute("SELECT latest_version FROM ir_module_module WHERE name=%s", ['base'])
                (version,) = cr.fetchone()
                cr.execute("SELECT COUNT(*) FROM ir_module_module WHERE state LIKE %s", ['to %'])
                (changes,) = cr.fetchone()
                if version is None:
                    raise BadModuleState()
                elif version != BASE_VERSION:
                    raise BadVersion()
                # Careful to compare timestamps with 'UTC' - everything is UTC as of v6.1.
                # NOTE(review): with the patched Cursor.execute, the 'UTC'
                # below is rewritten to the configured TIMEZONE at runtime.
                cr.execute("""SELECT * FROM ir_cron
                              WHERE numbercall != 0
                                  AND active AND nextcall <= (now() at time zone 'UTC')
                              ORDER BY priority""")
                jobs = cr.dictfetchall()
            if changes:
                if not jobs:
                    raise BadModuleState()
                # nextcall is never updated if the cron is not executed,
                # it is used as a sentinel value to check whether cron jobs
                # have been locked for a long time (stuck)
                parse = fields.Datetime.from_string
                oldest = min([parse(job['nextcall']) for job in jobs])
                if fields.Datetime.now() - oldest > MAX_FAIL_TIME:
                    odoo.modules.reset_modules_state(db_name)
                else:
                    raise BadModuleState()

            for job in jobs:
                lock_cr = db.cursor()
                try:
                    # Try to grab an exclusive lock on the job row from within the task transaction
                    # Restrict to the same conditions as for the search since the job may have already
                    # been run by an other thread when cron is running in multi thread
                    lock_cr.execute("""SELECT *
                                       FROM ir_cron
                                       WHERE numbercall != 0
                                          AND active
                                          AND nextcall <= (now() at time zone 'UTC')
                                          AND id=%s
                                       FOR UPDATE NOWAIT""",
                                   (job['id'],), log_exceptions=False)

                    locked_job = lock_cr.fetchone()
                    if not locked_job:
                        _logger.debug("Job `%s` already executed by another process/thread. skipping it", job['cron_name'])
                        continue
                    # Got the lock on the job row, run its code
                    _logger.info('Starting job `%s`.', job['cron_name'])
                    job_cr = db.cursor()
                    try:
                        cls._process_job(job_cr, job, lock_cr)
                        _logger.info('Job `%s` done.', job['cron_name'])
                    except Exception:
                        _logger.exception('Unexpected exception while processing cron job %r', job)
                    finally:
                        job_cr.close()

                except psycopg2.OperationalError as e:
                    if e.pgcode == '55P03':
                        # Class 55: Object not in prerequisite state; 55P03: lock_not_available
                        _logger.debug('Another process/thread is already busy executing job `%s`, skipping it.', job['cron_name'])
                        continue
                    else:
                        # Unexpected OperationalError
                        raise
                finally:
                    # we're exiting due to an exception while acquiring the lock
                    lock_cr.close()

        finally:
            if hasattr(threading.current_thread(), 'dbname'):
                del threading.current_thread().dbname

    @classmethod
    def _process_job(cls, job_cr, job, cron_cr):
        """ Run a given job taking care of the repetition.

        :param job_cr: cursor to use to execute the job, safe to commit/rollback
        :param job: job to be run (as a dictionary).
        :param cron_cr: cursor holding lock on the cron job row, to use to update the next exec date,
            must not be committed/rolled back!
        """
        try:
            with api.Environment.manage():
                cron = api.Environment(job_cr, job['user_id'], {
                    'lastcall': fields.Datetime.from_string(job['lastcall'])
                })['ir.cron']
                # Use the user's timezone to compare and compute datetimes,
                # otherwise unexpected results may appear. For instance, adding
                # 1 month in UTC to July 1st at midnight in GMT+2 gives July 30
                # instead of August 1st!
                # NOTE(review): fields.Datetime.now() here is the patched
                # local-time version, so no context_timestamp conversion is
                # applied to nextcall (the upstream conversion is commented
                # out below).
                now = fields.Datetime.now()
                # nextcall = fields.Datetime.context_timestamp(cron, fields.Datetime.from_string(job['nextcall']))
                nextcall = fields.Datetime.from_string(job['nextcall'])
                numbercall = job['numbercall']

                ok = False
                while nextcall < now and numbercall:
                    if numbercall > 0:
                        numbercall -= 1
                    if not ok or job['doall']:
                        cron._callback(job['cron_name'], job['ir_actions_server_id'], job['id'])
                    if numbercall:
                        nextcall += _intervalTypes[job['interval_type']](job['interval_number'])
                    ok = True
                addsql = ''
                if not numbercall:
                    addsql = ', active=False'
                cron_cr.execute("UPDATE ir_cron SET nextcall=%s, numbercall=%s, lastcall=%s" + addsql + " WHERE id=%s",
                                (
                                    fields.Datetime.to_string(nextcall),
                                    numbercall,
                                    fields.Datetime.to_string(now),
                                    job['id']
                                ))
                cron.flush()
                cron.invalidate_cache()

        finally:
            job_cr.commit()
            cron_cr.commit()

odoo前端显示

// Frontend side of the timezone patch: the server now stores local
// wall-clock time, so the web client must not apply any timezone offset
// when rendering datetimes.
odoo.define('san_tools.timezone', function (require) {
"use strict";

var Session = require('web.Session')

Session.include({
    getTZOffset: function (date) {
        // Always report a zero offset so stored values display as-is.
        return 0;
        // return -new Date(date).getTimezoneOffset();
        // The commented line above would use the browser's LOCAL timezone
        // offset. Since this project is only used in one country (a single
        // timezone), the offset is simply forced to 0.
    },
})
});

结尾

到此为止odoo时区问题彻底解决。数据库存的是正常本时区时间,显示也是正常本时区时间。

注:所有代码均通过补丁(monkey patch)方式注入,不直接修改 Odoo 源码,因此升级 Odoo 不会丢失这些改动;但升级后仍需验证补丁与新版本内部实现的兼容性。

  • 1
    点赞
  • 5
    收藏
    觉得还不错? 一键收藏
  • 3
    评论
评论 3
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值