desc
stringlengths
3
26.7k
decl
stringlengths
11
7.89k
bodies
stringlengths
8
553k
def test_action_backup_sftp_mkdirs(self):
    """It should create remote dirs."""
    record = self.new_record()
    with self.mock_assets():
        with self.patch_filtered_sftp(record):
            conn = record.sftp_connection().__enter__()
            record.action_backup()
            conn.makedirs.assert_called_once_with(record.folder)
def test_action_backup_sftp_mkdirs_conn_exception(self):
    """It should guard from ConnectionException on remote.mkdirs."""
    record = self.new_record()
    with self.mock_assets():
        with self.patch_filtered_sftp(record):
            conn = record.sftp_connection().__enter__()
            conn.makedirs.side_effect = TestConnectionException
            # Must not raise despite the failing makedirs call.
            record.action_backup()
            self.assertTrue(True)
def test_action_backup_sftp_remote_open(self):
    """It should open remote file w/ proper args."""
    record = self.new_record()
    with self.mock_assets() as assets:
        with self.patch_filtered_sftp(record):
            conn = record.sftp_connection().__enter__()
            record.action_backup()
            conn.open.assert_called_once_with(
                assets['os'].path.join(), 'wb')
# NOTE(review): this is a byte-identical duplicate of the previous test
# definition; the later def shadows the earlier one at class creation.
# Consider removing one of them.
def test_action_backup_sftp_remote_open(self):
    """It should open remote file w/ proper args."""
    record = self.new_record()
    with self.mock_assets() as assets:
        with self.patch_filtered_sftp(record):
            conn = record.sftp_connection().__enter__()
            record.action_backup()
            conn.open.assert_called_once_with(
                assets['os'].path.join(), 'wb')
def test_action_backup_all_search(self):
    """It should search all records."""
    record = self.new_record()
    with mock.patch.object(record, 'search'):
        record.action_backup_all()
        record.search.assert_called_once_with([])
def test_action_backup_all_return(self):
    """It should return result of backup operation."""
    record = self.new_record()
    with mock.patch.object(record, 'search'):
        result = record.action_backup_all()
        self.assertEqual(record.search().action_backup(), result)
@mock.patch('%s.pysftp' % model)
def test_sftp_connection_init_passwd(self, pysftp):
    """It should initiate SFTP connection w/ proper args and pass."""
    record = self.new_record()
    record.sftp_connection()
    pysftp.Connection.assert_called_once_with(
        host=record.sftp_host,
        username=record.sftp_user,
        port=record.sftp_port,
        password=record.sftp_password,
    )
@mock.patch('%s.pysftp' % model)
def test_sftp_connection_init_key(self, pysftp):
    """It should initiate SFTP connection w/ proper args and key."""
    record = self.new_record()
    record.write({
        'sftp_private_key': 'pkey',
        'sftp_password': 'pkeypass',
    })
    record.sftp_connection()
    pysftp.Connection.assert_called_once_with(
        host=record.sftp_host,
        username=record.sftp_user,
        port=record.sftp_port,
        private_key=record.sftp_private_key,
        private_key_pass=record.sftp_password,
    )
@mock.patch('%s.pysftp' % model)
def test_sftp_connection_return(self, pysftp):
    """It should return new sftp connection."""
    record = self.new_record()
    result = record.sftp_connection()
    self.assertEqual(pysftp.Connection(), result)
def test_filename(self):
    """It should not error and should return a .dump.zip file str."""
    when = datetime.now()
    result = self.Model.filename(when)
    self.assertTrue(result.endswith('.dump.zip'))
def tearDown(self):
    """Workaround https://github.com/odoo/odoo/issues/12237."""
    super(UICase, self).tearDown()
    # Restore the signup parameter mutated by the tests, then persist it.
    self.icp.set_param(
        'auth_signup.allow_uninvited', self.old_allow_uninvited)
    self.cr.commit()
def html_doc(self, url='/web/signup', data=None, timeout=10):
    """Get an HTML LXML document."""
    if data:
        # POST payloads must be urlencoded bytes.
        data = bytes(urlencode(data))
    response = self.url_open(url, data, timeout)
    return document_fromstring(response.read())
def csrf_token(self):
    """Get a valid CSRF token."""
    doc = self.html_doc()
    token_inputs = doc.xpath("//input[@name='csrf_token']")
    return token_inputs[0].get('value')
def search_text(self, doc, text):
    """Search for any element containing the text."""
    expr = "//*[contains(text(), '%s')]" % text
    return doc.xpath(expr)
def test_bad_email(self):
    """Test rejection of bad emails."""
    self.data['login'] = 'bad email'
    doc = self.html_doc(data=self.data)
    self.assertTrue(self.search_text(doc, self.msg['badmail']))
def test_good_email(self):
    """Test acceptance of good emails.

    This test could lead to success if your SMTP settings are correct, or
    to failure otherwise. Any case is expected, since tests usually run
    under unconfigured demo instances.
    """
    self.data['login'] = '[email protected]'
    doc = self.html_doc(data=self.data)
    found = (self.search_text(doc, self.msg['failure'])
             or self.search_text(doc, self.msg['success']))
    self.assertTrue(found)
@api.model
def _default_res_model_id(self):
    """Useful when creating stages from a Kanban view for another model."""
    action_id = self.env.context.get('params', {}).get('action')
    action = self.env['ir.actions.act_window'].browse(action_id)
    default_model = action.res_model
    # Do not link a stage to its own model.
    if default_model != self._name:
        return self.env['ir.model'].search(
            [('model', '=', default_model)])
@classmethod
def _init_test_model(cls, model_cls):
    """It builds a model from model_cls in order to test abstract models.

    Note that this does not actually create a table in the database, so
    there may be some unidentified edge cases.

    Args:
        model_cls (openerp.models.BaseModel): Class of model to initialize
    Returns:
        model_cls: Instance
    """
    registry = cls.env.registry
    cr = cls.env.cr
    inst = model_cls._build_model(registry, cr)
    model = cls.env[model_cls._name].with_context(todo=[])
    # Run the full ORM setup sequence so fields/columns are usable.
    model._prepare_setup()
    model._setup_base(partial=False)
    model._setup_fields(partial=False)
    model._setup_complete()
    model._auto_init()
    model.init()
    model._auto_end()
    cls.test_model_record = cls.env['ir.model'].search(
        [('name', '=', model._name)])
    return inst
def test_read_group_stage_ids(self):
    """It should return the correct recordset."""
    expected = self.env['base.kanban.stage'].search([], order='id')
    actual = self.test_model._read_group_stage_ids(
        self.env['base.kanban.stage'], [], 'id')
    self.assertEqual(actual, expected)
def test_default_stage_id(self):
    """It should return an empty RecordSet."""
    self.assertEqual(
        self.env['base.kanban.abstract']._default_stage_id(),
        self.env['base.kanban.stage'],
    )
def test_default_res_model_id_no_params(self):
    """It should return empty ir.model Recordset if no params in context."""
    stage = self.env['base.kanban.stage'].with_context({})
    res_model_id = stage._default_res_model_id()
    self.assertFalse(res_model_id)
    self.assertEqual(res_model_id._name, 'ir.model')
def test_default_res_model_id_no_action(self):
    """It should return empty ir.model Recordset if no action in params."""
    stage = self.env['base.kanban.stage'].with_context(params={})
    res_model_id = stage._default_res_model_id()
    self.assertFalse(res_model_id)
    self.assertEqual(res_model_id._name, 'ir.model')
def test_default_res_model_id_info_in_context(self):
    """It should return correct ir.model record if info in context."""
    action = self.env['ir.actions.act_window'].create({
        'name': 'Test Action',
        'res_model': 'res.users',
    })
    stage = self.env['base.kanban.stage'].with_context(
        params={'action': action.id})
    self.assertEqual(
        stage._default_res_model_id(),
        self.env['ir.model'].search([('model', '=', 'res.users')]),
    )
def test_default_res_model_id_ignore_self(self):
    """It should not return ir.model record corresponding to stage model."""
    action = self.env['ir.actions.act_window'].create({
        'name': 'Test Action',
        'res_model': 'base.kanban.stage',
    })
    stage = self.env['base.kanban.stage'].with_context(
        params={'action': action.id})
    self.assertFalse(stage._default_res_model_id())
@classmethod
def _build_model(cls, pool, cr):
    """Add columns to model dynamically and init some properties."""
    ModelClass = super(ServerConfiguration, cls)._build_model(pool, cr)
    ModelClass._add_columns()
    ModelClass.running_env = system_base_config['running_env']
    # Passwords are only shown in development environments.
    ModelClass.show_passwords = ModelClass.running_env in ('dev',)
    ModelClass._arch = None
    ModelClass._build_osv()
    return ModelClass
@classmethod
def _add_columns(cls):
    """Add columns to model dynamically."""
    all_cols = chain(
        cls._get_base_cols().items(),
        cls._get_env_cols().items(),
        cls._get_system_cols().items(),
    )
    for col, value in all_cols:
        # Field names cannot contain dots.
        col_name = col.replace('.', '_')
        setattr(
            ServerConfiguration,
            col_name,
            fields.Char(string=col, readonly=True),
        )
        cls._conf_defaults[col_name] = value
@classmethod
def _get_base_cols(cls):
    """Compute base fields."""
    return {
        cls._format_key('odoo', col): item
        for col, item in system_base_config.options.items()
    }
@classmethod
def _get_env_cols(cls, sections=None):
    """Compute environment-based fields."""
    if not sections:
        sections = serv_config.sections()
    result = {}
    for section in sections:
        for col, item in serv_config.items(section):
            result[cls._format_key(section, col)] = item
    return result
@classmethod
def _get_system_cols(cls):
    """Compute system fields."""
    return {
        cls._format_key('system', col): item
        for col, item in get_server_environment()
    }
@classmethod
def _group(cls, items):
    """Return an XML chunk which represents a group of fields."""
    field_tags = ''.join(
        '<field name="%s" readonly="1"/>' % _escape(key.replace('.', '_'))
        for key in sorted(items)
    )
    return '<group col="2" colspan="4">' + field_tags + '</group>'
@classmethod
def _build_osv(cls):
    """Build the view for the current configuration."""
    parts = [
        '<?xml version="1.0" encoding="utf-8"?>'
        '<form string="Configuration Form"><notebook colspan="4">'
    ]
    rcfile = system_base_config.rcfile
    # Page 1: options coming from the Odoo rc file.
    parts.append('<page string="Odoo">')
    parts.append('<separator string="%s" colspan="4"/>' % _escape(rcfile))
    parts.append(cls._group(cls._get_base_cols()))
    parts.append('<separator colspan="4"/></page>')
    # Page 2: one separator+group per server-environment section.
    parts.append('<page string="Environment based configurations">')
    for section in sorted(serv_config.sections()):
        parts.append(
            '<separator string="[%s]" colspan="4"/>' % _escape(section))
        parts.append(cls._group(cls._get_env_cols(sections=[section])))
    parts.append('<separator colspan="4"/></page>')
    # Page 3: system information.
    parts.append('<page string="System">')
    parts.append('<separator string="Server Environment" colspan="4"/>')
    parts.append(cls._group(cls._get_system_cols()))
    parts.append('<separator colspan="4"/></page>')
    parts.append('</notebook></form>')
    cls._arch = etree.fromstring(''.join(parts))
@api.model
def fields_view_get(self, view_id=None, view_type='form',
                    toolbar=False, submenu=False):
    """Overwrite the default method to render the custom view."""
    res = super(ServerConfiguration, self).fields_view_get(
        view_id, view_type, toolbar)
    View = self.env['ir.ui.view']
    if view_type == 'form':
        # Replace the arch with the one generated by _build_osv().
        xarch, xfields = View.postprocess_and_fields(
            self._name, self._arch, view_id)
        res['arch'] = xarch
        res['fields'] = xfields
    return res
def test_connection_close_pyodbc(self):
    """It should close the connection."""
    connection = mock.MagicMock()
    result = self.dbsource.connection_close_pyodbc(connection)
    self.assertEqual(result, connection.close())
@mock.patch(ADAPTER)
def test_connection_open_pyodbc(self, pyodbc):
    """It should open the connection with the full conn string."""
    self.dbsource.connection_open_pyodbc()
    pyodbc.connect.assert_called_once_with(
        self.dbsource.conn_string_full)
@mock.patch(ADAPTER)
def test_connection_open_pyodbc_return(self, pyodbc):
    """It should return the newly opened connection."""
    result = self.dbsource.connection_open_pyodbc()
    self.assertEqual(result, pyodbc.connect())
def test_execute_pyodbc(self):
    """It should call the generic execute method w/ proper args."""
    expected = ('sqlquery', 'sqlparams', 'metadata')
    with mock.patch.object(self.dbsource, '_execute_generic') as execute:
        self.dbsource.execute_pyodbc(*expected)
        execute.assert_called_once_with(*expected)
def get_ldap_dicts(self):
    """Copy of auth_ldap's funtion, changing only the SQL, so that it
    returns all fields in the table.
    """
    servers = self.sudo().search(
        [('ldap_server', '!=', False)], order='sequence')
    # read([]) fetches every field of each server record.
    return servers.read([])
@api.multi
def _check_exception(self):
    """This method must be used in a constraint that must be created in
    the object that inherits for base.exception.

    for sale:
        @api.constrains('ignore_exception',)
        def sale_check_exception(self):
            self._check_exception
    """
    exception_ids = self.detect_exceptions()
    if not exception_ids:
        return
    exceptions = self.env['exception.rule'].browse(exception_ids)
    raise ValidationError('\n'.join(exceptions.mapped('name')))
@api.multi
def test_exceptions(self):
    """Condition method for the workflow from draft to confirm."""
    # Valid (True) only when no exception rule is triggered.
    return not self.detect_exceptions()
@api.multi
def detect_exceptions(self):
    """Returns the list of exception_ids for all the considered
    base.exceptions.
    """
    if not self:
        return []
    exception_obj = self.env['exception.rule']
    all_exceptions = exception_obj.sudo().search(
        [('rule_group', '=', self[0].rule_group)])
    # Rules on this model apply directly; others apply to sub-records.
    model_exceptions = all_exceptions.filtered(
        lambda ex: ex.model == self._name)
    sub_exceptions = all_exceptions.filtered(
        lambda ex: ex.model != self._name)
    all_exception_ids = []
    for record in self:
        if record.ignore_exception:
            continue
        exception_ids = record._detect_exceptions(
            model_exceptions, sub_exceptions)
        record.exception_ids = [(6, 0, exception_ids)]
        all_exception_ids += exception_ids
    return all_exception_ids
'Inherit in your module to define for which company field you don\'t want have a matching related field'
def _filter_field(self, field_key):
return True
@api.model
def autovacuum(self, days):
    """Delete all logs older than ``days``. This includes:
        - CRUD logs (create, read, write, unlink)
        - HTTP requests
        - HTTP user sessions
    Called from a cron.
    """
    # Negative values are clamped to 0 (i.e. "delete everything").
    days = int(days) if days > 0 else 0
    deadline = datetime.now() - timedelta(days=days)
    data_models = (
        'auditlog.log',
        'auditlog.http.request',
        'auditlog.http.session',
    )
    for data_model in data_models:
        records = self.env[data_model].search(
            [('create_date', '<=', fields.Datetime.to_string(deadline))])
        nb_records = len(records)
        records.unlink()
        _logger.info(
            u"AUTOVACUUM - %s '%s' records deleted",
            nb_records, data_model)
    return True
@api.model
def current_http_session(self):
    """Create a log corresponding to the current HTTP user session, and
    returns its ID. This method can be called several times during the
    HTTP query/response cycle, it will only log the user session on the
    first call. If no HTTP user session is available, returns `False`.
    """
    if not request:
        return False
    httpsession = request.session
    if not httpsession:
        return False
    # Reuse an existing session log when one already matches.
    existing_session = self.search(
        [('name', '=', httpsession.sid), ('user_id', '=', request.uid)],
        limit=1)
    if existing_session:
        return existing_session.id
    vals = {'name': httpsession.sid, 'user_id': request.uid}
    httpsession.auditlog_http_session_id = self.create(vals).id
    return httpsession.auditlog_http_session_id
def _register_hook(self):
    """Get all rules and apply them to log method calls."""
    super(AuditlogRule, self)._register_hook()
    # Lazily create the registry-level caches shared by all rules.
    if not hasattr(self.pool, '_auditlog_field_cache'):
        self.pool._auditlog_field_cache = {}
    if not hasattr(self.pool, '_auditlog_model_cache'):
        self.pool._auditlog_model_cache = {}
    if not self:
        self = self.search([('state', '=', 'subscribed')])
    return self._patch_methods()
@api.multi
def _patch_methods(self):
    """Patch ORM methods of models defined in rules to log their calls."""
    updated = False
    model_cache = self.pool._auditlog_model_cache
    for rule in self:
        if rule.state != 'subscribed':
            continue
        if not self.pool.get(rule.model_id.model):
            continue
        model_cache[rule.model_id.model] = rule.model_id.id
        model_model = self.env[rule.model_id.model]
        # For each CRUD method: if logging is enabled for the rule and
        # the model has not been patched yet, install the logging patch
        # and mark the model class with a sentinel attribute.
        for method in ('create', 'read', 'write', 'unlink'):
            check_attr = 'auditlog_ruled_%s' % method
            if (getattr(rule, 'log_%s' % method)
                    and not hasattr(model_model, check_attr)):
                maker = getattr(rule, '_make_%s' % method)
                model_model._patch_method(method, maker())
                setattr(type(model_model), check_attr, True)
                updated = True
    return updated
@api.multi
def _revert_methods(self):
    """Restore original ORM methods of models defined in rules."""
    updated = False
    for rule in self:
        model_model = self.env[rule.model_id.model]
        for method in ['create', 'read', 'write', 'unlink']:
            patched = hasattr(getattr(model_model, method), 'origin')
            if getattr(rule, 'log_%s' % method) and patched:
                model_model._revert_method(method)
                delattr(type(model_model), 'auditlog_ruled_%s' % method)
                updated = True
    if updated:
        modules.registry.RegistryManager.signal_registry_change(
            self.env.cr.dbname)
@api.model
def create(self, vals):
    """Update the registry when a new rule is created."""
    new_record = super(AuditlogRule, self).create(vals)
    if new_record._register_hook():
        modules.registry.RegistryManager.signal_registry_change(
            self.env.cr.dbname)
    return new_record
@api.multi
def write(self, vals):
    """Update the registry when existing rules are updated."""
    super(AuditlogRule, self).write(vals)
    if self._register_hook():
        modules.registry.RegistryManager.signal_registry_change(
            self.env.cr.dbname)
    return True
@api.multi
def unlink(self):
    """Unsubscribe rules before removing them."""
    self.unsubscribe()
    return super(AuditlogRule, self).unlink()
@api.multi
def _make_create(self):
    """Instanciate a create method that log its calls."""
    self.ensure_one()
    log_type = self.log_type

    @api.model
    @api.returns('self', lambda value: value.id)
    def create_full(self, vals, **kwargs):
        # 'full' mode: read the freshly created record back to log the
        # actual stored values.
        self = self.with_context(auditlog_disabled=True)
        rule_model = self.env['auditlog.rule']
        new_record = create_full.origin(self, vals, **kwargs)
        new_values = dict(
            (d['id'], d)
            for d in new_record.sudo()
            .with_context(prefetch_fields=False)
            .read(list(self._fields)))
        rule_model.sudo().create_logs(
            self.env.uid, self._name, new_record.ids, 'create',
            None, new_values, {'log_type': log_type})
        return new_record

    @api.model
    @api.returns('self', lambda value: value.id)
    def create_fast(self, vals, **kwargs):
        # 'fast' mode: log only the values passed to create().
        self = self.with_context(auditlog_disabled=True)
        rule_model = self.env['auditlog.rule']
        vals2 = dict(vals)
        new_record = create_fast.origin(self, vals, **kwargs)
        new_values = {new_record.id: vals2}
        rule_model.sudo().create_logs(
            self.env.uid, self._name, new_record.ids, 'create',
            None, new_values, {'log_type': log_type})
        return new_record

    return create_full if self.log_type == 'full' else create_fast
@api.multi
def _make_read(self):
    """Instanciate a read method that log its calls."""
    self.ensure_one()
    log_type = self.log_type

    def read(self, *args, **kwargs):
        result = read.origin(self, *args, **kwargs)
        # Normalize to a list of row dicts (read() on a single id may
        # return a bare dict).
        rows = result if isinstance(result, list) else [result]
        read_values = dict((d['id'], d) for d in rows)
        if args and isinstance(args[0], sql_db.Cursor):
            # Old-API call signature: read(cr, uid, ids, ...).
            cr, uid, ids = args[0], args[1], args[2]
            if isinstance(ids, (int, long)):
                ids = [ids]
            if kwargs.get('context', {}).get('auditlog_disabled'):
                return result
            env = api.Environment(cr, uid, {'auditlog_disabled': True})
            rule_model = env['auditlog.rule']
            rule_model.sudo().create_logs(
                env.uid, self._name, ids, 'read',
                read_values, None, {'log_type': log_type})
        else:
            # New-API call signature: recordset.read(...).
            if self.env.context.get('auditlog_disabled'):
                return result
            self = self.with_context(auditlog_disabled=True)
            rule_model = self.env['auditlog.rule']
            rule_model.sudo().create_logs(
                self.env.uid, self._name, self.ids, 'read',
                read_values, None, {'log_type': log_type})
        return result

    return read
@api.multi
def _make_write(self):
    """Instanciate a write method that log its calls."""
    self.ensure_one()
    log_type = self.log_type

    @api.multi
    def write_full(self, vals, **kwargs):
        # 'full' mode: snapshot all fields before and after the write.
        self = self.with_context(auditlog_disabled=True)
        rule_model = self.env['auditlog.rule']
        old_values = dict(
            (d['id'], d)
            for d in self.sudo()
            .with_context(prefetch_fields=False).read(list(self._fields)))
        result = write_full.origin(self, vals, **kwargs)
        new_values = dict(
            (d['id'], d)
            for d in self.sudo()
            .with_context(prefetch_fields=False).read(list(self._fields)))
        rule_model.sudo().create_logs(
            self.env.uid, self._name, self.ids, 'write',
            old_values, new_values, {'log_type': log_type})
        return result

    @api.multi
    def write_fast(self, vals, **kwargs):
        # 'fast' mode: log only the written keys; old values are unknown
        # and recorded as False.
        self = self.with_context(auditlog_disabled=True)
        rule_model = self.env['auditlog.rule']
        vals2 = dict(vals)
        old_vals2 = dict.fromkeys(vals2.keys(), False)
        old_values = dict((id_, old_vals2) for id_ in self.ids)
        new_values = dict((id_, vals2) for id_ in self.ids)
        result = write_fast.origin(self, vals, **kwargs)
        rule_model.sudo().create_logs(
            self.env.uid, self._name, self.ids, 'write',
            old_values, new_values, {'log_type': log_type})
        return result

    return write_full if self.log_type == 'full' else write_fast
@api.multi
def _make_unlink(self):
    """Instanciate an unlink method that log its calls."""
    self.ensure_one()
    log_type = self.log_type

    @api.multi
    def unlink_full(self, **kwargs):
        # 'full' mode: capture all field values before deletion.
        self = self.with_context(auditlog_disabled=True)
        rule_model = self.env['auditlog.rule']
        old_values = dict(
            (d['id'], d)
            for d in self.sudo()
            .with_context(prefetch_fields=False).read(list(self._fields)))
        rule_model.sudo().create_logs(
            self.env.uid, self._name, self.ids, 'unlink',
            old_values, None, {'log_type': log_type})
        return unlink_full.origin(self, **kwargs)

    @api.multi
    def unlink_fast(self, **kwargs):
        # 'fast' mode: only record that the deletion happened.
        self = self.with_context(auditlog_disabled=True)
        rule_model = self.env['auditlog.rule']
        rule_model.sudo().create_logs(
            self.env.uid, self._name, self.ids, 'unlink',
            None, None, {'log_type': log_type})
        return unlink_fast.origin(self, **kwargs)

    return unlink_full if self.log_type == 'full' else unlink_fast
def create_logs(self, uid, res_model, res_ids, method,
                old_values=None, new_values=None,
                additional_log_values=None):
    """Create logs. `old_values` and `new_values` are dictionaries, e.g:
    {RES_ID: {'FIELD': VALUE, ...}}
    """
    if old_values is None:
        old_values = EMPTY_DICT
    if new_values is None:
        new_values = EMPTY_DICT
    log_model = self.env['auditlog.log']
    http_request_model = self.env['auditlog.http.request']
    http_session_model = self.env['auditlog.http.session']
    for res_id in res_ids:
        model_model = self.env[res_model]
        name = model_model.browse(res_id).name_get()
        res_name = name and name[0] and name[0][1]
        vals = {
            'name': res_name,
            'model_id': self.pool._auditlog_model_cache[res_model],
            'res_id': res_id,
            'method': method,
            'user_id': uid,
            'http_request_id': http_request_model.current_http_request(),
            'http_session_id': http_session_model.current_http_session(),
        }
        vals.update(additional_log_values or {})
        log = log_model.create(vals)
        diff = DictDiffer(
            new_values.get(res_id, EMPTY_DICT),
            old_values.get(res_id, EMPTY_DICT))
        # BUG FIX: compare method names with '==' instead of 'is'.
        # 'method is "create"' only works because CPython happens to
        # intern short string literals; it is not a language guarantee
        # and silently fails for non-interned strings.
        if method == 'create':
            self._create_log_line_on_create(log, diff.added(), new_values)
        elif method == 'read':
            self._create_log_line_on_read(
                log, old_values.get(res_id, EMPTY_DICT).keys(),
                old_values)
        elif method == 'write':
            self._create_log_line_on_write(
                log, diff.changed(), old_values, new_values)
def _create_log_line_on_read(self, log, fields_list, read_values):
    """Log field filled on a 'read' operation."""
    log_line_model = self.env['auditlog.log.line']
    for field_name in fields_list:
        # Technical fields (id, timestamps, ...) are never logged.
        if field_name in FIELDS_BLACKLIST:
            continue
        field = self._get_field(log.model_id, field_name)
        if not field:
            continue
        log_line_model.create(
            self._prepare_log_line_vals_on_read(log, field, read_values))
def _prepare_log_line_vals_on_read(self, log, field, read_values):
    """Prepare the dictionary of values used to create a log line on a
    'read' operation.
    """
    old_value = read_values[log.res_id][field['name']]
    vals = {
        'field_id': field['id'],
        'log_id': log.id,
        'old_value': old_value,
        'old_value_text': old_value,
        'new_value': False,
        'new_value_text': False,
    }
    # For x2many fields, resolve the ids to display names.
    if field['relation'] and '2many' in field['ttype']:
        vals['old_value_text'] = self.env[field['relation']].browse(
            vals['old_value']).name_get()
    return vals
def _create_log_line_on_write(self, log, fields_list, old_values,
                              new_values):
    """Log field updated on a 'write' operation."""
    log_line_model = self.env['auditlog.log.line']
    for field_name in fields_list:
        if field_name in FIELDS_BLACKLIST:
            continue
        field = self._get_field(log.model_id, field_name)
        if not field:
            continue
        log_line_model.create(
            self._prepare_log_line_vals_on_write(
                log, field, old_values, new_values))
def _prepare_log_line_vals_on_write(self, log, field, old_values,
                                    new_values):
    """Prepare the dictionary of values used to create a log line on a
    'write' operation.
    """
    old_value = old_values[log.res_id][field['name']]
    new_value = new_values[log.res_id][field['name']]
    vals = {
        'field_id': field['id'],
        'log_id': log.id,
        'old_value': old_value,
        'old_value_text': old_value,
        'new_value': new_value,
        'new_value_text': new_value,
    }
    if (log.log_type == 'full' and field['relation']
            and '2many' in field['ttype']):
        # Resolve x2many ids to display names; records deleted in the
        # meantime get a 'DELETED' placeholder.
        relation_model = self.env[field['relation']]
        existing_ids = relation_model._search(
            [('id', 'in', vals['old_value'])])
        old_value_text = []
        if existing_ids:
            old_value_text.extend(
                relation_model.browse(existing_ids).name_get())
        for deleted_id in set(vals['old_value']) - set(existing_ids):
            old_value_text.append((deleted_id, 'DELETED'))
        vals['old_value_text'] = old_value_text
        vals['new_value_text'] = relation_model.browse(
            vals['new_value']).name_get()
    return vals
def _create_log_line_on_create(self, log, fields_list, new_values):
    """Log field filled on a 'create' operation."""
    log_line_model = self.env['auditlog.log.line']
    for field_name in fields_list:
        if field_name in FIELDS_BLACKLIST:
            continue
        field = self._get_field(log.model_id, field_name)
        if not field:
            continue
        log_line_model.create(
            self._prepare_log_line_vals_on_create(log, field, new_values))
def _prepare_log_line_vals_on_create(self, log, field, new_values):
    """Prepare the dictionary of values used to create a log line on a
    'create' operation.
    """
    new_value = new_values[log.res_id][field['name']]
    vals = {
        'field_id': field['id'],
        'log_id': log.id,
        'old_value': False,
        'old_value_text': False,
        'new_value': new_value,
        'new_value_text': new_value,
    }
    if (log.log_type == 'full' and field['relation']
            and '2many' in field['ttype']):
        vals['new_value_text'] = self.env[field['relation']].browse(
            vals['new_value']).name_get()
    return vals
@api.multi
def subscribe(self):
    """Subscribe Rule for auditing changes on model and apply shortcut
    to view logs on that model.
    """
    act_window_model = self.env['ir.actions.act_window']
    model_ir_values = self.env['ir.values']
    for rule in self:
        # Sidebar action showing the logs of the active record.
        domain = (
            "[('model_id', '=', %s), ('res_id', '=', active_id)]"
            % rule.model_id.id)
        act_window = act_window_model.sudo().create({
            'name': _(u'View logs'),
            'res_model': 'auditlog.log',
            'src_model': rule.model_id.model,
            'domain': domain,
        })
        rule.write({'state': 'subscribed', 'action_id': act_window.id})
        keyword = 'client_action_relate'
        value = 'ir.actions.act_window,%s' % act_window.id
        model_ir_values.sudo().set_action(
            'View_log_' + rule.model_id.model,
            action_slot=keyword,
            model=rule.model_id.model,
            action=value)
    return True
@api.multi
def unsubscribe(self):
    """Unsubscribe Auditing Rule on model."""
    act_window_model = self.env['ir.actions.act_window']
    ir_values_model = self.env['ir.values']
    self._revert_methods()
    for rule in self:
        # NOTE(review): subscribe() creates the action named
        # _(u'View logs'); searching for 'View Log' here looks
        # mismatched and may never find the action — confirm upstream.
        act_window = act_window_model.search([
            ('name', '=', 'View Log'),
            ('res_model', '=', 'auditlog.log'),
            ('src_model', '=', rule.model_id.model),
        ])
        if act_window:
            value = 'ir.actions.act_window,%s' % act_window.id
            act_window.unlink()
            ir_value = ir_values_model.search([
                ('model', '=', rule.model_id.model),
                ('value', '=', value),
            ])
            if ir_value:
                ir_value.unlink()
    self.write({'state': 'draft'})
    return True
@api.model
def current_http_request(self):
    """Create a log corresponding to the current HTTP request, and
    returns its ID. This method can be called several times during the
    HTTP query/response cycle, it will only log the request on the first
    call. If no HTTP request is available, returns `False`.
    """
    if not request:
        return False
    http_session_model = self.env['auditlog.http.session']
    httprequest = request.httprequest
    if not httprequest:
        return False
    if hasattr(httprequest, 'auditlog_http_request_id'):
        # Verify the cached log id still exists in the database
        # (it may have been vacuumed).
        self.env.cr.execute(
            'SELECT id FROM %s WHERE id = %s',
            (AsIs(self._table), httprequest.auditlog_http_request_id))
        if self.env.cr.fetchone():
            return httprequest.auditlog_http_request_id
    vals = {
        'name': httprequest.path,
        'root_url': httprequest.url_root,
        'user_id': request.uid,
        'http_session_id': http_session_model.current_http_session(),
        'user_context': request.context,
    }
    httprequest.auditlog_http_request_id = self.create(vals).id
    return httprequest.auditlog_http_request_id
def test_LogCreation(self):
    """First test, caching some data."""
    auditlog_log = self.env['auditlog.log']
    group = self.env['res.groups'].create({'name': 'testgroup1'})
    # One log record is expected per CRUD operation on the group.
    for method in ('create',):
        self.assertTrue(auditlog_log.search([
            ('model_id', '=', self.groups_model_id),
            ('method', '=', method),
            ('res_id', '=', group.id),
        ]).ensure_one())
    group.write({'name': 'Testgroup1'})
    self.assertTrue(auditlog_log.search([
        ('model_id', '=', self.groups_model_id),
        ('method', '=', 'write'),
        ('res_id', '=', group.id),
    ]).ensure_one())
    group.unlink()
    self.assertTrue(auditlog_log.search([
        ('model_id', '=', self.groups_model_id),
        ('method', '=', 'unlink'),
        ('res_id', '=', group.id),
    ]).ensure_one())
def test_LogCreation2(self):
    """Second test, using cached data of the first one."""
    auditlog_log = self.env['auditlog.log']
    testgroup2 = self.env['res.groups'].create({'name': 'testgroup2'})
    self.assertTrue(auditlog_log.search([
        ('model_id', '=', self.groups_model_id),
        ('method', '=', 'create'),
        ('res_id', '=', testgroup2.id),
    ]).ensure_one())
def test_LogCreation3(self):
    """Third test, two groups, the latter being the parent of the former.
    Then we remove it right after (with (2, X) tuple) to test the
    creation of a 'write' log with a deleted resource (so with no text
    representation).
    """
    auditlog_log = self.env['auditlog.log']
    # FIX: removed redundant double assignment
    # ('testgroup3 = testgroup3 = ...') left over from a copy-paste.
    testgroup3 = self.env['res.groups'].create({'name': 'testgroup3'})
    testgroup4 = self.env['res.groups'].create({
        'name': 'testgroup4',
        'implied_ids': [(4, testgroup3.id)],
    })
    testgroup4.write({'implied_ids': [(2, testgroup3.id)]})
    self.assertTrue(auditlog_log.search([
        ('model_id', '=', self.groups_model_id),
        ('method', '=', 'create'),
        ('res_id', '=', testgroup3.id),
    ]).ensure_one())
    self.assertTrue(auditlog_log.search([
        ('model_id', '=', self.groups_model_id),
        ('method', '=', 'create'),
        ('res_id', '=', testgroup4.id),
    ]).ensure_one())
    self.assertTrue(auditlog_log.search([
        ('model_id', '=', self.groups_model_id),
        ('method', '=', 'write'),
        ('res_id', '=', testgroup4.id),
    ]).ensure_one())
def test_fuzzy_where_generation(self):
    """Check the generation of the where clause."""
    self.assertIn('%', expression.TERM_OPERATORS)
    query = self.ResPartner._where_calc(
        [('name', '%', 'test')], active_test=False)
    from_clause, where_clause, where_clause_params = query.get_sql()
    self.assertEqual(where_clause, '("res_partner"."name" %% %s)')
    complete_where = self.env.cr.mogrify(
        'SELECT FROM %s WHERE %s' % (from_clause, where_clause),
        where_clause_params)
    self.assertEqual(
        complete_where,
        'SELECT FROM "res_partner" WHERE ("res_partner"."name" % \'test\')')
def test_fuzzy_where_generation_translatable(self):
    """Check the generation of the where clause for translatable fields."""
    ctx = {'lang': 'de_DE'}
    query = self.ResPartnerCategory.with_context(ctx)._where_calc(
        [('name', '%', 'Goschaeftlic')], active_test=False)
    from_clause, where_clause, where_clause_params = query.get_sql()
    self.assertIn(
        'SELECT id FROM temp_irt_current WHERE name %% %s', where_clause)
    complete_where = self.env.cr.mogrify(
        'SELECT FROM %s WHERE %s' % (from_clause, where_clause),
        where_clause_params)
    self.assertIn(
        "SELECT id FROM temp_irt_current WHERE name % 'Goschaeftlic'",
        complete_where)
def test_fuzzy_order_generation(self):
    """Check the generation of the order clause."""
    order = "similarity(%s.name, 'test') DESC" % self.ResPartner._table
    query = self.ResPartner._where_calc(
        [('name', '%', 'test')], active_test=False)
    order_by = self.ResPartner._generate_order_by(order, query)
    self.assertEqual(' ORDER BY %s' % order, order_by)
def test_fuzzy_search(self):
    """Test the fuzzy search itself."""
    # The pg_trgm extension is required for the '%' operator.
    if self.TrgmIndex._trgm_extension_exists() != 'installed':
        return
    if not self.TrgmIndex.index_exists('res.partner', 'name'):
        field_partner_name = self.env.ref('base.field_res_partner_name')
        self.TrgmIndex.create({
            'field_id': field_partner_name.id,
            'index_type': 'gin',
        })
    partner1 = self.ResPartner.create({'name': 'John Smith'})
    partner2 = self.ResPartner.create({'name': 'John Smizz'})
    partner3 = self.ResPartner.create({'name': 'Linus Torvalds'})
    for term in ('Jon Smith', 'Smith John'):
        res = self.ResPartner.search([('name', '%', term)])
        self.assertIn(partner1.id, res.ids)
        self.assertIn(partner2.id, res.ids)
        self.assertNotIn(partner3.id, res.ids)
@api.multi
def change_table(self, name):
    """Change the table that is used for CRUD operations.

    :param name: Name of the remote table targeted by subsequent
        ``remote_*`` calls (stored in ``current_table``).
    """
    self.current_table = name
@api.multi
def connection_close(self, connection):
    """Close the connection to the data source.

    Delegates to the adapter method of the same name, suffixed with
    the adapter type.

    :param connection: Connection object to close.
    :return: Whatever the adapter's close method returns.
    """
    close = self._get_adapter_method('connection_close')
    return close(connection)
@api.multi
@contextmanager
def connection_open(self):
    """Provide a context manager for the data source connection.

    Delegates to the adapter method of the same name, suffixed with
    the adapter type, and always attempts ``connection_close`` when
    the context exits.
    """
    method = self._get_adapter_method('connection_open')
    try:
        connection = method()
        (yield connection)
    finally:
        try:
            self.connection_close(connection)
        # Deliberate broad catch: a close failure is logged but must
        # never mask the exception (if any) raised inside the context.
        except:
            _logger.exception('Connection close failure.')
@api.multi
def execute(self, query=None, execute_params=None, metadata=False,
            **kwargs):
    """Execute a query and return a list of rows.

    ``execute_params`` can be a dict of values referenced in the SQL
    statement using ``%(key)s`` (or ``:key`` for Oracle).  When
    ``metadata`` is True, a dict ``{'cols': [...], 'rows': [...]}`` is
    returned instead of the plain row list.  The legacy ``sqlquery``
    and ``sqlparams`` keyword arguments are still honored.

    :param query: SQL statement to execute.
    :param execute_params: Mapping of parameters for the statement.
    :param metadata: Whether to include column names in the result.
    :raises TypeError: When no query is supplied.
    """
    if not query:
        # Backwards compatibility with the old ``sqlquery`` kwarg.
        if 'sqlquery' not in kwargs:
            raise TypeError(_('query is a required argument'))
        query = kwargs['sqlquery']
    if not execute_params:
        # Backwards compatibility with the old ``sqlparams`` kwarg.
        execute_params = kwargs.get('sqlparams', execute_params)
    run = self._get_adapter_method('execute')
    rows, cols = run(query, execute_params, metadata)
    if metadata:
        return {'cols': cols, 'rows': rows}
    return rows
@api.multi
def connection_test(self):
    """Test the connection of every record in the set.

    :raises ConnectionSuccessError: On connection success.
    :raises ConnectionFailedError: On connection failure.
    """
    for record in self:
        try:
            # Open on the single record, not the whole set:
            # ``_get_adapter_method`` calls ``ensure_one`` and would
            # raise on a multi-record recordset.
            with record.connection_open():
                pass
        except Exception as e:
            raise ConnectionFailedError(
                _('Connection test failed:\n'
                  'Here is what we got instead:\n%s') % tools.ustr(e))
    raise ConnectionSuccessError(
        _('Connection test succeeded:\n'
          'Everything seems properly set up!'))
@api.multi
def remote_browse(self, record_ids, *args, **kwargs):
    """Browse and return remote records by ID.

    Delegates to the adapter method of the same name, suffixed with
    the adapter type.

    :param record_ids: List of remote IDs to browse.
    :return: Iterator of record mappings matching the IDs.
    """
    assert self.current_table
    browse = self._get_adapter_method('remote_browse')
    return browse(record_ids, *args, **kwargs)
@api.multi
def remote_create(self, vals, *args, **kwargs):
    """Create a record on the remote data source.

    Delegates to the adapter method of the same name, suffixed with
    the adapter type.

    :param vals: Dict of values to use for creation.
    :return: Mapping of the record that was created.
    """
    assert self.current_table
    create = self._get_adapter_method('remote_create')
    return create(vals, *args, **kwargs)
@api.multi
def remote_delete(self, record_ids, *args, **kwargs):
    """Delete records by ID on the remote data source.

    Delegates to the adapter method of the same name, suffixed with
    the adapter type.

    :param record_ids: List of remote IDs to delete.
    :return: Iterator of bools indicating delete status.
    """
    assert self.current_table
    delete = self._get_adapter_method('remote_delete')
    return delete(record_ids, *args, **kwargs)
@api.multi
def remote_search(self, query, *args, **kwargs):
    """Search the remote data source for the query.

    Delegates to the adapter method of the same name, suffixed with
    the adapter type.

    :param query: Query domain as required by the adapter.
    :return: Iterator of record mappings that match the query.
    """
    assert self.current_table
    search = self._get_adapter_method('remote_search')
    return search(query, *args, **kwargs)
@api.multi
def remote_update(self, record_ids, vals, *args, **kwargs):
    """Update remote records with the given values.

    Delegates to the adapter method of the same name, suffixed with
    the adapter type.

    :param record_ids: List of remote IDs to update.
    :param vals: Dict of values to write.
    :return: Iterator of record mappings that were updated.
    """
    assert self.current_table
    update = self._get_adapter_method('remote_update')
    return update(record_ids, vals, *args, **kwargs)
@api.multi
def conn_open(self):
    """Open and return a connection to the remote data source.

    .. deprecated:: Replaced by ``connection_open``.

    NOTE(review): ``connection_open`` closes the connection when the
    ``with`` block exits, so the object returned here has already had
    close attempted on it — presumably kept only for backward
    compatibility; confirm before relying on the returned handle.
    """
    with self.connection_open() as connection:
        return connection
def _get_adapter_method(self, method_prefix):
    """Return the connector adapter method for ``method_prefix``.

    :param method_prefix: Prefix of the adapter method
        (such as ``connection_open``).
    :raises NotImplementedError: When the method is not found.
    :return: Bound adapter method (instancemethod).
    """
    self.ensure_one()
    method = '%s_%s' % (method_prefix, self.connector)
    try:
        return getattr(self, method)
    except AttributeError:
        # Format the message string, not the exception object: the
        # original applied ``%`` to the NotImplementedError instance,
        # which raised TypeError instead of the intended exception.
        raise NotImplementedError(
            _('"%s" method not found, check that all assets are '
              'installed for the %s connector type.') % (
                method, self.connector))
def test_conn_string_full(self):
    """It should add password if string interpolation not detected."""
    self.dbsource.conn_string = 'User=Derp;'
    self.dbsource.password = 'password'
    expected = self.dbsource.conn_string + (
        'PWD=%s;' % self.dbsource.password)
    self.assertEqual(self.dbsource.conn_string_full, expected)
def test_connection_success(self):
    """It should raise for successful connection."""
    # ``connection_test`` signals success by raising as well.
    with self.assertRaises(ConnectionSuccessError):
        self.dbsource.connection_test()
def test_connection_fail(self):
    """It should raise for failed/invalid connection."""
    with mock.patch.object(self.dbsource, 'connection_open') as connect:
        connect.side_effect = Exception
        with self.assertRaises(ConnectionFailedError):
            self.dbsource.connection_test()
def test_connection_open_calls_close(self):
    """It should close connection after context ends."""
    with mock.patch.object(self.dbsource, 'connection_close') as close:
        with self.dbsource.connection_open():
            pass
        close.assert_called_once()
def test_connection_close(self):
    """It should call the adapter's close method."""
    connection = mock.MagicMock()
    res, adapter = self._test_adapter_method(
        'connection_close', args=[connection])
    adapter.assert_called_once_with(connection)
def test_execute_asserts_query_arg(self):
    """It should raise a TypeError if query and sqlquery not in args."""
    with self.assertRaises(TypeError):
        self.dbsource.execute()
def test_execute_calls_adapter(self):
    """It should call the adapter methods with proper args."""
    call_args = ('query', 'execute', 'metadata')
    res, adapter = self._test_adapter_method(
        'execute', args=call_args, return_value=('rows', 'cols'))
    adapter.assert_called_once_with(*call_args)
def test_execute_return(self):
    """It should return rows if not metadata."""
    rows, cols = 'rows', 'cols'
    res, adapter = self._test_adapter_method(
        'execute', args=(True, True, False), return_value=(rows, cols))
    self.assertEqual(res, rows)
def test_execute_return_metadata(self):
    """It should return rows and cols if metadata."""
    rows, cols = 'rows', 'cols'
    res, adapter = self._test_adapter_method(
        'execute', args=(True, True, True), return_value=(rows, cols))
    self.assertEqual(res, {'rows': rows, 'cols': cols})
def test_remote_browse(self):
    """It should call the adapter method with proper args."""
    call_args = ([1], 'args')
    call_kwargs = {'kwargs': True}
    self.dbsource.current_table = 'table'
    res, adapter = self._test_adapter_method(
        'remote_browse', create=True,
        args=call_args, kwargs=call_kwargs)
    adapter.assert_called_once_with(*call_args, **call_kwargs)
    self.assertEqual(res, adapter())
def test_remote_browse_asserts_current_table(self):
    """It should raise AssertionError if a table not selected."""
    with self.assertRaises(AssertionError):
        self._test_adapter_method(
            'remote_browse', create=True,
            args=([1], 'args'), kwargs={'kwargs': True})
def test_remote_create(self):
    """It should call the adapter method with proper args."""
    call_args = ({'val': 'Value'}, 'args')
    call_kwargs = {'kwargs': True}
    self.dbsource.current_table = 'table'
    res, adapter = self._test_adapter_method(
        'remote_create', create=True,
        args=call_args, kwargs=call_kwargs)
    adapter.assert_called_once_with(*call_args, **call_kwargs)
    self.assertEqual(res, adapter())
def test_remote_create_asserts_current_table(self):
    """It should raise AssertionError if a table not selected."""
    with self.assertRaises(AssertionError):
        self._test_adapter_method(
            'remote_create', create=True,
            args=([1], 'args'), kwargs={'kwargs': True})
def test_remote_delete(self):
    """It should call the adapter method with proper args."""
    call_args = ([1], 'args')
    call_kwargs = {'kwargs': True}
    self.dbsource.current_table = 'table'
    res, adapter = self._test_adapter_method(
        'remote_delete', create=True,
        args=call_args, kwargs=call_kwargs)
    adapter.assert_called_once_with(*call_args, **call_kwargs)
    self.assertEqual(res, adapter())
def test_remote_delete_asserts_current_table(self):
    """It should raise AssertionError if a table not selected."""
    with self.assertRaises(AssertionError):
        self._test_adapter_method(
            'remote_delete', create=True,
            args=([1], 'args'), kwargs={'kwargs': True})
def test_remote_search(self):
    """It should call the adapter method with proper args."""
    call_args = ({'search': 'query'}, 'args')
    call_kwargs = {'kwargs': True}
    self.dbsource.current_table = 'table'
    res, adapter = self._test_adapter_method(
        'remote_search', create=True,
        args=call_args, kwargs=call_kwargs)
    adapter.assert_called_once_with(*call_args, **call_kwargs)
    self.assertEqual(res, adapter())
def test_remote_search_asserts_current_table(self):
    """It should raise AssertionError if a table not selected."""
    with self.assertRaises(AssertionError):
        self._test_adapter_method(
            'remote_search', create=True,
            args=([1], 'args'), kwargs={'kwargs': True})
def test_remote_update(self):
    """It should call the adapter method with proper args."""
    call_args = ([1], {'vals': 'Value'}, 'args')
    call_kwargs = {'kwargs': True}
    self.dbsource.current_table = 'table'
    res, adapter = self._test_adapter_method(
        'remote_update', create=True,
        args=call_args, kwargs=call_kwargs)
    adapter.assert_called_once_with(*call_args, **call_kwargs)
    self.assertEqual(res, adapter())