repo_name
stringlengths
5
100
path
stringlengths
4
294
copies
stringclasses
990 values
size
stringlengths
4
7
content
stringlengths
666
1M
license
stringclasses
15 values
magsilva/scriptLattes
scriptLattes/producoesBibliograficas/apresentacaoDeTrabalho.py
1
3793
#!/usr/bin/python
# encoding: utf-8
# filename: apresentacaoDeTrabalho.py
#
# scriptLattes V8
# Copyright 2005-2013: Jesús P. Mena-Chalco e Roberto M. Cesar-Jr.
# http://scriptlattes.sourceforge.net/
#
#
# Este programa é um software livre; você pode redistribui-lo e/ou
# modifica-lo dentro dos termos da Licença Pública Geral GNU como
# publicada pela Fundação do Software Livre (FSF); na versão 2 da
# Licença, ou (na sua opinião) qualquer versão.
#
# Este programa é distribuído na esperança que possa ser util,
# mas SEM NENHUMA GARANTIA; sem uma garantia implicita de ADEQUAÇÂO a qualquer
# MERCADO ou APLICAÇÃO EM PARTICULAR. Veja a
# Licença Pública Geral GNU para maiores detalhes.
#
# Você deve ter recebido uma cópia da Licença Pública Geral GNU
# junto com este programa, se não, escreva para a Fundação do Software
# Livre(FSF) Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#

from scriptLattes import *
from scriptLattes.geradorDePaginasWeb import *
import re
from scriptLattes.util import compararCadeias


class ApresentacaoDeTrabalho:
    """One 'work presentation' entry parsed from a raw Lattes CV item.

    The constructor splits the raw string into authors, title, year and
    presentation type; ``compararCom`` merges two entries that look like
    the same presentation reported by different group members.
    """
    item = None  # raw, unparsed item text
    idMembro = None  # set of member ids that reported this item
    relevante = None
    autores = None
    titulo = None
    ano = None
    natureza = None  # presentation type (e.g. conference, seminar)
    chave = None

    def __init__(self, idMembro, partesDoItem='', relevante=''):
        # idMembro is kept as a set so that entries merged across members
        # accumulate every reporter's id (used later to build the
        # collaboration graph).
        self.idMembro = set([])
        self.idMembro.add(idMembro)

        if not partesDoItem=='':
            # partesDoItem[0]: sequence number (NOT USED)
            # partesDoItem[1]: raw description of the item
            self.relevante = relevante
            self.item = partesDoItem[1]

            # Split the item into its constituent parts.
            # Authors come before the first " . " separator.
            partes = self.item.partition(" . ")
            self.autores = partes[0].strip()
            partes = partes[2]

            # The presentation type is the last parenthesised group,
            # e.g. "... (Conferência)".
            aux = re.findall(u' \((.*?)\)', partes)
            if len(aux)>0:
                self.natureza = aux[-1]
                partes = partes.rpartition(" (")
                partes = partes[0]
            else:
                self.natureza = ''

            # The year is the last 19xx/20xx number found in the text.
            aux = re.findall(u'. ((?:19|20)\d\d)\\b', partes)
            if len(aux)>0:
                self.ano = aux[-1] #.strip().rstrip(".").rstrip(",")
                partes = partes.rpartition(". ")
                partes = partes[0]
            else:
                self.ano = ''

            # Whatever remains is the title.
            self.titulo = partes.strip().rstrip(".")
            self.chave = self.autores  # comparison key between objects
        else:
            self.relevante = ''
            self.autores = ''
            self.titulo = ''
            self.ano = ''
            self.natureza = ''

    def compararCom(self, objeto):
        """Merge ``objeto`` into ``self`` if they are the same presentation.

        Returns ``self`` (mutated) when the two items are similar, else
        ``None``. Two items match when no member id is shared and the
        titles compare as equal via ``compararCadeias``.
        """
        if self.idMembro.isdisjoint(objeto.idMembro) and compararCadeias(self.titulo, objeto.titulo):
            # The member IDs are merged together.
            # This part is important for building the collaboration GRAPH.
            self.idMembro.update(objeto.idMembro)

            # Keep the longest (most complete) variant of each field.
            if len(self.autores)<len(objeto.autores):
                self.autores = objeto.autores

            if len(self.titulo)<len(objeto.titulo):
                self.titulo = objeto.titulo

            if len(self.natureza)<len(objeto.natureza):
                self.natureza = objeto.natureza

            return self
        else: # not similar
            return None

    def html(self, listaDeMembros):
        """Render this entry as an HTML fragment for the report pages."""
        s = self.autores + '. <b>' + self.titulo + '</b>. '
        s+= str(self.ano) + '. ' if str(self.ano).isdigit() else '. '
        s+= self.natureza if not self.natureza=='' else ''
        # s+= menuHTMLdeBuscaPB(self.titulo)
        return s

    # ------------------------------------------------------------------------ #
    def __str__(self):
        # NOTE(review): the .encode('utf8') calls assume Python 2 string
        # semantics (bytes/str concatenation); under Python 3 this would
        # raise TypeError — confirm target interpreter.
        s  = "\n[APRESENTACAO DE TRABALHO] \n"
        s += "+ID-MEMBRO   : " + str(self.idMembro) + "\n"
        s += "+RELEVANTE   : " + str(self.relevante) + "\n"
        s += "+AUTORES     : " + self.autores.encode('utf8','replace') + "\n"
        s += "+TITULO      : " + self.titulo.encode('utf8','replace') + "\n"
        s += "+ANO         : " + str(self.ano) + "\n"
        s += "+NATUREZA    : " + self.natureza.encode('utf8','replace') + "\n"
        s += "+item        : " + self.item.encode('utf8','replace') + "\n"
        return s
gpl-2.0
sveetch/djangotribune
djangotribune/views/help.py
3
2094
# -*- coding: utf-8 -*- """ View to display help DEPRECATED: now we use the doc on readthedoc """ import os from django import http from django.views.generic.base import TemplateView import djangotribune class DummySourceParser(object): """Dummy parser, return the source untransformed""" def __init__(self, source, *args, **kwargs): self.source = source def __str__(self): return self.source def __unicode__(self): return self.source.decode('utf-8') def __repr__(self): return "<DummySourceParser>" try: from rstview.parser import SourceParser except ImportError: class SourceParser(DummySourceParser): pass class ConditionalParserView(TemplateView): """ Page with conditional render and mimetype Si le parser de rstview est disponible, renvoi une réponse HTML avec le contenu transformé par docutils. Sinon renvoi une réponse plain-text avec directement le contenu du document sans transformation. L'encoding attendu du document source est *utf-8*. """ template_name = "djangotribune/help.html" source_doc_name = "README.rst" source_doc_title = "README" def get(self, request, *args, **kwargs): tribune_root = os.path.abspath(os.path.dirname(djangotribune.__file__)) f = open(os.path.join(tribune_root, self.source_doc_name)) content = SourceParser(f.read(), initial_header_level=1, silent=False) f.close() if isinstance(content, DummySourceParser): return self.plain_response(content) return self.html_response(content) def plain_response(self, content): return http.HttpResponse(content, mimetype="text/plain; charset=utf-8") def html_response(self, content): context = {'content' : content, 'doc_title' : self.source_doc_title} return self.render_to_response(context) class ReadmePage(ConditionalParserView): """ Page d'aide sur le module """ source_doc_name = "../README.rst" source_doc_title = "Sveetchies-tribune"
mit
hsum/sqlalchemy
lib/sqlalchemy/dialects/mysql/zxjdbc.py
59
3942
# mysql/zxjdbc.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php

"""

.. dialect:: mysql+zxjdbc
    :name: zxjdbc for Jython
    :dbapi: zxjdbc
    :connectstring: mysql+zxjdbc://<user>:<password>@<hostname>[:<port>]/\
<database>
    :driverurl: http://dev.mysql.com/downloads/connector/j/

    .. note:: Jython is not supported by current versions of SQLAlchemy. The
       zxjdbc dialect should be considered as experimental.

Character Sets
--------------

SQLAlchemy zxjdbc dialects pass unicode straight through to the
zxjdbc/JDBC layer. To allow multiple character sets to be sent from the
MySQL Connector/J JDBC driver, by default SQLAlchemy sets its
``characterEncoding`` connection property to ``UTF-8``. It may be
overridden via a ``create_engine`` URL parameter.

"""
import re

from ... import types as sqltypes, util
from ...connectors.zxJDBC import ZxJDBCConnector
from .base import BIT, MySQLDialect, MySQLExecutionContext


class _ZxJDBCBit(BIT):
    def result_processor(self, dialect, coltype):
        """Converts boolean or byte arrays from MySQL Connector/J to longs."""
        def process(value):
            if value is None:
                return value
            if isinstance(value, bool):
                return int(value)
            # Fold the big-endian byte array into a single integer.
            v = 0
            for i in value:
                v = v << 8 | (i & 0xff)
            value = v
            return value
        return process


class MySQLExecutionContext_zxjdbc(MySQLExecutionContext):
    def get_lastrowid(self):
        """Return the autoincrement id of the last INSERT on this
        connection, via MySQL's LAST_INSERT_ID()."""
        cursor = self.create_cursor()
        cursor.execute("SELECT LAST_INSERT_ID()")
        lastrowid = cursor.fetchone()[0]
        cursor.close()
        return lastrowid


class MySQLDialect_zxjdbc(ZxJDBCConnector, MySQLDialect):
    jdbc_db_name = 'mysql'
    jdbc_driver_name = 'com.mysql.jdbc.Driver'

    execution_ctx_cls = MySQLExecutionContext_zxjdbc

    colspecs = util.update_copy(
        MySQLDialect.colspecs,
        {
            sqltypes.Time: sqltypes.Time,
            BIT: _ZxJDBCBit
        }
    )

    def _detect_charset(self, connection):
        """Sniff out the character set in use for connection results."""
        # Prefer 'character_set_results' for the current connection over the
        # value in the driver.  SET NAMES or individual variable SETs will
        # change the charset without updating the driver's view of the world.
        #
        # If it's decided that issuing that sort of SQL leaves you SOL, then
        # this can prefer the driver value.
        rs = connection.execute("SHOW VARIABLES LIKE 'character_set%%'")
        opts = dict((row[0], row[1]) for row in self._compat_fetchall(rs))
        for key in ('character_set_connection', 'character_set'):
            if opts.get(key, None):
                return opts[key]

        util.warn("Could not detect the connection character set. "
                  "Assuming latin1.")
        return 'latin1'

    def _driver_kwargs(self):
        """return kw arg dict to be sent to connect()."""
        return dict(characterEncoding='UTF-8', yearIsDateType='false')

    def _extract_error_code(self, exception):
        """Extract the numeric MySQL error code from a DB-API exception,
        or return None when the message carries no ``[SQLCode: n]`` marker.
        """
        # e.g.: DBAPIError: (Error) Table 'test.u2' doesn't exist
        # [SQLCode: 1146], [SQLState: 42S02] 'DESCRIBE `u2`' ()
        m = re.search(r"\[SQLCode\: (\d+)\]", str(exception.args))
        # FIX: the previous code called m.group(1) unconditionally, which
        # raised AttributeError (masking the real DB error) whenever the
        # exception text had no SQLCode marker.
        if m is not None:
            return int(m.group(1))
        return None

    def _get_server_version_info(self, connection):
        """Split the JDBC-reported server version ("5.6.21-log", ...) into
        a tuple of ints, keeping non-numeric fragments as strings."""
        dbapi_con = connection.connection
        version = []
        r = re.compile(r'[.\-]')
        for n in r.split(dbapi_con.dbversion):
            try:
                version.append(int(n))
            except ValueError:
                version.append(n)
        return tuple(version)

dialect = MySQLDialect_zxjdbc
mit
Wafflespeanut/servo
tests/wpt/web-platform-tests/webvtt/parsing/file-parsing/tools/parser.py
89
18918
""" A direct translation of the webvtt file parsing algorithm. See https://w3c.github.io/webvtt/#file-parsing for documentation """ import re import string SPACE_CHARACTERS = [' ', '\t', '\n', '\f', '\r'] SPACE_SPLIT_PATTERN = r"[{}]*".format(''.join(SPACE_CHARACTERS)) DIGITS = string.digits class DictInit: def __init__(self, **dict): self.__dict__.update(dict) class VTTCue(DictInit): pass class VTTRegion(DictInit): pass class Stylesheet(DictInit): pass class W3CParser: input = None position = None def collect_characters(self, condition): result = "" while self.position < len(self.input) and condition(self.input[self.position]): result += self.input[self.position] self.position += 1 return result def skip_whitespace(self): self.collect_characters(lambda c: c in SPACE_CHARACTERS) def parse_percentage_string(self, input): 'parse a percentage string' # 1. input = input # 2. if not re.match(r'^\d+(\.\d+)?%$', input): return None # 3. percentage = float(input[:-1]) # 4. if percentage < 0 or percentage > 100: return None # 5. return percentage class VTTParser(W3CParser): def __init__(self, input): self.input = input self.position = 0 self.seen_cue = False self.text_tracks = [] self.stylesheets = [] self.regions = [] self.errors = [] def parse(self): 'WebVTT parser algorithm' # 1. self.input = self.input.replace('\0', '\ufffd').replace('\r\n', '\n').replace('\r', '\n') # 2. self.position = 0 # 3. self.seen_cue = False # 4. if len(self.input) < 6: self.errors.append('input too small for webvtt') return # 5. if len(self.input) == 6 and self.input != 'WEBVTT': self.errors.append('invalid webvtt header') return # 6. if len(self.input) > 6: if not (self.input[0:6] == 'WEBVTT' and self.input[6] in ['\u0020', '\u0009', '\u000A']): self.errors.append('invalid webvtt header') return # 7. self.collect_characters(lambda c: c != '\n') # 8. if self.position >= len(self.input): return # 9. if self.input[self.position] == '\n': self.position += 1 # 10. 
if self.position >= len(self.input): return # 11. if self.input[self.position] != '\n': self.collect_block(in_header = True) else: self.position += 1 # 12. self.collect_characters(lambda c: c == '\n') # 13. self.regions = [] # 14. while self.position < len(self.input): # 1. block = self.collect_block() # 2. if isinstance(block, VTTCue): self.text_tracks.append(block) # 3. elif isinstance(block, Stylesheet): self.stylesheets.append(block) # 4. elif isinstance(block, VTTRegion): self.regions.append(block) # 5. self.collect_characters(lambda c: c == '\n') # 15. return def collect_block(self, in_header = False): 'collect a WebVTT block' # 1. (done by class) line_count = 0 # 2. previous_position = self.position # 3. line = "" # 4. buffer = "" # 5. seen_eof = False # 6. seen_arrow = False # 7. cue = None # 8. stylesheet = None # 9. region = None # 10. # 11. while True: # 1. line = self.collect_characters(lambda c: c != '\n') # 2. line_count += 1 # 3. if self.position >= len(self.input): seen_eof = True else: self.position += 1 # 4. if '-->' in line: # 1. if not in_header and (line_count == 1 or line_count == 2 and not seen_arrow): # 1. seen_arrow = True # 2. previous_position = self.position # 3. cue = VTTCue( id = buffer, pause_on_exit = False, region = None, writing_direction = 'horizontal', snap_to_lines = True, line = 'auto', line_alignment = 'start alignment', position = 'auto', position_alignment = 'auto', cue_size = 100, text_alignment = 'center', text = '', ) # 4. if not VTTCueParser(self, line, cue).collect_cue_timings_and_settings(): cue = None else: buffer = '' self.seen_cue = True # DIFFERENCE else: self.errors.append('invalid webvtt cue block') self.position = previous_position break # 5. elif line == '': break # 6. else: # 1. if not in_header and line_count == 2: # 1. 
if not self.seen_cue and re.match(r'^STYLE\s*$', buffer): stylesheet = Stylesheet( location = None, parent = None, owner_node = None, owner_rule = None, media = None, title = None, alternate = False, origin_clean = True, source = None, ) buffer = '' # 2. elif not self.seen_cue and re.match(r'^REGION\s*$', buffer): region = VTTRegion( id = '', width = 100, lines = 3, anchor_point = (0, 100), viewport_anchor_point = (0, 100), scroll_value = None, ) buffer = '' # 2. if buffer != '': buffer += '\n' # 3. buffer += line # 4. previous_position = self.position # 7. if seen_eof: break # 12. if cue is not None: cue.text = buffer return cue # 13. elif stylesheet is not None: stylesheet.source = buffer return stylesheet # 14. elif region is not None: self.collect_region_settings(region, buffer) return region # 15. return None def collect_region_settings(self, region, input): 'collect WebVTT region settings' # 1. settings = re.split(SPACE_SPLIT_PATTERN, input) # 2. for setting in settings: # 1. if ':' not in setting: continue index = setting.index(':') if index in [0, len(setting) - 1]: continue # 2. name = setting[:index] # 3. value = setting[index + 1:] # 4. if name == "id": region.id = value elif name == "width": percentage = self.parse_percentage_string(value) if percentage is not None: region.width = percentage elif name == "lines": # 1. if not re.match(r'^\d+$', value): continue # 2. number = int(value) # 3. region.lines = number elif name == "regionanchor": # 1. if ',' not in value: continue #. 2. index = value.index(',') anchorX = value[:index] # 3. anchorY = value[index + 1:] # 4. percentageX = self.parse_percentage_string(anchorX) percentageY = self.parse_percentage_string(anchorY) if None in [percentageX, percentageY]: continue # 5. region.anchor_point = (percentageX, percentageY) elif name == "viewportanchor": # 1. if ',' not in value: continue #. 2. index = value.index(',') viewportanchorX = value[:index] # 3. viewportanchorY = value[index + 1:] # 4. 
percentageX = self.parse_percentage_string(viewportanchorX) percentageY = self.parse_percentage_string(viewportanchorY) if None in [percentageX, percentageY]: continue # 5. region.viewport_anchor_point = (percentageX, percentageY) elif name == "scroll": # 1. if value == "up": region.scroll_value = "up" # 5. continue class VTTCueParser(W3CParser): def __init__(self, parent, input, cue): self.parent = parent self.errors = self.parent.errors self.input = input self.position = 0 self.cue = cue def collect_cue_timings_and_settings(self): 'collect WebVTT cue timings and settings' # 1. (handled by class) # 2. self.position = 0 # 3. self.skip_whitespace() # 4. timestamp = self.collect_timestamp() if timestamp is None: self.errors.append('invalid start time for VTTCue') return False self.cue.start_time = timestamp # 5. self.skip_whitespace() # 6. if self.input[self.position] != '-': return False self.position += 1 # 7. if self.input[self.position] != '-': return False self.position += 1 # 8. if self.input[self.position] != '>': return False self.position += 1 # 9. self.skip_whitespace() # 10. timestamp = self.collect_timestamp() if timestamp is None: self.errors.append('invalid end time for VTTCue') return False self.cue.end_time = timestamp # 11. remainder = self.input[self.position:] # 12. self.parse_settings(remainder) # Extra return True def parse_settings(self, input): 'parse the WebVTT cue settings' # 1. settings = re.split(SPACE_SPLIT_PATTERN, input) # 2. for setting in settings: # 1. if ':' not in setting: continue index = setting.index(':') if index in [0, len(setting) - 1]: continue # 2. name = setting[:index] # 3. value = setting[index + 1:] # 4. if name == 'region': # 1. last_regions = (region for region in reversed(self.parent.regions) if region.id == value) self.cue.region = next(last_regions, None) elif name == 'vertical': # 1. and 2. if value in ['rl', 'lr']: self.cue.writing_direction = value elif name == 'line': # 1. 
if ',' in value: index = value.index(',') linepos = value[:index] linealign = value[index + 1:] # 2. else: linepos = value linealign = None # 3. if not re.search(r'\d', linepos): continue # 4. if linepos[-1] == '%': number = self.parse_percentage_string(linepos) if number is None: continue else: # 1. if not re.match(r'^[-\.\d]*$', linepos): continue # 2. if '-' in linepos[1:]: continue # 3. if linepos.count('.') > 1: continue # 4. if '.' in linepos: if not re.search(r'\d\.\d', linepos): continue # 5. number = float(linepos) # 5. if linealign == "start": self.cue.line_alignment = 'start' # 6. elif linealign == "center": self.cue.line_alignment = 'center' # 7. elif linealign == "end": self.cue.line_alignment = 'end' # 8. elif linealign != None: continue # 9. self.cue.line = number # 10. if linepos[-1] == '%': self.cue.snap_to_lines = False else: self.cue.snap_to_lines = True elif name == 'position': # 1. if ',' in value: index = value.index(',') colpos = value[:index] colalign = value[index + 1:] # 2. else: colpos = value colalign = None # 3. number = self.parse_percentage_string(colpos) if number is None: continue # 4. if colalign == "line-left": self.cue.line_alignment = 'line-left' # 5. elif colalign == "center": self.cue.line_alignment = 'center' # 6. elif colalign == "line-right": self.cue.line_alignment = 'line-right' # 7. elif colalign != None: continue # 8. self.cue.position = number elif name == 'size': # 1. number = self.parse_percentage_string(value) if number is None: continue # 2. self.cue.cue_size = number elif name == 'align': # 1. if value == 'start': self.cue.text_alignment = 'start' # 2. if value == 'center': self.cue.text_alignment = 'center' # 3. if value == 'end': self.cue.text_alignment = 'end' # 4. if value == 'left': self.cue.text_alignment = 'left' # 5. if value == 'right': self.cue.text_alignment = 'right' # 5. continue def collect_timestamp(self): 'collect a WebVTT timestamp' # 1. (handled by class) # 2. 
most_significant_units = 'minutes' # 3. if self.position >= len(self.input): return None # 4. if self.input[self.position] not in DIGITS: return None # 5. string = self.collect_characters(lambda c: c in DIGITS) # 6. value_1 = int(string) # 7. if len(string) != 2 or value_1 > 59: most_significant_units = 'hours' # 8. if self.position >= len(self.input) or self.input[self.position] != ':': return None self.position += 1 # 9. string = self.collect_characters(lambda c: c in DIGITS) # 10. if len(string) != 2: return None # 11. value_2 = int(string) # 12. if most_significant_units == 'hours' or self.position < len(self.input) and self.input[self.position] == ':': # 1. if self.position >= len(self.input) or self.input[self.position] != ':': return None self.position += 1 # 2. string = self.collect_characters(lambda c: c in DIGITS) # 3. if len(string) != 2: return None # 4. value_3 = int(string) else: value_3 = value_2 value_2 = value_1 value_1 = 0 # 13. if self.position >= len(self.input) or self.input[self.position] != '.': return None self.position += 1 # 14. string = self.collect_characters(lambda c: c in DIGITS) # 15. if len(string) != 3: return None # 16. value_4 = int(string) # 17. if value_2 >= 59 or value_3 >= 59: return None # 18. result = value_1 * 60 * 60 + value_2 * 60 + value_3 + value_4 / 1000 # 19. return result def main(argv): files = [open(path, 'r') for path in argv[1:]] try: for file in files: parser = VTTParser(file.read()) parser.parse() print("Results: {}".format(file)) print(" Cues: {}".format(parser.text_tracks)) print(" StyleSheets: {}".format(parser.stylesheets)) print(" Regions: {}".format(parser.regions)) print(" Errors: {}".format(parser.errors)) finally: for file in files: file.close() if __name__ == '__main__': import sys main(sys.argv);
mpl-2.0
clchiou/garage
py/garage/garage/threads/tasklets.py
1
2796
__all__ = [
    'TaskQueue',
    'tasklet',
]

import logging

from garage.assertions import ASSERT
from garage.threads import actors
from garage.threads import queues


LOG = logging.getLogger(__name__)


class TaskQueue(queues.ForwardingQueue):
    """A task queue (vs executor) is for scenarios that the number of
    total tasks is not known in advance (and thus you do not know when
    you may close the queue).  This happens when a task may spawn more
    tasks depending on the task's outcome.

    We implement a simple strategy to determine when a task queue may
    safely close itself: a task queue tracks the number of tasks and
    running tasklets, and it closes itself when both are zero.

    If no tasklet is running, no new tasks will be put into the queue.
    If at the same time, there is no task in the queue, we should be
    safe to conclude that there will never be new tasks (unless you are
    still putting new tasks into the queue - which you shouldn't; see
    below).

    NOTE: The limitation of this simple strategy is that once you put
    the initial tasks into the task queue, you should not put any more
    tasks into the queue because the queue may have already been closed.
    If you do want to put tasks into the queue after tasklets start, you
    will have to implement your task queue.  (But this simple strategy
    should work for most scenarios.)

    You may use this auto-close feature to wait for the completion of
    all tasks.
    """

    def __init__(self, queue):
        super().__init__(queue)
        # Count of tasks handed out but not yet reported idle; guarded by
        # self.lock (provided by ForwardingQueue).
        self.__num_running_tasklets = 0

    def get_task(self):
        """Pop a task and mark its consumer as running (one unit)."""
        ASSERT.greater_or_equal(self.__num_running_tasklets, 0)
        with self.lock:
            task = self.get()
            # Incremented under the same lock as the pop so the
            # "no tasks and nobody running" check below never races.
            self.__num_running_tasklets += 1
            return task

    # idle = not running
    def notify_tasklet_idle(self):
        """Report that a tasklet finished its task; auto-close the queue
        once nothing is running and nothing is queued."""
        ASSERT.greater(self.__num_running_tasklets, 0)
        with self.lock:
            self.__num_running_tasklets -= 1
            ASSERT.greater_or_equal(self.__num_running_tasklets, 0)
            # We may close the queue when both conditions (no running
            # tasklets and no tasks) are met.
            if self.__num_running_tasklets == 0 and not self:
                self.close()


@actors.OneShotActor.from_func
def tasklet(task_queue):
    """A tasklet consumes task from a task queue, and it exits when the
    task queue is closed.

    A tasklet notifies the task queue when it has executed the task and
    becomes idle again.
    """
    LOG.info('start')
    while True:
        try:
            task = task_queue.get_task()
        except queues.Closed:
            break
        try:
            task()
        finally:
            # Always report idle, even if the task raised, so the queue's
            # running count stays accurate.
            task_queue.notify_tasklet_idle()
        # Drop the reference promptly so the task object can be collected
        # while we block waiting for the next one.
        del task
    LOG.info('exit')
mit
ktan2020/legacy-automation
win/Lib/unittest/test/test_runner.py
9
8340
"""Tests for unittest's cleanup machinery and TextTestRunner (Python 2)."""
import unittest

from cStringIO import StringIO
import pickle

from .support import LoggingResult, ResultWithNoStartTestRunStopTestRun


class TestCleanUp(unittest.TestCase):

    def testCleanUp(self):
        # Cleanups run in LIFO order with their recorded args/kwargs.
        class TestableTest(unittest.TestCase):
            def testNothing(self):
                pass

        test = TestableTest('testNothing')
        self.assertEqual(test._cleanups, [])

        cleanups = []

        def cleanup1(*args, **kwargs):
            cleanups.append((1, args, kwargs))

        def cleanup2(*args, **kwargs):
            cleanups.append((2, args, kwargs))

        test.addCleanup(cleanup1, 1, 2, 3, four='hello', five='goodbye')
        test.addCleanup(cleanup2)

        self.assertEqual(test._cleanups,
                         [(cleanup1, (1, 2, 3), dict(four='hello', five='goodbye')),
                          (cleanup2, (), {})])

        result = test.doCleanups()
        self.assertTrue(result)

        # LIFO: cleanup2 (added last) ran first.
        self.assertEqual(cleanups,
                         [(2, (), {}), (1, (1, 2, 3), dict(four='hello', five='goodbye'))])

    def testCleanUpWithErrors(self):
        # Exceptions raised by cleanups are reported as errors on the
        # result, and doCleanups() returns False.
        class TestableTest(unittest.TestCase):
            def testNothing(self):
                pass

        class MockResult(object):
            errors = []
            def addError(self, test, exc_info):
                self.errors.append((test, exc_info))

        result = MockResult()
        test = TestableTest('testNothing')
        test._resultForDoCleanups = result

        exc1 = Exception('foo')
        exc2 = Exception('bar')
        def cleanup1():
            raise exc1

        def cleanup2():
            raise exc2

        test.addCleanup(cleanup1)
        test.addCleanup(cleanup2)

        self.assertFalse(test.doCleanups())

        (test1, (Type1, instance1, _)), (test2, (Type2, instance2, _)) = reversed(MockResult.errors)
        self.assertEqual((test1, Type1, instance1), (test, Exception, exc1))
        self.assertEqual((test2, Type2, instance2), (test, Exception, exc2))

    def testCleanupInRun(self):
        # Cleanups run after tearDown on success, and still run when
        # setUp raises (in which case test/tearDown are skipped).
        blowUp = False
        ordering = []
        class TestableTest(unittest.TestCase):
            def setUp(self):
                ordering.append('setUp')
                if blowUp:
                    raise Exception('foo')
            def testNothing(self):
                ordering.append('test')
            def tearDown(self):
                ordering.append('tearDown')

        test = TestableTest('testNothing')

        def cleanup1():
            ordering.append('cleanup1')
        def cleanup2():
            ordering.append('cleanup2')
        test.addCleanup(cleanup1)
        test.addCleanup(cleanup2)

        def success(some_test):
            self.assertEqual(some_test, test)
            ordering.append('success')

        result = unittest.TestResult()
        result.addSuccess = success

        test.run(result)
        self.assertEqual(ordering,
                         ['setUp', 'test', 'tearDown', 'cleanup2', 'cleanup1', 'success'])

        blowUp = True
        ordering = []
        test = TestableTest('testNothing')
        test.addCleanup(cleanup1)
        test.run(result)
        self.assertEqual(ordering, ['setUp', 'cleanup1'])

    def testTestCaseDebugExecutesCleanups(self):
        # debug() also runs cleanups, including ones added *by* a cleanup.
        ordering = []
        class TestableTest(unittest.TestCase):
            def setUp(self):
                ordering.append('setUp')
                self.addCleanup(cleanup1)
            def testNothing(self):
                ordering.append('test')
            def tearDown(self):
                ordering.append('tearDown')

        test = TestableTest('testNothing')

        def cleanup1():
            ordering.append('cleanup1')
            test.addCleanup(cleanup2)
        def cleanup2():
            ordering.append('cleanup2')

        test.debug()
        self.assertEqual(ordering,
                         ['setUp', 'test', 'tearDown', 'cleanup1', 'cleanup2'])


class Test_TextTestRunner(unittest.TestCase):
    """Tests for TextTestRunner."""

    def test_init(self):
        runner = unittest.TextTestRunner()
        self.assertFalse(runner.failfast)
        self.assertFalse(runner.buffer)
        self.assertEqual(runner.verbosity, 1)
        self.assertTrue(runner.descriptions)
        self.assertEqual(runner.resultclass, unittest.TextTestResult)

    def testBufferAndFailfast(self):
        # failfast/buffer flags are propagated from runner to result.
        class Test(unittest.TestCase):
            def testFoo(self):
                pass
        result = unittest.TestResult()
        runner = unittest.TextTestRunner(stream=StringIO(), failfast=True,
                                         buffer=True)
        # Use our result object
        runner._makeResult = lambda: result
        runner.run(Test('testFoo'))

        self.assertTrue(result.failfast)
        self.assertTrue(result.buffer)

    def testRunnerRegistersResult(self):
        class Test(unittest.TestCase):
            def testFoo(self):
                pass
        originalRegisterResult = unittest.runner.registerResult
        def cleanup():
            unittest.runner.registerResult = originalRegisterResult
        self.addCleanup(cleanup)

        result = unittest.TestResult()
        runner = unittest.TextTestRunner(stream=StringIO())
        # Use our result object
        runner._makeResult = lambda: result

        self.wasRegistered = 0
        def fakeRegisterResult(thisResult):
            self.wasRegistered += 1
            self.assertEqual(thisResult, result)

        unittest.runner.registerResult = fakeRegisterResult

        runner.run(unittest.TestSuite())
        self.assertEqual(self.wasRegistered, 1)

    def test_works_with_result_without_startTestRun_stopTestRun(self):
        # Old-style result objects (pre-2.7 API) must still be usable.
        class OldTextResult(ResultWithNoStartTestRunStopTestRun):
            separator2 = ''
            def printErrors(self):
                pass

        class Runner(unittest.TextTestRunner):
            def __init__(self):
                super(Runner, self).__init__(StringIO())

            def _makeResult(self):
                return OldTextResult()

        runner = Runner()
        runner.run(unittest.TestSuite())

    def test_startTestRun_stopTestRun_called(self):
        class LoggingTextResult(LoggingResult):
            separator2 = ''
            def printErrors(self):
                pass

        class LoggingRunner(unittest.TextTestRunner):
            def __init__(self, events):
                super(LoggingRunner, self).__init__(StringIO())
                self._events = events

            def _makeResult(self):
                return LoggingTextResult(self._events)

        events = []
        runner = LoggingRunner(events)
        runner.run(unittest.TestSuite())
        expected = ['startTestRun', 'stopTestRun']
        self.assertEqual(events, expected)

    def test_pickle_unpickle(self):
        # Issue #7197: a TextTestRunner should be (un)pickleable. This is
        # required by test_multiprocessing under Windows (in verbose mode).
        from StringIO import StringIO as PickleableIO
        # cStringIO objects are not pickleable, but StringIO objects are.
        stream = PickleableIO("foo")
        runner = unittest.TextTestRunner(stream)
        for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
            s = pickle.dumps(runner, protocol=protocol)
            obj = pickle.loads(s)
            # StringIO objects never compare equal, a cheap test instead.
            self.assertEqual(obj.stream.getvalue(), stream.getvalue())

    def test_resultclass(self):
        # A custom resultclass is stored and invoked by _makeResult with
        # (stream, descriptions, verbosity).
        def MockResultClass(*args):
            return args
        STREAM = object()
        DESCRIPTIONS = object()
        VERBOSITY = object()
        runner = unittest.TextTestRunner(STREAM, DESCRIPTIONS, VERBOSITY,
                                         resultclass=MockResultClass)
        self.assertEqual(runner.resultclass, MockResultClass)

        expectedresult = (runner.stream, DESCRIPTIONS, VERBOSITY)
        self.assertEqual(runner._makeResult(), expectedresult)


if __name__ == '__main__':
    unittest.main()
mit
nzavagli/UnrealPy
UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/pycrypto-2.6.1/lib/Crypto/SelfTest/Hash/__init__.py
116
2518
# -*- coding: utf-8 -*- # # SelfTest/Hash/__init__.py: Self-test for hash modules # # Written in 2008 by Dwayne C. Litzenberger <dlitz@dlitz.net> # # =================================================================== # The contents of this file are dedicated to the public domain. To # the extent that dedication to the public domain is not available, # everyone is granted a worldwide, perpetual, royalty-free, # non-exclusive license to exercise all rights associated with the # contents of this file for any purpose whatsoever. # No rights are reserved. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS # BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
# ===================================================================

"""Self-test for hash modules"""

__revision__ = "$Id$"


def get_tests(config=None):
    """Collect and return the self-test suites of every hash module.

    :param config: optional configuration dict forwarded to each
        sub-module's ``get_tests``; defaults to an empty dict.
    :return: a list of test objects suitable for ``unittest.TestSuite``.
    """
    # FIX: the original signature used a mutable default (``config={}``);
    # a None sentinel avoids sharing one dict object across all calls.
    if config is None:
        config = {}
    tests = []
    from Crypto.SelfTest.Hash import test_HMAC
    tests += test_HMAC.get_tests(config=config)
    from Crypto.SelfTest.Hash import test_MD2
    tests += test_MD2.get_tests(config=config)
    from Crypto.SelfTest.Hash import test_MD4
    tests += test_MD4.get_tests(config=config)
    from Crypto.SelfTest.Hash import test_MD5
    tests += test_MD5.get_tests(config=config)
    from Crypto.SelfTest.Hash import test_RIPEMD
    tests += test_RIPEMD.get_tests(config=config)
    from Crypto.SelfTest.Hash import test_SHA
    tests += test_SHA.get_tests(config=config)
    from Crypto.SelfTest.Hash import test_SHA256
    tests += test_SHA256.get_tests(config=config)
    # SHA-2 long variants may be unavailable on old Python builds; skip
    # them (with a warning) rather than failing the whole collection.
    try:
        from Crypto.SelfTest.Hash import test_SHA224
        tests += test_SHA224.get_tests(config=config)
        from Crypto.SelfTest.Hash import test_SHA384
        tests += test_SHA384.get_tests(config=config)
        from Crypto.SelfTest.Hash import test_SHA512
        tests += test_SHA512.get_tests(config=config)
    except ImportError:
        import sys
        sys.stderr.write("SelfTest: warning: not testing SHA224/SHA384/SHA512 modules (not available)\n")
    return tests

if __name__ == '__main__':
    import unittest
    suite = lambda: unittest.TestSuite(get_tests())
    unittest.main(defaultTest='suite')

# vim:set ts=4 sw=4 sts=4 expandtab:
mit
mandeepdhami/nova
nova/tests/functional/v3/test_simple_tenant_usage.py
24
2813
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import datetime
import urllib

from oslo_config import cfg
from oslo_utils import timeutils

from nova.tests.functional.v3 import test_servers

CONF = cfg.CONF
CONF.import_opt('osapi_compute_extension',
                'nova.api.openstack.compute.extensions')


class SimpleTenantUsageSampleJsonTest(test_servers.ServersSampleBase):
    """API-sample test for the os-simple-tenant-usage extension.

    Boots a server with a frozen clock, advances time by one hour, then
    verifies the usage responses against the stored API samples.
    """
    extension_name = "os-simple-tenant-usage"
    extra_extensions_to_load = ["os-access-ips"]
    _api_version = 'v2'

    def _get_flags(self):
        # Enable the simple_tenant_usage extension on top of the base
        # extension list provided by ServersSampleBase.
        f = super(SimpleTenantUsageSampleJsonTest, self)._get_flags()
        f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
        f['osapi_compute_extension'].append(
            'nova.api.openstack.compute.contrib.simple_tenant_usage.'
            'Simple_tenant_usage')
        return f

    def setUp(self):
        """setUp method for simple tenant usage."""
        super(SimpleTenantUsageSampleJsonTest, self).setUp()
        started = timeutils.utcnow()
        now = started + datetime.timedelta(hours=1)

        # Freeze the clock at the start time so the booted server's usage
        # window is deterministic, then jump one hour ahead.
        timeutils.set_time_override(started)
        self._post_server()

        timeutils.set_time_override(now)
        self.query = {
            'start': str(started),
            'end': str(now)
        }

    def tearDown(self):
        """tearDown method for simple tenant usage."""
        super(SimpleTenantUsageSampleJsonTest, self).tearDown()
        # Restore the real clock for subsequent tests.
        timeutils.clear_time_override()

    def test_get_tenants_usage(self):
        # Get api sample to get all tenants usage request.
        response = self._do_get('os-simple-tenant-usage?%s' % (
            urllib.urlencode(self.query)))
        subs = self._get_regexes()
        self._verify_response('simple-tenant-usage-get', subs, response, 200)

    def test_get_tenant_usage_details(self):
        # Get api sample to get specific tenant usage request.
        tenant_id = 'openstack'
        response = self._do_get('os-simple-tenant-usage/%s?%s' % (tenant_id,
                                urllib.urlencode(self.query)))
        subs = self._get_regexes()
        self._verify_response('simple-tenant-usage-get-specific', subs,
                              response, 200)
apache-2.0
srikantbmandal/ansible
lib/ansible/modules/packaging/os/zypper.py
18
16707
#!/usr/bin/python -tt # -*- coding: utf-8 -*- # (c) 2013, Patrick Callahan <pmc@patrickcallahan.com> # based on # openbsd_pkg # (c) 2013 # Patrik Lundin <patrik.lundin.swe@gmail.com> # # yum # (c) 2012, Red Hat, Inc # Written by Seth Vidal <skvidal at fedoraproject.org> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. ANSIBLE_METADATA = {'metadata_version': '1.0', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: zypper author: - "Patrick Callahan (@dirtyharrycallahan)" - "Alexander Gubin (@alxgu)" - "Thomas O'Donnell (@andytom)" - "Robin Roth (@robinro)" - "Andrii Radyk (@AnderEnder)" version_added: "1.2" short_description: Manage packages on SUSE and openSUSE description: - Manage packages on SUSE and openSUSE using the zypper and rpm tools. options: name: description: - Package name C(name) or package specifier. - Can include a version like C(name=1.0), C(name>3.4) or C(name<=2.7). If a version is given, C(oldpackage) is implied and zypper is allowed to update the package within the version range given. - You can also pass a url or a local path to a rpm file. - When using state=latest, this can be '*', which updates all installed packages. required: true aliases: [ 'pkg' ] state: description: - C(present) will make sure the package is installed. C(latest) will make sure the latest version of the package is installed. 
C(absent) will make sure the specified package is not installed. required: false choices: [ present, latest, absent ] default: "present" type: description: - The type of package to be operated on. required: false choices: [ package, patch, pattern, product, srcpackage, application ] default: "package" version_added: "2.0" disable_gpg_check: description: - Whether to disable to GPG signature checking of the package signature being installed. Has an effect only if state is I(present) or I(latest). required: false default: "no" choices: [ "yes", "no" ] disable_recommends: version_added: "1.8" description: - Corresponds to the C(--no-recommends) option for I(zypper). Default behavior (C(yes)) modifies zypper's default behavior; C(no) does install recommended packages. required: false default: "yes" choices: [ "yes", "no" ] force: version_added: "2.2" description: - Adds C(--force) option to I(zypper). Allows to downgrade packages and change vendor or architecture. required: false default: "no" choices: [ "yes", "no" ] update_cache: version_added: "2.2" description: - Run the equivalent of C(zypper refresh) before the operation. Disabled in check mode. required: false default: "no" choices: [ "yes", "no" ] aliases: [ "refresh" ] oldpackage: version_added: "2.2" description: - Adds C(--oldpackage) option to I(zypper). Allows to downgrade packages with less side-effects than force. This is implied as soon as a version is specified as part of the package name. 
required: false default: "no" choices: [ "yes", "no" ] # informational: requirements for nodes requirements: - "zypper >= 1.0 # included in openSuSE >= 11.1 or SuSE Linux Enterprise Server/Desktop >= 11.0" - python-xml - rpm ''' EXAMPLES = ''' # Install "nmap" - zypper: name: nmap state: present # Install apache2 with recommended packages - zypper: name: apache2 state: present disable_recommends: no # Apply a given patch - zypper: name: openSUSE-2016-128 state: present type: patch # Remove the "nmap" package - zypper: name: nmap state: absent # Install the nginx rpm from a remote repo - zypper: name: 'http://nginx.org/packages/sles/12/x86_64/RPMS/nginx-1.8.0-1.sles12.ngx.x86_64.rpm' state: present # Install local rpm file - zypper: name: /tmp/fancy-software.rpm state: present # Update all packages - zypper: name: '*' state: latest # Apply all available patches - zypper: name: '*' state: latest type: patch # Refresh repositories and update package "openssl" - zypper: name: openssl state: present update_cache: yes # Install specific version (possible comparisons: <, >, <=, >=, =) - zypper: name: 'docker>=1.10' state: present # Wait 20 seconds to acquire the lock before failing - zypper: name: mosh state: present environment: ZYPP_LOCK_TIMEOUT: 20 ''' import xml import re from xml.dom.minidom import parseString as parseXML from ansible.module_utils.six import iteritems class Package: def __init__(self, name, prefix, version): self.name = name self.prefix = prefix self.version = version self.shouldinstall = (prefix == '+') def __str__(self): return self.prefix + self.name + self.version def split_name_version(name): """splits of the package name and desired version example formats: - docker>=1.10 - apache=2.4 Allowed version specifiers: <, >, <=, >=, = Allowed version format: [0-9.-]* Also allows a prefix indicating remove "-", "~" or install "+" """ prefix = '' if name[0] in ['-', '~', '+']: prefix = name[0] name = name[1:] if prefix == '~': prefix = '-' version_check 
= re.compile('^(.*?)((?:<|>|<=|>=|=)[0-9.-]*)?$') try: reres = version_check.match(name) name, version = reres.groups() if version is None: version = '' return prefix, name, version except: return prefix, name, '' def get_want_state(names, remove=False): packages = [] urls = [] for name in names: if '://' in name or name.endswith('.rpm'): urls.append(name) else: prefix, pname, version = split_name_version(name) if prefix not in ['-', '+']: if remove: prefix = '-' else: prefix = '+' packages.append(Package(pname, prefix, version)) return packages, urls def get_installed_state(m, packages): "get installed state of packages" cmd = get_cmd(m, 'search') cmd.extend(['--match-exact', '--details', '--installed-only']) cmd.extend([p.name for p in packages]) return parse_zypper_xml(m, cmd, fail_not_found=False)[0] def parse_zypper_xml(m, cmd, fail_not_found=True, packages=None): rc, stdout, stderr = m.run_command(cmd, check_rc=False) try: dom = parseXML(stdout) except xml.parsers.expat.ExpatError: e = get_exception() m.fail_json(msg="Failed to parse zypper xml output: %s" % e, rc=rc, stdout=stdout, stderr=stderr, cmd=cmd) if rc == 104: # exit code 104 is ZYPPER_EXIT_INF_CAP_NOT_FOUND (no packages found) if fail_not_found: errmsg = dom.getElementsByTagName('message')[-1].childNodes[0].data m.fail_json(msg=errmsg, rc=rc, stdout=stdout, stderr=stderr, cmd=cmd) else: return {}, rc, stdout, stderr elif rc in [0, 106, 103]: # zypper exit codes # 0: success # 106: signature verification failed # 103: zypper was upgraded, run same command again if packages is None: firstrun = True packages = {} solvable_list = dom.getElementsByTagName('solvable') for solvable in solvable_list: name = solvable.getAttribute('name') packages[name] = {} packages[name]['version'] = solvable.getAttribute('edition') packages[name]['oldversion'] = solvable.getAttribute('edition-old') status = solvable.getAttribute('status') packages[name]['installed'] = status == "installed" packages[name]['group'] = 
solvable.parentNode.nodeName if rc == 103 and firstrun: # if this was the first run and it failed with 103 # run zypper again with the same command to complete update return parse_zypper_xml(m, cmd, fail_not_found=fail_not_found, packages=packages) return packages, rc, stdout, stderr m.fail_json(msg='Zypper run command failed with return code %s.'%rc, rc=rc, stdout=stdout, stderr=stderr, cmd=cmd) def get_cmd(m, subcommand): "puts together the basic zypper command arguments with those passed to the module" is_install = subcommand in ['install', 'update', 'patch'] is_refresh = subcommand == 'refresh' cmd = ['/usr/bin/zypper', '--quiet', '--non-interactive', '--xmlout'] # add global options before zypper command if (is_install or is_refresh) and m.params['disable_gpg_check']: cmd.append('--no-gpg-checks') cmd.append(subcommand) if subcommand != 'patch' and not is_refresh: cmd.extend(['--type', m.params['type']]) if m.check_mode and subcommand != 'search': cmd.append('--dry-run') if is_install: cmd.append('--auto-agree-with-licenses') if m.params['disable_recommends']: cmd.append('--no-recommends') if m.params['force']: cmd.append('--force') if m.params['oldpackage']: cmd.append('--oldpackage') return cmd def set_diff(m, retvals, result): # TODO: if there is only one package, set before/after to version numbers packages = {'installed': [], 'removed': [], 'upgraded': []} if result: for p in result: group = result[p]['group'] if group == 'to-upgrade': versions = ' (' + result[p]['oldversion'] + ' => ' + result[p]['version'] + ')' packages['upgraded'].append(p + versions) elif group == 'to-install': packages['installed'].append(p) elif group == 'to-remove': packages['removed'].append(p) output = '' for state in packages: if packages[state]: output += state + ': ' + ', '.join(packages[state]) + '\n' if 'diff' not in retvals: retvals['diff'] = {} if 'prepared' not in retvals['diff']: retvals['diff']['prepared'] = output else: retvals['diff']['prepared'] += '\n' + output def 
package_present(m, name, want_latest): "install and update (if want_latest) the packages in name_install, while removing the packages in name_remove" retvals = {'rc': 0, 'stdout': '', 'stderr': ''} packages, urls = get_want_state(name) # add oldpackage flag when a version is given to allow downgrades if any(p.version for p in packages): m.params['oldpackage'] = True if not want_latest: # for state=present: filter out already installed packages # if a version is given leave the package in to let zypper handle the version # resolution packageswithoutversion = [p for p in packages if not p.version] prerun_state = get_installed_state(m, packageswithoutversion) # generate lists of packages to install or remove packages = [p for p in packages if p.shouldinstall != (p.name in prerun_state)] if not packages and not urls: # nothing to install/remove and nothing to update return None, retvals # zypper install also updates packages cmd = get_cmd(m, 'install') cmd.append('--') cmd.extend(urls) # pass packages to zypper # allow for + or - prefixes in install/remove lists # also add version specifier if given # do this in one zypper run to allow for dependency-resolution # for example "-exim postfix" runs without removing packages depending on mailserver cmd.extend([str(p) for p in packages]) retvals['cmd'] = cmd result, retvals['rc'], retvals['stdout'], retvals['stderr'] = parse_zypper_xml(m, cmd) return result, retvals def package_update_all(m): "run update or patch on all available packages" retvals = {'rc': 0, 'stdout': '', 'stderr': ''} if m.params['type'] == 'patch': cmdname = 'patch' else: cmdname = 'update' cmd = get_cmd(m, cmdname) retvals['cmd'] = cmd result, retvals['rc'], retvals['stdout'], retvals['stderr'] = parse_zypper_xml(m, cmd) return result, retvals def package_absent(m, name): "remove the packages in name" retvals = {'rc': 0, 'stdout': '', 'stderr': ''} # Get package state packages, urls = get_want_state(name, remove=True) if any(p.prefix == '+' for p in 
packages): m.fail_json(msg="Can not combine '+' prefix with state=remove/absent.") if urls: m.fail_json(msg="Can not remove via URL.") if m.params['type'] == 'patch': m.fail_json(msg="Can not remove patches.") prerun_state = get_installed_state(m, packages) packages = [p for p in packages if p.name in prerun_state] if not packages: return None, retvals cmd = get_cmd(m, 'remove') cmd.extend([p.name + p.version for p in packages]) retvals['cmd'] = cmd result, retvals['rc'], retvals['stdout'], retvals['stderr'] = parse_zypper_xml(m, cmd) return result, retvals def repo_refresh(m): "update the repositories" retvals = {'rc': 0, 'stdout': '', 'stderr': ''} cmd = get_cmd(m, 'refresh') retvals['cmd'] = cmd result, retvals['rc'], retvals['stdout'], retvals['stderr'] = parse_zypper_xml(m, cmd) return retvals # =========================================== # Main control flow def main(): module = AnsibleModule( argument_spec = dict( name = dict(required=True, aliases=['pkg'], type='list'), state = dict(required=False, default='present', choices=['absent', 'installed', 'latest', 'present', 'removed']), type = dict(required=False, default='package', choices=['package', 'patch', 'pattern', 'product', 'srcpackage', 'application']), disable_gpg_check = dict(required=False, default='no', type='bool'), disable_recommends = dict(required=False, default='yes', type='bool'), force = dict(required=False, default='no', type='bool'), update_cache = dict(required=False, aliases=['refresh'], default='no', type='bool'), oldpackage = dict(required=False, default='no', type='bool'), ), supports_check_mode = True ) name = module.params['name'] state = module.params['state'] update_cache = module.params['update_cache'] # remove empty strings from package list name = filter(None, name) # Refresh repositories if update_cache and not module.check_mode: retvals = repo_refresh(module) if retvals['rc'] != 0: module.fail_json(msg="Zypper refresh run failed.", **retvals) # Perform requested action if name 
== ['*'] and state == 'latest': packages_changed, retvals = package_update_all(module) else: if state in ['absent', 'removed']: packages_changed, retvals = package_absent(module, name) elif state in ['installed', 'present', 'latest']: packages_changed, retvals = package_present(module, name, state == 'latest') retvals['changed'] = retvals['rc'] == 0 and bool(packages_changed) if module._diff: set_diff(module, retvals, packages_changed) if retvals['rc'] != 0: module.fail_json(msg="Zypper run failed.", **retvals) if not retvals['changed']: del retvals['stdout'] del retvals['stderr'] module.exit_json(name=name, state=state, update_cache=update_cache, **retvals) # import module snippets from ansible.module_utils.basic import AnsibleModule if __name__ == "__main__": main()
gpl-3.0
dgwakeman/mne-python
examples/inverse/plot_gamma_map_inverse.py
30
2316
""" =============================================================================== Compute a sparse inverse solution using the Gamma-Map empirical Bayesian method =============================================================================== See Wipf et al. "A unified Bayesian framework for MEG/EEG source imaging." NeuroImage, vol. 44, no. 3, pp. 947?66, Mar. 2009. """ # Author: Martin Luessi <mluessi@nmr.mgh.harvard.edu> # # License: BSD (3-clause) import numpy as np import mne from mne.datasets import sample from mne.inverse_sparse import gamma_map from mne.viz import plot_sparse_source_estimates print(__doc__) data_path = sample.data_path() subjects_dir = data_path + '/subjects' fwd_fname = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif' evoked_fname = data_path + '/MEG/sample/sample_audvis-ave.fif' cov_fname = data_path + '/MEG/sample/sample_audvis-cov.fif' # Read the evoked response and crop it condition = 'Left visual' evoked = mne.read_evokeds(evoked_fname, condition=condition, baseline=(None, 0)) evoked.crop(tmin=-50e-3, tmax=300e-3) # Read the forward solution forward = mne.read_forward_solution(fwd_fname, surf_ori=True, force_fixed=False) # Read noise noise covariance matrix and regularize it cov = mne.read_cov(cov_fname) cov = mne.cov.regularize(cov, evoked.info) # Run the Gamma-MAP method alpha = 0.5 stc, residual = gamma_map(evoked, forward, cov, alpha, xyz_same_gamma=True, return_residual=True) # View in 2D and 3D ("glass" brain like 3D plot) # Show the sources as spheres scaled by their strength scale_factors = np.max(np.abs(stc.data), axis=1) scale_factors = 0.5 * (1 + scale_factors / np.max(scale_factors)) plot_sparse_source_estimates( forward['src'], stc, bgcolor=(1, 1, 1), modes=['sphere'], opacity=0.1, scale_factors=(scale_factors, None), fig_name="Gamma-MAP") # Show the evoked response and the residual for gradiometers ylim = dict(grad=[-120, 120]) evoked.pick_types(meg='grad', exclude='bads') 
evoked.plot(titles=dict(grad='Evoked Response Gradiometers'), ylim=ylim, proj=True) residual.pick_types(meg='grad', exclude='bads') residual.plot(titles=dict(grad='Residuals Gradiometers'), ylim=ylim, proj=True)
bsd-3-clause
vinegret/youtube-dl
youtube_dl/extractor/medialaan.py
22
9994
from __future__ import unicode_literals import re from .gigya import GigyaBaseIE from ..compat import compat_str from ..utils import ( int_or_none, parse_duration, try_get, unified_timestamp, ) class MedialaanIE(GigyaBaseIE): _VALID_URL = r'''(?x) https?:// (?:www\.|nieuws\.)? (?: (?P<site_id>vtm|q2|vtmkzoom)\.be/ (?: video(?:/[^/]+/id/|/?\?.*?\baid=)| (?:[^/]+/)* ) ) (?P<id>[^/?#&]+) ''' _NETRC_MACHINE = 'medialaan' _APIKEY = '3_HZ0FtkMW_gOyKlqQzW5_0FHRC7Nd5XpXJZcDdXY4pk5eES2ZWmejRW5egwVm4ug-' _SITE_TO_APP_ID = { 'vtm': 'vtm_watch', 'q2': 'q2', 'vtmkzoom': 'vtmkzoom', } _TESTS = [{ # vod 'url': 'http://vtm.be/video/volledige-afleveringen/id/vtm_20170219_VM0678361_vtmwatch', 'info_dict': { 'id': 'vtm_20170219_VM0678361_vtmwatch', 'ext': 'mp4', 'title': 'Allemaal Chris afl. 6', 'description': 'md5:4be86427521e7b07e0adb0c9c554ddb2', 'timestamp': 1487533280, 'upload_date': '20170219', 'duration': 2562, 'series': 'Allemaal Chris', 'season': 'Allemaal Chris', 'season_number': 1, 'season_id': '256936078124527', 'episode': 'Allemaal Chris afl. 
6', 'episode_number': 6, 'episode_id': '256936078591527', }, 'params': { 'skip_download': True, }, 'skip': 'Requires account credentials', }, { # clip 'url': 'http://vtm.be/video?aid=168332', 'info_dict': { 'id': '168332', 'ext': 'mp4', 'title': '"Veronique liegt!"', 'description': 'md5:1385e2b743923afe54ba4adc38476155', 'timestamp': 1489002029, 'upload_date': '20170308', 'duration': 96, }, }, { # vod 'url': 'http://vtm.be/video/volledige-afleveringen/id/257107153551000', 'only_matching': True, }, { # vod 'url': 'http://vtm.be/video?aid=163157', 'only_matching': True, }, { # vod 'url': 'http://www.q2.be/video/volledige-afleveringen/id/2be_20170301_VM0684442_q2', 'only_matching': True, }, { # clip 'url': 'http://vtmkzoom.be/k3-dansstudio/een-nieuw-seizoen-van-k3-dansstudio', 'only_matching': True, }, { # http/s redirect 'url': 'https://vtmkzoom.be/video?aid=45724', 'info_dict': { 'id': '257136373657000', 'ext': 'mp4', 'title': 'K3 Dansstudio Ushuaia afl.6', }, 'params': { 'skip_download': True, }, 'skip': 'Requires account credentials', }, { # nieuws.vtm.be 'url': 'https://nieuws.vtm.be/stadion/stadion/genk-nog-moeilijk-programma', 'only_matching': True, }] def _real_initialize(self): self._logged_in = False def _login(self): username, password = self._get_login_info() if username is None: self.raise_login_required() auth_data = { 'APIKey': self._APIKEY, 'sdk': 'js_6.1', 'format': 'json', 'loginID': username, 'password': password, } auth_info = self._gigya_login(auth_data) self._uid = auth_info['UID'] self._uid_signature = auth_info['UIDSignature'] self._signature_timestamp = auth_info['signatureTimestamp'] self._logged_in = True def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id, site_id = mobj.group('id', 'site_id') webpage = self._download_webpage(url, video_id) config = self._parse_json( self._search_regex( r'videoJSConfig\s*=\s*JSON\.parse\(\'({.+?})\'\);', webpage, 'config', default='{}'), video_id, transform_source=lambda s: 
s.replace( '\\\\', '\\').replace(r'\"', '"').replace(r"\'", "'")) vod_id = config.get('vodId') or self._search_regex( (r'\\"vodId\\"\s*:\s*\\"(.+?)\\"', r'"vodId"\s*:\s*"(.+?)"', r'<[^>]+id=["\']vod-(\d+)'), webpage, 'video_id', default=None) # clip, no authentication required if not vod_id: player = self._parse_json( self._search_regex( r'vmmaplayer\(({.+?})\);', webpage, 'vmma player', default=''), video_id, transform_source=lambda s: '[%s]' % s, fatal=False) if player: video = player[-1] if video['videoUrl'] in ('http', 'https'): return self.url_result(video['url'], MedialaanIE.ie_key()) info = { 'id': video_id, 'url': video['videoUrl'], 'title': video['title'], 'thumbnail': video.get('imageUrl'), 'timestamp': int_or_none(video.get('createdDate')), 'duration': int_or_none(video.get('duration')), } else: info = self._parse_html5_media_entries( url, webpage, video_id, m3u8_id='hls')[0] info.update({ 'id': video_id, 'title': self._html_search_meta('description', webpage), 'duration': parse_duration(self._html_search_meta('duration', webpage)), }) # vod, authentication required else: if not self._logged_in: self._login() settings = self._parse_json( self._search_regex( r'jQuery\.extend\(Drupal\.settings\s*,\s*({.+?})\);', webpage, 'drupal settings', default='{}'), video_id) def get(container, item): return try_get( settings, lambda x: x[container][item], compat_str) or self._search_regex( r'"%s"\s*:\s*"([^"]+)' % item, webpage, item, default=None) app_id = get('vod', 'app_id') or self._SITE_TO_APP_ID.get(site_id, 'vtm_watch') sso = get('vod', 'gigyaDatabase') or 'vtm-sso' data = self._download_json( 'http://vod.medialaan.io/api/1.0/item/%s/video' % vod_id, video_id, query={ 'app_id': app_id, 'user_network': sso, 'UID': self._uid, 'UIDSignature': self._uid_signature, 'signatureTimestamp': self._signature_timestamp, }) formats = self._extract_m3u8_formats( data['response']['uri'], video_id, entry_protocol='m3u8_native', ext='mp4', m3u8_id='hls') 
self._sort_formats(formats) info = { 'id': vod_id, 'formats': formats, } api_key = get('vod', 'apiKey') channel = get('medialaanGigya', 'channel') if api_key: videos = self._download_json( 'http://vod.medialaan.io/vod/v2/videos', video_id, fatal=False, query={ 'channels': channel, 'ids': vod_id, 'limit': 1, 'apikey': api_key, }) if videos: video = try_get( videos, lambda x: x['response']['videos'][0], dict) if video: def get(container, item, expected_type=None): return try_get( video, lambda x: x[container][item], expected_type) def get_string(container, item): return get(container, item, compat_str) info.update({ 'series': get_string('program', 'title'), 'season': get_string('season', 'title'), 'season_number': int_or_none(get('season', 'number')), 'season_id': get_string('season', 'id'), 'episode': get_string('episode', 'title'), 'episode_number': int_or_none(get('episode', 'number')), 'episode_id': get_string('episode', 'id'), 'duration': int_or_none( video.get('duration')) or int_or_none( video.get('durationMillis'), scale=1000), 'title': get_string('episode', 'title'), 'description': get_string('episode', 'text'), 'timestamp': unified_timestamp(get_string( 'publication', 'begin')), }) if not info.get('title'): info['title'] = try_get( config, lambda x: x['videoConfig']['title'], compat_str) or self._html_search_regex( r'\\"title\\"\s*:\s*\\"(.+?)\\"', webpage, 'title', default=None) or self._og_search_title(webpage) if not info.get('description'): info['description'] = self._html_search_regex( r'<div[^>]+class="field-item\s+even">\s*<p>(.+?)</p>', webpage, 'description', default=None) return info
unlicense
bencejuhaasz/L0C4L1Z3R
L0C_SDR.py
1
4881
import math import time import socket from subprocess import call k1 = [] def string2float(s): try: return(float(s)) except: return(s) def convert_deg2rad(deg): rad = (deg / 180) * math.pi return rad def FlushSock(): global sock sock.setblocking(False) while 1: try: PacketBytes = sock.recv(1024) except: break; sock.setblocking(True) def convert_m2lat(m): global lat lat_K = 40075560 #6371001 * math.cos(lat) lat2 = m / (lat_K/360) return lat2 def convert_lat2m(lat): lat_K = 40075560 m = lat * (lat_K/360) return(m) def convert_m2lon(m): lon = m / (30000000/360) return(lon) def convert_lon2m(lon): m = (30000000/360) * lon return(m) ##################################### UDP_IP = "0.0.0.0" UDP_PORT = 5005 sock = socket.socket(socket.AF_INET, # Internet socket.SOCK_DGRAM) # UDP sock.bind((UDP_IP, UDP_PORT)) ################################### i = 0 x1 = [] m1 = [] y1 = [] exit = False min_pwr = input('minimum rxpower if the antenna is pointed at the signal source: ') while(True): #call(["ls", "-l"]) call(["rm", "./tmp/*"]) call(["rtl_power", "-f", "88M:89M:20k", "./tmp/radio.csv", "-i", "1", "-e", "1"]) f = open("./tmp/radio.csv") pwr = f.readline().split(",")[38].strip() print(pwr) pwr = float(pwr) min_pwr = float(min_pwr) FlushSock() while(not exit): try: data, addr = sock.recvfrom(1024) data = str(data) x = data.split(",")[28].strip() y = data.split(",")[29].strip() lat = data.split(",")[2].strip() lat = float(lat) lon = data.split(",")[3].strip() lon = float(lon) h = data.split(",")[4].strip() print(x, y, lat, lon, h) exit = True break except(IndexError): pass data1, addr = sock.recvfrom(1024) data1 = str(data1) if(data1 != data and len(data1) == 247): data = data1 x = data.split(",")[28].strip() y = data.split(",")[29].strip() lat = data.split(",")[2].strip() lat = float(lat) lon = data.split(",")[3].strip() lon = float(lon) h = data.split(",")[4].strip() if(len(data) != 67): hdg = convert_deg2rad(float(x)) vert_deg = convert_deg2rad(float(y)) print(x, y, lat, lon, 
h) gps = 1 if(data1 != data and len(data1) != 247): data = data1 data = data.split(",") x = data[len(data)-3] y = data[len(data)-2] if(len(data) != 67): hdg = convert_deg2rad(float(x)) vert_deg = convert_deg2rad(float(y)) gps = 0 if((gps == 1 and float(pwr) >= min_pwr) or i == 0): if(len(data) != 67): hdg = convert_deg2rad(float(x)) vert_deg = convert_deg2rad(float(y)) r = 1 a = float(h) * math.sin(vert_deg) Bdifflatm = a * math.sin(hdg) Bdifflonm = a * math.cos(hdg) difflat = convert_m2lat(Bdifflatm) difflon = convert_m2lon(Bdifflonm) B0lat = float(lat) + float(difflat) B0lon = float(lon) + float(difflon) avglat = 0 avglon = 0 x1.append((B0lat - lat)) y1.append((B0lon - lon)) if((lat - B0lat) != 0): m1.append((lon - B0lon) / (lat - B0lat)) #print("#########") #print(str(len(y1))+" "+str(len(x1))+" "+str(len(m1))+" "+str(len(k1))+" "+str(i)) #print("#########") k1.append(y1[i] - (x1[i] - m1[i])) if(i>=2 and (x1[i]*m1[i] + k1[i]) - (x1[i-1]*m1[i-1] + k1[i-1]) < r and float(pwr) >= min_pwr): avglon = convert_m2lon(y1[i]) + lon avglat = convert_m2lat(x1[i]) + lat a_ = (avglat - a)**2 + (avglon - a)**2 b = 180 - 90 - vert_deg rh = a_ * math.tan(b) print("###############################") print("router_avg_pos: "+str(avglat)+" "+str(avglon)+" "+str(rh)) print("###############################") print("###############################") #print("Routerlat/Blon:") #print(str(B0lat)+" "+str(B0lon)) #print(" ") print("lat/lon") print(str(lat)+" "+str(lon)) print(" ") print("ALT:") print(str(h)) print(" ") print("vert_deg:") print(str(vert_deg)) print("hdg:") print(str(x)) print(str(hdg)) print("src_ADDR:", addr) print("len_data:"+str(len(data))) #print("A: ", a) print("PWR: "+str(pwr)) print("###############################") #print(data) i = i + 1 time.sleep(1) gps = 0
gpl-3.0
gkarlin/django-jenkins
build/Django/django/utils/tree.py
109
5851
""" A class for storing a tree graph. Primarily used for filter constructs in the ORM. """ import copy class Node(object): """ A single internal node in the tree graph. A Node should be viewed as a connection (the root) with the children being either leaf nodes or other Node instances. """ # Standard connector type. Clients usually won't use this at all and # subclasses will usually override the value. default = 'DEFAULT' def __init__(self, children=None, connector=None, negated=False): """ Constructs a new Node. If no connector is given, the default will be used. Warning: You probably don't want to pass in the 'negated' parameter. It is NOT the same as constructing a node and calling negate() on the result. """ self.children = children and children[:] or [] self.connector = connector or self.default self.subtree_parents = [] self.negated = negated # We need this because of django.db.models.query_utils.Q. Q. __init__() is # problematic, but it is a natural Node subclass in all other respects. def _new_instance(cls, children=None, connector=None, negated=False): """ This is called to create a new instance of this class when we need new Nodes (or subclasses) in the internal code in this class. Normally, it just shadows __init__(). However, subclasses with an __init__ signature that is not an extension of Node.__init__ might need to implement this method to allow a Node to create a new instance of them (if they have any extra setting up to do). """ obj = Node(children, connector, negated) obj.__class__ = cls return obj _new_instance = classmethod(_new_instance) def __str__(self): if self.negated: return '(NOT (%s: %s))' % (self.connector, ', '.join([str(c) for c in self.children])) return '(%s: %s)' % (self.connector, ', '.join([str(c) for c in self.children])) def __deepcopy__(self, memodict): """ Utility method used by copy.deepcopy(). 
""" obj = Node(connector=self.connector, negated=self.negated) obj.__class__ = self.__class__ obj.children = copy.deepcopy(self.children, memodict) obj.subtree_parents = copy.deepcopy(self.subtree_parents, memodict) return obj def __len__(self): """ The size of a node if the number of children it has. """ return len(self.children) def __bool__(self): """ For truth value testing. """ return bool(self.children) def __nonzero__(self): # Python 2 compatibility return type(self).__bool__(self) def __contains__(self, other): """ Returns True is 'other' is a direct child of this instance. """ return other in self.children def add(self, node, conn_type): """ Adds a new node to the tree. If the conn_type is the same as the root's current connector type, the node is added to the first level. Otherwise, the whole tree is pushed down one level and a new root connector is created, connecting the existing tree and the new node. """ if node in self.children and conn_type == self.connector: return if len(self.children) < 2: self.connector = conn_type if self.connector == conn_type: if isinstance(node, Node) and (node.connector == conn_type or len(node) == 1): self.children.extend(node.children) else: self.children.append(node) else: obj = self._new_instance(self.children, self.connector, self.negated) self.connector = conn_type self.children = [obj, node] def negate(self): """ Negate the sense of the root connector. This reorganises the children so that the current node has a single child: a negated node containing all the previous children. This slightly odd construction makes adding new children behave more intuitively. Interpreting the meaning of this negate is up to client code. This method is useful for implementing "not" arrangements. """ self.children = [self._new_instance(self.children, self.connector, not self.negated)] self.connector = self.default def start_subtree(self, conn_type): """ Sets up internal state so that new nodes are added to a subtree of the current node. 
The conn_type specifies how the sub-tree is joined to the existing children. """ if len(self.children) == 1: self.connector = conn_type elif self.connector != conn_type: self.children = [self._new_instance(self.children, self.connector, self.negated)] self.connector = conn_type self.negated = False self.subtree_parents.append(self.__class__(self.children, self.connector, self.negated)) self.connector = self.default self.negated = False self.children = [] def end_subtree(self): """ Closes off the most recently unmatched start_subtree() call. This puts the current state into a node of the parent tree and returns the current instances state to be the parent. """ obj = self.subtree_parents.pop() node = self.__class__(self.children, self.connector) self.connector = obj.connector self.negated = obj.negated self.children = obj.children self.children.append(node)
lgpl-3.0
nikhil93uf/Qemu
scripts/tracetool/backend/__init__.py
83
3953
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
Backend management.


Creating new backends
---------------------

A new backend named 'foo-bar' corresponds to Python module
'tracetool/backend/foo_bar.py'.

A backend module should provide a docstring, whose first non-empty line will
be considered its short description.

All backends must generate their contents through the 'tracetool.out' routine.


Backend attributes
------------------

========= ====================================================================
Attribute Description
========= ====================================================================
PUBLIC    If exists and is set to 'True', the backend is considered "public".
========= ====================================================================


Backend functions
-----------------

All the following functions are optional, and no output will be generated if
they do not exist.

=============================== ==============================================
Function                        Description
=============================== ==============================================
generate_<format>_begin(events) Generate backend- and format-specific file
                                header contents.
generate_<format>_end(events)   Generate backend- and format-specific file
                                footer contents.
generate_<format>(event)        Generate backend- and format-specific contents
                                for the given event.
=============================== ==============================================
"""

__author__     = "Lluís Vilanova <vilanova@ac.upc.edu>"
__copyright__  = "Copyright 2012-2014, Lluís Vilanova <vilanova@ac.upc.edu>"
__license__    = "GPL version 2 or (at your option) any later version"

__maintainer__ = "Stefan Hajnoczi"
__email__      = "stefanha@linux.vnet.ibm.com"


import os

import tracetool


def get_list(only_public = False):
    """Get a list of (name, description) pairs.

    The special "nop" backend is always listed first.  A backend's
    description is the first line of its module docstring; backends without a
    docstring get an empty description.
    """
    res = [("nop", "Tracing disabled.")]
    # Every .py file in this package (other than this one) is a backend.
    modnames = [filename.rsplit('.', 1)[0]
                for filename in os.listdir(tracetool.backend.__path__[0])
                if filename.endswith('.py') and filename != '__init__.py']
    for modname in sorted(modnames):
        module = tracetool.try_import("tracetool.backend." + modname)

        # just in case; should never fail unless non-module files are put there
        if not module[0]:
            continue
        module = module[1]

        public = getattr(module, "PUBLIC", False)
        if only_public and not public:
            continue

        doc = module.__doc__
        if doc is None:
            doc = ""
        doc = doc.strip().split("\n")[0]

        name = modname.replace("_", "-")
        res.append((name, doc))
    return res


def exists(name):
    """Return whether the given backend exists."""
    if len(name) == 0:
        return False
    if name == "nop":
        return True
    name = name.replace("-", "_")
    # Bug fix: this previously returned the imported module object (or None)
    # rather than the boolean the docstring promises.  All visible callers
    # only test truthiness, so coercing to bool is backward-compatible.
    return bool(tracetool.try_import("tracetool.backend." + name)[1])


class Wrapper:
    """Dispatches generate_* calls to a set of backend modules.

    Each backend/format-specific hook is optional; missing hooks are
    silently skipped.
    """

    def __init__(self, backends, format):
        # Normalise "foo-bar" names to their "foo_bar" module spellings.
        self._backends = [backend.replace("-", "_") for backend in backends]
        self._format = format.replace("-", "_")
        for backend in self._backends:
            assert exists(backend)
        assert tracetool.format.exists(self._format)

    def _run_function(self, name, *args, **kwargs):
        # `name` is a template such as "generate_%s_begin"; the current
        # format name is substituted in before lookup.
        for backend in self._backends:
            func = tracetool.try_import("tracetool.backend." + backend,
                                        name % self._format, None)[1]
            if func is not None:
                func(*args, **kwargs)

    def generate_begin(self, events):
        self._run_function("generate_%s_begin", events)

    def generate(self, event):
        self._run_function("generate_%s", event)

    def generate_end(self, events):
        self._run_function("generate_%s_end", events)
gpl-2.0
CoolProp/CoolProp
Web/scripts/fluid_properties.Mixtures.py
2
2243
from __future__ import print_function from CPWeb.BibtexTools import getCitationOrAlternative, getBibtexParser import CoolProp import os.path web_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) csvfile = os.path.join(web_dir, 'fluid_properties', 'Mixtures.csv') def merge_args(*args): return " :raw-html:`<br/>` ".join(list(args)) def printCoeff(number): if number is None or \ len(str(number).strip()) < 1: return " " number = float(number) short = "{0:.4e}".format(number) long = "{0:.14e}".format(number) return u':raw-html:`<span title="{1}">{0}</span>`'.format(short, long) class Dossier: def __init__(self): self.data = {} def add(self, key, value): if key not in self.data: self.data[key] = [] self.data[key].append(value) d = Dossier() pairs = CoolProp.get('mixture_binary_pairs_list') print(len(pairs.split(','))) for pair in pairs.split(','): CAS1, CAS2 = pair.split('&') d.add('CAS1', CAS1) d.add('CAS2', CAS2) for key in ['name1', 'name2', 'F', 'function', 'BibTeX', 'xi', 'zeta', 'betaT', 'betaV', 'gammaT', 'gammaV']: try: d.add(key, CoolProp.CoolProp.get_mixture_binary_pair_data(CAS1, CAS2, key)) except BaseException as BE: d.add(key, '') import pandas df = pandas.DataFrame(d.data) df = df.sort_values(by=['BibTeX', 'name1'], ascending=[0, 1]) bibtexer = getBibtexParser() # filename = '../../../CoolPropBibTeXLibrary.bib') with open(csvfile, 'w') as fp: header = 'Ref.,Name1,Name2,function,F,' header += merge_args("xi", "zeta,") header += merge_args("betaT", "betaV,") header += merge_args("gammaT", "gammaV") header += '\n' fp.write(header) for index, row in df.iterrows(): text = ','.join([ \ getCitationOrAlternative(bibtexer, row['BibTeX']), row['name1'], row['name2'], row['function'], row['F'], merge_args(printCoeff(row['xi']), printCoeff(row['zeta'])), merge_args(printCoeff(row['betaT']), printCoeff(row['betaV'])), merge_args(printCoeff(row['gammaT']), printCoeff(row['gammaV'])) ]) + '\n' fp.write(text)
mit
darkryder/django
tests/generic_relations/tests.py
24
30730
from __future__ import unicode_literals from django import forms from django.contrib.contenttypes.forms import generic_inlineformset_factory from django.contrib.contenttypes.models import ContentType from django.core.exceptions import FieldError from django.db import IntegrityError from django.db.models import Q from django.test import SimpleTestCase, TestCase from .models import ( AllowsNullGFK, Animal, Carrot, Comparison, ConcreteRelatedModel, ForConcreteModelModel, ForProxyModelModel, Gecko, ManualPK, Mineral, ProxyRelatedModel, Rock, TaggedItem, ValuableRock, ValuableTaggedItem, Vegetable, ) class GenericRelationsTests(TestCase): def setUp(self): self.lion = Animal.objects.create( common_name="Lion", latin_name="Panthera leo") self.platypus = Animal.objects.create( common_name="Platypus", latin_name="Ornithorhynchus anatinus") Vegetable.objects.create(name="Eggplant", is_yucky=True) self.bacon = Vegetable.objects.create(name="Bacon", is_yucky=False) self.quartz = Mineral.objects.create(name="Quartz", hardness=7) # Tagging stuff. self.bacon.tags.create(tag="fatty") self.bacon.tags.create(tag="salty") self.lion.tags.create(tag="yellow") self.lion.tags.create(tag="hairy") # Original list of tags: self.comp_func = lambda obj: ( obj.tag, obj.content_type.model_class(), obj.object_id ) def test_generic_update_or_create_when_created(self): """ Should be able to use update_or_create from the generic related manager to create a tag. Refs #23611. """ count = self.bacon.tags.count() tag, created = self.bacon.tags.update_or_create(tag='stinky') self.assertTrue(created) self.assertEqual(count + 1, self.bacon.tags.count()) def test_generic_update_or_create_when_updated(self): """ Should be able to use update_or_create from the generic related manager to update a tag. Refs #23611. 
""" count = self.bacon.tags.count() tag = self.bacon.tags.create(tag='stinky') self.assertEqual(count + 1, self.bacon.tags.count()) tag, created = self.bacon.tags.update_or_create(defaults={'tag': 'juicy'}, id=tag.id) self.assertFalse(created) self.assertEqual(count + 1, self.bacon.tags.count()) self.assertEqual(tag.tag, 'juicy') def test_generic_get_or_create_when_created(self): """ Should be able to use get_or_create from the generic related manager to create a tag. Refs #23611. """ count = self.bacon.tags.count() tag, created = self.bacon.tags.get_or_create(tag='stinky') self.assertTrue(created) self.assertEqual(count + 1, self.bacon.tags.count()) def test_generic_get_or_create_when_exists(self): """ Should be able to use get_or_create from the generic related manager to get a tag. Refs #23611. """ count = self.bacon.tags.count() tag = self.bacon.tags.create(tag="stinky") self.assertEqual(count + 1, self.bacon.tags.count()) tag, created = self.bacon.tags.get_or_create(id=tag.id, defaults={'tag': 'juicy'}) self.assertFalse(created) self.assertEqual(count + 1, self.bacon.tags.count()) # shouldn't had changed the tag self.assertEqual(tag.tag, 'stinky') def test_generic_relations_m2m_mimic(self): """ Objects with declared GenericRelations can be tagged directly -- the API mimics the many-to-many API. """ self.assertQuerysetEqual(self.lion.tags.all(), [ "<TaggedItem: hairy>", "<TaggedItem: yellow>" ]) self.assertQuerysetEqual(self.bacon.tags.all(), [ "<TaggedItem: fatty>", "<TaggedItem: salty>" ]) def test_access_content_object(self): """ Test accessing the content object like a foreign key. 
""" tagged_item = TaggedItem.objects.get(tag="salty") self.assertEqual(tagged_item.content_object, self.bacon) def test_query_content_object(self): qs = TaggedItem.objects.filter( animal__isnull=False).order_by('animal__common_name', 'tag') self.assertQuerysetEqual( qs, ["<TaggedItem: hairy>", "<TaggedItem: yellow>"] ) mpk = ManualPK.objects.create(id=1) mpk.tags.create(tag='mpk') qs = TaggedItem.objects.filter( Q(animal__isnull=False) | Q(manualpk__id=1)).order_by('tag') self.assertQuerysetEqual( qs, ["hairy", "mpk", "yellow"], lambda x: x.tag) def test_exclude_generic_relations(self): """ Test lookups over an object without GenericRelations. """ # Recall that the Mineral class doesn't have an explicit GenericRelation # defined. That's OK, because you can create TaggedItems explicitly. # However, excluding GenericRelations means your lookups have to be a # bit more explicit. TaggedItem.objects.create(content_object=self.quartz, tag="shiny") TaggedItem.objects.create(content_object=self.quartz, tag="clearish") ctype = ContentType.objects.get_for_model(self.quartz) q = TaggedItem.objects.filter( content_type__pk=ctype.id, object_id=self.quartz.id ) self.assertQuerysetEqual(q, [ "<TaggedItem: clearish>", "<TaggedItem: shiny>" ]) def test_access_via_content_type(self): """ Test lookups through content type. """ self.lion.delete() self.platypus.tags.create(tag="fatty") ctype = ContentType.objects.get_for_model(self.platypus) self.assertQuerysetEqual( Animal.objects.filter(tags__content_type=ctype), ["<Animal: Platypus>"]) def test_set_foreign_key(self): """ You can set a generic foreign key in the way you'd expect. """ tag1 = TaggedItem.objects.create(content_object=self.quartz, tag="shiny") tag1.content_object = self.platypus tag1.save() self.assertQuerysetEqual( self.platypus.tags.all(), ["<TaggedItem: shiny>"]) def test_queries_across_generic_relations(self): """ Queries across generic relations respect the content types. 
Even though there are two TaggedItems with a tag of "fatty", this query only pulls out the one with the content type related to Animals. """ self.assertQuerysetEqual(Animal.objects.order_by('common_name'), [ "<Animal: Lion>", "<Animal: Platypus>" ]) def test_queries_content_type_restriction(self): """ Create another fatty tagged instance with different PK to ensure there is a content type restriction in the generated queries below. """ mpk = ManualPK.objects.create(id=self.lion.pk) mpk.tags.create(tag="fatty") self.platypus.tags.create(tag="fatty") self.assertQuerysetEqual( Animal.objects.filter(tags__tag='fatty'), ["<Animal: Platypus>"]) self.assertQuerysetEqual( Animal.objects.exclude(tags__tag='fatty'), ["<Animal: Lion>"]) def test_object_deletion_with_generic_relation(self): """ If you delete an object with an explicit Generic relation, the related objects are deleted when the source object is deleted. """ self.assertQuerysetEqual(TaggedItem.objects.all(), [ ('fatty', Vegetable, self.bacon.pk), ('hairy', Animal, self.lion.pk), ('salty', Vegetable, self.bacon.pk), ('yellow', Animal, self.lion.pk) ], self.comp_func ) self.lion.delete() self.assertQuerysetEqual(TaggedItem.objects.all(), [ ('fatty', Vegetable, self.bacon.pk), ('salty', Vegetable, self.bacon.pk), ], self.comp_func ) def test_object_deletion_without_generic_relation(self): """ If Generic Relation is not explicitly defined, any related objects remain after deletion of the source object. """ TaggedItem.objects.create(content_object=self.quartz, tag="clearish") quartz_pk = self.quartz.pk self.quartz.delete() self.assertQuerysetEqual(TaggedItem.objects.all(), [ ('clearish', Mineral, quartz_pk), ('fatty', Vegetable, self.bacon.pk), ('hairy', Animal, self.lion.pk), ('salty', Vegetable, self.bacon.pk), ('yellow', Animal, self.lion.pk), ], self.comp_func ) def test_tag_deletion_related_objects_unaffected(self): """ If you delete a tag, the objects using the tag are unaffected (other than losing a tag). 
""" ctype = ContentType.objects.get_for_model(self.lion) tag = TaggedItem.objects.get( content_type__pk=ctype.id, object_id=self.lion.id, tag="hairy") tag.delete() self.assertQuerysetEqual(self.lion.tags.all(), ["<TaggedItem: yellow>"]) self.assertQuerysetEqual(TaggedItem.objects.all(), [ ('fatty', Vegetable, self.bacon.pk), ('salty', Vegetable, self.bacon.pk), ('yellow', Animal, self.lion.pk) ], self.comp_func ) def test_add_bulk(self): bacon = Vegetable.objects.create(name="Bacon", is_yucky=False) t1 = TaggedItem.objects.create(content_object=self.quartz, tag="shiny") t2 = TaggedItem.objects.create(content_object=self.quartz, tag="clearish") # One update() query. with self.assertNumQueries(1): bacon.tags.add(t1, t2) self.assertEqual(t1.content_object, bacon) self.assertEqual(t2.content_object, bacon) def test_add_bulk_false(self): bacon = Vegetable.objects.create(name="Bacon", is_yucky=False) t1 = TaggedItem.objects.create(content_object=self.quartz, tag="shiny") t2 = TaggedItem.objects.create(content_object=self.quartz, tag="clearish") # One save() for each object. with self.assertNumQueries(2): bacon.tags.add(t1, t2, bulk=False) self.assertEqual(t1.content_object, bacon) self.assertEqual(t2.content_object, bacon) def test_add_rejects_unsaved_objects(self): t1 = TaggedItem(content_object=self.quartz, tag="shiny") msg = "<TaggedItem: shiny> instance isn't saved. Use bulk=False or save the object first." 
with self.assertRaisesMessage(ValueError, msg): self.bacon.tags.add(t1) def test_set(self): bacon = Vegetable.objects.create(name="Bacon", is_yucky=False) fatty = bacon.tags.create(tag="fatty") salty = bacon.tags.create(tag="salty") bacon.tags.set([fatty, salty]) self.assertQuerysetEqual(bacon.tags.all(), [ "<TaggedItem: fatty>", "<TaggedItem: salty>", ]) bacon.tags.set([fatty]) self.assertQuerysetEqual(bacon.tags.all(), [ "<TaggedItem: fatty>", ]) bacon.tags.set([]) self.assertQuerysetEqual(bacon.tags.all(), []) bacon.tags.set([fatty, salty], bulk=False, clear=True) self.assertQuerysetEqual(bacon.tags.all(), [ "<TaggedItem: fatty>", "<TaggedItem: salty>", ]) bacon.tags.set([fatty], bulk=False, clear=True) self.assertQuerysetEqual(bacon.tags.all(), [ "<TaggedItem: fatty>", ]) bacon.tags.set([], clear=True) self.assertQuerysetEqual(bacon.tags.all(), []) def test_assign(self): bacon = Vegetable.objects.create(name="Bacon", is_yucky=False) fatty = bacon.tags.create(tag="fatty") salty = bacon.tags.create(tag="salty") bacon.tags.set([fatty, salty]) self.assertQuerysetEqual(bacon.tags.all(), [ "<TaggedItem: fatty>", "<TaggedItem: salty>", ]) bacon.tags.set([fatty]) self.assertQuerysetEqual(bacon.tags.all(), [ "<TaggedItem: fatty>", ]) bacon.tags.set([]) self.assertQuerysetEqual(bacon.tags.all(), []) def test_assign_with_queryset(self): # Ensure that querysets used in reverse GFK assignments are pre-evaluated # so their value isn't affected by the clearing operation in # ManyRelatedManager.set() (#19816). bacon = Vegetable.objects.create(name="Bacon", is_yucky=False) bacon.tags.create(tag="fatty") bacon.tags.create(tag="salty") self.assertEqual(2, bacon.tags.count()) qs = bacon.tags.filter(tag="fatty") bacon.tags.set(qs) self.assertEqual(1, bacon.tags.count()) self.assertEqual(1, qs.count()) def test_generic_relation_related_name_default(self): # Test that GenericRelation by default isn't usable from # the reverse side. 
with self.assertRaises(FieldError): TaggedItem.objects.filter(vegetable__isnull=True) def test_multiple_gfk(self): # Simple tests for multiple GenericForeignKeys # only uses one model, since the above tests should be sufficient. tiger = Animal.objects.create(common_name="tiger") cheetah = Animal.objects.create(common_name="cheetah") bear = Animal.objects.create(common_name="bear") # Create directly Comparison.objects.create( first_obj=cheetah, other_obj=tiger, comparative="faster" ) Comparison.objects.create( first_obj=tiger, other_obj=cheetah, comparative="cooler" ) # Create using GenericRelation tiger.comparisons.create(other_obj=bear, comparative="cooler") tiger.comparisons.create(other_obj=cheetah, comparative="stronger") self.assertQuerysetEqual(cheetah.comparisons.all(), [ "<Comparison: cheetah is faster than tiger>" ]) # Filtering works self.assertQuerysetEqual(tiger.comparisons.filter(comparative="cooler"), [ "<Comparison: tiger is cooler than cheetah>", "<Comparison: tiger is cooler than bear>", ], ordered=False) # Filtering and deleting works subjective = ["cooler"] tiger.comparisons.filter(comparative__in=subjective).delete() self.assertQuerysetEqual(Comparison.objects.all(), [ "<Comparison: cheetah is faster than tiger>", "<Comparison: tiger is stronger than cheetah>" ], ordered=False) # If we delete cheetah, Comparisons with cheetah as 'first_obj' will be # deleted since Animal has an explicit GenericRelation to Comparison # through first_obj. Comparisons with cheetah as 'other_obj' will not # be deleted. 
cheetah.delete() self.assertQuerysetEqual(Comparison.objects.all(), [ "<Comparison: tiger is stronger than None>" ]) def test_gfk_subclasses(self): # GenericForeignKey should work with subclasses (see #8309) quartz = Mineral.objects.create(name="Quartz", hardness=7) valuedtag = ValuableTaggedItem.objects.create( content_object=quartz, tag="shiny", value=10 ) self.assertEqual(valuedtag.content_object, quartz) def test_generic_relation_to_inherited_child(self): # GenericRelations to models that use multi-table inheritance work. granite = ValuableRock.objects.create(name='granite', hardness=5) ValuableTaggedItem.objects.create(content_object=granite, tag="countertop", value=1) self.assertEqual(ValuableRock.objects.filter(tags__value=1).count(), 1) # We're generating a slightly inefficient query for tags__tag - we # first join ValuableRock -> TaggedItem -> ValuableTaggedItem, and then # we fetch tag by joining TaggedItem from ValuableTaggedItem. The last # join isn't necessary, as TaggedItem <-> ValuableTaggedItem is a # one-to-one join. self.assertEqual(ValuableRock.objects.filter(tags__tag="countertop").count(), 1) granite.delete() # deleting the rock should delete the related tag. 
self.assertEqual(ValuableTaggedItem.objects.count(), 0) def test_generic_inline_formsets(self): GenericFormSet = generic_inlineformset_factory(TaggedItem, extra=1) formset = GenericFormSet() self.assertHTMLEqual( ''.join(form.as_p() for form in formset.forms), """<p><label for="id_generic_relations-taggeditem-content_type-object_id-0-tag"> Tag:</label> <input id="id_generic_relations-taggeditem-content_type-object_id-0-tag" type="text" name="generic_relations-taggeditem-content_type-object_id-0-tag" maxlength="50" /></p> <p><label for="id_generic_relations-taggeditem-content_type-object_id-0-DELETE">Delete:</label> <input type="checkbox" name="generic_relations-taggeditem-content_type-object_id-0-DELETE" id="id_generic_relations-taggeditem-content_type-object_id-0-DELETE" /> <input type="hidden" name="generic_relations-taggeditem-content_type-object_id-0-id" id="id_generic_relations-taggeditem-content_type-object_id-0-id" /></p>""" ) formset = GenericFormSet(instance=Animal()) self.assertHTMLEqual( ''.join(form.as_p() for form in formset.forms), """<p><label for="id_generic_relations-taggeditem-content_type-object_id-0-tag"> Tag:</label> <input id="id_generic_relations-taggeditem-content_type-object_id-0-tag" type="text" name="generic_relations-taggeditem-content_type-object_id-0-tag" maxlength="50" /></p> <p><label for="id_generic_relations-taggeditem-content_type-object_id-0-DELETE">Delete:</label> <input type="checkbox" name="generic_relations-taggeditem-content_type-object_id-0-DELETE" id="id_generic_relations-taggeditem-content_type-object_id-0-DELETE" /><input type="hidden" name="generic_relations-taggeditem-content_type-object_id-0-id" id="id_generic_relations-taggeditem-content_type-object_id-0-id" /></p>""" ) platypus = Animal.objects.create( common_name="Platypus", latin_name="Ornithorhynchus anatinus" ) platypus.tags.create(tag="shiny") GenericFormSet = generic_inlineformset_factory(TaggedItem, extra=1) formset = GenericFormSet(instance=platypus) 
tagged_item_id = TaggedItem.objects.get( tag='shiny', object_id=platypus.id ).id self.assertHTMLEqual( ''.join(form.as_p() for form in formset.forms), """<p><label for="id_generic_relations-taggeditem-content_type-object_id-0-tag">Tag:</label> <input id="id_generic_relations-taggeditem-content_type-object_id-0-tag" type="text" name="generic_relations-taggeditem-content_type-object_id-0-tag" value="shiny" maxlength="50" /></p> <p><label for="id_generic_relations-taggeditem-content_type-object_id-0-DELETE">Delete:</label> <input type="checkbox" name="generic_relations-taggeditem-content_type-object_id-0-DELETE" id="id_generic_relations-taggeditem-content_type-object_id-0-DELETE" /> <input type="hidden" name="generic_relations-taggeditem-content_type-object_id-0-id" value="%s" id="id_generic_relations-taggeditem-content_type-object_id-0-id" /></p> <p><label for="id_generic_relations-taggeditem-content_type-object_id-1-tag">Tag:</label> <input id="id_generic_relations-taggeditem-content_type-object_id-1-tag" type="text" name="generic_relations-taggeditem-content_type-object_id-1-tag" maxlength="50" /></p> <p><label for="id_generic_relations-taggeditem-content_type-object_id-1-DELETE">Delete:</label> <input type="checkbox" name="generic_relations-taggeditem-content_type-object_id-1-DELETE" id="id_generic_relations-taggeditem-content_type-object_id-1-DELETE" /> <input type="hidden" name="generic_relations-taggeditem-content_type-object_id-1-id" id="id_generic_relations-taggeditem-content_type-object_id-1-id" /></p>""" % tagged_item_id ) lion = Animal.objects.create(common_name="Lion", latin_name="Panthera leo") formset = GenericFormSet(instance=lion, prefix='x') self.assertHTMLEqual( ''.join(form.as_p() for form in formset.forms), """<p><label for="id_x-0-tag">Tag:</label> <input id="id_x-0-tag" type="text" name="x-0-tag" maxlength="50" /></p> <p><label for="id_x-0-DELETE">Delete:</label> <input type="checkbox" name="x-0-DELETE" id="id_x-0-DELETE" /> <input type="hidden" 
name="x-0-id" id="id_x-0-id" /></p>""" ) def test_gfk_manager(self): # GenericForeignKey should not use the default manager (which may filter objects) #16048 tailless = Gecko.objects.create(has_tail=False) tag = TaggedItem.objects.create(content_object=tailless, tag="lizard") self.assertEqual(tag.content_object, tailless) def test_subclasses_with_gen_rel(self): """ Test that concrete model subclasses with generic relations work correctly (ticket 11263). """ granite = Rock.objects.create(name='granite', hardness=5) TaggedItem.objects.create(content_object=granite, tag="countertop") self.assertEqual(Rock.objects.get(tags__tag="countertop"), granite) def test_subclasses_with_parent_gen_rel(self): """ Generic relations on a base class (Vegetable) work correctly in subclasses (Carrot). """ bear = Carrot.objects.create(name='carrot') TaggedItem.objects.create(content_object=bear, tag='orange') self.assertEqual(Carrot.objects.get(tags__tag='orange'), bear) def test_generic_inline_formsets_initial(self): """ Test for #17927 Initial values support for BaseGenericInlineFormSet. 
""" quartz = Mineral.objects.create(name="Quartz", hardness=7) GenericFormSet = generic_inlineformset_factory(TaggedItem, extra=1) ctype = ContentType.objects.get_for_model(quartz) initial_data = [{ 'tag': 'lizard', 'content_type': ctype.pk, 'object_id': quartz.pk, }] formset = GenericFormSet(initial=initial_data) self.assertEqual(formset.forms[0].initial, initial_data[0]) def test_get_or_create(self): # get_or_create should work with virtual fields (content_object) quartz = Mineral.objects.create(name="Quartz", hardness=7) tag, created = TaggedItem.objects.get_or_create(tag="shiny", defaults={'content_object': quartz}) self.assertTrue(created) self.assertEqual(tag.tag, "shiny") self.assertEqual(tag.content_object.id, quartz.id) def test_update_or_create_defaults(self): # update_or_create should work with virtual fields (content_object) quartz = Mineral.objects.create(name="Quartz", hardness=7) diamond = Mineral.objects.create(name="Diamond", hardness=7) tag, created = TaggedItem.objects.update_or_create(tag="shiny", defaults={'content_object': quartz}) self.assertTrue(created) self.assertEqual(tag.content_object.id, quartz.id) tag, created = TaggedItem.objects.update_or_create(tag="shiny", defaults={'content_object': diamond}) self.assertFalse(created) self.assertEqual(tag.content_object.id, diamond.id) def test_query_content_type(self): msg = "Field 'content_object' does not generate an automatic reverse relation" with self.assertRaisesMessage(FieldError, msg): TaggedItem.objects.get(content_object='') def test_unsaved_instance_on_generic_foreign_key(self): """ Assigning an unsaved object to GenericForeignKey should raise an exception on model.save(). """ quartz = Mineral(name="Quartz", hardness=7) with self.assertRaises(IntegrityError): TaggedItem.objects.create(tag="shiny", content_object=quartz) def test_cache_invalidation_for_content_type_id(self): # Create a Vegetable and Mineral with the same id. 
new_id = max(Vegetable.objects.order_by('-id')[0].id, Mineral.objects.order_by('-id')[0].id) + 1 broccoli = Vegetable.objects.create(id=new_id, name="Broccoli") diamond = Mineral.objects.create(id=new_id, name="Diamond", hardness=7) tag = TaggedItem.objects.create(content_object=broccoli, tag="yummy") tag.content_type = ContentType.objects.get_for_model(diamond) self.assertEqual(tag.content_object, diamond) def test_cache_invalidation_for_object_id(self): broccoli = Vegetable.objects.create(name="Broccoli") cauliflower = Vegetable.objects.create(name="Cauliflower") tag = TaggedItem.objects.create(content_object=broccoli, tag="yummy") tag.object_id = cauliflower.id self.assertEqual(tag.content_object, cauliflower) def test_assign_content_object_in_init(self): spinach = Vegetable(name="spinach") tag = TaggedItem(content_object=spinach) self.assertEqual(tag.content_object, spinach) class CustomWidget(forms.TextInput): pass class TaggedItemForm(forms.ModelForm): class Meta: model = TaggedItem fields = '__all__' widgets = {'tag': CustomWidget} class GenericInlineFormsetTest(TestCase): def test_generic_inlineformset_factory(self): """ Regression for #14572: Using base forms with widgets defined in Meta should not raise errors. 
""" Formset = generic_inlineformset_factory(TaggedItem, TaggedItemForm) form = Formset().forms[0] self.assertIsInstance(form['tag'].field.widget, CustomWidget) def test_save_new_uses_form_save(self): """ Regression for #16260: save_new should call form.save() """ class SaveTestForm(forms.ModelForm): def save(self, *args, **kwargs): self.instance.saved_by = "custom method" return super(SaveTestForm, self).save(*args, **kwargs) Formset = generic_inlineformset_factory(ForProxyModelModel, fields='__all__', form=SaveTestForm) instance = ProxyRelatedModel.objects.create() data = { 'form-TOTAL_FORMS': '1', 'form-INITIAL_FORMS': '0', 'form-MAX_NUM_FORMS': '', 'form-0-title': 'foo', } formset = Formset(data, instance=instance, prefix='form') self.assertTrue(formset.is_valid()) new_obj = formset.save()[0] self.assertEqual(new_obj.saved_by, "custom method") def test_save_new_for_proxy(self): Formset = generic_inlineformset_factory(ForProxyModelModel, fields='__all__', for_concrete_model=False) instance = ProxyRelatedModel.objects.create() data = { 'form-TOTAL_FORMS': '1', 'form-INITIAL_FORMS': '0', 'form-MAX_NUM_FORMS': '', 'form-0-title': 'foo', } formset = Formset(data, instance=instance, prefix='form') self.assertTrue(formset.is_valid()) new_obj, = formset.save() self.assertEqual(new_obj.obj, instance) def test_save_new_for_concrete(self): Formset = generic_inlineformset_factory(ForProxyModelModel, fields='__all__', for_concrete_model=True) instance = ProxyRelatedModel.objects.create() data = { 'form-TOTAL_FORMS': '1', 'form-INITIAL_FORMS': '0', 'form-MAX_NUM_FORMS': '', 'form-0-title': 'foo', } formset = Formset(data, instance=instance, prefix='form') self.assertTrue(formset.is_valid()) new_obj, = formset.save() self.assertNotIsInstance(new_obj.obj, ProxyRelatedModel) class ProxyRelatedModelTest(TestCase): def test_default_behavior(self): """ The default for for_concrete_model should be True """ base = ForConcreteModelModel() base.obj = rel = 
ProxyRelatedModel.objects.create() base.save() base = ForConcreteModelModel.objects.get(pk=base.pk) rel = ConcreteRelatedModel.objects.get(pk=rel.pk) self.assertEqual(base.obj, rel) def test_works_normally(self): """ When for_concrete_model is False, we should still be able to get an instance of the concrete class. """ base = ForProxyModelModel() base.obj = rel = ConcreteRelatedModel.objects.create() base.save() base = ForProxyModelModel.objects.get(pk=base.pk) self.assertEqual(base.obj, rel) def test_proxy_is_returned(self): """ Instances of the proxy should be returned when for_concrete_model is False. """ base = ForProxyModelModel() base.obj = ProxyRelatedModel.objects.create() base.save() base = ForProxyModelModel.objects.get(pk=base.pk) self.assertIsInstance(base.obj, ProxyRelatedModel) def test_query(self): base = ForProxyModelModel() base.obj = rel = ConcreteRelatedModel.objects.create() base.save() self.assertEqual(rel, ConcreteRelatedModel.objects.get(bases__id=base.id)) def test_query_proxy(self): base = ForProxyModelModel() base.obj = rel = ProxyRelatedModel.objects.create() base.save() self.assertEqual(rel, ProxyRelatedModel.objects.get(bases__id=base.id)) def test_generic_relation(self): base = ForProxyModelModel() base.obj = ProxyRelatedModel.objects.create() base.save() base = ForProxyModelModel.objects.get(pk=base.pk) rel = ProxyRelatedModel.objects.get(pk=base.obj.pk) self.assertEqual(base, rel.bases.get()) def test_generic_relation_set(self): base = ForProxyModelModel() base.obj = ConcreteRelatedModel.objects.create() base.save() newrel = ConcreteRelatedModel.objects.create() newrel.bases.set([base]) newrel = ConcreteRelatedModel.objects.get(pk=newrel.pk) self.assertEqual(base, newrel.bases.get()) class TestInitWithNoneArgument(SimpleTestCase): def test_none_allowed(self): # AllowsNullGFK doesn't require a content_type, so None argument should # also be allowed. 
AllowsNullGFK(content_object=None) # TaggedItem requires a content_type but initializing with None should # be allowed. TaggedItem(content_object=None)
bsd-3-clause
voostar/pdfmergeWEB
pdfmergeWEB/settings.py
1
5511
# Django settings for pdfmergeWEB project.

import os

# NOTE(review): DEBUG must be False in production; leaving it True exposes
# stack traces and settings to visitors.
DEBUG = True
TEMPLATE_DEBUG = DEBUG

ADMINS = (
    # ('Your Name', 'your_email@example.com'),
)

MANAGERS = ADMINS

DATABASES = {
    'default': {
        # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
        'ENGINE': 'django.db.backends.',
        # Or path to database file if using sqlite3.
        'NAME': '',
        # The following settings are not used with sqlite3:
        'USER': '',
        'PASSWORD': '',
        # Empty for localhost through domain sockets or '127.0.0.1' for
        # localhost through TCP.
        'HOST': '',
        # Set to empty string for default.
        'PORT': '',
    }
}

# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []

# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'Asia/Shanghai'

# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'zh-cn'

SITE_ID = 1

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True

# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True

# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True

# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''

# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''

# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''

# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'

# Additional locations of static files
STATICFILES_DIRS = (
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)

# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)

# Make this unique, and don't share it with anybody.
# NOTE(review): a secret key committed to version control should be rotated
# and loaded from the environment (e.g. os.environ) instead.
SECRET_KEY = 'rsr)!nlw9ramvnybqbq0i^e!m2j8c6o1mpul3vtjanwvx#h$z-'

# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    # 'django.template.loaders.eggs.Loader',
)

MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    # 'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    # Uncomment the next line for simple clickjacking protection:
    # 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)

ROOT_URLCONF = 'pdfmergeWEB.urls'

# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'pdfmergeWEB.wsgi.application'

# Repository root (one directory above the package holding this file).
PROJECT_PATH = os.path.dirname(os.path.dirname(__file__))

TEMPLATE_DIRS = (
    os.path.abspath(os.path.join(PROJECT_PATH, 'templates/')),
    # Put strings here, like "/home/html/django_templates" or
    # "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Uncomment the next line to enable the admin:
    # 'django.contrib.admin',
    # Uncomment the next line to enable admin documentation:
    # 'django.contrib.admindocs',
    'webpage',
)

# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
mit
okwasi/gyp
test/gyp-defines/gyptest-regyp.py
268
1260
#!/usr/bin/env python

# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""
Verifies that when the same value is repeated for a gyp define, duplicates are
stripped from the regeneration rule.
"""

import os

import TestGyp

# Regenerating build files when a gyp file changes is currently only supported
# by the make generator.
test = TestGyp.TestGyp(formats=['make'])

os.environ['GYP_DEFINES'] = 'key=repeated_value key=value1 key=repeated_value'
test.run_gyp('defines.gyp')
test.build('defines.gyp')

# The last occurrence of a repeated set should take precedence over other
# values. See gyptest-multiple-values.py.
test.must_contain('action.txt', 'repeated_value')

# So the regeneration rule needs to use the correct order.
test.must_not_contain(
    'Makefile', '"-Dkey=repeated_value" "-Dkey=value1" "-Dkey=repeated_value"')
test.must_contain('Makefile', '"-Dkey=value1" "-Dkey=repeated_value"')

# Sleep so that the changed gyp file will have a newer timestamp than the
# previously generated build files.
test.sleep()
os.utime("defines.gyp", None)

test.build('defines.gyp')
test.must_contain('action.txt', 'repeated_value')

test.pass_test()
bsd-3-clause
citrix-openstack-build/python-neutronclient
neutronclient/openstack/common/timeutils.py
33
5625
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

"""
Time related utilities and helper functions.
"""

import calendar
import datetime

import iso8601
import six


# ISO 8601 extended time format with microseconds
_ISO8601_TIME_FORMAT_SUBSECOND = '%Y-%m-%dT%H:%M:%S.%f'
_ISO8601_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S'
PERFECT_TIME_FORMAT = _ISO8601_TIME_FORMAT_SUBSECOND


def isotime(at=None, subsecond=False):
    """Stringify time in ISO 8601 format."""
    if not at:
        at = utcnow()
    st = at.strftime(_ISO8601_TIME_FORMAT
                     if not subsecond
                     else _ISO8601_TIME_FORMAT_SUBSECOND)
    tz = at.tzinfo.tzname(None) if at.tzinfo else 'UTC'
    st += ('Z' if tz == 'UTC' else tz)
    return st


def parse_isotime(timestr):
    """Parse time from ISO 8601 format."""
    try:
        return iso8601.parse_date(timestr)
    except iso8601.ParseError as e:
        # six.text_type instead of the Python-2-only ``unicode`` builtin, so
        # this also works on Python 3 (six is already a dependency here).
        raise ValueError(six.text_type(e))
    except TypeError as e:
        raise ValueError(six.text_type(e))


def strtime(at=None, fmt=PERFECT_TIME_FORMAT):
    """Returns formatted utcnow."""
    if not at:
        at = utcnow()
    return at.strftime(fmt)


def parse_strtime(timestr, fmt=PERFECT_TIME_FORMAT):
    """Turn a formatted time back into a datetime."""
    return datetime.datetime.strptime(timestr, fmt)


def normalize_time(timestamp):
    """Normalize time in arbitrary timezone to UTC naive object."""
    offset = timestamp.utcoffset()
    if offset is None:
        return timestamp
    return timestamp.replace(tzinfo=None) - offset


def is_older_than(before, seconds):
    """Return True if before is older than seconds."""
    if isinstance(before, six.string_types):
        before = parse_strtime(before).replace(tzinfo=None)
    return utcnow() - before > datetime.timedelta(seconds=seconds)


def is_newer_than(after, seconds):
    """Return True if after is newer than seconds."""
    if isinstance(after, six.string_types):
        after = parse_strtime(after).replace(tzinfo=None)
    return after - utcnow() > datetime.timedelta(seconds=seconds)


def utcnow_ts():
    """Timestamp version of our utcnow function."""
    return calendar.timegm(utcnow().timetuple())


def utcnow():
    """Overridable version of utils.utcnow."""
    if utcnow.override_time:
        try:
            # A list override: consume one value per call.
            return utcnow.override_time.pop(0)
        except AttributeError:
            # A single datetime override.
            return utcnow.override_time
    return datetime.datetime.utcnow()


def iso8601_from_timestamp(timestamp):
    """Returns a iso8601 formated date from timestamp."""
    return isotime(datetime.datetime.utcfromtimestamp(timestamp))


utcnow.override_time = None


def set_time_override(override_time=None):
    """Overrides utils.utcnow.

    Make it return a constant time or a list thereof, one at a time.

    BUGFIX: the previous default of ``datetime.datetime.utcnow()`` was an
    early-bound default argument, evaluated once at import time, so every
    no-argument call froze the clock at module-load time. The current time
    is now computed at call time instead.
    """
    utcnow.override_time = override_time or datetime.datetime.utcnow()


def advance_time_delta(timedelta):
    """Advance overridden time using a datetime.timedelta."""
    assert utcnow.override_time is not None
    try:
        # List override: advance each element *in place*. BUGFIX: the old
        # ``for dt in ...: dt += timedelta`` only rebound the loop variable
        # and left the list contents unchanged.
        for i, dt in enumerate(utcnow.override_time):
            utcnow.override_time[i] = dt + timedelta
    except TypeError:
        # Single datetime override.
        utcnow.override_time += timedelta


def advance_time_seconds(seconds):
    """Advance overridden time by seconds."""
    advance_time_delta(datetime.timedelta(0, seconds))


def clear_time_override():
    """Remove the overridden time."""
    utcnow.override_time = None


def marshall_now(now=None):
    """Make an rpc-safe datetime with microseconds.

    Note: tzinfo is stripped, but not required for relative times.
    """
    if not now:
        now = utcnow()
    return dict(day=now.day, month=now.month, year=now.year, hour=now.hour,
                minute=now.minute, second=now.second,
                microsecond=now.microsecond)


def unmarshall_time(tyme):
    """Unmarshall a datetime dict."""
    return datetime.datetime(day=tyme['day'],
                             month=tyme['month'],
                             year=tyme['year'],
                             hour=tyme['hour'],
                             minute=tyme['minute'],
                             second=tyme['second'],
                             microsecond=tyme['microsecond'])


def delta_seconds(before, after):
    """Return the difference between two timing objects.

    Compute the difference in seconds between two date, time, or
    datetime objects (as a float, to microsecond resolution).
    """
    delta = after - before
    try:
        return delta.total_seconds()
    except AttributeError:
        # Python < 2.7 has no timedelta.total_seconds().
        return ((delta.days * 24 * 3600) + delta.seconds +
                float(delta.microseconds) / (10 ** 6))


def is_soon(dt, window):
    """Determines if time is going to happen in the next window seconds.

    :params dt: the time
    :params window: minimum seconds to remain to consider the time not soon

    :return: True if expiration is within the given duration
    """
    soon = (utcnow() + datetime.timedelta(seconds=window))
    return normalize_time(dt) <= soon
apache-2.0
rsalveti/xbmc-eden-flattened
addons/script.module.simplejson/lib/simplejson/scanner.py
928
2227
"""JSON token scanner """ import re try: from simplejson._speedups import make_scanner as c_make_scanner except ImportError: c_make_scanner = None __all__ = ['make_scanner'] NUMBER_RE = re.compile( r'(-?(?:0|[1-9]\d*))(\.\d+)?([eE][-+]?\d+)?', (re.VERBOSE | re.MULTILINE | re.DOTALL)) def py_make_scanner(context): parse_object = context.parse_object parse_array = context.parse_array parse_string = context.parse_string match_number = NUMBER_RE.match encoding = context.encoding strict = context.strict parse_float = context.parse_float parse_int = context.parse_int parse_constant = context.parse_constant object_hook = context.object_hook def _scan_once(string, idx): try: nextchar = string[idx] except IndexError: raise StopIteration if nextchar == '"': return parse_string(string, idx + 1, encoding, strict) elif nextchar == '{': return parse_object((string, idx + 1), encoding, strict, _scan_once, object_hook) elif nextchar == '[': return parse_array((string, idx + 1), _scan_once) elif nextchar == 'n' and string[idx:idx + 4] == 'null': return None, idx + 4 elif nextchar == 't' and string[idx:idx + 4] == 'true': return True, idx + 4 elif nextchar == 'f' and string[idx:idx + 5] == 'false': return False, idx + 5 m = match_number(string, idx) if m is not None: integer, frac, exp = m.groups() if frac or exp: res = parse_float(integer + (frac or '') + (exp or '')) else: res = parse_int(integer) return res, m.end() elif nextchar == 'N' and string[idx:idx + 3] == 'NaN': return parse_constant('NaN'), idx + 3 elif nextchar == 'I' and string[idx:idx + 8] == 'Infinity': return parse_constant('Infinity'), idx + 8 elif nextchar == '-' and string[idx:idx + 9] == '-Infinity': return parse_constant('-Infinity'), idx + 9 else: raise StopIteration return _scan_once make_scanner = c_make_scanner or py_make_scanner
gpl-2.0
appleseedhq/gaffer
python/GafferSceneUI/ShaderTweaksUI.py
1
12283
##########################################################################
#
#  Copyright (c) 2016, Image Engine Design Inc. All rights reserved.
#
#  Redistribution and use in source and binary forms, with or without
#  modification, are permitted provided that the following conditions are
#  met:
#
#      * Redistributions of source code must retain the above
#        copyright notice, this list of conditions and the following
#        disclaimer.
#
#      * Redistributions in binary form must reproduce the above
#        copyright notice, this list of conditions and the following
#        disclaimer in the documentation and/or other materials provided with
#        the distribution.
#
#      * Neither the name of John Haddon nor the names of
#        any other contributors to this software may be used to endorse or
#        promote products derived from this software without specific prior
#        written permission.
#
#  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
#  IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
#  THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
#  PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
#  CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
#  EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
#  PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
#  PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
#  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
#  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
#  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################

import functools
import imath

import IECore
import IECoreScene

import Gaffer
import GafferUI
import GafferScene
import GafferSceneUI

Gaffer.Metadata.registerNode(

	GafferScene.ShaderTweaks,

	"description",
	"""
	Makes modifications to shader parameter values.
	""",

	plugs = {

		"shader" : [

			"description",
			"""
			The type of shader to modify. This is actually the name
			of an attribute which contains the shader network.
			""",

			"plugValueWidget:type", "GafferUI.PresetsPlugValueWidget",
			"presetsPlugValueWidget:allowCustom", True,

			"preset:None", "",

		],

		"ignoreMissing" : [

			"description",
			"""
			Ignores tweaks targeting missing parameters. When off,
			missing parameters cause the node to error.
			"""

		],

		"tweaks" : [

			"description",
			"""
			The tweaks to be made to the parameters of the shader.
			Arbitrary numbers of user defined tweaks may be
			added as children of this plug via the user
			interface, or using the ShaderTweaks API via python.
			""",

			"plugValueWidget:type", "GafferUI.LayoutPlugValueWidget",
			"layout:customWidget:footer:widgetType", "GafferSceneUI.ShaderTweaksUI._TweaksFooter",
			"layout:customWidget:footer:index", -1,

			"nodule:type", "GafferUI::CompoundNodule",
			"noduleLayout:section", "left",
			"noduleLayout:spacing", 0.2,

			# Add + button for showing and hiding parameters in the GraphEditor
			"noduleLayout:customGadget:addButton:gadgetType", "GafferSceneUI.ShaderTweaksUI.PlugAdder",

		],

		"tweaks.*" : [

			# Can be shown individually using PlugAdder above
			"noduleLayout:visible", False,

		],

	}

)

##########################################################################
# Internal utilities
##########################################################################

def _shaderTweaksNode( plugValueWidget ) :

	# The plug may not belong to a ShaderTweaks node
	# directly. Instead it may have been promoted
	# elsewhere and be driving a target plug on a
	# ShaderTweaks node.
	def walkOutputs( plug ) :

		if isinstance( plug.node(), GafferScene.ShaderTweaks ) :
			return plug.node()
		for output in plug.outputs() :
			node = walkOutputs( output )
			if node is not None :
				return node

	return walkOutputs( plugValueWidget.getPlug() )

def _pathsFromAffected( plugValueWidget ) :

	# Paths matched by the node's filter, or an empty list if no
	# ShaderTweaks node can be reached from this widget's plug.
	node = _shaderTweaksNode( plugValueWidget )
	if node is None :
		return []

	pathMatcher = IECore.PathMatcher()
	with plugValueWidget.getContext() :
		GafferScene.SceneAlgo.matchingPaths( node["filter"], node["in"], pathMatcher )

	return pathMatcher.paths()

def _pathsFromSelection( plugValueWidget ) :

	# Currently selected scene paths that exist in the node's input scene.
	node = _shaderTweaksNode( plugValueWidget )
	if node is None :
		return []

	paths = GafferSceneUI.ContextAlgo.getSelectedPaths( plugValueWidget.getContext() )
	paths = paths.paths() if paths else []

	with plugValueWidget.getContext() :
		paths = [ p for p in paths if node["in"].exists( p ) ]

	return paths

def _shaderAttributes( plugValueWidget, paths, affectedOnly ) :

	# Returns { path : { attributeName : ShaderNetwork } } for the shader
	# attributes found at `paths`. When `affectedOnly` is True, only
	# attributes matching the node's "shader" pattern are included.
	result = {}
	node = _shaderTweaksNode( plugValueWidget )
	if node is None :
		return result

	with plugValueWidget.getContext() :
		attributeNamePatterns = node["shader"].getValue() if affectedOnly else "*"
		for path in paths :
			attributes = node["in"].attributes( path )
			for name, attribute in attributes.items() :
				if not IECore.StringAlgo.matchMultiple( name, attributeNamePatterns ) :
					continue
				if not isinstance( attribute, IECoreScene.ShaderNetwork ) or not len( attribute ) :
					continue
				result.setdefault( path, {} )[name] = attribute

	return result

##########################################################################
# _TweaksFooter
##########################################################################

class _TweaksFooter( GafferUI.PlugValueWidget ) :

	def __init__( self, plug ) :

		row = GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Horizontal )
		GafferUI.PlugValueWidget.__init__( self, row, plug )

		with row :

			GafferUI.Spacer( imath.V2i( GafferUI.PlugWidget.labelWidth(), 1 ) )

			GafferUI.MenuButton(
				image = "plus.png",
				hasFrame = False,
				menu = GafferUI.Menu( Gaffer.WeakMethod( self.__menuDefinition ) )
			)

			GafferUI.Spacer( imath.V2i( 1 ), imath.V2i( 999999, 1 ), parenting = { "expand" : True } )

	def _updateFromPlug( self ) :

		self.setEnabled( self._editable() )

	def __menuDefinition( self ) :

		result = IECore.MenuDefinition()

		result.append(
			"/From Affected",
			{
				"subMenu" : Gaffer.WeakMethod( self.__addFromAffectedMenuDefinition )
			}
		)

		result.append(
			"/From Selection",
			{
				"subMenu" : Gaffer.WeakMethod( self.__addFromSelectedMenuDefinition )
			}
		)

		result.append( "/FromPathsDivider", { "divider" : True } )

		# TODO - would be nice to share these default options with other users of TweakPlug
		for item in [
			Gaffer.BoolPlug,
			Gaffer.FloatPlug,
			Gaffer.IntPlug,
			"NumericDivider",
			Gaffer.StringPlug,
			"StringDivider",
			Gaffer.V2iPlug,
			Gaffer.V3iPlug,
			Gaffer.V2fPlug,
			Gaffer.V3fPlug,
			"VectorDivider",
			Gaffer.Color3fPlug,
			Gaffer.Color4fPlug
		] :

			if isinstance( item, basestring ) :
				result.append( "/" + item, { "divider" : True } )
			else :
				result.append(
					"/" + item.__name__.replace( "Plug", "" ),
					{
						"command" : functools.partial( Gaffer.WeakMethod( self.__addTweak ), "", item ),
					}
				)

		return result

	def __addFromAffectedMenuDefinition( self ) :

		return self.__addFromPathsMenuDefinition( _pathsFromAffected( self ) )

	def __addFromSelectedMenuDefinition( self ) :

		return self.__addFromPathsMenuDefinition( _pathsFromSelection( self ) )

	def __addFromPathsMenuDefinition( self, paths ) :

		result = IECore.MenuDefinition()

		shaderAttributes = _shaderAttributes( self, paths, affectedOnly = True )
		if not len( shaderAttributes ) :
			result.append( "/No Shaders Found", { "active" : False } )
			return result

		# Gather the union of parameters per shader, keyed by shader name.
		# The output shader of each network is keyed by "" so it appears
		# in the "/Main" menu below.
		shaders = {}
		for attributes in shaderAttributes.values() :
			for attributeName, network in attributes.items() :
				for shaderName, shader in network.shaders().items() :
					if shaderName == network.getOutput().shader :
						shaderName = ""
					shaderParameters = shaders.setdefault( shaderName, {} )
					for parameterName, parameterValue in shader.parameters.items() :
						if parameterName.startswith( "__" ) :
							continue
						shaderParameters[parameterName] = parameterValue

		if not len( shaders ) :
			result.append( "/No Parameters Found", { "active" : False } )
			return result

		for shaderName, shader in shaders.items() :

			menuPrefix = "/"
			tweakPrefix = ""
			if len( shaders ) > 1 :
				menuPrefix = "/Other/{0}/".format( shaderName ) if shaderName else "/Main/"
				tweakPrefix = "{0}.".format( shaderName ) if shaderName else ""

			for parameterName in sorted( shader.keys() ) :
				result.append(
					menuPrefix + parameterName,
					{
						"command" : functools.partial(
							Gaffer.WeakMethod( self.__addTweak ),
							tweakPrefix + parameterName, shader[parameterName]
						)
					}
				)

		return result

	def __addTweak( self, name, plugTypeOrValue ) :

		if isinstance( plugTypeOrValue, IECore.Data ) :
			plug = GafferScene.TweakPlug( name, plugTypeOrValue )
		else :
			plug = GafferScene.TweakPlug( name, plugTypeOrValue() )

		if name :
			plug.setName( name.replace( ".", "_" ) )

		with Gaffer.UndoScope( self.getPlug().ancestor( Gaffer.ScriptNode ) ) :
			self.getPlug().addChild( plug )

##########################################################################
# PlugValueWidget context menu
##########################################################################

def __setShaderFromAffectedMenuDefinition( menu ) :

	plugValueWidget = menu.ancestor( GafferUI.PlugValueWidget )
	return __setShaderFromPathsMenuDefinition( plugValueWidget, _pathsFromAffected( plugValueWidget ) )

def __setShaderFromSelectionMenuDefinition( menu ) :

	plugValueWidget = menu.ancestor( GafferUI.PlugValueWidget )
	return __setShaderFromPathsMenuDefinition( plugValueWidget, _pathsFromSelection( plugValueWidget ) )

def __setShader( plug, value ) :

	with Gaffer.UndoScope( plug.ancestor( Gaffer.ScriptNode ) ) :
		plug.setValue( value )

def __setShaderFromPathsMenuDefinition( plugValueWidget, paths ) :

	shaderAttributes = _shaderAttributes( plugValueWidget, paths, affectedOnly = False )
	names = set().union( *[ set( a.keys() ) for a in shaderAttributes.values() ] )

	result = IECore.MenuDefinition()
	for name in sorted( names ) :
		result.append(
			"/" + name,
			{
				"command" : functools.partial( __setShader, plugValueWidget.getPlug(), name ),
				"active" : not plugValueWidget.getReadOnly() and not Gaffer.MetadataAlgo.readOnly( plugValueWidget.getPlug() ),
			}
		)

	return result

def __plugPopupMenu( menuDefinition, plugValueWidget ) :

	plug = plugValueWidget.getPlug()
	if plug is None :
		return

	node = plug.node()
	if not isinstance( node, GafferScene.ShaderTweaks ) :
		return

	if plug != node["shader"] :
		return

	menuDefinition.prepend( "/ShaderTweaksDivider/", { "divider" : True } )
	menuDefinition.prepend( "/From Selection/", { "subMenu" : __setShaderFromSelectionMenuDefinition } )
	menuDefinition.prepend( "/From Affected/", { "subMenu" : __setShaderFromAffectedMenuDefinition } )

GafferUI.PlugValueWidget.popupMenuSignal().connect( __plugPopupMenu, scoped = False )

##########################################################################
# Nodule context menu
##########################################################################

def __setPlugMetadata( plug, key, value ) :

	with Gaffer.UndoScope( plug.ancestor( Gaffer.ScriptNode ) ) :
		Gaffer.Metadata.registerValue( plug, key, value )

def __graphEditorPlugContextMenu( graphEditor, plug, menuDefinition ) :

	if not isinstance( plug.node(), GafferScene.ShaderTweaks ) :
		return

	tweakPlug = plug.parent()
	if not isinstance( tweakPlug, GafferScene.TweakPlug ) :
		return False

	if tweakPlug.parent() != plug.node()["tweaks"] :
		return

	if len( menuDefinition.items() ) :
		menuDefinition.append( "/HideDivider", { "divider" : True } )

	menuDefinition.append(

		"/Hide",
		{
			"command" : functools.partial( __setPlugMetadata, tweakPlug, "noduleLayout:visible", False ),
			# BUGFIX: was `Gaffer.readOnly( tweakPlug )`, which is not the
			# API the rest of this module uses - see
			# __setShaderFromPathsMenuDefinition above, which calls
			# Gaffer.MetadataAlgo.readOnly.
			"active" : plug.getInput() is None and not Gaffer.MetadataAlgo.readOnly( tweakPlug ),
		}

	)

GafferUI.GraphEditor.plugContextMenuSignal().connect( __graphEditorPlugContextMenu, scoped = False )
bsd-3-clause
im-infamou5/volatility
volatility/win32/crashdump.py
58
28101
# Volatility # Copyright (c) 2007-2013 Volatility Foundation # Copyright (c) 2008 Brendan Dolan-Gavitt <bdolangavitt@wesleyan.edu> # # This file is part of Volatility. # # Volatility is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # Volatility is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Volatility. If not, see <http://www.gnu.org/licenses/>. # """ @author: AAron Walters and Brendan Dolan-Gavitt @license: GNU General Public License 2.0 @contact: awalters@4tphi.net,bdolangavitt@wesleyan.edu @organization: Volatility Foundation Tool: This tool generates a crash dump from a image of ram """ #pylint: disable-msg=C0111 #from forensics.object import get_obj_offset #from forensics.win32.info import find_psactiveprocesshead #from forensics.win32.info import find_psloadedmodulelist #from forensics.win32.info import find_mmpfndatabase #from forensics.win32.info import find_kddebuggerdatablock #from forensics.win32.info import find_systemtime #from forensics.win32.info import find_suitemask #from forensics.win32.tasks import process_list #from forensics.win32.tasks import process_addr_space #from forensics.win32.tasks import peb_number_processors #from forensics.win32.tasks import process_peb #from forensics.win32.tasks import * dump_hdr = "" # 0x00 dump_hdr += "\x50\x41\x47\x45\x44\x55\x4D\x50\x0F\x00\x00\x00\x28\x0A\x00\x00" # 0x10 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x20 dump_hdr += "\x4C\x01\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x30 dump_hdr += 
"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x50\x41\x47\x45" # 0x40 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0x50 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x00\x41\x47\x45" # 0x60 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x70 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x80 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x90 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0xa0 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0xb0 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0xc0 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0xd0 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0xe0 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0xf0 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x100 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x110 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x120 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x130 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x140 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x150 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x160 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x170 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x180 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x190 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x1a0 dump_hdr += 
"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x1b0 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x1c0 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x1d0 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x1e0 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x1f0 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x200 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x210 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x220 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x230 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x240 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x250 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x260 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x270 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x280 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x290 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x2a0 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x2b0 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x2c0 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x2d0 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x2e0 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x2f0 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x300 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x310 dump_hdr 
+= "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x320 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x330 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x340 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x350 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x360 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x370 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x380 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x390 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x3a0 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x3b0 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x3c0 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x3d0 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x3e0 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x3f0 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x400 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x410 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x420 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x430 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x440 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x450 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x460 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x470 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x480 
dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x490 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x4a0 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x4b0 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x4c0 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x4d0 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x4e0 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x4f0 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x500 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x510 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x520 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x530 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x540 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x550 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x560 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x570 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x580 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x590 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x5a0 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x5b0 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x5c0 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x5d0 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x5e0 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x50\x41\x47\x45" # 
0x5f0 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0x600 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0x610 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0x620 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0x630 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0x640 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0x650 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0x660 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0x670 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0x680 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0x690 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0x6a0 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0x6b0 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0x6c0 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0x6d0 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0x6e0 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0x6F0 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0x700 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0x710 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0x720 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0x730 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0x740 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0x750 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" 
# 0x760 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0x770 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0x780 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0x790 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0x7a0 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0x7b0 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0x7c0 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x00\x41\x47\x45" # 0x7d0 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x7e0 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x7f0 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x800 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x810 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" # 0x820 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0x830 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0x840 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0x850 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0x860 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0x870 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0x880 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0x890 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0x8a0 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0x8b0 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0x8c0 dump_hdr += 
"\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0x8d0 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0x8e0 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0x8f0 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0x900 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0x910 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0x920 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0x930 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0x940 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0x950 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0x960 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0x970 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0x980 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0x990 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0x9a0 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0x9b0 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0x9c0 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0x9d0 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0x9e0 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0x9f0 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0xA00 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0xA10 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0xA20 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0xA30 dump_hdr 
+= "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0xA40 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0xA50 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0xA60 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0xA70 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0xA80 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0xA90 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0xAa0 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0xAb0 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0xAc0 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0xAd0 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0xAe0 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0xAf0 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0xb00 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0xb10 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0xb20 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0xb30 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0xb40 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0xb50 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0xb60 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0xb70 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0xb80 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0xb90 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0xba0 
dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0xbb0 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0xbc0 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0xbd0 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0xbe0 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0xbf0 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0xc00 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0xc10 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0xc20 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0xc30 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0xc40 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0xc50 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0xc60 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0xc70 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0xc80 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0xc90 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0xca0 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0xcb0 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0xcc0 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0xcd0 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0xce0 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0xcf0 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0xd00 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 
0xd10 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0xd20 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0xd30 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0xd40 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0xd50 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0xd60 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0xd70 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0xd80 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0xd90 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0xda0 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0xdb0 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0xdc0 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0xdd0 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0xde0 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0xdf0 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0xe00 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0xe10 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0xe20 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0xe30 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0xe40 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0xe50 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0xe60 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0xe70 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" 
# 0xe80 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0xe90 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0xea0 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0xeb0 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0xec0 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0xed0 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0xee0 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0xef0 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" #0xf00 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" #0xf10 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" #0xf20 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" #0xf30 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" #0xf40 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" #0xf50 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0xf60 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0xf70 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0xf80 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x01\x00\x00\x00\x50\x41\x47\x45" # 0xF90 dump_hdr += "\x50\x41\x47\x45\x01\x00\x00\x00\x10\x01\x00\x00\x00\x00\x00\x00" # 0xFA0 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x50\x41\x47\x45\x00\x41\x47\x45" # 0xFB0 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x00\x00\x00\x00\x00\x00\x00\x00" # 0xFC0 dump_hdr += "\x00\x00\x00\x00\x00\x00\x00\x00\x50\x41\x47\x45\x50\x41\x47\x45" # 0xFD0 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 0xFE0 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" # 
0xFF0 dump_hdr += "\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45\x50\x41\x47\x45" num_of_runs = 0x00000001 base_page = 0x00000000 pae_enabled = 0x01 #def find_numberprocessors(addr_space, types): # # NumberOfProcessorsDict = dict() # all_tasks = process_list(addr_space, types) # # for task in all_tasks: # # if not addr_space.is_valid_address(task): # continue # # process_address_space = process_addr_space(addr_space, types, task, addr_space.base.fname) # if process_address_space is None: # continue # # peb = process_peb(addr_space, types, task) # # try: # if not process_address_space.is_valid_address(peb): # continue # except: # continue # # NumberOfProcessors = peb_number_processors(process_address_space, types, peb) # if NumberOfProcessors in NumberOfProcessorsDict: # NumberOfProcessorsDict[NumberOfProcessors] += 1 # else: # NumberOfProcessorsDict[NumberOfProcessors] = 1 # # MaxNumberOfProcessors = max([ (NumberOfProcessorsDict[x], x) for x in NumberOfProcessorsDict])[1] # # return MaxNumberOfProcessors # #def write_char_phys(value, member_list, hdr, types): # # (offset, _current_type) = get_obj_offset(types, member_list) # new_hdr = hdr[:offset] + struct.pack('=B', value) + hdr[offset+1:] # return new_hdr # #def write_long_phys(value, member_list, hdr, types): # # (offset, _current_type) = get_obj_offset(types, member_list) # new_hdr = hdr[:offset] + struct.pack('=I', value) + hdr[offset+4:] # return new_hdr # #def write_long_long_phys(value, member_list, hdr, types): # # (offset, _current_type) = get_obj_offset(types, member_list) # new_hdr = hdr[:offset] + struct.pack('=Q', value) + hdr[offset+8:] # return new_hdr # #def dd_to_crash(addr_space, types, _symbol_table, opts): # # outfile = opts.outfile # filename = opts.filename # # DirectoryTableBaseValue = addr_space.pgd_vaddr # # PsActiveProcessHead = find_psactiveprocesshead(addr_space, types) # # PsLoadedModuleList = find_psloadedmodulelist(addr_space, types) # # MmPfnDatabase = 
find_mmpfndatabase(addr_space, types) # # KdDebuggerDataBlock = find_kddebuggerdatablock(addr_space, types) # # NumberOfProcessors = find_numberprocessors(addr_space, types) # # SuiteMask = find_suitemask(addr_space, types) # # SystemTime = find_systemtime(addr_space, types) # # num_pages = os.path.getsize(filename)/4096 # # new_hdr = write_long_phys(DirectoryTableBaseValue, ['_DMP_HEADER', 'DirectoryTableBase'], dump_hdr, types) # new_hdr = write_long_phys(PsLoadedModuleList, ['_DMP_HEADER', 'PsLoadedModuleList'], new_hdr, types) # new_hdr = write_long_phys(PsActiveProcessHead, ['_DMP_HEADER', 'PsActiveProcessHead'], new_hdr, types) # new_hdr = write_long_phys(KdDebuggerDataBlock, ['_DMP_HEADER', 'KdDebuggerDataBlock'], new_hdr, types) # new_hdr = write_long_phys(NumberOfProcessors, ['_DMP_HEADER', 'NumberProcessors'], new_hdr, types) # new_hdr = write_long_phys(MmPfnDatabase, ['_DMP_HEADER', 'PfnDataBase'], new_hdr, types) # new_hdr = write_long_phys(SuiteMask, ['_DMP_HEADER', 'SuiteMask'], new_hdr, types) # new_hdr = write_long_long_phys(SystemTime, ['_DMP_HEADER', 'SystemTime'], new_hdr, types) # # if addr_space.pae == True: # new_hdr = write_char_phys(pae_enabled, ['_DMP_HEADER', 'PaeEnabled'], new_hdr, types) # # new_hdr = new_hdr[:100] + struct.pack('=I', num_of_runs) + \ # struct.pack('=I', num_pages) + \ # struct.pack('=I', 0x00000000) + \ # struct.pack('=I', num_pages) + \ # new_hdr[116:] # # MI = open(outfile, 'wb') # MI.write("%s" % new_hdr) # # FILEOPEN = open(filename, 'rb') # # offset = 0 # end = os.path.getsize(filename) # # while offset <= end: # fdata = FILEOPEN.read(0x1000) # if fdata == None: # break # MI.write("%s"%fdata) # # progress.update(offset) # offset += 0x1000 # # print # # FILEOPEN.close() # MI.close() # # return
gpl-2.0
web30s/odoo-9.0c-20160402
hello/templates/openerp/addons/stock/stock.py
1
296175
# -*- coding: utf-8 -*- # Part of Odoo. See LICENSE file for full copyright and licensing details. from datetime import date, datetime from dateutil import relativedelta import json import time import sets import openerp from openerp.osv import fields, osv from openerp.tools.float_utils import float_compare, float_round from openerp.tools.translate import _ from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT, DEFAULT_SERVER_DATE_FORMAT from openerp import SUPERUSER_ID, api, models import openerp.addons.decimal_precision as dp from openerp.addons.procurement import procurement import logging from openerp.exceptions import UserError _logger = logging.getLogger(__name__) #---------------------------------------------------------- # Incoterms #---------------------------------------------------------- class stock_incoterms(osv.osv): _name = "stock.incoterms" _description = "Incoterms" _columns = { 'name': fields.char('Name', required=True, help="Incoterms are series of sales terms. They are used to divide transaction costs and responsibilities between buyer and seller and reflect state-of-the-art transportation practices."), 'code': fields.char('Code', size=3, required=True, help="Incoterm Standard Code"), 'active': fields.boolean('Active', help="By unchecking the active field, you may hide an INCOTERM you will not use."), } _defaults = { 'active': True, } #---------------------------------------------------------- # Stock Location #---------------------------------------------------------- class stock_location(osv.osv): _name = "stock.location" _description = "Inventory Locations" _parent_name = "location_id" _parent_store = True _parent_order = 'name' _order = 'parent_left' _rec_name = 'complete_name' def _location_owner(self, cr, uid, location, context=None): ''' Return the company owning the location if any ''' return location and (location.usage == 'internal') and location.company_id or False def _complete_name(self, cr, uid, ids, name, args, context=None): 
""" Forms complete name of location from parent location to child location. @return: Dictionary of values """ res = {} for m in self.browse(cr, uid, ids, context=context): res[m.id] = m.name parent = m.location_id while parent: res[m.id] = parent.name + ' / ' + res[m.id] parent = parent.location_id return res def _get_sublocations(self, cr, uid, ids, context=None): """ return all sublocations of the given stock locations (included) """ if context is None: context = {} context_with_inactive = context.copy() context_with_inactive['active_test'] = False return self.search(cr, uid, [('id', 'child_of', ids)], context=context_with_inactive) def _name_get(self, cr, uid, location, context=None): name = location.name while location.location_id and location.usage != 'view': location = location.location_id name = location.name + '/' + name return name def name_get(self, cr, uid, ids, context=None): res = [] for location in self.browse(cr, uid, ids, context=context): res.append((location.id, self._name_get(cr, uid, location, context=context))) return res _columns = { 'name': fields.char('Location Name', required=True, translate=True), 'active': fields.boolean('Active', help="By unchecking the active field, you may hide a location without deleting it."), 'usage': fields.selection([ ('supplier', 'Vendor Location'), ('view', 'View'), ('internal', 'Internal Location'), ('customer', 'Customer Location'), ('inventory', 'Inventory Loss'), ('procurement', 'Procurement'), ('production', 'Production'), ('transit', 'Transit Location')], 'Location Type', required=True, help="""* Vendor Location: Virtual location representing the source location for products coming from your vendors \n* View: Virtual location used to create a hierarchical structures for your warehouse, aggregating its child locations ; can't directly contain products \n* Internal Location: Physical locations inside your own warehouses, \n* Customer Location: Virtual location representing the destination location for 
products sent to your customers \n* Inventory Loss: Virtual location serving as counterpart for inventory operations used to correct stock levels (Physical inventories) \n* Procurement: Virtual location serving as temporary counterpart for procurement operations when the source (vendor or production) is not known yet. This location should be empty when the procurement scheduler has finished running. \n* Production: Virtual counterpart location for production operations: this location consumes the raw material and produces finished products \n* Transit Location: Counterpart location that should be used in inter-companies or inter-warehouses operations """, select=True), 'complete_name': fields.function(_complete_name, type='char', string="Full Location Name", store={'stock.location': (_get_sublocations, ['name', 'location_id', 'active'], 10)}), 'location_id': fields.many2one('stock.location', 'Parent Location', select=True, ondelete='cascade'), 'child_ids': fields.one2many('stock.location', 'location_id', 'Contains'), 'partner_id': fields.many2one('res.partner', 'Owner', help="Owner of the location if not internal"), 'comment': fields.text('Additional Information'), 'posx': fields.integer('Corridor (X)', help="Optional localization details, for information purpose only"), 'posy': fields.integer('Shelves (Y)', help="Optional localization details, for information purpose only"), 'posz': fields.integer('Height (Z)', help="Optional localization details, for information purpose only"), 'parent_left': fields.integer('Left Parent', select=1), 'parent_right': fields.integer('Right Parent', select=1), 'company_id': fields.many2one('res.company', 'Company', select=1, help='Let this field empty if this location is shared between companies'), 'scrap_location': fields.boolean('Is a Scrap Location?', help='Check this box to allow using this location to put scrapped/damaged goods.'), 'return_location': fields.boolean('Is a Return Location?', help='Check this box to allow using 
this location as a return location.'), 'removal_strategy_id': fields.many2one('product.removal', 'Removal Strategy', help="Defines the default method used for suggesting the exact location (shelf) where to take the products from, which lot etc. for this location. This method can be enforced at the product category level, and a fallback is made on the parent locations if none is set here."), 'putaway_strategy_id': fields.many2one('product.putaway', 'Put Away Strategy', help="Defines the default method used for suggesting the exact location (shelf) where to store the products. This method can be enforced at the product category level, and a fallback is made on the parent locations if none is set here."), 'barcode': fields.char('Barcode', copy=False, oldname='loc_barcode'), } _defaults = { 'active': True, 'usage': 'internal', 'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.location', context=c), 'posx': 0, 'posy': 0, 'posz': 0, 'scrap_location': False, } _sql_constraints = [('barcode_company_uniq', 'unique (barcode,company_id)', 'The barcode for a location must be unique per company !')] def create(self, cr, uid, default, context=None): if not default.get('barcode', False): default.update({'barcode': default.get('complete_name', False)}) return super(stock_location, self).create(cr, uid, default, context=context) def get_putaway_strategy(self, cr, uid, location, product, context=None): ''' Returns the location where the product has to be put, if any compliant putaway strategy is found. 
Otherwise returns None.''' putaway_obj = self.pool.get('product.putaway') loc = location while loc: if loc.putaway_strategy_id: res = putaway_obj.putaway_apply(cr, uid, loc.putaway_strategy_id, product, context=context) if res: return res loc = loc.location_id def _default_removal_strategy(self, cr, uid, context=None): return 'fifo' def get_removal_strategy(self, cr, uid, qty, move, ops=False, context=None): ''' Returns the removal strategy to consider for the given move/ops :rtype: char ''' product = move.product_id location = move.location_id if product.categ_id.removal_strategy_id: return product.categ_id.removal_strategy_id.method loc = location while loc: if loc.removal_strategy_id: return loc.removal_strategy_id.method loc = loc.location_id return self._default_removal_strategy(cr, uid, context=context) def get_warehouse(self, cr, uid, location, context=None): """ Returns warehouse id of warehouse that contains location :param location: browse record (stock.location) """ wh_obj = self.pool.get("stock.warehouse") whs = wh_obj.search(cr, uid, [('view_location_id.parent_left', '<=', location.parent_left), ('view_location_id.parent_right', '>=', location.parent_left)], context=context) return whs and whs[0] or False #---------------------------------------------------------- # Routes #---------------------------------------------------------- class stock_location_route(osv.osv): _name = 'stock.location.route' _description = "Inventory Routes" _order = 'sequence' _columns = { 'name': fields.char('Route Name', required=True, translate=True), 'sequence': fields.integer('Sequence'), 'pull_ids': fields.one2many('procurement.rule', 'route_id', 'Procurement Rules', copy=True), 'active': fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the route without removing it."), 'push_ids': fields.one2many('stock.location.path', 'route_id', 'Push Rules', copy=True), 'product_selectable': fields.boolean('Applicable on Product', 
                                              help="When checked, the route will be selectable in the Inventory tab of the Product form. It will take priority over the Warehouse route. "),
        'product_categ_selectable': fields.boolean('Applicable on Product Category',
                                                   help="When checked, the route will be selectable on the Product Category. It will take priority over the Warehouse route. "),
        'warehouse_selectable': fields.boolean('Applicable on Warehouse',
                                               help="When a warehouse is selected for this route, this route should be seen as the default route when products pass through this warehouse. This behaviour can be overridden by the routes on the Product/Product Categories or by the Preferred Routes on the Procurement"),
        'supplied_wh_id': fields.many2one('stock.warehouse', 'Supplied Warehouse'),
        'supplier_wh_id': fields.many2one('stock.warehouse', 'Supplying Warehouse'),
        'company_id': fields.many2one('res.company', 'Company', select=1, help='Leave this field empty if this route is shared between all companies'),
        #Reverse many2many fields:
        'product_ids': fields.many2many('product.template', 'stock_route_product', 'route_id', 'product_id', 'Products'),
        'categ_ids': fields.many2many('product.category', 'stock_location_route_categ', 'route_id', 'categ_id', 'Product Categories'),
        'warehouse_ids': fields.many2many('stock.warehouse', 'stock_route_warehouse', 'route_id', 'warehouse_id', 'Warehouses'),
    }

    _defaults = {
        'sequence': lambda self, cr, uid, ctx: 0,
        'active': True,
        'product_selectable': True,
        'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.location.route', context=c),
    }

    def write(self, cr, uid, ids, vals, context=None):
        '''when a route is deactivated, deactivate also its pull and push rules'''
        if isinstance(ids, (int, long)):
            ids = [ids]
        res = super(stock_location_route, self).write(cr, uid, ids, vals, context=context)
        if 'active' in vals:
            # Propagate the (de)activation to the push rules (stock.location.path)
            # and pull rules (procurement.rule) attached to the routes, but only
            # to those whose 'active' flag actually differs from the new value.
            push_ids = []
            pull_ids = []
            for route in self.browse(cr, uid, ids, context=context):
                if route.push_ids:
                    push_ids += [r.id for r in route.push_ids if r.active != vals['active']]
                if route.pull_ids:
                    pull_ids += [r.id for r in route.pull_ids if r.active != vals['active']]
            if push_ids:
                self.pool.get('stock.location.path').write(cr, uid, push_ids, {'active': vals['active']}, context=context)
            if pull_ids:
                self.pool.get('procurement.rule').write(cr, uid, pull_ids, {'active': vals['active']}, context=context)
        return res

    def view_product_ids(self, cr, uid, ids, context=None):
        """Return an act_window action listing the product templates linked to the first selected route."""
        return {
            'name': _('Products'),
            'view_type': 'form',
            'view_mode': 'tree,form',
            'res_model': 'product.template',
            'type': 'ir.actions.act_window',
            'domain': [('route_ids', 'in', ids[0])],
        }

    def view_categ_ids(self, cr, uid, ids, context=None):
        """Return an act_window action listing the product categories linked to the first selected route."""
        return {
            'name': _('Product Categories'),
            'view_type': 'form',
            'view_mode': 'tree,form',
            'res_model': 'product.category',
            'type': 'ir.actions.act_window',
            'domain': [('route_ids', 'in', ids[0])],
        }

#----------------------------------------------------------
# Quants
#----------------------------------------------------------
class stock_quant(osv.osv):
    """ Quants are the smallest unit of stock physical instances """
    _name = "stock.quant"
    _description = "Quants"

    def _get_quant_name(self, cr, uid, ids, name, args, context=None):
        """ Forms complete name of location from parent location to child location.
        @return: Dictionary of values
        """
        # Display name is "<product code or lot name>: <qty><uom name>".
        # A lot name, when present, replaces (not appends to) the product code.
        res = {}
        for q in self.browse(cr, uid, ids, context=context):
            res[q.id] = q.product_id.code or ''
            if q.lot_id:
                res[q.id] = q.lot_id.name
            res[q.id] += ': ' + str(q.qty) + q.product_id.uom_id.name
        return res

    def _calc_inventory_value(self, cr, uid, ids, name, attr, context=None):
        """Compute the inventory value of each quant, switching company context when needed."""
        context = dict(context or {})
        res = {}
        uid_company_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id
        for quant in self.browse(cr, uid, ids, context=context):
            context.pop('force_company', None)
            if quant.company_id.id != uid_company_id:
                #if the company of the quant is different than the current user company, force the company in the context
                #then re-do a browse to read the property fields for the good company.
                context['force_company'] = quant.company_id.id
                quant = self.browse(cr, uid, quant.id, context=context)
            res[quant.id] = self._get_inventory_value(cr, uid, quant, context=context)
        return res

    def _get_inventory_value(self, cr, uid, quant, context=None):
        # Valuation of a single quant: standard price of the product * quantity.
        return quant.product_id.standard_price * quant.qty

    _columns = {
        'name': fields.function(_get_quant_name, type='char', string='Identifier'),
        'product_id': fields.many2one('product.product', 'Product', required=True, ondelete="restrict", readonly=True, select=True),
        'location_id': fields.many2one('stock.location', 'Location', required=True, ondelete="restrict", readonly=True, select=True, auto_join=True),
        'qty': fields.float('Quantity', required=True, help="Quantity of products in this quant, in the default unit of measure of the product", readonly=True, select=True),
        'product_uom_id': fields.related('product_id', 'uom_id', type='many2one', relation="product.uom", string='Unit of Measure', readonly=True),
        'package_id': fields.many2one('stock.quant.package', string='Package', help="The package containing this quant", readonly=True, select=True),
        'packaging_type_id': fields.related('package_id', 'packaging_id', type='many2one', relation='product.packaging', string='Type of packaging', readonly=True, store=True),
        'reservation_id': fields.many2one('stock.move', 'Reserved for Move', help="The move the quant is reserved for", readonly=True, select=True),
        'lot_id': fields.many2one('stock.production.lot', 'Lot', readonly=True, select=True, ondelete="restrict"),
        'cost': fields.float('Unit Cost'),
        'owner_id': fields.many2one('res.partner', 'Owner', help="This is the owner of the quant", readonly=True, select=True),
        'create_date': fields.datetime('Creation Date', readonly=True),
        'in_date': fields.datetime('Incoming Date', readonly=True, select=True),
        'history_ids': fields.many2many('stock.move', 'stock_quant_move_rel', 'quant_id', 'move_id', 'Moves', help='Moves that operate(d) on this quant', copy=False),
        'company_id': fields.many2one('res.company', 'Company', help="The company to which the quants belong", required=True, readonly=True, select=True),
        'inventory_value': fields.function(_calc_inventory_value, string="Inventory Value", type='float', readonly=True),
        # Used for negative quants to reconcile after compensated by a new positive one
        'propagated_from_id': fields.many2one('stock.quant', 'Linked Quant', help='The negative quant this is coming from', readonly=True, select=True),
        'negative_move_id': fields.many2one('stock.move', 'Move Negative Quant', help='If this is a negative quant, this will be the move that caused this negative quant.', readonly=True),
        'negative_dest_location_id': fields.related('negative_move_id', 'location_dest_id', type='many2one', relation='stock.location', string="Negative Destination Location", readonly=True, help="Technical field used to record the destination location of a move that created a negative quant"),
    }

    _defaults = {
        'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.quant', context=c),
    }

    def init(self, cr):
        # Create a composite index used by quant searches, if it does not exist yet
        # (checked by name in pg_indexes).
        cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = %s', ('stock_quant_product_location_index',))
        if not cr.fetchone():
            cr.execute('CREATE INDEX stock_quant_product_location_index ON stock_quant (product_id, location_id, company_id, qty, in_date, reservation_id)')

    def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False, lazy=True):
        ''' Overwrite the read_group in order to sum the function field 'inventory_value' in group by'''
        # 'inventory_value' is a non-stored function field, so the generic
        # read_group cannot aggregate it; recompute the sum per group here.
        res = super(stock_quant, self).read_group(cr, uid, domain, fields, groupby, offset=offset, limit=limit, context=context, orderby=orderby, lazy=lazy)
        if 'inventory_value' in fields:
            for line in res:
                if '__domain' in line:
                    lines = self.search(cr, uid, line['__domain'], context=context)
                    inv_value = 0.0
                    for line2 in self.browse(cr, uid, lines, context=context):
                        inv_value += line2.inventory_value
                    line['inventory_value'] = inv_value
        return res

    def action_view_quant_history(self, cr, uid, ids, context=None):
        '''
        This function returns an action that display the history of the quant, which
        mean all the stock moves that lead to this quant creation with this quant quantity.
        '''
        mod_obj = self.pool.get('ir.model.data')
        act_obj = self.pool.get('ir.actions.act_window')
        result = mod_obj.get_object_reference(cr, uid, 'stock', 'action_move_form2')
        id = result and result[1] or False
        result = act_obj.read(cr, uid, [id], context={})[0]
        move_ids = []
        for quant in self.browse(cr, uid, ids, context=context):
            move_ids += [move.id for move in quant.history_ids]
        # Restrict the generic moves action to the moves in this quant's history
        result['domain'] = "[('id','in',[" + ','.join(map(str, move_ids)) + "])]"
        return result

    def quants_reserve(self, cr, uid, quants, move, link=False, context=None):
        '''This function reserves quants for the given move (and optionally given link). If the total of quantity reserved is enough, the move's state
        is also set to 'assigned'

        :param quants: list of tuple(quant browse record or None, qty to reserve). If None is given as first tuple element, the item will be ignored.
            Negative quants should not be received as argument
        :param move: browse record
        :param link: browse record (stock.move.operation.link)
        '''
        toreserve = []
        reserved_availability = move.reserved_availability
        #split quants if needed
        for quant, qty in quants:
            if qty <= 0.0 or (quant and quant.qty <= 0.0):
                raise UserError(_('You can not reserve a negative quantity or a negative quant.'))
            if not quant:
                continue
            self._quant_split(cr, uid, quant, qty, context=context)
            toreserve.append(quant.id)
            reserved_availability += quant.qty
        #reserve quants
        if toreserve:
            # written as superuser: reservation bypasses record-level ACLs
            self.write(cr, SUPERUSER_ID, toreserve, {'reservation_id': move.id}, context=context)
        #check if move'state needs to be set as 'assigned'
        rounding = move.product_id.uom_id.rounding
        if float_compare(reserved_availability, move.product_qty, precision_rounding=rounding) == 0 and move.state in ('confirmed', 'waiting'):
            self.pool.get('stock.move').write(cr, uid, [move.id], {'state': 'assigned'}, context=context)
        elif float_compare(reserved_availability, 0, precision_rounding=rounding) > 0 and not move.partially_available:
            self.pool.get('stock.move').write(cr, uid, [move.id], {'partially_available': True}, context=context)

    def quants_move(self, cr, uid, quants, move, location_to, location_from=False, lot_id=False, owner_id=False, src_package_id=False, dest_package_id=False, entire_pack=False, context=None):
        """Moves all given stock.quant in the given destination location.  Unreserve from current move.
        :param quants: list of tuple(browse record(stock.quant) or None, quantity to move)
        :param move: browse record (stock.move)
        :param location_to: browse record (stock.location) depicting where the quants have to be moved
        :param location_from: optional browse record (stock.location) explaining where the quant has to be taken (may differ from the move source location in case a removal strategy applied). This parameter is only used to pass to _quant_create if a negative quant must be created
        :param lot_id: ID of the lot that must be set on the quants to move
        :param owner_id: ID of the partner that must own the quants to move
        :param src_package_id: ID of the package that contains the quants to move
        :param dest_package_id: ID of the package that must be set on the moved quant
        """
        quants_reconcile = []
        to_move_quants = []
        self._check_location(cr, uid, location_to, context=context)
        for quant, qty in quants:
            if not quant:
                #If quant is None, we will create a quant to move (and potentially a negative counterpart too)
                quant = self._quant_create(cr, uid, qty, move, lot_id=lot_id, owner_id=owner_id, src_package_id=src_package_id, dest_package_id=dest_package_id, force_location_from=location_from, force_location_to=location_to, context=context)
            else:
                self._quant_split(cr, uid, quant, qty, context=context)
            to_move_quants.append(quant)
            quants_reconcile.append(quant)
        if to_move_quants:
            # moves whose reservation is stolen by this move must have their state recomputed
            to_recompute_move_ids = [x.reservation_id.id for x in to_move_quants if x.reservation_id and x.reservation_id.id != move.id]
            self.move_quants_write(cr, uid, to_move_quants, move, location_to, dest_package_id, lot_id=lot_id, entire_pack=entire_pack, context=context)
            self.pool.get('stock.move').recalculate_move_state(cr, uid, to_recompute_move_ids, context=context)
        if location_to.usage == 'internal':
            # Do manual search for quant to avoid full table scan (order by id)
            # Only reconcile if at least one negative quant exists under the destination location.
            cr.execute(""" SELECT 0 FROM stock_quant, stock_location WHERE product_id = %s AND stock_location.id = stock_quant.location_id AND ((stock_location.parent_left >= %s AND stock_location.parent_left < %s) OR stock_location.id = %s) AND qty < 0.0 LIMIT 1 """, (move.product_id.id, location_to.parent_left, location_to.parent_right, location_to.id))
            if cr.fetchone():
                for quant in quants_reconcile:
                    self._quant_reconcile_negative(cr, uid, quant, move, context=context)

    def move_quants_write(self, cr, uid, quants, move, location_dest_id, dest_package_id, lot_id = False, entire_pack=False, context=None):
        """Write the new location / history / (package, lot) on the moved quants and clear their reservation."""
        context=context or {}
        vals = {'location_id': location_dest_id.id,
                'history_ids': [(4, move.id)],
                'reservation_id': False}
        if lot_id and any(x.id for x in quants if not x.lot_id.id):
            vals['lot_id'] = lot_id
        if not entire_pack:
            # when moving an entire pack the package assignment is kept as-is
            vals.update({'package_id': dest_package_id})
        self.write(cr, SUPERUSER_ID, [q.id for q in quants], vals, context=context)

    def quants_get_preferred_domain(self, cr, uid, qty, move, ops=False, lot_id=False, domain=None, preferred_domain_list=[], context=None):
        ''' This function tries to find quants for the given domain and move/ops, by trying to first limit
            the choice on the quants that match the first item of preferred_domain_list as well. But if the qty requested is not reached
            it tries to find the remaining quantity by looping on the preferred_domain_list (tries with the second item and so on).
            Make sure the quants aren't found twice => all the domains of preferred_domain_list should be orthogonal
        '''
        # NOTE(review): mutable default argument preferred_domain_list=[] is a known
        # Python pitfall; it is only read (never mutated) below, so it is harmless here.
        context = context or {}
        domain = domain or [('qty', '>', 0.0)]
        domain = list(domain)
        quants = [(None, qty)]
        if ops:
            # pack operation flow: restrict by owner / package / exact location
            restrict_lot_id = lot_id
            location = ops.location_id
            domain += [('owner_id', '=', ops.owner_id.id)]
            if ops.package_id and not ops.product_id:
                domain += [('package_id', 'child_of', ops.package_id.id)]
            elif ops.package_id and ops.product_id:
                domain += [('package_id', '=', ops.package_id.id)]
            else:
                domain += [('package_id', '=', False)]
            domain += [('location_id', '=', ops.location_id.id)]
        else:
            # stock move flow: restrict by move's lot / partner / source location subtree
            restrict_lot_id = move.restrict_lot_id.id
            location = move.location_id
            domain += [('owner_id', '=', move.restrict_partner_id.id)]
            domain += [('location_id', 'child_of', move.location_id.id)]
        if context.get('force_company'):
            domain += [('company_id', '=', context.get('force_company'))]
        else:
            domain += [('company_id', '=', move.company_id.id)]
        removal_strategy = self.pool.get('stock.location').get_removal_strategy(cr, uid, qty, move, ops=ops, context=context)
        product = move.product_id
        domain += [('product_id', '=', move.product_id.id)]
        #don't look for quants in location that are of type production, supplier or inventory.
        if location.usage in ['inventory', 'production', 'supplier']:
            return quants
        res_qty = qty
        if restrict_lot_id:
            # cross each preferred domain with "this lot" first, then "no lot"
            if not preferred_domain_list:
                preferred_domain_list = [[('lot_id', '=', restrict_lot_id)], [('lot_id', '=', False)]]
            else:
                lot_list = []
                no_lot_list = []
                for pref_domain in preferred_domain_list:
                    pref_lot_domain = pref_domain + [('lot_id', '=', restrict_lot_id)]
                    pref_no_lot_domain = pref_domain + [('lot_id', '=', False)]
                    lot_list.append(pref_lot_domain)
                    no_lot_list.append(pref_no_lot_domain)
                preferred_domain_list = lot_list + no_lot_list
        if not preferred_domain_list:
            return self.quants_get(cr, uid, qty, move, ops=ops, domain=domain, removal_strategy=removal_strategy, context=context)
        for preferred_domain in preferred_domain_list:
            res_qty_cmp = float_compare(res_qty, 0, precision_rounding=product.uom_id.rounding)
            if res_qty_cmp > 0:
                #try to replace the last tuple (None, res_qty) with something that wasn't chosen at first because of the preferred order
                quants.pop()
                tmp_quants = self.quants_get(cr, uid, res_qty, move, ops=ops, domain=domain + preferred_domain, removal_strategy=removal_strategy, context=context)
                for quant in tmp_quants:
                    if quant[0]:
                        res_qty -= quant[1]
                quants += tmp_quants
        return quants

    def quants_get(self, cr, uid, qty, move, ops=False, domain=None, removal_strategy='fifo', context=None):
        """ Use the removal strategies of product to search for the correct quants
            If you inherit, put the super at the end of your method.
            :location: browse record of the parent location where the quants have to be found
            :product: browse record of the product to find
            :qty in UoM of product
        """
        domain = domain or [('qty', '>', 0.0)]
        return self.apply_removal_strategy(cr, uid, qty, move, ops=ops, domain=domain, removal_strategy=removal_strategy, context=context)

    def apply_removal_strategy(self, cr, uid, quantity, move, ops=False, domain=None, removal_strategy='fifo', context=None):
        """Translate a removal strategy name into an ordered quant search (fifo/lifo on in_date, id)."""
        if removal_strategy == 'fifo':
            order = 'in_date, id'
            return self._quants_get_order(cr, uid, quantity, move, ops=ops, domain=domain, orderby=order, context=context)
        elif removal_strategy == 'lifo':
            order = 'in_date desc, id desc'
            return self._quants_get_order(cr, uid, quantity, move, ops=ops, domain=domain, orderby=order, context=context)
        raise UserError(_('Removal strategy %s not implemented.') % (removal_strategy,))

    def _quant_create(self, cr, uid, qty, move, lot_id=False, owner_id=False, src_package_id=False, dest_package_id=False, force_location_from=False, force_location_to=False, context=None):
        '''Create a quant in the destination location and create a negative quant in the source location if it's an internal location.
        '''
        if context is None:
            context = {}
        price_unit = self.pool.get('stock.move').get_price_unit(cr, uid, move, context=context)
        location = force_location_to or move.location_dest_id
        rounding = move.product_id.uom_id.rounding
        vals = {
            'product_id': move.product_id.id,
            'location_id': location.id,
            'qty': float_round(qty, precision_rounding=rounding),
            'cost': price_unit,
            'history_ids': [(4, move.id)],
            'in_date': datetime.now().strftime(DEFAULT_SERVER_DATETIME_FORMAT),
            'company_id': move.company_id.id,
            'lot_id': lot_id,
            'owner_id': owner_id,
            'package_id': dest_package_id,
        }
        if move.location_id.usage == 'internal':
            #if we were trying to move something from an internal location and reach here (quant creation),
            #it means that a negative quant has to be created as well.
            negative_vals = vals.copy()
            negative_vals['location_id'] = force_location_from and force_location_from.id or move.location_id.id
            negative_vals['qty'] = float_round(-qty, precision_rounding=rounding)
            negative_vals['cost'] = price_unit
            negative_vals['negative_move_id'] = move.id
            negative_vals['package_id'] = src_package_id
            negative_quant_id = self.create(cr, SUPERUSER_ID, negative_vals, context=context)
            # link the positive quant to its negative counterpart for later reconciliation
            vals.update({'propagated_from_id': negative_quant_id})

        # In case of serial tracking, check if the product does not exist somewhere internally already
        picking_type = move.picking_id and move.picking_id.picking_type_id or False
        if lot_id and move.product_id.tracking == 'serial' and (not picking_type or (picking_type.use_create_lots or picking_type.use_existing_lots)):
            if qty != 1.0:
                raise UserError(_('You should only receive by the piece with the same serial number'))
            other_quants = self.search(cr, uid, [('product_id', '=', move.product_id.id), ('lot_id', '=', lot_id), ('qty', '>', 0.0), ('location_id.usage', '=', 'internal')], context=context)
            if other_quants:
                lot_name = self.pool['stock.production.lot'].browse(cr, uid, lot_id, context=context).name
                raise UserError(_('The serial number %s is already in stock.') % lot_name + _("Otherwise make sure the right stock/owner is set."))

        #create the quant as superuser, because we want to restrict the creation of quant manually: we should always use this method to create quants
        quant_id = self.create(cr, SUPERUSER_ID, vals, context=context)
        return self.browse(cr, uid, quant_id, context=context)

    def _quant_split(self, cr, uid, quant, qty, context=None):
        """Split off `qty` from `quant`; keep `qty` on the original and return a new quant
        carrying the remainder (or False if the quant is not larger than `qty` in abs)."""
        context = context or {}
        rounding = quant.product_id.uom_id.rounding
        if float_compare(abs(quant.qty), abs(qty), precision_rounding=rounding) <= 0:
            # if quant <= qty in abs, take it entirely
            return False
        qty_round = float_round(qty, precision_rounding=rounding)
        new_qty_round = float_round(quant.qty - qty, precision_rounding=rounding)
        # Fetch the history_ids manually as it will not do a join with the stock moves then (=> a lot faster)
        cr.execute("""SELECT move_id FROM stock_quant_move_rel WHERE quant_id = %s""", (quant.id,))
        res = cr.fetchall()
        new_quant = self.copy(cr, SUPERUSER_ID, quant.id, default={'qty': new_qty_round, 'history_ids': [(4, x[0]) for x in res]}, context=context)
        self.write(cr, SUPERUSER_ID, quant.id, {'qty': qty_round}, context=context)
        return self.browse(cr, uid, new_quant, context=context)

    def _get_latest_move(self, cr, uid, quant, context=None):
        """Return the most recent move (by date) in the quant's history, or False if none."""
        move = False
        for m in quant.history_ids:
            if not move or m.date > move.date:
                move = m
        return move

    @api.cr_uid_ids_context
    def _quants_merge(self, cr, uid, solved_quant_ids, solving_quant, context=None):
        # Append the solving quant's move history onto the solved quants.
        path = []
        for move in solving_quant.history_ids:
            path.append((4, move.id))
        self.write(cr, SUPERUSER_ID, solved_quant_ids, {'history_ids': path}, context=context)

    def _search_quants_to_reconcile(self, cr, uid, quant, context=None):
        """ Searches negative quants to reconcile for where the quant to reconcile is put """
        dom = [('qty', '<', 0)]
        order = 'in_date'
        dom += [('location_id', 'child_of', quant.location_id.id), ('product_id', '=', quant.product_id.id), ('owner_id', '=', quant.owner_id.id)]
        if quant.package_id.id:
            dom += [('package_id', '=', quant.package_id.id)]
        if quant.lot_id:
            # lot-less negative quants may also be reconciled with a lot-carrying quant
            dom += ['|', ('lot_id', '=', False), ('lot_id', '=', quant.lot_id.id)]
            order = 'lot_id, in_date'
        # Do not let the quant eat itself, or it will kill its history (e.g. returns / Stock -> Stock)
        dom += [('id', '!=', quant.propagated_from_id.id)]
        quants_search = self.search(cr, uid, dom, order=order, context=context)
        product = quant.product_id
        quants = []
        quantity = quant.qty
        # Consume matched negative quants until this quant's quantity is covered.
        for quant in self.browse(cr, uid, quants_search, context=context):
            rounding = product.uom_id.rounding
            if float_compare(quantity, abs(quant.qty), precision_rounding=rounding) >= 0:
                quants += [(quant, abs(quant.qty))]
                quantity -= abs(quant.qty)
            elif float_compare(quantity, 0.0, precision_rounding=rounding) != 0:
                quants += [(quant, quantity)]
                quantity = 0
                break
        return quants

    def _quant_reconcile_negative(self, cr, uid, quant, move, context=None):
        """
            When new quant arrive in a location, try to reconcile it with
            negative quants. If it's possible, apply the cost of the new
            quant to the counterpart of the negative quant.
        """
        context = context or {}
        context = dict(context)
        # force_unlink lets this method delete quants despite the guard in unlink()
        context.update({'force_unlink': True})
        solving_quant = quant
        quants = self._search_quants_to_reconcile(cr, uid, quant, context=context)
        product_uom_rounding = quant.product_id.uom_id.rounding
        for quant_neg, qty in quants:
            if not quant_neg or not solving_quant:
                continue
            to_solve_quant_ids = self.search(cr, uid, [('propagated_from_id', '=', quant_neg.id)], context=context)
            if not to_solve_quant_ids:
                continue
            solving_qty = qty
            solved_quant_ids = []
            for to_solve_quant in self.browse(cr, uid, to_solve_quant_ids, context=context):
                if float_compare(solving_qty, 0, precision_rounding=product_uom_rounding) <= 0:
                    continue
                solved_quant_ids.append(to_solve_quant.id)
                self._quant_split(cr, uid, to_solve_quant, min(solving_qty, to_solve_quant.qty), context=context)
                solving_qty -= min(solving_qty, to_solve_quant.qty)
            remaining_solving_quant = self._quant_split(cr, uid, solving_quant, qty, context=context)
            remaining_neg_quant = self._quant_split(cr, uid, quant_neg, -qty, context=context)
            #if the reconciliation was not complete, we need to link together the remaining parts
            if remaining_neg_quant:
remaining_to_solve_quant_ids = self.search(cr, uid, [('propagated_from_id', '=', quant_neg.id), ('id', 'not in', solved_quant_ids)], context=context) if remaining_to_solve_quant_ids: self.write(cr, SUPERUSER_ID, remaining_to_solve_quant_ids, {'propagated_from_id': remaining_neg_quant.id}, context=context) if solving_quant.propagated_from_id and solved_quant_ids: self.write(cr, SUPERUSER_ID, solved_quant_ids, {'propagated_from_id': solving_quant.propagated_from_id.id}, context=context) #delete the reconciled quants, as it is replaced by the solved quants self.unlink(cr, SUPERUSER_ID, [quant_neg.id], context=context) if solved_quant_ids: #price update + accounting entries adjustments self._price_update(cr, uid, solved_quant_ids, solving_quant.cost, context=context) #merge history (and cost?) self._quants_merge(cr, uid, solved_quant_ids, solving_quant, context=context) self.unlink(cr, SUPERUSER_ID, [solving_quant.id], context=context) solving_quant = remaining_solving_quant def _price_update(self, cr, uid, ids, newprice, context=None): self.write(cr, SUPERUSER_ID, ids, {'cost': newprice}, context=context) def quants_unreserve(self, cr, uid, move, context=None): related_quants = [x.id for x in move.reserved_quant_ids] if related_quants: #if move has a picking_id, write on that picking that pack_operation might have changed and need to be recomputed if move.partially_available: self.pool.get("stock.move").write(cr, uid, [move.id], {'partially_available': False}, context=context) self.write(cr, SUPERUSER_ID, related_quants, {'reservation_id': False}, context=context) def _quants_get_order(self, cr, uid, quantity, move, ops=False, domain=[], orderby='in_date', context=None): ''' Implementation of removal strategies If it can not reserve, it will return a tuple (None, qty) ''' if context is None: context = {} product = move.product_id res = [] offset = 0 while float_compare(quantity, 0, precision_rounding=product.uom_id.rounding) > 0: quants = self.search(cr, uid, domain, 
order=orderby, limit=10, offset=offset, context=context) if not quants: res.append((None, quantity)) break for quant in self.browse(cr, uid, quants, context=context): rounding = product.uom_id.rounding if float_compare(quantity, abs(quant.qty), precision_rounding=rounding) >= 0: res += [(quant, abs(quant.qty))] quantity -= abs(quant.qty) elif float_compare(quantity, 0.0, precision_rounding=rounding) != 0: res += [(quant, quantity)] quantity = 0 break offset += 10 return res def _check_location(self, cr, uid, location, context=None): if location.usage == 'view': raise UserError(_('You cannot move to a location of type view %s.') % (location.name)) return True def unlink(self, cr, uid, ids, context=None): context = context or {} if not context.get('force_unlink'): raise UserError(_('Under no circumstances should you delete or change quants yourselves!')) super(stock_quant, self).unlink(cr, uid, ids, context=context) #---------------------------------------------------------- # Stock Picking #---------------------------------------------------------- class stock_picking(models.Model): _name = "stock.picking" _inherit = ['mail.thread'] _description = "Transfer" _order = "priority desc, date asc, id desc" def _set_min_date(self, cr, uid, id, field, value, arg, context=None): move_obj = self.pool.get("stock.move") if value: move_ids = [move.id for move in self.browse(cr, uid, id, context=context).move_lines] move_obj.write(cr, uid, move_ids, {'date_expected': value}, context=context) def _set_priority(self, cr, uid, id, field, value, arg, context=None): move_obj = self.pool.get("stock.move") if value: move_ids = [move.id for move in self.browse(cr, uid, id, context=context).move_lines] move_obj.write(cr, uid, move_ids, {'priority': value}, context=context) def get_min_max_date(self, cr, uid, ids, field_name, arg, context=None): """ Finds minimum and maximum dates for picking. 
@return: Dictionary of values """ res = {} for id in ids: res[id] = {'min_date': False, 'max_date': False, 'priority': '1'} if not ids: return res cr.execute("""select picking_id, min(date_expected), max(date_expected), max(priority) from stock_move where picking_id IN %s group by picking_id""", (tuple(ids),)) for pick, dt1, dt2, prio in cr.fetchall(): res[pick]['min_date'] = dt1 res[pick]['max_date'] = dt2 res[pick]['priority'] = prio return res def create(self, cr, user, vals, context=None): context = context or {} if ('name' not in vals) or (vals.get('name') in ('/', False)): ptype_id = vals.get('picking_type_id', context.get('default_picking_type_id', False)) sequence_id = self.pool.get('stock.picking.type').browse(cr, user, ptype_id, context=context).sequence_id.id vals['name'] = self.pool.get('ir.sequence').next_by_id(cr, user, sequence_id, context=context) # As the on_change in one2many list is WIP, we will overwrite the locations on the stock moves here # As it is a create the format will be a list of (0, 0, dict) if vals.get('move_lines') and vals.get('location_id') and vals.get('location_dest_id'): for move in vals['move_lines']: if len(move) == 3: move[2]['location_id'] = vals['location_id'] move[2]['location_dest_id'] = vals['location_dest_id'] return super(stock_picking, self).create(cr, user, vals, context) def write(self, cr, uid, ids, vals, context=None): res = super(stock_picking, self).write(cr, uid, ids, vals, context=context) after_vals = {} if vals.get('location_id'): after_vals['location_id'] = vals['location_id'] if vals.get('location_dest_id'): after_vals['location_dest_id'] = vals['location_dest_id'] # Change locations of moves if those of the picking change if after_vals: moves = [] for pick in self.browse(cr, uid, ids, context=context): moves += [x.id for x in pick.move_lines if not x.scrapped] if moves: self.pool['stock.move'].write(cr, uid, moves, after_vals, context=context) return res def _state_get(self, cr, uid, ids, field_name, 
arg, context=None):
        '''The state of a picking depends on the state of its related stock.move
        draft: the picking has no line or any one of the lines is draft
        done, draft, cancel: all lines are done / draft / cancel
        confirmed, waiting, assigned, partially_available depends on move_type (all at once or partial)
        '''
        res = {}
        for pick in self.browse(cr, uid, ids, context=context):
            if not pick.move_lines:
                # No moves at all: 'assigned' when pack operations were launched
                # manually (launch_pack_operations), otherwise stay in draft.
                res[pick.id] = pick.launch_pack_operations and 'assigned' or 'draft'
                continue
            if any([x.state == 'draft' for x in pick.move_lines]):
                res[pick.id] = 'draft'
                continue
            if all([x.state == 'cancel' for x in pick.move_lines]):
                res[pick.id] = 'cancel'
                continue
            if all([x.state in ('cancel', 'done') for x in pick.move_lines]):
                res[pick.id] = 'done'
                continue
            # Rank the remaining (non-done, non-cancelled) move states so that
            # min()/max() select the least/most advanced one.
            order = {'confirmed': 0, 'waiting': 1, 'assigned': 2}
            order_inv = {0: 'confirmed', 1: 'waiting', 2: 'assigned'}
            lst = [order[x.state] for x in pick.move_lines if x.state not in ('cancel', 'done')]
            if pick.move_type == 'one':
                # 'All at once': the picking is only as advanced as its least
                # advanced move.
                res[pick.id] = order_inv[min(lst)]
            else:
                #we are in the case of partial delivery, so if all move are assigned, picking
                #should be assign too, else if one of the move is assigned, or partially available, picking should be
                #in partially available state, otherwise, picking is in waiting or confirmed state
                res[pick.id] = order_inv[max(lst)]
                if not all(x == 2 for x in lst):
                    if any(x == 2 for x in lst):
                        res[pick.id] = 'partially_available'
                    else:
                        #if all moves aren't assigned, check if we have one product partially available
                        for move in pick.move_lines:
                            if move.partially_available:
                                res[pick.id] = 'partially_available'
                                break
        return res

    def _get_pickings(self, cr, uid, ids, context=None):
        # Store trigger (called with stock.move ids): collect the distinct
        # picking ids touched by the given moves so their stored function
        # fields get recomputed.
        res = set()
        for move in self.browse(cr, uid, ids, context=context):
            if move.picking_id:
                res.add(move.picking_id.id)
        return list(res)

    def _get_pickings_dates_priority(self, cr, uid, ids, context=None):
        # Store trigger (called with stock.move ids): pickings whose min/max
        # dates or priority may be invalidated by the given moves.
        res = set()
        for move in self.browse(cr, uid, ids, context=context):
            if move.picking_id and (not (move.picking_id.min_date <
move.date_expected < move.picking_id.max_date) or move.priority > move.picking_id.priority): res.add(move.picking_id.id) return list(res) def _get_pack_operation_exist(self, cr, uid, ids, field_name, arg, context=None): res = {} for pick in self.browse(cr, uid, ids, context=context): res[pick.id] = False if pick.pack_operation_ids: res[pick.id] = True return res def _get_quant_reserved_exist(self, cr, uid, ids, field_name, arg, context=None): res = {} for pick in self.browse(cr, uid, ids, context=context): res[pick.id] = False for move in pick.move_lines: if move.reserved_quant_ids: res[pick.id] = True continue return res def action_assign_owner(self, cr, uid, ids, context=None): for picking in self.browse(cr, uid, ids, context=context): packop_ids = [op.id for op in picking.pack_operation_ids] self.pool.get('stock.pack.operation').write(cr, uid, packop_ids, {'owner_id': picking.owner_id.id}, context=context) def onchange_picking_type(self, cr, uid, ids, picking_type_id, partner_id, context=None): res = {} if picking_type_id: picking_type = self.pool['stock.picking.type'].browse(cr, uid, picking_type_id, context=context) if not picking_type.default_location_src_id: if partner_id: partner = self.pool['res.partner'].browse(cr, uid, partner_id, context=context) location_id = partner.property_stock_supplier.id else: customerloc, supplierloc = self.pool['stock.warehouse']._get_partner_locations(cr, uid, [], context=context) location_id = supplierloc.id else: location_id = picking_type.default_location_src_id.id if not picking_type.default_location_dest_id: if partner_id: partner = self.pool['res.partner'].browse(cr, uid, partner_id, context=context) location_dest_id = partner.property_stock_customer.id else: customerloc, supplierloc = self.pool['stock.warehouse']._get_partner_locations(cr, uid, [], context=context) location_dest_id = customerloc.id else: location_dest_id = picking_type.default_location_dest_id.id res['value'] = {'location_id': location_id, 
'location_dest_id': location_dest_id,} return res def _default_location_destination(self): # retrieve picking type from context; if none this returns an empty recordset picking_type_id = self._context.get('default_picking_type_id') picking_type = self.env['stock.picking.type'].browse(picking_type_id) return picking_type.default_location_dest_id def _default_location_source(self): # retrieve picking type from context; if none this returns an empty recordset picking_type_id = self._context.get('default_picking_type_id') picking_type = self.env['stock.picking.type'].browse(picking_type_id) return picking_type.default_location_src_id _columns = { 'name': fields.char('Reference', select=True, states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, copy=False), 'origin': fields.char('Source Document', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, help="Reference of the document", select=True), 'backorder_id': fields.many2one('stock.picking', 'Back Order of', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, help="If this shipment was split, then this field links to the shipment which contains the already processed part.", select=True, copy=False), 'note': fields.text('Notes'), 'move_type': fields.selection([('direct', 'Partial'), ('one', 'All at once')], 'Delivery Method', required=True, states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, help="It specifies goods to be deliver partially or all at once"), 'state': fields.function(_state_get, type="selection", copy=False, store={ 'stock.picking': (lambda self, cr, uid, ids, ctx: ids, ['move_type', 'launch_pack_operations'], 20), 'stock.move': (_get_pickings, ['state', 'picking_id', 'partially_available'], 20)}, selection=[ ('draft', 'Draft'), ('cancel', 'Cancelled'), ('waiting', 'Waiting Another Operation'), ('confirmed', 'Waiting Availability'), ('partially_available', 'Partially Available'), ('assigned', 'Available'), ('done', 'Done'), ], 
string='Status', readonly=True, select=True, track_visibility='onchange', help=""" * Draft: not confirmed yet and will not be scheduled until confirmed\n * Waiting Another Operation: waiting for another move to proceed before it becomes automatically available (e.g. in Make-To-Order flows)\n * Waiting Availability: still waiting for the availability of products\n * Partially Available: some products are available and reserved\n * Ready to Transfer: products reserved, simply waiting for confirmation.\n * Transferred: has been processed, can't be modified or cancelled anymore\n * Cancelled: has been cancelled, can't be confirmed anymore""" ), 'location_id': fields.many2one('stock.location', required=True, string="Source Location Zone", default=_default_location_source, readonly=True, states={'draft': [('readonly', False)]}), 'location_dest_id': fields.many2one('stock.location', required=True,string="Destination Location Zone", default=_default_location_destination, readonly=True, states={'draft': [('readonly', False)]}), 'move_lines': fields.one2many('stock.move', 'picking_id', string="Stock Moves", copy=True), 'move_lines_related': fields.related('move_lines', type='one2many', relation='stock.move', string="Move Lines"), 'picking_type_id': fields.many2one('stock.picking.type', 'Picking Type', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, required=True), 'picking_type_code': fields.related('picking_type_id', 'code', type='selection', selection=[('incoming', 'Suppliers'), ('outgoing', 'Customers'), ('internal', 'Internal')]), 'picking_type_entire_packs': fields.related('picking_type_id', 'show_entire_packs', type='boolean'), 'priority': fields.function(get_min_max_date, multi="min_max_date", fnct_inv=_set_priority, type='selection', selection=procurement.PROCUREMENT_PRIORITIES, string='Priority', store={'stock.move': (_get_pickings_dates_priority, ['priority', 'picking_id'], 20)}, states={'done': [('readonly', True)], 'cancel': [('readonly', 
True)]}, select=1, help="Priority for this picking. Setting manually a value here would set it as priority for all the moves", track_visibility='onchange', required=True), 'min_date': fields.function(get_min_max_date, multi="min_max_date", fnct_inv=_set_min_date, store={'stock.move': (_get_pickings_dates_priority, ['date_expected', 'picking_id'], 20)}, type='datetime', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, string='Scheduled Date', select=1, help="Scheduled time for the first part of the shipment to be processed. Setting manually a value here would set it as expected date for all the stock moves.", track_visibility='onchange'), 'max_date': fields.function(get_min_max_date, multi="min_max_date", store={'stock.move': (_get_pickings_dates_priority, ['date_expected', 'picking_id'], 20)}, type='datetime', string='Max. Expected Date', select=2, help="Scheduled time for the last part of the shipment to be processed"), 'date': fields.datetime('Creation Date', help="Creation Date, usually the time of the order", select=True, states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, track_visibility='onchange'), 'date_done': fields.datetime('Date of Transfer', help="Completion Date of Transfer", readonly=True, copy=False), 'quant_reserved_exist': fields.function(_get_quant_reserved_exist, type='boolean', string='Has quants already reserved', help='Check the existance of quants linked to this picking'), 'partner_id': fields.many2one('res.partner', 'Partner', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}), 'company_id': fields.many2one('res.company', 'Company', required=True, select=True, states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}), 'pack_operation_ids': fields.one2many('stock.pack.operation', 'picking_id', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, string='Related Packing Operations'), 'pack_operation_product_ids': fields.one2many('stock.pack.operation', 
'picking_id', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, domain=[('product_id', '!=', False)], string='Non pack'), 'pack_operation_pack_ids': fields.one2many('stock.pack.operation', 'picking_id', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, domain=[('product_id', '=', False)], string='Pack'), # technical field for attrs in view 'pack_operation_exist': fields.function(_get_pack_operation_exist, type='boolean', string='Has Pack Operations', help='Check the existance of pack operation on the picking'), 'owner_id': fields.many2one('res.partner', 'Owner', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, help="Default Owner"), 'printed': fields.boolean('Printed'), # Used to search on pickings 'product_id': fields.related('move_lines', 'product_id', type='many2one', relation='product.product', string='Product'), 'recompute_pack_op': fields.boolean('Recompute pack operation?', help='True if reserved quants changed, which mean we might need to recompute the package operations', copy=False), 'group_id': fields.related('move_lines', 'group_id', type='many2one', relation='procurement.group', string='Procurement Group', readonly=True, store={ 'stock.picking': (lambda self, cr, uid, ids, ctx: ids, ['move_lines'], 10), 'stock.move': (_get_pickings, ['group_id', 'picking_id'], 10), }), 'launch_pack_operations': fields.boolean("Launch Pack Operations", copy=False), } _defaults = { 'name': '/', 'state': 'draft', 'move_type': 'direct', 'printed': False, 'priority': '1', # normal 'date': fields.datetime.now, 'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.picking', context=c), 'recompute_pack_op': False, 'launch_pack_operations': False, } _sql_constraints = [ ('name_uniq', 'unique(name, company_id)', 'Reference must be unique per company!'), ] def do_print_picking(self, cr, uid, ids, context=None): '''This function prints the picking list''' context = 
dict(context or {}, active_ids=ids)
        self.write(cr, uid, ids, {'printed': True}, context=context)
        return self.pool.get("report").get_action(cr, uid, ids, 'stock.report_picking', context=context)

    def launch_packops(self, cr, uid, ids, context=None):
        """Flag the pickings so pack operations can exist without stock moves."""
        self.write(cr, uid, ids, {'launch_pack_operations': True}, context=context)

    def action_confirm(self, cr, uid, ids, context=None):
        """Confirm the pickings: confirm their draft moves, and force-assign
        pickings whose source location is of usage supplier / inventory /
        production (those locations are treated as having stock available).

        @return: True
        """
        todo = []
        todo_force_assign = []
        for picking in self.browse(cr, uid, ids, context=context):
            if not picking.move_lines:
                # No moves: allow pack operations to be entered manually.
                self.launch_packops(cr, uid, [picking.id], context=context)
            if picking.location_id.usage in ('supplier', 'inventory', 'production'):
                todo_force_assign.append(picking.id)
            for r in picking.move_lines:
                if r.state == 'draft':
                    todo.append(r.id)
        if len(todo):
            self.pool.get('stock.move').action_confirm(cr, uid, todo, context=context)
        if todo_force_assign:
            self.force_assign(cr, uid, todo_force_assign, context=context)
        return True

    def action_assign(self, cr, uid, ids, context=None):
        """ Check availability of picking moves.
        This has the effect of changing the state and reserve quants on available moves, and may
        also impact the state of the picking as it is computed based on move's states.
        @return: True
        """
        for pick in self.browse(cr, uid, ids, context=context):
            if pick.state == 'draft':
                self.action_confirm(cr, uid, [pick.id], context=context)
            #skip the moves that don't need to be checked
            move_ids = [x.id for x in pick.move_lines if x.state not in ('draft', 'cancel', 'done')]
            if not move_ids:
                raise UserError(_('Nothing to check the availability for.'))
            self.pool.get('stock.move').action_assign(cr, uid, move_ids, context=context)
        return True

    def force_assign(self, cr, uid, ids, context=None):
        """ Changes state of picking to available if moves are confirmed or waiting.
@return: True """ pickings = self.browse(cr, uid, ids, context=context) for pick in pickings: move_ids = [x.id for x in pick.move_lines if x.state in ['confirmed', 'waiting']] self.pool.get('stock.move').force_assign(cr, uid, move_ids, context=context) return True def action_cancel(self, cr, uid, ids, context=None): for pick in self.browse(cr, uid, ids, context=context): ids2 = [move.id for move in pick.move_lines] self.pool.get('stock.move').action_cancel(cr, uid, ids2, context) return True def action_done(self, cr, uid, ids, context=None): """Changes picking state to done by processing the Stock Moves of the Picking Normally that happens when the button "Done" is pressed on a Picking view. @return: True """ for pick in self.browse(cr, uid, ids, context=context): todo = [] for move in pick.move_lines: if move.state == 'draft': todo.extend(self.pool.get('stock.move').action_confirm(cr, uid, [move.id], context=context)) elif move.state in ('assigned', 'confirmed'): todo.append(move.id) if len(todo): self.pool.get('stock.move').action_done(cr, uid, todo, context=context) return True def unlink(self, cr, uid, ids, context=None): #on picking deletion, cancel its move then unlink them too move_obj = self.pool.get('stock.move') context = context or {} for pick in self.browse(cr, uid, ids, context=context): move_ids = [move.id for move in pick.move_lines] move_obj.action_cancel(cr, uid, move_ids, context=context) move_obj.unlink(cr, uid, move_ids, context=context) return super(stock_picking, self).unlink(cr, uid, ids, context=context) def _create_backorder(self, cr, uid, picking, backorder_moves=[], context=None): """ Move all non-done lines into a new backorder picking. If the key 'do_only_split' is given in the context, then move all lines not in context.get('split', []) instead of all non-done lines. 
""" if not backorder_moves: backorder_moves = picking.move_lines backorder_move_ids = [x.id for x in backorder_moves if x.state not in ('done', 'cancel')] if 'do_only_split' in context and context['do_only_split']: backorder_move_ids = [x.id for x in backorder_moves if x.id not in context.get('split', [])] if backorder_move_ids: backorder_id = self.copy(cr, uid, picking.id, { 'name': '/', 'move_lines': [], 'pack_operation_ids': [], 'backorder_id': picking.id, }) backorder = self.browse(cr, uid, backorder_id, context=context) self.message_post(cr, uid, picking.id, body=_("Back order <em>%s</em> <b>created</b>.") % (backorder.name), context=context) move_obj = self.pool.get("stock.move") move_obj.write(cr, uid, backorder_move_ids, {'picking_id': backorder_id}, context=context) if not picking.date_done: self.write(cr, uid, [picking.id], {'date_done': time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)}, context=context) self.action_confirm(cr, uid, [backorder_id], context=context) self.action_assign(cr, uid, [backorder_id], context=context) return backorder_id return False @api.cr_uid_ids_context def recheck_availability(self, cr, uid, picking_ids, context=None): self.action_assign(cr, uid, picking_ids, context=context) self.do_prepare_partial(cr, uid, picking_ids, context=context) def _get_top_level_packages(self, cr, uid, quants_suggested_locations, context=None): """This method searches for the higher level packages that can be moved as a single operation, given a list of quants to move and their suggested destination, and returns the list of matching packages. 
""" # Try to find as much as possible top-level packages that can be moved pack_obj = self.pool.get("stock.quant.package") quant_obj = self.pool.get("stock.quant") top_lvl_packages = set() quants_to_compare = quants_suggested_locations.keys() for pack in list(set([x.package_id for x in quants_suggested_locations.keys() if x and x.package_id])): loop = True test_pack = pack good_pack = False pack_destination = False while loop: pack_quants = pack_obj.get_content(cr, uid, [test_pack.id], context=context) all_in = True for quant in quant_obj.browse(cr, uid, pack_quants, context=context): # If the quant is not in the quants to compare and not in the common location if not quant in quants_to_compare: all_in = False break else: #if putaway strat apply, the destination location of each quant may be different (and thus the package should not be taken as a single operation) if not pack_destination: pack_destination = quants_suggested_locations[quant] elif pack_destination != quants_suggested_locations[quant]: all_in = False break if all_in: good_pack = test_pack if test_pack.parent_id: test_pack = test_pack.parent_id else: #stop the loop when there's no parent package anymore loop = False else: #stop the loop when the package test_pack is not totally reserved for moves of this picking #(some quants may be reserved for other picking or not reserved at all) loop = False if good_pack: top_lvl_packages.add(good_pack) return list(top_lvl_packages) def _prepare_pack_ops(self, cr, uid, picking, quants, forced_qties, context=None): """ returns a list of dict, ready to be used in create() of stock.pack.operation. :param picking: browse record (stock.picking) :param quants: browse record list (stock.quant). 
List of quants associated to the picking :param forced_qties: dictionary showing for each product (keys) its corresponding quantity (value) that is not covered by the quants associated to the picking """ def _picking_putaway_apply(product): location = False # Search putaway strategy if product_putaway_strats.get(product.id): location = product_putaway_strats[product.id] else: location = self.pool.get('stock.location').get_putaway_strategy(cr, uid, picking.location_dest_id, product, context=context) product_putaway_strats[product.id] = location return location or picking.location_dest_id.id # If we encounter an UoM that is smaller than the default UoM or the one already chosen, use the new one instead. product_uom = {} # Determines UoM used in pack operations location_dest_id = None location_id = None for move in [x for x in picking.move_lines if x.state not in ('done', 'cancel')]: if not product_uom.get(move.product_id.id): product_uom[move.product_id.id] = move.product_id.uom_id if move.product_uom.id != move.product_id.uom_id.id and move.product_uom.factor > product_uom[move.product_id.id].factor: product_uom[move.product_id.id] = move.product_uom if not move.scrapped: if location_dest_id and move.location_dest_id.id != location_dest_id: raise UserError(_('The destination location must be the same for all the moves of the picking.')) location_dest_id = move.location_dest_id.id if location_id and move.location_id.id != location_id: raise UserError(_('The source location must be the same for all the moves of the picking.')) location_id = move.location_id.id pack_obj = self.pool.get("stock.quant.package") quant_obj = self.pool.get("stock.quant") vals = [] qtys_grouped = {} lots_grouped = {} #for each quant of the picking, find the suggested location quants_suggested_locations = {} product_putaway_strats = {} for quant in quants: if quant.qty <= 0: continue suggested_location_id = _picking_putaway_apply(quant.product_id) quants_suggested_locations[quant] = 
suggested_location_id #find the packages we can movei as a whole top_lvl_packages = self._get_top_level_packages(cr, uid, quants_suggested_locations, context=context) # and then create pack operations for the top-level packages found for pack in top_lvl_packages: pack_quant_ids = pack_obj.get_content(cr, uid, [pack.id], context=context) pack_quants = quant_obj.browse(cr, uid, pack_quant_ids, context=context) vals.append({ 'picking_id': picking.id, 'package_id': pack.id, 'product_qty': 1.0, 'location_id': pack.location_id.id, 'location_dest_id': quants_suggested_locations[pack_quants[0]], 'owner_id': pack.owner_id.id, }) #remove the quants inside the package so that they are excluded from the rest of the computation for quant in pack_quants: del quants_suggested_locations[quant] # Go through all remaining reserved quants and group by product, package, owner, source location and dest location # Lots will go into pack operation lot object for quant, dest_location_id in quants_suggested_locations.items(): key = (quant.product_id.id, quant.package_id.id, quant.owner_id.id, quant.location_id.id, dest_location_id) if qtys_grouped.get(key): qtys_grouped[key] += quant.qty else: qtys_grouped[key] = quant.qty if quant.product_id.tracking != 'none' and quant.lot_id: lots_grouped.setdefault(key, {}).setdefault(quant.lot_id.id, 0.0) lots_grouped[key][quant.lot_id.id] += quant.qty # Do the same for the forced quantities (in cases of force_assign or incomming shipment for example) for product, qty in forced_qties.items(): if qty <= 0: continue suggested_location_id = _picking_putaway_apply(product) key = (product.id, False, picking.owner_id.id, picking.location_id.id, suggested_location_id) if qtys_grouped.get(key): qtys_grouped[key] += qty else: qtys_grouped[key] = qty # Create the necessary operations for the grouped quants and remaining qtys uom_obj = self.pool.get('product.uom') prevals = {} for key, qty in qtys_grouped.items(): product = 
self.pool.get("product.product").browse(cr, uid, key[0], context=context) uom_id = product.uom_id.id qty_uom = qty if product_uom.get(key[0]): uom_id = product_uom[key[0]].id qty_uom = uom_obj._compute_qty(cr, uid, product.uom_id.id, qty, uom_id) pack_lot_ids = [] if lots_grouped.get(key): for lot in lots_grouped[key].keys(): pack_lot_ids += [(0, 0, {'lot_id': lot, 'qty': 0.0, 'qty_todo': lots_grouped[key][lot]})] val_dict = { 'picking_id': picking.id, 'product_qty': qty_uom, 'product_id': key[0], 'package_id': key[1], 'owner_id': key[2], 'location_id': key[3], 'location_dest_id': key[4], 'product_uom_id': uom_id, 'pack_lot_ids': pack_lot_ids, } if key[0] in prevals: prevals[key[0]].append(val_dict) else: prevals[key[0]] = [val_dict] # prevals var holds the operations in order to create them in the same order than the picking stock moves if possible processed_products = set() for move in [x for x in picking.move_lines if x.state not in ('done', 'cancel')]: if move.product_id.id not in processed_products: vals += prevals.get(move.product_id.id, []) processed_products.add(move.product_id.id) return vals @api.cr_uid_ids_context def do_prepare_partial(self, cr, uid, picking_ids, context=None): context = context or {} pack_operation_obj = self.pool.get('stock.pack.operation') #get list of existing operations and delete them existing_package_ids = pack_operation_obj.search(cr, uid, [('picking_id', 'in', picking_ids)], context=context) if existing_package_ids: pack_operation_obj.unlink(cr, uid, existing_package_ids, context) for picking in self.browse(cr, uid, picking_ids, context=context): forced_qties = {} # Quantity remaining after calculating reserved quants picking_quants = [] #Calculate packages, reserved quants, qtys of this picking's moves for move in picking.move_lines: if move.state not in ('assigned', 'confirmed', 'waiting'): continue move_quants = move.reserved_quant_ids picking_quants += move_quants forced_qty = (move.state == 'assigned') and move.product_qty 
- sum([x.qty for x in move_quants]) or 0 #if we used force_assign() on the move, or if the move is incoming, forced_qty > 0 if float_compare(forced_qty, 0, precision_rounding=move.product_id.uom_id.rounding) > 0: if forced_qties.get(move.product_id): forced_qties[move.product_id] += forced_qty else: forced_qties[move.product_id] = forced_qty for vals in self._prepare_pack_ops(cr, uid, picking, picking_quants, forced_qties, context=context): vals['fresh_record'] = False pack_operation_obj.create(cr, uid, vals, context=context) #recompute the remaining quantities all at once self.do_recompute_remaining_quantities(cr, uid, picking_ids, context=context) self.write(cr, uid, picking_ids, {'recompute_pack_op': False}, context=context) @api.cr_uid_ids_context def do_unreserve(self, cr, uid, picking_ids, context=None): """ Will remove all quants for picking in picking_ids """ moves_to_unreserve = [] pack_line_to_unreserve = [] for picking in self.browse(cr, uid, picking_ids, context=context): moves_to_unreserve += [m.id for m in picking.move_lines if m.state not in ('done', 'cancel')] pack_line_to_unreserve += [p.id for p in picking.pack_operation_ids] if moves_to_unreserve: if pack_line_to_unreserve: self.pool.get('stock.pack.operation').unlink(cr, uid, pack_line_to_unreserve, context=context) self.pool.get('stock.move').do_unreserve(cr, uid, moves_to_unreserve, context=context) def recompute_remaining_qty(self, cr, uid, picking, done_qtys=False, context=None): def _create_link_for_index(operation_id, index, product_id, qty_to_assign, quant_id=False): move_dict = prod2move_ids[product_id][index] qty_on_link = min(move_dict['remaining_qty'], qty_to_assign) self.pool.get('stock.move.operation.link').create(cr, uid, {'move_id': move_dict['move'].id, 'operation_id': operation_id, 'qty': qty_on_link, 'reserved_quant_id': quant_id}, context=context) if move_dict['remaining_qty'] == qty_on_link: prod2move_ids[product_id].pop(index) else: move_dict['remaining_qty'] -= qty_on_link 
            return qty_on_link

        def _create_link_for_quant(operation_id, quant, qty):
            """create a link for given operation and reserved move of given quant, for the max quantity possible, and returns this quantity"""
            # Quant not reserved on any move: fall back to product-level matching.
            if not quant.reservation_id.id:
                return _create_link_for_product(operation_id, quant.product_id.id, qty)
            qty_on_link = 0
            # Find the index of the move this quant is reserved on, and link on it.
            for i in range(0, len(prod2move_ids[quant.product_id.id])):
                if prod2move_ids[quant.product_id.id][i]['move'].id != quant.reservation_id.id:
                    continue
                qty_on_link = _create_link_for_index(operation_id, i, quant.product_id.id, qty, quant_id=quant.id)
                break
            return qty_on_link

        def _create_link_for_product(operation_id, product_id, qty):
            '''method that creates the link between a given operation and move(s) of given product, for the given quantity.
            Returns True if it was possible to create links for the requested quantity (False if there was not enough quantity on stock moves)'''
            qty_to_assign = qty
            prod_obj = self.pool.get("product.product")
            product = prod_obj.browse(cr, uid, product_id)
            rounding = product.uom_id.rounding
            qtyassign_cmp = float_compare(qty_to_assign, 0.0, precision_rounding=rounding)
            if prod2move_ids.get(product_id):
                # Consume moves in priority order (index 0 first) until the
                # requested quantity is fully assigned or moves run out.
                while prod2move_ids[product_id] and qtyassign_cmp > 0:
                    qty_on_link = _create_link_for_index(operation_id, 0, product_id, qty_to_assign, quant_id=False)
                    qty_to_assign -= qty_on_link
                    qtyassign_cmp = float_compare(qty_to_assign, 0.0, precision_rounding=rounding)
            return qtyassign_cmp == 0

        uom_obj = self.pool.get('product.uom')
        package_obj = self.pool.get('stock.quant.package')
        quant_obj = self.pool.get('stock.quant')
        link_obj = self.pool.get('stock.move.operation.link')
        quants_in_package_done = set()
        prod2move_ids = {}
        still_to_do = []
        #make a dictionary giving for each product, the moves and related quantity that can be used in operation links
        moves = sorted([x for x in picking.move_lines if x.state not in ('done', 'cancel')], key=lambda x: (((x.state == 'assigned') and -2 or 0) + (x.partially_available and -1 or 0)))
        for move in moves:
            if not prod2move_ids.get(move.product_id.id):
                prod2move_ids[move.product_id.id] = [{'move': move, 'remaining_qty': move.product_qty}]
            else:
                prod2move_ids[move.product_id.id].append({'move': move, 'remaining_qty': move.product_qty})

        need_rereserve = False
        #sort the operations in order to give higher priority to those with a package, then a serial number
        operations = picking.pack_operation_ids
        operations = sorted(operations, key=lambda x: ((x.package_id and not x.product_id) and -4 or 0) + (x.package_id and -2 or 0) + (x.pack_lot_ids and -1 or 0))
        #delete existing operations to start again from scratch
        links = link_obj.search(cr, uid, [('operation_id', 'in', [x.id for x in operations])], context=context)
        if links:
            link_obj.unlink(cr, uid, links, context=context)
        #1) first, try to create links when quants can be identified without any doubt
        for ops in operations:
            lot_qty = {}
            for packlot in ops.pack_lot_ids:
                # lot quantities expressed in the product's default UoM
                lot_qty[packlot.lot_id.id] = uom_obj._compute_qty(cr, uid, ops.product_uom_id.id, packlot.qty, ops.product_id.uom_id.id)
            #for each operation, create the links with the stock move by seeking on the matching reserved quants,
            #and deffer the operation if there is some ambiguity on the move to select
            if ops.package_id and not ops.product_id and (not done_qtys or ops.qty_done):
                #entire package
                quant_ids = package_obj.get_content(cr, uid, [ops.package_id.id], context=context)
                for quant in quant_obj.browse(cr, uid, quant_ids, context=context):
                    remaining_qty_on_quant = quant.qty
                    if quant.reservation_id:
                        #avoid quants being counted twice
                        quants_in_package_done.add(quant.id)
                        qty_on_link = _create_link_for_quant(ops.id, quant, quant.qty)
                        remaining_qty_on_quant -= qty_on_link
                    if remaining_qty_on_quant:
                        still_to_do.append((ops, quant.product_id.id, remaining_qty_on_quant))
                        need_rereserve = True
            elif ops.product_id.id:
                #Check moves with same product
                product_qty = ops.qty_done if done_qtys else ops.product_qty
                qty_to_assign = uom_obj._compute_qty_obj(cr, uid, ops.product_uom_id, product_qty, ops.product_id.uom_id, context=context)
                for move_dict in prod2move_ids.get(ops.product_id.id, []):
                    move = move_dict['move']
                    for quant in move.reserved_quant_ids:
                        if not qty_to_assign > 0:
                            break
                        if quant.id in quants_in_package_done:
                            continue
                        #check if the quant is matching the operation details
                        if ops.package_id:
                            flag = quant.package_id and bool(package_obj.search(cr, uid, [('id', 'child_of', [ops.package_id.id])], context=context)) or False
                        else:
                            flag = not quant.package_id.id
                        flag = flag and (ops.owner_id.id == quant.owner_id.id)
                        if flag:
                            if not lot_qty:
                                max_qty_on_link = min(quant.qty, qty_to_assign)
                                qty_on_link = _create_link_for_quant(ops.id, quant, max_qty_on_link)
                                qty_to_assign -= qty_on_link
                            else:
                                if lot_qty.get(quant.lot_id.id):  #if there is still some qty left
                                    max_qty_on_link = min(quant.qty, qty_to_assign, lot_qty[quant.lot_id.id])
                                    qty_on_link = _create_link_for_quant(ops.id, quant, max_qty_on_link)
                                    qty_to_assign -= qty_on_link
                                    lot_qty[quant.lot_id.id] -= qty_on_link
                qty_assign_cmp = float_compare(qty_to_assign, 0, precision_rounding=ops.product_id.uom_id.rounding)
                if qty_assign_cmp > 0:
                    #qty reserved is less than qty put in operations.
                    # We need to create a link but it's deferred after we processed
                    # all the quants (because they leave no choice on their related move and needs to be processed with higher priority)
                    still_to_do += [(ops, ops.product_id.id, qty_to_assign)]
                    need_rereserve = True

        #2) then, process the remaining part
        all_op_processed = True
        for ops, product_id, remaining_qty in still_to_do:
            all_op_processed = _create_link_for_product(ops.id, product_id, remaining_qty) and all_op_processed
        return (need_rereserve, all_op_processed)

    def picking_recompute_remaining_quantities(self, cr, uid, picking, done_qtys=False, context=None):
        """Recompute operation/move links for one picking; returns the
        (need_rereserve, all_op_processed) pair from recompute_remaining_qty,
        or the neutral (False, True) when the picking has no operations."""
        need_rereserve = False
        all_op_processed = True
        if picking.pack_operation_ids:
            need_rereserve, all_op_processed = self.recompute_remaining_qty(cr, uid, picking, done_qtys=done_qtys, context=context)
        return need_rereserve, all_op_processed

    @api.cr_uid_ids_context
    def do_recompute_remaining_quantities(self, cr, uid, picking_ids, done_qtys=False, context=None):
        # Batch variant: recompute links for every picking that has operations;
        # return values are intentionally discarded.
        for picking in self.browse(cr, uid, picking_ids, context=context):
            if picking.pack_operation_ids:
                self.recompute_remaining_qty(cr, uid, picking, done_qtys=done_qtys, context=context)

    def _prepare_values_extra_move(self, cr, uid, op, product, remaining_qty, context=None):
        """
        Creates an extra move when there is no corresponding original move to be copied
        """
        uom_obj = self.pool.get("product.uom")
        uom_id = product.uom_id.id
        qty = remaining_qty
        if op.product_id and op.product_uom_id and op.product_uom_id.id != product.uom_id.id:
            if op.product_uom_id.factor > product.uom_id.factor:  #If the pack operation's is a smaller unit
                uom_id = op.product_uom_id.id
                #HALF-UP rounding as only rounding errors will be because of propagation of error from default UoM
                qty = uom_obj._compute_qty_obj(cr, uid, product.uom_id, remaining_qty, op.product_uom_id, rounding_method='HALF-UP')
        picking = op.picking_id
        ref = product.default_code
        name = '[' + ref + ']' + ' ' + product.name if ref else product.name
        proc_id = False
        # Reuse the procurement of the first linked move that has one.
        for m in op.linked_move_operation_ids:
            if m.move_id.procurement_id:
                proc_id = m.move_id.procurement_id.id
                break
        res = {
            'picking_id': picking.id,
            'location_id': picking.location_id.id,
            'location_dest_id': picking.location_dest_id.id,
            'product_id': product.id,
            'procurement_id': proc_id,
            'product_uom': uom_id,
            'product_uom_qty': qty,
            'name': _('Extra Move: ') + name,
            'state': 'draft',
            # NOTE(review): other values here are ids; this passes the record —
            # presumably the ORM coerces it, but confirm against op.owner_id usage.
            'restrict_partner_id': op.owner_id,
            'group_id': picking.group_id.id,
        }
        return res

    def _create_extra_moves(self, cr, uid, picking, context=None):
        '''This function creates move lines on a picking, at the time of do_transfer, based on
        unexpected product transfers (or exceeding quantities) found in the pack operations.
        '''
        move_obj = self.pool.get('stock.move')
        operation_obj = self.pool.get('stock.pack.operation')
        moves = []
        for op in picking.pack_operation_ids:
            for product, remaining_qty in operation_obj._get_remaining_prod_quantities(cr, uid, op, context=context).items():
                # Only quantities strictly above zero (w.r.t. UoM rounding) need an extra move.
                if float_compare(remaining_qty, 0, precision_rounding=product.uom_id.rounding) > 0:
                    vals = self._prepare_values_extra_move(cr, uid, op, product, remaining_qty, context=context)
                    moves.append(move_obj.create(cr, uid, vals, context=context))
        if moves:
            move_obj.action_confirm(cr, uid, moves, context=context)
        return moves

    def rereserve_pick(self, cr, uid, ids, context=None):
        """
        This can be used to provide a button that rereserves taking into account the
        existing pack operations
        """
        for pick in self.browse(cr, uid, ids, context=context):
            self.rereserve_quants(cr, uid, pick, move_ids=[x.id for x in pick.move_lines if x.state not in ('done', 'cancel')], context=context)

    def rereserve_quants(self, cr, uid, picking, move_ids=[], context=None):
        """ Unreserve quants then try to reassign quants."""
        # NOTE(review): mutable default `move_ids=[]` — read-only here, but a
        # shared default list is a known Python pitfall; consider `None` sentinel.
        stock_move_obj = self.pool.get('stock.move')
        if not move_ids:
            # No explicit moves: unreserve/reassign at the picking level.
            self.do_unreserve(cr, uid, [picking.id], context=context)
            self.action_assign(cr, uid, [picking.id], context=context)
        else:
            stock_move_obj.do_unreserve(cr, uid, move_ids,
                                        context=context)
            stock_move_obj.action_assign(cr, uid, move_ids, no_prepare=True, context=context)

    def do_new_transfer(self, cr, uid, ids, context=None):
        """Validate pickings from the form view: either pop the immediate-transfer
        wizard (nothing entered yet), pop the backorder wizard (partial), or copy
        the done quantities onto the operations and run do_transfer()."""
        pack_op_obj = self.pool['stock.pack.operation']
        data_obj = self.pool['ir.model.data']
        for pick in self.browse(cr, uid, ids, context=context):
            to_delete = []
            if not pick.move_lines and not pick.pack_operation_ids:
                raise UserError(_('Please create some Initial Demand or Mark as Todo and create some Operations. '))
            # In draft or with no pack operations edited yet, ask if we can just do everything
            if pick.state == 'draft' or all([x.qty_done == 0.0 for x in pick.pack_operation_ids]):
                # If no lots when needed, raise error
                picking_type = pick.picking_type_id
                if (picking_type.use_create_lots or picking_type.use_existing_lots):
                    for pack in pick.pack_operation_ids:
                        if pack.product_id and pack.product_id.tracking != 'none':
                            raise UserError(_('Some products require lots, so you need to specify those first!'))
                view = data_obj.xmlid_to_res_id(cr, uid, 'stock.view_immediate_transfer')
                wiz_id = self.pool['stock.immediate.transfer'].create(cr, uid, {'pick_id': pick.id}, context=context)
                return {
                    'name': _('Immediate Transfer?'),
                    'type': 'ir.actions.act_window',
                    'view_type': 'form',
                    'view_mode': 'form',
                    'res_model': 'stock.immediate.transfer',
                    'views': [(view, 'form')],
                    'view_id': view,
                    'target': 'new',
                    'res_id': wiz_id,
                    'context': context,
                }

            # Check backorder should check for other barcodes
            if self.check_backorder(cr, uid, pick, context=context):
                view = data_obj.xmlid_to_res_id(cr, uid, 'stock.view_backorder_confirmation')
                wiz_id = self.pool['stock.backorder.confirmation'].create(cr, uid, {'pick_id': pick.id}, context=context)
                return {
                    'name': _('Create Backorder?'),
                    'type': 'ir.actions.act_window',
                    'view_type': 'form',
                    'view_mode': 'form',
                    'res_model': 'stock.backorder.confirmation',
                    'views': [(view, 'form')],
                    'view_id': view,
                    'target': 'new',
                    'res_id': wiz_id,
                    'context': context,
                }
            for operation in pick.pack_operation_ids:
                if operation.qty_done < 0:
                    raise UserError(_('No negative quantities allowed'))
                if operation.qty_done > 0:
                    # Freeze what was actually done as the operation quantity.
                    pack_op_obj.write(cr, uid, operation.id, {'product_qty': operation.qty_done}, context=context)
                else:
                    # Untouched operations are removed before transferring.
                    to_delete.append(operation.id)
            if to_delete:
                pack_op_obj.unlink(cr, uid, to_delete, context=context)
        self.do_transfer(cr, uid, ids, context=context)
        return

    def check_backorder(self, cr, uid, picking, context=None):
        """Return True when at least one move would be left with a non-zero
        remaining quantity, i.e. validating now would require a backorder."""
        need_rereserve, all_op_processed = self.picking_recompute_remaining_quantities(cr, uid, picking, done_qtys=True, context=context)
        for move in picking.move_lines:
            if float_compare(move.remaining_qty, 0, precision_rounding=move.product_id.uom_id.rounding) != 0:
                return True
        return False

    def create_lots_for_picking(self, cr, uid, ids, context=None):
        """Materialize stock.production.lot records for operation lot lines that
        only carry a lot_name, and drop zero-quantity lot lines."""
        lot_obj = self.pool['stock.production.lot']
        opslot_obj = self.pool['stock.pack.operation.lot']
        to_unlink = []
        for picking in self.browse(cr, uid, ids, context=context):
            for ops in picking.pack_operation_ids:
                for opslot in ops.pack_lot_ids:
                    if not opslot.lot_id:
                        lot_id = lot_obj.create(cr, uid, {'name': opslot.lot_name, 'product_id': ops.product_id.id}, context=context)
                        opslot_obj.write(cr, uid, [opslot.id], {'lot_id':lot_id}, context=context)
                #Unlink pack operations where qty = 0
                to_unlink += [x.id for x in ops.pack_lot_ids if x.qty == 0.0]
        opslot_obj.unlink(cr, uid, to_unlink, context=context)

    def do_transfer(self, cr, uid, ids, context=None):
        """ If no pack operation, we do simple action_done of the picking
        Otherwise, do the pack operations
        """
        if not context:
            context = {}
        # mail_notrack avoids chatter noise on the technical writes below.
        notrack_context = dict(context, mail_notrack=True)
        stock_move_obj = self.pool.get('stock.move')
        self.create_lots_for_picking(cr, uid, ids, context=context)
        for picking in self.browse(cr, uid, ids, context=context):
            if not picking.pack_operation_ids:
                self.action_done(cr, uid, [picking.id], context=context)
                continue
            else:
                need_rereserve, all_op_processed = self.picking_recompute_remaining_quantities(cr, uid, picking, context=context)
                #create extra moves in the picking (unexpected product moves coming from pack operations)
extra moves in the picking (unexpected product moves coming from pack operations) todo_move_ids = [] if not all_op_processed: todo_move_ids += self._create_extra_moves(cr, uid, picking, context=context) #split move lines if needed toassign_move_ids = [] for move in picking.move_lines: remaining_qty = move.remaining_qty if move.state in ('done', 'cancel'): #ignore stock moves cancelled or already done continue elif move.state == 'draft': toassign_move_ids.append(move.id) if float_compare(remaining_qty, 0, precision_rounding = move.product_id.uom_id.rounding) == 0: if move.state in ('draft', 'assigned', 'confirmed'): todo_move_ids.append(move.id) elif float_compare(remaining_qty,0, precision_rounding = move.product_id.uom_id.rounding) > 0 and \ float_compare(remaining_qty, move.product_qty, precision_rounding = move.product_id.uom_id.rounding) < 0: new_move = stock_move_obj.split(cr, uid, move, remaining_qty, context=notrack_context) todo_move_ids.append(move.id) #Assign move as it was assigned before toassign_move_ids.append(new_move) todo_move_ids = list(set(todo_move_ids)) if need_rereserve or not all_op_processed: if not picking.location_id.usage in ("supplier", "production", "inventory"): self.rereserve_quants(cr, uid, picking, move_ids=todo_move_ids, context=context) self.do_recompute_remaining_quantities(cr, uid, [picking.id], context=context) if todo_move_ids and not context.get('do_only_split'): self.pool.get('stock.move').action_done(cr, uid, todo_move_ids, context=notrack_context) elif context.get('do_only_split'): context = dict(context, split=todo_move_ids) self._create_backorder(cr, uid, picking, context=context) return True @api.cr_uid_ids_context def do_split(self, cr, uid, picking_ids, context=None): """ just split the picking (create a backorder) without making it 'done' """ if context is None: context = {} ctx = context.copy() ctx['do_only_split'] = True return self.do_transfer(cr, uid, picking_ids, context=ctx) def put_in_pack(self, cr, uid, ids, 
context=None): stock_move_obj = self.pool["stock.move"] stock_operation_obj = self.pool["stock.pack.operation"] package_obj = self.pool["stock.quant.package"] package_id = False for pick in self.browse(cr, uid, ids, context=context): operations = [x for x in pick.pack_operation_ids if x.qty_done > 0 and (not x.result_package_id)] pack_operation_ids = [] for operation in operations: #If we haven't done all qty in operation, we have to split into 2 operation op = operation if operation.qty_done < operation.product_qty: new_operation = stock_operation_obj.copy(cr, uid, operation.id, {'product_qty': operation.qty_done,'qty_done': operation.qty_done}, context=context) stock_operation_obj.write(cr, uid, operation.id, {'product_qty': operation.product_qty - operation.qty_done,'qty_done': 0}, context=context) if operation.pack_lot_ids: packlots_transfer = [(4, x.id) for x in operation.pack_lot_ids] stock_operation_obj.write(cr, uid, [new_operation], {'pack_lot_ids': packlots_transfer}, context=context) op = stock_operation_obj.browse(cr, uid, new_operation, context=context) pack_operation_ids.append(op.id) if operations: stock_operation_obj.check_tracking(cr, uid, pack_operation_ids, context=context) package_id = package_obj.create(cr, uid, {}, context=context) stock_operation_obj.write(cr, uid, pack_operation_ids, {'result_package_id': package_id}, context=context) else: raise UserError(_('Please process some quantities to put in the pack first!')) return package_id class stock_production_lot(osv.osv): _name = 'stock.production.lot' _inherit = ['mail.thread'] _description = 'Lot/Serial' _columns = { 'name': fields.char('Serial Number', required=True, help="Unique Serial Number"), 'ref': fields.char('Internal Reference', help="Internal reference number in case it differs from the manufacturer's serial number"), 'product_id': fields.many2one('product.product', 'Product', required=True, domain=[('type', 'in', ['product', 'consu'])]), 'quant_ids': 
fields.one2many('stock.quant', 'lot_id', 'Quants', readonly=True), 'create_date': fields.datetime('Creation Date'), } _defaults = { 'name': lambda x, y, z, c: x.pool.get('ir.sequence').next_by_code(y, z, 'stock.lot.serial'), 'product_id': lambda x, y, z, c: c.get('product_id', False), } _sql_constraints = [ ('name_ref_uniq', 'unique (name, product_id)', 'The combination of serial number and product must be unique !'), ] def action_traceability(self, cr, uid, ids, context=None): """ It traces the information of lots @param self: The object pointer. @param cr: A database cursor @param uid: ID of the user currently logged in @param ids: List of IDs selected @param context: A standard dictionary @return: A dictionary of values """ quant_obj = self.pool.get("stock.quant") quants = quant_obj.search(cr, uid, [('lot_id', 'in', ids)], context=context) moves = set() for quant in quant_obj.browse(cr, uid, quants, context=context): moves |= {move.id for move in quant.history_ids} if moves: return { 'domain': "[('id','in',[" + ','.join(map(str, list(moves))) + "])]", 'name': _('Traceability'), 'view_mode': 'tree,form', 'view_type': 'form', 'context': {'tree_view_ref': 'stock.view_move_tree'}, 'res_model': 'stock.move', 'type': 'ir.actions.act_window', } return False # ---------------------------------------------------- # Move # ---------------------------------------------------- class stock_move(osv.osv): _name = "stock.move" _description = "Stock Move" _order = 'picking_id, sequence, id' def get_price_unit(self, cr, uid, move, context=None): """ Returns the unit price to store on the quant """ return move.price_unit or move.product_id.standard_price def name_get(self, cr, uid, ids, context=None): res = [] for line in self.browse(cr, uid, ids, context=context): name = line.location_id.name + ' > ' + line.location_dest_id.name if line.product_id.code: name = line.product_id.code + ': ' + name if line.picking_id.origin: name = line.picking_id.origin + '/ ' + name 
res.append((line.id, name)) return res def _quantity_normalize(self, cr, uid, ids, name, args, context=None): uom_obj = self.pool.get('product.uom') res = {} for m in self.browse(cr, uid, ids, context=context): res[m.id] = uom_obj._compute_qty_obj(cr, uid, m.product_uom, m.product_uom_qty, m.product_id.uom_id, context=context) return res def _get_remaining_qty(self, cr, uid, ids, field_name, args, context=None): uom_obj = self.pool.get('product.uom') res = {} for move in self.browse(cr, uid, ids, context=context): qty = move.product_qty for record in move.linked_move_operation_ids: qty -= record.qty # Keeping in product default UoM res[move.id] = float_round(qty, precision_rounding=move.product_id.uom_id.rounding) return res def _get_lot_ids(self, cr, uid, ids, field_name, args, context=None): res = dict.fromkeys(ids, False) for move in self.browse(cr, uid, ids, context=context): if move.state == 'done': res[move.id] = [q.lot_id.id for q in move.quant_ids if q.lot_id] else: res[move.id] = [q.lot_id.id for q in move.reserved_quant_ids if q.lot_id] return res def _get_product_availability(self, cr, uid, ids, field_name, args, context=None): quant_obj = self.pool.get('stock.quant') res = dict.fromkeys(ids, False) for move in self.browse(cr, uid, ids, context=context): if move.state == 'done': res[move.id] = move.product_qty else: sublocation_ids = self.pool.get('stock.location').search(cr, uid, [('id', 'child_of', [move.location_id.id])], context=context) quant_ids = quant_obj.search(cr, uid, [('location_id', 'in', sublocation_ids), ('product_id', '=', move.product_id.id), ('reservation_id', '=', False)], context=context) availability = 0 for quant in quant_obj.browse(cr, uid, quant_ids, context=context): availability += quant.qty res[move.id] = min(move.product_qty, availability) return res def _get_string_qty_information(self, cr, uid, ids, field_name, args, context=None): settings_obj = self.pool.get('stock.config.settings') uom_obj = self.pool.get('product.uom') 
res = dict.fromkeys(ids, '') precision = self.pool['decimal.precision'].precision_get(cr, uid, 'Product Unit of Measure') for move in self.browse(cr, uid, ids, context=context): if move.state in ('draft', 'done', 'cancel') or move.location_id.usage != 'internal': res[move.id] = '' # 'not applicable' or 'n/a' could work too continue total_available = min(move.product_qty, move.reserved_availability + move.availability) total_available = uom_obj._compute_qty_obj(cr, uid, move.product_id.uom_id, total_available, move.product_uom, round=False, context=context) total_available = float_round(total_available, precision_digits=precision) info = str(total_available) #look in the settings if we need to display the UoM name or not config_ids = settings_obj.search(cr, uid, [], limit=1, order='id DESC', context=context) if config_ids: stock_settings = settings_obj.browse(cr, uid, config_ids[0], context=context) if stock_settings.group_uom: info += ' ' + move.product_uom.name if move.reserved_availability: if move.reserved_availability != total_available: #some of the available quantity is assigned and some are available but not reserved reserved_available = uom_obj._compute_qty_obj(cr, uid, move.product_id.uom_id, move.reserved_availability, move.product_uom, round=False, context=context) reserved_available = float_round(reserved_available, precision_digits=precision) info += _(' (%s reserved)') % str(reserved_available) else: #all available quantity is assigned info += _(' (reserved)') res[move.id] = info return res def _get_reserved_availability(self, cr, uid, ids, field_name, args, context=None): res = dict.fromkeys(ids, 0) for move in self.browse(cr, uid, ids, context=context): res[move.id] = sum([quant.qty for quant in move.reserved_quant_ids]) return res def _get_move(self, cr, uid, ids, context=None): res = set() for quant in self.browse(cr, uid, ids, context=context): if quant.reservation_id: res.add(quant.reservation_id.id) return list(res) def _get_move_ids(self, cr, 
uid, ids, context=None): res = [] for picking in self.browse(cr, uid, ids, context=context): res += [x.id for x in picking.move_lines] return res def _get_moves_from_prod(self, cr, uid, ids, context=None): if ids: return self.pool.get('stock.move').search(cr, uid, [('product_id', 'in', ids)], context=context) return [] def _set_product_qty(self, cr, uid, id, field, value, arg, context=None): """ The meaning of product_qty field changed lately and is now a functional field computing the quantity in the default product UoM. This code has been added to raise an error if a write is made given a value for `product_qty`, where the same write should set the `product_uom_qty` field instead, in order to detect errors. """ raise UserError(_('The requested operation cannot be processed because of a programming error setting the `product_qty` field instead of the `product_uom_qty`.')) _columns = { 'sequence': fields.integer('Sequence'), 'name': fields.char('Description', required=True, select=True), 'priority': fields.selection(procurement.PROCUREMENT_PRIORITIES, 'Priority'), 'create_date': fields.datetime('Creation Date', readonly=True, select=True), 'date': fields.datetime('Date', required=True, select=True, help="Move date: scheduled date until move is done, then date of actual move processing", states={'done': [('readonly', True)]}), 'date_expected': fields.datetime('Expected Date', states={'done': [('readonly', True)]}, required=True, select=True, help="Scheduled date for the processing of this move"), 'product_id': fields.many2one('product.product', 'Product', required=True, select=True, domain=[('type', 'in', ['product', 'consu'])], states={'done': [('readonly', True)]}), 'product_qty': fields.function(_quantity_normalize, fnct_inv=_set_product_qty, type='float', digits=0, store={ _name: (lambda self, cr, uid, ids, c={}: ids, ['product_id', 'product_uom', 'product_uom_qty'], 10), }, string='Quantity', help='Quantity in the default UoM of the product'), 
'product_uom_qty': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure'), required=True, states={'done': [('readonly', True)]}, help="This is the quantity of products from an inventory " "point of view. For moves in the state 'done', this is the " "quantity of products that were actually moved. For other " "moves, this is the quantity of product that is planned to " "be moved. Lowering this quantity does not generate a " "backorder. Changing this quantity on assigned moves affects " "the product reservation, and should be done with care." ), 'product_uom': fields.many2one('product.uom', 'Unit of Measure', required=True, states={'done': [('readonly', True)]}), 'product_tmpl_id': fields.related('product_id', 'product_tmpl_id', type='many2one', relation='product.template', string='Product Template'), 'product_packaging': fields.many2one('product.packaging', 'preferred Packaging', help="It specifies attributes of packaging like type, quantity of packaging,etc."), 'location_id': fields.many2one('stock.location', 'Source Location', required=True, select=True, auto_join=True, states={'done': [('readonly', True)]}, help="Sets a location if you produce at a fixed location. 
This can be a partner location if you subcontract the manufacturing operations."), 'location_dest_id': fields.many2one('stock.location', 'Destination Location', required=True, states={'done': [('readonly', True)]}, select=True, auto_join=True, help="Location where the system will stock the finished products."), 'partner_id': fields.many2one('res.partner', 'Destination Address ', states={'done': [('readonly', True)]}, help="Optional address where goods are to be delivered, specifically used for allotment"), 'picking_partner_id': fields.related('picking_id', 'partner_id', type='many2one', relation='res.partner', string='Transfer Destination Address'), 'move_dest_id': fields.many2one('stock.move', 'Destination Move', help="Optional: next stock move when chaining them", select=True, copy=False), 'move_orig_ids': fields.one2many('stock.move', 'move_dest_id', 'Original Move', help="Optional: previous stock move when chaining them", select=True), 'picking_id': fields.many2one('stock.picking', 'Transfer Reference', select=True, states={'done': [('readonly', True)]}), 'note': fields.text('Notes'), 'state': fields.selection([('draft', 'New'), ('cancel', 'Cancelled'), ('waiting', 'Waiting Another Move'), ('confirmed', 'Waiting Availability'), ('assigned', 'Available'), ('done', 'Done'), ], 'Status', readonly=True, select=True, copy=False, help= "* New: When the stock move is created and not yet confirmed.\n"\ "* Waiting Another Move: This state can be seen when a move is waiting for another one, for example in a chained flow.\n"\ "* Waiting Availability: This state is reached when the procurement resolution is not straight forward. 
It may need the scheduler to run, a component to me manufactured...\n"\ "* Available: When products are reserved, it is set to \'Available\'.\n"\ "* Done: When the shipment is processed, the state is \'Done\'."), 'partially_available': fields.boolean('Partially Available', readonly=True, help="Checks if the move has some stock reserved", copy=False), 'price_unit': fields.float('Unit Price', help="Technical field used to record the product cost set by the user during a picking confirmation (when costing method used is 'average price' or 'real'). Value given in company currency and in product uom."), # as it's a technical field, we intentionally don't provide the digits attribute 'company_id': fields.many2one('res.company', 'Company', required=True, select=True), 'split_from': fields.many2one('stock.move', string="Move Split From", help="Technical field used to track the origin of a split move, which can be useful in case of debug", copy=False), 'backorder_id': fields.related('picking_id', 'backorder_id', type='many2one', relation="stock.picking", string="Back Order of", select=True), 'origin': fields.char("Source Document"), 'procure_method': fields.selection([('make_to_stock', 'Default: Take From Stock'), ('make_to_order', 'Advanced: Apply Procurement Rules')], 'Supply Method', required=True, help="""By default, the system will take from the stock in the source location and passively wait for availability. The other possibility allows you to directly create a procurement on the source location (and thus ignore its current stock) to gather products. 
If we want to chain moves and have this one to wait for the previous, this second option should be chosen."""), # used for colors in tree views: 'scrapped': fields.related('location_dest_id', 'scrap_location', type='boolean', relation='stock.location', string='Scrapped', readonly=True), 'quant_ids': fields.many2many('stock.quant', 'stock_quant_move_rel', 'move_id', 'quant_id', 'Moved Quants', copy=False), 'reserved_quant_ids': fields.one2many('stock.quant', 'reservation_id', 'Reserved quants'), 'linked_move_operation_ids': fields.one2many('stock.move.operation.link', 'move_id', string='Linked Operations', readonly=True, help='Operations that impact this move for the computation of the remaining quantities'), 'remaining_qty': fields.function(_get_remaining_qty, type='float', string='Remaining Quantity', digits=0, states={'done': [('readonly', True)]}, help="Remaining Quantity in default UoM according to operations matched with this move"), 'procurement_id': fields.many2one('procurement.order', 'Procurement'), 'group_id': fields.many2one('procurement.group', 'Procurement Group'), 'rule_id': fields.many2one('procurement.rule', 'Procurement Rule', help='The procurement rule that created this stock move'), 'push_rule_id': fields.many2one('stock.location.path', 'Push Rule', help='The push rule that created this stock move'), 'propagate': fields.boolean('Propagate cancel and split', help='If checked, when this move is cancelled, cancel the linked move too'), 'picking_type_id': fields.many2one('stock.picking.type', 'Picking Type'), 'inventory_id': fields.many2one('stock.inventory', 'Inventory'), 'lot_ids': fields.function(_get_lot_ids, type='many2many', relation='stock.production.lot', string='Lots'), 'origin_returned_move_id': fields.many2one('stock.move', 'Origin return move', help='move that created the return move', copy=False), 'returned_move_ids': fields.one2many('stock.move', 'origin_returned_move_id', 'All returned moves', help='Optional: all returned moves created 
from this move'),
        # --- computed availability / reservation information ---
        'reserved_availability': fields.function(_get_reserved_availability, type='float', string='Quantity Reserved', readonly=True, help='Quantity that has already been reserved for this move'),
        'availability': fields.function(_get_product_availability, type='float', string='Forecasted Quantity', readonly=True, help='Quantity in stock that can still be reserved for this move'),
        'string_availability_info': fields.function(_get_string_qty_information, type='text', string='Availability', readonly=True, help='Show various information on stock availability for this move'),
        # technical restrictions applied when reserving quants for this move
        'restrict_lot_id': fields.many2one('stock.production.lot', 'Lot', help="Technical field used to depict a restriction on the lot of quants to consider when marking this move as 'done'"),
        'restrict_partner_id': fields.many2one('res.partner', 'Owner ', help="Technical field used to depict a restriction on the ownership of quants to consider when marking this move as 'done'"),
        'route_ids': fields.many2many('stock.location.route', 'stock_location_route_move', 'move_id', 'route_id', 'Destination route', help="Preferred route to be followed by the procurement order"),
        'warehouse_id': fields.many2one('stock.warehouse', 'Warehouse', help="Technical field depicting the warehouse to consider for the route selection on the next procurement (if any)."),
    }

    def _default_destination_address(self, cr, uid, context=None):
        # Default for 'partner_id': no destination address by default.
        return False

    def _default_group_id(self, cr, uid, context=None):
        # Default for 'group_id': reuse the procurement group of the picking
        # the move is created from (if any), so chained moves stay grouped.
        context = context or {}
        if context.get('default_picking_id', False):
            picking = self.pool.get('stock.picking').browse(cr, uid, context['default_picking_id'], context=context)
            return picking.group_id.id
        return False

    _defaults = {
        'partner_id': _default_destination_address,
        'state': 'draft',
        'priority': '1',
        'product_uom_qty': 1.0,
        'sequence': 10,
        'scrapped': False,
        'date': fields.datetime.now,
        'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.move', context=c),
        'date_expected': fields.datetime.now,
        'procure_method': 'make_to_stock',
        'propagate': True,
        'partially_available': False,
        'group_id': _default_group_id,
    }

    def _check_uom(self, cr, uid, ids, context=None):
        # Constraint helper: the move's UoM must be in the same UoM category as
        # the product's reference UoM, otherwise quantities cannot be converted.
        for move in self.browse(cr, uid, ids, context=context):
            if move.product_id.uom_id.category_id.id != move.product_uom.category_id.id:
                return False
        return True

    _constraints = [
        (_check_uom, 'You try to move a product using a UoM that is not compatible with the UoM of the product moved. Please use an UoM in the same UoM category.', ['product_uom']),
    ]

    def init(self, cr):
        # Create a composite index used by reservation/availability queries,
        # only if it does not exist yet (idempotent at module load).
        cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = %s', ('stock_move_product_location_index',))
        if not cr.fetchone():
            cr.execute('CREATE INDEX stock_move_product_location_index ON stock_move (product_id, location_id, location_dest_id, company_id, state)')

    @api.cr_uid_ids_context
    def do_unreserve(self, cr, uid, move_ids, context=None):
        # Release the quants reserved by the given moves and reset their state:
        # back to 'waiting' when the move still has ancestors, else 'confirmed'.
        # Raises UserError for moves already done or cancelled.
        quant_obj = self.pool.get("stock.quant")
        for move in self.browse(cr, uid, move_ids, context=context):
            if move.state in ('done', 'cancel'):
                raise UserError(_('Cannot unreserve a done move'))
            quant_obj.quants_unreserve(cr, uid, move, context=context)
            if self.find_move_ancestors(cr, uid, move, context=context):
                self.write(cr, uid, [move.id], {'state': 'waiting'}, context=context)
            else:
                self.write(cr, uid, [move.id], {'state': 'confirmed'}, context=context)

    def _prepare_procurement_from_move(self, cr, uid, move, context=None):
        # Build the values dict of the procurement.order generated for a
        # make-to-order move (used by _create_procurement).
        origin = (move.group_id and (move.group_id.name + ":") or "") + (move.rule_id and move.rule_id.name or move.origin or move.picking_id.name or "/")
        group_id = move.group_id and move.group_id.id or False
        if move.rule_id:
            # The procurement rule may force a fixed propagation group, or none.
            if move.rule_id.group_propagation_option == 'fixed' and move.rule_id.group_id:
                group_id = move.rule_id.group_id.id
            elif move.rule_id.group_propagation_option == 'none':
                group_id = False
        return {
            'name': move.rule_id and move.rule_id.name or "/",
            'origin': origin,
            'company_id': move.company_id and move.company_id.id or False,
            'date_planned': move.date,
            'product_id': move.product_id.id,
            'product_qty': move.product_uom_qty,
            'product_uom': move.product_uom.id,
            'location_id': move.location_id.id,
            'move_dest_id': move.id,
            'group_id': group_id,
            'route_ids': [(4, x.id) for x in move.route_ids],
            'warehouse_id': move.warehouse_id.id or (move.picking_type_id and move.picking_type_id.warehouse_id.id or False),
            'priority': move.priority,
        }

    def _push_apply(self, cr, uid, moves, context=None):
        # Apply push rules (stock.location.path) on the given moves to chain a
        # further move from their destination location, when a rule matches.
        push_obj = self.pool.get("stock.location.path")
        for move in moves:
            #1) if the move is already chained, there is no need to check push rules
            #2) if the move is a returned move, we don't want to check push rules, as returning a returned move is the only decent way
            #   to receive goods without triggering the push rules again (which would duplicate chained operations)
            if not move.move_dest_id:
                domain = [('location_from_id', '=', move.location_dest_id.id)]
                #priority goes to the route defined on the product and product category
                route_ids = [x.id for x in move.product_id.route_ids + move.product_id.categ_id.total_route_ids]
                rules = push_obj.search(cr, uid, domain + [('route_id', 'in', route_ids)], order='route_sequence, sequence', context=context)
                if not rules:
                    #then we search on the warehouse if a rule can apply
                    wh_route_ids = []
                    if move.warehouse_id:
                        wh_route_ids = [x.id for x in move.warehouse_id.route_ids]
                    elif move.picking_id.picking_type_id.warehouse_id:
                        wh_route_ids = [x.id for x in move.picking_id.picking_type_id.warehouse_id.route_ids]
                    if wh_route_ids:
                        rules = push_obj.search(cr, uid, domain + [('route_id', 'in', wh_route_ids)], order='route_sequence, sequence', context=context)
                    if not rules:
                        #if no specialized push rule has been found yet, we try to find a general one (without route)
                        rules = push_obj.search(cr, uid, domain + [('route_id', '=', False)], order='sequence', context=context)
                if rules:
                    rule = push_obj.browse(cr, uid, rules[0], context=context)
                    # Make sure it is not returning the return
                    if (not
move.origin_returned_move_id or move.origin_returned_move_id.location_id.id != rule.location_dest_id.id):
                        push_obj._apply(cr, uid, rule, move, context=context)
        return True

    def _create_procurement(self, cr, uid, move, context=None):
        """ This will create a procurement order """
        return self.pool.get("procurement.order").create(cr, uid, self._prepare_procurement_from_move(cr, uid, move, context=context), context=context)

    def _create_procurements(self, cr, uid, moves, context=None):
        # Create one procurement per move, then run them all in one batch.
        res = []
        for move in moves:
            res.append(self._create_procurement(cr, uid, move, context=context))
        # Run procurements immediately when generated from multiple moves
        self.pool['procurement.order'].run(cr, uid, res, context=context)
        return res

    def create(self, cr, uid, vals, context=None):
        # Override: track the linked picking's state change in the mail thread
        # (disabled through the 'mail_notrack' context key).
        if context is None:
            context = {}
        picking_obj = self.pool['stock.picking']
        track = not context.get('mail_notrack') and vals.get('picking_id')
        if track:
            picking = picking_obj.browse(cr, uid, vals['picking_id'], context=context)
            initial_values = {picking.id: {'state': picking.state}}
        res = super(stock_move, self).create(cr, uid, vals, context=context)
        if track:
            picking_obj.message_track(cr, uid, [vals['picking_id']], picking_obj.fields_get(cr, uid, ['state'], context=context), initial_values, context=context)
        return res

    def write(self, cr, uid, ids, vals, context=None):
        # Override to:
        #  - forbid modifying key fields of a 'done' move,
        #  - propagate quantity/date changes to the chained destination move,
        #  - track picking state changes in the mail thread.
        if context is None:
            context = {}
        if isinstance(ids, (int, long)):
            ids = [ids]
        picking_obj = self.pool['stock.picking']
        # Check that we do not modify a stock.move which is done
        frozen_fields = set(['product_qty', 'product_uom', 'location_id', 'location_dest_id', 'product_id'])
        moves = self.browse(cr, uid, ids, context=context)
        for move in moves:
            if move.state == 'done':
                if frozen_fields.intersection(vals):
                    raise UserError(_('Quantities, Units of Measure, Products and Locations cannot be modified on stock moves that have already been processed (except by the Administrator).'))
        propagated_changes_dict = {}
        #propagation of quantity change
        if vals.get('product_uom_qty'):
            propagated_changes_dict['product_uom_qty'] = vals['product_uom_qty']
        if vals.get('product_uom_id'):
            propagated_changes_dict['product_uom_id'] = vals['product_uom_id']
        if vals.get('product_uos_qty'):
            propagated_changes_dict['product_uos_qty'] = vals['product_uos_qty']
        if vals.get('product_uos_id'):
            propagated_changes_dict['product_uos_id'] = vals['product_uos_id']
        #propagation of expected date:
        propagated_date_field = False
        if vals.get('date_expected'):
            #propagate any manual change of the expected date
            propagated_date_field = 'date_expected'
        elif (vals.get('state', '') == 'done' and vals.get('date')):
            #propagate also any delta observed when setting the move as done
            propagated_date_field = 'date'
        if not context.get('do_not_propagate', False) and (propagated_date_field or propagated_changes_dict):
            #any propagation is (maybe) needed
            for move in self.browse(cr, uid, ids, context=context):
                if move.move_dest_id and move.propagate:
                    if 'date_expected' in propagated_changes_dict:
                        propagated_changes_dict.pop('date_expected')
                    if propagated_date_field:
                        # Shift the destination move's expected date by the same
                        # delta, when it exceeds the company's minimum threshold.
                        current_date = datetime.strptime(move.date_expected, DEFAULT_SERVER_DATETIME_FORMAT)
                        new_date = datetime.strptime(vals.get(propagated_date_field), DEFAULT_SERVER_DATETIME_FORMAT)
                        delta = new_date - current_date
                        if abs(delta.days) >= move.company_id.propagation_minimum_delta:
                            old_move_date = datetime.strptime(move.move_dest_id.date_expected, DEFAULT_SERVER_DATETIME_FORMAT)
                            new_move_date = (old_move_date + relativedelta.relativedelta(days=delta.days or 0)).strftime(DEFAULT_SERVER_DATETIME_FORMAT)
                            propagated_changes_dict['date_expected'] = new_move_date
                    #For pushed moves as well as for pulled moves, propagate by recursive call of write().
                    #Note that, for pulled moves we intentionally don't propagate on the procurement.
                    if propagated_changes_dict:
                        self.write(cr, uid, [move.move_dest_id.id], propagated_changes_dict, context=context)
        track_pickings = not context.get('mail_notrack') and any(field in vals for field in ['state', 'picking_id', 'partially_available'])
        if track_pickings:
            # Snapshot picking states before the write so message_track can diff.
            to_track_picking_ids = set([move.picking_id.id for move in moves if move.picking_id])
            if vals.get('picking_id'):
                to_track_picking_ids.add(vals['picking_id'])
            to_track_picking_ids = list(to_track_picking_ids)
            pickings = picking_obj.browse(cr, uid, to_track_picking_ids, context=context)
            initial_values = dict((picking.id, {'state': picking.state}) for picking in pickings)
        res = super(stock_move, self).write(cr, uid, ids, vals, context=context)
        if track_pickings:
            picking_obj.message_track(cr, uid, to_track_picking_ids, picking_obj.fields_get(cr, uid, ['state'], context=context), initial_values, context=context)
        return res

    def onchange_quantity(self, cr, uid, ids, product_id, product_qty, product_uom):
        """ On change of product quantity finds UoM
        @param product_id: Product id
        @param product_qty: Changed Quantity of product
        @param product_uom: Unit of measure of product
        @return: Dictionary of values
        """
        warning = {}
        result = {}
        if (not product_id) or (product_qty <= 0.0):
            result['product_qty'] = 0.0
            return {'value': result}
        product_obj = self.pool.get('product.product')
        # Warn if the quantity was decreased
        if ids:
            for move in self.read(cr, uid, ids, ['product_qty']):
                if product_qty < move['product_qty']:
                    warning.update({
                        'title': _('Information'),
                        'message': _("By changing this quantity here, you accept the "
                                     "new quantity as complete: Odoo will not "
                                     "automatically generate a back order.")})
                    break
        return {'warning': warning}

    def onchange_product_id(self, cr, uid, ids, prod_id=False, loc_id=False, loc_dest_id=False, partner_id=False):
        """ On change of product id, if finds UoM, quantity
        @param prod_id: Changed Product id
        @param loc_id: Source location id
        @param loc_dest_id: Destination location id
        @param
        partner_id: Address id of partner
        @return: Dictionary of values
        """
        if not prod_id:
            return {'domain': {'product_uom': []}}
        user = self.pool.get('res.users').browse(cr, uid, uid)
        lang = user and user.lang or False
        if partner_id:
            # Prefer the destination partner's language for the product name.
            addr_rec = self.pool.get('res.partner').browse(cr, uid, partner_id)
            if addr_rec:
                lang = addr_rec and addr_rec.lang or False
        ctx = {'lang': lang}
        product = self.pool.get('product.product').browse(cr, uid, [prod_id], context=ctx)[0]
        result = {
            'name': product.partner_ref,
            'product_uom': product.uom_id.id,
            'product_uom_qty': 1.00,
        }
        if loc_id:
            result['location_id'] = loc_id
        if loc_dest_id:
            result['location_dest_id'] = loc_dest_id
        # Restrict the UoM choice to the product's UoM category.
        res = {'value': result,
               'domain': {'product_uom': [('category_id', '=', product.uom_id.category_id.id)]}
               }
        return res

    def _prepare_picking_assign(self, cr, uid, move, context=None):
        """ Prepares a new picking for this move as it could not be assigned to another picking. This method is designed to be inherited. """
        values = {
            'origin': move.origin,
            'company_id': move.company_id and move.company_id.id or False,
            'move_type': move.group_id and move.group_id.move_type or 'direct',
            'partner_id': move.partner_id.id or False,
            'picking_type_id': move.picking_type_id and move.picking_type_id.id or False,
            'location_id': move.location_id.id,
            'location_dest_id': move.location_dest_id.id,
        }
        return values

    @api.cr_uid_ids_context
    def _picking_assign(self, cr, uid, move_ids, context=None):
        """Try to assign the moves to an existing picking
        that has not been reserved yet and has the same
        procurement group, locations and picking type (moves should already have them identical)
        Otherwise, create a new picking to assign them to.
        """
        move = self.browse(cr, uid, move_ids, context=context)[0]
        pick_obj = self.pool.get("stock.picking")
        # Reuse a compatible, not-yet-printed picking still in progress.
        picks = pick_obj.search(cr, uid, [
                ('group_id', '=', move.group_id.id),
                ('location_id', '=', move.location_id.id),
                ('location_dest_id', '=', move.location_dest_id.id),
                ('picking_type_id', '=', move.picking_type_id.id),
                ('printed', '=', False),
                ('state', 'in', ['draft', 'confirmed', 'waiting', 'partially_available', 'assigned'])], limit=1, context=context)
        if picks:
            pick = picks[0]
        else:
            values = self._prepare_picking_assign(cr, uid, move, context=context)
            pick = pick_obj.create(cr, uid, values, context=context)
        return self.write(cr, uid, move_ids, {'picking_id': pick}, context=context)

    def onchange_date(self, cr, uid, ids, date, date_expected, context=None):
        """ On change of Scheduled Date gives a Move date.
        @param date_expected: Scheduled Date
        @param date: Move Date
        @return: Move Date
        """
        if not date_expected:
            date_expected = time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
        return {'value': {'date': date_expected}}

    def attribute_price(self, cr, uid, move, context=None):
        """ Attribute price to move, important in inter-company moves or receipts with only one partner """
        # Only fill in the price when it was not set explicitly.
        if not move.price_unit:
            price = move.product_id.standard_price
            self.write(cr, uid, [move.id], {'price_unit': price})

    def action_confirm(self, cr, uid, ids, context=None):
        """ Confirms stock move or put it in waiting if it's linked to another move.
        @return: List of ids.
        """
        if not context:
            context = {}
        if isinstance(ids, (int, long)):
            ids = [ids]
        states = {
            'confirmed': [],
            'waiting': []
        }
        to_assign = {}
        for move in self.browse(cr, uid, ids, context=context):
            self.attribute_price(cr, uid, move, context=context)
            state = 'confirmed'
            #if the move is preceeded, then it's waiting (if preceeding move is done, then action_assign has been called already and its state is already available)
            if move.move_orig_ids:
                state = 'waiting'
            #if the move is split and some of the ancestor was preceeded, then it's waiting as well
            elif move.split_from:
                move2 = move.split_from
                while move2 and state != 'waiting':
                    if move2.move_orig_ids:
                        state = 'waiting'
                    move2 = move2.split_from
            states[state].append(move.id)
            if not move.picking_id and move.picking_type_id:
                # Group picking-less moves by (group, src, dest) for batch assignment.
                key = (move.group_id.id, move.location_id.id, move.location_dest_id.id)
                if key not in to_assign:
                    to_assign[key] = []
                to_assign[key].append(move.id)
        # MTO moves spawn a procurement and wait for it instead of reserving stock.
        moves = [move for move in self.browse(cr, uid, states['confirmed'], context=context) if move.procure_method == 'make_to_order']
        self._create_procurements(cr, uid, moves, context=context)
        for move in moves:
            states['waiting'].append(move.id)
            states['confirmed'].remove(move.id)
        for state, write_ids in states.items():
            if len(write_ids):
                self.write(cr, uid, write_ids, {'state': state}, context=context)
        #assign picking in batch for all confirmed move that share the same details
        for key, move_ids in to_assign.items():
            self._picking_assign(cr, uid, move_ids, context=context)
        moves = self.browse(cr, uid, ids, context=context)
        self._push_apply(cr, uid, moves, context=context)
        return ids

    def force_assign(self, cr, uid, ids, context=None):
        """ Changes the state to assigned.
        @return: True
        """
        res = self.write(cr, uid, ids, {'state': 'assigned'}, context=context)
        self.check_recompute_pack_op(cr, uid, ids, context=context)
        return res

    def check_tracking(self, cr, uid, move, ops, context=None):
        """ Checks if serial number is assigned to stock move or not and raise an error if it had to. """
        if move.picking_id and (move.picking_id.picking_type_id.use_existing_lots or move.picking_id.picking_type_id.use_create_lots) and \
            move.product_id.tracking != 'none':
            if not (move.restrict_lot_id or (ops and (ops.product_id and ops.pack_lot_ids)) or (ops and not ops.product_id)):
                raise UserError(_('You need to provide a Lot/Serial Number for product %s') % move.product_id.name)

    def check_recompute_pack_op(self, cr, uid, ids, context=None):
        # Decide, per impacted picking, whether pack operations must be
        # (re)prepared now, or only flagged for recomputation when an operator
        # already started entering done quantities.
        pickings = list(set([x.picking_id for x in self.browse(cr, uid, ids, context=context) if x.picking_id]))
        pickings_partial = []
        pickings_write = []
        pick_obj = self.pool['stock.picking']
        for pick in pickings:
            if pick.state in ('waiting', 'confirmed'):
                #In case of 'all at once' delivery method it should not prepare pack operations
                continue
            # Check if someone was treating the picking already
            if not any([x.qty_done > 0 for x in pick.pack_operation_ids]):
                pickings_partial.append(pick.id)
            else:
                pickings_write.append(pick.id)
        if pickings_partial:
            pick_obj.do_prepare_partial(cr, uid, pickings_partial, context=context)
        if pickings_write:
            pick_obj.write(cr, uid, pickings_write, {'recompute_pack_op': True}, context=context)

    def action_assign(self, cr, uid, ids, no_prepare=False, context=None):
        """ Checks the product type and accordingly writes the state.
        """
        context = context or {}
        quant_obj = self.pool.get("stock.quant")
        uom_obj = self.pool['product.uom']
        to_assign_moves = set()
        main_domain = {}
        todo_moves = []
        operations = set()
        # Start from a clean slate: release existing reservations of the moves to (re)assign.
        self.do_unreserve(cr, uid, [x.id for x in self.browse(cr, uid, ids, context=context) if x.reserved_quant_ids and x.state in ['confirmed', 'waiting', 'assigned']], context=context)
        for move in self.browse(cr, uid, ids, context=context):
            if move.state not in ('confirmed', 'waiting', 'assigned'):
                continue
            if move.location_id.usage in ('supplier', 'inventory', 'production'):
                to_assign_moves.add(move.id)
                #in case the move is returned, we want to try to find quants before forcing the assignment
                if not move.origin_returned_move_id:
                    continue
            if move.product_id.type == 'consu':
                to_assign_moves.add(move.id)
                continue
            else:
                todo_moves.append(move)
                #we always search for yet unassigned quants
                main_domain[move.id] = [('reservation_id', '=', False), ('qty', '>', 0)]
                #if the move is preceeded, restrict the choice of quants in the ones moved previously in original move
                ancestors = self.find_move_ancestors(cr, uid, move, context=context)
                if move.state == 'waiting' and not ancestors:
                    #if the waiting move hasn't yet any ancestor (PO/MO not confirmed yet), don't find any quant available in stock
                    main_domain[move.id] += [('id', '=', False)]
                elif ancestors:
                    main_domain[move.id] += [('history_ids', 'in', ancestors)]
                #if the move is returned from another, restrict the choice of quants to the ones that follow the returned move
                if move.origin_returned_move_id:
                    main_domain[move.id] += [('history_ids', 'in', move.origin_returned_move_id.id)]
                for link in move.linked_move_operation_ids:
                    operations.add(link.operation_id)
        # Check all ops and sort them: we want to process first the packages, then operations with lot then the rest
        operations = list(operations)
        operations.sort(key=lambda x: ((x.package_id and not x.product_id) and -4 or 0) + (x.package_id and -2 or 0) + (x.pack_lot_ids and -1 or 0))
        for ops in operations:
#first try to find quants based on specific domains given by linked operations for the case where we want to rereserve according to existing pack operations if not (ops.product_id and ops.pack_lot_ids): for record in ops.linked_move_operation_ids: move = record.move_id if move.id in main_domain: qty = record.qty domain = main_domain[move.id] if qty: quants = quant_obj.quants_get_preferred_domain(cr, uid, qty, move, ops=ops, domain=domain, preferred_domain_list=[], context=context) quant_obj.quants_reserve(cr, uid, quants, move, record, context=context) else: lot_qty = {} rounding = ops.product_id.uom_id.rounding for pack_lot in ops.pack_lot_ids: lot_qty[pack_lot.lot_id.id] = uom_obj._compute_qty(cr, uid, ops.product_uom_id.id, pack_lot.qty, ops.product_id.uom_id.id) for record in ops.linked_move_operation_ids: move_qty = record.qty move = record.move_id domain = main_domain[move.id] for lot in lot_qty: if float_compare(lot_qty[lot], 0, precision_rounding=rounding) > 0 and float_compare(move_qty, 0, precision_rounding=rounding) > 0: qty = min(lot_qty[lot], move_qty) quants = quant_obj.quants_get_preferred_domain(cr, uid, qty, move, ops=ops, lot_id=lot, domain=domain, preferred_domain_list=[], context=context) quant_obj.quants_reserve(cr, uid, quants, move, record, context=context) lot_qty[lot] -= qty move_qty -= qty for move in todo_moves: #then if the move isn't totally assigned, try to find quants without any specific domain if move.state != 'assigned': qty_already_assigned = move.reserved_availability qty = move.product_qty - qty_already_assigned quants = quant_obj.quants_get_preferred_domain(cr, uid, qty, move, domain=main_domain[move.id], preferred_domain_list=[], context=context) quant_obj.quants_reserve(cr, uid, quants, move, context=context) #force assignation of consumable products and incoming from supplier/inventory/production # Do not take force_assign as it would create pack operations if to_assign_moves: self.write(cr, uid, list(to_assign_moves), 
                       {'state': 'assigned'}, context=context)
        if not no_prepare:
            self.check_recompute_pack_op(cr, uid, ids, context=context)

    def action_cancel(self, cr, uid, ids, context=None):
        """ Cancels the moves and if all moves are cancelled it cancels the picking.
        @return: True
        """
        procurement_obj = self.pool.get('procurement.order')
        context = context or {}
        procs_to_check = set()
        for move in self.browse(cr, uid, ids, context=context):
            if move.state == 'done':
                raise UserError(_('You cannot cancel a stock move that has been set to \'Done\'.'))
            if move.reserved_quant_ids:
                self.pool.get("stock.quant").quants_unreserve(cr, uid, move, context=context)
            if context.get('cancel_procurement'):
                # Cancellation requested by the procurement itself: cancel the
                # procurements depending on this move, if propagation is on.
                if move.propagate:
                    procurement_ids = procurement_obj.search(cr, uid, [('move_dest_id', '=', move.id)], context=context)
                    procurement_obj.cancel(cr, uid, procurement_ids, context=context)
            else:
                if move.move_dest_id:
                    if move.propagate:
                        self.action_cancel(cr, uid, [move.move_dest_id.id], context=context)
                    elif move.move_dest_id.state == 'waiting':
                        #If waiting, the chain will be broken and we are not sure if we can still wait for it (=> could take from stock instead)
                        self.write(cr, uid, [move.move_dest_id.id], {'state': 'confirmed'}, context=context)
                if move.procurement_id:
                    # Does the same as procurement check, only eliminating a refresh
                    procs_to_check.add(move.procurement_id.id)
        res = self.write(cr, uid, ids, {'state': 'cancel', 'move_dest_id': False}, context=context)
        if procs_to_check:
            procurement_obj.check(cr, uid, list(procs_to_check), context=context)
        return res

    def _check_package_from_moves(self, cr, uid, ids, context=None):
        # Verify location constraints of every package touched by the moves' quants.
        pack_obj = self.pool.get("stock.quant.package")
        packs = set()
        for move in self.browse(cr, uid, ids, context=context):
            packs |= set([q.package_id for q in move.quant_ids if q.package_id and q.qty > 0])
        return pack_obj._check_location_constraint(cr, uid, list(packs), context=context)

    def find_move_ancestors(self, cr, uid, move, context=None):
        '''Find the first level ancestors of
        given move
        '''
        ancestors = []
        move2 = move
        while move2:
            ancestors += [x.id for x in move2.move_orig_ids]
            #loop on the split_from to find the ancestor of split moves only if the move has not direct ancestor (priority goes to them)
            move2 = not move2.move_orig_ids and move2.split_from or False
        return ancestors

    @api.cr_uid_ids_context
    def recalculate_move_state(self, cr, uid, move_ids, context=None):
        '''Recompute the state of moves given because their reserved quants were used to fulfill another operation'''
        for move in self.browse(cr, uid, move_ids, context=context):
            vals = {}
            reserved_quant_ids = move.reserved_quant_ids
            if len(reserved_quant_ids) > 0 and not move.partially_available:
                vals['partially_available'] = True
            if len(reserved_quant_ids) == 0 and move.partially_available:
                vals['partially_available'] = False
            if move.state == 'assigned':
                # Reservation may have been stolen: fall back to waiting/confirmed.
                if self.find_move_ancestors(cr, uid, move, context=context):
                    vals['state'] = 'waiting'
                else:
                    vals['state'] = 'confirmed'
            if vals:
                self.write(cr, uid, [move.id], vals, context=context)

    def _move_quants_by_lot(self, cr, uid, ops, lot_qty, quants_taken, false_quants, lot_move_qty, quant_dest_package_id, context=None):
        """
        This function is used to process all the pack operation lots of a pack operation
        For every move:
            First, we check the quants with lot already reserved (and those are already subtracted from the lots to do)
            Then go through all the lots to process:
                Add reserved false lots lot by lot
                Check if there are not reserved quants or reserved elsewhere with that lot or without lot (with the traditional method)
        """
        quant_obj = self.pool['stock.quant']
        fallback_domain = [('reservation_id', '=', False)]
        fallback_domain2 = ['&', ('reservation_id', 'not in', [x for x in lot_move_qty.keys()]), ('reservation_id', '!=', False)]
        preferred_domain_list = [fallback_domain] + [fallback_domain2]
        rounding = ops.product_id.uom_id.rounding
        for move in lot_move_qty:
            move_quants_dict = {}
            move_rec = self.pool['stock.move'].browse(cr, uid, move,
                                                      context=context)
            # Assign quants already reserved with lot to the correct
            for quant in quants_taken:
                move_quants_dict.setdefault(quant[0].lot_id.id, [])
                move_quants_dict[quant[0].lot_id.id] += [quant]
            false_quants_move = [x for x in false_quants if x[0].reservation_id.id == move]
            for lot in lot_qty:
                move_quants_dict.setdefault(lot, [])
                redo_false_quants = False
                # Take remaining reserved quants with no lot first
                # (This will be used mainly when incoming had no lot and you do outgoing with)
                while false_quants_move and float_compare(lot_qty[lot], 0, precision_rounding=rounding) > 0 and float_compare(lot_move_qty[move], 0, precision_rounding=rounding) > 0:
                    qty_min = min(lot_qty[lot], lot_move_qty[move])
                    if false_quants_move[0].qty > qty_min:
                        # Quant bigger than needed: take a partial slice, keep it
                        # in the list and remember to re-read remaining quants.
                        move_quants_dict[lot] += [(false_quants_move[0], qty_min)]
                        qty = qty_min
                        redo_false_quants = True
                    else:
                        qty = false_quants_move[0].qty
                        move_quants_dict[lot] += [(false_quants_move[0], qty)]
                        false_quants_move.pop(0)
                    lot_qty[lot] -= qty
                    lot_move_qty[move] -= qty
                # Search other with first matching lots and then without lots
                if float_compare(lot_move_qty[move], 0, precision_rounding=rounding) > 0 and float_compare(lot_qty[lot], 0, precision_rounding=rounding) > 0:
                    # Search if we can find quants with that lot
                    domain = [('qty', '>', 0)]
                    qty = min(lot_qty[lot], lot_move_qty[move])
                    quants = quant_obj.quants_get_preferred_domain(cr, uid, qty, move_rec, ops=ops, lot_id=lot, domain=domain, preferred_domain_list=preferred_domain_list, context=context)
                    move_quants_dict[lot] += quants
                    lot_qty[lot] -= qty
                    lot_move_qty[move] -= qty
                #Move all the quants related to that lot/move
                if move_quants_dict[lot]:
                    quant_obj.quants_move(cr, uid, move_quants_dict[lot], move_rec, ops.location_dest_id, location_from=ops.location_id, lot_id=lot, owner_id=ops.owner_id.id, src_package_id=ops.package_id.id, dest_package_id=quant_dest_package_id, context=context)
                    if redo_false_quants:
                        move_rec = self.pool['stock.move'].browse(cr, uid, move, context=context)
                        false_quants_move = [x for x in move_rec.reserved_quant_ids if (not x.lot_id) and (x.owner_id.id == ops.owner_id.id) \
                                            and (x.location_id.id == ops.location_id.id) and (x.package_id.id != ops.package_id.id)]

    def action_done(self, cr, uid, ids, context=None):
        """ Process completely the moves given as ids and if all moves are done, it will finish the picking. """
        context = context or {}
        picking_obj = self.pool.get("stock.picking")
        quant_obj = self.pool.get("stock.quant")
        uom_obj = self.pool.get("product.uom")
        # Draft moves must be confirmed first.
        todo = [move.id for move in self.browse(cr, uid, ids, context=context) if move.state == "draft"]
        if todo:
            ids = self.action_confirm(cr, uid, todo, context=context)
        pickings = set()
        procurement_ids = set()
        #Search operations that are linked to the moves
        operations = set()
        move_qty = {}
        for move in self.browse(cr, uid, ids, context=context):
            move_qty[move.id] = move.product_qty
            for link in move.linked_move_operation_ids:
                operations.add(link.operation_id)
        #Sort operations according to entire packages first, then package + lot, package only, lot only
        operations = list(operations)
        operations.sort(key=lambda x: ((x.package_id and not x.product_id) and -4 or 0) + (x.package_id and -2 or 0) + (x.pack_lot_ids and -1 or 0))
        for ops in operations:
            if ops.picking_id:
                pickings.add(ops.picking_id.id)
            entire_pack = False
            if ops.product_id:
                #If a product is given, the result is always put immediately in the result package (if it is False, they are without package)
                quant_dest_package_id = ops.result_package_id.id
            else:
                # When a pack is moved entirely, the quants should not be written anything for the destination package
                quant_dest_package_id = False
                entire_pack = True
            lot_qty = {}
            tot_qty = 0.0
            # Convert pack-lot quantities to the product's reference UoM and
            # check that the total matches the operation's quantity.
            for pack_lot in ops.pack_lot_ids:
                qty = uom_obj._compute_qty(cr, uid, ops.product_uom_id.id, pack_lot.qty, ops.product_id.uom_id.id)
                lot_qty[pack_lot.lot_id.id] = qty
                tot_qty += pack_lot.qty
            if ops.pack_lot_ids and ops.product_id and float_compare(tot_qty, ops.product_qty,
                                                                     precision_rounding=ops.product_uom_id.rounding) != 0.0:
                raise UserError(_('You have a difference between the quantity on the operation and the quantities specified for the lots. '))
            quants_taken = []
            false_quants = []
            lot_move_qty = {}
            #Group links by move first
            move_qty_ops = {}
            for record in ops.linked_move_operation_ids:
                move = record.move_id
                if not move_qty_ops.get(move):
                    move_qty_ops[move] = record.qty
                else:
                    move_qty_ops[move] += record.qty
            #Process every move only once for every pack operation
            for move in move_qty_ops:
                main_domain = [('qty', '>', 0)]
                self.check_tracking(cr, uid, move, ops, context=context)
                preferred_domain = [('reservation_id', '=', move.id)]
                fallback_domain = [('reservation_id', '=', False)]
                fallback_domain2 = ['&', ('reservation_id', '!=', move.id), ('reservation_id', '!=', False)]
                if not ops.pack_lot_ids:
                    # No lots on the operation: move the quants directly,
                    # preferring those reserved for this move.
                    preferred_domain_list = [preferred_domain] + [fallback_domain] + [fallback_domain2]
                    quants = quant_obj.quants_get_preferred_domain(cr, uid, move_qty_ops[move], move, ops=ops, domain=main_domain, preferred_domain_list=preferred_domain_list, context=context)
                    quant_obj.quants_move(cr, uid, quants, move, ops.location_dest_id, location_from=ops.location_id, lot_id=False, owner_id=ops.owner_id.id, src_package_id=ops.package_id.id, dest_package_id=quant_dest_package_id, entire_pack=entire_pack, context=context)
                else:
                    # Check what you can do with reserved quants already
                    qty_on_link = move_qty_ops[move]
                    rounding = ops.product_id.uom_id.rounding
                    for reserved_quant in move.reserved_quant_ids:
                        if (reserved_quant.owner_id.id != ops.owner_id.id) or (reserved_quant.location_id.id != ops.location_id.id) or \
                                (reserved_quant.package_id.id != ops.package_id.id):
                            continue
                        if not reserved_quant.lot_id:
                            false_quants += [reserved_quant]
                        elif float_compare(lot_qty.get(reserved_quant.lot_id.id, 0), 0, precision_rounding=rounding) > 0:
                            if float_compare(lot_qty[reserved_quant.lot_id.id], reserved_quant.qty, precision_rounding=rounding) >= 0:
lot_qty[reserved_quant.lot_id.id] -= reserved_quant.qty quants_taken += [(reserved_quant, reserved_quant.qty)] qty_on_link -= reserved_quant.qty else: quants_taken += [(reserved_quant, lot_qty[reserved_quant.lot_id.id])] lot_qty[reserved_quant.lot_id.id] = 0 qty_on_link -= lot_qty[reserved_quant.lot_id.id] lot_move_qty[move.id] = qty_on_link if not move_qty.get(move.id): raise UserError(_("The roundings of your Unit of Measures %s on the move vs. %s on the product don't allow to do these operations or you are not transferring the picking at once. ") % (move.product_uom.name, move.product_id.uom_id.name)) move_qty[move.id] -= move_qty_ops[move] #Handle lots separately if ops.pack_lot_ids: self._move_quants_by_lot(cr, uid, ops, lot_qty, quants_taken, false_quants, lot_move_qty, quant_dest_package_id, context=context) # Handle pack in pack if not ops.product_id and ops.package_id and ops.result_package_id.id != ops.package_id.parent_id.id: self.pool.get('stock.quant.package').write(cr, SUPERUSER_ID, [ops.package_id.id], {'parent_id': ops.result_package_id.id}, context=context) #Check for remaining qtys and unreserve/check move_dest_id in move_dest_ids = set() for move in self.browse(cr, uid, ids, context=context): move_qty_cmp = float_compare(move_qty[move.id], 0, precision_rounding=move.product_id.uom_id.rounding) if move_qty_cmp > 0: # (=In case no pack operations in picking) main_domain = [('qty', '>', 0)] preferred_domain = [('reservation_id', '=', move.id)] fallback_domain = [('reservation_id', '=', False)] fallback_domain2 = ['&', ('reservation_id', '!=', move.id), ('reservation_id', '!=', False)] preferred_domain_list = [preferred_domain] + [fallback_domain] + [fallback_domain2] self.check_tracking(cr, uid, move, False, context=context) qty = move_qty[move.id] quants = quant_obj.quants_get_preferred_domain(cr, uid, qty, move, domain=main_domain, preferred_domain_list=preferred_domain_list, context=context) quant_obj.quants_move(cr, uid, quants, move, 
move.location_dest_id, lot_id=move.restrict_lot_id.id, owner_id=move.restrict_partner_id.id, context=context) # If the move has a destination, add it to the list to reserve if move.move_dest_id and move.move_dest_id.state in ('waiting', 'confirmed'): move_dest_ids.add(move.move_dest_id.id) if move.procurement_id: procurement_ids.add(move.procurement_id.id) #unreserve the quants and make them available for other operations/moves quant_obj.quants_unreserve(cr, uid, move, context=context) # Check the packages have been placed in the correct locations self._check_package_from_moves(cr, uid, ids, context=context) #set the move as done self.write(cr, uid, ids, {'state': 'done', 'date': time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)}, context=context) self.pool.get('procurement.order').check(cr, uid, list(procurement_ids), context=context) #assign destination moves if move_dest_ids: self.action_assign(cr, uid, list(move_dest_ids), context=context) #check picking state to set the date_done is needed done_picking = [] for picking in picking_obj.browse(cr, uid, list(pickings), context=context): if picking.state == 'done' and not picking.date_done: done_picking.append(picking.id) if done_picking: picking_obj.write(cr, uid, done_picking, {'date_done': time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)}, context=context) return True def unlink(self, cr, uid, ids, context=None): context = context or {} for move in self.browse(cr, uid, ids, context=context): if move.state not in ('draft', 'cancel'): raise UserError(_('You can only delete draft moves.')) return super(stock_move, self).unlink(cr, uid, ids, context=context) def action_scrap(self, cr, uid, ids, quantity, location_id, restrict_lot_id=False, restrict_partner_id=False, context=None): """ Move the scrap/damaged product into scrap location @param cr: the database cursor @param uid: the user id @param ids: ids of stock move object to be scrapped @param quantity : specify scrap qty @param location_id : specify scrap location 
        @param context: context arguments
        @return: Scraped lines
        """
        quant_obj = self.pool.get("stock.quant")
        #quantity should be given in MOVE UOM
        if quantity <= 0:
            raise UserError(_('Please provide a positive quantity to scrap.'))
        res = []
        for move in self.browse(cr, uid, ids, context=context):
            # For a done move we scrap out of its destination, otherwise out of its source.
            source_location = move.location_id
            if move.state == 'done':
                source_location = move.location_dest_id
            #Previously used to prevent scraping from virtual location but not necessary anymore
            #if source_location.usage != 'internal':
                #restrict to scrap from a virtual location because it's meaningless and it may introduce errors in stock ('creating' new products from nowhere)
                #raise UserError(_('Forbidden operation: it is not allowed to scrap products from a virtual location.'))
            move_qty = move.product_qty
            default_val = {
                'location_id': source_location.id,
                'product_uom_qty': quantity,
                'state': move.state,
                'scrapped': True,
                'location_dest_id': location_id,
                'restrict_lot_id': restrict_lot_id,
                'restrict_partner_id': restrict_partner_id,
            }
            # The scrap is a copy of the original move, flagged as scrapped.
            new_move = self.copy(cr, uid, move.id, default_val)
            res += [new_move]
            product_obj = self.pool.get('product.product')
            for product in product_obj.browse(cr, uid, [move.product_id.id], context=context):
                if move.picking_id:
                    uom = product.uom_id.name if product.uom_id else ''
                    message = _("%s %s %s has been <b>moved to</b> scrap.") % (quantity, uom, product.name)
                    move.picking_id.message_post(body=message)

            # We "flag" the quant from which we want to scrap the products. To do so:
            #    - we select the quants related to the move we scrap from
            #    - we reserve the quants with the scrapped move
            # See self.action_done, et particularly how is defined the "preferred_domain" for clarification
            scrap_move = self.browse(cr, uid, new_move, context=context)
            if move.state == 'done' and scrap_move.location_id.usage not in ('supplier', 'inventory', 'production'):
                domain = [('qty', '>', 0), ('history_ids', 'in', [move.id])]
                # We use scrap_move data since a reservation makes sense for a move not already done
                quants = quant_obj.quants_get_preferred_domain(cr, uid, quantity, scrap_move, domain=domain, context=context)
                quant_obj.quants_reserve(cr, uid, quants, scrap_move, context=context)
        self.action_done(cr, uid, res, context=context)
        return res

    def split(self, cr, uid, move, qty, restrict_lot_id=False, restrict_partner_id=False, context=None):
        """ Splits qty from move move into a new move
        :param move: browse record
        :param qty: float. quantity to split (given in product UoM)
        :param restrict_lot_id: optional production lot that can be given in order to force the new move to restrict its choice of quants to this lot.
        :param restrict_partner_id: optional partner that can be given in order to force the new move to restrict its choice of quants to the ones belonging to this partner.
        :param context: dictionay. can contains the special key 'source_location_id' in order to force the source location when copying the move
        returns the ID of the backorder move created
        """
        if move.state in ('done', 'cancel'):
            raise UserError(_('You cannot split a move done'))
        if move.state == 'draft':
            #we restrict the split of a draft move because if not confirmed yet, it may be replaced by several other moves in
            #case of phantom bom (with mrp module). And we don't want to deal with this complexity by copying the product that will explode.
            raise UserError(_('You cannot split a draft move. It needs to be confirmed first.'))

        # Nothing to split when the requested qty covers the whole move (or is zero).
        if move.product_qty <= qty or qty == 0:
            return move.id

        uom_obj = self.pool.get('product.uom')
        context = context or {}

        #HALF-UP rounding as only rounding errors will be because of propagation of error from default UoM
        uom_qty = uom_obj._compute_qty_obj(cr, uid, move.product_id.uom_id, qty, move.product_uom, rounding_method='HALF-UP', context=context)
        defaults = {
            'product_uom_qty': uom_qty,
            'procure_method': 'make_to_stock',
            'restrict_lot_id': restrict_lot_id,
            'split_from': move.id,
            'procurement_id': move.procurement_id.id,
            'move_dest_id': move.move_dest_id.id,
            'origin_returned_move_id': move.origin_returned_move_id.id,
        }
        if restrict_partner_id:
            defaults['restrict_partner_id'] = restrict_partner_id

        if context.get('source_location_id'):
            defaults['location_id'] = context['source_location_id']
        new_move = self.copy(cr, uid, move.id, defaults, context=context)

        ctx = context.copy()
        ctx['do_not_propagate'] = True
        # Shrink the original move by the split quantity (in the move's UoM).
        self.write(cr, uid, [move.id], {
            'product_uom_qty': move.product_uom_qty - uom_qty,
        }, context=ctx)

        if move.move_dest_id and move.propagate and move.move_dest_id.state not in ('done', 'cancel'):
            # Propagate the split to the chained destination move.
            new_move_prop = self.split(cr, uid, move.move_dest_id, qty, context=context)
            self.write(cr, uid, [new_move], {'move_dest_id': new_move_prop}, context=context)
        #returning the first element of list returned by action_confirm is ok because we checked it wouldn't be exploded (and
        #thus the result of action_confirm should always be a list of 1 element length)
        return self.action_confirm(cr, uid, [new_move], context=context)[0]

    def get_code_from_locs(self, cr, uid, move, location_id=False, location_dest_id=False, context=None):
        """ Returns the code the picking type should have.
        This can easily be used to check if a move is internal or not
        move, location_id and location_dest_id are browse records
        """
        code = 'internal'
        src_loc = location_id or move.location_id
        dest_loc = location_dest_id or move.location_dest_id
        # internal -> external = outgoing; external -> internal = incoming.
        if src_loc.usage == 'internal' and dest_loc.usage != 'internal':
            code = 'outgoing'
        if src_loc.usage != 'internal' and dest_loc.usage == 'internal':
            code = 'incoming'
        return code

    def show_picking(self, cr, uid, ids, context=None):
        """ Returns an action opening the form view of the picking the first
        given move belongs to (None implicitly when it has no picking). """
        assert len(ids) > 0
        picking_id = self.browse(cr, uid, ids[0], context=context).picking_id.id
        if picking_id:
            data_obj = self.pool['ir.model.data']
            view = data_obj.xmlid_to_res_id(cr, uid, 'stock.view_picking_form')
            return {
                'name': _('Transfer'),
                'type': 'ir.actions.act_window',
                'view_type': 'form',
                'view_mode': 'form',
                'res_model': 'stock.picking',
                'views': [(view, 'form')],
                'view_id': view,
                'target': 'new',
                'res_id': picking_id,
            }


class stock_inventory(osv.osv):
    _name = "stock.inventory"
    _description = "Inventory"

    def _get_move_ids_exist(self, cr, uid, ids, field_name, arg, context=None):
        # Function field getter: True when the inventory already created stock moves.
        res = {}
        for inv in self.browse(cr, uid, ids, context=context):
            res[inv.id] = False
            if inv.move_ids:
                res[inv.id] = True
        return res

    def _get_available_filters(self, cr, uid, context=None):
        """ This function will return the list of filter allowed according to the options checked
        in 'Settings\Warehouse'.

        :rtype: list of tuple
        """
        #default available choices
        res_filter = [('none', _('All products')), ('partial', _('Select products manually')), ('product', _('One product only'))]
        settings_obj = self.pool.get('stock.config.settings')
        config_ids = settings_obj.search(cr, uid, [], limit=1, order='id DESC', context=context)
        #If we don't have updated config until now, all fields are by default false and so should be not dipslayed
        if not config_ids:
            return res_filter

        # Extra filters depend on which tracking features are enabled in the settings.
        stock_settings = settings_obj.browse(cr, uid, config_ids[0], context=context)
        if stock_settings.group_stock_tracking_owner:
            res_filter.append(('owner', _('One owner only')))
            res_filter.append(('product_owner', _('One product for a specific owner')))
        if stock_settings.group_stock_production_lot:
            res_filter.append(('lot', _('One Lot/Serial Number')))
        if stock_settings.group_stock_tracking_lot:
            res_filter.append(('pack', _('A Pack')))
        return res_filter

    def _get_total_qty(self, cr, uid, ids, field_name, args, context=None):
        # Function field getter: sum of the checked quantities of all lines.
        res = {}
        for inv in self.browse(cr, uid, ids, context=context):
            res[inv.id] = sum([x.product_qty for x in inv.line_ids])
        return res

    INVENTORY_STATE_SELECTION = [
        ('draft', 'Draft'),
        ('cancel', 'Cancelled'),
        ('confirm', 'In Progress'),
        ('done', 'Validated'),
    ]

    _columns = {
        'name': fields.char('Inventory Reference', required=True, readonly=True, states={'draft': [('readonly', False)]}, help="Inventory Name."),
        'date': fields.datetime('Inventory Date', required=True, readonly=True, help="The date that will be used for the stock level check of the products and the validation of the stock move related to this inventory."),
        'line_ids': fields.one2many('stock.inventory.line', 'inventory_id', 'Inventories', readonly=False, states={'done': [('readonly', True)]}, help="Inventory Lines.", copy=True),
        'move_ids': fields.one2many('stock.move', 'inventory_id', 'Created Moves', help="Inventory Moves.", states={'done': [('readonly', True)]}),
        'state': fields.selection(INVENTORY_STATE_SELECTION, 'Status', readonly=True, select=True, copy=False),
        'company_id': fields.many2one('res.company', 'Company', required=True, select=True, readonly=True, states={'draft': [('readonly', False)]}),
        'location_id': fields.many2one('stock.location', 'Inventoried Location', required=True, readonly=True, states={'draft': [('readonly', False)]}),
        'product_id': fields.many2one('product.product', 'Inventoried Product', readonly=True, states={'draft': [('readonly', False)]}, help="Specify Product to focus your inventory on a particular Product."),
        'package_id': fields.many2one('stock.quant.package', 'Inventoried Pack', readonly=True, states={'draft': [('readonly', False)]}, help="Specify Pack to focus your inventory on a particular Pack."),
        'partner_id': fields.many2one('res.partner', 'Inventoried Owner', readonly=True, states={'draft': [('readonly', False)]}, help="Specify Owner to focus your inventory on a particular Owner."),
        'lot_id': fields.many2one('stock.production.lot', 'Inventoried Lot/Serial Number', readonly=True, states={'draft': [('readonly', False)]}, help="Specify Lot/Serial Number to focus your inventory on a particular Lot/Serial Number.", copy=False),
        # technical field for attrs in view
        'move_ids_exist': fields.function(_get_move_ids_exist, type='boolean', string='Has Stock Moves', help='Check the existance of stock moves linked to this inventory'),
        'filter': fields.selection(_get_available_filters, 'Inventory of', required=True,
                                   help="If you do an entire inventory, you can choose 'All Products' and it will prefill the inventory with the current stock. If you only do some products "\
                                        "(e.g. Cycle Counting) you can choose 'Manual Selection of Products' and the system won't propose anything. You can also let the "\
                                        "system propose for a single product / lot /... "),
        'total_qty': fields.function(_get_total_qty, type="float"),
    }

    def _default_stock_location(self, cr, uid, context=None):
        # Fall back to False when the demo/data warehouse does not exist.
        try:
            warehouse = self.pool.get('ir.model.data').get_object(cr, uid, 'stock', 'warehouse0')
            return warehouse.lot_stock_id.id
        except:
            return False

    _defaults = {
        'date': fields.datetime.now,
        'state': 'draft',
        'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.inventory', context=c),
        'location_id': _default_stock_location,
        'filter': 'none',
    }

    def reset_real_qty(self, cr, uid, ids, context=None):
        """ Sets the checked quantity of every line of the first inventory to zero. """
        inventory = self.browse(cr, uid, ids[0], context=context)
        line_ids = [line.id for line in inventory.line_ids]
        self.pool.get('stock.inventory.line').write(cr, uid, line_ids, {'product_qty': 0})
        return True

    def action_done(self, cr, uid, ids, context=None):
        """ Finish the inventory
        @return: True
        """
        for inv in self.browse(cr, uid, ids, context=context):
            # Negative quantities are only allowed when they match the theoretical qty.
            for inventory_line in inv.line_ids:
                if inventory_line.product_qty < 0 and inventory_line.product_qty != inventory_line.theoretical_qty:
                    raise UserError(_('You cannot set a negative product quantity in an inventory line:\n\t%s - qty: %s') % (inventory_line.product_id.name, inventory_line.product_qty))
            self.action_check(cr, uid, [inv.id], context=context)
            self.write(cr, uid, [inv.id], {'state': 'done'}, context=context)
            self.post_inventory(cr, uid, inv, context=context)
        return True

    def post_inventory(self, cr, uid, inv, context=None):
        #The inventory is posted as a single step which means quants cannot be moved from an internal location to another using an inventory
        #as they will be moved to inventory loss, and other quants will be created to the encoded quant location. This is a normal behavior
        #as quants cannot be reuse from inventory location (users can still manually move the products before/after the inventory if they want).
        move_obj = self.pool.get('stock.move')
        # Only process the inventory moves that are not already done.
        move_obj.action_done(cr, uid, [x.id for x in inv.move_ids if x.state != 'done'], context=context)

    def action_check(self, cr, uid, ids, context=None):
        """ Checks the inventory and computes the stock move to do
        @return: True
        """
        inventory_line_obj = self.pool.get('stock.inventory.line')
        stock_move_obj = self.pool.get('stock.move')
        for inventory in self.browse(cr, uid, ids, context=context):
            #first remove the existing stock moves linked to this inventory
            move_ids = [move.id for move in inventory.move_ids]
            stock_move_obj.unlink(cr, uid, move_ids, context=context)
            for line in inventory.line_ids:
                #compare the checked quantities on inventory lines to the theorical one
                stock_move = inventory_line_obj._resolve_inventory_line(cr, uid, line, context=context)

    def action_cancel_draft(self, cr, uid, ids, context=None):
        """ Cancels the stock move and change inventory state to draft.
        @return: True
        """
        for inv in self.browse(cr, uid, ids, context=context):
            # one2many command (5,) unlinks every inventory line.
            self.write(cr, uid, [inv.id], {'line_ids': [(5,)]}, context=context)
            self.pool.get('stock.move').action_cancel(cr, uid, [x.id for x in inv.move_ids], context=context)
            self.write(cr, uid, [inv.id], {'state': 'draft'}, context=context)
        return True

    def action_cancel_inventory(self, cr, uid, ids, context=None):
        # Kept as an alias; simply delegates to action_cancel_draft.
        self.action_cancel_draft(cr, uid, ids, context=context)

    def prepare_inventory(self, cr, uid, ids, context=None):
        """ Moves the inventory to the 'confirm' state, prefilling its lines
        from the current quants unless lines already exist or the filter is 'partial'. """
        inventory_line_obj = self.pool.get('stock.inventory.line')
        for inventory in self.browse(cr, uid, ids, context=context):
            # If there are inventory lines already (e.g. from import), respect those and set their theoretical qty
            line_ids = [line.id for line in inventory.line_ids]
            if not line_ids and inventory.filter != 'partial':
                #compute the inventory lines and create them
                vals = self._get_inventory_lines(cr, uid, inventory, context=context)
                for product_line in vals:
                    inventory_line_obj.create(cr, uid, product_line, context=context)
        return self.write(cr, uid, ids, {'state': 'confirm', 'date': time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)})

    def _get_inventory_lines(self, cr, uid, inventory, context=None):
        """ Builds the value dicts for the inventory lines from the quants found
        in the inventoried location (and its children), restricted by the
        owner / lot / product / package set on the inventory. """
        location_obj = self.pool.get('stock.location')
        product_obj = self.pool.get('product.product')
        location_ids = location_obj.search(cr, uid, [('id', 'child_of', [inventory.location_id.id])], context=context)
        # The SQL fragments below only receive ids gathered from browse records,
        # and values are passed through psycopg2 parameters (args).
        domain = ' location_id in %s'
        args = (tuple(location_ids),)
        if inventory.partner_id:
            domain += ' and owner_id = %s'
            args += (inventory.partner_id.id,)
        if inventory.lot_id:
            domain += ' and lot_id = %s'
            args += (inventory.lot_id.id,)
        if inventory.product_id:
            domain += ' and product_id = %s'
            args += (inventory.product_id.id,)
        if inventory.package_id:
            domain += ' and package_id = %s'
            args += (inventory.package_id.id,)

        cr.execute('''
           SELECT product_id, sum(qty) as product_qty, location_id, lot_id as prod_lot_id, package_id, owner_id as partner_id
           FROM stock_quant WHERE''' + domain + '''
           GROUP BY product_id, location_id, lot_id, package_id, partner_id
        ''', args)
        vals = []
        for product_line in cr.dictfetchall():
            #replace the None the dictionary by False, because falsy values are tested later on
            for key, value in product_line.items():
                if not value:
                    product_line[key] = False
            product_line['inventory_id'] = inventory.id
            product_line['theoretical_qty'] = product_line['product_qty']
            if product_line['product_id']:
                product = product_obj.browse(cr, uid, product_line['product_id'], context=context)
                product_line['product_uom_id'] = product.uom_id.id
            vals.append(product_line)
        return vals

    def _check_filter_product(self, cr, uid, ids, context=None):
        # Constraint: the restriction fields that are set must match the chosen filter.
        for inventory in self.browse(cr, uid, ids, context=context):
            if inventory.filter == 'none' and inventory.product_id and inventory.location_id and inventory.lot_id:
                return True
            if inventory.filter not in ('product', 'product_owner') and inventory.product_id:
                return False
            if inventory.filter != 'lot' and inventory.lot_id:
                return False
            if inventory.filter not in ('owner', 'product_owner') and inventory.partner_id:
                return False
            if inventory.filter != 'pack' and inventory.package_id:
                return False
        return True

    def onchange_filter(self, cr, uid, ids, filter, context=None):
        # Clear the restriction fields that are irrelevant for the new filter value.
        to_clean = {'value': {}}
        if filter not in ('product', 'product_owner'):
            to_clean['value']['product_id'] = False
        if filter != 'lot':
            to_clean['value']['lot_id'] = False
        if filter not in ('owner', 'product_owner'):
            to_clean['value']['partner_id'] = False
        if filter != 'pack':
            to_clean['value']['package_id'] = False
        return to_clean

    _constraints = [
        (_check_filter_product, 'The selected inventory options are not coherent.', ['filter', 'product_id', 'lot_id', 'partner_id', 'package_id']),
    ]


class stock_inventory_line(osv.osv):
    _name = "stock.inventory.line"
    _description = "Inventory Line"
    _order = "inventory_id, location_name, product_code, product_name, prodlot_name"

    def _get_product_name_change(self, cr, uid, ids, context=None):
        # store trigger: line ids to recompute when a product name/code changes.
        return self.pool.get('stock.inventory.line').search(cr, uid, [('product_id', 'in', ids)], context=context)

    def _get_location_change(self, cr, uid, ids, context=None):
        # store trigger: line ids to recompute when a location changes.
        return self.pool.get('stock.inventory.line').search(cr, uid, [('location_id', 'in', ids)], context=context)

    def _get_prodlot_change(self, cr, uid, ids, context=None):
        # store trigger: line ids to recompute when a production lot changes.
        return self.pool.get('stock.inventory.line').search(cr, uid, [('prod_lot_id', 'in', ids)], context=context)

    def _get_theoretical_qty(self, cr, uid, ids, name, args, context=None):
        # Function field getter: quantity currently in stock for the line's
        # (location, lot, product, owner, package), expressed in the line UoM.
        res = {}
        quant_obj = self.pool["stock.quant"]
        uom_obj = self.pool["product.uom"]
        for line in self.browse(cr, uid, ids, context=context):
            quant_ids = self._get_quants(cr, uid, line,
                                         context=context)
            quants = quant_obj.browse(cr, uid, quant_ids, context=context)
            tot_qty = sum([x.qty for x in quants])
            # Quants are stored in the product UoM; convert to the line UoM when different.
            if line.product_uom_id and line.product_id.uom_id.id != line.product_uom_id.id:
                tot_qty = uom_obj._compute_qty_obj(cr, uid, line.product_id.uom_id, tot_qty, line.product_uom_id, context=context)
            res[line.id] = tot_qty
        return res

    _columns = {
        'inventory_id': fields.many2one('stock.inventory', 'Inventory', ondelete='cascade', select=True),
        'location_id': fields.many2one('stock.location', 'Location', required=True, select=True),
        'product_id': fields.many2one('product.product', 'Product', required=True, select=True),
        'package_id': fields.many2one('stock.quant.package', 'Pack', select=True),
        'product_uom_id': fields.many2one('product.uom', 'Product Unit of Measure', required=True),
        'product_qty': fields.float('Checked Quantity', digits_compute=dp.get_precision('Product Unit of Measure')),
        'company_id': fields.related('inventory_id', 'company_id', type='many2one', relation='res.company', string='Company', store=True, select=True, readonly=True),
        'prod_lot_id': fields.many2one('stock.production.lot', 'Serial Number', domain="[('product_id','=',product_id)]"),
        'state': fields.related('inventory_id', 'state', type='char', string='Status', readonly=True),
        'theoretical_qty': fields.function(_get_theoretical_qty, type='float', digits_compute=dp.get_precision('Product Unit of Measure'),
                                           store={'stock.inventory.line': (lambda self, cr, uid, ids, c={}: ids, ['location_id', 'product_id', 'package_id', 'product_uom_id', 'company_id', 'prod_lot_id', 'partner_id'], 20),},
                                           readonly=True, string="Theoretical Quantity"),
        'partner_id': fields.many2one('res.partner', 'Owner'),
        # Denormalized name fields below exist only to support _order on related records.
        'product_name': fields.related('product_id', 'name', type='char', string='Product Name', store={
                                            'product.product': (_get_product_name_change, ['name', 'default_code'], 20),
                                            'stock.inventory.line': (lambda self, cr, uid, ids, c={}: ids, ['product_id'], 20),}),
        'product_code': fields.related('product_id', 'default_code', type='char', string='Product Code', store={
                                            'product.product': (_get_product_name_change, ['name', 'default_code'], 20),
                                            'stock.inventory.line': (lambda self, cr, uid, ids, c={}: ids, ['product_id'], 20),}),
        'location_name': fields.related('location_id', 'complete_name', type='char', string='Location Name', store={
                                            'stock.location': (_get_location_change, ['name', 'location_id', 'active'], 20),
                                            'stock.inventory.line': (lambda self, cr, uid, ids, c={}: ids, ['location_id'], 20),}),
        'prodlot_name': fields.related('prod_lot_id', 'name', type='char', string='Serial Number Name', store={
                                            'stock.production.lot': (_get_prodlot_change, ['name'], 20),
                                            'stock.inventory.line': (lambda self, cr, uid, ids, c={}: ids, ['prod_lot_id'], 20),}),
    }

    _defaults = {
        'product_qty': 0,
        'product_uom_id': lambda self, cr, uid, ctx=None: self.pool['ir.model.data'].get_object_reference(cr, uid, 'product', 'product_uom_unit')[1]
    }

    def create(self, cr, uid, values, context=None):
        """ Refuses a line that duplicates a (product, location, owner, package,
        lot) already counted by a confirmed inventory, and defaults the UoM
        from the product when not given. """
        product_obj = self.pool.get('product.product')
        dom = [('product_id', '=', values.get('product_id')), ('inventory_id.state', '=', 'confirm'),
               ('location_id', '=', values.get('location_id')), ('partner_id', '=', values.get('partner_id')),
               ('package_id', '=', values.get('package_id')), ('prod_lot_id', '=', values.get('prod_lot_id'))]
        res = self.search(cr, uid, dom, context=context)
        if res:
            location = self.pool['stock.location'].browse(cr, uid, values.get('location_id'), context=context)
            product = product_obj.browse(cr, uid, values.get('product_id'), context=context)
            raise UserError(_("You cannot have two inventory adjustements in state 'in Progess' with the same product(%s), same location(%s), same package, same owner and same lot. Please first validate the first inventory adjustement with this product before creating another one.") % (product.name, location.name))
        if 'product_id' in values and not 'product_uom_id' in values:
            values['product_uom_id'] = product_obj.browse(cr, uid, values.get('product_id'), context=context).uom_id.id
        return super(stock_inventory_line, self).create(cr, uid, values, context=context)

    def _get_quants(self, cr, uid, line, context=None):
        # Quants matching exactly this line's (company, location, lot, product, owner, package).
        quant_obj = self.pool["stock.quant"]
        dom = [('company_id', '=', line.company_id.id), ('location_id', '=', line.location_id.id), ('lot_id', '=', line.prod_lot_id.id),
               ('product_id','=', line.product_id.id), ('owner_id', '=', line.partner_id.id), ('package_id', '=', line.package_id.id)]
        quants = quant_obj.search(cr, uid, dom, context=context)
        return quants

    def onchange_createline(self, cr, uid, ids, location_id=False, product_id=False, uom_id=False, package_id=False, prod_lot_id=False, partner_id=False, company_id=False, context=None):
        """ Onchange for a new line: fixes the UoM from the product when the
        given one is incompatible, and precomputes the theoretical quantity. """
        quant_obj = self.pool["stock.quant"]
        uom_obj = self.pool["product.uom"]
        res = {'value': {}}
        # If no UoM already put the default UoM of the product
        if product_id:
            product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
            uom = self.pool['product.uom'].browse(cr, uid, uom_id, context=context)
            if product.uom_id.category_id.id != uom.category_id.id:
                res['value']['product_uom_id'] = product.uom_id.id
                res['domain'] = {'product_uom_id': [('category_id','=',product.uom_id.category_id.id)]}
                uom_id = product.uom_id.id
        # Calculate theoretical quantity by searching the quants as in quants_get
        if product_id and location_id:
            product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
            if not company_id:
                company_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id
            dom = [('company_id', '=', company_id), ('location_id', '=', location_id), ('lot_id', '=', prod_lot_id),
                   ('product_id','=', product_id), ('owner_id', '=', partner_id), ('package_id', '=', package_id)]
            quants = quant_obj.search(cr, uid, dom, context=context)
            th_qty = sum([x.qty for x in quant_obj.browse(cr, uid, quants, context=context)])
            if product_id and uom_id and product.uom_id.id != uom_id:
                th_qty = uom_obj._compute_qty(cr, uid, product.uom_id.id, th_qty, uom_id)
            res['value']['theoretical_qty'] = th_qty
            res['value']['product_qty'] = th_qty
        return res

    def _resolve_inventory_line(self, cr, uid, inventory_line, context=None):
        """ Creates the stock move correcting the difference between the
        theoretical and the checked quantity of the line; returns its id
        (None implicitly when there is no difference). """
        stock_move_obj = self.pool.get('stock.move')
        quant_obj = self.pool.get('stock.quant')
        diff = inventory_line.theoretical_qty - inventory_line.product_qty
        if not diff:
            return
        #each theorical_lines where difference between theoretical and checked quantities is not 0 is a line for which we need to create a stock move
        vals = {
            'name': _('INV:') + (inventory_line.inventory_id.name or ''),
            'product_id': inventory_line.product_id.id,
            'product_uom': inventory_line.product_uom_id.id,
            'date': inventory_line.inventory_id.date,
            'company_id': inventory_line.inventory_id.company_id.id,
            'inventory_id': inventory_line.inventory_id.id,
            'state': 'confirmed',
            'restrict_lot_id': inventory_line.prod_lot_id.id,
            'restrict_partner_id': inventory_line.partner_id.id,
        }
        inventory_location_id = inventory_line.product_id.property_stock_inventory.id
        if diff < 0:
            #found more than expected
            vals['location_id'] = inventory_location_id
            vals['location_dest_id'] = inventory_line.location_id.id
            vals['product_uom_qty'] = -diff
        else:
            #found less than expected
            vals['location_id'] = inventory_line.location_id.id
            vals['location_dest_id'] = inventory_location_id
            vals['product_uom_qty'] = diff
        move_id = stock_move_obj.create(cr, uid, vals, context=context)
        move = stock_move_obj.browse(cr, uid, move_id, context=context)
        if diff > 0:
            # Reserve the quants that will be consumed by the loss move.
            domain = [('qty', '>', 0.0), ('package_id', '=', inventory_line.package_id.id), ('lot_id', '=', inventory_line.prod_lot_id.id), ('location_id', '=', inventory_line.location_id.id)]
            preferred_domain_list = [[('reservation_id', '=', False)], [('reservation_id.inventory_id', '!=', inventory_line.inventory_id.id)]]
            quants = quant_obj.quants_get_preferred_domain(cr, uid, move.product_qty, move, domain=domain, preferred_domain_list=preferred_domain_list)
            quant_obj.quants_reserve(cr, uid, quants, move, context=context)
        elif inventory_line.package_id:
            # Surplus found inside a package: process the move right away and
            # put the created quants back into that package.
            stock_move_obj.action_done(cr, uid, move_id, context=context)
            quants = [x.id for x in move.quant_ids]
            quant_obj.write(cr, uid, quants, {'package_id': inventory_line.package_id.id}, context=context)
            res = quant_obj.search(cr, uid, [('qty', '<', 0.0), ('product_id', '=', move.product_id.id),
                                             ('location_id', '=', move.location_dest_id.id), ('package_id', '!=', False)], limit=1, context=context)
            if res:
                for quant in move.quant_ids:
                    if quant.location_id.id == move.location_dest_id.id: #To avoid we take a quant that was reconcile already
                        quant_obj._quant_reconcile_negative(cr, uid, quant, move, context=context)
        return move_id

    # Should be left out in next version
    def restrict_change(self, cr, uid, ids, theoretical_qty, context=None):
        return {}

    # Should be left out in next version
    def on_change_product_id(self, cr, uid, ids, product, uom, theoretical_qty, context=None):
        """ Changes UoM
        @param location_id: Location id
        @param product: Changed product_id
        @param uom: UoM product
        @return: Dictionary of changed values
        """
        if not product:
            return {'value': {'product_uom_id': False}}
        obj_product = self.pool.get('product.product').browse(cr, uid, product, context=context)
        return {'value': {'product_uom_id': uom or obj_product.uom_id.id}}


#----------------------------------------------------------
# Stock Warehouse
#----------------------------------------------------------
class stock_warehouse(osv.osv):
    _name = "stock.warehouse"
    _description = "Warehouse"

    _columns = {
        'name': fields.char('Warehouse Name', required=True, select=True),
        'company_id': fields.many2one('res.company', 'Company', required=True, readonly=True, select=True),
        'partner_id': fields.many2one('res.partner', 'Address'),
        # Root "view" location grouping every stock location of this warehouse.
        'view_location_id': fields.many2one('stock.location', 'View Location', required=True, domain=[('usage', '=', 'view')]),
        'lot_stock_id': fields.many2one('stock.location', 'Location Stock', domain=[('usage', '=', 'internal')], required=True),
        'code': fields.char('Short Name', size=5, required=True, help="Short name used to identify your warehouse"),
        'route_ids': fields.many2many('stock.location.route', 'stock_route_warehouse', 'warehouse_id', 'route_id', 'Routes', domain="[('warehouse_selectable', '=', True)]", help='Defaults routes through the warehouse'),
        # Flow configuration: the number of steps drives which sub-locations,
        # picking types and routes are active (see switch_location/change_route).
        'reception_steps': fields.selection([
            ('one_step', 'Receive goods directly in stock (1 step)'),
            ('two_steps', 'Unload in input location then go to stock (2 steps)'),
            ('three_steps', 'Unload in input location, go through a quality control before being admitted in stock (3 steps)')], 'Incoming Shipments',
            help="Default incoming route to follow", required=True),
        'delivery_steps': fields.selection([
            ('ship_only', 'Ship directly from stock (Ship only)'),
            ('pick_ship', 'Bring goods to output location before shipping (Pick + Ship)'),
            ('pick_pack_ship', 'Make packages into a dedicated location, then bring them to the output location for shipping (Pick + Pack + Ship)')], 'Outgoing Shippings',
            help="Default outgoing route to follow", required=True),
        # Technical sub-locations, (de)activated by switch_location().
        'wh_input_stock_loc_id': fields.many2one('stock.location', 'Input Location'),
        'wh_qc_stock_loc_id': fields.many2one('stock.location', 'Quality Control Location'),
        'wh_output_stock_loc_id': fields.many2one('stock.location', 'Output Location'),
        'wh_pack_stock_loc_id': fields.many2one('stock.location', 'Packing Location'),
        'mto_pull_id': fields.many2one('procurement.rule', 'MTO rule'),
        # Picking types created by create_sequences_and_picking_types().
        'pick_type_id': fields.many2one('stock.picking.type', 'Pick Type'),
        'pack_type_id': fields.many2one('stock.picking.type', 'Pack Type'),
        'out_type_id': fields.many2one('stock.picking.type', 'Out Type'),
        'in_type_id': fields.many2one('stock.picking.type', 'In Type'),
        'int_type_id': fields.many2one('stock.picking.type', 'Internal Type'),
        # Routes created by create_routes().
        'crossdock_route_id': fields.many2one('stock.location.route', 'Crossdock Route'),
        'reception_route_id': fields.many2one('stock.location.route', 'Receipt Route'),
        'delivery_route_id': fields.many2one('stock.location.route', 'Delivery Route'),
        'resupply_from_wh': fields.boolean('Resupply From Other Warehouses', help='Unused field'),
        'resupply_wh_ids': fields.many2many('stock.warehouse', 'stock_wh_resupply_table', 'supplied_wh_id', 'supplier_wh_id', 'Resupply Warehouses'),
        'resupply_route_ids': fields.one2many('stock.location.route', 'supplied_wh_id', 'Resupply Routes', help="Routes will be created for these resupply warehouses and you can select them on products and product categories"),
        'default_resupply_wh_id': fields.many2one('stock.warehouse', 'Default Resupply Warehouse', help="Goods will always be resupplied from this warehouse"),
    }

    def onchange_filter_default_resupply_wh_id(self, cr, uid, ids, default_resupply_wh_id, resupply_wh_ids, context=None):
        """Keep the default resupply warehouse selected in resupply_wh_ids.

        `resupply_wh_ids` arrives from the client as x2many command tuples,
        hence the resolve_2many_commands round-trip to plain ids.
        """
        resupply_wh_ids = set([x['id'] for x in (self.resolve_2many_commands(cr, uid, 'resupply_wh_ids', resupply_wh_ids, ['id']))])
        if default_resupply_wh_id:  # If we are removing the default resupply, we don't have default_resupply_wh_id
            resupply_wh_ids.add(default_resupply_wh_id)
        resupply_wh_ids = list(resupply_wh_ids)
        return {'value': {'resupply_wh_ids': resupply_wh_ids}}

    def _get_external_transit_location(self, cr, uid, warehouse, context=None):
        ''' returns browse record of inter company transit location, if found'''
        data_obj = self.pool.get('ir.model.data')
        location_obj = self.pool.get('stock.location')
        try:
            inter_wh_loc = data_obj.get_object_reference(cr, uid, 'stock', 'stock_location_inter_wh')[1]
        except:
            # XML id may be absent (record deleted / data not loaded):
            # no inter-company transit location available.
            return False
        return location_obj.browse(cr, uid, inter_wh_loc, context=context)

    def _get_inter_wh_route(self, cr, uid, warehouse, wh, context=None):
        """Values of the route resupplying `warehouse` from warehouse `wh`
        (selectable on products/categories, not on warehouses)."""
        return {
            'name': _('%s: Supply Product from %s') % (warehouse.name, wh.name),
            'warehouse_selectable': False,
            'product_selectable': True,
            'product_categ_selectable': True,
            'supplied_wh_id': warehouse.id,
            'supplier_wh_id': wh.id,
        }

    def _create_resupply_routes(self, cr, uid, warehouse, supplier_warehouses, default_resupply_wh, context=None):
        """Create the inter-warehouse routes and procurement rules resupplying
        `warehouse` from each warehouse in `supplier_warehouses`, going through
        the appropriate (internal or inter-company) transit location."""
        route_obj = self.pool.get('stock.location.route')
        pull_obj = self.pool.get('procurement.rule')
        #create route selectable on the product to resupply the warehouse from another one
        external_transit_location = self._get_external_transit_location(cr, uid, warehouse, context=context)
        internal_transit_location = warehouse.company_id.internal_transit_location_id
        input_loc = warehouse.wh_input_stock_loc_id
        if warehouse.reception_steps == 'one_step':
            input_loc = warehouse.lot_stock_id
        for wh in supplier_warehouses:
            # Same company -> internal transit; otherwise inter-company transit.
            transit_location = wh.company_id.id == warehouse.company_id.id and internal_transit_location or external_transit_location
            if transit_location:
                output_loc = wh.wh_output_stock_loc_id
                if wh.delivery_steps == 'ship_only':
                    output_loc = wh.lot_stock_id
                    # Create extra MTO rule (only for 'ship only' because in the other cases MTO rules already exists)
                    mto_pull_vals = self._get_mto_pull_rule(cr, uid, wh, [(output_loc, transit_location, wh.out_type_id.id)], context=context)[0]
                    pull_obj.create(cr, uid, mto_pull_vals, context=context)
                inter_wh_route_vals = self._get_inter_wh_route(cr, uid, warehouse, wh, context=context)
                inter_wh_route_id = route_obj.create(cr, uid, vals=inter_wh_route_vals, context=context)
                values = [(output_loc, transit_location, wh.out_type_id.id, wh), (transit_location, input_loc, warehouse.in_type_id.id, warehouse)]
                pull_rules_list = self._get_supply_pull_rules(cr, uid, wh.id, values, inter_wh_route_id, context=context)
                for pull_rule in pull_rules_list:
                    pull_obj.create(cr, uid, vals=pull_rule, context=context)
                #if the warehouse is also set as default resupply method, assign this route automatically to the warehouse
                if default_resupply_wh and default_resupply_wh.id == wh.id:
                    # NOTE(review): the route is written on BOTH the supplied
                    # warehouse and the supplier warehouse here — confirm the
                    # supplier side is intended.
                    self.write(cr, uid, [warehouse.id, wh.id], {'route_ids': [(4, inter_wh_route_id)]}, context=context)

    _defaults = {
        # NOTE(review): the model name passed to _company_default_get is
        # 'stock.inventory' although this is stock.warehouse — looks like a
        # copy/paste leftover; it only affects which ir.property is looked up.
        # Confirm before changing.
        'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.inventory', context=c),
        'reception_steps': 'one_step',
        'delivery_steps': 'ship_only',
    }
    _sql_constraints = [
        ('warehouse_name_uniq', 'unique(name, company_id)', 'The name of the warehouse must be unique per company!'),
        ('warehouse_code_uniq', 'unique(code, company_id)', 'The code of the warehouse must be unique per company!'),
    ]

    def _get_partner_locations(self, cr, uid, ids, context=None):
        ''' returns a tuple made of the browse record of customer location and the browse record of supplier location'''
        data_obj = self.pool.get('ir.model.data')
        location_obj = self.pool.get('stock.location')
        try:
            customer_loc = data_obj.get_object_reference(cr, uid, 'stock', 'stock_location_customers')[1]
            supplier_loc = data_obj.get_object_reference(cr, uid, 'stock', 'stock_location_suppliers')[1]
        except:
            # XML ids missing: fall back on any location with the right usage.
            customer_loc = location_obj.search(cr, uid, [('usage', '=', 'customer')], context=context)
            customer_loc = customer_loc and customer_loc[0] or False
            supplier_loc = location_obj.search(cr, uid, [('usage', '=', 'supplier')], context=context)
            supplier_loc = supplier_loc and supplier_loc[0] or False
        if not (customer_loc and supplier_loc):
            raise UserError(_('Can\'t find any customer or supplier location.'))
        return location_obj.browse(cr, uid, [customer_loc, supplier_loc], context=context)

    def _location_used(self, cr, uid, location_id, warehouse, context=None):
        """Return True if `location_id` is referenced by a procurement (pull)
        or push rule that does NOT belong to one of the warehouse's own routes,
        i.e. deactivating the location would break something external."""
        pull_obj = self.pool['procurement.rule']
        push_obj = self.pool['stock.location.path']
        domain = ['&', ('route_id', 'not in', [x.id for x in warehouse.route_ids]),
                  '|', ('location_src_id', '=', location_id),  # noqa
                       ('location_id', '=', location_id)
                  ]
        pulls = pull_obj.search_count(cr, uid, domain, context=context)
        domain = ['&', ('route_id', 'not in', [x.id for x in warehouse.route_ids]),
                  '|', ('location_from_id', '=', location_id),  # noqa
                       ('location_dest_id', '=', location_id)
                  ]
        pushs = push_obj.search_count(cr, uid, domain, context=context)
        if pulls or pushs:
            return True
        return False

    def switch_location(self, cr, uid, ids, warehouse, new_reception_step=False, new_delivery_step=False, context=None):
        """(De)activate the warehouse sub-locations to match the new
        reception/delivery step configuration. Locations still used by rules
        outside this warehouse's routes (see _location_used) stay active."""
        location_obj = self.pool.get('stock.location')

        new_reception_step = new_reception_step or warehouse.reception_steps
        new_delivery_step = new_delivery_step or warehouse.delivery_steps
        if warehouse.reception_steps != new_reception_step:
            if not self._location_used(cr, uid, warehouse.wh_input_stock_loc_id.id, warehouse, context=context):
                location_obj.write(cr, uid, [warehouse.wh_input_stock_loc_id.id, warehouse.wh_qc_stock_loc_id.id], {'active': False}, context=context)
            if new_reception_step != 'one_step':
                location_obj.write(cr, uid, warehouse.wh_input_stock_loc_id.id, {'active': True}, context=context)
            if new_reception_step == 'three_steps':
                location_obj.write(cr, uid, warehouse.wh_qc_stock_loc_id.id, {'active': True}, context=context)

        if warehouse.delivery_steps != new_delivery_step:
            if not self._location_used(cr, uid, warehouse.wh_output_stock_loc_id.id, warehouse, context=context):
                location_obj.write(cr, uid, [warehouse.wh_output_stock_loc_id.id], {'active': False}, context=context)
            if not self._location_used(cr, uid, warehouse.wh_pack_stock_loc_id.id, warehouse, context=context):
                location_obj.write(cr, uid, [warehouse.wh_pack_stock_loc_id.id], {'active': False}, context=context)
            if new_delivery_step != 'ship_only':
                location_obj.write(cr, uid, warehouse.wh_output_stock_loc_id.id, {'active': True}, context=context)
            if new_delivery_step == 'pick_pack_ship':
                location_obj.write(cr, uid, warehouse.wh_pack_stock_loc_id.id, {'active': True}, context=context)
        return True

    def _get_reception_delivery_route(self, cr, uid, warehouse, route_name, context=None):
        # Base values for the warehouse's reception or delivery route.
        return {
            'name': self._format_routename(cr, uid, warehouse, route_name, context=context),
            'product_categ_selectable': True,
            'product_selectable': False,
            'sequence': 10,
        }

    def _get_supply_pull_rules(self, cr, uid, supply_warehouse, values, new_route_id, context=None):
        """Procurement rule values for an inter-warehouse resupply route.

        `values` is a list of (from_loc, dest_loc, picking_type_id, warehouse)
        tuples; `supply_warehouse` is the id of the supplying warehouse and is
        stored as propagate_warehouse_id on each rule.
        """
        pull_rules_list = []
        for from_loc, dest_loc, pick_type_id, warehouse in values:
            pull_rules_list.append({
                'name': self._format_rulename(cr, uid, warehouse, from_loc, dest_loc, context=context),
                'location_src_id': from_loc.id,
                'location_id': dest_loc.id,
                'route_id': new_route_id,
                'action': 'move',
                'picking_type_id': pick_type_id,
                'procure_method': warehouse.lot_stock_id.id != from_loc.id and 'make_to_order' or 'make_to_stock',  # first part of the resuply route is MTS
                'warehouse_id': warehouse.id,
                'propagate_warehouse_id': supply_warehouse,
            })
        return pull_rules_list

    def _get_push_pull_rules(self, cr, uid, warehouse, active, values, new_route_id, context=None):
        """Build push and procurement rule values for a multi-step route.

        Returns (push_rules_list, pull_rules_list). The first pull rule of the
        chain is make_to_stock, the following ones make_to_order.
        """
        first_rule = True
        push_rules_list = []
        pull_rules_list = []
        for from_loc, dest_loc, pick_type_id in values:
            push_rules_list.append({
                'name': self._format_rulename(cr, uid, warehouse, from_loc, dest_loc, context=context),
                'location_from_id': from_loc.id,
                'location_dest_id': dest_loc.id,
                'route_id': new_route_id,
                'auto': 'manual',
                'picking_type_id': pick_type_id,
                'active': active,
                'warehouse_id': warehouse.id,
            })
            pull_rules_list.append({
                'name': self._format_rulename(cr, uid, warehouse, from_loc, dest_loc, context=context),
                'location_src_id': from_loc.id,
                'location_id': dest_loc.id,
                'route_id': new_route_id,
                'action': 'move',
                'picking_type_id': pick_type_id,
                'procure_method': first_rule is True and 'make_to_stock' or 'make_to_order',
                'active': active,
                'warehouse_id': warehouse.id,
            })
            first_rule = False
        return push_rules_list, pull_rules_list

    def _get_mto_route(self, cr, uid, context=None):
        """Return the id of the generic Make To Order route (resolved by XML id
        when available, otherwise by name) or raise UserError."""
        route_obj = self.pool.get('stock.location.route')
        data_obj = self.pool.get('ir.model.data')
        try:
            mto_route_id = data_obj.get_object_reference(cr, uid, 'stock', 'route_warehouse0_mto')[1]
        except:
            # XML id missing: fall back on a (translated) name search.
            mto_route_id = route_obj.search(cr, uid, [('name', 'like', _('Make To Order'))], context=context)
            mto_route_id = mto_route_id and mto_route_id[0] or False
        if not mto_route_id:
            raise UserError(_('Can\'t find any generic Make To Order route.'))
        return mto_route_id

    def _check_remove_mto_resupply_rules(self, cr, uid, warehouse, context=None):
        """ Checks that the moves from the different """
        # NOTE(review): the docstring above is truncated in the original, and
        # mto_route_id is computed but never used in the search domain below,
        # so EVERY transit-bound rule sourced in this warehouse's stock is
        # unlinked, not only MTO ones. Confirm intent before changing.
        pull_obj = self.pool.get('procurement.rule')
        mto_route_id = self._get_mto_route(cr, uid, context=context)
        rules = pull_obj.search(cr, uid, ['&', ('location_src_id', '=', warehouse.lot_stock_id.id), ('location_id.usage', '=', 'transit')], context=context)
        pull_obj.unlink(cr, uid, rules, context=context)

    def _get_mto_pull_rule(self, cr, uid, warehouse, values, context=None):
        """Make-to-order procurement rule values (one dict per
        (from_loc, dest_loc, picking_type_id) tuple in `values`), all attached
        to the generic MTO route."""
        mto_route_id = self._get_mto_route(cr, uid, context=context)
        res = []
        for value in values:
            from_loc, dest_loc, pick_type_id = value
            res += [{
                'name': self._format_rulename(cr, uid, warehouse, from_loc, dest_loc, context=context) + _(' MTO'),
                'location_src_id': from_loc.id,
                'location_id': dest_loc.id,
                'route_id': mto_route_id,
                'action': 'move',
                'picking_type_id': pick_type_id,
                'procure_method': 'make_to_order',
                'active': True,
                'warehouse_id': warehouse.id,
            }]
        return res

    def _get_crossdock_route(self, cr, uid, warehouse, route_name, context=None):
        # Cross-dock is only active when both flows are multi-step.
        return {
            'name': self._format_routename(cr, uid, warehouse, route_name, context=context),
            'warehouse_selectable': False,
            'product_selectable': True,
            'product_categ_selectable': True,
            'active': warehouse.delivery_steps != 'ship_only' and warehouse.reception_steps != 'one_step',
            'sequence': 20,
        }

    def create_routes(self, cr, uid, ids, warehouse, context=None):
        """Create the reception, delivery, crossdock and resupply routes of a
        new warehouse together with their push/procurement rules.

        Returns the dict of values (route ids and MTO rule id) to store on the
        warehouse record.
        """
        wh_route_ids = []
        route_obj = self.pool.get('stock.location.route')
        pull_obj = self.pool.get('procurement.rule')
        push_obj = self.pool.get('stock.location.path')
        routes_dict = self.get_routes_dict(cr, uid, ids, warehouse, context=context)
        #create reception route and rules
        route_name, values = routes_dict[warehouse.reception_steps]
        route_vals = self._get_reception_delivery_route(cr, uid, warehouse, route_name, context=context)
        reception_route_id = route_obj.create(cr, uid, route_vals, context=context)
        wh_route_ids.append((4, reception_route_id))
        push_rules_list, pull_rules_list = self._get_push_pull_rules(cr, uid, warehouse, True, values, reception_route_id, context=context)
        #create the push/procurement rules
        for push_rule in push_rules_list:
            push_obj.create(cr, uid, vals=push_rule, context=context)
        for pull_rule in pull_rules_list:
            #all procurement rules in reception route are mto, because we don't want to wait for the scheduler to trigger an orderpoint on input location
            pull_rule['procure_method'] = 'make_to_order'
            pull_obj.create(cr, uid, vals=pull_rule, context=context)

        #create MTS route and procurement rules for delivery and a specific route MTO to be set on the product
        route_name, values = routes_dict[warehouse.delivery_steps]
        route_vals = self._get_reception_delivery_route(cr, uid, warehouse, route_name, context=context)
        #create the route and its procurement rules
        delivery_route_id = route_obj.create(cr, uid, route_vals, context=context)
        wh_route_ids.append((4, delivery_route_id))
        dummy, pull_rules_list = self._get_push_pull_rules(cr, uid, warehouse, True, values, delivery_route_id, context=context)
        for pull_rule in pull_rules_list:
            pull_obj.create(cr, uid, vals=pull_rule, context=context)
        #create MTO procurement rule and link it to the generic MTO route
        mto_pull_vals = self._get_mto_pull_rule(cr, uid, warehouse, values, context=context)[0]
        mto_pull_id = pull_obj.create(cr, uid, mto_pull_vals, context=context)

        #create a route for cross dock operations, that can be set on products and product categories
        route_name, values = routes_dict['crossdock']
        crossdock_route_vals = self._get_crossdock_route(cr, uid, warehouse, route_name, context=context)
        crossdock_route_id = route_obj.create(cr, uid, vals=crossdock_route_vals, context=context)
        wh_route_ids.append((4, crossdock_route_id))
        dummy, pull_rules_list = self._get_push_pull_rules(cr, uid, warehouse, warehouse.delivery_steps != 'ship_only' and warehouse.reception_steps != 'one_step', values, crossdock_route_id, context=context)
        for pull_rule in pull_rules_list:
            # Fixed cross-dock is logically mto
            pull_rule['procure_method'] = 'make_to_order'
            pull_obj.create(cr, uid, vals=pull_rule, context=context)

        #create route selectable on the product to resupply the warehouse from another one
        self._create_resupply_routes(cr, uid, warehouse, warehouse.resupply_wh_ids, warehouse.default_resupply_wh_id, context=context)

        #return routes and mto procurement rule to store on the warehouse
        return {
            'route_ids': wh_route_ids,
            'mto_pull_id': mto_pull_id,
            'reception_route_id': reception_route_id,
            'delivery_route_id': delivery_route_id,
            'crossdock_route_id': crossdock_route_id,
        }

    def change_route(self, cr, uid, ids, warehouse, new_reception_step=False, new_delivery_step=False, context=None):
        """Rebuild the warehouse's routes/rules and retarget its picking types
        after a change of reception or delivery steps on an existing warehouse.
        Existing rules of the affected routes are deleted and recreated."""
        picking_type_obj = self.pool.get('stock.picking.type')
        pull_obj = self.pool.get('procurement.rule')
        push_obj = self.pool.get('stock.location.path')
        route_obj = self.pool.get('stock.location.route')
        new_reception_step = new_reception_step or warehouse.reception_steps
        new_delivery_step = new_delivery_step or warehouse.delivery_steps

        #change the default source and destination location and (de)activate picking types
        input_loc = warehouse.wh_input_stock_loc_id
        if new_reception_step == 'one_step':
            input_loc = warehouse.lot_stock_id
        output_loc = warehouse.wh_output_stock_loc_id
        if new_delivery_step == 'ship_only':
            output_loc = warehouse.lot_stock_id
        picking_type_obj.write(cr, uid, warehouse.in_type_id.id, {'default_location_dest_id': input_loc.id}, context=context)
        picking_type_obj.write(cr, uid, warehouse.out_type_id.id, {'default_location_src_id': output_loc.id}, context=context)
        picking_type_obj.write(cr, uid, warehouse.pick_type_id.id, {
            'active': new_delivery_step != 'ship_only',
            'default_location_dest_id': output_loc.id if new_delivery_step == 'pick_ship' else warehouse.wh_pack_stock_loc_id.id,
        }, context=context)
        picking_type_obj.write(cr, uid, warehouse.pack_type_id.id, {'active': new_delivery_step == 'pick_pack_ship'}, context=context)

        routes_dict = self.get_routes_dict(cr, uid, ids, warehouse, context=context)
        #update delivery route and rules: unlink the existing rules of the warehouse delivery route and recreate it
        pull_obj.unlink(cr, uid, [pu.id for pu in warehouse.delivery_route_id.pull_ids], context=context)
        route_name, values = routes_dict[new_delivery_step]
        route_obj.write(cr, uid, warehouse.delivery_route_id.id, {'name': self._format_routename(cr, uid, warehouse, route_name, context=context)}, context=context)
        dummy, pull_rules_list = self._get_push_pull_rules(cr, uid, warehouse, True, values, warehouse.delivery_route_id.id, context=context)
        #create the procurement rules
        for pull_rule in pull_rules_list:
            pull_obj.create(cr, uid, vals=pull_rule, context=context)

        #update receipt route and rules: unlink the existing rules of the warehouse receipt route and recreate it
        pull_obj.unlink(cr, uid, [pu.id for pu in warehouse.reception_route_id.pull_ids], context=context)
        push_obj.unlink(cr, uid, [pu.id for pu in warehouse.reception_route_id.push_ids], context=context)
        route_name, values = routes_dict[new_reception_step]
        route_obj.write(cr, uid, warehouse.reception_route_id.id, {'name': self._format_routename(cr, uid, warehouse, route_name, context=context)}, context=context)
        push_rules_list, pull_rules_list = self._get_push_pull_rules(cr, uid, warehouse, True, values, warehouse.reception_route_id.id, context=context)
        #create the push/procurement rules
        for push_rule in push_rules_list:
            push_obj.create(cr, uid, vals=push_rule, context=context)
        for pull_rule in pull_rules_list:
            #all procurement rules in receipt route are mto, because we don't want to wait for the scheduler to trigger an orderpoint on input location
            pull_rule['procure_method'] = 'make_to_order'
            pull_obj.create(cr, uid, vals=pull_rule, context=context)

        # Cross-dock only stays active when both flows remain multi-step.
        route_obj.write(cr, uid, warehouse.crossdock_route_id.id, {'active': new_reception_step != 'one_step' and new_delivery_step != 'ship_only'}, context=context)

        #change MTO rule
        dummy, values = routes_dict[new_delivery_step]
        mto_pull_vals = self._get_mto_pull_rule(cr, uid, warehouse, values, context=context)[0]
        pull_obj.write(cr, uid, warehouse.mto_pull_id.id, mto_pull_vals, context=context)
        return True

    def create_sequences_and_picking_types(self, cr, uid, warehouse, context=None):
        """Create the picking sequences and the in/out/internal/pick/pack
        picking types of a freshly created warehouse, then store them on the
        warehouse record (via super().write to avoid recursion)."""
        seq_obj = self.pool.get('ir.sequence')
        picking_type_obj = self.pool.get('stock.picking.type')
        #create new sequences
        in_seq_id = seq_obj.create(cr, SUPERUSER_ID, {'name': warehouse.name + _(' Sequence in'), 'prefix': warehouse.code + '/IN/', 'padding': 5}, context=context)
        out_seq_id = seq_obj.create(cr, SUPERUSER_ID, {'name': warehouse.name + _(' Sequence out'), 'prefix': warehouse.code + '/OUT/', 'padding': 5}, context=context)
        pack_seq_id = seq_obj.create(cr, SUPERUSER_ID, {'name': warehouse.name + _(' Sequence packing'), 'prefix': warehouse.code + '/PACK/', 'padding': 5}, context=context)
        pick_seq_id = seq_obj.create(cr, SUPERUSER_ID, {'name': warehouse.name + _(' Sequence picking'), 'prefix': warehouse.code + '/PICK/', 'padding': 5}, context=context)
        int_seq_id = seq_obj.create(cr, SUPERUSER_ID, {'name': warehouse.name + _(' Sequence internal'), 'prefix': warehouse.code + '/INT/', 'padding': 5}, context=context)

        wh_stock_loc = warehouse.lot_stock_id
        wh_input_stock_loc = warehouse.wh_input_stock_loc_id
        wh_output_stock_loc = warehouse.wh_output_stock_loc_id
        wh_pack_stock_loc = warehouse.wh_pack_stock_loc_id

        #create in, out, internal picking types for warehouse
        input_loc = wh_input_stock_loc
        if warehouse.reception_steps == 'one_step':
            input_loc = wh_stock_loc
        output_loc = wh_output_stock_loc
        if warehouse.delivery_steps == 'ship_only':
            output_loc = wh_stock_loc

        #choose the next available color for the picking types of this warehouse
        color = 0
        available_colors = [0, 3, 4, 5, 6, 7, 8, 1, 2]  # put white color first
        all_used_colors = self.pool.get('stock.picking.type').search_read(cr, uid, [('warehouse_id', '!=', False), ('color', '!=', False)], ['color'], order='color')
        #don't use sets to preserve the list order
        for x in all_used_colors:
            if x['color'] in available_colors:
                available_colors.remove(x['color'])
        if available_colors:
            color = available_colors[0]

        #order the picking types with a sequence allowing to have the following suit for each warehouse: reception, internal, pick, pack, ship.
        max_sequence = self.pool.get('stock.picking.type').search_read(cr, uid, [], ['sequence'], order='sequence desc')
        max_sequence = max_sequence and max_sequence[0]['sequence'] or 0

        # Hide the internal transfer type for single-step warehouses, unless
        # the user can see locations.
        internal_active_false = (warehouse.reception_steps == 'one_step') and (warehouse.delivery_steps == 'ship_only')
        internal_active_false = internal_active_false and not self.user_has_groups(cr, uid, 'stock.group_locations')

        in_type_id = picking_type_obj.create(cr, uid, vals={
            'name': _('Receipts'),
            'warehouse_id': warehouse.id,
            'code': 'incoming',
            'use_create_lots': True,
            'use_existing_lots': False,
            'sequence_id': in_seq_id,
            'default_location_src_id': False,
            'default_location_dest_id': input_loc.id,
            'sequence': max_sequence + 1,
            'color': color}, context=context)
        out_type_id = picking_type_obj.create(cr, uid, vals={
            'name': _('Delivery Orders'),
            'warehouse_id': warehouse.id,
            'code': 'outgoing',
            'use_create_lots': False,
            'use_existing_lots': True,
            'sequence_id': out_seq_id,
            'return_picking_type_id': in_type_id,
            'default_location_src_id': output_loc.id,
            'default_location_dest_id': False,
            'sequence': max_sequence + 4,
            'color': color}, context=context)
        # in/out return picking types point at each other.
        picking_type_obj.write(cr, uid, [in_type_id], {'return_picking_type_id': out_type_id}, context=context)
        int_type_id = picking_type_obj.create(cr, uid, vals={
            'name': _('Internal Transfers'),
            'warehouse_id': warehouse.id,
            'code': 'internal',
            'use_create_lots': False,
            'use_existing_lots': True,
            'sequence_id': int_seq_id,
            'default_location_src_id': wh_stock_loc.id,
            'default_location_dest_id': wh_stock_loc.id,
            'active': not internal_active_false,
            'sequence': max_sequence + 2,
            'color': color}, context=context)
        pack_type_id = picking_type_obj.create(cr, uid, vals={
            'name': _('Pack'),
            'warehouse_id': warehouse.id,
            'code': 'internal',
            'use_create_lots': False,
            'use_existing_lots': True,
            'sequence_id': pack_seq_id,
            'default_location_src_id': wh_pack_stock_loc.id,
            'default_location_dest_id': output_loc.id,
            'active': warehouse.delivery_steps == 'pick_pack_ship',
            'sequence': max_sequence + 3,
            'color': color}, context=context)
        pick_type_id = picking_type_obj.create(cr, uid, vals={
            'name': _('Pick'),
            'warehouse_id': warehouse.id,
            'code': 'internal',
            'use_create_lots': False,
            'use_existing_lots': True,
            'sequence_id': pick_seq_id,
            'default_location_src_id': wh_stock_loc.id,
            'default_location_dest_id': output_loc.id if warehouse.delivery_steps == 'pick_ship' else wh_pack_stock_loc.id,
            'active': warehouse.delivery_steps != 'ship_only',
            'sequence': max_sequence + 2,
            'color': color}, context=context)

        #write picking types on WH
        vals = {
            'in_type_id': in_type_id,
            'out_type_id': out_type_id,
            'pack_type_id': pack_type_id,
            'pick_type_id': pick_type_id,
            'int_type_id': int_type_id,
        }
        # super().write to bypass this model's own write() logic.
        super(stock_warehouse, self).write(cr, uid, warehouse.id, vals=vals, context=context)

    def create(self, cr, uid, vals, context=None):
        """Create a warehouse together with its view/sub locations, sequences,
        picking types and routes; optionally link the warehouse partner to the
        company's internal transit location."""
        if context is None:
            context = {}
        if vals is None:
            vals = {}
        data_obj = self.pool.get('ir.model.data')
        seq_obj = self.pool.get('ir.sequence')
        picking_type_obj = self.pool.get('stock.picking.type')
        location_obj = self.pool.get('stock.location')

        #create view location for warehouse
        loc_vals = {
            'name': _(vals.get('code')),
            'usage': 'view',
            'location_id': data_obj.get_object_reference(cr, uid, 'stock', 'stock_location_locations')[1],
        }
        if vals.get('company_id'):
            loc_vals['company_id'] = vals.get('company_id')
        wh_loc_id = location_obj.create(cr, uid, loc_vals, context=context)
        vals['view_location_id'] = wh_loc_id
        #create all location
        def_values = self.default_get(cr, uid, {'reception_steps', 'delivery_steps'})
        reception_steps = vals.get('reception_steps', def_values['reception_steps'])
        delivery_steps = vals.get('delivery_steps', def_values['delivery_steps'])
        # active_test=False so inactive sub-locations can be created/browsed.
        context_with_inactive = context.copy()
        context_with_inactive['active_test'] = False
        sub_locations = [
            {'name': _('Stock'), 'active': True, 'field': 'lot_stock_id'},
            {'name': _('Input'), 'active': reception_steps != 'one_step', 'field': 'wh_input_stock_loc_id'},
            {'name': _('Quality Control'), 'active': reception_steps == 'three_steps', 'field': 'wh_qc_stock_loc_id'},
            {'name': _('Output'), 'active': delivery_steps != 'ship_only', 'field': 'wh_output_stock_loc_id'},
            {'name': _('Packing Zone'), 'active': delivery_steps == 'pick_pack_ship', 'field': 'wh_pack_stock_loc_id'},
        ]
        for values in sub_locations:
            loc_vals = {
                'name': values['name'],
                'usage': 'internal',
                'location_id': wh_loc_id,
                'active': values['active'],
            }
            if vals.get('company_id'):
                loc_vals['company_id'] = vals.get('company_id')
            location_id = location_obj.create(cr, uid, loc_vals, context=context_with_inactive)
            vals[values['field']] = location_id

        #create WH
        new_id = super(stock_warehouse, self).create(cr, uid, vals=vals, context=context)
        warehouse = self.browse(cr, uid, new_id, context=context)
        self.create_sequences_and_picking_types(cr, uid, warehouse, context=context)

        #create routes and push/procurement rules
        new_objects_dict = self.create_routes(cr, uid, new_id, warehouse, context=context)
        self.write(cr, uid, warehouse.id, new_objects_dict, context=context)

        # If partner assigned
        if vals.get('partner_id'):
            comp_obj = self.pool['res.company']
            if vals.get('company_id'):
                transit_loc = comp_obj.browse(cr, uid, vals.get('company_id'), context=context).internal_transit_location_id.id
            else:
                transit_loc = comp_obj.browse(cr, uid, comp_obj._company_default_get(cr, uid, 'stock.warehouse', context=context)).internal_transit_location_id.id
            self.pool['res.partner'].write(cr, uid, [vals['partner_id']], {'property_stock_customer': transit_loc, 'property_stock_supplier': transit_loc}, context=context)
        return new_id

    def _format_rulename(self, cr, uid, obj, from_loc, dest_loc, context=None):
        # "<WH code>: <from> -> <to>" naming for push/procurement rules.
        return obj.code + ': ' + from_loc.name + ' -> ' + dest_loc.name

    def _format_routename(self, cr, uid, obj, name, context=None):
        # "<WH name>: <route name>" naming for warehouse routes.
        return obj.name + ': ' + name

    def get_routes_dict(self, cr, uid, ids, warehouse, context=None):
        """Map every step configuration to its display name and its list of
        (from_loc, dest_loc, picking_type_id) hops."""
        #fetch customer and supplier locations, for references
        customer_loc, supplier_loc = self._get_partner_locations(cr, uid, ids, context=context)
        return {
            'one_step': (_('Receipt in 1 step'), []),
            'two_steps': (_('Receipt in 2 steps'), [(warehouse.wh_input_stock_loc_id, warehouse.lot_stock_id, warehouse.int_type_id.id)]),
            'three_steps': (_('Receipt in 3 steps'), [(warehouse.wh_input_stock_loc_id, warehouse.wh_qc_stock_loc_id, warehouse.int_type_id.id), (warehouse.wh_qc_stock_loc_id, warehouse.lot_stock_id, warehouse.int_type_id.id)]),
            'crossdock': (_('Cross-Dock'), [(warehouse.wh_input_stock_loc_id, warehouse.wh_output_stock_loc_id, warehouse.int_type_id.id), (warehouse.wh_output_stock_loc_id, customer_loc, warehouse.out_type_id.id)]),
            'ship_only': (_('Ship Only'), [(warehouse.lot_stock_id, customer_loc, warehouse.out_type_id.id)]),
            'pick_ship': (_('Pick + Ship'), [(warehouse.lot_stock_id, warehouse.wh_output_stock_loc_id, warehouse.pick_type_id.id), (warehouse.wh_output_stock_loc_id, customer_loc, warehouse.out_type_id.id)]),
            'pick_pack_ship': (_('Pick + Pack + Ship'), [(warehouse.lot_stock_id, warehouse.wh_pack_stock_loc_id, warehouse.pick_type_id.id), (warehouse.wh_pack_stock_loc_id, warehouse.wh_output_stock_loc_id, warehouse.pack_type_id.id), (warehouse.wh_output_stock_loc_id, customer_loc, warehouse.out_type_id.id)]),
        }

    def _handle_renaming(self, cr, uid, warehouse, name, code, context=None):
        """Propagate a warehouse rename (`name`/`code`) to its main location
        and to the names of its routes and push/procurement rules."""
        location_obj = self.pool.get('stock.location')
        route_obj = self.pool.get('stock.location.route')
        pull_obj = self.pool.get('procurement.rule')
        push_obj = self.pool.get('stock.location.path')
        #rename location
        location_id = warehouse.lot_stock_id.location_id.id
        location_obj.write(cr, uid, location_id, {'name': code}, context=context)
        #rename route and push-procurement rules
        for route in warehouse.route_ids:
            route_obj.write(cr, uid, route.id, {'name': route.name.replace(warehouse.name, name, 1)}, context=context)
            for pull in route.pull_ids:
                pull_obj.write(cr, uid, pull.id, {'name': pull.name.replace(warehouse.name, name, 1)}, context=context)
            for push in route.push_ids:
                push_obj.write(cr, uid, push.id, {'name': push.name.replace(warehouse.name, name, 1)}, context=context)
        #change the mto procurement rule name
        if warehouse.mto_pull_id.id:
            pull_obj.write(cr, uid, warehouse.mto_pull_id.id, {'name': warehouse.mto_pull_id.name.replace(warehouse.name, name, 1)}, context=context)

    def _check_delivery_resupply(self, cr, uid, warehouse, new_location, change_to_multiple, context=None):
        """ Will check if the resupply routes from this warehouse follow the changes of number of delivery steps """
        #Check routes that are being delivered by this warehouse and change the rule going to transit location
        route_obj = self.pool.get("stock.location.route")
        pull_obj = self.pool.get("procurement.rule")
        routes = route_obj.search(cr, uid, [('supplier_wh_id','=', warehouse.id)], context=context)
        pulls = pull_obj.search(cr, uid, ['&', ('route_id', 'in', routes), ('location_id.usage', '=', 'transit')], context=context)
        if pulls:
            pull_obj.write(cr, uid, pulls, {'location_src_id': new_location, 'procure_method': change_to_multiple and "make_to_order" or "make_to_stock"}, context=context)
        # Create or clean MTO rules
        mto_route_id = self._get_mto_route(cr, uid, context=context)
        if not change_to_multiple:
            # If single delivery we should create the necessary MTO rules for the resupply
            # pulls = pull_obj.search(cr, uid, ['&', ('route_id', '=', mto_route_id), ('location_id.usage', '=', 'transit'), ('location_src_id', '=', warehouse.lot_stock_id.id)], context=context)
            pull_recs = pull_obj.browse(cr, uid, pulls, context=context)
            transfer_locs = list(set([x.location_id for x in pull_recs]))
            vals = [(warehouse.lot_stock_id, x, warehouse.out_type_id.id) for x in transfer_locs]
            mto_pull_vals = self._get_mto_pull_rule(cr, uid, warehouse, vals, context=context)
            for mto_pull_val in mto_pull_vals:
                pull_obj.create(cr, uid, mto_pull_val, context=context)
        else:
            # We need to delete all the MTO procurement rules, otherwise they risk to be used in the system
            pulls = pull_obj.search(cr, uid, ['&', ('route_id', '=', mto_route_id), ('location_id.usage', '=', 'transit'), ('location_src_id', '=', warehouse.lot_stock_id.id)], context=context)
            if pulls:
                pull_obj.unlink(cr, uid, pulls, context=context)

    def _check_reception_resupply(self, cr, uid, warehouse, new_location, context=None):
        """ Will check if the resupply routes to this warehouse follow the changes of number of receipt steps """
        #Check routes that are being delivered by this warehouse and change the rule coming from transit location
        route_obj = self.pool.get("stock.location.route")
        pull_obj = self.pool.get("procurement.rule")
        routes = route_obj.search(cr, uid, [('supplied_wh_id','=', warehouse.id)], context=context)
        pulls = pull_obj.search(cr, uid, ['&', ('route_id', 'in', routes), ('location_src_id.usage', '=', 'transit')])
        if pulls:
            pull_obj.write(cr, uid, pulls, {'location_id': new_location}, context=context)

    def _check_resupply(self, cr, uid, warehouse, reception_new, delivery_new, context=None):
        """Adapt the resupply rules of other warehouses when this warehouse's
        reception or delivery step count crosses the single/multi boundary."""
        if reception_new:
            old_val = warehouse.reception_steps
            new_val = reception_new
            change_to_one = (old_val != 'one_step' and new_val == 'one_step')
            change_to_multiple = (old_val == 'one_step' and new_val != 'one_step')
            if change_to_one or change_to_multiple:
                new_location = change_to_one and warehouse.lot_stock_id.id or warehouse.wh_input_stock_loc_id.id
                self._check_reception_resupply(cr, uid, warehouse, new_location, context=context)
        if delivery_new:
            old_val = warehouse.delivery_steps
            new_val = delivery_new
            change_to_one = (old_val != 'ship_only' and new_val == 'ship_only')
            change_to_multiple = (old_val == 'ship_only' and new_val != 'ship_only')
            if change_to_one or change_to_multiple:
                new_location = change_to_one and warehouse.lot_stock_id.id or warehouse.wh_output_stock_loc_id.id
                self._check_delivery_resupply(cr, uid, warehouse, new_location, change_to_multiple, context=context)

    def write(self, cr, uid, ids, vals, context=None):
        """Override: react to step-configuration changes, renames, and
        resupply-warehouse changes before delegating to the standard write."""
        if context is None:
            context = {}
        if isinstance(ids, (int, long)):
            ids = [ids]
        seq_obj = self.pool.get('ir.sequence')
        route_obj = self.pool.get('stock.location.route')
        context_with_inactive = context.copy()
        context_with_inactive['active_test'] = False
        for warehouse in self.browse(cr, uid, ids, context=context_with_inactive):
            #first of all, check if we need to delete and recreate route
            if vals.get('reception_steps') or vals.get('delivery_steps'):
                #activate and deactivate location according to reception and delivery option
                self.switch_location(cr, uid, warehouse.id, warehouse, vals.get('reception_steps', False), vals.get('delivery_steps', False), context=context)
                # switch between route
                self.change_route(cr, uid, ids, warehouse, vals.get('reception_steps', False), vals.get('delivery_steps', False), context=context_with_inactive)
                # Check if we need to change something to resupply warehouses and associated MTO rules
                self._check_resupply(cr, uid, warehouse, vals.get('reception_steps'), vals.get('delivery_steps'), context=context)
            if vals.get('code') or vals.get('name'):
                name = warehouse.name
                #rename sequence
                if vals.get('name'):
                    name = vals.get('name', warehouse.name)
                self._handle_renaming(cr, uid, warehouse, name, vals.get('code', warehouse.code), context=context_with_inactive)
                # NOTE(review): these prefixes use backslashes ('\IN\') while
                # create_sequences_and_picking_types uses slashes ('/IN/') —
                # looks like an inconsistency; confirm before changing.
                if warehouse.in_type_id:
                    seq_obj.write(cr, uid, warehouse.in_type_id.sequence_id.id, {'name': name + _(' Sequence in'), 'prefix': vals.get('code', warehouse.code) + '\IN\\'}, context=context)
                if warehouse.out_type_id:
                    seq_obj.write(cr, uid, warehouse.out_type_id.sequence_id.id, {'name': name + _(' Sequence out'), 'prefix': vals.get('code', warehouse.code) + '\OUT\\'}, context=context)
                if warehouse.pack_type_id:
                    seq_obj.write(cr, uid, warehouse.pack_type_id.sequence_id.id, {'name': name + _(' Sequence packing'), 'prefix': vals.get('code', warehouse.code) + '\PACK\\'}, context=context)
                if warehouse.pick_type_id:
                    seq_obj.write(cr, uid, warehouse.pick_type_id.sequence_id.id, {'name': name + _(' Sequence picking'), 'prefix': vals.get('code', warehouse.code) + '\PICK\\'}, context=context)
                if warehouse.int_type_id:
                    seq_obj.write(cr, uid, warehouse.int_type_id.sequence_id.id, {'name': name + _(' Sequence internal'), 'prefix': vals.get('code', warehouse.code) + '\INT\\'}, context=context)
            if vals.get('resupply_wh_ids') and not vals.get('resupply_route_ids'):
                for cmd in vals.get('resupply_wh_ids'):
                    if cmd[0] == 6:
                        # (6, 0, ids) "replace" command: diff against the
                        # current set to create/remove resupply routes.
                        new_ids = set(cmd[2])
                        old_ids = set([wh.id for wh in warehouse.resupply_wh_ids])
                        to_add_wh_ids = new_ids - old_ids
                        if to_add_wh_ids:
                            supplier_warehouses = self.browse(cr, uid, list(to_add_wh_ids), context=context)
                            self._create_resupply_routes(cr, uid, warehouse, supplier_warehouses, warehouse.default_resupply_wh_id, context=context)
                        to_remove_wh_ids = old_ids - new_ids
                        if to_remove_wh_ids:
                            to_remove_route_ids = route_obj.search(cr, uid, [('supplied_wh_id', '=', warehouse.id), ('supplier_wh_id', 'in', list(to_remove_wh_ids))], context=context)
                            if to_remove_route_ids:
                                route_obj.unlink(cr, uid, to_remove_route_ids, context=context)
                    else:
                        #not implemented
                        pass
            if 'default_resupply_wh_id' in vals:
                if vals.get('default_resupply_wh_id') == warehouse.id:
                    raise UserError(_('The default resupply warehouse should be different than the warehouse itself!'))
                if warehouse.default_resupply_wh_id:
                    #remove the existing resupplying route on the warehouse
                    to_remove_route_ids = route_obj.search(cr, uid, [('supplied_wh_id', '=',
warehouse.id), ('supplier_wh_id', '=', warehouse.default_resupply_wh_id.id)], context=context) for inter_wh_route_id in to_remove_route_ids: self.write(cr, uid, [warehouse.id], {'route_ids': [(3, inter_wh_route_id)]}) if vals.get('default_resupply_wh_id'): #assign the new resupplying route on all products to_assign_route_ids = route_obj.search(cr, uid, [('supplied_wh_id', '=', warehouse.id), ('supplier_wh_id', '=', vals.get('default_resupply_wh_id'))], context=context) for inter_wh_route_id in to_assign_route_ids: self.write(cr, uid, [warehouse.id], {'route_ids': [(4, inter_wh_route_id)]}) # If another partner assigned if vals.get('partner_id'): if not vals.get('company_id'): company = self.browse(cr, uid, ids[0], context=context).company_id else: company = self.pool['res.company'].browse(cr, uid, vals['company_id']) transit_loc = company.internal_transit_location_id.id self.pool['res.partner'].write(cr, uid, [vals['partner_id']], {'property_stock_customer': transit_loc, 'property_stock_supplier': transit_loc}, context=context) return super(stock_warehouse, self).write(cr, uid, ids, vals=vals, context=context) def get_all_routes_for_wh(self, cr, uid, warehouse, context=None): route_obj = self.pool.get("stock.location.route") all_routes = [route.id for route in warehouse.route_ids] all_routes += route_obj.search(cr, uid, [('supplied_wh_id', '=', warehouse.id)], context=context) all_routes += [warehouse.mto_pull_id.route_id.id] return all_routes def view_all_routes_for_wh(self, cr, uid, ids, context=None): all_routes = [] for wh in self.browse(cr, uid, ids, context=context): all_routes += self.get_all_routes_for_wh(cr, uid, wh, context=context) domain = [('id', 'in', all_routes)] return { 'name': _('Warehouse\'s Routes'), 'domain': domain, 'res_model': 'stock.location.route', 'type': 'ir.actions.act_window', 'view_id': False, 'view_mode': 'tree,form', 'view_type': 'form', 'limit': 20 } class stock_location_path(osv.osv): _name = "stock.location.path" _description = 
"Pushed Flows" _order = "name" def _get_rules(self, cr, uid, ids, context=None): res = [] for route in self.browse(cr, uid, ids, context=context): res += [x.id for x in route.push_ids] return res _columns = { 'name': fields.char('Operation Name', required=True), 'company_id': fields.many2one('res.company', 'Company'), 'route_id': fields.many2one('stock.location.route', 'Route'), 'location_from_id': fields.many2one('stock.location', 'Source Location', ondelete='cascade', select=1, required=True, help="This rule can be applied when a move is confirmed that has this location as destination location"), 'location_dest_id': fields.many2one('stock.location', 'Destination Location', ondelete='cascade', select=1, required=True, help="The new location where the goods need to go"), 'delay': fields.integer('Delay (days)', help="Number of days needed to transfer the goods"), 'picking_type_id': fields.many2one('stock.picking.type', 'Picking Type', required=True, help="This is the picking type that will be put on the stock moves"), 'auto': fields.selection( [('auto','Automatic Move'), ('manual','Manual Operation'),('transparent','Automatic No Step Added')], 'Automatic Move', required=True, select=1, help="The 'Automatic Move' / 'Manual Operation' value will create a stock move after the current one. " \ "With 'Automatic No Step Added', the location is replaced in the original move." 
), 'propagate': fields.boolean('Propagate cancel and split', help='If checked, when the previous move is cancelled or split, the move generated by this move will too'), 'active': fields.boolean('Active'), 'warehouse_id': fields.many2one('stock.warehouse', 'Warehouse'), 'route_sequence': fields.related('route_id', 'sequence', string='Route Sequence', store={ 'stock.location.route': (_get_rules, ['sequence'], 10), 'stock.location.path': (lambda self, cr, uid, ids, c={}: ids, ['route_id'], 10), }), 'sequence': fields.integer('Sequence'), } _defaults = { 'auto': 'auto', 'delay': 0, 'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'procurement.order', context=c), 'propagate': True, 'active': True, } def _prepare_push_apply(self, cr, uid, rule, move, context=None): newdate = (datetime.strptime(move.date_expected, DEFAULT_SERVER_DATETIME_FORMAT) + relativedelta.relativedelta(days=rule.delay or 0)).strftime(DEFAULT_SERVER_DATETIME_FORMAT) return { 'origin': move.origin or move.picking_id.name or "/", 'location_id': move.location_dest_id.id, 'location_dest_id': rule.location_dest_id.id, 'date': newdate, 'company_id': rule.company_id and rule.company_id.id or False, 'date_expected': newdate, 'picking_id': False, 'picking_type_id': rule.picking_type_id and rule.picking_type_id.id or False, 'propagate': rule.propagate, 'push_rule_id': rule.id, 'warehouse_id': rule.warehouse_id and rule.warehouse_id.id or False, } def _apply(self, cr, uid, rule, move, context=None): move_obj = self.pool.get('stock.move') newdate = (datetime.strptime(move.date_expected, DEFAULT_SERVER_DATETIME_FORMAT) + relativedelta.relativedelta(days=rule.delay or 0)).strftime(DEFAULT_SERVER_DATETIME_FORMAT) if rule.auto == 'transparent': old_dest_location = move.location_dest_id.id move_obj.write(cr, uid, [move.id], { 'date': newdate, 'date_expected': newdate, 'location_dest_id': rule.location_dest_id.id }) #avoid looping if a push rule is not well configured 
if rule.location_dest_id.id != old_dest_location: #call again push_apply to see if a next step is defined move_obj._push_apply(cr, uid, [move], context=context) else: vals = self._prepare_push_apply(cr, uid, rule, move, context=context) move_id = move_obj.copy(cr, uid, move.id, vals, context=context) move_obj.write(cr, uid, [move.id], { 'move_dest_id': move_id, }) move_obj.action_confirm(cr, uid, [move_id], context=None) # ------------------------- # Packaging related stuff # ------------------------- from openerp.report import report_sxw class stock_package(osv.osv): """ These are the packages, containing quants and/or other packages """ _name = "stock.quant.package" _description = "Physical Packages" _parent_name = "parent_id" _parent_store = True _parent_order = 'name' _order = 'parent_left' def name_get(self, cr, uid, ids, context=None): res = self._complete_name(cr, uid, ids, 'complete_name', None, context=context) return res.items() def _complete_name(self, cr, uid, ids, name, args, context=None): """ Forms complete name of location from parent location to child location. 
@return: Dictionary of values """ res = {} for m in self.browse(cr, uid, ids, context=context): res[m.id] = m.name parent = m.parent_id while parent: res[m.id] = parent.name + ' / ' + res[m.id] parent = parent.parent_id return res def _get_packages(self, cr, uid, ids, context=None): """Returns packages from quants for store""" res = set() for quant in self.browse(cr, uid, ids, context=context): pack = quant.package_id while pack: res.add(pack.id) pack = pack.parent_id return list(res) def _get_package_info(self, cr, uid, ids, name, args, context=None): quant_obj = self.pool.get("stock.quant") default_company_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id res = dict((res_id, {'location_id': False, 'company_id': default_company_id, 'owner_id': False}) for res_id in ids) for pack in self.browse(cr, uid, ids, context=context): quants = quant_obj.search(cr, uid, [('package_id', 'child_of', pack.id)], context=context) if quants: quant = quant_obj.browse(cr, uid, quants[0], context=context) res[pack.id]['location_id'] = quant.location_id.id res[pack.id]['owner_id'] = quant.owner_id.id res[pack.id]['company_id'] = quant.company_id.id else: res[pack.id]['location_id'] = False res[pack.id]['owner_id'] = False res[pack.id]['company_id'] = False return res def _get_packages_to_relocate(self, cr, uid, ids, context=None): res = set() for pack in self.browse(cr, uid, ids, context=context): res.add(pack.id) if pack.parent_id: res.add(pack.parent_id.id) return list(res) _columns = { 'name': fields.char('Package Reference', select=True, copy=False), 'complete_name': fields.function(_complete_name, type='char', string="Package Name",), 'parent_left': fields.integer('Left Parent', select=1), 'parent_right': fields.integer('Right Parent', select=1), 'packaging_id': fields.many2one('product.packaging', 'Packaging', help="This field should be completed only if everything inside the package share the same product, otherwise it doesn't really makes 
sense.", select=True), 'location_id': fields.function(_get_package_info, type='many2one', relation='stock.location', string='Location', multi="package", store={ 'stock.quant': (_get_packages, ['location_id'], 10), 'stock.quant.package': (_get_packages_to_relocate, ['quant_ids', 'children_ids', 'parent_id'], 10), }, readonly=True, select=True), 'quant_ids': fields.one2many('stock.quant', 'package_id', 'Bulk Content', readonly=True), 'parent_id': fields.many2one('stock.quant.package', 'Parent Package', help="The package containing this item", ondelete='restrict', readonly=True), 'children_ids': fields.one2many('stock.quant.package', 'parent_id', 'Contained Packages', readonly=True), 'company_id': fields.function(_get_package_info, type="many2one", relation='res.company', string='Company', multi="package", store={ 'stock.quant': (_get_packages, ['company_id'], 10), 'stock.quant.package': (_get_packages_to_relocate, ['quant_ids', 'children_ids', 'parent_id'], 10), }, readonly=True, select=True), 'owner_id': fields.function(_get_package_info, type='many2one', relation='res.partner', string='Owner', multi="package", store={ 'stock.quant': (_get_packages, ['owner_id'], 10), 'stock.quant.package': (_get_packages_to_relocate, ['quant_ids', 'children_ids', 'parent_id'], 10), }, readonly=True, select=True), } _defaults = { 'name': lambda self, cr, uid, context: self.pool.get('ir.sequence').next_by_code(cr, uid, 'stock.quant.package') or _('Unknown Pack') } def _check_location_constraint(self, cr, uid, packs, context=None): '''checks that all quants in a package are stored in the same location. 
This function cannot be used as a constraint because it needs to be checked on pack operations (they may not call write on the package) ''' quant_obj = self.pool.get('stock.quant') for pack in packs: parent = pack while parent.parent_id: parent = parent.parent_id quant_ids = self.get_content(cr, uid, [parent.id], context=context) quants = [x for x in quant_obj.browse(cr, uid, quant_ids, context=context) if x.qty > 0] location_id = quants and quants[0].location_id.id or False if not [quant.location_id.id == location_id for quant in quants]: raise UserError(_('Everything inside a package should be in the same location')) return True def action_print(self, cr, uid, ids, context=None): context = dict(context or {}, active_ids=ids) return self.pool.get("report").get_action(cr, uid, ids, 'stock.report_package_barcode_small', context=context) def unpack(self, cr, uid, ids, context=None): quant_obj = self.pool.get('stock.quant') for package in self.browse(cr, uid, ids, context=context): quant_ids = [quant.id for quant in package.quant_ids] quant_obj.write(cr, SUPERUSER_ID, quant_ids, {'package_id': package.parent_id.id or False}, context=context) children_package_ids = [child_package.id for child_package in package.children_ids] self.write(cr, uid, children_package_ids, {'parent_id': package.parent_id.id or False}, context=context) #delete current package since it contains nothing anymore self.unlink(cr, uid, ids, context=context) return self.pool.get('ir.actions.act_window').for_xml_id(cr, uid, 'stock', 'action_package_view', context=context) def get_content(self, cr, uid, ids, context=None): child_package_ids = self.search(cr, uid, [('id', 'child_of', ids)], context=context) return self.pool.get('stock.quant').search(cr, uid, [('package_id', 'in', child_package_ids)], context=context) def get_content_package(self, cr, uid, ids, context=None): quants_ids = self.get_content(cr, uid, ids, context=context) res = self.pool.get('ir.actions.act_window').for_xml_id(cr, uid, 
                                                                  'stock', 'quantsact', context=context)
        res['domain'] = [('id', 'in', quants_ids)]
        return res

    def _get_product_total_qty(self, cr, uid, package_record, product_id, context=None):
        ''' find the total of given product 'product_id' inside the given package 'package_id'''
        quant_obj = self.pool.get('stock.quant')
        all_quant_ids = self.get_content(cr, uid, [package_record.id], context=context)
        total = 0
        for quant in quant_obj.browse(cr, uid, all_quant_ids, context=context):
            if quant.product_id.id == product_id:
                total += quant.qty
        return total

    def _get_all_products_quantities(self, cr, uid, package_id, context=None):
        '''This function computes the different product quantities for the given package
        '''
        # returns {product browse record: total qty} over the whole package tree
        quant_obj = self.pool.get('stock.quant')
        res = {}
        for quant in quant_obj.browse(cr, uid, self.get_content(cr, uid, package_id, context=context)):
            if quant.product_id not in res:
                res[quant.product_id] = 0
            res[quant.product_id] += quant.qty
        return res

    #Remove me?
    def copy_pack(self, cr, uid, id, default_pack_values=None, default=None, context=None):
        # Duplicate a package, then duplicate the pack operations that produced
        # it so they point at the copy. NOTE(review): returns None — confirm
        # callers do not expect the new package id.
        stock_pack_operation_obj = self.pool.get('stock.pack.operation')
        if default is None:
            default = {}
        new_package_id = self.copy(cr, uid, id, default_pack_values, context=context)
        default['result_package_id'] = new_package_id
        op_ids = stock_pack_operation_obj.search(cr, uid, [('result_package_id', '=', id)], context=context)
        for op_id in op_ids:
            stock_pack_operation_obj.copy(cr, uid, op_id, default, context=context)


class stock_pack_operation(osv.osv):
    _name = "stock.pack.operation"
    _description = "Packing Operation"
    _order = "result_package_id desc, id"

    def _get_remaining_prod_quantities(self, cr, uid, operation, context=None):
        '''Get the remaining quantities per product on an operation with a package.
        This function returns a dictionary'''
        #if the operation doesn't concern a package, it's not relevant to call this function
        if not operation.package_id or operation.product_id:
            return {operation.product_id: operation.remaining_qty}
        #get the total of products the package contains
        res = self.pool.get('stock.quant.package')._get_all_products_quantities(cr, uid, operation.package_id.id, context=context)
        #reduce by the quantities linked to a move
        for record in operation.linked_move_operation_ids:
            if record.move_id.product_id.id not in res:
                res[record.move_id.product_id] = 0
            res[record.move_id.product_id] -= record.qty
        return res

    def _get_remaining_qty(self, cr, uid, ids, name, args, context=None):
        # to-do quantity (in the product's default UoM) not yet matched to moves
        uom_obj = self.pool.get('product.uom')
        res = {}
        for ops in self.browse(cr, uid, ids, context=context):
            res[ops.id] = 0
            if ops.package_id and not ops.product_id:
                #dont try to compute the remaining quantity for packages because it's not relevant (a package could include different products).
                #should use _get_remaining_prod_quantities instead
                continue
            else:
                qty = ops.product_qty
                if ops.product_uom_id:
                    # convert the line quantity to the product's default UoM
                    qty = uom_obj._compute_qty_obj(cr, uid, ops.product_uom_id, ops.product_qty, ops.product_id.uom_id, context=context)
                for record in ops.linked_move_operation_ids:
                    qty -= record.qty
                res[ops.id] = float_round(qty, precision_rounding=ops.product_id.uom_id.rounding)
        return res

    def product_id_change(self, cr, uid, ids, product_id, product_uom_id, product_qty, context=None):
        # Onchange: validate via on_change_tests, default the UoM from the
        # product and expose lot visibility + a UoM domain.
        res = self.on_change_tests(cr, uid, ids, product_id, product_uom_id, product_qty, context=context)
        uom_obj = self.pool['product.uom']
        product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
        # NOTE(review): `A and B or C` relies on and/or precedence — the browse
        # on product_uom_id is evaluated even when product_id is falsy; confirm
        # intent before restructuring.
        if product_id and not product_uom_id or uom_obj.browse(cr, uid, product_uom_id, context=context).category_id.id != product.uom_id.category_id.id:
            res['value']['product_uom_id'] = product.uom_id.id
        if product:
            res['value']['lots_visible'] = (product.tracking != 'none')
            res['domain'] = {'product_uom_id': [('category_id','=',product.uom_id.category_id.id)]}
        else:
            res['domain'] = {'product_uom_id': []}
        return res

    def on_change_tests(self, cr, uid, ids, product_id, product_uom_id, product_qty, context=None):
        # Onchange validation: warn on a UoM category mismatch, then (only if
        # no warning yet) on quantities incompatible with the UoM rounding.
        res = {'value': {}}
        uom_obj = self.pool.get('product.uom')
        if product_id:
            product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
            product_uom_id = product_uom_id or product.uom_id.id
            selected_uom = uom_obj.browse(cr, uid, product_uom_id, context=context)
            if selected_uom.category_id.id != product.uom_id.category_id.id:
                res['warning'] = {
                    'title': _('Warning: wrong UoM!'),
                    'message': _('The selected UoM for product %s is not compatible with the UoM set on the product form. \nPlease choose an UoM within the same UoM category.') % (product.name)
                }
            if product_qty and 'warning' not in res:
                rounded_qty = uom_obj._compute_qty(cr, uid, product_uom_id, product_qty, product_uom_id, round=True)
                if rounded_qty != product_qty:
                    res['warning'] = {
                        'title': _('Warning: wrong quantity!'),
                        'message': _('The chosen quantity for product %s is not compatible with the UoM rounding. It will be automatically converted at confirmation') % (product.name)
                    }
        return res

    def _compute_location_description(self, cr, uid, ids, field_name, arg, context=None):
        # Human-readable from/to labels, suffixed with package names when set.
        res = {}
        for op in self.browse(cr, uid, ids, context=context):
            from_name = op.location_id.name
            to_name = op.location_dest_id.name
            if op.package_id and op.product_id:
                from_name += " : " + op.package_id.name
            if op.result_package_id:
                to_name += " : " + op.result_package_id.name
            res[op.id] = {'from_loc': from_name, 'to_loc': to_name}
        return res

    def _get_bool(self, cr, uid, ids, field_name, arg, context=None):
        # processed_boolean getter: the operation counts as done once qty_done > 0
        res = {}
        for pack in self.browse(cr, uid, ids, context=context):
            res[pack.id] = (pack.qty_done > 0.0)
        return res

    def _set_processed_qty(self, cr, uid, id, field_name, field_value, arg, context=None):
        # processed_boolean setter: only meaningful for package-only lines
        # (no product); toggles qty_done between 0 and 1.
        op = self.browse(cr, uid, id, context=context)
        if not op.product_id:
            if field_value and op.qty_done == 0:
                self.write(cr, uid, [id], {'qty_done': 1.0}, context=context)
            if not field_value and op.qty_done != 0:
                self.write(cr, uid, [id], {'qty_done': 0.0}, context=context)
        return True

    def _compute_lots_visible(self, cr, uid, ids, field_name, arg, context=None):
        # Lot column visibility: shown when lot lines already exist, or when
        # the picking type allows using/creating lots and the product is tracked.
        res = {}
        for pack in self.browse(cr, uid, ids, context=context):
            if pack.pack_lot_ids:
                res[pack.id] = True
                continue
            pick = pack.picking_id
            product_requires = (pack.product_id.tracking != 'none')
            if pick.picking_type_id:
                res[pack.id] = (pick.picking_type_id.use_existing_lots or pick.picking_type_id.use_create_lots) and product_requires
            else:
                res[pack.id] = product_requires
        return res

    def _get_default_from_loc(self, cr, uid, context=None):
        # default source-location label, taken from the action context
        default_loc = context.get('default_location_id')
        if default_loc:
            return self.pool['stock.location'].browse(cr, uid, default_loc, context=context).name

    def _get_default_to_loc(self, cr, uid, context=None):
        # default destination-location label, taken from the action context
        default_loc = context.get('default_location_dest_id')
        if default_loc:
            return self.pool['stock.location'].browse(cr, uid, default_loc, context=context).name

    _columns = {
        'picking_id':
                      fields.many2one('stock.picking', 'Stock Picking', help='The stock operation where the packing has been made', required=True),
        'product_id': fields.many2one('product.product', 'Product', ondelete="CASCADE"),  # 1
        'product_uom_id': fields.many2one('product.uom', 'Unit of Measure'),
        'product_qty': fields.float('To Do', digits_compute=dp.get_precision('Product Unit of Measure'), required=True),
        'qty_done': fields.float('Done', digits_compute=dp.get_precision('Product Unit of Measure')),
        'processed_boolean': fields.function(_get_bool, fnct_inv=_set_processed_qty, type='boolean', string='Done'),
        'package_id': fields.many2one('stock.quant.package', 'Source Package'),  # 2
        'pack_lot_ids': fields.one2many('stock.pack.operation.lot', 'operation_id', 'Lots Used'),
        'result_package_id': fields.many2one('stock.quant.package', 'Destination Package', help="If set, the operations are packed into this package", required=False, ondelete='cascade'),
        'date': fields.datetime('Date', required=True),
        'owner_id': fields.many2one('res.partner', 'Owner', help="Owner of the quants"),
        'linked_move_operation_ids': fields.one2many('stock.move.operation.link', 'operation_id', string='Linked Moves', readonly=True, help='Moves impacted by this operation for the computation of the remaining quantities'),
        'remaining_qty': fields.function(_get_remaining_qty, type='float', digits = 0, string="Remaining Qty", help="Remaining quantity in default UoM according to moves matched with this operation. "),
        'location_id': fields.many2one('stock.location', 'Source Location', required=True),
        'location_dest_id': fields.many2one('stock.location', 'Destination Location', required=True),
        'picking_source_location_id': fields.related('picking_id', 'location_id', type='many2one', relation='stock.location'),
        'picking_destination_location_id': fields.related('picking_id', 'location_dest_id', type='many2one', relation='stock.location'),
        'from_loc': fields.function(_compute_location_description, type='char', string='From', multi='loc'),
        'to_loc': fields.function(_compute_location_description, type='char', string='To', multi='loc'),
        'fresh_record': fields.boolean('Newly created pack operation'),
        'lots_visible': fields.function(_compute_lots_visible, type='boolean'),
        'state': fields.related('picking_id', 'state', type='selection', selection=[
            ('draft', 'Draft'),
            ('cancel', 'Cancelled'),
            ('waiting', 'Waiting Another Operation'),
            ('confirmed', 'Waiting Availability'),
            ('partially_available', 'Partially Available'),
            ('assigned', 'Available'),
            ('done', 'Done'),
        ]),
    }

    _defaults = {
        # NOTE(review): context_today yields a date for a datetime field — confirm intended
        'date': fields.date.context_today,
        'qty_done': 0.0,
        'product_qty': 0.0,
        'processed_boolean': lambda *a: False,
        'fresh_record': True,
        'from_loc': _get_default_from_loc,
        'to_loc': _get_default_to_loc,
    }

    def split_quantities(self, cr, uid, ids, context=None):
        # Split each operation into a "done" part (qty_done) and a new copy
        # carrying the remainder still to do.
        for pack in self.browse(cr, uid, ids, context=context):
            if pack.product_qty - pack.qty_done > 0.0 and pack.qty_done < pack.product_qty:
                pack2 = self.copy(cr, uid, pack.id, default={'qty_done': 0.0, 'product_qty': pack.product_qty - pack.qty_done}, context=context)
                self.write(cr, uid, [pack.id], {'product_qty': pack.qty_done}, context=context)
            else:
                raise UserError(_('The quantity to split should be smaller than the quantity To Do. '))
        return True

    def write(self, cr, uid, ids, vals, context=None):
        # Any explicit write marks the record as no longer freshly created.
        vals['fresh_record'] = False
        context = context or {}
        res = super(stock_pack_operation, self).write(cr, uid, ids, vals, context=context)
        return res

    def unlink(self, cr, uid, ids, context=None):
        # Forbid deleting operations whose picking is already done/cancelled.
        if any([x.state in ('done', 'cancel') for x in self.browse(cr, uid, ids, context=context)]):
            raise UserError(_('You can not delete pack operations of a done picking'))
        return super(stock_pack_operation, self).unlink(cr, uid, ids, context=context)

    def check_tracking(self, cr, uid, ids, context=None):
        """ Checks if serial number is assigned to stock move or not and raise an error if it had to. """
        operations = self.browse(cr, uid, ids, context=context)
        for ops in operations:
            if ops.picking_id and (ops.picking_id.picking_type_id.use_existing_lots or ops.picking_id.picking_type_id.use_create_lots) and \
                    ops.product_id and ops.product_id.tracking != 'none' and ops.qty_done > 0.0:
                if not ops.pack_lot_ids:
                    raise UserError(_('You need to provide a Lot/Serial Number for product %s') % ops.product_id.name)
                if ops.product_id.tracking == 'serial':
                    # serial tracking: each lot line must hold exactly one piece
                    for opslot in ops.pack_lot_ids:
                        if opslot.qty not in (1.0, 0.0):
                            raise UserError(_('You should provide a different serial number for each piece'))

    def save(self, cr, uid, ids, context=None):
        # Recompute qty_done from the lot lines for tracked products, then
        # close the wizard dialog.
        for pack in self.browse(cr, uid, ids, context=context):
            if pack.product_id.tracking != 'none':
                qty_done = sum([x.qty for x in pack.pack_lot_ids])
                self.pool['stock.pack.operation'].write(cr, uid, [pack.id], {'qty_done': qty_done}, context=context)
        return {'type': 'ir.actions.act_window_close'}

    def split_lot(self, cr, uid, ids, context=None):
        # Open the lot-details dialog for the first selected operation, with
        # the lot-handling flags of its picking type in the context.
        context = context or {}
        ctx=context.copy()
        assert len(ids) > 0
        data_obj = self.pool['ir.model.data']
        pack = self.browse(cr, uid, ids[0], context=context)
        picking_type = pack.picking_id.picking_type_id
        serial = (pack.product_id.tracking == 'serial')
        view = data_obj.xmlid_to_res_id(cr, uid, 'stock.view_pack_operation_lot_form')
        only_create = picking_type.use_create_lots and not picking_type.use_existing_lots
        show_reserved = any([x for x in pack.pack_lot_ids if x.qty_todo > 0.0])
        ctx.update({'serial': serial,
                    'only_create': only_create,
                    'create_lots': picking_type.use_create_lots,
                    'state_done': pack.picking_id.state == 'done',
                    'show_reserved': show_reserved})
        return {
            'name': _('Lot Details'),
            'type': 'ir.actions.act_window',
            'view_type': 'form',
            'view_mode': 'form',
            'res_model': 'stock.pack.operation',
            'views': [(view, 'form')],
            'view_id': view,
            'target': 'new',
            'res_id': pack.id,
            'context': ctx,
        }

    def show_details(self, cr, uid, ids, context=None):
        # Open the operation-details form for the first selected operation.
        data_obj = self.pool['ir.model.data']
        view = data_obj.xmlid_to_res_id(cr, uid, 'stock.view_pack_operation_details_form_save')
        pack = self.browse(cr, uid, ids[0], context=context)
        return {
            'name': _('Operation Details'),
            'type': 'ir.actions.act_window',
            'view_type': 'form',
            'view_mode': 'form',
            'res_model': 'stock.pack.operation',
            'views': [(view, 'form')],
            'view_id': view,
            'target': 'new',
            'res_id': pack.id,
            'context': context,
        }


class stock_pack_operation_lot(osv.osv):
    _name = "stock.pack.operation.lot"
    _description = "Specifies lot/serial number for pack operations that need it"

    def _get_plus(self, cr, uid, ids, field_name, arg, context=None):
        # "+" button visibility: serial lots may only go 0 -> 1; other lots can
        # increment while below the to-do quantity (or when no to-do is set).
        res = {}
        for packlot in self.browse(cr, uid, ids, context=context):
            if packlot.operation_id.product_id.tracking == 'serial':
                res[packlot.id] = (packlot.qty == 0.0)
            else:
                res[packlot.id] = (packlot.qty_todo == 0.0) or (packlot.qty < packlot.qty_todo)
        return res

    _columns = {
        'operation_id': fields.many2one('stock.pack.operation'),
        'qty': fields.float('Done'),
        'lot_id': fields.many2one('stock.production.lot', 'Lot/Serial Number'),
        'lot_name': fields.char('Lot Name'),
        'qty_todo': fields.float('To Do'),
        'plus_visible': fields.function(_get_plus, type='boolean'),
    }

    _defaults = {
        'qty': lambda cr, uid, ids, c: 1.0,
        'qty_todo': lambda cr, uid, ids, c: 0.0,
        'plus_visible': True,
    }

    def _check_lot(self, cr, uid, ids,
                   context=None):
        # constraint helper: every lot line must carry either an existing lot
        # record or a new lot name.
        for packlot in self.browse(cr, uid, ids, context=context):
            if not packlot.lot_name and not packlot.lot_id:
                return False
        return True

    _constraints = [
        (_check_lot, 'Lot is required', ['lot_id', 'lot_name']),
    ]

    _sql_constraints = [
        ('qty', 'CHECK(qty >= 0.0)','Quantity must be greater than or equal to 0.0!'),
        ('uniq_lot_id', 'unique(operation_id, lot_id)', 'You have already mentioned this lot in another line'),
        ('uniq_lot_name', 'unique(operation_id, lot_name)', 'You have already mentioned this lot name in another line')]

    def do_plus(self, cr, uid, ids, context=None):
        # Increment each lot line, refresh the operation's qty_done total and
        # reopen the lot dialog.
        pack_obj = self.pool['stock.pack.operation']
        for packlot in self.browse(cr, uid, ids, context=context):
            self.write(cr, uid, [packlot.id], {'qty': packlot.qty + 1}, context=context)
        pack = self.browse(cr, uid, ids[0], context=context).operation_id
        qty_done = sum([x.qty for x in pack.pack_lot_ids])
        pack_obj.write(cr, uid, [pack.id], {'qty_done': qty_done}, context=context)
        return pack_obj.split_lot(cr, uid, [pack.id], context=context)

    def do_minus(self, cr, uid, ids, context=None):
        # Decrement each lot line, refresh the operation's qty_done total and
        # reopen the lot dialog.
        pack_obj = self.pool['stock.pack.operation']
        for packlot in self.browse(cr, uid, ids, context=context):
            self.write(cr, uid, [packlot.id], {'qty': packlot.qty - 1}, context=context)
        pack = self.browse(cr, uid, ids[0], context=context).operation_id
        qty_done = sum([x.qty for x in pack.pack_lot_ids])
        pack_obj.write(cr, uid, [pack.id], {'qty_done': qty_done}, context=context)
        return pack_obj.split_lot(cr, uid, [pack.id], context=context)


class stock_move_operation_link(osv.osv):
    """
    Table making the link between stock.moves and stock.pack.operations to compute the remaining quantities on each of these objects
    """
    _name = "stock.move.operation.link"
    _description = "Link between stock moves and pack operations"

    _columns = {
        'qty': fields.float('Quantity', help="Quantity of products to consider when talking about the contribution of this pack operation towards the remaining quantity of the move (and inverse). Given in the product main uom."),
        'operation_id': fields.many2one('stock.pack.operation', 'Operation', required=True, ondelete="cascade"),
        'move_id': fields.many2one('stock.move', 'Move', required=True, ondelete="cascade"),
        'reserved_quant_id': fields.many2one('stock.quant', 'Reserved Quant', help="Technical field containing the quant that created this link between an operation and a stock move. Used at the stock_move_obj.action_done() time to avoid seeking a matching quant again"),
    }


class stock_warehouse_orderpoint(osv.osv):
    """
    Defines Minimum stock rules.
    """
    _name = "stock.warehouse.orderpoint"
    _description = "Minimum Inventory Rule"

    def subtract_procurements_from_orderpoints(self, cr, uid, orderpoint_ids, context=None):
        '''This function returns quantity of product that needs to be deducted
        from the orderpoint computed quantity because there's already a
        procurement created with aim to fulfill it. '''
        # Rows come back ordered by (orderpoint, procurement); the loop sums
        # each procurement's ordered qty once, then subtracts the qty of the
        # moves already generated for it.
        cr.execute("""select op.id, p.id, p.product_uom, p.product_qty, pt.uom_id, sm.product_qty
                    from procurement_order as p left join stock_move as sm ON sm.procurement_id = p.id,
                        stock_warehouse_orderpoint op, product_product pp, product_template pt
                    WHERE p.orderpoint_id = op.id
                        AND p.state not in ('done', 'cancel')
                        AND (sm.state IS NULL OR sm.state not in ('draft'))
                        AND pp.id = p.product_id
                        AND pp.product_tmpl_id = pt.id
                        AND op.id IN %s
                    ORDER BY op.id, p.id
        """, (tuple(orderpoint_ids),))
        results = cr.fetchall()
        current_proc = False
        current_op = False
        uom_obj = self.pool.get("product.uom")
        op_qty = 0
        res = dict.fromkeys(orderpoint_ids, 0.0)
        for move_result in results:
            op = move_result[0]
            if current_op != op:
                # flush the running total of the previous orderpoint
                if current_op:
                    res[current_op] = op_qty
                current_op = op
                op_qty = 0
            proc = move_result[1]
            if proc != current_proc:
                # count each procurement once, converted to the product UoM
                op_qty += uom_obj._compute_qty(cr, uid, move_result[2], move_result[3], move_result[4], round=False)
                current_proc = proc
            if move_result[5]:  #If a move is associated (is move qty)
                op_qty -= move_result[5]
        if current_op:
            res[current_op] = op_qty
        return res

    def _check_product_uom(self, cr, uid, ids, context=None):
        '''
        Check if the UoM has the same category as the product standard UoM
        '''
        if not context:
            context = {}
        for rule in self.browse(cr, uid, ids, context=context):
            if rule.product_id.uom_id.category_id.id != rule.product_uom.category_id.id:
                return False
        return True

    _columns = {
        'name': fields.char('Name', required=True, copy=False),
        'active': fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the orderpoint without removing it."),
        'warehouse_id': fields.many2one('stock.warehouse', 'Warehouse', required=True, ondelete="cascade"),
        'location_id': fields.many2one('stock.location', 'Location', required=True, ondelete="cascade"),
        'product_id': fields.many2one('product.product', 'Product', required=True, ondelete='cascade', domain=[('type', '=', 'product')]),
        'product_uom': fields.related('product_id', 'uom_id', type='many2one', relation='product.uom', string='Product Unit of Measure', readonly=True, required=True),
        'product_min_qty': fields.float('Minimum Quantity', required=True, digits_compute=dp.get_precision('Product Unit of Measure'),
            help="When the virtual stock goes below the Min Quantity specified for this field, Odoo generates "\
            "a procurement to bring the forecasted quantity to the Max Quantity."),
        'product_max_qty': fields.float('Maximum Quantity', required=True, digits_compute=dp.get_precision('Product Unit of Measure'),
            help="When the virtual stock goes below the Min Quantity, Odoo generates "\
            "a procurement to bring the forecasted quantity to the Quantity specified as Max Quantity."),
        'qty_multiple': fields.float('Qty Multiple', required=True, digits_compute=dp.get_precision('Product Unit of Measure'),
            help="The procurement quantity will be rounded up to this multiple. If it is 0, the exact quantity will be used. "),
        'procurement_ids': fields.one2many('procurement.order', 'orderpoint_id', 'Created Procurements'),
        'group_id': fields.many2one('procurement.group', 'Procurement Group', help="Moves created through this orderpoint will be put in this procurement group. If none is given, the moves generated by procurement rules will be grouped into one big picking.", copy=False),
        'company_id': fields.many2one('res.company', 'Company', required=True),
        'lead_days': fields.integer('Lead Time', help="Number of days after the orderpoint is triggered to receive the products or to order to the vendor"),
        'lead_type': fields.selection([
            ('net', 'Day(s) to get the products'),
            ('supplier', 'Day(s) to purchase')
        ], 'Lead Type', required=True)
    }

    _defaults = {
        'active': lambda *a: 1,
        'lead_days': lambda *a: 1,
        'lead_type': lambda *a: 'supplier',
        'qty_multiple': lambda *a: 1,
        'name': lambda self, cr, uid, context: self.pool.get('ir.sequence').next_by_code(cr, uid, 'stock.orderpoint') or '',
        'product_uom': lambda self, cr, uid, context: context.get('product_uom', False),
        'company_id': lambda self, cr, uid, context: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.warehouse.orderpoint', context=context)
    }

    _sql_constraints = [
        ('qty_multiple_check', 'CHECK( qty_multiple >= 0 )', 'Qty Multiple must be greater than or equal to zero.'),
    ]

    _constraints = [
        (_check_product_uom, 'You have to select a product unit of measure in the same category than the default unit of measure of the product', ['product_id', 'product_uom']),
    ]

    def default_get(self, cr, uid, fields, context=None):
        # Provide warehouse/location defaults derived from the default company.
        warehouse_obj = self.pool.get('stock.warehouse')
        res = super(stock_warehouse_orderpoint, self).default_get(cr, uid, fields, context)
        # default 'warehouse_id' and 'location_id'
        if 'warehouse_id' not in res:
            warehouse_ids = res.get('company_id') and warehouse_obj.search(cr, uid, [('company_id', '=', res['company_id'])], limit=1, context=context) or []
            res['warehouse_id'] = warehouse_ids and warehouse_ids[0] or
False if 'location_id' not in res: res['location_id'] = res.get('warehouse_id') and warehouse_obj.browse(cr, uid, res['warehouse_id'], context).lot_stock_id.id or False return res def onchange_warehouse_id(self, cr, uid, ids, warehouse_id, context=None): """ Finds location id for changed warehouse. @param warehouse_id: Changed id of warehouse. @return: Dictionary of values. """ if warehouse_id: w = self.pool.get('stock.warehouse').browse(cr, uid, warehouse_id, context=context) v = {'location_id': w.lot_stock_id.id} return {'value': v} return {} def onchange_product_id(self, cr, uid, ids, product_id, context=None): """ Finds UoM for changed product. @param product_id: Changed id of product. @return: Dictionary of values. """ if product_id: prod = self.pool.get('product.product').browse(cr, uid, product_id, context=context) d = {'product_uom': [('category_id', '=', prod.uom_id.category_id.id)]} v = {'product_uom': prod.uom_id.id} return {'value': v, 'domain': d} return {'domain': {'product_uom': []}} class stock_picking_type(osv.osv): _name = "stock.picking.type" _description = "The picking type determines the picking view" _order = 'sequence' def open_barcode_interface(self, cr, uid, ids, context=None): final_url = "/stock/barcode/#action=stock.ui&picking_type_id=" + str(ids[0]) if len(ids) else '0' return {'type': 'ir.actions.act_url', 'url': final_url, 'target': 'self'} def _get_tristate_values(self, cr, uid, ids, field_name, arg, context=None): picking_obj = self.pool.get('stock.picking') res = {} for picking_type_id in ids: #get last 10 pickings of this type picking_ids = picking_obj.search(cr, uid, [('picking_type_id', '=', picking_type_id), ('state', '=', 'done')], order='date_done desc', limit=10, context=context) tristates = [] for picking in picking_obj.browse(cr, uid, picking_ids, context=context): if picking.date_done > picking.date: tristates.insert(0, {'tooltip': picking.name or '' + ": " + _('Late'), 'value': -1}) elif picking.backorder_id: 
tristates.insert(0, {'tooltip': picking.name or '' + ": " + _('Backorder exists'), 'value': 0}) else: tristates.insert(0, {'tooltip': picking.name or '' + ": " + _('OK'), 'value': 1}) res[picking_type_id] = json.dumps(tristates) return res def _get_picking_count(self, cr, uid, ids, field_names, arg, context=None): obj = self.pool.get('stock.picking') domains = { 'count_picking_draft': [('state', '=', 'draft')], 'count_picking_waiting': [('state', 'in', ('confirmed', 'waiting'))], 'count_picking_ready': [('state', 'in', ('assigned', 'partially_available'))], 'count_picking': [('state', 'in', ('assigned', 'waiting', 'confirmed', 'partially_available'))], 'count_picking_late': [('min_date', '<', time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)), ('state', 'in', ('assigned', 'waiting', 'confirmed', 'partially_available'))], 'count_picking_backorders': [('backorder_id', '!=', False), ('state', 'in', ('confirmed', 'assigned', 'waiting', 'partially_available'))], } result = {} for field in domains: data = obj.read_group(cr, uid, domains[field] + [('state', 'not in', ('done', 'cancel')), ('picking_type_id', 'in', ids)], ['picking_type_id'], ['picking_type_id'], context=context) count = dict(map(lambda x: (x['picking_type_id'] and x['picking_type_id'][0], x['picking_type_id_count']), data)) for tid in ids: result.setdefault(tid, {})[field] = count.get(tid, 0) for tid in ids: if result[tid]['count_picking']: result[tid]['rate_picking_late'] = result[tid]['count_picking_late'] * 100 / result[tid]['count_picking'] result[tid]['rate_picking_backorders'] = result[tid]['count_picking_backorders'] * 100 / result[tid]['count_picking'] else: result[tid]['rate_picking_late'] = 0 result[tid]['rate_picking_backorders'] = 0 return result def _get_action(self, cr, uid, ids, action, context=None): mod_obj = self.pool.get('ir.model.data') act_obj = self.pool.get('ir.actions.act_window') result = mod_obj.xmlid_to_res_id(cr, uid, action, raise_if_not_found=True) result = act_obj.read(cr, uid, 
[result], context=context)[0] if ids: picking_type = self.browse(cr, uid, ids[0], context=context) result['display_name'] = picking_type.display_name return result def get_action_picking_tree_late(self, cr, uid, ids, context=None): return self._get_action(cr, uid, ids, 'stock.action_picking_tree_late', context=context) def get_action_picking_tree_backorder(self, cr, uid, ids, context=None): return self._get_action(cr, uid, ids, 'stock.action_picking_tree_backorder', context=context) def get_action_picking_tree_waiting(self, cr, uid, ids, context=None): return self._get_action(cr, uid, ids, 'stock.action_picking_tree_waiting', context=context) def get_action_picking_tree_ready(self, cr, uid, ids, context=None): return self._get_action(cr, uid, ids, 'stock.action_picking_tree_ready', context=context) def get_stock_picking_action_picking_type(self, cr, uid, ids, context=None): return self._get_action(cr, uid, ids, 'stock.stock_picking_action_picking_type', context=context) def onchange_picking_code(self, cr, uid, ids, picking_code=False): if not picking_code: return False obj_data = self.pool.get('ir.model.data') stock_loc = obj_data.xmlid_to_res_id(cr, uid, 'stock.stock_location_stock') result = { 'default_location_src_id': stock_loc, 'default_location_dest_id': stock_loc, } if picking_code == 'incoming': result['default_location_src_id'] = obj_data.xmlid_to_res_id(cr, uid, 'stock.stock_location_suppliers') elif picking_code == 'outgoing': result['default_location_dest_id'] = obj_data.xmlid_to_res_id(cr, uid, 'stock.stock_location_customers') return {'value': result} def _get_name(self, cr, uid, ids, field_names, arg, context=None): return dict(self.name_get(cr, uid, ids, context=context)) def name_get(self, cr, uid, ids, context=None): """Overides orm name_get method to display 'Warehouse_name: PickingType_name' """ if context is None: context = {} if not isinstance(ids, list): ids = [ids] res = [] if not ids: return res for record in self.browse(cr, uid, ids, 
context=context): name = record.name if record.warehouse_id: name = record.warehouse_id.name + ': ' +name if context.get('special_shortened_wh_name'): if record.warehouse_id: name = record.warehouse_id.name else: name = _('Customer') + ' (' + record.name + ')' res.append((record.id, name)) return res def _default_warehouse(self, cr, uid, context=None): user = self.pool.get('res.users').browse(cr, uid, uid, context) res = self.pool.get('stock.warehouse').search(cr, uid, [('company_id', '=', user.company_id.id)], limit=1, context=context) return res and res[0] or False _columns = { 'name': fields.char('Picking Type Name', translate=True, required=True), 'complete_name': fields.function(_get_name, type='char', string='Name'), 'color': fields.integer('Color'), 'sequence': fields.integer('Sequence', help="Used to order the 'All Operations' kanban view"), 'sequence_id': fields.many2one('ir.sequence', 'Reference Sequence', required=True), 'default_location_src_id': fields.many2one('stock.location', 'Default Source Location', help="This is the default source location when you create a picking manually with this picking type. It is possible however to change it or that the routes put another location. If it is empty, it will check for the supplier location on the partner. "), 'default_location_dest_id': fields.many2one('stock.location', 'Default Destination Location', help="This is the default destination location when you create a picking manually with this picking type. It is possible however to change it or that the routes put another location. If it is empty, it will check for the customer location on the partner. 
"), 'code': fields.selection([('incoming', 'Suppliers'), ('outgoing', 'Customers'), ('internal', 'Internal')], 'Type of Operation', required=True), 'return_picking_type_id': fields.many2one('stock.picking.type', 'Picking Type for Returns'), 'show_entire_packs': fields.boolean('Allow moving packs', help="If checked, this shows the packs to be moved as a whole in the Operations tab all the time, even if there was no entire pack reserved."), 'warehouse_id': fields.many2one('stock.warehouse', 'Warehouse', ondelete='cascade'), 'active': fields.boolean('Active'), 'use_create_lots': fields.boolean('Create New Lots', help="If this is checked only, it will suppose you want to create new Serial Numbers / Lots, so you can provide them in a text field. "), 'use_existing_lots': fields.boolean('Use Existing Lots', help="If this is checked, you will be able to choose the Serial Number / Lots. You can also decide to not put lots in this picking type. This means it will create stock with no lot or not put a restriction on the lot taken. 
"), # Statistics for the kanban view 'last_done_picking': fields.function(_get_tristate_values, type='char', string='Last 10 Done Pickings'), 'count_picking_draft': fields.function(_get_picking_count, type='integer', multi='_get_picking_count'), 'count_picking_ready': fields.function(_get_picking_count, type='integer', multi='_get_picking_count'), 'count_picking': fields.function(_get_picking_count, type='integer', multi='_get_picking_count'), 'count_picking_waiting': fields.function(_get_picking_count, type='integer', multi='_get_picking_count'), 'count_picking_late': fields.function(_get_picking_count, type='integer', multi='_get_picking_count'), 'count_picking_backorders': fields.function(_get_picking_count, type='integer', multi='_get_picking_count'), 'rate_picking_late': fields.function(_get_picking_count, type='integer', multi='_get_picking_count'), 'rate_picking_backorders': fields.function(_get_picking_count, type='integer', multi='_get_picking_count'), # Barcode nomenclature 'barcode_nomenclature_id': fields.many2one('barcode.nomenclature','Barcode Nomenclature', help='A barcode nomenclature'), } _defaults = { 'warehouse_id': _default_warehouse, 'active': True, 'use_existing_lots': True, 'use_create_lots': True, } class barcode_rule(models.Model): _inherit = 'barcode.rule' def _get_type_selection(self): types = sets.Set(super(barcode_rule,self)._get_type_selection()) types.update([ ('weight', _('Weighted Product')), ('location', _('Location')), ('lot', _('Lot')), ('package', _('Package')) ]) return list(types) class StockPackOperation(models.Model): _inherit = 'stock.pack.operation' @api.onchange('pack_lot_ids') def _onchange_packlots(self): self.qty_done = sum([x.qty for x in self.pack_lot_ids])
gpl-3.0
hamish2014/optTune
optTune/tMOPSO_code/test_CPV_evaluation_manager.py
1
1419
#! /usr/bin/env python import sys, numpy sys.path.append('../../') from tMOPSO_module import CPV_evaluation_manager, get_F_vals_at_specified_OFE_budgets def optAlg(y): ''' y = [CPV_1, OFE_budget, randomSeed] ''' n = numpy.random.randint(5,10) E = numpy.arange(1,n) F = numpy.arange(1,n) return F, E def addtobatch(y): pass print('Test to check tMOPSOs mechanisms for handling algorithm which terminate before reaching the specified OFE budget') c = CPV_evaluation_manager(numpy.array([1,2]), optAlg, 10, 8, addtobatch) sampleSizes = [3,5] for i, repeats in enumerate(sampleSizes): evals, fvals = c.results(repeats) print(fvals) print('evals_used in generating extra %i runs : %i (total evals should be %i)' % (repeats, c.OFEs_used_over_last_n_runs(repeats),sum(fvals[len(fvals)-1,:]))) print('\ntesting, tMOPSO get_F_vals_at_specified_OFE_budgets') Fv = numpy.array([ 0.9, 0.5, 0.3, 0.2 , 0.15, 0.12 ]) Ev = numpy.arange(1,len(Fv)+1)*3 E_desired = numpy.array([3,7,10,11,14,20,25]) def sub_fun(F_in,E_in,E_d): print('zip(E_in,F_in) %s' %' '.join('%i,%1.2f ' % (e,f) for e,f in zip(E_in,F_in))) print(' E_desired %s' % ' '.join(map(str,E_d))) F_out, E_out = get_F_vals_at_specified_OFE_budgets(F_in,E_in,E_d) print(' zip(E_out,F_out) %s' %' '.join('%i,%1.2f ' % (e,f) for e,f in zip(E_out,F_out))) sub_fun(Fv,Ev,E_desired) sub_fun(Fv[1:],Ev[1:],E_desired)
gpl-3.0
rhyolight/nupic.son
tests/app/soc/mapreduce/test_convert_user.py
1
2682
# Copyright 2012 the Melange authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for soc.logic.mapreduce_convert_user.""" import unittest from google.appengine.api import users from soc.models.user import User from soc.mapreduce import convert_user from soc.modules.seeder.logic.seeder import logic as seeder_logic from soc.modules.seeder.logic.providers import string as seeder_string class TestAccounts(unittest.TestCase): """Tests for convert_user logic. """ def setUp(self): self.link_id = seeder_string.LinkIDProvider(User).getValue() def convert(self, email, same_user_id=False): account = users.User(email=email) properties = { 'account': account, 'key_name': self.link_id, 'link_id': self.link_id, 'name': 'Test user', 'status': 'valid', } user = seeder_logic.seed(User, properties) if same_user_id: user = User.get_by_key_name(self.link_id) user.user_id = user.account.user_id() user.put() return convert_user.convert_user_txn(user.key()) def assertUserEqual(self, email): user = User.get_by_key_name(self.link_id) self.assertEqual(email, user.account.email()) self.assertTrue(user.account.user_id()) self.assertEqual(user.account.user_id(), user.user_id) def testNoop(self): result = self.convert('test@example.com', True) self.assertEqual(convert_user.IGNORED_USER, result) self.assertUserEqual('test@example.com') def testConverted(self): result = self.convert('test@gmail.com', True) self.assertEqual(convert_user.IGNORED_USER, result) self.assertUserEqual('test@gmail.com') def 
testPartiallyConverted(self): result = self.convert('test@gmail.com') self.assertEqual(convert_user.CONVERTED_USER, result) self.assertUserEqual('test@gmail.com') def testNonAuthConverted(self): result = self.convert('test@example.com') self.assertEqual(convert_user.CONVERTED_USER, result) self.assertUserEqual('test@example.com') def testFullConversion(self): result = self.convert('test') self.assertEqual(convert_user.CONVERTED_USER, result) self.assertUserEqual('test@gmail.com')
apache-2.0
gonboy/sl4a
python/src/Lib/test/test_weakref.py
55
41054
import gc import sys import unittest import UserList import weakref import operator from test import test_support # Used in ReferencesTestCase.test_ref_created_during_del() . ref_from_del = None class C: def method(self): pass class Callable: bar = None def __call__(self, x): self.bar = x def create_function(): def f(): pass return f def create_bound_method(): return C().method def create_unbound_method(): return C.method class TestBase(unittest.TestCase): def setUp(self): self.cbcalled = 0 def callback(self, ref): self.cbcalled += 1 class ReferencesTestCase(TestBase): def test_basic_ref(self): self.check_basic_ref(C) self.check_basic_ref(create_function) self.check_basic_ref(create_bound_method) self.check_basic_ref(create_unbound_method) # Just make sure the tp_repr handler doesn't raise an exception. # Live reference: o = C() wr = weakref.ref(o) `wr` # Dead reference: del o `wr` def test_basic_callback(self): self.check_basic_callback(C) self.check_basic_callback(create_function) self.check_basic_callback(create_bound_method) self.check_basic_callback(create_unbound_method) def test_multiple_callbacks(self): o = C() ref1 = weakref.ref(o, self.callback) ref2 = weakref.ref(o, self.callback) del o self.assert_(ref1() is None, "expected reference to be invalidated") self.assert_(ref2() is None, "expected reference to be invalidated") self.assert_(self.cbcalled == 2, "callback not called the right number of times") def test_multiple_selfref_callbacks(self): # Make sure all references are invalidated before callbacks are called # # What's important here is that we're using the first # reference in the callback invoked on the second reference # (the most recently created ref is cleaned up first). This # tests that all references to the object are invalidated # before any of the callbacks are invoked, so that we only # have one invocation of _weakref.c:cleanup_helper() active # for a particular object at a time. 
# def callback(object, self=self): self.ref() c = C() self.ref = weakref.ref(c, callback) ref1 = weakref.ref(c, callback) del c def test_proxy_ref(self): o = C() o.bar = 1 ref1 = weakref.proxy(o, self.callback) ref2 = weakref.proxy(o, self.callback) del o def check(proxy): proxy.bar self.assertRaises(weakref.ReferenceError, check, ref1) self.assertRaises(weakref.ReferenceError, check, ref2) self.assertRaises(weakref.ReferenceError, bool, weakref.proxy(C())) self.assert_(self.cbcalled == 2) def check_basic_ref(self, factory): o = factory() ref = weakref.ref(o) self.assert_(ref() is not None, "weak reference to live object should be live") o2 = ref() self.assert_(o is o2, "<ref>() should return original object if live") def check_basic_callback(self, factory): self.cbcalled = 0 o = factory() ref = weakref.ref(o, self.callback) del o self.assert_(self.cbcalled == 1, "callback did not properly set 'cbcalled'") self.assert_(ref() is None, "ref2 should be dead after deleting object reference") def test_ref_reuse(self): o = C() ref1 = weakref.ref(o) # create a proxy to make sure that there's an intervening creation # between these two; it should make no difference proxy = weakref.proxy(o) ref2 = weakref.ref(o) self.assert_(ref1 is ref2, "reference object w/out callback should be re-used") o = C() proxy = weakref.proxy(o) ref1 = weakref.ref(o) ref2 = weakref.ref(o) self.assert_(ref1 is ref2, "reference object w/out callback should be re-used") self.assert_(weakref.getweakrefcount(o) == 2, "wrong weak ref count for object") del proxy self.assert_(weakref.getweakrefcount(o) == 1, "wrong weak ref count for object after deleting proxy") def test_proxy_reuse(self): o = C() proxy1 = weakref.proxy(o) ref = weakref.ref(o) proxy2 = weakref.proxy(o) self.assert_(proxy1 is proxy2, "proxy object w/out callback should have been re-used") def test_basic_proxy(self): o = C() self.check_proxy(o, weakref.proxy(o)) L = UserList.UserList() p = weakref.proxy(L) self.failIf(p, "proxy for empty 
UserList should be false") p.append(12) self.assertEqual(len(L), 1) self.failUnless(p, "proxy for non-empty UserList should be true") p[:] = [2, 3] self.assertEqual(len(L), 2) self.assertEqual(len(p), 2) self.failUnless(3 in p, "proxy didn't support __contains__() properly") p[1] = 5 self.assertEqual(L[1], 5) self.assertEqual(p[1], 5) L2 = UserList.UserList(L) p2 = weakref.proxy(L2) self.assertEqual(p, p2) ## self.assertEqual(repr(L2), repr(p2)) L3 = UserList.UserList(range(10)) p3 = weakref.proxy(L3) self.assertEqual(L3[:], p3[:]) self.assertEqual(L3[5:], p3[5:]) self.assertEqual(L3[:5], p3[:5]) self.assertEqual(L3[2:5], p3[2:5]) def test_proxy_index(self): class C: def __index__(self): return 10 o = C() p = weakref.proxy(o) self.assertEqual(operator.index(p), 10) def test_proxy_div(self): class C: def __floordiv__(self, other): return 42 def __ifloordiv__(self, other): return 21 o = C() p = weakref.proxy(o) self.assertEqual(p // 5, 42) p //= 5 self.assertEqual(p, 21) # The PyWeakref_* C API is documented as allowing either NULL or # None as the value for the callback, where either means "no # callback". The "no callback" ref and proxy objects are supposed # to be shared so long as they exist by all callers so long as # they are active. In Python 2.3.3 and earlier, this guarantee # was not honored, and was broken in different ways for # PyWeakref_NewRef() and PyWeakref_NewProxy(). (Two tests.) 
def test_shared_ref_without_callback(self): self.check_shared_without_callback(weakref.ref) def test_shared_proxy_without_callback(self): self.check_shared_without_callback(weakref.proxy) def check_shared_without_callback(self, makeref): o = Object(1) p1 = makeref(o, None) p2 = makeref(o, None) self.assert_(p1 is p2, "both callbacks were None in the C API") del p1, p2 p1 = makeref(o) p2 = makeref(o, None) self.assert_(p1 is p2, "callbacks were NULL, None in the C API") del p1, p2 p1 = makeref(o) p2 = makeref(o) self.assert_(p1 is p2, "both callbacks were NULL in the C API") del p1, p2 p1 = makeref(o, None) p2 = makeref(o) self.assert_(p1 is p2, "callbacks were None, NULL in the C API") def test_callable_proxy(self): o = Callable() ref1 = weakref.proxy(o) self.check_proxy(o, ref1) self.assert_(type(ref1) is weakref.CallableProxyType, "proxy is not of callable type") ref1('twinkies!') self.assert_(o.bar == 'twinkies!', "call through proxy not passed through to original") ref1(x='Splat.') self.assert_(o.bar == 'Splat.', "call through proxy not passed through to original") # expect due to too few args self.assertRaises(TypeError, ref1) # expect due to too many args self.assertRaises(TypeError, ref1, 1, 2, 3) def check_proxy(self, o, proxy): o.foo = 1 self.assert_(proxy.foo == 1, "proxy does not reflect attribute addition") o.foo = 2 self.assert_(proxy.foo == 2, "proxy does not reflect attribute modification") del o.foo self.assert_(not hasattr(proxy, 'foo'), "proxy does not reflect attribute removal") proxy.foo = 1 self.assert_(o.foo == 1, "object does not reflect attribute addition via proxy") proxy.foo = 2 self.assert_( o.foo == 2, "object does not reflect attribute modification via proxy") del proxy.foo self.assert_(not hasattr(o, 'foo'), "object does not reflect attribute removal via proxy") def test_proxy_deletion(self): # Test clearing of SF bug #762891 class Foo: result = None def __delitem__(self, accessor): self.result = accessor g = Foo() f = weakref.proxy(g) 
del f[0] self.assertEqual(f.result, 0) def test_proxy_bool(self): # Test clearing of SF bug #1170766 class List(list): pass lyst = List() self.assertEqual(bool(weakref.proxy(lyst)), bool(lyst)) def test_getweakrefcount(self): o = C() ref1 = weakref.ref(o) ref2 = weakref.ref(o, self.callback) self.assert_(weakref.getweakrefcount(o) == 2, "got wrong number of weak reference objects") proxy1 = weakref.proxy(o) proxy2 = weakref.proxy(o, self.callback) self.assert_(weakref.getweakrefcount(o) == 4, "got wrong number of weak reference objects") del ref1, ref2, proxy1, proxy2 self.assert_(weakref.getweakrefcount(o) == 0, "weak reference objects not unlinked from" " referent when discarded.") # assumes ints do not support weakrefs self.assert_(weakref.getweakrefcount(1) == 0, "got wrong number of weak reference objects for int") def test_getweakrefs(self): o = C() ref1 = weakref.ref(o, self.callback) ref2 = weakref.ref(o, self.callback) del ref1 self.assert_(weakref.getweakrefs(o) == [ref2], "list of refs does not match") o = C() ref1 = weakref.ref(o, self.callback) ref2 = weakref.ref(o, self.callback) del ref2 self.assert_(weakref.getweakrefs(o) == [ref1], "list of refs does not match") del ref1 self.assert_(weakref.getweakrefs(o) == [], "list of refs not cleared") # assumes ints do not support weakrefs self.assert_(weakref.getweakrefs(1) == [], "list of refs does not match for int") def test_newstyle_number_ops(self): class F(float): pass f = F(2.0) p = weakref.proxy(f) self.assert_(p + 1.0 == 3.0) self.assert_(1.0 + p == 3.0) # this used to SEGV def test_callbacks_protected(self): # Callbacks protected from already-set exceptions? # Regression test for SF bug #478534. 
class BogusError(Exception): pass data = {} def remove(k): del data[k] def encapsulate(): f = lambda : () data[weakref.ref(f, remove)] = None raise BogusError try: encapsulate() except BogusError: pass else: self.fail("exception not properly restored") try: encapsulate() except BogusError: pass else: self.fail("exception not properly restored") def test_sf_bug_840829(self): # "weakref callbacks and gc corrupt memory" # subtype_dealloc erroneously exposed a new-style instance # already in the process of getting deallocated to gc, # causing double-deallocation if the instance had a weakref # callback that triggered gc. # If the bug exists, there probably won't be an obvious symptom # in a release build. In a debug build, a segfault will occur # when the second attempt to remove the instance from the "list # of all objects" occurs. import gc class C(object): pass c = C() wr = weakref.ref(c, lambda ignore: gc.collect()) del c # There endeth the first part. It gets worse. del wr c1 = C() c1.i = C() wr = weakref.ref(c1.i, lambda ignore: gc.collect()) c2 = C() c2.c1 = c1 del c1 # still alive because c2 points to it # Now when subtype_dealloc gets called on c2, it's not enough just # that c2 is immune from gc while the weakref callbacks associated # with c2 execute (there are none in this 2nd half of the test, btw). # subtype_dealloc goes on to call the base classes' deallocs too, # so any gc triggered by weakref callbacks associated with anything # torn down by a base class dealloc can also trigger double # deallocation of c2. del c2 def test_callback_in_cycle_1(self): import gc class J(object): pass class II(object): def acallback(self, ignore): self.J I = II() I.J = J I.wr = weakref.ref(J, I.acallback) # Now J and II are each in a self-cycle (as all new-style class # objects are, since their __mro__ points back to them). I holds # both a weak reference (I.wr) and a strong reference (I.J) to class # J. 
I is also in a cycle (I.wr points to a weakref that references # I.acallback). When we del these three, they all become trash, but # the cycles prevent any of them from getting cleaned up immediately. # Instead they have to wait for cyclic gc to deduce that they're # trash. # # gc used to call tp_clear on all of them, and the order in which # it does that is pretty accidental. The exact order in which we # built up these things manages to provoke gc into running tp_clear # in just the right order (I last). Calling tp_clear on II leaves # behind an insane class object (its __mro__ becomes NULL). Calling # tp_clear on J breaks its self-cycle, but J doesn't get deleted # just then because of the strong reference from I.J. Calling # tp_clear on I starts to clear I's __dict__, and just happens to # clear I.J first -- I.wr is still intact. That removes the last # reference to J, which triggers the weakref callback. The callback # tries to do "self.J", and instances of new-style classes look up # attributes ("J") in the class dict first. The class (II) wants to # search II.__mro__, but that's NULL. The result was a segfault in # a release build, and an assert failure in a debug build. del I, J, II gc.collect() def test_callback_in_cycle_2(self): import gc # This is just like test_callback_in_cycle_1, except that II is an # old-style class. The symptom is different then: an instance of an # old-style class looks in its own __dict__ first. 'J' happens to # get cleared from I.__dict__ before 'wr', and 'J' was never in II's # __dict__, so the attribute isn't found. The difference is that # the old-style II doesn't have a NULL __mro__ (it doesn't have any # __mro__), so no segfault occurs. Instead it got: # test_callback_in_cycle_2 (__main__.ReferencesTestCase) ... 
# Exception exceptions.AttributeError: # "II instance has no attribute 'J'" in <bound method II.acallback # of <?.II instance at 0x00B9B4B8>> ignored class J(object): pass class II: def acallback(self, ignore): self.J I = II() I.J = J I.wr = weakref.ref(J, I.acallback) del I, J, II gc.collect() def test_callback_in_cycle_3(self): import gc # This one broke the first patch that fixed the last two. In this # case, the objects reachable from the callback aren't also reachable # from the object (c1) *triggering* the callback: you can get to # c1 from c2, but not vice-versa. The result was that c2's __dict__ # got tp_clear'ed by the time the c2.cb callback got invoked. class C: def cb(self, ignore): self.me self.c1 self.wr c1, c2 = C(), C() c2.me = c2 c2.c1 = c1 c2.wr = weakref.ref(c1, c2.cb) del c1, c2 gc.collect() def test_callback_in_cycle_4(self): import gc # Like test_callback_in_cycle_3, except c2 and c1 have different # classes. c2's class (C) isn't reachable from c1 then, so protecting # objects reachable from the dying object (c1) isn't enough to stop # c2's class (C) from getting tp_clear'ed before c2.cb is invoked. # The result was a segfault (C.__mro__ was NULL when the callback # tried to look up self.me). class C(object): def cb(self, ignore): self.me self.c1 self.wr class D: pass c1, c2 = D(), C() c2.me = c2 c2.c1 = c1 c2.wr = weakref.ref(c1, c2.cb) del c1, c2, C, D gc.collect() def test_callback_in_cycle_resurrection(self): import gc # Do something nasty in a weakref callback: resurrect objects # from dead cycles. For this to be attempted, the weakref and # its callback must also be part of the cyclic trash (else the # objects reachable via the callback couldn't be in cyclic trash # to begin with -- the callback would act like an external root). # But gc clears trash weakrefs with callbacks early now, which # disables the callbacks, so the callbacks shouldn't get called # at all (and so nothing actually gets resurrected). 
alist = [] class C(object): def __init__(self, value): self.attribute = value def acallback(self, ignore): alist.append(self.c) c1, c2 = C(1), C(2) c1.c = c2 c2.c = c1 c1.wr = weakref.ref(c2, c1.acallback) c2.wr = weakref.ref(c1, c2.acallback) def C_went_away(ignore): alist.append("C went away") wr = weakref.ref(C, C_went_away) del c1, c2, C # make them all trash self.assertEqual(alist, []) # del isn't enough to reclaim anything gc.collect() # c1.wr and c2.wr were part of the cyclic trash, so should have # been cleared without their callbacks executing. OTOH, the weakref # to C is bound to a function local (wr), and wasn't trash, so that # callback should have been invoked when C went away. self.assertEqual(alist, ["C went away"]) # The remaining weakref should be dead now (its callback ran). self.assertEqual(wr(), None) del alist[:] gc.collect() self.assertEqual(alist, []) def test_callbacks_on_callback(self): import gc # Set up weakref callbacks *on* weakref callbacks. alist = [] def safe_callback(ignore): alist.append("safe_callback called") class C(object): def cb(self, ignore): alist.append("cb called") c, d = C(), C() c.other = d d.other = c callback = c.cb c.wr = weakref.ref(d, callback) # this won't trigger d.wr = weakref.ref(callback, d.cb) # ditto external_wr = weakref.ref(callback, safe_callback) # but this will self.assert_(external_wr() is callback) # The weakrefs attached to c and d should get cleared, so that # C.cb is never called. But external_wr isn't part of the cyclic # trash, and no cyclic trash is reachable from it, so safe_callback # should get invoked when the bound method object callback (c.cb) # -- which is itself a callback, and also part of the cyclic trash -- # gets reclaimed at the end of gc. 
del callback, c, d, C self.assertEqual(alist, []) # del isn't enough to clean up cycles gc.collect() self.assertEqual(alist, ["safe_callback called"]) self.assertEqual(external_wr(), None) del alist[:] gc.collect() self.assertEqual(alist, []) def test_gc_during_ref_creation(self): self.check_gc_during_creation(weakref.ref) def test_gc_during_proxy_creation(self): self.check_gc_during_creation(weakref.proxy) def check_gc_during_creation(self, makeref): thresholds = gc.get_threshold() gc.set_threshold(1, 1, 1) gc.collect() class A: pass def callback(*args): pass referenced = A() a = A() a.a = a a.wr = makeref(referenced) try: # now make sure the object and the ref get labeled as # cyclic trash: a = A() weakref.ref(referenced, callback) finally: gc.set_threshold(*thresholds) def test_ref_created_during_del(self): # Bug #1377858 # A weakref created in an object's __del__() would crash the # interpreter when the weakref was cleaned up since it would refer to # non-existent memory. This test should not segfault the interpreter. 
class Target(object): def __del__(self): global ref_from_del ref_from_del = weakref.ref(self) w = Target() def test_init(self): # Issue 3634 # <weakref to class>.__init__() doesn't check errors correctly r = weakref.ref(Exception) self.assertRaises(TypeError, r.__init__, 0, 0, 0, 0, 0) # No exception should be raised here gc.collect() class SubclassableWeakrefTestCase(TestBase): def test_subclass_refs(self): class MyRef(weakref.ref): def __init__(self, ob, callback=None, value=42): self.value = value super(MyRef, self).__init__(ob, callback) def __call__(self): self.called = True return super(MyRef, self).__call__() o = Object("foo") mr = MyRef(o, value=24) self.assert_(mr() is o) self.assert_(mr.called) self.assertEqual(mr.value, 24) del o self.assert_(mr() is None) self.assert_(mr.called) def test_subclass_refs_dont_replace_standard_refs(self): class MyRef(weakref.ref): pass o = Object(42) r1 = MyRef(o) r2 = weakref.ref(o) self.assert_(r1 is not r2) self.assertEqual(weakref.getweakrefs(o), [r2, r1]) self.assertEqual(weakref.getweakrefcount(o), 2) r3 = MyRef(o) self.assertEqual(weakref.getweakrefcount(o), 3) refs = weakref.getweakrefs(o) self.assertEqual(len(refs), 3) self.assert_(r2 is refs[0]) self.assert_(r1 in refs[1:]) self.assert_(r3 in refs[1:]) def test_subclass_refs_dont_conflate_callbacks(self): class MyRef(weakref.ref): pass o = Object(42) r1 = MyRef(o, id) r2 = MyRef(o, str) self.assert_(r1 is not r2) refs = weakref.getweakrefs(o) self.assert_(r1 in refs) self.assert_(r2 in refs) def test_subclass_refs_with_slots(self): class MyRef(weakref.ref): __slots__ = "slot1", "slot2" def __new__(type, ob, callback, slot1, slot2): return weakref.ref.__new__(type, ob, callback) def __init__(self, ob, callback, slot1, slot2): self.slot1 = slot1 self.slot2 = slot2 def meth(self): return self.slot1 + self.slot2 o = Object(42) r = MyRef(o, None, "abc", "def") self.assertEqual(r.slot1, "abc") self.assertEqual(r.slot2, "def") self.assertEqual(r.meth(), "abcdef") 
self.failIf(hasattr(r, "__dict__")) def test_subclass_refs_with_cycle(self): # Bug #3110 # An instance of a weakref subclass can have attributes. # If such a weakref holds the only strong reference to the object, # deleting the weakref will delete the object. In this case, # the callback must not be called, because the ref object is # being deleted. class MyRef(weakref.ref): pass # Use a local callback, for "regrtest -R::" # to detect refcounting problems def callback(w): self.cbcalled += 1 o = C() r1 = MyRef(o, callback) r1.o = o del o del r1 # Used to crash here self.assertEqual(self.cbcalled, 0) # Same test, with two weakrefs to the same object # (since code paths are different) o = C() r1 = MyRef(o, callback) r2 = MyRef(o, callback) r1.r = r2 r2.o = o del o del r2 del r1 # Used to crash here self.assertEqual(self.cbcalled, 0) class Object: def __init__(self, arg): self.arg = arg def __repr__(self): return "<Object %r>" % self.arg class MappingTestCase(TestBase): COUNT = 10 def test_weak_values(self): # # This exercises d.copy(), d.items(), d[], del d[], len(d). # dict, objects = self.make_weak_valued_dict() for o in objects: self.assert_(weakref.getweakrefcount(o) == 1, "wrong number of weak references to %r!" 
% o) self.assert_(o is dict[o.arg], "wrong object returned by weak dict!") items1 = dict.items() items2 = dict.copy().items() items1.sort() items2.sort() self.assert_(items1 == items2, "cloning of weak-valued dictionary did not work!") del items1, items2 self.assert_(len(dict) == self.COUNT) del objects[0] self.assert_(len(dict) == (self.COUNT - 1), "deleting object did not cause dictionary update") del objects, o self.assert_(len(dict) == 0, "deleting the values did not clear the dictionary") # regression on SF bug #447152: dict = weakref.WeakValueDictionary() self.assertRaises(KeyError, dict.__getitem__, 1) dict[2] = C() self.assertRaises(KeyError, dict.__getitem__, 2) def test_weak_keys(self): # # This exercises d.copy(), d.items(), d[] = v, d[], del d[], # len(d), d.has_key(). # dict, objects = self.make_weak_keyed_dict() for o in objects: self.assert_(weakref.getweakrefcount(o) == 1, "wrong number of weak references to %r!" % o) self.assert_(o.arg is dict[o], "wrong object returned by weak dict!") items1 = dict.items() items2 = dict.copy().items() self.assert_(set(items1) == set(items2), "cloning of weak-keyed dictionary did not work!") del items1, items2 self.assert_(len(dict) == self.COUNT) del objects[0] self.assert_(len(dict) == (self.COUNT - 1), "deleting object did not cause dictionary update") del objects, o self.assert_(len(dict) == 0, "deleting the keys did not clear the dictionary") o = Object(42) dict[o] = "What is the meaning of the universe?" 
self.assert_(dict.has_key(o)) self.assert_(not dict.has_key(34)) def test_weak_keyed_iters(self): dict, objects = self.make_weak_keyed_dict() self.check_iters(dict) # Test keyrefs() refs = dict.keyrefs() self.assertEqual(len(refs), len(objects)) objects2 = list(objects) for wr in refs: ob = wr() self.assert_(dict.has_key(ob)) self.assert_(ob in dict) self.assertEqual(ob.arg, dict[ob]) objects2.remove(ob) self.assertEqual(len(objects2), 0) # Test iterkeyrefs() objects2 = list(objects) self.assertEqual(len(list(dict.iterkeyrefs())), len(objects)) for wr in dict.iterkeyrefs(): ob = wr() self.assert_(dict.has_key(ob)) self.assert_(ob in dict) self.assertEqual(ob.arg, dict[ob]) objects2.remove(ob) self.assertEqual(len(objects2), 0) def test_weak_valued_iters(self): dict, objects = self.make_weak_valued_dict() self.check_iters(dict) # Test valuerefs() refs = dict.valuerefs() self.assertEqual(len(refs), len(objects)) objects2 = list(objects) for wr in refs: ob = wr() self.assertEqual(ob, dict[ob.arg]) self.assertEqual(ob.arg, dict[ob.arg].arg) objects2.remove(ob) self.assertEqual(len(objects2), 0) # Test itervaluerefs() objects2 = list(objects) self.assertEqual(len(list(dict.itervaluerefs())), len(objects)) for wr in dict.itervaluerefs(): ob = wr() self.assertEqual(ob, dict[ob.arg]) self.assertEqual(ob.arg, dict[ob.arg].arg) objects2.remove(ob) self.assertEqual(len(objects2), 0) def check_iters(self, dict): # item iterator: items = dict.items() for item in dict.iteritems(): items.remove(item) self.assert_(len(items) == 0, "iteritems() did not touch all items") # key iterator, via __iter__(): keys = dict.keys() for k in dict: keys.remove(k) self.assert_(len(keys) == 0, "__iter__() did not touch all keys") # key iterator, via iterkeys(): keys = dict.keys() for k in dict.iterkeys(): keys.remove(k) self.assert_(len(keys) == 0, "iterkeys() did not touch all keys") # value iterator: values = dict.values() for v in dict.itervalues(): values.remove(v) self.assert_(len(values) == 
0, "itervalues() did not touch all values") def test_make_weak_keyed_dict_from_dict(self): o = Object(3) dict = weakref.WeakKeyDictionary({o:364}) self.assert_(dict[o] == 364) def test_make_weak_keyed_dict_from_weak_keyed_dict(self): o = Object(3) dict = weakref.WeakKeyDictionary({o:364}) dict2 = weakref.WeakKeyDictionary(dict) self.assert_(dict[o] == 364) def make_weak_keyed_dict(self): dict = weakref.WeakKeyDictionary() objects = map(Object, range(self.COUNT)) for o in objects: dict[o] = o.arg return dict, objects def make_weak_valued_dict(self): dict = weakref.WeakValueDictionary() objects = map(Object, range(self.COUNT)) for o in objects: dict[o.arg] = o return dict, objects def check_popitem(self, klass, key1, value1, key2, value2): weakdict = klass() weakdict[key1] = value1 weakdict[key2] = value2 self.assert_(len(weakdict) == 2) k, v = weakdict.popitem() self.assert_(len(weakdict) == 1) if k is key1: self.assert_(v is value1) else: self.assert_(v is value2) k, v = weakdict.popitem() self.assert_(len(weakdict) == 0) if k is key1: self.assert_(v is value1) else: self.assert_(v is value2) def test_weak_valued_dict_popitem(self): self.check_popitem(weakref.WeakValueDictionary, "key1", C(), "key2", C()) def test_weak_keyed_dict_popitem(self): self.check_popitem(weakref.WeakKeyDictionary, C(), "value 1", C(), "value 2") def check_setdefault(self, klass, key, value1, value2): self.assert_(value1 is not value2, "invalid test" " -- value parameters must be distinct objects") weakdict = klass() o = weakdict.setdefault(key, value1) self.assert_(o is value1) self.assert_(weakdict.has_key(key)) self.assert_(weakdict.get(key) is value1) self.assert_(weakdict[key] is value1) o = weakdict.setdefault(key, value2) self.assert_(o is value1) self.assert_(weakdict.has_key(key)) self.assert_(weakdict.get(key) is value1) self.assert_(weakdict[key] is value1) def test_weak_valued_dict_setdefault(self): self.check_setdefault(weakref.WeakValueDictionary, "key", C(), C()) def 
test_weak_keyed_dict_setdefault(self): self.check_setdefault(weakref.WeakKeyDictionary, C(), "value 1", "value 2") def check_update(self, klass, dict): # # This exercises d.update(), len(d), d.keys(), d.has_key(), # d.get(), d[]. # weakdict = klass() weakdict.update(dict) self.assert_(len(weakdict) == len(dict)) for k in weakdict.keys(): self.assert_(dict.has_key(k), "mysterious new key appeared in weak dict") v = dict.get(k) self.assert_(v is weakdict[k]) self.assert_(v is weakdict.get(k)) for k in dict.keys(): self.assert_(weakdict.has_key(k), "original key disappeared in weak dict") v = dict[k] self.assert_(v is weakdict[k]) self.assert_(v is weakdict.get(k)) def test_weak_valued_dict_update(self): self.check_update(weakref.WeakValueDictionary, {1: C(), 'a': C(), C(): C()}) def test_weak_keyed_dict_update(self): self.check_update(weakref.WeakKeyDictionary, {C(): 1, C(): 2, C(): 3}) def test_weak_keyed_delitem(self): d = weakref.WeakKeyDictionary() o1 = Object('1') o2 = Object('2') d[o1] = 'something' d[o2] = 'something' self.assert_(len(d) == 2) del d[o1] self.assert_(len(d) == 1) self.assert_(d.keys() == [o2]) def test_weak_valued_delitem(self): d = weakref.WeakValueDictionary() o1 = Object('1') o2 = Object('2') d['something'] = o1 d['something else'] = o2 self.assert_(len(d) == 2) del d['something'] self.assert_(len(d) == 1) self.assert_(d.items() == [('something else', o2)]) def test_weak_keyed_bad_delitem(self): d = weakref.WeakKeyDictionary() o = Object('1') # An attempt to delete an object that isn't there should raise # KeyError. It didn't before 2.3. self.assertRaises(KeyError, d.__delitem__, o) self.assertRaises(KeyError, d.__getitem__, o) # If a key isn't of a weakly referencable type, __getitem__ and # __setitem__ raise TypeError. __delitem__ should too. 
self.assertRaises(TypeError, d.__delitem__, 13) self.assertRaises(TypeError, d.__getitem__, 13) self.assertRaises(TypeError, d.__setitem__, 13, 13) def test_weak_keyed_cascading_deletes(self): # SF bug 742860. For some reason, before 2.3 __delitem__ iterated # over the keys via self.data.iterkeys(). If things vanished from # the dict during this (or got added), that caused a RuntimeError. d = weakref.WeakKeyDictionary() mutate = False class C(object): def __init__(self, i): self.value = i def __hash__(self): return hash(self.value) def __eq__(self, other): if mutate: # Side effect that mutates the dict, by removing the # last strong reference to a key. del objs[-1] return self.value == other.value objs = [C(i) for i in range(4)] for o in objs: d[o] = o.value del o # now the only strong references to keys are in objs # Find the order in which iterkeys sees the keys. objs = d.keys() # Reverse it, so that the iteration implementation of __delitem__ # has to keep looping to find the first object we delete. objs.reverse() # Turn on mutation in C.__eq__. The first time thru the loop, # under the iterkeys() business the first comparison will delete # the last item iterkeys() would see, and that causes a # RuntimeError: dictionary changed size during iteration # when the iterkeys() loop goes around to try comparing the next # key. After this was fixed, it just deletes the last object *our* # "for o in obj" loop would have gotten to. 
mutate = True count = 0 for o in objs: count += 1 del d[o] self.assertEqual(len(d), 0) self.assertEqual(count, 2) from test import mapping_tests class WeakValueDictionaryTestCase(mapping_tests.BasicTestMappingProtocol): """Check that WeakValueDictionary conforms to the mapping protocol""" __ref = {"key1":Object(1), "key2":Object(2), "key3":Object(3)} type2test = weakref.WeakValueDictionary def _reference(self): return self.__ref.copy() class WeakKeyDictionaryTestCase(mapping_tests.BasicTestMappingProtocol): """Check that WeakKeyDictionary conforms to the mapping protocol""" __ref = {Object("key1"):1, Object("key2"):2, Object("key3"):3} type2test = weakref.WeakKeyDictionary def _reference(self): return self.__ref.copy() libreftest = """ Doctest for examples in the library reference: weakref.rst >>> import weakref >>> class Dict(dict): ... pass ... >>> obj = Dict(red=1, green=2, blue=3) # this object is weak referencable >>> r = weakref.ref(obj) >>> print r() is obj True >>> import weakref >>> class Object: ... pass ... >>> o = Object() >>> r = weakref.ref(o) >>> o2 = r() >>> o is o2 True >>> del o, o2 >>> print r() None >>> import weakref >>> class ExtendedRef(weakref.ref): ... def __init__(self, ob, callback=None, **annotations): ... super(ExtendedRef, self).__init__(ob, callback) ... self.__counter = 0 ... for k, v in annotations.iteritems(): ... setattr(self, k, v) ... def __call__(self): ... '''Return a pair containing the referent and the number of ... times the reference has been called. ... ''' ... ob = super(ExtendedRef, self).__call__() ... if ob is not None: ... self.__counter += 1 ... ob = (ob, self.__counter) ... return ob ... >>> class A: # not in docs from here, just testing the ExtendedRef ... pass ... >>> a = A() >>> r = ExtendedRef(a, foo=1, bar="baz") >>> r.foo 1 >>> r.bar 'baz' >>> r()[1] 1 >>> r()[1] 2 >>> r()[0] is a True >>> import weakref >>> _id2obj_dict = weakref.WeakValueDictionary() >>> def remember(obj): ... oid = id(obj) ... 
_id2obj_dict[oid] = obj ... return oid ... >>> def id2obj(oid): ... return _id2obj_dict[oid] ... >>> a = A() # from here, just testing >>> a_id = remember(a) >>> id2obj(a_id) is a True >>> del a >>> try: ... id2obj(a_id) ... except KeyError: ... print 'OK' ... else: ... print 'WeakValueDictionary error' OK """ __test__ = {'libreftest' : libreftest} def test_main(): test_support.run_unittest( ReferencesTestCase, MappingTestCase, WeakValueDictionaryTestCase, WeakKeyDictionaryTestCase, SubclassableWeakrefTestCase, ) test_support.run_doctest(sys.modules[__name__]) if __name__ == "__main__": test_main()
apache-2.0
siongui/pali
tipitaka/setup/init2tocsToJson.py
2
2237
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""Convert a '#@%'-separated TOC info file into a treeview JSON file.

tipitaka_toc.xml contains tipitaka, commentaries, and sun-commentaries
toc1.xml contains only tipitaka
"""

import os
import json

# Field separator used in the intermediate info file:
#   depth SEP text [SEP action]   (a trailing action marks a leaf)
separator = u'#@%'


def prettyPrint(obj):
    """Print *obj* as indented JSON (debugging aid)."""
    print(json.dumps(obj, indent=4, separators=(',', ': ')))
    #print(json.dumps(obj, sort_keys=True, indent=4, separators=(',', ': ')))


def printData(stack, node):
    """Dump the current stack and node (debugging aid)."""
    print('-----------------------------')
    prettyPrint(stack)
    print(node)


def isLeafNode(node):
    """A node is a leaf iff it carries an 'action' entry."""
    return 'action' in node


def getNodeDict(line):
    """Parse one info-file line into a node dict.

    Lines have the form ``depth#@%text[#@%action]``; three fields mean a
    leaf node, two fields mean an internal node.
    """
    array = line.split(separator)
    if len(array) == 3:
        # leaf node
        return {'depth': int(array[0]), 'text': array[1], 'action': array[2]}
    else:
        # internal (non-leaf) node
        return {'depth': int(array[0]), 'text': array[1]}


def updateStack(stack, node):
    """Fold *node* into the tree being built on *stack*.

    *stack* holds the chain of still-open ancestor nodes, rooted at
    ``stack[0]``.  Deeper internal nodes are pushed; when a node at the
    same or a shallower depth arrives, finished nodes are popped and
    appended to their parent's 'child' list (the bookkeeping 'depth' key
    is dropped on attachment).  Returns the updated stack.
    """
    #printData(stack, node)
    if stack[-1]['depth'] < node['depth']:
        if isLeafNode(node):
            node.pop('depth')
            stack[-1]['child'].append(node)
        else:
            node['child'] = []
            stack.append(node)
        return stack
    elif stack[-1]['depth'] > node['depth']:
        # close the deepest open node, then retry at the shallower level
        deepestNode = stack.pop()
        deepestNode.pop('depth')
        stack[-1]['child'].append(deepestNode)
        return updateStack(stack, node)
    else:  # stack[-1]['depth'] == node['depth']
        deepestNode = stack.pop()
        deepestNode.pop('depth')
        stack[-1]['child'].append(deepestNode)
        if isLeafNode(node):
            # BUGFIX: a leaf arriving at the same depth as the node just
            # closed was previously discarded entirely; attach it to the
            # new stack top instead.
            node.pop('depth')
            stack[-1]['child'].append(node)
        else:
            node['child'] = []
            stack.append(node)
        return stack


def linesToTreeviewData(lines):
    """Build and return the treeview tree from an iterable of text lines.

    Each line represents one node (see getNodeDict).  Trailing newlines
    are stripped; blank lines are ignored.
    """
    rootNode = {'child': [], 'depth': 0}
    stack = [rootNode]
    for line in lines:
        # BUGFIX: line[:-1] chopped the last character of a final line
        # that had no trailing newline; rstrip only removes newlines.
        line = line.rstrip(u'\n')
        if not line:
            continue
        stack = updateStack(stack, getNodeDict(line))
    # BUGFIX: branches still open at end of input were never attached to
    # their parents, silently dropping the deepest final branch.
    while len(stack) > 1:
        node = stack.pop()
        node.pop('depth')
        stack[-1]['child'].append(node)
    return rootNode


def infoFile2TreeviewData(path):
    """Read the info file at *path* (UTF-8) and return the treeview tree."""
    decoded = []
    with open(path, 'r') as f:
        for line in f:
            # bytes on Python 2 ('r' mode), already text on Python 3
            if isinstance(line, bytes):
                line = line.decode('utf-8')
            decoded.append(line)
    return linesToTreeviewData(decoded)


if __name__ == '__main__':
    # Project-local imports are deferred to the entry point so the pure
    # helpers above stay importable without the project environment.
    from variables import getInfoFilePath
    from variables import getTreeviewJsonPath

    treeviewData = infoFile2TreeviewData(getInfoFilePath())
    with open(getTreeviewJsonPath(), 'w') as f:
        f.write(json.dumps(treeviewData))
    prettyPrint(treeviewData)
unlicense
baiyunping333/BurpSuite-Plugins
Sqlmap/lib/parse/configfile.py
2
3636
#!/usr/bin/env python

"""
Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""

import codecs

from ConfigParser import MissingSectionHeaderError
from ConfigParser import ParsingError

from lib.core.common import checkFile
from lib.core.common import openFile
from lib.core.common import unArrayizeValue
from lib.core.common import UnicodeRawConfigParser
from lib.core.data import conf
from lib.core.data import logger
from lib.core.exception import SqlmapMissingMandatoryOptionException
from lib.core.exception import SqlmapSyntaxException
from lib.core.optiondict import optDict
from lib.core.settings import UNICODE_ENCODING

# Parsed configuration object shared by the functions below; set by
# configFileParser() and read by configFileProxy().
config = None


def configFileProxy(section, option, boolean=False, integer=False):
    """
    Fetch one *option* from *section* of the parsed configuration file
    and save it into the ``conf`` advanced dictionary.

    ``boolean``/``integer`` select the typed getter; an empty raw value
    maps to False/0 respectively, and falsy results are normalized to
    None.  Missing options are logged and skipped.

    Raises SqlmapSyntaxException when the raw value cannot be coerced.
    """

    global config

    if config.has_option(section, option):
        try:
            if boolean:
                value = config.getboolean(section, option) if config.get(section, option) else False
            elif integer:
                value = config.getint(section, option) if config.get(section, option) else 0
            else:
                value = config.get(section, option)
        # BUGFIX: old-style "except ValueError, ex" syntax replaced with
        # "as" (valid on Python 2.6+, required on Python 3)
        except ValueError as ex:
            errMsg = "error occurred while processing the option "
            errMsg += "'%s' in provided configuration file ('%s')" % (option, str(ex))
            raise SqlmapSyntaxException(errMsg)

        if value:
            conf[option] = value
        else:
            conf[option] = None
    else:
        debugMsg = "missing requested option '%s' (section " % option
        debugMsg += "'%s') into the configuration file, " % section
        debugMsg += "ignoring. Skipping to next."
        logger.debug(debugMsg)


def configFileParser(configFile):
    """
    Parse the configuration file at *configFile* and save its settings
    into the ``conf`` advanced dictionary.

    Raises SqlmapSyntaxException on a malformed file and
    SqlmapMissingMandatoryOptionException when the 'Target' section or
    all of its mandatory options are absent.
    """

    global config

    debugMsg = "parsing configuration file"
    logger.debug(debugMsg)

    checkFile(configFile)
    configFP = openFile(configFile, "rb")

    try:
        config = UnicodeRawConfigParser()
        config.readfp(configFP)
    except (MissingSectionHeaderError, ParsingError) as ex:
        errMsg = "you have provided an invalid configuration file ('%s')" % str(ex)
        raise SqlmapSyntaxException(errMsg)

    if not config.has_section("Target"):
        errMsg = "missing a mandatory section 'Target' in the configuration file"
        raise SqlmapMissingMandatoryOptionException(errMsg)

    # at least one way of specifying the target must be present
    mandatory = ("direct", "url", "logFile", "bulkFile", "googleDork", "requestFile", "sitemapUrl", "wizard")

    if not any(config.has_option("Target", option) for option in mandatory):
        errMsg = "missing a mandatory option in the configuration file "
        errMsg += "(direct, url, logFile, bulkFile, googleDork, requestFile, sitemapUrl or wizard)"
        raise SqlmapMissingMandatoryOptionException(errMsg)

    for family, optionData in optDict.items():
        for option, datatype in optionData.items():
            datatype = unArrayizeValue(datatype)
            configFileProxy(family, option, datatype == "boolean", datatype == "integer")
gpl-2.0
ryananguiano/kafka-gConsumer
tests/test_kafka_gconsumer.py
1
2174
#!/usr/bin/env python

"""
test_kafka_gconsumer
----------------------------------

Tests for `kafka_gconsumer` module.
"""

import os
import random
import string

import gevent
import pytest
from confluent_kafka import Producer
from confluent_kafka.avro import AvroProducer

from kafka_gconsumer import Consumer, AvroConsumer


def random_str(length):
    """Return a random string of *length* uppercase letters and digits."""
    return ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(length))


@pytest.fixture
def consumer_settings():
    """Per-test consumer settings with a unique group id."""
    return {
        'bootstrap.servers': os.environ.get('KAFKA_BOOTSTRAP_SERVERS'),
        'group.id': 'gConsumer-test-consumer-{}'.format(random_str(5)),
        'topic.auto.offset.reset': 'earliest',
    }


@pytest.fixture(scope='session')
def producer_settings():
    """Session-wide producer settings with a unique group id."""
    return {
        'bootstrap.servers': os.environ.get('KAFKA_BOOTSTRAP_SERVERS'),
        'group.id': 'gConsumer-test-producer-{}'.format(random_str(5)),
    }


@pytest.fixture
def avro_consumer_settings(consumer_settings):
    """Consumer settings extended with the schema registry URL."""
    settings = {
        'schema.registry.url': os.environ.get('KAFKA_SCHEMA_REGISTRY_URL'),
    }
    settings.update(consumer_settings)
    return settings


@pytest.fixture(scope='session')
def avro_producer_settings(producer_settings):
    """Producer settings extended with the schema registry URL."""
    settings = {
        'schema.registry.url': os.environ.get('KAFKA_SCHEMA_REGISTRY_URL'),
    }
    settings.update(producer_settings)
    return settings


def produced_plain_messages(topic, count):
    # placeholder, not yet implemented
    pass


def produced_avro_messages(topic):
    # placeholder, not yet implemented
    pass


@pytest.fixture(scope='session')
def many_produced_messages(producer_settings):
    """Produce a handful of plain messages onto the test topic."""
    producer = Producer(**producer_settings)
    # BUGFIX: xrange is Python-2-only; range works on both
    for i in range(5):
        producer.produce('test_messages', 'test-{}'.format(i))
    producer.poll(timeout=5)


def test_consumer_settings(consumer_settings):
    # NOTE(review): unpacking a dict with * passes its KEYS as positional
    # arguments; AvroConsumer below is called with the dict itself.
    # Confirm Consumer's signature -- this may need to be
    # Consumer(consumer_settings) or Consumer(**consumer_settings).
    Consumer(*consumer_settings)


def test_avro_consumer_settings(avro_consumer_settings):
    AvroConsumer(avro_consumer_settings)


def test_consumer(many_produced_messages, consumer_settings):
    def read_message(message):
        assert message.value() == ''

    thread = Consumer.spawn(topics='test_messages', settings=consumer_settings, handler=read_message)
    gevent.sleep(10)
    gevent.kill(thread)
mit
harshavardhana/tweepy
tweepy/cache.py
5
11544
# Tweepy
# Copyright 2009-2010 Joshua Roesslein
# See LICENSE for details.

import time
import threading
import os

try:
    import cPickle as pickle
except ImportError:
    import pickle

try:
    import hashlib
except ImportError:
    # python 2.4
    import md5 as hashlib

try:
    import fcntl
except ImportError:
    # Probably on a windows system
    # TODO: use win32file
    pass


class Cache(object):
    """Cache interface"""

    def __init__(self, timeout=60):
        """Initialize the cache

        timeout: number of seconds to keep a cached entry
        """
        self.timeout = timeout

    def store(self, key, value):
        """Add new record to cache

        key: entry key
        value: data of entry
        """
        raise NotImplementedError

    def get(self, key, timeout=None):
        """Get cached entry if exists and not expired

        key: which entry to get
        timeout: override timeout with this value [optional]
        """
        raise NotImplementedError

    def count(self):
        """Get count of entries currently stored in cache"""
        raise NotImplementedError

    def cleanup(self):
        """Delete any expired entries in cache."""
        raise NotImplementedError

    def flush(self):
        """Delete all cached entries"""
        raise NotImplementedError


class MemoryCache(Cache):
    """In-memory cache: entries are (store_time, value) pairs in a dict."""

    def __init__(self, timeout=60):
        Cache.__init__(self, timeout)
        self._entries = {}
        self.lock = threading.Lock()

    def __getstate__(self):
        # pickle: the lock is not picklable, persist only data + timeout
        return {'entries': self._entries, 'timeout': self.timeout}

    def __setstate__(self, state):
        # unpickle: recreate the lock dropped by __getstate__
        self.lock = threading.Lock()
        self._entries = state['entries']
        self.timeout = state['timeout']

    def _is_expired(self, entry, timeout):
        # a timeout <= 0 means "never expires"
        return timeout > 0 and (time.time() - entry[0]) >= timeout

    def store(self, key, value):
        self.lock.acquire()
        self._entries[key] = (time.time(), value)
        self.lock.release()

    def get(self, key, timeout=None):
        self.lock.acquire()
        try:
            # check to see if we have this key
            entry = self._entries.get(key)
            if not entry:
                # no hit, return nothing
                return None

            # use provided timeout in arguments if provided
            # otherwise use the one provided during init.
            if timeout is None:
                timeout = self.timeout

            # make sure entry is not expired
            if self._is_expired(entry, timeout):
                # entry expired, delete and return nothing
                del self._entries[key]
                return None

            # entry found and not expired, return it
            return entry[1]
        finally:
            self.lock.release()

    def count(self):
        return len(self._entries)

    def cleanup(self):
        self.lock.acquire()
        try:
            # BUGFIX: snapshot with list() -- deleting from the dict while
            # iterating .items() raises RuntimeError on Python 3
            for k, v in list(self._entries.items()):
                if self._is_expired(v, self.timeout):
                    del self._entries[k]
        finally:
            self.lock.release()

    def flush(self):
        self.lock.acquire()
        self._entries.clear()
        self.lock.release()


class FileCache(Cache):
    """File-based cache: one pickled (store_time, value) file per key."""

    # locks used to make cache thread-safe; shared per cache directory
    cache_locks = {}

    def __init__(self, cache_dir, timeout=60):
        Cache.__init__(self, timeout)
        if not os.path.exists(cache_dir):
            os.mkdir(cache_dir)
        self.cache_dir = cache_dir
        if cache_dir in FileCache.cache_locks:
            self.lock = FileCache.cache_locks[cache_dir]
        else:
            self.lock = threading.Lock()
            FileCache.cache_locks[cache_dir] = self.lock

        if os.name == 'posix':
            self._lock_file = self._lock_file_posix
            self._unlock_file = self._unlock_file_posix
        elif os.name == 'nt':
            self._lock_file = self._lock_file_win32
            self._unlock_file = self._unlock_file_win32
        else:
            # BUGFIX: print *statement* is a syntax error on Python 3;
            # the function form is valid on both 2 and 3
            print('Warning! FileCache locking not supported on this system!')
            self._lock_file = self._lock_file_dummy
            self._unlock_file = self._unlock_file_dummy

    def _get_path(self, key):
        """Map a cache key to its file path via an MD5 digest."""
        md5 = hashlib.md5()
        # BUGFIX: hashlib requires bytes on Python 3; encoding is
        # harmless for the ASCII keys used on Python 2
        if not isinstance(key, bytes):
            key = key.encode('utf-8')
        md5.update(key)
        return os.path.join(self.cache_dir, md5.hexdigest())

    def _lock_file_dummy(self, path, exclusive=True):
        return None

    def _unlock_file_dummy(self, lock):
        return

    def _lock_file_posix(self, path, exclusive=True):
        lock_path = path + '.lock'
        if exclusive is True:
            f_lock = open(lock_path, 'w')
            fcntl.lockf(f_lock, fcntl.LOCK_EX)
        else:
            f_lock = open(lock_path, 'r')
            fcntl.lockf(f_lock, fcntl.LOCK_SH)
        if not os.path.exists(lock_path):
            # lock file was removed while we held it
            f_lock.close()
            return None
        return f_lock

    def _unlock_file_posix(self, lock):
        lock.close()

    def _lock_file_win32(self, path, exclusive=True):
        # TODO: implement
        return None

    def _unlock_file_win32(self, lock):
        # TODO: implement
        return

    def _delete_file(self, path):
        os.remove(path)
        if os.path.exists(path + '.lock'):
            os.remove(path + '.lock')

    def store(self, key, value):
        path = self._get_path(key)
        self.lock.acquire()
        try:
            # acquire lock and open file
            f_lock = self._lock_file(path)
            datafile = open(path, 'wb')

            # write data
            pickle.dump((time.time(), value), datafile)

            # close and unlock file
            datafile.close()
            self._unlock_file(f_lock)
        finally:
            self.lock.release()

    def get(self, key, timeout=None):
        return self._get(self._get_path(key), timeout)

    def _get(self, path, timeout):
        if not os.path.exists(path):
            # no record
            return None
        self.lock.acquire()
        try:
            # acquire lock (shared) and open
            f_lock = self._lock_file(path, False)
            datafile = open(path, 'rb')

            # read pickled object
            created_time, value = pickle.load(datafile)
            datafile.close()

            # check if value is expired
            if timeout is None:
                timeout = self.timeout
            if timeout > 0 and (time.time() - created_time) >= timeout:
                # expired! delete from cache
                value = None
                self._delete_file(path)

            # unlock and return result
            self._unlock_file(f_lock)
            return value
        finally:
            self.lock.release()

    def count(self):
        c = 0
        for entry in os.listdir(self.cache_dir):
            if entry.endswith('.lock'):
                continue
            c += 1
        return c

    def cleanup(self):
        # _get() deletes expired entries as a side effect
        for entry in os.listdir(self.cache_dir):
            if entry.endswith('.lock'):
                continue
            self._get(os.path.join(self.cache_dir, entry), None)

    def flush(self):
        for entry in os.listdir(self.cache_dir):
            if entry.endswith('.lock'):
                continue
            self._delete_file(os.path.join(self.cache_dir, entry))


class MemCacheCache(Cache):
    """Cache backed by a memcache client; expiry is handled server-side."""

    def __init__(self, client, timeout=60):
        """Initialize the cache

        client: The memcache client
        timeout: number of seconds to keep a cached entry
        """
        self.client = client
        self.timeout = timeout

    def store(self, key, value):
        """Add new record to cache

        key: entry key
        value: data of entry
        """
        self.client.set(key, value, time=self.timeout)

    def get(self, key, timeout=None):
        """Get cached entry if exists and not expired

        key: which entry to get
        timeout: override timeout with this value [optional].
        DOES NOT WORK HERE
        """
        return self.client.get(key)

    def count(self):
        """Get count of entries currently stored in cache. RETURN 0"""
        # BUGFIX: docstring promises 0 but the method raised
        # NotImplementedError; memcache has no cheap per-client count
        return 0

    def cleanup(self):
        """Delete any expired entries in cache. NO-OP"""
        # BUGFIX: docstring promises a no-op but the method raised
        # NotImplementedError; memcache expires entries server-side
        pass

    def flush(self):
        """Delete all cached entries. NO-OP"""
        # BUGFIX: docstring promises a no-op but the method raised
        # NotImplementedError
        pass


class RedisCache(Cache):
    '''Cache running in a redis server'''

    def __init__(self, client, timeout=60, keys_container='tweepy:keys', pre_identifier='tweepy:'):
        Cache.__init__(self, timeout)
        self.client = client
        self.keys_container = keys_container
        self.pre_identifier = pre_identifier

    def _is_expired(self, entry, timeout):
        # Returns true if the entry has expired
        return timeout > 0 and (time.time() - entry[0]) >= timeout

    def store(self, key, value):
        '''Store the key, value pair in our redis server'''
        # Prepend tweepy to our key,
        # this makes it easier to identify tweepy keys in our redis server
        key = self.pre_identifier + key
        # Get a pipe (to execute several redis commands in one step)
        pipe = self.client.pipeline()
        # Set our values in a redis hash (similar to python dict)
        pipe.set(key, pickle.dumps((time.time(), value)))
        # Set the expiration
        pipe.expire(key, self.timeout)
        # Add the key to a set containing all the keys
        pipe.sadd(self.keys_container, key)
        # Execute the instructions in the redis server
        pipe.execute()

    def get(self, key, timeout=None):
        '''Given a key, returns an element from the redis table'''
        key = self.pre_identifier + key
        # Check to see if we have this key
        unpickled_entry = self.client.get(key)
        if not unpickled_entry:
            # No hit, return nothing
            return None

        entry = pickle.loads(unpickled_entry)
        # Use provided timeout in arguments if provided
        # otherwise use the one provided during init.
        if timeout is None:
            timeout = self.timeout

        # Make sure entry is not expired
        if self._is_expired(entry, timeout):
            # entry expired, delete and return nothing
            self.delete_entry(key)
            return None
        # entry found and not expired, return it
        return entry[1]

    def count(self):
        '''Note: This is not very efficient,
        since it retreives all the keys from the redis
        server to know how many keys we have'''
        return len(self.client.smembers(self.keys_container))

    def delete_entry(self, key):
        '''Delete an object from the redis table'''
        pipe = self.client.pipeline()
        pipe.srem(self.keys_container, key)
        pipe.delete(key)
        pipe.execute()

    def cleanup(self):
        '''Cleanup all the expired keys'''
        keys = self.client.smembers(self.keys_container)
        for key in keys:
            entry = self.client.get(key)
            if entry:
                entry = pickle.loads(entry)
                if self._is_expired(entry, self.timeout):
                    self.delete_entry(key)

    def flush(self):
        '''Delete all entries from the cache'''
        keys = self.client.smembers(self.keys_container)
        for key in keys:
            self.delete_entry(key)
apache-2.0
tclose/python-neo
neo/io/asciispiketrainio.py
7
4247
# -*- coding: utf-8 -*-
"""
Class for reading/writing SpikeTrains in a text file.
It is the simple case where different spiketrains are written line by line.

Supported : Read/Write

Author: sgarcia
"""

import os

import numpy as np
import quantities as pq

from neo.io.baseio import BaseIO
from neo.core import Segment, SpikeTrain


class AsciiSpikeTrainIO(BaseIO):
    """
    Class for reading/writing SpikeTrain in a text file.
    Each SpikeTrain is a line.

    Usage:
        >>> from neo import io
        >>> r = io.AsciiSpikeTrainIO( filename = 'File_ascii_spiketrain_1.txt')
        >>> seg = r.read_segment(lazy = False, cascade = True,)
        >>> print(seg.spiketrains)     # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
        [<SpikeTrain(array([  3.89981604,   4.73258781,   0.608428  ,   4.60246277,  1.23805797,
        ...
    """

    is_readable = True
    is_writable = True

    supported_objects = [Segment, SpikeTrain]
    readable_objects = [Segment]
    writeable_objects = [Segment]

    has_header = False
    is_streameable = False

    read_params = {
        Segment: [
            ('delimiter', {'value': '\t', 'possible': ['\t', ' ', ',', ';']}),
            ('t_start', {'value': 0., }),
        ]
    }
    write_params = {
        Segment: [
            ('delimiter', {'value': '\t', 'possible': ['\t', ' ', ',', ';']}),
        ]
    }

    name = None
    extensions = ['txt']

    mode = 'file'

    def __init__(self, filename=None):
        """
        This class read/write SpikeTrains in a text file.
        Each row is a spiketrain.

        **Arguments**
            filename : the filename to read/write
        """
        BaseIO.__init__(self)
        self.filename = filename

    def read_segment(self,
                     lazy=False,
                     cascade=True,
                     delimiter='\t',
                     t_start=0. * pq.s,
                     unit=pq.s,
                     ):
        """
        Read one Segment from the file; each non-header row becomes one
        SpikeTrain, annotated with its row index as ``channel_index``.

        Arguments:
            delimiter : columns delimiter in file '\t' or one space or two space or ',' or ';'
            t_start : time start of all spiketrain 0 by default
            unit : unit of spike times, can be a str or directly a Quantities
        """
        unit = pq.Quantity(1, unit)

        seg = Segment(file_origin=os.path.basename(self.filename))
        if not cascade:
            return seg

        # 'Ur' mode is deprecated and was removed in Python 3.11; plain text
        # mode already performs universal newline translation.
        with open(self.filename, 'r') as f:
            for i, line in enumerate(f):
                # Strip only the line terminator: the previous ``line[:-1]``
                # silently dropped the last data character when the final
                # line had no trailing newline.
                alldata = line.rstrip('\r\n').split(delimiter)
                # Tolerate a trailing/leading delimiter (e.g. files produced
                # by write_segment end every row with one delimiter).
                if alldata and alldata[-1] == '':
                    alldata = alldata[:-1]
                if alldata and alldata[0] == '':
                    alldata = alldata[1:]

                if lazy:
                    spike_times = []
                    t_stop = t_start
                    sptr = SpikeTrain(spike_times * unit,
                                      t_start=t_start, t_stop=t_stop)
                    sptr.lazy_shape = len(alldata)
                elif alldata:
                    spike_times = np.array(alldata).astype('f')
                    t_stop = spike_times.max() * unit
                    sptr = SpikeTrain(spike_times * unit,
                                      t_start=t_start, t_stop=t_stop)
                else:
                    # Empty row (e.g. written from an empty SpikeTrain): the
                    # previous code crashed on ``spike_times.max()`` here.
                    sptr = SpikeTrain(np.array([]) * unit,
                                      t_start=t_start, t_stop=t_start)

                sptr.annotate(channel_index=i)
                seg.spiketrains.append(sptr)

        seg.create_many_to_one_relationship()
        return seg

    def write_segment(self, segment,
                      delimiter='\t',
                      ):
        """
        Write SpikeTrain of a Segment in a txt file.
        Each row is a spiketrain.

        Arguments:
            segment : the segment to write. Only spiketrains are written.
            delimiter : columns delimiter in file '\t' or one space or two space or ',' or ';'

        Information of t_start is lost.
        """
        with open(self.filename, 'w') as f:
            for s, sptr in enumerate(segment.spiketrains):
                for ts in sptr:
                    # Trailing delimiter per row is intentional: read_segment
                    # drops the resulting empty last field.
                    f.write('%f%s' % (ts, delimiter))
                f.write('\n')
bsd-3-clause
thnee/ansible
lib/ansible/modules/cloud/google/gcp_bigquery_table.py
10
58848
#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright (C) 2017 Google # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # ---------------------------------------------------------------------------- # # *** AUTO GENERATED CODE *** AUTO GENERATED CODE *** # # ---------------------------------------------------------------------------- # # This file is automatically generated by Magic Modules and manual # changes will be clobbered when the file is regenerated. # # Please read more about how to change this file at # https://www.github.com/GoogleCloudPlatform/magic-modules # # ---------------------------------------------------------------------------- from __future__ import absolute_import, division, print_function __metaclass__ = type ################################################################################ # Documentation ################################################################################ ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: gcp_bigquery_table description: - A Table that belongs to a Dataset . short_description: Creates a GCP Table version_added: '2.8' author: Google Inc. (@googlecloudplatform) requirements: - python >= 2.6 - requests >= 2.18.4 - google-auth >= 1.3.0 options: state: description: - Whether the given object should exist in GCP choices: - present - absent default: present type: str table_reference: description: - Reference describing the ID of this table. required: false type: dict suboptions: dataset_id: description: - The ID of the dataset containing this table. required: false type: str project_id: description: - The ID of the project containing this table. required: false type: str table_id: description: - The ID of the the table. required: false type: str clustering: description: - One or more fields on which data should be clustered. 
Only top-level, non-repeated, simple-type fields are supported. When you cluster a table using multiple columns, the order of columns you specify is important. The order of the specified columns determines the sort order of the data. required: false type: list version_added: '2.9' description: description: - A user-friendly description of the dataset. required: false type: str friendly_name: description: - A descriptive name for this table. required: false type: str labels: description: - The labels associated with this dataset. You can use these to organize and group your datasets . required: false type: dict name: description: - Name of the table. required: false type: str num_rows: description: - The number of rows of data in this table, excluding any data in the streaming buffer. required: false type: int version_added: '2.9' view: description: - The view definition. required: false type: dict suboptions: use_legacy_sql: description: - Specifies whether to use BigQuery's legacy SQL for this view . required: false type: bool user_defined_function_resources: description: - Describes user-defined function resources used in the query. required: false type: list suboptions: inline_code: description: - An inline resource that contains code for a user-defined function (UDF). Providing a inline code resource is equivalent to providing a URI for a file containing the same code. required: false type: str resource_uri: description: - A code resource to load from a Google Cloud Storage URI (gs://bucket/path). required: false type: str time_partitioning: description: - If specified, configures time-based partitioning for this table. required: false type: dict suboptions: expiration_ms: description: - Number of milliseconds for which to keep the storage for a partition. required: false type: int field: description: - If not set, the table is partitioned by pseudo column, referenced via either '_PARTITIONTIME' as TIMESTAMP type, or '_PARTITIONDATE' as DATE type. 
If field is specified, the table is instead partitioned by this field. The field must be a top-level TIMESTAMP or DATE field. Its mode must be NULLABLE or REQUIRED. required: false type: str version_added: '2.9' type: description: - The only type supported is DAY, which will generate one partition per day. - 'Some valid choices include: "DAY"' required: false type: str schema: description: - Describes the schema of this table. required: false type: dict suboptions: fields: description: - Describes the fields in a table. required: false type: list suboptions: description: description: - The field description. The maximum length is 1,024 characters. required: false type: str fields: description: - Describes the nested schema fields if the type property is set to RECORD. required: false type: list mode: description: - The field mode. - 'Some valid choices include: "NULLABLE", "REQUIRED", "REPEATED"' required: false type: str name: description: - The field name. required: false type: str type: description: - The field data type. - 'Some valid choices include: "STRING", "BYTES", "INTEGER", "FLOAT", "TIMESTAMP", "DATE", "TIME", "DATETIME", "RECORD"' required: false type: str encryption_configuration: description: - Custom encryption configuration. required: false type: dict suboptions: kms_key_name: description: - Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. required: false type: str expiration_time: description: - The time when this table expires, in milliseconds since the epoch. If not present, the table will persist indefinitely. required: false type: int external_data_configuration: description: - Describes the data format, location, and other properties of a table stored outside of BigQuery. By defining these properties, the data source can then be queried as if it were a standard BigQuery table. 
required: false type: dict suboptions: autodetect: description: - Try to detect schema and format options automatically. Any option specified explicitly will be honored. required: false type: bool compression: description: - The compression type of the data source. - 'Some valid choices include: "GZIP", "NONE"' required: false type: str ignore_unknown_values: description: - Indicates if BigQuery should allow extra values that are not represented in the table schema . required: false type: bool max_bad_records: description: - The maximum number of bad records that BigQuery can ignore when reading data . required: false default: '0' type: int source_format: description: - The data format. - 'Some valid choices include: "CSV", "GOOGLE_SHEETS", "NEWLINE_DELIMITED_JSON", "AVRO", "DATASTORE_BACKUP", "BIGTABLE"' required: false type: str source_uris: description: - The fully-qualified URIs that point to your data in Google Cloud. - 'For Google Cloud Storage URIs: Each URI can contain one ''*'' wildcard character and it must come after the ''bucket'' name. Size limits related to load jobs apply to external data sources. For Google Cloud Bigtable URIs: Exactly one URI can be specified and it has be a fully specified and valid HTTPS URL for a Google Cloud Bigtable table. For Google Cloud Datastore backups, exactly one URI can be specified. Also, the ''*'' wildcard character is not allowed.' required: false type: list schema: description: - The schema for the data. Schema is required for CSV and JSON formats. required: false type: dict suboptions: fields: description: - Describes the fields in a table. required: false type: list suboptions: description: description: - The field description. required: false type: str fields: description: - Describes the nested schema fields if the type property is set to RECORD . required: false type: list mode: description: - Field mode. 
- 'Some valid choices include: "NULLABLE", "REQUIRED", "REPEATED"' required: false type: str name: description: - Field name. required: false type: str type: description: - Field data type. - 'Some valid choices include: "STRING", "BYTES", "INTEGER", "FLOAT", "TIMESTAMP", "DATE", "TIME", "DATETIME", "RECORD"' required: false type: str google_sheets_options: description: - Additional options if sourceFormat is set to GOOGLE_SHEETS. required: false type: dict suboptions: skip_leading_rows: description: - The number of rows at the top of a Google Sheet that BigQuery will skip when reading the data. required: false default: '0' type: int csv_options: description: - Additional properties to set if sourceFormat is set to CSV. required: false type: dict suboptions: allow_jagged_rows: description: - Indicates if BigQuery should accept rows that are missing trailing optional columns . required: false type: bool allow_quoted_newlines: description: - Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file . required: false type: bool encoding: description: - The character encoding of the data. - 'Some valid choices include: "UTF-8", "ISO-8859-1"' required: false type: str field_delimiter: description: - The separator for fields in a CSV file. required: false type: str quote: description: - The value that is used to quote data sections in a CSV file. required: false type: str skip_leading_rows: description: - The number of rows at the top of a CSV file that BigQuery will skip when reading the data. required: false default: '0' type: int bigtable_options: description: - Additional options if sourceFormat is set to BIGTABLE. required: false type: dict suboptions: ignore_unspecified_column_families: description: - If field is true, then the column families that are not specified in columnFamilies list are not exposed in the table schema . 
required: false type: bool read_rowkey_as_string: description: - If field is true, then the rowkey column families will be read and converted to string. required: false type: bool column_families: description: - List of column families to expose in the table schema along with their types. required: false type: list suboptions: columns: description: - Lists of columns that should be exposed as individual fields as opposed to a list of (column name, value) pairs. required: false type: list suboptions: encoding: description: - The encoding of the values when the type is not STRING. - 'Some valid choices include: "TEXT", "BINARY"' required: false type: str field_name: description: - If the qualifier is not a valid BigQuery field identifier, a valid identifier must be provided as the column field name and is used as field name in queries. required: false type: str only_read_latest: description: - If this is set, only the latest version of value in this column are exposed . required: false type: bool qualifier_string: description: - Qualifier of the column. required: true type: str type: description: - The type to convert the value in cells of this column. - 'Some valid choices include: "BYTES", "STRING", "INTEGER", "FLOAT", "BOOLEAN"' required: false type: str encoding: description: - The encoding of the values when the type is not STRING. - 'Some valid choices include: "TEXT", "BINARY"' required: false type: str family_id: description: - Identifier of the column family. required: false type: str only_read_latest: description: - If this is set only the latest version of value are exposed for all columns in this column family . required: false type: bool type: description: - The type to convert the value in cells of this column family. - 'Some valid choices include: "BYTES", "STRING", "INTEGER", "FLOAT", "BOOLEAN"' required: false type: str dataset: description: - Name of the dataset. 
required: false type: str project: description: - The Google Cloud Platform project to use. type: str auth_kind: description: - The type of credential used. type: str required: true choices: - application - machineaccount - serviceaccount service_account_contents: description: - The contents of a Service Account JSON file, either in a dictionary or as a JSON string that represents it. type: jsonarg service_account_file: description: - The path of a Service Account JSON file if serviceaccount is selected as type. type: path service_account_email: description: - An optional service account email address if machineaccount is selected and the user does not wish to use the default email. type: str scopes: description: - Array of scopes to be used type: list env_type: description: - Specifies which Ansible environment you're running this module within. - This should not be set unless you know what you're doing. - This only alters the User Agent string for any API requests. type: str ''' EXAMPLES = ''' - name: create a dataset gcp_bigquery_dataset: name: example_dataset dataset_reference: dataset_id: example_dataset project: "{{ gcp_project }}" auth_kind: "{{ gcp_cred_kind }}" service_account_file: "{{ gcp_cred_file }}" state: present register: dataset - name: create a table gcp_bigquery_table: name: example_table dataset: example_dataset table_reference: dataset_id: example_dataset project_id: test_project table_id: example_table project: test_project auth_kind: serviceaccount service_account_file: "/tmp/auth.pem" state: present ''' RETURN = ''' tableReference: description: - Reference describing the ID of this table. returned: success type: complex contains: datasetId: description: - The ID of the dataset containing this table. returned: success type: str projectId: description: - The ID of the project containing this table. returned: success type: str tableId: description: - The ID of the the table. 
returned: success type: str clustering: description: - One or more fields on which data should be clustered. Only top-level, non-repeated, simple-type fields are supported. When you cluster a table using multiple columns, the order of columns you specify is important. The order of the specified columns determines the sort order of the data. returned: success type: list creationTime: description: - The time when this dataset was created, in milliseconds since the epoch. returned: success type: int description: description: - A user-friendly description of the dataset. returned: success type: str friendlyName: description: - A descriptive name for this table. returned: success type: str id: description: - An opaque ID uniquely identifying the table. returned: success type: str labels: description: - The labels associated with this dataset. You can use these to organize and group your datasets . returned: success type: dict lastModifiedTime: description: - The time when this table was last modified, in milliseconds since the epoch. returned: success type: int location: description: - The geographic location where the table resides. This value is inherited from the dataset. returned: success type: str name: description: - Name of the table. returned: success type: str numBytes: description: - The size of this table in bytes, excluding any data in the streaming buffer. returned: success type: int numLongTermBytes: description: - The number of bytes in the table that are considered "long-term storage". returned: success type: int numRows: description: - The number of rows of data in this table, excluding any data in the streaming buffer. returned: success type: int requirePartitionFilter: description: - If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified. returned: success type: bool type: description: - Describes the table type. returned: success type: str view: description: - The view definition. 
returned: success type: complex contains: useLegacySql: description: - Specifies whether to use BigQuery's legacy SQL for this view . returned: success type: bool userDefinedFunctionResources: description: - Describes user-defined function resources used in the query. returned: success type: complex contains: inlineCode: description: - An inline resource that contains code for a user-defined function (UDF). Providing a inline code resource is equivalent to providing a URI for a file containing the same code. returned: success type: str resourceUri: description: - A code resource to load from a Google Cloud Storage URI (gs://bucket/path). returned: success type: str timePartitioning: description: - If specified, configures time-based partitioning for this table. returned: success type: complex contains: expirationMs: description: - Number of milliseconds for which to keep the storage for a partition. returned: success type: int field: description: - If not set, the table is partitioned by pseudo column, referenced via either '_PARTITIONTIME' as TIMESTAMP type, or '_PARTITIONDATE' as DATE type. If field is specified, the table is instead partitioned by this field. The field must be a top-level TIMESTAMP or DATE field. Its mode must be NULLABLE or REQUIRED. returned: success type: str type: description: - The only type supported is DAY, which will generate one partition per day. returned: success type: str streamingBuffer: description: - Contains information regarding this table's streaming buffer, if one is present. This field will be absent if the table is not being streamed to or if there is no data in the streaming buffer. returned: success type: complex contains: estimatedBytes: description: - A lower-bound estimate of the number of bytes currently in the streaming buffer. returned: success type: int estimatedRows: description: - A lower-bound estimate of the number of rows currently in the streaming buffer. 
returned: success type: int oldestEntryTime: description: - Contains the timestamp of the oldest entry in the streaming buffer, in milliseconds since the epoch, if the streaming buffer is available. returned: success type: int schema: description: - Describes the schema of this table. returned: success type: complex contains: fields: description: - Describes the fields in a table. returned: success type: complex contains: description: description: - The field description. The maximum length is 1,024 characters. returned: success type: str fields: description: - Describes the nested schema fields if the type property is set to RECORD. returned: success type: list mode: description: - The field mode. returned: success type: str name: description: - The field name. returned: success type: str type: description: - The field data type. returned: success type: str encryptionConfiguration: description: - Custom encryption configuration. returned: success type: complex contains: kmsKeyName: description: - Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. The BigQuery Service Account associated with your project requires access to this encryption key. returned: success type: str expirationTime: description: - The time when this table expires, in milliseconds since the epoch. If not present, the table will persist indefinitely. returned: success type: int externalDataConfiguration: description: - Describes the data format, location, and other properties of a table stored outside of BigQuery. By defining these properties, the data source can then be queried as if it were a standard BigQuery table. returned: success type: complex contains: autodetect: description: - Try to detect schema and format options automatically. Any option specified explicitly will be honored. returned: success type: bool compression: description: - The compression type of the data source. 
returned: success type: str ignoreUnknownValues: description: - Indicates if BigQuery should allow extra values that are not represented in the table schema . returned: success type: bool maxBadRecords: description: - The maximum number of bad records that BigQuery can ignore when reading data . returned: success type: int sourceFormat: description: - The data format. returned: success type: str sourceUris: description: - The fully-qualified URIs that point to your data in Google Cloud. - 'For Google Cloud Storage URIs: Each URI can contain one ''*'' wildcard character and it must come after the ''bucket'' name. Size limits related to load jobs apply to external data sources. For Google Cloud Bigtable URIs: Exactly one URI can be specified and it has be a fully specified and valid HTTPS URL for a Google Cloud Bigtable table. For Google Cloud Datastore backups, exactly one URI can be specified. Also, the ''*'' wildcard character is not allowed.' returned: success type: list schema: description: - The schema for the data. Schema is required for CSV and JSON formats. returned: success type: complex contains: fields: description: - Describes the fields in a table. returned: success type: complex contains: description: description: - The field description. returned: success type: str fields: description: - Describes the nested schema fields if the type property is set to RECORD . returned: success type: list mode: description: - Field mode. returned: success type: str name: description: - Field name. returned: success type: str type: description: - Field data type. returned: success type: str googleSheetsOptions: description: - Additional options if sourceFormat is set to GOOGLE_SHEETS. returned: success type: complex contains: skipLeadingRows: description: - The number of rows at the top of a Google Sheet that BigQuery will skip when reading the data. returned: success type: int csvOptions: description: - Additional properties to set if sourceFormat is set to CSV. 
returned: success type: complex contains: allowJaggedRows: description: - Indicates if BigQuery should accept rows that are missing trailing optional columns . returned: success type: bool allowQuotedNewlines: description: - Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file . returned: success type: bool encoding: description: - The character encoding of the data. returned: success type: str fieldDelimiter: description: - The separator for fields in a CSV file. returned: success type: str quote: description: - The value that is used to quote data sections in a CSV file. returned: success type: str skipLeadingRows: description: - The number of rows at the top of a CSV file that BigQuery will skip when reading the data. returned: success type: int bigtableOptions: description: - Additional options if sourceFormat is set to BIGTABLE. returned: success type: complex contains: ignoreUnspecifiedColumnFamilies: description: - If field is true, then the column families that are not specified in columnFamilies list are not exposed in the table schema . returned: success type: bool readRowkeyAsString: description: - If field is true, then the rowkey column families will be read and converted to string. returned: success type: bool columnFamilies: description: - List of column families to expose in the table schema along with their types. returned: success type: complex contains: columns: description: - Lists of columns that should be exposed as individual fields as opposed to a list of (column name, value) pairs. returned: success type: complex contains: encoding: description: - The encoding of the values when the type is not STRING. returned: success type: str fieldName: description: - If the qualifier is not a valid BigQuery field identifier, a valid identifier must be provided as the column field name and is used as field name in queries. 
returned: success type: str onlyReadLatest: description: - If this is set, only the latest version of value in this column are exposed . returned: success type: bool qualifierString: description: - Qualifier of the column. returned: success type: str type: description: - The type to convert the value in cells of this column. returned: success type: str encoding: description: - The encoding of the values when the type is not STRING. returned: success type: str familyId: description: - Identifier of the column family. returned: success type: str onlyReadLatest: description: - If this is set only the latest version of value are exposed for all columns in this column family . returned: success type: bool type: description: - The type to convert the value in cells of this column family. returned: success type: str dataset: description: - Name of the dataset. returned: success type: str ''' ################################################################################ # Imports ################################################################################ from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, remove_nones_from_dict, replace_resource_dict import json ################################################################################ # Main ################################################################################ def main(): """Main function""" module = GcpModule( argument_spec=dict( state=dict(default='present', choices=['present', 'absent'], type='str'), table_reference=dict(type='dict', options=dict(dataset_id=dict(type='str'), project_id=dict(type='str'), table_id=dict(type='str'))), clustering=dict(type='list', elements='str'), description=dict(type='str'), friendly_name=dict(type='str'), labels=dict(type='dict'), name=dict(type='str'), num_rows=dict(type='int'), view=dict( type='dict', options=dict( use_legacy_sql=dict(type='bool'), user_defined_function_resources=dict( type='list', elements='dict', 
options=dict(inline_code=dict(type='str'), resource_uri=dict(type='str')) ), ), ), time_partitioning=dict(type='dict', options=dict(expiration_ms=dict(type='int'), field=dict(type='str'), type=dict(type='str'))), schema=dict( type='dict', options=dict( fields=dict( type='list', elements='dict', options=dict( description=dict(type='str'), fields=dict(type='list', elements='str'), mode=dict(type='str'), name=dict(type='str'), type=dict(type='str'), ), ) ), ), encryption_configuration=dict(type='dict', options=dict(kms_key_name=dict(type='str'))), expiration_time=dict(type='int'), external_data_configuration=dict( type='dict', options=dict( autodetect=dict(type='bool'), compression=dict(type='str'), ignore_unknown_values=dict(type='bool'), max_bad_records=dict(default=0, type='int'), source_format=dict(type='str'), source_uris=dict(type='list', elements='str'), schema=dict( type='dict', options=dict( fields=dict( type='list', elements='dict', options=dict( description=dict(type='str'), fields=dict(type='list', elements='str'), mode=dict(type='str'), name=dict(type='str'), type=dict(type='str'), ), ) ), ), google_sheets_options=dict(type='dict', options=dict(skip_leading_rows=dict(default=0, type='int'))), csv_options=dict( type='dict', options=dict( allow_jagged_rows=dict(type='bool'), allow_quoted_newlines=dict(type='bool'), encoding=dict(type='str'), field_delimiter=dict(type='str'), quote=dict(type='str'), skip_leading_rows=dict(default=0, type='int'), ), ), bigtable_options=dict( type='dict', options=dict( ignore_unspecified_column_families=dict(type='bool'), read_rowkey_as_string=dict(type='bool'), column_families=dict( type='list', elements='dict', options=dict( columns=dict( type='list', elements='dict', options=dict( encoding=dict(type='str'), field_name=dict(type='str'), only_read_latest=dict(type='bool'), qualifier_string=dict(required=True, type='str'), type=dict(type='str'), ), ), encoding=dict(type='str'), family_id=dict(type='str'), 
only_read_latest=dict(type='bool'), type=dict(type='str'), ), ), ), ), ), ), dataset=dict(type='str'), ) ) if not module.params['scopes']: module.params['scopes'] = ['https://www.googleapis.com/auth/bigquery'] state = module.params['state'] kind = 'bigquery#table' fetch = fetch_resource(module, self_link(module), kind) changed = False if fetch: if state == 'present': if is_different(module, fetch): update(module, self_link(module), kind) fetch = fetch_resource(module, self_link(module), kind) changed = True else: delete(module, self_link(module), kind) fetch = {} changed = True else: if state == 'present': fetch = create(module, collection(module), kind) changed = True else: fetch = {} fetch.update({'changed': changed}) module.exit_json(**fetch) def create(module, link, kind): auth = GcpSession(module, 'bigquery') return return_if_object(module, auth.post(link, resource_to_request(module)), kind) def update(module, link, kind): auth = GcpSession(module, 'bigquery') return return_if_object(module, auth.put(link, resource_to_request(module)), kind) def delete(module, link, kind): auth = GcpSession(module, 'bigquery') return return_if_object(module, auth.delete(link), kind) def resource_to_request(module): request = { u'kind': 'bigquery#table', u'tableReference': TableTablereference(module.params.get('table_reference', {}), module).to_request(), u'clustering': module.params.get('clustering'), u'description': module.params.get('description'), u'friendlyName': module.params.get('friendly_name'), u'labels': module.params.get('labels'), u'name': module.params.get('name'), u'numRows': module.params.get('num_rows'), u'view': TableView(module.params.get('view', {}), module).to_request(), u'timePartitioning': TableTimepartitioning(module.params.get('time_partitioning', {}), module).to_request(), u'schema': TableSchema(module.params.get('schema', {}), module).to_request(), u'encryptionConfiguration': TableEncryptionconfiguration(module.params.get('encryption_configuration', 
        {}), module).to_request(),
        u'expirationTime': module.params.get('expiration_time'),
        u'externalDataConfiguration': TableExternaldataconfiguration(module.params.get('external_data_configuration', {}), module).to_request(),
    }
    # Drop keys whose value is empty/None, but keep an explicit boolean False.
    return_vals = {}
    for k, v in request.items():
        if v or v is False:
            return_vals[k] = v

    return return_vals


def fetch_resource(module, link, kind, allow_not_found=True):
    # GET the resource at ``link``; a 404 yields None when allow_not_found.
    auth = GcpSession(module, 'bigquery')
    return return_if_object(module, auth.get(link), kind, allow_not_found)


def self_link(module):
    # URL of one table resource, built from the module's params.
    return "https://www.googleapis.com/bigquery/v2/projects/{project}/datasets/{dataset}/tables/{name}".format(**module.params)


def collection(module):
    # URL of the tables collection inside the dataset (used for creation).
    return "https://www.googleapis.com/bigquery/v2/projects/{project}/datasets/{dataset}/tables".format(**module.params)


def return_if_object(module, response, kind, allow_not_found=False):
    # Decode an API response into a dict, or fail the module on API errors.
    # If not found, return nothing.
    if allow_not_found and response.status_code == 404:
        return None

    # If no content, return nothing.
    if response.status_code == 204:
        return None

    try:
        module.raise_for_status(response)
        result = response.json()
    except getattr(json.decoder, 'JSONDecodeError', ValueError):
        # JSONDecodeError only exists on Python 3; fall back to ValueError.
        module.fail_json(msg="Invalid JSON response with error: %s" % response.text)

    if navigate_hash(result, ['error', 'errors']):
        module.fail_json(msg=navigate_hash(result, ['error', 'errors']))

    return result


def is_different(module, response):
    # Compare desired (request) state with current (response) state,
    # considering only the fields present on both sides.
    request = resource_to_request(module)
    response = response_to_hash(module, response)

    # Remove all output-only from response.
    response_vals = {}
    for k, v in response.items():
        if k in request:
            response_vals[k] = v

    request_vals = {}
    for k, v in request.items():
        if k in response:
            request_vals[k] = v

    return GcpRequest(request_vals) != GcpRequest(response_vals)


# Remove unnecessary properties from the response.
# This is for doing comparisons with Ansible's current parameters.
def response_to_hash(module, response): return { u'tableReference': TableTablereference(response.get(u'tableReference', {}), module).from_response(), u'clustering': response.get(u'clustering'), u'creationTime': response.get(u'creationTime'), u'description': response.get(u'description'), u'friendlyName': response.get(u'friendlyName'), u'id': response.get(u'id'), u'labels': response.get(u'labels'), u'lastModifiedTime': response.get(u'lastModifiedTime'), u'location': response.get(u'location'), u'name': response.get(u'name'), u'numBytes': response.get(u'numBytes'), u'numLongTermBytes': response.get(u'numLongTermBytes'), u'numRows': response.get(u'numRows'), u'requirePartitionFilter': response.get(u'requirePartitionFilter'), u'type': response.get(u'type'), u'view': TableView(response.get(u'view', {}), module).from_response(), u'timePartitioning': TableTimepartitioning(response.get(u'timePartitioning', {}), module).from_response(), u'streamingBuffer': TableStreamingbuffer(response.get(u'streamingBuffer', {}), module).from_response(), u'schema': TableSchema(response.get(u'schema', {}), module).from_response(), u'encryptionConfiguration': TableEncryptionconfiguration(response.get(u'encryptionConfiguration', {}), module).from_response(), u'expirationTime': response.get(u'expirationTime'), u'externalDataConfiguration': TableExternaldataconfiguration(response.get(u'externalDataConfiguration', {}), module).from_response(), } class TableTablereference(object): def __init__(self, request, module): self.module = module if request: self.request = request else: self.request = {} def to_request(self): return remove_nones_from_dict( {u'datasetId': self.request.get('dataset_id'), u'projectId': self.request.get('project_id'), u'tableId': self.request.get('table_id')} ) def from_response(self): return remove_nones_from_dict( {u'datasetId': self.request.get(u'datasetId'), u'projectId': self.request.get(u'projectId'), u'tableId': self.request.get(u'tableId')} ) class TableView(object): 
def __init__(self, request, module): self.module = module if request: self.request = request else: self.request = {} def to_request(self): return remove_nones_from_dict( { u'useLegacySql': self.request.get('use_legacy_sql'), u'userDefinedFunctionResources': TableUserdefinedfunctionresourcesArray( self.request.get('user_defined_function_resources', []), self.module ).to_request(), } ) def from_response(self): return remove_nones_from_dict( { u'useLegacySql': self.request.get(u'useLegacySql'), u'userDefinedFunctionResources': TableUserdefinedfunctionresourcesArray( self.request.get(u'userDefinedFunctionResources', []), self.module ).from_response(), } ) class TableUserdefinedfunctionresourcesArray(object): def __init__(self, request, module): self.module = module if request: self.request = request else: self.request = [] def to_request(self): items = [] for item in self.request: items.append(self._request_for_item(item)) return items def from_response(self): items = [] for item in self.request: items.append(self._response_from_item(item)) return items def _request_for_item(self, item): return remove_nones_from_dict({u'inlineCode': item.get('inline_code'), u'resourceUri': item.get('resource_uri')}) def _response_from_item(self, item): return remove_nones_from_dict({u'inlineCode': item.get(u'inlineCode'), u'resourceUri': item.get(u'resourceUri')}) class TableTimepartitioning(object): def __init__(self, request, module): self.module = module if request: self.request = request else: self.request = {} def to_request(self): return remove_nones_from_dict( {u'expirationMs': self.request.get('expiration_ms'), u'field': self.request.get('field'), u'type': self.request.get('type')} ) def from_response(self): return remove_nones_from_dict( {u'expirationMs': self.request.get(u'expirationMs'), u'field': self.request.get(u'field'), u'type': self.request.get(u'type')} ) class TableStreamingbuffer(object): def __init__(self, request, module): self.module = module if request: 
self.request = request else: self.request = {} def to_request(self): return remove_nones_from_dict({}) def from_response(self): return remove_nones_from_dict({}) class TableSchema(object): def __init__(self, request, module): self.module = module if request: self.request = request else: self.request = {} def to_request(self): return remove_nones_from_dict({u'fields': TableFieldsArray(self.request.get('fields', []), self.module).to_request()}) def from_response(self): return remove_nones_from_dict({u'fields': TableFieldsArray(self.request.get(u'fields', []), self.module).from_response()}) class TableFieldsArray(object): def __init__(self, request, module): self.module = module if request: self.request = request else: self.request = [] def to_request(self): items = [] for item in self.request: items.append(self._request_for_item(item)) return items def from_response(self): items = [] for item in self.request: items.append(self._response_from_item(item)) return items def _request_for_item(self, item): return remove_nones_from_dict( { u'description': item.get('description'), u'fields': item.get('fields'), u'mode': item.get('mode'), u'name': item.get('name'), u'type': item.get('type'), } ) def _response_from_item(self, item): return remove_nones_from_dict( { u'description': item.get(u'description'), u'fields': item.get(u'fields'), u'mode': item.get(u'mode'), u'name': item.get(u'name'), u'type': item.get(u'type'), } ) class TableEncryptionconfiguration(object): def __init__(self, request, module): self.module = module if request: self.request = request else: self.request = {} def to_request(self): return remove_nones_from_dict({u'kmsKeyName': self.request.get('kms_key_name')}) def from_response(self): return remove_nones_from_dict({u'kmsKeyName': self.request.get(u'kmsKeyName')}) class TableExternaldataconfiguration(object): def __init__(self, request, module): self.module = module if request: self.request = request else: self.request = {} def to_request(self): return 
remove_nones_from_dict( { u'autodetect': self.request.get('autodetect'), u'compression': self.request.get('compression'), u'ignoreUnknownValues': self.request.get('ignore_unknown_values'), u'maxBadRecords': self.request.get('max_bad_records'), u'sourceFormat': self.request.get('source_format'), u'sourceUris': self.request.get('source_uris'), u'schema': TableSchema(self.request.get('schema', {}), self.module).to_request(), u'googleSheetsOptions': TableGooglesheetsoptions(self.request.get('google_sheets_options', {}), self.module).to_request(), u'csvOptions': TableCsvoptions(self.request.get('csv_options', {}), self.module).to_request(), u'bigtableOptions': TableBigtableoptions(self.request.get('bigtable_options', {}), self.module).to_request(), } ) def from_response(self): return remove_nones_from_dict( { u'autodetect': self.request.get(u'autodetect'), u'compression': self.request.get(u'compression'), u'ignoreUnknownValues': self.request.get(u'ignoreUnknownValues'), u'maxBadRecords': self.request.get(u'maxBadRecords'), u'sourceFormat': self.request.get(u'sourceFormat'), u'sourceUris': self.request.get(u'sourceUris'), u'schema': TableSchema(self.request.get(u'schema', {}), self.module).from_response(), u'googleSheetsOptions': TableGooglesheetsoptions(self.request.get(u'googleSheetsOptions', {}), self.module).from_response(), u'csvOptions': TableCsvoptions(self.request.get(u'csvOptions', {}), self.module).from_response(), u'bigtableOptions': TableBigtableoptions(self.request.get(u'bigtableOptions', {}), self.module).from_response(), } ) class TableSchema(object): def __init__(self, request, module): self.module = module if request: self.request = request else: self.request = {} def to_request(self): return remove_nones_from_dict({u'fields': TableFieldsArray(self.request.get('fields', []), self.module).to_request()}) def from_response(self): return remove_nones_from_dict({u'fields': TableFieldsArray(self.request.get(u'fields', []), self.module).from_response()}) class 
TableFieldsArray(object): def __init__(self, request, module): self.module = module if request: self.request = request else: self.request = [] def to_request(self): items = [] for item in self.request: items.append(self._request_for_item(item)) return items def from_response(self): items = [] for item in self.request: items.append(self._response_from_item(item)) return items def _request_for_item(self, item): return remove_nones_from_dict( { u'description': item.get('description'), u'fields': item.get('fields'), u'mode': item.get('mode'), u'name': item.get('name'), u'type': item.get('type'), } ) def _response_from_item(self, item): return remove_nones_from_dict( { u'description': item.get(u'description'), u'fields': item.get(u'fields'), u'mode': item.get(u'mode'), u'name': item.get(u'name'), u'type': item.get(u'type'), } ) class TableGooglesheetsoptions(object): def __init__(self, request, module): self.module = module if request: self.request = request else: self.request = {} def to_request(self): return remove_nones_from_dict({u'skipLeadingRows': self.request.get('skip_leading_rows')}) def from_response(self): return remove_nones_from_dict({u'skipLeadingRows': self.request.get(u'skipLeadingRows')}) class TableCsvoptions(object): def __init__(self, request, module): self.module = module if request: self.request = request else: self.request = {} def to_request(self): return remove_nones_from_dict( { u'allowJaggedRows': self.request.get('allow_jagged_rows'), u'allowQuotedNewlines': self.request.get('allow_quoted_newlines'), u'encoding': self.request.get('encoding'), u'fieldDelimiter': self.request.get('field_delimiter'), u'quote': self.request.get('quote'), u'skipLeadingRows': self.request.get('skip_leading_rows'), } ) def from_response(self): return remove_nones_from_dict( { u'allowJaggedRows': self.request.get(u'allowJaggedRows'), u'allowQuotedNewlines': self.request.get(u'allowQuotedNewlines'), u'encoding': self.request.get(u'encoding'), u'fieldDelimiter': 
self.request.get(u'fieldDelimiter'), u'quote': self.request.get(u'quote'), u'skipLeadingRows': self.request.get(u'skipLeadingRows'), } ) class TableBigtableoptions(object): def __init__(self, request, module): self.module = module if request: self.request = request else: self.request = {} def to_request(self): return remove_nones_from_dict( { u'ignoreUnspecifiedColumnFamilies': self.request.get('ignore_unspecified_column_families'), u'readRowkeyAsString': self.request.get('read_rowkey_as_string'), u'columnFamilies': TableColumnfamiliesArray(self.request.get('column_families', []), self.module).to_request(), } ) def from_response(self): return remove_nones_from_dict( { u'ignoreUnspecifiedColumnFamilies': self.request.get(u'ignoreUnspecifiedColumnFamilies'), u'readRowkeyAsString': self.request.get(u'readRowkeyAsString'), u'columnFamilies': TableColumnfamiliesArray(self.request.get(u'columnFamilies', []), self.module).from_response(), } ) class TableColumnfamiliesArray(object): def __init__(self, request, module): self.module = module if request: self.request = request else: self.request = [] def to_request(self): items = [] for item in self.request: items.append(self._request_for_item(item)) return items def from_response(self): items = [] for item in self.request: items.append(self._response_from_item(item)) return items def _request_for_item(self, item): return remove_nones_from_dict( { u'columns': TableColumnsArray(item.get('columns', []), self.module).to_request(), u'encoding': item.get('encoding'), u'familyId': item.get('family_id'), u'onlyReadLatest': item.get('only_read_latest'), u'type': item.get('type'), } ) def _response_from_item(self, item): return remove_nones_from_dict( { u'columns': TableColumnsArray(item.get(u'columns', []), self.module).from_response(), u'encoding': item.get(u'encoding'), u'familyId': item.get(u'familyId'), u'onlyReadLatest': item.get(u'onlyReadLatest'), u'type': item.get(u'type'), } ) class TableColumnsArray(object): def 
__init__(self, request, module): self.module = module if request: self.request = request else: self.request = [] def to_request(self): items = [] for item in self.request: items.append(self._request_for_item(item)) return items def from_response(self): items = [] for item in self.request: items.append(self._response_from_item(item)) return items def _request_for_item(self, item): return remove_nones_from_dict( { u'encoding': item.get('encoding'), u'fieldName': item.get('field_name'), u'onlyReadLatest': item.get('only_read_latest'), u'qualifierString': item.get('qualifier_string'), u'type': item.get('type'), } ) def _response_from_item(self, item): return remove_nones_from_dict( { u'encoding': item.get(u'encoding'), u'fieldName': item.get(u'fieldName'), u'onlyReadLatest': item.get(u'onlyReadLatest'), u'qualifierString': item.get(u'qualifierString'), u'type': item.get(u'type'), } ) if __name__ == '__main__': main()
gpl-3.0
paulproteus/django
django/template/loader.py
112
7927
# Wrapper for loading templates from storage of some sort (e.g. filesystem, database). # # This uses the TEMPLATE_LOADERS setting, which is a list of loaders to use. # Each loader is expected to have this interface: # # callable(name, dirs=[]) # # name is the template name. # dirs is an optional list of directories to search instead of TEMPLATE_DIRS. # # The loader should return a tuple of (template_source, path). The path returned # might be shown to the user for debugging purposes, so it should identify where # the template was loaded from. # # A loader may return an already-compiled template instead of the actual # template source. In that case the path returned should be None, since the # path information is associated with the template during the compilation, # which has already been done. # # Each loader should have an "is_usable" attribute set. This is a boolean that # specifies whether the loader can be used in this Python installation. Each # loader is responsible for setting this when it's initialized. # # For example, the eggs loader (which is capable of loading templates from # Python eggs) sets is_usable to False if the "pkg_resources" module isn't # installed, because pkg_resources is necessary to read eggs. 
from django.core.exceptions import ImproperlyConfigured from django.template.base import Origin, Template, Context, TemplateDoesNotExist, add_to_builtins from django.utils.importlib import import_module from django.conf import settings from django.utils import six template_source_loaders = None class BaseLoader(object): is_usable = False def __init__(self, *args, **kwargs): pass def __call__(self, template_name, template_dirs=None): return self.load_template(template_name, template_dirs) def load_template(self, template_name, template_dirs=None): source, display_name = self.load_template_source(template_name, template_dirs) origin = make_origin(display_name, self.load_template_source, template_name, template_dirs) try: template = get_template_from_string(source, origin, template_name) return template, None except TemplateDoesNotExist: # If compiling the template we found raises TemplateDoesNotExist, back off to # returning the source and display name for the template we were asked to load. # This allows for correct identification (later) of the actual template that does # not exist. return source, display_name def load_template_source(self, template_name, template_dirs=None): """ Returns a tuple containing the source and origin for the given template name. """ raise NotImplementedError def reset(self): """ Resets any state maintained by the loader instance (e.g., cached templates or cached loader modules). 
""" pass class LoaderOrigin(Origin): def __init__(self, display_name, loader, name, dirs): super(LoaderOrigin, self).__init__(display_name) self.loader, self.loadname, self.dirs = loader, name, dirs def reload(self): return self.loader(self.loadname, self.dirs)[0] def make_origin(display_name, loader, name, dirs): if settings.TEMPLATE_DEBUG and display_name: return LoaderOrigin(display_name, loader, name, dirs) else: return None def find_template_loader(loader): if isinstance(loader, (tuple, list)): loader, args = loader[0], loader[1:] else: args = [] if isinstance(loader, six.string_types): module, attr = loader.rsplit('.', 1) try: mod = import_module(module) except ImportError as e: raise ImproperlyConfigured('Error importing template source loader %s: "%s"' % (loader, e)) try: TemplateLoader = getattr(mod, attr) except AttributeError as e: raise ImproperlyConfigured('Error importing template source loader %s: "%s"' % (loader, e)) if hasattr(TemplateLoader, 'load_template_source'): func = TemplateLoader(*args) else: # Try loading module the old way - string is full path to callable if args: raise ImproperlyConfigured("Error importing template source loader %s - can't pass arguments to function-based loader." % loader) func = TemplateLoader if not func.is_usable: import warnings warnings.warn("Your TEMPLATE_LOADERS setting includes %r, but your Python installation doesn't support that type of template loading. Consider removing that line from TEMPLATE_LOADERS." % loader) return None else: return func else: raise ImproperlyConfigured('Loader does not define a "load_template" callable template source loader') def find_template(name, dirs=None): # Calculate template_source_loaders the first time the function is executed # because putting this logic in the module-level namespace may cause # circular import errors. See Django ticket #1292. 
global template_source_loaders if template_source_loaders is None: loaders = [] for loader_name in settings.TEMPLATE_LOADERS: loader = find_template_loader(loader_name) if loader is not None: loaders.append(loader) template_source_loaders = tuple(loaders) for loader in template_source_loaders: try: source, display_name = loader(name, dirs) return (source, make_origin(display_name, loader, name, dirs)) except TemplateDoesNotExist: pass raise TemplateDoesNotExist(name) def get_template(template_name): """ Returns a compiled Template object for the given template name, handling template inheritance recursively. """ template, origin = find_template(template_name) if not hasattr(template, 'render'): # template needs to be compiled template = get_template_from_string(template, origin, template_name) return template def get_template_from_string(source, origin=None, name=None): """ Returns a compiled Template object for the given template code, handling template inheritance recursively. """ return Template(source, origin, name) def render_to_string(template_name, dictionary=None, context_instance=None): """ Loads the given template_name and renders it with the given dictionary as context. The template_name may be a string to load a single template using get_template, or it may be a tuple to use select_template to find one of the templates in the list. Returns a string. """ dictionary = dictionary or {} if isinstance(template_name, (list, tuple)): t = select_template(template_name) else: t = get_template(template_name) if not context_instance: return t.render(Context(dictionary)) # Add the dictionary to the context stack, ensuring it gets removed again # to keep the context_instance in the same state it started in. context_instance.update(dictionary) try: return t.render(context_instance) finally: context_instance.pop() def select_template(template_name_list): "Given a list of template names, returns the first that can be loaded." 
if not template_name_list: raise TemplateDoesNotExist("No template names provided") not_found = [] for template_name in template_name_list: try: return get_template(template_name) except TemplateDoesNotExist as e: if e.args[0] not in not_found: not_found.append(e.args[0]) continue # If we get here, none of the templates could be loaded raise TemplateDoesNotExist(', '.join(not_found)) add_to_builtins('django.template.loader_tags')
bsd-3-clause
Jorge-Rodriguez/ansible
lib/ansible/modules/network/fortios/fortios_authentication_setting.py
24
8962
#!/usr/bin/python from __future__ import (absolute_import, division, print_function) # Copyright 2018 Fortinet, Inc. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. # # the lib use python logging can get it if the following is set in your # Ansible config. __metaclass__ = type ANSIBLE_METADATA = {'status': ['preview'], 'supported_by': 'community', 'metadata_version': '1.1'} DOCUMENTATION = ''' --- module: fortios_authentication_setting short_description: Configure authentication setting in Fortinet's FortiOS and FortiGate. description: - This module is able to configure a FortiGate or FortiOS by allowing the user to configure authentication feature and setting category. Examples includes all options and need to be adjusted to datasources before usage. Tested with FOS v6.0.2 version_added: "2.8" author: - Miguel Angel Munoz (@mamunozgonzalez) - Nicolas Thomas (@thomnico) notes: - Requires fortiosapi library developed by Fortinet - Run as a local_action in your playbook requirements: - fortiosapi>=0.9.8 options: host: description: - FortiOS or FortiGate ip address. required: true username: description: - FortiOS or FortiGate username. required: true password: description: - FortiOS or FortiGate password. default: "" vdom: description: - Virtual domain, among those defined previously. A vdom is a virtual instance of the FortiGate that can be configured and used as a different unit. 
default: root https: description: - Indicates if the requests towards FortiGate must use HTTPS protocol type: bool default: false authentication_setting: description: - Configure authentication setting. default: null suboptions: active-auth-scheme: description: - Active authentication method (scheme name). Source authentication.scheme.name. captive-portal: description: - Captive portal host name. Source firewall.address.name. captive-portal-ip: description: - Captive portal IP address. captive-portal-ip6: description: - Captive portal IPv6 address. captive-portal-port: description: - Captive portal port number (1 - 65535, default = 0). captive-portal-type: description: - Captive portal type. choices: - fqdn - ip captive-portal6: description: - IPv6 captive portal host name. Source firewall.address6.name. sso-auth-scheme: description: - Single-Sign-On authentication method (scheme name). Source authentication.scheme.name. ''' EXAMPLES = ''' - hosts: localhost vars: host: "192.168.122.40" username: "admin" password: "" vdom: "root" tasks: - name: Configure authentication setting. 
fortios_authentication_setting: host: "{{ host }}" username: "{{ username }}" password: "{{ password }}" vdom: "{{ vdom }}" authentication_setting: active-auth-scheme: "<your_own_value> (source authentication.scheme.name)" captive-portal: "<your_own_value> (source firewall.address.name)" captive-portal-ip: "<your_own_value>" captive-portal-ip6: "<your_own_value>" captive-portal-port: "7" captive-portal-type: "fqdn" captive-portal6: "<your_own_value> (source firewall.address6.name)" sso-auth-scheme: "<your_own_value> (source authentication.scheme.name)" ''' RETURN = ''' build: description: Build number of the fortigate image returned: always type: str sample: '1547' http_method: description: Last method used to provision the content into FortiGate returned: always type: str sample: 'PUT' http_status: description: Last result given by FortiGate on last operation applied returned: always type: str sample: "200" mkey: description: Master key (id) used in the last call to FortiGate returned: success type: str sample: "id" name: description: Name of the table used to fulfill the request returned: always type: str sample: "urlfilter" path: description: Path of the table used to fulfill the request returned: always type: str sample: "webfilter" revision: description: Internal revision number returned: always type: str sample: "17.0.2.10658" serial: description: Serial number of the unit returned: always type: str sample: "FGVMEVYYQT3AB5352" status: description: Indication of the operation's result returned: always type: str sample: "success" vdom: description: Virtual domain used returned: always type: str sample: "root" version: description: Version of the FortiGate returned: always type: str sample: "v5.6.3" ''' from ansible.module_utils.basic import AnsibleModule fos = None def login(data): host = data['host'] username = data['username'] password = data['password'] fos.debug('on') if 'https' in data and not data['https']: fos.https('off') else: fos.https('on') 
fos.login(host, username, password) def filter_authentication_setting_data(json): option_list = ['active-auth-scheme', 'captive-portal', 'captive-portal-ip', 'captive-portal-ip6', 'captive-portal-port', 'captive-portal-type', 'captive-portal6', 'sso-auth-scheme'] dictionary = {} for attribute in option_list: if attribute in json and json[attribute] is not None: dictionary[attribute] = json[attribute] return dictionary def authentication_setting(data, fos): vdom = data['vdom'] authentication_setting_data = data['authentication_setting'] filtered_data = filter_authentication_setting_data(authentication_setting_data) return fos.set('authentication', 'setting', data=filtered_data, vdom=vdom) def fortios_authentication(data, fos): login(data) methodlist = ['authentication_setting'] for method in methodlist: if data[method]: resp = eval(method)(data, fos) break fos.logout() return not resp['status'] == "success", resp['status'] == "success", resp def main(): fields = { "host": {"required": True, "type": "str"}, "username": {"required": True, "type": "str"}, "password": {"required": False, "type": "str", "no_log": True}, "vdom": {"required": False, "type": "str", "default": "root"}, "https": {"required": False, "type": "bool", "default": "False"}, "authentication_setting": { "required": False, "type": "dict", "options": { "active-auth-scheme": {"required": False, "type": "str"}, "captive-portal": {"required": False, "type": "str"}, "captive-portal-ip": {"required": False, "type": "str"}, "captive-portal-ip6": {"required": False, "type": "str"}, "captive-portal-port": {"required": False, "type": "int"}, "captive-portal-type": {"required": False, "type": "str", "choices": ["fqdn", "ip"]}, "captive-portal6": {"required": False, "type": "str"}, "sso-auth-scheme": {"required": False, "type": "str"} } } } module = AnsibleModule(argument_spec=fields, supports_check_mode=False) try: from fortiosapi import FortiOSAPI except ImportError: module.fail_json(msg="fortiosapi module is 
required") global fos fos = FortiOSAPI() is_error, has_changed, result = fortios_authentication(module.params, fos) if not is_error: module.exit_json(changed=has_changed, meta=result) else: module.fail_json(msg="Error in repo", meta=result) if __name__ == '__main__': main()
gpl-3.0
Dave667/service
plugin.video.tvrain.ru/default.py
2
1096
#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright (c) 2011 XBMC-Russia, HD-lab Team, E-mail: dev@hd-lab.ru # Writer (c) 12/03/2011, Kostynoy S.A., E-mail: seppius2@gmail.com # # This Program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2, or (at your option) # any later version. # # This Program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; see the file COPYING. If not, write to # the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. # http://www.gnu.org/licenses/gpl.html import sys, os sys.path.append(os.path.join(os.getcwd().replace(';', ''), 'resources', 'lib')) if (__name__ == "__main__" ): import addon addon.addon_main()
gpl-2.0
RafaelTorrealba/odoo
openerp/report/common.py
457
3337
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## pageSize = { 'A4': (210,297), 'A5': (148.5,105) } odt_namespace = { "office":"{urn:oasis:names:tc:opendocument:xmlns:office:1.0}", "style":"{urn:oasis:names:tc:opendocument:xmlns:style:1.0}", "text":"{urn:oasis:names:tc:opendocument:xmlns:text:1.0}", "table":"{urn:oasis:names:tc:opendocument:xmlns:table:1.0}", "draw":"{urn:oasis:names:tc:opendocument:xmlns:drawing:1.0}", "fo":"{urn:oasis:names:tc:opendocument:xmlns:xsl-fo-compatible:1.0}", "xlink":"{http://www.w3.org/1999/xlink}", "dc":"{http://purl.org/dc/elements/1.1/}", "meta":"{urn:oasis:names:tc:opendocument:xmlns:meta:1.0}", "number":"{urn:oasis:names:tc:opendocument:xmlns:datastyle:1.0}", "svg":"{urn:oasis:names:tc:opendocument:xmlns:svg-compatible:1.0}", "chart":"{urn:oasis:names:tc:opendocument:xmlns:chart:1.0}", "dr3d":"{urn:oasis:names:tc:opendocument:xmlns:dr3d:1.0}", "math":"{http://www.w3.org/1998/Math/MathML}", "form":"{urn:oasis:names:tc:opendocument:xmlns:form:1.0}", "script":"{urn:oasis:names:tc:opendocument:xmlns:script:1.0}", "ooo":"{http://openoffice.org/2004/office}", 
"ooow":"{http://openoffice.org/2004/writer}", "oooc":"{http://openoffice.org/2004/calc}", "dom":"{http://www.w3.org/2001/xml-events}" } sxw_namespace = { "office":"{http://openoffice.org/2000/office}", "style":"{http://openoffice.org/2000/style}", "text":"{http://openoffice.org/2000/text}", "table":"{http://openoffice.org/2000/table}", "draw":"{http://openoffice.org/2000/drawing}", "fo":"{http://www.w3.org/1999/XSL/Format}", "xlink":"{http://www.w3.org/1999/xlink}", "dc":"{http://purl.org/dc/elements/1.1/}", "meta":"{http://openoffice.org/2000/meta}", "number":"{http://openoffice.org/2000/datastyle}", "svg":"{http://www.w3.org/2000/svg}", "chart":"{http://openoffice.org/2000/chart}", "dr3d":"{http://openoffice.org/2000/dr3d}", "math":"{http://www.w3.org/1998/Math/MathML}", "form":"{http://openoffice.org/2000/form}", "script":"{http://openoffice.org/2000/script}", "ooo":"{http://openoffice.org/2004/office}", "ooow":"{http://openoffice.org/2004/writer}", "oooc":"{http://openoffice.org/2004/calc}", "dom":"{http://www.w3.org/2001/xml-events}"} # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
adico-somoto/deep-learning
transfer-learning/tensorflow_vgg/test_vgg19_trainable.py
152
1435
""" Simple tester for the vgg19_trainable """ import tensorflow as tf from tensoflow_vgg import vgg19_trainable as vgg19 from tensoflow_vgg import utils img1 = utils.load_image("./test_data/tiger.jpeg") img1_true_result = [1 if i == 292 else 0 for i in range(1000)] # 1-hot result for tiger batch1 = img1.reshape((1, 224, 224, 3)) with tf.device('/cpu:0'): sess = tf.Session() images = tf.placeholder(tf.float32, [1, 224, 224, 3]) true_out = tf.placeholder(tf.float32, [1, 1000]) train_mode = tf.placeholder(tf.bool) vgg = vgg19.Vgg19('./vgg19.npy') vgg.build(images, train_mode) # print number of variables used: 143667240 variables, i.e. ideal size = 548MB print(vgg.get_var_count()) sess.run(tf.global_variables_initializer()) # test classification prob = sess.run(vgg.prob, feed_dict={images: batch1, train_mode: False}) utils.print_prob(prob[0], './synset.txt') # simple 1-step training cost = tf.reduce_sum((vgg.prob - true_out) ** 2) train = tf.train.GradientDescentOptimizer(0.0001).minimize(cost) sess.run(train, feed_dict={images: batch1, true_out: [img1_true_result], train_mode: True}) # test classification again, should have a higher probability about tiger prob = sess.run(vgg.prob, feed_dict={images: batch1, train_mode: False}) utils.print_prob(prob[0], './synset.txt') # test save vgg.save_npy(sess, './test-save.npy')
mit
Garrett-R/scikit-learn
examples/linear_model/plot_ols_ridge_variance.py
387
2060
#!/usr/bin/python # -*- coding: utf-8 -*- """ ========================================================= Ordinary Least Squares and Ridge Regression Variance ========================================================= Due to the few points in each dimension and the straight line that linear regression uses to follow these points as well as it can, noise on the observations will cause great variance as shown in the first plot. Every line's slope can vary quite a bit for each prediction due to the noise induced in the observations. Ridge regression is basically minimizing a penalised version of the least-squared function. The penalising `shrinks` the value of the regression coefficients. Despite the few data points in each dimension, the slope of the prediction is much more stable and the variance in the line itself is greatly reduced, in comparison to that of the standard linear regression """ print(__doc__) # Code source: Gaël Varoquaux # Modified for documentation by Jaques Grobler # License: BSD 3 clause import numpy as np import matplotlib.pyplot as plt from sklearn import linear_model X_train = np.c_[.5, 1].T y_train = [.5, 1] X_test = np.c_[0, 2].T np.random.seed(0) classifiers = dict(ols=linear_model.LinearRegression(), ridge=linear_model.Ridge(alpha=.1)) fignum = 1 for name, clf in classifiers.items(): fig = plt.figure(fignum, figsize=(4, 3)) plt.clf() plt.title(name) ax = plt.axes([.12, .12, .8, .8]) for _ in range(6): this_X = .1 * np.random.normal(size=(2, 1)) + X_train clf.fit(this_X, y_train) ax.plot(X_test, clf.predict(X_test), color='.5') ax.scatter(this_X, y_train, s=3, c='.5', marker='o', zorder=10) clf.fit(X_train, y_train) ax.plot(X_test, clf.predict(X_test), linewidth=2, color='blue') ax.scatter(X_train, y_train, s=30, c='r', marker='+', zorder=10) ax.set_xticks(()) ax.set_yticks(()) ax.set_ylim((0, 1.6)) ax.set_xlabel('X') ax.set_ylabel('y') ax.set_xlim(0, 2) fignum += 1 plt.show()
bsd-3-clause
vseledkin/neon
neon/backends/nervanacpu.py
7
47051
# ---------------------------------------------------------------------------- # Copyright 2014 Nervana Systems Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ---------------------------------------------------------------------------- """ Our CPU based backend interface and tensor data structure. Our implementation wraps :mod:`numpy` ndarray and related operations """ import numpy as np import logging from neon.backends.backend import Tensor, Backend, OpTreeNode, OpCollection from neon.backends.layer_cpu import ConvLayer, DeconvLayer, PoolLayer _none_slice = slice(None, None, None) logger = logging.getLogger(__name__) # TODO: enable this flag to find numerical problems # np.seterr(all='raise') class CPUTensor(Tensor): """ The n-dimensional array data structure that resides in host memory, and is meant to be manipulated on the CPU. wrapped `numpy.ndarray` tensor. Arguments: dtype (numpy.ndtype, optional): underlying data type of the elements. ary (data array, optional): optionally it can be Instantiated with a data array persist_values (bool, optional): If set to True (the default), the values assigned to this Tensor will persist across multiple begin and end calls. 
Setting to False may provide a performance increase if values do not need to be maintained across such calls See also: NervanaCPU class """ _tensor = None def __init__(self, backend, shape=None, dtype=np.float32, ary=None, name=None, persist_values=True): super(CPUTensor, self).__init__(backend, shape, dtype, name, persist_values) # supported dtypes assert dtype in (np.float16, np.float32, np.float64, np.uint8, np.int8, np.uint16, np.int16, np.uint32, np.int32) dtype = np.dtype(dtype) if type(ary) != np.ndarray: self._tensor = np.array(ary, dtype) elif ary.dtype != dtype: self._tensor = ary.astype(dtype) else: self._tensor = ary self.shape = self._tensor.shape try: size = 1 for dim in self.shape: size *= dim except TypeError: assert isinstance(self.shape, (int, long, np.integer)) size = self.shape self.shape = (self.shape,) self.size = size def __str__(self): """ Returns a string representation of this Tensor. Returns: str: the representation. """ if self._tensor.base is not None: base_id = id(self._tensor.base) else: base_id = id(self._tensor) return ("CPUTensor(base 0x%x) name:%s shape:%s dtype:%s strides:%s" " is_c_contiguous:%s" % (base_id, self.name, self.shape, self.dtype, self._tensor.strides, self._tensor.flags.c_contiguous)) def __repr__(self): """ Returns a more unambiguous string representation of the Tensor. Returns: str: the representation. """ return self.__str__() def __len__(self): """ Return the size of the leading dimension of self. """ if len(self.shape): return self.shape[0] else: return 0 def __setitem__(self, key, value): """ Assign the specified value to a subset of elements found via slice style indexing along each dimension. e.g. A[5:10, :] = 4.5. Each slice consists of start_idx:stop_idx:step_size triplets. If step_size isn't specified it defaults to 1. If start_idx isn't specified it defaults to 0. If stop_idx isn't specified it defaults to the total number of elements along that dimension. 
As such a slice value of ':' allows one to select all elements along that dimension. Arguments: key (int, slice, tuple): indices of each dimension's slice. value (numeric array, CPUTensor): values to be assigned to the extracted element subset. If an array it should be the same shape as what key indexes (or be broadcastable as such). """ self.__getitem__(key)._assign(value) return self def __getitem__(self, key): """ Extract a subset view of the items via slice style indexing along each dimension. e.g. A[5:10, :]. Each slice consists of start_idx:stop_idx:step_size triplets. If step_size isn't specified it defaults to 1. If start_idx isn't specified it defaults to 0. If stop_idx isn't specified it defaults to the total number of elements along that dimension. As such a slice value of ':' allows one to select all elements along that dimension Arguments: key (int, slice, tuple): indices of each dimension's slice. Returns: CPUTensor: view of self corresponding to the subset items. """ # speed up common case of [:] if not isinstance(key, tuple): if key == _none_slice: return self key = (key,) # ensure we return a view # exact same behavior as cpu # let a.shape = (3,4) # a[1,1] = 10 # cpu, gpu and numpy # type(a[1,1]) # for cpu and gpu type is Tensor; for numpy type is float if type(self._tensor[key]) is not np.ndarray: first_int_idx = None is_all_int = True for idx, k in enumerate(key): if type(k) is int: if first_int_idx is None: first_int_idx = idx else: is_all_int = False break if is_all_int: key_list = list(key) idx = key_list[first_int_idx] key_list[first_int_idx] = slice(idx, idx + 1, None) key = tuple(key_list) # return a view of the tensor return self.__class__( backend=self.backend, ary=self._tensor[key], dtype=self._tensor.dtype) def _assign(self, value): """ Assign an input value to the CPU tensor. The NervanaCPU does clipping for int and uint types, when overflow happens Arguments: value (GPUTennsor, OpTreNode, numeric): the value to be assigned. 
""" if isinstance(value, (CPUTensor, OpTreeNode)): OpTreeNode.build("assign", self, value) elif isinstance(value, (int, float, np.ndarray)): self.set(value) else: raise TypeError("Invalid type for assignment: %s" % type(value)) return self def set(self, value): """ Wrap the value into NervanaCPU tensor. Arguments: value: Array or single input. If it is array, check and Convert the dtype and shape. If it is single value, broadcast to the memory Returns: self """ if np.dtype(self.dtype).kind == 'i' or np.dtype(self.dtype).kind == 'u': clipping_str = _overflow_clipping_ops[ "clipping"].format('value', 'self.dtype') exec(clipping_str) if isinstance(value, np.ndarray): if value.dtype is not self.dtype: value = value.astype(self.dtype) self._tensor[:] = value return self def get(self): """ return the array """ return self._tensor def asnumpyarray(self): """ Convert the CPUTensor to an in host memory `numpy.ndarray`. A copy of the data may be made depending on where the CPUTensor normally resides. Returns: numpy.ndarray view or copy of the CPUTensor data. """ return self._tensor def take(self, indices, axis=None): """ Select a subset of elements from an array across an axis Arguments: indices (Tensor, numpy ndarray): indicies of elements to select axis (int): axis across which to select the values Returns: Tensor: Tensor with selected values """ if type(indices) == self.__class__: indices = indices._tensor # if indices are nx1 or 1xn, much of our code assumes these dims are # collapsed, hence the squeeze call. if type(indices) == np.ndarray: indices = indices.squeeze() return self.__class__( backend=self.backend, ary=self._tensor.take(indices, axis), dtype=self._tensor.dtype) def fill(self, value): """ Assign specified value to each element of this CPUTensor. Arguments: value (numeric): The value to be assigned to each element. Return: CPUTensor: updated view of the data. 
""" self._tensor.fill(value) return self def copy(self, a): """ Construct and return a deep copy of the Tensor passed. Arguments: a (Tensor): the object to copy Returns: Tensor: new array object with the same values as input tensor """ return self._assign(a) def copy_from(self, a): """ alias of copy """ return self._assign(a) def reshape(self, *shape): """ return a reshaped view """ if isinstance(shape[0], (tuple, list)): shape = tuple(shape[0]) if shape == self.shape: return self return self.__class__( backend=self.backend, ary=self._tensor.reshape(shape), dtype=self._tensor.dtype) @property def T(self): """ Return a transposed view For 2D tensor, will do a normal transpose For 3D tensor, will keep the 0 dim, swap the 1 and 2 dimensions """ if len(self.shape) <= 2: ary = self._tensor.transpose() else: # support for batched dot. # perserve outer dimension but reverse inner dims # shape = np.concatenate((shape[-1:], shape[:-1]) ary = self._tensor.swapaxes(1, 2) return self.__class__( backend=self.backend, ary=ary, dtype=self._tensor.dtype) def transpose(self, out=None): """ Return a transposed view of the data. Alias of .T property """ if out: return OpTreeNode.build("assign", out, self.T) return self.T def hist(self, tag): """ Compute a histogram of the current tensor values. Arguments: tag (string): Tag to identify the current state of the tensor, useful for disambiguating multiple histograms of the same tensor at different points in time. Returns: Tensor containing the histogram data. 
""" nbins = self.backend.hist_bins offset = self.backend.hist_offset bins = np.arange(nbins + 1) + float(offset) bins[0] = -float('Inf') np_inp_log_abs = np.rint( np.log2(np.abs(self.get().astype(np.float32)))) np_hist, edges = np.histogram(np_inp_log_abs, density=False, bins=bins) nc_hist = self.backend._hist_tensor(tag)._assign(np_hist) return nc_hist # def repeat(self, repeats, axis): # return self.__class__( # backend=self.backend, # ary=self._tensor.repeat(repeats, axis)) class CustomNumpy: @staticmethod def argmax(x, axis=1, keepdims=True): """ calls numpy argmax with keepdims """ new_shape = list(x.shape) new_shape[axis] = 1 new_shape = tuple(new_shape) return np.argmax(x, axis=axis).reshape(new_shape) @staticmethod def argmin(x, axis=1, keepdims=True): """ calls numpy argmin with keepdims """ new_shape = list(x.shape) new_shape[axis] = 1 new_shape = tuple(new_shape) return np.argmin(x, axis=axis).reshape(new_shape) _overflow_clipping_ops = { "clipping": '{0}=np.around({0}); {0}=np.clip({0}, np.iinfo({1}).min, np.iinfo({1}).max)', } def _assign_right_to_left(left, right): left[:] = right numpy_call_dict = { # assign "assign": _assign_right_to_left, # zero_operand ops # unary ops "neg": lambda left: -left, "abs": lambda left: np.abs(left), "sgn": lambda left: np.sign(left), "sqrt": lambda left: np.sqrt(left), "sqr": lambda left: np.square(left), "exp": lambda left: np.exp(left), "log": lambda left: np.log(left), "exp2": lambda left: np.exp2(left), "log2": lambda left: np.log2(left), "sig": lambda left: 1. / (1. + np.exp(-left)), "sig2": lambda left: 1. / (1. + np.exp2(-left)), "tanh": lambda left: np.tanh(left), "tanh2": lambda left: (np.exp2(2. * left) - 1.) / (np.exp2(2. 
* left) + 1.), # binary ops "add": lambda left, right: left + right, "sub": lambda left, right: left - right, "mul": lambda left, right: left * right, "div": lambda left, right: left / right, "eq": lambda left, right: left == right, "ne": lambda left, right: left != right, "lt": lambda left, right: left < right, "le": lambda left, right: left <= right, "gt": lambda left, right: left > right, "ge": lambda left, right: left >= right, "pow": lambda left, right: np.power(left, right), "minimum": lambda left, right: np.minimum(left, right), "maximum": lambda left, right: np.maximum(left, right), "dot": lambda left, right: np.dot(left, right), # reduction ops "sum": lambda op_dict, left: np.sum(left, axis=op_dict['axis'], keepdims=True), "max": lambda op_dict, left: np.max(left, axis=op_dict['axis'], keepdims=True), "min": lambda op_dict, left: np.min(left, axis=op_dict['axis'], keepdims=True), "argmax": lambda op_dict, left: CustomNumpy.argmax(left, axis=op_dict['axis'], keepdims=True), "argmin": lambda op_dict, left: CustomNumpy.argmin(left, axis=op_dict['axis'], keepdims=True), } class NervanaCPU(Backend): """ Sets up a :mod:`numpy` based backend for matrix ops. By default, we use 32-bit element data types for any arrays constructed. Attributes: default_dtype (dtype): default element data type. tensor_cls: underlying Tensor type. 
For CPU backend, it will be CPU tensor See also: CPUTensor """ def __init__(self, rng_seed=None, default_dtype=np.float32, hist_bins=64, hist_offset=-48): if default_dtype not in [np.float16, np.float32, np.float64]: logger.error('Default data type for nervanagpu ' 'backend must be float16, 32 or 64') raise ValueError super(NervanaCPU, self).__init__(rng_seed, default_dtype) self.tensor_cls = CPUTensor # log logger.info("Initialized NervanaCPU") self.hist_bins = hist_bins self.hist_offset = hist_offset self.hist_max = 4096 self.hist_buf = self.empty((self.hist_max, hist_bins), dtype=np.int32) self.hist_idx = 0 self.hist_map = dict() def rng_reset(self): """ Reset the random state to the state where the Backend is first initialized. """ self.rng.set_state(self.init_rng_state) def execute(self, optree): """ Arguments: optree: (OpTreeNode): the OpTreeNode object that represents all the operations """ # deal with onehot specially for now if (len(optree) == 3 and isinstance(optree[2], OpTreeNode) and optree[2][0]['op'] == 'onehot'): assert optree[0]['op'] == 'assign' assert isinstance(optree[1], Tensor) # get the output buffer array_output = optree[1].get() # get the output shape and onehot representation length will be on # this axis numpy_axis = optree[2][0]['axis'] numpy_ind0 = optree[2][0]['idx'].get().squeeze() numpy_ind_len = numpy_ind0.size numpy_ind1 = range(numpy_ind_len) # ind for indexing numpy_ind = np.zeros((2, numpy_ind_len), dtype=np.int32) numpy_ind[numpy_axis] = numpy_ind0 numpy_ind[1 - numpy_axis] = numpy_ind1 array_output[:] = 0 array_output[numpy_ind.tolist()] = 1 return array_output # get post order stack postfix_stack = optree.traverse(list()) # init compute stack compute_stack = [] # iterate through postfix stack to compute result for p in postfix_stack: if isinstance(p, dict): # TODO add rand and onehot here if p['op'] in OpCollection.unary_ops: left = compute_stack.pop() compute_stack.append(numpy_call_dict[p['op']](left)) elif p['op'] in 
OpCollection.binary_ops: right = compute_stack.pop() left = compute_stack.pop() compute_stack.append(numpy_call_dict[p['op']](left, right)) elif p['op'] in OpCollection.reduction_ops: left = compute_stack.pop() compute_stack.append(numpy_call_dict[p['op']](p, left)) elif p['op'] in OpCollection.zero_operand_ops: compute_stack.append(numpy_call_dict[p['op']](None)) else: raise NotImplementedError elif isinstance(p, CPUTensor): compute_stack.append(p._tensor) else: compute_stack.append(p) assert len(compute_stack) == 1 return postfix_stack[0] def empty(self, shape, dtype=None, name=None, persist_values=True): """ Instantiate a new instance of the CPUTensor class without initializing individual element values. Arguments: shape (int, list): The size of each dimension of the Tensor. dtype (dtype, optional): Element data type. If not specified we use default_dtype value persist_values (bool, optional): If set to True (the default), the values assigned to this Tensor will persist across multiple begin and end calls. Setting to False may provide a performance increase if values do not need to be maintained across such calls Returns: CPUTensor: newly created data structure reference """ dtype = self.default_dtype if dtype is None else dtype return self.tensor_cls( backend=self, ary=np.zeros(shape, dtype), dtype=dtype, name=name, persist_values=persist_values) def array(self, ary, dtype=None, name=None, persist_values=True): """ Instantiate a new instance of the CPUTensor class setting each element value to what is specified in ary. Arguments: ary (numpy.ndarray): The data structure containing element values spread across a number of dimensions. Python built-in types like ints and lists are supported. dtype (dtype, optional): Element data type. If not specified we use default_dtype value ('float32' unless overridden). persist_values (bool, optional): If set to True (the default), the values assigned to this Tensor will persist across multiple begin and end calls. 
Setting to False may provide a performance increase if values do not need to be maintained across such calls Returns: CPUTensor: newly created data structure reference """ dtype = self.default_dtype if dtype is None else dtype return self.tensor_cls( backend=self, ary=np.array(ary, dtype), dtype=dtype, name=name, persist_values=persist_values) def zeros(self, shape, dtype=None, name=None, persist_values=True): """ Instantiate a new instance of the CPUTensor class setting each element value to 0. Arguments: shape (list of ints): The size of each dimension of the Tensor. dtype (dtype, optional): Element data type. If not specified we use default_dtype value ('float32' unless overridden). persist_values (bool, optional): If set to True (the default), the values assigned to this Tensor will persist across multiple begin and end calls. Setting to False may provide a performance increase if values do not need to be maintained across such calls Returns: CPUTensor: newly created data structure reference """ dtype = self.default_dtype if dtype is None else dtype return self.tensor_cls( backend=self, ary=np.zeros(shape, dtype), dtype=dtype, name=name, persist_values=persist_values) def ones(self, shape, dtype=None, name=None, persist_values=True): """ Instantiate a new instance of the CPUTensor class setting each element value to 1. Arguments: shape (list of ints): The size of each dimension of the Tensor. dtype (dtype, optional): Element data type. If not specified we use default_dtype value ('float32' unless overridden). persist_values (bool, optional): If set to True (the default), the values assigned to this Tensor will persist across multiple begin and end calls. 
Setting to False may provide a performance increase if values do not need to be maintained across such calls Returns: CPUTensor: newly created data structure reference """ dtype = self.default_dtype if dtype is None else dtype return self.tensor_cls( backend=self, ary=np.ones(shape, dtype), dtype=dtype, name=name, persist_values=persist_values) def empty_like(self, ary, dtype=None, name=None, persist_values=True): """ Instantiate a new instance of this backend's Tensor class, with the shape taken from ary. Arguments: ary (tensor object): Tensor to inherit the dimensions of. dtype (data-type, optional): If present, specifies the underlying type to employ for each element. persist_values (bool, optional): If set to True (the default), the values assigned to this Tensor will persist across multiple begin and end calls. Setting to False may provide a performance increase if values do not need to be maintained across such calls Returns: Tensor: array object """ dtype = self.default_dtype if dtype is None else dtype return self.tensor_cls( backend=self, ary=np.zeros(ary.shape, dtype), dtype=dtype, name=name, persist_values=persist_values) def zeros_like(self, ary, dtype=None, name=None, persist_values=True): """ Instantiate a new instance of this backend's Tensor class, with the shape taken from ary and populating each element with a value of 0. Arguments: ary (tensor object): Tensor to inherit the dimensions of. dtype (data-type, optional): If present, specifies the underlying type to employ for each element. persist_values (bool, optional): If set to True (the default), the values assigned to this Tensor will persist across multiple begin and end calls. 
Setting to False may provide a performance increase if values do not need to be maintained across such calls Returns: Tensor: array object """ dtype = self.default_dtype if dtype is None else dtype return self.tensor_cls( backend=self, ary=np.zeros(ary.shape, dtype), dtype=dtype, name=name, persist_values=persist_values) def compound_dot(self, A, B, C, alpha=1.0, beta=0.0, relu=False): """ Doing following operations (* is dot product) C = alpha * A * B + beta * C C = alpha * A.T * B + beta * C C = alpha * A * B.T + beta * C relu: if true applied before output (and prior to beta addition) The operation will be short-circuited to: out <- alpha * left * right if beta has value 0 (the default). Arguments: A, B (CPUTensor): input operands C (CPUTensor): output alpha (float): scale A*B term beta (float): scale C term before sum relu (bool): whether to apply ReLu before output """ # checking type and shape assert A.dtype == B.dtype == C.dtype assert A.shape[0] == C.shape[0] assert B.shape[1] == C.shape[1] assert A.shape[1] == B.shape[0] # cleaner implementation, shall be equivalent to the one below # if relu: # C[:] = self.log(1. + self.exp(alpha * self.dot(A, B))) + beta * C # else: # C[:] = alpha * self.dot(A, B) + beta * C if beta == 0: if C._tensor.flags['C_CONTIGUOUS'] is not True: tmp = np.empty(C.shape, dtype=C.dtype) np.dot(A._tensor, B._tensor, tmp) C._tensor[:] = tmp.copy() else: np.dot(A._tensor, B._tensor, C._tensor) if relu: self.Relu(C._tensor, C._tensor) else: np.multiply(C._tensor, beta, C._tensor) tmp = np.empty(C.shape, dtype=C.dtype) np.dot(A._tensor, B._tensor, tmp) np.multiply(tmp, alpha, tmp) if relu: self.Relu(tmp, tmp) np.add(C._tensor, tmp, C._tensor) return C def batched_dot(self, A, B, C, alpha=1.0, beta=0.0, relu=False): """ Doing following operations: 1. For fprop: A(K, C), B(X,C,N), C(X,K,N) --> call batched_dot(A, B, C) 2. For bprop: A(K, C), B(X,K,N), C(X,C,N) --> call batched_dot(A.T, B, C) 3. 
For update: A(X,K,N), B(X,C,N), C(K,C) --> call batched_dot(A, B.T, C) Arguments: A, B (CPUTensor): input operands C (CPUTensor): output alpha, beta, relu: see usage in dot() """ assert A.dtype == B.dtype == C.dtype dima, dimb, dimc = 0, 0, 0 # ldaz, ldbz, ldcz = 0, 0, 0 # commented for stylecheck batch_grid, batch_loops = 1, 1 if len(A.shape) == 3: dima = 1 if len(B.shape) == 3: dimb = 1 assert dima or dimb, "Tensor A or B must have 3 dims to use batched_dot" if len(C.shape) == 3: dimc = 1 batch_grid = C.shape[0] assert not dima or A.shape[0] == batch_grid assert not dimb or B.shape[0] == batch_grid if dima: batch_loops = A.shape[0] assert not dimb or B.shape[0] == batch_loops elif dimb: batch_loops = B.shape[0] assert not dima or A.shape[0] == batch_loops assert A.shape[0 + dima] == C.shape[0 + dimc] assert B.shape[1 + dimb] == C.shape[1 + dimc] assert A.shape[1 + dima] == B.shape[0 + dimb] tmp = np.zeros(C.shape) for i in range(batch_loops): if dima: tmp += np.dot(A._tensor[i], B._tensor[i]) else: tmp[i] = np.dot(A._tensor, B._tensor[i]) np.multiply(tmp, alpha, tmp) if relu: self.Relu(tmp, tmp) np.add(C._tensor * beta, tmp, C._tensor) return C def make_binary_mask(self, out, keepthresh=0.5): """ Create a binary mask for dropout layers. Arguments: out (CPUTensor): Output tensor keepthresh (float): fraction of ones """ out._tensor[:] = np.array( self.rng.uniform(size=out._tensor.shape) < keepthresh, dtype=out._tensor.dtype) def conv_layer(self, dtype, N, C, K, D=1, H=1, W=1, T=1, R=1, S=1, pad_d=0, pad_h=0, pad_w=0, str_d=1, str_h=1, str_w=1): """ Create a new ConvLayer parameter object. This then is passed as an argument to all the convolution operations. 
N: Number of images in mini-batch C: Number of input feature maps K: Number of output feature maps D: Depth of input image H: Height of input image W: Width of input image T: Depth of filter kernel R: Height of filter kernel S: Width of filter kernel padding: amount of zero-padding around the given edge strides: factor to step the filters by in a given direction dtype: need to know dtype to setup proper kernels and params. """ return ConvLayer(self, dtype, N, C, K, D, H, W, T, R, S, pad_d, pad_h, pad_w, str_d, str_h, str_w) def fprop_conv(self, layer, I, F, O, alpha=1.0, relu=False): """ Forward propagate the inputs of a convolutional network layer to produce output Arguments: layer: the conv layer as a parameter object I (CPUTensor): inputs F (CPUTensor): the weights (filters) O (CPUTensor): outputs alpha (float): linear scaling relu (boolean): apply ReLu or not before output (currently not implemented) """ assert layer.sizeI == I.size assert layer.sizeF == F.size assert layer.sizeO == O.size M, P, Q = layer.MPQ C, D, H, W, N = layer.dimI C, T, R, S, K = layer.dimF K, M, P, Q, N = layer.dimO pad_d, pad_h, pad_w = layer.padding str_d, str_h, str_w = layer.strides array_I = I.get().reshape(layer.dimI) array_F = F.get().reshape(layer.dimF) array_O = O.get().reshape(layer.dimO) for m in range(M): sliceT, sliceD = layer.fprop_slice(m, T, D, pad_d, str_d) for p in range(P): sliceR, sliceH = layer.fprop_slice(p, R, H, pad_h, str_h) for q in range(Q): sliceS, sliceW = layer.fprop_slice(q, S, W, pad_w, str_w) sliceTRS = np.array([ t * R * S + r * S + s for t in sliceT for r in sliceR for s in sliceS], dtype=np.intp) sliceDHW = np.array([ d * H * W + y * W + w for d in sliceD for y in sliceH for w in sliceW], dtype=np.intp) slicedF = array_F.reshape( (C, -1, K))[:, sliceTRS, :].reshape((-1, K)) slicedI = array_I.reshape( (C, -1, N))[:, sliceDHW, :].reshape((-1, N)) array_O[:, m, p, q, :] = alpha * \ np.dot(slicedF.T, slicedI) def bprop_conv(self, layer, F, E, grad_I, 
alpha=1.0, relu=False): """ Backward propagate the error through a convolutional network layer. Arguments: layer: the conv layer as a parameter object F (CPUTensor): the weights (filters) E (CPUTensor): errors grad_I (CPUTensor): gradient to inputs (output delta) alpha (float): linear scaling relu (boolean): apply ReLu or not before output (currently not implemented) """ assert layer.sizeF == F.size assert layer.sizeO == E.size assert layer.sizeI == grad_I.size M, P, Q = layer.MPQ C, D, H, W, N = layer.dimI C, T, R, S, K = layer.dimF K, M, P, Q, N = layer.dimO pad_d, pad_h, pad_w = layer.padding str_d, str_h, str_w = layer.strides array_F = F.get().reshape(layer.dimF) array_E = E.get().reshape(layer.dimO) array_grad_I = grad_I.get().reshape(layer.dimI) array_grad_I.fill(0.) array_F = np.transpose(array_F, (4, 1, 2, 3, 0)).copy() for d in range(D): sliceT, sliceM = layer.bprop_slice(d, T, M, pad_d, str_d) for h in range(H): sliceR, sliceP = layer.bprop_slice(h, R, P, pad_h, str_h) for w in range(W): sliceS, sliceQ = layer.bprop_slice(w, S, Q, pad_w, str_w) sliceTRS = np.array([ t * R * S + r * S + s for t in sliceT for r in sliceR for s in sliceS], dtype=np.intp) sliceMPQ = np.array([ m * P * Q + p * Q + q for m in sliceM for p in sliceP for q in sliceQ], dtype=np.intp) slicedF = array_F.reshape( (K, -1, C))[:, sliceTRS, :].reshape((-1, C)) slicedE = array_E.reshape( (K, -1, N))[:, sliceMPQ, :].reshape((-1, N)) array_grad_I[:, m, h, w, :] = alpha * \ np.dot(slicedF.T, slicedE) def update_conv(self, layer, I, E, U, alpha=1.0): """ Compute the updated gradient for a convolutional network layer. 
Arguments: layer: the conv layer as a parameter object I (CPUTensor): the inputs E (CPUTensor): the errors U (CPUTensor): the updates alpha (float): linear scaling """ assert layer.sizeI == I.size assert layer.sizeO == E.size assert layer.sizeF == U.size C, D, H, W, N = layer.dimI C, T, R, S, K = layer.dimF K, M, P, Q, N = layer.dimO pad_d, pad_h, pad_w = layer.padding str_d, str_h, str_w = layer.strides array_I = I.get().reshape(layer.dimI) array_E = E.get().reshape(layer.dimO) array_U = U.get().reshape(layer.dimF) array_U.fill(0.) for m in range(M): sliceT, sliceD = layer.fprop_slice(m, T, D, pad_d, str_d) for p in range(P): sliceR, sliceH = layer.fprop_slice(p, R, H, pad_h, str_h) for q in range(Q): sliceS, sliceW = layer.fprop_slice(q, S, W, pad_w, str_w) sliceTRS = np.array([ t * R * S + r * S + s for t in sliceT for r in sliceR for s in sliceS], dtype=np.intp) sliceDHW = np.array([ d * H * W + y * W + w for d in sliceD for y in sliceH for w in sliceW], dtype=np.intp) slicedI = array_I.reshape( (C, -1, N))[:, sliceDHW, :].reshape((-1, N)) slicedE = array_E[:, m, p, q, :] slicedU = array_U.reshape((C, -1, K)) slicedU[:, sliceTRS, :] += alpha * np.dot( slicedI, slicedE.T).reshape((C, -1, K)) def deconv_layer(self, dtype, N, C, K, P, Q, R=1, S=1, pad_d=0, pad_h=0, pad_w=0, str_d=1, str_h=1, str_w=1): """ Create a new PoolLayer parameter object. This then is passed as an argument to all pooling kernels. op: max, avg, l2 pooling N: Number of images in mini-batch C: Number of input feature maps D: Depth of input image H: Height of input image W: Width of input image J: Size of feature map pooling window (maxout n_pieces) T: Depth of pooling window R: Height of pooling window S: Width of pooling window padding: amount of zero-padding around the given image or feature map edge strides: factor to step the window by in a given direction (overlap allowed) Leave spatial dimensions at 1 to allow feature map pooling in the fc layers. 
""" return DeconvLayer(self, dtype, N, C, K, P, Q, R, S, pad_d, pad_h, pad_w, str_d, str_h, str_w) def pool_layer(self, dtype, op, N, C, D=1, H=1, W=1, J=1, T=1, R=1, S=1, pad_j=0, pad_d=0, pad_h=0, pad_w=0, str_j=None, str_d=None, str_h=None, str_w=None): """ Create a new PoolLayer parameter object. This then is passed as an argument to all pooling kernels. op: "max", "avg", "l2" pooling (currently bprop only supports max, but not avg and l2) N: Number of images in mini-batch C: Number of input feature maps D: Depth of input image H: Height of input image W: Width of input image J: Size of feature map pooling window (maxout n_pieces) T: Depth of pooling window R: Height of pooling window S: Width of pooling window padding: amount of zero-padding around the given image or feature map edge strides: factor to step the window by in a given direction (overlap allowed) Leave spatial dimensions at 1 to allow feature map pooling in the fc layers. """ # default to non-overlapping if str_j is None: str_j = J if str_d is None: str_d = T if str_h is None: str_h = R if str_w is None: str_w = S return PoolLayer(self, dtype, op, N, C, D, H, W, J, T, R, S, pad_j, pad_d, pad_h, pad_w, str_j, str_d, str_h, str_w) def fprop_pool(self, layer, I, O): """ Forward propagate pooling layer. Arguments: layer (PoolLayer): The pool layer object, different backends have different pool layers. I (Tensor): Input tensor. O (Tensor): output tensor. 
""" assert layer.sizeI == I.size assert layer.sizeO == O.size op = layer.op J, T, R, S = layer.JTRS C, D, H, W, N = layer.dimI K, M, P, Q, N = layer.dimO pad_j, pad_d, pad_h, pad_w = layer.padding str_j, str_d, str_h, str_w = layer.strides WH = W * H DWH = D * W * H array_I = I.get().reshape(layer.dimI) array_O = O.get().reshape(layer.dimO) for k in range(K): sliceC = layer.pool_slice(k, J, C, pad_j, str_j) for m in range(M): sliceD = layer.pool_slice(m, T, D, pad_d, str_d) for p in range(P): sliceH = layer.pool_slice(p, R, H, pad_h, str_h) for q in range(Q): sliceW = layer.pool_slice(q, S, W, pad_w, str_w) sliceCDHW = np.array([ c * DWH + d * WH + y * W + x for c in sliceC for d in sliceD for y in sliceH for x in sliceW], dtype=np.intp) sliceI = array_I.reshape((-1, N)) if op == "max": array_O[k, m, p, q, :] = np.max( sliceI[sliceCDHW, :], axis=0) elif op == "avg": array_O[k, m, p, q, :] = np.mean( sliceI[sliceCDHW, :], axis=0) elif op == "l2": array_O[k, m, p, q, :] = np.sqrt(np.sum( np.square(sliceI[sliceCDHW, :]), axis=0)) def bprop_pool(self, layer, I, E, delta): """ Backward propagate pooling layer. Arguments: layer (PoolLayer): The pool layer object. Different backends have different pool layers. I (Tensor): Input tensor. E (Tensor): Error tensor. delta (Tensor): Gradient tensor (delta) """ assert layer.sizeI == I.size assert layer.sizeO == E.size assert layer.sizeI == delta.size op = layer.op J, T, R, S = layer.JTRS C, D, H, W, N = layer.dimI K, M, P, Q, N = layer.dimO pad_j, pad_d, pad_h, pad_w = layer.padding str_j, str_d, str_h, str_w = layer.strides WH = W * H DWH = D * W * H array_I = I.get().reshape(layer.dimI) array_E = E.get().reshape(layer.dimO) array_delta = delta.get().reshape(layer.dimI) array_delta.fill(0.) 
for k in range(K): sliceC = layer.pool_slice(k, J, C, pad_j, str_j) for m in range(M): sliceD = layer.pool_slice(m, T, D, pad_d, str_d) for p in range(P): sliceH = layer.pool_slice(p, R, H, pad_h, str_h) for q in range(Q): sliceW = layer.pool_slice(q, S, W, pad_w, str_w) sliceCDHW = np.array([ c * DWH + d * WH + y * W + x for c in sliceC for d in sliceD for y in sliceH for x in sliceW], dtype=np.intp) sliceB = array_delta.reshape((-1, N)) if op == "max": sliceI = array_I.reshape((-1, N)) max_idx = np.argmax(sliceI[sliceCDHW, :], axis=0) for n in range(N): sliceB[ sliceCDHW[max_idx[n]], n] += array_E[k, m, p, q, n] elif op == "avg": sliceB[ sliceCDHW, :] += array_E[k, m, p, q, :] * (1.0 / sliceCDHW.size) else: raise NotImplementedError def _hist_tensor(self, tag): """ Create a tensor the right size for histogram data, with memory allocated in the contiguous histogram buffer. Track it by tag for later reference. """ assert self.hist_idx < self.hist_max self.hist_map[tag] = (self.hist_idx) hist_buf = self.hist_buf[self.hist_idx] self.hist_idx += 1 return hist_buf def dump_hist_data(self): hist_data = self.hist_buf hist_map = self.hist_map self.hist_map = dict() self.hist_idx = 0 self.hist_buf = self.empty( (self.hist_max, self.hist_bins), dtype=np.int32) return hist_data, hist_map def Relu(self, ary, out=None): """ Calculates the ReLu transformation for input array Arguments: ary: numpy array out: reference to output """ if out is not None: np.log(1 + np.exp(ary), out) else: return np.log(1 + np.exp(ary))
apache-2.0
waytai/django
tests/template_tests/filter_tests/test_date.py
207
2534
from datetime import datetime, time

from django.template.defaultfilters import date
from django.test import SimpleTestCase
from django.utils import timezone

from ..utils import setup
from .timezone_utils import TimezoneTestCase


class DateTests(TimezoneTestCase):
    # Template-level tests of the ``|date`` filter, rendered through the
    # template engine. TimezoneTestCase pins the active timezone so the
    # expected outputs are deterministic.

    @setup({'date01': '{{ d|date:"m" }}'})
    def test_date01(self):
        # Explicit "m" format: zero-padded month number.
        output = self.engine.render_to_string('date01', {'d': datetime(2008, 1, 1)})
        self.assertEqual(output, '01')

    @setup({'date02': '{{ d|date }}'})
    def test_date02(self):
        # No format argument: the filter falls back to the default date
        # format (rendered here as 'Jan. 1, 2008').
        output = self.engine.render_to_string('date02', {'d': datetime(2008, 1, 1)})
        self.assertEqual(output, 'Jan. 1, 2008')

    @setup({'date03': '{{ d|date:"m" }}'})
    def test_date03(self):
        """
        #9520: Make sure |date doesn't blow up on non-dates
        """
        # A plain string has no date semantics; the filter must render
        # empty rather than raise.
        output = self.engine.render_to_string('date03', {'d': 'fail_string'})
        self.assertEqual(output, '')

    # ISO date formats
    @setup({'date04': '{{ d|date:"o" }}'})
    def test_date04(self):
        # "o" is the ISO-8601 week-numbering year: 2008-12-29 falls in
        # ISO week 1 of 2009.
        output = self.engine.render_to_string('date04', {'d': datetime(2008, 12, 29)})
        self.assertEqual(output, '2009')

    @setup({'date05': '{{ d|date:"o" }}'})
    def test_date05(self):
        # 2010-01-03 still belongs to the last ISO week of 2009.
        output = self.engine.render_to_string('date05', {'d': datetime(2010, 1, 3)})
        self.assertEqual(output, '2009')

    # Timezone name
    @setup({'date06': '{{ d|date:"e" }}'})
    def test_date06(self):
        # "e" renders the timezone of an aware datetime (here a fixed
        # +00:30 offset).
        output = self.engine.render_to_string(
            'date06',
            {'d': datetime(2009, 3, 12, tzinfo=timezone.get_fixed_timezone(30))})
        self.assertEqual(output, '+0030')

    @setup({'date07': '{{ d|date:"e" }}'})
    def test_date07(self):
        # A naive datetime carries no timezone, so "e" renders empty.
        output = self.engine.render_to_string('date07', {'d': datetime(2009, 3, 12)})
        self.assertEqual(output, '')

    # #19370: Make sure |date doesn't blow up on a midnight time object
    @setup({'date08': '{{ t|date:"H:i" }}'})
    def test_date08(self):
        output = self.engine.render_to_string('date08', {'t': time(0, 1)})
        self.assertEqual(output, '00:01')

    @setup({'date09': '{{ t|date:"H:i" }}'})
    def test_date09(self):
        # time(0, 0) is the actual midnight case from #19370; it must
        # still render rather than be treated as a missing value.
        output = self.engine.render_to_string('date09', {'t': time(0, 0)})
        self.assertEqual(output, '00:00')


class FunctionTests(SimpleTestCase):
    # Direct (non-template) tests of django.template.defaultfilters.date().

    def test_date(self):
        self.assertEqual(date(datetime(2005, 12, 29), "d F Y"), '29 December 2005')

    def test_escape_characters(self):
        # Backslash-escaped format characters are emitted literally:
        # r'\o\f' contributes the plain text "of".
        self.assertEqual(date(datetime(2005, 12, 29), r'jS \o\f F'), '29th of December')
bsd-3-clause
qiqi/fds
apps/Reynolds/from_hdf5_to_ReynoldsInputFiles.py
1
1750
import numpy
from numpy import *
from mpi4py import MPI
import h5py
import os
import sys
import argparse
import shutil

# Convert a checkpoint stored in a single (parallel) HDF5 file back into the
# per-rank raw binary input files expected by the Reynolds solver.  Run under
# mpiexec with the same number of ranks that produced the reference data.
#
# NOTE(review): statement nesting below is reconstructed from the standard
# write-then-barrier MPI pattern; confirm against the original layout.
HDF5file_path = sys.argv[1]   # source HDF5 checkpoint (may be absent)
work_path = sys.argv[2]       # destination directory for the generated files
REF_WORK_PATH = sys.argv[3]   # reference run: finalData*/dataStructs* files

comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
root = 0

if os.path.exists(HDF5file_path):
    # start.bin caches per-rank offsets into the flat '/field' dataset:
    # prefix sums of the number of doubles in each rank's finalData file.
    start_file = os.path.join(REF_WORK_PATH, 'start.bin')
    comm.Barrier()
    if not os.path.exists(start_file):
        if rank == root:
            # Only the root rank computes and writes the offset table.
            start = numpy.zeros((size+1, 1), dtype='i')
            for i in range(size):
                ref_data_file = os.path.join(
                    REF_WORK_PATH, 'finalData' + str(i) + '.bin')
                # offset[i+1] = offset[i] + doubles in rank i's file
                start[i+1] = start[i] + len(
                    frombuffer(open(ref_data_file, 'rb').read(), dtype='d'))
            with open(start_file, 'wb') as f:
                f.write(asarray(start, dtype='i').tobytes())
    # All ranks wait here until start.bin exists before reading it.
    comm.Barrier()
    start = frombuffer(open(start_file, 'rb').read(), dtype='i')

    # Get solution from HDF5 file: each rank reads only its own slice of
    # '/field', using the collective MPI-IO driver.
    with h5py.File(HDF5file_path, 'r', driver='mpio', comm=comm) as handle:
        field = handle['/field'][start[rank]:start[rank + 1]].copy()
    field = ravel(field)

    # Generate input binary files:
    initial_data_file = os.path.join(
        work_path, 'initialData' + str(rank) + '.bin')
    with open(initial_data_file, 'wb') as f:
        f.write(asarray(field, dtype='d').tobytes())
    shutil.copy(os.path.join(REF_WORK_PATH,
                             'dataStructs' + str(rank) + '.bin'),
                work_path)
else:
    # Use reference solution as initialization:
    shutil.copy(os.path.join(REF_WORK_PATH,
                             'finalData' + str(rank) + '.bin'),
                os.path.join(work_path, 'initialData' + str(rank) + '.bin'))
    shutil.copy(os.path.join(REF_WORK_PATH,
                             'dataStructs' + str(rank) + '.bin'),
                work_path)
gpl-3.0
datacommonsorg/data
scripts/oecd/regional_demography/utils_test.py
1
1248
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import json
import pandas as pd
from pandas.testing import assert_frame_equal
from utils import multi_index_to_single_index


class TestUtils(unittest.TestCase):
    """Unit tests for utils.multi_index_to_single_index."""

    def test_multi_index_to_single_index(self):
        """Pivoting test.csv and flattening its column MultiIndex must
        reproduce test_expected.csv exactly."""
        df = pd.read_csv("test.csv")
        df_cleaned = df.pivot_table(values='value',
                                    index=['name'],
                                    columns=['var', 'sex'])
        df_cleaned = multi_index_to_single_index(df_cleaned)
        df_expected = pd.read_csv("test_expected.csv")
        # assert_frame_equal raises AssertionError (with a detailed diff)
        # on any mismatch and returns None otherwise, so calling it
        # directly IS the assertion.  The previous
        # assertTrue(assert_frame_equal(...) is None) wrapper swallowed
        # pandas' diff message on failure.
        assert_frame_equal(df_cleaned, df_expected)


if __name__ == '__main__':
    unittest.main()
apache-2.0
Argon-Zhou/django
tests/m2m_multiple/tests.py
228
2386
from __future__ import unicode_literals from datetime import datetime from django.test import TestCase from .models import Article, Category class M2MMultipleTests(TestCase): def test_multiple(self): c1, c2, c3, c4 = [ Category.objects.create(name=name) for name in ["Sports", "News", "Crime", "Life"] ] a1 = Article.objects.create( headline="Area man steals", pub_date=datetime(2005, 11, 27) ) a1.primary_categories.add(c2, c3) a1.secondary_categories.add(c4) a2 = Article.objects.create( headline="Area man runs", pub_date=datetime(2005, 11, 28) ) a2.primary_categories.add(c1, c2) a2.secondary_categories.add(c4) self.assertQuerysetEqual( a1.primary_categories.all(), [ "Crime", "News", ], lambda c: c.name ) self.assertQuerysetEqual( a2.primary_categories.all(), [ "News", "Sports", ], lambda c: c.name ) self.assertQuerysetEqual( a1.secondary_categories.all(), [ "Life", ], lambda c: c.name ) self.assertQuerysetEqual( c1.primary_article_set.all(), [ "Area man runs", ], lambda a: a.headline ) self.assertQuerysetEqual( c1.secondary_article_set.all(), [] ) self.assertQuerysetEqual( c2.primary_article_set.all(), [ "Area man steals", "Area man runs", ], lambda a: a.headline ) self.assertQuerysetEqual( c2.secondary_article_set.all(), [] ) self.assertQuerysetEqual( c3.primary_article_set.all(), [ "Area man steals", ], lambda a: a.headline ) self.assertQuerysetEqual( c3.secondary_article_set.all(), [] ) self.assertQuerysetEqual( c4.primary_article_set.all(), [] ) self.assertQuerysetEqual( c4.secondary_article_set.all(), [ "Area man steals", "Area man runs", ], lambda a: a.headline )
bsd-3-clause
srajag/nova
nova/virt/hyperv/utilsfactory.py
13
3118
# Copyright 2013 Cloudbase Solutions Srl
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

from oslo.config import cfg

from nova.openstack.common import log as logging
from nova.virt.hyperv import hostutils
from nova.virt.hyperv import livemigrationutils
from nova.virt.hyperv import networkutils
from nova.virt.hyperv import networkutilsv2
from nova.virt.hyperv import pathutils
from nova.virt.hyperv import rdpconsoleutils
from nova.virt.hyperv import rdpconsoleutilsv2
from nova.virt.hyperv import vhdutils
from nova.virt.hyperv import vhdutilsv2
from nova.virt.hyperv import vmutils
from nova.virt.hyperv import vmutilsv2
from nova.virt.hyperv import volumeutils
from nova.virt.hyperv import volumeutilsv2

hyper_opts = [
    cfg.BoolOpt('force_hyperv_utils_v1',
                default=False,
                help='Force V1 WMI utility classes'),
    cfg.BoolOpt('force_volumeutils_v1',
                default=False,
                help='Force V1 volume utility class'),
]

CONF = cfg.CONF
CONF.register_opts(hyper_opts, 'hyperv')

LOG = logging.getLogger(__name__)


def _get_class(v1_class, v2_class, force_v1_flag):
    """Select between the V1 and V2 WMI utility classes and log the choice.

    The V2 classes require Hyper-V Server 2012 / Windows Server 2012
    (kernel version 6.2) or newer; the host version check is skipped
    entirely when the force flag is set.
    """
    if force_v1_flag or not get_hostutils().check_min_windows_version(6, 2):
        cls = v1_class
    else:
        cls = v2_class
    LOG.debug("Loading class: %(module_name)s.%(class_name)s",
              {'module_name': cls.__module__, 'class_name': cls.__name__})
    return cls


def get_vmutils(host='.'):
    """Return a VM utils instance bound to the given host."""
    vmutils_cls = _get_class(vmutils.VMUtils, vmutilsv2.VMUtilsV2,
                             CONF.hyperv.force_hyperv_utils_v1)
    return vmutils_cls(host)


def get_vhdutils():
    """Return a VHD utils instance for the local host."""
    vhdutils_cls = _get_class(vhdutils.VHDUtils, vhdutilsv2.VHDUtilsV2,
                              CONF.hyperv.force_hyperv_utils_v1)
    return vhdutils_cls()


def get_networkutils():
    """Return a network utils instance for the local host."""
    networkutils_cls = _get_class(networkutils.NetworkUtils,
                                  networkutilsv2.NetworkUtilsV2,
                                  CONF.hyperv.force_hyperv_utils_v1)
    return networkutils_cls()


def get_hostutils():
    """Return a host utils instance (no V1/V2 split exists for it)."""
    return hostutils.HostUtils()


def get_pathutils():
    """Return a path utils instance (no V1/V2 split exists for it)."""
    return pathutils.PathUtils()


def get_volumeutils():
    """Return a volume utils instance.

    Note: selection is governed by the separate force_volumeutils_v1 flag,
    not force_hyperv_utils_v1.
    """
    volumeutils_cls = _get_class(volumeutils.VolumeUtils,
                                 volumeutilsv2.VolumeUtilsV2,
                                 CONF.hyperv.force_volumeutils_v1)
    return volumeutils_cls()


def get_livemigrationutils():
    """Return a live-migration utils instance (single implementation)."""
    return livemigrationutils.LiveMigrationUtils()


def get_rdpconsoleutils():
    """Return an RDP console utils instance."""
    rdpconsoleutils_cls = _get_class(rdpconsoleutils.RDPConsoleUtils,
                                     rdpconsoleutilsv2.RDPConsoleUtilsV2,
                                     CONF.hyperv.force_hyperv_utils_v1)
    return rdpconsoleutils_cls()
apache-2.0
amallia/zulip
zerver/test_unread.py
120
6343
# -*- coding: utf-8 -*-
from __future__ import absolute_import

from zerver.models import (
    get_user_profile_by_email, Recipient,
    UserMessage,
)

from zerver.lib.test_helpers import AuthedTestCase

import ujson


class PointerTest(AuthedTestCase):
    # End-to-end tests of the pointer endpoints: /json/update_pointer for
    # the web client and /api/v1/users/me/pointer for the API.  A fresh
    # UserProfile starts with pointer == -1.

    def test_update_pointer(self):
        """
        Posting a pointer to /update (in the form {"pointer": pointer})
        changes the pointer we store for your UserProfile.
        """
        self.login("hamlet@zulip.com")
        self.assertEqual(get_user_profile_by_email("hamlet@zulip.com").pointer, -1)
        msg_id = self.send_message("othello@zulip.com", "Verona", Recipient.STREAM)
        result = self.client.post("/json/update_pointer", {"pointer": msg_id})
        self.assert_json_success(result)
        self.assertEqual(get_user_profile_by_email("hamlet@zulip.com").pointer, msg_id)

    def test_api_update_pointer(self):
        """
        Same as above, but for the API view
        """
        email = "hamlet@zulip.com"
        self.assertEqual(get_user_profile_by_email(email).pointer, -1)
        msg_id = self.send_message("othello@zulip.com", "Verona", Recipient.STREAM)
        result = self.client_put("/api/v1/users/me/pointer",
                                 {"pointer": msg_id},
                                 **self.api_auth(email))
        self.assert_json_success(result)
        self.assertEqual(get_user_profile_by_email(email).pointer, msg_id)

    def test_missing_pointer(self):
        """
        Posting json to /json/update_pointer which does not contain a
        pointer key/value pair returns a 400 and error message.
        """
        self.login("hamlet@zulip.com")
        self.assertEqual(get_user_profile_by_email("hamlet@zulip.com").pointer, -1)
        result = self.client.post("/json/update_pointer", {"foo": 1})
        self.assert_json_error(result, "Missing 'pointer' argument")
        # The stored pointer must be unchanged after the rejected request.
        self.assertEqual(get_user_profile_by_email("hamlet@zulip.com").pointer, -1)

    def test_invalid_pointer(self):
        """
        Posting json to /json/update_pointer with an invalid pointer returns
        a 400 and error message.
        """
        self.login("hamlet@zulip.com")
        self.assertEqual(get_user_profile_by_email("hamlet@zulip.com").pointer, -1)
        result = self.client.post("/json/update_pointer", {"pointer": "foo"})
        self.assert_json_error(result, "Bad value for 'pointer': foo")
        self.assertEqual(get_user_profile_by_email("hamlet@zulip.com").pointer, -1)

    def test_pointer_out_of_range(self):
        """
        Posting json to /json/update_pointer with an out of range (< 0)
        pointer returns a 400 and error message.
        """
        self.login("hamlet@zulip.com")
        self.assertEqual(get_user_profile_by_email("hamlet@zulip.com").pointer, -1)
        result = self.client.post("/json/update_pointer", {"pointer": -2})
        self.assert_json_error(result, "Bad value for 'pointer': -2")
        self.assertEqual(get_user_profile_by_email("hamlet@zulip.com").pointer, -1)


class UnreadCountTests(AuthedTestCase):
    # Tests of unread-message bookkeeping (UserMessage.flags.read) and the
    # /json/update_message_flags endpoint.

    def setUp(self):
        # Two personal messages from Iago to Hamlet, unread for Hamlet.
        self.unread_msg_ids = [self.send_message(
                "iago@zulip.com", "hamlet@zulip.com", Recipient.PERSONAL, "hello"),
                               self.send_message(
                "iago@zulip.com", "hamlet@zulip.com", Recipient.PERSONAL, "hello2")]

    def test_new_message(self):
        # Sending a new message results in unread UserMessages being created
        self.login("hamlet@zulip.com")
        content = "Test message for unset read bit"
        last_msg = self.send_message("hamlet@zulip.com", "Verona",
                                     Recipient.STREAM, content)
        user_messages = list(UserMessage.objects.filter(message=last_msg))
        self.assertEqual(len(user_messages) > 0, True)
        for um in user_messages:
            self.assertEqual(um.message.content, content)
            # The sender's own copy may be marked read; every other
            # recipient's copy must start unread.
            if um.user_profile.email != "hamlet@zulip.com":
                self.assertFalse(um.flags.read)

    def test_update_flags(self):
        self.login("hamlet@zulip.com")

        # Mark both setUp messages read in one batch request.
        result = self.client.post("/json/update_message_flags",
                                  {"messages": ujson.dumps(self.unread_msg_ids),
                                   "op": "add",
                                   "flag": "read"})
        self.assert_json_success(result)

        # Ensure we properly set the flags
        found = 0
        for msg in self.get_old_messages():
            if msg['id'] in self.unread_msg_ids:
                self.assertEqual(msg['flags'], ['read'])
                found += 1
        self.assertEqual(found, 2)

        # Now clear the flag on only the second message.
        result = self.client.post("/json/update_message_flags",
                                  {"messages": ujson.dumps([self.unread_msg_ids[1]]),
                                   "op": "remove",
                                   "flag": "read"})
        self.assert_json_success(result)

        # Ensure we properly remove just one flag
        for msg in self.get_old_messages():
            if msg['id'] == self.unread_msg_ids[0]:
                self.assertEqual(msg['flags'], ['read'])
            elif msg['id'] == self.unread_msg_ids[1]:
                self.assertEqual(msg['flags'], [])

    def test_update_all_flags(self):
        self.login("hamlet@zulip.com")

        message_ids = [self.send_message("hamlet@zulip.com", "iago@zulip.com",
                                         Recipient.PERSONAL, "test"),
                       self.send_message("hamlet@zulip.com", "cordelia@zulip.com",
                                         Recipient.PERSONAL, "test2")]

        # First mark the two new messages read...
        result = self.client.post("/json/update_message_flags",
                                  {"messages": ujson.dumps(message_ids),
                                   "op": "add",
                                   "flag": "read"})
        self.assert_json_success(result)

        # ...then clear the read flag on ALL messages via "all": true with
        # an empty explicit message list.
        result = self.client.post("/json/update_message_flags",
                                  {"messages": ujson.dumps([]),
                                   "op": "remove",
                                   "flag": "read",
                                   "all": ujson.dumps(True)})
        self.assert_json_success(result)

        for msg in self.get_old_messages():
            self.assertEqual(msg['flags'], [])
apache-2.0
Sorsly/subtle
google-cloud-sdk/lib/surface/test/android/versions/__init__.py
4
1368
# Copyright 2015 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """The 'gcloud test android versions' command group.""" from googlecloudsdk.calliope import base class Versions(base.Group): """Explore Android versions available for testing.""" detailed_help = { 'DESCRIPTION': '{description}', 'EXAMPLES': """\ To list information about all versions of the Android OS available for running tests, including details such as OS code name and release date, run: $ {command} list """, } @staticmethod def Args(parser): """Method called by Calliope to register flags common to this sub-group. Args: parser: An argparse parser used to add arguments that immediately follow this group in the CLI. Positional arguments are allowed. """
mit
Teagan42/home-assistant
homeassistant/components/tradfri/sensor.py
4
1599
"""Support for IKEA Tradfri sensors."""
from homeassistant.const import DEVICE_CLASS_BATTERY

from .base_class import TradfriBaseDevice
from .const import CONF_GATEWAY_ID, KEY_API, KEY_GATEWAY


async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up a Tradfri config entry."""
    gateway_id = config_entry.data[CONF_GATEWAY_ID]
    api = hass.data[KEY_API][config_entry.entry_id]
    gateway = hass.data[KEY_GATEWAY][config_entry.entry_id]

    devices_commands = await api(gateway.get_devices())
    all_devices = await api(devices_commands)
    # Battery-reporting sensors are the devices with no other control role.
    # Materialize as a list: the original generator expression was always
    # truthy, so the emptiness guard below could never take effect.
    devices = [
        dev
        for dev in all_devices
        if not dev.has_light_control
        and not dev.has_socket_control
        and not dev.has_blind_control
        and not dev.has_signal_repeater_control
    ]

    if devices:
        async_add_entities(TradfriSensor(device, api, gateway_id) for device in devices)


class TradfriSensor(TradfriBaseDevice):
    """The platform class required by Home Assistant."""

    def __init__(self, device, api, gateway_id):
        """Initialize the device."""
        super().__init__(device, api, gateway_id)
        self._unique_id = f"{gateway_id}-{device.id}"

    @property
    def device_class(self):
        """Return the device class of the sensor (battery)."""
        # Fixed copy-paste docstring: this property returns the device
        # class, not the state attributes.
        return DEVICE_CLASS_BATTERY

    @property
    def state(self):
        """Return the current battery level of the device."""
        return self._device.device_info.battery_level

    @property
    def unit_of_measurement(self):
        """Return the unit_of_measurement of the device."""
        return "%"
apache-2.0
TathagataChakraborti/resource-conflicts
PLANROB-2015/seq-sat-lama/py2.5/lib/python2.5/test/test_sax.py
6
22569
# regression test for SAX 2.0 -*- coding: iso-8859-1 -*- # $Id: test_sax.py 54954 2007-04-25 06:42:41Z neal.norwitz $ from xml.sax import make_parser, ContentHandler, \ SAXException, SAXReaderNotAvailable, SAXParseException try: make_parser() except SAXReaderNotAvailable: # don't try to test this module if we cannot create a parser raise ImportError("no XML parsers available") from xml.sax.saxutils import XMLGenerator, escape, unescape, quoteattr, \ XMLFilterBase from xml.sax.expatreader import create_parser from xml.sax.xmlreader import InputSource, AttributesImpl, AttributesNSImpl from cStringIO import StringIO from test.test_support import verify, verbose, TestFailed, findfile import os # ===== Utilities tests = 0 failures = [] def confirm(outcome, name): global tests tests = tests + 1 if outcome: if verbose: print "Passed", name else: failures.append(name) def test_make_parser2(): try: # Creating parsers several times in a row should succeed. # Testing this because there have been failures of this kind # before. 
from xml.sax import make_parser p = make_parser() from xml.sax import make_parser p = make_parser() from xml.sax import make_parser p = make_parser() from xml.sax import make_parser p = make_parser() from xml.sax import make_parser p = make_parser() from xml.sax import make_parser p = make_parser() except: return 0 else: return p # =========================================================================== # # saxutils tests # # =========================================================================== # ===== escape def test_escape_basic(): return escape("Donald Duck & Co") == "Donald Duck &amp; Co" def test_escape_all(): return escape("<Donald Duck & Co>") == "&lt;Donald Duck &amp; Co&gt;" def test_escape_extra(): return escape("Hei på deg", {"å" : "&aring;"}) == "Hei p&aring; deg" # ===== unescape def test_unescape_basic(): return unescape("Donald Duck &amp; Co") == "Donald Duck & Co" def test_unescape_all(): return unescape("&lt;Donald Duck &amp; Co&gt;") == "<Donald Duck & Co>" def test_unescape_extra(): return unescape("Hei på deg", {"å" : "&aring;"}) == "Hei p&aring; deg" def test_unescape_amp_extra(): return unescape("&amp;foo;", {"&foo;": "splat"}) == "&foo;" # ===== quoteattr def test_quoteattr_basic(): return quoteattr("Donald Duck & Co") == '"Donald Duck &amp; Co"' def test_single_quoteattr(): return (quoteattr('Includes "double" quotes') == '\'Includes "double" quotes\'') def test_double_quoteattr(): return (quoteattr("Includes 'single' quotes") == "\"Includes 'single' quotes\"") def test_single_double_quoteattr(): return (quoteattr("Includes 'single' and \"double\" quotes") == "\"Includes 'single' and &quot;double&quot; quotes\"") # ===== make_parser def test_make_parser(): try: # Creating a parser should succeed - it should fall back # to the expatreader p = make_parser(['xml.parsers.no_such_parser']) except: return 0 else: return p # ===== XMLGenerator start = '<?xml version="1.0" encoding="iso-8859-1"?>\n' def test_xmlgen_basic(): result = 
StringIO() gen = XMLGenerator(result) gen.startDocument() gen.startElement("doc", {}) gen.endElement("doc") gen.endDocument() return result.getvalue() == start + "<doc></doc>" def test_xmlgen_content(): result = StringIO() gen = XMLGenerator(result) gen.startDocument() gen.startElement("doc", {}) gen.characters("huhei") gen.endElement("doc") gen.endDocument() return result.getvalue() == start + "<doc>huhei</doc>" def test_xmlgen_pi(): result = StringIO() gen = XMLGenerator(result) gen.startDocument() gen.processingInstruction("test", "data") gen.startElement("doc", {}) gen.endElement("doc") gen.endDocument() return result.getvalue() == start + "<?test data?><doc></doc>" def test_xmlgen_content_escape(): result = StringIO() gen = XMLGenerator(result) gen.startDocument() gen.startElement("doc", {}) gen.characters("<huhei&") gen.endElement("doc") gen.endDocument() return result.getvalue() == start + "<doc>&lt;huhei&amp;</doc>" def test_xmlgen_attr_escape(): result = StringIO() gen = XMLGenerator(result) gen.startDocument() gen.startElement("doc", {"a": '"'}) gen.startElement("e", {"a": "'"}) gen.endElement("e") gen.startElement("e", {"a": "'\""}) gen.endElement("e") gen.startElement("e", {"a": "\n\r\t"}) gen.endElement("e") gen.endElement("doc") gen.endDocument() return result.getvalue() == start + ("<doc a='\"'><e a=\"'\"></e>" "<e a=\"'&quot;\"></e>" "<e a=\"&#10;&#13;&#9;\"></e></doc>") def test_xmlgen_ignorable(): result = StringIO() gen = XMLGenerator(result) gen.startDocument() gen.startElement("doc", {}) gen.ignorableWhitespace(" ") gen.endElement("doc") gen.endDocument() return result.getvalue() == start + "<doc> </doc>" ns_uri = "http://www.python.org/xml-ns/saxtest/" def test_xmlgen_ns(): result = StringIO() gen = XMLGenerator(result) gen.startDocument() gen.startPrefixMapping("ns1", ns_uri) gen.startElementNS((ns_uri, "doc"), "ns1:doc", {}) # add an unqualified name gen.startElementNS((None, "udoc"), None, {}) gen.endElementNS((None, "udoc"), None) 
gen.endElementNS((ns_uri, "doc"), "ns1:doc") gen.endPrefixMapping("ns1") gen.endDocument() return result.getvalue() == start + \ ('<ns1:doc xmlns:ns1="%s"><udoc></udoc></ns1:doc>' % ns_uri) def test_1463026_1(): result = StringIO() gen = XMLGenerator(result) gen.startDocument() gen.startElementNS((None, 'a'), 'a', {(None, 'b'):'c'}) gen.endElementNS((None, 'a'), 'a') gen.endDocument() return result.getvalue() == start+'<a b="c"></a>' def test_1463026_2(): result = StringIO() gen = XMLGenerator(result) gen.startDocument() gen.startPrefixMapping(None, 'qux') gen.startElementNS(('qux', 'a'), 'a', {}) gen.endElementNS(('qux', 'a'), 'a') gen.endPrefixMapping(None) gen.endDocument() return result.getvalue() == start+'<a xmlns="qux"></a>' def test_1463026_3(): result = StringIO() gen = XMLGenerator(result) gen.startDocument() gen.startPrefixMapping('my', 'qux') gen.startElementNS(('qux', 'a'), 'a', {(None, 'b'):'c'}) gen.endElementNS(('qux', 'a'), 'a') gen.endPrefixMapping('my') gen.endDocument() return result.getvalue() == start+'<my:a xmlns:my="qux" b="c"></my:a>' # ===== Xmlfilterbase def test_filter_basic(): result = StringIO() gen = XMLGenerator(result) filter = XMLFilterBase() filter.setContentHandler(gen) filter.startDocument() filter.startElement("doc", {}) filter.characters("content") filter.ignorableWhitespace(" ") filter.endElement("doc") filter.endDocument() return result.getvalue() == start + "<doc>content </doc>" # =========================================================================== # # expatreader tests # # =========================================================================== # ===== XMLReader support def test_expat_file(): parser = create_parser() result = StringIO() xmlgen = XMLGenerator(result) parser.setContentHandler(xmlgen) parser.parse(open(findfile("test"+os.extsep+"xml"))) return result.getvalue() == xml_test_out # ===== DTDHandler support class TestDTDHandler: def __init__(self): self._notations = [] self._entities = [] def 
notationDecl(self, name, publicId, systemId): self._notations.append((name, publicId, systemId)) def unparsedEntityDecl(self, name, publicId, systemId, ndata): self._entities.append((name, publicId, systemId, ndata)) def test_expat_dtdhandler(): parser = create_parser() handler = TestDTDHandler() parser.setDTDHandler(handler) parser.feed('<!DOCTYPE doc [\n') parser.feed(' <!ENTITY img SYSTEM "expat.gif" NDATA GIF>\n') parser.feed(' <!NOTATION GIF PUBLIC "-//CompuServe//NOTATION Graphics Interchange Format 89a//EN">\n') parser.feed(']>\n') parser.feed('<doc></doc>') parser.close() return handler._notations == [("GIF", "-//CompuServe//NOTATION Graphics Interchange Format 89a//EN", None)] and \ handler._entities == [("img", None, "expat.gif", "GIF")] # ===== EntityResolver support class TestEntityResolver: def resolveEntity(self, publicId, systemId): inpsrc = InputSource() inpsrc.setByteStream(StringIO("<entity/>")) return inpsrc def test_expat_entityresolver(): parser = create_parser() parser.setEntityResolver(TestEntityResolver()) result = StringIO() parser.setContentHandler(XMLGenerator(result)) parser.feed('<!DOCTYPE doc [\n') parser.feed(' <!ENTITY test SYSTEM "whatever">\n') parser.feed(']>\n') parser.feed('<doc>&test;</doc>') parser.close() return result.getvalue() == start + "<doc><entity></entity></doc>" # ===== Attributes support class AttrGatherer(ContentHandler): def startElement(self, name, attrs): self._attrs = attrs def startElementNS(self, name, qname, attrs): self._attrs = attrs def test_expat_attrs_empty(): parser = create_parser() gather = AttrGatherer() parser.setContentHandler(gather) parser.feed("<doc/>") parser.close() return verify_empty_attrs(gather._attrs) def test_expat_attrs_wattr(): parser = create_parser() gather = AttrGatherer() parser.setContentHandler(gather) parser.feed("<doc attr='val'/>") parser.close() return verify_attrs_wattr(gather._attrs) def test_expat_nsattrs_empty(): parser = create_parser(1) gather = AttrGatherer() 
parser.setContentHandler(gather) parser.feed("<doc/>") parser.close() return verify_empty_nsattrs(gather._attrs) def test_expat_nsattrs_wattr(): parser = create_parser(1) gather = AttrGatherer() parser.setContentHandler(gather) parser.feed("<doc xmlns:ns='%s' ns:attr='val'/>" % ns_uri) parser.close() attrs = gather._attrs return attrs.getLength() == 1 and \ attrs.getNames() == [(ns_uri, "attr")] and \ (attrs.getQNames() == [] or attrs.getQNames() == ["ns:attr"]) and \ len(attrs) == 1 and \ attrs.has_key((ns_uri, "attr")) and \ attrs.keys() == [(ns_uri, "attr")] and \ attrs.get((ns_uri, "attr")) == "val" and \ attrs.get((ns_uri, "attr"), 25) == "val" and \ attrs.items() == [((ns_uri, "attr"), "val")] and \ attrs.values() == ["val"] and \ attrs.getValue((ns_uri, "attr")) == "val" and \ attrs[(ns_uri, "attr")] == "val" # ===== InputSource support xml_test_out = open(findfile("test"+os.extsep+"xml"+os.extsep+"out")).read() def test_expat_inpsource_filename(): parser = create_parser() result = StringIO() xmlgen = XMLGenerator(result) parser.setContentHandler(xmlgen) parser.parse(findfile("test"+os.extsep+"xml")) return result.getvalue() == xml_test_out def test_expat_inpsource_sysid(): parser = create_parser() result = StringIO() xmlgen = XMLGenerator(result) parser.setContentHandler(xmlgen) parser.parse(InputSource(findfile("test"+os.extsep+"xml"))) return result.getvalue() == xml_test_out def test_expat_inpsource_stream(): parser = create_parser() result = StringIO() xmlgen = XMLGenerator(result) parser.setContentHandler(xmlgen) inpsrc = InputSource() inpsrc.setByteStream(open(findfile("test"+os.extsep+"xml"))) parser.parse(inpsrc) return result.getvalue() == xml_test_out # ===== IncrementalParser support def test_expat_incremental(): result = StringIO() xmlgen = XMLGenerator(result) parser = create_parser() parser.setContentHandler(xmlgen) parser.feed("<doc>") parser.feed("</doc>") parser.close() return result.getvalue() == start + "<doc></doc>" def 
test_expat_incremental_reset(): result = StringIO() xmlgen = XMLGenerator(result) parser = create_parser() parser.setContentHandler(xmlgen) parser.feed("<doc>") parser.feed("text") result = StringIO() xmlgen = XMLGenerator(result) parser.setContentHandler(xmlgen) parser.reset() parser.feed("<doc>") parser.feed("text") parser.feed("</doc>") parser.close() return result.getvalue() == start + "<doc>text</doc>" # ===== Locator support def test_expat_locator_noinfo(): result = StringIO() xmlgen = XMLGenerator(result) parser = create_parser() parser.setContentHandler(xmlgen) parser.feed("<doc>") parser.feed("</doc>") parser.close() return parser.getSystemId() is None and \ parser.getPublicId() is None and \ parser.getLineNumber() == 1 def test_expat_locator_withinfo(): result = StringIO() xmlgen = XMLGenerator(result) parser = create_parser() parser.setContentHandler(xmlgen) parser.parse(findfile("test.xml")) return parser.getSystemId() == findfile("test.xml") and \ parser.getPublicId() is None # =========================================================================== # # error reporting # # =========================================================================== def test_expat_inpsource_location(): parser = create_parser() parser.setContentHandler(ContentHandler()) # do nothing source = InputSource() source.setByteStream(StringIO("<foo bar foobar>")) #ill-formed name = "a file name" source.setSystemId(name) try: parser.parse(source) except SAXException, e: return e.getSystemId() == name def test_expat_incomplete(): parser = create_parser() parser.setContentHandler(ContentHandler()) # do nothing try: parser.parse(StringIO("<foo>")) except SAXParseException: return 1 # ok, error found else: return 0 def test_sax_parse_exception_str(): # pass various values from a locator to the SAXParseException to # make sure that the __str__() doesn't fall apart when None is # passed instead of an integer line and column number # # use "normal" values for the locator: 
str(SAXParseException("message", None, DummyLocator(1, 1))) # use None for the line number: str(SAXParseException("message", None, DummyLocator(None, 1))) # use None for the column number: str(SAXParseException("message", None, DummyLocator(1, None))) # use None for both: str(SAXParseException("message", None, DummyLocator(None, None))) return 1 class DummyLocator: def __init__(self, lineno, colno): self._lineno = lineno self._colno = colno def getPublicId(self): return "pubid" def getSystemId(self): return "sysid" def getLineNumber(self): return self._lineno def getColumnNumber(self): return self._colno # =========================================================================== # # xmlreader tests # # =========================================================================== # ===== AttributesImpl def verify_empty_attrs(attrs): try: attrs.getValue("attr") gvk = 0 except KeyError: gvk = 1 try: attrs.getValueByQName("attr") gvqk = 0 except KeyError: gvqk = 1 try: attrs.getNameByQName("attr") gnqk = 0 except KeyError: gnqk = 1 try: attrs.getQNameByName("attr") gqnk = 0 except KeyError: gqnk = 1 try: attrs["attr"] gik = 0 except KeyError: gik = 1 return attrs.getLength() == 0 and \ attrs.getNames() == [] and \ attrs.getQNames() == [] and \ len(attrs) == 0 and \ not attrs.has_key("attr") and \ attrs.keys() == [] and \ attrs.get("attrs") is None and \ attrs.get("attrs", 25) == 25 and \ attrs.items() == [] and \ attrs.values() == [] and \ gvk and gvqk and gnqk and gik and gqnk def verify_attrs_wattr(attrs): return attrs.getLength() == 1 and \ attrs.getNames() == ["attr"] and \ attrs.getQNames() == ["attr"] and \ len(attrs) == 1 and \ attrs.has_key("attr") and \ attrs.keys() == ["attr"] and \ attrs.get("attr") == "val" and \ attrs.get("attr", 25) == "val" and \ attrs.items() == [("attr", "val")] and \ attrs.values() == ["val"] and \ attrs.getValue("attr") == "val" and \ attrs.getValueByQName("attr") == "val" and \ attrs.getNameByQName("attr") == "attr" and \ 
attrs["attr"] == "val" and \ attrs.getQNameByName("attr") == "attr" def test_attrs_empty(): return verify_empty_attrs(AttributesImpl({})) def test_attrs_wattr(): return verify_attrs_wattr(AttributesImpl({"attr" : "val"})) # ===== AttributesImpl def verify_empty_nsattrs(attrs): try: attrs.getValue((ns_uri, "attr")) gvk = 0 except KeyError: gvk = 1 try: attrs.getValueByQName("ns:attr") gvqk = 0 except KeyError: gvqk = 1 try: attrs.getNameByQName("ns:attr") gnqk = 0 except KeyError: gnqk = 1 try: attrs.getQNameByName((ns_uri, "attr")) gqnk = 0 except KeyError: gqnk = 1 try: attrs[(ns_uri, "attr")] gik = 0 except KeyError: gik = 1 return attrs.getLength() == 0 and \ attrs.getNames() == [] and \ attrs.getQNames() == [] and \ len(attrs) == 0 and \ not attrs.has_key((ns_uri, "attr")) and \ attrs.keys() == [] and \ attrs.get((ns_uri, "attr")) is None and \ attrs.get((ns_uri, "attr"), 25) == 25 and \ attrs.items() == [] and \ attrs.values() == [] and \ gvk and gvqk and gnqk and gik and gqnk def test_nsattrs_empty(): return verify_empty_nsattrs(AttributesNSImpl({}, {})) def test_nsattrs_wattr(): attrs = AttributesNSImpl({(ns_uri, "attr") : "val"}, {(ns_uri, "attr") : "ns:attr"}) return attrs.getLength() == 1 and \ attrs.getNames() == [(ns_uri, "attr")] and \ attrs.getQNames() == ["ns:attr"] and \ len(attrs) == 1 and \ attrs.has_key((ns_uri, "attr")) and \ attrs.keys() == [(ns_uri, "attr")] and \ attrs.get((ns_uri, "attr")) == "val" and \ attrs.get((ns_uri, "attr"), 25) == "val" and \ attrs.items() == [((ns_uri, "attr"), "val")] and \ attrs.values() == ["val"] and \ attrs.getValue((ns_uri, "attr")) == "val" and \ attrs.getValueByQName("ns:attr") == "val" and \ attrs.getNameByQName("ns:attr") == (ns_uri, "attr") and \ attrs[(ns_uri, "attr")] == "val" and \ attrs.getQNameByName((ns_uri, "attr")) == "ns:attr" # During the development of Python 2.5, an attempt to move the "xml" # package implementation to a new package ("xmlcore") proved painful. 
# The goal of this change was to allow applications to be able to # obtain and rely on behavior in the standard library implementation # of the XML support without needing to be concerned about the # availability of the PyXML implementation. # # While the existing import hackery in Lib/xml/__init__.py can cause # PyXML's _xmlpus package to supplant the "xml" package, that only # works because either implementation uses the "xml" package name for # imports. # # The move resulted in a number of problems related to the fact that # the import machinery's "package context" is based on the name that's # being imported rather than the __name__ of the actual package # containment; it wasn't possible for the "xml" package to be replaced # by a simple module that indirected imports to the "xmlcore" package. # # The following two tests exercised bugs that were introduced in that # attempt. Keeping these tests around will help detect problems with # other attempts to provide reliable access to the standard library's # implementation of the XML support. 
def test_sf_1511497():
    """Regression test for SF bug #1511497.

    After dropping every already-imported "xml." module from sys.modules,
    re-importing xml.sax.expatreader must yield the stdlib implementation
    under its canonical name (not a PyXML/_xmlplus substitute).
    """
    # Bug report: http://www.python.org/sf/1511497
    import sys
    old_modules = sys.modules.copy()
    for modname in sys.modules.keys():
        if modname.startswith("xml."):
            del sys.modules[modname]
    try:
        import xml.sax.expatreader
        module = xml.sax.expatreader
        return module.__name__ == "xml.sax.expatreader"
    finally:
        # Restore the pre-test module set so later tests are unaffected.
        sys.modules.update(old_modules)

def test_sf_1513611():
    """Regression test for SF bug #1513611.

    Parsing ill-formed input through a parser obtained via make_parser()
    must raise SAXParseException (not fail some other way).
    """
    # Bug report: http://www.python.org/sf/1513611
    sio = StringIO("invalid")
    parser = make_parser()
    from xml.sax import SAXParseException
    try:
        parser.parse(sio)
    except SAXParseException:
        return True
    else:
        return False

# ===== Main program

def make_test_output():
    """Regenerate the expected-output file (test.xml.out) by running the
    XMLGenerator over test.xml.  Maintenance utility; not invoked by the
    test driver below."""
    parser = create_parser()
    result = StringIO()
    xmlgen = XMLGenerator(result)
    parser.setContentHandler(xmlgen)
    parser.parse(findfile("test"+os.extsep+"xml"))
    outf = open(findfile("test"+os.extsep+"xml"+os.extsep+"out"), "w")
    outf.write(result.getvalue())
    outf.close()

# Run every module-level function named test_* (defined above) in sorted
# order.  NOTE: locals() is snapshotted right here, so this block must stay
# after all test function definitions.
items = locals().items()
items.sort()
for (name, value) in items:
    if name[ : 5] == "test_":
        confirm(value(), name)

# We delete the items variable so that the assignment to items above
# doesn't pick up the old value of items (which messes with attempts
# to find reference leaks).
del items

if verbose:
    print "%d tests, %d failures" % (tests, len(failures))
if failures:
    raise TestFailed("%d of %d tests failed: %s" %
                     (len(failures), tests, ", ".join(failures)))
mit
t-hey/QGIS-Original
scripts/process_function_template.py
12
4210
"""Generate the C++ body of QgsExpression::initFunctionHelp().

Reads every JSON help file under resources/function_help/json/ and every
plain-text group description under resources/function_help/text/, and writes
a C++ source file (path given as sys.argv[1]) that populates
QgsExpression::sFunctionHelpTexts.
"""
import sys
import os
import json
import glob

sys.path.append(
    os.path.join(os.path.dirname(os.path.realpath(__file__)),
                 '../python/ext-libs'))

from six import string_types

cpp = open(sys.argv[1], "w")
cpp.write(
    "#include \"qgsexpression.h\"\n"
    "\n"
    "QHash<QString, QgsExpression::Help> QgsExpression::sFunctionHelpTexts;\n"
    "\n"
    "void QgsExpression::initFunctionHelp()\n"
    "{\n"
    "  if( !sFunctionHelpTexts.isEmpty() )\n"
    "    return;"
)


def quote(v):
    """Recursively escape *v* for embedding in C++ string literals.

    Dicts are escaped in place, lists yield a new list, strings get their
    double quotes and newlines escaped, bools pass through unchanged.
    Raises TypeError for any other type.
    """
    if isinstance(v, dict):
        for k in v:
            v[k] = quote(v[k])
        return v
    elif isinstance(v, list):
        # BUG FIX: was map(quote, v) — a one-shot iterator on Python 3,
        # but the result ('variants') is iterated later; a list is required.
        return [quote(x) for x in v]
    elif isinstance(v, string_types):
        return v.replace('"', '\\"').replace('\n', '\\n')
    elif isinstance(v, bool):
        return v
    # BUG FIX: was `raise BaseException(...)` — BaseException escapes
    # `except Exception` handlers and should never be raised directly.
    raise TypeError("unexpected type " + repr(v))


for f in sorted(glob.glob('resources/function_help/json/*')):
    with open(f) as function_file:
        try:
            json_params = json.load(function_file)
        # BUG FIX: was a bare `except:` (also traps KeyboardInterrupt);
        # json.load signals malformed input with ValueError
        # (json.JSONDecodeError subclasses it on Python 3).
        except ValueError:
            print(f)  # identify the offending file before re-raising
            raise

    json_params = quote(json_params)

    # Basic schema validation of the help file.
    for field in ['name', 'type']:
        if field not in json_params:
            raise ValueError("%s: %s missing" % (f, field))

    if not json_params['type'] in ['function', 'operator', 'value', 'expression', 'group']:
        raise ValueError("%s: invalid type %s " % (f, json_params['type']))

    if 'variants' not in json_params:
        # Convert the single-variant shortcut to an expanded variant.
        v = dict(json_params)
        v['variant'] = json_params['name']
        v['variant_description'] = json_params['description']
        json_params['variants'] = [v]

    name = "\"{0}\"".format(json_params['name'])

    if json_params['type'] == 'operator':
        for v in json_params['variants']:
            if 'arguments' not in v:
                # BUG FIX: the original message contained a %s placeholder
                # but never applied `% f`, so it printed the literal "%s".
                raise ValueError("%s: arguments expected for operator" % f)
            if len(list(v['arguments'])) < 1 or len(list(v['arguments'])) > 2:
                # BUG FIX: same missing `% f` as above.
                raise ValueError("%s: 1 or 2 arguments expected for operator" % f)

    cpp.write("\n\n  sFunctionHelpTexts.insert( {0},\n        Help( {0}, tr( \"{1}\" ), tr( \"{2}\" ),\n            QList<HelpVariant>()".format(
        name, json_params['type'], json_params['description']))

    for v in json_params['variants']:
        cpp.write("\n  << HelpVariant( tr( \"{0}\" ), tr( \"{1}\" ),\n       QList<HelpArg>()".format(
            v['variant'], v['variant_description']))

        if 'arguments' in v:
            for a in v['arguments']:
                # TODO(review): 'optional' and 'default' used to be passed to
                # .format() as well but were silently dropped (no {4}/{5}
                # placeholders).  Output kept identical here; confirm whether
                # HelpArg should also receive those two values.
                cpp.write("\n  << HelpArg( \"{0}\", tr( \"{1}\" ), {2}, {3} )".format(
                    a['arg'],
                    a.get('description', ''),
                    "true" if a.get('descOnly', False) else "false",
                    "true" if a.get('syntaxOnly', False) else "false"))

        cpp.write(",\n  /* variableLenArguments */ {0}".format(
            "true" if v.get('variableLenArguments', False) else "false"))
        cpp.write(",\n  QList<HelpExample>()")

        if 'examples' in v:
            for e in v['examples']:
                cpp.write("\n  << HelpExample( tr( \"{0}\" ), tr( \"{1}\" ), tr( \"{2}\") )".format(
                    e['expression'], e['returns'], e.get('note', '')))

        if 'notes' in v:
            cpp.write(",\n  tr( \"{0}\" )".format(v['notes']))

        cpp.write("\n  )")

    cpp.write("\n  )")
    cpp.write("\n  );")

for f in sorted(glob.glob('resources/function_help/text/*')):
    n = os.path.basename(f)
    with open(f) as content:
        # Escape backslashes first ("&#92;" leaves no backslash behind, so
        # the old follow-up .replace('\\', '\\\\') was a no-op and has been
        # removed), then quotes and newlines for the C++ literal.
        cpp.write("\n\n  sFunctionHelpTexts.insert( \"{0}\",\n      Help( tr( \"{0}\" ), tr( \"group\" ), tr( \"{1}\" ), QList<HelpVariant>() ) );\n".format(
            n,
            content.read().replace("\\", "&#92;").replace('"', '\\"').replace('\n', '\\n')))

cpp.write("\n}\n")
cpp.close()
gpl-2.0
itskewpie/tempest
tempest/api/object_storage/test_container_services.py
3
5029
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

from tempest.api.object_storage import base
from tempest.common.utils.data_utils import arbitrary_string
from tempest.common.utils.data_utils import rand_name
from tempest.test import attr
from tempest.test import HTTP_SUCCESS


class ContainerTest(base.BaseObjectTest):
    """Smoke tests for basic Swift container operations."""

    @classmethod
    def setUpClass(cls):
        super(ContainerTest, cls).setUpClass()
        # Containers created during the tests; removed in tearDownClass.
        cls.containers = []

    @classmethod
    def tearDownClass(cls):
        cls.delete_containers(cls.containers)
        super(ContainerTest, cls).tearDownClass()

    @attr(type='smoke')
    def test_create_container(self):
        # A fresh container answers 201, or 202 if it already existed.
        name = rand_name(name='TestContainer')
        resp, body = self.container_client.create_container(name)
        self.containers.append(name)
        self.assertIn(resp['status'], ('202', '201'))

    @attr(type='smoke')
    def test_delete_container(self):
        # Create a container, then remove it again.
        name = rand_name(name='TestContainer')
        resp, _ = self.container_client.create_container(name)
        self.containers.append(name)
        resp, _ = self.container_client.delete_container(name)
        self.assertIn(int(resp['status']), HTTP_SUCCESS)
        self.containers.remove(name)

    @attr(type='smoke')
    def test_list_container_contents_json(self):
        # An object stored in a container must appear in the JSON
        # contents listing of that container.
        name = rand_name(name='TestContainer')
        resp, _ = self.container_client.create_container(name)
        self.containers.append(name)

        # Store an object in the container.
        obj_name = rand_name(name='TestObject')
        data = arbitrary_string()
        resp, _ = self.object_client.create_object(name, obj_name, data)

        # Attach some metadata to the object.
        meta_key = rand_name(name='Meta-Test-')
        meta_value = rand_name(name='MetaValue-')
        resp, _ = self.object_client.update_object_metadata(
            name, obj_name, {meta_key: meta_value})

        # Fetch the container listing in JSON format.
        resp, object_list = self.container_client.list_container_contents(
            name, params={'format': 'json'})
        self.assertIn(int(resp['status']), HTTP_SUCCESS)
        self.assertIsNotNone(object_list)
        self.assertIn(obj_name, [obj['name'] for obj in object_list])

    @attr(type='smoke')
    def test_container_metadata(self):
        # Update, retrieve and delete container metadata.
        name = rand_name(name='TestContainer')
        resp, _ = self.container_client.create_container(name)
        self.containers.append(name)

        # Set two metadata entries on the container.
        metadata = {'name': 'Pictures', 'description': 'Travel'}
        resp, _ = self.container_client.update_container_metadata(
            name, metadata=metadata)
        self.assertIn(int(resp['status']), HTTP_SUCCESS)

        # The entries must be visible as x-container-meta-* headers.
        resp, _ = self.container_client.list_container_metadata(name)
        self.assertIn(int(resp['status']), HTTP_SUCCESS)
        self.assertIn('x-container-meta-name', resp)
        self.assertIn('x-container-meta-description', resp)
        self.assertEqual(resp['x-container-meta-name'], 'Pictures')
        self.assertEqual(resp['x-container-meta-description'], 'Travel')

        # Delete the metadata again.
        resp, _ = self.container_client.delete_container_metadata(
            name, metadata=metadata.keys())
        self.assertIn(int(resp['status']), HTTP_SUCCESS)

        # The headers must be gone now.
        resp, _ = self.container_client.list_container_metadata(name)
        self.assertIn(int(resp['status']), HTTP_SUCCESS)
        self.assertNotIn('x-container-meta-name', resp)
        self.assertNotIn('x-container-meta-description', resp)
apache-2.0
jmichalicek/djukebox
djukebox/migrations/0013_auto__chg_field_album_artist.py
1
7027
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models


class Migration(SchemaMigration):
    # South schema migration: relaxes Album.artist to allow NULL values.

    def forwards(self, orm):

        # Changing field 'Album.artist'
        db.alter_column('djukebox_album', 'artist_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['djukebox.Artist'], null=True))

    def backwards(self, orm):

        # User chose to not deal with backwards NULL issues for 'Album.artist'
        raise RuntimeError("Cannot reverse this migration. 'Album.artist' and its values cannot be restored.")

    # Frozen ORM snapshot used by South to build the `orm` object passed to
    # forwards()/backwards().  Auto-generated by South; do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'djukebox.album': {
            'Meta': {'unique_together': "(['title', 'artist', 'user'],)", 'object_name': 'Album'},
            'artist': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['djukebox.Artist']", 'null': 'True', 'blank': 'True'}),
            'cover_art': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'djukebox.artist': {
            'Meta': {'object_name': 'Artist'},
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'djukebox.audiofile': {
            'Meta': {'object_name': 'AudioFile'},
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
            'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'track': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['djukebox.Track']"})
        },
        'djukebox.mp3file': {
            'Meta': {'object_name': 'Mp3File', '_ormbases': ['djukebox.AudioFile']},
            'audiofile_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['djukebox.AudioFile']", 'unique': 'True', 'primary_key': 'True'})
        },
        'djukebox.oggfile': {
            'Meta': {'object_name': 'OggFile', '_ormbases': ['djukebox.AudioFile']},
            'audiofile_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['djukebox.AudioFile']", 'unique': 'True', 'primary_key': 'True'})
        },
        'djukebox.track': {
            'Meta': {'object_name': 'Track'},
            'album': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['djukebox.Album']", 'null': 'True', 'blank': 'True'}),
            'artist': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['djukebox.Artist']", 'null': 'True', 'blank': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'track_number': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        }
    }

    complete_apps = ['djukebox']
bsd-2-clause
richardnpaul/FWL-Website
lib/python2.7/site-packages/django/conf/locale/nn/formats.py
108
1629
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals

# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j. F Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j. F Y H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'd.m.Y'
SHORT_DATETIME_FORMAT = 'd.m.Y H:i'
FIRST_DAY_OF_WEEK = 1  # Monday

# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# Kept ISO formats as they are in first position
DATE_INPUT_FORMATS = (
    '%Y-%m-%d', '%d.%m.%Y', '%d.%m.%y', # '2006-10-25', '25.10.2006', '25.10.06'
    # '%d. %b %Y', '%d %b %Y',          # '25. okt 2006', '25 okt 2006'
    # '%d. %b. %Y', '%d %b. %Y',        # '25. okt. 2006', '25 okt. 2006'
    # '%d. %B %Y', '%d %B %Y',          # '25. oktober 2006', '25 oktober 2006'
)
DATETIME_INPUT_FORMATS = (
    '%Y-%m-%d %H:%M:%S',     # '2006-10-25 14:30:59'
    '%Y-%m-%d %H:%M',        # '2006-10-25 14:30'
    # BUG FIX: '%Y-%m-%d' was listed twice in a row; the duplicate entry
    # was redundant (formats are tried in order) and has been removed.
    '%Y-%m-%d',              # '2006-10-25'
    '%d.%m.%Y %H:%M:%S',     # '25.10.2006 14:30:59'
    '%d.%m.%Y %H:%M',        # '25.10.2006 14:30'
    '%d.%m.%Y',              # '25.10.2006'
    '%d.%m.%y %H:%M:%S',     # '25.10.06 14:30:59'
    '%d.%m.%y %H:%M',        # '25.10.06 14:30'
    '%d.%m.%y',              # '25.10.06'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '\xa0'  # non-breaking space
NUMBER_GROUPING = 3
gpl-3.0
aspectron/jsx
build/tools/gyp/test/actions/gyptest-all.py
243
3677
#!/usr/bin/env python

# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""
Verifies simple actions when using an explicit build target of 'all'.
"""

import glob
import os

import TestGyp

test = TestGyp.TestGyp(workdir='workarea_all')

test.run_gyp('actions.gyp', chdir='src')

test.relocate('src', 'relocate/src')

# Some gyp files use an action that mentions an output but never
# writes it as a means to making the action run on every build.  That
# doesn't mesh well with ninja's semantics.  TODO(evan): figure out
# how to work always-run actions in to ninja.
# Android also can't do this as it doesn't have order-only dependencies.
if test.format in ['ninja', 'android']:
  test.build('actions.gyp', test.ALL, chdir='relocate/src')
else:
  # Test that an "always run" action increases a counter on multiple
  # invocations, and that a dependent action updates in step.
  test.build('actions.gyp', test.ALL, chdir='relocate/src')
  test.must_match('relocate/src/subdir1/actions-out/action-counter.txt', '1')
  test.must_match('relocate/src/subdir1/actions-out/action-counter_2.txt', '1')
  test.build('actions.gyp', test.ALL, chdir='relocate/src')
  test.must_match('relocate/src/subdir1/actions-out/action-counter.txt', '2')
  test.must_match('relocate/src/subdir1/actions-out/action-counter_2.txt', '2')

  # The "always run" action only counts to 2, but the dependent target
  # will count forever if it's allowed to run. This verifies that the
  # dependent target only runs when the "always run" action generates
  # new output, not just because the "always run" ran.
  test.build('actions.gyp', test.ALL, chdir='relocate/src')
  test.must_match('relocate/src/subdir1/actions-out/action-counter.txt', '2')
  test.must_match('relocate/src/subdir1/actions-out/action-counter_2.txt', '2')

expect = """\
Hello from program.c
Hello from make-prog1.py
Hello from make-prog2.py
"""

if test.format == 'xcode':
  chdir = 'relocate/src/subdir1'
else:
  chdir = 'relocate/src'
test.run_built_executable('program', chdir=chdir, stdout=expect)

test.must_match('relocate/src/subdir2/file.out', "Hello from make-file.py\n")

expect = "Hello from generate_main.py\n"

if test.format == 'xcode':
  chdir = 'relocate/src/subdir3'
else:
  chdir = 'relocate/src'
test.run_built_executable('null_input', chdir=chdir, stdout=expect)


# Clean out files which may have been created if test.ALL was run.
def clean_dep_files():
  """Remove dependency marker files left over from previous builds.

  BUG FIX: the loop variable used to be named `file`, shadowing the
  builtin; renamed to `path`.
  """
  for path in (glob.glob('relocate/src/dep_*.txt') +
               glob.glob('relocate/src/deps_all_done_*.txt')):
    if os.path.exists(path):
      os.remove(path)

# Confirm our clean.
clean_dep_files()
test.must_not_exist('relocate/src/dep_1.txt')
test.must_not_exist('relocate/src/deps_all_done_first_123.txt')

# Make sure all deps finish before an action is run on a 'None' target.
# If using the Make builder, add -j to make things more difficult.
arguments = []
if test.format == 'make':
  arguments = ['-j']
test.build('actions.gyp', 'action_with_dependencies_123', chdir='relocate/src',
           arguments=arguments)
test.must_exist('relocate/src/deps_all_done_first_123.txt')

# Try again with a target that has deps in reverse.  Output files from
# previous tests deleted.  Confirm this execution did NOT run the ALL
# target which would mess up our dep tests.
clean_dep_files()
test.build('actions.gyp', 'action_with_dependencies_321', chdir='relocate/src',
           arguments=arguments)
test.must_exist('relocate/src/deps_all_done_first_321.txt')
test.must_not_exist('relocate/src/deps_all_done_first_123.txt')

test.pass_test()
mit
soltanmm-google/grpc
src/python/grpcio_tests/tests/protoc_plugin/_python_plugin_test.py
2
20729
# Copyright 2015, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
"""Tests for the Python code emitted by the gRPC protoc plugin."""

import collections
from concurrent import futures
import contextlib
import distutils.spawn
import errno
import os
import shutil
import subprocess
import sys
import tempfile
import threading
import unittest

from six import moves

import grpc
from tests.unit.framework.common import test_constants

import tests.protoc_plugin.protos.payload.test_payload_pb2 as payload_pb2
import tests.protoc_plugin.protos.requests.r.test_requests_pb2 as request_pb2
import tests.protoc_plugin.protos.responses.test_responses_pb2 as response_pb2
import tests.protoc_plugin.protos.service.test_service_pb2 as service_pb2

# Identifiers of entities we expect to find in the generated module.
STUB_IDENTIFIER = 'TestServiceStub'
SERVICER_IDENTIFIER = 'TestServiceServicer'
ADD_SERVICER_TO_SERVER_IDENTIFIER = 'add_TestServiceServicer_to_server'


class _ServicerMethods(object):
    """Backing implementations of the TestService RPC methods.

    Exposes ``pause``/``fail`` context managers so tests can make the
    servicer stall or raise while a block of client code runs.
    """

    def __init__(self):
        self._condition = threading.Condition()
        self._paused = False
        self._fail = False

    @contextlib.contextmanager
    def pause(self):  # pylint: disable=invalid-name
        """While active, RPC handlers block in _control until exit."""
        with self._condition:
            self._paused = True
        yield
        with self._condition:
            self._paused = False
            self._condition.notify_all()

    @contextlib.contextmanager
    def fail(self):  # pylint: disable=invalid-name
        """While active, RPC handlers raise ValueError from _control."""
        with self._condition:
            self._fail = True
        yield
        with self._condition:
            self._fail = False

    def _control(self):  # pylint: disable=invalid-name
        # Consult the fail/pause toggles before emitting a response.
        with self._condition:
            if self._fail:
                raise ValueError()
            while self._paused:
                self._condition.wait()

    def UnaryCall(self, request, unused_rpc_context):
        response = response_pb2.SimpleResponse()
        response.payload.payload_type = payload_pb2.COMPRESSABLE
        response.payload.payload_compressable = 'a' * request.response_size
        self._control()
        return response

    def StreamingOutputCall(self, request, unused_rpc_context):
        for parameter in request.response_parameters:
            response = response_pb2.StreamingOutputCallResponse()
            response.payload.payload_type = payload_pb2.COMPRESSABLE
            response.payload.payload_compressable = 'a' * parameter.size
            self._control()
            yield response

    def StreamingInputCall(self, request_iter, unused_rpc_context):
        response = response_pb2.StreamingInputCallResponse()
        aggregated_payload_size = 0
        for request in request_iter:
            aggregated_payload_size += len(request.payload.payload_compressable)
        response.aggregated_payload_size = aggregated_payload_size
        self._control()
        return response

    def FullDuplexCall(self, request_iter, unused_rpc_context):
        for request in request_iter:
            for parameter in request.response_parameters:
                response = response_pb2.StreamingOutputCallResponse()
                response.payload.payload_type = payload_pb2.COMPRESSABLE
                response.payload.payload_compressable = 'a' * parameter.size
                self._control()
                yield response

    def HalfDuplexCall(self, request_iter, unused_rpc_context):
        # Half duplex: consume the entire request stream first, then emit
        # all accumulated responses.
        responses = []
        for request in request_iter:
            for parameter in request.response_parameters:
                response = response_pb2.StreamingOutputCallResponse()
                response.payload.payload_type = payload_pb2.COMPRESSABLE
                response.payload.payload_compressable = 'a' * parameter.size
                self._control()
                responses.append(response)
        for response in responses:
            yield response


class _Service(
        collections.namedtuple('_Service', (
            'servicer_methods',
            'server',
            'stub',))):
    """A live and running service.

    Attributes:
      servicer_methods: The _ServicerMethods servicing RPCs.
      server: The grpc.Server servicing RPCs.
      stub: A stub on which to invoke RPCs.
    """


def _CreateService():
    """Provides a servicer backend and a stub.

    Returns:
      A _Service with which to test RPCs.
    """
    servicer_methods = _ServicerMethods()

    class Servicer(getattr(service_pb2, SERVICER_IDENTIFIER)):
        # Thin forwarding layer: every generated-servicer method delegates
        # to the shared _ServicerMethods instance captured by closure.

        def UnaryCall(self, request, context):
            return servicer_methods.UnaryCall(request, context)

        def StreamingOutputCall(self, request, context):
            return servicer_methods.StreamingOutputCall(request, context)

        def StreamingInputCall(self, request_iter, context):
            return servicer_methods.StreamingInputCall(request_iter, context)

        def FullDuplexCall(self, request_iter, context):
            return servicer_methods.FullDuplexCall(request_iter, context)

        def HalfDuplexCall(self, request_iter, context):
            return servicer_methods.HalfDuplexCall(request_iter, context)

    server = grpc.server(
        futures.ThreadPoolExecutor(max_workers=test_constants.POOL_SIZE))
    getattr(service_pb2, ADD_SERVICER_TO_SERVER_IDENTIFIER)(Servicer(), server)
    port = server.add_insecure_port('[::]:0')
    server.start()
    channel = grpc.insecure_channel('localhost:{}'.format(port))
    stub = getattr(service_pb2, STUB_IDENTIFIER)(channel)
    return _Service(servicer_methods, server, stub)


def _CreateIncompleteService():
    """Provides a servicer backend that fails to implement methods and its stub.

    Returns:
      A _Service with which to test RPCs. The returned _Service's
        servicer_methods implements none of the methods required of it.
    """

    class Servicer(getattr(service_pb2, SERVICER_IDENTIFIER)):
        # Deliberately empty: every RPC should come back UNIMPLEMENTED.
        pass

    server = grpc.server(
        futures.ThreadPoolExecutor(max_workers=test_constants.POOL_SIZE))
    getattr(service_pb2, ADD_SERVICER_TO_SERVER_IDENTIFIER)(Servicer(), server)
    port = server.add_insecure_port('[::]:0')
    server.start()
    channel = grpc.insecure_channel('localhost:{}'.format(port))
    stub = getattr(service_pb2, STUB_IDENTIFIER)(channel)
    return _Service(None, server, stub)


def _streaming_input_request_iterator():
    # Three identical single-'a' payloads.
    for _ in range(3):
        request = request_pb2.StreamingInputCallRequest()
        request.payload.payload_type = payload_pb2.COMPRESSABLE
        request.payload.payload_compressable = 'a'
        yield request


def _streaming_output_request():
    request = request_pb2.StreamingOutputCallRequest()
    for size in (1, 2, 3):
        request.response_parameters.add(size=size, interval_us=0)
    return request


def _full_duplex_request_iterator():
    request = request_pb2.StreamingOutputCallRequest()
    request.response_parameters.add(size=1, interval_us=0)
    yield request
    request = request_pb2.StreamingOutputCallRequest()
    request.response_parameters.add(size=2, interval_us=0)
    request.response_parameters.add(size=3, interval_us=0)
    yield request


class PythonPluginTest(unittest.TestCase):
    """Test case for the gRPC Python protoc-plugin.

    While reading these tests, remember that the futures API
    (`stub.method.future()`) only gives futures for the *response-unary*
    methods and does not exist for response-streaming methods.
    """

    def testImportAttributes(self):
        # check that we can access the generated module and its members.
        self.assertIsNotNone(getattr(service_pb2, STUB_IDENTIFIER, None))
        self.assertIsNotNone(getattr(service_pb2, SERVICER_IDENTIFIER, None))
        self.assertIsNotNone(
            getattr(service_pb2, ADD_SERVICER_TO_SERVER_IDENTIFIER, None))

    def testUpDown(self):
        service = _CreateService()
        self.assertIsNotNone(service.servicer_methods)
        self.assertIsNotNone(service.server)
        self.assertIsNotNone(service.stub)

    def testIncompleteServicer(self):
        service = _CreateIncompleteService()
        request = request_pb2.SimpleRequest(response_size=13)
        with self.assertRaises(grpc.RpcError) as exception_context:
            service.stub.UnaryCall(request)
        self.assertIs(exception_context.exception.code(),
                      grpc.StatusCode.UNIMPLEMENTED)

    def testUnaryCall(self):
        service = _CreateService()
        request = request_pb2.SimpleRequest(response_size=13)
        response = service.stub.UnaryCall(request)
        expected_response = service.servicer_methods.UnaryCall(
            request, 'not a real context!')
        self.assertEqual(expected_response, response)

    def testUnaryCallFuture(self):
        service = _CreateService()
        request = request_pb2.SimpleRequest(response_size=13)
        # Check that the call does not block waiting for the server to respond.
        with service.servicer_methods.pause():
            response_future = service.stub.UnaryCall.future(request)
            response = response_future.result()
        expected_response = service.servicer_methods.UnaryCall(
            request, 'not a real RpcContext!')
        self.assertEqual(expected_response, response)

    def testUnaryCallFutureExpired(self):
        service = _CreateService()
        request = request_pb2.SimpleRequest(response_size=13)
        with service.servicer_methods.pause():
            response_future = service.stub.UnaryCall.future(
                request, timeout=test_constants.SHORT_TIMEOUT)
            with self.assertRaises(grpc.RpcError) as exception_context:
                response_future.result()
        self.assertIs(exception_context.exception.code(),
                      grpc.StatusCode.DEADLINE_EXCEEDED)
        self.assertIs(response_future.code(),
                      grpc.StatusCode.DEADLINE_EXCEEDED)

    def testUnaryCallFutureCancelled(self):
        service = _CreateService()
        request = request_pb2.SimpleRequest(response_size=13)
        with service.servicer_methods.pause():
            response_future = service.stub.UnaryCall.future(request)
            response_future.cancel()
        self.assertTrue(response_future.cancelled())
        self.assertIs(response_future.code(), grpc.StatusCode.CANCELLED)

    def testUnaryCallFutureFailed(self):
        service = _CreateService()
        request = request_pb2.SimpleRequest(response_size=13)
        with service.servicer_methods.fail():
            response_future = service.stub.UnaryCall.future(request)
            self.assertIsNotNone(response_future.exception())
        self.assertIs(response_future.code(), grpc.StatusCode.UNKNOWN)

    def testStreamingOutputCall(self):
        service = _CreateService()
        request = _streaming_output_request()
        responses = service.stub.StreamingOutputCall(request)
        expected_responses = service.servicer_methods.StreamingOutputCall(
            request, 'not a real RpcContext!')
        for expected_response, response in moves.zip_longest(
                expected_responses, responses):
            self.assertEqual(expected_response, response)

    def testStreamingOutputCallExpired(self):
        service = _CreateService()
        request = _streaming_output_request()
        with service.servicer_methods.pause():
            responses = service.stub.StreamingOutputCall(
                request, timeout=test_constants.SHORT_TIMEOUT)
            with self.assertRaises(grpc.RpcError) as exception_context:
                list(responses)
        self.assertIs(exception_context.exception.code(),
                      grpc.StatusCode.DEADLINE_EXCEEDED)

    def testStreamingOutputCallCancelled(self):
        service = _CreateService()
        request = _streaming_output_request()
        responses = service.stub.StreamingOutputCall(request)
        next(responses)
        responses.cancel()
        with self.assertRaises(grpc.RpcError) as exception_context:
            next(responses)
        self.assertIs(responses.code(), grpc.StatusCode.CANCELLED)

    def testStreamingOutputCallFailed(self):
        service = _CreateService()
        request = _streaming_output_request()
        with service.servicer_methods.fail():
            responses = service.stub.StreamingOutputCall(request)
            self.assertIsNotNone(responses)
            with self.assertRaises(grpc.RpcError) as exception_context:
                next(responses)
        self.assertIs(exception_context.exception.code(),
                      grpc.StatusCode.UNKNOWN)

    def testStreamingInputCall(self):
        service = _CreateService()
        response = service.stub.StreamingInputCall(
            _streaming_input_request_iterator())
        expected_response = service.servicer_methods.StreamingInputCall(
            _streaming_input_request_iterator(), 'not a real RpcContext!')
        self.assertEqual(expected_response, response)

    def testStreamingInputCallFuture(self):
        service = _CreateService()
        with service.servicer_methods.pause():
            response_future = service.stub.StreamingInputCall.future(
                _streaming_input_request_iterator())
            response = response_future.result()
        expected_response = service.servicer_methods.StreamingInputCall(
            _streaming_input_request_iterator(), 'not a real RpcContext!')
        self.assertEqual(expected_response, response)

    def testStreamingInputCallFutureExpired(self):
        service = _CreateService()
        with service.servicer_methods.pause():
            response_future = service.stub.StreamingInputCall.future(
                _streaming_input_request_iterator(),
                timeout=test_constants.SHORT_TIMEOUT)
            with self.assertRaises(grpc.RpcError) as exception_context:
                response_future.result()
            self.assertIsInstance(response_future.exception(), grpc.RpcError)
            self.assertIs(response_future.exception().code(),
                          grpc.StatusCode.DEADLINE_EXCEEDED)
        self.assertIs(exception_context.exception.code(),
                      grpc.StatusCode.DEADLINE_EXCEEDED)

    def testStreamingInputCallFutureCancelled(self):
        service = _CreateService()
        with service.servicer_methods.pause():
            response_future = service.stub.StreamingInputCall.future(
                _streaming_input_request_iterator())
            response_future.cancel()
            self.assertTrue(response_future.cancelled())
        with self.assertRaises(grpc.FutureCancelledError):
            response_future.result()

    def testStreamingInputCallFutureFailed(self):
        service = _CreateService()
        with service.servicer_methods.fail():
            response_future = service.stub.StreamingInputCall.future(
                _streaming_input_request_iterator())
            self.assertIsNotNone(response_future.exception())
        self.assertIs(response_future.code(), grpc.StatusCode.UNKNOWN)

    def testFullDuplexCall(self):
        service = _CreateService()
        responses = service.stub.FullDuplexCall(_full_duplex_request_iterator())
        expected_responses = service.servicer_methods.FullDuplexCall(
            _full_duplex_request_iterator(), 'not a real RpcContext!')
        for expected_response, response in moves.zip_longest(
                expected_responses, responses):
            self.assertEqual(expected_response, response)

    def testFullDuplexCallExpired(self):
        request_iterator = _full_duplex_request_iterator()
        service = _CreateService()
        with service.servicer_methods.pause():
            responses = service.stub.FullDuplexCall(
                request_iterator, timeout=test_constants.SHORT_TIMEOUT)
            with self.assertRaises(grpc.RpcError) as exception_context:
                list(responses)
        self.assertIs(exception_context.exception.code(),
                      grpc.StatusCode.DEADLINE_EXCEEDED)

    def testFullDuplexCallCancelled(self):
        service = _CreateService()
        request_iterator = _full_duplex_request_iterator()
        responses = service.stub.FullDuplexCall(request_iterator)
        next(responses)
        responses.cancel()
        with self.assertRaises(grpc.RpcError) as exception_context:
            next(responses)
        self.assertIs(exception_context.exception.code(),
                      grpc.StatusCode.CANCELLED)

    def testFullDuplexCallFailed(self):
        request_iterator = _full_duplex_request_iterator()
        service = _CreateService()
        with service.servicer_methods.fail():
            responses = service.stub.FullDuplexCall(request_iterator)
            with self.assertRaises(grpc.RpcError) as exception_context:
                next(responses)
        self.assertIs(exception_context.exception.code(),
                      grpc.StatusCode.UNKNOWN)

    def testHalfDuplexCall(self):
        service = _CreateService()

        def half_duplex_request_iterator():
            request = request_pb2.StreamingOutputCallRequest()
            request.response_parameters.add(size=1, interval_us=0)
            yield request
            request = request_pb2.StreamingOutputCallRequest()
            request.response_parameters.add(size=2, interval_us=0)
            request.response_parameters.add(size=3, interval_us=0)
            yield request

        responses = service.stub.HalfDuplexCall(half_duplex_request_iterator())
        expected_responses = service.servicer_methods.HalfDuplexCall(
            half_duplex_request_iterator(), 'not a real RpcContext!')
        for expected_response, response in moves.zip_longest(
                expected_responses, responses):
            self.assertEqual(expected_response, response)

    def testHalfDuplexCallWedged(self):
        condition = threading.Condition()
        wait_cell = [False]

        @contextlib.contextmanager
        def wait():  # pylint: disable=invalid-name
            # Where's Python 3's 'nonlocal' statement when you need it?
            with condition:
                wait_cell[0] = True
            yield
            with condition:
                wait_cell[0] = False
                condition.notify_all()

        def half_duplex_request_iterator():
            request = request_pb2.StreamingOutputCallRequest()
            request.response_parameters.add(size=1, interval_us=0)
            yield request
            with condition:
                while wait_cell[0]:
                    condition.wait()

        service = _CreateService()
        with wait():
            responses = service.stub.HalfDuplexCall(
                half_duplex_request_iterator(),
                timeout=test_constants.SHORT_TIMEOUT)
            # half-duplex waits for the client to send all info
            with self.assertRaises(grpc.RpcError) as exception_context:
                next(responses)
        self.assertIs(exception_context.exception.code(),
                      grpc.StatusCode.DEADLINE_EXCEEDED)


if __name__ == '__main__':
    unittest.main(verbosity=2)
bsd-3-clause
DataDog/integrations-core
vsphere/setup.py
1
1825
# (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from codecs import open
from os import path

from setuptools import setup

# Directory containing this setup script; all paths below are relative to it.
HERE = path.abspath(path.dirname(__file__))

# Get version info
ABOUT = {}
with open(path.join(HERE, "datadog_checks", "vsphere", "__about__.py")) as f:
    exec(f.read(), ABOUT)

# Get the long description from the README file
with open(path.join(HERE, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()


def get_dependencies():
    """Return the pinned dependency lines from requirements.in, if present."""
    dep_file = path.join(HERE, 'requirements.in')
    if path.isfile(dep_file):
        with open(dep_file, encoding='utf-8') as f:
            return f.readlines()
    return []


CHECKS_BASE_REQ = 'datadog-checks-base>=11.9.0'


setup(
    name='datadog-vsphere',
    version=ABOUT["__version__"],
    description='The vSphere check',
    long_description=long_description,
    long_description_content_type='text/markdown',
    keywords='datadog agent vSphere check',
    url='https://github.com/DataDog/integrations-core',
    author='Datadog',
    author_email='packages@datadoghq.com',
    license='New BSD',
    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'Topic :: System :: Monitoring',
        'License :: OSI Approved :: BSD License',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
    ],
    packages=['datadog_checks.vsphere'],
    # Run-time dependencies
    install_requires=[CHECKS_BASE_REQ],
    extras_require={'deps': get_dependencies()},
    # Extra files to ship with the wheel package
    include_package_data=True,
)
bsd-3-clause
blarghmatey/pip
pip/_vendor/requests/packages/chardet/big5prober.py
2931
1684
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
#   Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301  USA
######################### END LICENSE BLOCK #########################

from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import Big5DistributionAnalysis
from .mbcssm import Big5SMModel


class Big5Prober(MultiByteCharSetProber):
    """Multi-byte charset prober specialized for the Big5 encoding.

    Wires the generic multi-byte prober machinery to the Big5 state
    machine model and the Big5 character-distribution analyzer.
    """

    def __init__(self):
        super(Big5Prober, self).__init__()
        # Attribute names are dictated by MultiByteCharSetProber.
        self._mCodingSM = CodingStateMachine(Big5SMModel)
        self._mDistributionAnalyzer = Big5DistributionAnalysis()
        self.reset()

    def get_charset_name(self):
        return "Big5"
mit
jbedorf/tensorflow
tensorflow/python/layers/normalization_test.py
9
58321
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for tf.layers.normalization.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import numpy as np from tensorflow.core.protobuf import saver_pb2 from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.layers import convolutional as conv_layers from tensorflow.python.layers import normalization as normalization_layers from tensorflow.python.ops import array_ops from tensorflow.python.ops import init_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import random_ops from tensorflow.python.ops import variable_scope from tensorflow.python.ops import variables from tensorflow.python.platform import test from tensorflow.python.training import gradient_descent from tensorflow.python.training import saver as saver_lib @test_util.run_v1_only('b/120545219') class BNTest(test.TestCase): def _simple_model(self, image, fused, freeze_mode): output_channels, kernel_size = 2, 3 conv = conv_layers.conv2d( image, output_channels, kernel_size, use_bias=False, kernel_initializer=init_ops.ones_initializer()) bn_layer = normalization_layers.BatchNormalization(fused=fused) 
bn_layer._bessels_correction_test_only = False training = not freeze_mode bn = bn_layer.apply(conv, training=training) loss = math_ops.reduce_sum(math_ops.abs(bn)) optimizer = gradient_descent.GradientDescentOptimizer(0.01) if not freeze_mode: update_ops = ops.get_collection(ops.GraphKeys.UPDATE_OPS) with ops.control_dependencies(update_ops): train_op = optimizer.minimize(loss) else: train_op = optimizer.minimize(loss) saver = saver_lib.Saver(write_version=saver_pb2.SaverDef.V2) return loss, train_op, saver def _train(self, checkpoint_path, shape, use_gpu, is_fused, restore=False, freeze_mode=False, dtype=dtypes.float32): ops.reset_default_graph() graph = ops.get_default_graph() with self.session(graph=graph, use_gpu=use_gpu) as sess: image = array_ops.placeholder(dtype=dtype, shape=shape) loss, train_op, saver = self._simple_model(image, is_fused, freeze_mode) if restore: saver.restore(sess, checkpoint_path) else: self.evaluate(variables.global_variables_initializer()) np.random.seed(0) for _ in range(2): image_val = np.random.rand(*shape).astype(dtype.as_numpy_dtype) sess.run([loss, train_op], feed_dict={image: image_val}) if restore: all_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES) all_vars_values = [var.eval() for var in all_vars] return all_vars_values else: saver.save(sess, checkpoint_path) def _infer(self, checkpoint_path, image_val, shape, use_gpu, is_fused): dtype = image_val.dtype ops.reset_default_graph() graph = ops.get_default_graph() with self.session(graph=graph, use_gpu=use_gpu) as sess: image = array_ops.placeholder(dtype=dtype, shape=shape) loss, _, saver = self._simple_model(image, is_fused, True) saver.restore(sess, checkpoint_path) loss_val = sess.run(loss, feed_dict={image: image_val}) return loss_val def _trainEvalSequence(self, dtype, train1_use_gpu, train2_use_gpu, infer_use_gpu): batch, height, width, input_channels = 2, 4, 5, 3 shape = [batch, height, width, input_channels] # Not all characters in a dtype string 
representation are allowed in # filenames in all operating systems. This map will sanitize these. dtype_to_valid_fn = { dtypes.float16: 'float16', dtypes.float32: 'float32', } checkpoint = os.path.join( self.get_temp_dir(), 'cp_%s_%s_%s_%s' % ( dtype_to_valid_fn[dtype], train1_use_gpu, train2_use_gpu, infer_use_gpu)) self._train( checkpoint, shape, use_gpu=train1_use_gpu, is_fused=True, restore=False, freeze_mode=False, dtype=dtype) train_vars = self._train( checkpoint, shape, use_gpu=train2_use_gpu, is_fused=True, restore=True, freeze_mode=False, dtype=dtype) np.random.seed(0) image_val = np.random.rand(batch, height, width, input_channels).astype( dtype.as_numpy_dtype) loss_val = self._infer( checkpoint, image_val, shape, use_gpu=infer_use_gpu, is_fused=True) return train_vars, loss_val def testHalfPrecision(self): ref_vars, ref_loss = self._trainEvalSequence( dtype=dtypes.float32, train1_use_gpu=True, train2_use_gpu=True, infer_use_gpu=True) self.assertEqual(len(ref_vars), 5) for train1_use_gpu in [True, False]: for train2_use_gpu in [True, False]: for infer_use_gpu in [True, False]: test_vars, test_loss = self._trainEvalSequence( dtypes.float16, train1_use_gpu, train2_use_gpu, infer_use_gpu) self.assertEqual(len(test_vars), 5) for test_var, ref_var in zip(test_vars, ref_vars): self.assertAllClose(test_var, ref_var, rtol=1.e-3, atol=1.e-3) self.assertAllClose(test_loss, ref_loss, rtol=1.e-3, atol=1.e-3) def _testCheckpoint(self, is_fused_checkpoint_a, is_fused_checkpoint_b, use_gpu_checkpoint_a, use_gpu_checkpoint_b, use_gpu_test_a, use_gpu_test_b, freeze_mode): batch, height, width, input_channels = 2, 4, 5, 3 shape = [batch, height, width, input_channels] base_path = '%s_%s_%s_%s_%s_%s' % (is_fused_checkpoint_a, is_fused_checkpoint_b, use_gpu_checkpoint_a, use_gpu_checkpoint_b, use_gpu_test_a, use_gpu_test_b) checkpoint_path_a = os.path.join(self.get_temp_dir(), 'checkpoint_a_%s' % base_path) self._train( checkpoint_path_a, shape, use_gpu_checkpoint_a, 
is_fused_checkpoint_a, restore=False, freeze_mode=freeze_mode) checkpoint_path_b = os.path.join(self.get_temp_dir(), 'checkpoint_b_%s' % base_path) self._train( checkpoint_path_b, shape, use_gpu_checkpoint_b, is_fused_checkpoint_b, restore=False, freeze_mode=freeze_mode) vars_fused = self._train( checkpoint_path_a, shape, use_gpu_test_a, True, restore=True, freeze_mode=freeze_mode) vars_nonfused = self._train( checkpoint_path_b, shape, use_gpu_test_b, False, restore=True, freeze_mode=freeze_mode) self.assertEqual(len(vars_fused), 5) self.assertEqual(len(vars_nonfused), 5) for var_fused, var_nonfused in zip(vars_fused, vars_nonfused): self.assertAllClose(var_fused, var_nonfused, atol=1e-6) image_val = np.random.rand(batch, height, width, input_channels).astype(np.float32) loss_fused_val = self._infer(checkpoint_path_a, image_val, shape, use_gpu_test_a, True) loss_nonfused_val = self._infer(checkpoint_path_b, image_val, shape, use_gpu_test_b, False) self.assertAllClose(loss_fused_val, loss_nonfused_val, atol=1e-6) def _testCheckpointCrossDevice(self, ckpt_a_fused, ckpt_a_use_gpu, ckpt_b_fused, ckpt_b_use_gpu): for use_gpu_test_a in [True, False]: for use_gpu_test_b in [True, False]: for freeze_mode in [True, False]: self._testCheckpoint(ckpt_a_fused, ckpt_a_use_gpu, ckpt_b_fused, ckpt_b_use_gpu, use_gpu_test_a, use_gpu_test_b, freeze_mode) def testCheckpointFusedCPUAndFusedGPU(self): self._testCheckpointCrossDevice(True, False, True, True) def testCheckpointFusedCPUAndFusedCPU(self): self._testCheckpointCrossDevice(True, False, True, False) def testCheckpointFusedGPUAndFusedGPU(self): self._testCheckpointCrossDevice(True, True, True, True) def testCheckpointNonFusedCPUAndNonFusedGPU(self): self._testCheckpointCrossDevice(False, False, False, True) def testCheckpointNonFusedCPUAndNonFusedCPU(self): self._testCheckpointCrossDevice(False, False, False, False) def testCheckpointNonFusedGPUAndNonFusedGPU(self): self._testCheckpointCrossDevice(False, True, False, True) def 
testCheckpointNonFusedGPUAndFusedGPU(self): self._testCheckpointCrossDevice(False, True, True, True) def testCheckpointNonFusedGPUAndFusedCPU(self): self._testCheckpointCrossDevice(False, True, True, False) def testCheckpointNonFusedCPUAndFusedCPU(self): self._testCheckpointCrossDevice(False, False, True, False) def testCreateBN(self): # Call layer. bn = normalization_layers.BatchNormalization(axis=1) inputs = random_ops.random_uniform((5, 4, 3), seed=1) training = array_ops.placeholder(dtype='bool') outputs = bn.apply(inputs, training=training) # Verify shape. self.assertListEqual(outputs.get_shape().as_list(), [5, 4, 3]) # Verify layer attributes. self.assertEqual(len(bn.updates), 2) self.assertEqual(len(bn.variables), 4) self.assertEqual(len(bn.trainable_variables), 2) self.assertEqual(len(bn.non_trainable_variables), 2) # Test that updates were created and added to UPDATE_OPS. self.assertEqual(len(bn.updates), 2) self.assertListEqual( ops.get_collection(ops.GraphKeys.UPDATE_OPS), bn.updates) # Test that weights were created and added to TRAINABLE_VARIABLES. self.assertListEqual( ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES), bn.trainable_variables) def testCreateFusedBNFloat16(self): # Call layer. bn = normalization_layers.BatchNormalization(axis=1, fused=True) inputs = random_ops.random_uniform( (5, 4, 3, 3), seed=1, dtype=dtypes.float16) training = array_ops.placeholder(dtype='bool') outputs = bn.apply(inputs, training=training) # Verify shape. self.assertListEqual(outputs.get_shape().as_list(), [5, 4, 3, 3]) # Verify layer attributes. self.assertEqual(len(bn.updates), 2) self.assertEqual(len(bn.variables), 4) self.assertEqual(len(bn.trainable_variables), 2) self.assertEqual(len(bn.non_trainable_variables), 2) for var in bn.variables: self.assertEqual(var.dtype, dtypes.float32_ref) # Test that updates were created and added to UPDATE_OPS. 
self.assertEqual(len(bn.updates), 2) self.assertListEqual( ops.get_collection(ops.GraphKeys.UPDATE_OPS), bn.updates) # Test that weights were created and added to TRAINABLE_VARIABLES. self.assertListEqual( ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES), bn.trainable_variables) def test3DInputAxis1(self): epsilon = 1e-3 bn = normalization_layers.BatchNormalization( axis=1, epsilon=epsilon, momentum=0.9) inputs = variables.Variable( np.random.random((5, 4, 3)) + 100, dtype=dtypes.float32) training = array_ops.placeholder(dtype='bool') outputs = bn.apply(inputs, training=training) with self.cached_session() as sess: # Test training with placeholder learning phase. self.evaluate(variables.global_variables_initializer()) np_gamma, np_beta = self.evaluate([bn.gamma, bn.beta]) np_gamma = np.reshape(np_gamma, (1, 4, 1)) np_beta = np.reshape(np_beta, (1, 4, 1)) for _ in range(100): np_output, _, _ = sess.run([outputs] + bn.updates, feed_dict={training: True}) # Verify that the axis is normalized during training. normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1) self.assertAlmostEqual(np.std(normed_np_output), 1., places=1) # Verify that the statistics are updated during training. moving_mean, moving_var = self.evaluate( [bn.moving_mean, bn.moving_variance]) np_inputs = self.evaluate(inputs) mean = np.mean(np_inputs, axis=(0, 2)) std = np.std(np_inputs, axis=(0, 2)) variance = np.square(std) self.assertAllClose(mean, moving_mean, atol=1e-2) self.assertAllClose(variance, moving_var, atol=1e-2) # Test inference with placeholder learning phase. np_output = sess.run(outputs, feed_dict={training: False}) # Verify that the axis is normalized during inference. 
normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1) self.assertAlmostEqual(np.std(normed_np_output), 1., places=1) def test3DInputAxis2(self): epsilon = 1e-3 bn = normalization_layers.BatchNormalization( axis=2, epsilon=epsilon, momentum=0.9) inputs = variables.Variable( np.random.random((5, 4, 3)) + 100, dtype=dtypes.float32) training = array_ops.placeholder(dtype='bool') outputs = bn.apply(inputs, training=training) with self.cached_session() as sess: # Test training with placeholder learning phase. self.evaluate(variables.global_variables_initializer()) np_gamma, np_beta = self.evaluate([bn.gamma, bn.beta]) np_gamma = np.reshape(np_gamma, (1, 1, 3)) np_beta = np.reshape(np_beta, (1, 1, 3)) for _ in range(100): np_output, _, _ = sess.run([outputs] + bn.updates, feed_dict={training: True}) # Verify that the axis is normalized during training. normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1) self.assertAlmostEqual(np.std(normed_np_output), 1., places=1) # Verify that the statistics are updated during training. moving_mean, moving_var = self.evaluate( [bn.moving_mean, bn.moving_variance]) np_inputs = self.evaluate(inputs) mean = np.mean(np_inputs, axis=(0, 1)) std = np.std(np_inputs, axis=(0, 1)) variance = np.square(std) self.assertAllClose(mean, moving_mean, atol=1e-2) self.assertAllClose(variance, moving_var, atol=1e-2) # Test inference with placeholder learning phase. np_output = sess.run(outputs, feed_dict={training: False}) # Verify that the axis is normalized during inference. 
      normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta
      self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1)
      self.assertAlmostEqual(np.std(normed_np_output), 1., places=1)

  def test4DInputAxis1(self):
    """Normalizes axis 1 (NCHW-style) of a 4D input; GPU only."""
    # Only runs when a CUDA GPU is available.
    if test.is_gpu_available(cuda_only=True):
      epsilon = 1e-3
      bn = normalization_layers.BatchNormalization(
          axis=1, epsilon=epsilon, momentum=0.9)
      inputs = variables.Variable(
          np.random.random((5, 4, 3, 6)) + 100, dtype=dtypes.float32)
      training = array_ops.placeholder(dtype='bool')
      outputs = bn.apply(inputs, training=training)

      with self.session(use_gpu=True) as sess:
        # Test training with placeholder learning phase.
        self.evaluate(variables.global_variables_initializer())
        np_gamma, np_beta = self.evaluate([bn.gamma, bn.beta])
        np_gamma = np.reshape(np_gamma, (1, 4, 1, 1))
        np_beta = np.reshape(np_beta, (1, 4, 1, 1))
        for _ in range(100):
          np_output, _, _ = sess.run(
              [outputs] + bn.updates, feed_dict={training: True})
          # Verify that the axis is normalized during training.
          normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta
          self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1)
          self.assertAlmostEqual(np.std(normed_np_output), 1., places=1)

        # Verify that the statistics are updated during training.
        moving_mean, moving_var = self.evaluate(
            [bn.moving_mean, bn.moving_variance])
        np_inputs = self.evaluate(inputs)
        mean = np.mean(np_inputs, axis=(0, 2, 3))
        std = np.std(np_inputs, axis=(0, 2, 3))
        variance = np.square(std)
        self.assertAllClose(mean, moving_mean, atol=1e-2)
        self.assertAllClose(variance, moving_var, atol=1e-2)

        # Test inference with placeholder learning phase.
        np_output = sess.run(outputs, feed_dict={training: False})

        # Verify that the axis is normalized during inference.
        normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta
        self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1)
        self.assertAlmostEqual(np.std(normed_np_output), 1., places=1)

  def test4DInputAxis2(self):
    """Normalizes axis 2 of a 4D input; checks training, moving stats, inference."""
    epsilon = 1e-3
    bn = normalization_layers.BatchNormalization(
        axis=2, epsilon=epsilon, momentum=0.9)
    inputs = variables.Variable(
        np.random.random((5, 4, 3, 6)) + 100, dtype=dtypes.float32)
    training = array_ops.placeholder(dtype='bool')
    outputs = bn.apply(inputs, training=training)

    with self.cached_session() as sess:
      # Test training with placeholder learning phase.
      self.evaluate(variables.global_variables_initializer())
      np_gamma, np_beta = self.evaluate([bn.gamma, bn.beta])
      np_gamma = np.reshape(np_gamma, (1, 1, 3, 1))
      np_beta = np.reshape(np_beta, (1, 1, 3, 1))
      for _ in range(100):
        np_output, _, _ = sess.run(
            [outputs] + bn.updates, feed_dict={training: True})
        # Verify that the axis is normalized during training.
        normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta
        self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1)
        self.assertAlmostEqual(np.std(normed_np_output), 1., places=1)

      # Verify that the statistics are updated during training.
      moving_mean, moving_var = self.evaluate(
          [bn.moving_mean, bn.moving_variance])
      np_inputs = self.evaluate(inputs)
      mean = np.mean(np_inputs, axis=(0, 1, 3))
      std = np.std(np_inputs, axis=(0, 1, 3))
      variance = np.square(std)
      self.assertAllClose(mean, moving_mean, atol=1e-2)
      self.assertAllClose(variance, moving_var, atol=1e-2)

      # Test inference with placeholder learning phase.
      np_output = sess.run(outputs, feed_dict={training: False})

      # Verify that the axis is normalized during inference.
      normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta
      self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1)
      self.assertAlmostEqual(np.std(normed_np_output), 1., places=1)

  def test4DInputAxis3(self):
    """Normalizes the last axis (channels-last) of a 4D input."""
    epsilon = 1e-3
    bn = normalization_layers.BatchNormalization(
        axis=3, epsilon=epsilon, momentum=0.9)
    inputs = variables.Variable(
        np.random.random((5, 4, 3, 6)) + 100, dtype=dtypes.float32)
    training = array_ops.placeholder(dtype='bool')
    outputs = bn.apply(inputs, training=training)

    with self.cached_session() as sess:
      # Test training with placeholder learning phase.
      self.evaluate(variables.global_variables_initializer())
      np_gamma, np_beta = self.evaluate([bn.gamma, bn.beta])
      np_gamma = np.reshape(np_gamma, (1, 1, 1, 6))
      np_beta = np.reshape(np_beta, (1, 1, 1, 6))
      for _ in range(100):
        np_output, _, _ = sess.run(
            [outputs] + bn.updates, feed_dict={training: True})
        # Verify that the axis is normalized during training.
        normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta
        self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1)
        self.assertAlmostEqual(np.std(normed_np_output), 1., places=1)

      # Verify that the statistics are updated during training.
      moving_mean, moving_var = self.evaluate(
          [bn.moving_mean, bn.moving_variance])
      np_inputs = self.evaluate(inputs)
      mean = np.mean(np_inputs, axis=(0, 1, 2))
      std = np.std(np_inputs, axis=(0, 1, 2))
      variance = np.square(std)
      self.assertAllClose(mean, moving_mean, atol=1e-2)
      self.assertAllClose(variance, moving_var, atol=1e-2)

      # Test inference with placeholder learning phase.
      np_output = sess.run(outputs, feed_dict={training: False})

      # Verify that the axis is normalized during inference.
      normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta
      self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1)
      self.assertAlmostEqual(np.std(normed_np_output), 1., places=1)

  def test4DInputAxis3Fused(self):
    """Same as test4DInputAxis3 but with the fused batch-norm kernel."""
    epsilon = 1e-3
    bn = normalization_layers.BatchNormalization(
        axis=3, epsilon=epsilon, momentum=0.9, fused=True)
    inputs = variables.Variable(
        np.random.random((5, 4, 3, 6)) + 100, dtype=dtypes.float32)
    training = array_ops.placeholder(dtype='bool')
    outputs = bn.apply(inputs, training=training)

    with self.cached_session() as sess:
      # Test training with placeholder learning phase.
      self.evaluate(variables.global_variables_initializer())
      np_gamma, np_beta = self.evaluate([bn.gamma, bn.beta])
      np_gamma = np.reshape(np_gamma, (1, 1, 1, 6))
      np_beta = np.reshape(np_beta, (1, 1, 1, 6))
      for _ in range(100):
        np_output, _, _ = sess.run(
            [outputs] + bn.updates, feed_dict={training: True})
        # Verify that the axis is normalized during training.
        normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta
        self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1)
        self.assertAlmostEqual(np.std(normed_np_output), 1., places=1)

      # Verify that the statistics are updated during training.
      moving_mean, moving_var = self.evaluate(
          [bn.moving_mean, bn.moving_variance])
      np_inputs = self.evaluate(inputs)
      mean = np.mean(np_inputs, axis=(0, 1, 2))
      std = np.std(np_inputs, axis=(0, 1, 2))
      variance = np.square(std)
      self.assertAllClose(mean, moving_mean, atol=1e-2)
      self.assertAllClose(variance, moving_var, atol=1e-2)

      # Test inference with placeholder learning phase.
      np_output = sess.run(outputs, feed_dict={training: False})

      # Verify that the axis is normalized during inference.
      normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta
      self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1)
      self.assertAlmostEqual(np.std(normed_np_output), 1., places=1)

  def test4DInputAxis1Fused(self):
    """Fused batch norm over axis 1 (NCHW-style); GPU only."""
    if test.is_gpu_available(cuda_only=True):
      epsilon = 1e-3
      bn = normalization_layers.BatchNormalization(
          axis=1, epsilon=epsilon, momentum=0.9, fused=True)
      inputs = variables.Variable(
          np.random.random((5, 4, 3, 6)) + 100, dtype=dtypes.float32)
      training = array_ops.placeholder(dtype='bool')
      outputs = bn.apply(inputs, training=training)

      with self.cached_session() as sess:
        # Test training with placeholder learning phase.
        self.evaluate(variables.global_variables_initializer())
        np_gamma, np_beta = self.evaluate([bn.gamma, bn.beta])
        np_gamma = np.reshape(np_gamma, (1, 4, 1, 1))
        np_beta = np.reshape(np_beta, (1, 4, 1, 1))
        for _ in range(100):
          np_output, _, _ = sess.run(
              [outputs] + bn.updates, feed_dict={training: True})
          # Verify that the axis is normalized during training.
          normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta
          self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1)
          self.assertAlmostEqual(np.std(normed_np_output), 1., places=1)

        # Verify that the statistics are updated during training.
        moving_mean, moving_var = self.evaluate(
            [bn.moving_mean, bn.moving_variance])
        np_inputs = self.evaluate(inputs)
        mean = np.mean(np_inputs, axis=(0, 2, 3))
        std = np.std(np_inputs, axis=(0, 2, 3))
        variance = np.square(std)
        self.assertAllClose(mean, moving_mean, atol=1e-2)
        self.assertAllClose(variance, moving_var, atol=1e-2)

        # Test inference with placeholder learning phase.
        np_output = sess.run(outputs, feed_dict={training: False})

        # Verify that the axis is normalized during inference.
        normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta
        self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1)
        self.assertAlmostEqual(np.std(normed_np_output), 1., places=1)

  def testNegativeAxis(self):
    """axis=-1 must behave like normalizing the last axis of a 4D input."""
    epsilon = 1e-3
    bn = normalization_layers.BatchNormalization(
        axis=-1, epsilon=epsilon, momentum=0.9)
    inputs = variables.Variable(
        np.random.random((5, 4, 3, 6)) + 100, dtype=dtypes.float32)
    training = array_ops.placeholder(dtype='bool')
    outputs = bn.apply(inputs, training=training)

    with self.cached_session() as sess:
      # Test training with placeholder learning phase.
      self.evaluate(variables.global_variables_initializer())
      np_gamma, np_beta = self.evaluate([bn.gamma, bn.beta])
      np_gamma = np.reshape(np_gamma, (1, 1, 1, 6))
      np_beta = np.reshape(np_beta, (1, 1, 1, 6))
      for _ in range(100):
        np_output, _, _ = sess.run(
            [outputs] + bn.updates, feed_dict={training: True})
        # Verify that the axis is normalized during training.
        normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta
        self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1)
        self.assertAlmostEqual(np.std(normed_np_output), 1., places=1)

      # Verify that the statistics are updated during training.
      moving_mean, moving_var = self.evaluate(
          [bn.moving_mean, bn.moving_variance])
      np_inputs = self.evaluate(inputs)
      mean = np.mean(np_inputs, axis=(0, 1, 2))
      std = np.std(np_inputs, axis=(0, 1, 2))
      variance = np.square(std)
      self.assertAllClose(mean, moving_mean, atol=1e-2)
      self.assertAllClose(variance, moving_var, atol=1e-2)

      # Test inference with placeholder learning phase.
      np_output = sess.run(outputs, feed_dict={training: False})

      # Verify that the axis is normalized during inference.
      normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta
      self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1)
      self.assertAlmostEqual(np.std(normed_np_output), 1., places=1)

  def testBooleanLearningPhase(self):
    """Uses Python booleans for `training` instead of a placeholder."""
    epsilon = 1e-3
    bn = normalization_layers.BatchNormalization(
        axis=-1, epsilon=epsilon, momentum=0.9)
    inputs = variables.Variable(
        np.random.random((5, 4, 3, 6)) + 100, dtype=dtypes.float32)
    # Two separate graphs are built: one for training, one for inference.
    outputs_training = bn.apply(inputs, training=True)
    outputs_infer = bn.apply(inputs, training=False)

    with self.cached_session() as sess:
      # Test training with placeholder learning phase.
      self.evaluate(variables.global_variables_initializer())
      np_gamma, np_beta = self.evaluate([bn.gamma, bn.beta])
      np_gamma = np.reshape(np_gamma, (1, 1, 1, 6))
      np_beta = np.reshape(np_beta, (1, 1, 1, 6))
      for _ in range(100):
        np_output, _, _ = sess.run([outputs_training] + bn.updates)
        # Verify that the axis is normalized during training.
        normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta
        self.assertAlmostEqual(np.mean(normed_np_output), 0., places=2)
        self.assertAlmostEqual(np.std(normed_np_output), 1., places=1)

      # Verify that the statistics are updated during training.
      moving_mean, moving_var = self.evaluate(
          [bn.moving_mean, bn.moving_variance])
      np_inputs = self.evaluate(inputs)
      mean = np.mean(np_inputs, axis=(0, 1, 2))
      std = np.std(np_inputs, axis=(0, 1, 2))
      variance = np.square(std)
      self.assertAllClose(mean, moving_mean, atol=1e-2)
      self.assertAllClose(variance, moving_var, atol=1e-2)

      # Test inference with placeholder learning phase.
      np_output = self.evaluate(outputs_infer)

      # Verify that the axis is normalized during inference.
      normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta
      self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1)
      self.assertAlmostEqual(np.std(normed_np_output), 1., places=1)

  def testFunctionalNoReuse(self):
    """Exercises the functional `batch_norm` API; variables found by name."""
    inputs = variables.Variable(
        np.random.random((5, 4, 3, 6)), dtype=dtypes.float32)
    epsilon = 1e-3
    training = array_ops.placeholder(dtype='bool')
    outputs = normalization_layers.batch_norm(
        inputs,
        axis=-1,
        momentum=0.9,
        epsilon=epsilon,
        training=training,
        name='bn')

    updates = ops.get_collection(ops.GraphKeys.UPDATE_OPS)
    # Look the layer's variables up by their scoped graph names.
    all_vars = dict([(v.name, v) for v in variables.global_variables()])
    moving_mean = all_vars['bn/moving_mean:0']
    moving_variance = all_vars['bn/moving_variance:0']
    beta = all_vars['bn/beta:0']
    gamma = all_vars['bn/gamma:0']

    with self.cached_session() as sess:
      # Test training with placeholder learning phase.
      self.evaluate(variables.global_variables_initializer())
      np_gamma, np_beta = self.evaluate([gamma, beta])
      np_gamma = np.reshape(np_gamma, (1, 1, 1, 6))
      np_beta = np.reshape(np_beta, (1, 1, 1, 6))
      for _ in range(100):
        np_output, _, _ = sess.run(
            [outputs] + updates, feed_dict={training: True})
        # Verify that the axis is normalized during training.
        normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta
        self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1)
        self.assertAlmostEqual(np.std(normed_np_output), 1., places=1)

      # Verify that the statistics are updated during training.
      np_moving_mean, np_moving_var = self.evaluate(
          [moving_mean, moving_variance])
      np_inputs = self.evaluate(inputs)
      np_mean = np.mean(np_inputs, axis=(0, 1, 2))
      np_std = np.std(np_inputs, axis=(0, 1, 2))
      np_variance = np.square(np_std)
      self.assertAllClose(np_mean, np_moving_mean, atol=1e-2)
      self.assertAllClose(np_variance, np_moving_var, atol=1e-2)

      # Test inference with placeholder learning phase.
      np_output = sess.run(outputs, feed_dict={training: False})

      # Verify that the axis is normalized during inference.
      normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta
      self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1)
      self.assertAlmostEqual(np.std(normed_np_output), 1., places=1)

  def testFunctionalReuse(self):
    """Two functional calls with reuse=True must share one set of variables."""
    inputs1 = variables.Variable(
        np.random.random((5, 4, 3, 6)), dtype=dtypes.float32)
    inputs2 = variables.Variable(
        np.random.random((5, 4, 3, 6)), dtype=dtypes.float32)
    epsilon = 1e-3
    training = array_ops.placeholder(dtype='bool')
    _ = normalization_layers.batch_norm(
        inputs1,
        axis=-1,
        momentum=0.9,
        epsilon=epsilon,
        training=training,
        name='bn')
    outputs2 = normalization_layers.batch_norm(
        inputs2,
        axis=-1,
        momentum=0.9,
        epsilon=epsilon,
        training=training,
        name='bn',
        reuse=True)

    # Last 2 update ops
    updates = ops.get_collection(ops.GraphKeys.UPDATE_OPS)[-2:]
    all_vars = dict([(v.name, v) for v in variables.global_variables()])
    moving_mean = all_vars['bn/moving_mean:0']
    moving_variance = all_vars['bn/moving_variance:0']
    beta = all_vars['bn/beta:0']
    gamma = all_vars['bn/gamma:0']

    with self.cached_session() as sess:
      # Test training with placeholder learning phase.
      self.evaluate(variables.global_variables_initializer())
      for _ in range(100):
        np_output, _, _ = sess.run(
            [outputs2] + updates, feed_dict={training: True})

      # Verify that the statistics are updated during training.
      np_moving_mean, np_moving_var = self.evaluate(
          [moving_mean, moving_variance])
      np_inputs = self.evaluate(inputs2)
      np_mean = np.mean(np_inputs, axis=(0, 1, 2))
      np_std = np.std(np_inputs, axis=(0, 1, 2))
      np_variance = np.square(np_std)
      self.assertAllClose(np_mean, np_moving_mean, atol=1e-2)
      self.assertAllClose(np_variance, np_moving_var, atol=1e-2)

      # Verify that the axis is normalized during training.
      np_gamma, np_beta = self.evaluate([gamma, beta])
      np_gamma = np.reshape(np_gamma, (1, 1, 1, 6))
      np_beta = np.reshape(np_beta, (1, 1, 1, 6))
      normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta
      self.assertAlmostEqual(np.mean(normed_np_output), 0., places=2)
      self.assertAlmostEqual(np.std(normed_np_output), 1., places=1)

      # Test inference with placeholder learning phase.
      np_output = sess.run(outputs2, feed_dict={training: False})

      # Verify that the axis is normalized during inference.
      normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta
      self.assertAlmostEqual(np.mean(normed_np_output), 0., places=2)
      self.assertAlmostEqual(np.std(normed_np_output), 1., places=1)

  def testFunctionalReuseFromScope(self):
    """Reusing via an enclosing variable scope must not create new variables."""
    inputs = variables.Variable(
        np.random.random((5, 4, 3, 6)), dtype=dtypes.float32)
    epsilon = 1e-3
    training = array_ops.placeholder(dtype='bool')
    with variable_scope.variable_scope('scope'):
      _ = normalization_layers.batch_norm(
          inputs, axis=-1, momentum=0.9, epsilon=epsilon, training=training)
      # inputs variable + gamma, beta, moving_mean, moving_variance = 5.
      self.assertEqual(len(variables.global_variables()), 5)
    with variable_scope.variable_scope('scope', reuse=True):
      _ = normalization_layers.batch_norm(
          inputs, axis=-1, momentum=0.9, epsilon=epsilon, training=training)
      # Still 5: the second call reused the first call's variables.
      self.assertEqual(len(variables.global_variables()), 5)

  def testNoCenter(self):
    """center=False removes beta from the trainable variables."""
    bn = normalization_layers.BatchNormalization(axis=1, center=False)
    inputs = random_ops.random_uniform((5, 4, 3), seed=1)
    training = array_ops.placeholder(dtype='bool')
    outputs = bn.apply(inputs, training=training)

    # Verify shape.
    self.assertListEqual(outputs.get_shape().as_list(), [5, 4, 3])

    # Verify layer attributes.
    self.assertEqual(len(bn.updates), 2)
    self.assertEqual(len(bn.variables), 3)
    self.assertEqual(len(bn.trainable_variables), 1)
    self.assertEqual(len(bn.non_trainable_variables), 2)

  def testNoScale(self):
    """scale=False removes gamma from the trainable variables."""
    bn = normalization_layers.BatchNormalization(axis=1, scale=False)
    inputs = random_ops.random_uniform((5, 4, 3), seed=1)
    training = array_ops.placeholder(dtype='bool')
    outputs = bn.apply(inputs, training=training)

    # Verify shape.
    self.assertListEqual(outputs.get_shape().as_list(), [5, 4, 3])

    # Verify layer attributes.
    self.assertEqual(len(bn.updates), 2)
    self.assertEqual(len(bn.variables), 3)
    self.assertEqual(len(bn.trainable_variables), 1)
    self.assertEqual(len(bn.non_trainable_variables), 2)

  def testRegularizers(self):
    """beta/gamma regularizers each contribute exactly one loss term."""
    reg = lambda x: 0.1 * math_ops.reduce_sum(x)
    bn = normalization_layers.BatchNormalization(axis=1, beta_regularizer=reg)
    inputs = random_ops.random_uniform((5, 4, 3), seed=1)
    training = array_ops.placeholder(dtype='bool')
    _ = bn.apply(inputs, training=training)
    self.assertEqual(len(bn.losses), 1)

    bn = normalization_layers.BatchNormalization(axis=1, gamma_regularizer=reg)
    inputs = random_ops.random_uniform((5, 4, 3), seed=1)
    training = array_ops.placeholder(dtype='bool')
    _ = bn.apply(inputs, training=training)
    self.assertEqual(len(bn.losses), 1)

  def testConstraints(self):
    """Constraint callables are stored on the layer as passed."""
    g_constraint = lambda x: x / math_ops.reduce_sum(x)
    b_constraint = lambda x: x / math_ops.reduce_max(x)
    bn = normalization_layers.BatchNormalization(axis=1,
                                                 gamma_constraint=g_constraint,
                                                 beta_constraint=b_constraint)
    inputs = random_ops.random_uniform((5, 4, 3), seed=1)
    bn(inputs)
    self.assertEqual(bn.gamma_constraint, g_constraint)
    self.assertEqual(bn.beta_constraint, b_constraint)

  def testRenorm(self):
    """Batch renormalization: compares against a NumPy renorm reference."""
    shape = (4, 3)
    xt = array_ops.placeholder(dtypes.float32, shape)
    momentum = 0.99
    renorm_momentum = 0.8
    rmax = 1.1
    rmin = 0.9
    dmax = 0.1
    gamma = 2.
    beta = 3.
    epsilon = 0.001
    bn = normalization_layers.BatchNormalization(
        axis=1,
        gamma_initializer=init_ops.constant_initializer(gamma),
        beta_initializer=init_ops.constant_initializer(beta),
        epsilon=epsilon,
        momentum=momentum,
        renorm=True,
        renorm_clipping={'rmax': rmax, 'rmin': rmin, 'dmax': dmax},
        renorm_momentum=renorm_momentum)
    training = array_ops.placeholder(dtypes.bool)
    yt = bn.apply(xt, training=training)

    # NumPy reference state mirroring the layer's moving/renorm statistics.
    moving_mean = 0.
    moving_variance = 1.
    renorm_mean = renorm_stddev = 0.
    renorm_weight = 0.
    with self.session(use_gpu=True) as sess:
      self.evaluate(variables.global_variables_initializer())
      for _ in range(5):
        x = np.random.random(shape)

        mean = x.mean(0)
        stddev = np.sqrt(x.var(0) + epsilon)
        # Debias the renorm statistics by the accumulated weight.
        adj_mean = renorm_mean + (1. - renorm_weight) * mean
        adj_stddev = renorm_stddev + (1. - renorm_weight) * stddev
        r = (stddev / adj_stddev).clip(rmin, rmax)
        d = ((mean - adj_mean) / adj_stddev).clip(-dmax, dmax)
        y_train = ((x - mean) / stddev * r + d) * gamma + beta
        renorm_mean += (mean - renorm_mean) * (1. - renorm_momentum)
        renorm_stddev += (stddev - renorm_stddev) * (1. - renorm_momentum)
        renorm_weight += (1. - renorm_weight) * (1. - renorm_momentum)
        moving_mean += (renorm_mean / renorm_weight -
                        moving_mean) * (1. - momentum)
        moving_variance += ((renorm_stddev / renorm_weight) ** 2 - epsilon -
                            moving_variance) * (1. - momentum)

        y_test = ((x - moving_mean) /
                  (moving_variance + epsilon) ** 0.5 * gamma) + beta

        yt_val_train, _, _ = sess.run([yt] + bn.updates,
                                      feed_dict={xt: x, training: True})
        yt_val_test, _, _ = sess.run([yt] + bn.updates,
                                     feed_dict={xt: x, training: False})
        self.assertAllClose(y_train, yt_val_train, atol=1e-5)
        self.assertAllClose(y_test, yt_val_test, atol=1e-5)

  def testAdjustment(self):
    """Custom `adjustment` hook scales/shifts the normalized output."""
    shape = (4, 3)
    xt = array_ops.placeholder(dtypes.float32, shape)
    momentum = 0.99
    gamma = 2.
    beta = 3.
    epsilon = 0.001
    # Random per-channel adjustment, returned unchanged for every input.
    adjust_scale = random_ops.random_uniform(shape[-1:], 0.5, 1.5)
    adjust_bias = random_ops.random_uniform(shape[-1:], -.2, .2)
    bn = normalization_layers.BatchNormalization(
        axis=1,
        gamma_initializer=init_ops.constant_initializer(gamma),
        beta_initializer=init_ops.constant_initializer(beta),
        epsilon=epsilon,
        momentum=momentum,
        adjustment=lambda _: (adjust_scale, adjust_bias))
    training = array_ops.placeholder(dtypes.bool)
    yt = bn.apply(xt, training=training)

    moving_mean = 0.
    moving_variance = 1.
    with self.session(use_gpu=True) as sess:
      self.evaluate(variables.global_variables_initializer())
      for _ in range(5):
        x = np.random.random(shape)
        # Fetch the adjustment tensors in the same run so the reference
        # computation uses the exact values applied by the layer.
        yt_val_train, adj_scale_val, adj_bias_val = sess.run(
            [yt, adjust_scale, adjust_bias] + bn.updates,
            feed_dict={xt: x, training: True})[:3]
        yt_val_test = sess.run([yt] + bn.updates,
                               feed_dict={xt: x, training: False})[0]

        mean = x.mean(0)
        variance = x.var(0)
        y_train = (((x - mean) / (variance + epsilon) ** 0.5) * adj_scale_val
                   + adj_bias_val) * gamma + beta
        moving_mean += (mean - moving_mean) * (1. - momentum)
        moving_variance += (variance - moving_variance) * (1. - momentum)

        y_test = ((x - moving_mean) /
                  (moving_variance + epsilon) ** 0.5 * gamma) + beta

        self.assertAllClose(y_train, yt_val_train, atol=1e-5)
        self.assertAllClose(y_test, yt_val_test, atol=1e-5)

  def testRenormWithAdjustment(self):
    """Renorm and a custom adjustment hook combined."""
    shape = (4, 3)
    xt = array_ops.placeholder(dtypes.float32, shape)
    momentum = 0.99
    renorm_momentum = 0.8
    rmax = 1.1
    rmin = 0.9
    dmax = 0.1
    gamma = 2.
    beta = 3.
    epsilon = 0.001
    adjust_scale = random_ops.random_uniform(shape[-1:], 0.5, 1.5)
    adjust_bias = random_ops.random_uniform(shape[-1:], -.2, .2)
    bn = normalization_layers.BatchNormalization(
        axis=1,
        gamma_initializer=init_ops.constant_initializer(gamma),
        beta_initializer=init_ops.constant_initializer(beta),
        epsilon=epsilon,
        momentum=momentum,
        renorm=True,
        renorm_clipping={'rmax': rmax, 'rmin': rmin, 'dmax': dmax},
        renorm_momentum=renorm_momentum,
        adjustment=lambda _: (adjust_scale, adjust_bias))
    training = array_ops.placeholder(dtypes.bool)
    yt = bn.apply(xt, training=training)

    # NumPy reference state mirroring the layer's moving/renorm statistics.
    moving_mean = 0.
    moving_variance = 1.
    renorm_mean = renorm_stddev = 0.
    renorm_weight = 0.
    with self.session(use_gpu=True) as sess:
      self.evaluate(variables.global_variables_initializer())
      for _ in range(5):
        x = np.random.random(shape)
        yt_val_train, adj_scale_val, adj_bias_val = sess.run(
            [yt, adjust_scale, adjust_bias] + bn.updates,
            feed_dict={xt: x, training: True})[:3]
        yt_val_test = sess.run([yt] + bn.updates,
                               feed_dict={xt: x, training: False})[0]

        mean = x.mean(0)
        stddev = np.sqrt(x.var(0) + epsilon)
        adj_mean = renorm_mean + (1. - renorm_weight) * mean
        adj_stddev = renorm_stddev + (1. - renorm_weight) * stddev
        r = (stddev / adj_stddev).clip(rmin, rmax)
        d = ((mean - adj_mean) / adj_stddev).clip(-dmax, dmax)
        y_train = (((x - mean) / stddev * r + d) * adj_scale_val
                   + adj_bias_val) * gamma + beta
        renorm_mean += (mean - renorm_mean) * (1. - renorm_momentum)
        renorm_stddev += (stddev - renorm_stddev) * (1. - renorm_momentum)
        renorm_weight += (1. - renorm_weight) * (1. - renorm_momentum)
        moving_mean += (renorm_mean / renorm_weight -
                        moving_mean) * (1. - momentum)
        moving_variance += ((renorm_stddev / renorm_weight) ** 2 - epsilon -
                            moving_variance) * (1.
                                                - momentum)

        y_test = ((x - moving_mean) /
                  (moving_variance + epsilon) ** 0.5 * gamma) + beta

        self.assertAllClose(y_train, yt_val_train, atol=1e-5)
        self.assertAllClose(y_test, yt_val_test, atol=1e-5)

  def testGhostBNNegativeVirtualBatch(self):
    """A negative virtual_batch_size must be rejected."""
    shape = [6, 5, 4, 3]
    inp = random_ops.random_uniform(shape, seed=1)

    with self.assertRaises(ValueError):
      normalization_layers.batch_normalization(
          inp, virtual_batch_size=-1)

  def testGhostBNVirtualBatchFull(self):
    """virtual_batch_size == batch size is equivalent to no ghost batching."""
    shape = [6, 5, 4, 3]
    inp = random_ops.random_uniform(shape, seed=1)
    out1 = normalization_layers.batch_normalization(inp)
    out2 = normalization_layers.batch_normalization(
        inp, virtual_batch_size=6)

    self.assertListEqual(
        out1.shape.as_list(), out2.shape.as_list())

    with self.session(use_gpu=True) as sess:
      self.evaluate(variables.global_variables_initializer())

      x = np.random.random(shape)
      y1, y2 = sess.run([out1, out2], feed_dict={inp: x})

      self.assertAllClose(y1, y2, atol=1e-5)

  def testGhostBNInputOutputShapesMatch(self):
    """Ghost batch norm must preserve the input shape."""
    shape = [6, 4, 3]
    inp = random_ops.random_uniform(shape, seed=1)
    out = normalization_layers.batch_normalization(
        inp, virtual_batch_size=3)
    self.assertListEqual(out.shape.as_list(), shape)

  def testGhostBNUnknownBatchSize(self):
    """Ghost batch norm works with an unknown (None) static batch dimension."""
    np_shape = [10, 5, 4]
    tf_shape = [None, 5, 4]
    inp = array_ops.placeholder(dtypes.float32, tf_shape)
    out = normalization_layers.batch_normalization(
        inp, virtual_batch_size=2)

    with self.session(use_gpu=True) as sess:
      self.evaluate(variables.global_variables_initializer())

      x = np.random.random(np_shape)
      y = sess.run(out, feed_dict={inp: x})

      self.assertListEqual(list(y.shape), np_shape)

  def testGhostBN2Dims(self):
    """Ghost batch norm on a 2D input, checked against a NumPy reference."""
    shape = [6, 2]
    virtual_batch_size = 3
    beta = 2.
    gamma = 3.
    momentum = 0.8
    epsilon = 1e-3
    # Reference moving statistics, one slice per virtual batch position.
    moving_means = np.zeros([2, 2], dtype=np.float32)
    moving_vars = np.ones([2, 2], dtype=np.float32)

    inp = array_ops.placeholder(dtypes.float32, shape)
    is_training = array_ops.placeholder(dtypes.bool)
    bn = normalization_layers.BatchNormalization(
        momentum=momentum,
        epsilon=epsilon,
        beta_initializer=init_ops.constant_initializer(beta),
        gamma_initializer=init_ops.constant_initializer(gamma),
        virtual_batch_size=virtual_batch_size)
    out = bn.apply(inp, training=is_training)
    # Reshape target: [virtual batch, num ghost batches, features].
    ghost_shape = ([virtual_batch_size,
                    shape[0] // virtual_batch_size, shape[1]])

    with self.session(use_gpu=True) as sess:
      self.evaluate(variables.global_variables_initializer())
      for _ in range(5):
        x = np.random.random(shape)

        sub_batched = np.reshape(x, ghost_shape)
        means = np.mean(sub_batched, axis=0, keepdims=True)
        variances = np.var(sub_batched, axis=0, keepdims=True)

        # Moving stats are updated with the average over ghost batches.
        avg_means = np.mean(means, axis=1, keepdims=True)
        avg_variances = np.mean(variances, axis=1, keepdims=True)

        moving_means = moving_means * momentum + avg_means * (1. - momentum)
        moving_vars = moving_vars * momentum + avg_variances * (1. - momentum)

        y_train = ((sub_batched - means) /
                   (variances + epsilon) ** 0.5 * gamma) + beta
        y_test = ((sub_batched - moving_means) /
                  (moving_vars + epsilon) ** 0.5 * gamma) + beta

        y_train = np.reshape(y_train, shape)
        y_test = np.reshape(y_test, shape)

        y_val_train, _, _ = sess.run([out] + bn.updates,
                                     feed_dict={inp: x, is_training: True})
        y_val_test = sess.run(out, feed_dict={inp: x, is_training: False})

        self.assertAllClose(y_train, y_val_train, atol=1e-5)
        self.assertAllClose(y_test, y_val_test, atol=1e-5)

  def testGhostBN4DimsAxis3(self):
    """Ghost batch norm on a 4D channels-last input."""
    shape = [6, 10, 10, 3]
    virtual_batch_size = 2
    beta = 2.
    gamma = 3.
    momentum = 0.8
    epsilon = 1e-3
    moving_means = np.zeros([1, 1, 1, 1, 3], dtype=np.float32)
    moving_vars = np.ones([1, 1, 1, 1, 3], dtype=np.float32)

    inp = array_ops.placeholder(dtypes.float32, shape)
    is_training = array_ops.placeholder(dtypes.bool)
    bn = normalization_layers.BatchNormalization(
        axis=3,
        momentum=momentum,
        epsilon=epsilon,
        beta_initializer=init_ops.constant_initializer(beta),
        gamma_initializer=init_ops.constant_initializer(gamma),
        virtual_batch_size=virtual_batch_size)
    out = bn.apply(inp, training=is_training)
    # Prepend the ghost-batch dimension: [vbs, batch // vbs, H, W, C].
    ghost_shape = ([virtual_batch_size,
                    shape[0] // virtual_batch_size] + shape[1:])

    with self.session(use_gpu=True) as sess:
      self.evaluate(variables.global_variables_initializer())
      for _ in range(5):
        x = np.random.random(shape)

        sub_batched = np.reshape(x, ghost_shape)
        means = np.mean(sub_batched, axis=(0, 2, 3), keepdims=True)
        variances = np.var(sub_batched, axis=(0, 2, 3), keepdims=True)

        avg_means = np.mean(means, axis=1, keepdims=True)
        avg_variances = np.mean(variances, axis=1, keepdims=True)

        moving_means = moving_means * momentum + avg_means * (1. - momentum)
        moving_vars = moving_vars * momentum + avg_variances * (1. - momentum)

        y_train = ((sub_batched - means) /
                   (variances + epsilon) ** 0.5 * gamma) + beta
        y_test = ((sub_batched - moving_means) /
                  (moving_vars + epsilon) ** 0.5 * gamma) + beta

        y_train = np.reshape(y_train, shape)
        y_test = np.reshape(y_test, shape)

        y_val_train, _, _ = sess.run([out] + bn.updates,
                                     feed_dict={inp: x, is_training: True})
        y_val_test = sess.run(out, feed_dict={inp: x, is_training: False})

        self.assertAllClose(y_train, y_val_train, atol=1e-2)
        self.assertAllClose(y_test, y_val_test, atol=1e-2)

  def testGhostBN4DimsAxis1(self):
    """Ghost batch norm on a 4D channels-first input (non-fused)."""
    shape = [6, 3, 10, 10]
    virtual_batch_size = 2
    beta = 2.
    gamma = 3.
    momentum = 0.8
    epsilon = 1e-3
    moving_means = np.zeros([1, 1, 3, 1, 1], dtype=np.float32)
    moving_vars = np.ones([1, 1, 3, 1, 1], dtype=np.float32)

    inp = array_ops.placeholder(dtypes.float32, shape)
    is_training = array_ops.placeholder(dtypes.bool)
    bn = normalization_layers.BatchNormalization(
        axis=1,
        momentum=momentum,
        epsilon=epsilon,
        beta_initializer=init_ops.constant_initializer(beta),
        gamma_initializer=init_ops.constant_initializer(gamma),
        virtual_batch_size=virtual_batch_size,
        fused=False)      # NCHW is unsupported by CPU fused batch norm
    out = bn.apply(inp, training=is_training)
    ghost_shape = ([virtual_batch_size,
                    shape[0] // virtual_batch_size] + shape[1:])

    with self.session(use_gpu=True) as sess:
      self.evaluate(variables.global_variables_initializer())
      for _ in range(5):
        x = np.random.random(shape)

        sub_batched = np.reshape(x, ghost_shape)
        means = np.mean(sub_batched, axis=(0, 3, 4), keepdims=True)
        variances = np.var(sub_batched, axis=(0, 3, 4), keepdims=True)

        avg_means = np.mean(means, axis=1, keepdims=True)
        avg_variances = np.mean(variances, axis=1, keepdims=True)

        moving_means = moving_means * momentum + avg_means * (1. - momentum)
        moving_vars = moving_vars * momentum + avg_variances * (1.
                                                               - momentum)

        y_train = ((sub_batched - means) /
                   (variances + epsilon) ** 0.5 * gamma) + beta
        y_test = ((sub_batched - moving_means) /
                  (moving_vars + epsilon) ** 0.5 * gamma) + beta

        y_train = np.reshape(y_train, shape)
        y_test = np.reshape(y_test, shape)

        y_val_train, _, _ = sess.run([out] + bn.updates,
                                     feed_dict={inp: x, is_training: True})
        y_val_test = sess.run(out, feed_dict={inp: x, is_training: False})

        self.assertAllClose(y_train, y_val_train, atol=1e-2)
        self.assertAllClose(y_test, y_val_test, atol=1e-2)

  def testMultiAxisInvalid(self):
    """Out-of-range or duplicate axis lists must be rejected."""
    shape = [6, 5, 4, 3]
    inp = random_ops.random_uniform(shape, seed=1)

    with self.assertRaises(ValueError):
      normalization_layers.batch_normalization(
          inp, axis=[1, 4])  # out of bounds

    with self.assertRaises(ValueError):
      normalization_layers.batch_normalization(
          inp, axis=[-5, 1])  # out of bounds

    with self.assertRaises(ValueError):
      normalization_layers.batch_normalization(
          inp, axis=[1, 2, 1])  # duplicate

  def test3DInputMultiAxis12(self):
    """Normalizes jointly over axes 1 and 2 of a 3D input."""
    epsilon = 1e-3
    bn = normalization_layers.BatchNormalization(
        axis=[1, 2], epsilon=epsilon, momentum=0.9)
    inputs = variables.Variable(
        np.random.random((5, 4, 3)) + 100, dtype=dtypes.float32)
    training = array_ops.placeholder(dtype='bool')
    outputs = bn.apply(inputs, training=training)

    with self.cached_session() as sess:
      # Test training with placeholder learning phase.
      self.evaluate(variables.global_variables_initializer())
      np_gamma, np_beta = self.evaluate([bn.gamma, bn.beta])
      for _ in range(100):
        np_output, _, _ = sess.run(
            [outputs] + bn.updates, feed_dict={training: True})
        # Verify that the axis is normalized during training.
        normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta
        self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1)
        self.assertAlmostEqual(np.std(normed_np_output), 1., places=1)

      # Verify that the statistics are updated during training.
moving_mean, moving_var = self.evaluate( [bn.moving_mean, bn.moving_variance]) np_inputs = self.evaluate(inputs) mean = np.mean(np_inputs, axis=0, keepdims=True) std = np.std(np_inputs, axis=0, keepdims=True) variance = np.square(std) self.assertAllClose(mean, moving_mean, atol=1e-2) self.assertAllClose(variance, moving_var, atol=1e-2) # Test inference with placeholder learning phase. np_output = sess.run(outputs, feed_dict={training: False}) # Verify that the axis is normalized during inference. normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1) self.assertAlmostEqual(np.std(normed_np_output), 1., places=1) def test5DInputMultiAxis123(self): epsilon = 1e-3 bn = normalization_layers.BatchNormalization( axis=[1, 2, 3], epsilon=epsilon, momentum=0.9) inputs = variables.Variable( np.random.random((5, 3, 4, 4, 3)) + 100, dtype=dtypes.float32) training = array_ops.placeholder(dtype='bool') outputs = bn.apply(inputs, training=training) with self.cached_session() as sess: # Test training with placeholder learning phase. self.evaluate(variables.global_variables_initializer()) np_gamma, np_beta = self.evaluate([bn.gamma, bn.beta]) for _ in range(100): np_output, _, _ = sess.run([outputs] + bn.updates, feed_dict={training: True}) # Verify that the axis is normalized during training. normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1) self.assertAlmostEqual(np.std(normed_np_output), 1., places=1) # Verify that the statistics are updated during training. 
moving_mean, moving_var = self.evaluate( [bn.moving_mean, bn.moving_variance]) np_inputs = self.evaluate(inputs) mean = np.mean(np_inputs, axis=(0, 4), keepdims=True) std = np.std(np_inputs, axis=(0, 4), keepdims=True) variance = np.square(std) self.assertAllClose(mean, moving_mean, atol=1e-2) self.assertAllClose(variance, moving_var, atol=1e-2) # Test inference with placeholder learning phase. np_output = sess.run(outputs, feed_dict={training: False}) # Verify that the axis is normalized during inference. normed_np_output = ((np_output - epsilon) * np_gamma) + np_beta self.assertAlmostEqual(np.mean(normed_np_output), 0., places=1) self.assertAlmostEqual(np.std(normed_np_output), 1., places=1) def testGhostBN5DimsMultiAxis14(self): shape = [6, 3, 10, 10, 4] virtual_batch_size = 3 beta = 2. gamma = 3. momentum = 0.8 epsilon = 1e-3 moving_means = np.zeros([1, 1, 3, 1, 1, 4], dtype=np.float32) moving_vars = np.ones([1, 1, 3, 1, 1, 4], dtype=np.float32) inp = array_ops.placeholder(dtypes.float32, shape) is_training = array_ops.placeholder(dtypes.bool) bn = normalization_layers.BatchNormalization( axis=[1, 4], momentum=momentum, epsilon=epsilon, beta_initializer=init_ops.constant_initializer(beta), gamma_initializer=init_ops.constant_initializer(gamma), virtual_batch_size=virtual_batch_size, fused=False) out = bn.apply(inp, training=is_training) ghost_shape = ([virtual_batch_size, shape[0] // virtual_batch_size] + shape[1:]) with self.session(use_gpu=True) as sess: self.evaluate(variables.global_variables_initializer()) for _ in range(5): x = np.random.random(shape) sub_batched = np.reshape(x, ghost_shape) means = np.mean(sub_batched, axis=(0, 3, 4), keepdims=True) variances = np.var(sub_batched, axis=(0, 3, 4), keepdims=True) avg_means = np.mean(means, axis=1, keepdims=True) avg_variances = np.mean(variances, axis=1, keepdims=True) moving_means = moving_means * momentum + avg_means * (1. - momentum) moving_vars = moving_vars * momentum + avg_variances * (1. 
- momentum) y_train = ((sub_batched - means) / (variances + epsilon) ** 0.5 * gamma) + beta y_test = ((sub_batched - moving_means) / (moving_vars + epsilon) ** 0.5 * gamma) + beta y_train = np.reshape(y_train, shape) y_test = np.reshape(y_test, shape) y_val_train, _, _ = sess.run([out] + bn.updates, feed_dict={inp: x, is_training: True}) y_val_test = sess.run(out, feed_dict={inp: x, is_training: False}) self.assertAllClose(y_train, y_val_train, atol=1e-2) self.assertAllClose(y_test, y_val_test, atol=1e-2) if __name__ == '__main__': test.main()
apache-2.0
GunoH/intellij-community
python/helpers/py3only/docutils/readers/pep.py
44
1536
# $Id: pep.py 7320 2012-01-19 22:33:02Z milde $ # Author: David Goodger <goodger@python.org> # Copyright: This module has been placed in the public domain. """ Python Enhancement Proposal (PEP) Reader. """ __docformat__ = 'reStructuredText' from docutils.parsers import rst from docutils.readers import standalone from docutils.transforms import peps, frontmatter class Reader(standalone.Reader): supported = ('pep',) """Contexts this reader supports.""" settings_spec = ( 'PEP Reader Option Defaults', 'The --pep-references and --rfc-references options (for the ' 'reStructuredText parser) are on by default.', ()) config_section = 'pep reader' config_section_dependencies = ('readers', 'standalone reader') def get_transforms(self): transforms = standalone.Reader.get_transforms(self) # We have PEP-specific frontmatter handling. transforms.remove(frontmatter.DocTitle) transforms.remove(frontmatter.SectionSubTitle) transforms.remove(frontmatter.DocInfo) transforms.extend([peps.Headers, peps.Contents, peps.TargetNotes]) return transforms settings_default_overrides = {'pep_references': 1, 'rfc_references': 1} inliner_class = rst.states.Inliner def __init__(self, parser=None, parser_name=None): """`parser` should be ``None``.""" if parser is None: parser = rst.Parser(rfc2822=True, inliner=self.inliner_class()) standalone.Reader.__init__(self, parser, '')
apache-2.0
jpotterm/django-fluent-contents
fluent_contents/analyzer.py
2
1535
""" Analyze the templates for placeholders of this module. """ from template_analyzer.djangoanalyzer import get_node_instances from fluent_contents.models import PlaceholderData from fluent_contents.templatetags.fluent_contents_tags import PagePlaceholderNode __all__ = ('get_template_placeholder_data',) def get_template_placeholder_data(template): """ Return the placeholders found in a template, wrapped in a :class:`~fluent_contents.models.containers.PlaceholderData` object. This function looks for the :class:`~fluent_contents.templatetags.fluent_contents_tags.PagePlaceholderNode` nodes in the template, using the :func:`~template_analyzer.djangoanalyzer.get_node_instances` function of `django-template-analyzer <https://github.com/edoburu/django-template-analyzer>`_. :param template: The Template object, or nodelist to scan. :rtype: list of :class:`~fluent_contents.models.PlaceholderData` """ # Find the instances. nodes = get_node_instances(template, PagePlaceholderNode) # Avoid duplicates, wrap in a class. names = set() result = [] for pageplaceholdernode in nodes: data = PlaceholderData( slot=pageplaceholdernode.get_slot(), title=pageplaceholdernode.get_title(), role=pageplaceholdernode.get_role(), fallback_language=pageplaceholdernode.get_fallback_language(), ) if data.slot not in names: result.append(data) names.add(data.slot) return result
apache-2.0
Lyrositor/moul-scripts
Python/system/encodings/uu_codec.py
383
3738
""" Python 'uu_codec' Codec - UU content transfer encoding Unlike most of the other codecs which target Unicode, this codec will return Python string objects for both encode and decode. Written by Marc-Andre Lemburg (mal@lemburg.com). Some details were adapted from uu.py which was written by Lance Ellinghouse and modified by Jack Jansen and Fredrik Lundh. """ import codecs, binascii ### Codec APIs def uu_encode(input,errors='strict',filename='<data>',mode=0666): """ Encodes the object input and returns a tuple (output object, length consumed). errors defines the error handling to apply. It defaults to 'strict' handling which is the only currently supported error handling for this codec. """ assert errors == 'strict' from cStringIO import StringIO from binascii import b2a_uu # using str() because of cStringIO's Unicode undesired Unicode behavior. infile = StringIO(str(input)) outfile = StringIO() read = infile.read write = outfile.write # Encode write('begin %o %s\n' % (mode & 0777, filename)) chunk = read(45) while chunk: write(b2a_uu(chunk)) chunk = read(45) write(' \nend\n') return (outfile.getvalue(), len(input)) def uu_decode(input,errors='strict'): """ Decodes the object input and returns a tuple (output object, length consumed). input must be an object which provides the bf_getreadbuf buffer slot. Python strings, buffer objects and memory mapped files are examples of objects providing this slot. errors defines the error handling to apply. It defaults to 'strict' handling which is the only currently supported error handling for this codec. Note: filename and file mode information in the input data is ignored. 
""" assert errors == 'strict' from cStringIO import StringIO from binascii import a2b_uu infile = StringIO(str(input)) outfile = StringIO() readline = infile.readline write = outfile.write # Find start of encoded data while 1: s = readline() if not s: raise ValueError, 'Missing "begin" line in input data' if s[:5] == 'begin': break # Decode while 1: s = readline() if not s or \ s == 'end\n': break try: data = a2b_uu(s) except binascii.Error, v: # Workaround for broken uuencoders by /Fredrik Lundh nbytes = (((ord(s[0])-32) & 63) * 4 + 5) / 3 data = a2b_uu(s[:nbytes]) #sys.stderr.write("Warning: %s\n" % str(v)) write(data) if not s: raise ValueError, 'Truncated input data' return (outfile.getvalue(), len(input)) class Codec(codecs.Codec): def encode(self,input,errors='strict'): return uu_encode(input,errors) def decode(self,input,errors='strict'): return uu_decode(input,errors) class IncrementalEncoder(codecs.IncrementalEncoder): def encode(self, input, final=False): return uu_encode(input, self.errors)[0] class IncrementalDecoder(codecs.IncrementalDecoder): def decode(self, input, final=False): return uu_decode(input, self.errors)[0] class StreamWriter(Codec,codecs.StreamWriter): pass class StreamReader(Codec,codecs.StreamReader): pass ### encodings module API def getregentry(): return codecs.CodecInfo( name='uu', encode=uu_encode, decode=uu_decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter, )
gpl-3.0
gpitel/pyjs
examples/gcharttestapp/GChartExample21.py
6
3818
from pyjamas.ui.Button import Button
from pyjamas.ui.HTML import HTML
from pyjamas.ui.HorizontalPanel import HorizontalPanel

from pyjamas.chart.GChart import GChart
from pyjamas.chart import AnnotationLocation
from pyjamas.chart import SymbolType

"""*
*
* In this example, whenever the user clicks on a point, a
* hover widget that allows them to increase or decrease
* the y value of that point appears below the chart.
* <p>
*
* The chart uses <tt>setHoverTouchingEnabled(False)</tt> to
* disable GChart's "auto-select-on-mouseover" feature. This
* assures that, when the user clicks on a point, that point
* remains selected, as required, when their mouse moves
* below the chart to interact with the y-value-changing
* hover widget.<p>
*
* In general, by disabling hover touching in this manner,
* you can make a GChart act much like a single-selection
* listbox, with points playing the role of list items.
* <p>
*
* The screen shot shows what the chart looks like after the
* user clicks on a center bar, and then clicks the
* "Increment Y" button a few times.
*
*
"""

# hover widget that changes y value of selected point
class YChanger (HorizontalPanel):
    # Panel shown as the hover widget: [Increment Y] <coords> [Decrement Y].
    # It keeps a reference to the chart so button clicks can mutate the
    # currently touched (selected) point.

    def __init__(self, chart):
        self.chart = chart
        HorizontalPanel.__init__(self)
        # y-changing, x,y coordinate displaying, widget
        self.incrementY = Button("Increment Y")
        self.coordinates = HTML("");    # x,y of selected point
        self.decrementY = Button("Decrement Y")
        self.incrementY.addClickListener(self)
        self.decrementY.addClickListener(self)
        self.add(self.incrementY)
        self.add(self.coordinates)
        self.add(self.decrementY)

    def onClick(self, sender):
        # Shift the selected point's y by +/-1 depending on which
        # button fired, then redraw the chart.
        if sender == self.incrementY:
            self.chart.getTouchedPoint().setY(
                    self.chart.getTouchedPoint().getY() + 1)
        else:
            self.chart.getTouchedPoint().setY(
                    self.chart.getTouchedPoint().getY() - 1)
        self.chart.update()

    # The 2 HoverUpdateable interface methods:
    def hoverCleanup(self, hoveredAwayFrom):
        pass

    def hoverUpdate(self, hoveredOver):
        # update (x,y) display when they click point
        self.coordinates.setHTML(hoveredOver.getHovertext())


class GChartExample21(GChart):
    # Bar chart (y = 2*x) whose points pop up a YChanger hover widget
    # below the chart when clicked.

    def __init__(self):
        GChart.__init__(self)
        self.setChartSize(300, 300)
        self.setBorderStyle("none")
        """
        * So selection changing requires the user to click
        * (not just mouseover a point). This allows the
        * selection to stay put while user moves to click the
        * y-changing buttons.
        *
        """
        self.setHoverTouchingEnabled(False)

        self.addCurve()
        # make a y-changer pop up when they click a point
        self.getCurve().getSymbol().setHoverWidget(YChanger(self))
        # Configure hover annotation so it appears below chart
        self.getCurve().getSymbol().setHoverAnnotationSymbolType(
                SymbolType.ANCHOR_SOUTH)
        self.getCurve().getSymbol().setHoverLocation(AnnotationLocation.SOUTH)
        self.getCurve().getSymbol().setHoverYShift(-30)
        # 3px, external point selection border
        self.getCurve().getSymbol().setHoverSelectionBorderWidth(-3)
        # configure curve as a baseline-based bar chart
        self.getCurve().getSymbol().setSymbolType(
                SymbolType.VBAR_BASELINE_EAST)
        self.getCurve().getSymbol().setModelWidth(1)
        self.getCurve().getSymbol().setBorderWidth(1)
        self.getCurve().getSymbol().setBorderColor("black")
        self.getCurve().getSymbol().setBackgroundColor("blue")
        # add a simple y = 2*x curve
        for iPoint in range(10):
            self.getCurve().addPoint(iPoint, 2*iPoint)
apache-2.0
jsirois/pants
src/python/pants/backend/project_info/list_roots_test.py
1
1726
# Copyright 2019 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). from typing import List, Optional import pytest from pants.backend.project_info import list_roots from pants.backend.project_info.list_roots import Roots from pants.testutil.rule_runner import RuleRunner @pytest.fixture def rule_runner() -> RuleRunner: return RuleRunner(rules=list_roots.rules()) def assert_roots( rule_runner: RuleRunner, configured: List[str], *, marker_files: Optional[List[str]] = None, expected: Optional[List[str]] = None, ) -> None: result = rule_runner.run_goal_rule( Roots, args=[ f"--source-root-patterns={configured}", f"--source-marker-filenames={marker_files or []}", ], ) assert result.stdout.splitlines() == sorted(expected or configured) def test_single_source_root(rule_runner: RuleRunner) -> None: rule_runner.create_dir("fakeroot") assert_roots(rule_runner, ["fakeroot"]) def test_multiple_source_roots(rule_runner: RuleRunner) -> None: rule_runner.create_dir("fakerootA") rule_runner.create_dir("fakerootB") assert_roots(rule_runner, ["fakerootA", "fakerootB"]) def test_buildroot_is_source_root(rule_runner: RuleRunner) -> None: rule_runner.create_dir("code") assert_roots(rule_runner, ["/"], expected=["."]) def test_marker_file(rule_runner: RuleRunner) -> None: rule_runner.create_file("fakerootA/SOURCE_ROOT") rule_runner.create_file("fakerootB/setup.py") assert_roots( rule_runner, configured=[], marker_files=["SOURCE_ROOT", "setup.py"], expected=["fakerootA", "fakerootB"], )
apache-2.0
Dark5ide/mycroft-core
mycroft/util/lang/common_data_pt.py
1
2228
# Undefined articles ["um", "uma", "uns", "umas"] can not be supressed, # in PT, "um cavalo" means "a horse" or "one horse". _PT_ARTICLES = ["o", "a", "os", "as"] _PT_NUMBERS = { "zero": 0, "um": 1, "uma": 1, "uns": 1, "umas": 1, "primeiro": 1, "segundo": 2, "terceiro": 3, "dois": 2, "duas": 2, "tres": 3, "três": 3, "quatro": 4, "cinco": 5, "seis": 6, "sete": 7, "oito": 8, "nove": 9, "dez": 10, "onze": 11, "doze": 12, "treze": 13, "catorze": 14, "quinze": 15, "dezasseis": 16, "dezassete": 17, "dezoito": 18, "dezanove": 19, "vinte": 20, "trinta": 30, "quarenta": 40, "cinquenta": 50, "sessenta": 60, "setenta": 70, "oitenta": 80, "noventa": 90, "cem": 100, "cento": 100, "duzentos": 200, "duzentas": 200, "trezentos": 300, "trezentas": 300, "quatrocentos": 400, "quatrocentas": 400, "quinhentos": 500, "quinhentas": 500, "seiscentos": 600, "seiscentas": 600, "setecentos": 700, "setecentas": 700, "oitocentos": 800, "oitocentas": 800, "novecentos": 900, "novecentas": 900, "mil": 1000, "milh�o": 1000000} _FRACTION_STRING_PT = { 2: 'meio', 3: 'terço', 4: 'quarto', 5: 'quinto', 6: 'sexto', 7: 'sétimo', 8: 'oitavo', 9: 'nono', 10: 'décimo', 11: 'onze avos', 12: 'doze avos', 13: 'treze avos', 14: 'catorze avos', 15: 'quinze avos', 16: 'dezasseis avos', 17: 'dezassete avos', 18: 'dezoito avos', 19: 'dezanove avos', 20: 'vigésimo', 30: 'trigésimo', 100: 'centésimo', 1000: 'milésimo' } _NUM_STRING_PT = { 0: 'zero', 1: 'um', 2: 'dois', 3: 'três', 4: 'quatro', 5: 'cinco', 6: 'seis', 7: 'sete', 8: 'oito', 9: 'nove', 10: 'dez', 11: 'onze', 12: 'doze', 13: 'treze', 14: 'catorze', 15: 'quinze', 16: 'dezasseis', 17: 'dezassete', 18: 'dezoito', 19: 'dezanove', 20: 'vinte', 30: 'trinta', 40: 'quarenta', 50: 'cinquenta', 60: 'sessenta', 70: 'setenta', 80: 'oitenta', 90: 'noventa' }
apache-2.0
takat0m0/test_code
tf_rnn/model.py
1
1643
# -*- coding:utf-8 -*-
"""Sequence model: feature extractor -> RNN -> linear head.

Builds two parallel graphs over shared weights: one with training
behavior enabled and one (``outputs_wo_train``) for inference.
"""

import os
import sys

import numpy as np
import tensorflow as tf

from linear_layer import FeatureExtractor, LinearLayers
from rnn_layer import RNNLayers


class Model(object):
    """Wires FeatureExtractor, RNNLayers and LinearLayers into one graph.

    Args:
        max_time_length: number of time steps per input sequence.
        data_dim: feature dimensionality of each time step.
    """

    def __init__(self, max_time_length, data_dim):
        self.max_time_length = max_time_length
        self.data_dim = data_dim

        # Layer sizes are fixed here; the string lists are variable-scope
        # names used by the layer helpers.
        self.fe = FeatureExtractor(['FEATURE'], [20, 20, 15])
        self.rnn = RNNLayers(['RNNLAYERS'], [10, 20])
        self.ll = LinearLayers(['LINEARLAYER'], [10, 5])

    def set_model(self):
        """Create placeholders and build training + inference graphs."""
        # -- place holder --
        self.inputs = tf.placeholder(
            dtype=tf.float32,
            shape=[None, self.max_time_length, self.data_dim])
        self.sequence_length = tf.placeholder(dtype=tf.int32, shape=[None])

        # -- make networks (training graph; creates the variables) --
        fv = self.fe.set_model(self.inputs, True, False)
        rnn_fv = self.rnn.set_model(fv, self.sequence_length, True, False)
        outputs = self.ll.set_model(rnn_fv, True, False)

        # -- for inference (reuses the variables created above) --
        fv = self.fe.set_model(self.inputs, False, True)
        rnn_fv = self.rnn.set_model(fv, self.sequence_length, False, True)
        self.outputs_wo_train = self.ll.set_model(rnn_fv, False, True)
        print(self.outputs_wo_train)

    def get_outputs(self, sess, inputs, sequence_length):
        """Run the inference graph and return its outputs.

        Args:
            sess: an active ``tf.Session``.
            inputs: array of shape [batch, max_time_length, data_dim].
            sequence_length: per-example valid lengths, shape [batch].

        Returns:
            The evaluated ``outputs_wo_train`` tensor.
        """
        # BUG FIX: this method was declared without ``self``
        # (``def get_outputs(sess, inputs, sequence_length)``) while its
        # body referenced ``self.inputs`` etc., so any call raised a
        # NameError / bound the session object to ``sess``'s slot.
        feed_dict = {self.inputs: inputs,
                     self.sequence_length: sequence_length}
        return sess.run(self.outputs_wo_train, feed_dict=feed_dict)


if __name__ == '__main__':
    model = Model(max_time_length=8, data_dim=30)
    model.set_model()
mit
akosyakov/intellij-community
python/helpers/pydev/tests/test_check_pydevconsole.py
41
4796
import threading
import unittest

import pydevconsole
from pydev_imports import xmlrpclib, SimpleXMLRPCServer
import sys
from pydev_localhost import get_localhost
from pydev_ipython_console_011 import get_pydev_frontend

# Pick the interactive-input builtin name for the running interpreter
# (Python 2 has raw_input; on Python 3 referencing it raises NameError).
try:
    raw_input
    raw_input_name = 'raw_input'
except NameError:
    raw_input_name = 'input'

#=======================================================================================================================
# Test
#=======================================================================================================================
class Test(unittest.TestCase):
    # End-to-end test of the pydevconsole XML-RPC protocol: a fake IDE
    # client (startClientThread) and the console server talk over two
    # localhost ports.

    def startClientThread(self, client_port):
        # Start a daemon thread running an XML-RPC server that plays the
        # IDE side: it answers RequestInput/NotifyFinished callbacks from
        # the console and records that they happened.
        class ClientThread(threading.Thread):
            def __init__(self, client_port):
                threading.Thread.__init__(self)
                self.client_port = client_port

            def run(self):
                class HandleRequestInput:
                    def RequestInput(self):
                        # Flag observed by the test's polling loop.
                        client_thread.requested_input = True
                        return 'RequestInput: OK'

                    def NotifyFinished(self, *args, **kwargs):
                        client_thread.notified_finished += 1
                        return 1

                handle_request_input = HandleRequestInput()

                import pydev_localhost
                self.client_server = client_server = SimpleXMLRPCServer(
                    (pydev_localhost.get_localhost(), self.client_port),
                    logRequests=False)
                client_server.register_function(
                    handle_request_input.RequestInput)
                client_server.register_function(
                    handle_request_input.NotifyFinished)
                client_server.serve_forever()

            def shutdown(self):
                return self.client_server.shutdown()

        client_thread = ClientThread(client_port)
        client_thread.requested_input = False
        client_thread.notified_finished = 0
        client_thread.setDaemon(True)
        client_thread.start()
        return client_thread

    def getFreeAddresses(self):
        # Grab two ephemeral port numbers by binding throwaway sockets.
        # NOTE(review): the ports are released before use, so another
        # process could in principle grab them (inherent TOCTOU in this
        # technique).
        import socket
        s = socket.socket()
        s.bind(('', 0))
        port0 = s.getsockname()[1]

        s1 = socket.socket()
        s1.bind(('', 0))
        port1 = s1.getsockname()[1]

        s.close()
        s1.close()
        return port0, port1

    def testServer(self):
        # Just making sure that the singleton is created in this thread.
        get_pydev_frontend(get_localhost(), 0)

        client_port, server_port = self.getFreeAddresses()

        # Console server under test, also on a daemon thread.
        class ServerThread(threading.Thread):
            def __init__(self, client_port, server_port):
                threading.Thread.__init__(self)
                self.client_port = client_port
                self.server_port = server_port

            def run(self):
                import pydev_localhost
                print('Starting server with:', pydev_localhost.get_localhost(),
                      self.server_port, self.client_port)
                pydevconsole.StartServer(pydev_localhost.get_localhost(),
                                         self.server_port, self.client_port)

        server_thread = ServerThread(client_port, server_port)
        server_thread.setDaemon(True)
        server_thread.start()

        client_thread = self.startClientThread(client_port) #@UnusedVariable

        try:
            import time
            time.sleep(.3) #let's give it some time to start the threads

            import pydev_localhost
            server = xmlrpclib.Server(
                'http://%s:%s' % (pydev_localhost.get_localhost(), server_port))
            # Drive the console line by line, ending with a read from
            # stdin, which must trigger a RequestInput callback to the
            # fake client.
            server.execLine("import sys; print('Running with: %s %s' % (sys.executable or sys.platform, sys.version))")
            server.execLine('class Foo:')
            server.execLine('    pass')
            server.execLine('')
            server.execLine('foo = Foo()')
            server.execLine('a = %s()' % raw_input_name)
            initial = time.time()
            while not client_thread.requested_input:
                if time.time() - initial > 2:
                    raise AssertionError(
                        'Did not get the return asked before the timeout.')
                time.sleep(.1)

            frame_xml = server.getFrame()
            self.assert_('RequestInput' in frame_xml,
                         'Did not fid RequestInput in:\n%s' % (frame_xml,))
        finally:
            client_thread.shutdown()

#=======================================================================================================================
# main
#=======================================================================================================================
if __name__ == '__main__':
    unittest.main()
apache-2.0
Bismarrck/tensorflow
tensorflow/python/kernel_tests/distributions/dirichlet_test.py
30
11370
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Tests for tf.distributions.Dirichlet.  scipy is optional: tests that
# need it become no-ops when it is not installed.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import importlib

import numpy as np

from tensorflow.python.eager import backprop
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import dirichlet as dirichlet_lib
from tensorflow.python.ops.distributions import kullback_leibler
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging


def try_import(name):  # pylint: disable=invalid-name
  # Import a module by name, returning None (with a warning) when it is
  # unavailable instead of raising.
  module = None
  try:
    module = importlib.import_module(name)
  except ImportError as e:
    tf_logging.warning("Could not import %s: %s" % (name, str(e)))
  return module


special = try_import("scipy.special")
stats = try_import("scipy.stats")


@test_util.run_all_in_graph_and_eager_modes
class DirichletTest(test.TestCase):

  def testSimpleShapes(self):
    alpha = np.random.rand(3)
    dist = dirichlet_lib.Dirichlet(alpha)
    self.assertEqual(3, self.evaluate(dist.event_shape_tensor()))
    self.assertAllEqual([], self.evaluate(dist.batch_shape_tensor()))
    self.assertEqual(tensor_shape.TensorShape([3]), dist.event_shape)
    self.assertEqual(tensor_shape.TensorShape([]), dist.batch_shape)

  def testComplexShapes(self):
    # Leading dims of concentration become batch shape; last is event.
    alpha = np.random.rand(3, 2, 2)
    dist = dirichlet_lib.Dirichlet(alpha)
    self.assertEqual(2, self.evaluate(dist.event_shape_tensor()))
    self.assertAllEqual([3, 2], self.evaluate(dist.batch_shape_tensor()))
    self.assertEqual(tensor_shape.TensorShape([2]), dist.event_shape)
    self.assertEqual(tensor_shape.TensorShape([3, 2]), dist.batch_shape)

  def testConcentrationProperty(self):
    alpha = [[1., 2, 3]]
    dist = dirichlet_lib.Dirichlet(alpha)
    self.assertEqual([1, 3], dist.concentration.get_shape())
    self.assertAllClose(alpha, self.evaluate(dist.concentration))

  def testPdfXProper(self):
    # With validate_args=True, samples must be positive and sum to 1.
    alpha = [[1., 2, 3]]
    dist = dirichlet_lib.Dirichlet(alpha, validate_args=True)
    self.evaluate(dist.prob([.1, .3, .6]))
    self.evaluate(dist.prob([.2, .3, .5]))
    # Either condition can trigger.
    with self.assertRaisesOpError("samples must be positive"):
      self.evaluate(dist.prob([-1., 1.5, 0.5]))
    with self.assertRaisesOpError("samples must be positive"):
      self.evaluate(dist.prob([0., .1, .9]))
    with self.assertRaisesOpError("sample last-dimension must sum to `1`"):
      self.evaluate(dist.prob([.1, .2, .8]))

  def testLogPdfOnBoundaryIsFiniteWhenAlphaIsOne(self):
    # A zero coordinate is on the simplex boundary; the log-pdf must stay
    # finite there whenever the matching concentration is exactly 1.
    # Test concentration = 1. for each dimension.
    concentration = 3 * np.ones((10, 10)).astype(np.float32)
    concentration[range(10), range(10)] = 1.
    x = 1 / 9. * np.ones((10, 10)).astype(np.float32)
    x[range(10), range(10)] = 0.
    dist = dirichlet_lib.Dirichlet(concentration)
    log_prob = self.evaluate(dist.log_prob(x))
    self.assertAllEqual(
        np.ones_like(log_prob, dtype=np.bool), np.isfinite(log_prob))

    # Test when concentration[k] = 1., and x is zero at various dimensions.
    dist = dirichlet_lib.Dirichlet(10 * [1.])
    log_prob = self.evaluate(dist.log_prob(x))
    self.assertAllEqual(
        np.ones_like(log_prob, dtype=np.bool), np.isfinite(log_prob))

  def testPdfZeroBatches(self):
    alpha = [1., 2]
    x = [.5, .5]
    dist = dirichlet_lib.Dirichlet(alpha)
    pdf = dist.prob(x)
    self.assertAllClose(1., self.evaluate(pdf))
    self.assertEqual((), pdf.get_shape())

  def testPdfZeroBatchesNontrivialX(self):
    alpha = [1., 2]
    x = [.3, .7]
    dist = dirichlet_lib.Dirichlet(alpha)
    pdf = dist.prob(x)
    self.assertAllClose(7. / 5, self.evaluate(pdf))
    self.assertEqual((), pdf.get_shape())

  def testPdfUniformZeroBatches(self):
    # Corresponds to a uniform distribution
    alpha = [1., 1, 1]
    x = [[.2, .5, .3], [.3, .4, .3]]
    dist = dirichlet_lib.Dirichlet(alpha)
    pdf = dist.prob(x)
    self.assertAllClose([2., 2.], self.evaluate(pdf))
    self.assertEqual((2), pdf.get_shape())

  # The next four tests cover broadcasting between concentration and x.
  def testPdfAlphaStretchedInBroadcastWhenSameRank(self):
    alpha = [[1., 2]]
    x = [[.5, .5], [.3, .7]]
    dist = dirichlet_lib.Dirichlet(alpha)
    pdf = dist.prob(x)
    self.assertAllClose([1., 7. / 5], self.evaluate(pdf))
    self.assertEqual((2), pdf.get_shape())

  def testPdfAlphaStretchedInBroadcastWhenLowerRank(self):
    alpha = [1., 2]
    x = [[.5, .5], [.2, .8]]
    pdf = dirichlet_lib.Dirichlet(alpha).prob(x)
    self.assertAllClose([1., 8. / 5], self.evaluate(pdf))
    self.assertEqual((2), pdf.get_shape())

  def testPdfXStretchedInBroadcastWhenSameRank(self):
    alpha = [[1., 2], [2., 3]]
    x = [[.5, .5]]
    pdf = dirichlet_lib.Dirichlet(alpha).prob(x)
    self.assertAllClose([1., 3. / 2], self.evaluate(pdf))
    self.assertEqual((2), pdf.get_shape())

  def testPdfXStretchedInBroadcastWhenLowerRank(self):
    alpha = [[1., 2], [2., 3]]
    x = [.5, .5]
    pdf = dirichlet_lib.Dirichlet(alpha).prob(x)
    self.assertAllClose([1., 3. / 2], self.evaluate(pdf))
    self.assertEqual((2), pdf.get_shape())

  def testMean(self):
    alpha = [1., 2, 3]
    dirichlet = dirichlet_lib.Dirichlet(concentration=alpha)
    self.assertEqual(dirichlet.mean().get_shape(), [3])
    if not stats:
      return
    expected_mean = stats.dirichlet.mean(alpha)
    self.assertAllClose(self.evaluate(dirichlet.mean()), expected_mean)

  def testCovarianceFromSampling(self):
    # Compare analytic moments against Monte-Carlo estimates from a
    # large sample (loose tolerances account for sampling error).
    alpha = np.array([[1., 2, 3],
                      [2.5, 4, 0.01]], dtype=np.float32)
    dist = dirichlet_lib.Dirichlet(alpha)  # batch_shape=[2], event_shape=[3]
    x = dist.sample(int(250e3), seed=1)
    sample_mean = math_ops.reduce_mean(x, 0)
    x_centered = x - sample_mean[None, ...]
    sample_cov = math_ops.reduce_mean(math_ops.matmul(
        x_centered[..., None], x_centered[..., None, :]), 0)
    sample_var = array_ops.matrix_diag_part(sample_cov)
    sample_stddev = math_ops.sqrt(sample_var)

    [
        sample_mean_,
        sample_cov_,
        sample_var_,
        sample_stddev_,
        analytic_mean,
        analytic_cov,
        analytic_var,
        analytic_stddev,
    ] = self.evaluate([
        sample_mean,
        sample_cov,
        sample_var,
        sample_stddev,
        dist.mean(),
        dist.covariance(),
        dist.variance(),
        dist.stddev(),
    ])

    self.assertAllClose(sample_mean_, analytic_mean, atol=0.04, rtol=0.)
    self.assertAllClose(sample_cov_, analytic_cov, atol=0.06, rtol=0.)
    self.assertAllClose(sample_var_, analytic_var, atol=0.03, rtol=0.)
    self.assertAllClose(sample_stddev_, analytic_stddev, atol=0.02, rtol=0.)

  def testVariance(self):
    alpha = [1., 2, 3]
    denominator = np.sum(alpha)**2 * (np.sum(alpha) + 1)
    dirichlet = dirichlet_lib.Dirichlet(concentration=alpha)
    self.assertEqual(dirichlet.covariance().get_shape(), (3, 3))
    if not stats:
      return
    # Off-diagonal covariance is -alpha_i*alpha_j / denominator.
    expected_covariance = np.diag(stats.dirichlet.var(alpha))
    expected_covariance += [[0., -2, -3], [-2, 0, -6],
                            [-3, -6, 0]] / denominator
    self.assertAllClose(
        self.evaluate(dirichlet.covariance()), expected_covariance)

  def testMode(self):
    alpha = np.array([1.1, 2, 3])
    expected_mode = (alpha - 1) / (np.sum(alpha) - 3)
    dirichlet = dirichlet_lib.Dirichlet(concentration=alpha)
    self.assertEqual(dirichlet.mode().get_shape(), [3])
    self.assertAllClose(self.evaluate(dirichlet.mode()), expected_mode)

  def testModeInvalid(self):
    # Mode is undefined when any concentration <= 1; with
    # allow_nan_stats=False this must raise.
    alpha = np.array([1., 2, 3])
    dirichlet = dirichlet_lib.Dirichlet(
        concentration=alpha, allow_nan_stats=False)
    with self.assertRaisesOpError("Condition x < y.*"):
      self.evaluate(dirichlet.mode())

  def testModeEnableAllowNanStats(self):
    # With allow_nan_stats=True an undefined mode is returned as NaN.
    alpha = np.array([1., 2, 3])
    dirichlet = dirichlet_lib.Dirichlet(
        concentration=alpha, allow_nan_stats=True)
    expected_mode = np.zeros_like(alpha) + np.nan

    self.assertEqual(dirichlet.mode().get_shape(), [3])
    self.assertAllClose(self.evaluate(dirichlet.mode()), expected_mode)

  def testEntropy(self):
    alpha = [1., 2, 3]
    dirichlet = dirichlet_lib.Dirichlet(concentration=alpha)
    self.assertEqual(dirichlet.entropy().get_shape(), ())
    if not stats:
      return
    expected_entropy = stats.dirichlet.entropy(alpha)
    self.assertAllClose(self.evaluate(dirichlet.entropy()), expected_entropy)

  def testSample(self):
    # Marginal of a 2-class Dirichlet is Beta; verify via KS test.
    alpha = [1., 2]
    dirichlet = dirichlet_lib.Dirichlet(alpha)
    n = constant_op.constant(100000)
    samples = dirichlet.sample(n)
    sample_values = self.evaluate(samples)
    self.assertEqual(sample_values.shape, (100000, 2))
    self.assertTrue(np.all(sample_values > 0.0))
    if not stats:
      return
    self.assertLess(
        stats.kstest(
            # Beta is a univariate distribution.
            sample_values[:, 0],
            stats.beta(a=1., b=2.).cdf)[0],
        0.01)

  def testDirichletFullyReparameterized(self):
    # Gradients must flow from samples back to the concentration.
    alpha = constant_op.constant([1.0, 2.0, 3.0])
    with backprop.GradientTape() as tape:
      tape.watch(alpha)
      dirichlet = dirichlet_lib.Dirichlet(alpha)
      samples = dirichlet.sample(100)
    grad_alpha = tape.gradient(samples, alpha)
    self.assertIsNotNone(grad_alpha)

  def testDirichletDirichletKL(self):
    conc1 = np.array([[1., 2., 3., 1.5, 2.5, 3.5],
                      [1.5, 2.5, 3.5, 4.5, 5.5, 6.5]])
    conc2 = np.array([[0.5, 1., 1.5, 2., 2.5, 3.]])

    d1 = dirichlet_lib.Dirichlet(conc1)
    d2 = dirichlet_lib.Dirichlet(conc2)
    # Monte-Carlo estimate of the KL divergence for cross-checking.
    x = d1.sample(int(1e4), seed=0)
    kl_sample = math_ops.reduce_mean(d1.log_prob(x) - d2.log_prob(x), 0)
    kl_actual = kullback_leibler.kl_divergence(d1, d2)

    kl_sample_val = self.evaluate(kl_sample)
    kl_actual_val = self.evaluate(kl_actual)

    self.assertEqual(conc1.shape[:-1], kl_actual.get_shape())

    if not special:
      return

    # Closed-form Dirichlet-Dirichlet KL in terms of gammaln/digamma.
    kl_expected = (
        special.gammaln(np.sum(conc1, -1)) -
        special.gammaln(np.sum(conc2, -1)) -
        np.sum(special.gammaln(conc1) - special.gammaln(conc2), -1) +
        np.sum((conc1 - conc2) * (special.digamma(conc1) - special.digamma(
            np.sum(conc1, -1, keepdims=True))), -1))

    self.assertAllClose(kl_expected, kl_actual_val, atol=0., rtol=1e-6)
    self.assertAllClose(kl_sample_val, kl_actual_val, atol=0., rtol=1e-1)

    # Make sure KL(d1||d1) is 0
    kl_same = self.evaluate(kullback_leibler.kl_divergence(d1, d1))
    self.assertAllClose(kl_same, np.zeros_like(kl_expected))


if __name__ == "__main__":
  test.main()
apache-2.0
dmckinney5/SlackOff
slackoff/lib/python2.7/site-packages/requests/packages/urllib3/exceptions.py
223
6603
from __future__ import absolute_import

from .packages.six.moves.http_client import (
    IncompleteRead as httplib_IncompleteRead
)

# Base Exceptions


class HTTPError(Exception):
    "Base exception used by this module."
    pass


class HTTPWarning(Warning):
    "Base warning used by this module."
    pass


class PoolError(HTTPError):
    "Base exception for errors caused within a pool."

    def __init__(self, pool, message):
        self.pool = pool
        HTTPError.__init__(self, "%s: %s" % (pool, message))

    def __reduce__(self):
        # For pickling purposes. The pool is not picklable, so it is dropped.
        return self.__class__, (None, None)


class RequestError(PoolError):
    "Base exception for PoolErrors that have associated URLs."

    def __init__(self, pool, url, message):
        self.url = url
        PoolError.__init__(self, pool, message)

    def __reduce__(self):
        # For pickling purposes. Keep the URL, drop the unpicklable pool.
        return self.__class__, (None, self.url, None)


class SSLError(HTTPError):
    "Raised when SSL certificate fails in an HTTPS connection."
    pass


class ProxyError(HTTPError):
    "Raised when the connection to a proxy fails."
    pass


class DecodeError(HTTPError):
    "Raised when automatic decoding based on Content-Type fails."
    pass


class ProtocolError(HTTPError):
    "Raised when something unexpected happens mid-request/response."
    pass


#: Renamed to ProtocolError but aliased for backwards compatibility.
ConnectionError = ProtocolError


# Leaf Exceptions

class MaxRetryError(RequestError):
    """Raised when the maximum number of retries is exceeded.

    :param pool: The connection pool
    :type pool: :class:`~urllib3.connectionpool.HTTPConnectionPool`
    :param string url: The requested Url
    :param exceptions.Exception reason: The underlying error

    """

    def __init__(self, pool, url, reason=None):
        self.reason = reason

        message = "Max retries exceeded with url: %s (Caused by %r)" % (
            url, reason)

        RequestError.__init__(self, pool, url, message)


class HostChangedError(RequestError):
    "Raised when an existing pool gets a request for a foreign host."

    def __init__(self, pool, url, retries=3):
        message = "Tried to open a foreign host with url: %s" % url
        RequestError.__init__(self, pool, url, message)
        self.retries = retries


class TimeoutStateError(HTTPError):
    """ Raised when passing an invalid state to a timeout """
    pass


class TimeoutError(HTTPError):
    """ Raised when a socket timeout error occurs.

    Catching this error will catch both :exc:`ReadTimeoutErrors
    <ReadTimeoutError>` and :exc:`ConnectTimeoutErrors <ConnectTimeoutError>`.
    """
    pass


class ReadTimeoutError(TimeoutError, RequestError):
    "Raised when a socket timeout occurs while receiving data from a server"
    pass


# This timeout error does not have a URL attached and needs to inherit from the
# base HTTPError
class ConnectTimeoutError(TimeoutError):
    "Raised when a socket timeout occurs while connecting to a server"
    pass


class NewConnectionError(ConnectTimeoutError, PoolError):
    "Raised when we fail to establish a new connection. Usually ECONNREFUSED."
    pass


class EmptyPoolError(PoolError):
    "Raised when a pool runs out of connections and no more are allowed."
    pass


class ClosedPoolError(PoolError):
    "Raised when a request enters a pool after the pool has been closed."
    pass


class LocationValueError(ValueError, HTTPError):
    "Raised when there is something wrong with a given URL input."
    pass


class LocationParseError(LocationValueError):
    "Raised when get_host or similar fails to parse the URL input."

    def __init__(self, location):
        message = "Failed to parse: %s" % location
        HTTPError.__init__(self, message)

        self.location = location


class ResponseError(HTTPError):
    "Used as a container for an error reason supplied in a MaxRetryError."
    GENERIC_ERROR = 'too many error responses'
    SPECIFIC_ERROR = 'too many {status_code} error responses'


class SecurityWarning(HTTPWarning):
    "Warned when performing security reducing actions"
    pass


class SubjectAltNameWarning(SecurityWarning):
    "Warned when connecting to a host with a certificate missing a SAN."
    pass


class InsecureRequestWarning(SecurityWarning):
    "Warned when making an unverified HTTPS request."
    pass


class SystemTimeWarning(SecurityWarning):
    "Warned when system time is suspected to be wrong"
    pass


class InsecurePlatformWarning(SecurityWarning):
    "Warned when certain SSL configuration is not available on a platform."
    pass


class SNIMissingWarning(HTTPWarning):
    "Warned when making a HTTPS request without SNI available."
    pass


class DependencyWarning(HTTPWarning):
    """
    Warned when an attempt is made to import a module with missing optional
    dependencies.
    """
    pass


class ResponseNotChunked(ProtocolError, ValueError):
    "Response needs to be chunked in order to read it as chunks."
    pass


class BodyNotHttplibCompatible(HTTPError):
    """
    Body should be httplib.HTTPResponse like (have an fp attribute which
    returns raw chunks) for read_chunked().
    """
    pass


class IncompleteRead(HTTPError, httplib_IncompleteRead):
    """
    Response length doesn't match expected Content-Length

    Subclass of http_client.IncompleteRead to allow int value
    for `partial` to avoid creating large objects on streamed
    reads.
    """

    def __init__(self, partial, expected):
        super(IncompleteRead, self).__init__(partial, expected)

    def __repr__(self):
        return ('IncompleteRead(%i bytes read, '
                '%i more expected)' % (self.partial, self.expected))


class InvalidHeader(HTTPError):
    "The header provided was somehow invalid."
    pass


class ProxySchemeUnknown(AssertionError, ValueError):
    "ProxyManager does not support the supplied scheme"
    # TODO(t-8ch): Stop inheriting from AssertionError in v2.0.

    def __init__(self, scheme):
        message = "Not supported proxy scheme %s" % scheme
        super(ProxySchemeUnknown, self).__init__(message)


class HeaderParsingError(HTTPError):
    "Raised by assert_header_parsing, but we convert it to a log.warning statement."

    def __init__(self, defects, unparsed_data):
        message = '%s, unparsed data: %r' % (defects or 'Unknown', unparsed_data)
        super(HeaderParsingError, self).__init__(message)


class UnrewindableBodyError(HTTPError):
    "urllib3 encountered an error when trying to rewind a body"
    pass
mit
Work4Labs/lettuce
tests/integration/lib/Django-1.2.5/django/contrib/gis/geos/prototypes/predicates.py
623
1777
"""
 This module houses the GEOS ctypes prototype functions for the
 unary and binary predicate operations on geometries.
"""
from ctypes import c_char, c_char_p, c_double
from django.contrib.gis.geos.libgeos import GEOM_PTR
from django.contrib.gis.geos.prototypes.errcheck import check_predicate
from django.contrib.gis.geos.prototypes.threadsafe import GEOSFunc

## Binary & unary predicate functions ##
def binary_predicate(func, *args):
    "For GEOS binary predicate functions."
    # Every binary predicate takes two geometry pointers; any extra argtypes
    # (e.g. the tolerance double used by GEOSEqualsExact, or the pattern
    # string used by GEOSRelatePattern below) are appended after them.
    argtypes = [GEOM_PTR, GEOM_PTR]
    if args: argtypes += args
    func.argtypes = argtypes
    # The raw GEOS predicate returns a char; check_predicate converts it
    # into a Python boolean (or raises on the library's error value).
    # NOTE(review): the exact error-value convention lives in check_predicate /
    # the GEOS C API docs — confirm there, it is not visible in this module.
    func.restype = c_char
    func.errcheck = check_predicate
    return func

def unary_predicate(func):
    "For GEOS unary predicate functions."
    # Same wiring as binary_predicate, but with a single geometry argument.
    func.argtypes = [GEOM_PTR]
    func.restype = c_char
    func.errcheck = check_predicate
    return func

## Unary Predicates ##
geos_hasz = unary_predicate(GEOSFunc('GEOSHasZ'))
geos_isempty = unary_predicate(GEOSFunc('GEOSisEmpty'))
geos_isring = unary_predicate(GEOSFunc('GEOSisRing'))
geos_issimple = unary_predicate(GEOSFunc('GEOSisSimple'))
geos_isvalid = unary_predicate(GEOSFunc('GEOSisValid'))

## Binary Predicates ##
geos_contains = binary_predicate(GEOSFunc('GEOSContains'))
geos_crosses = binary_predicate(GEOSFunc('GEOSCrosses'))
geos_disjoint = binary_predicate(GEOSFunc('GEOSDisjoint'))
geos_equals = binary_predicate(GEOSFunc('GEOSEquals'))
geos_equalsexact = binary_predicate(GEOSFunc('GEOSEqualsExact'), c_double)
geos_intersects = binary_predicate(GEOSFunc('GEOSIntersects'))
geos_overlaps = binary_predicate(GEOSFunc('GEOSOverlaps'))
geos_relatepattern = binary_predicate(GEOSFunc('GEOSRelatePattern'), c_char_p)
geos_touches = binary_predicate(GEOSFunc('GEOSTouches'))
geos_within = binary_predicate(GEOSFunc('GEOSWithin'))
gpl-3.0
ruschelp/cortex-vfx
python/IECoreMaya/FnSceneShape.py
5
15323
########################################################################## # # Copyright (c) 2013, Image Engine Design Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # * Neither the name of Image Engine Design nor the names of any # other contributors to this software may be used to endorse or # promote products derived from this software without specific prior # written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ########################################################################## from __future__ import with_statement import re import maya.OpenMaya import maya.cmds import IECore import IECoreMaya import _IECoreMaya from FnDagNode import FnDagNode import StringUtil ## A function set for operating on the IECoreMaya::SceneShape type. 
class FnSceneShape( maya.OpenMaya.MFnDependencyNode ) :

    ## Initialise the function set for the given procedural object, which may
    # either be an MObject or a node name in string or unicode form.
    def __init__( self, object ) :

        if isinstance( object, str ) or isinstance( object, unicode ) :
            object = StringUtil.dependencyNodeFromString( object )

        maya.OpenMaya.MFnDependencyNode.__init__( self, object )

    ## Creates a new node under a transform of the specified name. Returns a function set instance operating on this new node.
    @staticmethod
    def create( parentName ) :

        try:
            parentNode = maya.cmds.createNode( "transform", name=parentName, skipSelect=True )
        except:
            # The parent name is supposed to be the children names in a sceneInterface, they could be numbers, maya doesn't like that. Use a prefix.
            parentNode = maya.cmds.createNode( "transform", name="sceneShape_"+parentName, skipSelect=True )

        return FnSceneShape.createShape( parentNode )

    ## Create a scene shape under the given node. Returns a function set instance operating on this shape.
    @staticmethod
    def createShape( parentNode ) :

        parentShort = parentNode.rpartition( "|" )[-1]
        numbersMatch = re.search( "[0-9]+$", parentShort )
        if numbersMatch is not None :
            # Keep a trailing numeric suffix on the transform name at the end of
            # the shape name, e.g. "thing12" -> "thingSceneShape12".
            numbers = numbersMatch.group()
            shapeName = parentShort[:-len(numbers)] + "SceneShape" + numbers
        else :
            shapeName = parentShort + "SceneShape"

        shapeNode = maya.cmds.createNode( "ieSceneShape", name=shapeName, parent=parentNode, skipSelect=True )
        fnScS = FnSceneShape( shapeNode )

        # New shapes get the default shading group, a locked objectOnly flag and a live time connection.
        maya.cmds.sets( fnScS.fullPathName(), add="initialShadingGroup" )
        maya.cmds.setAttr( fnScS.fullPathName()+".objectOnly", l=True )
        maya.cmds.connectAttr( "time1.outTime", fnScS.fullPathName()+'.time' )

        return fnScS

    ## Returns a set of the names of any currently selected components.
def selectedComponentNames( self ) : result = set() s = maya.OpenMaya.MSelectionList() maya.OpenMaya.MGlobal.getActiveSelectionList( s ) allComponents = self.componentNames() fullPathName = self.fullPathName() for i in range( 0, s.length() ) : try : p = maya.OpenMaya.MDagPath() c = maya.OpenMaya.MObject() s.getDagPath( i, p, c ) if p.node()==self.object() : fnC = maya.OpenMaya.MFnSingleIndexedComponent( c ) a = maya.OpenMaya.MIntArray() fnC.getElements( a ) for j in range( 0, a.length() ) : result.add( allComponents[ a[j] ] ) except : pass return result ## Selects the components specified by the passed names. If replace is True # then the current selection is deselected first. def selectComponentNames( self, componentNames ) : if not isinstance( componentNames, set ) : componentNames = set( componentNames ) fullPathName = self.fullPathName() allnames = self.componentNames() for i, name in enumerate( allNames ): if name in componentNames: toSelect.append( fullPathName + ".f[" + str( i ) + "]" ) maya.cmds.select( clear=True ) maya.cmds.selectMode( component=True ) maya.cmds.hilite( fullPathName ) for s in toSelect : maya.cmds.select( s, add=True ) ## Returns the full path name to this node. def fullPathName( self ) : try : f = maya.OpenMaya.MFnDagNode( self.object() ) return f.fullPathName() except : pass return self.name() def sceneInterface( self ) : return _IECoreMaya._sceneShapeSceneInterface( self ) def componentNames( self ) : return _IECoreMaya._sceneShapeComponentNames( self ) ## Returns True if the scene shape can be expanded. # We assume that if the objectOnly flag is on, it means the scene shape has already been expanded so return False. # Can only be expanded if the scene interface for the scene shape has children. 
    def canBeExpanded( self ) :

        # An already expanded scene should have objectOnly on
        if not maya.cmds.getAttr( self.fullPathName()+".objectOnly" ):
            # Check if you have any children to expand to
            if self.sceneInterface().childNames():
                return True
        return False

    ## Returns True if the scene shape can be collapsed.
    # We assume that if the objectOnly flag is off, the scene shape is already collapsed.
    def canBeCollapsed( self ) :

        # if already collapsed, objectOnly is off
        return maya.cmds.getAttr( self.fullPathName()+".objectOnly" )

    ## Returns the index in the queryPaths which matches the given path.
    # If the path isn't already in the queries, add it and return the new index.
    def __queryIndexForPath( self, path ):

        node = self.fullPathName()
        index = None
        validIndices = maya.cmds.getAttr( node+".queryPaths", mi=True )
        if not validIndices:
            # No queries yet: the new path goes in the first slot.
            index = 0
        else:
            for id in validIndices:
                # Check if we can reuse a query path
                if maya.cmds.getAttr( node+".queryPaths["+str(id)+"]" ) == path:
                    index = id
                    break
            if index is None:
                # Didn't find path, get the next available index
                index = max( i for i in validIndices ) +1

        # Writing an existing path back to its own slot is harmless; new paths
        # are registered here for the first time.
        maya.cmds.setAttr( node+".queryPaths["+str(index)+"]", path, type="string" )
        return index

    ## Expands the scene shape one level down if possible.
    # Returns a list of function sets for the child scene shapes.
    # Missing child transforms and shapes will be created, missing connections and attribute values will be reset.
    def expandOnce( self ) :

        node = self.fullPathName()
        transform = maya.cmds.listRelatives( node, parent=True, f=True )[0]

        scene = self.sceneInterface()
        if not scene:
            return []

        sceneChildren = scene.childNames()
        if sceneChildren == []:
            # No children to expand to
            return []

        sceneFile = maya.cmds.getAttr( node+".file" )
        sceneRoot = maya.cmds.getAttr( node+".root" )

        # Mark this node as expanded: objectOnly is locked back on once set.
        maya.cmds.setAttr( node+".querySpace", 1 )
        maya.cmds.setAttr( node+".objectOnly", l=False )
        maya.cmds.setAttr( node+".objectOnly", 1 )
        maya.cmds.setAttr( node+".objectOnly", l=True )

        # Draw settings are propagated from parent to each child below.
        drawGeo = maya.cmds.getAttr( node+".drawGeometry" )
        drawChildBounds = maya.cmds.getAttr( node+".drawChildBounds" )
        drawRootBound = maya.cmds.getAttr( node+".drawRootBound" )
        drawTagsFilter = maya.cmds.getAttr( node+".drawTagsFilter" )

        newSceneShapeFns = []

        for i, child in enumerate( sceneChildren ):

            # Reuse an existing child transform/shape when present, otherwise create them.
            if maya.cmds.objExists( transform+"|"+child ):
                shape = maya.cmds.listRelatives( transform+"|"+child, f=True, type="ieSceneShape" )
                if shape:
                    fnChild = IECoreMaya.FnSceneShape( shape[0] )
                else:
                    fnChild = IECoreMaya.FnSceneShape.createShape( transform+"|"+child )
            else:
                fnChild = IECoreMaya.FnSceneShape.create( child )

            childNode = fnChild.fullPathName()
            childTransform = maya.cmds.listRelatives( childNode, parent=True, f=True )[0]
            maya.cmds.setAttr( childNode+".file", sceneFile, type="string" )
            sceneRootName = "/"+child if sceneRoot == "/" else sceneRoot+"/"+child
            maya.cmds.setAttr( childNode+".root", sceneRootName, type="string" )

            # Drive the child transform from this node's outTransform query output,
            # (re)connecting only when the connection is missing.
            index = self.__queryIndexForPath( "/"+child )
            outTransform = node+".outTransform["+str(index)+"]"
            if not maya.cmds.isConnected( outTransform+".outTranslate", childTransform+".translate" ):
                maya.cmds.connectAttr( outTransform+".outTranslate", childTransform+".translate", f=True )
            if not maya.cmds.isConnected( outTransform+".outRotate", childTransform+".rotate" ):
                maya.cmds.connectAttr( outTransform+".outRotate", childTransform+".rotate", f=True )
            if not maya.cmds.isConnected( outTransform+".outScale", childTransform+".scale" ):
                maya.cmds.connectAttr( outTransform+".outScale", childTransform+".scale", f=True )

            maya.cmds.setAttr( childNode+".drawGeometry", drawGeo )
            maya.cmds.setAttr( childNode+".drawChildBounds", drawChildBounds )
            maya.cmds.setAttr( childNode+".drawRootBound", drawRootBound )

            if drawTagsFilter:
                # Narrow the parent's tag filter to the tags the child actually has.
                parentTags = drawTagsFilter.split()
                childTags = fnChild.sceneInterface().readTags()
                commonTags = filter( lambda x: str(x) in childTags, parentTags )
                if not commonTags:
                    # Hide that child since it doesn't match any filter
                    maya.cmds.setAttr( childTransform+".visibility", 0 )
                else:
                    maya.cmds.setAttr( childNode+".drawTagsFilter", " ".join(commonTags),type="string" )

            if maya.cmds.listRelatives( childTransform, parent = True, f=True ) != [ transform ]:
                maya.cmds.parent( childTransform, transform, relative=True )

            newSceneShapeFns.append( fnChild )

        return newSceneShapeFns

    ## Recursively expands all levels starting from the scene shape.
    # Returns a list of function sets for all the child scene shapes.
    def expandAll( self ):

        newFn = []

        def recursiveExpand( fnSceneShape ):
            new = fnSceneShape.expandOnce()
            newFn.extend( new )
            for n in new:
                recursiveExpand( n )

        recursiveExpand( self )

        return newFn

    ## Collapses all children up to this scene shape.
    def collapse( self ) :

        node = self.fullPathName()
        transform = maya.cmds.listRelatives( node, parent=True, f=True )[0]
        allTransformChildren = maya.cmds.listRelatives( transform, f=True, type = "transform" ) or []

        for child in allTransformChildren:
            # Do a bunch of tests first!
            maya.cmds.delete( child )

        # Unlock, clear and re-lock objectOnly so the node reads as collapsed again,
        # and make the shape visible to MayaScene once more.
        maya.cmds.setAttr( node+".objectOnly", l=False )
        maya.cmds.setAttr( node+".objectOnly", 0 )
        maya.cmds.setAttr( node+".objectOnly", l=True )
        maya.cmds.setAttr( node+".intermediateObject", 0 )

    ## Returns tuple of maya type and input plug name that match the object in the scene interface, by checking the objectType tags.
# Returns (None, None) if no object in the scene interface or the object isn't compatible with maya geometry we can create. def __mayaCompatibleShapeAndPlug( self ) : result = (None, None) if self.sceneInterface().hasObject(): tags = self.sceneInterface().readTags( includeChildren=False ) if "ObjectType:MeshPrimitive" in tags: result = ( "mesh", "inMesh" ) elif "ObjectType:CurvesPrimitive" in tags: result = ( "nurbsCurve", "create" ) elif "ObjectType:CoordinateSystem" in tags: result = ( "locator", "localPosition" ) return result ## Recursively converts all objects in the scene interface to compatible maya geometry # All scene shape nodes in the hierarchy are turned into an intermediate object. def convertAllToGeometry( self ) : # Expand scene first, then for each scene shape we turn them into an intermediate object and connect a mesh self.expandAll() transform = maya.cmds.listRelatives( self.fullPathName(), parent=True, f=True )[0] allSceneShapes = maya.cmds.listRelatives( transform, ad=True, f=True, type="ieSceneShape" ) for sceneShape in allSceneShapes: maya.cmds.setAttr( sceneShape+".querySpace", 1 ) fn = FnSceneShape( sceneShape ) if fn.sceneInterface() and fn.sceneInterface().hasObject(): fn.convertObjectToGeometry() # turn the scene node an intermediateObject so it can't be seen by MayaScene maya.cmds.setAttr( sceneShape+".intermediateObject", 1 ) ## Converts the object (if any) in the scene interface into maya geometry. # If a shape with the expected name but incompatible type is found under the transform, we rename it and create a new proper shape. # The shape is connected to the scene shape object output only if it isn't already connected or locked. # transformNode parameter can be used to specify the parent of the geometry. If None, uses the transform of the scene shape. 
def convertObjectToGeometry( self, transformNode = None ): if not self.sceneInterface().hasObject(): return node = self.fullPathName() if not transformNode: # No transform provided, use the transform of the reader transformNode = maya.cmds.listRelatives( node, f=True, p=True )[0] type, plug = self.__mayaCompatibleShapeAndPlug() if not (type and plug): raise Exception, "Scene interface at %s cannot be converted to Maya geometry." % self.sceneInterface().pathAsString() shapeName = IECoreMaya.FnDagNode.defaultShapeName( transformNode ) shape = transformNode + "|" + shapeName create = False if not maya.cmds.objExists( shape ): create = True elif maya.cmds.nodeType( shape ) != type: # Rename existing shape newName = shapeName + "_orig" maya.cmds.rename( shape, newName ) IECore.msg( IECore.Msg.Level.Warning, "FnSceneShape.convertObjectToGeometry", "Renaming incompatible shape %s to %s." % shape, newName ) create = True if create: maya.cmds.createNode( type, parent = transformNode, name = shapeName ) if type == "mesh": maya.cmds.sets(shape, add="initialShadingGroup" ) index = self.__queryIndexForPath( "/" ) if not maya.cmds.listConnections( shape+"."+plug, source = True, destination = False ) and not maya.cmds.getAttr( shape+"."+plug, l=True ): maya.cmds.connectAttr( node+'.outObjects['+str(index)+']', shape+"."+plug, f=True ) if type == "mesh": object = self.sceneInterface().readObject(0.0) interpolation = object.interpolation try: IECoreMaya.ToMayaMeshConverter.setMeshInterpolationAttribute( shape, interpolation ) except: IECore.msg( IECore.Msg.Level.Warning, "FnSceneShape.convertObjectToGeometry", "Failed to set interpolation on %s." % shape ) ## Returns the maya node type that this function set operates on @classmethod def _mayaNodeType( cls ): return "ieSceneShape"
bsd-3-clause
blaggacao/OpenUpgrade
addons/stock_landed_costs/__openerp__.py
220
1914
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## { 'name': 'WMS Landed Costs', 'version': '1.1', 'author': 'OpenERP SA', 'summary': 'Landed Costs', 'description': """ Landed Costs Management ======================= This module allows you to easily add extra costs on pickings and decide the split of these costs among their stock moves in order to take them into account in your stock valuation. """, 'website': 'https://www.odoo.com/page/warehouse', 'depends': ['stock_account'], 'category': 'Warehouse Management', 'sequence': 16, 'demo': [ ], 'data': [ 'security/ir.model.access.csv', 'stock_landed_costs_sequence.xml', 'product_view.xml', 'stock_landed_costs_view.xml', 'stock_landed_costs_data.xml', ], 'test': [ 'test/stock_landed_costs.yml' ], 'installable': True, 'auto_install': False, } # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
diwer/sublimeconfig
Packages/Package Control/package_control/versions.py
9
2399
import re

from .semver import SemVer
from .console_write import console_write


def semver_compat(v):
    """Convert a loose version identifier into a semver-compatible string.

    Accepts a SemVer instance, a package-info dict (its 'version' key is
    used, defaulting to '0'), or a plain string. Date-based and 4+-segment
    versions are rewritten into valid semver form.
    """
    if isinstance(v, SemVer):
        return str(v)

    # Allowing passing in a dict containing info about a package
    if isinstance(v, dict):
        if 'version' not in v:
            return '0'
        v = v['version']

    # Trim v off of the front
    # (raw strings are used for all regexes so '\d' isn't an invalid
    # string escape on modern Python)
    v = re.sub(r'^v', '', v)

    # We prepend 0 to all date-based version numbers so that developers
    # may switch to explicit versioning from GitHub/BitBucket
    # versioning based on commit dates.
    #
    # When translating dates into semver, the way to get each date
    # segment into the version is to treat the year and month as
    # minor and patch, and then the rest as a numeric build version
    # with four different parts. The result looks like:
    # 0.2012.11+10.31.23.59
    date_match = re.match(r'(\d{4})\.(\d{2})\.(\d{2})\.(\d{2})\.(\d{2})\.(\d{2})$', v)
    if date_match:
        v = '0.%s.%s+%s.%s.%s.%s' % date_match.groups()

    # This handles version that were valid pre-semver with 4+ dotted
    # groups, such as 1.6.9.0
    four_plus_match = re.match(r'(\d+\.\d+\.\d+)[T\.](\d+(\.\d+)*)$', v)
    if four_plus_match:
        v = '%s+%s' % (four_plus_match.group(1), four_plus_match.group(2))

    # Semver must have major, minor, patch
    elif re.match(r'^\d+$', v):
        v += '.0.0'
    elif re.match(r'^\d+\.\d+$', v):
        v += '.0'

    return v


def version_comparable(string):
    """Return a SemVer object suitable for comparing/sorting versions."""
    return SemVer(semver_compat(string))


def version_exclude_prerelease(versions):
    """Return the given versions with all pre-release entries removed."""
    # "is None" rather than "== None": identity is the correct check here.
    return [
        version for version in versions
        if SemVer(semver_compat(version)).prerelease is None
    ]


def version_filter(versions, allow_prerelease=False):
    """Return only the entries that are valid (optionally non-prerelease) versions."""
    output = []
    for version in versions:
        no_v_version = re.sub(r'^v', '', version)
        if not SemVer.valid(no_v_version):
            continue
        if not allow_prerelease and SemVer(no_v_version).prerelease is not None:
            continue
        output.append(version)
    return output


def _version_sort_key(item):
    # Key function shared by version_sort(); item may be a string or package dict.
    return SemVer(semver_compat(item))


def version_sort(sortable, **kwargs):
    """Sort versions semantically, returning [] (and logging) on invalid input."""
    try:
        return sorted(sortable, key=_version_sort_key, **kwargs)
    except (ValueError) as e:
        console_write(u"Error sorting versions - %s" % e, True)
        return []
mit
lseyesl/phantomjs
src/qt/qtwebkit/Tools/Scripts/webkitpy/port/qt.py
113
7883
# Copyright (C) 2010 Google Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the Google name nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
"""QtWebKit implementation of the Port interface."""

import glob
import logging
import re
import sys
import os
import platform

from webkitpy.common.memoized import memoized
from webkitpy.layout_tests.models.test_configuration import TestConfiguration
from webkitpy.port.base import Port
from webkitpy.port.xvfbdriver import XvfbDriver

_log = logging.getLogger(__name__)


class QtPort(Port):
    """Port implementation for the Qt flavour of WebKit's layout-test harness."""

    ALL_VERSIONS = ['linux', 'win', 'mac']
    port_name = "qt"

    def _wk2_port_name(self):
        return "qt-wk2"

    def _port_flag_for_scripts(self):
        return "--qt"

    @classmethod
    def determine_full_port_name(cls, host, options, port_name):
        # Append the host OS name only when no more specific name was given.
        if port_name and port_name != cls.port_name:
            return port_name
        return port_name + '-' + host.platform.os_name

    # sys_platform exists only for unit testing.
    def __init__(self, host, port_name, **kwargs):
        super(QtPort, self).__init__(host, port_name, **kwargs)

        # Derive the OS from the port name ("qt-linux" -> "linux").
        self._operating_system = port_name.replace('qt-', '')

        # FIXME: Why is this being set at all?
        self._version = self.operating_system()

    def _generate_all_test_configurations(self):
        # One configuration per (OS version, build type) pair; always x86.
        configurations = []
        for version in self.ALL_VERSIONS:
            for build_type in self.ALL_BUILD_TYPES:
                configurations.append(TestConfiguration(version=version, architecture='x86', build_type=build_type))
        return configurations

    def _build_driver(self):
        # The Qt port builds DRT as part of the main build step
        return True

    def supports_per_test_timeout(self):
        return True

    def _path_to_driver(self):
        return self._build_path('bin/%s' % self.driver_name())

    def _path_to_image_diff(self):
        return self._build_path('bin/ImageDiff')

    def _path_to_webcore_library(self):
        if self.operating_system() == 'mac':
            return self._build_path('lib/QtWebKitWidgets.framework/QtWebKitWidgets')
        else:
            return self._build_path('lib/libQt5WebKitWidgets.so')

    def _modules_to_search_for_symbols(self):
        # We search in every library to be reliable in the case of building with CONFIG+=force_static_libs_as_shared.
        if self.operating_system() == 'mac':
            frameworks = glob.glob(os.path.join(self._build_path('lib'), '*.framework'))
            return [os.path.join(framework, os.path.splitext(os.path.basename(framework))[0]) for framework in frameworks]
        else:
            suffix = 'dll' if self.operating_system() == 'win' else 'so'
            return glob.glob(os.path.join(self._build_path('lib'), 'lib*.' + suffix))

    @memoized
    def qt_version(self):
        # Parse "Qt version X.Y" out of `qmake -v`; fall back to '5.0' when
        # qmake isn't runnable. An empty string results if qmake runs but the
        # pattern never matches.
        version = ''
        try:
            for line in self._executive.run_command(['qmake', '-v']).split('\n'):
                match = re.search('Qt\sversion\s(?P<version>\d\.\d)', line)
                if match:
                    version = match.group('version')
                    break
        except OSError:
            version = '5.0'
        return version

    def _search_paths(self):
        # Baseline fallback graph, most specific first:
        #                 qt-mac-wk2
        #                /
        #       qt-wk1  qt-wk2
        #             \/
        #         qt-5.x
        #            \
        # (qt-linux|qt-mac|qt-win)
        #             |
        #             qt
        search_paths = []
        if self.get_option('webkit_test_runner'):
            if self.operating_system() == 'mac':
                search_paths.append('qt-mac-wk2')
            search_paths.append('qt-wk2')
        else:
            search_paths.append('qt-wk1')
        search_paths.append('qt-' + self.qt_version())
        search_paths.append(self.port_name + '-' + self.operating_system())
        search_paths.append(self.port_name)
        return search_paths

    def default_baseline_search_path(self):
        return map(self._webkit_baseline_path, self._search_paths())

    def _port_specific_expectations_files(self):
        paths = self._search_paths()
        if self.get_option('webkit_test_runner'):
            paths.append('wk2')

        # expectations_files() uses the directories listed in _search_paths reversed.
        # e.g. qt -> qt-linux -> qt-5.x -> qt-wk1
        return list(reversed([self._filesystem.join(self._webkit_baseline_path(p), 'TestExpectations') for p in paths]))

    def setup_environ_for_server(self, server_name=None):
        # Start from the base port's clean environment, then forward the
        # Qt/WebKit-specific variables the driver understands.
        clean_env = super(QtPort, self).setup_environ_for_server(server_name)
        clean_env['QTWEBKIT_PLUGIN_PATH'] = self._build_path('lib/plugins')
        self._copy_value_from_environ_if_set(clean_env, 'QT_DRT_WEBVIEW_MODE')
        self._copy_value_from_environ_if_set(clean_env, 'DYLD_IMAGE_SUFFIX')
        self._copy_value_from_environ_if_set(clean_env, 'QT_WEBKIT_LOG')
        self._copy_value_from_environ_if_set(clean_env, 'DISABLE_NI_WARNING')
        self._copy_value_from_environ_if_set(clean_env, 'QT_WEBKIT_PAUSE_UI_PROCESS')
        self._copy_value_from_environ_if_set(clean_env, 'QT_QPA_PLATFORM_PLUGIN_PATH')
        self._copy_value_from_environ_if_set(clean_env, 'QT_WEBKIT_DISABLE_UIPROCESS_DUMPPIXELS')
        return clean_env

    # FIXME: We should find a way to share this implmentation with Gtk,
    # or teach run-launcher how to call run-safari and move this down to Port.
    def show_results_html_file(self, results_filename):
        run_launcher_args = []
        if self.get_option('webkit_test_runner'):
            run_launcher_args.append('-2')
        run_launcher_args.append("file://%s" % results_filename)
        self._run_script("run-launcher", run_launcher_args)

    def operating_system(self):
        return self._operating_system

    def check_sys_deps(self, needs_http):
        result = super(QtPort, self).check_sys_deps(needs_http)
        if not 'WEBKIT_TESTFONTS' in os.environ:
            _log.error('\nThe WEBKIT_TESTFONTS environment variable is not defined or not set properly.')
            _log.error('You must set it before running the tests.')
            _log.error('Use git to grab the actual fonts from http://gitorious.org/qtwebkit/testfonts')
            return False
        return result

    # Qt port is not ready for parallel testing, see https://bugs.webkit.org/show_bug.cgi?id=77730 for details.
    def default_child_processes(self):
        return 1
bsd-3-clause
waseem18/oh-mainline
vendor/packages/gdata/src/gdata/data.py
127
39947
#!/usr/bin/env python # # Copyright (C) 2009 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # This module is used for version 2 of the Google Data APIs. """Provides classes and constants for the XML in the Google Data namespace. Documentation for the raw XML which these classes represent can be found here: http://code.google.com/apis/gdata/docs/2.0/elements.html """ __author__ = 'j.s@google.com (Jeff Scudder)' import os import atom.core import atom.data GDATA_TEMPLATE = '{http://schemas.google.com/g/2005}%s' GD_TEMPLATE = GDATA_TEMPLATE OPENSEARCH_TEMPLATE_V1 = '{http://a9.com/-/spec/opensearchrss/1.0/}%s' OPENSEARCH_TEMPLATE_V2 = '{http://a9.com/-/spec/opensearch/1.1/}%s' BATCH_TEMPLATE = '{http://schemas.google.com/gdata/batch}%s' # Labels used in batch request entries to specify the desired CRUD operation. 
BATCH_INSERT = 'insert' BATCH_UPDATE = 'update' BATCH_DELETE = 'delete' BATCH_QUERY = 'query' EVENT_LOCATION = 'http://schemas.google.com/g/2005#event' ALTERNATE_LOCATION = 'http://schemas.google.com/g/2005#event.alternate' PARKING_LOCATION = 'http://schemas.google.com/g/2005#event.parking' CANCELED_EVENT = 'http://schemas.google.com/g/2005#event.canceled' CONFIRMED_EVENT = 'http://schemas.google.com/g/2005#event.confirmed' TENTATIVE_EVENT = 'http://schemas.google.com/g/2005#event.tentative' CONFIDENTIAL_EVENT = 'http://schemas.google.com/g/2005#event.confidential' DEFAULT_EVENT = 'http://schemas.google.com/g/2005#event.default' PRIVATE_EVENT = 'http://schemas.google.com/g/2005#event.private' PUBLIC_EVENT = 'http://schemas.google.com/g/2005#event.public' OPAQUE_EVENT = 'http://schemas.google.com/g/2005#event.opaque' TRANSPARENT_EVENT = 'http://schemas.google.com/g/2005#event.transparent' CHAT_MESSAGE = 'http://schemas.google.com/g/2005#message.chat' INBOX_MESSAGE = 'http://schemas.google.com/g/2005#message.inbox' SENT_MESSAGE = 'http://schemas.google.com/g/2005#message.sent' SPAM_MESSAGE = 'http://schemas.google.com/g/2005#message.spam' STARRED_MESSAGE = 'http://schemas.google.com/g/2005#message.starred' UNREAD_MESSAGE = 'http://schemas.google.com/g/2005#message.unread' BCC_RECIPIENT = 'http://schemas.google.com/g/2005#message.bcc' CC_RECIPIENT = 'http://schemas.google.com/g/2005#message.cc' SENDER = 'http://schemas.google.com/g/2005#message.from' REPLY_TO = 'http://schemas.google.com/g/2005#message.reply-to' TO_RECIPIENT = 'http://schemas.google.com/g/2005#message.to' ASSISTANT_REL = 'http://schemas.google.com/g/2005#assistant' CALLBACK_REL = 'http://schemas.google.com/g/2005#callback' CAR_REL = 'http://schemas.google.com/g/2005#car' COMPANY_MAIN_REL = 'http://schemas.google.com/g/2005#company_main' FAX_REL = 'http://schemas.google.com/g/2005#fax' HOME_REL = 'http://schemas.google.com/g/2005#home' HOME_FAX_REL = 'http://schemas.google.com/g/2005#home_fax' ISDN_REL 
= 'http://schemas.google.com/g/2005#isdn' MAIN_REL = 'http://schemas.google.com/g/2005#main' MOBILE_REL = 'http://schemas.google.com/g/2005#mobile' OTHER_REL = 'http://schemas.google.com/g/2005#other' OTHER_FAX_REL = 'http://schemas.google.com/g/2005#other_fax' PAGER_REL = 'http://schemas.google.com/g/2005#pager' RADIO_REL = 'http://schemas.google.com/g/2005#radio' TELEX_REL = 'http://schemas.google.com/g/2005#telex' TTL_TDD_REL = 'http://schemas.google.com/g/2005#tty_tdd' WORK_REL = 'http://schemas.google.com/g/2005#work' WORK_FAX_REL = 'http://schemas.google.com/g/2005#work_fax' WORK_MOBILE_REL = 'http://schemas.google.com/g/2005#work_mobile' WORK_PAGER_REL = 'http://schemas.google.com/g/2005#work_pager' NETMEETING_REL = 'http://schemas.google.com/g/2005#netmeeting' OVERALL_REL = 'http://schemas.google.com/g/2005#overall' PRICE_REL = 'http://schemas.google.com/g/2005#price' QUALITY_REL = 'http://schemas.google.com/g/2005#quality' EVENT_REL = 'http://schemas.google.com/g/2005#event' EVENT_ALTERNATE_REL = 'http://schemas.google.com/g/2005#event.alternate' EVENT_PARKING_REL = 'http://schemas.google.com/g/2005#event.parking' AIM_PROTOCOL = 'http://schemas.google.com/g/2005#AIM' MSN_PROTOCOL = 'http://schemas.google.com/g/2005#MSN' YAHOO_MESSENGER_PROTOCOL = 'http://schemas.google.com/g/2005#YAHOO' SKYPE_PROTOCOL = 'http://schemas.google.com/g/2005#SKYPE' QQ_PROTOCOL = 'http://schemas.google.com/g/2005#QQ' GOOGLE_TALK_PROTOCOL = 'http://schemas.google.com/g/2005#GOOGLE_TALK' ICQ_PROTOCOL = 'http://schemas.google.com/g/2005#ICQ' JABBER_PROTOCOL = 'http://schemas.google.com/g/2005#JABBER' REGULAR_COMMENTS = 'http://schemas.google.com/g/2005#regular' REVIEW_COMMENTS = 'http://schemas.google.com/g/2005#reviews' MAIL_BOTH = 'http://schemas.google.com/g/2005#both' MAIL_LETTERS = 'http://schemas.google.com/g/2005#letters' MAIL_PARCELS = 'http://schemas.google.com/g/2005#parcels' MAIL_NEITHER = 'http://schemas.google.com/g/2005#neither' GENERAL_ADDRESS = 
'http://schemas.google.com/g/2005#general' LOCAL_ADDRESS = 'http://schemas.google.com/g/2005#local' OPTIONAL_ATENDEE = 'http://schemas.google.com/g/2005#event.optional' REQUIRED_ATENDEE = 'http://schemas.google.com/g/2005#event.required' ATTENDEE_ACCEPTED = 'http://schemas.google.com/g/2005#event.accepted' ATTENDEE_DECLINED = 'http://schemas.google.com/g/2005#event.declined' ATTENDEE_INVITED = 'http://schemas.google.com/g/2005#event.invited' ATTENDEE_TENTATIVE = 'http://schemas.google.com/g/2005#event.tentative' FULL_PROJECTION = 'full' VALUES_PROJECTION = 'values' BASIC_PROJECTION = 'basic' PRIVATE_VISIBILITY = 'private' PUBLIC_VISIBILITY = 'public' OPAQUE_TRANSPARENCY = 'http://schemas.google.com/g/2005#event.opaque' TRANSPARENT_TRANSPARENCY = 'http://schemas.google.com/g/2005#event.transparent' CONFIDENTIAL_EVENT_VISIBILITY = 'http://schemas.google.com/g/2005#event.confidential' DEFAULT_EVENT_VISIBILITY = 'http://schemas.google.com/g/2005#event.default' PRIVATE_EVENT_VISIBILITY = 'http://schemas.google.com/g/2005#event.private' PUBLIC_EVENT_VISIBILITY = 'http://schemas.google.com/g/2005#event.public' CANCELED_EVENT_STATUS = 'http://schemas.google.com/g/2005#event.canceled' CONFIRMED_EVENT_STATUS = 'http://schemas.google.com/g/2005#event.confirmed' TENTATIVE_EVENT_STATUS = 'http://schemas.google.com/g/2005#event.tentative' ACL_REL = 'http://schemas.google.com/acl/2007#accessControlList' class Error(Exception): pass class MissingRequiredParameters(Error): pass class LinkFinder(atom.data.LinkFinder): """Mixin used in Feed and Entry classes to simplify link lookups by type. Provides lookup methods for edit, edit-media, post, ACL and other special links which are common across Google Data APIs. 
""" def find_html_link(self): """Finds the first link with rel of alternate and type of text/html.""" for link in self.link: if link.rel == 'alternate' and link.type == 'text/html': return link.href return None FindHtmlLink = find_html_link def get_html_link(self): for a_link in self.link: if a_link.rel == 'alternate' and a_link.type == 'text/html': return a_link return None GetHtmlLink = get_html_link def find_post_link(self): """Get the URL to which new entries should be POSTed. The POST target URL is used to insert new entries. Returns: A str for the URL in the link with a rel matching the POST type. """ return self.find_url('http://schemas.google.com/g/2005#post') FindPostLink = find_post_link def get_post_link(self): return self.get_link('http://schemas.google.com/g/2005#post') GetPostLink = get_post_link def find_acl_link(self): acl_link = self.get_acl_link() if acl_link: return acl_link.href return None FindAclLink = find_acl_link def get_acl_link(self): """Searches for a link or feed_link (if present) with the rel for ACL.""" acl_link = self.get_link(ACL_REL) if acl_link: return acl_link elif hasattr(self, 'feed_link'): for a_feed_link in self.feed_link: if a_feed_link.rel == ACL_REL: return a_feed_link return None GetAclLink = get_acl_link def find_feed_link(self): return self.find_url('http://schemas.google.com/g/2005#feed') FindFeedLink = find_feed_link def get_feed_link(self): return self.get_link('http://schemas.google.com/g/2005#feed') GetFeedLink = get_feed_link def find_previous_link(self): return self.find_url('previous') FindPreviousLink = find_previous_link def get_previous_link(self): return self.get_link('previous') GetPreviousLink = get_previous_link class TotalResults(atom.core.XmlElement): """opensearch:TotalResults for a GData feed.""" _qname = (OPENSEARCH_TEMPLATE_V1 % 'totalResults', OPENSEARCH_TEMPLATE_V2 % 'totalResults') class StartIndex(atom.core.XmlElement): """The opensearch:startIndex element in GData feed.""" _qname = 
(OPENSEARCH_TEMPLATE_V1 % 'startIndex', OPENSEARCH_TEMPLATE_V2 % 'startIndex') class ItemsPerPage(atom.core.XmlElement): """The opensearch:itemsPerPage element in GData feed.""" _qname = (OPENSEARCH_TEMPLATE_V1 % 'itemsPerPage', OPENSEARCH_TEMPLATE_V2 % 'itemsPerPage') class ExtendedProperty(atom.core.XmlElement): """The Google Data extendedProperty element. Used to store arbitrary key-value information specific to your application. The value can either be a text string stored as an XML attribute (.value), or an XML node (XmlBlob) as a child element. This element is used in the Google Calendar data API and the Google Contacts data API. """ _qname = GDATA_TEMPLATE % 'extendedProperty' name = 'name' value = 'value' def get_xml_blob(self): """Returns the XML blob as an atom.core.XmlElement. Returns: An XmlElement representing the blob's XML, or None if no blob was set. """ if self._other_elements: return self._other_elements[0] else: return None GetXmlBlob = get_xml_blob def set_xml_blob(self, blob): """Sets the contents of the extendedProperty to XML as a child node. Since the extendedProperty is only allowed one child element as an XML blob, setting the XML blob will erase any preexisting member elements in this object. Args: blob: str or atom.core.XmlElement representing the XML blob stored in the extendedProperty. """ # Erase any existing extension_elements, clears the child nodes from the # extendedProperty. 
if isinstance(blob, atom.core.XmlElement): self._other_elements = [blob] else: self._other_elements = [atom.core.parse(str(blob))] SetXmlBlob = set_xml_blob class GDEntry(atom.data.Entry, LinkFinder): """Extends Atom Entry to provide data processing""" etag = '{http://schemas.google.com/g/2005}etag' def get_id(self): if self.id is not None and self.id.text is not None: return self.id.text.strip() return None GetId = get_id def is_media(self): if self.find_edit_media_link(): return True return False IsMedia = is_media def find_media_link(self): """Returns the URL to the media content, if the entry is a media entry. Otherwise returns None. """ if self.is_media(): return self.content.src return None FindMediaLink = find_media_link class GDFeed(atom.data.Feed, LinkFinder): """A Feed from a GData service.""" etag = '{http://schemas.google.com/g/2005}etag' total_results = TotalResults start_index = StartIndex items_per_page = ItemsPerPage entry = [GDEntry] def get_id(self): if self.id is not None and self.id.text is not None: return self.id.text.strip() return None GetId = get_id def get_generator(self): if self.generator and self.generator.text: return self.generator.text.strip() return None class BatchId(atom.core.XmlElement): """Identifies a single operation in a batch request.""" _qname = BATCH_TEMPLATE % 'id' class BatchOperation(atom.core.XmlElement): """The CRUD operation which this batch entry represents.""" _qname = BATCH_TEMPLATE % 'operation' type = 'type' class BatchStatus(atom.core.XmlElement): """The batch:status element present in a batch response entry. A status element contains the code (HTTP response code) and reason as elements. In a single request these fields would be part of the HTTP response, but in a batch request each Entry operation has a corresponding Entry in the response feed which includes status information. 
See http://code.google.com/apis/gdata/batch.html#Handling_Errors """ _qname = BATCH_TEMPLATE % 'status' code = 'code' reason = 'reason' content_type = 'content-type' class BatchEntry(GDEntry): """An atom:entry for use in batch requests. The BatchEntry contains additional members to specify the operation to be performed on this entry and a batch ID so that the server can reference individual operations in the response feed. For more information, see: http://code.google.com/apis/gdata/batch.html """ batch_operation = BatchOperation batch_id = BatchId batch_status = BatchStatus class BatchInterrupted(atom.core.XmlElement): """The batch:interrupted element sent if batch request was interrupted. Only appears in a feed if some of the batch entries could not be processed. See: http://code.google.com/apis/gdata/batch.html#Handling_Errors """ _qname = BATCH_TEMPLATE % 'interrupted' reason = 'reason' success = 'success' failures = 'failures' parsed = 'parsed' class BatchFeed(GDFeed): """A feed containing a list of batch request entries.""" interrupted = BatchInterrupted entry = [BatchEntry] def add_batch_entry(self, entry=None, id_url_string=None, batch_id_string=None, operation_string=None): """Logic for populating members of a BatchEntry and adding to the feed. If the entry is not a BatchEntry, it is converted to a BatchEntry so that the batch specific members will be present. The id_url_string can be used in place of an entry if the batch operation applies to a URL. For example query and delete operations require just the URL of an entry, no body is sent in the HTTP request. If an id_url_string is sent instead of an entry, a BatchEntry is created and added to the feed. This method also assigns the desired batch id to the entry so that it can be referenced in the server's response. If the batch_id_string is None, this method will assign a batch_id to be the index at which this entry will be in the feed's entry list. 
Args: entry: BatchEntry, atom.data.Entry, or another Entry flavor (optional) The entry which will be sent to the server as part of the batch request. The item must have a valid atom id so that the server knows which entry this request references. id_url_string: str (optional) The URL of the entry to be acted on. You can find this URL in the text member of the atom id for an entry. If an entry is not sent, this id will be used to construct a new BatchEntry which will be added to the request feed. batch_id_string: str (optional) The batch ID to be used to reference this batch operation in the results feed. If this parameter is None, the current length of the feed's entry array will be used as a count. Note that batch_ids should either always be specified or never, mixing could potentially result in duplicate batch ids. operation_string: str (optional) The desired batch operation which will set the batch_operation.type member of the entry. Options are 'insert', 'update', 'delete', and 'query' Raises: MissingRequiredParameters: Raised if neither an id_ url_string nor an entry are provided in the request. Returns: The added entry. """ if entry is None and id_url_string is None: raise MissingRequiredParameters('supply either an entry or URL string') if entry is None and id_url_string is not None: entry = BatchEntry(id=atom.data.Id(text=id_url_string)) if batch_id_string is not None: entry.batch_id = BatchId(text=batch_id_string) elif entry.batch_id is None or entry.batch_id.text is None: entry.batch_id = BatchId(text=str(len(self.entry))) if operation_string is not None: entry.batch_operation = BatchOperation(type=operation_string) self.entry.append(entry) return entry AddBatchEntry = add_batch_entry def add_insert(self, entry, batch_id_string=None): """Add an insert request to the operations in this batch request feed. If the entry doesn't yet have an operation or a batch id, these will be set to the insert operation and a batch_id specified as a parameter. 
Args: entry: BatchEntry The entry which will be sent in the batch feed as an insert request. batch_id_string: str (optional) The batch ID to be used to reference this batch operation in the results feed. If this parameter is None, the current length of the feed's entry array will be used as a count. Note that batch_ids should either always be specified or never, mixing could potentially result in duplicate batch ids. """ self.add_batch_entry(entry=entry, batch_id_string=batch_id_string, operation_string=BATCH_INSERT) AddInsert = add_insert def add_update(self, entry, batch_id_string=None): """Add an update request to the list of batch operations in this feed. Sets the operation type of the entry to insert if it is not already set and assigns the desired batch id to the entry so that it can be referenced in the server's response. Args: entry: BatchEntry The entry which will be sent to the server as an update (HTTP PUT) request. The item must have a valid atom id so that the server knows which entry to replace. batch_id_string: str (optional) The batch ID to be used to reference this batch operation in the results feed. If this parameter is None, the current length of the feed's entry array will be used as a count. See also comments for AddInsert. """ self.add_batch_entry(entry=entry, batch_id_string=batch_id_string, operation_string=BATCH_UPDATE) AddUpdate = add_update def add_delete(self, url_string=None, entry=None, batch_id_string=None): """Adds a delete request to the batch request feed. This method takes either the url_string which is the atom id of the item to be deleted, or the entry itself. The atom id of the entry must be present so that the server knows which entry should be deleted. Args: url_string: str (optional) The URL of the entry to be deleted. You can find this URL in the text member of the atom id for an entry. entry: BatchEntry (optional) The entry to be deleted. 
batch_id_string: str (optional) Raises: MissingRequiredParameters: Raised if neither a url_string nor an entry are provided in the request. """ self.add_batch_entry(entry=entry, id_url_string=url_string, batch_id_string=batch_id_string, operation_string=BATCH_DELETE) AddDelete = add_delete def add_query(self, url_string=None, entry=None, batch_id_string=None): """Adds a query request to the batch request feed. This method takes either the url_string which is the query URL whose results will be added to the result feed. The query URL will be encapsulated in a BatchEntry, and you may pass in the BatchEntry with a query URL instead of sending a url_string. Args: url_string: str (optional) entry: BatchEntry (optional) batch_id_string: str (optional) Raises: MissingRequiredParameters """ self.add_batch_entry(entry=entry, id_url_string=url_string, batch_id_string=batch_id_string, operation_string=BATCH_QUERY) AddQuery = add_query def find_batch_link(self): return self.find_url('http://schemas.google.com/g/2005#batch') FindBatchLink = find_batch_link class EntryLink(atom.core.XmlElement): """The gd:entryLink element. Represents a logically nested entry. For example, a <gd:who> representing a contact might have a nested entry from a contact feed. """ _qname = GDATA_TEMPLATE % 'entryLink' entry = GDEntry rel = 'rel' read_only = 'readOnly' href = 'href' class FeedLink(atom.core.XmlElement): """The gd:feedLink element. Represents a logically nested feed. For example, a calendar feed might have a nested feed representing all comments on entries. """ _qname = GDATA_TEMPLATE % 'feedLink' feed = GDFeed rel = 'rel' read_only = 'readOnly' count_hint = 'countHint' href = 'href' class AdditionalName(atom.core.XmlElement): """The gd:additionalName element. Specifies additional (eg. middle) name of the person. Contains an attribute for the phonetic representaton of the name. 
""" _qname = GDATA_TEMPLATE % 'additionalName' yomi = 'yomi' class Comments(atom.core.XmlElement): """The gd:comments element. Contains a comments feed for the enclosing entry (such as a calendar event). """ _qname = GDATA_TEMPLATE % 'comments' rel = 'rel' feed_link = FeedLink class Country(atom.core.XmlElement): """The gd:country element. Country name along with optional country code. The country code is given in accordance with ISO 3166-1 alpha-2: http://www.iso.org/iso/iso-3166-1_decoding_table """ _qname = GDATA_TEMPLATE % 'country' code = 'code' class EmailImParent(atom.core.XmlElement): address = 'address' label = 'label' rel = 'rel' primary = 'primary' class Email(EmailImParent): """The gd:email element. An email address associated with the containing entity (which is usually an entity representing a person or a location). """ _qname = GDATA_TEMPLATE % 'email' display_name = 'displayName' class FamilyName(atom.core.XmlElement): """The gd:familyName element. Specifies family name of the person, eg. "Smith". """ _qname = GDATA_TEMPLATE % 'familyName' yomi = 'yomi' class Im(EmailImParent): """The gd:im element. An instant messaging address associated with the containing entity. """ _qname = GDATA_TEMPLATE % 'im' protocol = 'protocol' class GivenName(atom.core.XmlElement): """The gd:givenName element. Specifies given name of the person, eg. "John". """ _qname = GDATA_TEMPLATE % 'givenName' yomi = 'yomi' class NamePrefix(atom.core.XmlElement): """The gd:namePrefix element. Honorific prefix, eg. 'Mr' or 'Mrs'. """ _qname = GDATA_TEMPLATE % 'namePrefix' class NameSuffix(atom.core.XmlElement): """The gd:nameSuffix element. Honorific suffix, eg. 'san' or 'III'. """ _qname = GDATA_TEMPLATE % 'nameSuffix' class FullName(atom.core.XmlElement): """The gd:fullName element. Unstructured representation of the name. """ _qname = GDATA_TEMPLATE % 'fullName' class Name(atom.core.XmlElement): """The gd:name element. Allows storing person's name in a structured way. 
Consists of given name, additional name, family name, prefix, suffix and full name. """ _qname = GDATA_TEMPLATE % 'name' given_name = GivenName additional_name = AdditionalName family_name = FamilyName name_prefix = NamePrefix name_suffix = NameSuffix full_name = FullName class OrgDepartment(atom.core.XmlElement): """The gd:orgDepartment element. Describes a department within an organization. Must appear within a gd:organization element. """ _qname = GDATA_TEMPLATE % 'orgDepartment' class OrgJobDescription(atom.core.XmlElement): """The gd:orgJobDescription element. Describes a job within an organization. Must appear within a gd:organization element. """ _qname = GDATA_TEMPLATE % 'orgJobDescription' class OrgName(atom.core.XmlElement): """The gd:orgName element. The name of the organization. Must appear within a gd:organization element. Contains a Yomigana attribute (Japanese reading aid) for the organization name. """ _qname = GDATA_TEMPLATE % 'orgName' yomi = 'yomi' class OrgSymbol(atom.core.XmlElement): """The gd:orgSymbol element. Provides a symbol of an organization. Must appear within a gd:organization element. """ _qname = GDATA_TEMPLATE % 'orgSymbol' class OrgTitle(atom.core.XmlElement): """The gd:orgTitle element. The title of a person within an organization. Must appear within a gd:organization element. """ _qname = GDATA_TEMPLATE % 'orgTitle' class Organization(atom.core.XmlElement): """The gd:organization element. An organization, typically associated with a contact. """ _qname = GDATA_TEMPLATE % 'organization' label = 'label' primary = 'primary' rel = 'rel' department = OrgDepartment job_description = OrgJobDescription name = OrgName symbol = OrgSymbol title = OrgTitle class When(atom.core.XmlElement): """The gd:when element. Represents a period of time or an instant. """ _qname = GDATA_TEMPLATE % 'when' end = 'endTime' start = 'startTime' value = 'valueString' class OriginalEvent(atom.core.XmlElement): """The gd:originalEvent element. 
Equivalent to the Recurrence ID property specified in section 4.8.4.4 of RFC 2445. Appears in every instance of a recurring event, to identify the original event. Contains a <gd:when> element specifying the original start time of the instance that has become an exception. """ _qname = GDATA_TEMPLATE % 'originalEvent' id = 'id' href = 'href' when = When class PhoneNumber(atom.core.XmlElement): """The gd:phoneNumber element. A phone number associated with the containing entity (which is usually an entity representing a person or a location). """ _qname = GDATA_TEMPLATE % 'phoneNumber' label = 'label' rel = 'rel' uri = 'uri' primary = 'primary' class PostalAddress(atom.core.XmlElement): """The gd:postalAddress element.""" _qname = GDATA_TEMPLATE % 'postalAddress' label = 'label' rel = 'rel' uri = 'uri' primary = 'primary' class Rating(atom.core.XmlElement): """The gd:rating element. Represents a numeric rating of the enclosing entity, such as a comment. Each rating supplies its own scale, although it may be normalized by a service; for example, some services might convert all ratings to a scale from 1 to 5. """ _qname = GDATA_TEMPLATE % 'rating' average = 'average' max = 'max' min = 'min' num_raters = 'numRaters' rel = 'rel' value = 'value' class Recurrence(atom.core.XmlElement): """The gd:recurrence element. Represents the dates and times when a recurring event takes place. The string that defines the recurrence consists of a set of properties, each of which is defined in the iCalendar standard (RFC 2445). Specifically, the string usually begins with a DTSTART property that indicates the starting time of the first instance of the event, and often a DTEND property or a DURATION property to indicate when the first instance ends. Next come RRULE, RDATE, EXRULE, and/or EXDATE properties, which collectively define a recurring event and its exceptions (but see below). (See section 4.8.5 of RFC 2445 for more information about these recurrence component properties.) 
Last comes a VTIMEZONE component, providing detailed timezone rules for
  any timezone ID mentioned in the preceding properties.

  Google services like Google Calendar don't generally generate EXRULE and
  EXDATE properties to represent exceptions to recurring events; instead,
  they generate <gd:recurrenceException> elements. However, Google services
  may include EXRULE and/or EXDATE properties anyway; for example, users can
  import events and exceptions into Calendar, and if those imported events
  contain EXRULE or EXDATE properties, then Calendar will provide those
  properties when it sends a <gd:recurrence> element.

  Note that the use of <gd:recurrenceException> means that you can't be sure
  just from examining a <gd:recurrence> element whether there are any
  exceptions to the recurrence description. To ensure that you find all
  exceptions, look for <gd:recurrenceException> elements in the feed, and
  use their <gd:originalEvent> elements to match them up with
  <gd:recurrence> elements.
  """
  _qname = GDATA_TEMPLATE % 'recurrence'


class RecurrenceException(atom.core.XmlElement):
  """The gd:recurrenceException element.

  Represents an event that's an exception to a recurring event - that is,
  an instance of a recurring event in which one or more aspects of the
  recurring event (such as attendance list, time, or location) have been
  changed.

  Contains a <gd:originalEvent> element that specifies the original
  recurring event that this event is an exception to.

  When you change an instance of a recurring event, that instance becomes
  an exception. Depending on what change you made to it, the exception
  behaves in either of two different ways when the original recurring event
  is changed:

  - If you add, change, or remove comments, attendees, or attendee
    responses, then the exception remains tied to the original event, and
    changes to the original event also change the exception.
  - If you make any other changes to the exception (such as changing the
    time or location) then the instance becomes "specialized," which means
    that it's no longer as tightly tied to the original event. If you
    change the original event, specialized exceptions don't change. But
    see below.

  For example, say you have a meeting every Tuesday and Thursday at
  2:00 p.m. If you change the attendance list for this Thursday's meeting
  (but not for the regularly scheduled meeting), then it becomes an
  exception. If you change the time for this Thursday's meeting (but not
  for the regularly scheduled meeting), then it becomes specialized.

  Regardless of whether an exception is specialized or not, if you do
  something that deletes the instance that the exception was derived from,
  then the exception is deleted. Note that changing the day or time of a
  recurring event deletes all instances, and creates new ones.

  For example, after you've specialized this Thursday's meeting, say you
  change the recurring meeting to happen on Monday, Wednesday, and Friday.
  That change deletes all of the recurring instances of the
  Tuesday/Thursday meeting, including the specialized one.

  If a particular instance of a recurring event is deleted, then that
  instance appears as a <gd:recurrenceException> containing a
  <gd:entryLink> that has its <gd:eventStatus> set to
  "http://schemas.google.com/g/2005#event.canceled". (For more information
  about canceled events, see RFC 2445.)
  """
  _qname = GDATA_TEMPLATE % 'recurrenceException'
  # XML attribute: present/true when the exception is "specialized", i.e.
  # no longer tightly tied to the original recurring event (see above).
  specialized = 'specialized'
  # Child <gd:entryLink>: embedded or linked-to event entry for this
  # exception.
  entry_link = EntryLink
  # Child <gd:originalEvent>: the recurring event this is an exception to.
  original_event = OriginalEvent


class Reminder(atom.core.XmlElement):
  """The gd:reminder element.

  A time interval, indicating how long before the containing entity's start
  time or due time attribute a reminder should be issued. Alternatively,
  may specify an absolute time at which a reminder should be issued. Also
  specifies a notification method, indicating what medium the system
  should use to remind the user.
  """
  _qname = GDATA_TEMPLATE % 'reminder'
  # Absolute time at which the reminder fires (alternative to the
  # days/hours/minutes interval attributes below).
  absolute_time = 'absoluteTime'
  # Notification method (medium) used to remind the user.
  method = 'method'
  days = 'days'
  hours = 'hours'
  minutes = 'minutes'


class Transparency(atom.core.XmlElement):
  """The gd:transparency element.

  Extensible enum corresponding to the TRANSP property defined in RFC 2445.
  """
  _qname = GDATA_TEMPLATE % 'transparency'
  value = 'value'


class Agent(atom.core.XmlElement):
  """The gd:agent element.

  The agent who actually receives the mail. Used in work addresses.
  Also for 'in care of' or 'c/o'.
  """
  _qname = GDATA_TEMPLATE % 'agent'


class HouseName(atom.core.XmlElement):
  """The gd:housename element.

  Used in places where houses or buildings have names (and not necessarily
  numbers), e.g. "The Pillars".
  """
  _qname = GDATA_TEMPLATE % 'housename'


class Street(atom.core.XmlElement):
  """The gd:street element.

  Can be street, avenue, road, etc. This element also includes the house
  number and room/apartment/flat/floor number.
  """
  _qname = GDATA_TEMPLATE % 'street'


class PoBox(atom.core.XmlElement):
  """The gd:pobox element.

  Covers actual P.O. boxes, drawers, locked bags, etc. This is usually but
  not always mutually exclusive with street.
  """
  _qname = GDATA_TEMPLATE % 'pobox'


class Neighborhood(atom.core.XmlElement):
  """The gd:neighborhood element.

  This is used to disambiguate a street address when a city contains more
  than one street with the same name, or to specify a small place whose
  mail is routed through a larger postal town. In China it could be a
  county or a minor city.
  """
  _qname = GDATA_TEMPLATE % 'neighborhood'


class City(atom.core.XmlElement):
  """The gd:city element.

  Can be city, village, town, borough, etc. This is the postal town and
  not necessarily the place of residence or place of business.
  """
  _qname = GDATA_TEMPLATE % 'city'


class Subregion(atom.core.XmlElement):
  """The gd:subregion element.

  Handles administrative districts such as U.S. or U.K. counties that are
  not used for mail addressing purposes. Subregion is not intended for
  delivery addresses.
  """
  _qname = GDATA_TEMPLATE % 'subregion'


class Region(atom.core.XmlElement):
  """The gd:region element.

  A state, province, county (in Ireland), Land (in Germany),
  departement (in France), etc.
  """
  _qname = GDATA_TEMPLATE % 'region'


class Postcode(atom.core.XmlElement):
  """The gd:postcode element.

  Postal code. Usually country-wide, but sometimes specific to the city
  (e.g. "2" in "Dublin 2, Ireland" addresses).
  """
  _qname = GDATA_TEMPLATE % 'postcode'


class Country(atom.core.XmlElement):
  """The gd:country element.

  The name or code of the country.
  """
  _qname = GDATA_TEMPLATE % 'country'


class FormattedAddress(atom.core.XmlElement):
  """The gd:formattedAddress element.

  The full, unstructured postal address.
  """
  _qname = GDATA_TEMPLATE % 'formattedAddress'


class StructuredPostalAddress(atom.core.XmlElement):
  """The gd:structuredPostalAddress element.

  Postal address split into components. It allows storing the address in a
  locale-independent format. The fields can be interpreted and used to
  generate a formatted, locale-dependent address. The following elements
  represent parts of the address: agent, house name, street, P.O. box,
  neighborhood, city, subregion, region, postal code, country.

  The subregion element is not used for postal addresses; it is provided
  for extended uses of addresses only. In order to store a postal address
  in unstructured form, the formatted address field is provided.
  """
  _qname = GDATA_TEMPLATE % 'structuredPostalAddress'
  # XML attributes describing how/where the address is used.
  rel = 'rel'
  mail_class = 'mailClass'
  usage = 'usage'
  label = 'label'
  primary = 'primary'
  # Child elements, one per address component (see class docstring).
  agent = Agent
  house_name = HouseName
  street = Street
  po_box = PoBox
  neighborhood = Neighborhood
  city = City
  subregion = Subregion
  region = Region
  postcode = Postcode
  country = Country
  formatted_address = FormattedAddress


class Where(atom.core.XmlElement):
  """The gd:where element.

  A place (such as an event location) associated with the containing
  entity. The type of the association is determined by the rel attribute;
  the details of the location are contained in an embedded or linked-to
  Contact entry.

  A <gd:where> element is more general than a <gd:geoPt> element. The
  former identifies a place using a text description and/or a Contact
  entry, while the latter identifies a place using a specific geographic
  location.
  """
  _qname = GDATA_TEMPLATE % 'where'
  label = 'label'
  rel = 'rel'
  # Note: the XML attribute is named 'valueString', not 'value'.
  value = 'valueString'
  entry_link = EntryLink


class AttendeeType(atom.core.XmlElement):
  """The gd:attendeeType element."""
  _qname = GDATA_TEMPLATE % 'attendeeType'
  value = 'value'


class AttendeeStatus(atom.core.XmlElement):
  """The gd:attendeeStatus element."""
  _qname = GDATA_TEMPLATE % 'attendeeStatus'
  value = 'value'


class EventStatus(atom.core.XmlElement):
  """The gd:eventStatus element."""
  _qname = GDATA_TEMPLATE % 'eventStatus'
  value = 'value'


class Visibility(atom.core.XmlElement):
  """The gd:visibility element."""
  _qname = GDATA_TEMPLATE % 'visibility'
  value = 'value'


class Who(atom.core.XmlElement):
  """The gd:who element.

  A person associated with the containing entity. The type of the
  association is determined by the rel attribute; the details about the
  person are contained in an embedded or linked-to Contact entry.

  The <gd:who> element can be used to specify email senders and
  recipients, calendar event organizers, and so on.
  """
  _qname = GDATA_TEMPLATE % 'who'
  email = 'email'
  rel = 'rel'
  # Note: the XML attribute is named 'valueString', not 'value'.
  value = 'valueString'
  attendee_status = AttendeeStatus
  attendee_type = AttendeeType
  entry_link = EntryLink


class Deleted(atom.core.XmlElement):
  """gd:deleted when present, indicates the containing entry is deleted."""
  # NOTE: uses the GD namespace template, unlike most elements above.
  _qname = GD_TEMPLATE % 'deleted'


class Money(atom.core.XmlElement):
  """Describes money: an amount plus an ISO currency code attribute."""
  _qname = GD_TEMPLATE % 'money'
  amount = 'amount'
  currency_code = 'currencyCode'


class MediaSource(object):
  """GData Entries can refer to media sources, so this class provides a
  place to store references to these objects along with some metadata.
  """

  def __init__(self, file_handle=None, content_type=None,
               content_length=None, file_path=None, file_name=None):
    """Creates an object of type MediaSource.

    Args:
      file_handle: A file handle pointing to the file to be encapsulated
          in the MediaSource.
      content_type: string The MIME type of the file. Required if a
          file_handle is given.
      content_length: int The size of the file. Required if a file_handle
          is given.
      file_path: string (optional) A full path name to the file. Used in
          place of a file_handle.
      file_name: string The name of the file without any path information.
          Required if a file_handle is given.
    """
    self.file_handle = file_handle
    self.content_type = content_type
    self.content_length = content_length
    self.file_name = file_name
    # When no handle was supplied, derive one (plus length and base name)
    # from file_path. NOTE(review): a file_path given without a
    # content_type is silently ignored here - confirm callers always pass
    # both.
    if (file_handle is None and content_type is not None and
        file_path is not None):
      self.set_file_handle(file_path, content_type)

  def set_file_handle(self, file_name, content_type):
    """A helper function which can create a file handle from a given
    filename and set the content type and length all at once.

    Args:
      file_name: string The path and file name to the file containing the
          media
      content_type: string A MIME type representing the type of the media
    """
    # Opened in binary mode; the handle is left open for the upload code
    # (via modify_request) to consume.
    self.file_handle = open(file_name, 'rb')
    self.content_type = content_type
    self.content_length = os.path.getsize(file_name)
    self.file_name = os.path.basename(file_name)

  # CamelCase alias kept for backwards compatibility with older gdata code.
  SetFileHandle = set_file_handle

  def modify_request(self, http_request):
    # Attach the media file as a body part of the outgoing HTTP request
    # and return the (mutated) request for chaining.
    http_request.add_body_part(self.file_handle, self.content_type,
                               self.content_length)
    return http_request

  # CamelCase alias kept for backwards compatibility with older gdata code.
  ModifyRequest = modify_request
agpl-3.0
darktears/chromium-crosswalk
third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/port/browser_test_driver_unittest.py
48
2395
# Copyright (C) 2014 Google Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
import unittest

from webkitpy.common.system.systemhost_mock import MockSystemHost
from webkitpy.layout_tests.port import Port, Driver, DriverOutput
from webkitpy.layout_tests.port import browser_test, browser_test_driver
from webkitpy.layout_tests.port.server_process_mock import MockServerProcess
from webkitpy.layout_tests.port.port_testcase import TestWebKitPort
from webkitpy.tool.mocktool import MockOptions


class BrowserTestDriverTest(unittest.TestCase):
    """Unit tests for BrowserTestDriver."""

    def test_read_stdin_path(self):
        """A 'StdinPath:' line in the driver's output stream should be
        parsed into the content block's stdin_path field."""
        test_port = TestWebKitPort()
        test_driver = browser_test_driver.BrowserTestDriver(
            test_port, 0, pixel_tests=True)
        # Feed the driver a canned server-process transcript instead of
        # launching a real browser process.
        fake_lines = ['StdinPath: /foo/bar', '#EOF']
        test_driver._server_process = MockServerProcess(lines=fake_lines)
        block = test_driver._read_block(0)
        self.assertEqual(block.stdin_path, '/foo/bar')
        # Clear the stdin directory so driver cleanup doesn't try to
        # remove a directory that was never created.
        test_driver._stdin_directory = None
bsd-3-clause
ABaldwinHunter/django-clone
tests/aggregation/tests.py
17
45691
from __future__ import unicode_literals import datetime import re from decimal import Decimal from django.core.exceptions import FieldError from django.db import connection from django.db.models import ( F, Avg, Count, DecimalField, DurationField, FloatField, Func, IntegerField, Max, Min, Sum, Value, ) from django.test import TestCase from django.test.utils import Approximate, CaptureQueriesContext from django.utils import six, timezone from .models import Author, Book, Publisher, Store class AggregateTestCase(TestCase): @classmethod def setUpTestData(cls): cls.a1 = Author.objects.create(name='Adrian Holovaty', age=34) cls.a2 = Author.objects.create(name='Jacob Kaplan-Moss', age=35) cls.a3 = Author.objects.create(name='Brad Dayley', age=45) cls.a4 = Author.objects.create(name='James Bennett', age=29) cls.a5 = Author.objects.create(name='Jeffrey Forcier', age=37) cls.a6 = Author.objects.create(name='Paul Bissex', age=29) cls.a7 = Author.objects.create(name='Wesley J. Chun', age=25) cls.a8 = Author.objects.create(name='Peter Norvig', age=57) cls.a9 = Author.objects.create(name='Stuart Russell', age=46) cls.a1.friends.add(cls.a2, cls.a4) cls.a2.friends.add(cls.a1, cls.a7) cls.a4.friends.add(cls.a1) cls.a5.friends.add(cls.a6, cls.a7) cls.a6.friends.add(cls.a5, cls.a7) cls.a7.friends.add(cls.a2, cls.a5, cls.a6) cls.a8.friends.add(cls.a9) cls.a9.friends.add(cls.a8) cls.p1 = Publisher.objects.create(name='Apress', num_awards=3, duration=datetime.timedelta(days=1)) cls.p2 = Publisher.objects.create(name='Sams', num_awards=1, duration=datetime.timedelta(days=2)) cls.p3 = Publisher.objects.create(name='Prentice Hall', num_awards=7) cls.p4 = Publisher.objects.create(name='Morgan Kaufmann', num_awards=9) cls.p5 = Publisher.objects.create(name="Jonno's House of Books", num_awards=0) cls.b1 = Book.objects.create( isbn='159059725', name='The Definitive Guide to Django: Web Development Done Right', pages=447, rating=4.5, price=Decimal('30.00'), contact=cls.a1, publisher=cls.p1, 
pubdate=datetime.date(2007, 12, 6) ) cls.b2 = Book.objects.create( isbn='067232959', name='Sams Teach Yourself Django in 24 Hours', pages=528, rating=3.0, price=Decimal('23.09'), contact=cls.a3, publisher=cls.p2, pubdate=datetime.date(2008, 3, 3) ) cls.b3 = Book.objects.create( isbn='159059996', name='Practical Django Projects', pages=300, rating=4.0, price=Decimal('29.69'), contact=cls.a4, publisher=cls.p1, pubdate=datetime.date(2008, 6, 23) ) cls.b4 = Book.objects.create( isbn='013235613', name='Python Web Development with Django', pages=350, rating=4.0, price=Decimal('29.69'), contact=cls.a5, publisher=cls.p3, pubdate=datetime.date(2008, 11, 3) ) cls.b5 = Book.objects.create( isbn='013790395', name='Artificial Intelligence: A Modern Approach', pages=1132, rating=4.0, price=Decimal('82.80'), contact=cls.a8, publisher=cls.p3, pubdate=datetime.date(1995, 1, 15) ) cls.b6 = Book.objects.create( isbn='155860191', name='Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', pages=946, rating=5.0, price=Decimal('75.00'), contact=cls.a8, publisher=cls.p4, pubdate=datetime.date(1991, 10, 15) ) cls.b1.authors.add(cls.a1, cls.a2) cls.b2.authors.add(cls.a3) cls.b3.authors.add(cls.a4) cls.b4.authors.add(cls.a5, cls.a6, cls.a7) cls.b5.authors.add(cls.a8, cls.a9) cls.b6.authors.add(cls.a8) s1 = Store.objects.create( name='Amazon.com', original_opening=datetime.datetime(1994, 4, 23, 9, 17, 42), friday_night_closing=datetime.time(23, 59, 59) ) s2 = Store.objects.create( name='Books.com', original_opening=datetime.datetime(2001, 3, 15, 11, 23, 37), friday_night_closing=datetime.time(23, 59, 59) ) s3 = Store.objects.create( name="Mamma and Pappa's Books", original_opening=datetime.datetime(1945, 4, 25, 16, 24, 14), friday_night_closing=datetime.time(21, 30) ) s1.books.add(cls.b1, cls.b2, cls.b3, cls.b4, cls.b5, cls.b6) s2.books.add(cls.b1, cls.b3, cls.b5, cls.b6) s3.books.add(cls.b3, cls.b4, cls.b6) def test_empty_aggregate(self): 
self.assertEqual(Author.objects.all().aggregate(), {}) def test_aggregate_in_order_by(self): msg = ( 'Using an aggregate in order_by() without also including it in ' 'annotate() is not allowed: Avg(F(book__rating)' ) with self.assertRaisesMessage(FieldError, msg): Author.objects.values('age').order_by(Avg('book__rating')) def test_single_aggregate(self): vals = Author.objects.aggregate(Avg("age")) self.assertEqual(vals, {"age__avg": Approximate(37.4, places=1)}) def test_multiple_aggregates(self): vals = Author.objects.aggregate(Sum("age"), Avg("age")) self.assertEqual(vals, {"age__sum": 337, "age__avg": Approximate(37.4, places=1)}) def test_filter_aggregate(self): vals = Author.objects.filter(age__gt=29).aggregate(Sum("age")) self.assertEqual(len(vals), 1) self.assertEqual(vals["age__sum"], 254) def test_related_aggregate(self): vals = Author.objects.aggregate(Avg("friends__age")) self.assertEqual(len(vals), 1) self.assertAlmostEqual(vals["friends__age__avg"], 34.07, places=2) vals = Book.objects.filter(rating__lt=4.5).aggregate(Avg("authors__age")) self.assertEqual(len(vals), 1) self.assertAlmostEqual(vals["authors__age__avg"], 38.2857, places=2) vals = Author.objects.all().filter(name__contains="a").aggregate(Avg("book__rating")) self.assertEqual(len(vals), 1) self.assertEqual(vals["book__rating__avg"], 4.0) vals = Book.objects.aggregate(Sum("publisher__num_awards")) self.assertEqual(len(vals), 1) self.assertEqual(vals["publisher__num_awards__sum"], 30) vals = Publisher.objects.aggregate(Sum("book__price")) self.assertEqual(len(vals), 1) self.assertEqual(vals["book__price__sum"], Decimal("270.27")) def test_aggregate_multi_join(self): vals = Store.objects.aggregate(Max("books__authors__age")) self.assertEqual(len(vals), 1) self.assertEqual(vals["books__authors__age__max"], 57) vals = Author.objects.aggregate(Min("book__publisher__num_awards")) self.assertEqual(len(vals), 1) self.assertEqual(vals["book__publisher__num_awards__min"], 1) def 
test_aggregate_alias(self): vals = Store.objects.filter(name="Amazon.com").aggregate(amazon_mean=Avg("books__rating")) self.assertEqual(len(vals), 1) self.assertAlmostEqual(vals["amazon_mean"], 4.08, places=2) def test_annotate_basic(self): self.assertQuerysetEqual( Book.objects.annotate().order_by('pk'), [ "The Definitive Guide to Django: Web Development Done Right", "Sams Teach Yourself Django in 24 Hours", "Practical Django Projects", "Python Web Development with Django", "Artificial Intelligence: A Modern Approach", "Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp" ], lambda b: b.name ) books = Book.objects.annotate(mean_age=Avg("authors__age")) b = books.get(pk=self.b1.pk) self.assertEqual( b.name, 'The Definitive Guide to Django: Web Development Done Right' ) self.assertEqual(b.mean_age, 34.5) def test_annotate_defer(self): qs = Book.objects.annotate( page_sum=Sum("pages")).defer('name').filter(pk=self.b1.pk) rows = [ (1, "159059725", 447, "The Definitive Guide to Django: Web Development Done Right") ] self.assertQuerysetEqual( qs.order_by('pk'), rows, lambda r: (r.id, r.isbn, r.page_sum, r.name) ) def test_annotate_defer_select_related(self): qs = Book.objects.select_related('contact').annotate( page_sum=Sum("pages")).defer('name').filter(pk=self.b1.pk) rows = [ (1, "159059725", 447, "Adrian Holovaty", "The Definitive Guide to Django: Web Development Done Right") ] self.assertQuerysetEqual( qs.order_by('pk'), rows, lambda r: (r.id, r.isbn, r.page_sum, r.contact.name, r.name) ) def test_annotate_m2m(self): books = Book.objects.filter(rating__lt=4.5).annotate(Avg("authors__age")).order_by("name") self.assertQuerysetEqual( books, [ ('Artificial Intelligence: A Modern Approach', 51.5), ('Practical Django Projects', 29.0), ('Python Web Development with Django', Approximate(30.3, places=1)), ('Sams Teach Yourself Django in 24 Hours', 45.0) ], lambda b: (b.name, b.authors__age__avg), ) books = 
Book.objects.annotate(num_authors=Count("authors")).order_by("name") self.assertQuerysetEqual( books, [ ('Artificial Intelligence: A Modern Approach', 2), ('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 1), ('Practical Django Projects', 1), ('Python Web Development with Django', 3), ('Sams Teach Yourself Django in 24 Hours', 1), ('The Definitive Guide to Django: Web Development Done Right', 2) ], lambda b: (b.name, b.num_authors) ) def test_backwards_m2m_annotate(self): authors = Author.objects.filter(name__contains="a").annotate(Avg("book__rating")).order_by("name") self.assertQuerysetEqual( authors, [ ('Adrian Holovaty', 4.5), ('Brad Dayley', 3.0), ('Jacob Kaplan-Moss', 4.5), ('James Bennett', 4.0), ('Paul Bissex', 4.0), ('Stuart Russell', 4.0) ], lambda a: (a.name, a.book__rating__avg) ) authors = Author.objects.annotate(num_books=Count("book")).order_by("name") self.assertQuerysetEqual( authors, [ ('Adrian Holovaty', 1), ('Brad Dayley', 1), ('Jacob Kaplan-Moss', 1), ('James Bennett', 1), ('Jeffrey Forcier', 1), ('Paul Bissex', 1), ('Peter Norvig', 2), ('Stuart Russell', 1), ('Wesley J. 
Chun', 1) ], lambda a: (a.name, a.num_books) ) def test_reverse_fkey_annotate(self): books = Book.objects.annotate(Sum("publisher__num_awards")).order_by("name") self.assertQuerysetEqual( books, [ ('Artificial Intelligence: A Modern Approach', 7), ('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 9), ('Practical Django Projects', 3), ('Python Web Development with Django', 7), ('Sams Teach Yourself Django in 24 Hours', 1), ('The Definitive Guide to Django: Web Development Done Right', 3) ], lambda b: (b.name, b.publisher__num_awards__sum) ) publishers = Publisher.objects.annotate(Sum("book__price")).order_by("name") self.assertQuerysetEqual( publishers, [ ('Apress', Decimal("59.69")), ("Jonno's House of Books", None), ('Morgan Kaufmann', Decimal("75.00")), ('Prentice Hall', Decimal("112.49")), ('Sams', Decimal("23.09")) ], lambda p: (p.name, p.book__price__sum) ) def test_annotate_values(self): books = list(Book.objects.filter(pk=self.b1.pk).annotate(mean_age=Avg("authors__age")).values()) self.assertEqual( books, [ { "contact_id": 1, "id": 1, "isbn": "159059725", "mean_age": 34.5, "name": "The Definitive Guide to Django: Web Development Done Right", "pages": 447, "price": Approximate(Decimal("30")), "pubdate": datetime.date(2007, 12, 6), "publisher_id": 1, "rating": 4.5, } ] ) books = ( Book.objects .filter(pk=self.b1.pk) .annotate(mean_age=Avg('authors__age')) .values('pk', 'isbn', 'mean_age') ) self.assertEqual( list(books), [ { "pk": 1, "isbn": "159059725", "mean_age": 34.5, } ] ) books = Book.objects.filter(pk=self.b1.pk).annotate(mean_age=Avg("authors__age")).values("name") self.assertEqual( list(books), [ { "name": "The Definitive Guide to Django: Web Development Done Right" } ] ) books = Book.objects.filter(pk=self.b1.pk).values().annotate(mean_age=Avg('authors__age')) self.assertEqual( list(books), [ { "contact_id": 1, "id": 1, "isbn": "159059725", "mean_age": 34.5, "name": "The Definitive Guide to Django: Web Development 
Done Right", "pages": 447, "price": Approximate(Decimal("30")), "pubdate": datetime.date(2007, 12, 6), "publisher_id": 1, "rating": 4.5, } ] ) books = ( Book.objects .values("rating") .annotate(n_authors=Count("authors__id"), mean_age=Avg("authors__age")) .order_by("rating") ) self.assertEqual( list(books), [ { "rating": 3.0, "n_authors": 1, "mean_age": 45.0, }, { "rating": 4.0, "n_authors": 6, "mean_age": Approximate(37.16, places=1) }, { "rating": 4.5, "n_authors": 2, "mean_age": 34.5, }, { "rating": 5.0, "n_authors": 1, "mean_age": 57.0, } ] ) authors = Author.objects.annotate(Avg("friends__age")).order_by("name") self.assertEqual(len(authors), 9) self.assertQuerysetEqual( authors, [ ('Adrian Holovaty', 32.0), ('Brad Dayley', None), ('Jacob Kaplan-Moss', 29.5), ('James Bennett', 34.0), ('Jeffrey Forcier', 27.0), ('Paul Bissex', 31.0), ('Peter Norvig', 46.0), ('Stuart Russell', 57.0), ('Wesley J. Chun', Approximate(33.66, places=1)) ], lambda a: (a.name, a.friends__age__avg) ) def test_count(self): vals = Book.objects.aggregate(Count("rating")) self.assertEqual(vals, {"rating__count": 6}) vals = Book.objects.aggregate(Count("rating", distinct=True)) self.assertEqual(vals, {"rating__count": 4}) def test_count_star(self): with self.assertNumQueries(1) as ctx: Book.objects.aggregate(n=Count("*")) sql = ctx.captured_queries[0]['sql'] self.assertIn('SELECT COUNT(*) ', sql) def test_non_grouped_annotation_not_in_group_by(self): """ An annotation not included in values() before an aggregate should be excluded from the group by clause. """ qs = ( Book.objects.annotate(xprice=F('price')).filter(rating=4.0).values('rating') .annotate(count=Count('publisher_id', distinct=True)).values('count', 'rating').order_by('count') ) self.assertEqual( list(qs), [ {'rating': 4.0, 'count': 2}, ] ) def test_grouped_annotation_in_group_by(self): """ An annotation included in values() before an aggregate should be included in the group by clause. 
""" qs = ( Book.objects.annotate(xprice=F('price')).filter(rating=4.0).values('rating', 'xprice') .annotate(count=Count('publisher_id', distinct=True)).values('count', 'rating').order_by('count') ) self.assertEqual( list(qs), [ {'rating': 4.0, 'count': 1}, {'rating': 4.0, 'count': 2}, ] ) def test_fkey_aggregate(self): explicit = list(Author.objects.annotate(Count('book__id'))) implicit = list(Author.objects.annotate(Count('book'))) self.assertEqual(explicit, implicit) def test_annotate_ordering(self): books = Book.objects.values('rating').annotate(oldest=Max('authors__age')).order_by('oldest', 'rating') self.assertEqual( list(books), [ { "rating": 4.5, "oldest": 35, }, { "rating": 3.0, "oldest": 45 }, { "rating": 4.0, "oldest": 57, }, { "rating": 5.0, "oldest": 57, } ] ) books = Book.objects.values("rating").annotate(oldest=Max("authors__age")).order_by("-oldest", "-rating") self.assertEqual( list(books), [ { "rating": 5.0, "oldest": 57, }, { "rating": 4.0, "oldest": 57, }, { "rating": 3.0, "oldest": 45, }, { "rating": 4.5, "oldest": 35, } ] ) def test_aggregate_annotation(self): vals = Book.objects.annotate(num_authors=Count("authors__id")).aggregate(Avg("num_authors")) self.assertEqual(vals, {"num_authors__avg": Approximate(1.66, places=1)}) def test_avg_duration_field(self): self.assertEqual( Publisher.objects.aggregate(Avg('duration', output_field=DurationField())), {'duration__avg': datetime.timedelta(days=1, hours=12)} ) def test_sum_duration_field(self): self.assertEqual( Publisher.objects.aggregate(Sum('duration', output_field=DurationField())), {'duration__sum': datetime.timedelta(days=3)} ) def test_sum_distinct_aggregate(self): """ Sum on a distinct() QuerySet should aggregate only the distinct items. 
""" authors = Author.objects.filter(book__in=[5, 6]) self.assertEqual(authors.count(), 3) distinct_authors = authors.distinct() self.assertEqual(distinct_authors.count(), 2) # Selected author ages are 57 and 46 age_sum = distinct_authors.aggregate(Sum('age')) self.assertEqual(age_sum['age__sum'], 103) def test_filtering(self): p = Publisher.objects.create(name='Expensive Publisher', num_awards=0) Book.objects.create( name='ExpensiveBook1', pages=1, isbn='111', rating=3.5, price=Decimal("1000"), publisher=p, contact_id=1, pubdate=datetime.date(2008, 12, 1) ) Book.objects.create( name='ExpensiveBook2', pages=1, isbn='222', rating=4.0, price=Decimal("1000"), publisher=p, contact_id=1, pubdate=datetime.date(2008, 12, 2) ) Book.objects.create( name='ExpensiveBook3', pages=1, isbn='333', rating=4.5, price=Decimal("35"), publisher=p, contact_id=1, pubdate=datetime.date(2008, 12, 3) ) publishers = Publisher.objects.annotate(num_books=Count("book__id")).filter(num_books__gt=1).order_by("pk") self.assertQuerysetEqual( publishers, [ "Apress", "Prentice Hall", "Expensive Publisher", ], lambda p: p.name, ) publishers = Publisher.objects.filter(book__price__lt=Decimal("40.0")).order_by("pk") self.assertQuerysetEqual( publishers, [ "Apress", "Apress", "Sams", "Prentice Hall", "Expensive Publisher", ], lambda p: p.name ) publishers = ( Publisher.objects .annotate(num_books=Count("book__id")) .filter(num_books__gt=1, book__price__lt=Decimal("40.0")) .order_by("pk") ) self.assertQuerysetEqual( publishers, [ "Apress", "Prentice Hall", "Expensive Publisher", ], lambda p: p.name, ) publishers = ( Publisher.objects .filter(book__price__lt=Decimal("40.0")) .annotate(num_books=Count("book__id")) .filter(num_books__gt=1) .order_by("pk") ) self.assertQuerysetEqual( publishers, [ "Apress", ], lambda p: p.name ) publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__range=[1, 3]).order_by("pk") self.assertQuerysetEqual( publishers, [ "Apress", "Sams", "Prentice 
Hall", "Morgan Kaufmann", "Expensive Publisher", ], lambda p: p.name ) publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__range=[1, 2]).order_by("pk") self.assertQuerysetEqual( publishers, [ "Apress", "Sams", "Prentice Hall", "Morgan Kaufmann", ], lambda p: p.name ) publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__in=[1, 3]).order_by("pk") self.assertQuerysetEqual( publishers, [ "Sams", "Morgan Kaufmann", "Expensive Publisher", ], lambda p: p.name, ) publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__isnull=True) self.assertEqual(len(publishers), 0) def test_annotation(self): vals = Author.objects.filter(pk=self.a1.pk).aggregate(Count("friends__id")) self.assertEqual(vals, {"friends__id__count": 2}) books = Book.objects.annotate(num_authors=Count("authors__name")).filter(num_authors__exact=2).order_by("pk") self.assertQuerysetEqual( books, [ "The Definitive Guide to Django: Web Development Done Right", "Artificial Intelligence: A Modern Approach", ], lambda b: b.name ) authors = ( Author.objects .annotate(num_friends=Count("friends__id", distinct=True)) .filter(num_friends=0) .order_by("pk") ) self.assertQuerysetEqual( authors, [ "Brad Dayley", ], lambda a: a.name ) publishers = Publisher.objects.annotate(num_books=Count("book__id")).filter(num_books__gt=1).order_by("pk") self.assertQuerysetEqual( publishers, [ "Apress", "Prentice Hall", ], lambda p: p.name ) publishers = ( Publisher.objects .filter(book__price__lt=Decimal("40.0")) .annotate(num_books=Count("book__id")) .filter(num_books__gt=1) ) self.assertQuerysetEqual( publishers, [ "Apress", ], lambda p: p.name ) books = ( Book.objects .annotate(num_authors=Count("authors__id")) .filter(authors__name__contains="Norvig", num_authors__gt=1) ) self.assertQuerysetEqual( books, [ "Artificial Intelligence: A Modern Approach", ], lambda b: b.name ) def test_more_aggregation(self): a = 
Author.objects.get(name__contains='Norvig') b = Book.objects.get(name__contains='Done Right') b.authors.add(a) b.save() vals = ( Book.objects .annotate(num_authors=Count("authors__id")) .filter(authors__name__contains="Norvig", num_authors__gt=1) .aggregate(Avg("rating")) ) self.assertEqual(vals, {"rating__avg": 4.25}) def test_even_more_aggregate(self): publishers = Publisher.objects.annotate( earliest_book=Min("book__pubdate"), ).exclude(earliest_book=None).order_by("earliest_book").values( 'earliest_book', 'num_awards', 'id', 'name', ) self.assertEqual( list(publishers), [ { 'earliest_book': datetime.date(1991, 10, 15), 'num_awards': 9, 'id': 4, 'name': 'Morgan Kaufmann' }, { 'earliest_book': datetime.date(1995, 1, 15), 'num_awards': 7, 'id': 3, 'name': 'Prentice Hall' }, { 'earliest_book': datetime.date(2007, 12, 6), 'num_awards': 3, 'id': 1, 'name': 'Apress' }, { 'earliest_book': datetime.date(2008, 3, 3), 'num_awards': 1, 'id': 2, 'name': 'Sams' } ] ) vals = Store.objects.aggregate(Max("friday_night_closing"), Min("original_opening")) self.assertEqual( vals, { "friday_night_closing__max": datetime.time(23, 59, 59), "original_opening__min": datetime.datetime(1945, 4, 25, 16, 24, 14), } ) def test_annotate_values_list(self): books = ( Book.objects .filter(pk=self.b1.pk) .annotate(mean_age=Avg("authors__age")) .values_list("pk", "isbn", "mean_age") ) self.assertEqual( list(books), [ (1, "159059725", 34.5), ] ) books = Book.objects.filter(pk=self.b1.pk).annotate(mean_age=Avg("authors__age")).values_list("isbn") self.assertEqual( list(books), [ ('159059725',) ] ) books = Book.objects.filter(pk=self.b1.pk).annotate(mean_age=Avg("authors__age")).values_list("mean_age") self.assertEqual( list(books), [ (34.5,) ] ) books = ( Book.objects .filter(pk=self.b1.pk) .annotate(mean_age=Avg("authors__age")) .values_list("mean_age", flat=True) ) self.assertEqual(list(books), [34.5]) books = Book.objects.values_list("price").annotate(count=Count("price")).order_by("-count", 
"price") self.assertEqual( list(books), [ (Decimal("29.69"), 2), (Decimal('23.09'), 1), (Decimal('30'), 1), (Decimal('75'), 1), (Decimal('82.8'), 1), ] ) def test_dates_with_aggregation(self): """ Test that .dates() returns a distinct set of dates when applied to a QuerySet with aggregation. Refs #18056. Previously, .dates() would return distinct (date_kind, aggregation) sets, in this case (year, num_authors), so 2008 would be returned twice because there are books from 2008 with a different number of authors. """ dates = Book.objects.annotate(num_authors=Count("authors")).dates('pubdate', 'year') self.assertQuerysetEqual( dates, [ "datetime.date(1991, 1, 1)", "datetime.date(1995, 1, 1)", "datetime.date(2007, 1, 1)", "datetime.date(2008, 1, 1)" ] ) def test_values_aggregation(self): # Refs #20782 max_rating = Book.objects.values('rating').aggregate(max_rating=Max('rating')) self.assertEqual(max_rating['max_rating'], 5) max_books_per_rating = Book.objects.values('rating').annotate( books_per_rating=Count('id') ).aggregate(Max('books_per_rating')) self.assertEqual( max_books_per_rating, {'books_per_rating__max': 3}) def test_ticket17424(self): """ Check that doing exclude() on a foreign model after annotate() doesn't crash. """ all_books = list(Book.objects.values_list('pk', flat=True).order_by('pk')) annotated_books = Book.objects.order_by('pk').annotate(one=Count("id")) # The value doesn't matter, we just need any negative # constraint on a related model that's a noop. excluded_books = annotated_books.exclude(publisher__name="__UNLIKELY_VALUE__") # Try to generate query tree str(excluded_books.query) self.assertQuerysetEqual(excluded_books, all_books, lambda x: x.pk) # Check internal state self.assertIsNone(annotated_books.query.alias_map["aggregation_book"].join_type) self.assertIsNone(excluded_books.query.alias_map["aggregation_book"].join_type) def test_ticket12886(self): """ Check that aggregation over sliced queryset works correctly. 
""" qs = Book.objects.all().order_by('-rating')[0:3] vals = qs.aggregate(average_top3_rating=Avg('rating'))['average_top3_rating'] self.assertAlmostEqual(vals, 4.5, places=2) def test_ticket11881(self): """ Check that subqueries do not needlessly contain ORDER BY, SELECT FOR UPDATE or select_related() stuff. """ qs = Book.objects.all().select_for_update().order_by( 'pk').select_related('publisher').annotate(max_pk=Max('pk')) with CaptureQueriesContext(connection) as captured_queries: qs.aggregate(avg_pk=Avg('max_pk')) self.assertEqual(len(captured_queries), 1) qstr = captured_queries[0]['sql'].lower() self.assertNotIn('for update', qstr) forced_ordering = connection.ops.force_no_ordering() if forced_ordering: # If the backend needs to force an ordering we make sure it's # the only "ORDER BY" clause present in the query. self.assertEqual( re.findall(r'order by (\w+)', qstr), [', '.join(f[1][0] for f in forced_ordering).lower()] ) else: self.assertNotIn('order by', qstr) self.assertEqual(qstr.count(' join '), 0) def test_decimal_max_digits_has_no_effect(self): Book.objects.all().delete() a1 = Author.objects.first() p1 = Publisher.objects.first() thedate = timezone.now() for i in range(10): Book.objects.create( isbn="abcde{}".format(i), name="none", pages=10, rating=4.0, price=9999.98, contact=a1, publisher=p1, pubdate=thedate) book = Book.objects.aggregate(price_sum=Sum('price')) self.assertEqual(book['price_sum'], Decimal("99999.80")) def test_nonaggregate_aggregation_throws(self): with six.assertRaisesRegex(self, TypeError, 'fail is not an aggregate expression'): Book.objects.aggregate(fail=F('price')) def test_nonfield_annotation(self): book = Book.objects.annotate(val=Max(Value(2, output_field=IntegerField()))).first() self.assertEqual(book.val, 2) book = Book.objects.annotate(val=Max(Value(2), output_field=IntegerField())).first() self.assertEqual(book.val, 2) book = Book.objects.annotate(val=Max(2, output_field=IntegerField())).first() 
self.assertEqual(book.val, 2) def test_missing_output_field_raises_error(self): with six.assertRaisesRegex(self, FieldError, 'Cannot resolve expression type, unknown output_field'): Book.objects.annotate(val=Max(2)).first() def test_annotation_expressions(self): authors = Author.objects.annotate(combined_ages=Sum(F('age') + F('friends__age'))).order_by('name') authors2 = Author.objects.annotate(combined_ages=Sum('age') + Sum('friends__age')).order_by('name') for qs in (authors, authors2): self.assertEqual(len(qs), 9) self.assertQuerysetEqual( qs, [ ('Adrian Holovaty', 132), ('Brad Dayley', None), ('Jacob Kaplan-Moss', 129), ('James Bennett', 63), ('Jeffrey Forcier', 128), ('Paul Bissex', 120), ('Peter Norvig', 103), ('Stuart Russell', 103), ('Wesley J. Chun', 176) ], lambda a: (a.name, a.combined_ages) ) def test_aggregation_expressions(self): a1 = Author.objects.aggregate(av_age=Sum('age') / Count('*')) a2 = Author.objects.aggregate(av_age=Sum('age') / Count('age')) a3 = Author.objects.aggregate(av_age=Avg('age')) self.assertEqual(a1, {'av_age': 37}) self.assertEqual(a2, {'av_age': 37}) self.assertEqual(a3, {'av_age': Approximate(37.4, places=1)}) def test_avg_decimal_field(self): v = Book.objects.filter(rating=4).aggregate(avg_price=(Avg('price')))['avg_price'] self.assertIsInstance(v, float) self.assertEqual(v, Approximate(47.39, places=2)) def test_order_of_precedence(self): p1 = Book.objects.filter(rating=4).aggregate(avg_price=(Avg('price') + 2) * 3) self.assertEqual(p1, {'avg_price': Approximate(148.18, places=2)}) p2 = Book.objects.filter(rating=4).aggregate(avg_price=Avg('price') + 2 * 3) self.assertEqual(p2, {'avg_price': Approximate(53.39, places=2)}) def test_combine_different_types(self): with six.assertRaisesRegex(self, FieldError, 'Expression contains mixed types. 
You must set output_field'): Book.objects.annotate(sums=Sum('rating') + Sum('pages') + Sum('price')).get(pk=self.b4.pk) b1 = Book.objects.annotate(sums=Sum(F('rating') + F('pages') + F('price'), output_field=IntegerField())).get(pk=self.b4.pk) self.assertEqual(b1.sums, 383) b2 = Book.objects.annotate(sums=Sum(F('rating') + F('pages') + F('price'), output_field=FloatField())).get(pk=self.b4.pk) self.assertEqual(b2.sums, 383.69) b3 = Book.objects.annotate(sums=Sum(F('rating') + F('pages') + F('price'), output_field=DecimalField())).get(pk=self.b4.pk) self.assertEqual(b3.sums, Approximate(Decimal("383.69"), places=2)) def test_complex_aggregations_require_kwarg(self): with six.assertRaisesRegex(self, TypeError, 'Complex annotations require an alias'): Author.objects.annotate(Sum(F('age') + F('friends__age'))) with six.assertRaisesRegex(self, TypeError, 'Complex aggregates require an alias'): Author.objects.aggregate(Sum('age') / Count('age')) with six.assertRaisesRegex(self, TypeError, 'Complex aggregates require an alias'): Author.objects.aggregate(Sum(1)) def test_aggregate_over_complex_annotation(self): qs = Author.objects.annotate( combined_ages=Sum(F('age') + F('friends__age'))) age = qs.aggregate(max_combined_age=Max('combined_ages')) self.assertEqual(age['max_combined_age'], 176) age = qs.aggregate(max_combined_age_doubled=Max('combined_ages') * 2) self.assertEqual(age['max_combined_age_doubled'], 176 * 2) age = qs.aggregate( max_combined_age_doubled=Max('combined_ages') + Max('combined_ages')) self.assertEqual(age['max_combined_age_doubled'], 176 * 2) age = qs.aggregate( max_combined_age_doubled=Max('combined_ages') + Max('combined_ages'), sum_combined_age=Sum('combined_ages')) self.assertEqual(age['max_combined_age_doubled'], 176 * 2) self.assertEqual(age['sum_combined_age'], 954) age = qs.aggregate( max_combined_age_doubled=Max('combined_ages') + Max('combined_ages'), sum_combined_age_doubled=Sum('combined_ages') + Sum('combined_ages')) 
self.assertEqual(age['max_combined_age_doubled'], 176 * 2) self.assertEqual(age['sum_combined_age_doubled'], 954 * 2) def test_values_annotation_with_expression(self): # ensure the F() is promoted to the group by clause qs = Author.objects.values('name').annotate(another_age=Sum('age') + F('age')) a = qs.get(name="Adrian Holovaty") self.assertEqual(a['another_age'], 68) qs = qs.annotate(friend_count=Count('friends')) a = qs.get(name="Adrian Holovaty") self.assertEqual(a['friend_count'], 2) qs = qs.annotate(combined_age=Sum('age') + F('friends__age')).filter( name="Adrian Holovaty").order_by('-combined_age') self.assertEqual( list(qs), [ { "name": 'Adrian Holovaty', "another_age": 68, "friend_count": 1, "combined_age": 69 }, { "name": 'Adrian Holovaty', "another_age": 68, "friend_count": 1, "combined_age": 63 } ] ) vals = qs.values('name', 'combined_age') self.assertEqual( list(vals), [ { "name": 'Adrian Holovaty', "combined_age": 69 }, { "name": 'Adrian Holovaty', "combined_age": 63 } ] ) def test_annotate_values_aggregate(self): alias_age = Author.objects.annotate( age_alias=F('age') ).values( 'age_alias', ).aggregate(sum_age=Sum('age_alias')) age = Author.objects.values('age').aggregate(sum_age=Sum('age')) self.assertEqual(alias_age['sum_age'], age['sum_age']) def test_annotate_over_annotate(self): author = Author.objects.annotate( age_alias=F('age') ).annotate( sum_age=Sum('age_alias') ).get(name="Adrian Holovaty") other_author = Author.objects.annotate( sum_age=Sum('age') ).get(name="Adrian Holovaty") self.assertEqual(author.sum_age, other_author.sum_age) def test_annotated_aggregate_over_annotated_aggregate(self): with self.assertRaisesMessage(FieldError, "Cannot compute Sum('id__max'): 'id__max' is an aggregate"): Book.objects.annotate(Max('id')).annotate(Sum('id__max')) class MyMax(Max): def as_sql(self, compiler, connection): self.set_source_expressions(self.get_source_expressions()[0:1]) return super(MyMax, self).as_sql(compiler, connection) with 
self.assertRaisesMessage(FieldError, "Cannot compute Max('id__max'): 'id__max' is an aggregate"): Book.objects.annotate(Max('id')).annotate(my_max=MyMax('id__max', 'price')) def test_multi_arg_aggregate(self): class MyMax(Max): def as_sql(self, compiler, connection): self.set_source_expressions(self.get_source_expressions()[0:1]) return super(MyMax, self).as_sql(compiler, connection) with self.assertRaisesMessage(TypeError, 'Complex aggregates require an alias'): Book.objects.aggregate(MyMax('pages', 'price')) with self.assertRaisesMessage(TypeError, 'Complex annotations require an alias'): Book.objects.annotate(MyMax('pages', 'price')) Book.objects.aggregate(max_field=MyMax('pages', 'price')) def test_add_implementation(self): class MySum(Sum): pass # test completely changing how the output is rendered def lower_case_function_override(self, compiler, connection): sql, params = compiler.compile(self.source_expressions[0]) substitutions = dict(function=self.function.lower(), expressions=sql) substitutions.update(self.extra) return self.template % substitutions, params setattr(MySum, 'as_' + connection.vendor, lower_case_function_override) qs = Book.objects.annotate( sums=MySum(F('rating') + F('pages') + F('price'), output_field=IntegerField()) ) self.assertEqual(str(qs.query).count('sum('), 1) b1 = qs.get(pk=self.b4.pk) self.assertEqual(b1.sums, 383) # test changing the dict and delegating def lower_case_function_super(self, compiler, connection): self.extra['function'] = self.function.lower() return super(MySum, self).as_sql(compiler, connection) setattr(MySum, 'as_' + connection.vendor, lower_case_function_super) qs = Book.objects.annotate( sums=MySum(F('rating') + F('pages') + F('price'), output_field=IntegerField()) ) self.assertEqual(str(qs.query).count('sum('), 1) b1 = qs.get(pk=self.b4.pk) self.assertEqual(b1.sums, 383) # test overriding all parts of the template def be_evil(self, compiler, connection): substitutions = dict(function='MAX', expressions='2') 
substitutions.update(self.extra) return self.template % substitutions, () setattr(MySum, 'as_' + connection.vendor, be_evil) qs = Book.objects.annotate( sums=MySum(F('rating') + F('pages') + F('price'), output_field=IntegerField()) ) self.assertEqual(str(qs.query).count('MAX('), 1) b1 = qs.get(pk=self.b4.pk) self.assertEqual(b1.sums, 2) def test_complex_values_aggregation(self): max_rating = Book.objects.values('rating').aggregate( double_max_rating=Max('rating') + Max('rating')) self.assertEqual(max_rating['double_max_rating'], 5 * 2) max_books_per_rating = Book.objects.values('rating').annotate( books_per_rating=Count('id') + 5 ).aggregate(Max('books_per_rating')) self.assertEqual( max_books_per_rating, {'books_per_rating__max': 3 + 5}) def test_expression_on_aggregation(self): # Create a plain expression class Greatest(Func): function = 'GREATEST' def as_sqlite(self, compiler, connection): return super(Greatest, self).as_sql(compiler, connection, function='MAX') qs = Publisher.objects.annotate( price_or_median=Greatest(Avg('book__rating'), Avg('book__price')) ).filter(price_or_median__gte=F('num_awards')).order_by('num_awards') self.assertQuerysetEqual( qs, [1, 3, 7, 9], lambda v: v.num_awards) qs2 = Publisher.objects.annotate( rating_or_num_awards=Greatest(Avg('book__rating'), F('num_awards'), output_field=FloatField()) ).filter(rating_or_num_awards__gt=F('num_awards')).order_by('num_awards') self.assertQuerysetEqual( qs2, [1, 3], lambda v: v.num_awards)
bsd-3-clause
lkhomenk/integration_tests
cfme/tests/cloud/test_providers.py
2
24565
# -*- coding: utf-8 -*- # pylint: disable=E1101 # pylint: disable=W0621 import uuid import fauxfactory import pytest from widgetastic.exceptions import MoveTargetOutOfBoundsException from cfme import test_requirements from cfme.base.credential import Credential from cfme.cloud.instance import Instance from cfme.cloud.provider import CloudProvider from cfme.cloud.provider.azure import AzureProvider from cfme.cloud.provider.ec2 import EC2Provider from cfme.cloud.provider.gce import GCEProvider from cfme.cloud.provider.openstack import OpenStackProvider, RHOSEndpoint from cfme.common.provider_views import ( CloudProviderAddView, CloudProvidersView, CloudProvidersDiscoverView) from cfme.rest.gen_data import _creating_skeleton as creating_skeleton from cfme.rest.gen_data import arbitration_profiles as _arbitration_profiles from cfme.utils.appliance.implementations.ui import navigate_to from cfme.utils.rest import ( assert_response, delete_resources_from_collection, delete_resources_from_detail, ) from cfme.utils.update import update from cfme.fixtures.provider import enable_provider_regions from cfme.fixtures.pytest_store import store pytestmark = [pytest.mark.provider([CloudProvider], scope="function")] @pytest.fixture(scope='function') def enable_regions(provider): enable_provider_regions(provider) @pytest.mark.tier(3) @test_requirements.discovery @pytest.mark.uncollectif(lambda: store.current_appliance.version >= '5.9', reason='no more support for cloud provider discovery') def test_empty_discovery_form_validation_cloud(appliance): """ Tests that the flash message is correct when discovery form is empty.""" collection = appliance.collections.cloud_providers collection.discover(None, AzureProvider) view = appliance.browser.create_view(CloudProvidersDiscoverView) view.flash.assert_message('Client ID, Client Key, Azure Tenant ID and ' 'Subscription ID are required') @pytest.mark.tier(3) @test_requirements.discovery @pytest.mark.uncollectif(lambda: 
store.current_appliance.version >= '5.9', reason='no more support for cloud provider discovery') def test_discovery_cancelled_validation_cloud(appliance): """ Tests that the flash message is correct when discovery is cancelled.""" collection = appliance.collections.cloud_providers collection.discover(None, AzureProvider, cancel=True) view = appliance.browser.create_view(CloudProvidersView) view.flash.assert_success_message('Cloud Providers Discovery was cancelled by the user') @pytest.mark.tier(3) @test_requirements.discovery def test_add_cancelled_validation_cloud(request, appliance): """Tests that the flash message is correct when add is cancelled.""" collection = appliance.collections.cloud_providers prov = collection.instantiate(prov_class=EC2Provider) request.addfinalizer(prov.delete_if_exists) try: prov.create(cancel=True) except MoveTargetOutOfBoundsException: # TODO: Remove once fixed 1475303 prov.create(cancel=True) view = prov.browser.create_view(CloudProvidersView) view.flash.assert_success_message('Add of Cloud Provider was cancelled by the user') @pytest.mark.tier(3) @pytest.mark.uncollectif(lambda: store.current_appliance.version >= '5.9', reason='no more support for cloud provider discovery') def test_discovery_password_mismatch_validation_cloud(appliance): cred = Credential( principal=fauxfactory.gen_alphanumeric(5), secret=fauxfactory.gen_alphanumeric(5), verify_secret=fauxfactory.gen_alphanumeric(7)) collection = appliance.collections.cloud_providers collection.discover(cred, EC2Provider) view = appliance.browser.create_view(CloudProvidersView) view.flash.assert_message('Password/Verify Password do not match') @pytest.mark.tier(3) @pytest.mark.uncollect() @pytest.mark.usefixtures('has_no_cloud_providers') @test_requirements.discovery def test_providers_discovery_amazon(appliance): # This test was being uncollected anyway, and needs to be parametrized and not directory call # out to specific credential keys # amazon_creds = 
get_credentials_from_config('cloudqe_amazon') # discover(amazon_creds, EC2Provider) collection = appliance.collections.cloud_providers view = appliance.browser.create_view(CloudProvidersView) view.flash.assert_success_message('Amazon Cloud Providers: Discovery successfully initiated') collection.wait_for_new_provider() @pytest.mark.uncollectif(lambda provider: (store.current_appliance.version >= '5.9' or not(provider.one_of(AzureProvider) or provider.one_of(EC2Provider))), reason='no more support for cloud provider discovery') @test_requirements.discovery @pytest.mark.tier(1) def test_providers_discovery(request, appliance, provider): """Tests provider discovery Metadata: test_flag: crud """ if provider.one_of(AzureProvider): cred = Credential( principal=provider.default_endpoint.credentials.principal, secret=provider.default_endpoint.credentials.secret, tenant_id=provider.data['tenant_id'], subscription_id=provider.data['subscription_id']) elif provider.one_of(EC2Provider): cred = Credential( principal=provider.default_endpoint.credentials.principal, secret=provider.default_endpoint.credentials.secret, verify_secret=provider.default_endpoint.credentials.secret) collection = appliance.collections.cloud_providers collection.discover(cred, provider) view = provider.create_view(CloudProvidersView) view.flash.assert_success_message('Cloud Providers: Discovery successfully initiated') request.addfinalizer(CloudProvider.clear_providers) collection.wait_for_new_provider() @pytest.mark.tier(3) @pytest.mark.usefixtures('has_no_cloud_providers') @test_requirements.discovery def test_cloud_provider_add_with_bad_credentials(provider, enable_regions): """ Tests provider add with bad credentials Metadata: test_flag: crud """ default_credentials = provider.default_endpoint.credentials # default settings flash = 'Login failed due to a bad username or password.' 
default_credentials.principal = "bad" default_credentials.secret = 'notyourday' if provider.one_of(AzureProvider): flash = ( "Credential validation was not successful: Incorrect credentials - " "check your Azure Client ID and Client Key" ) default_credentials.principal = str(uuid.uuid4()) default_credentials.secret = 'notyourday' elif provider.one_of(GCEProvider): flash = 'Credential validation was not successful: Invalid Google JSON key' default_credentials.service_account = '{"test": "bad"}' elif provider.one_of(OpenStackProvider): for endp_name in provider.endpoints.keys(): if endp_name != 'default': del provider.endpoints[endp_name] with pytest.raises(Exception, match=flash): provider.create(validate_credentials=True) @pytest.mark.tier(1) @pytest.mark.smoke @pytest.mark.usefixtures('has_no_cloud_providers') @test_requirements.discovery def test_cloud_provider_crud(provider, enable_regions): """ Tests provider add with good credentials Metadata: test_flag: crud """ provider.create() provider.validate_stats(ui=True) old_name = provider.name with update(provider): provider.name = str(uuid.uuid4()) # random uuid with update(provider): provider.name = old_name # old name provider.delete(cancel=False) provider.wait_for_delete() @pytest.mark.tier(3) @test_requirements.discovery def test_type_required_validation_cloud(request, appliance): """Test to validate type while adding a provider""" collection = appliance.collections.cloud_providers view = navigate_to(collection, 'Add') view.fill({'name': 'foo'}) assert not view.add.active @pytest.mark.tier(3) @test_requirements.discovery def test_name_required_validation_cloud(request, appliance): """Tests to validate the name while adding a provider""" collection = appliance.collections.cloud_providers prov = collection.instantiate(prov_class=EC2Provider, name=None, region='US East (Northern Virginia)') request.addfinalizer(prov.delete_if_exists) with pytest.raises(AssertionError): prov.create() view = 
prov.create_view(CloudProviderAddView) assert view.name.help_block == "Required" assert not view.add.active @pytest.mark.tier(3) def test_region_required_validation(request, soft_assert, appliance): """Tests to validate the region while adding a provider""" collection = appliance.collections.cloud_providers prov = collection.instantiate(prov_class=EC2Provider, name=fauxfactory.gen_alphanumeric(5), region=None) request.addfinalizer(prov.delete_if_exists) with pytest.raises(AssertionError): prov.create() view = prov.create_view(CloudProviderAddView) soft_assert(view.region.help_block == "Required") @pytest.mark.tier(3) @test_requirements.discovery def test_host_name_required_validation_cloud(request, appliance): """Test to validate the hostname while adding a provider""" endpoint = RHOSEndpoint(hostname=None, ip_address=fauxfactory.gen_ipaddr(prefix=[10]), security_protocol=None) collection = appliance.collections.cloud_providers prov = collection.instantiate(prov_class=OpenStackProvider, name=fauxfactory.gen_alphanumeric(5), endpoints=endpoint) request.addfinalizer(prov.delete_if_exists) # It must raise an exception because it keeps on the form with pytest.raises(AssertionError): prov.create() endpoints = prov.create_view(prov.endpoints_form) assert endpoints.default.hostname.help_block == "Required" @pytest.mark.tier(3) def test_api_port_blank_validation(request, appliance): """Test to validate blank api port while adding a provider""" endpoint = RHOSEndpoint(hostname=fauxfactory.gen_alphanumeric(5), ip_address=fauxfactory.gen_ipaddr(prefix=[10]), api_port='', security_protocol='Non-SSL') collection = appliance.collections.cloud_providers prov = collection.instantiate(prov_class=OpenStackProvider, name=fauxfactory.gen_alphanumeric(5), endpoints=endpoint) request.addfinalizer(prov.delete_if_exists) # It must raise an exception because it keeps on the form with pytest.raises(AssertionError): prov.create() endpoints = prov.create_view(prov.endpoints_form) assert 
endpoints.default.api_port.help_block == "Required" @pytest.mark.tier(3) @pytest.mark.uncollectif(lambda: store.current_appliance.version >= '5.9', reason='EC2 option not available') def test_user_id_max_character_validation(appliance): cred = Credential(principal=fauxfactory.gen_alphanumeric(51), secret='') collection = appliance.collections.cloud_providers collection.discover(cred, EC2Provider) @pytest.mark.tier(3) @pytest.mark.uncollectif(lambda: store.current_appliance.version >= '5.9', reason='EC2 option not available') def test_password_max_character_validation(appliance): password = fauxfactory.gen_alphanumeric(51) cred = Credential( principal=fauxfactory.gen_alphanumeric(5), secret=password, verify_secret=password) collection = appliance.collections.cloud_providers collection.discover(cred, EC2Provider) @pytest.mark.tier(3) @test_requirements.discovery def test_name_max_character_validation_cloud(request, cloud_provider): """Test to validate that provider can have up to 255 characters in name""" request.addfinalizer(lambda: cloud_provider.delete_if_exists(cancel=False)) name = fauxfactory.gen_alphanumeric(255) with update(cloud_provider): cloud_provider.name = name assert cloud_provider.exists @pytest.mark.tier(3) def test_hostname_max_character_validation_cloud(appliance): """Test to validate max character for hostname field""" endpoint = RHOSEndpoint(hostname=fauxfactory.gen_alphanumeric(256), api_port=None, security_protocol=None) collection = appliance.collections.cloud_providers prov = collection.instantiate(prov_class=OpenStackProvider, name=fauxfactory.gen_alphanumeric(5), endpoints=endpoint) try: prov.create() except MoveTargetOutOfBoundsException: # TODO: Remove once fixed 1475303 prov.create() except AssertionError: endpoints = prov.create_view(prov.endpoints_form) assert endpoints.default.hostname.value == prov.hostname[0:255] @pytest.mark.tier(3) @test_requirements.discovery def test_api_port_max_character_validation_cloud(appliance): """Test to 
validate max character for api port field""" endpoint = RHOSEndpoint(hostname=fauxfactory.gen_alphanumeric(5), api_port=fauxfactory.gen_alphanumeric(16), security_protocol='Non-SSL') collection = appliance.collections.cloud_providers prov = collection.instantiate(prov_class=OpenStackProvider, name=fauxfactory.gen_alphanumeric(5), endpoints=endpoint) try: prov.create() except AssertionError: view = prov.create_view(prov.endpoints_form) text = view.default.api_port.value assert text == prov.default_endpoint.api_port[0:15] @pytest.mark.tier(3) def test_openstack_provider_has_api_version(appliance): """Check whether the Keystone API version field is present for Openstack.""" view = navigate_to(appliance.collections.cloud_providers, 'Add') view.fill({"prov_type": "OpenStack"}) assert view.api_version.is_displayed, "API version select is not visible" @pytest.mark.tier(3) @pytest.mark.uncollectif(lambda provider: not provider.one_of(EC2Provider) or 'govcloud' in provider.data.tags) def test_select_key_pair_none_while_provisioning(request, has_no_cloud_providers, provider): """ GH Issue: https://github.com/ManageIQ/manageiq/issues/10575 Requirement: Have an ec2 provider with single key pair (For now available in South America (Sao Paulo) region) 1. Compute -> Cloud -> Instances 2. Click on Provision Instances in Toolbar 3. Go to Properties 4. Select None in Guest Access Key Pair 5. 
None should be selected """ provider.region_name = 'South America (Sao Paulo)' request.addfinalizer(provider.delete_if_exists) provider.create() provider.validate() view = navigate_to(Instance, 'Provision') view.image_table[0].click() view.form.continue_button.click() view.form.properties.guest_keypair.fill('<None>') # check drop down was updated with selected value assert view.form.properties.guest_keypair.read() == '<None>' @pytest.mark.tier(3) def test_cloud_names_grid_floating_ips(appliance, ec2_provider, soft_assert): """ Requirement: Cloud provider with floating IPs Go to Network -> Floating IPs Change view to grid Test if names are displayed """ floating_ips_collection = appliance.collections.network_floating_ips view = navigate_to(floating_ips_collection, "All") view.toolbar.view_selector.select('Grid View') for entity in view.entities.get_all(): if appliance.version < '5.9': soft_assert(entity.name) else: soft_assert('title="{}"'.format(entity.data['address']) in entity.data['quadicon']) @pytest.mark.tier(3) def test_display_network_topology(appliance, openstack_provider): """ BZ: https://bugzilla.redhat.com/show_bug.cgi?id=1343553 Steps to Reproduce: 1. Add RHOS undercloud provider 2. Make sure it has no floating IPs 3. Go to Networks -> Topology 4. Topology should be shown without errors. 
""" floating_ips_collection = appliance.collections.network_floating_ips view = navigate_to(floating_ips_collection, "All") if not view.entities.get_all(): pytest.skip("No Floating IPs needed for this test") topology_col = appliance.collections.network_topology_elements view = navigate_to(topology_col, 'All') assert view.is_displayed view.flash.assert_no_error() class TestProvidersRESTAPI(object): @pytest.fixture(scope="function") def arbitration_profiles(self, request, appliance, cloud_provider): num_profiles = 2 response = _arbitration_profiles( request, appliance.rest_api, cloud_provider, num=num_profiles) assert_response(appliance) assert len(response) == num_profiles return response @pytest.mark.tier(3) @pytest.mark.parametrize('from_detail', [True, False], ids=['from_detail', 'from_collection']) def test_cloud_networks_query(self, cloud_provider, appliance, from_detail): """Tests querying cloud providers and cloud_networks collection for network info. Metadata: test_flag: rest """ if from_detail: networks = appliance.rest_api.collections.providers.get( name=cloud_provider.name).cloud_networks else: networks = appliance.rest_api.collections.cloud_networks assert_response(appliance) assert networks assert len(networks) == networks.subcount enabled_networks = 0 networks.reload(expand=True) for network in networks: assert 'CloudNetwork' in network.type if network.enabled is True: enabled_networks += 1 assert enabled_networks >= 1 @pytest.mark.tier(3) def test_security_groups_query(self, cloud_provider, appliance): """Tests querying cloud networks subcollection for security groups info. 
Metadata: test_flag: rest """ network = appliance.rest_api.collections.providers.get( name=cloud_provider.name).cloud_networks[0] network.reload(attributes='security_groups') security_groups = network.security_groups # "security_groups" needs to be present, even if it's just an empty list assert isinstance(security_groups, list) # if it's not empty, check type if security_groups: assert 'SecurityGroup' in security_groups[0]['type'] @pytest.mark.tier(3) # arbitration_profiles were removed in versions >= 5.9' @pytest.mark.uncollectif(lambda: store.current_appliance.version >= '5.9') def test_create_arbitration_profiles(self, appliance, arbitration_profiles): """Tests creation of arbitration profiles. Metadata: test_flag: rest """ for profile in arbitration_profiles: record = appliance.rest_api.collections.arbitration_profiles.get(id=profile.id) assert_response(appliance) assert record._data == profile._data assert 'ArbitrationProfile' in profile.type @pytest.mark.tier(3) # arbitration_profiles were removed in versions >= 5.9' @pytest.mark.uncollectif(lambda: store.current_appliance.version >= '5.9') @pytest.mark.parametrize('method', ['post', 'delete']) def test_delete_arbitration_profiles_from_detail(self, arbitration_profiles, method): """Tests delete arbitration profiles from detail. Metadata: test_flag: rest """ delete_resources_from_detail(arbitration_profiles, method=method) @pytest.mark.tier(3) # arbitration_profiles were removed in versions >= 5.9' @pytest.mark.uncollectif(lambda: store.current_appliance.version >= '5.9') def test_delete_arbitration_profiles_from_collection(self, arbitration_profiles): """Tests delete arbitration profiles from collection. 
Metadata: test_flag: rest """ delete_resources_from_collection(arbitration_profiles) @pytest.mark.tier(3) # arbitration_profiles were removed in versions >= 5.9' @pytest.mark.uncollectif(lambda: store.current_appliance.version >= '5.9') @pytest.mark.parametrize('from_detail', [True, False], ids=['from_detail', 'from_collection']) def test_edit_arbitration_profiles(self, appliance, arbitration_profiles, from_detail): """Tests editing of arbitration profiles. Metadata: test_flag: rest """ response_len = len(arbitration_profiles) zone = appliance.rest_api.collections.availability_zones[-1] locators = [{'id': zone.id}, {'href': zone.href}] new = [{'availability_zone': locators[i % 2]} for i in range(response_len)] if from_detail: edited = [] for i in range(response_len): edited.append(arbitration_profiles[i].action.edit(**new[i])) assert_response(appliance) else: for i in range(response_len): new[i].update(arbitration_profiles[i]._ref_repr()) edited = appliance.rest_api.collections.arbitration_profiles.action.edit(*new) assert_response(appliance) assert len(edited) == response_len for i in range(response_len): assert edited[i].availability_zone_id == zone.id @pytest.mark.tier(3) # arbitration_rules were removed in versions >= 5.9' @pytest.mark.uncollectif(lambda: store.current_appliance.version >= '5.9' or store.current_appliance.version < '5.8') def test_create_arbitration_rules_with_profile(self, request, appliance, arbitration_profiles): """Tests creation of arbitration rules referencing arbitration profiles. 
Metadata: test_flag: rest """ num_rules = 2 profile = arbitration_profiles[0] references = [{'id': profile.id}, {'href': profile._href}] data = [] for index in range(num_rules): data.append({ 'description': 'test admin rule {}'.format(fauxfactory.gen_alphanumeric(5)), 'operation': 'inject', 'arbitration_profile': references[index % 2], 'expression': {'EQUAL': {'field': 'User-userid', 'value': 'admin'}} }) response = creating_skeleton(request, appliance.rest_api, 'arbitration_rules', data) assert_response(appliance) assert len(response) == num_rules for rule in response: record = appliance.rest_api.collections.arbitration_rules.get(id=rule.id) assert record.arbitration_profile_id == rule.arbitration_profile_id == profile.id @pytest.mark.tier(3) # arbitration_rules were removed in versions >= 5.9' @pytest.mark.uncollectif(lambda: store.current_appliance.version >= '5.9' or store.current_appliance.version < '5.8') def test_create_arbitration_rule_with_invalid_profile(self, request, appliance): """Tests creation of arbitration rule referencing invalid arbitration profile. Metadata: test_flag: rest """ data = [{ 'description': 'test admin rule {}'.format(fauxfactory.gen_alphanumeric(5)), 'operation': 'inject', 'arbitration_profile': 'invalid_value', 'expression': {'EQUAL': {'field': 'User-userid', 'value': 'admin'}} }] response = creating_skeleton(request, appliance.rest_api, 'arbitration_rules', data) # this will fail once BZ 1433477 is fixed - change and expand the test accordingly assert_response(appliance) for rule in response: assert not hasattr(rule, 'arbitration_profile_id')
gpl-2.0
jsha/letsencrypt
certbot/tests/util.py
1
12620
"""Test utilities. .. warning:: This module is not part of the public API. """ import multiprocessing import os import pkg_resources import shutil import tempfile import unittest from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives import serialization import mock import OpenSSL import six from six.moves import reload_module # pylint: disable=import-error from acme import jose from certbot import constants from certbot import interfaces from certbot import storage from certbot import util from certbot import configuration from certbot.display import util as display_util def vector_path(*names): """Path to a test vector.""" return pkg_resources.resource_filename( __name__, os.path.join('testdata', *names)) def load_vector(*names): """Load contents of a test vector.""" # luckily, resource_string opens file in binary mode return pkg_resources.resource_string( __name__, os.path.join('testdata', *names)) def _guess_loader(filename, loader_pem, loader_der): _, ext = os.path.splitext(filename) if ext.lower() == '.pem': return loader_pem elif ext.lower() == '.der': return loader_der else: # pragma: no cover raise ValueError("Loader could not be recognized based on extension") def load_cert(*names): """Load certificate.""" loader = _guess_loader( names[-1], OpenSSL.crypto.FILETYPE_PEM, OpenSSL.crypto.FILETYPE_ASN1) return OpenSSL.crypto.load_certificate(loader, load_vector(*names)) def load_comparable_cert(*names): """Load ComparableX509 cert.""" return jose.ComparableX509(load_cert(*names)) def load_csr(*names): """Load certificate request.""" loader = _guess_loader( names[-1], OpenSSL.crypto.FILETYPE_PEM, OpenSSL.crypto.FILETYPE_ASN1) return OpenSSL.crypto.load_certificate_request(loader, load_vector(*names)) def load_comparable_csr(*names): """Load ComparableX509 certificate request.""" return jose.ComparableX509(load_csr(*names)) def load_rsa_private_key(*names): """Load RSA private key.""" loader = _guess_loader(names[-1], 
serialization.load_pem_private_key, serialization.load_der_private_key) return jose.ComparableRSAKey(loader( load_vector(*names), password=None, backend=default_backend())) def load_pyopenssl_private_key(*names): """Load pyOpenSSL private key.""" loader = _guess_loader( names[-1], OpenSSL.crypto.FILETYPE_PEM, OpenSSL.crypto.FILETYPE_ASN1) return OpenSSL.crypto.load_privatekey(loader, load_vector(*names)) def skip_unless(condition, reason): # pragma: no cover """Skip tests unless a condition holds. This implements the basic functionality of unittest.skipUnless which is only available on Python 2.7+. :param bool condition: If ``False``, the test will be skipped :param str reason: the reason for skipping the test :rtype: callable :returns: decorator that hides tests unless condition is ``True`` """ if hasattr(unittest, "skipUnless"): return unittest.skipUnless(condition, reason) elif condition: return lambda cls: cls else: return lambda cls: None def make_lineage(config_dir, testfile): """Creates a lineage defined by testfile. This creates the archive, live, and renewal directories if necessary and creates a simple lineage. 
:param str config_dir: path to the configuration directory :param str testfile: configuration file to base the lineage on :returns: path to the renewal conf file for the created lineage :rtype: str """ lineage_name = testfile[:-len('.conf')] conf_dir = os.path.join( config_dir, constants.RENEWAL_CONFIGS_DIR) archive_dir = os.path.join( config_dir, constants.ARCHIVE_DIR, lineage_name) live_dir = os.path.join( config_dir, constants.LIVE_DIR, lineage_name) for directory in (archive_dir, conf_dir, live_dir,): if not os.path.exists(directory): os.makedirs(directory) sample_archive = vector_path('sample-archive') for kind in os.listdir(sample_archive): shutil.copyfile(os.path.join(sample_archive, kind), os.path.join(archive_dir, kind)) for kind in storage.ALL_FOUR: os.symlink(os.path.join(archive_dir, '{0}1.pem'.format(kind)), os.path.join(live_dir, '{0}.pem'.format(kind))) conf_path = os.path.join(config_dir, conf_dir, testfile) with open(vector_path(testfile)) as src: with open(conf_path, 'w') as dst: dst.writelines( line.replace('MAGICDIR', config_dir) for line in src) return conf_path def patch_get_utility(target='zope.component.getUtility'): """Patch zope.component.getUtility to use a special mock IDisplay. The mock IDisplay works like a regular mock object, except it also also asserts that methods are called with valid arguments. :param str target: path to patch :returns: mock zope.component.getUtility :rtype: mock.MagicMock """ return mock.patch(target, new_callable=_create_get_utility_mock) def patch_get_utility_with_stdout(target='zope.component.getUtility', stdout=None): """Patch zope.component.getUtility to use a special mock IDisplay. The mock IDisplay works like a regular mock object, except it also also asserts that methods are called with valid arguments. The `message` argument passed to the IDisplay methods is passed to stdout's write method. 
:param str target: path to patch :param object stdout: object to write standard output to; it is expected to have a `write` method :returns: mock zope.component.getUtility :rtype: mock.MagicMock """ stdout = stdout if stdout else six.StringIO() freezable_mock = _create_get_utility_mock_with_stdout(stdout) return mock.patch(target, new=freezable_mock) class FreezableMock(object): """Mock object with the ability to freeze attributes. This class works like a regular mock.MagicMock object, except attributes and behavior set before the object is frozen cannot be changed during tests. If a func argument is provided to the constructor, this function is called first when an instance of FreezableMock is called, followed by the usual behavior defined by MagicMock. The return value of func is ignored. """ def __init__(self, frozen=False, func=None, return_value=mock.sentinel.DEFAULT): self._frozen_set = set() if frozen else set(('freeze',)) self._func = func self._mock = mock.MagicMock() if return_value != mock.sentinel.DEFAULT: self.return_value = return_value self._frozen = frozen def freeze(self): """Freeze object preventing further changes.""" self._frozen = True def __call__(self, *args, **kwargs): if self._func is not None: self._func(*args, **kwargs) return self._mock(*args, **kwargs) def __getattribute__(self, name): if name == '_frozen': try: return object.__getattribute__(self, name) except AttributeError: return False elif name in ('return_value', 'side_effect',): return getattr(object.__getattribute__(self, '_mock'), name) elif name == '_frozen_set' or name in self._frozen_set: return object.__getattribute__(self, name) else: return getattr(object.__getattribute__(self, '_mock'), name) def __setattr__(self, name, value): """ Before it is frozen, attributes are set on the FreezableMock instance and added to the _frozen_set. Attributes in the _frozen_set cannot be changed after the FreezableMock is frozen. In this case, they are set on the underlying _mock. 
In cases of return_value and side_effect, these attributes are always passed through to the instance's _mock and added to the _frozen_set before the object is frozen. """ if self._frozen: if name in self._frozen_set: raise AttributeError('Cannot change frozen attribute ' + name) else: return setattr(self._mock, name, value) if name != '_frozen_set': self._frozen_set.add(name) if name in ('return_value', 'side_effect'): return setattr(self._mock, name, value) else: return object.__setattr__(self, name, value) def _create_get_utility_mock(): display = FreezableMock() for name in interfaces.IDisplay.names(): # pylint: disable=no-member if name != 'notification': frozen_mock = FreezableMock(frozen=True, func=_assert_valid_call) setattr(display, name, frozen_mock) display.freeze() return FreezableMock(frozen=True, return_value=display) def _create_get_utility_mock_with_stdout(stdout): def _write_msg(message, *unused_args, **unused_kwargs): """Write to message to stdout. """ if message: stdout.write(message) def mock_method(*args, **kwargs): """ Mock function for IDisplay methods. 
""" _assert_valid_call(args, kwargs) _write_msg(*args, **kwargs) display = FreezableMock() for name in interfaces.IDisplay.names(): # pylint: disable=no-member if name == 'notification': frozen_mock = FreezableMock(frozen=True, func=_write_msg) setattr(display, name, frozen_mock) else: frozen_mock = FreezableMock(frozen=True, func=mock_method) setattr(display, name, frozen_mock) display.freeze() return FreezableMock(frozen=True, return_value=display) def _assert_valid_call(*args, **kwargs): assert_args = [args[0] if args else kwargs['message']] assert_kwargs = {} assert_kwargs['default'] = kwargs.get('default', None) assert_kwargs['cli_flag'] = kwargs.get('cli_flag', None) assert_kwargs['force_interactive'] = kwargs.get('force_interactive', False) # pylint: disable=star-args display_util.assert_valid_call(*assert_args, **assert_kwargs) class TempDirTestCase(unittest.TestCase): """Base test class which sets up and tears down a temporary directory""" def setUp(self): self.tempdir = tempfile.mkdtemp() def tearDown(self): shutil.rmtree(self.tempdir) class ConfigTestCase(TempDirTestCase): """Test class which sets up a NamespaceConfig object. """ def setUp(self): super(ConfigTestCase, self).setUp() self.config = configuration.NamespaceConfig( mock.MagicMock(**constants.CLI_DEFAULTS) ) self.config.verb = "certonly" self.config.config_dir = os.path.join(self.tempdir, 'config') self.config.work_dir = os.path.join(self.tempdir, 'work') self.config.logs_dir = os.path.join(self.tempdir, 'logs') self.config.cert_path = constants.CLI_DEFAULTS['auth_cert_path'] self.config.fullchain_path = constants.CLI_DEFAULTS['auth_chain_path'] self.config.chain_path = constants.CLI_DEFAULTS['auth_chain_path'] self.config.server = "example.com" def lock_and_call(func, lock_path): """Grab a lock for lock_path and call func. 
:param callable func: object to call after acquiring the lock :param str lock_path: path to file or directory to lock """ # Reload module to reset internal _LOCKS dictionary reload_module(util) # start child and wait for it to grab the lock cv = multiprocessing.Condition() cv.acquire() child_args = (cv, lock_path,) child = multiprocessing.Process(target=hold_lock, args=child_args) child.start() cv.wait() # call func and terminate the child func() cv.notify() cv.release() child.join() assert child.exitcode == 0 def hold_lock(cv, lock_path): # pragma: no cover """Acquire a file lock at lock_path and wait to release it. :param multiprocessing.Condition cv: condition for synchronization :param str lock_path: path to the file lock """ from certbot import lock if os.path.isdir(lock_path): my_lock = lock.lock_dir(lock_path) else: my_lock = lock.LockFile(lock_path) cv.acquire() cv.notify() cv.wait() my_lock.release()
apache-2.0
jbloom/mutpath
src/trajectory.py
1
39654
"""Module for representing mutational trajectories as directed graphs. Represents mutational trajectories through sequence space, which is the space in which each node is a unique sequence and edges are directional connections between nodes corresponding to mutations. These digraphs can be used to visualize mutational trajectories through sequence space. They are designed to be visualized using the GraphViz program. Written by Jesse Bloom. Functions defined in this module ------------------------------------ `WriteGraphVizTrajectory` - Writes a GraphViz visualization of a *Trajectory* object. `WriteMutationDates` - Writes files giving mutations dates and credible intervals. `WriteNodePersistence` - Writes times that nodes persist (time before next mutation). `DistanceAlongPath` - Distance along a mutational path. `HeuristicTraceback` - Tries to find the last high weight predecessor of a node. `IteratePaths` - Iterates of paths in a mutational path file. Classes defined in this module -------------------------------- `Trajectory` - A class for representing a mutational trajectory through sequence space. Detailed documentation for functions -------------------------------------- Provided in the individual function documentation strings below. """ import os import re import sys import math import sequtils import stats import plot def DistanceAlongPath(startseq, endseq, s): """Returns the distance along the mutational path. This distance of a sequence *s* along the path is defined as the Hamming Distance between *startseq* and *s* minus the Hamming Distance between *endseq* and *s* plus the Hamming distance between *startseq* and *endseq*. Sequences are not case sensitive. 
""" assert len(s) == len(startseq) == len(endseq) s_to_start = len([1 for (x, y) in zip(s.upper(), startseq.upper()) if x != y]) s_to_end = len([1 for (x, y) in zip(s.upper(), endseq.upper()) if x != y]) start_to_end = len([1 for (x, y) in zip(startseq.upper(), endseq.upper()) if x != y]) return s_to_start - s_to_end + start_to_end def HeuristicTraceback(t, node, cutoff): """Traces back to find last high weight precessor of a node. *t* is a *Trajectory* object. *node* is the string name of a node in *t*. *cutoff* is a number > 0 and <= 1. This function starts at node, and traces back along the trajectory to return the first predecessor of *node* with a weight >= *cutoff*. It does this by tracing back from *node* along its highest weight incoming edge to that predecessor, and then from that predecessor along its highest weight edge, etc until we find a predecessor with weight >= *cutoff*. This approach is not absolutely guaranteed to find the first predecessor with weight > *cutoff*, but it should work for reasonable trajectories. But beware, hence the word 'Heuristic' in the name of this function. The return value is the string name of the first high weight predecessor. This function recursively calls itself. """ assert node in t.nodes weights_predecessors = [] for ((n1, n2), weight) in t.edges.iteritems(): if n2 == node: weights_predecessors.append((weight, n1)) if not weights_predecessors: raise ValueError("failed to find predecessor") weights_predecessors.sort() weights_predecessors.reverse() (weight, predecessor) = weights_predecessors[0] if t.nodes[predecessor] >= cutoff: return predecessor else: return HeuristicTraceback(t, predecessor, cutoff) def WriteNodePersistence(t, nodestowrite, interval, persistencefile, cutoff): """Writes times for which nodes persist before the next mutation. The trajectory *t* specifies a set of nodes. 
For each node specified by *nodestowrite* and with weight >= *cutoff*, reports the time until that node experiences the next mutation that moves it to a new node. If *t* is a trajectory through protein sequence space that was created from nucleotide sequences (this will be the case if *t* was created with *translateseqs = True*), the next mutation that moves it to a new node is a nonsynonymous mutation. This method then also records the time until the first mutation of any type (synonymous or nonsynonymous) after the trajectory moves to nodes. The persistence is written as the posterior median from all paths containing plus the Bayesian credible interval specified by *interval*. The persistence times are written to the text file *persistencefile*. CALLING VARIABLES: * *t* is a *Trajectory* object that contains the persistence data. * *nodestowrite* is a dictionary specifying for which nodes we write persistence times, and the names used when writing these nodes. It is keyed by node sequences (which are the identifiers for the nodes in t.nodes) and the values are strings giving names that are used to label the nodes in the output. However, there does not actually have to be a node with persistence data for each key in *nodestowrite* -- if there is not persistence data for a node key, nothing is written for it. * *interval* specifies the range of the Bayesian credible interval, for example a value of 0.9 means that we print the 90% credible interval. * *persistencefile* is a string giving the name of the text file that we create which contains the persistence times. It has headers explaning its format. If this file already exists, it is overwritten. The order in which nodes is written is arbitrary. * *cutoff* is a weight cutoff (fraction of paths containing this node). We only write persistence times for nodes that are both in *nodestowrite* and have weights >= *cutoff*. This keeps us from writing persistence times for nodes that only occur rarely. 
""" d = dict([(name, node) for (node, name) in nodestowrite.iteritems()]) f = open(persistencefile, 'w') f.write("# Name : name of the node\n") f.write("# MedianPersistence : posterior median time to next node\n") f.write("# MinPersistenceInterval : minimum of %.2f percent credible interval\n" % (interval * 100)) f.write("# MaxPersistenceInterval : maximum of %.2f percent credible interval\n" % (interval * 100)) if t.persistence != t.timetofirstmut: f.write("# MedianTimeToFirstMut : posterior median time to first mutation\n") f.write("# MinTimeToFirstMutInterval : minimum of %.2f percent credible interval\n" % (interval * 100)) f.write("# MaxTimeToFirstMutInterval : maximum of %.2f percent credible interval\n" % (interval * 100)) f.write("#\n") f.write("#Name\tMedianPersistence\tMinPersistenceInterval\tMaxPersistenceInterval\tMedianTimeToFirstMut\tMinTimeToFirstMut\tMaxTimeToFirstMut\n") else: f.write("#\n") f.write("#Name\tMedianPersistence\tMinPersistenceInterval\tMaxPersistenceInterval\n") for (node, persistence) in t.persistence.iteritems(): if (node not in nodestowrite): continue if (len(persistence) / float(t.npaths)) < cutoff: continue name = nodestowrite[node] (median, mininterval, maxinterval) = stats.MedianCredibleInterval(persistence, interval) f.write("%s\t%f\t%f\t%f" % (name, median, mininterval, maxinterval)) if t.persistence != t.timetofirstmut: (median, mininterval, maxinterval) = stats.MedianCredibleInterval(t.timetofirstmut[node], interval) f.write("\t%f\t%f\t%f\n" % (median, mininterval, maxinterval)) else: f.write("\n") f.close() def WriteMutationDates(t, labelcutoff, interval, datesfile, datesplot, lasttipdate): """Creates text file and plot showing dates of mutations. For each mutation that occurs in at least *labelcutoff* fraction of the paths that form the trajectory *t*, this function writes the posterior median and a Bayesian credible interval for the date of first occurrence of that mutation. 
The posterior is taken over all paths that contain that mutation. The output also provides information about whether the mutations are on the branch from the starting sequence to the common ancestor, or from the common ancestor to the starting sequence. CALLING VARIABLES: * *t* is a *Trajectory* object that contains the mutations data. * *labelcutoff* is a number > 0 and <= 1. We write the dates of all mutations in *t* that have weights >= *labelcutoff* (occur in at least this fraction of the paths). * *interval* specifies the range of the Bayesian credible interval, for example a value of 0.9 means that we print the 90% credible interval. * *datesfile* is the name of the text file that we create which contains the dates and intervals. It is overwritten if it does not already exist. * *datesplot* is the name of the plot file that we create using matplotlib. This plot can only be created if matplotlib is available. So first check on this (for example using *Plot.PylabAvailable()*. If matplotlib is not available, or if you don't want to make the plot, make this argument *None*. Otherwise make it the name of the PDF plot file that you want to create. * *lasttipdate* specifies the absolute units for the dates. Dates in *t* will be specified in units of time before the most recent tip. Here provide a number giving the date of the most recent tip, and the dates shown are then this number minus the time for each mutation. 
""" mutdatestoca = [] # keys are (median, mininterval, maxinterval, mut, fractoca, weight) mutdatesfromca = [] # keys are (median, mininterval, maxinterval, mut, fractoca, weight) n = t.npaths for (mut, muttimes) in t.mutations.iteritems(): nmut = len(muttimes) weight = nmut / float(n) if weight >= labelcutoff: # mutation meets the cutoff fractoca = t.mutationstoca[mut] / float(nmut) (median, mininterval, maxinterval) = stats.MedianCredibleInterval(muttimes, interval) # we interchange minimum and median on the next line because the times # are in units before last tip prior to this conversion (median, maxinterval, mininterval) = (lasttipdate - median, lasttipdate - mininterval, lasttipdate - maxinterval) if fractoca > 0.5: mutdatestoca.append((median, mininterval, maxinterval, mut, fractoca, weight)) else: mutdatesfromca.append((median, mininterval, maxinterval, mut, fractoca, weight)) mutdatestoca.sort() mutdatestoca.reverse() mutdatesfromca.sort() mutdates = mutdatestoca + mutdatesfromca f = open(datesfile, 'w') f.write('# Mutation : mutation in 1, 2, ... 
numbering\n') f.write('# FracOccurrence : fraction of paths containing this mutation\n') f.write('# FracToCommonAncestor : fraction of times which this mutation is on path from starting sequence to common ancestor\n') f.write('# MedianDate : posterior median date of mutation\n') f.write('# MinInterval : minimum of %.2f percent Bayesian credible interval (median centered)\n' % interval) f.write('# MaxInterval : maximum of %.2f percent Bayesian credible interval (median centered)\n' % interval) f.write('#\n') f.write('# Mutation\tFracOccurrence\tFracToCommonAncestor\tMedianDate\tMinInterval\tMaxInterval\n') for (median, mininterval, maxinterval, mut, fractoca, weight) in mutdates: f.write('%s\t%f\t%f\t%f\t%f\t%f\n' % (mut, weight, fractoca, median, mininterval, maxinterval)) f.close() if datesplot: plot.DatesPlot(mutdates, datesplot, interval) def WriteGraphVizTrajectory(t, graphvizfile, minweight, labelcutoff,\ nodenames=None, nodesize=0.9, ranksep=0.1, nodesep=0.2, penwidth=10,\ fontsize=40, arrowsize=1.4, fontname='Helvetica-Bold', rankdir='TB',\ startendasdiamonds=True): """Writes a GraphViz visualization of a *Trajectory* object. This function creates a file *graphvizfile* that can be used to visualize the directed graph represented by a *Trajectory* object *t*. Graphviz is a freely available software package (http://www.graphviz.org/) for visualizing graphs. The trajectory is written in the DOT language (http://www.graphviz.org/doc/info/lang.html). The areas of nodes and the widths of edges are proportional to their weights. The color saturations of nodes and edges are linearly proportional to their weights. The rank of nodes (for example, their vertical position when *rankdir* is 'LR') is ordered according to their distance along the path from the starting to ending sequence. This distance is defined as the Hamming Distance from the starting node minus the Hamming Distance from the ending node plus the Hamming distance between the starting and ending nodes. 
CALLING VARIABLES: * *t* is the *Trajectory* object that contains the trajectory that we want to visualize. * *graphvizfile* is a string giving the name of the GraphViz input file that we want to create. It will typically end with the extension ``.dot``. If this file already exists, it is overwritten. You should be able to open this file directly with Graphviz. The file is written in the DOT language (http://www.graphviz.org/doc/info/lang.html). * *minweight* is a number specifying the minimum weight that a node or edge must possess in order to be shown on the graph. Nodes or edges with weights < *minweight* are not included. Note that this creates a possibility for orphan nodes if a node has a weight >= *minweight* but all of its incoming and outgoing nodes have weights < *minweight*. To show all nodes and edges regardless of weight, set *minweight* to zero. However, this can sometimes lead to a very large *graphvizfile* since there can be a huge number of very low weight nodes / edges. * *labelcutoff* is the minimum weight that an edge must possess in order to be labeled on the graph. In addition, all nodes with weight >= *labelcutoff* have an incoming edge that is labeled. If there is not such an edge, then traces back to find the first predecessor node with weight *labelcutoff* and then draws a different colored edge spanning multiple mutations to connect these nodes. Generally, you would want *labelcutoff > 0.5*. * *nodenames* is an optional argument that allows you to specify names for nodes. It is *None* by default. If you set it to another value, it should be a dictionary. It is keyed by node sequences (which are the identifiers for the nodes in t.nodes) and the values are strings giving names that are used to label the nodes in the trajectory. These names are written on the nodes only if the weight for that node is >= *labelcutoff*. OPTIONAL CALLING VARIABLES SPECIFYING FORMATTING DETAILS: * *nodesize* is the height of a node with weight. 
* *ranksep* is the separation between ranks, as fraction of *nodesize*. * *nodesep* is the minimum separation between nodes of the same rank, as fraction of *nodesize*. * *penwidth* is the pen width of an edge. * *fontsize* is the font size. * *arrowsize* is the size of the arrows. * *fontname* is the font style. * *rankdir* specifies the direction the ranks move. If set to 'TB' then the graph moves from top to bottom. If set to 'LR' then the graph moves from left to right. * *startendasdiamonds* is a Boolean switch. If True, we show the starting and ending nodes as diamonds rather than circles. We also make these starting and ending nodes larger in size to fit their full labels. If False, we make them circles with size proportional to weights like all other nodes. """ f = open(graphvizfile, 'w') f.write('digraph G { rankdir=%s; ranksep=%f; nodesep=%f;\n' % (rankdir, ranksep * nodesize, nodesep * nodesize)) # first write the nodes ordered into subgraphs of the same rank by DistanceAlongPath nodes_by_d = {} needs_incoming = {} # does node need an incoming edge? 
for (node, weight) in t.nodes.iteritems(): if weight < minweight: continue # weight too low d = DistanceAlongPath(t.startseq, t.endseq, node) if d in nodes_by_d: nodes_by_d[d].append((node, weight)) else: nodes_by_d[d] = [(node, weight)] if (weight >= labelcutoff) and node != t.startseq: needs_incoming[node] = True for d in range(max(nodes_by_d.keys()) + 1): if d not in nodes_by_d: continue # none of this distance f.write('\tsubgraph %d { label="DistanceAlongPath%d"; rank=same;\n' % (d, d)) for (node, weight) in nodes_by_d[d]: if startendasdiamonds and (node == t.startseq or node == t.endseq): shape = 'diamond' fixedsize = 'false' else: shape = 'circle' fixedsize = 'true' if nodenames and (node in nodenames) and weight >= labelcutoff: nodelabel = "%s" % nodenames[node] else: nodelabel = '' f.write('\t\tnode [style=filled shape=%s label="%s" height=%f color="0.7 %f 0.9" penwidth=%f arrowsize=%f fontsize=%d fontname="%s" fontcolor="white" fixedsize=%s] "%s";\n' % (shape, nodelabel, nodesize * math.sqrt(weight), weight, penwidth, arrowsize, fontsize, fontname, fixedsize, node)) f.write('\t}\n') # now write all of the edges # In order to get good overlay, first we write unabeled edges, then # labeled edges, and finally implied connections between major nodes without # connecting labeled edges. 
labeled_edges = [] for ((node1, node2), weight) in t.edges.iteritems(): if weight < minweight: continue # weight too low if weight >= labelcutoff: assert len(node1) == len(node2) diffs = [i for i in range(len(node1)) if node1[i] != node2[i]] if len(diffs) != 1: raise ValueError("Should be exactly one difference") i = diffs[0] edgelabel = '%s%d%s' % (node1[i], i + 1, node2[i]) if node2 in needs_incoming: del needs_incoming[node2] else: edgelabel = '' edgestring = '\t"%s" -> "%s" [weight=%f penwidth=%f color="0.7 %f 0.9" arrowsize=%f label="%s" fontsize=%d fontname="%s"];\n' % (node1, node2, weight, penwidth * weight, weight, arrowsize, edgelabel, fontsize, fontname) if edgelabel: labeled_edges.append(edgestring) # write these later else: f.write(edgestring) f.write(''.join(labeled_edges)) # now write labeled edges # now find implied connections between major nodes without incoming labeled edges for node in needs_incoming: predecessor = HeuristicTraceback(t, node, labelcutoff) diffs = [i for i in range(len(node)) if node[i] != predecessor[i]] assert len(diffs) >= 1 diffs.sort() edgelabel = '-'.join(["%s%d%s" % (predecessor[i], i + 1, node[i]) for i in diffs]) f.write('\t"%s" -> "%s" [weight=0 penwidth=%f color="0.0 1.0 0.9" arrowsize=%f label="%s" fontsize=%d fontname="%s" fontcolor="0.0 1.0 0.9"];\n' % (predecessor, node, penwidth, arrowsize, edgelabel, fontsize, fontname)) f.write('}') f.close() def IteratePaths(pathfile): """Iterates over paths in a mutational path file. *pathfile* should be a string giving a name of an input file specifying one or more mutational paths. These files are of the format created by ``mutpath_get_paths.py``. The required format is detailed below. This function will iterate over all paths in *pathfile*. For each path, it will return the tuple *(startseq, starttime, endseq, endtime, caseq, catime, tocamuts, fromcamuts)*. The entries of these tuples are as follows. 
All sequences are converted to upper case, as are all letters in the mutation notations. The times are measured in units before the most recent tip of the tree. Tuple entries: * *startseq* is the starting sequence specified by *startstrain_seq* * *starttime* is the time of *startseq* specified by *startstrain_time* * *endseq* is the ending sequence specified by *endstrain_seq* * *endtime* is the time of *endseq* specified by *endstrain_time* * *caseq* is the common ancestor sequence specified by *commonancestor_seq* * *catime* is the time of *caseq* specified by *commonancestor_time* * *tocamuts* is a list of the mutations going from *startseq* to *caseq*, specified in the order they are listed in the file (should be along the path) as the 2-tuples of the form *('A6G', 42.713)* where the entries are the mutation and then the time. * *fromcamuts* is like *tocamuts*, but for mutations going from *caseq* to *endseq*. The format of *pathfile* is as follows. This file should list mutational paths as:: MUTPATH 1 startstrain_name A/Aichi/2/1968_1968.50 startstrain_seq ATGGCAATGGGCTAA startstrain_time 42.5 endstrain_name A/Brisbane/10/2007_2007.10 endstrain_seq ATGACGATTGGATAA endstrain_time 3.9 commonancestor_seq ATGGCGATGGGCTAA commonancestor_time 43.12713 startstrain_to_commonancestor_path A6G : 42.713 commonancestor_to_endstrain_path G9T : 31.732 G4A : 25.1343 C12A : 10.134 MUTPATH 2 startstrain_name A/Aichi/2/1968_1968.50 startstrain_seq ATGGCAATGGGCTAA startstrain_time 42.5 endstrain_name A/Brisbane/10/2007_2007.10 endstrain_seq ATGACGATTGGATAA endstrain_time 3.9 commonancestor_seq ATGGCGATGGGCTAA commonancestor_time 44.12713 startstrain_to_commonancestor_path A6G : 42.113 G9T : 43.124 commonancestor_to_endstrain_path G4A : 21.1343 C5A : 19.531 A5C : 19.402 C12A : 9.134 The file lists each of the paths numbered starting at 1. Within each path, the mutations are indicated with numbering starting at 1 for the first position in the sequence. 
The times for the mutations, the starting and ending strains, and the most recent common ancestor of these two strains, are also indicated. These times are measured in units before the most recent tip node (so the root node would have the largest value of time). The mutations must move from the starting to the ending sequence, and if multiple paths are specified, then they all must have the same starting and ending sequences. """ mutmatch = re.compile('^(?P<mut>[A-z\*\-]\d+[A-z\*\-]) : (?P<time>\d+\.*\d*)$') if not os.path.isfile(pathfile): raise IOError("Cannot find pathfile %s" % pathfile) f = open(pathfile) firststartseq = firstendseq = None while True: try: line = f.next() except StopIteration: break # no more lines lines = [] while not line.isspace(): lines.append(line.strip()) line = f.next() tocamuts = [] fromcamuts = [] assert lines[0][ : 7] == 'MUTPATH' assert lines[1][ : 16] == 'startstrain_name' assert lines[2][ : 15] == 'startstrain_seq' startseq = lines[2].split()[1].strip().upper() if firststartseq == None: firststartseq = startseq elif firststartseq != startseq: raise IOError("Change in startseq") assert lines[3][ : 16] == 'startstrain_time' starttime = float(lines[3].split()[1]) assert lines[4][ : 14] == 'endstrain_name' assert lines[5][ : 13] == 'endstrain_seq' endseq = lines[5].split()[1].strip().upper() if firstendseq == None: firstendseq = endseq elif firstendseq != endseq: raise IOError("Change in endseq") assert lines[6][ : 14] == 'endstrain_time' endtime = float(lines[6].split()[1]) assert lines[7][ : 18] == 'commonancestor_seq' caseq = lines[7].split()[1].strip().upper() assert lines[8][ : 19] == 'commonancestor_time' catime = float(lines[8].split()[1]) assert lines[9] == 'startstrain_to_commonancestor_path' i = 10 while lines[i] != 'commonancestor_to_endstrain_path' and i < len(lines): m = mutmatch.search(lines[i]) if not m: raise ValueError("Failed to match mutation line:\n%s" % lines[i]) tocamuts.append((m.group('mut'), 
float(m.group('time')))) i += 1 if i < len(lines): if lines[i] != 'commonancestor_to_endstrain_path': raise ValueError("Expected 'commonancestor_to_endstrain_path', but got:\n%s" % lines[i]) i += 1 while i < len(lines): m = mutmatch.search(lines[i]) if not m: raise ValueError("Failed to match mutation line:\n%s" % lines[i]) fromcamuts.append((m.group('mut'), float(m.group('time')))) i += 1 yield (startseq, starttime, endseq, endtime, caseq, catime, tocamuts, fromcamuts) f.close() class Trajectory(object): """Class for representing a mutational trajectory through sequence space. This class represents a mutational trajectory in sequence space. The trajectory is a directed graph consisting of nodes (sequences) and edges (mutations connecting nodes). The trajectory moves from one known sequence to another known sequence, passing through some number of uncertain intermediates (nodes). The trajectory is created by passing it a set of possible mutational paths from the starting to ending sequence. In the trajectory, the weight of each node corresponds to the fraction of paths that contain that sequence, while the weight of each edge corresponds to the fraction of paths that contain that edge. Note that if a path contains a node or edge more than once (which can happen if there are mutational cycles), the node or edge is still considered to have occurred once in that path for the purposes of assigning the weights. Each *Trajectory* object *t* has the following attributes: * *t.npaths* : the number of individual paths used to construct the overall trajectory. * *t.startseq* : a string giving the starting sequence for the trajectory. * *t.endseq* : a string giving the ending sequence for the trajectory. * *t.nodes* : a dictionary keyed by strings representing the sequences for each node found at least once in the trajectory, and with values equal to the weight of that node (fraction of paths containing the node). 
* *t.edges* : a dictionary keyed by 2-tuples of strings *(s1, s2)* and values giving the weight of the directed edges from sequence *s1* to *s2*. * *t.mutations* : a dictionary keyed by mutation strings of the form 'G5A' specifying mutations where the numbering is in 1, 2, ... For each mutation that occurs in at least one of the paths passed to this trajectory, there will be a key. The values are lists giving the times of occurrence for all occurrences of that mutation in the paths used to create this trajectory. If a mutation occurs more than once in a path, only the time for its first occurrence is listed. So the fraction of paths that contain some mutation *m* is *t.mutations[m] / float(t.npaths)*. Note that if *translateseqs* is *True*, then the mutations specified here are only the protein mutations, not the nucleotide ones in the underlying nucleotide sequences. * *t.mutationstoca* : a dictionary keyed by mutation strings just as for *t.mutations*. Each mutation that is added to the lists in *t.mutations* can arise on the branch from the starting sequence to the common ancestor, or on the branch from the common ancestor to the ending sequence. The value of *t.mutationstoca[mut]* is the number of times that the first occurrence of *mut* is on the route from starting sequence to the common ancestor. So if *mut* is always on the path from the starting sequence to the common ancestor, then *t.mutationstoca[mut] == len(t.mutations[mut])*. If it is always on the path from the starting sequence to the ending sequence, then *t.mutations[mut] == 0*. * *t.persistence* : a dictionary keyed by the node sequences and with the values being a list of numbers. If a node occurs on a path, then the time for which the node sequence persisted before another mutation is listed (for the first occurrence of the node if it occurs multiple times on the same path). 
Note that it *translateseqs* is True, then these are the persistence times to the first non-synonymous mutation, as only those mutations change the node sequences. The total length of the list for each node will be equal to the number of paths that contained that node. * *t.timetofirstmut* : if *translateseqs* is False, then this is just equal to *t.persistence*. But if *translateseqs* is True, then the entries give the time after the occurrence of a node to the first mutation of any time -- synonymous or nonsynonymous. In this case, entries in *t.timetofirstmut* will often be less than those in *t.persistence*, since the first mutation to a node will often by synonymous, which will change the nucleotide sequence but not the actual protein sequence node identity. To create a *Trajectory* object *t*, use the command:: t = Trajectory(pathfile, translateseqs=False, printprogress=False) *pathfile* should be a string giving a name of an input file specifying one or more mutational paths. These files are of the format created by ``mutpath_get_paths.py``. They must be readable by the *IteratePaths* function. *translateseqs* is an optional argument that is *False* by default. If it is set to *True*, then the sequences contained within *mutpathfile* are taken to represent coding nucleotide sequences, but the trajectory is built through protein sequence space. In other words, the nucleotide sequences in the paths are translated, and the trajectory is built from these translated sequences. All of the nodes and edges will therefore connect protein sequences. Note that no checking is made to ensure that the sequences translate properly: any stop codons are simply translated to '*', codons containing gaps are translated to '-', and sequences that do not have lengths that are multiples of three have the extra one or two nucleotides truncated. *printprogress* is a switch specifying that we print progress as we process paths. 
You may want to use this if you are processing a large number of paths and want to output the progress. By default it is *False*, meaning that nothing is printed. If you set it to an integer, it will then print to *sys.stdout* after processing every *printprogress* paths. """ def __init__(self, pathfile, translateseqs=False, printprogress=False): """Intializes and returns a *Trajectory* object. Returns a *Trajectory* object *t* constructed from the collection of mutational paths encoded in *pathfile*. Detailed in main docstring for this class. """ if not os.path.isfile(pathfile): raise IOError("Cannot find pathfile %s" % pathfile) self.npaths = 0 self.nodes = {} self.edges = {} self.mutations = {} self.mutationstoca = {} self.persistence = {} if translateseqs: self.timetofirstmut = {} else: self.timetofirstmut = self.persistence self.startseq = self.endseq = None for (startseq, starttime, endseq, endtime, caseq, catime, tocamuts, fromcamuts) in IteratePaths(pathfile): onthispath = {} persistenceonthispath = {} timetofirstmutonthispath = {} self.npaths += 1 if printprogress: if not (self.npaths % printprogress): sys.stdout.write("Processed %d paths...\n" % self.npaths) sys.stdout.flush() currentseq = list(startseq) nodetime = starttime if translateseqs: startseq = sequtils.Translate([('head', startseq)], readthrough_n=True, readthrough_stop=True, truncate_incomplete=True, translate_gaps=True)[0][1] endseq = sequtils.Translate([('head', endseq)], readthrough_n=True, readthrough_stop=True, truncate_incomplete=True, translate_gaps=True)[0][1] if self.startseq == None: self.startseq = startseq self.endseq = endseq assert self.startseq == startseq and self.endseq == endseq onthispath[startseq] = True if startseq in self.nodes: self.nodes[startseq] += 1 else: self.nodes[startseq] = 1 firstfromca = True for (mutlist, toca) in [(tocamuts, True), (fromcamuts, False)]: for (mut, time) in mutlist: (wt, i, m) = (mut[0], int(mut[1 : -1]), mut[-1]) if not (1 <= i <= 
len(currentseq)): raise ValueError("Position %d is out of range." % i) if currentseq[i - 1] != wt: raise ValueError("Identity mismatch for %s" % mut) if wt == m: raise ValueError("Invalid mutation %s" % mut) s1 = ''.join(currentseq) currentseq[i - 1] = m s2 = ''.join(currentseq) if translateseqs: s1 = sequtils.Translate([('head', s1)], readthrough_n=True, readthrough_stop=True, truncate_incomplete=True, translate_gaps=True)[0][1] s2 = sequtils.Translate([('head', s2)], readthrough_n=True, readthrough_stop=True, truncate_incomplete=True, translate_gaps=True)[0][1] if not s2 in onthispath: onthispath[s2] = True if s2 in self.nodes: self.nodes[s2] += 1 else: self.nodes[s2] = 1 assert len(s1) == len(s2) == len(self.startseq) == len(self.endseq) if self.persistence != self.timetofirstmut: assert translateseqs if s1 not in timetofirstmutonthispath: timetofirstmutonthispath[s1] = True if toca: dt = time - nodetime elif firstfromca: dt = catime - nodetime + catime - time else: dt = nodetime - time if s1 in self.timetofirstmut: self.timetofirstmut[s1].append(dt) else: self.timetofirstmut[s1] = [dt] if s1 != s2: if s1 not in persistenceonthispath: persistenceonthispath[s1] = True if toca: dt = time - nodetime elif firstfromca: firstfromca = False dt = catime - nodetime + catime - time else: dt = nodetime - time if s1 in self.persistence: self.persistence[s1].append(dt) else: self.persistence[s1] = [dt] nodetime = time if translateseqs: diffs = [i for i in range(len(s1)) if s1[i] != s2[i]] assert len(diffs) == 1, str(diffs) i = diffs[0] mutstring = "%s%d%s" % (s1[i], i + 1, s2[i]) else: mutstring = mut if mutstring not in onthispath: if mutstring in self.mutations: self.mutations[mutstring].append(time) if toca: self.mutationstoca[mutstring] += 1 else: self.mutations[mutstring] = [time] if toca: self.mutationstoca[mutstring] = 1 else: self.mutationstoca[mutstring] = 0 onthispath[mutstring] = True tup = (s1, s2) if not tup in onthispath: onthispath[tup] = True if tup in 
self.edges: self.edges[tup] += 1 else: self.edges[tup] = 1 # check that path finished correctly if translateseqs: if sequtils.Translate([('head', ''.join(currentseq))], readthrough_n=True, readthrough_stop=True, truncate_incomplete=True, translate_gaps=True)[0][1] != endseq: raise ValueError("Failed to end on endseq") elif ''.join(currentseq) != endseq: raise ValueError("Failed to end on endseq") if not self.npaths: raise ValueError("Failed to find any paths in %s" % pathfile) for key in self.nodes.iterkeys(): if key != self.endseq: if len(self.persistence[key]) != self.nodes[key]: raise ValueError("Incorect number of persistence entries") self.nodes[key] /= float(self.npaths) for key in self.edges.iterkeys(): self.edges[key] /= float(self.npaths) if self.nodes[self.startseq] != 1: raise ValueError("The weight of startseq is not one") if self.nodes[self.endseq] != 1: raise ValueError("The weight of endseq is not one") # Test with doctest if __name__ == '__main__': import doctest doctest.testmod()
gpl-3.0
scrollback/kuma
vendor/packages/importlib/importlib/__init__.py
456
1327
"""Backport of importlib.import_module from 3.x.""" # While not critical (and in no way guaranteed!), it would be nice to keep this # code compatible with Python 2.3. import sys def _resolve_name(name, package, level): """Return the absolute name of the module to be imported.""" if not hasattr(package, 'rindex'): raise ValueError("'package' not set to a string") dot = len(package) for x in xrange(level, 1, -1): try: dot = package.rindex('.', 0, dot) except ValueError: raise ValueError("attempted relative import beyond top-level " "package") return "%s.%s" % (package[:dot], name) def import_module(name, package=None): """Import a module. The 'package' argument is required when performing a relative import. It specifies the package to use as the anchor point from which to resolve the relative import to an absolute import. """ if name.startswith('.'): if not package: raise TypeError("relative imports require the 'package' argument") level = 0 for character in name: if character != '.': break level += 1 name = _resolve_name(name[level:], package, level) __import__(name) return sys.modules[name]
mpl-2.0
Katya007/Python-Tutorials
Introduction-to-Computation-and-Programming-Using-Python-Guttag-2016-Finger-Exercises/fingerExercises.py
1
7601
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: Dr Ekaterina Abramova, 2017
Worked through solutions to finger exercises, Guttag 2nd edition, 2016.

Review notes:
- BUGFIX: ``bisection`` previously read the module-level global ``x`` instead
  of its own parameter ``S``, so it only worked by accident when the cube-root
  demo had run first; it is now self-contained.
- The interactive/demo statements are guarded behind ``main()`` so the module
  can be imported (and unit tested) without blocking on ``input()``.
"""


# ----------------------------- exercise p 18 ---------------------------------
def findOdd(L):
    """Print the largest odd number in L, or a message if none is odd."""
    oddNs = [ii for ii in L if ii % 2 != 0]
    if oddNs:
        print(max(oddNs))
    else:
        print('No odd numbers present')


# ----------------------------- exercise p 27 ---------------------------------
def root_pwr(x):
    """Print every (base, power) pair with 2 <= power <= 6 and base**power == x.

    Prints a "no such pair exists" message when none is found.  Negative x is
    only matched for odd powers (an even power cannot yield a negative value).
    """
    max_power = 6
    found = False
    for pwr in range(2, max_power + 1):
        ans = 0
        while ans ** pwr < abs(x):  # smallest candidate root for |x|
            ans += 1
        if ans ** pwr == abs(x):
            if (x < 0) and (pwr % 2 != 0):  # negative base works for odd powers
                found = True
                print('For', x, 'base:', -ans, 'power:', pwr)
            elif x > 0:
                found = True
                print('For', x, 'base:', ans, 'power:', pwr)
    if not found:
        print('For', x, 'no such pair exists')


def test_root_pwr():
    """Exercise root_pwr over a small range of inputs (prints the results)."""
    for ii in range(-10, 11):
        root_pwr(ii)


# --------------------------- exercise p 30 -----------------------------------
def sumStr(s):
    """Print the sum of the comma-separated decimal numbers in string s."""
    print(sum(float(tok) for tok in s.split(',')))


# --------------------------- exercise p 38 -----------------------------------
def bisection(S, pwr):
    """Approximate a root of f(g) = g**pwr - S by bisection.

    Returns (guess, numGuesses).
    BUGFIX: the original body used the module-level global ``x`` instead of
    the parameter ``S`` for the bracket and the convergence test.
    """
    epsilon = 0.01
    numGuesses = 0
    low = min(-1.0, S)   # widened bracket so negative targets are covered
    high = max(1.0, S)
    guess = (high + low) / 2.0
    while abs(guess ** pwr - S) >= epsilon:
        numGuesses += 1
        if guess ** pwr < S:
            low = guess
        else:
            high = guess
        guess = (high + low) / 2.0
    return guess, numGuesses


def newtonRaphson(S, pwr):
    """Approximate a root of f(g) = g**pwr - S by Newton-Raphson.

    Returns (guess, numGuesses).
    """
    epsilon = 0.01
    guess = S / 2.0
    numGuesses = 0
    while abs(guess ** pwr - S) >= epsilon:
        numGuesses += 1
        # Newton step: g <- g - f(g) / f'(g)
        guess = guess - ((guess ** pwr) - S) / (pwr * guess ** (pwr - 1))
    return guess, numGuesses


# ---------------------------- exercise p 42 ----------------------------------
def isIn(a, b):
    """Return True (printing the shorter string) if one string contains the other."""
    # Stable sort keeps the original tie-break: equal lengths -> a is "shorter".
    shorterStr, longerStr = sorted((a, b), key=len)
    if shorterStr in longerStr:
        print(shorterStr)
        return True
    return False


def isIn2(a, b):
    """Like isIn, but scans the longer string position by position."""
    if len(a) > len(b):
        longerStr, shorterStr = a, b
    else:
        longerStr, shorterStr = b, a
    lenShortStr = len(shorterStr)
    for ii in range(len(longerStr) - lenShortStr + 1):
        if shorterStr == longerStr[ii:ii + lenShortStr]:
            print(shorterStr)
            return True
    return False


def main():
    """Interactive demos and exercise driver code (was at module top level)."""
    # exercise p 18
    findOdd([-3, -2, -5])  # -3

    # exercise p 24: print the letter X the requested number of times
    numXs = int(input('How many times should I print the letter X?'))
    toPrint = ''
    while numXs > 0:
        toPrint = toPrint + 'X'
        numXs -= 1
    print(toPrint)

    # exercise p 24: largest odd among 10 user-supplied integers
    myInts = []
    for _ in range(10):
        myInts.append(int(input('Please input an integer.')))
    findOdd(myInts)

    # exercise p 27
    test_root_pwr()

    # exercise p 30
    sumStr('1.23,2.4,3.123')  # 6.753

    # exercise p 34: replacing x = 25 by x = -25 in Fig 3.4 makes the while
    # condition always true (ans**2 can never approach a negative target),
    # resulting in an infinite loop.

    # exercise p 34: bisection adapted for cube roots of +/- numbers
    x = -25
    epsilon = 0.01
    numGuesses = 0
    low = min(-1.0, x)   # change low to accommodate negative numbers
    high = max(1.0, x)
    ans = (high + low) / 2.0
    while abs(ans ** 3 - x) >= epsilon:  # power changed to 3 for cube root
        print('low =', low, 'high =', high, 'ans =', ans)
        numGuesses += 1
        if ans ** 3 < x:
            low = ans
        else:
            high = ans
        ans = (high + low) / 2.0
    print('numGuesses =', numGuesses)
    print(ans, 'is close to cube root of', x)

    # exercise p 35: binary 10011 == 16 + 2 + 1 == 19 decimal

    # exercise p 38: compare bisection vs Newton-Raphson iteration counts
    S, pwr = 27, 3
    print(bisection(S, pwr))      # ~ (2.9236, 14 iterations)
    print(newtonRaphson(S, pwr))  # ~ (3.0000, 7 iterations)
    # For wall-clock comparisons use e.g. %timeit; Newton-Raphson is faster.

    # exercise p 42
    isIn('hell abcd', 'abcd')   # True
    isIn2('hell abcd', 'abcd')  # True

    # exercise p 54: computing fib(5) recursively evaluates fib(2) 3 times.


if __name__ == '__main__':
    main()
mit
ghtmtt/QGIS
python/plugins/db_manager/db_plugins/vlayers/plugin.py
29
6103
# -*- coding: utf-8 -*-
"""
/***************************************************************************
Name                 : DB Manager plugin for virtual layers
Date                 : December 2015
copyright            : (C) 2015 by Hugo Mercier
email                : hugo dot mercier at oslandia dot com
 ***************************************************************************/

/***************************************************************************
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify  *
 *   it under the terms of the GNU General Public License as published by  *
 *   the Free Software Foundation; either version 2 of the License, or     *
 *   (at your option) any later version.                                   *
 *                                                                         *
 ***************************************************************************/
"""

# this will disable the dbplugin if the connector raise an ImportError
from .connector import VLayerConnector

from qgis.PyQt.QtCore import QCoreApplication
from qgis.PyQt.QtGui import QIcon

from qgis.core import QgsApplication, QgsVectorLayer, QgsProject, QgsVirtualLayerDefinition

from ..plugin import DBPlugin, Database, Table, VectorTable, TableField


def classFactory():
    # Entry point used by DB Manager's plugin loader.
    return VLayerDBPlugin


class VLayerDBPlugin(DBPlugin):
    """DB Manager plugin exposing the current project's layers as a
    virtual-layer "database"."""

    @classmethod
    def icon(self):
        return QgsApplication.getThemeIcon("/mIconVirtualLayer.svg")

    def connectionIcon(self):
        return QgsApplication.getThemeIcon("/providerQgis.svg")

    @classmethod
    def typeName(self):
        return 'vlayers'

    @classmethod
    def typeNameString(self):
        return QCoreApplication.translate('db_manager', 'Virtual Layers')

    @classmethod
    def providerName(self):
        return 'virtual'

    @classmethod
    def connectionSettingsKey(self):
        return 'vlayers'

    @classmethod
    def connections(self):
        # A single fixed "connection" representing the open project.
        return [VLayerDBPlugin(QCoreApplication.translate('db_manager', 'Project layers'))]

    def databasesFactory(self, connection, uri):
        return FakeDatabase(connection, uri)

    def database(self):
        return self.db

    # def info( self ):

    def connect(self, parent=None):
        # No real connection needed; the "database" is the project itself.
        self.connectToUri("qgis")
        return True


class FakeDatabase(Database):
    """In-memory Database adapter backed by project layers rather than a
    real DBMS connection."""

    def __init__(self, connection, uri):
        Database.__init__(self, connection, uri)

    def connectorsFactory(self, uri):
        return VLayerConnector(uri)

    def dataTablesFactory(self, row, db, schema=None):
        return LTable(row, db, schema)

    def vectorTablesFactory(self, row, db, schema=None):
        return LVectorTable(row, db, schema)

    def rasterTablesFactory(self, row, db, schema=None):
        # Raster layers are not supported by the virtual-layer provider.
        return None

    def info(self):
        # Imported lazily to avoid a circular import at module load time.
        from .info_model import LDatabaseInfo
        return LDatabaseInfo(self)

    def sqlResultModel(self, sql, parent):
        from .data_model import LSqlResultModel
        return LSqlResultModel(self, sql, parent)

    def sqlResultModelAsync(self, sql, parent):
        from .data_model import LSqlResultModelAsync
        return LSqlResultModelAsync(self, sql, parent)

    def toSqlLayer(self, sql, geomCol, uniqueCol, layerName="QueryLayer", layerType=None, avoidSelectById=False, _filter=""):
        """Build a QgsVectorLayer running *sql* through the virtual-layer
        provider; *uniqueCol*/*geomCol* set the UID and geometry fields."""
        df = QgsVirtualLayerDefinition()
        df.setQuery(sql)
        if uniqueCol is not None:
            # Strip outer quotes and unescape doubled quotes in the identifier.
            uniqueCol = uniqueCol.strip('"').replace('""', '"')
            df.setUid(uniqueCol)
        if geomCol is not None:
            df.setGeometryField(geomCol)
        vl = QgsVectorLayer(df.toString(), layerName, "virtual")
        if _filter:
            vl.setSubsetString(_filter)
        return vl

    def registerDatabaseActions(self, mainWindow):
        return

    def runAction(self, action):
        return

    def uniqueIdFunction(self):
        return None

    def explicitSpatialIndex(self):
        return True

    def spatialIndexClause(self, src_table, src_column, dest_table, dest_column):
        return '"%s"._search_frame_ = "%s"."%s"' % (src_table, dest_table, dest_column)

    def supportsComment(self):
        return False


class LTable(Table):
    """Non-spatial table entry backed by a project layer."""

    def __init__(self, row, db, schema=None):
        Table.__init__(self, db, None)
        # row is (name, isView, isSysTable) as produced by the connector.
        self.name, self.isView, self.isSysTable = row

    def tableFieldsFactory(self, row, table):
        return LTableField(row, table)

    def tableDataModel(self, parent):
        from .data_model import LTableDataModel
        return LTableDataModel(self, parent)

    def canBeAddedToCanvas(self):
        return False


class LVectorTable(LTable, VectorTable):
    """Spatial table entry; the last five row fields carry the geometry info."""

    def __init__(self, row, db, schema=None):
        LTable.__init__(self, row[:-5], db, schema)
        VectorTable.__init__(self, db, schema)
        # SpatiaLite does case-insensitive checks for table names, but the
        # SL provider didn't do the same in QGIS < 1.9, so self.geomTableName
        # stores the table name like stored in the geometry_columns table
        self.geomTableName, self.geomColumn, self.geomType, self.geomDim, self.srid = row[
            -5:]

    def uri(self):
        uri = self.database().uri()
        uri.setDataSource('', self.geomTableName, self.geomColumn)
        return uri

    def hasSpatialIndex(self, geom_column=None):
        # Virtual layers always expose a spatial index.
        return True

    def createSpatialIndex(self, geom_column=None):
        return

    def deleteSpatialIndex(self, geom_column=None):
        return

    def refreshTableEstimatedExtent(self):
        self.extent = self.database().connector.getTableExtent(
            ("id", self.geomTableName), None)

    def runAction(self, action):
        return

    def toMapLayer(self):
        # geomTableName doubles as the project layer id here.
        return QgsProject.instance().mapLayer(self.geomTableName)


class LTableField(TableField):
    """Field descriptor unpacked from a connector row."""

    def __init__(self, row, table):
        TableField.__init__(self, table)
        self.num, self.name, self.dataType, self.notNull, self.default, self.primaryKey = row
        # NOTE(review): stores the raw default value (truthiness stands in
        # for "has a default"), not a bool — confirm callers expect this.
        self.hasDefault = self.default
gpl-2.0
nrhine1/scikit-learn
sklearn/datasets/tests/test_rcv1.py
322
2414
"""Test the rcv1 loader. Skipped if rcv1 is not already downloaded to data_home. """ import errno import scipy.sparse as sp import numpy as np from sklearn.datasets import fetch_rcv1 from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_true from sklearn.utils.testing import SkipTest def test_fetch_rcv1(): try: data1 = fetch_rcv1(shuffle=False, download_if_missing=False) except IOError as e: if e.errno == errno.ENOENT: raise SkipTest("Download RCV1 dataset to run this test.") X1, Y1 = data1.data, data1.target cat_list, s1 = data1.target_names.tolist(), data1.sample_id # test sparsity assert_true(sp.issparse(X1)) assert_true(sp.issparse(Y1)) assert_equal(60915113, X1.data.size) assert_equal(2606875, Y1.data.size) # test shapes assert_equal((804414, 47236), X1.shape) assert_equal((804414, 103), Y1.shape) assert_equal((804414,), s1.shape) assert_equal(103, len(cat_list)) # test ordering of categories first_categories = [u'C11', u'C12', u'C13', u'C14', u'C15', u'C151'] assert_array_equal(first_categories, cat_list[:6]) # test number of sample for some categories some_categories = ('GMIL', 'E143', 'CCAT') number_non_zero_in_cat = (5, 1206, 381327) for num, cat in zip(number_non_zero_in_cat, some_categories): j = cat_list.index(cat) assert_equal(num, Y1[:, j].data.size) # test shuffling and subset data2 = fetch_rcv1(shuffle=True, subset='train', random_state=77, download_if_missing=False) X2, Y2 = data2.data, data2.target s2 = data2.sample_id # The first 23149 samples are the training samples assert_array_equal(np.sort(s1[:23149]), np.sort(s2)) # test some precise values some_sample_ids = (2286, 3274, 14042) for sample_id in some_sample_ids: idx1 = s1.tolist().index(sample_id) idx2 = s2.tolist().index(sample_id) feature_values_1 = X1[idx1, :].toarray() feature_values_2 = X2[idx2, :].toarray() assert_almost_equal(feature_values_1, 
feature_values_2) target_values_1 = Y1[idx1, :].toarray() target_values_2 = Y2[idx2, :].toarray() assert_almost_equal(target_values_1, target_values_2)
bsd-3-clause
meowler/sandbox
node_modules/node-gyp/gyp/pylib/gyp/generator/eclipse.py
1825
17014
# Copyright (c) 2012 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """GYP backend that generates Eclipse CDT settings files. This backend DOES NOT generate Eclipse CDT projects. Instead, it generates XML files that can be imported into an Eclipse CDT project. The XML file contains a list of include paths and symbols (i.e. defines). Because a full .cproject definition is not created by this generator, it's not possible to properly define the include dirs and symbols for each file individually. Instead, one set of includes/symbols is generated for the entire project. This works fairly well (and is a vast improvement in general), but may still result in a few indexer issues here and there. This generator has no automated tests, so expect it to be broken. """ from xml.sax.saxutils import escape import os.path import subprocess import gyp import gyp.common import gyp.msvs_emulation import shlex import xml.etree.cElementTree as ET generator_wants_static_library_dependencies_adjusted = False generator_default_variables = { } for dirname in ['INTERMEDIATE_DIR', 'PRODUCT_DIR', 'LIB_DIR', 'SHARED_LIB_DIR']: # Some gyp steps fail if these are empty(!), so we convert them to variables generator_default_variables[dirname] = '$' + dirname for unused in ['RULE_INPUT_PATH', 'RULE_INPUT_ROOT', 'RULE_INPUT_NAME', 'RULE_INPUT_DIRNAME', 'RULE_INPUT_EXT', 'EXECUTABLE_PREFIX', 'EXECUTABLE_SUFFIX', 'STATIC_LIB_PREFIX', 'STATIC_LIB_SUFFIX', 'SHARED_LIB_PREFIX', 'SHARED_LIB_SUFFIX', 'CONFIGURATION_NAME']: generator_default_variables[unused] = '' # Include dirs will occasionally use the SHARED_INTERMEDIATE_DIR variable as # part of the path when dealing with generated headers. This value will be # replaced dynamically for each configuration. 
generator_default_variables['SHARED_INTERMEDIATE_DIR'] = \ '$SHARED_INTERMEDIATE_DIR' def CalculateVariables(default_variables, params): generator_flags = params.get('generator_flags', {}) for key, val in generator_flags.items(): default_variables.setdefault(key, val) flavor = gyp.common.GetFlavor(params) default_variables.setdefault('OS', flavor) if flavor == 'win': # Copy additional generator configuration data from VS, which is shared # by the Eclipse generator. import gyp.generator.msvs as msvs_generator generator_additional_non_configuration_keys = getattr(msvs_generator, 'generator_additional_non_configuration_keys', []) generator_additional_path_sections = getattr(msvs_generator, 'generator_additional_path_sections', []) gyp.msvs_emulation.CalculateCommonVariables(default_variables, params) def CalculateGeneratorInputInfo(params): """Calculate the generator specific info that gets fed to input (called by gyp).""" generator_flags = params.get('generator_flags', {}) if generator_flags.get('adjust_static_libraries', False): global generator_wants_static_library_dependencies_adjusted generator_wants_static_library_dependencies_adjusted = True def GetAllIncludeDirectories(target_list, target_dicts, shared_intermediate_dirs, config_name, params, compiler_path): """Calculate the set of include directories to be used. Returns: A list including all the include_dir's specified for every target followed by any include directories that were added as cflag compiler options. """ gyp_includes_set = set() compiler_includes_list = [] # Find compiler's default include dirs. if compiler_path: command = shlex.split(compiler_path) command.extend(['-E', '-xc++', '-v', '-']) proc = subprocess.Popen(args=command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) output = proc.communicate()[1] # Extract the list of include dirs from the output, which has this format: # ... # #include "..." 
search starts here: # #include <...> search starts here: # /usr/include/c++/4.6 # /usr/local/include # End of search list. # ... in_include_list = False for line in output.splitlines(): if line.startswith('#include'): in_include_list = True continue if line.startswith('End of search list.'): break if in_include_list: include_dir = line.strip() if include_dir not in compiler_includes_list: compiler_includes_list.append(include_dir) flavor = gyp.common.GetFlavor(params) if flavor == 'win': generator_flags = params.get('generator_flags', {}) for target_name in target_list: target = target_dicts[target_name] if config_name in target['configurations']: config = target['configurations'][config_name] # Look for any include dirs that were explicitly added via cflags. This # may be done in gyp files to force certain includes to come at the end. # TODO(jgreenwald): Change the gyp files to not abuse cflags for this, and # remove this. if flavor == 'win': msvs_settings = gyp.msvs_emulation.MsvsSettings(target, generator_flags) cflags = msvs_settings.GetCflags(config_name) else: cflags = config['cflags'] for cflag in cflags: if cflag.startswith('-I'): include_dir = cflag[2:] if include_dir not in compiler_includes_list: compiler_includes_list.append(include_dir) # Find standard gyp include dirs. if config.has_key('include_dirs'): include_dirs = config['include_dirs'] for shared_intermediate_dir in shared_intermediate_dirs: for include_dir in include_dirs: include_dir = include_dir.replace('$SHARED_INTERMEDIATE_DIR', shared_intermediate_dir) if not os.path.isabs(include_dir): base_dir = os.path.dirname(target_name) include_dir = base_dir + '/' + include_dir include_dir = os.path.abspath(include_dir) gyp_includes_set.add(include_dir) # Generate a list that has all the include dirs. 
all_includes_list = list(gyp_includes_set) all_includes_list.sort() for compiler_include in compiler_includes_list: if not compiler_include in gyp_includes_set: all_includes_list.append(compiler_include) # All done. return all_includes_list def GetCompilerPath(target_list, data, options): """Determine a command that can be used to invoke the compiler. Returns: If this is a gyp project that has explicit make settings, try to determine the compiler from that. Otherwise, see if a compiler was specified via the CC_target environment variable. """ # First, see if the compiler is configured in make's settings. build_file, _, _ = gyp.common.ParseQualifiedTarget(target_list[0]) make_global_settings_dict = data[build_file].get('make_global_settings', {}) for key, value in make_global_settings_dict: if key in ['CC', 'CXX']: return os.path.join(options.toplevel_dir, value) # Check to see if the compiler was specified as an environment variable. for key in ['CC_target', 'CC', 'CXX']: compiler = os.environ.get(key) if compiler: return compiler return 'gcc' def GetAllDefines(target_list, target_dicts, data, config_name, params, compiler_path): """Calculate the defines for a project. Returns: A dict that includes explict defines declared in gyp files along with all of the default defines that the compiler uses. """ # Get defines declared in the gyp files. 
all_defines = {} flavor = gyp.common.GetFlavor(params) if flavor == 'win': generator_flags = params.get('generator_flags', {}) for target_name in target_list: target = target_dicts[target_name] if flavor == 'win': msvs_settings = gyp.msvs_emulation.MsvsSettings(target, generator_flags) extra_defines = msvs_settings.GetComputedDefines(config_name) else: extra_defines = [] if config_name in target['configurations']: config = target['configurations'][config_name] target_defines = config['defines'] else: target_defines = [] for define in target_defines + extra_defines: split_define = define.split('=', 1) if len(split_define) == 1: split_define.append('1') if split_define[0].strip() in all_defines: # Already defined continue all_defines[split_define[0].strip()] = split_define[1].strip() # Get default compiler defines (if possible). if flavor == 'win': return all_defines # Default defines already processed in the loop above. if compiler_path: command = shlex.split(compiler_path) command.extend(['-E', '-dM', '-']) cpp_proc = subprocess.Popen(args=command, cwd='.', stdin=subprocess.PIPE, stdout=subprocess.PIPE) cpp_output = cpp_proc.communicate()[0] cpp_lines = cpp_output.split('\n') for cpp_line in cpp_lines: if not cpp_line.strip(): continue cpp_line_parts = cpp_line.split(' ', 2) key = cpp_line_parts[1] if len(cpp_line_parts) >= 3: val = cpp_line_parts[2] else: val = '1' all_defines[key] = val return all_defines def WriteIncludePaths(out, eclipse_langs, include_dirs): """Write the includes section of a CDT settings export file.""" out.write(' <section name="org.eclipse.cdt.internal.ui.wizards.' 
\ 'settingswizards.IncludePaths">\n') out.write(' <language name="holder for library settings"></language>\n') for lang in eclipse_langs: out.write(' <language name="%s">\n' % lang) for include_dir in include_dirs: out.write(' <includepath workspace_path="false">%s</includepath>\n' % include_dir) out.write(' </language>\n') out.write(' </section>\n') def WriteMacros(out, eclipse_langs, defines): """Write the macros section of a CDT settings export file.""" out.write(' <section name="org.eclipse.cdt.internal.ui.wizards.' \ 'settingswizards.Macros">\n') out.write(' <language name="holder for library settings"></language>\n') for lang in eclipse_langs: out.write(' <language name="%s">\n' % lang) for key in sorted(defines.iterkeys()): out.write(' <macro><name>%s</name><value>%s</value></macro>\n' % (escape(key), escape(defines[key]))) out.write(' </language>\n') out.write(' </section>\n') def GenerateOutputForConfig(target_list, target_dicts, data, params, config_name): options = params['options'] generator_flags = params.get('generator_flags', {}) # build_dir: relative path from source root to our output files. # e.g. "out/Debug" build_dir = os.path.join(generator_flags.get('output_dir', 'out'), config_name) toplevel_build = os.path.join(options.toplevel_dir, build_dir) # Ninja uses out/Debug/gen while make uses out/Debug/obj/gen as the # SHARED_INTERMEDIATE_DIR. Include both possible locations. 
shared_intermediate_dirs = [os.path.join(toplevel_build, 'obj', 'gen'), os.path.join(toplevel_build, 'gen')] GenerateCdtSettingsFile(target_list, target_dicts, data, params, config_name, os.path.join(toplevel_build, 'eclipse-cdt-settings.xml'), options, shared_intermediate_dirs) GenerateClasspathFile(target_list, target_dicts, options.toplevel_dir, toplevel_build, os.path.join(toplevel_build, 'eclipse-classpath.xml')) def GenerateCdtSettingsFile(target_list, target_dicts, data, params, config_name, out_name, options, shared_intermediate_dirs): gyp.common.EnsureDirExists(out_name) with open(out_name, 'w') as out: out.write('<?xml version="1.0" encoding="UTF-8"?>\n') out.write('<cdtprojectproperties>\n') eclipse_langs = ['C++ Source File', 'C Source File', 'Assembly Source File', 'GNU C++', 'GNU C', 'Assembly'] compiler_path = GetCompilerPath(target_list, data, options) include_dirs = GetAllIncludeDirectories(target_list, target_dicts, shared_intermediate_dirs, config_name, params, compiler_path) WriteIncludePaths(out, eclipse_langs, include_dirs) defines = GetAllDefines(target_list, target_dicts, data, config_name, params, compiler_path) WriteMacros(out, eclipse_langs, defines) out.write('</cdtprojectproperties>\n') def GenerateClasspathFile(target_list, target_dicts, toplevel_dir, toplevel_build, out_name): '''Generates a classpath file suitable for symbol navigation and code completion of Java code (such as in Android projects) by finding all .java and .jar files used as action inputs.''' gyp.common.EnsureDirExists(out_name) result = ET.Element('classpath') def AddElements(kind, paths): # First, we need to normalize the paths so they are all relative to the # toplevel dir. 
rel_paths = set() for path in paths: if os.path.isabs(path): rel_paths.add(os.path.relpath(path, toplevel_dir)) else: rel_paths.add(path) for path in sorted(rel_paths): entry_element = ET.SubElement(result, 'classpathentry') entry_element.set('kind', kind) entry_element.set('path', path) AddElements('lib', GetJavaJars(target_list, target_dicts, toplevel_dir)) AddElements('src', GetJavaSourceDirs(target_list, target_dicts, toplevel_dir)) # Include the standard JRE container and a dummy out folder AddElements('con', ['org.eclipse.jdt.launching.JRE_CONTAINER']) # Include a dummy out folder so that Eclipse doesn't use the default /bin # folder in the root of the project. AddElements('output', [os.path.join(toplevel_build, '.eclipse-java-build')]) ET.ElementTree(result).write(out_name) def GetJavaJars(target_list, target_dicts, toplevel_dir): '''Generates a sequence of all .jars used as inputs.''' for target_name in target_list: target = target_dicts[target_name] for action in target.get('actions', []): for input_ in action['inputs']: if os.path.splitext(input_)[1] == '.jar' and not input_.startswith('$'): if os.path.isabs(input_): yield input_ else: yield os.path.join(os.path.dirname(target_name), input_) def GetJavaSourceDirs(target_list, target_dicts, toplevel_dir): '''Generates a sequence of all likely java package root directories.''' for target_name in target_list: target = target_dicts[target_name] for action in target.get('actions', []): for input_ in action['inputs']: if (os.path.splitext(input_)[1] == '.java' and not input_.startswith('$')): dir_ = os.path.dirname(os.path.join(os.path.dirname(target_name), input_)) # If there is a parent 'src' or 'java' folder, navigate up to it - # these are canonical package root names in Chromium. This will # break if 'src' or 'java' exists in the package structure. This # could be further improved by inspecting the java file for the # package name if this proves to be too fragile in practice. 
parent_search = dir_ while os.path.basename(parent_search) not in ['src', 'java']: parent_search, _ = os.path.split(parent_search) if not parent_search or parent_search == toplevel_dir: # Didn't find a known root, just return the original path yield dir_ break else: yield parent_search def GenerateOutput(target_list, target_dicts, data, params): """Generate an XML settings file that can be imported into a CDT project.""" if params['options'].generator_output: raise NotImplementedError("--generator_output not implemented for eclipse") user_config = params.get('generator_flags', {}).get('config', None) if user_config: GenerateOutputForConfig(target_list, target_dicts, data, params, user_config) else: config_names = target_dicts[target_list[0]]['configurations'].keys() for config_name in config_names: GenerateOutputForConfig(target_list, target_dicts, data, params, config_name)
mit
msmathers/SpasmDB
spasm/crawlers/lastfm.py
1
1103
SLEEP = 21600 # 6 hours import spasm.data.sources as _data import spasm.web.sources as _web import time Data = _data.LastFM() Web = _web.LastFM() def run(artist): # Update name, thumbnail, metadata _artist = Web.get_artist(artist) for field in _artist: if field in artist: artist[field] = _artist[field] Data.update_artist(artist) # Update stats Data.add_stats({ 'artist_id' : artist['id'], 'listeners' : _artist['listeners'], 'playcount' : _artist['playcount'] }) # Update albums, tracks, tags, similar web2data = ( (Web.get_top_albums, Data.update_album), (Web.get_top_tracks, Data.update_track), (Web.get_top_tags, Data.update_tag), (Web.get_similar_artists, Data.update_similar)) for web, data in web2data: dd = web(artist) for d in dd: data(d) if __name__ == "__main__": while True: artists = Data.get_artists() for artist in artists: run(artist) time.sleep(SLEEP)
mit
lexus24/w16b_test
static/Brython3.1.1-20150328-091302/Lib/_sre.py
622
51369
# NOT_RPYTHON """ A pure Python reimplementation of the _sre module from CPython 2.4 Copyright 2005 Nik Haldimann, licensed under the MIT license This code is based on material licensed under CNRI's Python 1.6 license and copyrighted by: Copyright (c) 1997-2001 by Secret Labs AB """ MAXREPEAT = 2147483648 #import array import operator, sys from sre_constants import ATCODES, OPCODES, CHCODES from sre_constants import SRE_INFO_PREFIX, SRE_INFO_LITERAL from sre_constants import SRE_FLAG_UNICODE, SRE_FLAG_LOCALE import sys # Identifying as _sre from Python 2.3 or 2.4 #if sys.version_info[:2] >= (2, 4): MAGIC = 20031017 #else: # MAGIC = 20030419 # In _sre.c this is bytesize of the code word type of the C implementation. # There it's 2 for normal Python builds and more for wide unicode builds (large # enough to hold a 32-bit UCS-4 encoded character). Since here in pure Python # we only see re bytecodes as Python longs, we shouldn't have to care about the # codesize. But sre_compile will compile some stuff differently depending on the # codesize (e.g., charsets). # starting with python 3.3 CODESIZE is 4 #if sys.maxunicode == 65535: # CODESIZE = 2 #else: CODESIZE = 4 copyright = "_sre.py 2.4c Copyright 2005 by Nik Haldimann" def getcodesize(): return CODESIZE def compile(pattern, flags, code, groups=0, groupindex={}, indexgroup=[None]): """Compiles (or rather just converts) a pattern descriptor to a SRE_Pattern object. 
Actual compilation to opcodes happens in sre_compile.""" return SRE_Pattern(pattern, flags, code, groups, groupindex, indexgroup) def getlower(char_ord, flags): if (char_ord < 128) or (flags & SRE_FLAG_UNICODE) \ or (flags & SRE_FLAG_LOCALE and char_ord < 256): #return ord(unichr(char_ord).lower()) return ord(chr(char_ord).lower()) else: return char_ord class SRE_Pattern: def __init__(self, pattern, flags, code, groups=0, groupindex={}, indexgroup=[None]): self.pattern = pattern self.flags = flags self.groups = groups self.groupindex = groupindex # Maps group names to group indices self._indexgroup = indexgroup # Maps indices to group names self._code = code def match(self, string, pos=0, endpos=sys.maxsize): """If zero or more characters at the beginning of string match this regular expression, return a corresponding MatchObject instance. Return None if the string does not match the pattern.""" state = _State(string, pos, endpos, self.flags) if state.match(self._code): return SRE_Match(self, state) return None def search(self, string, pos=0, endpos=sys.maxsize): """Scan through string looking for a location where this regular expression produces a match, and return a corresponding MatchObject instance. 
Return None if no position in the string matches the pattern.""" state = _State(string, pos, endpos, self.flags) if state.search(self._code): return SRE_Match(self, state) else: return None def findall(self, string, pos=0, endpos=sys.maxsize): """Return a list of all non-overlapping matches of pattern in string.""" matchlist = [] state = _State(string, pos, endpos, self.flags) while state.start <= state.end: state.reset() state.string_position = state.start if not state.search(self._code): break match = SRE_Match(self, state) if self.groups == 0 or self.groups == 1: item = match.group(self.groups) else: item = match.groups("") matchlist.append(item) if state.string_position == state.start: state.start += 1 else: state.start = state.string_position return matchlist def _subx(self, template, string, count=0, subn=False): filter = template if not callable(template) and "\\" in template: # handle non-literal strings ; hand it over to the template compiler #import sre #sre was renamed to re #fix me brython #print("possible issue at _sre.py line 116") import re as sre filter = sre._subx(self, template) state = _State(string, 0, sys.maxsize, self.flags) sublist = [] n = last_pos = 0 while not count or n < count: state.reset() state.string_position = state.start if not state.search(self._code): break if last_pos < state.start: sublist.append(string[last_pos:state.start]) if not (last_pos == state.start and last_pos == state.string_position and n > 0): # the above ignores empty matches on latest position if callable(filter): sublist.append(filter(SRE_Match(self, state))) else: sublist.append(filter) last_pos = state.string_position n += 1 if state.string_position == state.start: state.start += 1 else: state.start = state.string_position if last_pos < state.end: sublist.append(string[last_pos:state.end]) item = "".join(sublist) if subn: return item, n else: return item def sub(self, repl, string, count=0): """Return the string obtained by replacing the leftmost 
non-overlapping occurrences of pattern in string by the replacement repl.""" return self._subx(repl, string, count, False) def subn(self, repl, string, count=0): """Return the tuple (new_string, number_of_subs_made) found by replacing the leftmost non-overlapping occurrences of pattern with the replacement repl.""" return self._subx(repl, string, count, True) def split(self, string, maxsplit=0): """Split string by the occurrences of pattern.""" splitlist = [] state = _State(string, 0, sys.maxsize, self.flags) n = 0 last = state.start while not maxsplit or n < maxsplit: state.reset() state.string_position = state.start if not state.search(self._code): break if state.start == state.string_position: # zero-width match if last == state.end: # or end of string break state.start += 1 continue splitlist.append(string[last:state.start]) # add groups (if any) if self.groups: match = SRE_Match(self, state) splitlist.extend(list(match.groups(None))) n += 1 last = state.start = state.string_position splitlist.append(string[last:state.end]) return splitlist def finditer(self, string, pos=0, endpos=sys.maxsize): """Return a list of all non-overlapping matches of pattern in string.""" #scanner = self.scanner(string, pos, endpos) _list=[] _m=self.scanner(string, pos, endpos) _re=SRE_Scanner(self, string, pos, endpos) _m=_re.search() while _m: _list.append(_m) _m=_re.search() return _list #return iter(scanner.search, None) def scanner(self, string, start=0, end=sys.maxsize): return SRE_Scanner(self, string, start, end) def __copy__(self): raise TypeError("cannot copy this pattern object") def __deepcopy__(self): raise TypeError("cannot copy this pattern object") class SRE_Scanner: """Undocumented scanner interface of sre.""" def __init__(self, pattern, string, start, end): self.pattern = pattern self._state = _State(string, start, end, self.pattern.flags) def _match_search(self, matcher): state = self._state state.reset() state.string_position = state.start match = None if 
matcher(self.pattern._code): match = SRE_Match(self.pattern, state) if match is None or state.string_position == state.start: state.start += 1 else: state.start = state.string_position return match def match(self): return self._match_search(self._state.match) def search(self): return self._match_search(self._state.search) class SRE_Match: def __init__(self, pattern, state): self.re = pattern self.string = state.string self.pos = state.pos self.endpos = state.end self.lastindex = state.lastindex if self.lastindex < 0: self.lastindex = None self.regs = self._create_regs(state) #statement below is not valid under python3 ( 0 <= None) #if pattern._indexgroup and 0 <= self.lastindex < len(pattern._indexgroup): if self.lastindex is not None and pattern._indexgroup and 0 <= self.lastindex < len(pattern._indexgroup): # The above upper-bound check should not be necessary, as the re # compiler is supposed to always provide an _indexgroup list long # enough. But the re.Scanner class seems to screw up something # there, test_scanner in test_re won't work without upper-bound # checking. XXX investigate this and report bug to CPython. 
self.lastgroup = pattern._indexgroup[self.lastindex] else: self.lastgroup = None def _create_regs(self, state): """Creates a tuple of index pairs representing matched groups.""" regs = [(state.start, state.string_position)] for group in range(self.re.groups): mark_index = 2 * group if mark_index + 1 < len(state.marks) \ and state.marks[mark_index] is not None \ and state.marks[mark_index + 1] is not None: regs.append((state.marks[mark_index], state.marks[mark_index + 1])) else: regs.append((-1, -1)) return tuple(regs) def _get_index(self, group): if isinstance(group, int): if group >= 0 and group <= self.re.groups: return group else: if group in self.re.groupindex: return self.re.groupindex[group] raise IndexError("no such group") def _get_slice(self, group, default): group_indices = self.regs[group] if group_indices[0] >= 0: return self.string[group_indices[0]:group_indices[1]] else: return default def start(self, group=0): """Returns the indices of the start of the substring matched by group; group defaults to zero (meaning the whole matched substring). Returns -1 if group exists but did not contribute to the match.""" return self.regs[self._get_index(group)][0] def end(self, group=0): """Returns the indices of the end of the substring matched by group; group defaults to zero (meaning the whole matched substring). Returns -1 if group exists but did not contribute to the match.""" return self.regs[self._get_index(group)][1] def span(self, group=0): """Returns the 2-tuple (m.start(group), m.end(group)).""" return self.start(group), self.end(group) def expand(self, template): """Return the string obtained by doing backslash substitution and resolving group references on template.""" import sre return sre._expand(self.re, self, template) def groups(self, default=None): """Returns a tuple containing all the subgroups of the match. 
The default argument is used for groups that did not participate in the match (defaults to None).""" groups = [] for indices in self.regs[1:]: if indices[0] >= 0: groups.append(self.string[indices[0]:indices[1]]) else: groups.append(default) return tuple(groups) def groupdict(self, default=None): """Return a dictionary containing all the named subgroups of the match. The default argument is used for groups that did not participate in the match (defaults to None).""" groupdict = {} for key, value in self.re.groupindex.items(): groupdict[key] = self._get_slice(value, default) return groupdict def group(self, *args): """Returns one or more subgroups of the match. Each argument is either a group index or a group name.""" if len(args) == 0: args = (0,) grouplist = [] for group in args: grouplist.append(self._get_slice(self._get_index(group), None)) if len(grouplist) == 1: return grouplist[0] else: return tuple(grouplist) def __copy__(): raise TypeError("cannot copy this pattern object") def __deepcopy__(): raise TypeError("cannot copy this pattern object") class _State: def __init__(self, string, start, end, flags): self.string = string if start < 0: start = 0 if end > len(string): end = len(string) self.start = start self.string_position = self.start self.end = end self.pos = start self.flags = flags self.reset() def reset(self): self.marks = [] self.lastindex = -1 self.marks_stack = [] self.context_stack = [] self.repeat = None def match(self, pattern_codes): # Optimization: Check string length. pattern_codes[3] contains the # minimum length for a string to possibly match. # brython.. 
the optimization doesn't work #if pattern_codes[0] == OPCODES["info"] and pattern_codes[3]: # if self.end - self.string_position < pattern_codes[3]: # #_log("reject (got %d chars, need %d)" # # % (self.end - self.string_position, pattern_codes[3])) # return False dispatcher = _OpcodeDispatcher() self.context_stack.append(_MatchContext(self, pattern_codes)) has_matched = None while len(self.context_stack) > 0: context = self.context_stack[-1] has_matched = dispatcher.match(context) if has_matched is not None: # don't pop if context isn't done self.context_stack.pop() return has_matched def search(self, pattern_codes): flags = 0 if pattern_codes[0] == OPCODES["info"]: # optimization info block # <INFO> <1=skip> <2=flags> <3=min> <4=max> <5=prefix info> if pattern_codes[2] & SRE_INFO_PREFIX and pattern_codes[5] > 1: return self.fast_search(pattern_codes) flags = pattern_codes[2] pattern_codes = pattern_codes[pattern_codes[1] + 1:] string_position = self.start if pattern_codes[0] == OPCODES["literal"]: # Special case: Pattern starts with a literal character. 
This is # used for short prefixes character = pattern_codes[1] while True: while string_position < self.end \ and ord(self.string[string_position]) != character: string_position += 1 if string_position >= self.end: return False self.start = string_position string_position += 1 self.string_position = string_position if flags & SRE_INFO_LITERAL: return True if self.match(pattern_codes[2:]): return True return False # General case while string_position <= self.end: self.reset() self.start = self.string_position = string_position if self.match(pattern_codes): return True string_position += 1 return False def fast_search(self, pattern_codes): """Skips forward in a string as fast as possible using information from an optimization info block.""" # pattern starts with a known prefix # <5=length> <6=skip> <7=prefix data> <overlap data> flags = pattern_codes[2] prefix_len = pattern_codes[5] prefix_skip = pattern_codes[6] # don't really know what this is good for prefix = pattern_codes[7:7 + prefix_len] overlap = pattern_codes[7 + prefix_len - 1:pattern_codes[1] + 1] pattern_codes = pattern_codes[pattern_codes[1] + 1:] i = 0 string_position = self.string_position while string_position < self.end: while True: if ord(self.string[string_position]) != prefix[i]: if i == 0: break else: i = overlap[i] else: i += 1 if i == prefix_len: # found a potential match self.start = string_position + 1 - prefix_len self.string_position = string_position + 1 \ - prefix_len + prefix_skip if flags & SRE_INFO_LITERAL: return True # matched all of pure literal pattern if self.match(pattern_codes[2 * prefix_skip:]): return True i = overlap[i] break string_position += 1 return False def set_mark(self, mark_nr, position): if mark_nr & 1: # This id marks the end of a group. 
# fix python 3 division incompatability #self.lastindex = mark_nr / 2 + 1 self.lastindex = mark_nr // 2 + 1 if mark_nr >= len(self.marks): self.marks.extend([None] * (mark_nr - len(self.marks) + 1)) self.marks[mark_nr] = position def get_marks(self, group_index): marks_index = 2 * group_index if len(self.marks) > marks_index + 1: return self.marks[marks_index], self.marks[marks_index + 1] else: return None, None def marks_push(self): self.marks_stack.append((self.marks[:], self.lastindex)) def marks_pop(self): self.marks, self.lastindex = self.marks_stack.pop() def marks_pop_keep(self): self.marks, self.lastindex = self.marks_stack[-1] def marks_pop_discard(self): self.marks_stack.pop() def lower(self, char_ord): return getlower(char_ord, self.flags) class _MatchContext: def __init__(self, state, pattern_codes): self.state = state self.pattern_codes = pattern_codes self.string_position = state.string_position self.code_position = 0 self.has_matched = None def push_new_context(self, pattern_offset): """Creates a new child context of this context and pushes it on the stack. 
pattern_offset is the offset off the current code position to start interpreting from.""" child_context = _MatchContext(self.state, self.pattern_codes[self.code_position + pattern_offset:]) #print("_sre.py:517:pushing new context") #, child_context.has_matched) #print(self.state.string_position) #print(self.pattern_codes[self.code_position + pattern_offset:]) #print(pattern_offset) self.state.context_stack.append(child_context) return child_context def peek_char(self, peek=0): return self.state.string[self.string_position + peek] def skip_char(self, skip_count): self.string_position += skip_count def remaining_chars(self): return self.state.end - self.string_position def peek_code(self, peek=0): return self.pattern_codes[self.code_position + peek] def skip_code(self, skip_count): self.code_position += skip_count def remaining_codes(self): return len(self.pattern_codes) - self.code_position def at_beginning(self): return self.string_position == 0 def at_end(self): return self.string_position == self.state.end def at_linebreak(self): return not self.at_end() and _is_linebreak(self.peek_char()) def at_boundary(self, word_checker): if self.at_beginning() and self.at_end(): return False that = not self.at_beginning() and word_checker(self.peek_char(-1)) this = not self.at_end() and word_checker(self.peek_char()) return this != that class _RepeatContext(_MatchContext): def __init__(self, context): _MatchContext.__init__(self, context.state, context.pattern_codes[context.code_position:]) self.count = -1 #print('569:repeat', context.state.repeat) self.previous = context.state.repeat self.last_position = None class _Dispatcher: DISPATCH_TABLE = None def dispatch(self, code, context): method = self.DISPATCH_TABLE.get(code, self.__class__.unknown) return method(self, context) def unknown(self, code, ctx): raise NotImplementedError() def build_dispatch_table(cls, code_dict, method_prefix): if cls.DISPATCH_TABLE is not None: return table = {} for key, value in 
code_dict.items(): if hasattr(cls, "%s%s" % (method_prefix, key)): table[value] = getattr(cls, "%s%s" % (method_prefix, key)) cls.DISPATCH_TABLE = table build_dispatch_table = classmethod(build_dispatch_table) class _OpcodeDispatcher(_Dispatcher): def __init__(self): self.executing_contexts = {} self.at_dispatcher = _AtcodeDispatcher() self.ch_dispatcher = _ChcodeDispatcher() self.set_dispatcher = _CharsetDispatcher() def match(self, context): """Returns True if the current context matches, False if it doesn't and None if matching is not finished, ie must be resumed after child contexts have been matched.""" while context.remaining_codes() > 0 and context.has_matched is None: opcode = context.peek_code() if not self.dispatch(opcode, context): return None if context.has_matched is None: context.has_matched = False return context.has_matched def dispatch(self, opcode, context): """Dispatches a context on a given opcode. Returns True if the context is done matching, False if it must be resumed when next encountered.""" #if self.executing_contexts.has_key(id(context)): if id(context) in self.executing_contexts: generator = self.executing_contexts[id(context)] del self.executing_contexts[id(context)] has_finished = next(generator) else: method = self.DISPATCH_TABLE.get(opcode, _OpcodeDispatcher.unknown) has_finished = method(self, context) if hasattr(has_finished, "__next__"): # avoid using the types module generator = has_finished has_finished = next(generator) if not has_finished: self.executing_contexts[id(context)] = generator return has_finished def op_success(self, ctx): # end of pattern #self._log(ctx, "SUCCESS") ctx.state.string_position = ctx.string_position ctx.has_matched = True return True def op_failure(self, ctx): # immediate failure #self._log(ctx, "FAILURE") ctx.has_matched = False return True def general_op_literal(self, ctx, compare, decorate=lambda x: x): #print(ctx.peek_char()) if ctx.at_end() or not compare(decorate(ord(ctx.peek_char())), 
decorate(ctx.peek_code(1))): ctx.has_matched = False ctx.skip_code(2) ctx.skip_char(1) def op_literal(self, ctx): # match literal string # <LITERAL> <code> #self._log(ctx, "LITERAL", ctx.peek_code(1)) self.general_op_literal(ctx, operator.eq) return True def op_not_literal(self, ctx): # match anything that is not the given literal character # <NOT_LITERAL> <code> #self._log(ctx, "NOT_LITERAL", ctx.peek_code(1)) self.general_op_literal(ctx, operator.ne) return True def op_literal_ignore(self, ctx): # match literal regardless of case # <LITERAL_IGNORE> <code> #self._log(ctx, "LITERAL_IGNORE", ctx.peek_code(1)) self.general_op_literal(ctx, operator.eq, ctx.state.lower) return True def op_not_literal_ignore(self, ctx): # match literal regardless of case # <LITERAL_IGNORE> <code> #self._log(ctx, "LITERAL_IGNORE", ctx.peek_code(1)) self.general_op_literal(ctx, operator.ne, ctx.state.lower) return True def op_at(self, ctx): # match at given position # <AT> <code> #self._log(ctx, "AT", ctx.peek_code(1)) if not self.at_dispatcher.dispatch(ctx.peek_code(1), ctx): ctx.has_matched = False #print('_sre.py:line693, update context.has_matched variable') return True ctx.skip_code(2) return True def op_category(self, ctx): # match at given category # <CATEGORY> <code> #self._log(ctx, "CATEGORY", ctx.peek_code(1)) if ctx.at_end() or not self.ch_dispatcher.dispatch(ctx.peek_code(1), ctx): ctx.has_matched = False #print('_sre.py:line703, update context.has_matched variable') return True ctx.skip_code(2) ctx.skip_char(1) return True def op_any(self, ctx): # match anything (except a newline) # <ANY> #self._log(ctx, "ANY") if ctx.at_end() or ctx.at_linebreak(): ctx.has_matched = False #print('_sre.py:line714, update context.has_matched variable') return True ctx.skip_code(1) ctx.skip_char(1) return True def op_any_all(self, ctx): # match anything # <ANY_ALL> #self._log(ctx, "ANY_ALL") if ctx.at_end(): ctx.has_matched = False #print('_sre.py:line725, update context.has_matched variable') 
return True ctx.skip_code(1) ctx.skip_char(1) return True def general_op_in(self, ctx, decorate=lambda x: x): #self._log(ctx, "OP_IN") #print('general_op_in') if ctx.at_end(): ctx.has_matched = False #print('_sre.py:line734, update context.has_matched variable') return skip = ctx.peek_code(1) ctx.skip_code(2) # set op pointer to the set code #print(ctx.peek_char(), ord(ctx.peek_char()), # decorate(ord(ctx.peek_char()))) if not self.check_charset(ctx, decorate(ord(ctx.peek_char()))): #print('_sre.py:line738, update context.has_matched variable') ctx.has_matched = False return ctx.skip_code(skip - 1) ctx.skip_char(1) #print('end:general_op_in') def op_in(self, ctx): # match set member (or non_member) # <IN> <skip> <set> #self._log(ctx, "OP_IN") self.general_op_in(ctx) return True def op_in_ignore(self, ctx): # match set member (or non_member), disregarding case of current char # <IN_IGNORE> <skip> <set> #self._log(ctx, "OP_IN_IGNORE") self.general_op_in(ctx, ctx.state.lower) return True def op_jump(self, ctx): # jump forward # <JUMP> <offset> #self._log(ctx, "JUMP", ctx.peek_code(1)) ctx.skip_code(ctx.peek_code(1) + 1) return True # skip info # <INFO> <skip> op_info = op_jump def op_mark(self, ctx): # set mark # <MARK> <gid> #self._log(ctx, "OP_MARK", ctx.peek_code(1)) ctx.state.set_mark(ctx.peek_code(1), ctx.string_position) ctx.skip_code(2) return True def op_branch(self, ctx): # alternation # <BRANCH> <0=skip> code <JUMP> ... <NULL> #self._log(ctx, "BRANCH") ctx.state.marks_push() ctx.skip_code(1) current_branch_length = ctx.peek_code(0) while current_branch_length: # The following tries to shortcut branches starting with a # (unmatched) literal. _sre.c also shortcuts charsets here. 
if not (ctx.peek_code(1) == OPCODES["literal"] and \ (ctx.at_end() or ctx.peek_code(2) != ord(ctx.peek_char()))): ctx.state.string_position = ctx.string_position child_context = ctx.push_new_context(1) #print("_sre.py:803:op_branch") yield False if child_context.has_matched: ctx.has_matched = True yield True ctx.state.marks_pop_keep() ctx.skip_code(current_branch_length) current_branch_length = ctx.peek_code(0) ctx.state.marks_pop_discard() ctx.has_matched = False #print('_sre.py:line805, update context.has_matched variable') yield True def op_repeat_one(self, ctx): # match repeated sequence (maximizing). # this operator only works if the repeated item is exactly one character # wide, and we're not already collecting backtracking points. # <REPEAT_ONE> <skip> <1=min> <2=max> item <SUCCESS> tail mincount = ctx.peek_code(2) maxcount = ctx.peek_code(3) #print("repeat one", mincount, maxcount) #self._log(ctx, "REPEAT_ONE", mincount, maxcount) if ctx.remaining_chars() < mincount: ctx.has_matched = False yield True ctx.state.string_position = ctx.string_position count = self.count_repetitions(ctx, maxcount) ctx.skip_char(count) if count < mincount: ctx.has_matched = False yield True if ctx.peek_code(ctx.peek_code(1) + 1) == OPCODES["success"]: # tail is empty. we're finished ctx.state.string_position = ctx.string_position ctx.has_matched = True yield True ctx.state.marks_push() if ctx.peek_code(ctx.peek_code(1) + 1) == OPCODES["literal"]: # Special case: Tail starts with a literal. Skip positions where # the rest of the pattern cannot possibly match. 
char = ctx.peek_code(ctx.peek_code(1) + 2) while True: while count >= mincount and \ (ctx.at_end() or ord(ctx.peek_char()) != char): ctx.skip_char(-1) count -= 1 if count < mincount: break ctx.state.string_position = ctx.string_position child_context = ctx.push_new_context(ctx.peek_code(1) + 1) #print("_sre.py:856:push_new_context") yield False if child_context.has_matched: ctx.has_matched = True yield True ctx.skip_char(-1) count -= 1 ctx.state.marks_pop_keep() else: # General case: backtracking while count >= mincount: ctx.state.string_position = ctx.string_position child_context = ctx.push_new_context(ctx.peek_code(1) + 1) yield False if child_context.has_matched: ctx.has_matched = True yield True ctx.skip_char(-1) count -= 1 ctx.state.marks_pop_keep() ctx.state.marks_pop_discard() ctx.has_matched = False #ctx.has_matched = True # <== this should be True (so match object gets returned to program) yield True def op_min_repeat_one(self, ctx): # match repeated sequence (minimizing) # <MIN_REPEAT_ONE> <skip> <1=min> <2=max> item <SUCCESS> tail mincount = ctx.peek_code(2) maxcount = ctx.peek_code(3) #self._log(ctx, "MIN_REPEAT_ONE", mincount, maxcount) if ctx.remaining_chars() < mincount: ctx.has_matched = False yield True ctx.state.string_position = ctx.string_position if mincount == 0: count = 0 else: count = self.count_repetitions(ctx, mincount) if count < mincount: ctx.has_matched = False #print('_sre.py:line891, update context.has_matched variable') yield True ctx.skip_char(count) if ctx.peek_code(ctx.peek_code(1) + 1) == OPCODES["success"]: # tail is empty. 
we're finished ctx.state.string_position = ctx.string_position ctx.has_matched = True yield True ctx.state.marks_push() while maxcount == MAXREPEAT or count <= maxcount: ctx.state.string_position = ctx.string_position child_context = ctx.push_new_context(ctx.peek_code(1) + 1) #print('_sre.py:916:push new context') yield False if child_context.has_matched: ctx.has_matched = True yield True ctx.state.string_position = ctx.string_position if self.count_repetitions(ctx, 1) == 0: break ctx.skip_char(1) count += 1 ctx.state.marks_pop_keep() ctx.state.marks_pop_discard() ctx.has_matched = False yield True def op_repeat(self, ctx): # create repeat context. all the hard work is done by the UNTIL # operator (MAX_UNTIL, MIN_UNTIL) # <REPEAT> <skip> <1=min> <2=max> item <UNTIL> tail #self._log(ctx, "REPEAT", ctx.peek_code(2), ctx.peek_code(3)) #if ctx.state.repeat is None: # print("951:ctx.state.repeat is None") # #ctx.state.repeat=_RepeatContext(ctx) repeat = _RepeatContext(ctx) ctx.state.repeat = repeat ctx.state.string_position = ctx.string_position child_context = ctx.push_new_context(ctx.peek_code(1) + 1) #print("_sre.py:941:push new context", id(child_context)) #print(child_context.state.repeat) #print(ctx.state.repeat) # are these two yields causing the issue? 
yield False ctx.state.repeat = repeat.previous ctx.has_matched = child_context.has_matched yield True def op_max_until(self, ctx): # maximizing repeat # <REPEAT> <skip> <1=min> <2=max> item <MAX_UNTIL> tail repeat = ctx.state.repeat #print("op_max_until") #, id(ctx.state.repeat)) if repeat is None: #print(id(ctx), id(ctx.state)) raise RuntimeError("Internal re error: MAX_UNTIL without REPEAT.") mincount = repeat.peek_code(2) maxcount = repeat.peek_code(3) ctx.state.string_position = ctx.string_position count = repeat.count + 1 #self._log(ctx, "MAX_UNTIL", count) if count < mincount: # not enough matches repeat.count = count child_context = repeat.push_new_context(4) yield False ctx.has_matched = child_context.has_matched if not ctx.has_matched: repeat.count = count - 1 ctx.state.string_position = ctx.string_position yield True if (count < maxcount or maxcount == MAXREPEAT) \ and ctx.state.string_position != repeat.last_position: # we may have enough matches, if we can match another item, do so repeat.count = count ctx.state.marks_push() save_last_position = repeat.last_position # zero-width match protection repeat.last_position = ctx.state.string_position child_context = repeat.push_new_context(4) yield False repeat.last_position = save_last_position if child_context.has_matched: ctx.state.marks_pop_discard() ctx.has_matched = True yield True ctx.state.marks_pop() repeat.count = count - 1 ctx.state.string_position = ctx.string_position # cannot match more repeated items here. 
make sure the tail matches ctx.state.repeat = repeat.previous child_context = ctx.push_new_context(1) #print("_sre.py:987:op_max_until") yield False ctx.has_matched = child_context.has_matched if not ctx.has_matched: ctx.state.repeat = repeat ctx.state.string_position = ctx.string_position yield True def op_min_until(self, ctx): # minimizing repeat # <REPEAT> <skip> <1=min> <2=max> item <MIN_UNTIL> tail repeat = ctx.state.repeat if repeat is None: raise RuntimeError("Internal re error: MIN_UNTIL without REPEAT.") mincount = repeat.peek_code(2) maxcount = repeat.peek_code(3) ctx.state.string_position = ctx.string_position count = repeat.count + 1 #self._log(ctx, "MIN_UNTIL", count) if count < mincount: # not enough matches repeat.count = count child_context = repeat.push_new_context(4) yield False ctx.has_matched = child_context.has_matched if not ctx.has_matched: repeat.count = count - 1 ctx.state.string_position = ctx.string_position yield True # see if the tail matches ctx.state.marks_push() ctx.state.repeat = repeat.previous child_context = ctx.push_new_context(1) #print('_sre.py:1022:push new context') yield False if child_context.has_matched: ctx.has_matched = True yield True ctx.state.repeat = repeat ctx.state.string_position = ctx.string_position ctx.state.marks_pop() # match more until tail matches if count >= maxcount and maxcount != MAXREPEAT: ctx.has_matched = False #print('_sre.py:line1022, update context.has_matched variable') yield True repeat.count = count child_context = repeat.push_new_context(4) yield False ctx.has_matched = child_context.has_matched if not ctx.has_matched: repeat.count = count - 1 ctx.state.string_position = ctx.string_position yield True def general_op_groupref(self, ctx, decorate=lambda x: x): group_start, group_end = ctx.state.get_marks(ctx.peek_code(1)) if group_start is None or group_end is None or group_end < group_start: ctx.has_matched = False return True while group_start < group_end: if ctx.at_end() or 
decorate(ord(ctx.peek_char())) \ != decorate(ord(ctx.state.string[group_start])): ctx.has_matched = False #print('_sre.py:line1042, update context.has_matched variable') return True group_start += 1 ctx.skip_char(1) ctx.skip_code(2) return True def op_groupref(self, ctx): # match backreference # <GROUPREF> <zero-based group index> #self._log(ctx, "GROUPREF", ctx.peek_code(1)) return self.general_op_groupref(ctx) def op_groupref_ignore(self, ctx): # match backreference case-insensitive # <GROUPREF_IGNORE> <zero-based group index> #self._log(ctx, "GROUPREF_IGNORE", ctx.peek_code(1)) return self.general_op_groupref(ctx, ctx.state.lower) def op_groupref_exists(self, ctx): # <GROUPREF_EXISTS> <group> <skip> codeyes <JUMP> codeno ... #self._log(ctx, "GROUPREF_EXISTS", ctx.peek_code(1)) group_start, group_end = ctx.state.get_marks(ctx.peek_code(1)) if group_start is None or group_end is None or group_end < group_start: ctx.skip_code(ctx.peek_code(2) + 1) else: ctx.skip_code(3) return True def op_assert(self, ctx): # assert subpattern # <ASSERT> <skip> <back> <pattern> #self._log(ctx, "ASSERT", ctx.peek_code(2)) ctx.state.string_position = ctx.string_position - ctx.peek_code(2) if ctx.state.string_position < 0: ctx.has_matched = False yield True child_context = ctx.push_new_context(3) yield False if child_context.has_matched: ctx.skip_code(ctx.peek_code(1) + 1) else: ctx.has_matched = False yield True def op_assert_not(self, ctx): # assert not subpattern # <ASSERT_NOT> <skip> <back> <pattern> #self._log(ctx, "ASSERT_NOT", ctx.peek_code(2)) ctx.state.string_position = ctx.string_position - ctx.peek_code(2) if ctx.state.string_position >= 0: child_context = ctx.push_new_context(3) yield False if child_context.has_matched: ctx.has_matched = False yield True ctx.skip_code(ctx.peek_code(1) + 1) yield True def unknown(self, ctx): #self._log(ctx, "UNKNOWN", ctx.peek_code()) raise RuntimeError("Internal re error. 
Unknown opcode: %s" % ctx.peek_code()) def check_charset(self, ctx, char): """Checks whether a character matches set of arbitrary length. Assumes the code pointer is at the first member of the set.""" self.set_dispatcher.reset(char) save_position = ctx.code_position result = None while result is None: result = self.set_dispatcher.dispatch(ctx.peek_code(), ctx) ctx.code_position = save_position #print("_sre.py:1123:check_charset", result) return result def count_repetitions(self, ctx, maxcount): """Returns the number of repetitions of a single item, starting from the current string position. The code pointer is expected to point to a REPEAT_ONE operation (with the repeated 4 ahead).""" count = 0 real_maxcount = ctx.state.end - ctx.string_position if maxcount < real_maxcount and maxcount != MAXREPEAT: real_maxcount = maxcount # XXX could special case every single character pattern here, as in C. # This is a general solution, a bit hackisch, but works and should be # efficient. code_position = ctx.code_position string_position = ctx.string_position ctx.skip_code(4) reset_position = ctx.code_position while count < real_maxcount: # this works because the single character pattern is followed by # a success opcode ctx.code_position = reset_position self.dispatch(ctx.peek_code(), ctx) #print("count_repetitions", ctx.has_matched, count) if ctx.has_matched is False: # could be None as well break count += 1 ctx.has_matched = None ctx.code_position = code_position ctx.string_position = string_position return count def _log(self, context, opname, *args): arg_string = ("%s " * len(args)) % args _log("|%s|%s|%s %s" % (context.pattern_codes, context.string_position, opname, arg_string)) _OpcodeDispatcher.build_dispatch_table(OPCODES, "op_") class _CharsetDispatcher(_Dispatcher): def __init__(self): self.ch_dispatcher = _ChcodeDispatcher() def reset(self, char): self.char = char self.ok = True def set_failure(self, ctx): return not self.ok def set_literal(self, ctx): # <LITERAL> 
<code> if ctx.peek_code(1) == self.char: return self.ok else: ctx.skip_code(2) def set_category(self, ctx): # <CATEGORY> <code> if self.ch_dispatcher.dispatch(ctx.peek_code(1), ctx): return self.ok else: ctx.skip_code(2) def set_charset(self, ctx): # <CHARSET> <bitmap> (16 bits per code word) char_code = self.char ctx.skip_code(1) # point to beginning of bitmap if CODESIZE == 2: if char_code < 256 and ctx.peek_code(char_code >> 4) \ & (1 << (char_code & 15)): return self.ok ctx.skip_code(16) # skip bitmap else: if char_code < 256 and ctx.peek_code(char_code >> 5) \ & (1 << (char_code & 31)): return self.ok ctx.skip_code(8) # skip bitmap def set_range(self, ctx): # <RANGE> <lower> <upper> if ctx.peek_code(1) <= self.char <= ctx.peek_code(2): return self.ok ctx.skip_code(3) def set_negate(self, ctx): self.ok = not self.ok ctx.skip_code(1) #fixme brython. array module doesn't exist def set_bigcharset(self, ctx): raise NotImplementationError("_sre.py: set_bigcharset, array not implemented") # <BIGCHARSET> <blockcount> <256 blockindices> <blocks> char_code = self.char count = ctx.peek_code(1) ctx.skip_code(2) if char_code < 65536: block_index = char_code >> 8 # NB: there are CODESIZE block indices per bytecode a = array.array("B") a.fromstring(array.array(CODESIZE == 2 and "H" or "I", [ctx.peek_code(block_index // CODESIZE)]).tostring()) block = a[block_index % CODESIZE] ctx.skip_code(256 // CODESIZE) # skip block indices block_value = ctx.peek_code(block * (32 // CODESIZE) + ((char_code & 255) >> (CODESIZE == 2 and 4 or 5))) if block_value & (1 << (char_code & ((8 * CODESIZE) - 1))): return self.ok else: ctx.skip_code(256 // CODESIZE) # skip block indices ctx.skip_code(count * (32 // CODESIZE)) # skip blocks def unknown(self, ctx): return False _CharsetDispatcher.build_dispatch_table(OPCODES, "set_") class _AtcodeDispatcher(_Dispatcher): def at_beginning(self, ctx): return ctx.at_beginning() at_beginning_string = at_beginning def at_beginning_line(self, ctx): return 
ctx.at_beginning() or _is_linebreak(ctx.peek_char(-1)) def at_end(self, ctx): return (ctx.remaining_chars() == 1 and ctx.at_linebreak()) or ctx.at_end() def at_end_line(self, ctx): return ctx.at_linebreak() or ctx.at_end() def at_end_string(self, ctx): return ctx.at_end() def at_boundary(self, ctx): return ctx.at_boundary(_is_word) def at_non_boundary(self, ctx): return not ctx.at_boundary(_is_word) def at_loc_boundary(self, ctx): return ctx.at_boundary(_is_loc_word) def at_loc_non_boundary(self, ctx): return not ctx.at_boundary(_is_loc_word) def at_uni_boundary(self, ctx): return ctx.at_boundary(_is_uni_word) def at_uni_non_boundary(self, ctx): return not ctx.at_boundary(_is_uni_word) def unknown(self, ctx): return False _AtcodeDispatcher.build_dispatch_table(ATCODES, "") class _ChcodeDispatcher(_Dispatcher): def category_digit(self, ctx): return _is_digit(ctx.peek_char()) def category_not_digit(self, ctx): return not _is_digit(ctx.peek_char()) def category_space(self, ctx): return _is_space(ctx.peek_char()) def category_not_space(self, ctx): return not _is_space(ctx.peek_char()) def category_word(self, ctx): return _is_word(ctx.peek_char()) def category_not_word(self, ctx): return not _is_word(ctx.peek_char()) def category_linebreak(self, ctx): return _is_linebreak(ctx.peek_char()) def category_not_linebreak(self, ctx): return not _is_linebreak(ctx.peek_char()) def category_loc_word(self, ctx): return _is_loc_word(ctx.peek_char()) def category_loc_not_word(self, ctx): return not _is_loc_word(ctx.peek_char()) def category_uni_digit(self, ctx): return ctx.peek_char().isdigit() def category_uni_not_digit(self, ctx): return not ctx.peek_char().isdigit() def category_uni_space(self, ctx): return ctx.peek_char().isspace() def category_uni_not_space(self, ctx): return not ctx.peek_char().isspace() def category_uni_word(self, ctx): return _is_uni_word(ctx.peek_char()) def category_uni_not_word(self, ctx): return not _is_uni_word(ctx.peek_char()) def 
category_uni_linebreak(self, ctx): return ord(ctx.peek_char()) in _uni_linebreaks def category_uni_not_linebreak(self, ctx): return ord(ctx.peek_char()) not in _uni_linebreaks def unknown(self, ctx): return False _ChcodeDispatcher.build_dispatch_table(CHCODES, "") _ascii_char_info = [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 6, 2, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 0, 0, 0, 0, 0, 0, 0, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 0, 0, 0, 0, 16, 0, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 0, 0, 0, 0, 0 ] def _is_digit(char): code = ord(char) return code < 128 and _ascii_char_info[code] & 1 def _is_space(char): code = ord(char) return code < 128 and _ascii_char_info[code] & 2 def _is_word(char): # NB: non-ASCII chars aren't words according to _sre.c code = ord(char) return code < 128 and _ascii_char_info[code] & 16 def _is_loc_word(char): return (not (ord(char) & ~255) and char.isalnum()) or char == '_' def _is_uni_word(char): # not valid in python 3 #return unichr(ord(char)).isalnum() or char == '_' return chr(ord(char)).isalnum() or char == '_' def _is_linebreak(char): return char == "\n" # Static list of all unicode codepoints reported by Py_UNICODE_ISLINEBREAK. _uni_linebreaks = [10, 13, 28, 29, 30, 133, 8232, 8233] def _log(message): if 0: print(message)
agpl-3.0
wastholm/bitcoin
share/qt/extract_strings_qt.py
321
1873
#!/usr/bin/python ''' Extract _("...") strings for translation and convert to Qt4 stringdefs so that they can be picked up by Qt linguist. ''' from subprocess import Popen, PIPE import glob import operator import os import sys OUT_CPP="qt/bitcoinstrings.cpp" EMPTY=['""'] def parse_po(text): """ Parse 'po' format produced by xgettext. Return a list of (msgid,msgstr) tuples. """ messages = [] msgid = [] msgstr = [] in_msgid = False in_msgstr = False for line in text.split('\n'): line = line.rstrip('\r') if line.startswith('msgid '): if in_msgstr: messages.append((msgid, msgstr)) in_msgstr = False # message start in_msgid = True msgid = [line[6:]] elif line.startswith('msgstr '): in_msgid = False in_msgstr = True msgstr = [line[7:]] elif line.startswith('"'): if in_msgid: msgid.append(line) if in_msgstr: msgstr.append(line) if in_msgstr: messages.append((msgid, msgstr)) return messages files = sys.argv[1:] # xgettext -n --keyword=_ $FILES XGETTEXT=os.getenv('XGETTEXT', 'xgettext') child = Popen([XGETTEXT,'--output=-','-n','--keyword=_'] + files, stdout=PIPE) (out, err) = child.communicate() messages = parse_po(out) f = open(OUT_CPP, 'w') f.write(""" #include <QtGlobal> // Automatically generated by extract_strings.py #ifdef __GNUC__ #define UNUSED __attribute__((unused)) #else #define UNUSED #endif """) f.write('static const char UNUSED *bitcoin_strings[] = {\n') messages.sort(key=operator.itemgetter(0)) for (msgid, msgstr) in messages: if msgid != EMPTY: f.write('QT_TRANSLATE_NOOP("bitcoin-core", %s),\n' % ('\n'.join(msgid))) f.write('};\n') f.close()
mit
Big-B702/python-for-android
python-build/python-libs/gdata/src/gdata/webmastertools/__init__.py
138
17837
#!/usr/bin/python # # Copyright (C) 2008 Yu-Jie Lin # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Contains extensions to Atom objects used with Google Webmaster Tools.""" __author__ = 'livibetter (Yu-Jie Lin)' try: from xml.etree import cElementTree as ElementTree except ImportError: try: import cElementTree as ElementTree except ImportError: try: from xml.etree import ElementTree except ImportError: from elementtree import ElementTree import atom import gdata # XML namespaces which are often used in Google Webmaster Tools entities. 
GWEBMASTERTOOLS_NAMESPACE = 'http://schemas.google.com/webmasters/tools/2007' GWEBMASTERTOOLS_TEMPLATE = '{http://schemas.google.com/webmasters/tools/2007}%s' class Indexed(atom.AtomBase): _tag = 'indexed' _namespace = GWEBMASTERTOOLS_NAMESPACE def IndexedFromString(xml_string): return atom.CreateClassFromXMLString(Indexed, xml_string) class Crawled(atom.Date): _tag = 'crawled' _namespace = GWEBMASTERTOOLS_NAMESPACE def CrawledFromString(xml_string): return atom.CreateClassFromXMLString(Crawled, xml_string) class GeoLocation(atom.AtomBase): _tag = 'geolocation' _namespace = GWEBMASTERTOOLS_NAMESPACE def GeoLocationFromString(xml_string): return atom.CreateClassFromXMLString(GeoLocation, xml_string) class PreferredDomain(atom.AtomBase): _tag = 'preferred-domain' _namespace = GWEBMASTERTOOLS_NAMESPACE def PreferredDomainFromString(xml_string): return atom.CreateClassFromXMLString(PreferredDomain, xml_string) class CrawlRate(atom.AtomBase): _tag = 'crawl-rate' _namespace = GWEBMASTERTOOLS_NAMESPACE def CrawlRateFromString(xml_string): return atom.CreateClassFromXMLString(CrawlRate, xml_string) class EnhancedImageSearch(atom.AtomBase): _tag = 'enhanced-image-search' _namespace = GWEBMASTERTOOLS_NAMESPACE def EnhancedImageSearchFromString(xml_string): return atom.CreateClassFromXMLString(EnhancedImageSearch, xml_string) class Verified(atom.AtomBase): _tag = 'verified' _namespace = GWEBMASTERTOOLS_NAMESPACE def VerifiedFromString(xml_string): return atom.CreateClassFromXMLString(Verified, xml_string) class VerificationMethodMeta(atom.AtomBase): _tag = 'meta' _namespace = atom.ATOM_NAMESPACE _children = atom.AtomBase._children.copy() _attributes = atom.AtomBase._attributes.copy() _attributes['name'] = 'name' _attributes['content'] = 'content' def __init__(self, text=None, name=None, content=None, extension_elements=None, extension_attributes=None): self.text = text self.name = name self.content = content self.extension_elements = extension_elements or [] 
self.extension_attributes = extension_attributes or {} def VerificationMethodMetaFromString(xml_string): return atom.CreateClassFromXMLString(VerificationMethodMeta, xml_string) class VerificationMethod(atom.AtomBase): _tag = 'verification-method' _namespace = GWEBMASTERTOOLS_NAMESPACE _children = atom.Text._children.copy() _attributes = atom.Text._attributes.copy() _children['{%s}meta' % atom.ATOM_NAMESPACE] = ( 'meta', VerificationMethodMeta) _attributes['in-use'] = 'in_use' def __init__(self, text=None, in_use=None, meta=None, extension_elements=None, extension_attributes=None): self.text = text self.in_use = in_use self.meta = meta self.extension_elements = extension_elements or [] self.extension_attributes = extension_attributes or {} def VerificationMethodFromString(xml_string): return atom.CreateClassFromXMLString(VerificationMethod, xml_string) class MarkupLanguage(atom.AtomBase): _tag = 'markup-language' _namespace = GWEBMASTERTOOLS_NAMESPACE def MarkupLanguageFromString(xml_string): return atom.CreateClassFromXMLString(MarkupLanguage, xml_string) class SitemapMobile(atom.AtomBase): _tag = 'sitemap-mobile' _namespace = GWEBMASTERTOOLS_NAMESPACE _children = atom.AtomBase._children.copy() _attributes = atom.AtomBase._attributes.copy() _children['{%s}markup-language' % GWEBMASTERTOOLS_NAMESPACE] = ( 'markup_language', [MarkupLanguage]) def __init__(self, markup_language=None, extension_elements=None, extension_attributes=None, text=None): self.markup_language = markup_language or [] self.text = text self.extension_elements = extension_elements or [] self.extension_attributes = extension_attributes or {} def SitemapMobileFromString(xml_string): return atom.CreateClassFromXMLString(SitemapMobile, xml_string) class SitemapMobileMarkupLanguage(atom.AtomBase): _tag = 'sitemap-mobile-markup-language' _namespace = GWEBMASTERTOOLS_NAMESPACE def SitemapMobileMarkupLanguageFromString(xml_string): return atom.CreateClassFromXMLString(SitemapMobileMarkupLanguage, 
xml_string) class PublicationLabel(atom.AtomBase): _tag = 'publication-label' _namespace = GWEBMASTERTOOLS_NAMESPACE def PublicationLabelFromString(xml_string): return atom.CreateClassFromXMLString(PublicationLabel, xml_string) class SitemapNews(atom.AtomBase): _tag = 'sitemap-news' _namespace = GWEBMASTERTOOLS_NAMESPACE _children = atom.AtomBase._children.copy() _attributes = atom.AtomBase._attributes.copy() _children['{%s}publication-label' % GWEBMASTERTOOLS_NAMESPACE] = ( 'publication_label', [PublicationLabel]) def __init__(self, publication_label=None, extension_elements=None, extension_attributes=None, text=None): self.publication_label = publication_label or [] self.text = text self.extension_elements = extension_elements or [] self.extension_attributes = extension_attributes or {} def SitemapNewsFromString(xml_string): return atom.CreateClassFromXMLString(SitemapNews, xml_string) class SitemapNewsPublicationLabel(atom.AtomBase): _tag = 'sitemap-news-publication-label' _namespace = GWEBMASTERTOOLS_NAMESPACE def SitemapNewsPublicationLabelFromString(xml_string): return atom.CreateClassFromXMLString(SitemapNewsPublicationLabel, xml_string) class SitemapLastDownloaded(atom.Date): _tag = 'sitemap-last-downloaded' _namespace = GWEBMASTERTOOLS_NAMESPACE def SitemapLastDownloadedFromString(xml_string): return atom.CreateClassFromXMLString(SitemapLastDownloaded, xml_string) class SitemapType(atom.AtomBase): _tag = 'sitemap-type' _namespace = GWEBMASTERTOOLS_NAMESPACE def SitemapTypeFromString(xml_string): return atom.CreateClassFromXMLString(SitemapType, xml_string) class SitemapStatus(atom.AtomBase): _tag = 'sitemap-status' _namespace = GWEBMASTERTOOLS_NAMESPACE def SitemapStatusFromString(xml_string): return atom.CreateClassFromXMLString(SitemapStatus, xml_string) class SitemapUrlCount(atom.AtomBase): _tag = 'sitemap-url-count' _namespace = GWEBMASTERTOOLS_NAMESPACE def SitemapUrlCountFromString(xml_string): return atom.CreateClassFromXMLString(SitemapUrlCount, 
xml_string) class LinkFinder(atom.LinkFinder): """An "interface" providing methods to find link elements SitesEntry elements often contain multiple links which differ in the rel attribute or content type. Often, developers are interested in a specific type of link so this class provides methods to find specific classes of links. This class is used as a mixin in SitesEntry. """ def GetSelfLink(self): """Find the first link with rel set to 'self' Returns: An atom.Link or none if none of the links had rel equal to 'self' """ for a_link in self.link: if a_link.rel == 'self': return a_link return None def GetEditLink(self): for a_link in self.link: if a_link.rel == 'edit': return a_link return None def GetPostLink(self): """Get a link containing the POST target URL. The POST target URL is used to insert new entries. Returns: A link object with a rel matching the POST type. """ for a_link in self.link: if a_link.rel == 'http://schemas.google.com/g/2005#post': return a_link return None def GetFeedLink(self): for a_link in self.link: if a_link.rel == 'http://schemas.google.com/g/2005#feed': return a_link return None class SitesEntry(atom.Entry, LinkFinder): """A Google Webmaster Tools meta Entry flavor of an Atom Entry """ _tag = atom.Entry._tag _namespace = atom.Entry._namespace _children = atom.Entry._children.copy() _attributes = atom.Entry._attributes.copy() _children['{%s}entryLink' % gdata.GDATA_NAMESPACE] = ( 'entry_link', [gdata.EntryLink]) _children['{%s}indexed' % GWEBMASTERTOOLS_NAMESPACE] = ('indexed', Indexed) _children['{%s}crawled' % GWEBMASTERTOOLS_NAMESPACE] = ( 'crawled', Crawled) _children['{%s}geolocation' % GWEBMASTERTOOLS_NAMESPACE] = ( 'geolocation', GeoLocation) _children['{%s}preferred-domain' % GWEBMASTERTOOLS_NAMESPACE] = ( 'preferred_domain', PreferredDomain) _children['{%s}crawl-rate' % GWEBMASTERTOOLS_NAMESPACE] = ( 'crawl_rate', CrawlRate) _children['{%s}enhanced-image-search' % GWEBMASTERTOOLS_NAMESPACE] = ( 'enhanced_image_search', 
EnhancedImageSearch) _children['{%s}verified' % GWEBMASTERTOOLS_NAMESPACE] = ( 'verified', Verified) _children['{%s}verification-method' % GWEBMASTERTOOLS_NAMESPACE] = ( 'verification_method', [VerificationMethod]) def __GetId(self): return self.__id # This method was created to strip the unwanted whitespace from the id's # text node. def __SetId(self, id): self.__id = id if id is not None and id.text is not None: self.__id.text = id.text.strip() id = property(__GetId, __SetId) def __init__(self, category=None, content=None, atom_id=None, link=None, title=None, updated=None, entry_link=None, indexed=None, crawled=None, geolocation=None, preferred_domain=None, crawl_rate=None, enhanced_image_search=None, verified=None, verification_method=None, extension_elements=None, extension_attributes=None, text=None): atom.Entry.__init__(self, category=category, content=content, atom_id=atom_id, link=link, title=title, updated=updated, text=text) self.entry_link = entry_link or [] self.indexed = indexed self.crawled = crawled self.geolocation = geolocation self.preferred_domain = preferred_domain self.crawl_rate = crawl_rate self.enhanced_image_search = enhanced_image_search self.verified = verified self.verification_method = verification_method or [] def SitesEntryFromString(xml_string): return atom.CreateClassFromXMLString(SitesEntry, xml_string) class SitesFeed(atom.Feed, LinkFinder): """A Google Webmaster Tools meta Sites feed flavor of an Atom Feed""" _tag = atom.Feed._tag _namespace = atom.Feed._namespace _children = atom.Feed._children.copy() _attributes = atom.Feed._attributes.copy() _children['{%s}startIndex' % gdata.OPENSEARCH_NAMESPACE] = ( 'start_index', gdata.StartIndex) _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [SitesEntry]) del _children['{%s}generator' % atom.ATOM_NAMESPACE] del _children['{%s}author' % atom.ATOM_NAMESPACE] del _children['{%s}contributor' % atom.ATOM_NAMESPACE] del _children['{%s}logo' % atom.ATOM_NAMESPACE] del 
_children['{%s}icon' % atom.ATOM_NAMESPACE] del _children['{%s}rights' % atom.ATOM_NAMESPACE] del _children['{%s}subtitle' % atom.ATOM_NAMESPACE] def __GetId(self): return self.__id def __SetId(self, id): self.__id = id if id is not None and id.text is not None: self.__id.text = id.text.strip() id = property(__GetId, __SetId) def __init__(self, start_index=None, atom_id=None, title=None, entry=None, category=None, link=None, updated=None, extension_elements=None, extension_attributes=None, text=None): """Constructor for Source Args: category: list (optional) A list of Category instances id: Id (optional) The entry's Id element link: list (optional) A list of Link instances title: Title (optional) the entry's title element updated: Updated (optional) the entry's updated element entry: list (optional) A list of the Entry instances contained in the feed. text: String (optional) The text contents of the element. This is the contents of the Entry's XML text node. (Example: <foo>This is the text</foo>) extension_elements: list (optional) A list of ExtensionElement instances which are children of this element. extension_attributes: dict (optional) A dictionary of strings which are the values for additional XML attributes of this element. 
""" self.start_index = start_index self.category = category or [] self.id = atom_id self.link = link or [] self.title = title self.updated = updated self.entry = entry or [] self.text = text self.extension_elements = extension_elements or [] self.extension_attributes = extension_attributes or {} def SitesFeedFromString(xml_string): return atom.CreateClassFromXMLString(SitesFeed, xml_string) class SitemapsEntry(atom.Entry, LinkFinder): """A Google Webmaster Tools meta Sitemaps Entry flavor of an Atom Entry """ _tag = atom.Entry._tag _namespace = atom.Entry._namespace _children = atom.Entry._children.copy() _attributes = atom.Entry._attributes.copy() _children['{%s}sitemap-type' % GWEBMASTERTOOLS_NAMESPACE] = ( 'sitemap_type', SitemapType) _children['{%s}sitemap-status' % GWEBMASTERTOOLS_NAMESPACE] = ( 'sitemap_status', SitemapStatus) _children['{%s}sitemap-last-downloaded' % GWEBMASTERTOOLS_NAMESPACE] = ( 'sitemap_last_downloaded', SitemapLastDownloaded) _children['{%s}sitemap-url-count' % GWEBMASTERTOOLS_NAMESPACE] = ( 'sitemap_url_count', SitemapUrlCount) _children['{%s}sitemap-mobile-markup-language' % GWEBMASTERTOOLS_NAMESPACE] \ = ('sitemap_mobile_markup_language', SitemapMobileMarkupLanguage) _children['{%s}sitemap-news-publication-label' % GWEBMASTERTOOLS_NAMESPACE] \ = ('sitemap_news_publication_label', SitemapNewsPublicationLabel) def __GetId(self): return self.__id # This method was created to strip the unwanted whitespace from the id's # text node. 
def __SetId(self, id): self.__id = id if id is not None and id.text is not None: self.__id.text = id.text.strip() id = property(__GetId, __SetId) def __init__(self, category=None, content=None, atom_id=None, link=None, title=None, updated=None, sitemap_type=None, sitemap_status=None, sitemap_last_downloaded=None, sitemap_url_count=None, sitemap_mobile_markup_language=None, sitemap_news_publication_label=None, extension_elements=None, extension_attributes=None, text=None): atom.Entry.__init__(self, category=category, content=content, atom_id=atom_id, link=link, title=title, updated=updated, text=text) self.sitemap_type = sitemap_type self.sitemap_status = sitemap_status self.sitemap_last_downloaded = sitemap_last_downloaded self.sitemap_url_count = sitemap_url_count self.sitemap_mobile_markup_language = sitemap_mobile_markup_language self.sitemap_news_publication_label = sitemap_news_publication_label def SitemapsEntryFromString(xml_string): return atom.CreateClassFromXMLString(SitemapsEntry, xml_string) class SitemapsFeed(atom.Feed, LinkFinder): """A Google Webmaster Tools meta Sitemaps feed flavor of an Atom Feed""" _tag = atom.Feed._tag _namespace = atom.Feed._namespace _children = atom.Feed._children.copy() _attributes = atom.Feed._attributes.copy() _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [SitemapsEntry]) _children['{%s}sitemap-mobile' % GWEBMASTERTOOLS_NAMESPACE] = ( 'sitemap_mobile', SitemapMobile) _children['{%s}sitemap-news' % GWEBMASTERTOOLS_NAMESPACE] = ( 'sitemap_news', SitemapNews) del _children['{%s}generator' % atom.ATOM_NAMESPACE] del _children['{%s}author' % atom.ATOM_NAMESPACE] del _children['{%s}contributor' % atom.ATOM_NAMESPACE] del _children['{%s}logo' % atom.ATOM_NAMESPACE] del _children['{%s}icon' % atom.ATOM_NAMESPACE] del _children['{%s}rights' % atom.ATOM_NAMESPACE] del _children['{%s}subtitle' % atom.ATOM_NAMESPACE] def __GetId(self): return self.__id def __SetId(self, id): self.__id = id if id is not None and id.text is 
not None: self.__id.text = id.text.strip() id = property(__GetId, __SetId) def __init__(self, category=None, content=None, atom_id=None, link=None, title=None, updated=None, entry=None, sitemap_mobile=None, sitemap_news=None, extension_elements=None, extension_attributes=None, text=None): self.category = category or [] self.id = atom_id self.link = link or [] self.title = title self.updated = updated self.entry = entry or [] self.text = text self.sitemap_mobile = sitemap_mobile self.sitemap_news = sitemap_news self.extension_elements = extension_elements or [] self.extension_attributes = extension_attributes or {} def SitemapsFeedFromString(xml_string): return atom.CreateClassFromXMLString(SitemapsFeed, xml_string)
apache-2.0
praveen-pal/edx-platform
common/lib/xmodule/xmodule/video_module.py
1
15580
# pylint: disable=W0223 """Video is ungraded Xmodule for support video content. It's new improved video module, which support additional feature: - Can play non-YouTube video sources via in-browser HTML5 video player. - YouTube defaults to HTML5 mode from the start. - Speed changes in both YouTube and non-YouTube videos happen via in-browser HTML5 video method (when in HTML5 mode). - Navigational subtitles can be disabled altogether via an attribute in XML. """ import json import logging from lxml import etree from pkg_resources import resource_string import datetime import time from django.http import Http404 from django.conf import settings from xmodule.x_module import XModule from xmodule.editing_module import TabsEditingDescriptor from xmodule.raw_module import EmptyDataRawDescriptor from xmodule.xml_module import is_pointer_tag, name_to_pathname from xmodule.modulestore import Location from xblock.fields import Scope, String, Boolean, Float, List, Integer, ScopeIds from xblock.field_data import DictFieldData from xmodule.modulestore.inheritance import InheritanceKeyValueStore from xblock.runtime import DbModel log = logging.getLogger(__name__) class VideoFields(object): """Fields for `VideoModule` and `VideoDescriptor`.""" display_name = String( display_name="Display Name", help="Display name for this module.", default="Video", scope=Scope.settings ) position = Integer( help="Current position in the video", scope=Scope.user_state, default=0 ) show_captions = Boolean( help="This controls whether or not captions are shown by default.", display_name="Show Captions", scope=Scope.settings, default=True ) # TODO: This should be moved to Scope.content, but this will # require data migration to support the old video module. 
youtube_id_1_0 = String( help="This is the Youtube ID reference for the normal speed video.", display_name="Youtube ID", scope=Scope.settings, default="OEoXaMPEzfM" ) youtube_id_0_75 = String( help="The Youtube ID for the .75x speed video.", display_name="Youtube ID for .75x speed", scope=Scope.settings, default="" ) youtube_id_1_25 = String( help="The Youtube ID for the 1.25x speed video.", display_name="Youtube ID for 1.25x speed", scope=Scope.settings, default="" ) youtube_id_1_5 = String( help="The Youtube ID for the 1.5x speed video.", display_name="Youtube ID for 1.5x speed", scope=Scope.settings, default="" ) start_time = Float( help="Start time for the video.", display_name="Start Time", scope=Scope.settings, default=0.0 ) end_time = Float( help="End time for the video.", display_name="End Time", scope=Scope.settings, default=0.0 ) source = String( help="The external URL to download the video. This appears as a link beneath the video.", display_name="Download Video", scope=Scope.settings, default="" ) html5_sources = List( help="A list of filenames to be used with HTML5 video. The first supported filetype will be displayed.", display_name="Video Sources", scope=Scope.settings, ) track = String( help="The external URL to download the timed transcript track. 
This appears as a link beneath the video.", display_name="Download Track", scope=Scope.settings, default="" ) sub = String( help="The name of the timed transcript track (for non-Youtube videos).", display_name="HTML5 Timed Transcript", scope=Scope.settings, default="" ) class VideoModule(VideoFields, XModule): """ XML source example: <video show_captions="true" youtube="0.75:jNCf2gIqpeE,1.0:ZwkTiUPN0mg,1.25:rsq9auxASqI,1.50:kMyNdzVHHgg" url_name="lecture_21_3" display_name="S19V3: Vacancies" > <source src=".../mit-3091x/M-3091X-FA12-L21-3_100.mp4"/> <source src=".../mit-3091x/M-3091X-FA12-L21-3_100.webm"/> <source src=".../mit-3091x/M-3091X-FA12-L21-3_100.ogv"/> </video> """ video_time = 0 icon_class = 'video' js = { 'js': [ resource_string(__name__, 'js/src/video/01_initialize.js'), resource_string(__name__, 'js/src/video/02_html5_video.js'), resource_string(__name__, 'js/src/video/03_video_player.js'), resource_string(__name__, 'js/src/video/04_video_control.js'), resource_string(__name__, 'js/src/video/05_video_quality_control.js'), resource_string(__name__, 'js/src/video/06_video_progress_slider.js'), resource_string(__name__, 'js/src/video/07_video_volume_control.js'), resource_string(__name__, 'js/src/video/08_video_speed_control.js'), resource_string(__name__, 'js/src/video/09_video_caption.js'), resource_string(__name__, 'js/src/video/10_main.js') ] } css = {'scss': [resource_string(__name__, 'css/video/display.scss')]} js_module_name = "Video" def handle_ajax(self, dispatch, data): """This is not being called right now and we raise 404 error.""" log.debug(u"GET {0}".format(data)) log.debug(u"DISPATCH {0}".format(dispatch)) raise Http404() def get_instance_state(self): """Return information about state (position).""" return json.dumps({'position': self.position}) def get_html(self): caption_asset_path = "/static/subs/" get_ext = lambda filename: filename.rpartition('.')[-1] sources = {get_ext(src): src for src in self.html5_sources} sources['main'] = 
self.source # for testing Youtube timeout in acceptance tests if getattr(settings, 'VIDEO_PORT', None): yt_test_url = "http://127.0.0.1:" + str(settings.VIDEO_PORT) + '/test_youtube/' else: yt_test_url = 'https://gdata.youtube.com/feeds/api/videos/' return self.system.render_template('video.html', { 'youtube_streams': _create_youtube_string(self), 'id': self.location.html_id(), 'sub': self.sub, 'sources': sources, 'track': self.track, 'display_name': self.display_name_with_default, # This won't work when we move to data that # isn't on the filesystem 'data_dir': getattr(self, 'data_dir', None), 'caption_asset_path': caption_asset_path, 'show_captions': json.dumps(self.show_captions), 'start': self.start_time, 'end': self.end_time, 'autoplay': settings.MITX_FEATURES.get('AUTOPLAY_VIDEOS', True), # TODO: Later on the value 1500 should be taken from some global # configuration setting field. 'yt_test_timeout': 1500, 'yt_test_url': yt_test_url }) class VideoDescriptor(VideoFields, TabsEditingDescriptor, EmptyDataRawDescriptor): """Descriptor for `VideoModule`.""" module_class = VideoModule tabs = [ # { # 'name': "Subtitles", # 'template': "video/subtitles.html", # }, { 'name': "Settings", 'template': "tabs/metadata-edit-tab.html", 'current': True } ] def __init__(self, *args, **kwargs): super(VideoDescriptor, self).__init__(*args, **kwargs) # For backwards compatibility -- if we've got XML data, parse # it out and set the metadata fields if self.data: field_data = VideoDescriptor._parse_video_xml(self.data) self._field_data.set_many(self, field_data) del self.data @classmethod def from_xml(cls, xml_data, system, org=None, course=None): """ Creates an instance of this descriptor from the supplied xml_data. 
This may be overridden by subclasses xml_data: A string of xml that will be translated into data and children for this module system: A DescriptorSystem for interacting with external resources org and course are optional strings that will be used in the generated modules url identifiers """ xml_object = etree.fromstring(xml_data) url_name = xml_object.get('url_name', xml_object.get('slug')) location = Location( 'i4x', org, course, 'video', url_name ) if is_pointer_tag(xml_object): filepath = cls._format_filepath(xml_object.tag, name_to_pathname(url_name)) xml_data = etree.tostring(cls.load_file(filepath, system.resources_fs, location)) field_data = VideoDescriptor._parse_video_xml(xml_data) field_data['location'] = location kvs = InheritanceKeyValueStore(initial_values=field_data) field_data = DbModel(kvs) video = system.construct_xblock_from_class( cls, field_data, # We're loading a descriptor, so student_id is meaningless # We also don't have separate notions of definition and usage ids yet, # so we use the location for both ScopeIds(None, location.category, location, location) ) return video def definition_to_xml(self, resource_fs): """ Returns an xml string representing this module. """ xml = etree.Element('video') youtube_string = _create_youtube_string(self) # Mild workaround to ensure that tests pass -- if a field # is set to its default value, we don't need to write it out. if youtube_string and youtube_string != '1.00:OEoXaMPEzfM': xml.set('youtube', unicode(youtube_string)) xml.set('url_name', self.url_name) attrs = { 'display_name': self.display_name, 'show_captions': json.dumps(self.show_captions), 'start_time': datetime.timedelta(seconds=self.start_time), 'end_time': datetime.timedelta(seconds=self.end_time), 'sub': self.sub, } for key, value in attrs.items(): # Mild workaround to ensure that tests pass -- if a field # is set to its default value, we don't write it out. 
if value: if key in self.fields and self.fields[key].is_set_on(self): xml.set(key, unicode(value)) for source in self.html5_sources: ele = etree.Element('source') ele.set('src', source) xml.append(ele) if self.track: ele = etree.Element('track') ele.set('src', self.track) xml.append(ele) return xml @staticmethod def _parse_youtube(data): """ Parses a string of Youtube IDs such as "1.0:AXdE34_U,1.5:VO3SxfeD" into a dictionary. Necessary for backwards compatibility with XML-based courses. """ ret = {'0.75': '', '1.00': '', '1.25': '', '1.50': ''} videos = data.split(',') for video in videos: pieces = video.split(':') try: speed = '%.2f' % float(pieces[0]) # normalize speed # Handle the fact that youtube IDs got double-quoted for a period of time. # Note: we pass in "VideoFields.youtube_id_1_0" so we deserialize as a String-- # it doesn't matter what the actual speed is for the purposes of deserializing. youtube_id = VideoDescriptor._deserialize(VideoFields.youtube_id_1_0.name, pieces[1]) ret[speed] = youtube_id except (ValueError, IndexError): log.warning('Invalid YouTube ID: %s' % video) return ret @staticmethod def _parse_video_xml(xml_data): """ Parse video fields out of xml_data. The fields are set if they are present in the XML. """ xml = etree.fromstring(xml_data) field_data = {} conversions = { 'start_time': VideoDescriptor._parse_time, 'end_time': VideoDescriptor._parse_time } # Convert between key names for certain attributes -- # necessary for backwards compatibility. 
compat_keys = { 'from': 'start_time', 'to': 'end_time' } sources = xml.findall('source') if sources: field_data['html5_sources'] = [ele.get('src') for ele in sources] field_data['source'] = field_data['html5_sources'][0] track = xml.find('track') if track is not None: field_data['track'] = track.get('src') for attr, value in xml.items(): if attr in compat_keys: attr = compat_keys[attr] if attr in VideoDescriptor.metadata_to_strip + ('url_name', 'name'): continue if attr == 'youtube': speeds = VideoDescriptor._parse_youtube(value) for speed, youtube_id in speeds.items(): # should have made these youtube_id_1_00 for # cleanliness, but hindsight doesn't need glasses normalized_speed = speed[:-1] if speed.endswith('0') else speed # If the user has specified html5 sources, make sure we don't use the default video if youtube_id != '' or 'html5_sources' in field_data: field_data['youtube_id_{0}'.format(normalized_speed.replace('.', '_'))] = youtube_id else: # Convert XML attrs into Python values. if attr in conversions: value = conversions[attr](value) else: # We export values with json.dumps (well, except for Strings, but # for about a month we did it for Strings also). value = VideoDescriptor._deserialize(attr, value) field_data[attr] = value return field_data @classmethod def _deserialize(cls, attr, value): """ Handles deserializing values that may have been encoded with json.dumps. """ return cls.get_map_for_field(attr).from_xml(value) @staticmethod def _parse_time(str_time): """Converts s in '12:34:45' format to seconds. If s is None, returns empty string""" if not str_time: return '' else: obj_time = time.strptime(str_time, '%H:%M:%S') return datetime.timedelta( hours=obj_time.tm_hour, minutes=obj_time.tm_min, seconds=obj_time.tm_sec ).total_seconds() def _create_youtube_string(module): """ Create a string of Youtube IDs from `module`'s metadata attributes. Only writes a speed if an ID is present in the module. 
Necessary for backwards compatibility with XML-based courses. """ youtube_ids = [ module.youtube_id_0_75, module.youtube_id_1_0, module.youtube_id_1_25, module.youtube_id_1_5 ] youtube_speeds = ['0.75', '1.00', '1.25', '1.50'] return ','.join([':'.join(pair) for pair in zip(youtube_speeds, youtube_ids) if pair[1]])
agpl-3.0
dl1ksv/gnuradio
gr-digital/examples/narrowband/benchmark_add_channel.py
6
3590
#!/usr/bin/env python # # Copyright 2010,2011 Free Software Foundation, Inc. # # This file is part of GNU Radio # # SPDX-License-Identifier: GPL-3.0-or-later # # from gnuradio import channels, gr from gnuradio import blocks from gnuradio import eng_notation from gnuradio.eng_option import eng_option from optparse import OptionParser import random, math, sys class my_top_block(gr.top_block): def __init__(self, ifile, ofile, options): gr.top_block.__init__(self) SNR = 10.0**(options.snr / 10.0) frequency_offset = options.frequency_offset time_offset = options.time_offset phase_offset = options.phase_offset*(math.pi / 180.0) # calculate noise voltage from SNR power_in_signal = abs(options.tx_amplitude)**2 noise_power = power_in_signal / SNR noise_voltage = math.sqrt(noise_power) self.src = blocks.file_source(gr.sizeof_gr_complex, ifile) #self.throttle = blocks.throttle(gr.sizeof_gr_complex, options.sample_rate) self.channel = channels.channel_model(noise_voltage, frequency_offset, time_offset, noise_seed=-random.randint(0,100000)) self.phase = blocks.multiply_const_cc(complex(math.cos(phase_offset), math.sin(phase_offset))) self.snk = blocks.file_sink(gr.sizeof_gr_complex, ofile) self.connect(self.src, self.channel, self.phase, self.snk) # ///////////////////////////////////////////////////////////////////////////// # main # ///////////////////////////////////////////////////////////////////////////// def main(): # Create Options Parser: usage = "benchmack_add_channel.py [options] <input file> <output file>" parser = OptionParser (usage=usage, option_class=eng_option, conflict_handler="resolve") parser.add_option("-n", "--snr", type="eng_float", default=30, help="set the SNR of the channel in dB [default=%default]") parser.add_option("", "--seed", action="store_true", default=False, help="use a random seed for AWGN noise [default=%default]") parser.add_option("-f", "--frequency-offset", type="eng_float", default=0, help="set frequency offset introduced by channel 
[default=%default]") parser.add_option("-t", "--time-offset", type="eng_float", default=1.0, help="set timing offset between Tx and Rx [default=%default]") parser.add_option("-p", "--phase-offset", type="eng_float", default=0, help="set phase offset (in degrees) between Tx and Rx [default=%default]") parser.add_option("-m", "--use-multipath", action="store_true", default=False, help="Use a multipath channel [default=%default]") parser.add_option("", "--tx-amplitude", type="eng_float", default=1.0, help="tell the simulator the signal amplitude [default=%default]") (options, args) = parser.parse_args () if len(args) != 2: parser.print_help(sys.stderr) sys.exit(1) ifile = args[0] ofile = args[1] # build the graph tb = my_top_block(ifile, ofile, options) r = gr.enable_realtime_scheduling() if r != gr.RT_OK: print("Warning: Failed to enable realtime scheduling.") tb.start() # start flow graph tb.wait() # wait for it to finish if __name__ == '__main__': try: main() except KeyboardInterrupt: pass
gpl-3.0
johnkeepmoving/oss-ftp
python27/win32/Lib/test/test_difflib.py
40
10635
import difflib from test.test_support import run_unittest, findfile import unittest import doctest import sys class TestWithAscii(unittest.TestCase): def test_one_insert(self): sm = difflib.SequenceMatcher(None, 'b' * 100, 'a' + 'b' * 100) self.assertAlmostEqual(sm.ratio(), 0.995, places=3) self.assertEqual(list(sm.get_opcodes()), [ ('insert', 0, 0, 0, 1), ('equal', 0, 100, 1, 101)]) sm = difflib.SequenceMatcher(None, 'b' * 100, 'b' * 50 + 'a' + 'b' * 50) self.assertAlmostEqual(sm.ratio(), 0.995, places=3) self.assertEqual(list(sm.get_opcodes()), [ ('equal', 0, 50, 0, 50), ('insert', 50, 50, 50, 51), ('equal', 50, 100, 51, 101)]) def test_one_delete(self): sm = difflib.SequenceMatcher(None, 'a' * 40 + 'c' + 'b' * 40, 'a' * 40 + 'b' * 40) self.assertAlmostEqual(sm.ratio(), 0.994, places=3) self.assertEqual(list(sm.get_opcodes()), [ ('equal', 0, 40, 0, 40), ('delete', 40, 41, 40, 40), ('equal', 41, 81, 40, 80)]) class TestAutojunk(unittest.TestCase): """Tests for the autojunk parameter added in 2.7""" def test_one_insert_homogenous_sequence(self): # By default autojunk=True and the heuristic kicks in for a sequence # of length 200+ seq1 = 'b' * 200 seq2 = 'a' + 'b' * 200 sm = difflib.SequenceMatcher(None, seq1, seq2) self.assertAlmostEqual(sm.ratio(), 0, places=3) # Now turn the heuristic off sm = difflib.SequenceMatcher(None, seq1, seq2, autojunk=False) self.assertAlmostEqual(sm.ratio(), 0.9975, places=3) class TestSFbugs(unittest.TestCase): def test_ratio_for_null_seqn(self): # Check clearing of SF bug 763023 s = difflib.SequenceMatcher(None, [], []) self.assertEqual(s.ratio(), 1) self.assertEqual(s.quick_ratio(), 1) self.assertEqual(s.real_quick_ratio(), 1) def test_comparing_empty_lists(self): # Check fix for bug #979794 group_gen = difflib.SequenceMatcher(None, [], []).get_grouped_opcodes() self.assertRaises(StopIteration, group_gen.next) diff_gen = difflib.unified_diff([], []) self.assertRaises(StopIteration, diff_gen.next) def test_matching_blocks_cache(self): 
# Issue #21635 s = difflib.SequenceMatcher(None, "abxcd", "abcd") first = s.get_matching_blocks() second = s.get_matching_blocks() self.assertEqual(second[0].size, 2) self.assertEqual(second[1].size, 2) self.assertEqual(second[2].size, 0) def test_added_tab_hint(self): # Check fix for bug #1488943 diff = list(difflib.Differ().compare(["\tI am a buggy"],["\t\tI am a bug"])) self.assertEqual("- \tI am a buggy", diff[0]) self.assertEqual("? --\n", diff[1]) self.assertEqual("+ \t\tI am a bug", diff[2]) self.assertEqual("? +\n", diff[3]) patch914575_from1 = """ 1. Beautiful is beTTer than ugly. 2. Explicit is better than implicit. 3. Simple is better than complex. 4. Complex is better than complicated. """ patch914575_to1 = """ 1. Beautiful is better than ugly. 3. Simple is better than complex. 4. Complicated is better than complex. 5. Flat is better than nested. """ patch914575_from2 = """ \t\tLine 1: preceeded by from:[tt] to:[ssss] \t\tLine 2: preceeded by from:[sstt] to:[sssst] \t \tLine 3: preceeded by from:[sstst] to:[ssssss] Line 4: \thas from:[sst] to:[sss] after : Line 5: has from:[t] to:[ss] at end\t """ patch914575_to2 = """ Line 1: preceeded by from:[tt] to:[ssss] \tLine 2: preceeded by from:[sstt] to:[sssst] Line 3: preceeded by from:[sstst] to:[ssssss] Line 4: has from:[sst] to:[sss] after : Line 5: has from:[t] to:[ss] at end """ patch914575_from3 = """line 0 1234567890123456789012345689012345 line 1 line 2 line 3 line 4 changed line 5 changed line 6 changed line 7 line 8 subtracted line 9 1234567890123456789012345689012345 short line just fits in!! just fits in two lines yup!! the end""" patch914575_to3 = """line 0 1234567890123456789012345689012345 line 1 line 2 added line 3 line 4 chanGEd line 5a chanGed line 6a changEd line 7 line 8 line 9 1234567890 another long line that needs to be wrapped just fitS in!! just fits in two lineS yup!! 
the end""" class TestSFpatches(unittest.TestCase): def test_html_diff(self): # Check SF patch 914575 for generating HTML differences f1a = ((patch914575_from1 + '123\n'*10)*3) t1a = (patch914575_to1 + '123\n'*10)*3 f1b = '456\n'*10 + f1a t1b = '456\n'*10 + t1a f1a = f1a.splitlines() t1a = t1a.splitlines() f1b = f1b.splitlines() t1b = t1b.splitlines() f2 = patch914575_from2.splitlines() t2 = patch914575_to2.splitlines() f3 = patch914575_from3 t3 = patch914575_to3 i = difflib.HtmlDiff() j = difflib.HtmlDiff(tabsize=2) k = difflib.HtmlDiff(wrapcolumn=14) full = i.make_file(f1a,t1a,'from','to',context=False,numlines=5) tables = '\n'.join( [ '<h2>Context (first diff within numlines=5(default))</h2>', i.make_table(f1a,t1a,'from','to',context=True), '<h2>Context (first diff after numlines=5(default))</h2>', i.make_table(f1b,t1b,'from','to',context=True), '<h2>Context (numlines=6)</h2>', i.make_table(f1a,t1a,'from','to',context=True,numlines=6), '<h2>Context (numlines=0)</h2>', i.make_table(f1a,t1a,'from','to',context=True,numlines=0), '<h2>Same Context</h2>', i.make_table(f1a,f1a,'from','to',context=True), '<h2>Same Full</h2>', i.make_table(f1a,f1a,'from','to',context=False), '<h2>Empty Context</h2>', i.make_table([],[],'from','to',context=True), '<h2>Empty Full</h2>', i.make_table([],[],'from','to',context=False), '<h2>tabsize=2</h2>', j.make_table(f2,t2), '<h2>tabsize=default</h2>', i.make_table(f2,t2), '<h2>Context (wrapcolumn=14,numlines=0)</h2>', k.make_table(f3.splitlines(),t3.splitlines(),context=True,numlines=0), '<h2>wrapcolumn=14,splitlines()</h2>', k.make_table(f3.splitlines(),t3.splitlines()), '<h2>wrapcolumn=14,splitlines(True)</h2>', k.make_table(f3.splitlines(True),t3.splitlines(True)), ]) actual = full.replace('</body>','\n%s\n</body>' % tables) # temporarily uncomment next two lines to baseline this test #with open('test_difflib_expect.html','w') as fp: # fp.write(actual) with open(findfile('test_difflib_expect.html')) as fp: self.assertEqual(actual, 
fp.read()) def test_recursion_limit(self): # Check if the problem described in patch #1413711 exists. limit = sys.getrecursionlimit() old = [(i%2 and "K:%d" or "V:A:%d") % i for i in range(limit*2)] new = [(i%2 and "K:%d" or "V:B:%d") % i for i in range(limit*2)] difflib.SequenceMatcher(None, old, new).get_opcodes() class TestOutputFormat(unittest.TestCase): def test_tab_delimiter(self): args = ['one', 'two', 'Original', 'Current', '2005-01-26 23:30:50', '2010-04-02 10:20:52'] ud = difflib.unified_diff(*args, lineterm='') self.assertEqual(list(ud)[0:2], [ "--- Original\t2005-01-26 23:30:50", "+++ Current\t2010-04-02 10:20:52"]) cd = difflib.context_diff(*args, lineterm='') self.assertEqual(list(cd)[0:2], [ "*** Original\t2005-01-26 23:30:50", "--- Current\t2010-04-02 10:20:52"]) def test_no_trailing_tab_on_empty_filedate(self): args = ['one', 'two', 'Original', 'Current'] ud = difflib.unified_diff(*args, lineterm='') self.assertEqual(list(ud)[0:2], ["--- Original", "+++ Current"]) cd = difflib.context_diff(*args, lineterm='') self.assertEqual(list(cd)[0:2], ["*** Original", "--- Current"]) def test_range_format_unified(self): # Per the diff spec at http://www.unix.org/single_unix_specification/ spec = '''\ Each <range> field shall be of the form: %1d", <beginning line number> if the range contains exactly one line, and: "%1d,%1d", <beginning line number>, <number of lines> otherwise. If a range is empty, its beginning line number shall be the number of the line just before the range, or 0 if the empty range starts the file. 
''' fmt = difflib._format_range_unified self.assertEqual(fmt(3,3), '3,0') self.assertEqual(fmt(3,4), '4') self.assertEqual(fmt(3,5), '4,2') self.assertEqual(fmt(3,6), '4,3') self.assertEqual(fmt(0,0), '0,0') def test_range_format_context(self): # Per the diff spec at http://www.unix.org/single_unix_specification/ spec = '''\ The range of lines in file1 shall be written in the following format if the range contains two or more lines: "*** %d,%d ****\n", <beginning line number>, <ending line number> and the following format otherwise: "*** %d ****\n", <ending line number> The ending line number of an empty range shall be the number of the preceding line, or 0 if the range is at the start of the file. Next, the range of lines in file2 shall be written in the following format if the range contains two or more lines: "--- %d,%d ----\n", <beginning line number>, <ending line number> and the following format otherwise: "--- %d ----\n", <ending line number> ''' fmt = difflib._format_range_context self.assertEqual(fmt(3,3), '3') self.assertEqual(fmt(3,4), '4') self.assertEqual(fmt(3,5), '4,5') self.assertEqual(fmt(3,6), '4,6') self.assertEqual(fmt(0,0), '0') def test_main(): difflib.HtmlDiff._default_prefix = 0 Doctests = doctest.DocTestSuite(difflib) run_unittest( TestWithAscii, TestAutojunk, TestSFpatches, TestSFbugs, TestOutputFormat, Doctests) if __name__ == '__main__': test_main()
mit
Mazecreator/tensorflow
tensorflow/contrib/tensor_forest/python/ops/tensor_forest_ops.py
166
1220
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Custom ops used by tensorforest.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function # go/tf-wildcard-import # pylint: disable=wildcard-import from tensorflow.contrib.tensor_forest.python.ops.gen_tensor_forest_ops import * # pylint: enable=wildcard-import from tensorflow.contrib.util import loader from tensorflow.python.platform import resource_loader _tensor_forest_ops = loader.load_op_library( resource_loader.get_path_to_datafile('_tensor_forest_ops.so'))
apache-2.0
google/google-ctf
third_party/edk2/BaseTools/Source/Python/UPT/Logger/StringTable.py
1
46519
## @file
# This file is used to define strings used in the UPT tool
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials are licensed and made available
# under the terms and conditions of the BSD License which accompanies this
# distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

"""
This file contains user visible strings in a format that can be used for
localization
"""

import gettext

#
# string table starts here...
#

## strings are classified as following types
# MSG_...: it is a message string
# ERR_...: it is a error string
# WRN_...: it is a warning string
# HLP_...: it is a help string
#
# All strings are wrapped in _() so they can be translated via gettext
# message catalogs; with no catalog installed _() returns its argument.
#
_ = gettext.gettext

MSG_USAGE_STRING = _("\n"
    "UEFI Packaging Tool (UEFIPT)\n"
    "%prog [options]"
    )

##
# Version and Copyright
#
MSG_VERSION_NUMBER = _("1.1")
MSG_VERSION = _("UEFI Packaging Tool (UEFIPT) - Revision " + \
                MSG_VERSION_NUMBER)
MSG_COPYRIGHT = _("Copyright (c) 2011 - 2018 Intel Corporation All Rights Reserved.")
MSG_VERSION_COPYRIGHT = _("\n  %s\n  %s" % (MSG_VERSION, MSG_COPYRIGHT))
MSG_USAGE = _("%s [options]\n%s" % ("UPT", MSG_VERSION_COPYRIGHT))
MSG_DESCRIPTION = _("The UEFIPT is used to create, " + \
                    "install or remove a UEFI Distribution Package. " + \
                    "If WORKSPACE environment variable is present, " + \
                    "then UPT will install packages to the location specified by WORKSPACE, " + \
                    "otherwise UPT will install packages to the current directory. " + \
                    "Option -n will override this default installation location")

#
# INF Parser related strings.
#
ERR_INF_PARSER_HEADER_FILE = _(
    "The Header comment section should start with an @file at the top.")
ERR_INF_PARSER_HEADER_MISSGING = _(
    "The Header comment is missing. It must be corrected before continuing.")
ERR_INF_PARSER_UNKNOWN_SECTION = _("An unknown section was found. "
                                   "It must be corrected before continuing. ")
ERR_INF_PARSER_NO_SECTION_ERROR = _("No section was found. "
                                    "A section must be included before continuing.")
ERR_INF_PARSER_BUILD_OPTION_FORMAT_INVALID = \
    _("Build Option format incorrect.")
ERR_INF_PARSER_BINARY_ITEM_FORMAT_INVALID = _(
    "The format of binary %s item is incorrect. "
    "It should contain at least %d elements.")
ERR_INF_PARSER_BINARY_ITEM_FORMAT_INVALID_MAX = _(
    "The format of binary %s item is invalid, "
    "it should contain not more than %d elements.")
ERR_INF_PARSER_BINARY_ITEM_INVALID_FILETYPE = _(
    "The Binary FileType is incorrect. It should in %s")
ERR_INF_PARSER_BINARY_ITEM_FILE_NOT_EXIST = _(
    "The Binary File: %s not exist.")
ERR_INF_PARSER_BINARY_ITEM_FILENAME_NOT_EXIST = _(
    "The Binary File Name item not exist")
ERR_INF_PARSER_BINARY_VER_TYPE = _(
    "Only this type is allowed: \"%s\".")
ERR_INF_PARSER_MULTI_DEFINE_SECTION = \
    _("Multiple define sections found. "
      "It must be corrected before continuing.")
ERR_INF_PARSER_DEFINE_ITEM_MORE_THAN_ONE_FOUND = \
    _("More than 1 %s is defined in DEFINES section. "
      "It must be corrected before continuing.")
ERR_INF_PARSER_DEFINE_NAME_INVALID = \
    _("Incorrect name format for : %s")
ERR_INF_PARSER_DEFINE_GUID_INVALID = \
    _("The format of this GUID is incorrect: %s")
ERR_INF_PARSER_DEFINE_MODULETYPE_INVALID = _("Incorrect MODULE_TYPE: %s")
# NOTE: name misspells "FORMAT" but is part of the public interface -- keep.
ERR_INF_PARSER_DEFINE_FROMAT_INVALID = _("Incorrect format: %s")
ERR_INF_PARSER_FILE_NOT_EXIST = _("This file does not exist: %s")
ERR_INF_PARSER_FILE_NOT_EXIST_OR_NAME_INVALID = \
    _("The file does not exist or not in sub-directories "
      "or has an incorrect file name of the directory containing the INF or DEC file: %s. "
      "It must be corrected before continuing")
ERR_INF_PARSER_DEFINE_SHADOW_INVALID = \
    _("The SHADOW keyword is only valid for"
      " SEC, PEI_CORE and PEIM module types.")
ERR_INF_PARSER_DEFINE_SECTION_HEADER_INVALID = \
    _("The format of the section header is incorrect")
ERR_INF_PARSER_DEPEX_SECTION_INVALID = \
    _("A module can't have a Depex section when its module type is %s")
ERR_INF_PARSER_DEPEX_SECTION_INVALID_FOR_BASE_LIBRARY_CLASS = \
    _("A base type library class can't have a Depex section with module type not defined.")
ERR_INF_PARSER_DEPEX_SECTION_INVALID_FOR_LIBRARY_CLASS = \
    _("A library class can't have a Depex section when its supported module type list is not defined.")
ERR_INF_PARSER_DEPEX_SECTION_INVALID_FOR_DRIVER = \
    _("A driver can't have a Depex section when its module type is UEFI_DRIVER.")
ERR_INF_PARSER_DEPEX_SECTION_NOT_DETERMINED = \
    _("Cannot determine the module's Depex type. The Depex's module types are conflict")
ERR_INF_PARSER_DEFINE_SECTION_MUST_ITEM_NOT_EXIST = _(
    "No %s found in INF file, please check it.")
ERR_INF_PARSER_DEPEX_SECTION_MODULE_TYPE_ERROR = \
    _("The module type of [Depex] section is invalid, not support type of %s")
ERR_INF_PARSER_DEPEX_SECTION_CONTENT_MISSING = \
    _("Missing content in: %s")
ERR_INF_PARSER_DEPEX_SECTION_CONTENT_ERROR = \
    _("The [Depex] section contains invalid content: %s")
ERR_INF_PARSER_DEPEX_SECTION_SEC_TYPE_ERROR = \
    _("The format is incorrect. The section type keyword of the content in the"
      " [Depex] section is only for 'PEI_DEPEX', 'DXE_DEPEX', 'SMM_DEPEX', "
      "it does not support type: %s")
ERR_INF_PARSER_UE_SECTION_USER_ID_ERROR = \
    _("This format is incorrect. "
      "The UserID: %s in [UserExtension] section is incorrect.")
ERR_INF_PARSER_UE_SECTION_ID_STRING_ERROR = \
    _("This format is incorrect. "
      "IdString: %s in [UserExtension] section is incorrect.")
ERR_INF_PARSER_LIBRARY_SECTION_CONTENT_ERROR = \
    _("The format is incorrect. "
      "You can only have a Library name and a Feature flag in one line.")
ERR_INF_PARSER_LIBRARY_SECTION_LIBNAME_MISSING = \
    _("Format invalid. Please specify a library name.")
ERR_INF_PARSER_SOURCES_SECTION_CONTENT_ERROR = \
    _("The format is incorrect. It should be formatted as follows: "
      "FileName, Family | TagName | ToolCode | FeatureFlagExpr.")
ERR_INF_PARSER_PCD_SECTION_TYPE_ERROR = \
    _("The PCD section type is incorrect. The value should be this list: %s")
# Fixed: missing space between the two concatenated literals ("invalid.Should").
ERR_INF_PARSER_PCD_SECTION_CONTENT_ERROR = \
    _("PcdName format invalid. "
      "Should like following: PcdName | Value | FeatureFlag.")
# Fixed: missing space between the two concatenated literals ("invalid.Should").
ERR_INF_PARSER_PCD_NAME_FORMAT_ERROR = \
    _("Format invalid. "
      "Should like following: <TokenSpaceGuidCName>.<PcdCName> ")
ERR_INF_PARSER_GUID_PPI_PROTOCOL_SECTION_CONTENT_ERROR = \
    _("The format is incorrect. "
      "It should be formatted as follows: CName | FeatureFlag.")
ERR_INF_PARSER_PACKAGE_SECTION_CONTENT_ERROR = \
    _("The format is incorrect. "
      "It should be formatted as follows: <TokenSpaceGuidCName>.<PcdCName>")
ERR_INF_PARSER_PCD_TAIL_COMMENTS_INVALID = \
    _("The format is incorrect. "
      "Multiple usage descriptions must be described on subsequent lines.")
ERR_INF_PARSER_MODULE_SECTION_TYPE_ERROR = \
    _("This section format is incorrect: %s.")
ERR_INF_PARSER_SECTION_NAME_DUPLICATE = \
    _("This section has multiple section names, "
      "only one section name is permitted.")
ERR_INF_PARSER_SECTION_ARCH_CONFLICT = \
    _("The 'common' ARCH must not be used with the specified ARCHs.")
ERR_INF_PARSER_SOURCE_SECTION_TAGNAME_INVALID = \
    _("This TagName is incorrect: %s. "
      "It must be corrected before continuing.")
ERR_INF_PARSER_TAGNAME_NOT_PERMITTED = \
    _("TagName is not permitted: %s. "
      "It must be corrected before continuing.")
ERR_INF_PARSER_TOOLCODE_NOT_PERMITTED = \
    _("ToolCode is not permitted: %s. "
      "It must be corrected before continuing.")
ERR_INF_PARSER_SOURCE_SECTION_FAMILY_INVALID = \
    _("This family is incorrect: %s. "
      "It must be corrected before continuing. ")
ERR_INF_PARSER_SOURCE_SECTION_SECTIONNAME_INVALID = \
    _("This SectionName is incorrect: %s. "
      "It must be corrected before continuing.")
ERR_INF_PARSER_PCD_CVAR_GUID = \
    _("TokenSpaceGuidCName must be valid C variable format.")
ERR_INF_PARSER_PCD_CVAR_PCDCNAME = \
    _("PcdCName must be valid C variable format.")
ERR_INF_PARSER_PCD_VALUE_INVALID = \
    _("The PCD value is incorrect. It must be corrected before continuing.")
# NOTE: name misspells "INVALID" but is part of the public interface -- keep.
ERR_INF_PARSER_FEATURE_FLAG_EXP_SYNTAX_INVLID = \
    _("Incorrect feature flag expression: %s")
ERR_INF_PARSER_FEATURE_FLAG_EXP_MISSING = \
    _("The feature flag expression is missing. Please specify a feature flag.")
ERR_INF_PARSER_INVALID_CNAME = \
    _("Incorrect CName: %s. You must specify a valid C variable name.")
ERR_INF_PARSER_CNAME_MISSING = \
    _("Missing CName. Specify a valid C variable name.")
# Fixed: missing space between the two concatenated literals ("\"%s\".It").
ERR_INF_PARSER_DEFINE_SECTION_KEYWORD_INVALID = \
    _("The Define section contains an invalid keyword: \"%s\". "
      "It must be corrected before continuing.")
ERR_INF_PARSER_FILE_MISS_DEFINE = \
    _("The following file listed in the module "
      "directory is not listed in the INF: %s")
ERR_INF_PARSER_VERSION_NUMBER_DEPRICATED = \
    _("VERSION_NUMBER deprecated. "
      "The INF file %s should be modified to use the VERSION_STRING instead.")
ERR_INF_PARSER_VER_EXIST_BOTH_NUM_STR = \
    _("The INF file %s defines both VERSION_NUMBER and VERSION_STRING, "
      "using VERSION_STRING")
ERR_INF_PARSER_NOT_SUPPORT_EDKI_INF = _("EDKI INF is not supported")
ERR_INF_PARSER_EDKI_COMMENT_IN_EDKII = _("The EDKI style comment is not supported in EDKII modules")
ERR_INF_PARSER_FEATUREPCD_USAGE_INVALID = _("The usage for FeaturePcd can only"
                                            " be type of \"CONSUMES\".")
ERR_INF_PARSER_DEFINE_ITEM_NO_NAME = _("No name specified")
ERR_INF_PARSER_DEFINE_ITEM_NO_VALUE = _("No value specified")
# Fixed: grammar typo "permitted to a have" -> "permitted to have".
ERR_INF_PARSER_MODULETYPE_INVALID = \
    _("Drivers and applications are not allowed to have a MODULE_TYPE of \"BASE\". "
      "Only libraries are permitted to have a MODULE_TYPE of \"BASE\".")
ERR_INF_GET_PKG_DEPENDENCY_FAIL = _("Failed to get PackageDependencies information from file %s")
ERR_INF_NO_PKG_DEPENDENCY_INFO = _("There are no packages defined that use the AsBuilt PCD information.")

#
# Item duplicate
#
ERR_INF_PARSER_ITEM_DUPLICATE_IN_DEC = \
    _('"%s" is redefined in its dependent DEC files')
ERR_INF_PARSER_ITEM_DUPLICATE = _("%s define duplicated! "
                                  "It must be corrected before continuing.")
# Fixed: missing spaces at literal joins ("listedin", "architecturalsection.It").
ERR_INF_PARSER_ITEM_DUPLICATE_COMMON = _("%s define duplicated! Item listed "
    "in an architectural section must not be listed in the common architectural "
    "section. It must be corrected before continuing.")
ERR_INF_PARSER_UE_SECTION_DUPLICATE_ERROR = \
    _("%s define duplicated! Each UserExtensions section header must have a "
      "unique set of UserId, IdString and Arch values. "
      "It must be corrected before continuing.")
# Fixed: missing space at literal join ("word.Please").
ERR_INF_PARSER_DEFINE_LIB_NAME_INVALID = \
    _("The name 'NULL' for LibraryClass is a reserved word. "
      "Please don't use it.")
ERR_GLOBAL_MARCO_INVALID = \
    _("Using global MACRO in INF/DEC is not permitted: %s . "
      "It must be corrected before continuing.")
ERR_MARCO_DEFINITION_MISS_ERROR = \
    _("MACRO expand incorrectly, can not find the MACRO definition. "
      "It must be corrected before continuing.")

#
# AsBuilt related
#
ERR_LIB_CONTATIN_ASBUILD_AND_COMMON = _("A binary INF file should not contain both AsBuilt LIB_INSTANCES information "
                                        "and a common library entry.")
ERR_LIB_INSTANCE_MISS_GUID = _("Could not get FILE_GUID definition from instance INF file.")
ERR_BO_CONTATIN_ASBUILD_AND_COMMON = _("A binary INF file should contain either AsBuilt information "
                                       "or a common build option entry, not both.")
ERR_ASBUILD_PCD_SECTION_TYPE = _("The AsBuilt INF file contains a PCD section type that is not permitted: %s.")
ERR_ASBUILD_PATCHPCD_FORMAT_INVALID = _("The AsBuilt PatchPcd entry must contain 3 elements: PcdName|Value|Offset")
ERR_ASBUILD_PCDEX_FORMAT_INVALID = _("The AsBuilt PcdEx entry must contain one element: PcdName")
ERR_ASBUILD_PCD_VALUE_INVALID = \
    _("The AsBuilt PCD value %s is incorrect or not align with it's datum type %s. "
      "It must be corrected before continuing.")
ERR_ASBUILD_PCD_TOKENSPACE_GUID_VALUE_MISS = _("Package file value could not be retrieved for %s.")
ERR_ASBUILD_PCD_DECLARITION_MISS = _("PCD Declaration in DEC files could not be found for: %s.")
# Fixed: missing space at literal join ("orHex").
ERR_ASBUILD_PCD_OFFSET_FORMAT_INVALID = _("PCD offset format invalid, number of (0-4294967295) or "
                                          "Hex number of UINT32 allowed : %s.")

#
# XML parser related strings
#
ERR_XML_PARSER_REQUIRED_ITEM_MISSING = \
    _("The XML section/attribute '%s' is required under %s, it can't be missing or empty")
ERR_XML_INVALID_VARIABLENAME = \
    _("The VariableName of the GUID in the XML tree does not conform to the packaging specification. "
      "Only a Hex Byte Array of UCS-2 format or L\"string\" is allowed): %s %s %s")
ERR_XML_INVALID_LIB_SUPMODLIST = _("The LIBRARY_CLASS entry %s must have the list appended using the format as: \n"
                                   "BASE SEC PEI_CORE PEIM DXE_CORE DXE_DRIVER SMM_CORE DXE_SMM_DRIVER DXE_RUNTIME_DRIVER "
                                   "DXE_SAL_DRIVER UEFI_DRIVER UEFI_APPLICATION USER_DEFINED\n Current is %s.")
ERR_XML_INVALID_EXTERN_SUPARCHLIST = \
    _("There is a mismatch of SupArchList %s between the EntryPoint, UnloadImage, Constructor, "
      "and Destructor elements in the ModuleSurfaceArea.ModuleProperties: SupArchList: %s. ")
ERR_XML_INVALID_EXTERN_SUPMODLIST = _("The SupModList attribute of the CONSTRUCTOR or DESTRUCTOR element: %s does not "
                                      "match the Supported Module Types listed after LIBRARY_CLASS = <Keyword> | %s")
ERR_XML_INVALID_EXTERN_SUPMODLIST_NOT_LIB = _("The module is not a library module. "
                                              "The MODULE_TYPE : %s listed in the ModuleSurfaceArea.Header "
                                              "must match the SupModList attribute %s")
ERR_XML_INVALID_BINARY_FILE_TYPE = _("Invalid binary file type %s.")

#
# Verbosity related strings.
#
MSG_DISTRIBUTION_PACKAGE_FILE_EXISTS = _(
    "The distribution package file %s already exists.\nPress Y to override it."
    " To exit the application, press any other key.")
MSG_CHECK_MODULE_EXIST = _(
    "\nChecking to see if module exists in workspace started ...")
MSG_CHECK_MODULE_EXIST_FINISH = \
    _("Checking to see if module exists in workspace ... Done.")
MSG_CHECK_MODULE_DEPEX_START = _(
    "\nChecking to see if module depex met by workspace started ...")
MSG_CHECK_MODULE_DEPEX_FINISH = _(
    "Checking to see if module depex met by workspace ... Done.")
MSG_CHECK_PACKAGE_START = _(
    "\nChecking to see if package exists in workspace started ...")
MSG_CHECK_PACKAGE_FINISH = _(
    "Checking to see if package exists in workspace ... Done.")
MSG_CHECK_DP_START = \
    _("\nChecking to see if DP exists in workspace ... Done.")
MSG_CHECK_DP_FINISH = _("Check DP exists in workspace ... Done.")
MSG_MODULE_DEPEND_ON = _("Module %s depends on Package %s")
MSG_INIT_IPI_START = _("\nInitialize IPI database started ...")
MSG_INIT_IPI_FINISH = _("Initialize IPI database ... Done.")
MSG_GET_DP_INSTALL_LIST = _(
    "\nGetting list of DP install information started ...")
MSG_GET_DP_INSTALL_INFO_START = _(
    "\nGetting list of DP install information started ...")
MSG_GET_DP_INSTALL_INFO_FINISH = _("Getting DP install information ... Done.")
MSG_UZIP_PARSE_XML = _(
    "Unzipping and parsing distribution package XML file ... ")
MSG_INSTALL_PACKAGE = _("Installing package ... %s")
MSG_INSTALL_MODULE = _("Installing module ... %s")
MSG_NEW_FILE_NAME_FOR_DIST = _(
    "Provide new filename for distribution file to be saved:\n")
MSG_UPDATE_PACKAGE_DATABASE = _("Update Distribution Package Database ...")
MSG_PYTHON_ON = _("(Python %s on %s) ")
MSG_SEARCH_FOR_HELP = _(
    "\n(Please send email to edk2-devel@lists.01.org for\n"
    " help, attach the following call stack trace.)\n")
MSG_REMOVE_TEMP_FILE_STARTED = _("Removing temp files started ... ")
MSG_REMOVE_TEMP_FILE_DONE = _("Removing temp files ... Done.")
MSG_FINISH = _("Successfully Done.")
MSG_COMPRESS_DISTRIBUTION_PKG = _("Compressing Distribution Package File ...")
MSG_CONFIRM_REMOVE = _(
    "Some packages or modules depend on this distribution package.\n"
    "Do you really want to remove it?")
# Fixed: missing space at literal join ("it?Press").
MSG_CONFIRM_REMOVE2 = _(
    "This file has been modified: %s. Do you want to remove it? "
    "Press Y to remove or other key to keep it")
MSG_CONFIRM_REMOVE3 = _(
    "This is a newly created file: %s. Are you sure you want to remove it? "
    "Press Y to remove or any other key to keep it")
MSG_USER_DELETE_OP = _(
    "Press Y to delete all files or press any other key to quit:")
MSG_REMOVE_FILE = _("Removing file: %s ...")
MSG_INITIALIZE_ECC_STARTED = _("\nInitialize ECC database started ...")
MSG_INITIALIZE_ECC_DONE = _("Initialize ECC database ... Done.")
MSG_DEFINE_STATEMENT_FOUND = _("DEFINE statement '%s' found in section %s")
MSG_PARSING = _("Parsing %s ...")
MSG_REPKG_CONFLICT = \
    _("Repackaging is not allowed on this file: %s. "
      "It was installed from distribution %s(Guid %s Version %s).")
MSG_INVALID_MODULE_INTRODUCED = _("Some modules are not valid after removal.")
MSG_CHECK_LOG_FILE = _("Please check log file %s for full list")
MSG_NEW_FILE_NAME = _(
    "Provide new filename:\n")
MSG_RELATIVE_PATH_ONLY = _("Please specify a relative path, full path is not allowed: %s")
MSG_NEW_PKG_PATH = _(
    "Select package location.  To quit with no input, press [Enter].")
MSG_CHECK_DP_FOR_REPLACE = _("Verifying the dependency rule for replacement of distributions:\n %s replaces %s")
MSG_CHECK_DP_FOR_INSTALL = _("Verifying the dependency rule for installation of distribution:\n %s")
MSG_REPLACE_ALREADY_INSTALLED_DP = _("Distribution with the same GUID/Version is already installed, "
                                     "replace would result in two instances, which is not allowed")
MSG_RECOVER_START = _('An error was detected, recovery started ...')
MSG_RECOVER_DONE = _('Recovery completed.')
MSG_RECOVER_FAIL = _('Recovery failed.')

#
# Error related strings.
#
ERR_DEPENDENCY_NOT_MATCH = _(
    "Module %s's dependency on package %s (GUID %s Version %s) "
    "cannot be satisfied")
ERR_MODULE_NOT_INSTALLED = _(
    "This module is not installed in the workspace: %s\n")
ERR_DIR_ALREADY_EXIST = _(
    "This directory already exists: %s.\n"
    "Select another location.  Press [Enter] with no input to quit:")
ERR_USER_INTERRUPT = _("The user has paused the application")
ERR_DIST_FILE_TOOMANY = _(
    "Only one .content and one .pkg file in ZIP file are allowed.")
ERR_DIST_FILE_TOOFEW = _(
    "Must have one .content and one .pkg file in the ZIP file.")
ERR_FILE_ALREADY_EXIST = _(
    "This file already exists: %s.\n"
    "Select another path to continue. To quit with no input press [Enter]:")
ERR_SPECIFY_PACKAGE = _(
    "One distribution package must be specified")
ERR_FILE_BROKEN = _(
    "This file is invalid in the distribution package: %s")
ERR_PACKAGE_NOT_MATCH_DEPENDENCY = _(
    "This distribution package does not meet the dependency requirements")
ERR_UNKNOWN_FATAL_INSTALL_ERR = \
    _("Unknown unrecoverable error when installing: %s")
ERR_UNKNOWN_FATAL_REPLACE_ERR = \
    _("Unknown unrecoverable error during replacement of distributions: %s replaces %s")
ERR_OPTION_NOT_FOUND = _("Options not found")
ERR_INVALID_PACKAGE_NAME = _("Incorrect package name: %s. ")
ERR_INVALID_PACKAGE_PATH = \
    _("Incorrect package path: %s. The path must be a relative path.")
ERR_NOT_FOUND = _("This was not found: %s")
ERR_INVALID_MODULE_NAME = _("This is not a valid module name: %s")
ERR_INVALID_METAFILE_PATH = _('This file must be in sub-directory of WORKSPACE: %s.')
ERR_INVALID_MODULE_PATH = \
    _("Incorrect module path: %s. The path must be a relative path.")
ERR_UNKNOWN_FATAL_CREATING_ERR = _("Unknown error when creating: %s")
ERR_PACKAGE_NOT_INSTALLED = _(
    "This distribution package not installed: %s")
ERR_DISTRIBUTION_NOT_INSTALLED = _(
    "The distribution package is not installed.")
ERR_UNKNOWN_FATAL_REMOVING_ERR = _("Unknown error when removing package")
ERR_UNKNOWN_FATAL_INVENTORYWS_ERR = _("Unknown error when inventorying WORKSPACE")
ERR_NOT_CONFIGURE_WORKSPACE_ENV = _(
    "The WORKSPACE environment variable must be configured.")
ERR_NO_TEMPLATE_FILE = _("This package information data file is not found: %s")
ERR_DEBUG_LEVEL = _(
    "Not supported debug level. Use default level instead.")
ERR_REQUIRE_T_OPTION = _(
    "Option -t is required during distribution creation.")
ERR_REQUIRE_O_OPTION = _(
    "Option -o is required during distribution replacement.")
ERR_REQUIRE_U_OPTION = _(
    "Option -u is required during distribution replacement.")
ERR_REQUIRE_I_C_R_OPTION = _(
    "Options -i, -c and -r are mutually exclusive.")
ERR_I_C_EXCLUSIVE = \
    _("Option -c and -i are mutually exclusive.")
ERR_I_R_EXCLUSIVE = \
    _("Option -i and -r are mutually exclusive.")
ERR_C_R_EXCLUSIVE = \
    _("Option -c and -r are mutually exclusive.")
ERR_U_ICR_EXCLUSIVE = \
    _("Option -u and -c/-i/-r are mutually exclusive.")
ERR_L_OA_EXCLUSIVE = \
    _("Option -l and -c/-i/-r/-u are mutually exclusive.")
ERR_FAILED_LOAD = _("Failed to load %s\n\t%s")
ERR_PLACEHOLDER_DIFFERENT_REPEAT = _(
    "${%s} has different repeat time from others.")
ERR_KEY_NOTALLOWED = _("This keyword is not allowed: %s")
ERR_NOT_FOUND_ENVIRONMENT = _("Environment variable not found")
ERR_WORKSPACE_NOTEXIST = _("WORKSPACE doesn't exist")
ERR_SPACE_NOTALLOWED = _(
    "Whitespace characters are not allowed in the WORKSPACE path. ")
ERR_MACRONAME_NOGIVEN = _("No MACRO name given")
ERR_MACROVALUE_NOGIVEN = _("No MACRO value given")
ERR_MACRONAME_INVALID = _("Incorrect MACRO name: %s")
ERR_MACROVALUE_INVALID = _("Incorrect MACRO value: %s")
ERR_NAME_ONLY_DEFINE = _(
    "This variable can only be defined via environment variable: %s")
ERR_EDK_GLOBAL_SAMENAME = _(
    "EDK_GLOBAL defined a macro with the same name as one defined by 'DEFINE'")
ERR_SECTIONNAME_INVALID = _(
    "An incorrect section name was found: %s. 'The correct file is '%s' .")
ERR_CHECKFILE_NOTFOUND = _(
    "Can't find file '%s' defined in section '%s'")
ERR_INVALID_NOTFOUND = _(
    "Incorrect statement '%s' was found in section '%s'")
ERR_TEMPLATE_NOTFOUND = _("This package information data file is not found: %s")
ERR_SECTION_NAME_INVALID = _('Incorrect section name: %s')
ERR_SECTION_REDEFINE = _(
    "This section already defined: %s.")
ERR_SECTION_NAME_NONE = \
    _('The section needs to be specified first.')
ERR_KEYWORD_INVALID = _('Invalid keyword: %s')
ERR_VALUE_INVALID = _("Invalid \"%s\" value in section [%s].")
ERR_FILELIST_LOCATION = _(
    'The directory "%s" must contain this file: "%s".')
ERR_KEYWORD_REDEFINE = _(
    "Keyword in this section can only be used once: %s.")
ERR_FILELIST_EXIST = _(
    'This file does not exist: %s.')
ERR_COPYRIGHT_CONTENT = _(
    "The copyright content must contain the word \"Copyright\" (case insensitive).")
# Fixed: missing space at literal join ("incorrect.The").
ERR_WRONG_FILELIST_FORMAT = \
    _('File list format is incorrect. '
      'The correct format is: filename|key=value[|key=value]')
ERR_FILELIST_ATTR = _(
    "The value of attribute \"%s\" includes illegal character.")
ERR_UNKNOWN_FILELIST_ATTR = _(
    'Unknown attribute name: %s.')
ERR_EMPTY_VALUE = _("Empty value is not allowed")
ERR_KEYWORD_MANDATORY = _('This keyword is mandatory: %s')
ERR_BOOLEAN_VALUE = _(
    'Value of key [%s] must be true or false, current: [%s]')
ERR_GUID_VALUE = _(
    'GUID must have the format of 8-4-4-4-12 with HEX value. '
    'Current value: [%s]')
ERR_VERSION_VALUE = _(
    'The value of key [%s] must be a decimal number. Found: [%s]')
ERR_VERSION_XMLSPEC = _(
    'XmlSpecification value must be 1.1, current: %s.')
ERR_INVALID_GUID = _("Incorrect GUID value string: %s")
ERR_FILE_NOT_FOUND = \
    _("File or directory not found in workspace")
ERR_FILE_OPEN_FAILURE = _("Could not open file")
ERR_FILE_WRITE_FAILURE = _("Could not write file.")
ERR_FILE_PARSE_FAILURE = _("Could not parse file")
ERR_FILE_READ_FAILURE = _("Could not read file")
ERR_FILE_CREATE_FAILURE = _("Could not create file")
ERR_FILE_CHECKSUM_FAILURE = _("Checksum of file is incorrect")
# Fixed: message was missing the word "complete" (cf. the decompression message).
ERR_FILE_COMPRESS_FAILURE = _("File compression did not complete correctly")
ERR_FILE_DECOMPRESS_FAILURE = \
    _("File decompression did not complete correctly")
ERR_FILE_MOVE_FAILURE = _("Move file did not complete successfully")
ERR_FILE_DELETE_FAILURE = _("File could not be deleted")
ERR_FILE_COPY_FAILURE = _("File did not copy correctly")
ERR_FILE_POSITIONING_FAILURE = _("Could not find file seek position")
ERR_FILE_TYPE_MISMATCH = _("Incorrect file type")
ERR_FILE_CASE_MISMATCH = _("File name case mismatch")
ERR_FILE_DUPLICATED = _("Duplicate file found")
ERR_FILE_UNKNOWN_ERROR = _("Unknown error encountered on file")
ERR_FILE_NAME_INVALIDE = _("This file name is invalid, it must not be an absolute path or "
                           "contain a period \".\" or \"..\": %s.")
ERR_OPTION_UNKNOWN = _("Unknown option")
ERR_OPTION_MISSING = _("Missing option")
ERR_OPTION_CONFLICT = _("Options conflict")
ERR_OPTION_VALUE_INVALID = _("Invalid option value")
ERR_OPTION_DEPRECATED = _("Deprecated option")
ERR_OPTION_NOT_SUPPORTED = _("Unsupported option")
ERR_OPTION_UNKNOWN_ERROR = _("Unknown error when processing options")
ERR_PARAMETER_INVALID = _("Invalid parameter")
ERR_PARAMETER_MISSING = _("Missing parameter")
ERR_PARAMETER_UNKNOWN_ERROR = _("Unknown error in parameters")
ERR_FORMAT_INVALID = _("Invalid syntax/format")
ERR_FORMAT_NOT_SUPPORTED = _("Syntax/format not supported")
ERR_FORMAT_UNKNOWN = _("Unknown format")
ERR_FORMAT_UNKNOWN_ERROR = _("Unknown error in syntax/format ")
ERR_RESOURCE_NOT_AVAILABLE = _("Not available")
ERR_RESOURCE_ALLOCATE_FAILURE = _("A resource allocation has failed")
ERR_RESOURCE_FULL = _("Full")
ERR_RESOURCE_OVERFLOW = _("Overflow")
ERR_RESOURCE_UNDERRUN = _("Underrun")
ERR_RESOURCE_UNKNOWN_ERROR = _("Unknown error")
ERR_ATTRIBUTE_NOT_AVAILABLE = _("Not available")
ERR_ATTRIBUTE_RETRIEVE_FAILURE = _("Unable to retrieve")
ERR_ATTRIBUTE_SET_FAILURE = _("Unable to set")
ERR_ATTRIBUTE_UPDATE_FAILURE = _("Unable to update")
ERR_ATTRIBUTE_ACCESS_DENIED = _("Access denied")
ERR_ATTRIBUTE_UNKNOWN_ERROR = _("Unknown error when accessing")
ERR_COMMAND_FAILURE = _("Unable to execute command")
ERR_IO_NOT_READY = _("Not ready")
ERR_IO_BUSY = _("Busy")
ERR_IO_TIMEOUT = _("Timeout")
ERR_IO_UNKNOWN_ERROR = _("Unknown error in IO operation")
ERR_UNKNOWN_ERROR = _("Unknown error")
ERR_UPT_ALREADY_INSTALLED_ERROR = _("Already installed")
ERR_UPT_ENVIRON_MISSING_ERROR = _("Environ missing")
ERR_UPT_REPKG_ERROR = _("File not allowed for RePackage")
ERR_UPT_DB_UPDATE_ERROR = _("Update database did not complete successfully")
ERR_UPT_INI_PARSE_ERROR = _("INI file parse error")
ERR_COPYRIGHT_MISSING = \
    _("Header comment section must have copyright information")
ERR_LICENSE_MISSING = \
    _("Header comment section must have license information")
ERR_INVALID_BINARYHEADER_FORMAT = \
    _("Binary Header comment section must have abstract,description,copyright,license information")
ERR_MULTIPLE_BINARYHEADER_EXIST = \
    _("the inf file at most support one BinaryHeader at the fileheader section.")
ERR_INVALID_COMMENT_FORMAT = _("Comment must start with #")
ERR_USER_ABORT = _("User has stopped the application")
ERR_DIST_EXT_ERROR = \
    _("Distribution file extension should be '.dist'. Current given: '%s'.")
ERR_DIST_FILENAME_ONLY_FOR_REMOVE = \
    _("Only distribution filename without path allowed during remove. Current given: '%s'.")
ERR_NOT_STANDALONE_MODULE_ERROR = \
    _("Module %s is not a standalone module (found in Package %s)")
ERR_UPT_ALREADY_RUNNING_ERROR = \
    _("UPT is already running, only one instance is allowed")
ERR_MUL_DEC_ERROR = _("Multiple DEC files found within one package directory tree %s: %s, %s")
ERR_INSTALL_FILE_FROM_EMPTY_CONTENT = _("Error file to be installed is not found in content file: %s")
ERR_INSTALL_FILE_DEC_FILE_ERROR = _("Could not obtain the TokenSpaceGuidCName and the PcdCName from the DEC files "
                                    "that the package depends on for this pcd entry: TokenValue: %s Token: %s")
ERR_NOT_SUPPORTED_SA_MODULE = _("Stand-alone module distribution does not allow EDK 1 INF")
ERR_INSTALL_DIST_NOT_FOUND = \
    _("Distribution file to be installed is not found in current working directory or workspace: %s")
ERR_REPLACE_DIST_NOT_FOUND = \
    _("Distribution file for replace function was not found in the current working directory or workspace: %s")
ERR_DIST_FILENAME_ONLY_FOR_REPLACE_ORIG = \
    _("Only a distribution file name without a path is allowed for "
      "the distribution to be replaced during replace. Current given: '%s'.")
ERR_UNIPARSE_DBLQUOTE_UNMATCHED = \
    _("Only Language entry can contain a couple of matched quote in one line")
ERR_UNIPARSE_NO_SECTION_EXIST = _("No PackageDef or ModuleDef section exists in the UNI file.")
ERR_UNIPARSE_STRNAME_FORMAT_ERROR = _("The String Token Name %s must start with \"STR_\"")
ERR_UNIPARSE_SEP_LANGENTRY_LINE = _("Each <LangEntry> should be in a separate line :%s.")
ERR_UNIPARSE_MULTI_ENTRY_EXIST = \
    _("There are same entries : %s in the UNI file, every kind of entry should be only one.")
ERR_UNIPARSE_ENTRY_ORDER_WRONG = \
    _("The string entry order in UNI file should be <AbstractStrings>, <DescriptionStrings>, "
      "<BinaryAbstractStrings>, <BinaryDescriptionStrings>.")
ERR_UNIPARSE_STRTOKEN_FORMAT_ERROR = _("The String Token Type %s must be one of the '_PROMPT', '_HELP' and '_ERR_'.")
ERR_UNIPARSE_LINEFEED_UNDER_EXIST = _("Line feed should not exist under this line: %s.")
ERR_UNIPARSE_LINEFEED_UP_EXIST = _("Line feed should not exist up this line: %s.")
ERR_UNI_MISS_STRING_ENTRY = _("String entry missed in this Entry, %s.")
ERR_UNI_MISS_LANGENTRY = _("Language entry missed in this Entry, %s.")
ERR_BINARY_HEADER_ORDER = _("Binary header must follow the file header.")
ERR_NO_SOURCE_HEADER = _("File header statement \"## @file\" must exist at the first place.")
ERR_UNI_FILE_SUFFIX_WRONG = _("The UNI file must have an extension of '.uni', '.UNI' or '.Uni'")
ERR_UNI_FILE_NAME_INVALID = _("The use of '..', '../' and './' in the UNI file is prohibited.")
ERR_UNI_SUBGUID_VALUE_DEFINE_DEC_NOT_FOUND = \
    _("There are no DEC file to define the GUID value for "
      "this GUID CName: '%s'.")

#
# Expression error message
#
ERR_EXPR_RIGHT_PAREN = \
    _('Missing ")" in expression "%s".')
ERR_EXPR_FACTOR = \
    _('"%s" is expected to be HEX, integer, macro, quoted string or PcdName in '
      'expression "%s".')
ERR_EXPR_STRING_ITEM = \
    _('"%s" is expected to be HEX, integer, macro, quoted string or PcdName in '
      'expression [%s].')
ERR_EXPR_EQUALITY = \
    _('"%s" is expected to be ==, EQ, != or NE  in expression "%s".')
ERR_EXPR_BOOLEAN = \
    _('The string "%s" in expression "%s" can not be recognized as a part of the logical expression.')
ERR_EXPR_EMPTY = _('Boolean value cannot be empty.')
ERR_EXPRESS_EMPTY = _('Expression can not be empty.')
ERR_EXPR_LOGICAL = \
    _('The following is not a valid logical expression: "%s".')
ERR_EXPR_OR = _('The expression: "%s" must be encapsulated in open "(" and close ")" '
                'parenthesis when using | or ||.')
ERR_EXPR_RANGE = \
    _('The following is not a valid range expression: "%s".')
ERR_EXPR_RANGE_FACTOR = \
    _('"%s" is expected to be HEX, integer in valid range expression "%s".')
ERR_EXPR_RANGE_DOUBLE_PAREN_NESTED = \
    _('Double parentheses nested is not allowed in valid range expression: "%s".')
ERR_EXPR_RANGE_EMPTY = _('Valid range can not be empty.')
ERR_EXPR_LIST_EMPTY = _('Valid list can not be empty.')
ERR_PAREN_NOT_USED = _('Parenthesis must be used on both sides of "OR", "AND" in valid range : %s.')
ERR_EXPR_LIST = \
    _('The following is not a valid list expression: "%s".')

#
# DEC parser error message
#
ERR_DECPARSE_STATEMENT_EMPTY = \
    _('Must have at least one statement in section %s.')
ERR_DECPARSE_DEFINE_DEFINED = \
    _('%s already defined in define section.')
ERR_DECPARSE_DEFINE_SECNAME = \
    _('No arch and others can be followed for define section.')
ERR_DECPARSE_DEFINE_MULTISEC = \
    _('The DEC file does not allow multiple define sections.')
ERR_DECPARSE_DEFINE_REQUIRED = \
    _("Field [%s] is required in define section.")
ERR_DECPARSE_DEFINE_FORMAT = \
    _("Wrong define section format, must be KEY = Value.")
ERR_DECPARSE_DEFINE_UNKNOWKEY = \
    _("Unknown key [%s] in define section.")
ERR_DECPARSE_DEFINE_SPEC = \
    _("Specification value must be HEX numbers or decimal numbers.")
ERR_DECPARSE_DEFINE_PKGNAME = \
    _("Package name must be AlphaNumeric characters.")
ERR_DECPARSE_DEFINE_PKGGUID = \
    _("GUID format error, must be HEX value with form 8-4-4-4-12.")
ERR_DECPARSE_DEFINE_PKGVERSION = \
    _("Version number must be decimal number.")
ERR_DECPARSE_DEFINE_PKGVUNI = \
    _("UNI file name format error or file does not exist.")
ERR_DECPARSE_INCLUDE = \
    _("Incorrect path: [%s].")
ERR_DECPARSE_LIBCLASS_SPLIT = \
    _("Library class format error, must be Libraryclass|Headerpath.")
ERR_DECPARSE_LIBCLASS_EMPTY = \
    _("Class name or file name must not be empty.")
ERR_DECPARSE_LIBCLASS_LIB = \
    _("Class name format error, must start with upper case letter followed with "
      "zero or more alphanumeric characters.")
ERR_DECPARSE_LIBCLASS_PATH_EXT = _("File name must be end with .h.")
ERR_DECPARSE_LIBCLASS_PATH_DOT = _("Path must not include '..'.")
ERR_DECPARSE_LIBCLASS_PATH_EXIST = _("File name [%s] does not exist.")
ERR_DECPARSE_PCD_CVAR_GUID = \
    _("TokenSpaceGuidCName must be valid C variable format.")
ERR_DECPARSE_PCD_SPLIT = \
    _("Incorrect PcdName. The format must be TokenSpaceGuidCName.PcdCName"
      "|PcdData|PcdType|Token.")
ERR_DECPARSE_PCD_NAME = \
    _("Incorrect PCD name. The correct format must be "
      "<TokenSpaceGuidCName>.<PcdCName>.")
ERR_DECPARSE_PCD_CVAR_PCDCNAME = \
    _("PcdCName must be valid C variable format.")
ERR_DECPARSE_PCD_TYPE = \
    _('Incorrect PCD data type. A PCD data type must be one of '
      '"UINT8", "UINT16", "UINT32", "UINT64", "VOID*", "BOOLEAN".')
ERR_DECPARSE_PCD_VOID = \
    _("Incorrect value [%s] of type [%s]. Value must be printable and in the "
      "form of{...} for array, or ""..."" for string, or L""..."""
      "for unicode string.")
ERR_DECPARSE_PCD_VALUE_EMPTY = \
    _("Pcd value can not be empty.")
ERR_DECPARSE_PCD_BOOL = \
    _("Invalid value [%s] of type [%s]; must be expression, TRUE, FALSE, 0 or 1.")
ERR_DECPARSE_PCD_INT = _("Incorrect value [%s] of type [%s]."\
                         " Value must be a hexadecimal, decimal or octal in C language format.")
ERR_DECPARSE_PCD_INT_NEGTIVE = _("Incorrect value [%s] of type [%s];"
                                 " must not be signed number.")
ERR_DECPARSE_PCD_INT_EXCEED = _("Incorrect value [%s] of type [%s]; "
                                "the number is too long for this type.")
ERR_DECPARSE_PCD_FEATUREFLAG = \
    _("PcdFeatureFlag only allow BOOLEAN type.")
ERR_DECPARSE_PCD_TOKEN = \
    _("An incorrect PCD token found: [%s]. "
      "It must start with 0x followed by 1 - 8 hexadecimal. ")
ERR_DECPARSE_PCD_TOKEN_INT = _("Incorrect token number [%s]. "
                               "This token number exceeds the maximal value of unsigned 32.")
ERR_DECPARSE_PCD_TOKEN_UNIQUE = _("Token number must be unique to the token space: %s.")
ERR_DECPARSE_CGUID = \
    _("No GUID name or value specified, must be <CName> = <GuidValueInCFormat>.")
ERR_DECPARSE_CGUID_NAME = \
    _("No GUID name specified, must be <CName> = <GuidValueInCFormat>.")
ERR_DECPARSE_CGUID_GUID = \
    _("No GUID value specified, must be <CName> = <GuidValueInCFormat>.")
ERR_DECPARSE_CGUID_GUIDFORMAT = \
    _("Incorrect GUID value format, must be <GuidValueInCFormat:"
      "{8,4,4,{2,2,2,2,2,2,2,2}}>.")
ERR_DECPARSE_CGUID_NOT_FOUND = _("Unable to find the GUID value of this GUID CName : '%s'.")
ERR_DECPARSE_FILEOPEN = _("Unable to open: [%s].")
ERR_DECPARSE_SECTION_EMPTY = _("Empty sections are not allowed.")
ERR_DECPARSE_SECTION_UE = _("Incorrect UserExtensions format. "
                            "Must be UserExtenxions.UserId.IdString[.Arch]+.")
# Fixed: missing space at literal join ("underscoreor").
ERR_DECPARSE_SECTION_UE_USERID = _("Invalid UserId, must be underscore "
                                   "or alphanumeric characters.")
ERR_DECPARSE_SECTION_UE_IDSTRING = \
    _("Incorrect IdString, must be \" ... \".")
ERR_DECPARSE_ARCH = \
    _("Unknown arch, must be 'common' or start with upper case letter followed by"
      " zero or more upper case letters and numbers.")
ERR_DECPARSE_SECTION_COMMA = _("Section cannot end with comma.")
ERR_DECPARSE_SECTION_COMMON = \
    _("'COMMON' must not be used with specific ARCHs in the same section.")
ERR_DECPARSE_SECTION_IDENTIFY = \
    _("Section header must start with and end with brackets[].")
ERR_DECPARSE_SECTION_SUBEMPTY = \
    _("Missing a sub-section name in section: [%s]. "
      "All sub-sections need to have names. ")
ERR_DECPARSE_SECTION_SUBTOOMANY = _("Too many DOT splits in [%s].")
ERR_DECPARSE_SECTION_UNKNOW = _("Section name [%s] unknown.")
ERR_DECPARSE_SECTION_FEATUREFLAG = \
    _("[%s] must not be in the same section as other types of PCD.")
ERR_DECPARSE_MACRO_PAIR = _("No macro name/value given.")
ERR_DECPARSE_MACRO_NAME = _("No macro name given.")
ERR_DECPARSE_MACRO_NAME_UPPER = \
    _("Macro name must start with upper case letter followed "
      "by zero or more upper case letters or numbers. Current macro name is: [%s].")
ERR_DECPARSE_SECTION_NAME = \
    _('Cannot mix different section names %s.')
ERR_DECPARSE_BACKSLASH = \
    _('Backslash must be the last character on a line and '
      'preceded by a space character.')
ERR_DECPARSE_BACKSLASH_EMPTY = \
    _('Empty line after previous line that has backslash is not allowed.')
ERR_DECPARSE_REDEFINE = _(
    "\"%s\" already defined in line %d.")
ERR_DECPARSE_MACRO_RESOLVE = _("Macro %s in %s cannot be resolved.")
ERR_DECPARSE_UE_DUPLICATE = \
    _("Duplicated UserExtensions header found.")
ERR_DECPARSE_PCDERRORMSG_MISS_VALUE_SPLIT = \
    _("Missing '|' between Pcd's error code and Pcd's error message.")
ERR_DECPARSE_PCD_MISS_ERRORMSG = \
    _("Missing Pcd's error message.")
ERR_DECPARSE_PCD_UNMATCHED_ERRORCODE = \
    _("There is no error message matched with this Pcd error code : %s in both DEC and UNI file.")
ERR_DECPARSE_PCD_NODEFINED = _("The PCD : %s used in the Expression is undefined.")
#
# Used to print the current line content which cause error raise.
# Be attached to the end of every error message above.
#
ERR_DECPARSE_LINE = _(" Parsing line: \"%s\".")

#
# Warning related strings.
#
WRN_PACKAGE_EXISTED = _(
    "A package with this GUID and Version already exists: "
    "GUID %s, Version %s.")
WRN_MODULE_EXISTED = _("This module already exists: %s")
WRN_FILE_EXISTED = _("This file already exists: %s")
# Fixed: grammar typo "already exist" -> "already exists".
WRN_FILE_NOT_OVERWRITTEN = \
    _("This file already exists and cannot be overwritten: %s")
WRN_DIST_PKG_INSTALLED = _("This distribution package %s has previously been installed.")
WRN_DIST_NOT_FOUND = _(
    "Distribution is not found at location %s")
WRN_MULTI_PCD_RANGES = _(
    "A PCD can only have one type of @ValidRange, @ValidList, and @Expression comment")
WRN_MULTI_PCD_VALIDVALUE = _(
    "A PCD can only have one of @ValidList comment")
WRN_MULTI_PCD_PROMPT = _(
    "A PCD can only have one of @Prompt comment")
WRN_MISSING_USAGE = _("Missing usage")
# Fixed: grammar typo "is and incorrect" -> "is an incorrect".
WRN_INVALID_GUID_TYPE = _("This is an incorrect Guid type: %s")
WRN_MISSING_GUID_TYPE = _("Missing Guid Type")
WRN_INVALID_USAGE = _("This is an incorrect Usage: %s")
WRN_INF_PARSER_MODULE_INVALID_HOB_TYPE = \
    _("This is an incorrect HOB type: %s")
WRN_INF_PARSER_MODULE_INVALID_EVENT_TYPE = \
    _("This is an incorrect EVENT type: %s")
WRN_INF_PARSER_MODULE_INVALID_BOOTMODE_TYPE = \
    _("This is an incorrect BOOTMODE type: %s")
WRN_INVALID_MODULE_TYPE = \
    _("This is an incorrect Module type: %s")
WRN_MODULE_PARSE_FAILED = \
    _("Parsing of this module did not complete correctly: %s.")
WRN_EDK1_INF_FOUND = \
    _("EDK 1 module file found: %s")
WRN_INVALID_COPYRIGHT = \
    _("Copyright information is not right")
WARN_SPECIAL_SECTION_LOCATION_WRONG = _("Warning. A special section should be "
                                        "at the end of a file or at the end of a section.")
WARN_INSTALLED_PACKAGE_NOT_FOUND = \
    _("File not found. The DEC file for a package cannot be found in GUID/Version/Install path: %s %s %s")
WARN_CUSTOMPATH_OVERRIDE_USEGUIDEDPATH = \
    _("option selection of --custom-path will override the option --use-guided-paths")

#
# Help related strings.
# HLP_PRINT_DEBUG_INFO = _( "Print DEBUG statements, where DEBUG_LEVEL is 0-9") HLP_PRINT_INFORMATIONAL_STATEMENT = _("Print informational statements") HLP_RETURN_NO_DISPLAY = _( "Returns only the exit code, informational and error messages are" " not displayed") HLP_RETURN_AND_DISPLAY = _( "Returns the exit code and displays error messages only") HLP_SPECIFY_PACKAGE_NAME_INSTALL = _( "Specify the UEFI Distribution Package filename to install") HLP_SPECIFY_PACKAGE_NAME_CREATE = _( "Specify the UEFI Distribution Package filename to create") HLP_SPECIFY_PACKAGE_NAME_REMOVE = _( "Specify the UEFI Distribution Package filename to remove") HLP_SPECIFY_TEMPLATE_NAME_CREATE = _( "Specify Package Information Data filename to create package") HLP_SPECIFY_DEC_NAME_CREATE = _( "Specify dec file names to create package") HLP_SPECIFY_INF_NAME_CREATE = _( "Specify inf file names to create package") HLP_LIST_DIST_INSTALLED = _( "List the UEFI Distribution Packages that have been installed") HLP_NO_SUPPORT_GUI = _( "Starting the tool in graphical mode is not supported in this version") HLP_DISABLE_PROMPT = _( "Disable user prompts for removing modified files. Valid only when -r is present") HLP_CUSTOM_PATH_PROMPT = _( "Enable user prompting for alternate installation directories") HLP_SKIP_LOCK_CHECK = _( "Skip the check for multiple instances") HLP_SPECIFY_PACKAGE_NAME_REPLACE = _( "Specify the UEFI Distribution Package file name to replace the existing file name") HLP_SPECIFY_PACKAGE_NAME_TO_BE_REPLACED = _( "Specify the UEFI Distribution Package file name to be replaced") HLP_USE_GUIDED_PATHS = _( "Install packages to the following directory path by default: <PackageName>_<PACKAGE_GUID>_<PACKAGE_VERSION>") HLP_TEST_INSTALL = _( "Specify the UEFI Distribution Package filenames to install") MSG_TEST_INSTALL_PASS = _("All distribution package file are satisfied for dependence check.") MSG_TEST_INSTALL_FAIL = _("NOT all distribution package file are satisfied for dependence check.")
apache-2.0
kitanata/resume
env/lib/python2.7/site-packages/xhtml2pdf/xhtml2pdf_reportlab.py
44
32291
# -*- coding: utf-8 -*- # Copyright 2010 Dirk Holtwick, holtwick.it # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from hashlib import md5 from reportlab.lib.enums import TA_RIGHT from reportlab.lib.styles import ParagraphStyle from reportlab.lib.utils import flatten, open_for_read, getStringIO, \ LazyImageReader, haveImages from reportlab.platypus.doctemplate import BaseDocTemplate, PageTemplate, IndexingFlowable from reportlab.platypus.flowables import Flowable, CondPageBreak, \ KeepInFrame, ParagraphAndImage from reportlab.platypus.tableofcontents import TableOfContents from reportlab.platypus.tables import Table, TableStyle from xhtml2pdf.reportlab_paragraph import Paragraph from xhtml2pdf.util import getUID, getBorderStyle from types import StringType, TupleType, ListType, IntType import StringIO import cgi import copy import logging import reportlab.pdfbase.pdfform as pdfform import sys try: import PIL.Image as PILImage except: try: import Image as PILImage except: PILImage = None log = logging.getLogger("xhtml2pdf") MAX_IMAGE_RATIO = 0.95 class PTCycle(list): def __init__(self): self._restart = 0 self._idx = 0 list.__init__(self) def cyclicIterator(self): while 1: yield self[self._idx] self._idx += 1 if self._idx >= len(self): self._idx = self._restart class PmlMaxHeightMixIn: def setMaxHeight(self, availHeight): self.availHeightValue = availHeight if availHeight < 70000: if hasattr(self, "canv"): if not hasattr(self.canv, "maxAvailHeightValue"): 
self.canv.maxAvailHeightValue = 0 self.availHeightValue = self.canv.maxAvailHeightValue = max( availHeight, self.canv.maxAvailHeightValue) else: self.availHeightValue = availHeight if not hasattr(self, "availHeightValue"): self.availHeightValue = 0 return self.availHeightValue def getMaxHeight(self): if not hasattr(self, "availHeightValue"): return 0 return self.availHeightValue class PmlBaseDoc(BaseDocTemplate): """ We use our own document template to get access to the canvas and set some informations once. """ def beforePage(self): # Tricky way to set producer, because of not real privateness in Python info = "pisa HTML to PDF <http://www.htmltopdf.org>" self.canv._doc.info.producer = info ''' # Convert to ASCII because there is a Bug in Reportlab not # supporting other than ASCII. Send to list on 23.1.2007 author = toString(self.pml_data.get("author", "")).encode("ascii","ignore") subject = toString(self.pml_data.get("subject", "")).encode("ascii","ignore") title = toString(self.pml_data.get("title", "")).encode("ascii","ignore") # print repr((author,title,subject)) self.canv.setAuthor(author) self.canv.setSubject(subject) self.canv.setTitle(title) if self.pml_data.get("fullscreen", 0): self.canv.showFullScreen0() if self.pml_data.get("showoutline", 0): self.canv.showOutline() if self.pml_data.get("duration", None) is not None: self.canv.setPageDuration(self.pml_data["duration"]) ''' def afterFlowable(self, flowable): # Does the flowable contain fragments? 
if getattr(flowable, "outline", False): self.notify('TOCEntry', ( flowable.outlineLevel, cgi.escape(copy.deepcopy(flowable.text), 1), self.page)) def handle_nextPageTemplate(self, pt): ''' if pt has also templates for even and odd page convert it to list ''' has_left_template = self._has_template_for_name(pt + '_left') has_right_template = self._has_template_for_name(pt + '_right') if has_left_template and has_right_template: pt = [pt + '_left', pt + '_right'] '''On endPage change to the page template with name or index pt''' if type(pt) is StringType: if hasattr(self, '_nextPageTemplateCycle'): del self._nextPageTemplateCycle for t in self.pageTemplates: if t.id == pt: self._nextPageTemplateIndex = self.pageTemplates.index(t) return raise ValueError("can't find template('%s')" % pt) elif type(pt) is IntType: if hasattr(self, '_nextPageTemplateCycle'): del self._nextPageTemplateCycle self._nextPageTemplateIndex = pt elif type(pt) in (ListType, TupleType): #used for alternating left/right pages #collect the refs to the template objects, complain if any are bad c = PTCycle() for ptn in pt: #special case name used to short circuit the iteration if ptn == '*': c._restart = len(c) continue for t in self.pageTemplates: if t.id == ptn.strip(): c.append(t) break if not c: raise ValueError("No valid page templates in cycle") elif c._restart > len(c): raise ValueError("Invalid cycle restart position") #ensure we start on the first one$ self._nextPageTemplateCycle = c.cyclicIterator() else: raise TypeError("Argument pt should be string or integer or list") def _has_template_for_name(self, name): for template in self.pageTemplates: if template.id == name.strip(): return True return False class PmlPageTemplate(PageTemplate): PORTRAIT = 'portrait' LANDSCAPE = 'landscape' # by default portrait pageorientation = PORTRAIT def __init__(self, **kw): self.pisaStaticList = [] self.pisaBackgroundList = [] self.pisaBackground = None PageTemplate.__init__(self, **kw) self._page_count = 0 
self._first_flow = True def isFirstFlow(self, canvas): if self._first_flow: if canvas.getPageNumber() <= self._page_count: self._first_flow = False else: self._page_count = canvas.getPageNumber() canvas._doctemplate._page_count = canvas.getPageNumber() return self._first_flow def isPortrait(self): return self.pageorientation == self.PORTRAIT def isLandscape(self): return self.pageorientation == self.LANDSCAPE def beforeDrawPage(self, canvas, doc): canvas.saveState() try: # Background pisaBackground = None if (self.isFirstFlow(canvas) and hasattr(self, "pisaBackground") and self.pisaBackground and (not self.pisaBackground.notFound())): # Is image not PDF if self.pisaBackground.mimetype.startswith("image/"): try: img = PmlImageReader(StringIO.StringIO(self.pisaBackground.getData())) iw, ih = img.getSize() pw, ph = canvas._pagesize width = pw # min(iw, pw) # max wfactor = float(width) / iw height = ph # min(ih, ph) # max hfactor = float(height) / ih factor_min = min(wfactor, hfactor) if self.isPortrait(): w = iw * factor_min h = ih * factor_min canvas.drawImage(img, 0, ph - h, w, h) elif self.isLandscape(): factor_max = max(wfactor, hfactor) w = ih * factor_max h = iw * factor_min canvas.drawImage(img, 0, 0, w, h) except: log.exception("Draw background") # PDF! 
else: pisaBackground = self.pisaBackground if pisaBackground: self.pisaBackgroundList.append(pisaBackground) def pageNumbering(objList): for obj in flatten(objList): if isinstance(obj, PmlParagraph): for frag in obj.frags: if frag.pageNumber: frag.text = str(pagenumber) elif frag.pageCount: frag.text = str(canvas._doctemplate._page_count) elif isinstance(obj, PmlTable): # Flatten the cells ([[1,2], [3,4]] becomes [1,2,3,4]) flat_cells = [item for sublist in obj._cellvalues for item in sublist] pageNumbering(flat_cells) try: # Paint static frames pagenumber = canvas.getPageNumber() for frame in self.pisaStaticList: frame = copy.deepcopy(frame) story = frame.pisaStaticStory pageNumbering(story) frame.addFromList(story, canvas) except Exception: # TODO: Kill this! log.debug("PmlPageTemplate", exc_info=1) finally: canvas.restoreState() _ctr = 1 class PmlImageReader(object): # TODO We need a factory here, returning either a class for java or a class for PIL """ Wraps up either PIL or Java to get data from bitmaps """ _cache = {} def __init__(self, fileName): if isinstance(fileName, PmlImageReader): self.__dict__ = fileName.__dict__ # borgize return #start wih lots of null private fields, to be populated by #the relevant engine. 
self.fileName = fileName self._image = None self._width = None self._height = None self._transparent = None self._data = None imageReaderFlags = 0 if PILImage and isinstance(fileName, PILImage.Image): self._image = fileName self.fp = getattr(fileName, 'fp', None) try: self.fileName = self._image.fileName except AttributeError: self.fileName = 'PILIMAGE_%d' % id(self) else: try: self.fp = open_for_read(fileName, 'b') if isinstance(self.fp, StringIO.StringIO().__class__): imageReaderFlags = 0 # avoid messing with already internal files if imageReaderFlags > 0: # interning data = self.fp.read() if imageReaderFlags & 2: # autoclose try: self.fp.close() except: pass if imageReaderFlags & 4: # cache the data if not self._cache: from rl_config import register_reset register_reset(self._cache.clear) data = self._cache.setdefault(md5(data).digest(), data) self.fp = getStringIO(data) elif imageReaderFlags == - 1 and isinstance(fileName, (str, unicode)): #try Ralf Schmitt's re-opening technique of avoiding too many open files self.fp.close() del self.fp # will become a property in the next statement self.__class__ = LazyImageReader if haveImages: #detect which library we are using and open the image if not self._image: self._image = self._read_image(self.fp) if getattr(self._image, 'format', None) == 'JPEG': self.jpeg_fh = self._jpeg_fh else: from reportlab.pdfbase.pdfutils import readJPEGInfo try: self._width, self._height, c = readJPEGInfo(self.fp) except: raise RuntimeError('Imaging Library not available, unable to import bitmaps only jpegs') self.jpeg_fh = self._jpeg_fh self._data = self.fp.read() self._dataA = None self.fp.seek(0) except: # TODO: Kill the catch-all et, ev, tb = sys.exc_info() if hasattr(ev, 'args'): a = str(ev.args[- 1]) + (' fileName=%r' % fileName) ev.args = ev.args[: - 1] + (a,) raise et, ev, tb else: raise def _read_image(self, fp): if sys.platform[0:4] == 'java': from javax.imageio import ImageIO from java.io import ByteArrayInputStream input_stream 
= ByteArrayInputStream(fp.read()) return ImageIO.read(input_stream) elif PILImage: return PILImage.open(fp) def _jpeg_fh(self): fp = self.fp fp.seek(0) return fp def jpeg_fh(self): return None def getSize(self): if self._width is None or self._height is None: if sys.platform[0:4] == 'java': self._width = self._image.getWidth() self._height = self._image.getHeight() else: self._width, self._height = self._image.size return self._width, self._height def getRGBData(self): "Return byte array of RGB data as string" if self._data is None: self._dataA = None if sys.platform[0:4] == 'java': import jarray # TODO: Move to top. from java.awt.image import PixelGrabber width, height = self.getSize() buffer = jarray.zeros(width * height, 'i') pg = PixelGrabber(self._image, 0, 0, width, height, buffer, 0, width) pg.grabPixels() # there must be a way to do this with a cast not a byte-level loop, # I just haven't found it yet... pixels = [] a = pixels.append for rgb in buffer: a(chr((rgb >> 16) & 0xff)) a(chr((rgb >> 8) & 0xff)) a(chr(rgb & 0xff)) self._data = ''.join(pixels) self.mode = 'RGB' else: im = self._image mode = self.mode = im.mode if mode == 'RGBA': im.load() self._dataA = PmlImageReader(im.split()[3]) im = im.convert('RGB') self.mode = 'RGB' elif mode not in ('L', 'RGB', 'CMYK'): im = im.convert('RGB') self.mode = 'RGB' self._data = im.tobytes() return self._data def getImageData(self): width, height = self.getSize() return width, height, self.getRGBData() def getTransparent(self): if sys.platform[0:4] == 'java': return None elif "transparency" in self._image.info: transparency = self._image.info["transparency"] * 3 palette = self._image.palette if hasattr(palette, 'palette'): palette = palette.palette elif hasattr(palette, 'data'): palette = palette.data else: return None # 8-bit PNGs could give an empty string as transparency value, so # we have to be careful here. 
try: return map(ord, palette[transparency:transparency + 3]) except: return None else: return None def __str__(self): try: fn = self.fileName.read() if not fn: fn = id(self) return "PmlImageObject_%s" % hash(fn) except: fn = self.fileName if not fn: fn = id(self) return fn class PmlImage(Flowable, PmlMaxHeightMixIn): def __init__(self, data, width=None, height=None, mask="auto", mimetype=None, **kw): self.kw = kw self.hAlign = 'CENTER' self._mask = mask self._imgdata = data # print "###", repr(data) self.mimetype = mimetype img = self.getImage() if img: self.imageWidth, self.imageHeight = img.getSize() self.drawWidth = width or self.imageWidth self.drawHeight = height or self.imageHeight def wrap(self, availWidth, availHeight): " This can be called more than once! Do not overwrite important data like drawWidth " availHeight = self.setMaxHeight(availHeight) # print "image wrap", id(self), availWidth, availHeight, self.drawWidth, self.drawHeight width = min(self.drawWidth, availWidth) wfactor = float(width) / self.drawWidth height = min(self.drawHeight, availHeight * MAX_IMAGE_RATIO) hfactor = float(height) / self.drawHeight factor = min(wfactor, hfactor) self.dWidth = self.drawWidth * factor self.dHeight = self.drawHeight * factor # print "imgage result", factor, self.dWidth, self.dHeight return self.dWidth, self.dHeight def getImage(self): img = PmlImageReader(StringIO.StringIO(self._imgdata)) return img def draw(self): img = self.getImage() self.canv.drawImage( img, 0, 0, self.dWidth, self.dHeight, mask=self._mask) def identity(self, maxLen=None): r = Flowable.identity(self, maxLen) return r class PmlParagraphAndImage(ParagraphAndImage, PmlMaxHeightMixIn): def wrap(self, availWidth, availHeight): self.I.canv = self.canv result = ParagraphAndImage.wrap(self, availWidth, availHeight) del self.I.canv return result def split(self, availWidth, availHeight): # print "# split", id(self) if not hasattr(self, "wI"): self.wI, self.hI = self.I.wrap(availWidth, availHeight) # 
drawWidth, self.I.drawHeight return ParagraphAndImage.split(self, availWidth, availHeight) class PmlParagraph(Paragraph, PmlMaxHeightMixIn): def _calcImageMaxSizes(self, availWidth, availHeight): self.hasImages = False for frag in self.frags: if hasattr(frag, "cbDefn") and frag.cbDefn.kind == "img": img = frag.cbDefn if img.width > 0 and img.height > 0: self.hasImages = True width = min(img.width, availWidth) wfactor = float(width) / img.width height = min(img.height, availHeight * MAX_IMAGE_RATIO) # XXX 99% because 100% do not work... hfactor = float(height) / img.height factor = min(wfactor, hfactor) img.height *= factor img.width *= factor def wrap(self, availWidth, availHeight): availHeight = self.setMaxHeight(availHeight) style = self.style self.deltaWidth = style.paddingLeft + style.paddingRight + style.borderLeftWidth + style.borderRightWidth self.deltaHeight = style.paddingTop + style.paddingBottom + style.borderTopWidth + style.borderBottomWidth # reduce the available width & height by the padding so the wrapping # will use the correct size availWidth -= self.deltaWidth availHeight -= self.deltaHeight # Modify maxium image sizes self._calcImageMaxSizes(availWidth, availHeight) # call the base class to do wrapping and calculate the size Paragraph.wrap(self, availWidth, availHeight) #self.height = max(1, self.height) #self.width = max(1, self.width) # increase the calculated size by the padding self.width = self.width + self.deltaWidth self.height = self.height + self.deltaHeight return self.width, self.height def split(self, availWidth, availHeight): if len(self.frags) <= 0: return [] #the split information is all inside self.blPara if not hasattr(self, 'deltaWidth'): self.wrap(availWidth, availHeight) availWidth -= self.deltaWidth availHeight -= self.deltaHeight return Paragraph.split(self, availWidth, availHeight) def draw(self): # Create outline if getattr(self, "outline", False): # Check level and add all levels last = getattr(self.canv, "outlineLast", 
- 1) + 1 while last < self.outlineLevel: # print "(OUTLINE", last, self.text key = getUID() self.canv.bookmarkPage(key) self.canv.addOutlineEntry( self.text, key, last, not self.outlineOpen) last += 1 self.canv.outlineLast = self.outlineLevel key = getUID() self.canv.bookmarkPage(key) self.canv.addOutlineEntry( self.text, key, self.outlineLevel, not self.outlineOpen) last += 1 # Draw the background and borders here before passing control on to # ReportLab. This is because ReportLab can't handle the individual # components of the border independently. This will also let us # support more border styles eventually. canvas = self.canv style = self.style bg = style.backColor leftIndent = style.leftIndent bp = 0 # style.borderPadding x = leftIndent - bp y = - bp w = self.width - (leftIndent + style.rightIndent) + 2 * bp h = self.height + 2 * bp if bg: # draw a filled rectangle (with no stroke) using bg color canvas.saveState() canvas.setFillColor(bg) canvas.rect(x, y, w, h, fill=1, stroke=0) canvas.restoreState() # we need to hide the bg color (if any) so Paragraph won't try to draw it again style.backColor = None # offset the origin to compensate for the padding canvas.saveState() canvas.translate( (style.paddingLeft + style.borderLeftWidth), -1 * (style.paddingTop + style.borderTopWidth)) # + (style.leading / 4))) # Call the base class draw method to finish up Paragraph.draw(self) canvas.restoreState() # Reset color because we need it again if we run 2-PASS like we # do when using TOC style.backColor = bg canvas.saveState() def _drawBorderLine(bstyle, width, color, x1, y1, x2, y2): # We need width and border style to be able to draw a border if width and getBorderStyle(bstyle): # If no color for border is given, the text color is used (like defined by W3C) if color is None: color = style.textColor # print "Border", bstyle, width, color if color is not None: canvas.setStrokeColor(color) canvas.setLineWidth(width) canvas.line(x1, y1, x2, y2) 
_drawBorderLine(style.borderLeftStyle, style.borderLeftWidth, style.borderLeftColor, x, y, x, y + h) _drawBorderLine(style.borderRightStyle, style.borderRightWidth, style.borderRightColor, x + w, y, x + w, y + h) _drawBorderLine(style.borderTopStyle, style.borderTopWidth, style.borderTopColor, x, y + h, x + w, y + h) _drawBorderLine(style.borderBottomStyle, style.borderBottomWidth, style.borderBottomColor, x, y, x + w, y) canvas.restoreState() class PmlKeepInFrame(KeepInFrame, PmlMaxHeightMixIn): def wrap(self, availWidth, availHeight): availWidth = max(availWidth, 1.0) availHeight = max(availHeight, 1.0) self.maxWidth = availWidth self.maxHeight = self.setMaxHeight(availHeight) return KeepInFrame.wrap(self, availWidth, availHeight) class PmlTable(Table, PmlMaxHeightMixIn): def _normWidth(self, w, maxw): """ Helper for calculating percentages """ if type(w) == type(""): w = ((maxw / 100.0) * float(w[: - 1])) elif (w is None) or (w == "*"): w = maxw return min(w, maxw) def _listCellGeom(self, V, w, s, W=None, H=None, aH=72000): # print "#", self.availHeightValue if aH == 72000: aH = self.getMaxHeight() or aH return Table._listCellGeom(self, V, w, s, W=W, H=H, aH=aH) def wrap(self, availWidth, availHeight): self.setMaxHeight(availHeight) # Strange bug, sometime the totalWidth is not set !? try: self.totalWidth except: self.totalWidth = availWidth # Prepare values totalWidth = self._normWidth(self.totalWidth, availWidth) remainingWidth = totalWidth remainingCols = 0 newColWidths = self._colWidths # Calculate widths that are fix # IMPORTANT!!! 
We can not substitute the private value # self._colWidths therefore we have to modify list in place for i, colWidth in enumerate(newColWidths): if (colWidth is not None) or (colWidth == '*'): colWidth = self._normWidth(colWidth, totalWidth) remainingWidth -= colWidth else: remainingCols += 1 colWidth = None newColWidths[i] = colWidth # Distribute remaining space minCellWidth = totalWidth * 0.01 if remainingCols > 0: for i, colWidth in enumerate(newColWidths): if colWidth is None: newColWidths[i] = max(minCellWidth, remainingWidth / remainingCols) # - 0.1 # Bigger than totalWidth? Lets reduce the fix entries propotionally if sum(newColWidths) > totalWidth: quotient = totalWidth / sum(newColWidths) for i in range(len(newColWidths)): newColWidths[i] = newColWidths[i] * quotient # To avoid rounding errors adjust one col with the difference diff = sum(newColWidths) - totalWidth if diff > 0: newColWidths[0] -= diff return Table.wrap(self, availWidth, availHeight) class PmlPageCount(IndexingFlowable): def __init__(self): IndexingFlowable.__init__(self) self.second_round = False def isSatisfied(self): s = self.second_round self.second_round = True return s def drawOn(self, canvas, x, y, _sW=0): pass class PmlTableOfContents(TableOfContents): def wrap(self, availWidth, availHeight): """ All table properties should be known by now. """ widths = (availWidth - self.rightColumnWidth, self.rightColumnWidth) # makes an internal table which does all the work. # we draw the LAST RUN's entries! 
If there are # none, we make some dummy data to keep the table # from complaining if len(self._lastEntries) == 0: _tempEntries = [(0, 'Placeholder for table of contents', 0)] else: _tempEntries = self._lastEntries lastMargin = 0 tableData = [] tableStyle = [ ('VALIGN', (0, 0), (- 1, - 1), 'TOP'), ('LEFTPADDING', (0, 0), (- 1, - 1), 0), ('RIGHTPADDING', (0, 0), (- 1, - 1), 0), ('TOPPADDING', (0, 0), (- 1, - 1), 0), ('BOTTOMPADDING', (0, 0), (- 1, - 1), 0), ] for i, entry in enumerate(_tempEntries): level, text, pageNum = entry[:3] leftColStyle = self.levelStyles[level] if i: # Not for first element tableStyle.append(( 'TOPPADDING', (0, i), (- 1, i), max(lastMargin, leftColStyle.spaceBefore))) # print leftColStyle.leftIndent lastMargin = leftColStyle.spaceAfter #right col style is right aligned rightColStyle = ParagraphStyle(name='leftColLevel%d' % level, parent=leftColStyle, leftIndent=0, alignment=TA_RIGHT) leftPara = Paragraph(text, leftColStyle) rightPara = Paragraph(str(pageNum), rightColStyle) tableData.append([leftPara, rightPara]) self._table = Table( tableData, colWidths=widths, style=TableStyle(tableStyle)) self.width, self.height = self._table.wrapOn(self.canv, availWidth, availHeight) return self.width, self.height class PmlRightPageBreak(CondPageBreak): def __init__(self): pass def wrap(self, availWidth, availHeight): if not self.canv.getPageNumber() % 2: self.width = availWidth self.height = availHeight return availWidth, availHeight self.width = self.height = 0 return 0, 0 class PmlLeftPageBreak(CondPageBreak): def __init__(self): pass def wrap(self, availWidth, availHeight): if self.canv.getPageNumber() % 2: self.width = availWidth self.height = availHeight return availWidth, availHeight self.width = self.height = 0 return 0, 0 # --- Pdf Form class PmlInput(Flowable): def __init__(self, name, type="text", width=10, height=10, default="", options=[]): self.width = width self.height = height self.type = type self.name = name self.default = default 
self.options = options def wrap(self, *args): return self.width, self.height def draw(self): c = self.canv c.saveState() c.setFont("Helvetica", 10) if self.type == "text": pdfform.textFieldRelative(c, self.name, 0, 0, self.width, self.height) c.rect(0, 0, self.width, self.height) elif self.type == "radio": c.rect(0, 0, self.width, self.height) elif self.type == "checkbox": if self.default: pdfform.buttonFieldRelative(c, self.name, "Yes", 0, 0) else: pdfform.buttonFieldRelative(c, self.name, "Off", 0, 0) c.rect(0, 0, self.width, self.height) elif self.type == "select": pdfform.selectFieldRelative(c, self.name, self.default, self.options, 0, 0, self.width, self.height) c.rect(0, 0, self.width, self.height) c.restoreState()
mit
sigterm9/sigcoin
share/qt/make_spinner.py
4415
1035
#!/usr/bin/env python # W.J. van der Laan, 2011 # Make spinning .mng animation from a .png # Requires imagemagick 6.7+ from __future__ import division from os import path from PIL import Image from subprocess import Popen SRC='img/reload_scaled.png' DST='../../src/qt/res/movies/update_spinner.mng' TMPDIR='/tmp' TMPNAME='tmp-%03i.png' NUMFRAMES=35 FRAMERATE=10.0 CONVERT='convert' CLOCKWISE=True DSIZE=(16,16) im_src = Image.open(SRC) if CLOCKWISE: im_src = im_src.transpose(Image.FLIP_LEFT_RIGHT) def frame_to_filename(frame): return path.join(TMPDIR, TMPNAME % frame) frame_files = [] for frame in xrange(NUMFRAMES): rotation = (frame + 0.5) / NUMFRAMES * 360.0 if CLOCKWISE: rotation = -rotation im_new = im_src.rotate(rotation, Image.BICUBIC) im_new.thumbnail(DSIZE, Image.ANTIALIAS) outfile = frame_to_filename(frame) im_new.save(outfile, 'png') frame_files.append(outfile) p = Popen([CONVERT, "-delay", str(FRAMERATE), "-dispose", "2"] + frame_files + [DST]) p.communicate()
mit
evenmarbles/mlpy
tests/test_mdp.py
1
1816
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
test_mdp
----------------------------------

Unit tests for the `mlpy.mdp` module: exercises the MDP model factory for
the discrete, decision-tree, and CASML model types.
"""
import pytest


class TestMDPModel(object):

    def setup_method(self, _):
        # Reset the shared Action class state so each test starts from a
        # clean slate (Action attributes are class-level and would otherwise
        # leak between tests).
        from mlpy.mdp.stateaction import Action
        Action.description = None
        Action.nfeatures = None

    def test_model_creation(self):
        from mlpy.mdp import MDPModelFactory

        # --- discrete model ---
        # Without an action description the factory must refuse to build one.
        with pytest.raises(ValueError):
            MDPModelFactory.create('discretemodel')

        from mlpy.mdp.stateaction import Action
        action_description = {
            'out': {'value': [-0.004]},
            'in': {'value': [0.004]},
            'kick': {'value': [-1.0]}
        }
        Action.description = action_description
        MDPModelFactory.create('discretemodel')

        # --- decision tree model ---
        MDPModelFactory.create('decisiontreemodel')
        MDPModelFactory.create('decisiontreemodel', explorer_type='unknownbonusexplorer')
        MDPModelFactory.create('decisiontreemodel', explorer_type='leastvisitedbonusexplorer',
                               explorer_params={'rmax': 1.0})

        # An unrecognized explorer type must be rejected.
        with pytest.raises(ValueError):
            MDPModelFactory.create('decisiontreemodel', explorer_type='undefined')

        # --- CASML model ---
        case_template = {
            "state": {
                "type": "float",
                "value": "data.state",
                "is_index": True,
                "retrieval_method": "radius-n",
                "retrieval_method_params": 0.01
            },
            "delta_state": {
                "type": "float",
                "value": "data.next_state - data.state",
                "is_index": False,
            }
        }
        MDPModelFactory.create('casml', case_template)

    def teardown_method(self, _):
        # Nothing to clean up; setup_method re-initializes shared state.
        pass
mit
kindohm/The_Force
pythonBridge/websocketUDPBridge.py
2
2948
"""Bridge between a WebSocket server and a UDP peer (Python 2 / Twisted).

Data path (see diagram below): browser clients connect over HTTP/WebSocket;
payloads received on the WebSocket are forwarded out as UDP datagrams, and
datagrams received on the UDP port are pushed back to the WebSocket client.
"""
import time, sys, os, pkg_resources
import SocketServer
from twisted.python import log
from twisted.internet import reactor
from twisted.application import service
from twisted.internet.protocol import DatagramProtocol, Protocol, Factory
from twisted.web.server import Site
from twisted.web.static import File
from autobahn.twisted.websocket import WebSocketServerProtocol, \
    WebSocketServerFactory
from autobahn.twisted.resource import WebSocketResource, \
    HTTPChannelHixie76Aware

# constants
SERVER_IP = '169.254.118.168'
# SERVER_IP = '127.0.0.1'
SERVER_UDP_PORT = 7600
SERVER_WS_PORT = 8000
SERVER_HTTP_PORT = 9000
SERVER_HTTP_RESOURCES = 'web'
#CLIENT_IP = '169.254.133.240'
CLIENT_IP = '127.0.0.1'
CLIENT_UDP_PORT = 7500

# [HTTP] > [CLIENT WS] > [SERVER WS] > bridge > [SERVER UDP] > [CLIENT UDP]


class Bridge():
    """Holds references to both servers and relays payloads between them.

    Either side may be absent (None) until its server registers itself;
    relay methods silently drop data until both ends are connected.
    """

    def __init__(self):
        self.udpServer = None   # UDPServer instance, set via setUdpServer
        self.wsServer = None    # WebSocketServer instance, set via setWebsocketServer

    def setUdpServer(self, udpServer):
        self.udpServer = udpServer

    def setWebsocketServer(self, wsServer):
        self.wsServer = wsServer

    def udpToWebsocket(self, data):
        # Forward a UDP datagram to the WebSocket client as a binary frame.
        if self.wsServer is not None:
            self.wsServer.sendMessage(data, True)

    def websocketToUdp(self, data):
        # Forward a WebSocket payload to the fixed UDP client endpoint.
        if self.udpServer is not None:
            self.udpServer.transport.write(data, (CLIENT_IP, CLIENT_UDP_PORT))


# udp server
class UDPServer(DatagramProtocol):
    """Twisted UDP protocol: every received datagram goes to the bridge."""

    def __init__(self, bridge):
        self.bridge = bridge
        # Register ourselves so the bridge can write outbound datagrams.
        self.bridge.setUdpServer(self)

    def datagramReceived(self, data, (host, port)):
        # Python-2-only tuple unpacking in the signature; sender address is unused.
        self.bridge.udpToWebsocket(data)
        #print data


# websocket server
class BridgedWebSocketServerFactory(WebSocketServerFactory):
    """WebSocketServerFactory that carries the shared Bridge instance."""

    def __init__(self, url, debug, debugCodePaths, bridge):
        WebSocketServerFactory.__init__(self, url, debug = debug, debugCodePaths = debugCodePaths)
        self.bridge = bridge


class WebSocketServer(WebSocketServerProtocol):
    """WebSocket protocol: inbound messages are relayed to UDP via the bridge.

    NOTE(review): only one WebSocket client is tracked at a time — each new
    connection overwrites the bridge's wsServer reference.
    """

    def onOpen(self):
        print 'WebSocket connection open.'

    def onConnect(self, request):
        # Register this connection as the bridge's WebSocket endpoint.
        self.factory.bridge.setWebsocketServer(self)
        print 'Client connecting: {0}'.format(request.peer)

    def onMessage(self, payload, isBinary):
        self.factory.bridge.websocketToUdp(payload)

    def onClose(self, wasClean, code, reason):
        print 'WebSocket connection closed: {0}'.format(reason)


# initalize servers
if __name__ == '__main__':
    bridge = Bridge()
    log.startLogging(sys.stdout)

    # websocket setup
    wsAddress = 'ws://%s:%d' % (SERVER_IP, SERVER_WS_PORT)
    factory = BridgedWebSocketServerFactory(wsAddress, False, False, bridge)
    factory.protocol = WebSocketServer
    reactor.listenTCP(SERVER_WS_PORT, factory)

    # http setup: serve the static web/ directory for the browser client
    webdir = os.path.abspath(SERVER_HTTP_RESOURCES)
    site = Site(File(webdir))
    site.protocol = HTTPChannelHixie76Aware
    reactor.listenTCP(SERVER_HTTP_PORT, site)

    # udp setup
    reactor.listenUDP(SERVER_UDP_PORT, UDPServer(bridge))

    # start session
    reactor.run()
mit
raildo/keystone
keystone/contrib/access/core.py
6
2112
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import webob.dec

from keystone.common import wsgi
from keystone import config
from keystone.openstack.common import log as logging
from keystone.openstack.common import timeutils


CONF = config.CONF
LOG = logging.getLogger('access')

APACHE_TIME_FORMAT = '%d/%b/%Y:%H:%M:%S'
APACHE_LOG_FORMAT = (
    '%(remote_addr)s - %(remote_user)s [%(datetime)s] "%(method)s %(url)s '
    '%(http_version)s" %(status)s %(content_length)s')


class AccessLogMiddleware(wsgi.Middleware):
    """Writes an access log to INFO."""

    @webob.dec.wsgify
    def __call__(self, request):
        # Pre-fill the record with request-side fields; response-side
        # fields default to failure markers (500 / '-') so that a request
        # aborted by an exception is still logged by the finally block.
        entry = dict(
            remote_addr=request.remote_addr,
            remote_user=request.remote_user or '-',
            method=request.method,
            url=request.url,
            http_version=request.http_version,
            status=500,
            content_length='-')
        try:
            response = request.get_response(self.application)
            entry['status'] = response.status_int
            entry['content_length'] = len(response.body) or '-'
        finally:
            # Timestamp must be taken *after* the application has run.
            # timeutils may not return UTC, so emit the real numeric
            # offset when available instead of hardcoding +0000.
            stamp = timeutils.utcnow()
            offset = stamp.strftime('%z') or '+0000'
            entry['datetime'] = '%s %s' % (stamp.strftime(APACHE_TIME_FORMAT),
                                           offset)
            LOG.info(APACHE_LOG_FORMAT % entry)
        return response
apache-2.0
pmediano/ComputationalNeurodynamics
Fall2015/Exercise_3/NetworkWattsStrogatz.py
4
1095
""" Computational Neurodynamics Exercise 3 (C) Murray Shanahan et al, 2015 """ import numpy as np import numpy.random as rn from NetworkRingLattice import NetworkRingLattice def NetworkWattsStrogatz(N, k, p): """ Creates a ring lattice with N nodes and neighbourhood size k, then rewires it according to the Watts-Strogatz procedure with probability p. Inputs: N -- Number of nodes k -- Neighbourhood size of the initial ring lattice p -- Rewiring probability """ # Create a regular string lattice CIJ = NetworkRingLattice(N, k) # Loop over all connections and swap each of them with probability p for i in range(N): for j in range(i+1, N): if CIJ[i, j] and rn.random() < p: # We modify connections in both directions (i.e. [i,j] and [j,i]) # to maintain network undirectedness (i.e. symmetry). CIJ[i, j] = 0 CIJ[j, i] = 0 # PEDRO # h = np.mod(i + np.ceil(rn.random()*(N-1)) - 1, N) h = int(np.mod(i + np.ceil(rn.random()*(N-1)) - 1, N)) CIJ[i, h] = 1 CIJ[h, i] = 1 return(CIJ)
gpl-3.0
jhayworth/config
.emacs.d/elpy/rpc-venv/local/lib/python2.7/site-packages/jedi/inference/context.py
2
16966
from abc import abstractmethod from contextlib import contextmanager from parso.tree import search_ancestor from parso.python.tree import Name from jedi.inference.filters import ParserTreeFilter, MergedFilter, \ GlobalNameFilter from jedi.inference.names import AnonymousParamName, TreeNameDefinition from jedi.inference.base_value import NO_VALUES, ValueSet from jedi.parser_utils import get_parent_scope from jedi import debug from jedi import parser_utils class AbstractContext(object): # Must be defined: inference_state and tree_node and parent_context as an attribute/property def __init__(self, inference_state): self.inference_state = inference_state self.predefined_names = {} @abstractmethod def get_filters(self, until_position=None, origin_scope=None): raise NotImplementedError def goto(self, name_or_str, position): from jedi.inference import finder filters = _get_global_filters_for_name( self, name_or_str if isinstance(name_or_str, Name) else None, position, ) names = finder.filter_name(filters, name_or_str) debug.dbg('context.goto %s in (%s): %s', name_or_str, self, names) return names def py__getattribute__(self, name_or_str, name_context=None, position=None, analysis_errors=True): """ :param position: Position of the last statement -> tuple of line, column """ if name_context is None: name_context = self names = self.goto(name_or_str, position) string_name = name_or_str.value if isinstance(name_or_str, Name) else name_or_str # This paragraph is currently needed for proper branch type inference # (static analysis). 
found_predefined_types = None if self.predefined_names and isinstance(name_or_str, Name): node = name_or_str while node is not None and not parser_utils.is_scope(node): node = node.parent if node.type in ("if_stmt", "for_stmt", "comp_for", 'sync_comp_for'): try: name_dict = self.predefined_names[node] types = name_dict[string_name] except KeyError: continue else: found_predefined_types = types break if found_predefined_types is not None and names: from jedi.inference import flow_analysis check = flow_analysis.reachability_check( context=self, value_scope=self.tree_node, node=name_or_str, ) if check is flow_analysis.UNREACHABLE: values = NO_VALUES else: values = found_predefined_types else: values = ValueSet.from_sets(name.infer() for name in names) if not names and not values and analysis_errors: if isinstance(name_or_str, Name): from jedi.inference import analysis message = ("NameError: name '%s' is not defined." % string_name) analysis.add(name_context, 'name-error', name_or_str, message) debug.dbg('context.names_to_types: %s -> %s', names, values) if values: return values return self._check_for_additional_knowledge(name_or_str, name_context, position) def _check_for_additional_knowledge(self, name_or_str, name_context, position): name_context = name_context or self # Add isinstance and other if/assert knowledge. 
if isinstance(name_or_str, Name) and not name_context.is_instance(): flow_scope = name_or_str base_nodes = [name_context.tree_node] if any(b.type in ('comp_for', 'sync_comp_for') for b in base_nodes): return NO_VALUES from jedi.inference.finder import check_flow_information while True: flow_scope = get_parent_scope(flow_scope, include_flows=True) n = check_flow_information(name_context, flow_scope, name_or_str, position) if n is not None: return n if flow_scope in base_nodes: break return NO_VALUES def get_root_context(self): parent_context = self.parent_context if parent_context is None: return self return parent_context.get_root_context() def is_module(self): return False def is_builtins_module(self): return False def is_class(self): return False def is_stub(self): return False def is_instance(self): return False def is_compiled(self): return False @abstractmethod def py__name__(self): raise NotImplementedError def get_value(self): raise NotImplementedError @property def name(self): return None def get_qualified_names(self): return () def py__doc__(self): return '' @contextmanager def predefine_names(self, flow_scope, dct): predefined = self.predefined_names predefined[flow_scope] = dct try: yield finally: del predefined[flow_scope] class ValueContext(AbstractContext): """ Should be defined, otherwise the API returns empty types. 
""" def __init__(self, value): super(ValueContext, self).__init__(value.inference_state) self._value = value @property def tree_node(self): return self._value.tree_node @property def parent_context(self): return self._value.parent_context def is_module(self): return self._value.is_module() def is_builtins_module(self): return self._value == self.inference_state.builtins_module def is_class(self): return self._value.is_class() def is_stub(self): return self._value.is_stub() def is_instance(self): return self._value.is_instance() def is_compiled(self): return self._value.is_compiled() def py__name__(self): return self._value.py__name__() @property def name(self): return self._value.name def get_qualified_names(self): return self._value.get_qualified_names() def py__doc__(self): return self._value.py__doc__() def get_value(self): return self._value def __repr__(self): return '%s(%s)' % (self.__class__.__name__, self._value) class TreeContextMixin(object): def infer_node(self, node): from jedi.inference.syntax_tree import infer_node return infer_node(self, node) def create_value(self, node): from jedi.inference import value if node == self.tree_node: assert self.is_module() return self.get_value() parent_context = self.create_context(node) if node.type in ('funcdef', 'lambdef'): func = value.FunctionValue.from_context(parent_context, node) if parent_context.is_class(): class_value = parent_context.parent_context.create_value(parent_context.tree_node) instance = value.AnonymousInstance( self.inference_state, parent_context.parent_context, class_value) func = value.BoundMethod( instance=instance, class_context=class_value.as_context(), function=func ) return func elif node.type == 'classdef': return value.ClassValue(self.inference_state, parent_context, node) else: raise NotImplementedError("Probably shouldn't happen: %s" % node) def create_context(self, node): def from_scope_node(scope_node, is_nested=True): if scope_node == self.tree_node: return self if 
scope_node.type in ('funcdef', 'lambdef', 'classdef'): return self.create_value(scope_node).as_context() elif scope_node.type in ('comp_for', 'sync_comp_for'): parent_scope = parser_utils.get_parent_scope(scope_node) parent_context = from_scope_node(parent_scope) if node.start_pos >= scope_node.children[-1].start_pos: return parent_context return CompForContext(parent_context, scope_node) raise Exception("There's a scope that was not managed: %s" % scope_node) def parent_scope(node): while True: node = node.parent if parser_utils.is_scope(node): return node elif node.type in ('argument', 'testlist_comp'): if node.children[1].type in ('comp_for', 'sync_comp_for'): return node.children[1] elif node.type == 'dictorsetmaker': for n in node.children[1:4]: # In dictionaries it can be pretty much anything. if n.type in ('comp_for', 'sync_comp_for'): return n scope_node = parent_scope(node) if scope_node.type in ('funcdef', 'classdef'): colon = scope_node.children[scope_node.children.index(':')] if node.start_pos < colon.start_pos: parent = node.parent if not (parent.type == 'param' and parent.name == node): scope_node = parent_scope(scope_node) return from_scope_node(scope_node, is_nested=True) def create_name(self, tree_name): definition = tree_name.get_definition() if definition and definition.type == 'param' and definition.name == tree_name: funcdef = search_ancestor(definition, 'funcdef', 'lambdef') func = self.create_value(funcdef) return AnonymousParamName(func, tree_name) else: context = self.create_context(tree_name) return TreeNameDefinition(context, tree_name) class FunctionContext(TreeContextMixin, ValueContext): def get_filters(self, until_position=None, origin_scope=None): yield ParserTreeFilter( self.inference_state, parent_context=self, until_position=until_position, origin_scope=origin_scope ) class ModuleContext(TreeContextMixin, ValueContext): def py__file__(self): return self._value.py__file__() def get_filters(self, until_position=None, 
origin_scope=None): filters = self._value.get_filters(origin_scope) # Skip the first filter and replace it. next(filters) yield MergedFilter( ParserTreeFilter( parent_context=self, until_position=until_position, origin_scope=origin_scope ), self.get_global_filter(), ) for f in filters: # Python 2... yield f def get_global_filter(self): return GlobalNameFilter(self, self.tree_node) @property def string_names(self): return self._value.string_names @property def code_lines(self): return self._value.code_lines def get_value(self): """ This is the only function that converts a context back to a value. This is necessary for stub -> python conversion and vice versa. However this method shouldn't be moved to AbstractContext. """ return self._value class NamespaceContext(TreeContextMixin, ValueContext): def get_filters(self, until_position=None, origin_scope=None): return self._value.get_filters() def get_value(self): return self._value def py__file__(self): return self._value.py__file__() class ClassContext(TreeContextMixin, ValueContext): def get_filters(self, until_position=None, origin_scope=None): yield self.get_global_filter(until_position, origin_scope) def get_global_filter(self, until_position=None, origin_scope=None): return ParserTreeFilter( parent_context=self, until_position=until_position, origin_scope=origin_scope ) class CompForContext(TreeContextMixin, AbstractContext): def __init__(self, parent_context, comp_for): super(CompForContext, self).__init__(parent_context.inference_state) self.tree_node = comp_for self.parent_context = parent_context def get_filters(self, until_position=None, origin_scope=None): yield ParserTreeFilter(self) def get_value(self): return None def py__name__(self): return '<comprehension context>' def __repr__(self): return '%s(%s)' % (self.__class__.__name__, self.tree_node) class CompiledContext(ValueContext): def get_filters(self, until_position=None, origin_scope=None): return self._value.get_filters() class 
CompiledModuleContext(CompiledContext): code_lines = None def get_value(self): return self._value @property def string_names(self): return self._value.string_names def py__file__(self): return self._value.py__file__() def _get_global_filters_for_name(context, name_or_none, position): # For functions and classes the defaults don't belong to the # function and get inferred in the value before the function. So # make sure to exclude the function/class name. if name_or_none is not None: ancestor = search_ancestor(name_or_none, 'funcdef', 'classdef', 'lambdef') lambdef = None if ancestor == 'lambdef': # For lambdas it's even more complicated since parts will # be inferred later. lambdef = ancestor ancestor = search_ancestor(name_or_none, 'funcdef', 'classdef') if ancestor is not None: colon = ancestor.children[-2] if position is not None and position < colon.start_pos: if lambdef is None or position < lambdef.children[-2].start_pos: position = ancestor.start_pos return get_global_filters(context, position, name_or_none) def get_global_filters(context, until_position, origin_scope): """ Returns all filters in order of priority for name resolution. For global name lookups. The filters will handle name resolution themselves, but here we gather possible filters downwards. >>> from jedi._compatibility import u, no_unicode_pprint >>> from jedi import Script >>> script = Script(u(''' ... x = ['a', 'b', 'c'] ... def func(): ... y = None ... ''')) >>> module_node = script._module_node >>> scope = next(module_node.iter_funcdefs()) >>> scope <Function: func@3-5> >>> context = script._get_module_context().create_context(scope) >>> filters = list(get_global_filters(context, (4, 0), None)) First we get the names from the function scope. 
>>> no_unicode_pprint(filters[0]) # doctest: +ELLIPSIS MergedFilter(<ParserTreeFilter: ...>, <GlobalNameFilter: ...>) >>> sorted(str(n) for n in filters[0].values()) # doctest: +NORMALIZE_WHITESPACE ['<TreeNameDefinition: string_name=func start_pos=(3, 4)>', '<TreeNameDefinition: string_name=x start_pos=(2, 0)>'] >>> filters[0]._filters[0]._until_position (4, 0) >>> filters[0]._filters[1]._until_position Then it yields the names from one level "lower". In this example, this is the module scope (including globals). As a side note, you can see, that the position in the filter is None on the globals filter, because there the whole module is searched. >>> list(filters[1].values()) # package modules -> Also empty. [] >>> sorted(name.string_name for name in filters[2].values()) # Module attributes ['__doc__', '__name__', '__package__'] Finally, it yields the builtin filter, if `include_builtin` is true (default). >>> list(filters[3].values()) # doctest: +ELLIPSIS [...] """ base_context = context from jedi.inference.value.function import BaseFunctionExecutionContext while context is not None: # Names in methods cannot be resolved within the class. for filter in context.get_filters( until_position=until_position, origin_scope=origin_scope): yield filter if isinstance(context, (BaseFunctionExecutionContext, ModuleContext)): # The position should be reset if the current scope is a function. until_position = None context = context.parent_context # Add builtins to the global scope. yield next(base_context.inference_state.builtins_module.get_filters())
gpl-3.0
soldag/home-assistant
tests/components/climate/test_device_action.py
15
6595
"""The tests for Climate device actions.""" import pytest import voluptuous_serialize import homeassistant.components.automation as automation from homeassistant.components.climate import DOMAIN, const, device_action from homeassistant.helpers import config_validation as cv, device_registry from homeassistant.setup import async_setup_component from tests.common import ( MockConfigEntry, assert_lists_same, async_get_device_automations, async_mock_service, mock_device_registry, mock_registry, ) @pytest.fixture def device_reg(hass): """Return an empty, loaded, registry.""" return mock_device_registry(hass) @pytest.fixture def entity_reg(hass): """Return an empty, loaded, registry.""" return mock_registry(hass) async def test_get_actions(hass, device_reg, entity_reg): """Test we get the expected actions from a climate.""" config_entry = MockConfigEntry(domain="test", data={}) config_entry.add_to_hass(hass) device_entry = device_reg.async_get_or_create( config_entry_id=config_entry.entry_id, connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")}, ) entity_reg.async_get_or_create(DOMAIN, "test", "5678", device_id=device_entry.id) hass.states.async_set("climate.test_5678", const.HVAC_MODE_COOL, {}) hass.states.async_set("climate.test_5678", "attributes", {"supported_features": 17}) expected_actions = [ { "domain": DOMAIN, "type": "set_hvac_mode", "device_id": device_entry.id, "entity_id": "climate.test_5678", }, { "domain": DOMAIN, "type": "set_preset_mode", "device_id": device_entry.id, "entity_id": "climate.test_5678", }, ] actions = await async_get_device_automations(hass, "action", device_entry.id) assert_lists_same(actions, expected_actions) async def test_get_action_hvac_only(hass, device_reg, entity_reg): """Test we get the expected actions from a climate.""" config_entry = MockConfigEntry(domain="test", data={}) config_entry.add_to_hass(hass) device_entry = device_reg.async_get_or_create( config_entry_id=config_entry.entry_id, 
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")}, ) entity_reg.async_get_or_create(DOMAIN, "test", "5678", device_id=device_entry.id) hass.states.async_set("climate.test_5678", const.HVAC_MODE_COOL, {}) hass.states.async_set("climate.test_5678", "attributes", {"supported_features": 1}) expected_actions = [ { "domain": DOMAIN, "type": "set_hvac_mode", "device_id": device_entry.id, "entity_id": "climate.test_5678", }, ] actions = await async_get_device_automations(hass, "action", device_entry.id) assert_lists_same(actions, expected_actions) async def test_action(hass): """Test for actions.""" hass.states.async_set( "climate.entity", const.HVAC_MODE_COOL, { const.ATTR_HVAC_MODES: [const.HVAC_MODE_COOL, const.HVAC_MODE_OFF], const.ATTR_PRESET_MODES: [const.PRESET_HOME, const.PRESET_AWAY], }, ) assert await async_setup_component( hass, automation.DOMAIN, { automation.DOMAIN: [ { "trigger": { "platform": "event", "event_type": "test_event_set_hvac_mode", }, "action": { "domain": DOMAIN, "device_id": "abcdefgh", "entity_id": "climate.entity", "type": "set_hvac_mode", "hvac_mode": const.HVAC_MODE_OFF, }, }, { "trigger": { "platform": "event", "event_type": "test_event_set_preset_mode", }, "action": { "domain": DOMAIN, "device_id": "abcdefgh", "entity_id": "climate.entity", "type": "set_preset_mode", "preset_mode": const.PRESET_AWAY, }, }, ] }, ) set_hvac_mode_calls = async_mock_service(hass, "climate", "set_hvac_mode") set_preset_mode_calls = async_mock_service(hass, "climate", "set_preset_mode") hass.bus.async_fire("test_event_set_hvac_mode") await hass.async_block_till_done() assert len(set_hvac_mode_calls) == 1 assert len(set_preset_mode_calls) == 0 hass.bus.async_fire("test_event_set_preset_mode") await hass.async_block_till_done() assert len(set_hvac_mode_calls) == 1 assert len(set_preset_mode_calls) == 1 async def test_capabilities(hass): """Test getting capabilities.""" hass.states.async_set( "climate.entity", const.HVAC_MODE_COOL, { 
const.ATTR_HVAC_MODES: [const.HVAC_MODE_COOL, const.HVAC_MODE_OFF], const.ATTR_PRESET_MODES: [const.PRESET_HOME, const.PRESET_AWAY], }, ) # Set HVAC mode capabilities = await device_action.async_get_action_capabilities( hass, { "domain": DOMAIN, "device_id": "abcdefgh", "entity_id": "climate.entity", "type": "set_hvac_mode", }, ) assert capabilities and "extra_fields" in capabilities assert voluptuous_serialize.convert( capabilities["extra_fields"], custom_serializer=cv.custom_serializer ) == [ { "name": "hvac_mode", "options": [("cool", "cool"), ("off", "off")], "required": True, "type": "select", } ] # Set preset mode capabilities = await device_action.async_get_action_capabilities( hass, { "domain": DOMAIN, "device_id": "abcdefgh", "entity_id": "climate.entity", "type": "set_preset_mode", }, ) assert capabilities and "extra_fields" in capabilities assert voluptuous_serialize.convert( capabilities["extra_fields"], custom_serializer=cv.custom_serializer ) == [ { "name": "preset_mode", "options": [("home", "home"), ("away", "away")], "required": True, "type": "select", } ]
apache-2.0