repo_name
stringlengths
5
100
ref
stringlengths
12
67
path
stringlengths
4
244
copies
stringlengths
1
8
content
stringlengths
0
1.05M
ansible/ansible-modules-core
refs/heads/devel
network/cumulus/__init__.py
12133432
lino-framework/book
refs/heads/master
lino_book/projects/mti/fixtures/__init__.py
12133432
oliver-sanders/cylc
refs/heads/master
cylc/flow/network/resolvers.py
1
# THIS FILE IS PART OF THE CYLC SUITE ENGINE. # Copyright (C) 2008-2019 NIWA & British Crown (Met Office) & Contributors. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. """GraphQL resolvers for use in data accessing and mutation of workflows.""" from fnmatch import fnmatchcase from getpass import getuser from operator import attrgetter from graphene.utils.str_converters import to_snake_case from cylc.flow.data_store_mgr import ( ID_DELIM, EDGES, FAMILY_PROXIES, TASK_PROXIES, WORKFLOW) from cylc.flow.network.schema import NodesEdges, PROXY_NODES # Message Filters def collate_workflow_atts(workflow): """Collate workflow filter attributes, setting defaults if non-existent.""" # Append new atts to the end of the list, # this will retain order used in index access. return [ workflow.owner, workflow.name, workflow.status, ] def workflow_ids_filter(w_atts, items): """Match id arguments with workflow attributes. Returns a boolean.""" # Return true if workflow matches any id arg. 
for owner, name, status in set(items): if ((not owner or fnmatchcase(w_atts[0], owner)) and (not name or fnmatchcase(w_atts[1], name)) and (not status or w_atts[2] == status)): return True return False def workflow_filter(flow, args): """Filter workflows based on attribute arguments""" w_atts = collate_workflow_atts(flow[WORKFLOW]) # The w_atts (workflow attributes) list contains ordered workflow values # or defaults (see collate function for index item). return ((not args.get('workflows') or workflow_ids_filter(w_atts, args['workflows'])) and not (args.get('exworkflows') and workflow_ids_filter(w_atts, args['exworkflows']))) def collate_node_atts(node): """Collate node filter attributes, setting defaults if non-existent.""" owner, workflow, _ = node.id.split(ID_DELIM, 2) # Append new atts to the end of the list, # this will retain order used in index access # 0 - owner # 1 - workflow # 2 - Cycle point or None # 3 - name or namespace list # 4 - submit number or None # 5 - state return [ owner, workflow, getattr(node, 'cycle_point', None), getattr(node, 'namespace', [node.name]), getattr(node, 'submit_num', None), getattr(node, 'state', None), ] def node_ids_filter(n_atts, items): """Match id arguments with node attributes. Returns a boolean.""" for owner, workflow, cycle, name, submit_num, state in items: if ((not owner or fnmatchcase(n_atts[0], owner)) and (not workflow or fnmatchcase(n_atts[1], workflow)) and (not cycle or fnmatchcase(n_atts[2], cycle)) and any(fnmatchcase(nn, name) for nn in n_atts[3]) and (not submit_num or fnmatchcase(str(n_atts[4]), submit_num.lstrip('0'))) and (not state or n_atts[5] == state)): return True return False def node_filter(node, args): """Filter nodes based on attribute arguments""" n_atts = collate_node_atts(node) # The n_atts (node attributes) list contains ordered node values # or defaults (see collate function for index item). 
return ( (args.get('ghosts') or n_atts[5] != '') and (not args.get('states') or n_atts[5] in args['states']) and not (args.get('exstates') and n_atts[5] in args['exstates']) and (args.get('is_held') is None or (node.is_held == args['is_held'])) and (args.get('mindepth', -1) < 0 or node.depth >= args['mindepth']) and (args.get('maxdepth', -1) < 0 or node.depth <= args['maxdepth']) and # Now filter node against id arg lists (not args.get('ids') or node_ids_filter(n_atts, args['ids'])) and not (args.get('exids') and node_ids_filter(n_atts, args['exids'])) ) def get_flow_data_from_ids(data_store, native_ids): """Return workflow data by id.""" w_ids = set() for native_id in native_ids: o_name, w_name, _ = native_id.split(ID_DELIM, 2) flow_id = f'{o_name}{ID_DELIM}{w_name}' w_ids.add(flow_id) return [ data_store[w_id] for w_id in w_ids if w_id in data_store ] def get_data_elements(flow, nat_ids, element_type): """Return data elements by id.""" return [ flow[element_type][n_id] for n_id in nat_ids if n_id in flow[element_type] ] def sort_elements(elements, args): """Sort iterable of elements by given attribute.""" sort_args = args.get('sort') if sort_args and elements: sort_keys = [ key for key in [to_snake_case(k) for k in sort_args.keys] if hasattr(elements[0], key) ] if sort_keys: elements.sort( key=attrgetter(*sort_keys), reverse=sort_args.reverse) return elements class BaseResolvers: """Data access methods for resolving GraphQL queries.""" def __init__(self, data): self.data = data # Query resolvers async def get_workflows_data(self, args): """Return list of data from workflows.""" return [ flow for flow in self.data.values() if workflow_filter(flow, args)] async def get_workflows(self, args): """Return workflow elements.""" return sort_elements( [flow[WORKFLOW] for flow in await self.get_workflows_data(args)], args) # nodes async def get_nodes_all(self, node_type, args): """Return nodes from all workflows, filter by args.""" return sort_elements( [n for flow in 
await self.get_workflows_data(args) for n in flow.get(node_type).values() if node_filter(n, args)], args) async def get_nodes_by_ids(self, node_type, args): """Return protobuf node objects for given id.""" nat_ids = set(args.get('native_ids', [])) if node_type == PROXY_NODES: node_types = [TASK_PROXIES, FAMILY_PROXIES] else: node_types = [node_type] return sort_elements( [node for flow in get_flow_data_from_ids(self.data, nat_ids) for node_type in node_types for node in get_data_elements(flow, nat_ids, node_type) if node_filter(node, args)], args) async def get_node_by_id(self, node_type, args): """Return protobuf node object for given id.""" n_id = args.get('id') o_name, w_name, _ = n_id.split(ID_DELIM, 2) w_id = f'{o_name}{ID_DELIM}{w_name}' flow = self.data.get(w_id) if not flow: return None if node_type == PROXY_NODES: return ( flow[TASK_PROXIES].get(n_id) or flow[FAMILY_PROXIES].get(n_id)) return flow[node_type].get(n_id) # edges async def get_edges_all(self, args): """Return edges from all workflows, filter by args.""" return sort_elements( [e for flow in await self.get_workflows_data(args) for e in flow.get(EDGES).values()], args) async def get_edges_by_ids(self, args): """Return protobuf edge objects for given id.""" # TODO: Filter by given native ids. nat_ids = set(args.get('native_ids', [])) return sort_elements( [edge for flow in get_flow_data_from_ids(self.data, nat_ids) for edge in get_data_elements(flow, nat_ids, EDGES)], args) async def get_nodes_edges(self, root_nodes, args): """Return nodes and edges within a specified distance of root nodes.""" # Initial root node selection. nodes = root_nodes node_ids = set(n.id for n in root_nodes) edges = [] edge_ids = set() # Setup for edgewise search. new_nodes = root_nodes for _ in range(args['distance']): # Gather edges. # Edges should be unique (graph not circular), # but duplicates will be present as node holds all associated. 
new_edge_ids = set( e_id for n in new_nodes for e_id in n.edges).difference(edge_ids) edge_ids.update(new_edge_ids) new_edges = [ edge for flow in get_flow_data_from_ids(self.data, new_edge_ids) for edge in get_data_elements(flow, new_edge_ids, EDGES) ] edges += new_edges # Gather nodes. # One of source or target will be in current set of nodes. new_node_ids = set( [e.source for e in new_edges] + [e.target for e in new_edges]).difference(node_ids) # Stop searching on no new nodes if not new_node_ids: break node_ids.update(new_node_ids) new_nodes = [ node for flow in get_flow_data_from_ids(self.data, new_node_ids) for node in get_data_elements(flow, new_node_ids, TASK_PROXIES) ] nodes += new_nodes return NodesEdges( nodes=sort_elements(nodes, args), edges=sort_elements(edges, args)) class Resolvers(BaseResolvers): """Workflow Service context GraphQL query and mutation resolvers.""" schd = None def __init__(self, data, **kwargs): super().__init__(data) # Set extra attributes for key, value in kwargs.items(): if hasattr(self, key): setattr(self, key, value) # Mutations async def mutator(self, *m_args): """Mutate workflow.""" _, command, w_args, args = m_args w_ids = [flow[WORKFLOW].id for flow in await self.get_workflows_data(w_args)] if not w_ids: return 'Error: No matching Workflow' w_id = w_ids[0] result = await self._mutation_mapper(command, args) if result is None: result = (True, 'Command queued') return [{'id': w_id, 'response': result}] async def nodes_mutator(self, *m_args): """Mutate node items of associated workflows.""" _, command, ids, w_args, args = m_args w_ids = [flow[WORKFLOW].id for flow in await self.get_workflows_data(w_args)] if not w_ids: return 'Error: No matching Workflow' w_id = w_ids[0] # match proxy ID args with workflows items = [] for owner, workflow, cycle, name, submit_num, state in ids: if workflow and owner is None: owner = "*" if (not (owner and workflow) or fnmatchcase(w_id, f'{owner}{ID_DELIM}{workflow}')): if cycle is None: cycle = 
'*' id_arg = f'{cycle}/{name}' if submit_num: id_arg = f'{id_arg}/{submit_num}' if state: id_arg = f'{id_arg}:{state}' items.append(id_arg) if items: if command == 'insert_tasks': args['items'] = items elif command == 'put_messages': args['task_job'] = items[0] else: args['task_globs'] = items result = await self._mutation_mapper(command, args) if result is None: result = (True, 'Command queued') return [{'id': w_id, 'response': result}] async def _mutation_mapper(self, command, kwargs): """Map between GraphQL resolvers and internal command interface.""" method = getattr(self.schd.server, command) return method(user=getuser(), **kwargs)
vijaysbhat/incubator-airflow
refs/heads/master
airflow/migrations/versions/e3a246e0dc1_current_schema.py
38
# -*- coding: utf-8 -*- # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """current schema Revision ID: e3a246e0dc1 Revises: Create Date: 2015-08-18 16:35:00.883495 """ # revision identifiers, used by Alembic. revision = 'e3a246e0dc1' down_revision = None branch_labels = None depends_on = None from alembic import op import sqlalchemy as sa from sqlalchemy import func from sqlalchemy.engine.reflection import Inspector def upgrade(): conn = op.get_bind() inspector = Inspector.from_engine(conn) tables = inspector.get_table_names() if 'connection' not in tables: op.create_table( 'connection', sa.Column('id', sa.Integer(), nullable=False), sa.Column('conn_id', sa.String(length=250), nullable=True), sa.Column('conn_type', sa.String(length=500), nullable=True), sa.Column('host', sa.String(length=500), nullable=True), sa.Column('schema', sa.String(length=500), nullable=True), sa.Column('login', sa.String(length=500), nullable=True), sa.Column('password', sa.String(length=500), nullable=True), sa.Column('port', sa.Integer(), nullable=True), sa.Column('extra', sa.String(length=5000), nullable=True), sa.PrimaryKeyConstraint('id') ) if 'dag' not in tables: op.create_table( 'dag', sa.Column('dag_id', sa.String(length=250), nullable=False), sa.Column('is_paused', sa.Boolean(), nullable=True), sa.Column('is_subdag', sa.Boolean(), nullable=True), sa.Column('is_active', sa.Boolean(), nullable=True), sa.Column('last_scheduler_run', sa.DateTime(), nullable=True), sa.Column('last_pickled', 
sa.DateTime(), nullable=True), sa.Column('last_expired', sa.DateTime(), nullable=True), sa.Column('scheduler_lock', sa.Boolean(), nullable=True), sa.Column('pickle_id', sa.Integer(), nullable=True), sa.Column('fileloc', sa.String(length=2000), nullable=True), sa.Column('owners', sa.String(length=2000), nullable=True), sa.PrimaryKeyConstraint('dag_id') ) if 'dag_pickle' not in tables: op.create_table( 'dag_pickle', sa.Column('id', sa.Integer(), nullable=False), sa.Column('pickle', sa.PickleType(), nullable=True), sa.Column('created_dttm', sa.DateTime(), nullable=True), sa.Column('pickle_hash', sa.BigInteger(), nullable=True), sa.PrimaryKeyConstraint('id') ) if 'import_error' not in tables: op.create_table( 'import_error', sa.Column('id', sa.Integer(), nullable=False), sa.Column('timestamp', sa.DateTime(), nullable=True), sa.Column('filename', sa.String(length=1024), nullable=True), sa.Column('stacktrace', sa.Text(), nullable=True), sa.PrimaryKeyConstraint('id') ) if 'job' not in tables: op.create_table( 'job', sa.Column('id', sa.Integer(), nullable=False), sa.Column('dag_id', sa.String(length=250), nullable=True), sa.Column('state', sa.String(length=20), nullable=True), sa.Column('job_type', sa.String(length=30), nullable=True), sa.Column('start_date', sa.DateTime(), nullable=True), sa.Column('end_date', sa.DateTime(), nullable=True), sa.Column('latest_heartbeat', sa.DateTime(), nullable=True), sa.Column('executor_class', sa.String(length=500), nullable=True), sa.Column('hostname', sa.String(length=500), nullable=True), sa.Column('unixname', sa.String(length=1000), nullable=True), sa.PrimaryKeyConstraint('id') ) op.create_index( 'job_type_heart', 'job', ['job_type', 'latest_heartbeat'], unique=False ) if 'known_event_type' not in tables: op.create_table( 'known_event_type', sa.Column('id', sa.Integer(), nullable=False), sa.Column('know_event_type', sa.String(length=200), nullable=True), sa.PrimaryKeyConstraint('id') ) if 'log' not in tables: op.create_table( 'log', 
sa.Column('id', sa.Integer(), nullable=False), sa.Column('dttm', sa.DateTime(), nullable=True), sa.Column('dag_id', sa.String(length=250), nullable=True), sa.Column('task_id', sa.String(length=250), nullable=True), sa.Column('event', sa.String(length=30), nullable=True), sa.Column('execution_date', sa.DateTime(), nullable=True), sa.Column('owner', sa.String(length=500), nullable=True), sa.PrimaryKeyConstraint('id') ) if 'sla_miss' not in tables: op.create_table( 'sla_miss', sa.Column('task_id', sa.String(length=250), nullable=False), sa.Column('dag_id', sa.String(length=250), nullable=False), sa.Column('execution_date', sa.DateTime(), nullable=False), sa.Column('email_sent', sa.Boolean(), nullable=True), sa.Column('timestamp', sa.DateTime(), nullable=True), sa.Column('description', sa.Text(), nullable=True), sa.PrimaryKeyConstraint('task_id', 'dag_id', 'execution_date') ) if 'slot_pool' not in tables: op.create_table( 'slot_pool', sa.Column('id', sa.Integer(), nullable=False), sa.Column('pool', sa.String(length=50), nullable=True), sa.Column('slots', sa.Integer(), nullable=True), sa.Column('description', sa.Text(), nullable=True), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('pool') ) if 'task_instance' not in tables: op.create_table( 'task_instance', sa.Column('task_id', sa.String(length=250), nullable=False), sa.Column('dag_id', sa.String(length=250), nullable=False), sa.Column('execution_date', sa.DateTime(), nullable=False), sa.Column('start_date', sa.DateTime(), nullable=True), sa.Column('end_date', sa.DateTime(), nullable=True), sa.Column('duration', sa.Integer(), nullable=True), sa.Column('state', sa.String(length=20), nullable=True), sa.Column('try_number', sa.Integer(), nullable=True), sa.Column('hostname', sa.String(length=1000), nullable=True), sa.Column('unixname', sa.String(length=1000), nullable=True), sa.Column('job_id', sa.Integer(), nullable=True), sa.Column('pool', sa.String(length=50), nullable=True), sa.Column('queue', 
sa.String(length=50), nullable=True), sa.Column('priority_weight', sa.Integer(), nullable=True), sa.PrimaryKeyConstraint('task_id', 'dag_id', 'execution_date') ) op.create_index( 'ti_dag_state', 'task_instance', ['dag_id', 'state'], unique=False ) op.create_index( 'ti_pool', 'task_instance', ['pool', 'state', 'priority_weight'], unique=False ) op.create_index( 'ti_state_lkp', 'task_instance', ['dag_id', 'task_id', 'execution_date', 'state'], unique=False ) if 'user' not in tables: op.create_table( 'user', sa.Column('id', sa.Integer(), nullable=False), sa.Column('username', sa.String(length=250), nullable=True), sa.Column('email', sa.String(length=500), nullable=True), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('username') ) if 'variable' not in tables: op.create_table( 'variable', sa.Column('id', sa.Integer(), nullable=False), sa.Column('key', sa.String(length=250), nullable=True), sa.Column('val', sa.Text(), nullable=True), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('key') ) if 'chart' not in tables: op.create_table( 'chart', sa.Column('id', sa.Integer(), nullable=False), sa.Column('label', sa.String(length=200), nullable=True), sa.Column('conn_id', sa.String(length=250), nullable=False), sa.Column('user_id', sa.Integer(), nullable=True), sa.Column('chart_type', sa.String(length=100), nullable=True), sa.Column('sql_layout', sa.String(length=50), nullable=True), sa.Column('sql', sa.Text(), nullable=True), sa.Column('y_log_scale', sa.Boolean(), nullable=True), sa.Column('show_datatable', sa.Boolean(), nullable=True), sa.Column('show_sql', sa.Boolean(), nullable=True), sa.Column('height', sa.Integer(), nullable=True), sa.Column('default_params', sa.String(length=5000), nullable=True), sa.Column('x_is_date', sa.Boolean(), nullable=True), sa.Column('iteration_no', sa.Integer(), nullable=True), sa.Column('last_modified', sa.DateTime(), nullable=True), sa.ForeignKeyConstraint(['user_id'], ['user.id'], ), sa.PrimaryKeyConstraint('id') ) if 'known_event' 
not in tables: op.create_table( 'known_event', sa.Column('id', sa.Integer(), nullable=False), sa.Column('label', sa.String(length=200), nullable=True), sa.Column('start_date', sa.DateTime(), nullable=True), sa.Column('end_date', sa.DateTime(), nullable=True), sa.Column('user_id', sa.Integer(), nullable=True), sa.Column('known_event_type_id', sa.Integer(), nullable=True), sa.Column('description', sa.Text(), nullable=True), sa.ForeignKeyConstraint(['known_event_type_id'], ['known_event_type.id'], ), sa.ForeignKeyConstraint(['user_id'], ['user.id'], ), sa.PrimaryKeyConstraint('id') ) if 'xcom' not in tables: op.create_table( 'xcom', sa.Column('id', sa.Integer(), nullable=False), sa.Column('key', sa.String(length=512), nullable=True), sa.Column('value', sa.PickleType(), nullable=True), sa.Column( 'timestamp', sa.DateTime(), default=func.now(), nullable=False), sa.Column('execution_date', sa.DateTime(), nullable=False), sa.Column('task_id', sa.String(length=250), nullable=False), sa.Column('dag_id', sa.String(length=250), nullable=False), sa.PrimaryKeyConstraint('id') ) def downgrade(): op.drop_table('known_event') op.drop_table('chart') op.drop_table('variable') op.drop_table('user') op.drop_index('ti_state_lkp', table_name='task_instance') op.drop_index('ti_pool', table_name='task_instance') op.drop_index('ti_dag_state', table_name='task_instance') op.drop_table('task_instance') op.drop_table('slot_pool') op.drop_table('sla_miss') op.drop_table('log') op.drop_table('known_event_type') op.drop_index('job_type_heart', table_name='job') op.drop_table('job') op.drop_table('import_error') op.drop_table('dag_pickle') op.drop_table('dag') op.drop_table('connection') op.drop_table('xcom')
redhat-openstack/rally
refs/heads/master
tests/unit/common/test_sshutils.py
9
# Copyright 2013: Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import mock from rally.common import sshutils from tests.unit import test class FakeParamikoException(Exception): pass class SSHTestCase(test.TestCase): """Test all small SSH methods.""" def setUp(self): super(SSHTestCase, self).setUp() self.ssh = sshutils.SSH("root", "example.net") @mock.patch("rally.common.sshutils.SSH._get_pkey") def test_construct(self, mock_ssh__get_pkey): mock_ssh__get_pkey.return_value = "pkey" ssh = sshutils.SSH("root", "example.net", port=33, pkey="key", key_filename="kf", password="secret") mock_ssh__get_pkey.assert_called_once_with("key") self.assertEqual("root", ssh.user) self.assertEqual("example.net", ssh.host) self.assertEqual(33, ssh.port) self.assertEqual("pkey", ssh.pkey) self.assertEqual("kf", ssh.key_filename) self.assertEqual("secret", ssh.password) def test_construct_default(self): self.assertEqual("root", self.ssh.user) self.assertEqual("example.net", self.ssh.host) self.assertEqual(22, self.ssh.port) self.assertIsNone(self.ssh.pkey) self.assertIsNone(self.ssh.key_filename) self.assertIsNone(self.ssh.password) @mock.patch("rally.common.sshutils.paramiko") def test__get_pkey_invalid(self, mock_paramiko): mock_paramiko.SSHException = FakeParamikoException rsa = mock_paramiko.rsakey.RSAKey dss = mock_paramiko.dsskey.DSSKey rsa.from_private_key.side_effect = mock_paramiko.SSHException dss.from_private_key.side_effect = 
mock_paramiko.SSHException self.assertRaises(sshutils.SSHError, self.ssh._get_pkey, "key") @mock.patch("rally.common.sshutils.six.moves.StringIO") @mock.patch("rally.common.sshutils.paramiko") def test__get_pkey_dss(self, mock_paramiko, mock_string_io): mock_paramiko.SSHException = FakeParamikoException mock_string_io.return_value = "string_key" mock_paramiko.dsskey.DSSKey.from_private_key.return_value = "dss_key" rsa = mock_paramiko.rsakey.RSAKey rsa.from_private_key.side_effect = mock_paramiko.SSHException key = self.ssh._get_pkey("key") dss_calls = mock_paramiko.dsskey.DSSKey.from_private_key.mock_calls self.assertEqual([mock.call("string_key")], dss_calls) self.assertEqual(key, "dss_key") mock_string_io.assert_called_once_with("key") @mock.patch("rally.common.sshutils.six.moves.StringIO") @mock.patch("rally.common.sshutils.paramiko") def test__get_pkey_rsa(self, mock_paramiko, mock_string_io): mock_paramiko.SSHException = FakeParamikoException mock_string_io.return_value = "string_key" mock_paramiko.rsakey.RSAKey.from_private_key.return_value = "rsa_key" dss = mock_paramiko.dsskey.DSSKey dss.from_private_key.side_effect = mock_paramiko.SSHException key = self.ssh._get_pkey("key") rsa_calls = mock_paramiko.rsakey.RSAKey.from_private_key.mock_calls self.assertEqual([mock.call("string_key")], rsa_calls) self.assertEqual(key, "rsa_key") mock_string_io.assert_called_once_with("key") @mock.patch("rally.common.sshutils.SSH._get_pkey") @mock.patch("rally.common.sshutils.paramiko") def test__get_client(self, mock_paramiko, mock_ssh__get_pkey): mock_ssh__get_pkey.return_value = "key" fake_client = mock.Mock() mock_paramiko.SSHClient.return_value = fake_client mock_paramiko.AutoAddPolicy.return_value = "autoadd" ssh = sshutils.SSH("admin", "example.net", pkey="key") client = ssh._get_client() self.assertEqual(fake_client, client) client_calls = [ mock.call.set_missing_host_key_policy("autoadd"), mock.call.connect("example.net", username="admin", port=22, pkey="key", 
key_filename=None, password=None, timeout=1), ] self.assertEqual(client_calls, client.mock_calls) def test_close(self): with mock.patch.object(self.ssh, "_client") as m_client: self.ssh.close() m_client.close.assert_called_once_with() self.assertFalse(self.ssh._client) @mock.patch("rally.common.sshutils.six.moves.StringIO") def test_execute(self, mock_string_io): mock_string_io.side_effect = stdio = [mock.Mock(), mock.Mock()] stdio[0].read.return_value = "stdout fake data" stdio[1].read.return_value = "stderr fake data" with mock.patch.object(self.ssh, "run", return_value=0) as mock_run: status, stdout, stderr = self.ssh.execute("cmd", stdin="fake_stdin", timeout=43) mock_run.assert_called_once_with( "cmd", stdin="fake_stdin", stdout=stdio[0], stderr=stdio[1], timeout=43, raise_on_error=False) self.assertEqual(0, status) self.assertEqual("stdout fake data", stdout) self.assertEqual("stderr fake data", stderr) @mock.patch("rally.common.sshutils.time") def test_wait_timeout(self, mock_time): mock_time.time.side_effect = [1, 50, 150] self.ssh.execute = mock.Mock(side_effect=[sshutils.SSHError, sshutils.SSHError, 0]) self.assertRaises(sshutils.SSHTimeout, self.ssh.wait) self.assertEqual([mock.call("uname")] * 2, self.ssh.execute.mock_calls) @mock.patch("rally.common.sshutils.time") def test_wait(self, mock_time): mock_time.time.side_effect = [1, 50, 100] self.ssh.execute = mock.Mock(side_effect=[sshutils.SSHError, sshutils.SSHError, 0]) self.ssh.wait() self.assertEqual([mock.call("uname")] * 3, self.ssh.execute.mock_calls) class SSHRunTestCase(test.TestCase): """Test SSH.run method in different aspects. Also tested method "execute". 
""" def setUp(self): super(SSHRunTestCase, self).setUp() self.fake_client = mock.Mock() self.fake_session = mock.Mock() self.fake_transport = mock.Mock() self.fake_transport.open_session.return_value = self.fake_session self.fake_client.get_transport.return_value = self.fake_transport self.fake_session.recv_ready.return_value = False self.fake_session.recv_stderr_ready.return_value = False self.fake_session.send_ready.return_value = False self.fake_session.exit_status_ready.return_value = True self.fake_session.recv_exit_status.return_value = 0 self.ssh = sshutils.SSH("admin", "example.net") self.ssh._get_client = mock.Mock(return_value=self.fake_client) @mock.patch("rally.common.sshutils.select") def test_execute(self, mock_select): mock_select.select.return_value = ([], [], []) self.fake_session.recv_ready.side_effect = [1, 0, 0] self.fake_session.recv_stderr_ready.side_effect = [1, 0] self.fake_session.recv.return_value = "ok" self.fake_session.recv_stderr.return_value = "error" self.fake_session.exit_status_ready.return_value = 1 self.fake_session.recv_exit_status.return_value = 127 self.assertEqual((127, "ok", "error"), self.ssh.execute("cmd")) self.fake_session.exec_command.assert_called_once_with("cmd") @mock.patch("rally.common.sshutils.select") def test_execute_args(self, mock_select): mock_select.select.return_value = ([], [], []) self.fake_session.recv_ready.side_effect = [1, 0, 0] self.fake_session.recv_stderr_ready.side_effect = [1, 0] self.fake_session.recv.return_value = "ok" self.fake_session.recv_stderr.return_value = "error" self.fake_session.exit_status_ready.return_value = 1 self.fake_session.recv_exit_status.return_value = 127 result = self.ssh.execute(["cmd", "arg1", "arg2 with space"]) self.assertEqual((127, "ok", "error"), result) self.fake_session.exec_command.assert_called_once_with( "cmd arg1 'arg2 with space'") @mock.patch("rally.common.sshutils.select") def test_run(self, mock_select): mock_select.select.return_value = ([], [], []) 
self.assertEqual(0, self.ssh.run("cmd")) @mock.patch("rally.common.sshutils.select") def test_run_nonzero_status(self, mock_select): mock_select.select.return_value = ([], [], []) self.fake_session.recv_exit_status.return_value = 1 self.assertRaises(sshutils.SSHError, self.ssh.run, "cmd") self.assertEqual(1, self.ssh.run("cmd", raise_on_error=False)) @mock.patch("rally.common.sshutils.select") def test_run_stdout(self, mock_select): mock_select.select.return_value = ([], [], []) self.fake_session.recv_ready.side_effect = [True, True, False] self.fake_session.recv.side_effect = ["ok1", "ok2"] stdout = mock.Mock() self.ssh.run("cmd", stdout=stdout) self.assertEqual([mock.call("ok1"), mock.call("ok2")], stdout.write.mock_calls) @mock.patch("rally.common.sshutils.select") def test_run_stderr(self, mock_select): mock_select.select.return_value = ([], [], []) self.fake_session.recv_stderr_ready.side_effect = [True, False] self.fake_session.recv_stderr.return_value = "error" stderr = mock.Mock() self.ssh.run("cmd", stderr=stderr) stderr.write.assert_called_once_with("error") @mock.patch("rally.common.sshutils.select") def test_run_stdin(self, mock_select): """Test run method with stdin. Third send call was called with "e2" because only 3 bytes was sent by second call. So remainig 2 bytes of "line2" was sent by third call. 
""" mock_select.select.return_value = ([], [], []) self.fake_session.exit_status_ready.side_effect = [0, 0, 0, True] self.fake_session.send_ready.return_value = True self.fake_session.send.side_effect = [5, 3, 2] fake_stdin = mock.Mock() fake_stdin.read.side_effect = ["line1", "line2", ""] fake_stdin.closed = False def close(): fake_stdin.closed = True fake_stdin.close = mock.Mock(side_effect=close) self.ssh.run("cmd", stdin=fake_stdin) call = mock.call send_calls = [call("line1"), call("line2"), call("e2")] self.assertEqual(send_calls, self.fake_session.send.mock_calls) @mock.patch("rally.common.sshutils.select") def test_run_select_error(self, mock_select): self.fake_session.exit_status_ready.return_value = False mock_select.select.return_value = ([], [], [True]) self.assertRaises(sshutils.SSHError, self.ssh.run, "cmd") @mock.patch("rally.common.sshutils.time") @mock.patch("rally.common.sshutils.select") def test_run_timemout(self, mock_select, mock_time): mock_time.time.side_effect = [1, 3700] mock_select.select.return_value = ([], [], []) self.fake_session.exit_status_ready.return_value = False self.assertRaises(sshutils.SSHTimeout, self.ssh.run, "cmd") @mock.patch("rally.common.sshutils.open", create=True) def test__put_file_shell(self, mock_open): self.ssh.run = mock.Mock() self.ssh._put_file_shell("localfile", "remotefile", 0o42) self.ssh.run.assert_called_once_with( "cat > remotefile; chmod 042 remotefile", stdin=mock_open.return_value.__enter__.return_value) @mock.patch("rally.common.sshutils.os.stat") def test__put_file_sftp(self, mock_stat): sftp = self.fake_client.open_sftp.return_value = mock.MagicMock() sftp.__enter__.return_value = sftp mock_stat.return_value = os.stat_result([0o753] + [0] * 9) self.ssh._put_file_sftp("localfile", "remotefile") sftp.put.assert_called_once_with("localfile", "remotefile") mock_stat.assert_called_once_with("localfile") sftp.chmod.assert_called_once_with("remotefile", 0o753) sftp.__exit__.assert_called_once_with(None, 
None, None) def test__put_file_sftp_mode(self): sftp = self.fake_client.open_sftp.return_value = mock.MagicMock() sftp.__enter__.return_value = sftp self.ssh._put_file_sftp("localfile", "remotefile", mode=0o753) sftp.put.assert_called_once_with("localfile", "remotefile") sftp.chmod.assert_called_once_with("remotefile", 0o753) sftp.__exit__.assert_called_once_with(None, None, None) def test_put_file(self): self.ssh._put_file_sftp = mock.Mock( side_effect=sshutils.paramiko.SSHException()) self.ssh._put_file_shell = mock.Mock() self.ssh.put_file("foo", "bar", 42) self.ssh._put_file_sftp.assert_called_once_with("foo", "bar", mode=42) self.ssh._put_file_shell.assert_called_once_with("foo", "bar", mode=42)
ClovisIRex/Snake-django
refs/heads/master
env/lib/python3.6/site-packages/pip/_vendor/requests/status_codes.py
481
# -*- coding: utf-8 -*-

"""Symbolic names for HTTP status codes.

Builds ``codes``, a ``LookupDict`` on which every human-friendly alias
(e.g. ``codes.ok``, ``codes.not_found``) resolves to its integer status
code.  Each code may carry several aliases; every alias is additionally
registered in upper case, except the ASCII-art ones that start with a
backslash.
"""

from .structures import LookupDict

# Maps each status code to the tuple of attribute aliases that will
# resolve to it on the ``codes`` object built below.
_codes = {

    # Informational.
    100: ('continue',),
    101: ('switching_protocols',),
    102: ('processing',),
    103: ('checkpoint',),
    122: ('uri_too_long', 'request_uri_too_long'),
    200: ('ok', 'okay', 'all_ok', 'all_okay', 'all_good', '\\o/', '✓'),
    201: ('created',),
    202: ('accepted',),
    203: ('non_authoritative_info', 'non_authoritative_information'),
    204: ('no_content',),
    205: ('reset_content', 'reset'),
    206: ('partial_content', 'partial'),
    207: ('multi_status', 'multiple_status', 'multi_stati', 'multiple_stati'),
    208: ('already_reported',),
    226: ('im_used',),

    # Redirection.
    300: ('multiple_choices',),
    301: ('moved_permanently', 'moved', '\\o-'),
    302: ('found',),
    303: ('see_other', 'other'),
    304: ('not_modified',),
    305: ('use_proxy',),
    306: ('switch_proxy',),
    307: ('temporary_redirect', 'temporary_moved', 'temporary'),
    308: ('permanent_redirect',
          'resume_incomplete', 'resume',),  # These 2 to be removed in 3.0

    # Client Error.
    400: ('bad_request', 'bad'),
    401: ('unauthorized',),
    402: ('payment_required', 'payment'),
    403: ('forbidden',),
    404: ('not_found', '-o-'),
    405: ('method_not_allowed', 'not_allowed'),
    406: ('not_acceptable',),
    407: ('proxy_authentication_required', 'proxy_auth', 'proxy_authentication'),
    408: ('request_timeout', 'timeout'),
    409: ('conflict',),
    410: ('gone',),
    411: ('length_required',),
    412: ('precondition_failed', 'precondition'),
    413: ('request_entity_too_large',),
    414: ('request_uri_too_large',),
    415: ('unsupported_media_type', 'unsupported_media', 'media_type'),
    416: ('requested_range_not_satisfiable', 'requested_range', 'range_not_satisfiable'),
    417: ('expectation_failed',),
    418: ('im_a_teapot', 'teapot', 'i_am_a_teapot'),
    421: ('misdirected_request',),
    422: ('unprocessable_entity', 'unprocessable'),
    423: ('locked',),
    424: ('failed_dependency', 'dependency'),
    425: ('unordered_collection', 'unordered'),
    426: ('upgrade_required', 'upgrade'),
    428: ('precondition_required', 'precondition'),
    429: ('too_many_requests', 'too_many'),
    431: ('header_fields_too_large', 'fields_too_large'),
    444: ('no_response', 'none'),
    449: ('retry_with', 'retry'),
    450: ('blocked_by_windows_parental_controls', 'parental_controls'),
    451: ('unavailable_for_legal_reasons', 'legal_reasons'),
    499: ('client_closed_request',),

    # Server Error.
    500: ('internal_server_error', 'server_error', '/o\\', '✗'),
    501: ('not_implemented',),
    502: ('bad_gateway',),
    503: ('service_unavailable', 'unavailable'),
    504: ('gateway_timeout',),
    505: ('http_version_not_supported', 'http_version'),
    506: ('variant_also_negotiates',),
    507: ('insufficient_storage',),
    509: ('bandwidth_limit_exceeded', 'bandwidth'),
    510: ('not_extended',),
    511: ('network_authentication_required', 'network_auth', 'network_authentication'),
}

codes = LookupDict(name='status_codes')

# Register every alias on ``codes`` — and its upper-case twin, unless the
# alias is a backslash-prefixed ASCII-art name (which has no meaningful
# upper-case form).
for code, titles in _codes.items():
    for title in titles:
        setattr(codes, title, code)
        if not title.startswith('\\'):
            setattr(codes, title.upper(), code)
jalavik/invenio
refs/heads/master
invenio/modules/pidstore/views.py
5
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.

"""Persistent identifier store views."""

from __future__ import absolute_import, unicode_literals

from flask import Blueprint

from .models import PersistentIdentifier

# Blueprint whose only purpose is to expose the template filters below.
blueprint = Blueprint('pidstore', __name__)


#
# Template filters
#
@blueprint.app_template_filter('pid_exists')
def pid_exists(value, pidtype="doi"):
    """Return ``True`` when a persistent identifier of *pidtype* exists."""
    pid = PersistentIdentifier.get(pidtype, value)
    return pid is not None


@blueprint.app_template_filter('doi_link')
def doi_link(value):
    """Render a DOI as an HTML anchor pointing at its dx.doi.org page."""
    return """<a href="http://dx.doi.org/%(doi)s">%(doi)s</a>""" % {'doi': value}
carlini/cleverhans
refs/heads/master
cleverhans/__init__.py
1
"""The CleverHans adversarial example library""" from cleverhans.devtools.version import append_dev_version # If possible attach a hex digest to the version string to keep track of # changes in the development branch __version__ = append_dev_version('2.0.0')
nypl-spacetime/oldnyc
refs/heads/master
nyc/expand-pickle.py
2
#!/usr/bin/python '''Split single image records in the pickle file into multiple records. Each photo extracted from the original image gets its own record in the new pickle.''' import os, sys parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) sys.path.insert(0,parentdir) import cPickle import copy import record import json from collections import defaultdict assert len(sys.argv) == 4, 'Usage: %s records.pickle photos.json photos.pickle' _, in_pickle, photos_json, out_pickle = sys.argv rs = record.AllRecords(in_pickle) expansions = json.load(file(photos_json)) f = file(out_pickle, "w") p = cPickle.Pickler(f, 2) skipped = 0 num_images, num_photos = 0, 0 for idx, r in enumerate(rs): digital_id = r.photo_id() image_file = '%s.jpg' % digital_id if image_file not in expansions: skipped += 1 continue num_images += 1 if len(expansions[image_file]) == 0: r.thumbnail_url = image_file r.photo_url = image_file p.dump(r) num_photos += 1 else: for photo_file in expansions[image_file].keys(): new_r = copy.deepcopy(r) new_id, _ = os.path.splitext(photo_file) new_r.tabular['i'] = [new_id] new_r.thumbnail_url = photo_file new_r.photo_url = photo_file p.dump(new_r) num_photos += 1 if num_photos % 1000 == 0: sys.stderr.write('Dumped %d images / %d photos\n' % (num_images, num_photos)) sys.stderr.write('Skipped %d records\n' % skipped)
jelugbo/ddi
refs/heads/master
common/lib/capa/capa/safe_exec/lazymod.py
68
"""A module proxy for delayed importing of modules. From http://barnesc.blogspot.com/2006/06/automatic-python-imports-with-autoimp.html, in the public domain. """ import sys class LazyModule(object): """A lazy module proxy.""" def __init__(self, modname): self.__dict__['__name__'] = modname self._set_mod(None) def _set_mod(self, mod): if mod is not None: self.__dict__ = mod.__dict__ self.__dict__['_lazymod_mod'] = mod def _load_mod(self): __import__(self.__name__) self._set_mod(sys.modules[self.__name__]) def __getattr__(self, name): if self.__dict__['_lazymod_mod'] is None: self._load_mod() mod = self.__dict__['_lazymod_mod'] if hasattr(mod, name): return getattr(mod, name) else: try: subname = '%s.%s' % (self.__name__, name) __import__(subname) submod = getattr(mod, name) except ImportError: raise AttributeError("'module' object has no attribute %r" % name) self.__dict__[name] = LazyModule(subname, submod) return self.__dict__[name]
nkmk/python-snippets
refs/heads/master
notebook/numpy_tile.py
1
"""Demonstrate numpy.tile on 1-D and 2-D inputs, printing each result."""
import numpy as np

# A scalar repetition count tiles along the last axis; a tuple of reps
# tiles along several axes at once.
vec = np.array([0, 1, 2, 3])

print(np.tile(vec, 2))
# -> [0 1 2 3 0 1 2 3]

print(np.tile(vec, (3, 2)))
# -> 3 rows, each the vector repeated twice

print(np.tile(vec, (2, 1)))
# -> the vector stacked as 2 identical rows

# With a 2-D input the reps tuple is matched against the trailing axes.
mat = np.array([[11, 12], [21, 22]])

print(np.tile(mat, 2))
# -> columns doubled

print(np.tile(mat, (3, 2)))
# -> 3x2 grid of the matrix

print(np.tile(mat, (2, 1)))
# -> matrix stacked vertically twice
gVallverdu/pymatgen
refs/heads/master
pymatgen/analysis/chemenv/__init__.py
132
# Package marker for pymatgen.analysis.chemenv; exposes authorship metadata only.
__author__ = 'waroquiers'
mollstam/UnrealPy
refs/heads/master
UnrealPyEmbed/Source/Python/Lib/python27/lib-tk/test/test_tkinter/test_text.py
40
import unittest
import Tkinter as tkinter
from test.test_support import requires, run_unittest
from test_ttk.support import AbstractTkTest

# These tests need a display; skip the whole module on headless runs.
requires('gui')


class TextTest(AbstractTkTest, unittest.TestCase):
    """Behavioral tests for the Tkinter Text widget."""

    def setUp(self):
        super(TextTest, self).setUp()
        # Fresh Text widget per test, parented to the shared root window.
        self.text = tkinter.Text(self.root)

    def test_debug(self):
        text = self.text
        olddebug = text.debug()
        try:
            text.debug(0)
            self.assertEqual(text.debug(), 0)
            text.debug(1)
            self.assertEqual(text.debug(), 1)
        finally:
            # Always restore the widget's original debug setting.
            text.debug(olddebug)
        self.assertEqual(text.debug(), olddebug)

    def test_search(self):
        text = self.text

        # pattern and index are obligatory arguments.
        self.assertRaises(tkinter.TclError, text.search, None, '1.0')
        self.assertRaises(tkinter.TclError, text.search, 'a', None)
        self.assertRaises(tkinter.TclError, text.search, None, None)

        # Invalid text index.
        self.assertRaises(tkinter.TclError, text.search, '', 0)

        # Check if we are getting the indices as strings -- you are likely
        # to get Tcl_Obj under Tk 8.5 if Tkinter doesn't convert it.
        text.insert('1.0', 'hi-test')
        self.assertEqual(text.search('-test', '1.0', 'end'), '1.2')
        self.assertEqual(text.search('test', '1.0', 'end'), '1.3')


tests_gui = (TextTest, )

if __name__ == "__main__":
    run_unittest(*tests_gui)
giobauermeister/openembedded
refs/heads/master
recipes/python/python-pyyaml/setup.py
69
# Build script for PyYAML: the pure-Python ``yaml`` package plus the
# libyaml-backed ``_yaml`` extension compiled from Cython sources.

NAME = 'PyYAML'
VERSION = '3.06'
DESCRIPTION = "YAML parser and emitter for Python"
# NOTE(review): line breaks inside LONG_DESCRIPTION were reconstructed from a
# whitespace-collapsed source; verify against the upstream release tarball.
LONG_DESCRIPTION = """\
YAML is a data serialization format designed for human readability and
interaction with scripting languages. PyYAML is a YAML parser and
emitter for Python.

PyYAML features a complete YAML 1.1 parser, Unicode support, pickle
support, capable extension API, and sensible error messages. PyYAML
supports standard YAML tags and provides Python-specific tags that allow
to represent an arbitrary Python object.

PyYAML is applicable for a broad range of tasks from complex
configuration files to object serialization and persistance."""
AUTHOR = "Kirill Simonov"
AUTHOR_EMAIL = 'xi@resolvent.net'
LICENSE = "MIT"
PLATFORMS = "Any"
URL = "http://pyyaml.org/wiki/PyYAML"
DOWNLOAD_URL = "http://pyyaml.org/download/pyyaml/%s-%s.tar.gz" % (NAME, VERSION)
CLASSIFIERS = [
    "Development Status :: 5 - Production/Stable",
    "Intended Audience :: Developers",
    "License :: OSI Approved :: MIT License",
    "Operating System :: OS Independent",
    "Programming Language :: Python",
    "Topic :: Software Development :: Libraries :: Python Modules",
    "Topic :: Text Processing :: Markup",
]

from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext

# Imported for parity with upstream build tweaks; not used in this script.
import sys, os.path

if __name__ == '__main__':

    setup(
        name=NAME,
        version=VERSION,
        description=DESCRIPTION,
        long_description=LONG_DESCRIPTION,
        author=AUTHOR,
        author_email=AUTHOR_EMAIL,
        license=LICENSE,
        platforms=PLATFORMS,
        url=URL,
        download_url=DOWNLOAD_URL,
        classifiers=CLASSIFIERS,

        # Pure-Python package lives under lib/; the extension wraps libyaml.
        package_dir={'': 'lib'},
        packages=['yaml'],
        ext_modules = [
            Extension( "_yaml", ["ext/_yaml.pyx"],
                libraries = ["yaml"] )
        ],

        cmdclass={
            'build_ext': build_ext,
        },
    )
nuagenetworks/tempest
refs/heads/master
tempest/api/compute/keypairs/__init__.py
12133432
willemneal/Docky
refs/heads/master
lib/pygments/styles/vim.py
135
# -*- coding: utf-8 -*- """ pygments.styles.vim ~~~~~~~~~~~~~~~~~~~ A highlighting style for Pygments, inspired by vim. :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ from pygments.style import Style from pygments.token import Keyword, Name, Comment, String, Error, \ Number, Operator, Generic, Whitespace, Token class VimStyle(Style): """ Styles somewhat like vim 7.0 """ background_color = "#000000" highlight_color = "#222222" default_style = "#cccccc" styles = { Token: "#cccccc", Whitespace: "", Comment: "#000080", Comment.Preproc: "", Comment.Special: "bold #cd0000", Keyword: "#cdcd00", Keyword.Declaration: "#00cd00", Keyword.Namespace: "#cd00cd", Keyword.Pseudo: "", Keyword.Type: "#00cd00", Operator: "#3399cc", Operator.Word: "#cdcd00", Name: "", Name.Class: "#00cdcd", Name.Builtin: "#cd00cd", Name.Exception: "bold #666699", Name.Variable: "#00cdcd", String: "#cd0000", Number: "#cd00cd", Generic.Heading: "bold #000080", Generic.Subheading: "bold #800080", Generic.Deleted: "#cd0000", Generic.Inserted: "#00cd00", Generic.Error: "#FF0000", Generic.Emph: "italic", Generic.Strong: "bold", Generic.Prompt: "bold #000080", Generic.Output: "#888", Generic.Traceback: "#04D", Error: "border:#FF0000" }
bennylope/ciafactbook
refs/heads/master
countries/tests.py
6666
""" This file demonstrates writing tests using the unittest module. These will pass when you run "manage.py test". Replace this with more appropriate tests for your application. """ from django.test import TestCase class SimpleTest(TestCase): def test_basic_addition(self): """ Tests that 1 + 1 always equals 2. """ self.assertEqual(1 + 1, 2)
ngokevin/zamboni
refs/heads/master
lib/es/__init__.py
12133432
av8ramit/tensorflow
refs/heads/master
tensorflow/python/ops/distributions/__init__.py
12133432
zakuro9715/lettuce
refs/heads/master
tests/integration/lib/Django-1.2.5/django/conf/locale/mk/__init__.py
12133432
jwren/intellij-community
refs/heads/master
python/testData/completion/importInMiddleOfHierarchy/xyzzy/__init__.py
12133432
KellyChan/Python
refs/heads/master
javascript/backbone/backbone-templates/backbone-fileupload/venvs/lib/python2.7/site-packages/django/conf/locale/ml/__init__.py
12133432
sparrow629/Lofter-image-Crawler
refs/heads/master
TestFile/foldername.py
1
#!/usr/bin/python import re import urllib import os def getHtml(url): page = urllib.urlopen(url) html = page.read() return html def foldername(html,url): reg_month = r'<div class="month">(.*?)</div>' month = re.compile(reg_month) time_month = re.findall(month, html) reg_date = r'<div class="date">(.*?)</div>' date = re.compile(reg_date) time_date = re.findall(date, html) reg_year = r'<div class="year">(.*?)</div>' year = re.compile(reg_year) time_year = re.findall(year, html) postfix = '.lofter.com' blogname = url[7:url.index(postfix)] # print blogname # print time_month # print time_date # print time_year foldername = blogname +"-" + time_year[0] +"-"+ time_month[0] +"-"+ time_date[0] imgpath = r'/Users/sparrow/Desktop/demo/jpgcrawler/%s' % (foldername) if not os.path.exists(imgpath): os.makedirs(imgpath) return imgpath html = getHtml("http://sexvvip.lofter.com/?page=2&t=1467794880000") print foldername(html, "http://sexvvip.lofter.com/?page=2&t=1467794880000")
1st/django
refs/heads/master
tests/i18n/other/locale/__init__.py
12133432
hihacoder/codestyle
refs/heads/master
csc/core/__init__.py
12133432
jbeich/Aquaria
refs/heads/warnings
ExternalLibs/freetype2/src/tools/docmaker/tohtml.py
395
# ToHTML (c) 2002, 2003, 2005, 2006, 2007, 2008 # David Turner <david@freetype.org> from sources import * from content import * from formatter import * import time # The following defines the HTML header used by all generated pages. html_header_1 = """\ <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd"> <html> <head> <meta http-equiv="Content-Type" content="text/html; charset=utf-8"> <title>\ """ html_header_2 = """\ API Reference</title> <style type="text/css"> body { font-family: Verdana, Geneva, Arial, Helvetica, serif; color: #000000; background: #FFFFFF; } p { text-align: justify; } h1 { text-align: center; } li { text-align: justify; } td { padding: 0 0.5em 0 0.5em; } td.left { padding: 0 0.5em 0 0.5em; text-align: left; } a:link { color: #0000EF; } a:visited { color: #51188E; } a:hover { color: #FF0000; } span.keyword { font-family: monospace; text-align: left; white-space: pre; color: darkblue; } pre.colored { color: blue; } ul.empty { list-style-type: none; } </style> </head> <body> """ html_header_3 = """ <table align=center><tr><td><font size=-1>[<a href="\ """ html_header_3i = """ <table align=center><tr><td width="100%"></td> <td><font size=-1>[<a href="\ """ html_header_4 = """\ ">Index</a>]</font></td> <td width="100%"></td> <td><font size=-1>[<a href="\ """ html_header_5 = """\ ">TOC</a>]</font></td></tr></table> <center><h1>\ """ html_header_5t = """\ ">Index</a>]</font></td> <td width="100%"></td></tr></table> <center><h1>\ """ html_header_6 = """\ API Reference</h1></center> """ # The HTML footer used by all generated pages. html_footer = """\ </body> </html>\ """ # The header and footer used for each section. section_title_header = "<center><h1>" section_title_footer = "</h1></center>" # The header and footer used for code segments. code_header = '<pre class="colored">' code_footer = '</pre>' # Paragraph header and footer. para_header = "<p>" para_footer = "</p>" # Block header and footer. 
block_header = '<table align=center width="75%"><tr><td>' block_footer_start = """\ </td></tr></table> <hr width="75%"> <table align=center width="75%"><tr><td><font size=-2>[<a href="\ """ block_footer_middle = """\ ">Index</a>]</font></td> <td width="100%"></td> <td><font size=-2>[<a href="\ """ block_footer_end = """\ ">TOC</a>]</font></td></tr></table> """ # Description header/footer. description_header = '<table align=center width="87%"><tr><td>' description_footer = "</td></tr></table><br>" # Marker header/inter/footer combination. marker_header = '<table align=center width="87%" cellpadding=5><tr bgcolor="#EEEEFF"><td><em><b>' marker_inter = "</b></em></td></tr><tr><td>" marker_footer = "</td></tr></table>" # Header location header/footer. header_location_header = '<table align=center width="87%"><tr><td>' header_location_footer = "</td></tr></table><br>" # Source code extracts header/footer. source_header = '<table align=center width="87%"><tr bgcolor="#D6E8FF"><td><pre>\n' source_footer = "\n</pre></table><br>" # Chapter header/inter/footer. chapter_header = '<br><table align=center width="75%"><tr><td><h2>' chapter_inter = '</h2><ul class="empty"><li>' chapter_footer = '</li></ul></td></tr></table>' # Index footer. index_footer_start = """\ <hr> <table><tr><td width="100%"></td> <td><font size=-2>[<a href="\ """ index_footer_end = """\ ">TOC</a>]</font></td></tr></table> """ # TOC footer. toc_footer_start = """\ <hr> <table><tr><td><font size=-2>[<a href="\ """ toc_footer_end = """\ ">Index</a>]</font></td> <td width="100%"></td> </tr></table> """ # source language keyword coloration/styling keyword_prefix = '<span class="keyword">' keyword_suffix = '</span>' section_synopsis_header = '<h2>Synopsis</h2>' section_synopsis_footer = '' # Translate a single line of source to HTML. This will convert # a "<" into "&lt.", ">" into "&gt.", etc. 
def html_quote( line ): result = string.replace( line, "&", "&amp;" ) result = string.replace( result, "<", "&lt;" ) result = string.replace( result, ">", "&gt;" ) return result # same as 'html_quote', but ignores left and right brackets def html_quote0( line ): return string.replace( line, "&", "&amp;" ) def dump_html_code( lines, prefix = "" ): # clean the last empty lines l = len( self.lines ) while l > 0 and string.strip( self.lines[l - 1] ) == "": l = l - 1 # The code footer should be directly appended to the last code # line to avoid an additional blank line. print prefix + code_header, for line in self.lines[0 : l + 1]: print '\n' + prefix + html_quote( line ), print prefix + code_footer, class HtmlFormatter( Formatter ): def __init__( self, processor, project_title, file_prefix ): Formatter.__init__( self, processor ) global html_header_1, html_header_2, html_header_3 global html_header_4, html_header_5, html_footer if file_prefix: file_prefix = file_prefix + "-" else: file_prefix = "" self.headers = processor.headers self.project_title = project_title self.file_prefix = file_prefix self.html_header = html_header_1 + project_title + \ html_header_2 + \ html_header_3 + file_prefix + "index.html" + \ html_header_4 + file_prefix + "toc.html" + \ html_header_5 + project_title + \ html_header_6 self.html_index_header = html_header_1 + project_title + \ html_header_2 + \ html_header_3i + file_prefix + "toc.html" + \ html_header_5 + project_title + \ html_header_6 self.html_toc_header = html_header_1 + project_title + \ html_header_2 + \ html_header_3 + file_prefix + "index.html" + \ html_header_5t + project_title + \ html_header_6 self.html_footer = "<center><font size=""-2"">generated on " + \ time.asctime( time.localtime( time.time() ) ) + \ "</font></center>" + html_footer self.columns = 3 def make_section_url( self, section ): return self.file_prefix + section.name + ".html" def make_block_url( self, block ): return self.make_section_url( block.section ) + 
"#" + block.name def make_html_words( self, words ): """ convert a series of simple words into some HTML text """ line = "" if words: line = html_quote( words[0] ) for w in words[1:]: line = line + " " + html_quote( w ) return line def make_html_word( self, word ): """analyze a simple word to detect cross-references and styling""" # look for cross-references m = re_crossref.match( word ) if m: try: name = m.group( 1 ) rest = m.group( 2 ) block = self.identifiers[name] url = self.make_block_url( block ) return '<a href="' + url + '">' + name + '</a>' + rest except: # we detected a cross-reference to an unknown item sys.stderr.write( \ "WARNING: undefined cross reference '" + name + "'.\n" ) return '?' + name + '?' + rest # look for italics and bolds m = re_italic.match( word ) if m: name = m.group( 1 ) rest = m.group( 3 ) return '<i>' + name + '</i>' + rest m = re_bold.match( word ) if m: name = m.group( 1 ) rest = m.group( 3 ) return '<b>' + name + '</b>' + rest return html_quote( word ) def make_html_para( self, words ): """ convert words of a paragraph into tagged HTML text, handle xrefs """ line = "" if words: line = self.make_html_word( words[0] ) for word in words[1:]: line = line + " " + self.make_html_word( word ) # convert `...' 
quotations into real left and right single quotes line = re.sub( r"(^|\W)`(.*?)'(\W|$)", \ r'\1&lsquo;\2&rsquo;\3', \ line ) # convert tilde into non-breakable space line = string.replace( line, "~", "&nbsp;" ) return para_header + line + para_footer def make_html_code( self, lines ): """ convert a code sequence to HTML """ line = code_header + '\n' for l in lines: line = line + html_quote( l ) + '\n' return line + code_footer def make_html_items( self, items ): """ convert a field's content into some valid HTML """ lines = [] for item in items: if item.lines: lines.append( self.make_html_code( item.lines ) ) else: lines.append( self.make_html_para( item.words ) ) return string.join( lines, '\n' ) def print_html_items( self, items ): print self.make_html_items( items ) def print_html_field( self, field ): if field.name: print "<table><tr valign=top><td><b>" + field.name + "</b></td><td>" print self.make_html_items( field.items ) if field.name: print "</td></tr></table>" def html_source_quote( self, line, block_name = None ): result = "" while line: m = re_source_crossref.match( line ) if m: name = m.group( 2 ) prefix = html_quote( m.group( 1 ) ) length = len( m.group( 0 ) ) if name == block_name: # this is the current block name, if any result = result + prefix + '<b>' + name + '</b>' elif re_source_keywords.match( name ): # this is a C keyword result = result + prefix + keyword_prefix + name + keyword_suffix elif self.identifiers.has_key( name ): # this is a known identifier block = self.identifiers[name] result = result + prefix + '<a href="' + \ self.make_block_url( block ) + '">' + name + '</a>' else: result = result + html_quote( line[:length] ) line = line[length:] else: result = result + html_quote( line ) line = [] return result def print_html_field_list( self, fields ): print "<p></p>" print "<table cellpadding=3 border=0>" for field in fields: if len( field.name ) > 22: print "<tr valign=top><td colspan=0><b>" + field.name + "</b></td></tr>" print "<tr 
valign=top><td></td><td>" else: print "<tr valign=top><td><b>" + field.name + "</b></td><td>" self.print_html_items( field.items ) print "</td></tr>" print "</table>" def print_html_markup( self, markup ): table_fields = [] for field in markup.fields: if field.name: # we begin a new series of field or value definitions, we # will record them in the 'table_fields' list before outputting # all of them as a single table # table_fields.append( field ) else: if table_fields: self.print_html_field_list( table_fields ) table_fields = [] self.print_html_items( field.items ) if table_fields: self.print_html_field_list( table_fields ) # # Formatting the index # def index_enter( self ): print self.html_index_header self.index_items = {} def index_name_enter( self, name ): block = self.identifiers[name] url = self.make_block_url( block ) self.index_items[name] = url def index_exit( self ): # block_index already contains the sorted list of index names count = len( self.block_index ) rows = ( count + self.columns - 1 ) / self.columns print "<table align=center border=0 cellpadding=0 cellspacing=0>" for r in range( rows ): line = "<tr>" for c in range( self.columns ): i = r + c * rows if i < count: bname = self.block_index[r + c * rows] url = self.index_items[bname] line = line + '<td><a href="' + url + '">' + bname + '</a></td>' else: line = line + '<td></td>' line = line + "</tr>" print line print "</table>" print index_footer_start + \ self.file_prefix + "toc.html" + \ index_footer_end print self.html_footer self.index_items = {} def index_dump( self, index_filename = None ): if index_filename == None: index_filename = self.file_prefix + "index.html" Formatter.index_dump( self, index_filename ) # # Formatting the table of content # def toc_enter( self ): print self.html_toc_header print "<center><h1>Table of Contents</h1></center>" def toc_chapter_enter( self, chapter ): print chapter_header + string.join( chapter.title ) + chapter_inter print "<table cellpadding=5>" def 
toc_section_enter( self, section ): print '<tr valign=top><td class="left">' print '<a href="' + self.make_section_url( section ) + '">' + \ section.title + '</a></td><td>' print self.make_html_para( section.abstract ) def toc_section_exit( self, section ): print "</td></tr>" def toc_chapter_exit( self, chapter ): print "</table>" print chapter_footer def toc_index( self, index_filename ): print chapter_header + \ '<a href="' + index_filename + '">Global Index</a>' + \ chapter_inter + chapter_footer def toc_exit( self ): print toc_footer_start + \ self.file_prefix + "index.html" + \ toc_footer_end print self.html_footer def toc_dump( self, toc_filename = None, index_filename = None ): if toc_filename == None: toc_filename = self.file_prefix + "toc.html" if index_filename == None: index_filename = self.file_prefix + "index.html" Formatter.toc_dump( self, toc_filename, index_filename ) # # Formatting sections # def section_enter( self, section ): print self.html_header print section_title_header print section.title print section_title_footer maxwidth = 0 for b in section.blocks.values(): if len( b.name ) > maxwidth: maxwidth = len( b.name ) width = 70 # XXX magic number if maxwidth <> 0: # print section synopsis print section_synopsis_header print "<table align=center cellspacing=5 cellpadding=0 border=0>" columns = width / maxwidth if columns < 1: columns = 1 count = len( section.block_names ) rows = ( count + columns - 1 ) / columns for r in range( rows ): line = "<tr>" for c in range( columns ): i = r + c * rows line = line + '<td></td><td>' if i < count: name = section.block_names[i] line = line + '<a href="#' + name + '">' + name + '</a>' line = line + '</td>' line = line + "</tr>" print line print "</table><br><br>" print section_synopsis_footer print description_header print self.make_html_items( section.description ) print description_footer def block_enter( self, block ): print block_header # place html anchor if needed if block.name: print '<h4><a name="' + 
block.name + '">' + block.name + '</a></h4>' # dump the block C source lines now if block.code: header = '' for f in self.headers.keys(): if block.source.filename.find( f ) >= 0: header = self.headers[f] + ' (' + f + ')' break; # if not header: # sys.stderr.write( \ # 'WARNING: No header macro for ' + block.source.filename + '.\n' ) if header: print header_location_header print 'Defined in ' + header + '.' print header_location_footer print source_header for l in block.code: print self.html_source_quote( l, block.name ) print source_footer def markup_enter( self, markup, block ): if markup.tag == "description": print description_header else: print marker_header + markup.tag + marker_inter self.print_html_markup( markup ) def markup_exit( self, markup, block ): if markup.tag == "description": print description_footer else: print marker_footer def block_exit( self, block ): print block_footer_start + self.file_prefix + "index.html" + \ block_footer_middle + self.file_prefix + "toc.html" + \ block_footer_end def section_exit( self, section ): print html_footer def section_dump_all( self ): for section in self.sections: self.section_dump( section, self.file_prefix + section.name + '.html' ) # eof
louietsai/python-for-android
refs/heads/master
python-build/python-libs/gdata/build/lib/gdata/data.py
133
#!/usr/bin/env python # # Copyright (C) 2009 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # This module is used for version 2 of the Google Data APIs. """Provides classes and constants for the XML in the Google Data namespace. Documentation for the raw XML which these classes represent can be found here: http://code.google.com/apis/gdata/docs/2.0/elements.html """ __author__ = 'j.s@google.com (Jeff Scudder)' import atom.core import atom.data GDATA_TEMPLATE = '{http://schemas.google.com/g/2005}%s' OPENSEARCH_TEMPLATE = '{http://a9.com/-/spec/opensearchrss/1.0/}%s' BATCH_TEMPLATE = '{http://schemas.google.com/gdata/batch}%s' # Labels used in batch request entries to specify the desired CRUD operation. 
# ---------------------------------------------------------------------------
# Batch operation names: legal values for BatchOperation.type (see BatchFeed).
# ---------------------------------------------------------------------------
BATCH_INSERT = 'insert'
BATCH_UPDATE = 'update'
BATCH_DELETE = 'delete'
BATCH_QUERY = 'query'

# ---------------------------------------------------------------------------
# Google Data category/rel term URIs (gd namespace, schema version 2005).
# These are used as the rel= / value= vocabulary of the element classes below.
# ---------------------------------------------------------------------------

# Event kinds and locations.
EVENT_LOCATION = 'http://schemas.google.com/g/2005#event'
ALTERNATE_LOCATION = 'http://schemas.google.com/g/2005#event.alternate'
PARKING_LOCATION = 'http://schemas.google.com/g/2005#event.parking'

# Event status values.
CANCELED_EVENT = 'http://schemas.google.com/g/2005#event.canceled'
CONFIRMED_EVENT = 'http://schemas.google.com/g/2005#event.confirmed'
TENTATIVE_EVENT = 'http://schemas.google.com/g/2005#event.tentative'

# Event visibility values.
CONFIDENTIAL_EVENT = 'http://schemas.google.com/g/2005#event.confidential'
DEFAULT_EVENT = 'http://schemas.google.com/g/2005#event.default'
PRIVATE_EVENT = 'http://schemas.google.com/g/2005#event.private'
PUBLIC_EVENT = 'http://schemas.google.com/g/2005#event.public'

# Event transparency (free/busy) values.
OPAQUE_EVENT = 'http://schemas.google.com/g/2005#event.opaque'
TRANSPARENT_EVENT = 'http://schemas.google.com/g/2005#event.transparent'

# Message kinds and statuses.
CHAT_MESSAGE = 'http://schemas.google.com/g/2005#message.chat'
INBOX_MESSAGE = 'http://schemas.google.com/g/2005#message.inbox'
SENT_MESSAGE = 'http://schemas.google.com/g/2005#message.sent'
SPAM_MESSAGE = 'http://schemas.google.com/g/2005#message.spam'
STARRED_MESSAGE = 'http://schemas.google.com/g/2005#message.starred'
UNREAD_MESSAGE = 'http://schemas.google.com/g/2005#message.unread'

# Message participant roles (used as rel values in gd:who).
BCC_RECIPIENT = 'http://schemas.google.com/g/2005#message.bcc'
CC_RECIPIENT = 'http://schemas.google.com/g/2005#message.cc'
SENDER = 'http://schemas.google.com/g/2005#message.from'
REPLY_TO = 'http://schemas.google.com/g/2005#message.reply-to'
TO_RECIPIENT = 'http://schemas.google.com/g/2005#message.to'

# Phone number / contact point rel values.
ASSISTANT_REL = 'http://schemas.google.com/g/2005#assistant'
CALLBACK_REL = 'http://schemas.google.com/g/2005#callback'
CAR_REL = 'http://schemas.google.com/g/2005#car'
COMPANY_MAIN_REL = 'http://schemas.google.com/g/2005#company_main'
FAX_REL = 'http://schemas.google.com/g/2005#fax'
HOME_REL = 'http://schemas.google.com/g/2005#home'
HOME_FAX_REL = 'http://schemas.google.com/g/2005#home_fax'
ISDN_REL = 'http://schemas.google.com/g/2005#isdn'
MAIN_REL = 'http://schemas.google.com/g/2005#main'
MOBILE_REL = 'http://schemas.google.com/g/2005#mobile'
OTHER_REL = 'http://schemas.google.com/g/2005#other'
OTHER_FAX_REL = 'http://schemas.google.com/g/2005#other_fax'
PAGER_REL = 'http://schemas.google.com/g/2005#pager'
RADIO_REL = 'http://schemas.google.com/g/2005#radio'
TELEX_REL = 'http://schemas.google.com/g/2005#telex'
# NOTE(review): constant name says TTL_TDD but the term is 'tty_tdd' —
# long-standing name typo kept for backward compatibility; do not "fix".
TTL_TDD_REL = 'http://schemas.google.com/g/2005#tty_tdd'
WORK_REL = 'http://schemas.google.com/g/2005#work'
WORK_FAX_REL = 'http://schemas.google.com/g/2005#work_fax'
WORK_MOBILE_REL = 'http://schemas.google.com/g/2005#work_mobile'
WORK_PAGER_REL = 'http://schemas.google.com/g/2005#work_pager'
NETMEETING_REL = 'http://schemas.google.com/g/2005#netmeeting'

# Rating rel values (see Rating below).
OVERALL_REL = 'http://schemas.google.com/g/2005#overall'
PRICE_REL = 'http://schemas.google.com/g/2005#price'
QUALITY_REL = 'http://schemas.google.com/g/2005#quality'

# Event "where" rel values.
EVENT_REL = 'http://schemas.google.com/g/2005#event'
EVENT_ALTERNATE_REL = 'http://schemas.google.com/g/2005#event.alternate'
EVENT_PARKING_REL = 'http://schemas.google.com/g/2005#event.parking'

# Instant-messaging protocol values (used in gd:im protocol attribute).
AIM_PROTOCOL = 'http://schemas.google.com/g/2005#AIM'
MSN_PROTOCOL = 'http://schemas.google.com/g/2005#MSN'
YAHOO_MESSENGER_PROTOCOL = 'http://schemas.google.com/g/2005#YAHOO'
SKYPE_PROTOCOL = 'http://schemas.google.com/g/2005#SKYPE'
QQ_PROTOCOL = 'http://schemas.google.com/g/2005#QQ'
GOOGLE_TALK_PROTOCOL = 'http://schemas.google.com/g/2005#GOOGLE_TALK'
ICQ_PROTOCOL = 'http://schemas.google.com/g/2005#ICQ'
JABBER_PROTOCOL = 'http://schemas.google.com/g/2005#JABBER'

# Comments rel values (see Comments below).
REGULAR_COMMENTS = 'http://schemas.google.com/g/2005#regular'
REVIEW_COMMENTS = 'http://schemas.google.com/g/2005#reviews'

# Mail class values for structured postal addresses.
MAIL_BOTH = 'http://schemas.google.com/g/2005#both'
MAIL_LETTERS = 'http://schemas.google.com/g/2005#letters'
MAIL_PARCELS = 'http://schemas.google.com/g/2005#parcels'
MAIL_NEITHER = 'http://schemas.google.com/g/2005#neither'

# Address usage values.
GENERAL_ADDRESS = 'http://schemas.google.com/g/2005#general'
LOCAL_ADDRESS = 'http://schemas.google.com/g/2005#local'

# Event attendee type and status values (used in gd:who).
# NOTE(review): "ATENDEE" (single T) is a historical misspelling kept for
# backward compatibility with existing callers; do not rename.
OPTIONAL_ATENDEE = 'http://schemas.google.com/g/2005#event.optional'
REQUIRED_ATENDEE = 'http://schemas.google.com/g/2005#event.required'
ATTENDEE_ACCEPTED = 'http://schemas.google.com/g/2005#event.accepted'
ATTENDEE_DECLINED = 'http://schemas.google.com/g/2005#event.declined'
ATTENDEE_INVITED = 'http://schemas.google.com/g/2005#event.invited'
ATTENDEE_TENTATIVE = 'http://schemas.google.com/g/2005#event.tentative'


class Error(Exception):
    """Base exception for this module's error hierarchy."""
    pass


class MissingRequiredParameters(Error):
    """Raised when a call omits arguments the operation cannot do without.

    See BatchFeed.add_batch_entry, which requires either an entry or an
    id_url_string.
    """
    pass


class LinkFinder(atom.data.LinkFinder):
    """Mixin used in Feed and Entry classes to simplify link lookups by type.

    Provides lookup methods for edit, edit-media, post, ACL and other special
    links which are common across Google Data APIs.

    Each ``find_*`` method returns a URL string (or None); each ``get_*``
    method returns the link object itself (or None).  The CamelCase aliases
    (e.g. FindHtmlLink) preserve the library's legacy naming convention.
    """

    def find_html_link(self):
        """Finds the first link with rel of alternate and type of text/html.

        Returns:
          The href string of the matching link, or None if absent.
        """
        for link in self.link:
            if link.rel == 'alternate' and link.type == 'text/html':
                return link.href
        return None

    FindHtmlLink = find_html_link

    def get_html_link(self):
        """Returns the first alternate text/html link object, or None."""
        for a_link in self.link:
            if a_link.rel == 'alternate' and a_link.type == 'text/html':
                return a_link
        return None

    GetHtmlLink = get_html_link

    def find_post_link(self):
        """Get the URL to which new entries should be POSTed.

        The POST target URL is used to insert new entries.

        Returns:
          A str for the URL in the link with a rel matching the POST type.
        """
        return self.find_url('http://schemas.google.com/g/2005#post')

    FindPostLink = find_post_link

    def get_post_link(self):
        """Returns the link object with the POST rel, or None."""
        return self.get_link('http://schemas.google.com/g/2005#post')

    GetPostLink = get_post_link

    def find_acl_link(self):
        """Returns the URL of the access control list feed, or None."""
        return self.find_url(
            'http://schemas.google.com/acl/2007#accessControlList')

    FindAclLink = find_acl_link

    def get_acl_link(self):
        """Returns the link object for the access control list, or None."""
        return self.get_link(
            'http://schemas.google.com/acl/2007#accessControlList')

    GetAclLink = get_acl_link

    def find_feed_link(self):
        """Returns the URL of the related feed, or None."""
        return self.find_url('http://schemas.google.com/g/2005#feed')

    FindFeedLink = find_feed_link

    def get_feed_link(self):
        """Returns the link object with the feed rel, or None."""
        return self.get_link('http://schemas.google.com/g/2005#feed')

    GetFeedLink = get_feed_link

    def find_previous_link(self):
        """Returns the URL of the previous results page, or None."""
        return self.find_url('previous')

    FindPreviousLink = find_previous_link

    def get_previous_link(self):
        """Returns the link object with rel 'previous', or None."""
        return self.get_link('previous')

    GetPreviousLink = get_previous_link


class TotalResults(atom.core.XmlElement):
    """opensearch:TotalResults for a GData feed."""
    _qname = OPENSEARCH_TEMPLATE % 'totalResults'


class StartIndex(atom.core.XmlElement):
    """The opensearch:startIndex element in GData feed."""
    _qname = OPENSEARCH_TEMPLATE % 'startIndex'


class ItemsPerPage(atom.core.XmlElement):
    """The opensearch:itemsPerPage element in GData feed."""
    _qname = OPENSEARCH_TEMPLATE % 'itemsPerPage'


class ExtendedProperty(atom.core.XmlElement):
    """The Google Data extendedProperty element.

    Used to store arbitrary key-value information specific to your
    application. The value can either be a text string stored as an XML
    attribute (.value), or an XML node (XmlBlob) as a child element.

    This element is used in the Google Calendar data API and the Google
    Contacts data API.
    """
    _qname = GDATA_TEMPLATE % 'extendedProperty'
    # XML attribute name mappings.
    name = 'name'
    value = 'value'

    def get_xml_blob(self):
        """Returns the XML blob as an atom.core.XmlElement.

        Returns:
          An XmlElement representing the blob's XML, or None if no
          blob was set.
        """
        # The blob, if present, is the single child node held in
        # _other_elements (see set_xml_blob below).
        if self._other_elements:
            return self._other_elements[0]
        else:
            return None

    GetXmlBlob = get_xml_blob

    def set_xml_blob(self, blob):
        """Sets the contents of the extendedProperty to XML as a child node.

        Since the extendedProperty is only allowed one child element as an XML
        blob, setting the XML blob will erase any preexisting member elements
        in this object.

        Args:
          blob: str or atom.core.XmlElement representing the XML blob stored
                in the extendedProperty.
        """
        # Erase any existing extension_elements, clears the child nodes from
        # the extendedProperty.
        if isinstance(blob, atom.core.XmlElement):
            self._other_elements = [blob]
        else:
            # A string blob is parsed into an element tree before storing.
            self._other_elements = [atom.core.parse(str(blob))]

    SetXmlBlob = set_xml_blob


class GDEntry(atom.data.Entry, LinkFinder):
    """Extends Atom Entry to provide data processing"""
    etag = '{http://schemas.google.com/g/2005}etag'

    def get_id(self):
        """Returns the stripped text of the atom id, or None if unset."""
        if self.id is not None and self.id.text is not None:
            return self.id.text.strip()
        return None

    GetId = get_id

    def is_media(self):
        """Returns True if this entry is a media entry.

        Detection is based on the presence of an edit-media link.
        """
        if self.find_media_edit_link():
            return True
        return False

    IsMedia = is_media

    def find_media_link(self):
        """Returns the URL to the media content, if the entry is a media entry.

        Otherwise returns None.
        """
        if self.is_media():
            return self.content.src
        return None

    FindMediaLink = find_media_link


class GDFeed(atom.data.Feed, LinkFinder):
    """A Feed from a GData service."""
    etag = '{http://schemas.google.com/g/2005}etag'
    # OpenSearch paging metadata child elements.
    total_results = TotalResults
    start_index = StartIndex
    items_per_page = ItemsPerPage
    entry = [GDEntry]

    def get_id(self):
        """Returns the stripped text of the atom id, or None if unset."""
        if self.id is not None and self.id.text is not None:
            return self.id.text.strip()
        return None

    GetId = get_id

    def get_generator(self):
        """Returns the stripped generator text, or None if unset."""
        if self.generator and self.generator.text:
            return self.generator.text.strip()
        return None


class BatchId(atom.core.XmlElement):
    """Identifies a single operation in a batch request."""
    _qname = BATCH_TEMPLATE % 'id'


class BatchOperation(atom.core.XmlElement):
    """The CRUD operation which this batch entry represents."""
    _qname = BATCH_TEMPLATE % 'operation'
    # One of BATCH_INSERT, BATCH_UPDATE, BATCH_DELETE, BATCH_QUERY.
    type = 'type'


class BatchStatus(atom.core.XmlElement):
    """The batch:status element present in a batch response entry.

    A status element contains the code (HTTP response code) and reason as
    elements. In a single request these fields would be part of the HTTP
    response, but in a batch request each Entry operation has a corresponding
    Entry in the response feed which includes status information.

    See http://code.google.com/apis/gdata/batch.html#Handling_Errors
    """
    _qname = BATCH_TEMPLATE % 'status'
    code = 'code'
    reason = 'reason'
    content_type = 'content-type'


class BatchEntry(GDEntry):
    """An atom:entry for use in batch requests.

    The BatchEntry contains additional members to specify the operation to be
    performed on this entry and a batch ID so that the server can reference
    individual operations in the response feed. For more information, see:
    http://code.google.com/apis/gdata/batch.html
    """
    batch_operation = BatchOperation
    batch_id = BatchId
    batch_status = BatchStatus


class BatchInterrupted(atom.core.XmlElement):
    """The batch:interrupted element sent if batch request was interrupted.

    Only appears in a feed if some of the batch entries could not be
    processed.

    See: http://code.google.com/apis/gdata/batch.html#Handling_Errors
    """
    _qname = BATCH_TEMPLATE % 'interrupted'
    reason = 'reason'
    success = 'success'
    failures = 'failures'
    parsed = 'parsed'


class BatchFeed(GDFeed):
    """A feed containing a list of batch request entries."""
    interrupted = BatchInterrupted
    entry = [BatchEntry]

    def add_batch_entry(self, entry=None, id_url_string=None,
                        batch_id_string=None, operation_string=None):
        """Logic for populating members of a BatchEntry and adding to the feed.

        If the entry is not a BatchEntry, it is converted to a BatchEntry so
        that the batch specific members will be present.

        The id_url_string can be used in place of an entry if the batch
        operation applies to a URL. For example query and delete operations
        require just the URL of an entry, no body is sent in the HTTP request.
        If an id_url_string is sent instead of an entry, a BatchEntry is
        created and added to the feed.

        This method also assigns the desired batch id to the entry so that it
        can be referenced in the server's response. If the batch_id_string is
        None, this method will assign a batch_id to be the index at which this
        entry will be in the feed's entry list.

        Args:
          entry: BatchEntry, atom.data.Entry, or another Entry flavor
              (optional) The entry which will be sent to the server as part
              of the batch request. The item must have a valid atom id so
              that the server knows which entry this request references.
          id_url_string: str (optional) The URL of the entry to be acted on.
              You can find this URL in the text member of the atom id for an
              entry. If an entry is not sent, this id will be used to
              construct a new BatchEntry which will be added to the request
              feed.
          batch_id_string: str (optional) The batch ID to be used to
              reference this batch operation in the results feed. If this
              parameter is None, the current length of the feed's entry array
              will be used as a count. Note that batch_ids should either
              always be specified or never, mixing could potentially result
              in duplicate batch ids.
          operation_string: str (optional) The desired batch operation which
              will set the batch_operation.type member of the entry. Options
              are 'insert', 'update', 'delete', and 'query'

        Raises:
          MissingRequiredParameters: Raised if neither an id_ url_string nor
              an entry are provided in the request.

        Returns:
          The added entry.
        """
        # NOTE(review): despite the docstring, non-BatchEntry entries are NOT
        # converted here; the entry is used as passed in. Callers should pass
        # a BatchEntry (or an entry with compatible batch_* members).
        if entry is None and id_url_string is None:
            raise MissingRequiredParameters('supply either an entry or URL '
                                            'string')
        if entry is None and id_url_string is not None:
            entry = BatchEntry(id=atom.data.Id(text=id_url_string))
        if batch_id_string is not None:
            entry.batch_id = BatchId(text=batch_id_string)
        elif entry.batch_id is None or entry.batch_id.text is None:
            # Default batch id: this entry's index in the feed.
            entry.batch_id = BatchId(text=str(len(self.entry)))
        if operation_string is not None:
            entry.batch_operation = BatchOperation(type=operation_string)
        self.entry.append(entry)
        return entry

    AddBatchEntry = add_batch_entry

    def add_insert(self, entry, batch_id_string=None):
        """Add an insert request to the operations in this batch request feed.

        If the entry doesn't yet have an operation or a batch id, these will
        be set to the insert operation and a batch_id specified as a
        parameter.

        Args:
          entry: BatchEntry The entry which will be sent in the batch feed as
              an insert request.
          batch_id_string: str (optional) The batch ID to be used to
              reference this batch operation in the results feed. If this
              parameter is None, the current length of the feed's entry array
              will be used as a count. Note that batch_ids should either
              always be specified or never, mixing could potentially result
              in duplicate batch ids.
        """
        self.add_batch_entry(entry=entry, batch_id_string=batch_id_string,
                             operation_string=BATCH_INSERT)

    AddInsert = add_insert

    def add_update(self, entry, batch_id_string=None):
        """Add an update request to the list of batch operations in this feed.

        Sets the operation type of the entry to insert if it is not already
        set and assigns the desired batch id to the entry so that it can be
        referenced in the server's response.

        Args:
          entry: BatchEntry The entry which will be sent to the server as an
              update (HTTP PUT) request. The item must have a valid atom id
              so that the server knows which entry to replace.
          batch_id_string: str (optional) The batch ID to be used to
              reference this batch operation in the results feed. If this
              parameter is None, the current length of the feed's entry array
              will be used as a count. See also comments for AddInsert.
        """
        self.add_batch_entry(entry=entry, batch_id_string=batch_id_string,
                             operation_string=BATCH_UPDATE)

    AddUpdate = add_update

    def add_delete(self, url_string=None, entry=None, batch_id_string=None):
        """Adds a delete request to the batch request feed.

        This method takes either the url_string which is the atom id of the
        item to be deleted, or the entry itself. The atom id of the entry
        must be present so that the server knows which entry should be
        deleted.

        Args:
          url_string: str (optional) The URL of the entry to be deleted. You
              can find this URL in the text member of the atom id for an
              entry.
          entry: BatchEntry (optional) The entry to be deleted.
          batch_id_string: str (optional)

        Raises:
          MissingRequiredParameters: Raised if neither a url_string nor an
              entry are provided in the request.
        """
        self.add_batch_entry(entry=entry, id_url_string=url_string,
                             batch_id_string=batch_id_string,
                             operation_string=BATCH_DELETE)

    AddDelete = add_delete

    def add_query(self, url_string=None, entry=None, batch_id_string=None):
        """Adds a query request to the batch request feed.

        This method takes either the url_string which is the query URL whose
        results will be added to the result feed. The query URL will be
        encapsulated in a BatchEntry, and you may pass in the BatchEntry with
        a query URL instead of sending a url_string.

        Args:
          url_string: str (optional)
          entry: BatchEntry (optional)
          batch_id_string: str (optional)

        Raises:
          MissingRequiredParameters
        """
        self.add_batch_entry(entry=entry, id_url_string=url_string,
                             batch_id_string=batch_id_string,
                             operation_string=BATCH_QUERY)

    AddQuery = add_query

    def find_batch_link(self):
        """Returns the URL to which this batch feed should be POSTed."""
        return self.find_url('http://schemas.google.com/g/2005#batch')

    FindBatchLink = find_batch_link


class EntryLink(atom.core.XmlElement):
    """The gd:entryLink element.

    Represents a logically nested entry. For example, a <gd:who>
    representing a contact might have a nested entry from a contact feed.
    """
    _qname = GDATA_TEMPLATE % 'entryLink'
    entry = GDEntry
    rel = 'rel'
    read_only = 'readOnly'
    href = 'href'


class FeedLink(atom.core.XmlElement):
    """The gd:feedLink element.

    Represents a logically nested feed. For example, a calendar feed might
    have a nested feed representing all comments on entries.
    """
    _qname = GDATA_TEMPLATE % 'feedLink'
    feed = GDFeed
    rel = 'rel'
    read_only = 'readOnly'
    count_hint = 'countHint'
    href = 'href'


class AdditionalName(atom.core.XmlElement):
    """The gd:additionalName element.

    Specifies additional (eg. middle) name of the person.
    Contains an attribute for the phonetic representaton of the name.
    """
    _qname = GDATA_TEMPLATE % 'additionalName'
    yomi = 'yomi'


class Comments(atom.core.XmlElement):
    """The gd:comments element.

    Contains a comments feed for the enclosing entry (such as a calendar
    event).
    """
    _qname = GDATA_TEMPLATE % 'comments'
    rel = 'rel'
    feed_link = FeedLink


class Country(atom.core.XmlElement):
    """The gd:country element.

    Country name along with optional country code. The country code is
    given in accordance with ISO 3166-1 alpha-2:
    http://www.iso.org/iso/iso-3166-1_decoding_table
    """
    # NOTE(review): a second Country class is defined later in this module
    # (for structured postal addresses); that later definition shadows this
    # one at module level. Kept as-is for compatibility.
    _qname = GDATA_TEMPLATE % 'country'
    code = 'code'


class EmailImParent(atom.core.XmlElement):
    # Shared attribute mappings for Email and Im elements.
    address = 'address'
    label = 'label'
    rel = 'rel'
    primary = 'primary'


class Email(EmailImParent):
    """The gd:email element.

    An email address associated with the containing entity (which is usually
    an entity representing a person or a location).
    """
    _qname = GDATA_TEMPLATE % 'email'
    display_name = 'displayName'


class FamilyName(atom.core.XmlElement):
    """The gd:familyName element.

    Specifies family name of the person, eg. "Smith".
    """
    _qname = GDATA_TEMPLATE % 'familyName'
    yomi = 'yomi'


class Im(EmailImParent):
    """The gd:im element.

    An instant messaging address associated with the containing entity.
    """
    _qname = GDATA_TEMPLATE % 'im'
    # One of the *_PROTOCOL constants defined above.
    protocol = 'protocol'


class GivenName(atom.core.XmlElement):
    """The gd:givenName element.

    Specifies given name of the person, eg. "John".
    """
    _qname = GDATA_TEMPLATE % 'givenName'
    yomi = 'yomi'


class NamePrefix(atom.core.XmlElement):
    """The gd:namePrefix element.

    Honorific prefix, eg. 'Mr' or 'Mrs'.
    """
    _qname = GDATA_TEMPLATE % 'namePrefix'


class NameSuffix(atom.core.XmlElement):
    """The gd:nameSuffix element.

    Honorific suffix, eg. 'san' or 'III'.
    """
    _qname = GDATA_TEMPLATE % 'nameSuffix'


class FullName(atom.core.XmlElement):
    """The gd:fullName element.

    Unstructured representation of the name.
    """
    _qname = GDATA_TEMPLATE % 'fullName'


class Name(atom.core.XmlElement):
    """The gd:name element.

    Allows storing person's name in a structured way. Consists of given
    name, additional name, family name, prefix, suffix and full name.
    """
    _qname = GDATA_TEMPLATE % 'name'
    given_name = GivenName
    additional_name = AdditionalName
    family_name = FamilyName
    name_prefix = NamePrefix
    name_suffix = NameSuffix
    full_name = FullName


class OrgDepartment(atom.core.XmlElement):
    """The gd:orgDepartment element.

    Describes a department within an organization. Must appear within a
    gd:organization element.
    """
    _qname = GDATA_TEMPLATE % 'orgDepartment'


class OrgJobDescription(atom.core.XmlElement):
    """The gd:orgJobDescription element.

    Describes a job within an organization. Must appear within a
    gd:organization element.
    """
    _qname = GDATA_TEMPLATE % 'orgJobDescription'


class OrgName(atom.core.XmlElement):
    """The gd:orgName element.

    The name of the organization. Must appear within a gd:organization
    element.

    Contains a Yomigana attribute (Japanese reading aid) for the
    organization name.
    """
    _qname = GDATA_TEMPLATE % 'orgName'
    yomi = 'yomi'


class OrgSymbol(atom.core.XmlElement):
    """The gd:orgSymbol element.

    Provides a symbol of an organization. Must appear within a
    gd:organization element.
    """
    _qname = GDATA_TEMPLATE % 'orgSymbol'


class OrgTitle(atom.core.XmlElement):
    """The gd:orgTitle element.

    The title of a person within an organization. Must appear within a
    gd:organization element.
    """
    _qname = GDATA_TEMPLATE % 'orgTitle'


class Organization(atom.core.XmlElement):
    """The gd:organization element.

    An organization, typically associated with a contact.
    """
    _qname = GDATA_TEMPLATE % 'organization'
    label = 'label'
    primary = 'primary'
    rel = 'rel'
    department = OrgDepartment
    job_description = OrgJobDescription
    name = OrgName
    symbol = OrgSymbol
    title = OrgTitle


class When(atom.core.XmlElement):
    """The gd:when element.

    Represents a period of time or an instant.
    """
    _qname = GDATA_TEMPLATE % 'when'
    end = 'endTime'
    start = 'startTime'
    value = 'valueString'


class OriginalEvent(atom.core.XmlElement):
    """The gd:originalEvent element.

    Equivalent to the Recurrence ID property specified in section 4.8.4.4
    of RFC 2445. Appears in every instance of a recurring event, to identify
    the original event.

    Contains a <gd:when> element specifying the original start time of the
    instance that has become an exception.
    """
    _qname = GDATA_TEMPLATE % 'originalEvent'
    id = 'id'
    href = 'href'
    when = When


class PhoneNumber(atom.core.XmlElement):
    """The gd:phoneNumber element.

    A phone number associated with the containing entity (which is usually
    an entity representing a person or a location).
    """
    _qname = GDATA_TEMPLATE % 'phoneNumber'
    label = 'label'
    rel = 'rel'
    uri = 'uri'
    primary = 'primary'


class PostalAddress(atom.core.XmlElement):
    """The gd:postalAddress element."""
    _qname = GDATA_TEMPLATE % 'postalAddress'
    label = 'label'
    rel = 'rel'
    uri = 'uri'
    primary = 'primary'


class Rating(atom.core.XmlElement):
    """The gd:rating element.

    Represents a numeric rating of the enclosing entity, such as a comment.
    Each rating supplies its own scale, although it may be normalized by a
    service; for example, some services might convert all ratings to a
    scale from 1 to 5.
    """
    _qname = GDATA_TEMPLATE % 'rating'
    average = 'average'
    max = 'max'
    min = 'min'
    num_raters = 'numRaters'
    rel = 'rel'
    value = 'value'


class Recurrence(atom.core.XmlElement):
    """The gd:recurrence element.

    Represents the dates and times when a recurring event takes place.

    The string that defines the recurrence consists of a set of properties,
    each of which is defined in the iCalendar standard (RFC 2445).

    Specifically, the string usually begins with a DTSTART property that
    indicates the starting time of the first instance of the event, and
    often a DTEND property or a DURATION property to indicate when the
    first instance ends. Next come RRULE, RDATE, EXRULE, and/or EXDATE
    properties, which collectively define a recurring event and its
    exceptions (but see below). (See section 4.8.5 of RFC 2445 for more
    information about these recurrence component properties.) Last comes a
    VTIMEZONE component, providing detailed timezone rules for any timezone
    ID mentioned in the preceding properties.

    Google services like Google Calendar don't generally generate EXRULE
    and EXDATE properties to represent exceptions to recurring events;
    instead, they generate <gd:recurrenceException> elements. However,
    Google services may include EXRULE and/or EXDATE properties anyway;
    for example, users can import events and exceptions into Calendar, and
    if those imported events contain EXRULE or EXDATE properties, then
    Calendar will provide those properties when it sends a <gd:recurrence>
    element.

    Note the the use of <gd:recurrenceException> means that you can't be
    sure just from examining a <gd:recurrence> element whether there are
    any exceptions to the recurrence description. To ensure that you find
    all exceptions, look for <gd:recurrenceException> elements in the feed,
    and use their <gd:originalEvent> elements to match them up with
    <gd:recurrence> elements.
    """
    _qname = GDATA_TEMPLATE % 'recurrence'


class RecurrenceException(atom.core.XmlElement):
    """The gd:recurrenceException element.

    Represents an event that's an exception to a recurring event-that is,
    an instance of a recurring event in which one or more aspects of the
    recurring event (such as attendance list, time, or location) have been
    changed.

    Contains a <gd:originalEvent> element that specifies the original
    recurring event that this event is an exception to.

    When you change an instance of a recurring event, that instance becomes
    an exception. Depending on what change you made to it, the exception
    behaves in either of two different ways when the original recurring
    event is changed:

    - If you add, change, or remove comments, attendees, or attendee
      responses, then the exception remains tied to the original event, and
      changes to the original event also change the exception.
    - If you make any other changes to the exception (such as changing the
      time or location) then the instance becomes "specialized," which
      means that it's no longer as tightly tied to the original event. If
      you change the original event, specialized exceptions don't change.
      But see below.

    For example, say you have a meeting every Tuesday and Thursday at
    2:00 p.m. If you change the attendance list for this Thursday's meeting
    (but not for the regularly scheduled meeting), then it becomes an
    exception. If you change the time for this Thursday's meeting (but not
    for the regularly scheduled meeting), then it becomes specialized.

    Regardless of whether an exception is specialized or not, if you do
    something that deletes the instance that the exception was derived
    from, then the exception is deleted. Note that changing the day or time
    of a recurring event deletes all instances, and creates new ones.

    For example, after you've specialized this Thursday's meeting, say you
    change the recurring meeting to happen on Monday, Wednesday, and
    Friday. That change deletes all of the recurring instances of the
    Tuesday/Thursday meeting, including the specialized one.

    If a particular instance of a recurring event is deleted, then that
    instance appears as a <gd:recurrenceException> containing a
    <gd:entryLink> that has its <gd:eventStatus> set to
    "http://schemas.google.com/g/2005#event.canceled". (For more
    information about canceled events, see RFC 2445.)
    """
    _qname = GDATA_TEMPLATE % 'recurrenceException'
    specialized = 'specialized'
    entry_link = EntryLink
    original_event = OriginalEvent


class Reminder(atom.core.XmlElement):
    """The gd:reminder element.

    A time interval, indicating how long before the containing entity's
    start time or due time attribute a reminder should be issued.
    Alternatively, may specify an absolute time at which a reminder should
    be issued. Also specifies a notification method, indicating what medium
    the system should use to remind the user.
    """
    _qname = GDATA_TEMPLATE % 'reminder'
    absolute_time = 'absoluteTime'
    method = 'method'
    days = 'days'
    hours = 'hours'
    minutes = 'minutes'


class Agent(atom.core.XmlElement):
    """The gd:agent element.

    The agent who actually receives the mail. Used in work addresses.
    Also for 'in care of' or 'c/o'.
    """
    _qname = GDATA_TEMPLATE % 'agent'


class HouseName(atom.core.XmlElement):
    """The gd:housename element.

    Used in places where houses or buildings have names (and not
    necessarily numbers), eg. "The Pillars".
    """
    _qname = GDATA_TEMPLATE % 'housename'


class Street(atom.core.XmlElement):
    """The gd:street element.

    Can be street, avenue, road, etc. This element also includes the house
    number and room/apartment/flat/floor number.
    """
    _qname = GDATA_TEMPLATE % 'street'


class PoBox(atom.core.XmlElement):
    """The gd:pobox element.

    Covers actual P.O. boxes, drawers, locked bags, etc. This is usually
    but not always mutually exclusive with street.
    """
    _qname = GDATA_TEMPLATE % 'pobox'


class Neighborhood(atom.core.XmlElement):
    """The gd:neighborhood element.

    This is used to disambiguate a street address when a city contains more
    than one street with the same name, or to specify a small place whose
    mail is routed through a larger postal town. In China it could be a
    county or a minor city.
    """
    _qname = GDATA_TEMPLATE % 'neighborhood'


class City(atom.core.XmlElement):
    """The gd:city element.

    Can be city, village, town, borough, etc. This is the postal town and
    not necessarily the place of residence or place of business.
    """
    _qname = GDATA_TEMPLATE % 'city'


class Subregion(atom.core.XmlElement):
    """The gd:subregion element.

    Handles administrative districts such as U.S. or U.K. counties that are
    not used for mail addressing purposes. Subregion is not intended for
    delivery addresses.
    """
    _qname = GDATA_TEMPLATE % 'subregion'


class Region(atom.core.XmlElement):
    """The gd:region element.

    A state, province, county (in Ireland), Land (in Germany),
    departement (in France), etc.
    """
    _qname = GDATA_TEMPLATE % 'region'


class Postcode(atom.core.XmlElement):
    """The gd:postcode element.

    Postal code. Usually country-wide, but sometimes specific to the city
    (e.g. "2" in "Dublin 2, Ireland" addresses).
    """
    _qname = GDATA_TEMPLATE % 'postcode'


class Country(atom.core.XmlElement):
    """The gd:country element.

    The name or code of the country.
    """
    # NOTE(review): redefinition — shadows the earlier Country class (which
    # carried a 'code' attribute) for all module-level lookups after this
    # point, including StructuredPostalAddress.country below. Kept as-is for
    # backward compatibility.
    _qname = GDATA_TEMPLATE % 'country'


class FormattedAddress(atom.core.XmlElement):
    """The gd:formattedAddress element.

    The full, unstructured postal address.
    """
    _qname = GDATA_TEMPLATE % 'formattedAddress'


class StructuredPostalAddress(atom.core.XmlElement):
    """The gd:structuredPostalAddress element.

    Postal address split into components. It allows to store the address in
    locale independent format. The fields can be interpreted and used to
    generate formatted, locale dependent address. The following elements
    reperesent parts of the address: agent, house name, street, P.O. box,
    neighborhood, city, subregion, region, postal code, country.

    The subregion element is not used for postal addresses, it is provided
    for extended uses of addresses only.

    In order to store postal address in an unstructured form formatted
    address field is provided.
    """
    _qname = GDATA_TEMPLATE % 'structuredPostalAddress'
    rel = 'rel'
    mail_class = 'mailClass'
    usage = 'usage'
    label = 'label'
    primary = 'primary'
    agent = Agent
    house_name = HouseName
    street = Street
    po_box = PoBox
    neighborhood = Neighborhood
    city = City
    subregion = Subregion
    region = Region
    postcode = Postcode
    country = Country
    formatted_address = FormattedAddress


class Where(atom.core.XmlElement):
    """The gd:where element.

    A place (such as an event location) associated with the containing
    entity. The type of the association is determined by the rel attribute;
    the details of the location are contained in an embedded or linked-to
    Contact entry.

    A <gd:where> element is more general than a <gd:geoPt> element. The
    former identifies a place using a text description and/or a Contact
    entry, while the latter identifies a place using a specific geographic
    location.
    """
    _qname = GDATA_TEMPLATE % 'where'
    label = 'label'
    rel = 'rel'
    value = 'valueString'
    entry_link = EntryLink


class AttendeeType(atom.core.XmlElement):
    """The gd:attendeeType element."""
    _qname = GDATA_TEMPLATE % 'attendeeType'
    value = 'value'


class AttendeeStatus(atom.core.XmlElement):
    """The gd:attendeeStatus element."""
    _qname = GDATA_TEMPLATE % 'attendeeStatus'
    value = 'value'


class Who(atom.core.XmlElement):
    """The gd:who element.

    A person associated with the containing entity. The type of the
    association is determined by the rel attribute; the details about the
    person are contained in an embedded or linked-to Contact entry.

    The <gd:who> element can be used to specify email senders and
    recipients, calendar event organizers, and so on.
    """
    _qname = GDATA_TEMPLATE % 'who'
    email = 'email'
    rel = 'rel'
    value = 'valueString'
    attendee_status = AttendeeStatus
    attendee_type = AttendeeType
    entry_link = EntryLink
dalgr/svtplay-dl
refs/heads/master
lib/svtplay_dl/service/svtplay.py
1
# ex:ts=4:sw=4:sts=4:et # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- from __future__ import absolute_import import re import os import xml.etree.ElementTree as ET import copy import json import hashlib from svtplay_dl.log import log from svtplay_dl.service import Service, OpenGraphThumbMixin from svtplay_dl.utils import filenamify, is_py2 from svtplay_dl.utils.urllib import urlparse, urljoin, parse_qs from svtplay_dl.fetcher.hds import hdsparse from svtplay_dl.fetcher.hls import hlsparse from svtplay_dl.fetcher.dash import dashparse from svtplay_dl.subtitle import subtitle from svtplay_dl.error import ServiceError class Svtplay(Service, OpenGraphThumbMixin): supported_domains = ['svtplay.se', 'svt.se', 'beta.svtplay.se', 'svtflow.se'] def get(self): parse = urlparse(self.url) if parse.netloc == "www.svtplay.se" or parse.netloc == "svtplay.se": if parse.path[:6] != "/video" and parse.path[:6] != "/klipp": yield ServiceError("This mode is not supported anymore. Need the url with the video.") return query = parse_qs(parse.query) self.access = None if "accessService" in query: self.access = query["accessService"] match = re.search("__svtplay'] = ({.*});", self.get_urldata()) if not match: yield ServiceError("Can't find video info.") return janson = json.loads(match.group(1))["videoPage"] if "programTitle" not in janson["video"]: yield ServiceError("Can't find any video on that page.") return if self.access: for i in janson["video"]["versions"]: if i["accessService"] == self.access: url = urljoin("http://www.svtplay.se", i["contentUrl"]) res = self.http.get(url) match = re.search("__svtplay'] = ({.*});", res.text) if not match: yield ServiceError("Can't find video info.") return janson = json.loads(match.group(1))["videoPage"] if "live" in janson["video"]: self.options.live = janson["video"]["live"] if self.options.output_auto: self.options.service = "svtplay" self.options.output = self.outputfilename(janson["video"], self.options.output) if 
self.exclude(): yield ServiceError("Excluding video.") return if "programVersionId" in janson["video"]: vid = janson["video"]["programVersionId"] else: vid = janson["video"]["id"] res = self.http.get("http://api.svt.se/videoplayer-api/video/{0}".format(vid)) try: janson = res.json() except json.decoder.JSONDecodeError: yield ServiceError("Can't decode api request: {0}".format(res.request.url)) return videos = self._get_video(janson) for i in videos: yield i def _get_video(self, janson): if "live" in janson: self.options.live = janson["live"] if "subtitleReferences" in janson: for i in janson["subtitleReferences"]: if i["format"] == "websrt" and "url" in i: yield subtitle(copy.copy(self.options), "wrst", i["url"]) if "videoReferences" in janson: if len(janson["videoReferences"]) == 0: yield ServiceError("Media doesn't have any associated videos.") return for i in janson["videoReferences"]: streams = None alt_streams = None alt = None query = parse_qs(urlparse(i["url"]).query) if "alt" in query and len(query["alt"]) > 0: alt = self.http.get(query["alt"][0]) if i["format"] == "hls": streams = hlsparse(self.options, self.http.request("get", i["url"]), i["url"]) if alt: alt_streams = hlsparse(self.options, self.http.request("get", alt.request.url), alt.request.url) elif i["format"] == "hds": match = re.search(r"\/se\/secure\/", i["url"]) if not match: streams = hdsparse(self.options, self.http.request("get", i["url"], params={"hdcore": "3.7.0"}), i["url"]) if alt: alt_streams = hdsparse(self.options, self.http.request("get", alt.request.url, params={"hdcore": "3.7.0"}), alt.request.url) elif i["format"] == "dash264" or i["format"] == "dashhbbtv": streams = dashparse(self.options, self.http.request("get", i["url"]), i["url"]) if alt: alt_streams = dashparse(self.options, self.http.request("get", alt.request.url), alt.request.url) if streams: for n in list(streams.keys()): yield streams[n] if alt_streams: for n in list(alt_streams.keys()): yield alt_streams[n] def 
_last_chance(self, videos, page, maxpage=2): if page > maxpage: return videos res = self.http.get("http://www.svtplay.se/sista-chansen?sida={}".format(page)) match = re.search("__svtplay'] = ({.*});", res.text) if not match: return videos dataj = json.loads(match.group(1)) pages = dataj["gridPage"]["pagination"]["totalPages"] for i in dataj["gridPage"]["content"]: videos.append(i["contentUrl"]) page += 1 self._last_chance(videos, page, pages) return videos def _genre(self, jansson): videos = [] parse = urlparse(self._url) dataj = jansson["clusterPage"] tab = re.search("tab=(.+)", parse.query) if tab: tab = tab.group(1) for i in dataj["tabs"]: if i["slug"] == tab: videos = self.videos_to_list(i["content"], videos) else: videos = self.videos_to_list(dataj["clips"], videos) return videos def find_all_episodes(self, options): parse = urlparse(self._url) if len(parse.path) > 7 and parse.path[-7:] == "rss.xml": rss_url = self.url else: rss_url = re.search(r'<link rel="alternate" type="application/rss\+xml" [^>]*href="([^"]+)"', self.get_urldata()) if rss_url: rss_url = rss_url.group(1) valid_rss = False tab = None if parse.query: match = re.search("tab=(.+)", parse.query) if match: tab = match.group(1) #Clips and tab can not be used with RSS-feed if rss_url and not self.options.include_clips and not tab: rss_data = self.http.request("get", rss_url).content try: xml = ET.XML(rss_data) episodes = [x.text for x in xml.findall(".//item/link")] #TODO add better checks for valid RSS-feed here valid_rss = True except ET.ParseError: log.info("Error parsing RSS-feed at %s, make sure it is a valid RSS-feed, will use other method to find episodes." % rss_url) else: #if either tab or include_clips is set remove rss.xml from url if set manually. 
if len(parse.path) > 7 and parse.path[-7:] == "rss.xml": self._url = self.url.replace("rss.xml","") if not valid_rss: videos = [] tab = None match = re.search("__svtplay'] = ({.*});", self.get_urldata()) if re.search("sista-chansen", parse.path): videos = self._last_chance(videos, 1) elif not match: log.error("Couldn't retrieve episode list.") return else: dataj = json.loads(match.group(1)) if re.search("/genre", parse.path): videos = self._genre(dataj) else: if parse.query: match = re.search("tab=(.+)", parse.query) if match: tab = match.group(1) items = dataj["relatedVideoContent"]["relatedVideosAccordion"] for i in items: if tab: if i["slug"] == tab: videos = self.videos_to_list(i["videos"], videos) else: if "klipp" not in i["slug"] and "kommande" not in i["slug"]: videos = self.videos_to_list(i["videos"], videos) if self.options.include_clips: if i["slug"] == "klipp": videos = self.videos_to_list(i["videos"], videos) episodes = [urljoin("http://www.svtplay.se", x) for x in videos] if options.all_last > 0: return sorted(episodes[-options.all_last:]) return sorted(episodes) def videos_to_list(self, lvideos, videos): for n in lvideos: parse = urlparse(n["contentUrl"]) if parse.path not in videos: filename = self.outputfilename(n, self.options.output) if not self.exclude2(filename): videos.append(parse.path) if "versions" in n: for i in n["versions"]: parse = urlparse(i["contentUrl"]) filename = "" # output is None here. 
if "accessService" in i: if i["accessService"] == "audioDescription": filename += "-syntolkat" if i["accessService"] == "signInterpretation": filename += "-teckentolkat" if not self.exclude2(filename) and parse.path not in videos: videos.append(parse.path) return videos def outputfilename(self, data, filename): if filename: directory = os.path.dirname(filename) else: directory = "" name = None if data["programTitle"]: name = filenamify(data["programTitle"]) other = filenamify(data["title"]) if "programVersionId" in data: vid = str(data["programVersionId"]) else: vid = str(data["id"]) if is_py2: id = hashlib.sha256(vid).hexdigest()[:7] else: id = hashlib.sha256(vid.encode("utf-8")).hexdigest()[:7] if name == other: other = None elif name is None: name = other other = None season = self.seasoninfo(data) title = name if season: title += ".{}".format(season) if other: title += ".{}".format(other) if "accessService" in data: if data["accessService"] == "audioDescription": title += "-syntolkat" if data["accessService"] == "signInterpretation": title += "-teckentolkat" title += "-{}-svtplay".format(id) title = filenamify(title) if len(directory): output = os.path.join(directory, title) else: output = title return output def seasoninfo(self, data): if "season" in data and data["season"]: season = "{:02d}".format(data["season"]) episode = "{:02d}".format(data["episodeNumber"]) if int(season) == 0 and int(episode) == 0: return None return "S{}E{}".format(season, episode) else: return None
PeterWangIntel/chromium-crosswalk
refs/heads/master
tools/telemetry/telemetry/value/list_of_string_values_unittest.py
8
# Copyright 2014 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import os import unittest from telemetry import page as page_module from telemetry.page import page_set from telemetry import value from telemetry.value import list_of_string_values from telemetry.value import none_values class TestBase(unittest.TestCase): def setUp(self): ps = page_set.PageSet(file_path=os.path.dirname(__file__)) ps.AddUserStory(page_module.Page('http://www.bar.com/', ps, ps.base_dir)) ps.AddUserStory(page_module.Page('http://www.baz.com/', ps, ps.base_dir)) ps.AddUserStory(page_module.Page('http://www.foo.com/', ps, ps.base_dir)) self.page_set = ps @property def pages(self): return self.page_set.pages class ListOfStringValuesTest(TestBase): def testListSamePageMergingWithSamePageConcatenatePolicy(self): page0 = self.pages[0] v0 = list_of_string_values.ListOfStringValues( page0, 'x', 'label', ['L1', 'L2'], same_page_merge_policy=value.CONCATENATE) v1 = list_of_string_values.ListOfStringValues( page0, 'x', 'label', ['L3', 'L4'], same_page_merge_policy=value.CONCATENATE) self.assertTrue(v1.IsMergableWith(v0)) vM = (list_of_string_values.ListOfStringValues. MergeLikeValuesFromSamePage([v0, v1])) self.assertEquals(page0, vM.page) self.assertEquals('x', vM.name) self.assertEquals('label', vM.units) self.assertEquals(value.CONCATENATE, vM.same_page_merge_policy) self.assertEquals(True, vM.important) self.assertEquals(['L1', 'L2', 'L3', 'L4'], vM.values) def testListSamePageMergingWithPickFirstPolicy(self): page0 = self.pages[0] v0 = list_of_string_values.ListOfStringValues( page0, 'x', 'label', ['L1', 'L2'], same_page_merge_policy=value.PICK_FIRST) v1 = list_of_string_values.ListOfStringValues( page0, 'x', 'label', ['L3', 'L4'], same_page_merge_policy=value.PICK_FIRST) self.assertTrue(v1.IsMergableWith(v0)) vM = (list_of_string_values.ListOfStringValues. 
MergeLikeValuesFromSamePage([v0, v1])) self.assertEquals(page0, vM.page) self.assertEquals('x', vM.name) self.assertEquals('label', vM.units) self.assertEquals(value.PICK_FIRST, vM.same_page_merge_policy) self.assertEquals(True, vM.important) self.assertEquals(['L1', 'L2'], vM.values) def testListDifferentPageMerging(self): page0 = self.pages[0] page1 = self.pages[0] v0 = list_of_string_values.ListOfStringValues( page0, 'x', 'label', ['L1', 'L2'], same_page_merge_policy=value.CONCATENATE) v1 = list_of_string_values.ListOfStringValues( page1, 'x', 'label', ['L3', 'L4'], same_page_merge_policy=value.CONCATENATE) self.assertTrue(v1.IsMergableWith(v0)) vM = (list_of_string_values.ListOfStringValues. MergeLikeValuesFromDifferentPages([v0, v1])) self.assertEquals(None, vM.page) self.assertEquals('x', vM.name) self.assertEquals('label', vM.units) self.assertEquals(value.CONCATENATE, vM.same_page_merge_policy) self.assertEquals(True, vM.important) self.assertEquals(['L1', 'L2', 'L3', 'L4'], vM.values) def testListWithNoneValueMerging(self): page0 = self.pages[0] v0 = list_of_string_values.ListOfStringValues( page0, 'x', 'unit', ['L1', 'L2'], same_page_merge_policy=value.CONCATENATE) v1 = list_of_string_values.ListOfStringValues( page0, 'x', 'unit', None, same_page_merge_policy=value.CONCATENATE, none_value_reason='n') self.assertTrue(v1.IsMergableWith(v0)) vM = (list_of_string_values.ListOfStringValues. 
MergeLikeValuesFromSamePage([v0, v1])) self.assertEquals(None, vM.values) self.assertEquals(none_values.MERGE_FAILURE_REASON, vM.none_value_reason) def testListWithNoneValueMustHaveNoneReason(self): page0 = self.pages[0] self.assertRaises(none_values.NoneValueMissingReason, lambda: list_of_string_values.ListOfStringValues( page0, 'x', 'unit', None)) def testListWithNoneReasonMustHaveNoneValue(self): page0 = self.pages[0] self.assertRaises(none_values.ValueMustHaveNoneValue, lambda: list_of_string_values.ListOfStringValues( page0, 'x', 'unit', ['L1', 'L2'], none_value_reason='n')) def testAsDict(self): v = list_of_string_values.ListOfStringValues( None, 'x', 'unit', ['foo', 'bar'], same_page_merge_policy=value.PICK_FIRST, important=False) d = v.AsDictWithoutBaseClassEntries() self.assertEquals(d, { 'values': ['foo', 'bar'] }) def testNoneValueAsDict(self): v = list_of_string_values.ListOfStringValues( None, 'x', 'unit', None, same_page_merge_policy=value.PICK_FIRST, important=False, none_value_reason='n') d = v.AsDictWithoutBaseClassEntries() self.assertEquals(d, { 'values': None, 'none_value_reason': 'n' }) def testFromDict(self): d = { 'type': 'list_of_string_values', 'name': 'x', 'units': 'unit', 'values': ['foo', 'bar'] } v = value.Value.FromDict(d, {}) self.assertTrue(isinstance(v, list_of_string_values.ListOfStringValues)) self.assertEquals(v.values, ['foo', 'bar']) def testFromDictNoneValue(self): d = { 'type': 'list_of_string_values', 'name': 'x', 'units': 'unit', 'values': None, 'none_value_reason': 'n' } v = value.Value.FromDict(d, {}) self.assertTrue(isinstance(v, list_of_string_values.ListOfStringValues)) self.assertEquals(v.values, None) self.assertEquals(v.none_value_reason, 'n')
apporc/cinder
refs/heads/master
cinder/tests/unit/backup/fake_swift_client2.py
28
# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
# Copyright (C) 2014 TrilioData, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import hashlib
import os
import socket
import tempfile

from six.moves import http_client
from swiftclient import client as swift


class FakeSwiftClient2(object):
    """Stand-in for the swiftclient module used by backup unit tests."""

    def __init__(self, *args, **kwargs):
        pass

    @classmethod
    def Connection(self, *args, **kargs):
        # Mirrors swiftclient.client.Connection(...) factory usage.
        return FakeSwiftConnection2()


class FakeSwiftConnection2(object):
    """Fake Swift connection that stores objects on the local filesystem.

    Objects are read from / written to ``<tempdir>/<container>/<name>``;
    specially named containers trigger error paths so tests can exercise
    failure handling.
    """

    def __init__(self, *args, **kwargs):
        self.tempdir = tempfile.mkdtemp()

    def head_container(self, container):
        """Raise the error matching the magic container name, else succeed."""
        if container == 'missing_container':
            raise swift.ClientException('fake exception',
                                        http_status=http_client.NOT_FOUND)
        elif container == 'unauthorized_container':
            raise swift.ClientException('fake exception',
                                        http_status=http_client.UNAUTHORIZED)
        elif container == 'socket_error_on_head':
            raise socket.error(111, 'ECONNREFUSED')

    def put_container(self, container):
        pass

    def get_container(self, container, **kwargs):
        """List files in the container directory matching kwargs['prefix']."""
        fake_header = None
        container_dir = tempfile.gettempdir() + '/' + container
        fake_body = []
        for f in os.listdir(container_dir):
            try:
                # str.index raises ValueError when the prefix is absent,
                # which skips the entry via the except below.
                f.index(kwargs['prefix'])
                fake_body.append({'name': f})
            except Exception:
                pass
        return fake_header, fake_body

    def head_object(self, container, name):
        return {'etag': 'fake-md5-sum'}

    def get_object(self, container, name):
        """Return (headers, data) for a stored object; headers are None."""
        if container == 'socket_error_on_get':
            raise socket.error(111, 'ECONNREFUSED')
        object_path = tempfile.gettempdir() + '/' + container + '/' + name
        with open(object_path, 'rb') as object_file:
            return (None, object_file.read())

    def put_object(self, container, name, reader, content_length=None,
                   etag=None, chunk_size=None, content_type=None,
                   headers=None, query_string=None):
        """Store the object and return its md5 hexdigest (fake etag).

        BUG FIX: the original wrote ``reader.read()`` to the file and then
        called ``reader.read()`` a second time to compute the checksum; the
        stream was already exhausted, so the returned etag was always the
        md5 of an empty string.  Read once and hash the same bytes that
        were written.
        """
        object_path = tempfile.gettempdir() + '/' + container + '/' + name
        data = reader.read()
        with open(object_path, 'wb') as object_file:
            object_file.write(data)
        return hashlib.md5(data).hexdigest()

    def delete_object(self, container, name):
        pass
eayunstack/horizon
refs/heads/master
openstack_dashboard/dashboards/admin/dashboard.py
31
# Copyright 2012 Nebula, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from django.utils.translation import ugettext_lazy as _ import horizon class SystemPanels(horizon.PanelGroup): slug = "admin" name = _("System") panels = ('overview', 'metering', 'hypervisors', 'aggregates', 'instances', 'volumes', 'flavors', 'images', 'networks', 'routers', 'defaults', 'metadata_defs', 'info') class Admin(horizon.Dashboard): name = _("Admin") slug = "admin" panels = (SystemPanels,) default_panel = 'overview' permissions = ('openstack.roles.admin',) horizon.register(Admin)
2PacIsAlive/deepnet.works
refs/heads/master
deep_networks/data/dicom/plot.py
2
import logging #import pylab import matplotlib.pyplot as plt from skimage import measure from mpl_toolkits.mplot3d.art3d import Poly3DCollection class Plotter(object): logging.basicConfig(level=logging.INFO) log = logging.getLogger(__name__) def plot_3d(self, image, threshold=-300): """ Adapted from: https://www.kaggle.com/gzuidhof/data-science-bowl-2017/full-preprocessing-tutorial """ # Position the scan upright, # so the head of the patient would be at the top facing the camera p = image.transpose(2,1,0) verts, faces = measure.marching_cubes(p, threshold) fig = plt.figure(figsize=(10, 10)) ax = fig.add_subplot(111, projection='3d') # Fancy indexing: `verts[faces]` to generate a collection of triangles mesh = Poly3DCollection(verts[faces], alpha=0.1) face_color = [0.5, 0.5, 1] mesh.set_facecolor(face_color) ax.add_collection3d(mesh) ax.set_xlim(0, p.shape[0]) ax.set_ylim(0, p.shape[1]) ax.set_zlim(0, p.shape[2]) plt.show() def plot(dataset): log.info("plotting {}".format(dataset.PatientsName)) #pylab.imshow(dataset.pixel_array, cmap=pylab.cm.bone) def save(dataset): log.info("saving {}".format(dataset.PatientsName)) pylab.imshow(dataset.pixel_array, cmap=pylab.cm.bone) pylab.savefig(dataset.PatientsName + '.png', bbox_inches='tight')
smendez/lean
refs/heads/master
paart/apps/agencies/migrations/0002_auto__add_field_agency_registration.py
2
# -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding field 'Agency.registration' db.add_column('agencies_agency', 'registration', self.gf('django.db.models.fields.PositiveIntegerField')(default=0, unique=True), keep_default=False) def backwards(self, orm): # Deleting field 'Agency.registration' db.delete_column('agencies_agency', 'registration') models = { 'agencies.agency': { 'Meta': {'ordering': "['name']", 'object_name': 'Agency'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}), 'registration': ('django.db.models.fields.PositiveIntegerField', [], {'unique': 'True'}) } } complete_apps = ['agencies']
grimmjow8/ansible
refs/heads/devel
test/integration/targets/module_utils/module_utils/sub/bam/bam.py
298
#!/usr/bin/env python bam = "BAM FROM sub/bam/bam.py"
Ledoux/ShareYourSystem
refs/heads/master
Pythonlogy/ShareYourSystem/Standards/Viewers/Boxer/01_ExampleDoc.py
1
#ImportModules import ShareYourSystem as SYS #Definition an Tree instance MyBoxer=SYS.BoxerClass().view() MyBoxer.MeteoredConcurrentDDPClientVariable.stop() #Definition the AttestedStr SYS._attest( [ 'MyBoxer is '+SYS._str( MyBoxer, **{ 'RepresentingBaseKeyStrsListBool':False, 'RepresentingAlineaIsBool':False } ) ] ) #Print
toolmacher/micropython
refs/heads/master
tests/basics/try_finally_return.py
82
def func1(): try: return "it worked" finally: print("finally 1") print(func1()) def func2(): try: return "it worked" finally: print("finally 2") def func3(): try: s = func2() return s + ", did this work?" finally: print("finally 3") print(func3()) # for loop within try-finally def f(): try: for i in [1, 2]: return i finally: print('finally') print(f()) # multiple for loops within try-finally def f(): try: for i in [1, 2]: for j in [3, 4]: return (i, j) finally: print('finally') print(f()) # multiple for loops and nested try-finally's def f(): try: for i in [1, 2]: for j in [3, 4]: try: for k in [5, 6]: for l in [7, 8]: return (i, j, k, l) finally: print('finally 2') finally: print('finally 1') print(f()) # multiple for loops that are optimised, and nested try-finally's def f(): try: for i in range(1, 3): for j in range(3, 5): try: for k in range(5, 7): for l in range(7, 9): return (i, j, k, l) finally: print('finally 2') finally: print('finally 1') print(f())
ingokegel/intellij-community
refs/heads/master
python/testData/mover/multiLineSelection4.py
83
class Test(object): <selection> a = 1 b = 2 <caret></selection> def q(self): c = 3
ibarria0/Cactus
refs/heads/master
cactus/tests/data/skeleton/plugins/version.py
5
from __future__ import print_function import os INFO = { 'name': 'Version Updater', 'description': 'Add a version to /versions.txt after each deploy' } # Set up extra django template tags def templateTags(): pass # Build actions # def preBuild(site): # print 'preBuild' # # def postBuild(site): # print 'postBuild' # Build page actions # def preBuildPage(site, path, context, data): # print 'preBuildPage', path # return context, data # # def postBuildPage(site, path): # print 'postBuildPage', path # pass # Deploy actions def preDeploy(site): # Add a deploy log at /versions.txt import urllib2 import datetime import platform import codecs import getpass url = site.config.get('aws-bucket-website') data = u'' # If this is the first deploy we don't have to fetch the old file if url: try: data = urllib2.urlopen('http://%s/versions.txt' % url, timeout=8.0).read() + u'\n' except: print("Could not fetch the previous versions.txt, skipping...") return data += u'\t'.join([datetime.datetime.now().isoformat(), platform.node(), getpass.getuser()]) codecs.open(os.path.join(site.paths['build'], 'versions.txt'), 'w', 'utf8').write(data) def postDeploy(site): pass
isidorn/test2
refs/heads/master
packaging/scripts/VirtuaBuild/smoke_test.py
6
#!/usr/bin/python # Copyright 2010-2012 RethinkDB, all rights reserved. # usage: ./smoke_test.py --mode OS_NAME --num-keys SOME_NUMBER_HERE import time, sys, os, socket, random, time, signal, subprocess sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir, 'test', 'common'))) import driver, memcached_workload_common from vcoptparse import * op = OptParser() op["num_keys"] = IntFlag("--num-keys", 500) op["mode"] = StringFlag("--mode", "debug") op["pkg_type"] = StringFlag("--pkg-type", "deb") # "deb" or "rpm" opts = op.parse(sys.argv) num_keys = opts["num_keys"] base_port = 11213 # port that RethinkDB runs from by default if opts["pkg_type"] == "rpm": def install(path): return "rpm -i %s --nodeps" % path def get_binary(path): return "rpm -qpil %s | grep /usr/bin" % path def uninstall(cmd_name): return "which %s | xargs readlink -f | xargs rpm -qf | xargs rpm -e" % cmd_name elif opts["pkg_type"] == "deb": def install(path): return "dpkg -i %s" % path def get_binary(path): return "dpkg -c %s | grep /usr/bin/rethinkdb-.* | sed 's/^.*\(\/usr.*\)$/\\1/'" % path def uninstall(cmd_name): return "which %s | xargs readlink -f | xargs dpkg -S | sed 's/^\(.*\):.*$/\\1/' | xargs dpkg -r" % cmd_name else: print >>sys.stderr, "Error: Unknown package type." exit(0) def purge_installed_packages(): try: old_binaries_raw = exec_command(["ls", "/usr/bin/rethinkdb*"], shell = True).stdout.readlines() except Exception, e: print "Nothing to remove." 
return old_binaries = map(lambda x: x.strip('\n'), old_binaries_raw) print "Binaries scheduled for removal: ", old_binaries try: exec_command(uninstall(old_binaries[0]), shell = True) except Exception, e: exec_command('rm -f ' + old_binaries[0]) purge_installed_packages() def exec_command(cmd, bg = False, shell = False): if type(cmd) == type("") and not shell: cmd = cmd.split(" ") elif type(cmd) == type([]) and shell: cmd = " ".join(cmd) print cmd if bg: return subprocess.Popen(cmd, stdout = subprocess.PIPE, shell = shell) # doesn't actually run in background: it just skips the waiting part else: proc = subprocess.Popen(cmd, stdout = subprocess.PIPE, shell = shell) proc.wait() if proc.poll(): raise RuntimeError("Error: command ended with signal %d." % proc.poll()) return proc def wait_until_started_up(proc, host, port, timeout = 600): time_limit = time.time() + timeout while time.time() < time_limit: if proc.poll() is not None: raise RuntimeError("Process stopped unexpectedly with return code %d." 
% proc.poll()) s = socket.socket() try: s.connect((host, port)) except socket.error, e: time.sleep(1) else: break finally: s.close() else: raise RuntimeError("Could not connect to process.") def test_against(host, port, timeout = 600): with memcached_workload_common.make_memcache_connection({"address": (host, port), "mclib": "pylibmc", "protocol": "text"}) as mc: temp = 0 time_limit = time.time() + timeout while not temp and time.time() < time_limit: try: temp = mc.set("test", "test") print temp except Exception, e: print e pass time.sleep(1) goodsets = 0 goodgets = 0 for i in range(num_keys): try: if mc.set(str(i), str(i)): goodsets += 1 except: pass for i in range(num_keys): try: if mc.get(str(i)) == str(i): goodgets += 1 except: pass return goodsets, goodgets cur_dir = exec_command("pwd").stdout.readline().strip('\n') p = exec_command("find build/%s -name *.%s" % (opts["mode"], opts["pkg_type"])) raw = p.stdout.readlines() res_paths = map(lambda x: os.path.join(cur_dir, x.strip('\n')), raw) print "Packages to install:", res_paths failed_test = False for path in res_paths: print "TESTING A NEW PACKAGE" print "Uninstalling old packages..." purge_installed_packages() print "Done uninstalling..." print "Installing RethinkDB..." target_binary_name = exec_command(get_binary(path), shell = True).stdout.readlines()[0].strip('\n') print "Target binary name:", target_binary_name exec_command(install(path)) print "Starting RethinkDB..." exec_command("rm -rf rethinkdb_data") exec_command("rm -f core.*") proc = exec_command("rethinkdb", bg = True) # gets the IP address s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) s.connect(("rethinkdb.com", 80)) ip = s.getsockname()[0] s.close() print "IP Address detected:", ip wait_until_started_up(proc, ip, base_port) print "Testing..." res = test_against(ip, base_port) print "Tests completed. Killing instance now..." 
proc.send_signal(signal.SIGINT) timeout = 60 # 1 minute to shut down time_limit = time.time() + timeout while proc.poll() is None and time.time() < time_limit: pass if proc.poll() != 0: print "RethinkDB failed to shut down properly. (TEST FAILED)" failed_test = False if res != (num_keys, num_keys): print "Done: FAILED" print "Results: %d successful sets, %d successful gets (%d total)" % (res[0], res[1], num_keys) failed_test = True else: print "Done: PASSED" print "Done." if failed_test: exit(1) else: exit(0)
kubaszostak/gdal-dragndrop
refs/heads/master
osgeo/apps/Python27/Lib/ctypes/_endian.py
2
import sys from ctypes import * _array_type = type(Array) def _other_endian(typ): """Return the type with the 'other' byte order. Simple types like c_int and so on already have __ctype_be__ and __ctype_le__ attributes which contain the types, for more complicated types arrays and structures are supported. """ # check _OTHER_ENDIAN attribute (present if typ is primitive type) if hasattr(typ, _OTHER_ENDIAN): return getattr(typ, _OTHER_ENDIAN) # if typ is array if isinstance(typ, _array_type): return _other_endian(typ._type_) * typ._length_ # if typ is structure if issubclass(typ, Structure): return typ raise TypeError("This type does not support other endian: %s" % typ) class _swapped_meta(type(Structure)): def __setattr__(self, attrname, value): if attrname == "_fields_": fields = [] for desc in value: name = desc[0] typ = desc[1] rest = desc[2:] fields.append((name, _other_endian(typ)) + rest) value = fields super(_swapped_meta, self).__setattr__(attrname, value) ################################################################ # Note: The Structure metaclass checks for the *presence* (not the # value!) of a _swapped_bytes_ attribute to determine the bit order in # structures containing bit fields. if sys.byteorder == "little": _OTHER_ENDIAN = "__ctype_be__" LittleEndianStructure = Structure class BigEndianStructure(Structure): """Structure with big endian byte order""" __metaclass__ = _swapped_meta _swappedbytes_ = None elif sys.byteorder == "big": _OTHER_ENDIAN = "__ctype_le__" BigEndianStructure = Structure class LittleEndianStructure(Structure): """Structure with little endian byte order""" __metaclass__ = _swapped_meta _swappedbytes_ = None else: raise RuntimeError("Invalid byteorder")
thresholdsoftware/asylum
refs/heads/master
openerp/addons/hr_attendance/res_config.py
434
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Business Applications # Copyright (C) 2004-2012 OpenERP S.A. (<http://openerp.com>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp.osv import fields, osv class hr_attendance_config_settings(osv.osv_memory): _inherit = 'hr.config.settings' _columns = { 'group_hr_attendance': fields.boolean('Track attendances for all employees', implied_group='base.group_hr_attendance', help="Allocates attendance group to all users."), } # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
huangchaosuper/ReportViewer
refs/heads/master
reportviewer/node_modules/cordova/node_modules/plugman/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/MSVSToolFile.py
2736
# Copyright (c) 2012 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Visual Studio project reader/writer.""" import gyp.common import gyp.easy_xml as easy_xml class Writer(object): """Visual Studio XML tool file writer.""" def __init__(self, tool_file_path, name): """Initializes the tool file. Args: tool_file_path: Path to the tool file. name: Name of the tool file. """ self.tool_file_path = tool_file_path self.name = name self.rules_section = ['Rules'] def AddCustomBuildRule(self, name, cmd, description, additional_dependencies, outputs, extensions): """Adds a rule to the tool file. Args: name: Name of the rule. description: Description of the rule. cmd: Command line of the rule. additional_dependencies: other files which may trigger the rule. outputs: outputs of the rule. extensions: extensions handled by the rule. """ rule = ['CustomBuildRule', {'Name': name, 'ExecutionDescription': description, 'CommandLine': cmd, 'Outputs': ';'.join(outputs), 'FileExtensions': ';'.join(extensions), 'AdditionalDependencies': ';'.join(additional_dependencies) }] self.rules_section.append(rule) def WriteIfChanged(self): """Writes the tool file.""" content = ['VisualStudioToolFile', {'Version': '8.00', 'Name': self.name }, self.rules_section ] easy_xml.WriteXmlIfChanged(content, self.tool_file_path, encoding="Windows-1252")
moondrop-entertainment/django-nonrel-drawp
refs/heads/master
tests/regressiontests/forms/localflavor/cl.py
86
from django.contrib.localflavor.cl.forms import CLRutField, CLRegionSelect
from django.core.exceptions import ValidationError

from utils import LocalFlavorTestCase


class CLLocalFlavorTests(LocalFlavorTestCase):
    """Tests for the Chilean (CL) localflavor form fields."""

    def test_CLRegionSelect(self):
        # The widget must render one <option> per Chilean region,
        # keyed by the region's roman-numeral (or RM) code.
        f = CLRegionSelect()
        out = u'''<select name="foo">
<option value="RM">Regi\xf3n Metropolitana de Santiago</option>
<option value="I">Regi\xf3n de Tarapac\xe1</option>
<option value="II">Regi\xf3n de Antofagasta</option>
<option value="III">Regi\xf3n de Atacama</option>
<option value="IV">Regi\xf3n de Coquimbo</option>
<option value="V">Regi\xf3n de Valpara\xedso</option>
<option value="VI">Regi\xf3n del Libertador Bernardo O&#39;Higgins</option>
<option value="VII">Regi\xf3n del Maule</option>
<option value="VIII">Regi\xf3n del B\xedo B\xedo</option>
<option value="IX">Regi\xf3n de la Araucan\xeda</option>
<option value="X">Regi\xf3n de los Lagos</option>
<option value="XI">Regi\xf3n de Ays\xe9n del General Carlos Ib\xe1\xf1ez del Campo</option>
<option value="XII">Regi\xf3n de Magallanes y la Ant\xe1rtica Chilena</option>
<option value="XIV">Regi\xf3n de Los R\xedos</option>
<option value="XV">Regi\xf3n de Arica-Parinacota</option>
</select>'''
        self.assertEqual(f.render('foo', 'bar'), out)

    def test_CLRutField(self):
        # Default (lenient) mode: several input spellings are accepted
        # and normalised, as the `valid` mapping below shows.
        error_invalid = [u'The Chilean RUT is not valid.']
        error_format = [u'Enter a valid Chilean RUT. The format is XX.XXX.XXX-X.']
        valid = {
            '11-6': '11-6',
            '116': '11-6',
            '767484100': '76.748.410-0',
            '78.412.790-7': '78.412.790-7',
            '8.334.6043': '8.334.604-3',
            '76793310-K': '76.793.310-K',
            '76793310-k': '76.793.310-K',
        }
        invalid = {
            '11.111.111-0': error_invalid,
            '111': error_invalid,
        }
        self.assertFieldOutput(CLRutField, valid, invalid)
        # deal with special "Strict Mode": with strict=True, inputs that
        # were merely reformatted above now fail with a format error.
        invalid = {
            '11-6': error_format,
            '767484100': error_format,
            '8.334.6043': error_format,
            '76793310-K': error_format,
            '11.111.111-0': error_invalid
        }
        self.assertFieldOutput(CLRutField, {}, invalid,
                               field_kwargs={"strict": True})
kenorb-contrib/BitTorrent
refs/heads/master
python_bt_codebase_library/BTL/protocol.py
11
# The contents of this file are subject to the Python Software Foundation
# License Version 2.3 (the License).  You may not copy or use this file, in
# either source code or executable form, except in compliance with the License.
# You may obtain a copy of the License at http://www.python.org/license.
#
# Software distributed under the License is distributed on an AS IS basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied.  See the License
# for the specific language governing rights and limitations under the
# License.
#
# by Greg Hazel

from twisted.internet import protocol

from BTL.decorate import decorate_func


## someday twisted might do this for me
class SmartReconnectingClientFactory(protocol.ReconnectingClientFactory):
    """ReconnectingClientFactory whose reconnect backoff is reset once a
    connection is actually established, not merely attempted."""

    def buildProtocol(self, addr):
        base = protocol.ReconnectingClientFactory
        proto = base.buildProtocol(self, addr)
        # Wrap connectionMade so self.resetDelay() fires first whenever
        # the transport comes up.
        proto.connectionMade = decorate_func(self.resetDelay,
                                             proto.connectionMade)
        return proto
gonzolino/heat
refs/heads/master
heat/api/openstack/v1/services.py
8
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS,
#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
#    implied. See the License for the specific language governing
#    permissions and limitations under the License.

from oslo_messaging import exceptions
from webob import exc

from heat.api.openstack.v1 import util
from heat.common.i18n import _
from heat.common import serializers
from heat.common import wsgi
from heat.rpc import client as rpc_client


class ServiceController(object):
    """WSGI controller for reporting the heat engine status in Heat v1 API."""

    # Define request scope (must match what is in policy.json)
    REQUEST_SCOPE = 'service'

    def __init__(self, options):
        self.options = options
        self.rpc_client = rpc_client.EngineClient()

    @util.policy_enforce
    def index(self, req):
        """Return the status of the known heat engines."""
        try:
            svc_list = self.rpc_client.list_services(req.context)
        except exceptions.MessagingTimeout:
            # The RPC call went unanswered: no engine is alive, so report
            # the service as temporarily unavailable instead of a 500.
            raise exc.HTTPServiceUnavailable(_('All heat engines are down.'))
        return {'services': svc_list}


def create_resource(options):
    """Build the WSGI resource wrapping ServiceController."""
    json_deserializer = wsgi.JSONRequestDeserializer()
    json_serializer = serializers.JSONResponseSerializer()
    return wsgi.Resource(ServiceController(options),
                         json_deserializer,
                         json_serializer)
Ichimonji10/robottelo
refs/heads/master
tests/foreman/api/test_computeprofile.py
2
# -*- encoding: utf-8 -*-
"""Unit tests for the Compute Profile feature.

@Requirement: Computeprofile

@CaseAutomation: Automated

@CaseLevel: Acceptance

@CaseComponent: API

@TestType: Functional

@CaseImportance: High

@Upstream: No
"""
from nailgun import entities
from requests.exceptions import HTTPError

from robottelo.datafactory import invalid_values_list, valid_data_list
from robottelo.decorators import run_only_on, tier1
from robottelo.test import APITestCase


class ComputeProfileTestCase(APITestCase):
    """Tests for compute profiles."""

    @run_only_on('sat')
    @tier1
    def test_positive_create_with_name(self):
        """Create new Compute Profile using different names

        @id: 97d04911-9368-4674-92c7-1e3ff114bc18

        @Assert: Compute Profile is created
        """
        # Exercise every generated valid name as its own subtest.
        for name in valid_data_list():
            with self.subTest(name):
                profile = entities.ComputeProfile(name=name).create()
                self.assertEqual(name, profile.name)

    @run_only_on('sat')
    @tier1
    def test_negative_create(self):
        """Attempt to create Compute Profile using invalid names only

        @id: 2d34a1fd-70a5-4e59-b2e2-86fbfe8e31ab

        @Assert: Compute Profile is not created
        """
        # Server is expected to reject each invalid name with an HTTP error.
        for name in invalid_values_list():
            with self.subTest(name):
                with self.assertRaises(HTTPError):
                    entities.ComputeProfile(name=name).create()

    @run_only_on('sat')
    @tier1
    def test_positive_update_name(self):
        """Update selected Compute Profile entity using proper names

        @id: c79193d7-2e0f-4ed9-b947-05feeddabfda

        @Assert: Compute Profile is updated.
        """
        # One profile is created up front and renamed repeatedly.
        profile = entities.ComputeProfile().create()
        for new_name in valid_data_list():
            with self.subTest(new_name):
                updated_profile = entities.ComputeProfile(
                    id=profile.id, name=new_name).update(['name'])
                self.assertEqual(new_name, updated_profile.name)

    @run_only_on('sat')
    @tier1
    def test_negative_update_name(self):
        """Attempt to update Compute Profile entity using invalid names only

        @id: 042b40d5-a78b-4e65-b5cb-5b270b800b37

        @Assert: Compute Profile is not updated.
        """
        profile = entities.ComputeProfile().create()
        for new_name in invalid_values_list():
            with self.subTest(new_name):
                with self.assertRaises(HTTPError):
                    entities.ComputeProfile(
                        id=profile.id, name=new_name).update(['name'])
                # Re-read the entity to confirm the rejected rename did not
                # leak through to the server side.
                updated_profile = entities.ComputeProfile(id=profile.id).read()
                self.assertNotEqual(new_name, updated_profile.name)

    @run_only_on('sat')
    @tier1
    def test_positive_delete(self):
        """Delete Compute Profile entity

        @id: 0a620e23-7ba6-4178-af7a-fd1e332f478f

        @Assert: Compute Profile is deleted successfully.
        """
        for name in valid_data_list():
            with self.subTest(name):
                profile = entities.ComputeProfile(name=name).create()
                profile.delete()
                # Reading a deleted entity must fail.
                with self.assertRaises(HTTPError):
                    entities.ComputeProfile(id=profile.id).read()
mvaled/sentry
refs/heads/master
src/sentry/south_migrations/0454_resolve_duplicate_0452.py
1
# -*- coding: utf-8 -*- from south.utils import datetime_utils as datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): # Flag to indicate if this migration is too risky # to run online and needs to be coordinated for offline is_dangerous = False def forwards(self, orm): pass def backwards(self, orm): pass models = { 'sentry.activity': { 'Meta': {'object_name': 'Activity'}, 'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True'}), 'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}), 'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}), 'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'}) }, 'sentry.apiapplication': { 'Meta': {'object_name': 'ApiApplication'}, 'allowed_origins': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'client_id': ('django.db.models.fields.CharField', [], {'default': "'1c9325a8b551454699a4e25930bb44257a66de0b26cc4405b975783e3968e813'", 'unique': 'True', 'max_length': '64'}), 'client_secret': ('sentry.db.models.fields.encrypted.EncryptedTextField', [], {'default': "'92b2439401184e489b06acaa46845e913c6bb744c20c4d17944b5e0ef544259a'"}), 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'homepage_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'}), 'id': 
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'default': "'Fit Moray'", 'max_length': '64', 'blank': 'True'}), 'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}), 'privacy_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'}), 'redirect_uris': ('django.db.models.fields.TextField', [], {}), 'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}), 'terms_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'}) }, 'sentry.apiauthorization': { 'Meta': {'unique_together': "(('user', 'application'),)", 'object_name': 'ApiAuthorization'}, 'application': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiApplication']", 'null': 'True'}), 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': (u'django.db.models.fields.TextField', [], {})}), 'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}), 'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}) }, 'sentry.apigrant': { 'Meta': {'object_name': 'ApiGrant'}, 'application': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiApplication']"}), 'code': ('django.db.models.fields.CharField', [], {'default': "'07df76ffdeca4ba4aa3ebd48585cbed9'", 'max_length': '64', 'db_index': 'True'}), 'expires_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2019, 1, 8, 0, 0)', 'db_index': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'redirect_uri': 
('django.db.models.fields.CharField', [], {'max_length': '255'}), 'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': (u'django.db.models.fields.TextField', [], {})}), 'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}), 'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}) }, 'sentry.apikey': { 'Meta': {'object_name': 'ApiKey'}, 'allowed_origins': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}), 'label': ('django.db.models.fields.CharField', [], {'default': "'Default'", 'max_length': '64', 'blank': 'True'}), 'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Organization']"}), 'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': (u'django.db.models.fields.TextField', [], {})}), 'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}), 'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}) }, 'sentry.apitoken': { 'Meta': {'object_name': 'ApiToken'}, 'application': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiApplication']", 'null': 'True'}), 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'expires_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2019, 2, 7, 0, 0)', 'null': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'refresh_token': ('django.db.models.fields.CharField', [], {'default': 
"'f6256b19cdc94c04916d13d6f1e6e7966acaca2a610d4a83b29be970500c6290'", 'max_length': '64', 'unique': 'True', 'null': 'True'}), 'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': (u'django.db.models.fields.TextField', [], {})}), 'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}), 'token': ('django.db.models.fields.CharField', [], {'default': "'2f6c7882b42541689a0ca902b9eddc6bd840fcbedcf84631aa6a18c92d817fc7'", 'unique': 'True', 'max_length': '64'}), 'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}) }, 'sentry.assistantactivity': { 'Meta': {'unique_together': "(('user', 'guide_id'),)", 'object_name': 'AssistantActivity', 'db_table': "'sentry_assistant_activity'"}, 'dismissed_ts': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}), 'guide_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'useful': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}), 'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}), 'viewed_ts': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}) }, 'sentry.auditlogentry': { 'Meta': {'object_name': 'AuditLogEntry'}, 'actor': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'audit_actors'", 'null': 'True', 'to': "orm['sentry.User']"}), 'actor_key': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiKey']", 'null': 'True', 'blank': 'True'}), 'actor_label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}), 'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}), 'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'event': 
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}), 'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}), 'target_object': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}), 'target_user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'audit_targets'", 'null': 'True', 'to': "orm['sentry.User']"}) }, 'sentry.authenticator': { 'Meta': {'unique_together': "(('user', 'type'),)", 'object_name': 'Authenticator', 'db_table': "'auth_authenticator'"}, 'config': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {}), 'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}), 'last_used_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}), 'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}), 'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}) }, 'sentry.authidentity': { 'Meta': {'unique_together': "(('auth_provider', 'ident'), ('auth_provider', 'user'))", 'object_name': 'AuthIdentity'}, 'auth_provider': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.AuthProvider']"}), 'data': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}), 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'ident': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'last_synced': 
('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_verified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}) }, 'sentry.authprovider': { 'Meta': {'object_name': 'AuthProvider'}, 'config': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}), 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'default_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'default_role': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}), 'default_teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'blank': 'True'}), 'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'last_sync': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}), 'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']", 'unique': 'True'}), 'provider': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'sync_time': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}) }, 'sentry.broadcast': { 'Meta': {'object_name': 'Broadcast'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'date_expires': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2019, 1, 15, 0, 0)', 'null': 'True', 'blank': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}), 'link': ('django.db.models.fields.URLField', [], 
{'max_length': '200', 'null': 'True', 'blank': 'True'}), 'message': ('django.db.models.fields.CharField', [], {'max_length': '256'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'upstream_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}) }, 'sentry.broadcastseen': { 'Meta': {'unique_together': "(('broadcast', 'user'),)", 'object_name': 'BroadcastSeen'}, 'broadcast': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Broadcast']"}), 'date_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}) }, 'sentry.commit': { 'Meta': {'unique_together': "(('repository_id', 'key'),)", 'object_name': 'Commit', 'index_together': "(('repository_id', 'date_added'),)"}, 'author': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.CommitAuthor']", 'null': 'True'}), 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'message': ('django.db.models.fields.TextField', [], {'null': 'True'}), 'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}), 'repository_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}) }, 'sentry.commitauthor': { 'Meta': {'unique_together': "(('organization_id', 'email'), ('organization_id', 'external_id'))", 'object_name': 'CommitAuthor'}, 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}), 'external_id': ('django.db.models.fields.CharField', [], {'max_length': '164', 'null': 'True'}), 'id': 
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}), 'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}) }, 'sentry.commitfilechange': { 'Meta': {'unique_together': "(('commit', 'filename'),)", 'object_name': 'CommitFileChange'}, 'commit': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Commit']"}), 'filename': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}), 'type': ('django.db.models.fields.CharField', [], {'max_length': '1'}) }, 'sentry.counter': { 'Meta': {'object_name': 'Counter', 'db_table': "'sentry_projectcounter'"}, 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'unique': 'True'}), 'value': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}) }, 'sentry.deletedorganization': { 'Meta': {'object_name': 'DeletedOrganization'}, 'actor_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}), 'actor_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}), 'actor_label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}), 'date_created': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}), 'date_deleted': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 
'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}), 'reason': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'slug': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}) }, 'sentry.deletedproject': { 'Meta': {'object_name': 'DeletedProject'}, 'actor_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}), 'actor_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}), 'actor_label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}), 'date_created': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}), 'date_deleted': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}), 'organization_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}), 'organization_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}), 'organization_slug': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}), 'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}), 'reason': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'slug': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}) }, 'sentry.deletedteam': { 'Meta': {'object_name': 'DeletedTeam'}, 'actor_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}), 'actor_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}), 'actor_label': ('django.db.models.fields.CharField', [], 
{'max_length': '64', 'null': 'True'}), 'date_created': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}), 'date_deleted': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}), 'organization_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}), 'organization_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}), 'organization_slug': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}), 'reason': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'slug': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}) }, 'sentry.deploy': { 'Meta': {'object_name': 'Deploy'}, 'date_finished': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'date_started': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'environment_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}), 'notified': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'db_index': 'True', 'blank': 'True'}), 'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}), 'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"}), 'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 
'blank': 'True'}) }, 'sentry.discoversavedquery': { 'Meta': {'object_name': 'DiscoverSavedQuery'}, 'created_by': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True', 'on_delete': 'models.SET_NULL'}), 'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}), 'projects': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Project']", 'through': "orm['sentry.DiscoverSavedQueryProject']", 'symmetrical': 'False'}), 'query': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}) }, 'sentry.discoversavedqueryproject': { 'Meta': {'unique_together': "(('project', 'discover_saved_query'),)", 'object_name': 'DiscoverSavedQueryProject'}, 'discover_saved_query': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.DiscoverSavedQuery']"}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}) }, 'sentry.distribution': { 'Meta': {'unique_together': "(('release', 'name'),)", 'object_name': 'Distribution'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}), 'release': 
('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"}) }, 'sentry.email': { 'Meta': {'object_name': 'Email'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('sentry.db.models.fields.citext.CIEmailField', [], {'unique': 'True', 'max_length': '75'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}) }, 'sentry.environment': { 'Meta': {'unique_together': "(('organization_id', 'name'),)", 'object_name': 'Environment'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}), 'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}), 'projects': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Project']", 'through': "orm['sentry.EnvironmentProject']", 'symmetrical': 'False'}) }, 'sentry.environmentproject': { 'Meta': {'unique_together': "(('project', 'environment'),)", 'object_name': 'EnvironmentProject'}, 'environment': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Environment']"}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'is_hidden': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}) }, 'sentry.event': { 'Meta': {'unique_together': "(('project_id', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'", 'index_together': "(('group_id', 'datetime'),)"}, 'data': ('sentry.db.models.fields.node.NodeField', [], {'null': 'True', 
'blank': 'True'}), 'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}), 'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}), 'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'message': ('django.db.models.fields.TextField', [], {}), 'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}), 'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}), 'time_spent': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'null': 'True'}) }, 'sentry.eventattachment': { 'Meta': {'unique_together': "(('project_id', 'event_id', 'file'),)", 'object_name': 'EventAttachment', 'index_together': "(('project_id', 'date_added'),)"}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}), 'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}), 'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.TextField', [], {}), 'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}) }, 'sentry.eventmapping': { 'Meta': {'unique_together': "(('project_id', 'event_id'),)", 'object_name': 'EventMapping'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], 
{}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}) }, 'sentry.eventprocessingissue': { 'Meta': {'unique_together': "(('raw_event', 'processing_issue'),)", 'object_name': 'EventProcessingIssue'}, 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'processing_issue': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ProcessingIssue']"}), 'raw_event': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.RawEvent']"}) }, 'sentry.eventtag': { 'Meta': {'unique_together': "(('event_id', 'key_id', 'value_id'),)", 'object_name': 'EventTag', 'index_together': "(('group_id', 'key_id', 'value_id'),)"}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}), 'event_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}), 'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'key_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}), 'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}), 'value_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}) }, 'sentry.eventuser': { 'Meta': {'unique_together': "(('project_id', 'ident'), ('project_id', 'hash'))", 'object_name': 'EventUser', 'index_together': "(('project_id', 'email'), ('project_id', 'username'), ('project_id', 'ip_address'))"}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True'}), 'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'id': 
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'ident': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}), 'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}), 'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}) }, 'sentry.externalissue': { 'Meta': {'unique_together': "(('organization_id', 'integration_id', 'key'),)", 'object_name': 'ExternalIssue'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'description': ('django.db.models.fields.TextField', [], {'null': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'integration_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'metadata': ('sentry.db.models.fields.jsonfield.JSONField', [], {'null': 'True'}), 'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}), 'title': ('django.db.models.fields.TextField', [], {'null': 'True'}) }, 'sentry.featureadoption': { 'Meta': {'unique_together': "(('organization', 'feature_id'),)", 'object_name': 'FeatureAdoption'}, 'applicable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'complete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}), 'date_completed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'feature_id': ('django.db.models.fields.PositiveIntegerField', [], {}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', 
[], {'primary_key': 'True'}), 'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}) }, 'sentry.file': { 'Meta': {'object_name': 'File'}, 'blob': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'legacy_blob'", 'null': 'True', 'to': "orm['sentry.FileBlob']"}), 'blobs': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.FileBlob']", 'through': "orm['sentry.FileBlobIndex']", 'symmetrical': 'False'}), 'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'db_index': 'True'}), 'headers': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.TextField', [], {}), 'path': ('django.db.models.fields.TextField', [], {'null': 'True'}), 'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}), 'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}), 'type': ('django.db.models.fields.CharField', [], {'max_length': '64'}) }, 'sentry.fileblob': { 'Meta': {'object_name': 'FileBlob'}, 'checksum': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'path': ('django.db.models.fields.TextField', [], {'null': 'True'}), 'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}), 'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}) }, 'sentry.fileblobindex': { 'Meta': {'unique_together': "(('file', 'blob', 'offset'),)", 'object_name': 'FileBlobIndex'}, 'blob': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.FileBlob']"}), 'file': 
('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'offset': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}) }, 'sentry.fileblobowner': { 'Meta': {'unique_together': "(('blob', 'organization'),)", 'object_name': 'FileBlobOwner'}, 'blob': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.FileBlob']"}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}) }, 'sentry.group': { 'Meta': {'unique_together': "(('project', 'short_id'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'", 'index_together': "(('project', 'first_release'),)"}, 'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}), 'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}), 'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}), 'first_release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']", 'null': 'True', 'on_delete': 'models.PROTECT'}), 'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}), 'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}), 'level': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}), 'logger': 
('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}), 'message': ('django.db.models.fields.TextField', [], {}), 'num_comments': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}), 'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}), 'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}), 'score': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}), 'short_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}), 'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}), 'time_spent_count': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}), 'time_spent_total': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}), 'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '1', 'db_index': 'True'}) }, 'sentry.groupassignee': { 'Meta': {'object_name': 'GroupAssignee', 'db_table': "'sentry_groupasignee'"}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'unique': 'True', 'to': "orm['sentry.Group']"}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'to': "orm['sentry.Project']"}), 'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_assignee_set'", 'null': 'True', 'to': "orm['sentry.Team']"}), 'user': 
('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_assignee_set'", 'null': 'True', 'to': "orm['sentry.User']"}) }, 'sentry.groupbookmark': { 'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}), 'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}), 'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"}) }, 'sentry.groupcommitresolution': { 'Meta': {'unique_together': "(('group_id', 'commit_id'),)", 'object_name': 'GroupCommitResolution'}, 'commit_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}), 'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}), 'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}) }, 'sentry.groupemailthread': { 'Meta': {'unique_together': "(('email', 'group'), ('email', 'msgid'))", 'object_name': 'GroupEmailThread'}, 'date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}), 'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'groupemail_set'", 'to': "orm['sentry.Group']"}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 
'msgid': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'groupemail_set'", 'to': "orm['sentry.Project']"}) }, 'sentry.groupenvironment': { 'Meta': {'unique_together': "[('group_id', 'environment_id')]", 'object_name': 'GroupEnvironment', 'index_together': "[('environment_id', 'first_release_id')]"}, 'environment_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}), 'first_release_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}), 'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}) }, 'sentry.grouphash': { 'Meta': {'unique_together': "(('project', 'hash'),)", 'object_name': 'GroupHash'}, 'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}), 'group_tombstone_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}), 'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}), 'state': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}) }, 'sentry.grouplink': { 'Meta': {'unique_together': "(('group_id', 'linked_type', 'linked_id'),)", 'object_name': 'GroupLink'}, 'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}), 'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}), 'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], 
{'primary_key': 'True'}), 'linked_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}), 'linked_type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '1'}), 'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'db_index': 'True'}), 'relationship': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '2'}) }, 'sentry.groupmeta': { 'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'}, 'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'value': ('django.db.models.fields.TextField', [], {}) }, 'sentry.groupredirect': { 'Meta': {'object_name': 'GroupRedirect'}, 'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'db_index': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'previous_group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'unique': 'True'}) }, 'sentry.grouprelease': { 'Meta': {'unique_together': "(('group_id', 'release_id', 'environment'),)", 'object_name': 'GroupRelease'}, 'environment': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64'}), 'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}), 'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}), 'release_id': 
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}) }, 'sentry.groupresolution': { 'Meta': {'object_name': 'GroupResolution'}, 'actor_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}), 'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}), 'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'unique': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"}), 'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}), 'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}) }, 'sentry.grouprulestatus': { 'Meta': {'unique_together': "(('rule', 'group'),)", 'object_name': 'GroupRuleStatus'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'last_active': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}), 'rule': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Rule']"}), 'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}) }, 'sentry.groupseen': { 'Meta': {'unique_together': "(('user', 'group'),)", 'object_name': 'GroupSeen'}, 'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'last_seen': 
('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}), 'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'db_index': 'False'}) }, 'sentry.groupshare': { 'Meta': {'object_name': 'GroupShare'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'unique': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}), 'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'}), 'uuid': ('django.db.models.fields.CharField', [], {'default': "'71b230995a97424a84fcb75cf7e7d975'", 'unique': 'True', 'max_length': '32'}) }, 'sentry.groupsnooze': { 'Meta': {'object_name': 'GroupSnooze'}, 'actor_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}), 'count': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}), 'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'unique': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'state': ('sentry.db.models.fields.jsonfield.JSONField', [], {'null': 'True'}), 'until': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}), 'user_count': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}), 'user_window': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}), 'window': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}) }, 
'sentry.groupsubscription': { 'Meta': {'unique_together': "(('group', 'user'),)", 'object_name': 'GroupSubscription'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}), 'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'subscription_set'", 'to': "orm['sentry.Group']"}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'subscription_set'", 'to': "orm['sentry.Project']"}), 'reason': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}), 'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}) }, 'sentry.grouptagkey': { 'Meta': {'unique_together': "(('project_id', 'group_id', 'key'),)", 'object_name': 'GroupTagKey'}, 'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}), 'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}) }, 'sentry.grouptagvalue': { 'Meta': {'unique_together': "(('group_id', 'key', 'value'),)", 'object_name': 'GroupTagValue', 'db_table': "'sentry_messagefiltervalue'", 'index_together': "(('project_id', 'key', 'value', 'last_seen'),)"}, 'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}), 'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}), 'id': 
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}), 'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}), 'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}), 'value': ('django.db.models.fields.CharField', [], {'max_length': '200'}) }, 'sentry.grouptombstone': { 'Meta': {'object_name': 'GroupTombstone'}, 'actor_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}), 'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}), 'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'level': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '40', 'blank': 'True'}), 'message': ('django.db.models.fields.TextField', [], {}), 'previous_group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'unique': 'True'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}) }, 'sentry.identity': { 'Meta': {'unique_together': "(('idp', 'external_id'), ('idp', 'user'))", 'object_name': 'Identity'}, 'data': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}), 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'date_verified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'external_id': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'id': 
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'idp': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.IdentityProvider']"}), 'scopes': ('sentry.db.models.fields.array.ArrayField', [], {'of': (u'django.db.models.fields.TextField', [], {})}), 'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}), 'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}) }, 'sentry.identityprovider': { 'Meta': {'unique_together': "(('type', 'external_id'),)", 'object_name': 'IdentityProvider'}, 'config': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}), 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}), 'external_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'type': ('django.db.models.fields.CharField', [], {'max_length': '64'}) }, 'sentry.integration': { 'Meta': {'unique_together': "(('provider', 'external_id'),)", 'object_name': 'Integration'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}), 'external_id': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'metadata': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'organizations': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'integrations'", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationIntegration']", 'to': "orm['sentry.Organization']"}), 'projects': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': 
"'integrations'", 'symmetrical': 'False', 'through': "orm['sentry.ProjectIntegration']", 'to': "orm['sentry.Project']"}), 'provider': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}) }, 'sentry.integrationexternalproject': { 'Meta': {'unique_together': "(('organization_integration_id', 'external_id'),)", 'object_name': 'IntegrationExternalProject'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'external_id': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'organization_integration_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}), 'resolved_status': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'unresolved_status': ('django.db.models.fields.CharField', [], {'max_length': '64'}) }, 'sentry.latestrelease': { 'Meta': {'unique_together': "(('repository_id', 'environment_id'),)", 'object_name': 'LatestRelease'}, 'commit_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}), 'deploy_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}), 'environment_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'release_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}), 'repository_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}) }, 'sentry.lostpasswordhash': { 'Meta': {'object_name': 'LostPasswordHash'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'hash': 
('django.db.models.fields.CharField', [], {'max_length': '32'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'unique': 'True'}) }, 'sentry.option': { 'Meta': {'object_name': 'Option'}, 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}), 'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {}) }, 'sentry.organization': { 'Meta': {'object_name': 'Organization'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'default_role': ('django.db.models.fields.CharField', [], {'default': "'member'", 'max_length': '32'}), 'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'org_memberships'", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMember']", 'to': "orm['sentry.User']"}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}), 'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}) }, 'sentry.organizationaccessrequest': { 'Meta': {'unique_together': "(('team', 'member'),)", 'object_name': 'OrganizationAccessRequest'}, 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'member': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}), 'team': 
('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"}) }, 'sentry.organizationavatar': { 'Meta': {'object_name': 'OrganizationAvatar'}, 'avatar_type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}), 'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'ident': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}), 'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'avatar'", 'unique': 'True', 'to': "orm['sentry.Organization']"}) }, 'sentry.organizationintegration': { 'Meta': {'unique_together': "(('organization', 'integration'),)", 'object_name': 'OrganizationIntegration'}, 'config': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}), 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}), 'default_auth_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'integration': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Integration']"}), 'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}), 'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}) }, 'sentry.organizationmember': { 'Meta': {'unique_together': "(('organization', 'user'), ('organization', 'email'))", 'object_name': 'OrganizationMember'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': 
('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}), 'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}), 'has_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Organization']"}), 'role': ('django.db.models.fields.CharField', [], {'default': "'member'", 'max_length': '32'}), 'teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMemberTeam']", 'blank': 'True'}), 'token': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True', 'blank': 'True'}), 'token_expires_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}), 'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50', 'blank': 'True'}), 'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'sentry_orgmember_set'", 'null': 'True', 'to': "orm['sentry.User']"}) }, 'sentry.organizationmemberteam': { 'Meta': {'unique_together': "(('team', 'organizationmember'),)", 'object_name': 'OrganizationMemberTeam', 'db_table': "'sentry_organizationmember_teams'"}, 'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'organizationmember': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}), 'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"}) }, 'sentry.organizationonboardingtask': { 'Meta': {'unique_together': "(('organization', 'task'),)", 
'object_name': 'OrganizationOnboardingTask'}, 'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}), 'date_completed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}), 'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}), 'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}), 'task': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}), 'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'}) }, 'sentry.organizationoption': { 'Meta': {'unique_together': "(('organization', 'key'),)", 'object_name': 'OrganizationOption', 'db_table': "'sentry_organizationoptions'"}, 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}), 'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {}) }, 'sentry.processingissue': { 'Meta': {'unique_together': "(('project', 'checksum', 'type'),)", 'object_name': 'ProcessingIssue'}, 'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}), 'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}), 'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}), 'type': 
('django.db.models.fields.CharField', [], {'max_length': '30'}) }, 'sentry.project': { 'Meta': {'unique_together': "(('organization', 'slug'),)", 'object_name': 'Project'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'first_event': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}), 'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0', 'null': 'True'}), 'forced_color': ('django.db.models.fields.CharField', [], {'max_length': '6', 'null': 'True', 'blank': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}), 'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}), 'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True'}), 'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}), 'teams': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'teams'", 'symmetrical': 'False', 'through': "orm['sentry.ProjectTeam']", 'to': "orm['sentry.Team']"}) }, 'sentry.projectavatar': { 'Meta': {'object_name': 'ProjectAvatar'}, 'avatar_type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}), 'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'ident': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}), 'project': 
('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'avatar'", 'unique': 'True', 'to': "orm['sentry.Project']"}) }, 'sentry.projectbookmark': { 'Meta': {'unique_together': "(('project', 'user'),)", 'object_name': 'ProjectBookmark'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True', 'blank': 'True'}), 'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}) }, 'sentry.projectcficachefile': { 'Meta': {'unique_together': "(('project', 'debug_file'),)", 'object_name': 'ProjectCfiCacheFile'}, 'cache_file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}), 'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40'}), 'debug_file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ProjectDebugFile']", 'on_delete': 'models.DO_NOTHING', 'db_column': "'dsym_file_id'"}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}), 'version': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}) }, 'sentry.projectdebugfile': { 'Meta': {'object_name': 'ProjectDebugFile', 'db_table': "'sentry_projectdsymfile'", 'index_together': "(('project', 'debug_id'),)"}, 'cpu_name': ('django.db.models.fields.CharField', [], {'max_length': '40'}), 'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {'null': 'True'}), 'debug_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_column': "'uuid'"}), 'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], 
{'to': "orm['sentry.File']"}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'object_name': ('django.db.models.fields.TextField', [], {}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}) }, 'sentry.projectintegration': { 'Meta': {'unique_together': "(('project', 'integration'),)", 'object_name': 'ProjectIntegration'}, 'config': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'integration': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Integration']"}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}) }, 'sentry.projectkey': { 'Meta': {'object_name': 'ProjectKey'}, 'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}), 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}), 'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}), 'rate_limit_count': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}), 'rate_limit_window': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}), 'roles': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}), 'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}), 'status': 
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}) }, 'sentry.projectoption': { 'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"}, 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}), 'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {}) }, 'sentry.projectownership': { 'Meta': {'object_name': 'ProjectOwnership'}, 'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'fallthrough': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'unique': 'True'}), 'raw': ('django.db.models.fields.TextField', [], {'null': 'True'}), 'schema': ('sentry.db.models.fields.jsonfield.JSONField', [], {'null': 'True'}) }, 'sentry.projectplatform': { 'Meta': {'unique_together': "(('project_id', 'platform'),)", 'object_name': 'ProjectPlatform'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'platform': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}) }, 
'sentry.projectredirect': { 'Meta': {'unique_together': "(('organization', 'redirect_slug'),)", 'object_name': 'ProjectRedirect'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}), 'redirect_slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}) }, 'sentry.projectsymcachefile': { 'Meta': {'unique_together': "(('project', 'debug_file'),)", 'object_name': 'ProjectSymCacheFile'}, 'cache_file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}), 'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40'}), 'debug_file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ProjectDebugFile']", 'on_delete': 'models.DO_NOTHING', 'db_column': "'dsym_file_id'"}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}), 'version': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}) }, 'sentry.projectteam': { 'Meta': {'unique_together': "(('project', 'team'),)", 'object_name': 'ProjectTeam'}, 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}), 'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"}) }, 'sentry.promptsactivity': { 'Meta': {'unique_together': "(('user', 'feature', 'organization_id', 'project_id'),)", 'object_name': 'PromptsActivity'}, 'data': 
('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}), 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'feature': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}), 'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}), 'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}) }, 'sentry.pullrequest': { 'Meta': {'unique_together': "(('repository_id', 'key'),)", 'object_name': 'PullRequest', 'db_table': "'sentry_pull_request'", 'index_together': "(('repository_id', 'date_added'), ('organization_id', 'merge_commit_sha'))"}, 'author': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.CommitAuthor']", 'null': 'True'}), 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'merge_commit_sha': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}), 'message': ('django.db.models.fields.TextField', [], {'null': 'True'}), 'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}), 'repository_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}), 'title': ('django.db.models.fields.TextField', [], {'null': 'True'}) }, 'sentry.pullrequestcommit': { 'Meta': {'unique_together': "(('pull_request', 'commit'),)", 'object_name': 'PullRequestCommit', 'db_table': "'sentry_pullrequest_commit'"}, 'commit': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': 
"orm['sentry.Commit']"}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'pull_request': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.PullRequest']"}) }, 'sentry.rawevent': { 'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'RawEvent'}, 'data': ('sentry.db.models.fields.node.NodeField', [], {'null': 'True', 'blank': 'True'}), 'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}) }, 'sentry.relay': { 'Meta': {'object_name': 'Relay'}, 'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'is_internal': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'public_key': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'relay_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}) }, 'sentry.release': { 'Meta': {'unique_together': "(('organization', 'version'),)", 'object_name': 'Release'}, 'authors': ('sentry.db.models.fields.array.ArrayField', [], {'of': (u'django.db.models.fields.TextField', [], {})}), 'commit_count': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}), 'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}), 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'date_released': 
('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'date_started': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'last_commit_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}), 'last_deploy_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}), 'new_groups': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}), 'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}), 'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}), 'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}), 'projects': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'releases'", 'symmetrical': 'False', 'through': "orm['sentry.ReleaseProject']", 'to': "orm['sentry.Project']"}), 'ref': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}), 'total_deploys': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}), 'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}), 'version': ('django.db.models.fields.CharField', [], {'max_length': '250'}) }, 'sentry.releasecommit': { 'Meta': {'unique_together': "(('release', 'commit'), ('release', 'order'))", 'object_name': 'ReleaseCommit'}, 'commit': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Commit']"}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'order': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}), 
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}), 'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}), 'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"}) }, 'sentry.releaseenvironment': { 'Meta': {'unique_together': "(('organization', 'release', 'environment'),)", 'object_name': 'ReleaseEnvironment', 'db_table': "'sentry_environmentrelease'"}, 'environment': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Environment']"}), 'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}), 'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}), 'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"}) }, 'sentry.releasefile': { 'Meta': {'unique_together': "(('release', 'ident'),)", 'object_name': 'ReleaseFile', 'index_together': "(('release', 'name'),)"}, 'dist': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Distribution']", 'null': 'True'}), 'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'ident': ('django.db.models.fields.CharField', [], {'max_length': '40'}), 'name': ('django.db.models.fields.TextField', [], {}), 'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}), 
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}), 'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"}) }, 'sentry.releaseheadcommit': { 'Meta': {'unique_together': "(('repository_id', 'release'),)", 'object_name': 'ReleaseHeadCommit'}, 'commit': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Commit']"}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}), 'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"}), 'repository_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}) }, 'sentry.releaseproject': { 'Meta': {'unique_together': "(('project', 'release'),)", 'object_name': 'ReleaseProject', 'db_table': "'sentry_release_project'"}, 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'new_groups': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}), 'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"}) }, 'sentry.releaseprojectenvironment': { 'Meta': {'unique_together': "(('project', 'release', 'environment'),)", 'object_name': 'ReleaseProjectEnvironment'}, 'environment': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Environment']"}), 'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'last_deploy_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 
'db_index': 'True'}), 'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}), 'new_issues_count': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}), 'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"}) }, 'sentry.repository': { 'Meta': {'unique_together': "(('organization_id', 'name'), ('organization_id', 'provider', 'external_id'))", 'object_name': 'Repository'}, 'config': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}), 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'external_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'integration_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}), 'provider': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}), 'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}), 'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'}) }, 'sentry.reprocessingreport': { 'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'ReprocessingReport'}, 'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], 
{'primary_key': 'True'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}) }, 'sentry.rule': { 'Meta': {'object_name': 'Rule'}, 'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}), 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'environment_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'label': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}), 'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}) }, 'sentry.savedsearch': { 'Meta': {'unique_together': "(('project', 'name'),)", 'object_name': 'SavedSearch'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}), 'query': ('django.db.models.fields.TextField', [], {}) }, 'sentry.savedsearchuserdefault': { 'Meta': {'unique_together': "(('project', 'user'),)", 'object_name': 'SavedSearchUserDefault', 'db_table': "'sentry_savedsearch_userdefault'"}, 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}), 'savedsearch': 
('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.SavedSearch']"}), 'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}) }, 'sentry.scheduleddeletion': { 'Meta': {'unique_together': "(('app_label', 'model_name', 'object_id'),)", 'object_name': 'ScheduledDeletion'}, 'aborted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'actor_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}), 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}), 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'date_scheduled': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2019, 2, 7, 0, 0)'}), 'guid': ('django.db.models.fields.CharField', [], {'default': "'e234f0c5b624418d80af9a72810119fd'", 'unique': 'True', 'max_length': '32'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'in_progress': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'model_name': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'object_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}) }, 'sentry.scheduledjob': { 'Meta': {'object_name': 'ScheduledJob'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'date_scheduled': ('django.db.models.fields.DateTimeField', [], {}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'payload': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}) }, 'sentry.sentryapp': { 'Meta': {'object_name': 'SentryApp'}, 'application': ('django.db.models.fields.related.OneToOneField', [], {'related_name': 
"'sentry_app'", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['sentry.ApiApplication']"}), 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'date_deleted': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'date_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'events': ('sentry.db.models.fields.array.ArrayField', [], {'of': (u'django.db.models.fields.TextField', [], {})}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'is_alertable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'name': ('django.db.models.fields.TextField', [], {}), 'overview': ('django.db.models.fields.TextField', [], {'null': 'True'}), 'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'owned_sentry_apps'", 'to': "orm['sentry.Organization']"}), 'proxy_user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'sentry_app'", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['sentry.User']"}), 'redirect_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'}), 'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': (u'django.db.models.fields.TextField', [], {})}), 'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}), 'slug': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}), 'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}), 'uuid': ('django.db.models.fields.CharField', [], {'default': "'4af6e22a-34e5-46e8-902c-df0aaed6ea5c'", 'max_length': '64'}), 'webhook_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}) }, 'sentry.sentryappavatar': { 'Meta': {'object_name': 'SentryAppAvatar'}, 'avatar_type': 
('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}), 'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'ident': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}), 'sentry_app': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'avatar'", 'unique': 'True', 'to': "orm['sentry.SentryApp']"}) }, 'sentry.sentryappinstallation': { 'Meta': {'object_name': 'SentryAppInstallation'}, 'api_grant': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'sentry_app_installation'", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['sentry.ApiGrant']"}), 'authorization': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'sentry_app_installation'", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['sentry.ApiAuthorization']"}), 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'date_deleted': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'date_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_app_installations'", 'to': "orm['sentry.Organization']"}), 'sentry_app': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'installations'", 'to': "orm['sentry.SentryApp']"}), 'uuid': ('django.db.models.fields.CharField', [], {'default': "'49829810-4d47-4d08-b3b2-7aeba82c5f45'", 'max_length': '64'}) }, 'sentry.servicehook': { 'Meta': 
{'object_name': 'ServiceHook'}, 'actor_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}), 'application': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiApplication']", 'null': 'True'}), 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'events': ('sentry.db.models.fields.array.ArrayField', [], {'of': (u'django.db.models.fields.TextField', [], {})}), 'guid': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}), 'secret': ('sentry.db.models.fields.encrypted.EncryptedTextField', [], {'default': "'111451e6f86643b082fc14d5224b8557fc8bdc7410fe41689cc1ce285ebab08f'"}), 'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}), 'url': ('django.db.models.fields.URLField', [], {'max_length': '512'}), 'version': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}) }, 'sentry.tagkey': { 'Meta': {'unique_together': "(('project_id', 'key'),)", 'object_name': 'TagKey', 'db_table': "'sentry_filterkey'"}, 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}), 'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}), 'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}), 'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}) }, 'sentry.tagvalue': { 'Meta': {'unique_together': "(('project_id', 'key', 
'value'),)", 'object_name': 'TagValue', 'db_table': "'sentry_filtervalue'", 'index_together': "(('project_id', 'key', 'last_seen'),)"}, 'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}), 'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}), 'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}), 'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}), 'value': ('django.db.models.fields.CharField', [], {'max_length': '200'}) }, 'sentry.team': { 'Meta': {'unique_together': "(('organization', 'slug'),)", 'object_name': 'Team'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}), 'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}), 'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}) }, 'sentry.teamavatar': { 'Meta': {'object_name': 'TeamAvatar'}, 'avatar_type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}), 'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL'}), 'id': 
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'ident': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}), 'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'avatar'", 'unique': 'True', 'to': "orm['sentry.Team']"}) }, 'sentry.user': { 'Meta': {'object_name': 'User', 'db_table': "'auth_user'"}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0', 'null': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_password_expired': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_sentry_app': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_active': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}), 'last_password_change': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_column': "'first_name'", 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'session_nonce': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True'}), 'username': 
('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}) }, 'sentry.useravatar': { 'Meta': {'object_name': 'UserAvatar'}, 'avatar_type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}), 'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'ident': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}), 'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'avatar'", 'unique': 'True', 'to': "orm['sentry.User']"}) }, 'sentry.useremail': { 'Meta': {'unique_together': "(('user', 'email'),)", 'object_name': 'UserEmail'}, 'date_hash_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'is_verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'emails'", 'to': "orm['sentry.User']"}), 'validation_hash': ('django.db.models.fields.CharField', [], {'default': "u'ate0bmL4OMNDYSnUagxlTlL2ceuWcRSc'", 'max_length': '32'}) }, 'sentry.userip': { 'Meta': {'unique_together': "(('user', 'ip_address'),)", 'object_name': 'UserIP'}, 'country_code': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True'}), 'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39'}), 'last_seen': 
('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'region_code': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True'}), 'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}) }, 'sentry.useroption': { 'Meta': {'unique_together': "(('user', 'project', 'key'), ('user', 'organization', 'key'))", 'object_name': 'UserOption'}, 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']", 'null': 'True'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}), 'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}), 'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {}) }, 'sentry.userpermission': { 'Meta': {'unique_together': "(('user', 'permission'),)", 'object_name': 'UserPermission'}, 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'permission': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}) }, 'sentry.userreport': { 'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'UserReport', 'index_together': "(('project', 'event_id'), ('project', 'date_added'))"}, 'comments': ('django.db.models.fields.TextField', [], {}), 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}), 'environment': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Environment']", 'null': 'True'}), 
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'event_user_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}), 'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}) } } complete_apps = ['sentry']
DeMille/emailhooks
refs/heads/master
django_nonrel/django/contrib/auth/tests/hashers.py
19
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.conf.global_settings import PASSWORD_HASHERS as default_hashers from django.contrib.auth.hashers import (is_password_usable, check_password, make_password, PBKDF2PasswordHasher, load_hashers, PBKDF2SHA1PasswordHasher, get_hasher, identify_hasher, UNUSABLE_PASSWORD, MAXIMUM_PASSWORD_LENGTH, password_max_length) from django.utils import unittest from django.utils.unittest import skipUnless try: import crypt except ImportError: crypt = None try: import bcrypt # Django 1.5 works only with py-bcrypt, not with bcrypt. py-bcrypt has # '_bcrypt' attribute, bcrypt doesn't. if not hasattr(bcrypt, '_bcrypt'): bcrypt = None except ImportError: bcrypt = None class TestUtilsHashPass(unittest.TestCase): def setUp(self): load_hashers(password_hashers=default_hashers) def test_simple(self): encoded = make_password('lètmein') self.assertTrue(encoded.startswith('pbkdf2_sha256$')) self.assertTrue(is_password_usable(encoded)) self.assertTrue(check_password('lètmein', encoded)) self.assertFalse(check_password('lètmeinz', encoded)) # Long password self.assertRaises( ValueError, make_password, b"1" * (MAXIMUM_PASSWORD_LENGTH + 1), ) def test_pkbdf2(self): encoded = make_password('lètmein', 'seasalt', 'pbkdf2_sha256') self.assertEqual(encoded, 'pbkdf2_sha256$10000$seasalt$CWWFdHOWwPnki7HvkcqN9iA2T3KLW1cf2uZ5kvArtVY=') self.assertTrue(is_password_usable(encoded)) self.assertTrue(check_password('lètmein', encoded)) self.assertFalse(check_password('lètmeinz', encoded)) self.assertEqual(identify_hasher(encoded).algorithm, "pbkdf2_sha256") # Long password self.assertRaises( ValueError, make_password, b"1" * (MAXIMUM_PASSWORD_LENGTH + 1), "seasalt", "pbkdf2_sha256", ) def test_sha1(self): encoded = make_password('lètmein', 'seasalt', 'sha1') self.assertEqual(encoded, 'sha1$seasalt$cff36ea83f5706ce9aa7454e63e431fc726b2dc8') self.assertTrue(is_password_usable(encoded)) self.assertTrue(check_password('lètmein', encoded)) 
self.assertFalse(check_password('lètmeinz', encoded)) self.assertEqual(identify_hasher(encoded).algorithm, "sha1") # Long password self.assertRaises( ValueError, make_password, b"1" * (MAXIMUM_PASSWORD_LENGTH + 1), "seasalt", "sha1", ) def test_md5(self): encoded = make_password('lètmein', 'seasalt', 'md5') self.assertEqual(encoded, 'md5$seasalt$3f86d0d3d465b7b458c231bf3555c0e3') self.assertTrue(is_password_usable(encoded)) self.assertTrue(check_password('lètmein', encoded)) self.assertFalse(check_password('lètmeinz', encoded)) self.assertEqual(identify_hasher(encoded).algorithm, "md5") # Long password self.assertRaises( ValueError, make_password, b"1" * (MAXIMUM_PASSWORD_LENGTH + 1), "seasalt", "md5", ) def test_unsalted_md5(self): encoded = make_password('lètmein', '', 'unsalted_md5') self.assertEqual(encoded, '88a434c88cca4e900f7874cd98123f43') self.assertTrue(is_password_usable(encoded)) self.assertTrue(check_password('lètmein', encoded)) self.assertFalse(check_password('lètmeinz', encoded)) self.assertEqual(identify_hasher(encoded).algorithm, "unsalted_md5") # Alternate unsalted syntax alt_encoded = "md5$$%s" % encoded self.assertTrue(is_password_usable(alt_encoded)) self.assertTrue(check_password('lètmein', alt_encoded)) self.assertFalse(check_password('lètmeinz', alt_encoded)) # Long password self.assertRaises( ValueError, make_password, b"1" * (MAXIMUM_PASSWORD_LENGTH + 1), "", "unsalted_md5", ) def test_unsalted_sha1(self): encoded = make_password('lètmein', '', 'unsalted_sha1') self.assertEqual(encoded, 'sha1$$6d138ca3ae545631b3abd71a4f076ce759c5700b') self.assertTrue(is_password_usable(encoded)) self.assertTrue(check_password('lètmein', encoded)) self.assertFalse(check_password('lètmeinz', encoded)) self.assertEqual(identify_hasher(encoded).algorithm, "unsalted_sha1") # Raw SHA1 isn't acceptable alt_encoded = encoded[6:] self.assertFalse(check_password('lètmein', alt_encoded)) # Long password self.assertRaises( ValueError, make_password, b"1" * 
(MAXIMUM_PASSWORD_LENGTH + 1), "", "unslated_sha1", ) @skipUnless(crypt, "no crypt module to generate password.") def test_crypt(self): encoded = make_password('lètmei', 'ab', 'crypt') self.assertEqual(encoded, 'crypt$$ab1Hv2Lg7ltQo') self.assertTrue(is_password_usable(encoded)) self.assertTrue(check_password('lètmei', encoded)) self.assertFalse(check_password('lètmeiz', encoded)) self.assertEqual(identify_hasher(encoded).algorithm, "crypt") # Long password self.assertRaises( ValueError, make_password, b"1" * (MAXIMUM_PASSWORD_LENGTH + 1), "seasalt", "crypt", ) @skipUnless(bcrypt, "py-bcrypt not installed") def test_bcrypt(self): encoded = make_password('lètmein', hasher='bcrypt') self.assertTrue(is_password_usable(encoded)) self.assertTrue(encoded.startswith('bcrypt$')) self.assertTrue(check_password('lètmein', encoded)) self.assertFalse(check_password('lètmeinz', encoded)) self.assertEqual(identify_hasher(encoded).algorithm, "bcrypt") # Long password self.assertRaises( ValueError, make_password, b"1" * (MAXIMUM_PASSWORD_LENGTH + 1), hasher="bcrypt", ) def test_unusable(self): encoded = make_password(None) self.assertFalse(is_password_usable(encoded)) self.assertFalse(check_password(None, encoded)) self.assertFalse(check_password(UNUSABLE_PASSWORD, encoded)) self.assertFalse(check_password('', encoded)) self.assertFalse(check_password('lètmein', encoded)) self.assertFalse(check_password('lètmeinz', encoded)) self.assertRaises(ValueError, identify_hasher, encoded) def test_bad_algorithm(self): def doit(): make_password('lètmein', hasher='lolcat') self.assertRaises(ValueError, doit) self.assertRaises(ValueError, identify_hasher, "lolcat$salt$hash") def test_bad_encoded(self): self.assertFalse(is_password_usable('lètmein_badencoded')) self.assertFalse(is_password_usable('')) def test_max_password_length_decorator(self): @password_max_length(10) def encode(s, password, salt): return True self.assertTrue(encode(None, b"1234", b"1234")) self.assertRaises(ValueError, 
encode, None, b"1234567890A", b"1234") def test_low_level_pkbdf2(self): hasher = PBKDF2PasswordHasher() encoded = hasher.encode('lètmein', 'seasalt') self.assertEqual(encoded, 'pbkdf2_sha256$10000$seasalt$CWWFdHOWwPnki7HvkcqN9iA2T3KLW1cf2uZ5kvArtVY=') self.assertTrue(hasher.verify('lètmein', encoded)) def test_low_level_pbkdf2_sha1(self): hasher = PBKDF2SHA1PasswordHasher() encoded = hasher.encode('lètmein', 'seasalt') self.assertEqual(encoded, 'pbkdf2_sha1$10000$seasalt$oAfF6vgs95ncksAhGXOWf4Okq7o=') self.assertTrue(hasher.verify('lètmein', encoded)) def test_upgrade(self): self.assertEqual('pbkdf2_sha256', get_hasher('default').algorithm) for algo in ('sha1', 'md5'): encoded = make_password('lètmein', hasher=algo) state = {'upgraded': False} def setter(password): state['upgraded'] = True self.assertTrue(check_password('lètmein', encoded, setter)) self.assertTrue(state['upgraded']) def test_no_upgrade(self): encoded = make_password('lètmein') state = {'upgraded': False} def setter(): state['upgraded'] = True self.assertFalse(check_password('WRONG', encoded, setter)) self.assertFalse(state['upgraded']) def test_no_upgrade_on_incorrect_pass(self): self.assertEqual('pbkdf2_sha256', get_hasher('default').algorithm) for algo in ('sha1', 'md5'): encoded = make_password('lètmein', hasher=algo) state = {'upgraded': False} def setter(): state['upgraded'] = True self.assertFalse(check_password('WRONG', encoded, setter)) self.assertFalse(state['upgraded'])
mrfuxi/docker-py
refs/heads/master
docker/utils/ports/__init__.py
57
from .ports import ( split_port, build_port_bindings ) # flake8: noqa
madmouser1/aubio
refs/heads/master
python/demos/demo_pitch.py
4
#! /usr/bin/env python import sys from aubio import source, pitch, freqtomidi if len(sys.argv) < 2: print "Usage: %s <filename> [samplerate]" % sys.argv[0] sys.exit(1) filename = sys.argv[1] downsample = 1 samplerate = 44100 / downsample if len( sys.argv ) > 2: samplerate = int(sys.argv[2]) win_s = 4096 / downsample # fft size hop_s = 512 / downsample # hop size s = source(filename, samplerate, hop_s) samplerate = s.samplerate tolerance = 0.8 pitch_o = pitch("yin", win_s, hop_s, samplerate) pitch_o.set_unit("midi") pitch_o.set_tolerance(tolerance) pitches = [] confidences = [] # total number of frames read total_frames = 0 while True: samples, read = s() pitch = pitch_o(samples)[0] #pitch = int(round(pitch)) confidence = pitch_o.get_confidence() #if confidence < 0.8: pitch = 0. #print "%f %f %f" % (total_frames / float(samplerate), pitch, confidence) pitches += [pitch] confidences += [confidence] total_frames += read if read < hop_s: break if 0: sys.exit(0) #print pitches from numpy import array, ma import matplotlib.pyplot as plt from demo_waveform_plot import get_waveform_plot, set_xlabels_sample2time skip = 1 pitches = array(pitches[skip:]) confidences = array(confidences[skip:]) times = [t * hop_s for t in range(len(pitches))] fig = plt.figure() ax1 = fig.add_subplot(311) ax1 = get_waveform_plot(filename, samplerate = samplerate, block_size = hop_s, ax = ax1) plt.setp(ax1.get_xticklabels(), visible = False) ax1.set_xlabel('') def array_from_text_file(filename, dtype = 'float'): import os.path from numpy import array filename = os.path.join(os.path.dirname(__file__), filename) return array([line.split() for line in open(filename).readlines()], dtype = dtype) ax2 = fig.add_subplot(312, sharex = ax1) import sys, os.path ground_truth = os.path.splitext(filename)[0] + '.f0.Corrected' if os.path.isfile(ground_truth): ground_truth = array_from_text_file(ground_truth) true_freqs = ground_truth[:,2] true_freqs = ma.masked_where(true_freqs < 2, true_freqs) true_times = 
float(samplerate) * ground_truth[:,0] ax2.plot(true_times, true_freqs, 'r') ax2.axis( ymin = 0.9 * true_freqs.min(), ymax = 1.1 * true_freqs.max() ) # plot raw pitches ax2.plot(times, pitches, '.g') # plot cleaned up pitches cleaned_pitches = pitches #cleaned_pitches = ma.masked_where(cleaned_pitches < 0, cleaned_pitches) #cleaned_pitches = ma.masked_where(cleaned_pitches > 120, cleaned_pitches) cleaned_pitches = ma.masked_where(confidences < tolerance, cleaned_pitches) ax2.plot(times, cleaned_pitches, '.-') #ax2.axis( ymin = 0.9 * cleaned_pitches.min(), ymax = 1.1 * cleaned_pitches.max() ) #ax2.axis( ymin = 55, ymax = 70 ) plt.setp(ax2.get_xticklabels(), visible = False) ax2.set_ylabel('f0 (midi)') # plot confidence ax3 = fig.add_subplot(313, sharex = ax1) # plot the confidence ax3.plot(times, confidences) # draw a line at tolerance ax3.plot(times, [tolerance]*len(confidences)) ax3.axis( xmin = times[0], xmax = times[-1]) ax3.set_ylabel('condidence') set_xlabels_sample2time(ax3, times[-1], samplerate) plt.show() #plt.savefig(os.path.basename(filename) + '.svg')
kjung/scikit-learn
refs/heads/master
examples/ensemble/plot_voting_probas.py
316
""" =========================================================== Plot class probabilities calculated by the VotingClassifier =========================================================== Plot the class probabilities of the first sample in a toy dataset predicted by three different classifiers and averaged by the `VotingClassifier`. First, three examplary classifiers are initialized (`LogisticRegression`, `GaussianNB`, and `RandomForestClassifier`) and used to initialize a soft-voting `VotingClassifier` with weights `[1, 1, 5]`, which means that the predicted probabilities of the `RandomForestClassifier` count 5 times as much as the weights of the other classifiers when the averaged probability is calculated. To visualize the probability weighting, we fit each classifier on the training set and plot the predicted class probabilities for the first sample in this example dataset. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from sklearn.linear_model import LogisticRegression from sklearn.naive_bayes import GaussianNB from sklearn.ensemble import RandomForestClassifier from sklearn.ensemble import VotingClassifier clf1 = LogisticRegression(random_state=123) clf2 = RandomForestClassifier(random_state=123) clf3 = GaussianNB() X = np.array([[-1.0, -1.0], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]]) y = np.array([1, 1, 2, 2]) eclf = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2), ('gnb', clf3)], voting='soft', weights=[1, 1, 5]) # predict class probabilities for all classifiers probas = [c.fit(X, y).predict_proba(X) for c in (clf1, clf2, clf3, eclf)] # get class probabilities for the first sample in the dataset class1_1 = [pr[0, 0] for pr in probas] class2_1 = [pr[0, 1] for pr in probas] # plotting N = 4 # number of groups ind = np.arange(N) # group positions width = 0.35 # bar width fig, ax = plt.subplots() # bars for classifier 1-3 p1 = ax.bar(ind, np.hstack(([class1_1[:-1], [0]])), width, color='green') p2 = ax.bar(ind + width, 
np.hstack(([class2_1[:-1], [0]])), width, color='lightgreen') # bars for VotingClassifier p3 = ax.bar(ind, [0, 0, 0, class1_1[-1]], width, color='blue') p4 = ax.bar(ind + width, [0, 0, 0, class2_1[-1]], width, color='steelblue') # plot annotations plt.axvline(2.8, color='k', linestyle='dashed') ax.set_xticks(ind + width) ax.set_xticklabels(['LogisticRegression\nweight 1', 'GaussianNB\nweight 1', 'RandomForestClassifier\nweight 5', 'VotingClassifier\n(average probabilities)'], rotation=40, ha='right') plt.ylim([0, 1]) plt.title('Class probabilities for sample 1 by different classifiers') plt.legend([p1[0], p2[0]], ['class 1', 'class 2'], loc='upper left') plt.show()
ucb-sejits/opentuner
refs/heads/master
opentuner/search/objective.py
1
import abc
import logging

from fn import _

import opentuner
from opentuner.resultsdb.models import *

log = logging.getLogger(__name__)


class SearchObjective(object):
  """
  Delegates the comparison of results and configurations for the tuning
  driver.  Concrete objectives (minimize time, maximize accuracy, ...)
  implement result_order_by_terms(), result_compare() and result_relative().
  """
  __metaclass__ = abc.ABCMeta

  def __init__(self):
    # filled in later by set_driver(); required by the query-based helpers
    self.driver = None

  def set_driver(self, driver):
    self.driver = driver

  @abc.abstractmethod
  def result_order_by_terms(self):
    """return database columns required to order by the objective"""
    return []

  @abc.abstractmethod
  def result_compare(self, result1, result2):
    """cmp() compatible comparison of resultsdb.models.Result"""
    return

  def config_compare(self, config1, config2):
    """cmp() compatible comparison of resultsdb.models.Configuration"""
    return self.result_compare(self.driver.results_query(config=config1).one(),
                               self.driver.results_query(config=config2).one())

  @abc.abstractmethod
  def result_relative(self, result1, result2):
    """return None, or a relative goodness of resultsdb.models.Result"""
    return

  def config_relative(self, config1, config2):
    """return None, or a relative goodness of resultsdb.models.Configuration"""
    return self.result_relative(self.driver.results_query(config=config1).one(),
                                self.driver.results_query(config=config2).one())

  def result_order_by(self, q):
    """apply this objective's ordering terms to a Result query"""
    return q.order_by(*self.result_order_by_terms())

  def compare(self, a, b):
    """cmp() compatible compare, dispatching on the argument type"""
    if isinstance(a, Configuration):
      return self.config_compare(a, b)
    if isinstance(a, Result):
      return self.result_compare(a, b)
    assert False

  def relative(self, a, b):
    """relative goodness, dispatching on the argument type"""
    if isinstance(a, Configuration):
      return self.config_relative(a, b)
    if isinstance(a, Result):
      return self.result_relative(a, b)
    # was `assert None`; `assert False` states the intent (unreachable for
    # supported types) explicitly, matching compare() above
    assert False

  def lt(self, a, b):
    return self.compare(a, b) < 0

  def lte(self, a, b):
    return self.compare(a, b) <= 0

  def gt(self, a, b):
    return self.compare(a, b) > 0

  def gte(self, a, b):
    return self.compare(a, b) >= 0

  def min(self, *l):
    """best element according to the objective; varargs or a single iterable"""
    if len(l) == 1:
      l = l[0]
    rv = l[0]
    for i in l[1:]:
      if self.lt(i, rv):
        rv = i
    return rv

  def max(self, *l):
    """worst element according to the objective; varargs or a single iterable"""
    if len(l) == 1:
      l = l[0]
    rv = l[0]
    for i in l[1:]:
      if self.gt(i, rv):
        rv = i
    return rv

  def limit_from_config(self, config):
    """
    a time limit to kill a result after such that it can be compared to
    results from config
    """
    return max(map(_.time, self.driver.results_query(config=config)))

  def project_compare(self, a1, a2, b1, b2, factor=1.0):
    """
    linearly project both a and b forward to see how they will compare in
    the future
    """
    a3 = Result()
    b3 = Result()
    a3.time = _project(a1.time, a2.time, factor)
    a3.accuracy = _project(a1.accuracy, a2.accuracy, factor)
    a3.energy = _project(a1.energy, a2.energy, factor)
    a3.confidence = _project(a1.confidence, a2.confidence, factor)
    # BUGFIX: b3 was never populated, so b was always compared as an empty
    # Result(); project b forward the same way as a
    b3.time = _project(b1.time, b2.time, factor)
    b3.accuracy = _project(b1.accuracy, b2.accuracy, factor)
    b3.energy = _project(b1.energy, b2.energy, factor)
    b3.confidence = _project(b1.confidence, b2.confidence, factor)
    return self.result_compare(a3, b3)

  def display(self, result):
    """produce a string version of a resultsdb.models.Result()"""
    rv = []
    for k in ('time', 'accuracy', 'energy', 'size', 'confidence'):
      v = getattr(result, k)
      if v is not None:
        rv.append('%s=%.4f' % (k, float(v)))
    return ', '.join(rv)

  def filter_acceptable(self, query):
    """Return a Result() query that only returns acceptable results"""
    return query

  def is_acceptable(self, result):
    """Test if a Result() meets thresholds"""
    return True

  def stats_quality_score(self, result, worst_result, best_result):
    """return a score for statistics"""
    if not self.is_acceptable(result):
      return worst_result.time
    else:
      return result.time


def _project(a1, a2, factor):
  """linear extrapolation from a1 towards a2, extended by factor; None-safe"""
  if a1 is None or a2 is None:
    return None
  return a2 + factor * (a2 - a1)


class MinimizeValue(SearchObjective):
  """minimize a single named Result() column, given by the `value` property"""
  __metaclass__ = abc.ABCMeta

  @abc.abstractproperty
  def value(self):
    pass

  def result_order_by_terms(self):
    """return database columns required to order by the objective"""
    return [Result.__dict__[self.value]]

  def result_compare(self, result1, result2):
    """cmp() compatible comparison of resultsdb.models.Result"""
    return cmp(result1.__dict__[self.value], result2.__dict__[self.value])

  def config_compare(self, config1, config2):
    """cmp() compatible comparison of resultsdb.models.Configuration"""
    # BUGFIX: was map(_.__dict__[self.value], ...).  `_.__dict__` is the fn
    # placeholder's real attribute dict (not a deferred field accessor), so
    # indexing it with self.value raised KeyError at runtime; use an
    # explicit accessor instead.
    extract = lambda r: r.__dict__[self.value]
    return cmp(min(map(extract, self.driver.results_query(config=config1))),
               min(map(extract, self.driver.results_query(config=config2))))

  def result_relative(self, result1, result2):
    """return None, or a relative goodness of resultsdb.models.Result"""
    if result2.__dict__[self.value] == 0:
      return float('inf') * result1.__dict__[self.value]
    return result1.__dict__[self.value] / result2.__dict__[self.value]


class MinimizeTime(MinimizeValue):
  """minimize Result().time"""

  @property
  def value(self):
    return 'time'


class MinimizeEnergy(MinimizeValue):
  """minimize Result().energy"""

  @property
  def value(self):
    # BUGFIX: returned 'time' (copy-paste from MinimizeTime), which made
    # this objective silently minimize time instead of energy
    return 'energy'


class MaximizeAccuracy(SearchObjective):
  """maximize Result().accuracy"""

  def result_order_by_terms(self):
    """return database columns required to order by the objective"""
    return [-Result.accuracy]

  def result_compare(self, result1, result2):
    """cmp() compatible comparison of resultsdb.models.Result"""
    # note opposite order: higher accuracy sorts first
    return cmp(result2.accuracy, result1.accuracy)

  def result_relative(self, result1, result2):
    """return None, or a relative goodness of resultsdb.models.Result"""
    # note opposite order
    if result1.accuracy == 0:
      return float('inf') * result2.accuracy
    return result2.accuracy / result1.accuracy

  def stats_quality_score(self, result, worst_result, best_result):
    """return a score for statistics"""
    if not self.is_acceptable(result):
      return worst_result.time
    else:
      return result.time

  def stats_raw_score(self, result):
    return result.accuracy


class MaximizeAccuracyMinimizeSize(MaximizeAccuracy):
  """maximize Result().accuracy, break ties with Result().size"""

  def result_order_by_terms(self):
    """return database columns required to order by the objective"""
    return [-Result.accuracy, Result.size]

  def result_compare(self, result1, result2):
    """cmp() compatible comparison of resultsdb.models.Result"""
    return cmp((-result1.accuracy, result1.size),
               (-result2.accuracy, result2.size))

  def display(self, result):
    """produce a string version of a resultsdb.models.Result()"""
    return "accuracy=%.8f, size=%.1f" % (result.accuracy, result.size)

  def result_relative(self, result1, result2):
    """return None, or a relative goodness of resultsdb.models.Result"""
    # unimplemented for now
    log.warning('result_relative() not yet implemented for %s',
                self.__class__.__name__)
    return None


class ThresholdAccuracyMinimizeTime(SearchObjective):
  """
  if accuracy >= target: minimize time
  else: maximize accuracy
  """

  def __init__(self, accuracy_target, low_accuracy_limit_multiplier=10.0):
    self.accuracy_target = accuracy_target
    self.low_accuracy_limit_multiplier = low_accuracy_limit_multiplier
    super(ThresholdAccuracyMinimizeTime, self).__init__()

  def result_order_by_terms(self):
    """return database columns required to order by the objective"""
    return ["min(accuracy, %f) desc" % self.accuracy_target,
            opentuner.resultsdb.models.Result.time]

  def result_compare(self, result1, result2):
    """cmp() compatible comparison of resultsdb.models.Result"""
    return cmp((-min(self.accuracy_target, result1.accuracy), result1.time),
               (-min(self.accuracy_target, result2.accuracy), result2.time))

  def config_compare(self, config1, config2):
    """cmp() compatible comparison of resultsdb.models.Configuration"""
    return self.result_compare(
        self.driver.results_query(config=config1, objective_ordered=True)[0],
        self.driver.results_query(config=config2, objective_ordered=True)[0])

  def limit_from_config(self, config):
    """
    a time limit to kill a result after such that it can be compared to
    results from config
    """
    results = self.driver.results_query(config=config)
    if results.count() == 0:
      return None
    # give extra slack to configs that have not yet reached the target
    if self.accuracy_target > min(map(_.accuracy, results)):
      m = self.low_accuracy_limit_multiplier
    else:
      m = 1.0
    return m * max(map(_.time, results))

  def filter_acceptable(self, query):
    """Return a Result() query that only returns acceptable results"""
    return query.filter(opentuner.resultsdb.models.Result.accuracy
                        >= self.accuracy_target)

  def is_acceptable(self, result):
    """Test if a Result() meets thresholds"""
    return result.accuracy >= self.accuracy_target

  def result_relative(self, result1, result2):
    """return None, or a relative goodness of resultsdb.models.Result"""
    # unimplemented for now
    log.warning('result_relative() not yet implemented for %s',
                self.__class__.__name__)
    return None
slevenhagen/odoo-npg
refs/heads/8.0
addons/website_crm_partner_assign/controllers/main.py
271
# -*- coding: utf-8 -*- import werkzeug from openerp import SUPERUSER_ID from openerp.addons.web import http from openerp.addons.web.http import request from openerp.addons.website.models.website import slug, unslug from openerp.tools.translate import _ class WebsiteCrmPartnerAssign(http.Controller): _references_per_page = 40 @http.route([ '/partners', '/partners/page/<int:page>', '/partners/grade/<model("res.partner.grade"):grade>', '/partners/grade/<model("res.partner.grade"):grade>/page/<int:page>', '/partners/country/<model("res.country"):country>', '/partners/country/<model("res.country"):country>/page/<int:page>', '/partners/grade/<model("res.partner.grade"):grade>/country/<model("res.country"):country>', '/partners/grade/<model("res.partner.grade"):grade>/country/<model("res.country"):country>/page/<int:page>', ], type='http', auth="public", website=True) def partners(self, country=None, grade=None, page=0, **post): country_all = post.pop('country_all', False) partner_obj = request.registry['res.partner'] country_obj = request.registry['res.country'] search = post.get('search', '') base_partner_domain = [('is_company', '=', True), ('grade_id.website_published', '=', True), ('website_published', '=', True)] if search: base_partner_domain += ['|', ('name', 'ilike', search), ('website_description', 'ilike', search)] # group by grade grade_domain = list(base_partner_domain) if not country and not country_all: country_code = request.session['geoip'].get('country_code') if country_code: country_ids = country_obj.search(request.cr, request.uid, [('code', '=', country_code)], context=request.context) if country_ids: country = country_obj.browse(request.cr, request.uid, country_ids[0], context=request.context) if country: grade_domain += [('country_id', '=', country.id)] grades = partner_obj.read_group( request.cr, SUPERUSER_ID, grade_domain, ["id", "grade_id"], groupby="grade_id", orderby="grade_id DESC", context=request.context) grades_partners = 
partner_obj.search( request.cr, SUPERUSER_ID, grade_domain, context=request.context, count=True) # flag active grade for grade_dict in grades: grade_dict['active'] = grade and grade_dict['grade_id'][0] == grade.id grades.insert(0, { 'grade_id_count': grades_partners, 'grade_id': (0, _("All Categories")), 'active': bool(grade is None), }) # group by country country_domain = list(base_partner_domain) if grade: country_domain += [('grade_id', '=', grade.id)] countries = partner_obj.read_group( request.cr, SUPERUSER_ID, country_domain, ["id", "country_id"], groupby="country_id", orderby="country_id", context=request.context) countries_partners = partner_obj.search( request.cr, SUPERUSER_ID, country_domain, context=request.context, count=True) # flag active country for country_dict in countries: country_dict['active'] = country and country_dict['country_id'] and country_dict['country_id'][0] == country.id countries.insert(0, { 'country_id_count': countries_partners, 'country_id': (0, _("All Countries")), 'active': bool(country is None), }) # current search if grade: base_partner_domain += [('grade_id', '=', grade.id)] if country: base_partner_domain += [('country_id', '=', country.id)] # format pager if grade and not country: url = '/partners/grade/' + slug(grade) elif country and not grade: url = '/partners/country/' + slug(country) elif country and grade: url = '/partners/grade/' + slug(grade) + '/country/' + slug(country) else: url = '/partners' url_args = {} if search: url_args['search'] = search if country_all: url_args['country_all'] = True partner_count = partner_obj.search_count( request.cr, SUPERUSER_ID, base_partner_domain, context=request.context) pager = request.website.pager( url=url, total=partner_count, page=page, step=self._references_per_page, scope=7, url_args=url_args) # search partners matching current search parameters partner_ids = partner_obj.search( request.cr, SUPERUSER_ID, base_partner_domain, order="grade_id DESC", context=request.context) # 
todo in trunk: order="grade_id DESC, implemented_count DESC", offset=pager['offset'], limit=self._references_per_page partners = partner_obj.browse(request.cr, SUPERUSER_ID, partner_ids, request.context) # remove me in trunk partners = sorted(partners, key=lambda x: (x.grade_id.sequence if x.grade_id else 0, len([i for i in x.implemented_partner_ids if i.website_published])), reverse=True) partners = partners[pager['offset']:pager['offset'] + self._references_per_page] google_map_partner_ids = ','.join(map(str, [p.id for p in partners])) values = { 'countries': countries, 'current_country': country, 'grades': grades, 'current_grade': grade, 'partners': partners, 'google_map_partner_ids': google_map_partner_ids, 'pager': pager, 'searches': post, 'search_path': "%s" % werkzeug.url_encode(post), } return request.website.render("website_crm_partner_assign.index", values) # Do not use semantic controller due to SUPERUSER_ID @http.route(['/partners/<partner_id>'], type='http', auth="public", website=True) def partners_detail(self, partner_id, partner_name='', **post): _, partner_id = unslug(partner_id) current_grade, current_country = None, None grade_id = post.get('grade_id') country_id = post.get('country_id') if grade_id: grade_ids = request.registry['res.partner.grade'].exists(request.cr, request.uid, int(grade_id), context=request.context) if grade_ids: current_grade = request.registry['res.partner.grade'].browse(request.cr, request.uid, grade_ids[0], context=request.context) if country_id: country_ids = request.registry['res.country'].exists(request.cr, request.uid, int(country_id), context=request.context) if country_ids: current_country = request.registry['res.country'].browse(request.cr, request.uid, country_ids[0], context=request.context) if partner_id: partner = request.registry['res.partner'].browse(request.cr, SUPERUSER_ID, partner_id, context=request.context) if partner.exists() and partner.website_published: values = { 'main_object': partner, 'partner': 
partner, 'current_grade': current_grade, 'current_country': current_country } return request.website.render("website_crm_partner_assign.partner", values) return self.partners(**post)
madjar/cython
refs/heads/master
tests/run/for_in_iter.py
26
# mode: run # tag: forin import sys import cython try: from builtins import next except ImportError: def next(it): return it.next() def for_in_pyiter_pass(it): """ >>> it = Iterable(5) >>> for_in_pyiter_pass(it) >>> next(it) Traceback (most recent call last): StopIteration """ for item in it: pass def for_in_pyiter(it): """ >>> for_in_pyiter(Iterable(5)) [0, 1, 2, 3, 4] """ l = [] for item in it: l.append(item) return l def for_in_list(): """ >>> for_in_pyiter([1,2,3,4,5]) [1, 2, 3, 4, 5] """ @cython.test_assert_path_exists('//TupleNode//IntNode') @cython.test_fail_if_path_exists('//ListNode//IntNode') def for_in_literal_list(): """ >>> for_in_literal_list() [1, 2, 3, 4] """ l = [] for i in [1,2,3,4]: l.append(i) return l @cython.test_assert_path_exists('//TupleNode//IntNode') @cython.test_fail_if_path_exists('//ListNode//IntNode') def for_in_literal_mult_list(): """ >>> for_in_literal_mult_list() [1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4] """ l = [] for i in [1,2,3,4] * 3: l.append(i) return l class Iterable(object): """ >>> for_in_pyiter(Iterable(5)) [0, 1, 2, 3, 4] """ def __init__(self, N): self.N = N self.i = 0 def __iter__(self): return self def __next__(self): if self.i < self.N: i = self.i self.i += 1 return i raise StopIteration next = __next__ if sys.version_info[0] >= 3: class NextReplacingIterable(object): def __init__(self): self.i = 0 def __iter__(self): return self def __next__(self): if self.i > 5: raise StopIteration self.i += 1 self.__next__ = self.next2 return 1 def next2(self): self.__next__ = self.next3 return 2 def next3(self): del self.__next__ raise StopIteration else: class NextReplacingIterable(object): def __init__(self): self.i = 0 def __iter__(self): return self def next(self): if self.i > 5: raise StopIteration self.i += 1 self.next = self.next2 return 1 def next2(self): self.next = self.next3 return 2 def next3(self): del self.next raise StopIteration def for_in_next_replacing_iter(): """ >>> for_in_pyiter(NextReplacingIterable()) [1, 1, 1, 
1, 1, 1] """ def for_in_gen(N): """ >>> for_in_pyiter(for_in_gen(10)) [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] """ for i in range(N): yield i
sgml/popcorn_maker
refs/heads/master
vendor-local/lib/python/south/db/postgresql_psycopg2.py
20
import uuid
from django.db.backends.util import truncate_name
from south.db import generic


class DatabaseOperations(generic.DatabaseOperations):

    """
    PsycoPG2 implementation of database operations.
    """

    backend_name = "postgres"

    def create_index_name(self, table_name, column_names, suffix=""):
        """
        Generate a unique name for the index

        Django's logic for naming field indexes is different in the
        postgresql_psycopg2 backend, so we follow that for single-column
        indexes.
        """
        if len(column_names) == 1:
            # Single-column: mimic Django's <table>_<column><suffix> name,
            # truncated to the backend's identifier length limit.
            return truncate_name(
                '%s_%s%s' % (table_name, column_names[0], suffix),
                self._get_connection().ops.max_name_length()
            )
        # Multi-column indexes fall back to the generic naming scheme.
        return super(DatabaseOperations, self).create_index_name(table_name, column_names, suffix)

    @generic.copy_column_constraints
    @generic.delete_column_constraints
    def rename_column(self, table_name, old, new):
        """Rename a column in place via ALTER TABLE ... RENAME COLUMN."""
        if old == new:
            # Short-circuit out
            return []
        self.execute('ALTER TABLE %s RENAME COLUMN %s TO %s;' % (
            self.quote_name(table_name),
            self.quote_name(old),
            self.quote_name(new),
        ))

    @generic.invalidate_table_constraints
    def rename_table(self, old_table_name, table_name):
        "will rename the table and an associated ID sequence and primary key index"
        # First, rename the table
        generic.DatabaseOperations.rename_table(self, old_table_name, table_name)
        # Then, try renaming the ID sequence
        # (if you're using other AutoFields... your problem, unfortunately)
        # Each rename is committed separately so a missing sequence/index can
        # be rolled back without losing the table rename itself.
        self.commit_transaction()
        self.start_transaction()
        try:
            generic.DatabaseOperations.rename_table(self, old_table_name + "_id_seq", table_name + "_id_seq")
        except:
            if self.debug:
                print " ~ No such sequence (ignoring error)"
            self.rollback_transaction()
        else:
            self.commit_transaction()
        self.start_transaction()

        # Rename primary key index, will not rename other indices on
        # the table that are used by django (e.g. foreign keys). Until
        # figure out how, you need to do this yourself.
        try:
            generic.DatabaseOperations.rename_table(self, old_table_name + "_pkey", table_name + "_pkey")
        except:
            if self.debug:
                print " ~ No such primary key (ignoring error)"
            self.rollback_transaction()
        else:
            self.commit_transaction()
        self.start_transaction()

    def rename_index(self, old_index_name, index_name):
        "Rename an index individually"
        # Postgres renames indexes with the same ALTER TABLE ... RENAME
        # statement used for tables, so reuse the generic table rename.
        generic.DatabaseOperations.rename_table(self, old_index_name, index_name)

    def _default_value_workaround(self, value):
        "Support for UUIDs on psql"
        # psycopg2 cannot adapt uuid.UUID defaults directly; pass the string.
        if isinstance(value, uuid.UUID):
            return str(value)
        else:
            return super(DatabaseOperations, self)._default_value_workaround(value)

    def _db_type_for_alter_column(self, field):
        # Delegate to the shared positive-integer-aware type resolution.
        return self._db_positive_type_for_alter_column(DatabaseOperations, field)

    def _alter_add_column_mods(self, field, name, params, sqls):
        # Append the CHECK (>= 0) clause for positive integer fields.
        return self._alter_add_positive_check(DatabaseOperations, field, name, params, sqls)
hvnsweeting/Diamond
refs/heads/master
src/collectors/netapp/test/testnetappDisk.py
29
#!/usr/bin/python
# coding=utf-8
###############################################################################

from test import CollectorTestCase
from test import get_collector_config
from test import unittest

from netappDisk import netappDisk

###############################################################################


class TestnetappDisk(CollectorTestCase):
    """Smoke tests for the netappDisk collector."""

    def setUp(self):
        """Build a collector instance from an empty override config."""
        collector_config = get_collector_config('netappDisk', {})
        self.collector = netappDisk(collector_config, None)

    def test_import(self):
        """The collector class must be importable and truthy."""
        self.assertTrue(netappDisk)


###############################################################################
if __name__ == "__main__":
    unittest.main()
kdwink/intellij-community
refs/heads/master
python/testData/refactoring/rename/renameShadowingVariable.py
83
def lab(): pass lab = 1 print(l<caret>ab)
ovnicraft/openerp-restaurant
refs/heads/master
website_mail/__openerp__.py
379
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## { 'name': 'Website Mail', 'category': 'Hidden', 'summary': 'Website Module for Mail', 'version': '0.1', 'description': """Glue module holding mail improvements for website.""", 'author': 'OpenERP SA', 'depends': ['website', 'mail', 'email_template'], 'data': [ 'views/snippets.xml', 'views/website_mail.xml', 'views/website_email_designer.xml', 'views/email_template_view.xml', 'data/mail_groups.xml', 'security/website_mail.xml', ], 'qweb': [ 'static/src/xml/website_mail.xml' ], 'installable': True, 'auto_install': True, }
carthagecollege/django-djtinue
refs/heads/master
djtinue/admissions/urls.py
1
# URL routing for the djtinue admissions app: information request/session
# forms (with static success pages) plus the nested application URLs.
from django.conf.urls import include, url
from django.views.generic import TemplateView

from djtinue.admissions import views

urlpatterns = [
    # information request
    url(
        r'^information-request/$',
        views.info_request,
        name='info_request'
    ),
    # static "thank you" page shown after a successful information request
    url(
        r'^information-request/success/$',
        TemplateView.as_view(
            template_name='admissions/inforequest_success.html'
        ),
        name='info_request_success'
    ),
    # static "thank you" page shown after a successful session signup
    url(
        r'^information-session/success/$',
        TemplateView.as_view(
            template_name='admissions/infosession_success.html'
        ),
        name='info_session_success'
    ),
    # session signup form; `session_type` is a slug passed to the view
    url(
        r'^information-session/(?P<session_type>[a-zA-Z0-9_-]+)/$',
        views.info_session,
        name='info_session'
    ),
    # application
    url(
        r'^application/',
        include('djtinue.admissions.application.urls')
    ),
]
sfrenza/test-for-bot
refs/heads/master
venv/Lib/site-packages/click/_termui_impl.py
136
""" click._termui_impl ~~~~~~~~~~~~~~~~~~ This module contains implementations for the termui module. To keep the import time of Click down, some infrequently used functionality is placed in this module and only imported as needed. :copyright: (c) 2014 by Armin Ronacher. :license: BSD, see LICENSE for more details. """ import os import sys import time import math from ._compat import _default_text_stdout, range_type, PY2, isatty, \ open_stream, strip_ansi, term_len, get_best_encoding, WIN from .utils import echo from .exceptions import ClickException if os.name == 'nt': BEFORE_BAR = '\r' AFTER_BAR = '\n' else: BEFORE_BAR = '\r\033[?25l' AFTER_BAR = '\033[?25h\n' def _length_hint(obj): """Returns the length hint of an object.""" try: return len(obj) except (AttributeError, TypeError): try: get_hint = type(obj).__length_hint__ except AttributeError: return None try: hint = get_hint(obj) except TypeError: return None if hint is NotImplemented or \ not isinstance(hint, (int, long)) or \ hint < 0: return None return hint class ProgressBar(object): def __init__(self, iterable, length=None, fill_char='#', empty_char=' ', bar_template='%(bar)s', info_sep=' ', show_eta=True, show_percent=None, show_pos=False, item_show_func=None, label=None, file=None, color=None, width=30): self.fill_char = fill_char self.empty_char = empty_char self.bar_template = bar_template self.info_sep = info_sep self.show_eta = show_eta self.show_percent = show_percent self.show_pos = show_pos self.item_show_func = item_show_func self.label = label or '' if file is None: file = _default_text_stdout() self.file = file self.color = color self.width = width self.autowidth = width == 0 if length is None: length = _length_hint(iterable) if iterable is None: if length is None: raise TypeError('iterable or length is required') iterable = range_type(length) self.iter = iter(iterable) self.length = length self.length_known = length is not None self.pos = 0 self.avg = [] self.start = self.last_eta = 
time.time() self.eta_known = False self.finished = False self.max_width = None self.entered = False self.current_item = None self.is_hidden = not isatty(self.file) self._last_line = None def __enter__(self): self.entered = True self.render_progress() return self def __exit__(self, exc_type, exc_value, tb): self.render_finish() def __iter__(self): if not self.entered: raise RuntimeError('You need to use progress bars in a with block.') self.render_progress() return self def render_finish(self): if self.is_hidden: return self.file.write(AFTER_BAR) self.file.flush() @property def pct(self): if self.finished: return 1.0 return min(self.pos / (float(self.length) or 1), 1.0) @property def time_per_iteration(self): if not self.avg: return 0.0 return sum(self.avg) / float(len(self.avg)) @property def eta(self): if self.length_known and not self.finished: return self.time_per_iteration * (self.length - self.pos) return 0.0 def format_eta(self): if self.eta_known: t = self.eta + 1 seconds = t % 60 t /= 60 minutes = t % 60 t /= 60 hours = t % 24 t /= 24 if t > 0: days = t return '%dd %02d:%02d:%02d' % (days, hours, minutes, seconds) else: return '%02d:%02d:%02d' % (hours, minutes, seconds) return '' def format_pos(self): pos = str(self.pos) if self.length_known: pos += '/%s' % self.length return pos def format_pct(self): return ('% 4d%%' % int(self.pct * 100))[1:] def format_progress_line(self): show_percent = self.show_percent info_bits = [] if self.length_known: bar_length = int(self.pct * self.width) bar = self.fill_char * bar_length bar += self.empty_char * (self.width - bar_length) if show_percent is None: show_percent = not self.show_pos else: if self.finished: bar = self.fill_char * self.width else: bar = list(self.empty_char * (self.width or 1)) if self.time_per_iteration != 0: bar[int((math.cos(self.pos * self.time_per_iteration) / 2.0 + 0.5) * self.width)] = self.fill_char bar = ''.join(bar) if self.show_pos: info_bits.append(self.format_pos()) if show_percent: 
info_bits.append(self.format_pct()) if self.show_eta and self.eta_known and not self.finished: info_bits.append(self.format_eta()) if self.item_show_func is not None: item_info = self.item_show_func(self.current_item) if item_info is not None: info_bits.append(item_info) return (self.bar_template % { 'label': self.label, 'bar': bar, 'info': self.info_sep.join(info_bits) }).rstrip() def render_progress(self): from .termui import get_terminal_size nl = False if self.is_hidden: buf = [self.label] nl = True else: buf = [] # Update width in case the terminal has been resized if self.autowidth: old_width = self.width self.width = 0 clutter_length = term_len(self.format_progress_line()) new_width = max(0, get_terminal_size()[0] - clutter_length) if new_width < old_width: buf.append(BEFORE_BAR) buf.append(' ' * self.max_width) self.max_width = new_width self.width = new_width clear_width = self.width if self.max_width is not None: clear_width = self.max_width buf.append(BEFORE_BAR) line = self.format_progress_line() line_len = term_len(line) if self.max_width is None or self.max_width < line_len: self.max_width = line_len buf.append(line) buf.append(' ' * (clear_width - line_len)) line = ''.join(buf) # Render the line only if it changed. 
if line != self._last_line: self._last_line = line echo(line, file=self.file, color=self.color, nl=nl) self.file.flush() def make_step(self, n_steps): self.pos += n_steps if self.length_known and self.pos >= self.length: self.finished = True if (time.time() - self.last_eta) < 1.0: return self.last_eta = time.time() self.avg = self.avg[-6:] + [-(self.start - time.time()) / (self.pos)] self.eta_known = self.length_known def update(self, n_steps): self.make_step(n_steps) self.render_progress() def finish(self): self.eta_known = 0 self.current_item = None self.finished = True def next(self): if self.is_hidden: return next(self.iter) try: rv = next(self.iter) self.current_item = rv except StopIteration: self.finish() self.render_progress() raise StopIteration() else: self.update(1) return rv if not PY2: __next__ = next del next def pager(text, color=None): """Decide what method to use for paging through text.""" stdout = _default_text_stdout() if not isatty(sys.stdin) or not isatty(stdout): return _nullpager(stdout, text, color) pager_cmd = (os.environ.get('PAGER', None) or '').strip() if pager_cmd: if WIN: return _tempfilepager(text, pager_cmd, color) return _pipepager(text, pager_cmd, color) if os.environ.get('TERM') in ('dumb', 'emacs'): return _nullpager(stdout, text, color) if WIN or sys.platform.startswith('os2'): return _tempfilepager(text, 'more <', color) if hasattr(os, 'system') and os.system('(less) 2>/dev/null') == 0: return _pipepager(text, 'less', color) import tempfile fd, filename = tempfile.mkstemp() os.close(fd) try: if hasattr(os, 'system') and os.system('more "%s"' % filename) == 0: return _pipepager(text, 'more', color) return _nullpager(stdout, text, color) finally: os.unlink(filename) def _pipepager(text, cmd, color): """Page through text by feeding it to another program. Invoking a pager through this might support colors. 
""" import subprocess env = dict(os.environ) # If we're piping to less we might support colors under the # condition that cmd_detail = cmd.rsplit('/', 1)[-1].split() if color is None and cmd_detail[0] == 'less': less_flags = os.environ.get('LESS', '') + ' '.join(cmd_detail[1:]) if not less_flags: env['LESS'] = '-R' color = True elif 'r' in less_flags or 'R' in less_flags: color = True if not color: text = strip_ansi(text) c = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, env=env) encoding = get_best_encoding(c.stdin) try: c.stdin.write(text.encode(encoding, 'replace')) c.stdin.close() except (IOError, KeyboardInterrupt): pass # Less doesn't respect ^C, but catches it for its own UI purposes (aborting # search or other commands inside less). # # That means when the user hits ^C, the parent process (click) terminates, # but less is still alive, paging the output and messing up the terminal. # # If the user wants to make the pager exit on ^C, they should set # `LESS='-K'`. It's not our decision to make. while True: try: c.wait() except KeyboardInterrupt: pass else: break def _tempfilepager(text, cmd, color): """Page through text by invoking a program on a temporary file.""" import tempfile filename = tempfile.mktemp() if not color: text = strip_ansi(text) encoding = get_best_encoding(sys.stdout) with open_stream(filename, 'wb')[0] as f: f.write(text.encode(encoding)) try: os.system(cmd + ' "' + filename + '"') finally: os.unlink(filename) def _nullpager(stream, text, color): """Simply print unformatted text. 
This is the ultimate fallback.""" if not color: text = strip_ansi(text) stream.write(text) class Editor(object): def __init__(self, editor=None, env=None, require_save=True, extension='.txt'): self.editor = editor self.env = env self.require_save = require_save self.extension = extension def get_editor(self): if self.editor is not None: return self.editor for key in 'VISUAL', 'EDITOR': rv = os.environ.get(key) if rv: return rv if WIN: return 'notepad' for editor in 'vim', 'nano': if os.system('which %s >/dev/null 2>&1' % editor) == 0: return editor return 'vi' def edit_file(self, filename): import subprocess editor = self.get_editor() if self.env: environ = os.environ.copy() environ.update(self.env) else: environ = None try: c = subprocess.Popen('%s "%s"' % (editor, filename), env=environ, shell=True) exit_code = c.wait() if exit_code != 0: raise ClickException('%s: Editing failed!' % editor) except OSError as e: raise ClickException('%s: Editing failed: %s' % (editor, e)) def edit(self, text): import tempfile text = text or '' if text and not text.endswith('\n'): text += '\n' fd, name = tempfile.mkstemp(prefix='editor-', suffix=self.extension) try: if WIN: encoding = 'utf-8-sig' text = text.replace('\n', '\r\n') else: encoding = 'utf-8' text = text.encode(encoding) f = os.fdopen(fd, 'wb') f.write(text) f.close() timestamp = os.path.getmtime(name) self.edit_file(name) if self.require_save \ and os.path.getmtime(name) == timestamp: return None f = open(name, 'rb') try: rv = f.read() finally: f.close() return rv.decode('utf-8-sig').replace('\r\n', '\n') finally: os.unlink(name) def open_url(url, wait=False, locate=False): import subprocess def _unquote_file(url): try: import urllib except ImportError: import urllib if url.startswith('file://'): url = urllib.unquote(url[7:]) return url if sys.platform == 'darwin': args = ['open'] if wait: args.append('-W') if locate: args.append('-R') args.append(_unquote_file(url)) null = open('/dev/null', 'w') try: return 
subprocess.Popen(args, stderr=null).wait() finally: null.close() elif WIN: if locate: url = _unquote_file(url) args = 'explorer /select,"%s"' % _unquote_file( url.replace('"', '')) else: args = 'start %s "" "%s"' % ( wait and '/WAIT' or '', url.replace('"', '')) return os.system(args) try: if locate: url = os.path.dirname(_unquote_file(url)) or '.' else: url = _unquote_file(url) c = subprocess.Popen(['xdg-open', url]) if wait: return c.wait() return 0 except OSError: if url.startswith(('http://', 'https://')) and not locate and not wait: import webbrowser webbrowser.open(url) return 0 return 1 def _translate_ch_to_exc(ch): if ch == '\x03': raise KeyboardInterrupt() if ch == '\x04': raise EOFError() if WIN: import msvcrt def getchar(echo): rv = msvcrt.getch() if echo: msvcrt.putchar(rv) _translate_ch_to_exc(rv) if PY2: enc = getattr(sys.stdin, 'encoding', None) if enc is not None: rv = rv.decode(enc, 'replace') else: rv = rv.decode('cp1252', 'replace') return rv else: import tty import termios def getchar(echo): if not isatty(sys.stdin): f = open('/dev/tty') fd = f.fileno() else: fd = sys.stdin.fileno() f = None try: old_settings = termios.tcgetattr(fd) try: tty.setraw(fd) ch = os.read(fd, 32) if echo and isatty(sys.stdout): sys.stdout.write(ch) finally: termios.tcsetattr(fd, termios.TCSADRAIN, old_settings) sys.stdout.flush() if f is not None: f.close() except termios.error: pass _translate_ch_to_exc(ch) return ch.decode(get_best_encoding(sys.stdin), 'replace')
nelson-liu/scikit-learn
refs/heads/master
examples/neural_networks/plot_mnist_filters.py
79
"""
=====================================
Visualization of MLP weights on MNIST
=====================================

Sometimes looking at the learned coefficients of a neural network can provide
insight into the learning behavior. For example if weights look unstructured,
maybe some were not used at all, or if very large coefficients exist, maybe
regularization was too low or the learning rate too high.

This example shows how to plot some of the first layer weights in a
MLPClassifier trained on the MNIST dataset.

The input data consists of 28x28 pixel handwritten digits, leading to 784
features in the dataset. Therefore the first layer weight matrix have the
shape (784, hidden_layer_sizes[0]). We can therefore visualize a single
column of the weight matrix as a 28x28 pixel image.

To make the example run faster, we use very few hidden units, and train only
for a very short time. Training longer would result in weights with a much
smoother spatial appearance.
"""
print(__doc__)

import matplotlib.pyplot as plt
from sklearn.datasets import fetch_openml
from sklearn.neural_network import MLPClassifier

# BUG FIX: fetch_mldata relied on mldata.org, which is defunct; the loader
# was deprecated in scikit-learn 0.20 and removed in 0.22. fetch_openml
# serves the same dataset as 'mnist_784' with the same sample ordering, so
# the traditional 60000/10000 train/test split below still applies.
# as_frame=False keeps the ndarray return type the rest of the script expects.
mnist = fetch_openml('mnist_784', as_frame=False)

# rescale the data, use the traditional train/test split
X, y = mnist.data / 255., mnist.target
X_train, X_test = X[:60000], X[60000:]
y_train, y_test = y[:60000], y[60000:]

# mlp = MLPClassifier(hidden_layer_sizes=(100, 100), max_iter=400, alpha=1e-4,
#                     solver='sgd', verbose=10, tol=1e-4, random_state=1)
mlp = MLPClassifier(hidden_layer_sizes=(50,), max_iter=10, alpha=1e-4,
                    solver='sgd', verbose=10, tol=1e-4, random_state=1,
                    learning_rate_init=.1)

mlp.fit(X_train, y_train)
print("Training set score: %f" % mlp.score(X_train, y_train))
print("Test set score: %f" % mlp.score(X_test, y_test))

fig, axes = plt.subplots(4, 4)
# use global min / max to ensure all weights are shown on the same scale
vmin, vmax = mlp.coefs_[0].min(), mlp.coefs_[0].max()
for coef, ax in zip(mlp.coefs_[0].T, axes.ravel()):
    ax.matshow(coef.reshape(28, 28), cmap=plt.cm.gray, vmin=.5 * vmin,
               vmax=.5 * vmax)
    ax.set_xticks(())
    ax.set_yticks(())

plt.show()
camilonova/django
refs/heads/master
django/contrib/gis/geos/mutable_list.py
21
# Copyright (c) 2008-2009 Aryeh Leib Taurog, all rights reserved.
# Released under the New BSD license.
"""
This module contains a base type which provides list-style mutations
without specific data storage methods.

See also http://static.aryehleib.com/oldsite/MutableLists.html

Author: Aryeh Leib Taurog.
"""
from functools import cmp_to_key, total_ordering


@total_ordering
class ListMixin:
    """
    A base class which provides complete list interface.
    Derived classes must call ListMixin's __init__() function
    and implement the following:

    function _get_single_external(self, i):
        Return single item with index i for general use.
        The index i will always satisfy 0 <= i < len(self).

    function _get_single_internal(self, i):
        Same as above, but for use within the class [Optional]
        Note that if _get_single_internal and _get_single_internal return
        different types of objects, _set_list must distinguish
        between the two and handle each appropriately.

    function _set_list(self, length, items):
        Recreate the entire object.

        NOTE: items may be a generator which calls _get_single_internal.
        Therefore, it is necessary to cache the values in a temporary:
            temp = list(items)
        before clobbering the original storage.

    function _set_single(self, i, value):
        Set the single item at index i to value [Optional]
        If left undefined, all mutations will result in rebuilding
        the object using _set_list.

    function __len__(self):
        Return the length

    int _minlength:
        The minimum legal length [Optional]

    int _maxlength:
        The maximum legal length [Optional]

    type or tuple _allowed:
        A type or tuple of allowed item types [Optional]
    """
    _minlength = 0
    _maxlength = None

    # ### Python initialization and special list interface methods ###

    def __init__(self, *args, **kwargs):
        # Fall back to the rebuild-everything strategies for any optional
        # hook the subclass did not provide.
        if not hasattr(self, '_get_single_internal'):
            self._get_single_internal = self._get_single_external

        if not hasattr(self, '_set_single'):
            self._set_single = self._set_single_rebuild
            self._assign_extended_slice = self._assign_extended_slice_rebuild

        super().__init__(*args, **kwargs)

    def __getitem__(self, index):
        "Get the item(s) at the specified index/slice."
        if isinstance(index, slice):
            return [self._get_single_external(i)
                    for i in range(*index.indices(len(self)))]
        else:
            index = self._checkindex(index)
            return self._get_single_external(index)

    def __delitem__(self, index):
        "Delete the item(s) at the specified index/slice."
        if not isinstance(index, (int, slice)):
            raise TypeError("%s is not a legal index" % index)

        # calculate new length and dimensions
        origLen = len(self)
        if isinstance(index, int):
            index = self._checkindex(index)
            indexRange = [index]
        else:
            indexRange = range(*index.indices(origLen))

        newLen = origLen - len(indexRange)
        newItems = (self._get_single_internal(i)
                    for i in range(origLen)
                    if i not in indexRange)

        self._rebuild(newLen, newItems)

    def __setitem__(self, index, val):
        "Set the item(s) at the specified index/slice."
        if isinstance(index, slice):
            self._set_slice(index, val)
        else:
            index = self._checkindex(index)
            self._check_allowed((val,))
            self._set_single(index, val)

    # ### Special methods for arithmetic operations ###
    def __add__(self, other):
        'add another list-like object'
        return self.__class__(list(self) + list(other))

    def __radd__(self, other):
        'add to another list-like object'
        return other.__class__(list(other) + list(self))

    def __iadd__(self, other):
        'add another list-like object to self'
        self.extend(list(other))
        return self

    def __mul__(self, n):
        'multiply'
        return self.__class__(list(self) * n)

    def __rmul__(self, n):
        'multiply'
        return self.__class__(list(self) * n)

    def __imul__(self, n):
        'multiply'
        if n <= 0:
            del self[:]
        else:
            cache = list(self)
            for i in range(n - 1):
                self.extend(cache)
        return self

    def __eq__(self, other):
        olen = len(other)
        for i in range(olen):
            try:
                c = self[i] == other[i]
            except IndexError:
                # self must be shorter
                return False
            if not c:
                return False
        return len(self) == olen

    def __lt__(self, other):
        olen = len(other)
        for i in range(olen):
            try:
                c = self[i] < other[i]
            except IndexError:
                # self must be shorter
                return True
            if c:
                return c
            elif other[i] < self[i]:
                return False
        return len(self) < olen

    # ### Public list interface Methods ###
    # ## Non-mutating ##
    def count(self, val):
        "Standard list count method"
        count = 0
        for i in self:
            if val == i:
                count += 1
        return count

    def index(self, val):
        "Standard list index method"
        for i in range(0, len(self)):
            if self[i] == val:
                return i
        raise ValueError('%s not found in object' % val)

    # ## Mutating ##
    def append(self, val):
        "Standard list append method"
        self[len(self):] = [val]

    def extend(self, vals):
        "Standard list extend method"
        self[len(self):] = vals

    def insert(self, index, val):
        "Standard list insert method"
        if not isinstance(index, int):
            raise TypeError("%s is not a legal index" % index)
        self[index:index] = [val]

    def pop(self, index=-1):
        "Standard list pop method"
        result = self[index]
        del self[index]
        return result

    def remove(self, val):
        "Standard list remove method"
        del self[self.index(val)]

    def reverse(self):
        "Standard list reverse method"
        self[:] = self[-1::-1]

    def sort(self, cmp=None, key=None, reverse=False):
        "Standard list sort method"
        if key:
            # A stable sort by key(v); equivalent to the old
            # decorate-sort-undecorate tuple dance.
            self[:] = sorted(self, key=key, reverse=reverse)
        else:
            temp = list(self)
            if cmp is not None:
                # BUG FIX: list.sort() lost its cmp= keyword in Python 3, so
                # passing cmp straight through raised TypeError. Adapt the
                # old-style comparison function with functools.cmp_to_key.
                temp.sort(key=cmp_to_key(cmp), reverse=reverse)
            else:
                temp.sort(reverse=reverse)
            self[:] = temp

    # ### Private routines ###
    def _rebuild(self, newLen, newItems):
        # Enforce the optional subclass-declared length bounds before
        # delegating storage to _set_list.
        if newLen and newLen < self._minlength:
            raise ValueError('Must have at least %d items' % self._minlength)
        if self._maxlength is not None and newLen > self._maxlength:
            raise ValueError('Cannot have more than %d items' % self._maxlength)

        self._set_list(newLen, newItems)

    def _set_single_rebuild(self, index, value):
        self._set_slice(slice(index, index + 1, 1), [value])

    def _checkindex(self, index, correct=True):
        length = len(self)
        if 0 <= index < length:
            return index
        if correct and -length <= index < 0:
            return index + length
        raise IndexError('invalid index: %s' % index)

    def _check_allowed(self, items):
        if hasattr(self, '_allowed'):
            if False in [isinstance(val, self._allowed) for val in items]:
                raise TypeError('Invalid type encountered in the arguments.')

    def _set_slice(self, index, values):
        "Assign values to a slice of the object"
        try:
            iter(values)
        except TypeError:
            raise TypeError('can only assign an iterable to a slice')

        self._check_allowed(values)

        origLen = len(self)
        valueList = list(values)
        start, stop, step = index.indices(origLen)

        # CAREFUL: index.step and step are not the same!
        # step will never be None
        if index.step is None:
            self._assign_simple_slice(start, stop, valueList)
        else:
            self._assign_extended_slice(start, stop, step, valueList)

    def _assign_extended_slice_rebuild(self, start, stop, step, valueList):
        'Assign an extended slice by rebuilding entire list'
        indexList = range(start, stop, step)
        # extended slice, only allow assigning slice of same size
        if len(valueList) != len(indexList):
            raise ValueError('attempt to assign sequence of size %d '
                             'to extended slice of size %d'
                             % (len(valueList), len(indexList)))

        # we're not changing the length of the sequence
        newLen = len(self)
        newVals = dict(zip(indexList, valueList))

        def newItems():
            for i in range(newLen):
                if i in newVals:
                    yield newVals[i]
                else:
                    yield self._get_single_internal(i)

        self._rebuild(newLen, newItems())

    def _assign_extended_slice(self, start, stop, step, valueList):
        'Assign an extended slice by re-assigning individual items'
        indexList = range(start, stop, step)
        # extended slice, only allow assigning slice of same size
        if len(valueList) != len(indexList):
            raise ValueError('attempt to assign sequence of size %d '
                             'to extended slice of size %d'
                             % (len(valueList), len(indexList)))

        for i, val in zip(indexList, valueList):
            self._set_single(i, val)

    def _assign_simple_slice(self, start, stop, valueList):
        'Assign a simple slice; Can assign slice of any length'
        origLen = len(self)
        stop = max(start, stop)
        newLen = origLen - stop + start + len(valueList)

        def newItems():
            for i in range(origLen + 1):
                if i == start:
                    yield from valueList

                if i < origLen:
                    if i < start or i >= stop:
                        yield self._get_single_internal(i)

        self._rebuild(newLen, newItems())
mfigurnov/sact
refs/heads/master
imagenet_eval.py
1
# Copyright 2016 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Evaluates a trained ResNet model.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import math import tensorflow as tf from tensorflow.contrib import slim import imagenet_data_provider import imagenet_model import summary_utils import utils FLAGS = tf.app.flags.FLAGS tf.app.flags.DEFINE_string('master', '', 'Name of the TensorFlow master to use.') tf.app.flags.DEFINE_string('checkpoint_dir', '/tmp/resnet/', 'Directory where the model was written to.') tf.app.flags.DEFINE_string('eval_dir', '/tmp/resnet/', 'Directory where the results are saved to.') tf.app.flags.DEFINE_string('dataset_dir', None, 'Directory with Imagenet data.') tf.app.flags.DEFINE_integer('eval_interval_secs', 600, 'The frequency, in seconds, with which evaluation is run.') tf.app.flags.DEFINE_integer('num_examples', 50000, 'The number of examples to evaluate') tf.app.flags.DEFINE_integer( 'batch_size', 32, 'The number of examples to evaluate per evaluation iteration.') tf.app.flags.DEFINE_string( 'split_name', 'validation', 'The name of the train/test split, either \'train\' or \'validation\'.') tf.app.flags.DEFINE_float('moving_average_decay', 0.9999, 'The decay to use for the moving average.') tf.app.flags.DEFINE_integer('image_size', 224, 'Image resolution for 
resize.') tf.app.flags.DEFINE_string( 'model', '101', 'Depth of the network to train (50, 101, 152, 200), or number of layers' ' in each block (e.g. 3_4_23_3).') tf.app.flags.DEFINE_string( 'model_type', 'vanilla', 'Options: vanilla (basic ResNet model), act (Adaptive Computation Time), ' 'act_early_stopping (act implementation which actually saves time), ' 'sact (Spatially Adaptive Computation Time)') tf.app.flags.DEFINE_float('tau', 1.0, 'The value of tau (ponder relative cost).') tf.app.flags.DEFINE_bool('evaluate_once', False, 'Evaluate the model just once?') def main(_): g = tf.Graph() with g.as_default(): data_tuple = imagenet_data_provider.provide_data( FLAGS.split_name, FLAGS.batch_size, dataset_dir=FLAGS.dataset_dir, is_training=False, image_size=FLAGS.image_size) images, one_hot_labels, examples_per_epoch, num_classes = data_tuple # Define the model: with slim.arg_scope(imagenet_model.resnet_arg_scope(is_training=False)): model = utils.split_and_int(FLAGS.model) logits, end_points = imagenet_model.get_network( images, model, num_classes, model_type=FLAGS.model_type) predictions = tf.argmax(end_points['predictions'], 1) # Define the metrics: labels = tf.argmax(one_hot_labels, 1) metric_map = { 'eval/Accuracy': tf.contrib.metrics.streaming_accuracy(predictions, labels), 'eval/Recall@5': tf.contrib.metrics.streaming_sparse_recall_at_k( end_points['predictions'], tf.expand_dims(labels, 1), 5), } metric_map.update(summary_utils.flops_metric_map(end_points, True)) if FLAGS.model_type in ['act', 'act_early_stopping', 'sact']: metric_map.update(summary_utils.act_metric_map(end_points, True)) names_to_values, names_to_updates = tf.contrib.metrics.aggregate_metric_map( metric_map) for name, value in names_to_values.iteritems(): summ = tf.summary.scalar(name, value, collections=[]) summ = tf.Print(summ, [value], name) tf.add_to_collection(tf.GraphKeys.SUMMARIES, summ) if FLAGS.model_type == 'sact': summary_utils.add_heatmaps_image_summary(end_points, border=10) # 
This ensures that we make a single pass over all of the data. num_batches = math.ceil(FLAGS.num_examples / float(FLAGS.batch_size)) if not FLAGS.evaluate_once: eval_function = slim.evaluation.evaluation_loop checkpoint_path = FLAGS.checkpoint_dir kwargs = {'eval_interval_secs': FLAGS.eval_interval_secs} else: eval_function = slim.evaluation.evaluate_once checkpoint_path = tf.train.latest_checkpoint(FLAGS.checkpoint_dir) assert checkpoint_path is not None kwargs = {} eval_function( FLAGS.master, checkpoint_path, logdir=FLAGS.eval_dir, num_evals=num_batches, eval_op=names_to_updates.values(), **kwargs) if __name__ == '__main__': tf.app.run()
akaariai/django
refs/heads/master
django/utils/checksums.py
105
"""
Common checksum routines.
"""

__all__ = ['luhn']

import warnings

from django.utils import six
from django.utils.deprecation import RemovedInDjango20Warning

warnings.warn(
    "django.utils.checksums will be removed in Django 2.0. The "
    "luhn() function is now included in django-localflavor 1.1+.",
    RemovedInDjango20Warning
)

LUHN_ODD_LOOKUP = (0, 2, 4, 6, 8, 1, 3, 5, 7, 9)  # sum_of_digits(index * 2)


def luhn(candidate):
    """
    Checks a candidate number for validity according to the Luhn
    algorithm (used in validation of, for example, credit cards).
    Both numeric and string candidates are accepted.
    """
    if not isinstance(candidate, six.string_types):
        candidate = str(candidate)
    try:
        # Walk the digits right-to-left: digits at even offsets are taken
        # as-is, digits at odd offsets are doubled (digit-summed) via the
        # precomputed lookup table.
        total = 0
        for offset, digit in enumerate(reversed(candidate)):
            if offset % 2 == 0:
                total += int(digit)
            else:
                total += LUHN_ODD_LOOKUP[int(digit)]
        return total % 10 == 0
    except ValueError:  # Raised if an int conversion fails
        return False
umitproject/tease-o-matic
refs/heads/master
django/core/management/commands/sqlreset.py
313
from optparse import make_option

from django.core.management.base import AppCommand
from django.core.management.sql import sql_reset
from django.db import connections, DEFAULT_DB_ALIAS


class Command(AppCommand):
    """Management command emitting reset SQL (DROP then CREATE) per app."""

    help = "Prints the DROP TABLE SQL, then the CREATE TABLE SQL, for the given app name(s)."

    option_list = AppCommand.option_list + (
        make_option('--database', action='store', dest='database',
                    default=DEFAULT_DB_ALIAS,
                    help='Nominates a database to print the '
                         'SQL for. Defaults to the "default" database.'),
    )

    output_transaction = True

    def handle_app(self, app, **options):
        """Return the UTF-8 encoded reset SQL for a single app."""
        alias = options.get('database', DEFAULT_DB_ALIAS)
        statements = sql_reset(app, self.style, connections[alias])
        return u'\n'.join(statements).encode('utf-8')
ychen820/microblog
refs/heads/master
y/google-cloud-sdk/platform/google_appengine/lib/django-1.3/django/core/management/commands/sqlreset.py
313
from optparse import make_option

from django.core.management.base import AppCommand
from django.core.management.sql import sql_reset
from django.db import connections, DEFAULT_DB_ALIAS


class Command(AppCommand):
    """Prints an app's reset SQL: all DROP TABLE statements, then the
    matching CREATE TABLE statements."""

    help = "Prints the DROP TABLE SQL, then the CREATE TABLE SQL, for the given app name(s)."

    option_list = AppCommand.option_list + (
        make_option(
            '--database', action='store', dest='database',
            default=DEFAULT_DB_ALIAS,
            help='Nominates a database to print the '
                 'SQL for. Defaults to the "default" database.'),
    )

    output_transaction = True

    def handle_app(self, app, **options):
        """Produce the reset SQL for *app* on the selected database."""
        connection = connections[options.get('database', DEFAULT_DB_ALIAS)]
        return u'\n'.join(sql_reset(app, self.style, connection)).encode('utf-8')
ojengwa/odoo
refs/heads/8.0
addons/account/tests/test_reconciliation.py
179
from openerp.tests.common import TransactionCase

import time


class TestReconciliation(TransactionCase):
    """Tests for reconciliation (account.tax)

    Test used to check that when doing a sale or purchase invoice in a
    different currency, the result will be balanced.
    """

    def setUp(self):
        # Resolve all model proxies and demo-data record ids once per test.
        super(TestReconciliation, self).setUp()
        self.account_invoice_model = self.registry('account.invoice')
        self.account_invoice_line_model = self.registry('account.invoice.line')
        self.acc_bank_stmt_model = self.registry('account.bank.statement')
        self.acc_bank_stmt_line_model = self.registry('account.bank.statement.line')
        self.res_currency_model = self.registry('res.currency')
        self.res_currency_rate_model = self.registry('res.currency.rate')

        # Demo-data XML ids -> database ids (get_object_reference returns
        # (model_name, id); [1] keeps the id).
        self.partner_agrolait_id = self.registry("ir.model.data").get_object_reference(self.cr, self.uid, "base", "res_partner_2")[1]
        self.currency_swiss_id = self.registry("ir.model.data").get_object_reference(self.cr, self.uid, "base", "CHF")[1]
        self.currency_usd_id = self.registry("ir.model.data").get_object_reference(self.cr, self.uid, "base", "USD")[1]
        self.account_rcv_id = self.registry("ir.model.data").get_object_reference(self.cr, self.uid, "account", "a_recv")[1]
        self.account_fx_income_id = self.registry("ir.model.data").get_object_reference(self.cr, self.uid, "account", "income_fx_income")[1]
        self.account_fx_expense_id = self.registry("ir.model.data").get_object_reference(self.cr, self.uid, "account", "income_fx_expense")[1]
        self.product_id = self.registry("ir.model.data").get_object_reference(self.cr, self.uid, "product", "product_product_4")[1]
        self.bank_journal_usd_id = self.registry("ir.model.data").get_object_reference(self.cr, self.uid, "account", "bank_journal_usd")[1]
        self.account_usd_id = self.registry("ir.model.data").get_object_reference(self.cr, self.uid, "account", "usd_bnk")[1]
        self.company_id = self.registry("ir.model.data").get_object_reference(self.cr, self.uid, "base", "main_company")[1]

        #set expense_currency_exchange_account_id and income_currency_exchange_account_id to the according accounts
        self.registry("res.company").write(self.cr, self.uid, [self.company_id], {'expense_currency_exchange_account_id': self.account_fx_expense_id, 'income_currency_exchange_account_id':self.account_fx_income_id})

    def test_balanced_customer_invoice(self):
        # Customer invoice in CHF, partially paid in USD: the generated
        # statement move must balance across the receivable, bank and
        # FX-expense accounts.
        cr, uid = self.cr, self.uid
        #we create an invoice in CHF
        invoice_id = self.account_invoice_model.create(cr, uid, {'partner_id': self.partner_agrolait_id,
            'reference_type': 'none',
            'currency_id': self.currency_swiss_id,
            'name': 'invoice to client',
            'account_id': self.account_rcv_id,
            'type': 'out_invoice',
            'date_invoice': time.strftime('%Y')+'-07-01',  # to use USD rate rateUSDbis
            })
        self.account_invoice_line_model.create(cr, uid, {'product_id': self.product_id,
            'quantity': 1,
            'price_unit': 100,
            'invoice_id': invoice_id,
            'name': 'product that cost 100',})
        #validate purchase
        self.registry('account.invoice').signal_workflow(cr, uid, [invoice_id], 'invoice_open')
        invoice_record = self.account_invoice_model.browse(cr, uid, [invoice_id])
        #we pay half of it on a journal with currency in dollar (bank statement)
        bank_stmt_id = self.acc_bank_stmt_model.create(cr, uid, {
            'journal_id': self.bank_journal_usd_id,
            'date': time.strftime('%Y')+'-07-15',
            })
        # NOTE(review): 42 USD for a 50 CHF payment — presumably matches the
        # demo USD/CHF rates used below; verify against the demo rate data.
        bank_stmt_line_id = self.acc_bank_stmt_line_model.create(cr, uid, {'name': 'half payment',
            'statement_id': bank_stmt_id,
            'partner_id': self.partner_agrolait_id,
            'amount': 42,
            'amount_currency': 50,
            'currency_id': self.currency_swiss_id,
            'date': time.strftime('%Y')+'-07-15',})
        #reconcile the payment with the invoice
        for l in invoice_record.move_id.line_id:
            if l.account_id.id == self.account_rcv_id:
                line_id = l
                break
        self.acc_bank_stmt_line_model.process_reconciliation(cr, uid, bank_stmt_line_id, [
            {'counterpart_move_line_id': line_id.id, 'credit':50, 'debit':0, 'name': line_id.name,}])
        #we check that the line is balanced (bank statement line)
        move_line_ids = self.acc_bank_stmt_model.browse(cr,uid,bank_stmt_id).move_line_ids

        self.assertEquals(len(move_line_ids), 3)
        checked_line = 0
        for move_line in move_line_ids:
            if move_line.account_id.id == self.account_usd_id:
                self.assertEquals(move_line.debit, 27.47)
                self.assertEquals(move_line.credit, 0.0)
                self.assertEquals(move_line.amount_currency, 42)
                self.assertEquals(move_line.currency_id.id, self.currency_usd_id)
                checked_line += 1
                continue
            if move_line.account_id.id == self.account_rcv_id:
                self.assertEquals(move_line.debit, 0.0)
                self.assertEquals(move_line.credit, 38.21)
                self.assertEquals(move_line.amount_currency, -50)
                self.assertEquals(move_line.currency_id.id, self.currency_swiss_id)
                checked_line += 1
                continue
            if move_line.account_id.id == self.account_fx_expense_id:
                self.assertEquals(move_line.debit, 10.74)
                self.assertEquals(move_line.credit, 0.0)
                checked_line += 1
                continue
        self.assertEquals(checked_line, 3)

    def test_balanced_supplier_invoice(self):
        # Mirror of the customer case: supplier invoice in CHF paid in USD;
        # the FX difference lands on the income side.
        cr, uid = self.cr, self.uid
        #we create a supplier invoice in CHF
        invoice_id = self.account_invoice_model.create(cr, uid, {'partner_id': self.partner_agrolait_id,
            'reference_type': 'none',
            'currency_id': self.currency_swiss_id,
            'name': 'invoice to client',
            'account_id': self.account_rcv_id,
            'type': 'in_invoice',
            'date_invoice': time.strftime('%Y')+'-07-01',
            })
        self.account_invoice_line_model.create(cr, uid, {'product_id': self.product_id,
            'quantity': 1,
            'price_unit': 100,
            'invoice_id': invoice_id,
            'name': 'product that cost 100',})
        #validate purchase
        self.registry('account.invoice').signal_workflow(cr, uid, [invoice_id], 'invoice_open')
        invoice_record = self.account_invoice_model.browse(cr, uid, [invoice_id])
        #we pay half of it on a journal with currency in dollar (bank statement)
        bank_stmt_id = self.acc_bank_stmt_model.create(cr, uid, {
            'journal_id': self.bank_journal_usd_id,
            'date': time.strftime('%Y')+'-07-15',
            })
        bank_stmt_line_id = self.acc_bank_stmt_line_model.create(cr, uid, {'name': 'half payment',
            'statement_id': bank_stmt_id,
            'partner_id': self.partner_agrolait_id,
            'amount': -42,
            'amount_currency': -50,
            'currency_id': self.currency_swiss_id,
            'date': time.strftime('%Y')+'-07-15',})
        #reconcile the payment with the invoice
        for l in invoice_record.move_id.line_id:
            if l.account_id.id == self.account_rcv_id:
                line_id = l
                break
        self.acc_bank_stmt_line_model.process_reconciliation(cr, uid, bank_stmt_line_id, [
            {'counterpart_move_line_id': line_id.id, 'credit':0, 'debit':50, 'name': line_id.name,}])
        #we check that the line is balanced (bank statement line)
        move_line_ids = self.acc_bank_stmt_model.browse(cr,uid,bank_stmt_id).move_line_ids
        self.assertEquals(len(move_line_ids), 3)
        checked_line = 0
        for move_line in move_line_ids:
            if move_line.account_id.id == self.account_usd_id:
                self.assertEquals(move_line.debit, 0.0)
                self.assertEquals(move_line.credit, 27.47)
                self.assertEquals(move_line.amount_currency, -42)
                self.assertEquals(move_line.currency_id.id, self.currency_usd_id)
                checked_line += 1
                continue
            if move_line.account_id.id == self.account_rcv_id:
                self.assertEquals(move_line.debit, 38.21)
                self.assertEquals(move_line.credit, 0.0)
                self.assertEquals(move_line.amount_currency, 50)
                self.assertEquals(move_line.currency_id.id, self.currency_swiss_id)
                checked_line += 1
                continue
            if move_line.account_id.id == self.account_fx_income_id:
                self.assertEquals(move_line.debit, 0.0)
                self.assertEquals(move_line.credit, 10.74)
                checked_line += 1
                continue
        self.assertEquals(checked_line, 3)

    def test_balanced_exchanges_gain_loss(self):
        # The point of this test is to show that we handle correctly the gain/loss exchanges during reconciliations in foreign currencies.
        # For instance, with a company set in EUR, and a USD rate set to 0.033,
        # the reconciliation of an invoice of 2.00 USD (60.61 EUR) and a bank statement of two lines of 1.00 USD (30.30 EUR)
        # will lead to an exchange loss, that should be handled correctly within the journal items.
        cr, uid = self.cr, self.uid
        # We update the currency rate of the currency USD in order to force the gain/loss exchanges in next steps
        self.res_currency_rate_model.create(cr, uid, {
            'name': time.strftime('%Y-%m-%d') + ' 00:00:00',
            'currency_id': self.currency_usd_id,
            'rate': 0.033,
        })
        # We create a customer invoice of 2.00 USD
        invoice_id = self.account_invoice_model.create(cr, uid, {
            'partner_id': self.partner_agrolait_id,
            'currency_id': self.currency_usd_id,
            'name': 'Foreign invoice with exchange gain',
            'account_id': self.account_rcv_id,
            'type': 'out_invoice',
            'date_invoice': time.strftime('%Y-%m-%d'),
            'journal_id': self.bank_journal_usd_id,
            'invoice_line': [
                (0, 0, {
                    'name': 'line that will lead to an exchange gain',
                    'quantity': 1,
                    'price_unit': 2,
                })
            ]
        })
        self.registry('account.invoice').signal_workflow(cr, uid, [invoice_id], 'invoice_open')
        invoice = self.account_invoice_model.browse(cr, uid, invoice_id)
        # We create a bank statement with two lines of 1.00 USD each.
        bank_stmt_id = self.acc_bank_stmt_model.create(cr, uid, {
            'journal_id': self.bank_journal_usd_id,
            'date': time.strftime('%Y-%m-%d'),
            'line_ids': [
                (0, 0, {
                    'name': 'half payment',
                    'partner_id': self.partner_agrolait_id,
                    'amount': 1.0,
                    'date': time.strftime('%Y-%m-%d')
                }),
                (0, 0, {
                    'name': 'second half payment',
                    'partner_id': self.partner_agrolait_id,
                    'amount': 1.0,
                    'date': time.strftime('%Y-%m-%d')
                })
            ]
        })
        statement = self.acc_bank_stmt_model.browse(cr, uid, bank_stmt_id)
        # We process the reconciliation of the invoice line with the two bank statement lines
        line_id = None
        for l in invoice.move_id.line_id:
            if l.account_id.id == self.account_rcv_id:
                line_id = l
                break
        for statement_line in statement.line_ids:
            self.acc_bank_stmt_line_model.process_reconciliation(cr, uid, statement_line.id, [
                {'counterpart_move_line_id': line_id.id, 'credit': 1.0, 'debit': 0.0, 'name': line_id.name}
            ])
        # The invoice should be paid, as the payments totally cover its total
        self.assertEquals(invoice.state, 'paid', 'The invoice should be paid by now')
        reconcile = None
        for payment in invoice.payment_ids:
            reconcile = payment.reconcile_id
            break
        # The invoice should be reconciled (entirely, not a partial reconciliation)
        self.assertTrue(reconcile, 'The invoice should be totally reconciled')
        result = {}
        exchange_loss_line = None
        for line in reconcile.line_id:
            res_account = result.setdefault(line.account_id, {'debit': 0.0, 'credit': 0.0, 'count': 0})
            res_account['debit'] = res_account['debit'] + line.debit
            res_account['credit'] = res_account['credit'] + line.credit
            res_account['count'] += 1
            if line.credit == 0.01:
                exchange_loss_line = line
        # We should be able to find a move line of 0.01 EUR on the Debtors account, being the cent we lost during the currency exchange
        self.assertTrue(exchange_loss_line, 'There should be one move line of 0.01 EUR in credit')
        # The journal items of the reconciliation should have their debit and credit total equal
        # Besides, the total debit and total credit should be 60.61 EUR (2.00 USD)
        self.assertEquals(sum([res['debit'] for res in result.values()]), 60.61)
        self.assertEquals(sum([res['credit'] for res in result.values()]), 60.61)
        counterpart_exchange_loss_line = None
        for line in exchange_loss_line.move_id.line_id:
            if line.account_id.id == self.account_fx_expense_id:
                counterpart_exchange_loss_line = line
        # We should be able to find a move line of 0.01 EUR on the Foreign Exchange Loss account
        self.assertTrue(counterpart_exchange_loss_line, 'There should be one move line of 0.01 EUR on account "Foreign Exchange Loss"')
ShassAro/ShassAro
refs/heads/master
Bl_project/blVirtualEnv/lib/python2.7/re.py
4
/usr/lib/python2.7/re.py
safawo/antlr4
refs/heads/master
runtime/Python2/src/antlr4/atn/ATNConfig.py
5
# # [The "BSD license"] # Copyright (c) 2012 Terence Parr # Copyright (c) 2012 Sam Harwell # Copyright (c) 2014 Eric Vergnaud # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # 3. The name of the author may not be used to endorse or promote products # derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR # IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES # OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT # NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF # THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #/ # A tuple: (ATN state, predicted alt, syntactic, semantic context). # The syntactic context is a graph-structured stack node whose # path(s) to the root is the rule invocation(s) # chain used to arrive at the state. The semantic context is # the tree of semantic predicates encountered before reaching # an ATN state. 
#/ from io import StringIO from antlr4.atn.ATNState import ATNState, DecisionState from antlr4.atn.SemanticContext import SemanticContext class ATNConfig(object): def __init__(self, state=None, alt=None, context=None, semantic=None, config=None): if config is not None: if state is None: state = config.state if alt is None: alt = config.alt if context is None: context = config.context if semantic is None: semantic = config.semanticContext if semantic is None: semantic = SemanticContext.NONE # The ATN state associated with this configuration#/ self.state = state # What alt (or lexer rule) is predicted by this configuration#/ self.alt = alt # The stack of invoking states leading to the rule/states associated # with this config. We track only those contexts pushed during # execution of the ATN simulator. self.context = context self.semanticContext = semantic # We cannot execute predicates dependent upon local context unless # we know for sure we are in the correct context. Because there is # no way to do this efficiently, we simply cannot evaluate # dependent predicates unless we are in the rule that initially # invokes the ATN simulator. # # closure() tracks the depth of how far we dip into the # outer context: depth &gt; 0. Note that it may not be totally # accurate depth since I don't ever decrement. TODO: make it a boolean then self.reachesIntoOuterContext = 0 if config is None else config.reachesIntoOuterContext self.precedenceFilterSuppressed = False if config is None else config.precedenceFilterSuppressed # An ATN configuration is equal to another if both have # the same state, they predict the same alternative, and # syntactic/semantic contexts are the same. 
#/ def __eq__(self, other): if self is other: return True elif not isinstance(other, ATNConfig): return False else: return self.state.stateNumber==other.state.stateNumber \ and self.alt==other.alt \ and ((self.context is other.context) or (self.context==other.context)) \ and self.semanticContext==other.semanticContext \ and self.precedenceFilterSuppressed==other.precedenceFilterSuppressed def __hash__(self): return hash( str(self.state.stateNumber) + "/" + str(self.alt) + "/" + str(self.context) + "/" + str(self.semanticContext) ) def __str__(self): return unicode(self) def __unicode__(self): with StringIO() as buf: buf.write(u"(") buf.write(unicode(self.state)) buf.write(u",") buf.write(unicode(self.alt)) if self.context is not None: buf.write(u",[") buf.write(unicode(self.context)) buf.write(u"]") if self.semanticContext is not None and self.semanticContext is not SemanticContext.NONE: buf.write(u",") buf.write(unicode(self.semanticContext)) if self.reachesIntoOuterContext>0: buf.write(u",up=") buf.write(unicode(self.reachesIntoOuterContext)) buf.write(u')') return buf.getvalue() class LexerATNConfig(ATNConfig): def __init__(self, state, alt=None, context=None, semantic=SemanticContext.NONE, lexerActionExecutor=None, config=None): super(LexerATNConfig, self).__init__(state=state, alt=alt, context=context, semantic=semantic, config=config) if config is not None: if lexerActionExecutor is None: lexerActionExecutor = config.lexerActionExecutor # This is the backing field for {@link #getLexerActionExecutor}. 
self.lexerActionExecutor = lexerActionExecutor self.passedThroughNonGreedyDecision = False if config is None else self.checkNonGreedyDecision(config, state) def __hash__(self): return hash(str(self.state.stateNumber) + str(self.alt) + str(self.context) \ + str(self.semanticContext) + str(1 if self.passedThroughNonGreedyDecision else 0) \ + str(self.lexerActionExecutor)) def __eq__(self, other): if self is other: return True elif not isinstance(other, LexerATNConfig): return False if self.passedThroughNonGreedyDecision != other.passedThroughNonGreedyDecision: return False if self.lexerActionExecutor is not other.lexerActionExecutor: return False return super(LexerATNConfig, self).__eq__(other) def checkNonGreedyDecision(self, source, target): return source.passedThroughNonGreedyDecision \ or isinstance(target, DecisionState) and target.nonGreedy
mattlinares/django-reg
refs/heads/master
registration/backends/default/urls.py
57
""" URLconf for registration and activation, using django-registration's default backend. If the default behavior of these views is acceptable to you, simply use a line like this in your root URLconf to set up the default URLs for registration:: (r'^accounts/', include('registration.backends.default.urls')), This will also automatically set up the views in ``django.contrib.auth`` at sensible default locations. If you'd like to customize registration behavior, feel free to set up your own URL patterns for these views instead. """ from django.conf.urls import patterns from django.conf.urls import include from django.conf.urls import url from django.views.generic.base import TemplateView from registration.backends.default.views import ActivationView from registration.backends.default.views import RegistrationView urlpatterns = patterns('', url(r'^activate/complete/$', TemplateView.as_view(template_name='registration/activation_complete.html'), name='registration_activation_complete'), # Activation keys get matched by \w+ instead of the more specific # [a-fA-F0-9]{40} because a bad activation key should still get to the view; # that way it can return a sensible "invalid key" message instead of a # confusing 404. url(r'^activate/(?P<activation_key>\w+)/$', ActivationView.as_view(), name='registration_activate'), url(r'^register/$', RegistrationView.as_view(), name='registration_register'), url(r'^register/complete/$', TemplateView.as_view(template_name='registration/registration_complete.html'), name='registration_complete'), url(r'^register/closed/$', TemplateView.as_view(template_name='registration/registration_closed.html'), name='registration_disallowed'), (r'', include('registration.auth_urls')), )
patriciolobos/desa8
refs/heads/master
openerp/addons/website_mail/models/mail_message.py
264
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp import SUPERUSER_ID from openerp.tools import html2plaintext from openerp.tools.translate import _ from openerp.osv import osv, fields, expression class MailMessage(osv.Model): _inherit = 'mail.message' def _get_description_short(self, cr, uid, ids, name, arg, context=None): res = dict.fromkeys(ids, False) for message in self.browse(cr, uid, ids, context=context): if message.subject: res[message.id] = message.subject else: plaintext_ct = '' if not message.body else html2plaintext(message.body) res[message.id] = plaintext_ct[:30] + '%s' % (' [...]' if len(plaintext_ct) >= 30 else '') return res _columns = { 'description': fields.function( _get_description_short, type='char', help='Message description: either the subject, or the beginning of the body' ), 'website_published': fields.boolean( 'Published', help="Visible on the website as a comment", copy=False, ), } def default_get(self, cr, uid, fields_list, context=None): defaults = super(MailMessage, self).default_get(cr, uid, fields_list, context=context) # Note: 
explicitly implemented in default_get() instead of _defaults, # to avoid setting to True for all existing messages during upgrades. # TODO: this default should probably be dynamic according to the model # on which the messages are attached, thus moved to create(). if 'website_published' in fields_list: defaults.setdefault('website_published', True) return defaults def _search(self, cr, uid, args, offset=0, limit=None, order=None, context=None, count=False, access_rights_uid=None): """ Override that adds specific access rights of mail.message, to restrict messages to published messages for public users. """ if uid != SUPERUSER_ID: group_ids = self.pool.get('res.users').browse(cr, uid, uid, context=context).groups_id group_user_id = self.pool.get("ir.model.data").get_object_reference(cr, uid, 'base', 'group_public')[1] if group_user_id in [group.id for group in group_ids]: args = expression.AND([[('website_published', '=', True)], list(args)]) return super(MailMessage, self)._search(cr, uid, args, offset=offset, limit=limit, order=order, context=context, count=count, access_rights_uid=access_rights_uid) def check_access_rule(self, cr, uid, ids, operation, context=None): """ Add Access rules of mail.message for non-employee user: - read: - raise if the type is comment and subtype NULL (internal note) """ if uid != SUPERUSER_ID: group_ids = self.pool.get('res.users').browse(cr, uid, uid, context=context).groups_id group_user_id = self.pool.get("ir.model.data").get_object_reference(cr, uid, 'base', 'group_public')[1] if group_user_id in [group.id for group in group_ids]: cr.execute('SELECT id FROM "%s" WHERE website_published IS FALSE AND id = ANY (%%s)' % (self._table), (ids,)) if cr.fetchall(): raise osv.except_osv( _('Access Denied'), _('The requested operation cannot be completed due to security restrictions. 
Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % (self._description, operation)) return super(MailMessage, self).check_access_rule(cr, uid, ids=ids, operation=operation, context=context)
rmboggs/django
refs/heads/master
django/conf/locale/ro/formats.py
619
# -*- encoding: utf-8 -*- # This file is distributed under the same license as the Django package. # from __future__ import unicode_literals # The *_FORMAT strings use the Django date format syntax, # see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date DATE_FORMAT = 'j F Y' TIME_FORMAT = 'H:i' DATETIME_FORMAT = 'j F Y, H:i' YEAR_MONTH_FORMAT = 'F Y' MONTH_DAY_FORMAT = 'j F' SHORT_DATE_FORMAT = 'd.m.Y' SHORT_DATETIME_FORMAT = 'd.m.Y, H:i' # FIRST_DAY_OF_WEEK = # The *_INPUT_FORMATS strings use the Python strftime format syntax, # see http://docs.python.org/library/datetime.html#strftime-strptime-behavior # DATE_INPUT_FORMATS = # TIME_INPUT_FORMATS = # DATETIME_INPUT_FORMATS = DECIMAL_SEPARATOR = ',' THOUSAND_SEPARATOR = '.' # NUMBER_GROUPING =
apache/bloodhound
refs/heads/trunk
trac/trac/web/tests/wikisyntax.py
8
import unittest from trac.wiki.tests import formatter TEST_CASES = """ ============================== htdocs: links resolver htdocs:release-1.0.tar.gz [htdocs:release-1.0.tar.gz Release 1.0] ------------------------------ <p> <a href="/chrome/site/release-1.0.tar.gz">htdocs:release-1.0.tar.gz</a> </p> <p> <a href="/chrome/site/release-1.0.tar.gz">Release 1.0</a> </p> ------------------------------ """ def suite(): return formatter.suite(TEST_CASES, file=__file__) if __name__ == '__main__': unittest.main(defaultTest='suite')
jalexvig/tensorflow
refs/heads/master
tensorflow/contrib/distributions/python/ops/bijectors/scale_tril.py
13
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """ScaleTriL bijector.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.contrib.distributions.python.ops.bijectors import affine_scalar from tensorflow.contrib.distributions.python.ops.bijectors import chain from tensorflow.contrib.distributions.python.ops.bijectors import fill_triangular from tensorflow.contrib.distributions.python.ops.bijectors import softplus from tensorflow.contrib.distributions.python.ops.bijectors import transform_diagonal from tensorflow.python.util import deprecation __all__ = [ "ScaleTriL", ] class ScaleTriL(chain.Chain): """Transforms unconstrained vectors to TriL matrices with positive diagonal. This is implemented as a simple `tfb.Chain` of `tfb.FillTriangular` followed by `tfb.TransformDiagonal`, and provided mostly as a convenience. The default setup is somewhat opinionated, using a Softplus transformation followed by a small shift (`1e-5`) which attempts to avoid numerical issues from zeros on the diagonal. 
#### Examples ```python tfb = tf.contrib.distributions.bijectors b = tfb.ScaleTriL( diag_bijector=tfb.Exp(), diag_shift=None) b.forward(x=[0., 0., 0.]) # Result: [[1., 0.], # [0., 1.]] b.inverse(y=[[1., 0], [.5, 2]]) # Result: [log(2), .5, log(1)] # Define a distribution over PSD matrices of shape `[3, 3]`, # with `1 + 2 + 3 = 6` degrees of freedom. dist = tfd.TransformedDistribution( tfd.Normal(tf.zeros(6), tf.ones(6)), tfb.Chain([tfb.CholeskyOuterProduct(), tfb.ScaleTriL()])) # Using an identity transformation, ScaleTriL is equivalent to # tfb.FillTriangular. b = tfb.ScaleTriL( diag_bijector=tfb.Identity(), diag_shift=None) # For greater control over initialization, one can manually encode # pre- and post- shifts inside of `diag_bijector`. b = tfb.ScaleTriL( diag_bijector=tfb.Chain([ tfb.AffineScalar(shift=1e-3), tfb.Softplus(), tfb.AffineScalar(shift=0.5413)]), # softplus_inverse(1.) # = log(expm1(1.)) = 0.5413 diag_shift=None) ``` """ @deprecation.deprecated( "2018-10-01", "The TensorFlow Distributions library has moved to " "TensorFlow Probability " "(https://github.com/tensorflow/probability). You " "should update all references to use `tfp.distributions` " "instead of `tf.contrib.distributions`.", warn_once=True) def __init__(self, diag_bijector=None, diag_shift=1e-5, validate_args=False, name="scale_tril"): """Instantiates the `ScaleTriL` bijector. Args: diag_bijector: `Bijector` instance, used to transform the output diagonal to be positive. Default value: `None` (i.e., `tfb.Softplus()`). diag_shift: Float value broadcastable and added to all diagonal entries after applying the `diag_bijector`. Setting a positive value forces the output diagonal entries to be positive, but prevents inverting the transformation for matrices with diagonal entries less than this value. Default value: `1e-5` (i.e., no shift is applied). validate_args: Python `bool` indicating whether arguments should be checked for correctness. 
Default value: `False` (i.e., arguments are not validated). name: Python `str` name given to ops managed by this object. Default value: `scale_tril`. """ if diag_bijector is None: diag_bijector = softplus.Softplus(validate_args=validate_args) if diag_shift is not None: diag_bijector = chain.Chain([affine_scalar.AffineScalar(shift=diag_shift), diag_bijector]) super(ScaleTriL, self).__init__( [transform_diagonal.TransformDiagonal(diag_bijector=diag_bijector), fill_triangular.FillTriangular()], validate_args=validate_args, name=name)
ettm2012/MissionPlanner
refs/heads/master
Lib/struct.py
69
from _struct import * from _struct import _clearcache from _struct import __doc__
reddit-coder/RedditSearchAndDump
refs/heads/master
Main/RedditGenreFinder.py
2
#!/usr/bin/python3.2 #Author: reddit_coder #This software is under GPLv3, this is free to distribute, change and use. import urllib.request as url import json import webbrowser from time import sleep browser = input('What browser are you using? (firefox, safari, konqueror, opera, internet explorer, netscape) ').lower() if browser == 'internet explorer': browser = 'windows-default' handle = webbrowser.get(browser) try: #self explanatory #subreddit = input("Please enter the subreddit you would like to look through: ") genre = input("Please enter what you are looking for: ").lower() pages = int(input("How many pages back do you want to look through? ")) except: print("Invalid input. Did you accidentally type a non-number for the page count?") exit() hdr = {'User-Agent' : 'genre parser by reddit_coder'} after = '' found = dict() #if its the first page, leave 'after' parameter blank else use the 'after' value from previous page #Go through all pages specified and if the title or flair matches criteria then add to a dictionary while pages != 0: if after != '': req = url.Request('http://www.reddit.com/r/metal/.json?after=' + after, None, hdr) else: req = url.Request('http://www.reddit.com/r/metal/.json', None, hdr) info = url.urlopen(req).read() info = json.loads(info.decode("UTF-8")) for i in info['data']['children']: try: print(i['data']['title'].lower()) if genre in i['data']['title'].lower(): content = i['data']['media']['oembed']['url'] found[i['data']['title']] = content if genre in i['data']['link_flair_text'].lower(): content = i['data']['media']['oembed']['url'] found[i['data']['title']] = content except: print('') pages -= 1 after = info['data']['after'] sleep(2) #this is important, reddit api states the average number of calls should be one every 2 seconds #allow the user to choose between opening all found links or just printing them openNew = input("Would you like to open all the found links in new tabs? 
(y/n)").lower() count = 0 for k,v in found.items(): if openNew == 'y': if count == 0: handle.open_new(v) else: handle.open_new_tab(v) count+=1 else: print(v)
FHannes/intellij-community
refs/heads/master
python/lib/Lib/site-packages/django/template/loaders/cached.py
229
""" Wrapper class that takes a list of template loaders as an argument and attempts to load templates from them in order, caching the result. """ from django.core.exceptions import ImproperlyConfigured from django.template.base import TemplateDoesNotExist from django.template.loader import BaseLoader, get_template_from_string, find_template_loader, make_origin from django.utils.hashcompat import sha_constructor from django.utils.importlib import import_module class Loader(BaseLoader): is_usable = True def __init__(self, loaders): self.template_cache = {} self._loaders = loaders self._cached_loaders = [] @property def loaders(self): # Resolve loaders on demand to avoid circular imports if not self._cached_loaders: for loader in self._loaders: self._cached_loaders.append(find_template_loader(loader)) return self._cached_loaders def find_template(self, name, dirs=None): for loader in self.loaders: try: template, display_name = loader(name, dirs) return (template, make_origin(display_name, loader, name, dirs)) except TemplateDoesNotExist: pass raise TemplateDoesNotExist(name) def load_template(self, template_name, template_dirs=None): key = template_name if template_dirs: # If template directories were specified, use a hash to differentiate key = '-'.join([template_name, sha_constructor('|'.join(template_dirs)).hexdigest()]) if key not in self.template_cache: template, origin = self.find_template(template_name, template_dirs) if not hasattr(template, 'render'): try: template = get_template_from_string(template, origin, template_name) except TemplateDoesNotExist: # If compiling the template we found raises TemplateDoesNotExist, # back off to returning the source and display name for the template # we were asked to load. This allows for correct identification (later) # of the actual template that does not exist. return template, origin self.template_cache[key] = template return self.template_cache[key], None def reset(self): "Empty the template cache." 
self.template_cache.clear()
MrLYC/Python-Which
refs/heads/master
src/which.py
1
#!/usr/bin/env python # encoding: utf-8 from argparse import ArgumentParser import os import platform PLATFORM_NAME = platform.system() class WhichCommand(object): def __init__(self, name): self.name = name @property def path_list(self): path = os.environ.get("PATH", "") return path.split(os.pathsep) def is_executable(self, path): if not os.path.isfile(path) and not os.path.islink(path): return False if PLATFORM_NAME == "Windows": return True return os.access(path, os.X_OK) def find(self): for p in self.path_list: if not os.path.isdir(p): continue for f in os.listdir(p): path = os.path.join(p, f) if f == self.name and self.is_executable(path): yield path if __name__ == "__main__": parser = ArgumentParser("which - locate a command") parser.add_argument("filename") parser.add_argument( "-a", dest="all", action="store_true", help="print all matching pathnames of each argument", ) args = parser.parse_args() which_cmd = WhichCommand(args.filename) existed = False for p in which_cmd.find(): print p existed = True if not args.all: break if not existed: parser.exit(1) parser.exit(0)
datsfosure/ansible
refs/heads/devel
lib/ansible/inventory/__init__.py
14
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. ############################################# from __future__ import (absolute_import, division, print_function) __metaclass__ = type import fnmatch import os import sys import re import stat import subprocess from ansible import constants as C from ansible import errors from ansible.inventory.ini import InventoryParser from ansible.inventory.script import InventoryScript from ansible.inventory.dir import InventoryDirectory from ansible.inventory.group import Group from ansible.inventory.host import Host from ansible.plugins import vars_loader from ansible.utils.path import is_executable from ansible.utils.vars import combine_vars class Inventory(object): """ Host inventory for ansible. 
""" #__slots__ = [ 'host_list', 'groups', '_restriction', '_also_restriction', '_subset', # 'parser', '_vars_per_host', '_vars_per_group', '_hosts_cache', '_groups_list', # '_pattern_cache', '_vault_password', '_vars_plugins', '_playbook_basedir'] def __init__(self, loader, variable_manager, host_list=C.DEFAULT_HOST_LIST): # the host file file, or script path, or list of hosts # if a list, inventory data will NOT be loaded self.host_list = host_list self._loader = loader self._variable_manager = variable_manager # caching to avoid repeated calculations, particularly with # external inventory scripts. self._vars_per_host = {} self._vars_per_group = {} self._hosts_cache = {} self._groups_list = {} self._pattern_cache = {} # to be set by calling set_playbook_basedir by playbook code self._playbook_basedir = None # the inventory object holds a list of groups self.groups = [] # a list of host(names) to contain current inquiries to self._restriction = None self._also_restriction = None self._subset = None if isinstance(host_list, basestring): if "," in host_list: host_list = host_list.split(",") host_list = [ h for h in host_list if h and h.strip() ] if host_list is None: self.parser = None elif isinstance(host_list, list): self.parser = None all = Group('all') self.groups = [ all ] ipv6_re = re.compile('\[([a-f:A-F0-9]*[%[0-z]+]?)\](?::(\d+))?') for x in host_list: m = ipv6_re.match(x) if m: all.add_host(Host(m.groups()[0], m.groups()[1])) else: if ":" in x: tokens = x.rsplit(":", 1) # if there is ':' in the address, then this is an ipv6 if ':' in tokens[0]: all.add_host(Host(x)) else: all.add_host(Host(tokens[0], tokens[1])) else: all.add_host(Host(x)) elif os.path.exists(host_list): if os.path.isdir(host_list): # Ensure basedir is inside the directory self.host_list = os.path.join(self.host_list, "") self.parser = InventoryDirectory(loader=self._loader, filename=host_list) self.groups = self.parser.groups.values() else: # check to see if the specified file starts with 
a # shebang (#!/), so if an error is raised by the parser # class we can show a more apropos error shebang_present = False try: inv_file = open(host_list) first_line = inv_file.readlines()[0] inv_file.close() if first_line.startswith('#!'): shebang_present = True except: pass if is_executable(host_list): try: self.parser = InventoryScript(loader=self._loader, filename=host_list) self.groups = self.parser.groups.values() except: if not shebang_present: raise errors.AnsibleError("The file %s is marked as executable, but failed to execute correctly. " % host_list + \ "If this is not supposed to be an executable script, correct this with `chmod -x %s`." % host_list) else: raise else: try: self.parser = InventoryParser(filename=host_list) self.groups = self.parser.groups.values() except: if shebang_present: raise errors.AnsibleError("The file %s looks like it should be an executable inventory script, but is not marked executable. " % host_list + \ "Perhaps you want to correct this with `chmod +x %s`?" % host_list) else: raise vars_loader.add_directory(self.basedir(), with_subdir=True) else: raise errors.AnsibleError("Unable to find an inventory file (%s), " "specify one with -i ?" 
% host_list) self._vars_plugins = [ x for x in vars_loader.all(self) ] # FIXME: shouldn't be required, since the group/host vars file # management will be done in VariableManager # get group vars from group_vars/ files and vars plugins for group in self.groups: # FIXME: combine_vars group.vars = combine_vars(group.vars, self.get_group_variables(group.name)) # get host vars from host_vars/ files and vars plugins for host in self.get_hosts(): # FIXME: combine_vars host.vars = combine_vars(host.vars, self.get_host_variables(host.name)) def _match(self, str, pattern_str): try: if pattern_str.startswith('~'): return re.search(pattern_str[1:], str) else: return fnmatch.fnmatch(str, pattern_str) except Exception, e: raise errors.AnsibleError('invalid host pattern: %s' % pattern_str) def _match_list(self, items, item_attr, pattern_str): results = [] try: if not pattern_str.startswith('~'): pattern = re.compile(fnmatch.translate(pattern_str)) else: pattern = re.compile(pattern_str[1:]) except Exception, e: raise errors.AnsibleError('invalid host pattern: %s' % pattern_str) for item in items: if pattern.match(getattr(item, item_attr)): results.append(item) return results def get_hosts(self, pattern="all"): """ find all host names matching a pattern string, taking into account any inventory restrictions or applied subsets. """ # process patterns if isinstance(pattern, list): pattern = ';'.join(pattern) patterns = pattern.replace(";",":").split(":") hosts = self._get_hosts(patterns) # exclude hosts not in a subset, if defined if self._subset: subset = self._get_hosts(self._subset) hosts = [ h for h in hosts if h in subset ] # exclude hosts mentioned in any restriction (ex: failed hosts) if self._restriction is not None: hosts = [ h for h in hosts if h in self._restriction ] if self._also_restriction is not None: hosts = [ h for h in hosts if h in self._also_restriction ] return hosts def _get_hosts(self, patterns): """ finds hosts that match a list of patterns. 
Handles negative matches as well as intersection matches. """ # Host specifiers should be sorted to ensure consistent behavior pattern_regular = [] pattern_intersection = [] pattern_exclude = [] for p in patterns: if p.startswith("!"): pattern_exclude.append(p) elif p.startswith("&"): pattern_intersection.append(p) elif p: pattern_regular.append(p) # if no regular pattern was given, hence only exclude and/or intersection # make that magically work if pattern_regular == []: pattern_regular = ['all'] # when applying the host selectors, run those without the "&" or "!" # first, then the &s, then the !s. patterns = pattern_regular + pattern_intersection + pattern_exclude hosts = [] for p in patterns: # avoid resolving a pattern that is a plain host if p in self._hosts_cache: hosts.append(self.get_host(p)) else: that = self.__get_hosts(p) if p.startswith("!"): hosts = [ h for h in hosts if h not in that ] elif p.startswith("&"): hosts = [ h for h in hosts if h in that ] else: to_append = [ h for h in that if h.name not in [ y.name for y in hosts ] ] hosts.extend(to_append) return hosts def __get_hosts(self, pattern): """ finds hosts that positively match a particular pattern. Does not take into account negative matches. """ if pattern in self._pattern_cache: return self._pattern_cache[pattern] (name, enumeration_details) = self._enumeration_info(pattern) hpat = self._hosts_in_unenumerated_pattern(name) result = self._apply_ranges(pattern, hpat) self._pattern_cache[pattern] = result return result def _enumeration_info(self, pattern): """ returns (pattern, limits) taking a regular pattern and finding out which parts of it correspond to start/stop offsets. limits is a tuple of (start, stop) or None """ # Do not parse regexes for enumeration info if pattern.startswith('~'): return (pattern, None) # The regex used to match on the range, which can be [x] or [x-y]. 
pattern_re = re.compile("^(.*)\[([-]?[0-9]+)(?:(?:-)([0-9]+))?\](.*)$") m = pattern_re.match(pattern) if m: (target, first, last, rest) = m.groups() first = int(first) if last: if first < 0: raise errors.AnsibleError("invalid range: negative indices cannot be used as the first item in a range") last = int(last) else: last = first return (target, (first, last)) else: return (pattern, None) def _apply_ranges(self, pat, hosts): """ given a pattern like foo, that matches hosts, return all of hosts given a pattern like foo[0:5], where foo matches hosts, return the first 6 hosts """ # If there are no hosts to select from, just return the # empty set. This prevents trying to do selections on an empty set. # issue#6258 if not hosts: return hosts (loose_pattern, limits) = self._enumeration_info(pat) if not limits: return hosts (left, right) = limits if left == '': left = 0 if right == '': right = 0 left=int(left) right=int(right) try: if left != right: return hosts[left:right] else: return [ hosts[left] ] except IndexError: raise errors.AnsibleError("no hosts matching the pattern '%s' were found" % pat) def _create_implicit_localhost(self, pattern): new_host = Host(pattern) new_host.set_variable("ansible_python_interpreter", sys.executable) new_host.set_variable("ansible_connection", "local") new_host.ipv4_address = '127.0.0.1' ungrouped = self.get_group("ungrouped") if ungrouped is None: self.add_group(Group('ungrouped')) ungrouped = self.get_group('ungrouped') self.get_group('all').add_child_group(ungrouped) ungrouped.add_host(new_host) return new_host def _hosts_in_unenumerated_pattern(self, pattern): """ Get all host names matching the pattern """ results = [] hosts = [] hostnames = set() # ignore any negative checks here, this is handled elsewhere pattern = pattern.replace("!","").replace("&", "") def __append_host_to_results(host): if host not in results and host.name not in hostnames: hostnames.add(host.name) results.append(host) groups = self.get_groups() for group 
in groups: if pattern == 'all': for host in group.get_hosts(): __append_host_to_results(host) else: if self._match(group.name, pattern) and group.name not in ('all', 'ungrouped'): for host in group.get_hosts(): __append_host_to_results(host) else: matching_hosts = self._match_list(group.get_hosts(), 'name', pattern) for host in matching_hosts: __append_host_to_results(host) if pattern in ["localhost", "127.0.0.1", "::1"] and len(results) == 0: new_host = self._create_implicit_localhost(pattern) results.append(new_host) return results def clear_pattern_cache(self): ''' called exclusively by the add_host plugin to allow patterns to be recalculated ''' self._pattern_cache = {} def groups_for_host(self, host): if host in self._hosts_cache: return self._hosts_cache[host].get_groups() else: return [] def groups_list(self): if not self._groups_list: groups = {} for g in self.groups: groups[g.name] = [h.name for h in g.get_hosts()] ancestors = g.get_ancestors() for a in ancestors: if a.name not in groups: groups[a.name] = [h.name for h in a.get_hosts()] self._groups_list = groups return self._groups_list def get_groups(self): return self.groups def get_host(self, hostname): if hostname not in self._hosts_cache: self._hosts_cache[hostname] = self._get_host(hostname) return self._hosts_cache[hostname] def _get_host(self, hostname): if hostname in ['localhost', '127.0.0.1', '::1']: for host in self.get_group('all').get_hosts(): if host.name in ['localhost', '127.0.0.1', '::1']: return host return self._create_implicit_localhost(hostname) else: for group in self.groups: for host in group.get_hosts(): if hostname == host.name: return host return None def get_group(self, groupname): for group in self.groups: if group.name == groupname: return group return None def get_group_variables(self, groupname, update_cached=False, vault_password=None): if groupname not in self._vars_per_group or update_cached: self._vars_per_group[groupname] = self._get_group_variables(groupname, 
vault_password=vault_password) return self._vars_per_group[groupname] def _get_group_variables(self, groupname, vault_password=None): group = self.get_group(groupname) if group is None: raise Exception("group not found: %s" % groupname) vars = {} # plugin.get_group_vars retrieves just vars for specific group vars_results = [ plugin.get_group_vars(group, vault_password=vault_password) for plugin in self._vars_plugins if hasattr(plugin, 'get_group_vars')] for updated in vars_results: if updated is not None: # FIXME: combine_vars vars = combine_vars(vars, updated) # Read group_vars/ files # FIXME: combine_vars vars = combine_vars(vars, self.get_group_vars(group)) return vars def get_vars(self, hostname, update_cached=False, vault_password=None): host = self.get_host(hostname) if not host: raise Exception("host not found: %s" % hostname) return host.get_vars() def get_host_variables(self, hostname, update_cached=False, vault_password=None): if hostname not in self._vars_per_host or update_cached: self._vars_per_host[hostname] = self._get_host_variables(hostname, vault_password=vault_password) return self._vars_per_host[hostname] def _get_host_variables(self, hostname, vault_password=None): host = self.get_host(hostname) if host is None: raise errors.AnsibleError("host not found: %s" % hostname) vars = {} # plugin.run retrieves all vars (also from groups) for host vars_results = [ plugin.run(host, vault_password=vault_password) for plugin in self._vars_plugins if hasattr(plugin, 'run')] for updated in vars_results: if updated is not None: # FIXME: combine_vars vars = combine_vars(vars, updated) # plugin.get_host_vars retrieves just vars for specific host vars_results = [ plugin.get_host_vars(host, vault_password=vault_password) for plugin in self._vars_plugins if hasattr(plugin, 'get_host_vars')] for updated in vars_results: if updated is not None: # FIXME: combine_vars vars = combine_vars(vars, updated) # still need to check InventoryParser per host vars # which 
actually means InventoryScript per host, # which is not performant if self.parser is not None: # FIXME: combine_vars vars = combine_vars(vars, self.parser.get_host_variables(host)) # Read host_vars/ files # FIXME: combine_vars vars = combine_vars(vars, self.get_host_vars(host)) return vars def add_group(self, group): if group.name not in self.groups_list(): self.groups.append(group) self._groups_list = None # invalidate internal cache else: raise errors.AnsibleError("group already in inventory: %s" % group.name) def list_hosts(self, pattern="all"): """ return a list of hostnames for a pattern """ result = [ h for h in self.get_hosts(pattern) ] if len(result) == 0 and pattern in ["localhost", "127.0.0.1", "::1"]: result = [pattern] return result def list_groups(self): return sorted([ g.name for g in self.groups ], key=lambda x: x) def restrict_to_hosts(self, restriction): """ Restrict list operations to the hosts given in restriction. This is used to exclude failed hosts in main playbook code, don't use this for other reasons. """ if not isinstance(restriction, list): restriction = [ restriction ] self._restriction = restriction def also_restrict_to(self, restriction): """ Works like restict_to but offers an additional restriction. Playbooks use this to implement serial behavior. """ if not isinstance(restriction, list): restriction = [ restriction ] self._also_restriction = restriction def subset(self, subset_pattern): """ Limits inventory results to a subset of inventory that matches a given pattern, such as to select a given geographic of numeric slice amongst a previous 'hosts' selection that only select roles, or vice versa. 
Corresponds to --limit parameter to ansible-playbook """ if subset_pattern is None: self._subset = None else: subset_pattern = subset_pattern.replace(',',':') subset_pattern = subset_pattern.replace(";",":").split(":") results = [] # allow Unix style @filename data for x in subset_pattern: if x.startswith("@"): fd = open(x[1:]) results.extend(fd.read().split("\n")) fd.close() else: results.append(x) self._subset = results def remove_restriction(self): """ Do not restrict list operations """ self._restriction = None def lift_also_restriction(self): """ Clears the also restriction """ self._also_restriction = None def is_file(self): """ did inventory come from a file? """ if not isinstance(self.host_list, basestring): return False return os.path.exists(self.host_list) def basedir(self): """ if inventory came from a file, what's the directory? """ if not self.is_file(): return None dname = os.path.dirname(self.host_list) if dname is None or dname == '' or dname == '.': cwd = os.getcwd() return os.path.abspath(cwd) return os.path.abspath(dname) def src(self): """ if inventory came from a file, what's the directory and file name? """ if not self.is_file(): return None return self.host_list def playbook_basedir(self): """ returns the directory of the current playbook """ return self._playbook_basedir def set_playbook_basedir(self, dir_name): """ sets the base directory of the playbook so inventory can use it as a basedir for host_ and group_vars, and other things. """ # Only update things if dir is a different playbook basedir if dir_name != self._playbook_basedir: self._playbook_basedir = dir_name # get group vars from group_vars/ files # FIXME: excluding the new_pb_basedir directory may result in group_vars # files loading more than they should, however with the file caching # we do this shouldn't be too much of an issue. 
Still, this should # be fixed at some point to allow a "first load" to touch all of the # directories, then later runs only touch the new basedir specified for group in self.groups: #group.vars = combine_vars(group.vars, self.get_group_vars(group, new_pb_basedir=True)) group.vars = combine_vars(group.vars, self.get_group_vars(group)) # get host vars from host_vars/ files for host in self.get_hosts(): #host.vars = combine_vars(host.vars, self.get_host_vars(host, new_pb_basedir=True)) host.vars = combine_vars(host.vars, self.get_host_vars(host)) # invalidate cache self._vars_per_host = {} self._vars_per_group = {} def get_host_vars(self, host, new_pb_basedir=False): """ Read host_vars/ files """ return self._get_hostgroup_vars(host=host, group=None, new_pb_basedir=new_pb_basedir) def get_group_vars(self, group, new_pb_basedir=False): """ Read group_vars/ files """ return self._get_hostgroup_vars(host=None, group=group, new_pb_basedir=new_pb_basedir) def _get_hostgroup_vars(self, host=None, group=None, new_pb_basedir=False): """ Loads variables from group_vars/<groupname> and host_vars/<hostname> in directories parallel to the inventory base directory or in the same directory as the playbook. Variables in the playbook dir will win over the inventory dir if files are in both. 
""" results = {} scan_pass = 0 _basedir = self.basedir() # look in both the inventory base directory and the playbook base directory # unless we do an update for a new playbook base dir if not new_pb_basedir: basedirs = [_basedir, self._playbook_basedir] else: basedirs = [self._playbook_basedir] for basedir in basedirs: # this can happen from particular API usages, particularly if not run # from /usr/bin/ansible-playbook if basedir is None: basedir = './' scan_pass = scan_pass + 1 # it's not an eror if the directory does not exist, keep moving if not os.path.exists(basedir): continue # save work of second scan if the directories are the same if _basedir == self._playbook_basedir and scan_pass != 1: continue # FIXME: these should go to VariableManager if group and host is None: # load vars in dir/group_vars/name_of_group base_path = os.path.join(basedir, "group_vars/%s" % group.name) results = self._variable_manager.add_group_vars_file(base_path, self._loader) elif host and group is None: # same for hostvars in dir/host_vars/name_of_host base_path = os.path.join(basedir, "host_vars/%s" % host.name) results = self._variable_manager.add_host_vars_file(base_path, self._loader) # all done, results is a dictionary of variables for this particular host. return results
mycodeday/crm-platform
refs/heads/master
report_webkit/report/webkit_report_demo.py
438
from openerp.addons.report_webkit.webkit_report import webkit_report_extender from openerp import SUPERUSER_ID @webkit_report_extender("report_webkit.webkit_demo_report") def extend_demo(pool, cr, uid, localcontext, context): admin = pool.get("res.users").browse(cr, uid, SUPERUSER_ID, context) localcontext.update({ "admin_name": admin.name, })
moorecoin/MooreCoinMiningAlgorithm
refs/heads/master
qa/rpc-tests/mempool_spendcoinbase.py
1
#!/usr/bin/env python2 # copyright (c) 2014 the moorecoin core developers # distributed under the mit software license, see the accompanying # file copying or http://www.opensource.org/licenses/mit-license.php. # # test spending coinbase transactions. # the coinbase transaction in block n can appear in block # n+100... so is valid in the mempool when the best block # height is n+99. # this test makes sure coinbase spends that will be mature # in the next block are accepted into the memory pool, # but less mature coinbase spends are not. # from test_framework.test_framework import moorecointestframework from test_framework.util import * import os import shutil # create one-input, one-output, no-fee transaction: class mempoolspendcoinbasetest(moorecointestframework): def setup_network(self): # just need one node for this test args = ["-checkmempool", "-debug=mempool"] self.nodes = [] self.nodes.append(start_node(0, self.options.tmpdir, args)) self.is_network_split = false def create_tx(self, from_txid, to_address, amount): inputs = [{ "txid" : from_txid, "vout" : 0}] outputs = { to_address : amount } rawtx = self.nodes[0].createrawtransaction(inputs, outputs) signresult = self.nodes[0].signrawtransaction(rawtx) assert_equal(signresult["complete"], true) return signresult["hex"] def run_test(self): chain_height = self.nodes[0].getblockcount() assert_equal(chain_height, 200) node0_address = self.nodes[0].getnewaddress() # coinbase at height chain_height-100+1 ok in mempool, should # get mined. coinbase at height chain_height-100+2 is # is too immature to spend. 
b = [ self.nodes[0].getblockhash(n) for n in range(101, 103) ] coinbase_txids = [ self.nodes[0].getblock(h)['tx'][0] for h in b ] spends_raw = [ self.create_tx(txid, node0_address, 50) for txid in coinbase_txids ] spend_101_id = self.nodes[0].sendrawtransaction(spends_raw[0]) # coinbase at height 102 should be too immature to spend assert_raises(jsonrpcexception, self.nodes[0].sendrawtransaction, spends_raw[1]) # mempool should have just spend_101: assert_equal(self.nodes[0].getrawmempool(), [ spend_101_id ]) # mine a block, spend_101 should get confirmed self.nodes[0].generate(1) assert_equal(set(self.nodes[0].getrawmempool()), set()) # ... and now height 102 can be spent: spend_102_id = self.nodes[0].sendrawtransaction(spends_raw[1]) assert_equal(self.nodes[0].getrawmempool(), [ spend_102_id ]) if __name__ == '__main__': mempoolspendcoinbasetest().main()
AVarfolomeev/picasso-graphic
refs/heads/master
tools/gyp/tools/pretty_sln.py
806
#!/usr/bin/env python # Copyright (c) 2012 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Prints the information in a sln file in a diffable way. It first outputs each projects in alphabetical order with their dependencies. Then it outputs a possible build order. """ __author__ = 'nsylvain (Nicolas Sylvain)' import os import re import sys import pretty_vcproj def BuildProject(project, built, projects, deps): # if all dependencies are done, we can build it, otherwise we try to build the # dependency. # This is not infinite-recursion proof. for dep in deps[project]: if dep not in built: BuildProject(dep, built, projects, deps) print project built.append(project) def ParseSolution(solution_file): # All projects, their clsid and paths. projects = dict() # A list of dependencies associated with a project. dependencies = dict() # Regular expressions that matches the SLN format. # The first line of a project definition. begin_project = re.compile(('^Project\("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942' '}"\) = "(.*)", "(.*)", "(.*)"$')) # The last line of a project definition. end_project = re.compile('^EndProject$') # The first line of a dependency list. begin_dep = re.compile('ProjectSection\(ProjectDependencies\) = postProject$') # The last line of a dependency list. end_dep = re.compile('EndProjectSection$') # A line describing a dependency. dep_line = re.compile(' *({.*}) = ({.*})$') in_deps = False solution = open(solution_file) for line in solution: results = begin_project.search(line) if results: # Hack to remove icu because the diff is too different. if results.group(1).find('icu') != -1: continue # We remove "_gyp" from the names because it helps to diff them. 
current_project = results.group(1).replace('_gyp', '') projects[current_project] = [results.group(2).replace('_gyp', ''), results.group(3), results.group(2)] dependencies[current_project] = [] continue results = end_project.search(line) if results: current_project = None continue results = begin_dep.search(line) if results: in_deps = True continue results = end_dep.search(line) if results: in_deps = False continue results = dep_line.search(line) if results and in_deps and current_project: dependencies[current_project].append(results.group(1)) continue # Change all dependencies clsid to name instead. for project in dependencies: # For each dependencies in this project new_dep_array = [] for dep in dependencies[project]: # Look for the project name matching this cldis for project_info in projects: if projects[project_info][1] == dep: new_dep_array.append(project_info) dependencies[project] = sorted(new_dep_array) return (projects, dependencies) def PrintDependencies(projects, deps): print "---------------------------------------" print "Dependencies for all projects" print "---------------------------------------" print "-- --" for (project, dep_list) in sorted(deps.items()): print "Project : %s" % project print "Path : %s" % projects[project][0] if dep_list: for dep in dep_list: print " - %s" % dep print "" print "-- --" def PrintBuildOrder(projects, deps): print "---------------------------------------" print "Build order " print "---------------------------------------" print "-- --" built = [] for (project, _) in sorted(deps.items()): if project not in built: BuildProject(project, built, projects, deps) print "-- --" def PrintVCProj(projects): for project in projects: print "-------------------------------------" print "-------------------------------------" print project print project print project print "-------------------------------------" print "-------------------------------------" project_path = os.path.abspath(os.path.join(os.path.dirname(sys.argv[1]), 
projects[project][2])) pretty = pretty_vcproj argv = [ '', project_path, '$(SolutionDir)=%s\\' % os.path.dirname(sys.argv[1]), ] argv.extend(sys.argv[3:]) pretty.main(argv) def main(): # check if we have exactly 1 parameter. if len(sys.argv) < 2: print 'Usage: %s "c:\\path\\to\\project.sln"' % sys.argv[0] return 1 (projects, deps) = ParseSolution(sys.argv[1]) PrintDependencies(projects, deps) PrintBuildOrder(projects, deps) if '--recursive' in sys.argv: PrintVCProj(projects) return 0 if __name__ == '__main__': sys.exit(main())
nikhilprathapani/python-for-android
refs/heads/master
python3-alpha/python3-src/Lib/tkinter/test/runtktests.py
67
""" Use this module to get and run all tk tests. tkinter tests should live in a package inside the directory where this file lives, like test_tkinter. Extensions also should live in packages following the same rule as above. """ import os import sys import unittest import importlib import test.support this_dir_path = os.path.abspath(os.path.dirname(__file__)) def is_package(path): for name in os.listdir(path): if name in ('__init__.py', '__init__.pyc', '__init.pyo'): return True return False def get_tests_modules(basepath=this_dir_path, gui=True, packages=None): """This will import and yield modules whose names start with test_ and are inside packages found in the path starting at basepath. If packages is specified it should contain package names that want their tests collected. """ py_ext = '.py' for dirpath, dirnames, filenames in os.walk(basepath): for dirname in list(dirnames): if dirname[0] == '.': dirnames.remove(dirname) if is_package(dirpath) and filenames: pkg_name = dirpath[len(basepath) + len(os.sep):].replace('/', '.') if packages and pkg_name not in packages: continue filenames = filter( lambda x: x.startswith('test_') and x.endswith(py_ext), filenames) for name in filenames: try: yield importlib.import_module( ".%s.%s" % (pkg_name, name[:-len(py_ext)]), "tkinter.test") except test.support.ResourceDenied: if gui: raise def get_tests(text=True, gui=True, packages=None): """Yield all the tests in the modules found by get_tests_modules. If nogui is True, only tests that do not require a GUI will be returned.""" attrs = [] if text: attrs.append('tests_nogui') if gui: attrs.append('tests_gui') for module in get_tests_modules(gui=gui, packages=packages): for attr in attrs: for test in getattr(module, attr, ()): yield test if __name__ == "__main__": test.support.use_resources = ['gui'] test.support.run_unittest(*get_tests())
shakamunyi/neutron
refs/heads/master
neutron/plugins/sriovnicagent/pci_lib.py
15
# Copyright 2014 Mellanox Technologies, Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import re from oslo_log import log as logging from neutron.agent.linux import ip_lib from neutron.i18n import _LE, _LW from neutron.plugins.sriovnicagent.common import exceptions as exc LOG = logging.getLogger(__name__) class PciDeviceIPWrapper(ip_lib.IPWrapper): """Wrapper class for ip link commands. wrapper for getting/setting pci device details using ip link... """ VF_PATTERN = r"^vf\s+(?P<vf_index>\d+)\s+" MAC_PATTERN = r"MAC\s+(?P<mac>[a-fA-F0-9:]+)," STATE_PATTERN = r"\s+link-state\s+(?P<state>\w+)" ANY_PATTERN = ".*," VF_LINE_FORMAT = VF_PATTERN + MAC_PATTERN + ANY_PATTERN + STATE_PATTERN VF_DETAILS_REG_EX = re.compile(VF_LINE_FORMAT) class LinkState(object): ENABLE = "enable" DISABLE = "disable" def __init__(self, dev_name): super(PciDeviceIPWrapper, self).__init__() self.dev_name = dev_name def get_assigned_macs(self, vf_list): """Get assigned mac addresses for vf list. 
@param vf_list: list of vf indexes @return: list of assigned mac addresses """ try: out = self._execute('', "link", ("show", self.dev_name)) except Exception as e: LOG.exception(_LE("Failed executing ip command")) raise exc.IpCommandError(dev_name=self.dev_name, reason=e) vf_lines = self._get_vf_link_show(vf_list, out) vf_details_list = [] if vf_lines: for vf_line in vf_lines: vf_details = self._parse_vf_link_show(vf_line) if vf_details: vf_details_list.append(vf_details) return [details.get("MAC") for details in vf_details_list] def get_vf_state(self, vf_index): """Get vf state {True/False} @param vf_index: vf index @todo: Handle "auto" state """ try: out = self._execute('', "link", ("show", self.dev_name)) except Exception as e: LOG.exception(_LE("Failed executing ip command")) raise exc.IpCommandError(dev_name=self.dev_name, reason=e) vf_lines = self._get_vf_link_show([vf_index], out) if vf_lines: vf_details = self._parse_vf_link_show(vf_lines[0]) if vf_details: state = vf_details.get("link-state", self.LinkState.DISABLE) if state != self.LinkState.DISABLE: return True return False def set_vf_state(self, vf_index, state): """sets vf state. 
@param vf_index: vf index @param state: required state {True/False} """ status_str = self.LinkState.ENABLE if state else \ self.LinkState.DISABLE try: self._execute('', "link", ("set", self.dev_name, "vf", str(vf_index), "state", status_str)) except Exception as e: LOG.exception(_LE("Failed executing ip command")) raise exc.IpCommandError(dev_name=self.dev_name, reason=e) def _get_vf_link_show(self, vf_list, link_show_out): """Get link show output for VFs get vf link show command output filtered by given vf list @param vf_list: list of vf indexes @param link_show_out: link show command output @return: list of output rows regarding given vf_list """ vf_lines = [] for line in link_show_out.split("\n"): line = line.strip() if line.startswith("vf"): details = line.split() index = int(details[1]) if index in vf_list: vf_lines.append(line) if not vf_lines: LOG.warning(_LW("Cannot find vfs %(vfs)s in device %(dev_name)s"), {'vfs': vf_list, 'dev_name': self.dev_name}) return vf_lines def _parse_vf_link_show(self, vf_line): """Parses vf link show command output line. @param vf_line: link show vf line """ vf_details = {} pattern_match = self.VF_DETAILS_REG_EX.match(vf_line) if pattern_match: vf_details["vf"] = int(pattern_match.group("vf_index")) vf_details["MAC"] = pattern_match.group("mac") vf_details["link-state"] = pattern_match.group("state") else: LOG.warning(_LW("failed to parse vf link show line %(line)s: " "for %(device)s"), {'line': vf_line, 'device': self.dev_name}) return vf_details
tempbottle/kbengine
refs/heads/master
kbe/src/lib/python/Lib/test/test_multiprocessing_forkserver.py
92
import unittest import test._test_multiprocessing test._test_multiprocessing.install_tests_in_module_dict(globals(), 'forkserver') if __name__ == '__main__': unittest.main()
stencila/hub
refs/heads/master
manager/projects/migrations/0011_auto_20200806_0605.py
1
# Generated by Django 3.0.8 on 2020-08-06 06:05 from django.db import migrations, models import django.db.models.deletion import projects.models.projects class Migration(migrations.Migration): dependencies = [ ('projects', '0010_auto_20200804_0354'), ] operations = [ migrations.AddField( model_name='project', name='key', field=models.CharField(default=projects.models.projects.generate_project_key, help_text='A unique, and very difficult to guess, key to access this project if it is not public.', max_length=64), ), migrations.AddField( model_name='project', name='liveness', field=models.CharField(choices=[('latest', 'Use latest snapshot'), ('pinned', 'Pinned to snapshot')], default='latest', help_text='Where to serve the content for this project from.', max_length=16), ), migrations.AddField( model_name='project', name='pinned', field=models.ForeignKey(blank=True, help_text='If pinned, the snapshot to pin to, when serving content.', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='project_pinned', to='projects.Snapshot'), ), ]
j00bar/django-widgy
refs/heads/master
widgy/contrib/form_builder/south_migrations/0008_auto__add_field_emailsuccesshandler_subject.py
1
# -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding field 'EmailSuccessHandler.subject' db.add_column('form_builder_emailsuccesshandler', 'subject', self.gf('django.db.models.fields.CharField')(default='', max_length=255), keep_default=False) def backwards(self, orm): # Deleting field 'EmailSuccessHandler.subject' db.delete_column('form_builder_emailsuccesshandler', 'subject') models = { 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'form_builder.choicefield': { 'Meta': {'object_name': 'ChoiceField'}, 'choices': ('django.db.models.fields.TextField', [], {}), 'help_text': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'ident': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'}), 'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'required': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'type': ('django.db.models.fields.CharField', [], {'max_length': '25'}) }, 'form_builder.emailsuccesshandler': { 'Meta': {'object_name': 'EmailSuccessHandler'}, 'content': ('widgy.contrib.page_builder.db.fields.MarkdownField', [], {'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'subject': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'to': ('django.db.models.fields.EmailField', [], 
{'max_length': '75'}) }, 'form_builder.emailuserhandler': { 'Meta': {'object_name': 'EmailUserHandler'}, 'content': ('widgy.contrib.page_builder.db.fields.MarkdownField', [], {'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'subject': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'to': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'+'", 'null': 'True', 'to': "orm['widgy.Node']"}) }, 'form_builder.form': { 'Meta': {'object_name': 'Form'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'ident': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'default': "u'Untitled form 8'", 'max_length': '255'}) }, 'form_builder.forminput': { 'Meta': {'object_name': 'FormInput'}, 'help_text': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'ident': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'}), 'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'required': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'type': ('django.db.models.fields.CharField', [], {'max_length': '255'}) }, 'form_builder.formsubmission': { 'Meta': {'object_name': 'FormSubmission'}, 'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 4, 15, 0, 0)'}), 'form_ident': ('django.db.models.fields.CharField', [], {'max_length': '36'}), 'form_node': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'form_submissions'", 'on_delete': 'models.PROTECT', 'to': "orm['widgy.Node']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) }, 'form_builder.formvalue': { 'Meta': 
{'object_name': 'FormValue'}, 'field_ident': ('django.db.models.fields.CharField', [], {'max_length': '36'}), 'field_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'field_node': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['widgy.Node']", 'null': 'True', 'on_delete': 'models.SET_NULL'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'submission': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'values'", 'to': "orm['form_builder.FormSubmission']"}), 'value': ('django.db.models.fields.TextField', [], {}) }, 'form_builder.multiplechoicefield': { 'Meta': {'object_name': 'MultipleChoiceField'}, 'choices': ('django.db.models.fields.TextField', [], {}), 'help_text': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'ident': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'}), 'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'required': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'type': ('django.db.models.fields.CharField', [], {'max_length': '25'}) }, 'form_builder.savedatahandler': { 'Meta': {'object_name': 'SaveDataHandler'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}) }, 'form_builder.submitbutton': { 'Meta': {'object_name': 'SubmitButton'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'text': ('django.db.models.fields.CharField', [], {'default': "u'submit'", 'max_length': '255'}) }, 'form_builder.textarea': { 'Meta': {'object_name': 'Textarea'}, 'help_text': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'ident': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'}), 'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 
'required': ('django.db.models.fields.BooleanField', [], {'default': 'True'}) }, 'form_builder.uncaptcha': { 'Meta': {'object_name': 'Uncaptcha'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}) }, 'widgy.node': { 'Meta': {'object_name': 'Node'}, 'content_id': ('django.db.models.fields.PositiveIntegerField', [], {}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'depth': ('django.db.models.fields.PositiveIntegerField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_frozen': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'numchild': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}) } } complete_apps = ['form_builder']
alvin319/CarnotKE
refs/heads/master
jyhton/bugtests/test340c.py
13
import java.lang.Exception raise java.lang.Exception
NoviceInDisguise/render-to-loop
refs/heads/master
basic-crossfade/loop.py
1
import bpy scn = bpy.context.scene #set convenient variable to current scene sel = bpy.context.selected_sequences #set convenient variable to selected sequences if len(sel) == 1: #if only one (1) sequence is selected, then execute track = sel[0] #set track to the element in the array str_len = track.frame_final_duration #set variable to the length of selected sequence bpy.ops.sequencer.cut(frame=(str_len/2)+1,type='HARD',side='RIGHT') #cut track in half #update selected sel = bpy.context.selected_sequences #reset sel to selected sequences track = sel[0] #reset track to the element in the array #move track track.channel = track.channel+1 #move right sequence up one channel track.frame_start = 1 #move sequence to frame 1 ############################ #keyframe top track opacity# ############################ path = bpy.context.selected_sequences #get data path data_str = str(path) #convert data path to string data_str = data_str.split("scenes['Scene'].") #split string in to a list data_str = data_str[-1] #get last piece data_str = data_str[:-1] #remove last character print("Selected sequence data path: " + str(data_str)) #set to 1 track.blend_alpha = 1 #set alpha blend value scn.keyframe_insert(data_path=data_str + ".blend_alpha",frame=float(1)) #keyframe alpha blend value print("Keyframe set on frame 1.0") #set to 0 track.blend_alpha = 0 #set alpha blend value scn.keyframe_insert(data_path=data_str + ".blend_alpha",frame=float(str_len/2)) #keyframe alpha blend value print("Keyframe set on frame " + str(str_len/2)) scn.frame_end = str_len/2 #set end frame to last frame print("End frame set to " + str(str_len/2)) #print strip length elif len(sel_sequence) > 1: print("ERROR: Multiple sequences selected") elif len(sel_sequence) < 1: print("ERROR: Please select a valid sequence") else: print("ERROR: Unknown error")
craighiller/serendipity
refs/heads/master
venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/eucjpprober.py
2918
######################## BEGIN LICENSE BLOCK ######################## # The Original Code is mozilla.org code. # # The Initial Developer of the Original Code is # Netscape Communications Corporation. # Portions created by the Initial Developer are Copyright (C) 1998 # the Initial Developer. All Rights Reserved. # # Contributor(s): # Mark Pilgrim - port to Python # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### import sys from . 
import constants from .mbcharsetprober import MultiByteCharSetProber from .codingstatemachine import CodingStateMachine from .chardistribution import EUCJPDistributionAnalysis from .jpcntx import EUCJPContextAnalysis from .mbcssm import EUCJPSMModel class EUCJPProber(MultiByteCharSetProber): def __init__(self): MultiByteCharSetProber.__init__(self) self._mCodingSM = CodingStateMachine(EUCJPSMModel) self._mDistributionAnalyzer = EUCJPDistributionAnalysis() self._mContextAnalyzer = EUCJPContextAnalysis() self.reset() def reset(self): MultiByteCharSetProber.reset(self) self._mContextAnalyzer.reset() def get_charset_name(self): return "EUC-JP" def feed(self, aBuf): aLen = len(aBuf) for i in range(0, aLen): # PY3K: aBuf is a byte array, so aBuf[i] is an int, not a byte codingState = self._mCodingSM.next_state(aBuf[i]) if codingState == constants.eError: if constants._debug: sys.stderr.write(self.get_charset_name() + ' prober hit error at byte ' + str(i) + '\n') self._mState = constants.eNotMe break elif codingState == constants.eItsMe: self._mState = constants.eFoundIt break elif codingState == constants.eStart: charLen = self._mCodingSM.get_current_charlen() if i == 0: self._mLastChar[1] = aBuf[0] self._mContextAnalyzer.feed(self._mLastChar, charLen) self._mDistributionAnalyzer.feed(self._mLastChar, charLen) else: self._mContextAnalyzer.feed(aBuf[i - 1:i + 1], charLen) self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1], charLen) self._mLastChar[0] = aBuf[aLen - 1] if self.get_state() == constants.eDetecting: if (self._mContextAnalyzer.got_enough_data() and (self.get_confidence() > constants.SHORTCUT_THRESHOLD)): self._mState = constants.eFoundIt return self.get_state() def get_confidence(self): contxtCf = self._mContextAnalyzer.get_confidence() distribCf = self._mDistributionAnalyzer.get_confidence() return max(contxtCf, distribCf)
tgarc/rideagg
refs/heads/master
requests/packages/chardet/eucjpprober.py
2918
######################## BEGIN LICENSE BLOCK ######################## # The Original Code is mozilla.org code. # # The Initial Developer of the Original Code is # Netscape Communications Corporation. # Portions created by the Initial Developer are Copyright (C) 1998 # the Initial Developer. All Rights Reserved. # # Contributor(s): # Mark Pilgrim - port to Python # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### import sys from . 
import constants from .mbcharsetprober import MultiByteCharSetProber from .codingstatemachine import CodingStateMachine from .chardistribution import EUCJPDistributionAnalysis from .jpcntx import EUCJPContextAnalysis from .mbcssm import EUCJPSMModel class EUCJPProber(MultiByteCharSetProber): def __init__(self): MultiByteCharSetProber.__init__(self) self._mCodingSM = CodingStateMachine(EUCJPSMModel) self._mDistributionAnalyzer = EUCJPDistributionAnalysis() self._mContextAnalyzer = EUCJPContextAnalysis() self.reset() def reset(self): MultiByteCharSetProber.reset(self) self._mContextAnalyzer.reset() def get_charset_name(self): return "EUC-JP" def feed(self, aBuf): aLen = len(aBuf) for i in range(0, aLen): # PY3K: aBuf is a byte array, so aBuf[i] is an int, not a byte codingState = self._mCodingSM.next_state(aBuf[i]) if codingState == constants.eError: if constants._debug: sys.stderr.write(self.get_charset_name() + ' prober hit error at byte ' + str(i) + '\n') self._mState = constants.eNotMe break elif codingState == constants.eItsMe: self._mState = constants.eFoundIt break elif codingState == constants.eStart: charLen = self._mCodingSM.get_current_charlen() if i == 0: self._mLastChar[1] = aBuf[0] self._mContextAnalyzer.feed(self._mLastChar, charLen) self._mDistributionAnalyzer.feed(self._mLastChar, charLen) else: self._mContextAnalyzer.feed(aBuf[i - 1:i + 1], charLen) self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1], charLen) self._mLastChar[0] = aBuf[aLen - 1] if self.get_state() == constants.eDetecting: if (self._mContextAnalyzer.got_enough_data() and (self.get_confidence() > constants.SHORTCUT_THRESHOLD)): self._mState = constants.eFoundIt return self.get_state() def get_confidence(self): contxtCf = self._mContextAnalyzer.get_confidence() distribCf = self._mDistributionAnalyzer.get_confidence() return max(contxtCf, distribCf)
Denisolt/Tensorflow_Chat_Bot
refs/heads/master
local/lib/python2.7/site-packages/numpy/compat/__init__.py
264
""" Compatibility module. This module contains duplicated code from Python itself or 3rd party extensions, which may be included for the following reasons: * compatibility * we may only need a small subset of the copied library/module """ from __future__ import division, absolute_import, print_function from . import _inspect from . import py3k from ._inspect import getargspec, formatargspec from .py3k import * __all__ = [] __all__.extend(_inspect.__all__) __all__.extend(py3k.__all__)
David-Wobrock/django-fake-database-backends
refs/heads/master
tests/test_project/test_project/__init__.py
12133432
evoskuil/czmq
refs/heads/master
setup.py
17
from distutils.core import setup setup(name='czmq', description="""The high-level C binding for 0MQ""", version='0.1', url='https://github.com/zeromq/czmq', packages=['czmq'], package_dir={'': 'bindings/python'}, )
PaulKinlan/cli-caniuse
refs/heads/master
site/app/scripts/bower_components/jsrepl-build/extern/python/reloop-closured/lib/python2.7/shutil.py
76
"""Utility functions for copying and archiving files and directory trees. XXX The functions here don't copy the resource fork or other metadata on Mac. """ import os import sys import stat from os.path import abspath import fnmatch import collections import errno try: from pwd import getpwnam except ImportError: getpwnam = None try: from grp import getgrnam except ImportError: getgrnam = None __all__ = ["copyfileobj", "copyfile", "copymode", "copystat", "copy", "copy2", "copytree", "move", "rmtree", "Error", "SpecialFileError", "ExecError", "make_archive", "get_archive_formats", "register_archive_format", "unregister_archive_format"] class Error(EnvironmentError): pass class SpecialFileError(EnvironmentError): """Raised when trying to do a kind of operation (e.g. copying) which is not supported on a special file (e.g. a named pipe)""" class ExecError(EnvironmentError): """Raised when a command could not be executed""" try: WindowsError except NameError: WindowsError = None def copyfileobj(fsrc, fdst, length=16*1024): """copy data from file-like object fsrc to file-like object fdst""" while 1: buf = fsrc.read(length) if not buf: break fdst.write(buf) def _samefile(src, dst): # Macintosh, Unix. if hasattr(os.path, 'samefile'): try: return os.path.samefile(src, dst) except OSError: return False # All other platforms: check for same pathname. return (os.path.normcase(os.path.abspath(src)) == os.path.normcase(os.path.abspath(dst))) def copyfile(src, dst): """Copy data from src to dst""" if _samefile(src, dst): raise Error("`%s` and `%s` are the same file" % (src, dst)) for fn in [src, dst]: try: st = os.stat(fn) except OSError: # File most likely does not exist pass else: # XXX What about other special files? (sockets, devices...) 
if stat.S_ISFIFO(st.st_mode): raise SpecialFileError("`%s` is a named pipe" % fn) with open(src, 'rb') as fsrc: with open(dst, 'wb') as fdst: copyfileobj(fsrc, fdst) def copymode(src, dst): """Copy mode bits from src to dst""" if hasattr(os, 'chmod'): st = os.stat(src) mode = stat.S_IMODE(st.st_mode) os.chmod(dst, mode) def copystat(src, dst): """Copy all stat info (mode bits, atime, mtime, flags) from src to dst""" st = os.stat(src) mode = stat.S_IMODE(st.st_mode) if hasattr(os, 'utime'): os.utime(dst, (st.st_atime, st.st_mtime)) if hasattr(os, 'chmod'): os.chmod(dst, mode) if hasattr(os, 'chflags') and hasattr(st, 'st_flags'): try: os.chflags(dst, st.st_flags) except OSError, why: if (not hasattr(errno, 'EOPNOTSUPP') or why.errno != errno.EOPNOTSUPP): raise def copy(src, dst): """Copy data and mode bits ("cp src dst"). The destination may be a directory. """ if os.path.isdir(dst): dst = os.path.join(dst, os.path.basename(src)) copyfile(src, dst) copymode(src, dst) def copy2(src, dst): """Copy data and all stat info ("cp -p src dst"). The destination may be a directory. """ if os.path.isdir(dst): dst = os.path.join(dst, os.path.basename(src)) copyfile(src, dst) copystat(src, dst) def ignore_patterns(*patterns): """Function that can be used as copytree() ignore parameter. Patterns is a sequence of glob-style patterns that are used to exclude files""" def _ignore_patterns(path, names): ignored_names = [] for pattern in patterns: ignored_names.extend(fnmatch.filter(names, pattern)) return set(ignored_names) return _ignore_patterns def copytree(src, dst, symlinks=False, ignore=None): """Recursively copy a directory tree using copy2(). The destination directory must not already exist. If exception(s) occur, an Error is raised with a list of reasons. If the optional symlinks flag is true, symbolic links in the source tree result in symbolic links in the destination tree; if it is false, the contents of the files pointed to by symbolic links are copied. 
The optional ignore argument is a callable. If given, it is called with the `src` parameter, which is the directory being visited by copytree(), and `names` which is the list of `src` contents, as returned by os.listdir(): callable(src, names) -> ignored_names Since copytree() is called recursively, the callable will be called once for each directory that is copied. It returns a list of names relative to the `src` directory that should not be copied. XXX Consider this example code rather than the ultimate tool. """ names = os.listdir(src) if ignore is not None: ignored_names = ignore(src, names) else: ignored_names = set() os.makedirs(dst) errors = [] for name in names: if name in ignored_names: continue srcname = os.path.join(src, name) dstname = os.path.join(dst, name) try: if symlinks and os.path.islink(srcname): linkto = os.readlink(srcname) os.symlink(linkto, dstname) elif os.path.isdir(srcname): copytree(srcname, dstname, symlinks, ignore) else: # Will raise a SpecialFileError for unsupported file types copy2(srcname, dstname) # catch the Error from the recursive copytree so that we can # continue with other files except Error, err: errors.extend(err.args[0]) except EnvironmentError, why: errors.append((srcname, dstname, str(why))) try: copystat(src, dst) except OSError, why: if WindowsError is not None and isinstance(why, WindowsError): # Copying file access times may fail on Windows pass else: errors.extend((src, dst, str(why))) if errors: raise Error, errors def rmtree(path, ignore_errors=False, onerror=None): """Recursively delete a directory tree. If ignore_errors is set, errors are ignored; otherwise, if onerror is set, it is called to handle the error with arguments (func, path, exc_info) where func is os.listdir, os.remove, or os.rmdir; path is the argument to that function that caused it to fail; and exc_info is a tuple returned by sys.exc_info(). If ignore_errors is false and onerror is None, an exception is raised. 
""" if ignore_errors: def onerror(*args): pass elif onerror is None: def onerror(*args): raise try: if os.path.islink(path): # symlinks to directories are forbidden, see bug #1669 raise OSError("Cannot call rmtree on a symbolic link") except OSError: onerror(os.path.islink, path, sys.exc_info()) # can't continue even if onerror hook returns return names = [] try: names = os.listdir(path) except os.error, err: onerror(os.listdir, path, sys.exc_info()) for name in names: fullname = os.path.join(path, name) try: mode = os.lstat(fullname).st_mode except os.error: mode = 0 if stat.S_ISDIR(mode): rmtree(fullname, ignore_errors, onerror) else: try: os.remove(fullname) except os.error, err: onerror(os.remove, fullname, sys.exc_info()) try: os.rmdir(path) except os.error: onerror(os.rmdir, path, sys.exc_info()) def _basename(path): # A basename() variant which first strips the trailing slash, if present. # Thus we always get the last component of the path, even for directories. return os.path.basename(path.rstrip(os.path.sep)) def move(src, dst): """Recursively move a file or directory to another location. This is similar to the Unix "mv" command. If the destination is a directory or a symlink to a directory, the source is moved inside the directory. The destination path must not already exist. If the destination already exists but is not a directory, it may be overwritten depending on os.rename() semantics. If the destination is on our current filesystem, then rename() is used. Otherwise, src is copied to the destination and then removed. A lot more could be done here... A look at a mv.c shows a lot of the issues this implementation glosses over. """ real_dst = dst if os.path.isdir(dst): if _samefile(src, dst): # We might be on a case insensitive filesystem, # perform the rename anyway. 
os.rename(src, dst) return real_dst = os.path.join(dst, _basename(src)) if os.path.exists(real_dst): raise Error, "Destination path '%s' already exists" % real_dst try: os.rename(src, real_dst) except OSError: if os.path.isdir(src): if _destinsrc(src, dst): raise Error, "Cannot move a directory '%s' into itself '%s'." % (src, dst) copytree(src, real_dst, symlinks=True) rmtree(src) else: copy2(src, real_dst) os.unlink(src) def _destinsrc(src, dst): src = abspath(src) dst = abspath(dst) if not src.endswith(os.path.sep): src += os.path.sep if not dst.endswith(os.path.sep): dst += os.path.sep return dst.startswith(src) def _get_gid(name): """Returns a gid, given a group name.""" if getgrnam is None or name is None: return None try: result = getgrnam(name) except KeyError: result = None if result is not None: return result[2] return None def _get_uid(name): """Returns an uid, given a user name.""" if getpwnam is None or name is None: return None try: result = getpwnam(name) except KeyError: result = None if result is not None: return result[2] return None def _make_tarball(base_name, base_dir, compress="gzip", verbose=0, dry_run=0, owner=None, group=None, logger=None): """Create a (possibly compressed) tar file from all the files under 'base_dir'. 'compress' must be "gzip" (the default), "bzip2", or None. 'owner' and 'group' can be used to define an owner and a group for the archive that is being built. If not provided, the current owner and group will be used. The output tar file will be named 'base_name' + ".tar", possibly plus the appropriate compression extension (".gz", or ".bz2"). Returns the output filename. 
""" tar_compression = {'gzip': 'gz', 'bzip2': 'bz2', None: ''} compress_ext = {'gzip': '.gz', 'bzip2': '.bz2'} # flags for compression program, each element of list will be an argument if compress is not None and compress not in compress_ext.keys(): raise ValueError, \ ("bad value for 'compress': must be None, 'gzip' or 'bzip2'") archive_name = base_name + '.tar' + compress_ext.get(compress, '') archive_dir = os.path.dirname(archive_name) if not os.path.exists(archive_dir): logger.info("creating %s" % archive_dir) if not dry_run: os.makedirs(archive_dir) # creating the tarball import tarfile # late import so Python build itself doesn't break if logger is not None: logger.info('Creating tar archive') uid = _get_uid(owner) gid = _get_gid(group) def _set_uid_gid(tarinfo): if gid is not None: tarinfo.gid = gid tarinfo.gname = group if uid is not None: tarinfo.uid = uid tarinfo.uname = owner return tarinfo if not dry_run: tar = tarfile.open(archive_name, 'w|%s' % tar_compression[compress]) try: tar.add(base_dir, filter=_set_uid_gid) finally: tar.close() return archive_name def _call_external_zip(base_dir, zip_filename, verbose=False, dry_run=False): # XXX see if we want to keep an external call here if verbose: zipoptions = "-r" else: zipoptions = "-rq" from distutils.errors import DistutilsExecError from distutils.spawn import spawn try: spawn(["zip", zipoptions, zip_filename, base_dir], dry_run=dry_run) except DistutilsExecError: # XXX really should distinguish between "couldn't find # external 'zip' command" and "zip failed". raise ExecError, \ ("unable to create zip file '%s': " "could neither import the 'zipfile' module nor " "find a standalone zip utility") % zip_filename def _make_zipfile(base_name, base_dir, verbose=0, dry_run=0, logger=None): """Create a zip file from all the files under 'base_dir'. The output zip file will be named 'base_name' + ".zip". 
Uses either the "zipfile" Python module (if available) or the InfoZIP "zip" utility (if installed and found on the default search path). If neither tool is available, raises ExecError. Returns the name of the output zip file. """ zip_filename = base_name + ".zip" archive_dir = os.path.dirname(base_name) if not os.path.exists(archive_dir): if logger is not None: logger.info("creating %s", archive_dir) if not dry_run: os.makedirs(archive_dir) # If zipfile module is not available, try spawning an external 'zip' # command. try: import zipfile except ImportError: zipfile = None if zipfile is None: _call_external_zip(base_dir, zip_filename, verbose, dry_run) else: if logger is not None: logger.info("creating '%s' and adding '%s' to it", zip_filename, base_dir) if not dry_run: zip = zipfile.ZipFile(zip_filename, "w", compression=zipfile.ZIP_DEFLATED) for dirpath, dirnames, filenames in os.walk(base_dir): for name in filenames: path = os.path.normpath(os.path.join(dirpath, name)) if os.path.isfile(path): zip.write(path, path) if logger is not None: logger.info("adding '%s'", path) zip.close() return zip_filename _ARCHIVE_FORMATS = { 'gztar': (_make_tarball, [('compress', 'gzip')], "gzip'ed tar-file"), 'bztar': (_make_tarball, [('compress', 'bzip2')], "bzip2'ed tar-file"), 'tar': (_make_tarball, [('compress', None)], "uncompressed tar file"), 'zip': (_make_zipfile, [],"ZIP file") } def get_archive_formats(): """Returns a list of supported formats for archiving and unarchiving. Each element of the returned sequence is a tuple (name, description) """ formats = [(name, registry[2]) for name, registry in _ARCHIVE_FORMATS.items()] formats.sort() return formats def register_archive_format(name, function, extra_args=None, description=''): """Registers an archive format. name is the name of the format. function is the callable that will be used to create archives. If provided, extra_args is a sequence of (name, value) tuples that will be passed as arguments to the callable. 
description can be provided to describe the format, and will be returned by the get_archive_formats() function. """ if extra_args is None: extra_args = [] if not isinstance(function, collections.Callable): raise TypeError('The %s object is not callable' % function) if not isinstance(extra_args, (tuple, list)): raise TypeError('extra_args needs to be a sequence') for element in extra_args: if not isinstance(element, (tuple, list)) or len(element) !=2 : raise TypeError('extra_args elements are : (arg_name, value)') _ARCHIVE_FORMATS[name] = (function, extra_args, description) def unregister_archive_format(name): del _ARCHIVE_FORMATS[name] def make_archive(base_name, format, root_dir=None, base_dir=None, verbose=0, dry_run=0, owner=None, group=None, logger=None): """Create an archive file (eg. zip or tar). 'base_name' is the name of the file to create, minus any format-specific extension; 'format' is the archive format: one of "zip", "tar", "bztar" or "gztar". 'root_dir' is a directory that will be the root directory of the archive; ie. we typically chdir into 'root_dir' before creating the archive. 'base_dir' is the directory where we start archiving from; ie. 'base_dir' will be the common prefix of all files and directories in the archive. 'root_dir' and 'base_dir' both default to the current directory. Returns the name of the archive file. 'owner' and 'group' are used when creating a tar archive. By default, uses the current owner and group. 
""" save_cwd = os.getcwd() if root_dir is not None: if logger is not None: logger.debug("changing into '%s'", root_dir) base_name = os.path.abspath(base_name) if not dry_run: os.chdir(root_dir) if base_dir is None: base_dir = os.curdir kwargs = {'dry_run': dry_run, 'logger': logger} try: format_info = _ARCHIVE_FORMATS[format] except KeyError: raise ValueError, "unknown archive format '%s'" % format func = format_info[0] for arg, val in format_info[1]: kwargs[arg] = val if format != 'zip': kwargs['owner'] = owner kwargs['group'] = group try: filename = func(base_name, base_dir, **kwargs) finally: if root_dir is not None: if logger is not None: logger.debug("changing back to '%s'", save_cwd) os.chdir(save_cwd) return filename
getzze/magnum.fe
refs/heads/master
site-packages/magnumfe/integrators/integrator.py
2
# Copyright (C) 2011-2015 Claas Abert # # This file is part of magnum.fe. # # magnum.fe is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # magnum.fe is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with magnum.fe. If not, see <http://www.gnu.org/licenses/>. # # Last modified by Claas Abert, 2015-01-10 from __future__ import absolute_import __all__ = ["Integrator"] class Integrator(object): def __init__(self): """ Superclass for time integrators. Subclass have to implement the :code:`step` method. """ pass def step(self, state, dt): """ Calculate :math:`\\vec{m}(t+\Delta t)` for a given timestep. *Arguments* state (:class:`State`) The magnetization configuration. dt (:class:`float`) The time-step size. """ pass
nibaozhu/project_x
refs/heads/master
src/test/p1/18.py
2
#!/usr/bin/python import random, sys, os, math for i in range(10): print(str(i) + ": " + str(random.randint(0,10)))
xumi1993/seispy
refs/heads/master
seispy/rf.py
1
import obspy from obspy.io.sac import SACTrace import re from seispy.io import wsfetch from os.path import join from seispy.para import para import seispy from seispy.eq import eq from seispy.setuplog import setuplog import glob import numpy as np from datetime import timedelta import pandas as pd from obspy.taup import TauPyModel import configparser import argparse import sys import matplotlib.pyplot as plt from PyQt5.QtWidgets import QApplication from seispy.sviewerui import MatplotlibWidget def pickphase(eqs, para): app = QApplication(sys.argv) ui = MatplotlibWidget(eqs, para) ui.show() if app.exec_() == 0: ui.exit_app() return def datestr2regex(datestr): pattern = datestr.replace('%Y', r'\d{4}') pattern = pattern.replace('%m', r'\d{2}') pattern = pattern.replace('%d', r'\d{2}') pattern = pattern.replace('%j', r'\d{3}') pattern = pattern.replace('%H', r'\d{2}') pattern = pattern.replace('%M', r'\d{2}') pattern = pattern.replace('%S', r'\d{2}') return pattern def read_catalog(logpath, b_time, e_time, stla, stlo, magmin=5.5, magmax=10, dismin=30, dismax=90): col = ['date', 'evla', 'evlo', 'evdp', 'mag'] eq_lst = pd.DataFrame(columns=col) with open(logpath) as f: lines = f.readlines() for line in lines: line_sp = line.strip().split() date_now = obspy.UTCDateTime.strptime('.'.join(line_sp[0:3]) + 'T' + '.'.join(line_sp[4:7]), '%Y.%m.%dT%H.%M.%S') evla = float(line_sp[7]) evlo = float(line_sp[8]) evdp = float(line_sp[9]) mw = float(line_sp[10]) dis = seispy.distaz(stla, stlo, evla, evlo).delta # bazi = seispy.distaz(stla, stlo, evla, evlo).getBaz() if b_time <= date_now <= e_time and magmin <= mw <= magmax and dismin <= dis <= dismax: this_data = pd.DataFrame([[date_now, evla, evlo, evdp, mw]], columns=col) eq_lst = eq_lst.append(this_data, ignore_index=True) return eq_lst def load_station_info(pathname, ref_comp, suffix): try: ex_sac = glob.glob(join(pathname, '*{0}*{1}'.format(ref_comp, suffix)))[0] except Exception: raise FileNotFoundError('no such SAC file in 
{0}'.format(pathname)) ex_tr = SACTrace.read(ex_sac, headonly=True) if (ex_tr.stla is None or ex_tr.stlo is None): raise ValueError('The stlo and stla are not in the SACHeader') return ex_tr.knetwk, ex_tr.kstnm, ex_tr.stla, ex_tr.stlo, ex_tr.stel def match_eq(eq_lst, pathname, stla, stlo, ref_comp='Z', suffix='SAC', offset=None, tolerance=210, dateformat='%Y.%j.%H.%M.%S', switchEN=False, reverseE=False, reverseN=False): pattern = datestr2regex(dateformat) ref_eqs = glob.glob(join(pathname, '*{0}*{1}'.format(ref_comp, suffix))) sac_files = [] for ref_sac in ref_eqs: try: datestr = re.findall(pattern, ref_sac)[0] except IndexError: raise IndexError('Error data format of {} in {}'.format(dateformat, ref_sac)) if isinstance(offset, (int, float)): sac_files.append([datestr, obspy.UTCDateTime.strptime(datestr, dateformat), -offset]) elif offset is None: try: tr = obspy.read(ref_sac)[0] except TypeError: continue sac_files.append([datestr, tr.stats.starttime, tr.stats.sac.o]) else: raise TypeError('offset should be int or float type') new_col = ['dis', 'bazi', 'data'] eq_match = pd.DataFrame(columns=new_col) for datestr, b_time, offs in sac_files: date_range_begin = b_time + timedelta(seconds=offs - tolerance) date_range_end = b_time + timedelta(seconds=offs + tolerance) results = eq_lst[(eq_lst.date > date_range_begin) & (eq_lst.date < date_range_end)] if len(results) != 1: continue try: this_eq = eq(pathname, datestr, suffix, switchEN=switchEN, reverseE=reverseE, reverseN=reverseN) except Exception as e: continue this_eq.get_time_offset(results.iloc[0]['date']) daz = seispy.distaz(stla, stlo, results.iloc[0]['evla'], results.iloc[0]['evlo']) this_df = pd.DataFrame([[daz.delta, daz.baz, this_eq]], columns=new_col, index=results.index.values) eq_match = eq_match.append(this_df) ind = eq_match.index.drop_duplicates(False) eq_match = eq_match.loc[ind] return pd.concat([eq_lst, eq_match], axis=1, join='inner') class stainfo(): def __init__(self): self.network = '' 
        # --- tail of a station-info class whose header lies above this chunk:
        # station code plus latitude / longitude / elevation, empty defaults.
        self.station = ''
        self.stla = 0.
        self.stlo = 0.
        self.stel = 0.

    def get_stainfo(self):
        """Return every station attribute of this object as a plain dict."""
        return self.__dict__

    def load_stainfo(self, pathname, ref_comp, suffix):
        """Populate network/station/stla/stlo/stel via ``load_station_info``.

        ``load_station_info`` is defined elsewhere in this module; presumably
        it reads headers of SAC files matching ``ref_comp``/``suffix`` under
        ``pathname`` -- TODO confirm against its definition.
        """
        (self.network, self.station, self.stla, self.stlo, self.stel) = load_station_info(pathname, ref_comp, suffix)


def CfgParser(cfg_file):
    """Parse an INI-style RF configuration file into a ``para`` object.

    The mandatory ``path`` section fills datapath/rfpath/imagepath (and
    catalogpath when non-empty); keys of every remaining section are copied
    onto the ``para`` instance with per-key type coercion (dates ->
    ``obspy.UTCDateTime``, itmax -> int, only_r -> bool, numerics -> float,
    everything else kept as str).

    :param cfg_file: path to the configure file
    :return: populated ``para`` instance
    :raises FileNotFoundError: when reading the file raises
    """
    cf = configparser.RawConfigParser()
    pa = para()
    try:
        cf.read(cfg_file)
    except Exception:
        # NOTE(review): ConfigParser.read() silently skips missing/unreadable
        # files rather than raising, so this branch rarely fires -- verify
        # that missing-file detection actually works as intended.
        raise FileNotFoundError('Cannot open configure file %s' % cfg_file)
    pa.datapath = cf.get('path', 'datapath')
    pa.rfpath = cf.get('path', 'rfpath')
    pa.imagepath = cf.get('path', 'imagepath')
    logpath = cf.get('path', 'catalogpath')
    if logpath != '':
        # Only override the default catalogpath when one was supplied.
        pa.catalogpath = cf.get('path', 'catalogpath')
    sections = cf.sections()
    sections.remove('path')
    for sec in sections:
        for key, value in cf.items(sec):
            if key == 'date_begin':
                pa.__dict__[key] = obspy.UTCDateTime(value)
            elif key == 'date_end':
                pa.__dict__[key] = obspy.UTCDateTime(value)
            elif key == 'offset':
                try:
                    pa.__dict__[key] = float(value)
                except:
                    # NOTE(review): bare except; a non-numeric offset becomes
                    # None on purpose, but this also hides unrelated errors.
                    pa.__dict__[key] = None
            elif key == 'itmax':
                pa.__dict__[key] = int(value)
            elif key == 'only_r':
                pa.__dict__[key] = cf.getboolean(sec, 'only_r')
            elif key == 'criterion':
                pa.criterion = value
            else:
                # Generic keys: numeric when possible, raw string otherwise.
                try:
                    pa.__dict__[key] = float(value)
                except ValueError:
                    pa.__dict__[key] = value
    return pa


def CfgModify(cfg_file, session, key, value):
    """Set ``key = value`` in section ``session`` of a configure file in place.

    :raises FileNotFoundError: when reading the file raises
    """
    cf = configparser.RawConfigParser()
    try:
        cf.read(cfg_file)
    except Exception:
        raise FileNotFoundError('Cannot open configure file %s' % cfg_file)
    cf.set(session, key, value)
    # NOTE(review): the file handle is never explicitly closed; a ``with
    # open(...)`` block would be safer.
    cf.write(open(cfg_file, 'w'))


def _plotampt(x, y, ampt, shift_all):
    """Debug helper: pcolor map of ``ampt`` over (x, y) with the per-event
    shifts scattered on top. Returns the matplotlib figure."""
    xx, yy = np.meshgrid(x, y)
    f = plt.figure(figsize=(8, 8))
    plt.pcolor(xx, yy, ampt)
    plt.scatter(shift_all, y)
    return f


class RF(object):
    """Driver for a single-station receiver-function (RF) workflow.

    Holds the configuration (``para``), station info, the fetched event
    catalog (``eq_lst``) and the matched waveform table (``eqs``), and
    exposes the processing pipeline (detrend, filter, rotate, deconvolve,
    save) as methods that operate on ``self.eqs`` in place.
    """

    def __init__(self, cfg_file=None, log=None):
        # Accept either no config (defaults) or a path to a configure file.
        if cfg_file is None:
            self.para = para()
        elif isinstance(cfg_file, str):
            self.para = CfgParser(cfg_file)
        else:
            raise TypeError('cfg should be \'str\' not \'{0}\''.format(type(cfg_file)))
        if not isinstance(self.para, para):
            raise TypeError('Input value should be class seispy.rf.para')
        if log is None:
            self.logger = setuplog()
        else:
            self.logger = log
        self.eq_lst = pd.DataFrame()   # event catalog (one row per earthquake)
        self.eqs = pd.DataFrame()      # events matched to local waveforms
        self.model = TauPyModel('iasp91')  # travel-time model for phase picks
        self.stainfo = stainfo()

    @property
    def date_begin(self):
        """Start of the search window (proxied to ``self.para``)."""
        return self.para.date_begin

    @date_begin.setter
    def date_begin(self, value):
        self.para.date_begin = value

    @property
    def date_end(self):
        """End of the search window (proxied to ``self.para``)."""
        return self.para.date_end

    @date_end.setter
    def date_end(self, value):
        self.para.date_end = value

    def load_stainfo(self):
        """Load station metadata from the data directory; log and re-raise on failure."""
        try:
            self.logger.RFlog.info('Load station info from {0}'.format(self.para.datapath))
            self.stainfo.load_stainfo(self.para.datapath, self.para.ref_comp, self.para.suffix)
        except Exception as e:
            self.logger.RFlog.error('{0}'.format(e))
            raise e

    def search_eq(self, local=False, server=None, catalog='GCMT'):
        """Fill ``self.eq_lst`` with candidate earthquakes.

        :param local: when True, read events from ``para.catalogpath`` via
            ``read_catalog``; otherwise query a web service via ``wsfetch``.
        :param server: web-service endpoint; defaults to ``para.catalog_server``
        :param catalog: catalog name passed to the web service
        :raises ConnectionError: when the remote query fails
        """
        if not local:
            try:
                if server is None:
                    server = self.para.catalog_server
                self.logger.RFlog.info('Searching earthquakes from {}'.format(server))
                self.eq_lst = wsfetch(server, starttime=self.para.date_begin,
                                      endtime=self.para.date_end,
                                      latitude=self.stainfo.stla,
                                      longitude=self.stainfo.stlo,
                                      minmagnitude=self.para.magmin,
                                      maxmagnitude=self.para.magmax,
                                      minradius=self.para.dismin,
                                      maxradius=self.para.dismax,
                                      catalog=catalog)
            except Exception as e:
                raise ConnectionError(e)
        else:
            try:
                self.logger.RFlog.info(
                    'Searching earthquakes from {0} to {1}'.format(self.date_begin.strftime('%Y.%m.%dT%H:%M:%S'),
                                                                   self.date_end.strftime('%Y.%m.%dT%H:%M:%S')))
                self.eq_lst = read_catalog(self.para.catalogpath, self.para.date_begin, self.para.date_end,
                                           self.stainfo.stla, self.stainfo.stlo,
                                           magmin=self.para.magmin, magmax=self.para.magmax,
                                           dismin=self.para.dismin, dismax=self.para.dismax)
            except Exception as e:
                self.logger.RFlog.error('{0}'.format(e))
                raise e
        self.logger.RFlog.info('Found {} earthquakes'.format(self.eq_lst.shape[0]))

    def match_eq(self, switchEN=False, reverseE=False, reverseN=False):
        """Match catalog events against local SAC files, filling ``self.eqs``.

        The component-swap/reverse flags are forwarded to the module-level
        ``match_eq`` helper (shadowed name; the helper does the real work).
        """
        try:
            self.logger.RFlog.info('Match SAC files')
            self.eqs = match_eq(self.eq_lst, self.para.datapath, self.stainfo.stla, self.stainfo.stlo,
                                ref_comp=self.para.ref_comp, suffix=self.para.suffix,
                                offset=self.para.offset, tolerance=self.para.tolerance,
                                dateformat=self.para.dateformat, switchEN=switchEN,
                                reverseE=reverseE, reverseN=reverseN)
        except Exception as e:
            self.logger.RFlog.error('{0}'.format(e))
            raise e
        self.logger.RFlog.info('{0} earthquakes matched'.format(self.eqs.shape[0]))

    # Disabled project save/load (deepdish-based); kept verbatim for reference.
    '''
    def save(self, path=''):
        if path == '':
            path = '{0}.{1}.h5'.format(self.stainfo.network, self.stainfo.station)

        d = {'para': self.para.__dict__, 'stainfo': self.stainfo.__dict__, 'eq_lst': self.eq_lst, 'eqs': self.eqs}
        try:
            self.logger.RFlog.info('Saving project to {0}'.format(path))
            dd.io.save(path, d)
        except PerformanceWarning:
            pass
        except Exception as e:
            self.logger.RFlog.error('{0}'.format(e))
            raise IOError(e)

    def load(self, path):
        try:
            self.logger.RFlog.info('Loading {0}'.format(path))
            fdd = dd.io.load(path)
        except Exception as e:
            self.logger.RFlog.error('{0}'.format(e))
            raise IOError('Cannot read {0}'.format(path))

        try:
            self.para.__dict__.update(fdd['para'])
            self.stainfo.__dict__.update(fdd['stainfo'])
            self.eq_lst = fdd['eq_lst']
            self.eqs = fdd['eqs']
        except Exception as e:
            raise ValueError(e)
    '''

    def detrend(self):
        """Detrend every matched waveform in place."""
        self.logger.RFlog.info('Detrend all data')
        for _, row in self.eqs.iterrows():
            row['data'].detrend()

    def filter(self, freqmin=None, freqmax=None, order=4):
        """Band-pass every matched waveform; corner frequencies default to the
        configured ``para.freqmin``/``para.freqmax``."""
        if freqmin is None:
            freqmin = self.para.freqmin
        if freqmax is None:
            freqmax = self.para.freqmax
        self.logger.RFlog.info('Filter all data from {0} to {1}'.format(freqmin, freqmax))
        for _, row in self.eqs.iterrows():
            row['data'].filter(freqmin=freqmin, freqmax=freqmax, order=order)

    def cal_phase(self):
        """Compute phase arrivals (and ray parameters) for every event using
        the TauP model and each event's depth/distance."""
        self.logger.RFlog.info('Calculate arrivals and ray parameters for all data')
        for _, row in self.eqs.iterrows():
            row['data'].get_arrival(self.model, row['evdp'], row['dis'])
            # row['data'].get_raypara(self.model, row['evdp'], row['dis'])

    def baz_correct(self, time_b=10, time_e=20, offset=90, correct_angle=None):
        """Correct the back-azimuth column of ``self.eqs``.

        With ``correct_angle`` given, apply that fixed rotation; otherwise
        search per event (via ``search_baz``) for the shift minimising
        T-component energy in the [time_b, time_e] window and apply the mean
        shift. Exits the process when the search range is too small.
        """
        if correct_angle is not None:
            self.logger.RFlog.info('correct back-azimuth with {} deg.'.format(correct_angle))
            self.eqs['bazi'] = np.mod(self.eqs['bazi'] + correct_angle, 360)
        else:
            self.logger.RFlog.info('correct back-azimuth with T energy minimization')
            y = np.arange(self.eqs.shape[0])
            shift_all = np.array([])
            x = np.arange(-offset, offset)
            ampt_all = np.empty([0, x.shape[0]])
            for i, row in self.eqs.iterrows():
                curr_baz, ampt = row['data'].search_baz(row['bazi'], time_b=time_b, time_e=time_e, offset=offset)
                shift_all = np.append(shift_all, curr_baz)
                ampt_all = np.vstack((ampt_all, ampt))
            # A None from search_baz means the search window was exhausted.
            if None in shift_all:
                self.logger.RFlog.error('Range of searching bazi is too small.')
                sys.exit(1)
            # Mean over the non-NaN per-event shifts.
            baz_shift = np.mean(shift_all[np.where(np.logical_not(np.isnan(shift_all)))])
            # fig = _plotampt(x, y, ampt_all, shift_all)
            # fig.savefig('{}_rotation.png'.format(self.stainfo.station))
            # self._baz_confirm(offset, ampt_all)
            self.logger.RFlog.info('Average {:.1f} deg offset in back-azimuth'.format(baz_shift))
            self.eqs['bazi'] = np.mod(self.eqs['bazi'] + baz_shift, 360)

    # def _baz_confirm(self, offset, ampt_all):
    #     y = np.arange(self.eqs.shape[0])
    #     x = np.linspace(-offset, offset, ampt_all.shape[1])
    #     fig = _plotampt(x, y, ampt_all)
    #     if messagebox.askyesno('Back-azimuth correction',
    #                            'Would you like to keep this correction?'):
    #         fig.close()
    #     else:
    #         self.logger.RFlog.error('Manual interruption.')
    #         fig.close()
    #         sys.exit(1)

    def rotate(self, method='NE->RT', search_inc=False):
        """Rotate components for every event; events whose rotation fails are
        logged and dropped from ``self.eqs``."""
        self.logger.RFlog.info('Rotate {0} phase {1}'.format(self.para.phase, method))
        drop_idx = []
        for i, row in self.eqs.iterrows():
            try:
                row['data'].rotate(row['bazi'], method=method, phase=self.para.phase, search_inc=search_inc)
            except Exception as e:
                self.logger.RFlog.error('{}: {}'.format(row['data'].datestr, e))
                drop_idx.append(i)
        self.eqs.drop(drop_idx, inplace=True)

    def drop_eq_snr(self, length=None):
        """Drop events whose 3-component SNR falls below ``para.noisegate``.

        :param length: noise-window length, defaulting to ``para.noiselen``
        """
        if length is None:
            length = self.para.noiselen
        self.logger.RFlog.info('Reject data record with SNR less than {0}'.format(self.para.noisegate))
        drop_lst = []
        for i, row in self.eqs.iterrows():
            snr_E, snr_N, snr_Z = row['data'].snr(length=length, phase=self.para.phase)
            # NOTE(review): ``np.nan in (...)`` relies on identity with the
            # np.nan singleton; a NaN computed inside snr() may not match --
            # np.isnan() would be the robust test. Verify intent.
            if (np.nan in (snr_E, snr_N, snr_Z) or snr_E < self.para.noisegate or snr_N < self.para.noisegate or snr_Z < self.para.noisegate):
                drop_lst.append(i)
        self.eqs.drop(drop_lst, inplace=True)
        self.logger.RFlog.info('{0} events left after SNR calculation'.format(self.eqs.shape[0]))

    def trim(self):
        """Trim every waveform to the configured window around the phase."""
        for _, row in self.eqs.iterrows():
            row['data'].trim(self.para.time_before, self.para.time_after, self.para.phase)

    def pick(self):
        """Interactive visual QC of events via ``pickphase`` (mutates ``self.eqs``)."""
        pickphase(self.eqs, self.para)
        self.logger.RFlog.info('{0} events left after virtual checking'.format(self.eqs.shape[0]))

    def deconv(self, itmax=None, minderr=None):
        """Iteratively deconvolve every event; failures are logged and dropped.

        :param itmax: max iterations, defaulting to ``para.itmax``
        :param minderr: convergence threshold, defaulting to ``para.minderr``
        """
        if itmax is None:
            itmax = self.para.itmax
        if minderr is None:
            minderr = self.para.minderr
        # Window bookkeeping differs for P vs S receiver functions.
        if self.para.phase == 'P':
            shift = self.para.time_before
            time_after = self.para.time_after
        elif self.para.phase == 'S':
            shift = self.para.time_after
            time_after = self.para.time_before
        else:
            # NOTE(review): any other phase leaves ``shift``/``time_after``
            # unbound and the loop below would raise NameError -- verify that
            # para.phase is always 'P' or 'S' here.
            pass
        drop_lst = []
        count = 0
        for i, row in self.eqs.iterrows():
            count += 1
            try:
                row['data'].deconvolute(shift, time_after, self.para.gauss, phase=self.para.phase,
                                        only_r=self.para.only_r, itmax=itmax, minderr=minderr,
                                        target_dt=self.para.target_dt)
                self.logger.RFlog.info('Iterative Decon {0} ({3}/{4}) iterations: {1}; final RMS: {2:.4f}'.format(
                    row['data'].datestr, row['data'].it, row['data'].rms[-1], count, self.eqs.shape[0]))
            except Exception as e:
                self.logger.RFlog.error('{}: {}'.format(row['data'].datestr, e))
                drop_lst.append(i)
        self.eqs.drop(drop_lst, inplace=True)

    def saverf(self):
        """Write the receiver functions that pass the quality criterion to
        ``para.rfpath`` and keep only those rows in ``self.eqs``."""
        npts = int((self.para.time_before + self.para.time_after)/self.para.target_dt+1)
        if self.para.phase == 'P':
            shift = self.para.time_before
            criterion = self.para.criterion
        elif self.para.phase == 'S':
            shift = self.para.time_after
            criterion = None
        else:
            # NOTE(review): same unbound-``shift`` hazard as in deconv().
            pass
        good_lst = []
        self.logger.RFlog.info('Save RFs with criterion of {}'.format(criterion))
        for i, row in self.eqs.iterrows():
            if row['data'].judge_rf(shift, npts, criterion=criterion):
                row['data'].saverf(self.para.rfpath, evtstr=row['date'].strftime('%Y.%j.%H.%M.%S'), phase=self.para.phase, shift=shift,
                                   evla=row['evla'], evlo=row['evlo'], evdp=row['evdp'], baz=row['bazi'],
                                   mag=row['mag'], gcarc=row['dis'], gauss=self.para.gauss, only_r=self.para.only_r)
                good_lst.append(i)
        self.logger.RFlog.info('{} PRFs are saved.'.format(len(good_lst)))
        self.eqs = self.eqs.loc[good_lst]


def prf():
    """Command-line entry point: run the full single-station RF pipeline."""
    parser = argparse.ArgumentParser(description="Calculating RFs for single station")
    parser.add_argument('cfg_file', type=str, help='Path to RF configure file')
    parser.add_argument('-l', help="use local catalog. Default is false", dest='islocal', action='store_true')
    parser.add_argument('-r', help='Reverse components: EN, E or N', dest='comp', default=None, type=str)
    parser.add_argument('-s', help='Switch East and North components', dest='isswitch', action='store_true')
    parser.add_argument('-b', help='Correct back-azimuth with minimal '
                                   'energy of T component. "baz" is specified '
                                   'as half-searching range. Default value is 90 deg',
                        dest='baz', nargs='?', const=0, type=float)
    arg = parser.parse_args()
    # Translate the -r flag into per-component reversal booleans.
    if arg.comp is not None:
        arg.comp = arg.comp.upper()
        if arg.comp == 'NE' or arg.comp == 'EN':
            reverseE = True
            reverseN = True
        elif arg.comp == 'E':
            reverseE = True
            reverseN = False
        elif arg.comp == 'N':
            reverseE = False
            reverseN = True
        else:
            raise ValueError('component name must be in EN, E or N')
    else:
        reverseN = False
        reverseE = False
    # if arg.baz is not None:
    #     try:
    #         baz_corr = [float(val) for val in arg.b.split('/')]
    #     except:
    #         raise ValueError('Format of baz correction must be as 10/20/45')
    pjt = RF(cfg_file=arg.cfg_file)
    pjt.load_stainfo()
    pjt.search_eq(local=arg.islocal)
    pjt.match_eq(switchEN=arg.isswitch, reverseN=reverseN, reverseE=reverseE)
    pjt.detrend()
    pjt.filter()
    pjt.cal_phase()
    pjt.drop_eq_snr()
    # -b with a value: fixed-angle correction; bare -b (const=0): search mode.
    if arg.baz is not None and arg.baz != 0:
        pjt.baz_correct(correct_angle=arg.baz)
    elif arg.baz is not None and arg.baz == 0:
        pjt.baz_correct()
    else:
        pass
    pjt.trim()
    pjt.rotate()
    pjt.deconv()
    pjt.saverf()


def setpar():
    """Command-line entry point: set one key in a configure file."""
    parser = argparse.ArgumentParser(description="Set parameters to configure file")
    parser.add_argument('cfg_file', type=str, help='Path to configure file')
    parser.add_argument('session', type=str, help='session name')
    parser.add_argument('key', type=str, help='key name')
    parser.add_argument('value', type=str, help='value')
    arg = parser.parse_args()
    CfgModify(arg.cfg_file, arg.session, arg.key, arg.value)


if __name__ == '__main__':
    # get_events_test()
    prf()
    # proj_test()
cherishlm/ssdb
refs/heads/master
deps/cpy/antlr3/tokens.py
99
"""ANTLR3 runtime package""" # begin[licence] # # [The "BSD licence"] # Copyright (c) 2005-2008 Terence Parr # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # 3. The name of the author may not be used to endorse or promote products # derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR # IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES # OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT # NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF # THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # end[licence] from antlr3.constants import EOF, DEFAULT_CHANNEL, INVALID_TOKEN_TYPE ############################################################################ # # basic token interface # ############################################################################ class Token(object): """@brief Abstract token baseclass.""" def getText(self): """@brief Get the text of the token. Using setter/getter methods is deprecated. Use o.text instead. 
""" raise NotImplementedError def setText(self, text): """@brief Set the text of the token. Using setter/getter methods is deprecated. Use o.text instead. """ raise NotImplementedError def getType(self): """@brief Get the type of the token. Using setter/getter methods is deprecated. Use o.type instead.""" raise NotImplementedError def setType(self, ttype): """@brief Get the type of the token. Using setter/getter methods is deprecated. Use o.type instead.""" raise NotImplementedError def getLine(self): """@brief Get the line number on which this token was matched Lines are numbered 1..n Using setter/getter methods is deprecated. Use o.line instead.""" raise NotImplementedError def setLine(self, line): """@brief Set the line number on which this token was matched Using setter/getter methods is deprecated. Use o.line instead.""" raise NotImplementedError def getCharPositionInLine(self): """@brief Get the column of the tokens first character, Columns are numbered 0..n-1 Using setter/getter methods is deprecated. Use o.charPositionInLine instead.""" raise NotImplementedError def setCharPositionInLine(self, pos): """@brief Set the column of the tokens first character, Using setter/getter methods is deprecated. Use o.charPositionInLine instead.""" raise NotImplementedError def getChannel(self): """@brief Get the channel of the token Using setter/getter methods is deprecated. Use o.channel instead.""" raise NotImplementedError def setChannel(self, channel): """@brief Set the channel of the token Using setter/getter methods is deprecated. Use o.channel instead.""" raise NotImplementedError def getTokenIndex(self): """@brief Get the index in the input stream. An index from 0..n-1 of the token object in the input stream. This must be valid in order to use the ANTLRWorks debugger. Using setter/getter methods is deprecated. Use o.index instead.""" raise NotImplementedError def setTokenIndex(self, index): """@brief Set the index in the input stream. 
Using setter/getter methods is deprecated. Use o.index instead.""" raise NotImplementedError def getInputStream(self): """@brief From what character stream was this token created. You don't have to implement but it's nice to know where a Token comes from if you have include files etc... on the input.""" raise NotImplementedError def setInputStream(self, input): """@brief From what character stream was this token created. You don't have to implement but it's nice to know where a Token comes from if you have include files etc... on the input.""" raise NotImplementedError ############################################################################ # # token implementations # # Token # +- CommonToken # \- ClassicToken # ############################################################################ class CommonToken(Token): """@brief Basic token implementation. This implementation does not copy the text from the input stream upon creation, but keeps start/stop pointers into the stream to avoid unnecessary copy operations. """ def __init__(self, type=None, channel=DEFAULT_CHANNEL, text=None, input=None, start=None, stop=None, oldToken=None): Token.__init__(self) if oldToken is not None: self.type = oldToken.type self.line = oldToken.line self.charPositionInLine = oldToken.charPositionInLine self.channel = oldToken.channel self.index = oldToken.index self._text = oldToken._text if isinstance(oldToken, CommonToken): self.input = oldToken.input self.start = oldToken.start self.stop = oldToken.stop else: self.type = type self.input = input self.charPositionInLine = -1 # set to invalid position self.line = 0 self.channel = channel #What token number is this from 0..n-1 tokens; < 0 implies invalid index self.index = -1 # We need to be able to change the text once in a while. If # this is non-null, then getText should return this. Note that # start/stop are not affected by changing this. 
self._text = text # The char position into the input buffer where this token starts self.start = start # The char position into the input buffer where this token stops # This is the index of the last char, *not* the index after it! self.stop = stop def getText(self): if self._text is not None: return self._text if self.input is None: return None return self.input.substring(self.start, self.stop) def setText(self, text): """ Override the text for this token. getText() will return this text rather than pulling from the buffer. Note that this does not mean that start/stop indexes are not valid. It means that that input was converted to a new string in the token object. """ self._text = text text = property(getText, setText) def getType(self): return self.type def setType(self, ttype): self.type = ttype def getLine(self): return self.line def setLine(self, line): self.line = line def getCharPositionInLine(self): return self.charPositionInLine def setCharPositionInLine(self, pos): self.charPositionInLine = pos def getChannel(self): return self.channel def setChannel(self, channel): self.channel = channel def getTokenIndex(self): return self.index def setTokenIndex(self, index): self.index = index def getInputStream(self): return self.input def setInputStream(self, input): self.input = input def __str__(self): if self.type == EOF: return "<EOF>" channelStr = "" if self.channel > 0: channelStr = ",channel=" + str(self.channel) txt = self.text if txt is not None: txt = txt.replace("\n","\\\\n") txt = txt.replace("\r","\\\\r") txt = txt.replace("\t","\\\\t") else: txt = "<no text>" return "[@%d,%d:%d=%r,<%d>%s,%d:%d]" % ( self.index, self.start, self.stop, txt, self.type, channelStr, self.line, self.charPositionInLine ) class ClassicToken(Token): """@brief Alternative token implementation. A Token object like we'd use in ANTLR 2.x; has an actual string created and associated with this object. These objects are needed for imaginary tree nodes that have payload objects. 
We need to create a Token object that has a string; the tree node will point at this token. CommonToken has indexes into a char stream and hence cannot be used to introduce new strings. """ def __init__(self, type=None, text=None, channel=DEFAULT_CHANNEL, oldToken=None ): Token.__init__(self) if oldToken is not None: self.text = oldToken.text self.type = oldToken.type self.line = oldToken.line self.charPositionInLine = oldToken.charPositionInLine self.channel = oldToken.channel self.text = text self.type = type self.line = None self.charPositionInLine = None self.channel = channel self.index = None def getText(self): return self.text def setText(self, text): self.text = text def getType(self): return self.type def setType(self, ttype): self.type = ttype def getLine(self): return self.line def setLine(self, line): self.line = line def getCharPositionInLine(self): return self.charPositionInLine def setCharPositionInLine(self, pos): self.charPositionInLine = pos def getChannel(self): return self.channel def setChannel(self, channel): self.channel = channel def getTokenIndex(self): return self.index def setTokenIndex(self, index): self.index = index def getInputStream(self): return None def setInputStream(self, input): pass def toString(self): channelStr = "" if self.channel > 0: channelStr = ",channel=" + str(self.channel) txt = self.text if txt is None: txt = "<no text>" return "[@%r,%r,<%r>%s,%r:%r]" % (self.index, txt, self.type, channelStr, self.line, self.charPositionInLine ) __str__ = toString __repr__ = toString EOF_TOKEN = CommonToken(type=EOF) INVALID_TOKEN = CommonToken(type=INVALID_TOKEN_TYPE) # In an action, a lexer rule can set token to this SKIP_TOKEN and ANTLR # will avoid creating a token for this symbol and try to fetch another. SKIP_TOKEN = CommonToken(type=INVALID_TOKEN_TYPE)
bzhpwr/MediExports
refs/heads/master
project_env/lib/python2.6/site-packages/flask/exthook.py
783
# -*- coding: utf-8 -*-
"""
    flask.exthook
    ~~~~~~~~~~~~~

    Redirect imports for extensions.  This module basically makes it possible
    for us to transition from flaskext.foo to flask_foo without having to
    force all extensions to upgrade at the same time.

    When a user does ``from flask.ext.foo import bar`` it will attempt to
    import ``from flask_foo import bar`` first and when that fails it will
    try to import ``from flaskext.foo import bar``.

    We're switching from namespace packages because it was just too painful for
    everybody involved.

    This is used by `flask.ext`.

    :copyright: (c) 2011 by Armin Ronacher.
    :license: BSD, see LICENSE for more details.
"""
import sys
import os
from ._compat import reraise


class ExtensionImporter(object):
    """This importer redirects imports from this submodule to other locations.
    This makes it possible to transition from the old flaskext.name to the
    newer flask_name without people having a hard time.

    Implements the PEP 302 finder/loader protocol via find_module() and
    load_module() and is registered on ``sys.meta_path`` by install().
    """

    def __init__(self, module_choices, wrapper_module):
        # module_choices: format strings like 'flask_%s' tried in order;
        # wrapper_module: the dotted name this importer shadows ('flask.ext').
        self.module_choices = module_choices
        self.wrapper_module = wrapper_module
        self.prefix = wrapper_module + '.'
        # Number of dots to split off a fullname to recover the extension name.
        self.prefix_cutoff = wrapper_module.count('.') + 1

    def __eq__(self, other):
        # Compare by class identity (module + name) and configuration, so a
        # re-imported copy of this class still compares equal in install().
        return self.__class__.__module__ == other.__class__.__module__ and \
               self.__class__.__name__ == other.__class__.__name__ and \
               self.wrapper_module == other.wrapper_module and \
               self.module_choices == other.module_choices

    def __ne__(self, other):
        return not self.__eq__(other)

    def install(self):
        # Drop any equal importer already registered, then append self.
        sys.meta_path[:] = [x for x in sys.meta_path if self != x] + [self]

    def find_module(self, fullname, path=None):
        # PEP 302 finder hook: claim every import under our prefix.
        if fullname.startswith(self.prefix):
            return self

    def load_module(self, fullname):
        # PEP 302 loader hook: try each module_choices pattern in turn and
        # alias the first importable one under ``fullname``.
        if fullname in sys.modules:
            return sys.modules[fullname]
        modname = fullname.split('.', self.prefix_cutoff)[self.prefix_cutoff]
        for path in self.module_choices:
            realname = path % modname
            try:
                __import__(realname)
            except ImportError:
                exc_type, exc_value, tb = sys.exc_info()
                # since we only establish the entry in sys.modules at the
                # very end this seems to be redundant, but if recursive
                # imports happen we will call into the same import a second
                # time.  On the second invocation we still don't have an
                # entry for fullname in sys.modules, but we will end up with
                # the same fake module name and that import will succeed
                # since this one already has a temporary entry in the
                # modules dict.  Since this one "succeeded" temporarily that
                # second invocation now will have created a fullname entry
                # in sys.modules which we have to kill.
                sys.modules.pop(fullname, None)

                # If it's an important traceback we reraise it, otherwise
                # we swallow it and try the next choice.  The skipped frame
                # is the one from __import__ above which we don't care about
                if self.is_important_traceback(realname, tb):
                    reraise(exc_type, exc_value, tb.tb_next)
                continue
            module = sys.modules[fullname] = sys.modules[realname]
            if '.' not in modname:
                # Expose top-level extensions as attributes of the wrapper
                # package so ``flask.ext.foo`` works as an attribute too.
                setattr(sys.modules[self.wrapper_module], modname, module)
            return module
        raise ImportError('No module named %s' % fullname)

    def is_important_traceback(self, important_module, tb):
        """Walks a traceback's frames and checks if any of the frames
        originated in the given important module.  If that is the case then we
        were able to import the module itself but apparently something went
        wrong when the module was imported.  (Eg: import of an import failed).
        """
        while tb is not None:
            if self.is_important_frame(important_module, tb):
                return True
            tb = tb.tb_next
        return False

    def is_important_frame(self, important_module, tb):
        """Checks a single frame if it's important."""
        g = tb.tb_frame.f_globals
        if '__name__' not in g:
            return False

        module_name = g['__name__']

        # Python 2.7 Behavior.  Modules are cleaned up late so the
        # name shows up properly here.  Success!
        if module_name == important_module:
            return True

        # Some python versions will clean up modules so early that the
        # module name at that point is no longer set.  Try guessing from
        # the filename then.
        filename = os.path.abspath(tb.tb_frame.f_code.co_filename)
        test_string = os.path.sep + important_module.replace('.', os.path.sep)
        return test_string + '.py' in filename or \
               test_string + os.path.sep + '__init__.py' in filename