field       type            range / values
repo_name   stringlengths   5 .. 100
path        stringlengths   4 .. 231
language    stringclasses   1 value
license     stringclasses   15 values
size        int64           6 .. 947k
score       float64         0 .. 0.34
prefix      stringlengths   0 .. 8.16k
middle      stringlengths   3 .. 512
suffix      stringlengths   0 .. 8.17k
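Each record below pairs repository metadata (repo_name, path, language, license, size, score) with a fill-in-the-middle split of one source file into prefix, middle, and suffix. As a minimal sketch of how such records might be consumed, assuming they are stored one JSON object per line; the file name "records.jsonl" and the preview length are illustrative assumptions, not part of the dataset:

import json

# Iterate records matching the schema above (assumed JSON-lines storage).
with open("records.jsonl") as fh:  # hypothetical file name
    for raw in fh:
        record = json.loads(raw)
        # prefix + middle + suffix reconstructs the original source file.
        source = record["prefix"] + record["middle"] + record["suffix"]
        print(record["repo_name"], record["path"], record["license"],
              record["size"], record["score"])
        print(source[:200])  # short preview of the reconstructed code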
jacekburys/csmith
programs/validate.py
Python
bsd-2-clause
1,157
0.025929
import math
import sys
import re

valuePattern = re.compile('= (.+)$')

def extractValue(line):
    match = re.search(valuePattern, line)
    if match:
        return float.fromhex(match.group(1))
    else:
        return "ERROR"

intervalPattern = re.compile('= \[(.*?), (.*?)\]')

def extractInterval(line):
    match = re.search(intervalPattern, line)
    if match:
        lower = float.fromhex(match.group(1))
        upper = float.fromhex(match.group(2))
        return (lower, upper)
    else:
        return "ERROR"

def isInInterval(value, lower, upper):
    return lower<=value and value<=upper

#f1 - values, f2 - ranges
f1 = open(str(sys.argv[1]), 'r')
f2 = open(str(sys.argv[2]), 'r')

wide = 0
total = 0
result = 0

for line1, line2 in zip(f1.readlines(), f2.readlines()):
    total+=1
    value = extractValue(line1)
    lower, upper = extractInterval(line2)
    if math.isnan(value):
        if math.isfinite(lower) and math.isfinite(upper):
            print(line1)
            print(line2)
            result = 1
        continue
    if lower!=upper:
        wide+=1
    if not isInInterval(value, lower, upper):
        print(line1)
        print(line2)
        result = 1

print(total, wide)
f1.close()
f2.close()
sys.exit(result)
call-me-jimi/taskmanager
taskmanager/lib/hDBSessionMaker.py
Python
gpl-2.0
4,630
0.019654
# create a Session object by sessionmaker import os import ConfigParser import sqlalchemy.orm # get path to taskmanager. it is assumed that this script is in the lib directory of # the taskmanager package. tmpath = os.path.normpath( os.path.join( os.path.dirname( os.path.realpath(__file__) ) + '/..' ) ) etcpath = '%s/etc' % tmpath # for configuration files # library is in the same folder from hDatabase import Base class hDBSessionMaker( object ): def __init__( self, configFileName=None, createTables=False, echo=False ): if not configFileName: # use default config file etcpath = os.path.normpath( os.path.join( os.path.dirname( os.path.realpath(__file__) ) + '/../etc' ) ) # default config file for database connection configFileName = "{etcPath}/serversettings.cfg".format(etcPath=etcpath) # read config file if os.path.exists( configFileName ): config = ConfigParser.ConfigParser() config.read( configFileName ) else: sys.stderr.write( "ERROR: Could not find Config file {c}!".format( c=configFileName) ) sys.exit( -1 ) databaseDialect = config.get( 'DATABASE', 'database_dialect' ) databaseHost = config.get( 'DATABASE', 'database_host' ) databasePort = config.get( 'DATABASE', 'database_port' ) databaseName = config.get( 'DATABASE', 'database_name' ) databaseUsername = config.get( 'DATABASE', 'database_username' ) databasePassword = config.get( 'DATABASE', 'database_password' ) ## @var engine
#The engine that is connected to the database #use "echo=True" for SQL printing statements to stdout
self.engine = sqlalchemy.create_engine( "{dialect}://{user}:{password}@{host}:{port}/{name}".format( dialect=databaseDialect, user=databaseUsername, password=databasePassword, host=databaseHost, port=databasePort, name=databaseName), pool_size=50, # number of connections to keep open inside the connection pool max_overflow=100, # number of connections to allow in connection pool "overflow", that is connections that can be opened above and beyond the pool_size setting, which defaults to five. pool_recycle=3600, # this setting causes the pool to recycle connections after the given number of seconds has passed. echo=False ) # Create all tables in the engine. This is equivalent to "Create Table" # statements in raw SQL. Base.metadata.create_all( self.engine ) ## @var DBsession # define a Session class which will serve as a factory for new Session objects # # http://docs.sqlalchemy.org/en/rel_0_9/orm/session.html: # Session is a regular Python class which can be directly instantiated. However, to standardize how sessions are # configured and acquired, the sessionmaker class is normally used to create a top level Session configuration # which can then be used throughout an application without the need to repeat the configurational arguments. # sessionmaker() is a Session factory. A factory is just something that produces a new object when called. # # Thread local factory for sessions. See http://docs.sqlalchemy.org/en/rel_0_9/orm/session.html#contextual-thread-local-sessions # SessionFactory = sqlalchemy.orm.sessionmaker( bind = self.engine ) self.DBSession = sqlalchemy.orm.scoped_session( SessionFactory )
hexlism/xx_net
gae_proxy/local/proxy_handler.py
Python
bsd-2-clause
14,990
0.002935
#!/usr/bin/env python # coding:utf-8 import errno import socket import ssl import urlparse import OpenSSL NetWorkIOError = (socket.error, ssl.SSLError, OpenSSL.SSL.Error, OSError) from proxy import xlog import simple_http_client import simple_http_server from cert_util import CertUtil from config import config import gae_handler import direct_handler from connect_control import touch_active import web_control class GAEProxyHandler(simple_http_server.HttpServerHandle
r): gae_support_methods = tuple(["GET", "POST", "HEAD", "PUT", "DELETE", "PATCH"]) bufsize = 256*1024 max_retry = 3 def setup(self)
: self.__class__.do_GET = self.__class__.do_METHOD self.__class__.do_PUT = self.__class__.do_METHOD self.__class__.do_POST = self.__class__.do_METHOD self.__class__.do_HEAD = self.__class__.do_METHOD self.__class__.do_DELETE = self.__class__.do_METHOD self.__class__.do_OPTIONS = self.__class__.do_METHOD self.self_check_response_data = "HTTP/1.1 200 OK\r\n"\ "Access-Control-Allow-Origin: *\r\n"\ "Content-Type: text/plain\r\n"\ "Content-Length: 2\r\n\r\nOK" def forward_local(self): host = self.headers.get('Host', '') host_ip, _, port = host.rpartition(':') http_client = simple_http_client.HTTP_client((host_ip, int(port))) request_headers = dict((k.title(), v) for k, v in self.headers.items()) payload = b'' if 'Content-Length' in request_headers: try: payload_len = int(request_headers.get('Content-Length', 0)) payload = self.rfile.read(payload_len) except Exception as e: xlog.warn('forward_local read payload failed:%s', e) return self.parsed_url = urlparse.urlparse(self.path) if len(self.parsed_url[4]): path = '?'.join([self.parsed_url[2], self.parsed_url[4]]) else: path = self.parsed_url[2] content, status, response = http_client.request(self.command, path, request_headers, payload) if not status: xlog.warn("forward_local fail") return out_list = [] out_list.append("HTTP/1.1 %d\r\n" % status) for key, value in response.getheaders(): key = key.title() out_list.append("%s: %s\r\n" % (key, value)) out_list.append("\r\n") out_list.append(content) self.wfile.write("".join(out_list)) def do_METHOD(self): touch_active() host = self.headers.get('Host', '') host_ip, _, port = host.rpartition(':') if host_ip == "127.0.0.1" and port == str(config.LISTEN_PORT): controler = web_control.ControlHandler(self.client_address, self.headers, self.command, self.path, self.rfile, self.wfile) if self.command == "GET": return controler.do_GET() elif self.command == "POST": return controler.do_POST() else: xlog.warn("method not defined: %s", self.command) return if self.path[0] == '/' and host: self.path = 'http://%s%s' % (host, self.path) elif not host and '://' in self.path: host = urlparse.urlparse(self.path).netloc if host.startswith("127.0.0.1") or host.startswith("localhost"): #xlog.warn("Your browser forward localhost to proxy.") return self.forward_local() if self.path == "http://www.twitter.com/xxnet": xlog.debug("%s %s", self.command, self.path) # for web_ui status page # auto detect browser proxy setting is work return self.wfile.write(self.self_check_response_data) self.parsed_url = urlparse.urlparse(self.path) if host in config.HOSTS_GAE: return self.do_AGENT() if host in config.HOSTS_FWD or host in config.HOSTS_DIRECT: return self.wfile.write(('HTTP/1.1 301\r\nLocation: %s\r\n\r\n' % self.path.replace('http://', 'https://', 1)).encode()) if host.endswith(config.HOSTS_GAE_ENDSWITH): return self.do_AGENT() if host.endswith(config.HOSTS_FWD_ENDSWITH) or host.endswith(config.HOSTS_DIRECT_ENDSWITH): return self.wfile.write(('HTTP/1.1 301\r\nLocation: %s\r\n\r\n' % self.path.replace('http://', 'https://', 1)).encode()) return self.do_AGENT() # Called by do_METHOD and do_CONNECT_AGENT def do_AGENT(self): def get_crlf(rfile): crlf = rfile.readline(2) if crlf != "\r\n": xlog.warn("chunk header read fail crlf") request_headers = dict((k.title(), v) for k, v in self.headers.items()) payload = b'' if 'Content-Length' in request_headers: try: payload_len = int(request_headers.get('Content-Length', 0)) #logging.debug("payload_len:%d %s %s", payload_len, self.command, self.path) payload = self.rfile.read(payload_len) 
except NetWorkIOError as e: xlog.error('handle_method_urlfetch read payload failed:%s', e) return elif 'Transfer-Encoding' in request_headers: # chunked, used by facebook android client payload = "" while True: chunk_size_str = self.rfile.readline(65537) chunk_size_list = chunk_size_str.split(";") chunk_size = int("0x"+chunk_size_list[0], 0) if len(chunk_size_list) > 1 and chunk_size_list[1] != "\r\n": xlog.warn("chunk ext: %s", chunk_size_str) if chunk_size == 0: while True: line = self.rfile.readline(65537) if line == "\r\n": break else: xlog.warn("entity header:%s", line) break payload += self.rfile.read(chunk_size) get_crlf(self.rfile) gae_handler.handler(self.command, self.path, request_headers, payload, self.wfile) def do_CONNECT(self): touch_active() host, _, port = self.path.rpartition(':') if host in config.HOSTS_GAE: return self.do_CONNECT_AGENT() if host in config.HOSTS_DIRECT: return self.do_CONNECT_DIRECT() if host.endswith(config.HOSTS_GAE_ENDSWITH): return self.do_CONNECT_AGENT() if host.endswith(config.HOSTS_DIRECT_ENDSWITH): return self.do_CONNECT_DIRECT() return self.do_CONNECT_AGENT() def do_CONNECT_AGENT(self): """deploy fake cert to client""" # GAE supports the following HTTP methods: GET, POST, HEAD, PUT, DELETE, and PATCH host, _, port = self.path.rpartition(':') port = int(port) certfile = CertUtil.get_cert(host) xlog.info('GAE %s %s:%d ', self.command, host, port) self.__realconnection = None self.wfile.write(b'HTTP/1.1 200 OK\r\n\r\n') try: ssl_sock = ssl.wrap_socket(self.connection, keyfile=certfile, certfile=certfile, server_side=True) except ssl.SSLError as e: xlog.info('ssl error: %s, create full domain cert for host:%s', e, host) certfile = CertUtil.get_cert(host, full_name=True) return except Exception as e: if e.args[0] not in (errno.ECONNABORTED, errno.ECONNRESET): xlog.exception('ssl.wrap_socket(self.connection=%r) failed: %s path:%s, errno:%s', self.connection, e, self.path, e.args[0]) return self.__realconnection = self.connection self.__realwfile = self.wfile self.__realrfile = self.rfile self.connection = ssl_sock self.rfile = self.connection.makefile('rb', self.bufsize) self.wfile = self.connection.makefile('wb', 0) try: self.raw_requestline = self.rfile.readline(65537) if len(self.raw_requestline) > 65536:
dahlstrom-g/intellij-community
python/testData/refactoring/rename/epydocRenameParameter_after.py
Python
apache-2.0
120
0
def func(bar):
    """
    \\some comment
    @param bar: The parameter value.
    @type bar: Its type."""
    pass
kevin-coder/tensorflow-fork
tensorflow/python/autograph/operators/control_flow.py
Python
apache-2.0
14,926
0.007571
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Control flow statements: loops, conditionals, etc.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.autograph.operators import py_builtins from tensorflow.python.autograph.operators import special_values from tensorflow.python.autograph.pyct import errors from tensorflow.python.autograph.utils import ag_logging from tensorflow.python.data.ops import dataset_ops from tensorflow.python.framework import constant_op from tensorflow.python.framework import func_graph from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_util from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import gen_math_ops LIMIT_PYTHON_ITERATIONS = True PYTHON_MAX_ITERATIONS = 100000000 # Fails in about one minute for empty loops. WARN_INEFFICIENT_UNROLL = True INEFFICIENT_UNROLL_MIN_ITERATIONS = 3000 INEFFICIENT_UNROLL_MIN_OPS = 1 def for_stmt(iter_, extra_test, body, init_state): """Functional form of a for statement. The loop operates on a state, which includes all symbols that are variant across loop iterations, excluding the iterate as well as the variables local to the loop. For example, given the loop below that calculates the geometric and arithmetic means or some numbers: geo_mean = 1 arith_mean = 0 for i in range(n): a = numbers[i] geo_mean *= a arith_mean += a The state is represented by the variables geo_mean and arith_mean. The argument for initial_state may contain the tuple (1, 0), the body will include the arguments geo_mean and arith_mean and will return a tuple representing the new values for geo_mean and respectively arith_mean. Args: iter_: The entity being iterated over. extra_test: Callable with the state as arguments, and boolean return type. An additional loop condition. body: Callable with the iterate and the state as arguments, and state as return type. The actual loop body. init_state: Tuple containing the initial state. Returns: Tuple containing the final state. """ if tensor_util.is_tensor(iter_): return _known_len_tf_for_stmt(iter_, extra_test, body, init_state) elif isinstance(iter_, dataset_ops.DatasetV2): # Check for undefined symbols and report an error. This prevents the error # from propagating into the TF runtime. We have more information here and # can provide a clearer error message. 
undefined = tuple(filter(special_values.is_undefined, init_state)) if undefined: raise ValueError( 'TensorFlow requires that the following symbols must be defined' ' before the loop: {}'.format( tuple(s.symbol_name for s in undefined))) return _dataset_for_stmt(iter_, extra_test, body, init_state) else: return _py_for_stmt(iter_, extra_test, body, init_state) def _py_for_stmt(iter_, extra_test, body, init_state): """Overload of for_stmt that executes a Python for loop.""" state = init_state for target in iter_: if extra_test is not None and not extra_test(*state): break state = body(target, *state) return state def _known_len_tf_for_stmt(iter_, extra_test, body, init_state): """Overload of for_stmt that iterates over objects that admit a length.""" n = py_builtins.len_(iter_) def while_body(iterate_index, *state): iterate = iter_[iterate_index] new_state = body(iterate, *state) state = (iterate_index + 1,) if new_state: state += new_state return state def while_cond(iterate_index, *state): if extra_test is not None: return gen_math_ops.logical_and(iterate_index < n, extra_test(*state)) return iterate_index < n results = _tf_while_stmt( while_cond, while_body, init_state=(0,) + init_state, opts=dict(maximum_iterations=n)) # Dropping the iteration index because it's not syntactically visible. # TODO(mdan): Don't. if isinstance(results, (tuple, list)): assert len(results) >= 1 # Has at least the iterate. if len(results) > 1: results = results[1:] else: results = () return results def _dataset_for_stmt(ds, extra_test, body, init_state): """Overload of for_stmt that iterates over TF Datasets.""" if extra_test is not None: raise NotImplementedError( 'break and return statements are not yet supported in ' 'for/Dataset
loops.') def reduce_body(state, iterate): new_state = body(iterate, *state) return new_state if init_state: return ds.reduce(init_state, reduce_body) # Workaround for Datset.reduce not allowing empty state tensors - create # a dummy state variable that remains unused. def reduce_body_with_dummy_state(state, iterate): reduce_body((), iterate)
return state ds.reduce((constant_op.constant(0),), reduce_body_with_dummy_state) return () def while_stmt(test, body, init_state, opts=None): """Functional form of a while statement. The loop operates on a so-called state, which includes all symbols that are variant across loop iterations. In what follows we refer to state as either a tuple of entities that represent an actual state, or a list of arguments of the corresponding types. Args: test: Callable with the state as arguments, and boolean return type. The loop condition. body: Callable with the state as arguments, and state as return type. The actual loop body. init_state: Tuple containing the initial state. opts: Optional dict of extra loop parameters. Returns: Tuple containing the final state. """ # Evaluate the initial test once in order to do the dispatch. The evaluation # is isolated to minimize unwanted side effects. # TODO(mdan): Do a full iteration - some state types might lower to Tensor. with func_graph.FuncGraph('tmp').as_default(): init_test = test(*init_state) # TensorFlow: Multiple evaluations are acceptable in this case, so we're fine # with the re-evaluation of `test` that `_tf_while_stmt` will make. if tensor_util.is_tensor(init_test): return _tf_while_stmt(test, body, init_state, opts) # Normal Python: We already consumed one evaluation of `test`; consistently, # unroll one iteration before dispatching to a normal loop. # TODO(mdan): Push the "init_test" value via opts into _py_while_stmt? if not init_test: return init_state init_state = body(*init_state) return _py_while_stmt(test, body, init_state, opts) def _tf_while_stmt(test, body, init_state, opts): """Overload of while_stmt that stages a TF while_stmt.""" if opts is None: opts = {} undefined = tuple(filter(special_values.is_undefined, init_state)) if undefined: raise ValueError( 'TensorFlow requires that the following symbols must be initialized ' 'to a Tensor, Variable or TensorArray before the loop: {}'.format( tuple(s.symbol_name for s in undefined))) # Non-v2 while_loop unpacks the results when there is only one return value. # This enforces consistency across versions. opts['return_same_structure'] = True retval = control_flow_ops.while_loop(test, body, init_state, **opts) return retval class _PythonLoopChecker(object): """Verifies Python loops for TF-specific limits.""" def __init__(self): self.iterations = 0 self.check_inefficient_unroll = WARN_INEFFICIENT_UNROLL # Triggered when w
lanacioncom/ddjj_admin_lanacion
admin_ddjj_app/migrations/0001_initial.py
Python
mit
15,375
0.003187
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ] operations = [ migrations.CreateModel( name='Bien', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('descripcion', models.CharField(max_length=255, blank=True)), ('direccion', models.CharField(max_length=255, blank=True)), ('barrio', models.CharField(max_length=255, blank=True)), ('localidad', models.CharField(max_length=255, blank=True)), ('provincia', models.CharField(max_length=255, blank=True)), ('pais', models.CharField(max_length=255, blank=True)), ('modelo', models.IntegerField(null=True, blank=True)), ('entidad', models.CharField(max_length=255, blank=True)), ('ramo', models.CharField(max_length=255, blank=True)), ('cant_acciones', models.CharField(max_length=255, blank=True)), ('fecha_desde', models.DateField(null=True, blank=True)), ('destino', models.CharField(max_length=255, blank=True)), ('origen', models.CharField(max_length=255, blank=True)), ('superficie', models.DecimalField(help_text='Superficie de la propiedad', null=True, max_digits=10, decimal_places=2, blank=True)), ('unidad_medida_id', models.IntegerField(blank=True, help_text='Unidad de medida usada para la superficie', null=True, choices=[(0, 'm2'), (1, 'ha')])), ('m_mejoras_id', models.IntegerField(blank=True, null=True, choices=[(0, '$'), (1, 'us$'), (2, 'E'), (3, '$ Uruguayos'), (4, '\xa3'), (5, 'A'), (6, 'A$'), (7, '$L')])), ('mejoras', models.DecimalField(null=True, max_digits=10, decimal_places=2, blank=True)), ('m_valor_fiscal_id', models.IntegerField(blank=True, null=True, choices=[(0, '$'), (1, 'us$'), (2, 'E'), (3, '$ Uruguayos'), (4, '\xa3'), (5, 'A'), (6, 'A$'), (7, '$L')])), ('valor_fiscal', models.DecimalField(null=True, max_digits=10, decimal_places=2, blank=True)), ('m_valor_adq_id', models.IntegerField(blank=True, null=True, choices=[(0, '$'), (1, 'us$'), (2, 'E'), (3, '$ Uruguayos'), (4, '\xa3'), (5, 'A'), (6, 'A$'), (7, '$L')])), ('valor_adq', models.DecimalField(null=True, max_digits=10, decimal_places=2, blank=True)), ('fecha_hasta', models.DateField(null=True, blank=True)), ('titular_dominio', models.CharField(max_length=255, blank=True)), ('porcentaje', models.DecimalField(help_text="<strong>NO</strong> incluir el signo '%'.<br> Si ingresa un n\xfamero decimal use '.' 
(punto) como delimitador", null=True, max_digits=10, decimal_places=2, blank=True)), ('vinculo', models.CharField(default='Titular', help_text='Indica la relacion con el titular de la DDJJ', max_length=255, blank=True, choices=[('Conviviente', 'Conviviente'), ('C\xf3nyuge', 'C\xf3nyuge'), ('Hijo/a', 'Hijo/a'), ('Titular', 'Titular')])), ('periodo', models.CharField(max_length=255, blank=True)), ('obs', models.CharField(max_length=255, blank=True)), ('created_at', models.DateTimeField(auto_now_add=True)), ('updated_at', models.DateTimeField(auto_now=True)), ('tipo_bien_s', models.CharField(max_length=255, blank=True)), ('nombre_bien_s', models.CharField(max_length=255, blank=True)), ], options={ 'ordering': ['tipo_bien', 'nombre_bien'], 'db_table': 'biens', 'verbose_name_plural': 'bienes', }, bases=(models.Model,), ), migrations.CreateModel( name='Cargo', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('jurisdiccion', models.CharField(max_length=255, blank=True)), ('cargo', models.CharField(help_text='Nombre del cargo', max_length=255)), ('poder_id', models.IntegerField(blank=True, null=True, choices=[(0, 'Ejecutivo'), (1, 'Legislativo'), (2, 'Judicial')])), ('created_at', models.DateTimeField(auto_now_add=True)), ('updated_at', models.DateTimeField(auto_now=True)), ], options={ 'ordering': ['cargo'], 'db_table': 'cargos', }, bases=(models.Model,), ), migrations.CreateModel( name='ContenidoDdjjs', fields=[ ('id', models.IntegerField(serialize=False, primary_key=True)), ('ddjj_id', models.IntegerField(null=True, blank=True)), ('ddjj_ano', models.CharField(max_length=255, blank=True)), ('ddjj_tipo', models.CharField(max_length=255, blank=True)), ('poder_id', models.IntegerField(null=True, blank=True)), ('persona_str', models.CharField(max_length=255, blank=True)), ('persona_id', models.IntegerField(null=True, blank=True)), ('cargo_str', models.CharField(max_length=255, blank=True)), ('cargo_id', models.IntegerField(null=True, blank=True)), ('contenido', models.TextField(blank=True)), ('created_at', models.DateTimeField(auto_now_add=True)), ('updated_at', models.DateTimeField(auto_now=True)), ], options={ 'db_table': 'contenido_ddjjs', }, bases=(models.Model,), ), migrations.CreateModel( name='Ddjj', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('ano', models.IntegerField()), ('tipo_ddjj_id', models.IntegerField(choices=[(0, 'alta'), (1, 'baja'), (2, 'inicial'), (3, 'anual')])), ('funcionario', models.CharField(help_text='Este campo lo completa el sistema.', max_length=255, blank=True)), ('url', models.CharField(help_text='Url DocumentCloud', max_length=255, blank=True)), ('key', models.IntegerField(help_text='Este campo lo completa el sistema.', null=True, blank=True)), ('clave', models.CharField(help_text='Este campo lo completa el sistema.', max_length=255, blank=True)), ('flag_presenta', models.IntegerField(default=1, choices=[(0, 'Si'), (1, 'No')], blank=True, help_text="<strong style='color:blue'>'Solo el PDF'</strong> si solo se muestra el pdf, ej: cartas donde declaran que la ddjj es igual a la del a\xf1o anterior", null=True, verbose_name='Carta de DDJJ')), ('obs', models.TextField(blank=True)), ('flag_search', models.CharField(help_text='Este campo lo completa el sistema.', max_length=255, blank=True)), ('visitas', models.DecimalField(null=True, max_digits=10, decimal_places=0, blank=True)), ('status', models.IntegerField(default=0, help_text='Indica si puede 
ser publicada', choices=[(0, 'Deshabilitado'), (1, 'Habilitado')])), ('poder_id', models.IntegerField(choices=[(0, 'Ejecutivo'), (1, 'Legislativo'), (2, 'Judicial')])), ('created_at', models.DateTimeField(auto_now_add=True)), ('updated_at', models.DateTimeField(auto_now=True)), ], options={ 'ordering': ['persona'], 'db_table': 'ddjjs', 'verbose_name': 'Declaraci\xf3n Jurada', 'verbose_name_plural': 'Declaraciones Juradas', }, bases=(models.Model,), ), migrations.CreateModel( name='Jurisdiccion', fields=[ ('id', models.AutoFi
eld(verbose_name='ID', serialize=Fals
e, auto_created=
p4u/magicOS
files/home/crypto/mp-agent.py
Python
gpl-3.0
2,955
0.045685
import os import json import urllib import socket import subprocess as sub import string import sys MPCONF = "/etc/magicpool.conf" MPURL = "https://magicpool.org/main/download_config/${U}/${W}/${G}" SGCONF = "/home/crypto/.sgminer/sgminer.conf" def niceprint(data): return json.dumps(data,sort_keys=True,indent=4, separators=(',', ': ')).__str__() def getURL(url): try: u = urllib.urlopen(url) data = u.read() except: print("ERROR: cannot fetch url %s" %url) sys.exit(1) return data def saveConf(conf): os.system("cp %s %s" %(SGCONF,SGCONF+".old")) c = open(SGCONF,"w") c.write(niceprint(conf)) c.close() def restart(): os.system("md5sum %s | awk '{print $1}' > /tmp/get-pool.md5.1" % SGCONF) os.system("md5sum %s | awk '{print $1}' > /tmp/get-pool.md5.2" % (SGCONF+".old")) md51 = open
("/tmp/get-pool.md5.1","r") md52 = open("/tmp/get-pool.md5.2","r") if md51.read() == md52.read(): print "No changes in configuration" else: print "Found changes in configuration, restarting sgminer" #os.system('echo "quit|1" | nc 127.0.0.1 4028') os.system('killall -USR1 sgminer') md51.clo
se() md52.close() def getMPconf(): try: mpconf = open(MPCONF,"r") mp = json.loads(mpconf.read()) user = mp['username'] worker = mp['workeralias'] except: user = "generic" worker = "generic" return {"user":user,"worker":worker} def getMPremote(): url = MPURL mpconf = getMPconf() gpu = getGPU() s = string.Template(MPURL) mpurl = s.substitute(U=mpconf["user"],W=mpconf["worker"],G=gpu) print("Requesting URL %s" %mpurl) print(getURL(mpurl)) try: data = json.loads(getURL(mpurl)) except: print("ERROR: Cannot decode the magicpool json response") sys.exit(1) if 'ERROR' in data: print("ERROR: Some error in magicpool web server") sys.exit(1) if 'REBOOT' in data: os.execute("sudo reboot") sys.exit(2) return data def getSGconf(): try: fd_conf = open(SGCONF,"r") data = json.loads(fd_conf.read()) fd_conf.close() except: print("WARNING: cannot read current sgminer config file") data = {} return data def getGPU(): vcards = [] p = sub.Popen('lspci',stdout=sub.PIPE,stderr=sub.PIPE) output, errors = p.communicate() for pci in string.split(output,'\n'): if string.find(pci,'VGA') > 0: try: vcards.append(string.split(pci,':')[2]) except: print("Card not recognized") cards = "" for v in vcards: cards = v.replace(',','').replace('\'','').replace(' ','%20').replace('[','%5B').replace(']','%5D') return cards remoteconf = getMPremote() saveConf(remoteconf) restart() #return json.loads(getURL(MPURL)) #print(niceprint(getSGconf())) #conf["pools"] = remote["pools"] #i=0 ##while i < len(conf["pools"]): # new_u = conf["pools"][i]["user"].replace("USER",USER) # new_p = conf["pools"][i]["pass"].replace("PASS",PASS) # conf["pools"][i]["user"] = new_u # conf["pools"][i]["pass"] = new_p # i=i+1 # #print niceprint(conf) #fd_conf.close() #saveConf() #restart()
lfzyx/ButterSalt
manage.py
Python
mit
505
0
""" RUN RUN RUN ! """ from buttersalt import create_app from flask_script import Manager, Shell app = create_
app('default') manager = Manager(app) def make_shell_context(): return dict(app=app) manager.add_command("shell", Shell(make_context=make_shell_context)) @manager.command def test(): """Run the unit tests.""" import unittest tests = unittest.TestLoader().discover('tests
') unittest.TextTestRunner(verbosity=2).run(tests) if __name__ == "__main__": manager.run()
istio/tools
perf/load/auto-mtls/scale.py
Python
apache-2.0
4,271
0.000937
#!/usr/bin/env python3 # Copyright Istio Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http:#www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limi
tations under the License. import sys import os import random import time import typing import subprocess import argparse import http.server from urllib.parse import urlparse, parse_qs TEST_NAMESPACE = 'automtls' ISTIO_DEPLOY = 'svc-0-back-istio' LEGACY_DEPLOY = 'svc-0-back-legacy' class testHTTPServer_RequestHandler(http.server.BaseHTTPRequestHandler): def do
_GET(self): self.send_response(200) self.send_header('Content-type', 'text/html') self.end_headers() query = parse_qs(urlparse(self.path).query) istio_percent = random.random() if 'istio' in query: istio_percent = float(query['istio'][0]) message = simulate_sidecar_rollout(istio_percent) self.wfile.write(bytes(message, "utf8")) return def get_deployment_replicas(namespace, deployment: str): cmd = 'kubectl get deployment {dep} -n{ns} {jsonpath}'.format( ns=namespace, dep=deployment, jsonpath='''-ojsonpath={.status.replicas}''') p = subprocess.Popen(cmd.split(' '), stdout=subprocess.PIPE) output = p.communicate()[0] if len(output) == 0: return 0 return int(output) def wait_deployment(namespace, deployment: str): cmd = ('kubectl rollout status deployments/{dep} -n{ns}').format( dep=deployment, ns=namespace ) print(cmd) p = subprocess.Popen(cmd.split(' ')) p.wait() def scale_deployment(namespace, deployment: str, replica: int): cmd = 'kubectl scale deployment {dep} -n{ns} --replicas {replica}'.format( dep=deployment, ns=namespace, replica=replica ) print(cmd) p = subprocess.Popen(cmd.split(' ')) p.wait() def simulate_sidecar_rollout(istio_percent: float): ''' Updates deployments with or without Envoy sidecar. wait indicates whether the command wait till all pods become ready. ''' output = 'Namespace {}, sidecar deployment: {}, nosidecar deployment: {}'.format( TEST_NAMESPACE, ISTIO_DEPLOY, LEGACY_DEPLOY) # Wait to be stablized before attempting to scale. wait_deployment(TEST_NAMESPACE, ISTIO_DEPLOY) wait_deployment(TEST_NAMESPACE, LEGACY_DEPLOY) istio_count = get_deployment_replicas(TEST_NAMESPACE, ISTIO_DEPLOY) legacy_count = get_deployment_replicas(TEST_NAMESPACE, LEGACY_DEPLOY) total = istio_count + legacy_count output = 'sidecar replica {}, legacy replica {}\n\n'.format( istio_count, legacy_count) istio_count = int(istio_percent * total) legacy_count = total - istio_count output += ('======================================\n' 'Scale Istio count {sc}, legacy count {nsc}\n\n').format( sc=istio_count, nsc=legacy_count ) scale_deployment(TEST_NAMESPACE, ISTIO_DEPLOY, istio_count) scale_deployment(TEST_NAMESPACE, LEGACY_DEPLOY, legacy_count) print(output) return output def continuous_rollout(): ''' Simulate long running rollout, used for large performance cluster. ''' iteration = 1 while True: print('Start rollout iteration {}'.format(iteration)) message = simulate_sidecar_rollout(random.random()) iteration += 1 time.sleep(660) parser = argparse.ArgumentParser(description='Auto mTLS test runner') parser.add_argument('-m', '--mode', default='ci', type=str, help='mode, http | ci') args = parser.parse_args() if __name__ == '__main__': if args.mode == 'http': print('starting the rollout server simulation...') server_address = ('127.0.0.1', 8000) httpd = http.server.HTTPServer(server_address, testHTTPServer_RequestHandler) httpd.serve_forever() else: continuous_rollout()
mgupta011235/TweetSafe
tfidf/train_xgboost.py
Python
gpl-3.0
7,095
0.00902
from sklearn.feature_extraction.text import TfidfVectorizer import xgboost as xgb import cPickle as pickle from string import punctuation from nltk import word_tokenize from nltk.stem import snowball import numpy as np import pandas as pd import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt plt.style.use('ggplot') from nltk.tokenize import PunktSentenceTokenizer import time stemmer = snowball.SnowballStemmer("english") ############################################################################### #OHS tokenization code def load_data(filename): ''' Load data into a data frame for use in running model ''' return pickle.load(open(filename, 'rb')) def stem_tokens(tokens, stemmer): '''Stem the tokens.''' stemmed = [] for item in tokens: stemmed.append(stemmer.stem(item)) return stemmed def OHStokenize(text): '''Tokenize & stem. Stems automatically for now. Leaving "stemmer" out of function call, so it works with TfidfVectorizer''' tokens = word_tokenize(text) stems = stem_tokens(tokens, stemmer) return stems ########################################################################### # tokenization code def seperatePunct(incomingString): ''' Input:str, Output: str with all puncuations seperated by spaces ''' outstr = '' characters = set(['!','@','#','$',"%","^","&","*",":","\\", "(",")","+","=","?","\'","\"",";","/", "{","}","[","]","<",">","~","`","|"]) for char in incomingString: if char in characters: outstr = outstr + ' ' + char + ' ' else: outstr = outstr + char return outstr def hasNumbers(inputString): ''' Input: str Output: returns a 1 if the string contains a number ''' return any(char.isdigit() for char in inputString) def text_cleaner(wordList): ''' INPUT: List of words to be tokenized OUTPUT: List of tokenized words ''' tokenziedList = [] for word in wordList: #remove these substrings from the word word = word.replace('[deleted]','') word = word.replace('&gt','') #if link, replace with linktag if 'http' in word: tokenziedList.append('LINK_TAG') continue #if reference to subreddit, replace with reddittag if '/r/' in word: tokenziedList.append('SUBREDDIT_TAG') continue #if reference to reddit user, replace with usertag if '/u/' in word: tokenziedList.append('USER_TAG') continue #if reference to twitter user, replace with usertag if '@' in word: tokenziedList.append('USER_TAG') continue #if number, replace with numtag #m8 is a word, 5'10" and 54-59, 56:48 are numbers if hasNumbers(word) and not any(char.isalpha() for char in word): tokenziedList.append('NUM_TAG') continue #seperate puncuations and add to tokenizedList newwords = seperatePunct(word).split(" ") tokenziedList.extend(newwords) return tokenziedList def mytokenize(comment): ''' Input: takes in a reddit comment as a str or unicode and tokenizes it Output: a tokenized list ''' tokenizer = PunktSentenceTokenizer() sentenceList = tokenizer.tokenize(comment) wordList = [] for sentence in sentenceList: wordList.extend(sentence.split(" ")) return text_cleaner(wordList) ############################################################################## #main def main(): print "entering main..." path = 'labeledRedditComments2.p' cvpath = 'twitter_cross_val.csv' load_tstart = time.time() print 'loading data...' 
df = load_data(path) dfcv = pd.read_csv(cvpath) load_tstop = time.time() #take a subset of the data for testing this code # randNums = np.random.randint(low=0,high=len(df.index),size=(200,1)) # rowList = [int(row) for row in randNums] # dfsmall = df.ix[rowList,:] nf = df #create training set and labels X = nf.body y = nf.label Xcv = dfcv['tweet_text'].values ycv = dfcv['label'].values vect_tstart = time.time() print "creating vectorizer..." vect = TfidfVectorizer(stop_words='english', decode_error='ignore', tokenizer=OHStokenize) print "vectorizing..." # fit & transform comments matrix tfidf_X = vect.fit_transform(X) print "pickling vectorizer..." pickle.dump(vect, open('vect.p', 'wb')) tfidf_Xcv = vect.transform(Xcv) vect_tstop = time.time() print "converting data..." #convert to dense so that DMatrix doesn't drop cols with all zeros tfidf_Xcvd = tfidf_Xcv.todense() #data conversion to DMatrix xg_train = xgb.DMatrix(tfidf_X, label=y) xg_cv = xgb.DMatrix(tfidf_Xcvd, label=ycv) # print "loading vectorizer..." # vect = pickle.load(open('vect.p', 'rb')) # # cvpath = 'twitter_cr
oss_val.csv' # dfcv = pd.read_csv(cvpath) # Xcv = dfcv['tweet_text'].values # ycv = dfcv['label'].values # # print "transforming cross val data..." # tfidf_Xcv = vect.transform(Xcv) # tfidf_Xcvd = tfidf_Xcv.todense() # # xg_cv = xgb.DMatrix(tfidf_Xcvd, label=ycv) # print "loading training data..." # xg_train = xgb.DMatrix('xg_tr
ain2.buffer') # xg_cv = xgb.DMatrix('xg_cv2.buffer') train_tstart = time.time() print 'training...' #parameters param = {'max_depth':4, 'eta':0.3, 'silent':1, 'objective':'binary:logistic', 'eval_metric':'auc' } #number of boosted rounds num_round = 163 # what to apply the eval metric to # what the eval metric on these as you train to obj watchlist = [(xg_train, 'train'), (xg_cv, 'eval')] #dict with the results of the model on the eval_metric results = dict() #train model model = xgb.train(param, xg_train, num_round, watchlist, evals_result=results, #store eval results in results dic verbose_eval=True) #dont print output to screen train_tstop = time.time() print "saving model..." model.save_model('xgbfinal4.model') # # dump model # model.dump_model('dump2.raw.txt') # # # dump model with feature map # model.dump_model('dump2.nice.txt') # save dmatrix into binary buffer xg_train.save_binary('xg_train4.buffer') # xg_cv.save_binary('xg_cv2.buffer') # print "load data: {}".format(load_tstop - load_tstart) # print "tfidf: {}".format(vect_tstop - vect_tstart) # print "train: {}".format(train_tstop - train_tstart) # To load saved model: # model = xgb.Booster(model_file='../../xgb_models/xgb.model') if __name__ == '__main__': '''This script trains a TFIDF model using xgboost on the reddit corpus''' main()
SKIRT/PTS
core/prep/sphconvert.py
Python
agpl-3.0
6,914
0.019818
#!/usr/bin/env python # -*- coding: utf8 -*- # ***************************************************************** # ** PTS -- Python Toolkit for working with SKIRT ** # ** © Astronomical Observatory, Ghent University ** # ***************************************************************** ## \package pts.core.prep.sphconvert Converting SPH output data to SKIRT input format. # # The functions in this module allow converting SPH data files in text column # format to the SKIRT input format. Currently supported are: # - EAGLE old column text format (compatible with SKIRT5) # - AWAT column text format # - DOLAG column text format # # There is a separate function for star and gas particles, for each format. # The arguments for each function are: # - infile: the name of the input file in foreign format # - outfile: the name of the output file in SKIRT6 format (file is overwritten) # ----------------------------------------------------------------- # Import standard modules import math as math import numpy as np # ----------------------------------------------------------------- # EAGLE column text format # ----------------------------------------------------------------- ## EAGLE star particles: # - incoming: x(kpc) y(kpc) z(kpc) t(yr) h(kpc) Z(0-1) M(Msun) # - outgoing: x(pc) y(pc) z(pc) h(pc) M(Msun) Z(0-1) t(yr) def convert_stars_EAGLE(infile, outfile): x,y,z,t,h,Z,M = np.loadtxt(infile, unpack=True) fid = open(outfile, 'w') fid.write('# SPH Star Particles\n') fid.write('# Converted from EAGLE SKIRT5 output format into SKIRT6 format\n') fid.write('# Columns contain: x(pc) y(pc) z(pc) h(pc) M(Msun) Z(0-1) t(yr)\n') np.savetxt(fid, np.transpose((x*1e3,y*1e3,z*1e3,h*1e3,M,Z,t)), fmt="%1.9g") fid.close() ## EAGLE gas particles: # - incoming: x(kpc) y(kpc) z(kpc) SFR(?) h(kpc) Z(0-1) M(Msun) # - outgoing: x(pc) y(pc) z(pc) h(pc) M(Msun) Z(0-1) def convert_gas_EAGLE(infile, outfile): x,y,z,SFR,h,Z,M = np.loadtxt(infile, unpack=True) fid = open(outfile, 'w') fid.write('# SPH Gas Particles\n') fid.write('# Converted from EAGLE SKIRT5 output format into SKIRT6 format\n') fid.write('# Columns contain: x(pc) y(pc) z(pc) h(pc) M(Msun) Z(0-1)\n') np.savetxt(fid, np.transpose((x*1e3,y*1e3,z*1e3,h*1e3,M,Z)), fmt="%1.9g") fid.close() # ----------------------------------------------------------------- # AWAT column text format # ----------------------------------------------------------------- ## AWAT star particles: # - incoming: x y z vx vy vz M ms0 mzHe mzC mzN mzO mzNe mzMg mzSi mzFe mzZ Z ts id flagfd rho h ... # - units: x,y,z,h (100kpc); M (1e12 Msun); ts(0.471Gyr) with t = (1Gyr-ts) # - outgoing: x(pc) y(pc) z(pc) h(pc) M(Msun) Z(0-1) t(yr) def convert_stars_AWAT(infile, outfile): x,y,z,M,Z,ts,h = np.loadtxt(infile, usecols=(0,1,2,6,17,18,22), unpack=True) fid = open(outfile, 'w') fid.write('# SPH Star Particles\n') fid.write('# Converted from AWAT output format into SKIRT6 format\n') fid.write('# Columns contain: x(pc) y(pc) z(pc) h(pc) M(Msun) Z(0-1) t(yr)\n') np.savetxt(fid, np.transpose((x*1e5,y*1e5,z*1e5,h*1e5,M*1e12,Z,1e9-ts*0.471e9)), fmt="%1.9g") fid.close() ## AWAT gas particles: # - incoming: x y z vx vy vz M rho u mzHe mzC mzN mzO mzNe mzMg mzSi mzFe mzZ id flagfd h myu nhp Temp ... 
# - units: x,y,z,h (100kpc); M (1e12 Msun); mzZ (Msun) so that Z=mzZ/(M*1e12) # - outgoing: x(pc) y(pc) z(pc) h(pc) M(Msun) Z(0-1) def convert_gas_AWAT(infile, outfile): x,y,z,M,mzZ,h = np.loadtxt(infile, usecols=(0,1,2,6,17,20), unpack=True) fid = open(outfile, 'w') fid.write('# SPH Gas Particles\n') fid.write('# Converted from AWAT output format into SKIRT6 format\n') fid.write('# Columns contain: x(pc) y(pc) z(pc) h(pc) M(Msun) Z(0-1)\n') np.savetxt(fid, np.transpose((x*1e5,y*1e5,z*1e5,h*1e5,M*1e12,mzZ/(M*1e12))), fmt="%1.9g") fid.close() # ----------------------------------------------------------------- # DOLAG column text format # ----------------------------------------------------------------- # return the age of a star (in yr) given the universe expansion factor when the star was born (in range 0-1) @np.vectorize def age(R): H0 = 2.3e-18 OmegaM0 = 0.27 yr = 365.25 * 24 * 3600 T0 = 13.7e9 return T0 - (2./3./H0/np.sqrt(1-OmegaM0)) * np.arcsinh(np.sqrt( (1/OmegaM0-1)*R**3 )) / yr # return the radius of a particle (in kpc) given its mass (in Msun) and density (in Msun/kpc3) @np.vectorize def radius(M,rho): return (M/rho*3/4/math.pi*64)**(1./3.) ## DOLAG star particles: # - incoming: id x y z vx vy vz M R # - units: x,y,z (kpc); M (Msun); R (0-1); assume Z=0.02 & h=1kpc; calculate t(R) # - outgoing: x(pc) y(pc) z(pc) h(pc) M(Msun) Z(0-1) t(yr) def convert_stars_DOLAG(infile, outfile): x,y,z,M,R = np.loadtxt(infile, usecols=(1,2,3,7,8), unpack=True) fid = open(outfile, 'w') fid.write('# SPH Star Particles\n') fid.write('# Converted from DOLAG output format into SKIRT6 format\n') fid.write('# Columns contain: x(pc) y(pc) z(pc) h(pc) M(Msun) Z(0-1) t(yr)\n') np.savetxt(fid, np.transpose((x*1e3,y*1e3,z*1e3,np.ones_like(x)*1e3,M,np.ones_like(x)*0.02,age(R))), fmt="%1.9g") fid.close() ## DOLAG gas particles: # - incoming: id x y z vx vy vz M rho T cf u sfr # - units: x,y,z (kpc); M (Msun); assume Z=0.02; calculate h(M,rho) # - outgoing: x(pc) y(pc) z(pc) h(pc) M(Msun) Z(0-1) def convert_gas_DOLAG(infile, outfile): x,y,z,M,rho = np.loadtxt(infile, usecols=(1,2,3,7,8), unpack=True) fid = open(outfile, 'w') fid.write('# SPH Gas Particles\n') fid.write('# Converted from DOLAG output format into SKIRT6 format\n') fid.write('# Columns contain: x(pc) y(pc) z(pc) h(pc) M(Msun) Z(0-1)\n') np.savetxt(fid, np.transpose((x*1e3,y*1e3,z*1e3,radius(M,rho)*1e3,M,np.ones_like(x)*0.02)), fmt="%1.9g") fid.close() # ----------------------------------------------------------------- # ULB column text format # ----------------------------------------------------------------- ## ULB gas particles: # - incoming: x y z M h rho vx vy vz ... # - units: x,y,z,h (100AU); M (Msun) # - outgoing: x(pc) y(pc) z(pc) h
(pc) M(Msun) Z(0-1) def convert_gas_ULB(infile, outfile): PARSEC = 3.08568e16 # 1 parsec (in m) AU = 1.496e11 # 1 AU (in m) CONV = (100. * AU) / PARSEC x,y,z,M,h = np.loadtxt(infil
e, usecols=(0,1,2,3,4), unpack=True) fid = open(outfile, 'w') fid.write('# SPH Gas Particles\n') fid.write('# Converted from ULB output format into SKIRT6 format\n') fid.write('# Columns contain: x(pc) y(pc) z(pc) h(pc) M(Msun) Z(0-1)\n') np.savetxt(fid, np.transpose((x*CONV,y*CONV,z*CONV,5*h*CONV,M,np.zeros_like(M)+0.02)), fmt="%1.9g") # inflated h! fid.close() # -----------------------------------------------------------------
cydenix/OpenGLCffi
OpenGLCffi/GLES1/EXT/EXT/discard_framebuffer.py
Python
mit
181
0.016575
from OpenGLCffi.GLES1 import params


@params(api='gles1', prms=['target', 'numAttachments', 'attachments'])
def glDiscardFramebufferEXT(target, numAttachments, attachments):
    pass
homeworkprod/byceps
tests/integration/blueprints/admin/jobs/test_views.py
Python
bsd-3-clause
693
0
""" :Copyright: 2006-2021 Jochen Kupperschmidt :License: Revised BSD (see `LICENSE` file for details) """ import pytest from tests.helpers import login_user def test_view_for_brand(jobs_admin_client): url = '/admin/jobs/' response = jobs_admin_client.get(url) assert response.status_code == 200 @pytest.fixture(scope='pa
ckage') def jobs_admin(make_admin): permission_ids = { 'admin.access', 'jobs.view', } admin = make_admin('JobsAdmin', permission_ids) login_user(admin.id) return admin @pytest.fixture(scope='package') def jobs_admin_client(make_client, admin_app, jobs_admin): return make_client(admin_app, user_id=jobs_admin.
id)
eunchong/build
scripts/slave/recipe_modules/cronet/__init__.py
Python
bsd-3-clause
187
0
DEPS = [
  'chromium',
  'chromium_android',
  'gsutil',
  'recipe_engine/json',
  'recipe_engine/path',
  'recipe_engine/properties',
  'recipe_engine/python',
  'recipe_engine/step',
]
cwant/tessagon
tests/core/test_tile.py
Python
apache-2.0
2,833
0
from core_tests_base import CoreTestsBase, FakeTessagon, FakeTileSubClass class TestTile(CoreTestsBase): # Note: these tests are highly dependent on the behavior of # FakeTessagon and FakeAdaptor def test_add_vert(self): tessagon = FakeTessagon() tile = FakeTileSubClass(tessagon, u_range=[0.5, 1.0], v_range=[2.5, 3.0]) tile.add_vert(['top', 'left'], 0.25, 0.75) assert tile.blend(0.25, 0.75) == [0.625, 2.875] # One vert added assert tile.verts['top']['left'] == tile.f(0.625, 2.875) assert tile.verts['top']['right'] is None assert tile.verts['bottom']['left'] is None assert tile.verts['bottom']['right'] is None def test_add_vert_u_symmetric(self): tessagon = FakeTessagon() tile = FakeTileSubClass(tessagon, u_range=[0.5, 1.0], v_range=[2.5, 3.0], u_symmetric=True)
tile.add_vert(['top', 'left'], 0.25, 0.75) # [0.75, 0.75] is reflection of [0.25, 0.75] in U direction assert tile.blend(0.75, 0.75) == [0.875, 2.875] # Two verts added assert tile.verts['top']['left'] == tile.f(0.625, 2.875) assert tile.verts['top']['right'] == tile.f(0.875, 2.875) assert tile.verts['bottom']['left'] is None assert tile.verts['bottom']
['right'] is None def test_add_vert_v_symmetric(self): tessagon = FakeTessagon() tile = FakeTileSubClass(tessagon, u_range=[0.5, 1.0], v_range=[2.5, 3.0], v_symmetric=True) tile.add_vert(['top', 'left'], 0.25, 0.75) # [0.25, 0.25] is reflection of [0.25, 0.75] in V direction assert tile.blend(0.25, 0.25) == [0.625, 2.625] # Two verts added assert tile.verts['top']['left'] == tile.f(0.625, 2.875) assert tile.verts['top']['right'] is None assert tile.verts['bottom']['left'] == tile.f(0.625, 2.625) assert tile.verts['bottom']['right'] is None def test_add_vert_u_v_symmetric(self): tessagon = FakeTessagon() tile = FakeTileSubClass(tessagon, u_range=[0.5, 1.0], v_range=[2.5, 3.0], u_symmetric=True, v_symmetric=True) tile.add_vert(['top', 'left'], 0.25, 0.75) # [0.75, 0.25] is reflection of [0.25, 0.75] in U and V directions assert tile.blend(0.75, 0.25) == [0.875, 2.625] # Four verts added assert tile.verts['top']['left'] == tile.f(0.625, 2.875) assert tile.verts['top']['right'] == tile.f(0.875, 2.875) assert tile.verts['bottom']['left'] == tile.f(0.625, 2.625) assert tile.verts['bottom']['right'] == tile.f(0.875, 2.625)
nCoda/lychee
lychee/__init__.py
Python
gpl-3.0
1,444
0.00277
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#--------------------------------------------------------------------------------------------------
# Program Name: Lychee
# Program Description: MEI document manager for formalized document control
#
# Filename: lychee/__init__.py
# Purpose: Initialize Lychee.
#
# Copyright (C) 2016, 2017 Christopher Antila
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
#--------------------------------------------------------------------------------------------------
"""
Initialize Lychee.
"""

__all__ = [
    'converters',
    'document',
    'exceptions',
    'logs',
    'namespaces',
    'signals',
    'tui',
    'workflow',
    'vcs',
    'views',
]

from lychee import *
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
aurynn/openstack-artifice
tests/test_models.py
Python
apache-2.0
6,823
0.003957
import unittest from artifice.models import usage, tenants, resources, Session from sqlalchemy import create_engine from sqlalchemy.exc import IntegrityError, InvalidRequestError from sqlalchemy.orm.exc import FlushError import os from artifice.models.usage import Usage from artifice.models.tenants import Tenant from artifice.models.resources import Resource
from datetime import datetime, timedelta TENANT_ID = "test tenant" RESOURCE_ID = "test resource" RESOURCE_ID_TWO = "A DIFFERENT RESOURCE" USAGE_ID = 12345 class SessionBase(unittest.TestCase): def setUp(self): engine = create_engine(os.environ["DATABASE_URL"]) Session.configure(bind=engine) self.session = Session() self.objects = [] self.session.rollback() d
ef tearDown(self): self.session.rollback() for t in self.objects: try: self.session.delete(t) except InvalidRequestError: # This is fine pass self.session.commit() self.session = None class TestTenant(SessionBase): def test_create_tenant(self): t = tenants.Tenant() self.objects.append(t) t.id = TENANT_ID self.session.add(t) self.session.flush() self.session.commit() t2 = self.session.query(tenants.Tenant)\ .filter(tenants.Tenant.id == TENANT_ID)[0] self.assertTrue( t2 is not None ) self.assertEqual( t2.id, TENANT_ID ) def test_create_identical_tenant_fails(self): # First pass self.test_create_tenant() try: self.test_create_tenant() except (IntegrityError, FlushError) as e: self.assertTrue ( True ) except Exception as e: # self.fail ( e.__class__ ) self.fail ( e ) class TestResource(SessionBase): def test_create_resource(self): r = resources.Resource() t = tenants.Tenant() t.id = TENANT_ID r.tenant = t r.id = RESOURCE_ID self.session.add(r) self.session.add(t) self.objects.extend((r,t)) self.session.flush() self.session.commit() r2 = self.session.query(resources.Resource)\ .filter(resources.Resource.id == RESOURCE_ID)[0] self.assertEqual(r2.id, r.id) self.assertEqual( r2.tenant.id, t.id ) def test_create_resource_with_bad_tenant_fails(self): r = resources.Resource() t = tenants.Tenant() r.tenant = t self.objects.extend((r,t)) self.session.add(r) self.session.add(t) try: self.session.commit() except IntegrityError: self.assertTrue(True) except Exception as e: self.fail(e) def test_create_resource_without_tenant_fails(self): r = resources.Resource() r.id = RESOURCE_ID self.session.add(r) self.objects.append(r) try: self.session.commit() except IntegrityError: self.assertTrue(True) except Exception as e: self.fail(e) class TestUsage(SessionBase): """Tests various states of the Usage objects.""" # def setUp(self): # super(TestUsage, self).setUp() # self.resource # def tearDown(self): # pass def test_save_usage_to_database(self): r = Resource() r.id = RESOURCE_ID t = Tenant() t.id = TENANT_ID r.tenant = t self.objects.extend((r, t)) start = datetime.now() - timedelta(days=30) end = datetime.now() u = Usage(r, t, 1, start, end ) u.id = USAGE_ID self.objects.append(u) self.session.add(u) self.session.add(r) self.session.add(t) self.session.commit() u2 = self.session.query(Usage)[0] self.assertTrue( u2.resource.id == r.id ) self.assertTrue( u2.tenant.tenant.id == t.id ) self.assertTrue( u2.created == u.created ) print u2.time def test_overlap_throws_exception(self): self.test_save_usage_to_database() r = self.session.query(Resource).filter(Resource.id == RESOURCE_ID)[0] t = self.session.query(Tenant).filter(Tenant.id == TENANT_ID)[0] start = datetime.now() - timedelta(days=15) end = datetime.now() u2 = Usage(r, t, 2, start, end) self.session.add(u2) try: self.session.commit() except IntegrityError: self.assertTrue(True) except Exception as e: self.fail(e) def test_overlap_with_different_resource_succeeds(self): self.test_save_usage_to_database() t = self.session.query(Tenant).filter(Tenant.id == TENANT_ID)[0] r = Resource() r.id = RESOURCE_ID_TWO r.tenant = t start = datetime.now() - timedelta(days=30) end = datetime.now() u = Usage(r, t, 2, start, end) self.objects.extend((r, u)) self.session.add(u) self.session.add(r) try: self.session.commit() except IntegrityError as e: self.fail("Integrity violation: %s" % e) except Exception as e: self.fail("Major exception: %s" % e) def test_non_overlap_succeeds(self): self.test_save_usage_to_database() r = self.session.query(Resource).filter(Resource.id == 
RESOURCE_ID)[0] t = self.session.query(Tenant).filter(Tenant.id == TENANT_ID)[0] start = datetime.now() end = datetime.now() + timedelta(days=30) u = Usage(r, t, 1, start, end) self.session.add(u) try: self.session.commit() self.objects.append(u) except IntegrityError as e: self.fail("Integrity violation: %s" % e) except Exception as e: self.fail("Fail: %s" % e) def test_tenant_does_not_exist_fails(self): pass def test_resource_does_not_exist_fails(self): pass def test_resource_belongs_to_different_tenant_fails(self): self.test_save_usage_to_database() t = Tenant() t.id = "TENANT TWO" r = self.session.query(Resource).filter(Resource.id == RESOURCE_ID)[0] start = datetime.now() end = datetime.now() + timedelta(days=30) self.session.add(t) self.objects.append(t) try: u = Usage(r, t, 1, start, end) self.session.commit() self.objects.append(u) self.fail("Should not have saved!") except (IntegrityError, AssertionError) as e : self.assertTrue(True) # Pass except Exception as e: self.fail(e.__class__)
EmadMokhtar/Django
tests/model_fields/models.py
Python
mit
12,373
0.000162
import os import tempfile import uuid from django.contrib.contenttypes.fields import ( GenericForeignKey, GenericRelation, ) from django.contrib.contenttypes.models import ContentType from django.core.files.storage import FileSystemStorage from django.db import models from django.db.models.fields.files import ImageField, ImageFieldFile from django.db.models.fields.related import ( ForeignKey, ForeignObject, ManyToManyField, OneToOneField, ) from django.utils.translation import gettext_lazy as _ try: from PIL import Image except ImportError: Image = None class Foo(models.Model): a = models.CharField(max_length=10) d = models.DecimalField(max_digits=5, decimal_places=3) def get_foo(): return Foo.objects.get(id=1).pk class Bar(models.Model): b = models.CharField(max_length=10) a = models.ForeignKey(Foo, models.CASCADE, default=get_foo, related_name='bars') class Whiz(models.Model): CHOICES = ( ('Group 1', ( (1, 'First'), (2, 'Second'), ) ), ('Group 2', ( (3, 'Third'), (4, 'Fourth'), ) ), (0, 'Other'), (5, _('translated')), ) c = models.IntegerField(choices=CHOICES, null=True) class WhizDelayed(models.Model): c = models.IntegerField(choices=(), null=True) # Contrived way of adding choices later. WhizDelayed._meta.get_field('c').choices = Whiz.CHOICES class WhizIter(models.Model): c = models.IntegerField(choices=iter(Whiz.CHOICES), null=True) class WhizIterEmpty(models.Model): c = models.CharField(choices=iter(()), blank=True, max_length=1) class Choiceful(models.Model): no_choices = models.IntegerField(null=True) empty_choices = models.IntegerField(choices=(), null=True) with_choices = models.IntegerField(choices=[(1, 'A')], null=True) empty_choices_bool = models.BooleanField(choices=()) empty_choices_text = models.TextField(choices=()) class BigD(models.Model): d = models.DecimalField(max_digits=32, decimal_places=30) class FloatModel(models.Model): size = models.FloatField() class BigS(models.Model): s = models.SlugField(max_length=255) class UnicodeSlugField(models.Model): s = models.SlugField(max_length=255, allow_unicode=True) class SmallIntegerModel(models.Model): value = models.SmallIntegerField() class IntegerModel(models.Model): value = models.IntegerField() class BigIntegerModel(models.Model): value = models.BigIntegerField() null_value = models.BigIntegerField(null=True, blank=True) class PositiveSmallIntegerModel(models.Model): value = models.PositiveSmallIntegerField() class PositiveIntegerModel(models.Model): value = models.PositiveIntegerField() class Post(models.Model): title = models.CharField(max_length=100) body = models.TextField() class NullBooleanModel(models.Model): nbfield = models.BooleanField(null=True, blank=True) nbfield_old = models.NullBooleanField() class BooleanModel(models.Model): bfield = models.BooleanField() string = models.CharField(max_length=10, default='abc') class DateTimeModel(models.Model): d = models.DateField() dt = models.DateTimeField() t = models.TimeField() cl
ass DurationModel(models.Model): field = models.DurationField() class NullDurationModel(models.Model): field = models.DurationField(null=True) class PrimaryKeyCharModel(models.Model): string = models.CharField(max_length=10, primary_key=True) class FksToBoo
leans(models.Model): """Model with FKs to models with {Null,}BooleanField's, #15040""" bf = models.ForeignKey(BooleanModel, models.CASCADE) nbf = models.ForeignKey(NullBooleanModel, models.CASCADE) class FkToChar(models.Model): """Model with FK to a model with a CharField primary key, #19299""" out = models.ForeignKey(PrimaryKeyCharModel, models.CASCADE) class RenamedField(models.Model): modelname = models.IntegerField(name="fieldname", choices=((1, 'One'),)) class VerboseNameField(models.Model): id = models.AutoField("verbose pk", primary_key=True) field1 = models.BigIntegerField("verbose field1") field2 = models.BooleanField("verbose field2", default=False) field3 = models.CharField("verbose field3", max_length=10) field4 = models.DateField("verbose field4") field5 = models.DateTimeField("verbose field5") field6 = models.DecimalField("verbose field6", max_digits=6, decimal_places=1) field7 = models.EmailField("verbose field7") field8 = models.FileField("verbose field8", upload_to="unused") field9 = models.FilePathField("verbose field9") field10 = models.FloatField("verbose field10") # Don't want to depend on Pillow in this test # field_image = models.ImageField("verbose field") field11 = models.IntegerField("verbose field11") field12 = models.GenericIPAddressField("verbose field12", protocol="ipv4") field13 = models.NullBooleanField("verbose field13") field14 = models.PositiveIntegerField("verbose field14") field15 = models.PositiveSmallIntegerField("verbose field15") field16 = models.SlugField("verbose field16") field17 = models.SmallIntegerField("verbose field17") field18 = models.TextField("verbose field18") field19 = models.TimeField("verbose field19") field20 = models.URLField("verbose field20") field21 = models.UUIDField("verbose field21") field22 = models.DurationField("verbose field22") class GenericIPAddress(models.Model): ip = models.GenericIPAddressField(null=True, protocol='ipv4') ############################################################################### # These models aren't used in any test, just here to ensure they validate # successfully. # See ticket #16570. class DecimalLessThanOne(models.Model): d = models.DecimalField(max_digits=3, decimal_places=3) # See ticket #18389. class FieldClassAttributeModel(models.Model): field_class = models.CharField ############################################################################### class DataModel(models.Model): short_data = models.BinaryField(max_length=10, default=b'\x08') data = models.BinaryField() ############################################################################### # FileField class Document(models.Model): myfile = models.FileField(upload_to='unused', unique=True) ############################################################################### # ImageField # If Pillow available, do these tests. if Image: class TestImageFieldFile(ImageFieldFile): """ Custom Field File class that records whether or not the underlying file was opened. """ def __init__(self, *args, **kwargs): self.was_opened = False super().__init__(*args, **kwargs) def open(self): self.was_opened = True super().open() class TestImageField(ImageField): attr_class = TestImageFieldFile # Set up a temp directory for file storage. temp_storage_dir = tempfile.mkdtemp() temp_storage = FileSystemStorage(temp_storage_dir) temp_upload_to_dir = os.path.join(temp_storage.location, 'tests') class Person(models.Model): """ Model that defines an ImageField with no dimension fields. 
""" name = models.CharField(max_length=50) mugshot = TestImageField(storage=temp_storage, upload_to='tests') class AbstractPersonWithHeight(models.Model): """ Abstract model that defines an ImageField with only one dimension field to make sure the dimension update is correctly run on concrete subclass instance post-initialization. """ mugshot = TestImageField(storage=temp_storage, upload_to='tests', height_field='mugshot_height') mugshot_height = models.PositiveSmallIntegerField() class Meta: abstract = True class PersonWithHeight(AbstractPersonWithHeight): """ Concrete model that subclass an abstract one with only on dimension field. """ n
Marketing1by1/petl
petl/io/numpy.py
Python
mit
5,251
0.00019
# -*- coding: utf-8 -*- from __future__ import division, print_function, absolute_import from petl.compat import next, string_types from petl.util.base import iterpeek, ValuesView, Table from petl.util.materialise import columns def infer_dtype(table): import numpy as np # get numpy to infer dtype it = iter(table) hdr = next(it) flds = list(map(str, hdr)) rows = tuple(it) dtype = np.rec.array(rows).dtype dtype.names = flds return dtype def construct_dtype(flds, peek, dtype): import numpy as np if dtype is None: dtype = infer_dtype(peek) elif isinstance(dtype, string_types): # insert field names from source table typestrings = [s.strip() for s in dtype.split(',')] dtype = [(f, t) for f, t in zip(flds, typestrings)] elif (isinstance(dtype, dict) and ('names' not in dtype or 'formats' not in dtype)): # allow for partial specification of dtype cols = columns(peek) newdtype = {'names': [], 'formats': []} for f in flds: newdtype['names'].append(f) if f in dtype and isinstance(dtype[f], tuple): # assume fully specified newdtype['formats'].append(dtype[f][0]) elif f not in dtype: # not specified at all a = np.array(cols[f]) newdtype['formats'].append(a.dtype) else: # assume directly specified, just need to add offset newdtype['formats'].append(dtype[f]) dtype = newdtype return dtype def toarray(table, dtype=None, count=-1, sample=1000): """ Load data from the given `table` into a `numpy <http://www.numpy.org/>`_ structured array. E.g.:: >>> import petl as etl >>> table = [('foo', 'bar', 'baz'), ... ('apples', 1, 2.5), ... ('oranges', 3, 4.4), ... ('pears', 7, .1)] >>> a = etl.toarray(table) >>> a array([('apples', 1, 2.5), ('oranges', 3, 4.4), ('pears', 7, 0.1)], dtype=(numpy.record, [('foo', '<U7'), ('bar', '<i8'), ('baz', '<f8')])) >>> # the dtype can be specified as a string ... a = etl.toarray(table, dtype='a4, i2, f4') >>> a array([(b'appl', 1, 2.5), (b'oran', 3, 4.4), (b'pear', 7, 0.1)], dtype=[('foo', 'S4'), ('bar', '<i2'), ('baz', '<f4')]) >>> # the dtype can also be partially specified ... a = etl.toarray(table, dtype={'foo': 'a4'}) >>> a array([(b'appl', 1, 2.5), (b'oran', 3, 4.4), (b'pear', 7, 0.1)], dtype=[('foo', 'S4'), ('bar', '<i8'), ('baz', '<f8')]) If the dtype is not completely specified, `sample` rows will be examined to infer an appropriate dtype. """ import numpy as np it = iter(table) peek, it = iterpeek(it, sample) hdr = next(it) flds = list(map(str, hdr)) dtype = construct_dtype(flds, peek, dtype) # numpy is fussy about having tuples, need to make sure it = (tuple(row) for row in it) sa = np.fromiter(it, dtype=dtype, count=count) return sa Table.toarray = toarray def torecarray(*args, **kwargs): """ Convenient shorthand for ``toarray(*args, **kwargs).view(np.recarray)``. """ import numpy as np return toarray(*args, **kwargs).view(np.recarray) Table.torecarray = torecarray def fromarray(a): """ Extract a table from a `numpy <http://www.numpy.org/>`_ structured array, e.g.:: >>> import petl as etl >>> import numpy as np >>> a = np.array([('apples', 1, 2.5), ... ('oranges', 3, 4.4), ... ('pears', 7, 0.1)], ... 
dtype='U8, i4,f4') >>> table = etl.fromarray(a) >>> table +-----------+----+-----+ | f0 | f1 | f2 | +===========+====+=====+ | 'apples' | 1 | 2.5 | +-----------+----+-----+ | 'oranges' | 3 | 4.4 | +-----------+----+-----+ | 'pears' | 7 | 0.1 | +-----------+----+-----+ """ return ArrayView(a) class ArrayView(Table): def __init__(self, a): self.a = a def __iter__(self): yield tuple(self.a.dtype.names) for row in self.a: yield tuple(row) def valuestoarray(vals, dtype=None, count=-1, sample=1000): """ Load values from a table column into a `numpy <http://www.numpy.org/>`_ array, e.g.:: >>> import petl as etl >>> ta
ble = [('foo', 'bar', 'baz'), ... ('apples', 1, 2.5), ... ('oranges', 3, 4.4), ... ('pears', 7, .1)] >>> table = etl.wrap(table) >>> table.values('bar').array() array([1, 3, 7]) >>> # specify dtype
... table.values('bar').array(dtype='i4') array([1, 3, 7], dtype=int32) """ import numpy as np it = iter(vals) if dtype is None: peek, it = iterpeek(it, sample) dtype = np.array(peek).dtype a = np.fromiter(it, dtype=dtype, count=count) return a ValuesView.toarray = valuestoarray ValuesView.array = valuestoarray
GehirnInc/py3oauth2
py3oauth2/tests/test_utils.py
Python
mit
1,798
0
# -*- coding: utf-8 -*-
from nose.tools import (
    eq_,
    raises,
)

from py3oauth2.utils import (
    normalize_netloc,
    normalize_path,
    normalize_query,
    normalize_url,
)


def test_normalize_url():
    eq_(normalize_url('http://a/b/c/%7Bfoo%7D'),
        normalize_url('hTTP://a/./b/../b/%63/%7bfoo%7d'))


@raises(ValueError)
def test_normalize_url_unknown_scheme():
    normalize_url('example://example.com/')


@raises(ValueError)
def test_normalize_url_fragment():
    normalize_url('http://example.com/#foo')


@raises(ValueError)
def test_normalize_url_invalid_port():
    normalize_url('https://example.com:1bb/#foo')


def test_normalize_netloc():
    eq_(normalize_netloc('eXamPLe.com', 80), 'example.com')
    eq_(normalize_netloc('user:pass@example.com', 80), 'user:pass@example.com')
    eq_(normalize_netloc('user:@example.com', 80), 'user@example.com')
    eq_(normalize_netloc(':pass@example.com', 80), ':pass@example.com')
    eq_(normalize_netloc('example.com:443', 80), 'example.com:443')
    eq_(normalize_netloc('example.com:80', 80), 'example.com')
    eq_(normalize_netloc('example.com:', 80), 'example.com')


def test_normalize_query():
    eq_(normalize_query(''), '')
    eq_(normalize_query('b=c&a=b'), 'a=b&b=c')
    eq_(normalize_query('b&a=b'), 'a=b')
    eq_(normalize_query('b=&a=b'), 'a=b')
    eq_(normalize_query('b=%e3%81%84&a=%e3%81%82'), 'a=%E3%81%82&b=%E3%81%84')


def test_normalize_path():
    eq_(normalize_path(''), '/')
    eq_(normalize_path('//'), '/')
    eq_(normalize_path('/a//b'), '/a/b/')
    eq_(normalize_path('/a/./b'), '/a/b/')
    eq_(normalize_path('/a/foo/../b'), '/a/b/')
    eq_(normalize_path('/%e3%81%82%a%e3%81%84'), '/%E3%81%82%a%E3%81%84/')
    eq_(normalize_path('/%e3%81%82a%e3%81%84'), '/%E3%81%82a%E3%81%84/')
fortharris/RedCenter
Extensions/VaultManager.py
Python
gpl-3.0
6,621
0.005588
import os from PyQt4 import QtCore, QtGui from Extensions.Global import sizeformat class SearchWidget(QtGui.QLabel): def __init__(self, parent): QtGui.QLabel.__init__(self, parent) self._parent = parent self.setStyleSheet("""background: rgba(0, 0, 0, 50); border-radius: 0px;""") self.setFixedSize(300, 28) self.setPixmap(QtGui.QPixmap("Icons\\line")) self.setScaledContents(True) self.searchTimer = QtCore.QTimer() self.searchTimer.setSingleShot(True) self.searchTimer.setInterval(200) self.searchTimer.timeout.connect(self.gotoText) self.textFindLine = QtGui.QLineEdit(self) self.textFindLine.setStyleSheet("background: white; border-radius: 0px;") self.textFindLine.setGeometry(3, 2, 270, 23) self.textFindLine.grabKeyboard() self.textFindLine.setTextMargins(2, 1, 22, 1) self.textFindLine.textChanged.connect(self.show) self.textFindLine.textChanged.connect(self.searchTimer.start) self.clearTextFindLineButton = QtGui.QPushButton(self.textFindLine) self.clearTextFindLineButton.setGeometry(250, 2, 15, 15) self.clearTextFindLineButton.setFlat(True) self.clearTextFindLineButton.setIcon(QtGui.QIcon("Icons\\clearLeft")) self.clearTextFindLineButton.setStyleSheet("background: white; border: none;") self.clearTextFindLineButton.clicked.connect(self.textFindLine.clear) self.finderCloseButton = QtGui.QToolButton(self) self.finderCloseButton.setStyleSheet("background: none;") self.finderCloseButton.setGeometry(278, 6, 15, 15) self.finderCloseButton.setAutoRaise(True) self.finderCloseButton.setIconSize(QtCore.QSize(25, 25)) self.finderCloseButton.setIcon(QtGui.QIcon("Icons\\Cross")) self.finderCloseButton.clicked.connect(self.hide) def gotoText(self): text = self.textFindLine.text() self._parent.gotoText(text) class VaultManager(QtGui.QListWidget): def __init__(self, vaultItemCountLabel, sizeLabel, busyIndicatorWidget, parent): QtGui.QListWidget.__init__(self, parent) self.redCenter = parent self.setLayoutMode(1) self.setBatchSize(1) self.setUniformItemSizes(True) self.setSelectionMode(QtGui.QAbstractItemView.ExtendedSelection) self.setAlternatingRowColors(True) self.setIconSize(QtCore.QSize(30, 30)) self.itemSelectionChanged.connect(self.selectionMade) searchWidget = SearchWidget(self) searchWidget.move(80, 0) searchWidget.hide() self.vaultItemCountLabel = vaultItemCountLabel self.sizeLabel = sizeLabel self.busyIndicatorWidget = busyIndicatorWidget self.vaultZeroContentLabel = QtGui.QLabel("Empty", self) self.vaultZeroContentLabel.setGeometry(150, 20, 100, 50) self.vaultZeroContentLabel.setAlignment(QtCore.Qt.AlignCenter) self.vaultZeroContentLabel.setStyleSheet("background: none; font: 20px; color: lightgrey;") self.vaultZeroContentLabel.hide() self.vaultCleanUp() def gotoText(self, text): for i in self.vaultKeyList: if self.logDict[i].split('|')[0].startswith(text): index = self.vaultKeyList.index(i) self.setCurrentRow(index) break def loadVault(self): try: logList = [] self.vaultKeyList = [] file = open("Vault\\LOG","r") for i in file.readlines(): if i.strip() == '': pass else: logList.append(tuple(i.strip().split('||'))) file.close() self.logDict = dict(logList) self.vaultContentsSize = 0 self.clear() size = QtCore.QSize() size.setHeight(40) for key, property in self.logDict.items(): self.vaultKeyList.append(key) ## extract attributes attrib = self.logDict[key].split('|') # get locking time time_split = key.split('=')[0].split('-') date = QtCore.QDate(int(time_split[0]), int(time_split[1]), int(time_split[3])).toString() item = QtGui.QListWidgetItem(attrib[0]) item.setToolTip('Original Location: ' + 
attrib[2] + '\nModified: ' + date) item.setSizeHint(size) # assign icon if attrib[1] == "exec": item.setIcon(QtGui.QIcon("Icons\\executable")) else: item.setIcon(QtGui.QIcon("Icons\\unknown")) self.addItem(item) self.vaultContentsSize += int(attrib[3]) self.vaultItemCountLabel.setText("Items: "
+ str(len(self.logDict))) # display size of total files self.sizeLabel.setText(sizeformat(self.vaultContentsSize)) self.showVaultEmptyLabel() except: self.redCenter.showMessage("Problem loading items in the vault.") self.redCenter.hideMessage() def showVaultEmptyLabel(self): if self.count() > 0: self.vaultZeroContentLabel.hide()
else: self.vaultZeroContentLabel.show() def selectionMade(self): self.selected = self.selectedItems() if len(self.selected) > 0: self.redCenter.unlockButton.setEnabled(True) self.redCenter.deleteButton.setEnabled(True) else: self.redCenter.unlockButton.setEnabled(False) self.redCenter.deleteButton.setEnabled(False) def vaultCleanUp(self): logList = [] file = open("Vault\\LOG","r") for i in file.readlines(): if i.strip() == '': pass else: logList.append(tuple(i.strip().split('||'))) file.close() logDict = dict(logList) filesList = os.listdir("Vault\\Files") bookedFilesList = [] for i, v in logDict.items(): bookedFilesList.append(i) for i in filesList: if i not in bookedFilesList: path = os.path.join("Vault\\Files", i) try: os.remove(path) except: pass
hack4sec/hbs-cli
tests/integration/test_HashlistsByAlgLoaderThread.py
Python
mit
8,236
0.004492
# -*- coding: utf-8 -*- """ This is part of HashBruteStation software Docs EN: http://hack4sec.pro/wiki/index.php/Hash_Brute_Station_en Docs RU: http://hack4sec.pro/wiki/index.php/Hash_Brute_Station License: MIT Copyright (c) Anton Kuzmin <http://anton-kuzmin.ru> (ru) <http://anton-kuzmin.pro> (en) Integration tests for HashlistsByAlgLoaderThread """ import sys import time import pytest sys.path.append('../../') from libs.common import md5 from classes.HashlistsByAlgLoaderThread import HashlistsByAlgLoaderThread from classes.HashlistsLoaderThread import HashlistsLoaderThread from CommonIntegration import CommonIntegration class Test_HashlistsByAlgLoaderThread(CommonIntegration): """ Class of integration tests - HashlistsByAlgLoaderThread """ thrd = None loader_thrd = None def setup(self): """ Tests setup """ self._clean_db() self.thrd = HashlistsByAlgLoaderThread() self.thrd.delay_per_check = 1 self.thrd.catch_exceptions = False self.loader_thrd = HashlistsLoaderThread() self.loader_thrd.delay_per_check = 1 self.loader_thrd.catch_exceptions = False def teardown(self): """ Tests teardown """ if isinstance(self.thrd, HashlistsByAlgLoaderThread): self.thrd.available = False time.sleep(1) del self.thrd if isinstance(self.loader_thrd, HashlistsLoaderThread): self.loader_thrd.available = False time.sleep(1) del self.loader_thrd self._clean_db() test_data = [ ( [ {'hash': 'a', 'salt': '\\ta\'1\\', 'summ': md5('a:\\ta\'1\\')}, {'hash': 'b', 'salt': '\\nb"2\\', 'summ': md5('b:\\nb"2\\')} ], 1 ), ( [ {'hash': 'a', 'salt': '1', 'summ': md5('a:1')}, {'hash': 'b', 'salt': '2', 'summ': md5('b:2')} ], 1 ), ( [ {'hash': 'a', 'salt': '', 'summ': md5('a')}, {'hash': 'b', 'salt': '', 'summ': md5('b')} ], 0 ), ] @pytest.mark.parametrize("hashes,have_salt", test_data) def test_simple_build(self, hashes, have_salt): """ Simple common hashlist build :param hashes: hashes rows :param have_salt: Does alg has salt? :return: """ self._add_hashlist(have_salts=have_salt) for _hash in hashes: self._add_hash(hash=_hash['hash'], salt=_hash['salt'], summ=_hash['summ']) assert self.db.fetch_one("SELECT id FROM hashlists WHERE common_by_alg") is None self.thrd.start() self.loader_thrd.start() time.sleep(5) test_hashlist_data = {'id': 2, 'name': 'All-MD4', 'have_salts': have_salt, 'delimiter': self.thrd.DELIMITER, 'cracked': 0, 'uncrack
ed': 2, 'errors': '', 'parsed': 1, 'status': 'ready', 'common_by_alg': 3} hashlist_data = self.db.fetch_row("SELECT * FROM hashlists WHERE common_by_alg") assert int(self.db.fetch_one("SELECT when_loaded FROM hashlists WHERE common_by_alg")) > 0 for field in test_hashlist_data: assert hashlist_data[field] == test_hashlist_data[field] for _hash in hashes: assert self.db.fetch_one( "SELECT COUNT(id) FRO
M hashes WHERE hash = {0} AND salt={1} AND summ = {2} AND hashlist_id = 2". format(self.db.quote(_hash['hash']), self.db.quote(_hash['salt']), self.db.quote(_hash['summ'])) ) == 1 test_data = [ ( [ {'hash': 'a', 'salt': '1', 'summ': md5('a:1'), 'cracked': 0}, {'hash': 'b', 'salt': '2', 'summ': md5('b:2'), 'cracked': 1}, {'hash': 'c', 'salt': '3', 'summ': md5('c:3'), 'cracked': 0}, {'hash': 'd', 'salt': '4', 'summ': md5('d:4'), 'cracked': 0}, ], [ {'hash': 'a', 'salt': '1', 'summ': md5('a:1'), 'cracked': 0}, {'hash': 'b', 'salt': '2', 'summ': md5('b:2'), 'cracked': 0}, ], 1 ), ( [ {'hash': 'a', 'salt': '', 'summ': md5('a'), 'cracked': 0}, {'hash': 'b', 'salt': '', 'summ': md5('b'), 'cracked': 1}, {'hash': 'c', 'salt': '', 'summ': md5('c'), 'cracked': 0}, {'hash': 'd', 'salt': '', 'summ': md5('d'), 'cracked': 0}, ], [ {'hash': 'a', 'salt': '', 'summ': md5('a'), 'cracked': 0}, {'hash': 'b', 'salt': '', 'summ': md5('b'), 'cracked': 0}, ], 0 ), ] @pytest.mark.parametrize("hashes_in_self,hashes_in_common,have_salt", test_data) def test_update_exists_list(self, hashes_in_self, hashes_in_common, have_salt): """ Updating exists common hashlist :param hashes_in_self: Hashes in usual hashlist :param hashes_in_common: Hashes in common hashlist :param have_salt: Does alg has salt? :return: """ self._add_hashlist(have_salts=have_salt) for _hash in hashes_in_self: self._add_hash(hash=_hash['hash'], salt=_hash['salt'], summ=_hash['summ'], cracked=_hash['cracked']) self._add_hashlist(id=2, alg_id=3, common_by_alg=3, have_salts=have_salt) for _hash in hashes_in_common: self._add_hash( hashlist_id=2, hash=_hash['hash'], salt=_hash['salt'], summ=_hash['summ'], cracked=_hash['cracked'] ) assert self.db.fetch_one("SELECT COUNT(id) FROM hashes WHERE hash='b'") == 2 assert self.db.fetch_one("SELECT COUNT(id) FROM hashes WHERE hash='c'") == 1 assert self.db.fetch_one("SELECT COUNT(id) FROM hashes WHERE hash='d'") == 1 self.thrd.start() self.loader_thrd.start() time.sleep(5) assert self.db.fetch_one("SELECT COUNT(id) FROM hashes WHERE hash='b'") == 1 assert self.db.fetch_one("SELECT COUNT(id) FROM hashes WHERE hash='c'") == 2 assert self.db.fetch_one("SELECT COUNT(id) FROM hashes WHERE hash='d'") == 2 assert [{'hash': 'a'}, {'hash': 'c'}, {'hash': 'd'}] \ == self.db.fetch_all("SELECT hash FROM hashes WHERE hashlist_id = 2") test_data = [('outparsing'), ('waitoutparse')] @pytest.mark.parametrize("status", test_data) def test_build_with_parsing_alg(self, status): """ Try build no ready hashlist :param status: :return: """ self._add_hashlist() self._add_hash(hash='a', summ='111') self._add_hash(hash='b', summ='222') self._add_hashlist(id=2, alg_id=3, common_by_alg=0) self._add_work_task(hashlist_id=2, status=status) assert self.db.fetch_one("SELECT id FROM hashlists WHERE common_by_alg") is None self.thrd.start() self.loader_thrd.start() time.sleep(5) test_hashlist_data = {'id': 3, 'name': 'All-MD4', 'have_salts': 0, 'delimiter': self.thrd.DELIMITER, 'cracked': 0, 'uncracked': 0, 'errors': '', 'parsed': 0, 'status': 'ready', 'common_by_alg': 3} hashlist_data = self.db.fetch_row("SELECT * FROM hashlists WHERE common_by_alg") for field in test_hashlist_data: assert hashlist_data[field] == test_hashlist_data[field] self.db.update("task_works", {'status': 'wait'}, 'id=1') time.sleep(5) test_hashlist_data = {'id': 3, 'name': 'All-MD4', 'have_salts': 0, 'delimiter': self.thrd.DELIMITER, 'cracked': 0, 'uncracked': 2, 'errors': '', 'parsed': 1, 'status': 'ready', 'common_by_alg': 3} hashlist_data = self.db.fetch_row("SELECT * 
FROM hashlists WHERE common_by_alg") for field in test_hashlist_data: assert hashlist_data[field] == test_hashlist_data[field] assert self.db.fetch_all("SELECT hash FROM hashes WH
anhstudios/swganh
data/scripts/templates/object/weapon/melee/polearm/crafted_saber/shared_sword_lightsaber_polearm_s1_gen2.py
Python
mit
500
0.044
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES

from swgpy.object import *

def create(kernel):
    result = Weapon()

    result.template = "object/weapon/melee/polearm/crafted_saber/shared_sword_lightsaber_polearm_s1_gen2.iff"
    result.attribute_template_id = 10
    result.stfName("weapon_name","sword_lightsaber_lance_type1")

    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####

    return result
jakogut/python-relatorio
relatorio/templates/base.py
Python
gpl-3.0
1,717
0.000582
###############################################################################
#
# Copyright (c) 2007, 2008 OpenHex SPRL. (http://openhex.com) All Rights
# Reserved.
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################

__metaclass__ = type

import genshi.core
from genshi.template import NewTextTemplate, MarkupTemplate

from relatorio.reporting import MIMETemplateLoader


class RelatorioStream(genshi.core.Stream):
    "Base class for the relatorio streams."

    def render(self, method=None, encoding='utf-8', out=None, **kwargs):
        "calls the serializer to render the template"
        return self.serializer(self.events)

    def serialize(self, method='xml', **kwargs):
        "generates the bitstream corresponding to the template"
        return self.render(method, **kwargs)

    def __or__(self, function):
        "Support for the bitwise operator"
        return RelatorioStream(self.events | function, self.serializer)

MIMETemplateLoader.add_factory('text', NewTextTemplate)
MIMETemplateLoader.add_factory('xml', MarkupTemplate)
gerencianet/gn-api-sdk-python
examples/pix_create_charge.py
Python
mit
463
0.004338
from gerencianet import Gerencianet

from credentials import CREDENTIALS

gn = Gerencianet(CREDENTIALS)

params = {
    'txid': ''
}

body = {
    'calendario': {
        'expiracao': 3600
    },
    'devedor': {
        'cpf': '',
        'nome': ''
    },
    'valor': {
        'original': '0.50'
    },
    'chave': '',
    'solicitacaoPagador': 'Cobrança dos serviços prestados.'
}

response = gn.pix_create_charge(params=params, body=body)

print(response)
TaiSakuma/AlphaTwirl
tests/unit/selection/factories/test_AllFactory.py
Python
bsd-3-clause
685
0.020438
from alphatwirl.selection.factories.AllFactory import AllFactory
from alphatwirl.selection.modules.basic import All
from alphatwirl.selection.modules.basic import Any
from alphatwirl.selection.modules.LambdaStr import LambdaStr

import unittest

##__________________________________________________________________||
class Test_AllFactory(unittest.TestCase):

    def test_obj(self):
        path_cfg_list = ("ev : ev.nJet[0] >= 2", "ev : ev.nMET[0] >= 200")
        kargs = dict(arg1 = 10, arg2 = 20, AllClass = All, LambdaStrClass = LambdaStr)
        obj = AllFactory(path_cfg_list, name = 'test_all', **kargs)

##__________________________________________________________________||
cbrunet/fibermodes
fibermodes/slrc.py
Python
gpl-3.0
9,544
0.000105
# This file is part of FiberModes. # # FiberModes is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # FiberModes is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with FiberModes. If not, see <http://www.gnu.org/licenses/>. """Scalar, list, range or code object. This is a convenient object used to encapsulate a parameter that can be either a scalar (float), a list of floats, a range, or a function (code). """ import math import logging import numpy class SLRC(object): """Scalar, list, range or code object. Args: value(mixed): Initial value. Values are assumed to be always sorted. If the value is a `list` or a `numpy.ndarray`, it uses the value inside the list. If the value is a `dict`, it assumes keys `start`, `end`, and `num` to be set, and it creates a range of num values from start to end (included), just like `numpy.linspace`. If the value is a `str`, if assumes this is Python code to be evaluated. This code is evaluated in a restricted environment, where builtins are listed in `rglobals`. `math` module is also available. The code is assumed called inside a function definition, and must return a scalar value. Otherwise, the value is assumed to be a scalar (float or int). """ logger = logging.getLogger(__name__) #: Allowed builtins for code. It includes the math module. rglobals = { '__builtins__': { 'abs': abs, 'all': all, 'any': any, 'bool': bool, 'complex': complex, 'dict': dict, 'divmod': divmod, 'enumerate': enumerate, 'filter': filter, 'float': float, 'frozenset': frozenset, 'int': int, 'iter': iter, 'len': len, 'list': list, 'map': map, 'max': max, 'min': min, 'next': next, 'pow': pow, 'range': range, 'reversed': reversed, 'round': round, 'set': set, 'slice': slice, 'sorted': sorted, 'str': str, 'sum': sum, 'tuple': tuple, 'zip': zip }, 'math': math } def __init__(self, value=0): self.codeParams = None SLRC.value.fset(self, value) @property def value(self): """Return evaluated value of object. Warning: When set, does not check the assigned value. Returns: The return value can be a float, a list, or a function. Use the type attribute if you need to know what kind it is. 
""" k = self.kind if k == 'range': low = self._value['start'] high = self._value['end'] n = self._value['num'] if n > 1: return [low + index*(high-low)/(n-1) for index in range(n)] elif n == 1: return [low] else: return [] elif k == 'code': cp = ", ".join(self.codeParams) + ", " if self.codeParams else "" code = "def f({}*args, **kwargs):\n".format(cp) for line in self._value.splitlines(): code += " {}\n".format(line) loc = {} exec(code, self.rglobals, loc) return loc['f'] else: return self._value @value.setter def value(self, value): if isinstance(value, SLRC): self._value = value._value else: self._value = value if self.kind == 'list': self._value = sorted(value) self.logger.debug("Value set to {}".format(self._value)) def __iter__(self): k = self.kind if k == 'list': yield from iter(self._value) elif k == 'range': yield from iter(self.value) else: yield self.value def __len__(self): k = self.kind if k == 'list': return len(self._value) elif k == 'range': return self._value['num'] else: return 1 def __getitem__(self, index): if index >= len(self): raise IndexError k = self.kind if k == 'list': return self._value[index] elif k == 'range': low = self._value['start'] high = self._value['end'] n = self._value['num'] return low + index*(high-low)/(n-1) if n > 1 else low else: return self.value @property def kind(self): """Find what is the kind of value. When read, the property returns a string identifying the kind of value contained. When set, the property converts the actual value to a new kind. Conversion is performed as described in the following table. Cases in **bold** are converted without loss of information. Case in *italic* is converted with possible loss of information. Other cases are converted with systematic loss of information. ========== ========== ====== From To Result ========== ========== ======
**scalar** **scalar** No change **scalar** **list** List with one item **scala
r** **range** Range with one item **scalar** **code** Return the value list scalar First item of the list **list** **list** No change *list* *range* Range from first item to last item with same number of elements (but intermediate values could be different) list code Return value of the first item range scalar First item of the range **range** **list** List with items of the range **range** **range** No change range code Return first item of the range code scalar 0 code list [0] code range {'start': 0, 'end': 1, 'num': 2} **code** **code** No change ========== ========== ====== Returns: string. It can be 'scalar', 'list', 'range', or 'code'. """ if isinstance(self._value, list): return 'list' elif isinstance(self._value, numpy.ndarray): return 'list' elif isinstance(self._value, str): return 'code' elif isinstance(self._value, dict): return 'range' else: return 'scalar' @kind.setter def kind(self, value): k = self.kind if k == value: return self.logger.debug("Converted from '{}': {}".format(k, self._value)) if value == 'code': if k == 'scalar': self._value = "return {}".format(self._value) elif k == 'list': self._value = "return {}".format(self._value[0]) elif k == 'range': self._value = "return {}".format(self._value['start']) elif value == 'range': if k == 'scalar': self._value = {'start': self._value, 'end': self._value, 'num': 1} elif k == 'list': self._value = {'start': min(self._value), 'end': max(self._value), 'num': len(self._value)} else: self._value = {'start': 0, 'end': 1, 'num': 2} elif value == 'list': if k == 'scalar': self._value = [self._value]
Princessgladys/googleresourcefinder
app/bubble_test.py
Python
apache-2.0
5,468
0.000549
# Copyright 2010 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for bubble.py.""" from google.appengine.api import users from bubble import HospitalValueInfoExtractor, ValueInfoExtractor from utils import db, HIDDEN_ATTRIBUTE_NAMES import django.utils.translation import bubble import datetime import logging import model import os import unittest import utils def fake_get_message(ns, n, locale=''): message = model.Message(ns=ns, name=n) if ns == 'attribute_value' and n == 'fake_to_localize': message.en = 'fake_localized' else: messag
e.en = 'foo' django_locale = 'en' return message and getattr(message, django_locale) or n class BubbleTest(unittest.TestCase): def setUp(self): self.real_auth_domain = os.environ.get('AUTH_DOMAIN', '') os.environ['AUTH_DOMAIN'] = 'test' self.real_get_message = bubble.get_message bubble.get_message = fake_get_message utils.get_message = fake_get_message def tearDown(self): utils.get_message = self.real_get_message bubble.get_messag
e = self.real_get_message os.environ['AUTH_DOMAIN'] = self.real_auth_domain def test_value_info_extractor(self): s = model.Subject(key_name='haiti:example.org/123', type='hospital') s.set_attribute('title', 'title_foo', datetime.datetime.now(), users.User('test@example.com'), 'nickname_foo', 'affiliation_foo', 'comment_foo') s.set_attribute('attribute_value', 'fake_to_localize', datetime.datetime.now(), users.User('test@example.com'), 'nickname_foo', 'affiliation_foo', 'comment_foo') vai = ValueInfoExtractor(['title'], ['attribute_value']) (special, general, details) = vai.extract(s, ['title']) assert special['title'].raw == 'title_foo' assert general == [] assert details[0].raw == 'title_foo' (special, general, details) = vai.extract(s, ['attribute_value']) assert general[0].raw == 'fake_to_localize' assert general[0].value == 'fake_localized' assert general[0].label == 'foo' def test_hospital_value_info_extractor(self): user = users.User('test@example.com') now = datetime.datetime(2010, 6, 11, 14, 26, 52, 906773) nickname = 'nickname_foo' affiliation = 'affiliation_foo' comment = 'comment_foo' s = model.Subject(key_name='haiti:example.org/123', type='hospital') s.set_attribute('title', 'title_foo', now, user, nickname, affiliation, comment) s.set_attribute(HIDDEN_ATTRIBUTE_NAMES[0], 'hidden_value_foo', now, user, nickname, affiliation, comment) s.set_attribute('organization_name', 'value_foo', now, user, nickname, affiliation, comment) attrs = ['title', 'organization_name', HIDDEN_ATTRIBUTE_NAMES[0]] vai = HospitalValueInfoExtractor() (special, general, details) = vai.extract(s, attrs) assert special['title'].date == '2010-06-11 09:26:52 -05:00' assert special['title'].raw == 'title_foo' assert HIDDEN_ATTRIBUTE_NAMES[0] not in special assert sorted(special) == sorted(vai.special_attribute_names) assert len(general) == 1 assert len(details) == 2 assert general[0].value == 'value_foo' for detail in details: assert detail.value == 'title_foo' or detail.value == 'value_foo' assert detail.value != 'hidden_value_foo' def test_vai_get_value_info(self): s = model.Subject(key_name='example.org/123', type='hospital') s.set_attribute('title', 'title_foo', datetime.datetime(2010, 06, 01), users.User('test@example.com'), 'nickname_foo', 'affiliation_foo\n', 'comment_\nfoo') s.set_attribute('attribute_value', 'fake_to_localize', datetime.datetime(2010, 06, 01), users.User('test@example.com'), 'nickname_foo', '\naffiliation_foo', 'comment_foo') vai = ValueInfoExtractor(['title'], ['attribute_value']) vi = vai.get_value_info(s, 'title') assert vi.label == 'foo' assert vi.raw == 'title_foo' assert vi.author == 'nickname_foo' assert vi.affiliation == 'affiliation_foo ' assert vi.comment == 'comment_ foo' assert vi.date == '2010-05-31 19:00:00 -05:00' vi = vai.get_value_info(s, 'attribute_value') assert vi.label == 'foo' assert vi.raw == 'fake_to_localize' assert vi.value == 'fake_localized' assert vi.author == 'nickname_foo' assert vi.affiliation == ' affiliation_foo' assert vi.comment == 'comment_foo' assert vi.date == '2010-05-31 19:00:00 -05:00'
DinoCow/airflow
tests/providers/amazon/aws/hooks/test_kinesis.py
Python
apache-2.0
2,545
0.001572
# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distr
ibuted with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0
# # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import unittest import uuid from airflow.providers.amazon.aws.hooks.kinesis import AwsFirehoseHook try: from moto import mock_kinesis except ImportError: mock_kinesis = None class TestAwsFirehoseHook(unittest.TestCase): @unittest.skipIf(mock_kinesis is None, 'mock_kinesis package not present') @mock_kinesis def test_get_conn_returns_a_boto3_connection(self): hook = AwsFirehoseHook( aws_conn_id='aws_default', delivery_stream="test_airflow", region_name="us-east-1" ) self.assertIsNotNone(hook.get_conn()) @unittest.skipIf(mock_kinesis is None, 'mock_kinesis package not present') @mock_kinesis def test_insert_batch_records_kinesis_firehose(self): hook = AwsFirehoseHook( aws_conn_id='aws_default', delivery_stream="test_airflow", region_name="us-east-1" ) response = hook.get_conn().create_delivery_stream( DeliveryStreamName="test_airflow", S3DestinationConfiguration={ 'RoleARN': 'arn:aws:iam::123456789012:role/firehose_delivery_role', 'BucketARN': 'arn:aws:s3:::kinesis-test', 'Prefix': 'airflow/', 'BufferingHints': {'SizeInMBs': 123, 'IntervalInSeconds': 124}, 'CompressionFormat': 'UNCOMPRESSED', }, ) stream_arn = response['DeliveryStreamARN'] self.assertEqual(stream_arn, "arn:aws:firehose:us-east-1:123456789012:deliverystream/test_airflow") records = [{"Data": str(uuid.uuid4())} for _ in range(100)] response = hook.put_records(records) self.assertEqual(response['FailedPutCount'], 0) self.assertEqual(response['ResponseMetadata']['HTTPStatusCode'], 200)
josyb/myhdl
example/uart_tx/uart_tx.py
Python
lgpl-2.1
2,870
0.009408
from myhdl import always, always_seq, block, delay, enum, instance, intbv, ResetSignal, Signal, StopSimulation @block def uart_tx(tx_bit, tx_valid, tx_byte, tx_clk, tx_rst): index = Signal(intbv(0, min=0, max=8)) st = enum('IDLE', 'START', 'DATA') state = Signal(st.IDLE) @always(tx_clk.posedge, tx_rst.negedge) def fsm(): if tx_rst == 0: tx_bit.next = 1 index.next = 0 state.next = st.IDLE else: if state == st.IDLE: tx_bit.next = 1 if tx_valid: # a pulse state.next = st.START elif state == st.START: tx_bit.next = 0 index.next = 7 state.next = st.DATA elif state == st.DATA: tx_bit.next = tx_byte[index] if index == 0: state.next = st.IDLE else: index.next = index - 1 return fsm @block def uart_tx_2(tx_bit, tx_valid, tx_byte, tx_clk, tx_rst): index = Signal(intbv(0, min=0, max=8)) st = enum('IDLE', 'START', 'DATA') state = Signal(st.IDLE) @always_seq(tx_clk.posedge, reset=tx_rst) def fsm(): if state == st.IDLE: tx_bit.next = 1 if tx_valid: # a pulse state.next = st.START elif state == st.START: tx_bit.next = 0 index.next = 7 state.next = st.DATA elif state == st.DATA: tx_bit.next = tx_byte[index] if index == 0: state.next = st.IDLE else: index.next = index - 1 return fsm @block def tb(uart_tx): tx_bit = Signal(bool(1)) tx_valid = Signal(bool(0)) tx_byte = Signal(intbv(0)[8:]) tx_clk = Signal(bool(0)) # tx_rst = Signal(bool(1)) tx_rst = ResetSignal(1, active=0, isasync=True) uart_tx_inst = uart_tx(tx_bit, tx_valid, tx_byte, tx_clk, tx_rst) # toVerilog(uart_tx, tx_bit, tx_valid, tx_byte, tx_clk, tx_rst) @always(delay(10)) def clk_gen(): tx_clk.next = not tx_clk @instance def stimulus(): tx_rst.next = 1 yield delay(100) tx_rst.next = 0 yield delay(100) tx_rst.next = 1 yield delay(100) for v in (0x00, 0xff, 0x55, 0
xaa): yield tx_clk.negedge tx_byte.next = v tx_valid.next = 1 yield tx_clk.negedge tx_valid.next = 0 yield delay(16 * 20)
raise StopSimulation return clk_gen, stimulus, uart_tx_inst dut = uart_tx_2 inst = tb(dut) inst.config_sim(trace=True) inst.run_sim(10000)
absortium/poloniex-api
setup.py
Python
mit
909
0
import os

from setuptools import setup, find_packages

with open(os.path.join(os.path.dirname(__file__), 'README.md')) as readme:
    README = readme.read()

# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))

setup(
    name='poloniex',
    version='0.1',
    packages=[
        'poloniex',
        'poloniex.wamp',
        'poloniex.api'
    ],
    include_package_data=True,
    description='Python Poloniex API',
    long_description=README,
    url='https://github.com/absortium/poloniex.git',
    author='Andrey Samokhvalov',
    license='MIT',
    author_email='andrew.shvv@gmail.com',
    install_requires=[
        'asyncio',
        'aiohttp',
        'autobahn',
        'pp-ez',
        'requests'
    ],
    classifiers=[
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 3.5',
    ],
)
lucienfostier/gaffer
python/GafferOSLUI/OSLImageUI.py
Python
bsd-3-clause
9,411
0.040697
########################################################################## # # Copyright (c) 2013-2014, John Haddon. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above # copyright notice, this list of conditions and the following # disclaimer. # # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided with # the distribution. # # * Neither the name of John Haddon nor the names of # any other contributors to this software may be used to endorse or # promote products derived from this software without specific prior # written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ########################################################################## import IECore import Gaffer import GafferUI import GafferOSL import imath import functools _channelNamesOptions = { "RGB" : IECore.Color3fData( imath.Color3f( 1 ) ), "RGBA" : IECore.Color4fData( imath.Color4f( 1 ) ), "R" : IECore.FloatData( 1 ), "G" : IECore.FloatData( 1 ), "B" : IECore.FloatData( 1 ), "A" : IECore.FloatData( 1 ), "customChannel" : IECore.FloatData( 1 ), "customLayer" : IECore.Color3fData( imath.Color3f( 1 ) ), "customLayerRGBA" : IECore.Color4fData( imath.Color4f( 1 ) ), "closure" : None, } ########################################################################## # _ChannelsFooter ########################################################################## class _ChannelsFooter( GafferUI.PlugValueWidget ) : def __init__( self, plug ) : row = GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Horizontal ) GafferUI.PlugValueWidget.__init__( self, row, plug ) with row : GafferUI.Spacer( imath.V2i( GafferUI.PlugWidget.labelWidth(), 1 ) ) menuButton = GafferUI.MenuButton( image = "plus.png", hasFrame = False, menu = GafferUI.Menu( Gaffer.WeakMethod( self.__menuDefinition ), title = "Add Input" ), toolTip = "Add Input" ) menuButton.setEnabled( not Gaffer.MetadataAlgo.readOnly( plug ) ) GafferUI.Spacer( imath.V2i( 1 ), imath.V2i( 999999, 1 ), parenting = { "expand" : True } ) def _updateFromPlug( self ) : self.setEnabled( self._editable() ) def __menuDefinition( self ) : result = IECore.MenuDefinition() usedNames = set() for p in self.getPlug().children(): # TODO - this method for checking if a plug variesWithContext should probably live in PlugAlgo # ( it's based on Switch::variesWithContext ) sourcePlug = p["name"].source() variesWithContext = sourcePlug.direction() == Gaffer.Plug.Direction.Out and isinstance( ComputeNode, sourcePlug.node() ) if not variesWithContext: usedNames.add( 
p["name"].getValue() ) # Use a fixed order for some standard options that we want to list in a specific order sortedOptions = [] for label in ["RGB", "RGBA", "R", "G", "B", "A" ]: sortedOptions.append( (label, _channelNamesOptions[label] ) ) for label, defaultData in sorted( _channelNamesOptions.items() ): if not label in [ i[0] for i in sortedOptions ]: sortedOptions.append( (label, defaultData) ) categories = { "Standard" : [], "Custom" : [], "Advanced" : [] } for label, defaultData in sortedOptions: if label == "closure": categories["Advanced"].append( ( label, label, defaultData ) ) else: bareLabel = label.replace( "RGBA", "" ).replace( "RGB", "" ) channelName = bareLabel if label.startswith( "custom" ): if channelName in usedNames: suffix = 2 while True: channelName = bareLabel + str( suffix ) if not channelName in usedNames: break suffix += 1 categories["Custom"].append( ( label, channelName, defaultData ) ) else: if channelName in usedNames: continue categories["Standard"].append( ( label, channelName, defaultData ) ) for category in [ "Standard", "Custom", "Advanced" ]: for ( menuLabel, channelName, defaultData ) in categories[category]: result.append( "/" + category + "/" + menuLabel, { "command" : functools.partial( Gaffer.WeakMethod( self.__addPlug ), channelName, defaultData ), } ) return result def __addPlug( self, name, defaultData ) : alphaValue = None if isinstance( defaultData, IECore.Color4fData ): alphaValue = Gaffer.FloatPlug( "value", Gaffer.Plug.Direction.In, defaultData.value.a ) defaultData = IECore.Color3fData( imath.Color3f( defaultData.value.r, defaultData.value.g, defaultData.value.b ) ) if defaultData == None: plugName = "closure" name = "" valuePlug = GafferOSL.ClosurePlug( "value" ) else: plugName = "channel" valuePlug = Gaffer.PlugAlgo.createPlugFromData( "value", Gaffer.Plug.Direction.In, Gaffer.Plug.Flags.Default, defaultData ) with Gaffer.UndoScope( self.getPlug().ancestor( Gaffer.ScriptNode ) ) : self.getPlug().addChild( Gaffer.NameValuePlug(
name, valuePlug, True, plugName, Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic ) ) if alphaValue: self.getPlug().addChild( Gaffer.NameValuePlug( name + ".A" if name else "A", alphaValue, True, plugName, Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic ) ) def __channelLabelFromPlug( plug ): if plug.typeId() == GafferOSL.ClosurePlug.staticTypeId(): return plug.parent().getNam
e() elif plug.typeId() == Gaffer.Color3fPlug.staticTypeId() and plug.parent()["name"].getValue() == "": return "[RGB]" else: return plug.parent()["name"].getValue() ########################################################################## # Metadata ########################################################################## Gaffer.Metadata.registerNode( GafferOSL.OSLImage, "description", """ Executes OSL shaders to perform image processing. Use the shaders from the OSL/ImageProcessing menu to read values from the input image and then write values back to it. """, "plugAdderOptions", IECore.CompoundData( _channelNamesOptions ), "layout:activator:defaultFormatActive", lambda node : not node["in"].getInput(), plugs = { "defaultFormat" : [ "description", """ The resolution and aspect ratio to output when there is no input image provided. """, "layout:activator", "defaultFormatActive", ], "channels" : [ "description", """ Define image channels to output by adding child plugs and connecting corresponding OSL shaders. You can drive RGB layers with a color, or connect individual channels to a float. If you want to add multiple channels at once, you can also add a closure plug, which can accept a connection from an OSLCode with a combined output closure. """, "layout:customWidget:footer:widgetType", "GafferOSLUI.OSLImageUI._ChannelsFooter", "layout:customWidget:footer:index", -1, "nodule:type", "GafferUI::CompoundNodule", "noduleLayout:section", "left", "noduleLayout:spacing", 0.2, "plugValueWidget:type", "GafferUI.LayoutPlugValueWidget", # Add + button for showing and hiding parameters in the GraphEditor "noduleLayout:customGadget:addButton:gadgetType", "GafferOSLUI.OSLImageUI.PlugAdder",
Crystal-SDS/dashboard
crystal_dashboard/dashboards/crystal/zones/models.py
Python
gpl-3.0
192
0
class Zone:

    def __init__(self, id_zone, name, region, description):
        self.id = id_zone
        self.name = name
        self.region = region
        self.description = description
endlessm/chromium-browser
third_party/catapult/netlog_viewer/netlog_viewer_build/build_for_appengine.py
Python
bsd-3-clause
1,450
0.004138
#!/usr/bin/env python
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import glob
import os
import shutil
import subprocess
import sys

netlog_viewer_root_path = os.path.abspath(
    os.path.join(os.path.dirname(__file__), '..'))
sys.path.append(netlog_viewer_root_path)
import netlog_viewer_project

project = netlog_viewer_project.NetlogViewerProject()

src_dir = project.netlog_viewer_src_path
out_dir = os.path.join(netlog_viewer_root_path, "appengine", "static")
components_dir = os.path.join(project.catapult_third_party_path,
                              "polymer", "components")

if os.path.exists(out_dir):
  shutil.rmtree(out_dir)
os.mkdir(out_dir)

in_html = os.path.join(src_dir, 'index.html')
out_html = os.path.join(out_dir, 'vulcanized.html')

try:
  subprocess.check_call(['vulcanize', in_html,
                         '--inline-scripts', '--inline-css',
                         '--strip-comments',
                         '--redirect', '/components|' + components_dir,
                         '--out-html', out_html])
except OSError:
  sys.stderr.write('''
ERROR: Could not execute "vulcanize".

To install vulcanize on Linux:
  sudo apt-get install npm
  sudo npm install -g vulcanize
'''[1:])
  sys.exit(1)

for fn in glob.glob(os.path.join(src_dir, "*.png")):
  shutil.copyfile(fn, os.path.join(out_dir, os.path.split(fn)[1]))
petrutlucian94/nova
nova/objects/dns_domain.py
Python
apache-2.0
2,520
0
# Copyright (C) 2014, Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova import db from nova import objects from nova.objects import base from nova.objects import fields # TODO(berrange): Remove NovaObjectDictCompat class DNSDomain(base.NovaPersistentObject, base.NovaObject, base.NovaObjectDictCompat): # Version 1.0: Initial version VERSION = '1.0' fields = { 'domain': fields.StringField(), 'scope': fields.StringField(nullable=True), 'availability_zone': fields.StringField(nullable=True), 'project_id': fields.StringField(nullable=True), } @staticmethod def _from_db_object(context, vif, db_vif): for field in vif.fields: vif[field] = db_vif[field] vif._context = context vif.obj_reset_changes() return vif @base.remotable_classmethod def get_by_domain(cls, context, domain): db_dnsd = db.dnsdomain_get(context, domain) if db_dnsd: return cls._from_db_object(context, cls(), db_dnsd) @base.remotable_classmethod def register_for_zone(cls, context, domain, zone): db.dnsdomain_register_for_zone(context, domain, zone) @base.remotable_classmethod def register_for_project(cls, context, domain, project): db.dnsdomain_register_for_project(context, domain, project) @base.remotable_classmethod def delete_by_domain(cls, context, domain): db.dnsdomain_unregister(context, domain) class DNSDomainList(base.ObjectListBase, base.NovaObject): # Version 1.0: Initial version VERSION = '1.0' fields = { 'objects': fields.ListOfObjectsField('DNSDomain'), } child_versions = { '1.0': '1.0', } @base.remotab
le_classmethod def get_all(cls, contex
t): db_domains = db.dnsdomain_get_all(context) return base.obj_make_list(context, cls(context), objects.DNSDomain, db_domains)
zuun77/givemegoogletshirts
leetcode/python/897_increasing-order-search-tree2.py
Python
apache-2.0
796
0.003769
# Definition for a binary tree node.
class TreeNode:
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None


class Solution:
    def increasingBST(self, root: TreeNode) -> TreeNode:
        def dfs(node):
            if not node.left and not node.right:
                return node
            head = None
            if node.right:
                node.right = dfs(node.right)
            if node.left:
                head = dfs(node.left)
                cur = head
                while cur.right:
                    cur = cur.right
                cur.right = node
                return head
            else:
                return node

        return dfs(root)


t = TreeNode(2)
t.left = TreeNode(1)
tt = Solution().increasingBST(t)
print(tt.val, tt.right.val)
alon/polinax
libs/external_libs/docutils-0.4/test/test_transforms/test_substitutions.py
Python
gpl-2.0
9,979
0.004109
#! /usr/bin/env python # Author: David Goodger # Contact: goodger@users.sourceforge.net # Revision: $Revision: 4233 $ # Date: $Date: 2005-12-29 00:48:48 +0100 (Thu, 29 Dec 2005) $ # Copyright: This module has been placed in the public domain. """ Tests for docutils.transforms.references.Substitutions. """ from __init__ import DocutilsTestSupport from docutils.transforms.references import Substitutions from docutils.parsers.rst import Parser def suite(
): parser = Parser() s = DocutilsTestSupport.TransformTestSuite(parser) s.generateTests(totest) return s totest = {} totest['substitutions'] = ((Substitutions,), [ ["""\ The |biohazard| symbol is deservedly scary-looking. .. |biohazard| image:: biohazard.png """, """\ <document source="
test data"> <paragraph> The \n\ <image alt="biohazard" uri="biohazard.png"> symbol is deservedly scary-looking. <substitution_definition names="biohazard"> <image alt="biohazard" uri="biohazard.png"> """], ["""\ Here's an |unknown| substitution. """, """\ <document source="test data"> <paragraph> Here's an \n\ <problematic ids="id2" refid="id1"> |unknown| substitution. <system_message backrefs="id2" ids="id1" level="3" line="1" source="test data" type="ERROR"> <paragraph> Undefined substitution referenced: "unknown". """], [u"""\ Substitutions support case differences: .. |eacute| replace:: \u00E9 .. |Eacute| replace:: \u00C9 |Eacute|\\t\\ |eacute|, and even |EACUTE|. """, u"""\ <document source="test data"> <paragraph> Substitutions support case differences: <substitution_definition names="eacute"> \u00E9 <substitution_definition names="Eacute"> \u00C9 <paragraph> \u00C9 t \u00E9 , and even \n\ \u00C9 . """], [u"""\ Indirect substitution definitions with multiple references: |substitute| my coke for gin |substitute| you for my mum at least I'll get my washing done .. |substitute| replace:: |replace| .. |replace| replace:: swap """, u"""\ <document source="test data"> <paragraph> Indirect substitution definitions with multiple references: <paragraph> swap my coke for gin swap you for my mum at least I'll get my washing done <substitution_definition names="substitute"> swap <substitution_definition names="replace"> swap """], ["""\ .. |l| unicode:: U+00AB .. left chevron .. |r| unicode:: U+00BB .. right chevron .. |.| replace:: |l|\ ``.``\ |r| .. Delete either of the following lines, and there is no error. Regular expression |.| will match any character .. Note:: Note that |.| matches *exactly* one character """, u"""\ <document source="test data"> <substitution_definition names="l"> \xab <substitution_definition names="r"> \xbb <substitution_definition names="."> \xab <literal> . \xbb <comment xml:space="preserve"> Delete either of the following lines, and there is no error. <paragraph> Regular expression \n\ \xab <literal> . \xbb will match any character <note> <paragraph> Note that \n\ \xab <literal> . \xbb matches \n\ <emphasis> exactly one character """], ["""\ .. |sub| replace:: |sub| """, """\ <document source="test data"> <system_message level="3" line="1" names="sub" source="test data" type="ERROR"> <paragraph> Circular substitution definition detected: <literal_block xml:space="preserve"> .. |sub| replace:: |sub| """], ["""\ .. |sub| replace:: |indirect1| .. |indirect1| replace:: |indirect2| .. |indirect2| replace:: |Sub| """, """\ <document source="test data"> <system_message level="3" line="1" names="sub" source="test data" type="ERROR"> <paragraph> Circular substitution definition detected: <literal_block xml:space="preserve"> .. |sub| replace:: |indirect1| <system_message level="3" line="2" names="indirect1" source="test data" type="ERROR"> <paragraph> Circular substitution definition detected: <literal_block xml:space="preserve"> .. |indirect1| replace:: |indirect2| <system_message level="3" line="3" names="indirect2" source="test data" type="ERROR"> <paragraph> Circular substitution definition detected: <literal_block xml:space="preserve"> .. |indirect2| replace:: |Sub| """], ["""\ .. |indirect1| replace:: |indirect2| .. |indirect2| replace:: |Sub| .. |sub| replace:: |indirect1| Use |sub| and |indirect1| and |sub| again (and |sub| one more time). 
""", """\ <document source="test data"> <system_message level="3" line="1" names="indirect1" source="test data" type="ERROR"> <paragraph> Circular substitution definition detected: <literal_block xml:space="preserve"> .. |indirect1| replace:: |indirect2| <system_message level="3" line="2" names="indirect2" source="test data" type="ERROR"> <paragraph> Circular substitution definition detected: <literal_block xml:space="preserve"> .. |indirect2| replace:: |Sub| <system_message level="3" line="3" names="sub" source="test data" type="ERROR"> <paragraph> Circular substitution definition detected: <literal_block xml:space="preserve"> .. |sub| replace:: |indirect1| <paragraph> Use \n\ <problematic ids="id8" refid="id7"> and \n\ <problematic ids="id2" refid="id1"> |indirect1| and \n\ <problematic ids="id4" refid="id3"> |sub| again (and \n\ <problematic ids="id6" refid="id5"> |sub| one more time). <system_message backrefs="id2" ids="id1" level="3" line="5" source="test data" type="ERROR"> <paragraph> Circular substitution definition referenced: "indirect1". <system_message backrefs="id4" ids="id3" level="3" line="5" source="test data" type="ERROR"> <paragraph> Circular substitution definition referenced: "sub". <system_message backrefs="id6" ids="id5" level="3" line="5" source="test data" type="ERROR"> <paragraph> Circular substitution definition referenced: "sub". <system_message backrefs="id8" ids="id7" level="3" source="test data" type="ERROR"> <paragraph> Circular substitution definition referenced: "Sub". """], ]) totest['unicode'] = ((Substitutions,), [ ["""\ Insert an em-dash (|mdash|), a copyright symbol (|copy|), a non-breaking space (|nbsp|), a backwards-not-equals (|bne|), and a captial omega (|Omega|). .. |mdash| unicode:: 0x02014 .. |copy| unicode:: \\u00A9 .. |nbsp| unicode:: &#x000A0; .. |bne| unicode:: U0003D U020E5 .. |Omega| unicode:: U+003A9 """, u"""\ <document source="test data"> <paragraph> Insert an em-dash ( \u2014 ), a copyright symbol ( \u00a9 ), a non-breaking space ( \u00a0 ), a backwards-not-equals ( = \u20e5 ), and a captial omega ( \u03a9 ). <substitution_definition names="mdash"> \u2014 <substitution_definition names="copy"> \u00a9 <substitution_definition names="nbsp"> \u00a0 <substitution_definition names="bne"> = \u20e5 <substitution_definition names="Omega"> \u03a9 """], [""" Testing comments and extra text. Copyright |copy| 2003, |BogusMegaCorp (TM)|. .. |copy| unicode:: 0xA9 .. copyright sign .. |BogusMegaCorp (TM)| unicode:: BogusMegaCorp U+2122 .. with trademark sign """, u"""\ <document source="test data"> <paragraph> Testing comments and extra text. <paragraph> Copyright \n\ \u00a9 2003, \n\ BogusMegaCorp
lfalvarez/nouabook
votainteligente/urls.py
Python
gpl-3.0
1,050
0.013333
from django.conf.urls import patterns, include, url # Uncomment the next two lines to enable the admin: from django.contrib import admin from django.conf.urls.i18n import i18n_patterns admin.autodiscover() urlpatterns = patterns('', # Examples: # url(r'^$', 'votainteligente.views.home', name='home'), # url(r'^votainteligente/', include('votainteligente.foo.urls')), # Uncomment the admin/doc line below to en
able admin documentation: # url(r'^admin/doc/', include('django.contrib.admindocs.urls')), # Uncomment the next line to enable the admin: url(r'^admin/', include(admin.site.urls)), url(r'^i18n/', include('django.conf.urls.i18n')), #url(r'^', include('elections.urls')), #('^pages/', include('flatpages_i18n.urls')),#('^pages/', include('django.contrib.flatpages.urls')),
#(r'^tinymce/', include('tinymce.urls')), ) urlpatterns += i18n_patterns('', url(r'^', include('elections.urls')), url(r'^page', include('flatpages_i18n.urls')), (r'^tinymce/', include('tinymce.urls')), )
wufangjie/leetcode
011. Container With Most Water.py
Python
gpl-3.0
2,524
0.001981
''' Given n non-negative integers a1, a2, ..., an, where each represents a point at coordinate (i, ai). n vertical lines are drawn such that the two endpoints of line i is at (i, ai) and (i, 0). Find two lines, which together with x-axis forms a container, such that the container contains the most water. Note: You may not slant the container. ''' class Solution(object): def maxArea(self, height): """ :type height: List[int] :rtype: int """ lo, hi = 0, len(height) - 1 theBest = 0 while lo < hi: theBest = max(theBest, (hi - lo) * min(height[lo], height[hi])) if height[lo] < height[hi]: lo += 1 else: hi -= 1 return theBest def maxArea_TLE2(self, height): maxlen = len(height) def _max_area_as_short_side(i): left = right = 0 for j in range(i): if height[j] >= height[i]: left = height[i] * (i - j) break for j in range(maxlen - 1, i, -1): if height[j] >= height[i]: right = height[i] * (j - i) break return max(left, right) theBest = maxHeight = 0 for i in range(maxlen >> 1): if height[i] < maxHeight: continue else: maxHeight = height[i] theBest = max(theBest, _max_area_as_short_side(i)) left = theBest theBest = maxHeight = 0 for i in range(maxlen - 1, (maxlen >> 1) - 1, -1): # the mid () if height[i] < maxHe
ight: continue else: maxHeight = height[i] theBest = max(theBest, _max_area_as_short_side(i)) return max(left, theBest) def maxArea_TLE(self, height): maxlen = len(height) def _max_area_as_short_side(i): left = right = 0 for j in range(i): if height[j] >= height[i]: left = h
eight[i] * (i - j) break for j in range(maxlen - 1, i, -1): if height[j] >= height[i]: right = height[i] * (j - i) break return max(left, right) return max([_max_area_as_short_side(i) for i in range(maxlen)]) if __name__ == '__main__': assert Solution().maxArea([2, 1]) == 1 assert Solution().maxArea(range(15001))
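A quick, self-contained check of the two-pointer idea used in the accepted maxArea above (an illustrative restatement, not the class method itself; the function name is made up):

def max_area(height):
    # Narrow the window from both ends, always moving the shorter wall inward.
    lo, hi, best = 0, len(height) - 1, 0
    while lo < hi:
        best = max(best, (hi - lo) * min(height[lo], height[hi]))
        if height[lo] < height[hi]:
            lo += 1
        else:
            hi -= 1
    return best

assert max_area([1, 8, 6, 2, 5, 4, 8, 3, 7]) == 49   # walls of height 8 and 7, width 7
assert max_area([2, 1]) == 1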
zekroTJA/regiusBot
commands/cmd_log.py
Python
mit
1,480
0.004054
import os import discord import requests from utils import functions description = "Show bot log" perm = 2 async def ex(message, client): if not os.path.isfile("screenlog.0"): await client.send_message(message.channel, embed=discord.Embed(colour=discord.Color.red(), description="File `screenlog.0` does not exist!")) else: with open("screenlog.0") as f: lines = f.readlines() log_full = "" for string in lines: log_full += string if len(lines) > 10: lines = lines[len(lines) - 10:len(lines)] log = "" for string in lines: log += string message_send = a
wait client.send_message(message.channel, embed=discord.Embed( description="Uploading log to pastebin.com ...")) params = {"api_option": "paste", "api_dev_key": functions.get_settings()["secrets"]["pastebin"], "api_paste_code": log_full, "api_paste_private": "1", "api_paste_expire_date": "10M"
} paste = requests.post("https://pastebin.com/api/api_post.php", data=params).text.replace( "https://pastebin.com/", "https://pastebin.com/raw/") await client.delete_message(message_send) await client.send_message(message.channel, "**Log of `screenlog.0`**\n*Full log file here: " + paste + "*\n\n" + "```" + log + "```")
ammongit/scripts
print-months.py
Python
mit
175
0
#!/usr/bin/
env python3 import calendar if __name__ == "__main__": for num in range(1, 13): month = calendar.month_name[num] print(f"{num:0
2} - {month}")
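For reference, the two pieces the script combines, the calendar.month_name lookup and zero-padded formatting, behave as follows (illustrative check only):

import calendar

assert calendar.month_name[1] == "January"
assert f"{3:02} - {calendar.month_name[3]}" == "03 - March"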
stczhc/neupy
examples/gd/mnist_mlp.py
Python
mit
1,266
0
import theano import numpy as np from sklearn.preprocessing import OneHotEncoder from sklearn import cross_validation, metrics, datasets from neu
py import algorithms, layers, environment environment.reproducible() theano.config.floatX = 'float32' mnist = datasets.fetch_mlda
ta('MNIST original') target_scaler = OneHotEncoder() target = mnist.target.reshape((-1, 1)) target = target_scaler.fit_transform(target).todense() data = mnist.data / 255. data = data - data.mean(axis=0) x_train, x_test, y_train, y_test = cross_validation.train_test_split( data.astype(np.float32), target.astype(np.float32), train_size=(6 / 7.) ) network = algorithms.Momentum( [ layers.Relu(784), layers.Relu(500), layers.Softmax(300), layers.ArgmaxOutput(10), ], error='categorical_crossentropy', step=0.01, verbose=True, shuffle_data=True, momentum=0.99, nesterov=True, ) network.train(x_train, y_train, x_test, y_test, epochs=20) y_predicted = network.predict(x_test) y_test = np.asarray(y_test.argmax(axis=1)).reshape(len(y_test)) print(metrics.classification_report(y_test, y_predicted)) score = metrics.accuracy_score(y_test, y_predicted) print("Validation accuracy: {:.2f}%".format(100 * score))
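The OneHotEncoder step above simply turns integer class labels into one-hot rows; a dependency-free numpy sketch of the same transformation (illustrative only, not the sklearn API):

import numpy as np

labels = np.array([0, 2, 1])          # e.g. digit classes
one_hot = np.eye(3)[labels]           # one row per label, a single 1 per row
assert one_hot.tolist() == [[1.0, 0.0, 0.0],
                            [0.0, 0.0, 1.0],
                            [0.0, 1.0, 0.0]]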
dsiddharth/access-keys
keystone/contrib/oauth1/__init__.py
Python
apache-2.0
690
0
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0
# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystone.contrib
.oauth1.core import * # flake8: noqa
larrybradley/astropy
astropy/utils/iers/iers.py
Python
bsd-3-clause
46,942
0.000639
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ The astropy.utils.iers package provides access to the tables provided by the International Earth Rotation and Reference Systems Service, in particular allowing interpolation of published UT1-UTC values for given times. These are used in `astropy.time` to provide UT1 values. The polar motions are also used for determining earth orientation for celestial-to-terrestrial coordinate transformations (in `astropy.coordinates`). """ import re from datetime import datetime from warnings import warn from urllib.parse import urlparse import numpy as np import erfa from astropy.time import Time, TimeDelta from astropy import config as _config from astropy import units as u from astropy.table import QTable, MaskedColumn from astropy.utils.data import (get_pkg_data_filename, clear_download_cache, is_url_in_cache, get_readable_fileobj) from astropy.utils.state import ScienceState from astropy import utils from astropy.utils.exceptions import AstropyWarning __all__ = ['Conf', 'conf', 'earth_orientation_table', 'IERS', 'IERS_B', 'IERS_A', 'IERS_Auto', 'FROM_IERS_B', 'FROM_IERS_A', 'FROM_IERS_A_PREDICTION', 'TIME_BEFORE_IERS_RANGE', 'TIME_BEYOND_IERS_RANGE', 'IERS_A_FILE', 'IERS_A_URL', 'IERS_A_URL_MIRROR', 'IERS_A_README', 'IERS_B_FILE', 'IERS_B_URL', 'IERS_B_README', 'IERSRangeError', 'IERSStaleWarning', 'LeapSeconds', 'IERS_LEAP_SECOND_FILE', 'IERS_LEAP_SECOND_URL', 'IETF_LEAP_SECOND_URL'] # IERS-A default file name, URL, and ReadMe with content description IERS_A_FILE = 'finals2000A.all' IERS_A_URL = 'ftp://anonymous:mail%40astropy.org@gdc.cddis.eosdis.nasa.gov/pub/products/iers/finals2000A.all' # noqa: E501 IERS_A_URL_MIRROR = 'https://datacenter.iers.org/data/9/finals2000A.all' IERS_A_README = get_pkg_data_filename('data/ReadMe.finals2000A') # IERS-B default file name, URL, and ReadMe with content description IERS_B_FILE = get_pkg_data_filename('data/eopc04_IAU2000.62-now') IERS_B_URL = 'http://hpiers.obspm.fr/iers/eop/eopc04/eopc04_IAU2000.62-now' IER
S_B_README = get_pkg_data_filename('d
ata/ReadMe.eopc04_IAU2000') # LEAP SECONDS default file name, URL, and alternative format/URL IERS_LEAP_SECOND_FILE = get_pkg_data_filename('data/Leap_Second.dat') IERS_LEAP_SECOND_URL = 'https://hpiers.obspm.fr/iers/bul/bulc/Leap_Second.dat' IETF_LEAP_SECOND_URL = 'https://www.ietf.org/timezones/data/leap-seconds.list' # Status/source values returned by IERS.ut1_utc FROM_IERS_B = 0 FROM_IERS_A = 1 FROM_IERS_A_PREDICTION = 2 TIME_BEFORE_IERS_RANGE = -1 TIME_BEYOND_IERS_RANGE = -2 MJD_ZERO = 2400000.5 INTERPOLATE_ERROR = """\ interpolating from IERS_Auto using predictive values that are more than {0} days old. Normally you should not see this error because this class automatically downloads the latest IERS-A table. Perhaps you are offline? If you understand what you are doing then this error can be suppressed by setting the auto_max_age configuration variable to ``None``: from astropy.utils.iers import conf conf.auto_max_age = None """ MONTH_ABBR = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'] def download_file(*args, **kwargs): """ Overload astropy.utils.data.download_file within iers module to use a custom (longer) wait time. This just passes through ``*args`` and ``**kwargs`` after temporarily setting the download_file remote timeout to the local ``iers.conf.remote_timeout`` value. """ kwargs.setdefault('http_headers', {'User-Agent': 'astropy/iers', 'Accept': '*/*'}) with utils.data.conf.set_temp('remote_timeout', conf.remote_timeout): return utils.data.download_file(*args, **kwargs) def _none_to_float(value): """ Convert None to a valid floating point value. Especially for auto_max_age = None. """ return (value if value is not None else np.finfo(float).max) class IERSStaleWarning(AstropyWarning): pass class Conf(_config.ConfigNamespace): """ Configuration parameters for `astropy.utils.iers`. """ auto_download = _config.ConfigItem( True, 'Enable auto-downloading of the latest IERS data. If set to False ' 'then the local IERS-B file will be used by default (even if the ' 'full IERS file with predictions was already downloaded and cached). ' 'This parameter also controls whether internet resources will be ' 'queried to update the leap second table if the installed version is ' 'out of date. Default is True.') auto_max_age = _config.ConfigItem( 30.0, 'Maximum age (days) of predictive data before auto-downloading. ' 'See "Auto refresh behavior" in astropy.utils.iers documentation for details. ' 'Default is 30.') iers_auto_url = _config.ConfigItem( IERS_A_URL, 'URL for auto-downloading IERS file data.') iers_auto_url_mirror = _config.ConfigItem( IERS_A_URL_MIRROR, 'Mirror URL for auto-downloading IERS file data.') remote_timeout = _config.ConfigItem( 10.0, 'Remote timeout downloading IERS file data (seconds).') system_leap_second_file = _config.ConfigItem( '', 'System file with leap seconds.') iers_leap_second_auto_url = _config.ConfigItem( IERS_LEAP_SECOND_URL, 'URL for auto-downloading leap seconds.') ietf_leap_second_auto_url = _config.ConfigItem( IETF_LEAP_SECOND_URL, 'Alternate URL for auto-downloading leap seconds.') conf = Conf() class IERSRangeError(IndexError): """ Any error for when dates are outside of the valid range for IERS """ class IERS(QTable): """Generic IERS table class, defining interpolation functions. Sub-classed from `astropy.table.QTable`. The table should hold columns 'MJD', 'UT1_UTC', 'dX_2000A'/'dY_2000A', and 'PM_x'/'PM_y'. 
""" iers_table = None """Cached table, returned if ``open`` is called without arguments.""" @classmethod def open(cls, file=None, cache=False, **kwargs): """Open an IERS table, reading it from a file if not loaded before. Parameters ---------- file : str or None full local or network path to the ascii file holding IERS data, for passing on to the ``read`` class methods (further optional arguments that are available for some IERS subclasses can be added). If None, use the default location from the ``read`` class method. cache : bool Whether to use cache. Defaults to False, since IERS files are regularly updated. Returns ------- IERS An IERS table class instance Notes ----- On the first call in a session, the table will be memoized (in the ``iers_table`` class attribute), and further calls to ``open`` will return this stored table if ``file=None`` (the default). If a table needs to be re-read from disk, pass on an explicit file location or use the (sub-class) close method and re-open. If the location is a network location it is first downloaded via download_file. For the IERS class itself, an IERS_B sub-class instance is opened. """ if file is not None or cls.iers_table is None: if file is not None: if urlparse(file).netloc: kwargs.update(file=download_file(file, cache=cache)) else: kwargs.update(file=file) # TODO: the below is really ugly and probably a bad idea. Instead, # there should probably be an IERSBase class, which provides # useful methods but cannot really be used on its own, and then # *perhaps* an IERS class which provides best defaults. But for # backwards compatibility, we use the IERS_B reader for IERS here. if cls is IERS: cls.ie
zaycev/mokujin
mokujin/sourcesearch.py
Python
apache-2.0
9,946
0.001609
#!/usr/bin/env python # coding: utf-8 # Copyright (C) USC Information Sciences Institute # Author: Vladimir M. Zaytsev <zaytsev@usc.edu> # URL: <http://nlg.isi.edu/> # For more information, see README.md # For license information, see LICENSE import logging from mokujin.logicalform import POS from mokujin.index import REL_ID_MAP from mokujin.index import ID_REL_MAP from mokujin.index import REL_POS_MAP class PotentialSource(object): def __init__(self, source_id, triples): self.source_id = source_id self.triples = triples self.triples_count = -1 self.total_pattern_source_triple_freq = -1 self.total_pattern_target_triple_freq = -1 self.norm_source_freq = -1 self.norm_target_freq = -1 def calculate_freqs(self): self.triples_count = len(self.triples) self.total_pattern_source_triple_freq = 0 norm_source_freqs = [] norm_target_freqs = [] triples = [] for target_triple, source_triple, target_triple_pattern_freq in self.triples: source_triple_freq = source_triple[-1] target_triple_freq = target_triple[-1] self.total_pattern_source_triple_freq += source_triple_freq self.total_pattern_target_triple_freq += target_triple_freq patterns_freq = target_triple_pattern_freq + source_triple[-1] norm_source_freq = float(source_triple_freq) / float(patterns_freq) norm_target_freq = float(target_triple_freq) / float(patterns_freq) norm_source_freqs.append(norm_source_freq) norm_target_freqs.append(norm_target_freq) triples.append((source_triple, norm_source_freq)) self.norm_source_freq = sum(norm_source_freqs) self.norm_target_freq = sum(norm_target_freqs) self.triples = triples self.triples.sort(key=lambda triple: -triple[1]) class PatternSearchQuery(object): def __init__(self, key_term, seed_triple): self.seed_triple = seed_triple self.rel_type = seed_triple[0] self.arg_list = [] self.key_term = key_term for i in range(1, len(seed_triple) - 1): if seed_triple[i] != key_term and seed_triple[i] >= 0: self.arg_list.append((seed_triple[i], i)) else: self.key_term_i = i self.len_constraint_flt = lambda triple: len(triple) == len(self.seed_triple) self.self_filter = lambda triple: triple[self.key_term_i] != self.key_term def exact_pattern_match(self, triple): if len(self.seed_triple) != len(triple): return False for i in xrange(len(self.seed_triple)): if i != self.key_term_i and self.seed_triple[i] != triple[i]: return False return True def find_triples(self, engine, strict=True): triples = engine.search(rel_type=self.rel_type, arg_query=self.arg_list) triples = filter(self.self_filter, triples) if strict: triples = filter(self.len_constraint_flt, triples) triples = filter(self.exact_pattern_match, triples) return triples class TripleStoreExplorer(object): def __init__(self, search_engine, stop_terms=(), concept_net=()): self.engine = search_engine self.rel_id_map = REL_ID_MAP self.id_rel_map = ID_REL_MAP self.stop_terms = self.map_stop_terms(stop_terms) self.concept_net = self.map_concept_net(concept_net) def calc_term_triples_freq(self, term_id, threshold=0.0): triples_count = 0.0 triples_freq = 0.0 triples = self.engine.search(arg_query=(term_id,)) triples = filter(lambda tr: not self.is_light_triple(tr), triples) for triple in triples: triples_freq = triple[-1] if triples_freq > threshold: triples_count += 1 triples_freq += triple[-1] return triples_count, triples_freq def is_light_triple(self, triple): pos_tags = REL_POS_MAP[triple[0]] not_light = 0 for i in range(1, len(triple) - 1): if triple[i] not in self.stop_terms and pos_tags[i - 1] is not POS.PREP: not_light += 1 if not_light == 2: return False 
return True def find_triples_by_patterns(self, term_id, target_triples): siblings_dict = dict() siblings_num = 0 for target_triple in target_triples: query = PatternSearchQuery(term_id, target_triple) siblings = query.find_triples(self.engine, strict=False) siblings = filter(lambda tr: not self.is_light_triple(tr), siblings) siblings_num += len(siblings) pattern_freq = sum([triple[-1] for triple in siblings]) for sibling in siblings: source_id = sibling[query.key_term_i] if source_id >= 0: if source_id in siblings_dict: siblings_dict[source_id].append((target_triple, sibling, pattern_freq)) else: siblings_dict[source_id] = [(target_triple, sibling, pattern_freq)] return siblings_dict, siblings_num def map_stop_terms(self, stop_list_obj): stop_terms_ids = set() for term in stop_list_obj.stop_words: term_id = self.engine.term_id_map.get(term, -1) if term_id != -1: stop_terms_ids.add(term_id) logging.info("MAPPED %d/%d STOP TERMS" % (len(stop_terms_ids), len(stop_list_obj.stop_words))) for term in stop_list_obj.stop_words: term_id = self.engine.term_id_map.get(term, -1) # if term_id == -1: # logging.info("TERM NOT FOUND IN INDEX: %s" % term) stop_terms_ids.add(-1) return stop_terms_ids def map_concept_net(self, concept_net_obj): concept_net = dict() mapped = 0 for rel_type, arg1, arg2, pos in concept_net_obj.relations: arg_1_id = self.engine.term_id_map.get(arg1) arg_2_id = self.engine.term_id_map.get(arg2) if arg_1_id is not None and arg_2_id is not None: mapped += 1 if arg_1_id in concept_net: concept_net[arg_1_id].add(arg_2_id) else: concept_net[arg_1_id] = {arg_2_id} logging.info("USING %d RELATIONS FROM CONCEPT NET" % mapped) return concept_net def find_potential_sources(self, term, threshold=0): """ Find all potential sources for given target term and calculate their frequencies. """ target_term_id = self.engine.term_id_map.get(term) print "%r" % target_term_id, term if target_term_id is None: return None target_triples = self.engine.search(arg_query=(target_term_id,)) target_triples_num = len(target_triples) target_triples_freq = sum([target[-1] for target in target_triples]) print "\tTARGET: triples %d, frequency %d" % (target_triples_nu
m, target_triples_freq) print "\tFOUND TARGET TRIPLES FOR %s: %d" % (term, len(target_triples)) target_triples = filter(lambda s: s[-1] >= threshold, target_triples) print "\tAFTER FILTERING (f>=%f): %d" % (threshold, len(target_triples)) target_triples = filter(lambda tr: not self.is_light_triple(tr), target_triples) print "\tAFTER IGNORING LIGHT TRIPLES: %d" % len(target_triples) source_triples, source_triple_num =
self.find_triples_by_patterns(target_term_id, target_triples) print "\tFOUND SOURCE TRIPLES FOR %s: %d" % (term, source_triple_num) potential_sources = [] stops_ignored = 0 cnect_ignored = 0 for source_term_id, triples in source_triples.iteritems(): if source_term_id in self.stop_terms: stops_ignored += 1 continue if target_term_id in self.concept_net and source_term_id in self.concept_net[target_term_id]: cnect_ignored += 1 continue if
weiting-chen/manila
manila/scheduler/filters/capacity_filter.py
Python
apache-2.0
5,433
0
# Copyright (c) 2012 Intel # Copyright (c) 2012 OpenStack, LLC. # Copyright (c) 2015 EMC Corporation # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import math from oslo_log import log from manila.i18n import _LE from manila.i18n import _LW from manila.openstack.common.scheduler import filters LOG = log.getLogger(__name__) class CapacityFilter(filters.BaseHostFilter): """CapacityFilter filters based on share host's capacity utilization.""" def host_passes(self, host_state, filter_properties): """Return True if host has sufficient capacity.""" share_size = filter_properties.get('size') if host_state.free_capacity_gb is None: # Fail Safe LOG.error(_LE("Free capacity not set: " "share node info collection broken.")) return False free_space = host_state.free_capacity_gb total_space = host_state.total_capacity_gb reserved = float(host_state.reserved_percentage) / 100 if free_space in ('infinite', 'unknown'): # NOTE(zhiteng) for those back-ends cannot report actual # available capacity, we assume it is able to serve the # request. Even if it was not, the retry mechanism is # able to handle the failure by rescheduling return True elif total_space in ('infinite', 'unknown'): # NOTE(xyang): If total_space is 'infinite' or 'unknown' and # reserved is 0, we assume the back-ends can serve the request. # If total_space is 'infinite' or 'unknown' and reserved # is not 0, we cannot calculate the reserved space. # float(total_space) will throw an exception. total*reserved # also won't work. So the back-ends cannot serve the request. return reserved == 0 total = float(total_space) if total <= 0: LOG.warning(_LW("Insufficient free space for share creation. " "Total capacity is %(total).2f on host %(host)s."), {"total": total, "host": host_state.host}) return False # NOTE(xyang): Calculate how much free space is left after taking # into account the reserved space. free = math.floor(free_space - total * reserved) msg_args = {"host": host_state.host, "requested": share_size, "available": free} LOG.debug("Space information for share creation " "on host %(host)s (requested / avail): " "%(requested)s/%(available)s", msg_args) # NOTE(xyang): Only evaluate using max_over_subscription_ratio # if thin_provisioning_support is True. Check if the ratio of # provisioned capacity over total capacity would exceed # subscription ratio. # If max_over_subscription_ratio = 1, the provisioned_ratio # should still be limited by the max_over_subscription_ratio; # otherwise, it could result in infinite provisioning. if (host_state.thin_provisioning_support and host_state.max_over_subscription_ratio >= 1): provisioned_ratio = ((host_state.provisioned_capacity_gb + share_size) / total) if provisioned_ratio > host_state.max_over_subscription_ratio: LOG.warning(_LW( "Insufficient free space for thin provisioning. 
" "The ratio of provisioned capacity over total capacity " "%(provisioned_ratio).2f would exceed the maximum over " "subscription ratio %(oversub_ratio).2f on host " "%(host)s."), {"provisioned_ratio": provisioned_ratio, "oversub_ratio": host_state.max_over_subscription_ratio,
"host": host_state.host}) return False else: # NOTE(xyang): Adjust free_virtual calculation based on # free and max_over_subscription_ratio. adjusted_free_virtual = ( free * host_state.max_over_subscription_ratio) return adjusted_free_virtual >= share_size elif host_state.thin_provisioning_support: LOG.error(_LE("Invalid max_over_subscription_ratio: %(ratio)s. "
"Valid value should be >= 1."), {"ratio": host_state.max_over_subscription_ratio}) return False if free < share_size: LOG.warning(_LW("Insufficient free space for share creation " "on host %(host)s (requested / avail): " "%(requested)s/%(available)s"), msg_args) return False return True
catapult-project/catapult
telemetry/third_party/altgraph/altgraph/ObjectGraph.py
Python
bsd-3-clause
6,431
0.000622
""" altgraph.ObjectGraph - Graph of objects with an identifier ========================================================== A graph of objects that have a "graphident" attribute. graphide
nt is the key for the object in the graph """ from __future__ import print_function from __future__ import absolute_import from altgraph import GraphError from altgraph.Graph import Graph from altgraph.GraphUtil import filter_stack from six.moves import map class ObjectGraph(object): """ A graph of objects that have
a "graphident" attribute. graphident is the key for the object in the graph """ def __init__(self, graph=None, debug=0): if graph is None: graph = Graph() self.graphident = self self.graph = graph self.debug = debug self.indent = 0 graph.add_node(self, None) def __repr__(self): return '<%s>' % (type(self).__name__,) def flatten(self, condition=None, start=None): """ Iterate over the subgraph that is entirely reachable by condition starting from the given start node or the ObjectGraph root """ if start is None: start = self start = self.getRawIdent(start) return self.graph.iterdata(start=start, condition=condition) def nodes(self): for ident in self.graph: node = self.graph.node_data(ident) if node is not None: yield self.graph.node_data(ident) def get_edges(self, node): start = self.getRawIdent(node) _, _, outraw, incraw = self.graph.describe_node(start) def iter_edges(lst, n): seen = set() for tpl in (self.graph.describe_edge(e) for e in lst): ident = tpl[n] if ident not in seen: yield self.findNode(ident) seen.add(ident) return iter_edges(outraw, 3), iter_edges(incraw, 2) def edgeData(self, fromNode, toNode): start = self.getRawIdent(fromNode) stop = self.getRawIdent(toNode) edge = self.graph.edge_by_node(start, stop) return self.graph.edge_data(edge) def updateEdgeData(self, fromNode, toNode, edgeData): start = self.getRawIdent(fromNode) stop = self.getRawIdent(toNode) edge = self.graph.edge_by_node(start, stop) self.graph.update_edge_data(edge, edgeData) def filterStack(self, filters): """ Filter the ObjectGraph in-place by removing all edges to nodes that do not match every filter in the given filter list Returns a tuple containing the number of: (nodes_visited, nodes_removed, nodes_orphaned) """ visited, removes, orphans = filter_stack(self.graph, self, filters) for last_good, tail in orphans: self.graph.add_edge(last_good, tail, edge_data='orphan') for node in removes: self.graph.hide_node(node) return len(visited)-1, len(removes), len(orphans) def removeNode(self, node): """ Remove the given node from the graph if it exists """ ident = self.getIdent(node) if ident is not None: self.graph.hide_node(ident) def removeReference(self, fromnode, tonode): """ Remove all edges from fromnode to tonode """ if fromnode is None: fromnode = self fromident = self.getIdent(fromnode) toident = self.getIdent(tonode) if fromident is not None and toident is not None: while True: edge = self.graph.edge_by_node(fromident, toident) if edge is None: break self.graph.hide_edge(edge) def getIdent(self, node): """ Get the graph identifier for a node """ ident = self.getRawIdent(node) if ident is not None: return ident node = self.findNode(node) if node is None: return None return node.graphident def getRawIdent(self, node): """ Get the identifier for a node object """ if node is self: return node ident = getattr(node, 'graphident', None) return ident def __contains__(self, node): return self.findNode(node) is not None def findNode(self, node): """ Find the node on the graph """ ident = self.getRawIdent(node) if ident is None: ident = node try: return self.graph.node_data(ident) except KeyError: return None def addNode(self, node): """ Add a node to the graph referenced by the root """ self.msg(4, "addNode", node) try: self.graph.restore_node(node.graphident) except GraphError: self.graph.add_node(node.graphident, node) def createReference(self, fromnode, tonode, edge_data=None): """ Create a reference from fromnode to tonode """ if fromnode is None: fromnode = self fromident, toident = 
self.getIdent(fromnode), self.getIdent(tonode) if fromident is None or toident is None: return self.msg(4, "createReference", fromnode, tonode, edge_data) self.graph.add_edge(fromident, toident, edge_data=edge_data) def createNode(self, cls, name, *args, **kw): """ Add a node of type cls to the graph if it does not already exist by the given name """ m = self.findNode(name) if m is None: m = cls(name, *args, **kw) self.addNode(m) return m def msg(self, level, s, *args): """ Print a debug message with the given level """ if s and level <= self.debug: print("%s%s %s" % (" " * self.indent, s, ' '.join(map(repr, args)))) def msgin(self, level, s, *args): """ Print a debug message and indent """ if level <= self.debug: self.msg(level, s, *args) self.indent = self.indent + 1 def msgout(self, level, s, *args): """ Dedent and print a debug message """ if level <= self.debug: self.indent = self.indent - 1 self.msg(level, s, *args)
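A minimal usage sketch of the class above, assuming the altgraph package (and its six dependency) is importable; Node is a made-up stand-in for any object carrying a graphident attribute:

from altgraph.ObjectGraph import ObjectGraph

class Node(object):
    def __init__(self, name):
        self.graphident = name   # the key ObjectGraph uses to index the node

og = ObjectGraph()
a = og.createNode(Node, "a")
b = og.createNode(Node, "b")
og.createReference(a, b, edge_data="uses")
assert og.findNode("b") is b
assert og.edgeData(a, b) == "uses"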
harisbal/pandas
pandas/tests/extension/base/__init__.py
Python
bsd-3-clause
2,015
0
"""Base test suite for extension arrays. These tests are intended for third-party libraries to subclass to validate that their extension arrays and dtypes satisfy the interface. Moving or renaming the tests should not be done lightly. Libraries are expected to implement a few pytest fixtures to provide data for the tests. The fixtures may be located in either * The same module as your test class. * A ``conftest.py`` in the same directory as your test class. The full list of fixtures may be found in the ``conftest.py`` next to this file. .. code-block:: python import pytest from pandas.tests.extension.base import BaseDtypeTests @pytest.fixture def dtype(): return MyDtype() class TestMyDtype(BaseDtypeTests): pa
ss Your class ``TestDtype`` will inherit all the tests defined on ``BaseDtypeTests``. pytest's fixture discover will supply your ``dtype`` wherever the test requires it. You're free to implement additional tests. All the tests in these modules use ``self.assert_frame_equal`` or ``self.assert_series_equal`` for dataframe or series comparisons. By default, they use the usual ``pandas.testing.assert_fram
e_equal`` and ``pandas.testing.assert_series_equal``. You can override the checks used by defining the staticmethods ``assert_frame_equal`` and ``assert_series_equal`` on your base test class. """ from .casting import BaseCastingTests # noqa from .constructors import BaseConstructorsTests # noqa from .dtype import BaseDtypeTests # noqa from .getitem import BaseGetitemTests # noqa from .groupby import BaseGroupbyTests # noqa from .interface import BaseInterfaceTests # noqa from .methods import BaseMethodsTests # noqa from .ops import BaseArithmeticOpsTests, BaseComparisonOpsTests, BaseOpsUtil # noqa from .reduce import BaseNoReduceTests, BaseNumericReduceTests, BaseBooleanReduceTests # noqa from .missing import BaseMissingTests # noqa from .reshaping import BaseReshapingTests # noqa from .setitem import BaseSetitemTests # noqa
shingonoide/odoo_ezdoo
addons/http_session_redis/__init__.py
Python
agpl-3.0
1,038
0.000963
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # This module copyright (C) 2015
BarraDev Consulting (<http://www.barradev.com>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WI
THOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from . import http_session_redis
394954369/horizon
openstack_dashboard/api/cinder.py
Python
apache-2.0
13,116
0
# Copyright 2012 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Copyright 2012 OpenStack Foundation # Copyright 2012 Nebula, Inc. # Copyright (c) 2012 X.commerce, a business unit of eBay Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations #
under the License. from __future__ import absolute_import import logging from django.conf import settings from
django.utils.translation import ugettext_lazy as _ from cinderclient.v1.contrib import list_extensions as cinder_list_extensions from horizon import exceptions from horizon.utils.memoized import memoized # noqa from openstack_dashboard.api import base from openstack_dashboard.api import nova LOG = logging.getLogger(__name__) # API static values VOLUME_STATE_AVAILABLE = "available" DEFAULT_QUOTA_NAME = 'default' VERSIONS = base.APIVersionManager("volume", preferred_version=1) try: from cinderclient.v1 import client as cinder_client_v1 VERSIONS.load_supported_version(1, {"client": cinder_client_v1, "version": 1}) except ImportError: pass try: from cinderclient.v2 import client as cinder_client_v2 VERSIONS.load_supported_version(2, {"client": cinder_client_v2, "version": 2}) except ImportError: pass class BaseCinderAPIResourceWrapper(base.APIResourceWrapper): @property def name(self): # If a volume doesn't have a name, use its id. return (getattr(self._apiresource, 'name', None) or getattr(self._apiresource, 'display_name', None) or getattr(self._apiresource, 'id', None)) @property def description(self): return (getattr(self._apiresource, 'description', None) or getattr(self._apiresource, 'display_description', None)) class Volume(BaseCinderAPIResourceWrapper): _attrs = ['id', 'name', 'description', 'size', 'status', 'created_at', 'volume_type', 'availability_zone', 'imageRef', 'bootable', 'snapshot_id', 'source_volid', 'attachments', 'tenant_name', 'os-vol-host-attr:host', 'os-vol-tenant-attr:tenant_id', 'metadata', 'volume_image_metadata', 'encrypted'] @property def is_bootable(self): return self.bootable == 'true' class VolumeSnapshot(BaseCinderAPIResourceWrapper): _attrs = ['id', 'name', 'description', 'size', 'status', 'created_at', 'volume_id', 'os-extended-snapshot-attributes:project_id'] class VolumeBackup(BaseCinderAPIResourceWrapper): _attrs = ['id', 'name', 'description', 'container', 'size', 'status', 'created_at', 'volume_id', 'availability_zone'] _volume = None @property def volume(self): return self._volume @volume.setter def volume(self, value): self._volume = value class VolTypeExtraSpec(object): def __init__(self, type_id, key, val): self.type_id = type_id self.id = key self.key = key self.value = val def cinderclient(request): api_version = VERSIONS.get_active_version() insecure = getattr(settings, 'OPENSTACK_SSL_NO_VERIFY', False) cacert = getattr(settings, 'OPENSTACK_SSL_CACERT', None) cinder_url = "" try: # The cinder client assumes that the v2 endpoint type will be # 'volumev2'. However it also allows 'volume' type as a # fallback if the requested version is 2 and there is no # 'volumev2' endpoint. if api_version['version'] == 2: try: cinder_url = base.url_for(request, 'volumev2') except exceptions.ServiceCatalogException: LOG.warning("Cinder v2 requested but no 'volumev2' service " "type available in Keystone catalog. 
Falling back " "to 'volume'.") if cinder_url == "": cinder_url = base.url_for(request, 'volume') except exceptions.ServiceCatalogException: LOG.debug('no volume service configured.') raise LOG.debug('cinderclient connection created using token "%s" and url "%s"' % (request.user.token.id, cinder_url)) c = api_version['client'].Client(request.user.username, request.user.token.id, project_id=request.user.tenant_id, auth_url=cinder_url, insecure=insecure, cacert=cacert, http_log_debug=settings.DEBUG) c.client.auth_token = request.user.token.id c.client.management_url = cinder_url return c def _replace_v2_parameters(data): if VERSIONS.active < 2: data['display_name'] = data['name'] data['display_description'] = data['description'] del data['name'] del data['description'] return data def volume_list(request, search_opts=None): """To see all volumes in the cloud as an admin you can pass in a special search option: {'all_tenants': 1} """ c_client = cinderclient(request) if c_client is None: return [] return [Volume(v) for v in c_client.volumes.list(search_opts=search_opts)] def volume_get(request, volume_id): volume_data = cinderclient(request).volumes.get(volume_id) for attachment in volume_data.attachments: if "server_id" in attachment: instance = nova.server_get(request, attachment['server_id']) attachment['instance_name'] = instance.name else: # Nova volume can occasionally send back error'd attachments # the lack a server_id property; to work around that we'll # give the attached instance a generic name. attachment['instance_name'] = _("Unknown instance") return Volume(volume_data) def volume_create(request, size, name, description, volume_type, snapshot_id=None, metadata=None, image_id=None, availability_zone=None, source_volid=None): data = {'name': name, 'description': description, 'volume_type': volume_type, 'snapshot_id': snapshot_id, 'metadata': metadata, 'imageRef': image_id, 'availability_zone': availability_zone, 'source_volid': source_volid} data = _replace_v2_parameters(data) volume = cinderclient(request).volumes.create(size, **data) return Volume(volume) def volume_extend(request, volume_id, new_size): return cinderclient(request).volumes.extend(volume_id, new_size) def volume_delete(request, volume_id): return cinderclient(request).volumes.delete(volume_id) def volume_update(request, volume_id, name, description): vol_data = {'name': name, 'description': description} vol_data = _replace_v2_parameters(vol_data) return cinderclient(request).volumes.update(volume_id, **vol_data) def volume_reset_state(request, volume_id, state): return cinderclient(request).volumes.reset_state(volume_id, state) def volume_snapshot_get(request, snapshot_id): snapshot = cinderclient(request).volume_snapshots.get(snapshot_id) return VolumeSnapshot(snapshot) def volume_snapshot_list(request, search_opts=None): c_client = cinderclient(request) if c_client is None: return [] return [VolumeSnapshot(s) for s in c_client.volume_snapshots.list( search_
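A standalone restatement of what _replace_v2_parameters above does when the active API version is 1 (illustrative; the helper name here is hypothetical):

def replace_v2_parameters(data, active_version):
    # cinder v1 expects display_name/display_description instead of name/description
    if active_version < 2:
        data['display_name'] = data.pop('name')
        data['display_description'] = data.pop('description')
    return data

assert replace_v2_parameters({'name': 'vol1', 'description': 'demo'}, 1) == \
    {'display_name': 'vol1', 'display_description': 'demo'}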
derekjchow/models
research/object_detection/builders/model_builder.py
Python
apache-2.0
23,976
0.005172
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """A function to build a DetectionModel from configuration.""" import functools from object_detection.builders import anchor_generator_builder from object_detection.builders import box_coder_builder from object_detection.builders import box_predictor_builder from object_detection.builders import hyperparams_builder from object_detection.builders import image_resizer_builder from object_detection.builders import losses_builder from object_detection.builders import matcher_builder from object_detection.builders import post_processing_builder from object_detection.builders import region_similarity_calculator_builder as sim_calc from object_detection.core import balanced_positive_negative_sampler as sampler from object_detection.core import post_processing from object_detection.core import target_assigner from object_detection.meta_architectures import faster_rcnn_meta_arch from object_detection.meta_architectures import rfcn_meta_arch from object_detection.meta_architectures import ssd_meta_arch from object_detection.models import faster_rcnn_inception_resnet_v2_feature_extractor as frcnn_inc_res from object_detection.models import faster_rcnn_inception_v2_feature_extractor as frcnn_inc_v2 from object_detection.models import faster_rcnn_nas_feature_extractor as frcnn_nas from object_detection.models import faster_rcnn_pnas_feature_extractor as frcnn_pnas from object_detection.models import faster_rcnn_resnet_v1_feature_extractor as frcnn_resnet_v1 from object_detection.models import ssd_resnet_v1_fpn_feature_extractor as ssd_resnet_v1_fpn from object_detection.models import ssd_resnet_v1_ppn_feature_extractor as ssd_resnet_v1_ppn from object_detection.models.embedded_ssd_mobilenet_v1_feature_extractor import EmbeddedSSDMobileNetV1FeatureExtractor from object_detection.models.ssd_inception_v2_feature_extractor import SSDInceptionV2FeatureExtractor from object_detection.models.ssd_inception_v3_feature_extractor import SSDInceptionV3FeatureExtractor from object_detection.models.ssd_mobilenet_v1_feature_extractor import SSDMobileNetV1FeatureExtractor from object_detection.models.ssd_mobilenet_v1_fpn_feature_extractor import SSDMobileNetV1FpnFeatureExtractor from object_detection.models.ssd_mobilenet_v1_keras_feature_extractor import SSDMobileNetV1KerasFeatureExtractor from object_detection.models.ssd_mobilenet_v1_ppn_feature_extractor import SSDMobileNetV1PpnFeatureExtractor from object_detection.models.ssd_mobilenet_v2_feature_extractor import SSDMobileNetV2FeatureExtractor from object_detection.models.ssd_mobilenet_v2_fpn_feature_extractor import SSDMobileNetV2FpnFeatureExtractor from object_detection.models.ssd_mobilenet_v2_keras_feature_extractor import SSDMobileNetV2KerasFeatureExtractor from object_detection.models.ssd_pnasnet_feature_extractor import SSDPNASNetFeatureExtractor from object_detection.predictors import 
rfcn_box_predictor from object_detection.predictors.heads import mask_head from object_detection.protos import model_pb2 from object_detection.utils import ops # A map of names to SSD feature extractors. SSD_FEATURE_EXTRACTOR_CLASS_MAP = { 'ssd_inception_v2': SSDInceptionV2FeatureExtractor, 'ssd_inception_v3': SSDInceptionV3FeatureExtractor, 'ssd_mobilenet_v1': SSDMobileNetV1FeatureExtractor, 'ssd_mobilenet_v1_fpn': SSDMobileNetV1FpnFeatureExtractor, 'ssd_mobilenet_v1_ppn': SSDMobileNetV1PpnFeatureExtractor, 'ssd_mobilenet_v2': SSDMobileNetV2FeatureExtractor, 'ssd_mobilenet_v2_fpn': SSDMobileNetV2FpnFeatureExtractor, 'ssd_resnet50_v1_fpn': ssd_resnet_v1_fpn.SSDResnet50V1FpnFeatureExtractor, 'ssd_resnet101_v1_fpn': ssd_resnet_v1_fpn.SSDResnet101V1FpnFeatureExtractor, 'ssd_resnet152_v1_fpn': ssd_resnet_v1_fpn.SSDResnet152V1FpnFeatureExtractor, 'ssd_resnet50_v1_ppn': ssd_resnet_v1_ppn.SSDResnet50V1PpnFeatureExtractor, 'ssd_resnet101_v1_ppn': ssd_resnet_v1_ppn.SSDResnet101V1PpnFeatureExtractor, 'ssd_resnet152_v1_ppn': ssd_resnet_v1_ppn.SSDResnet152V1PpnFeatureExtractor, 'embedded_ssd_mobilenet_v1': EmbeddedSSDMobileNetV1FeatureExtractor, 'ssd_pnasnet': SSDPNASNetFeatureExtractor, } SSD_KERAS_FEATURE_EXTRACTOR_CLASS_MAP = { 'ssd_mobilenet_v1_keras': SSDMobileNetV1KerasFeatureExtractor, 'ssd_mobilenet_v2_keras': SSDMobileNetV2KerasFeatureExtractor } # A map of names to Faster R-CNN feature extractors. FASTER_RCNN_FEATURE_EXTRACTOR_CLASS_MAP = { 'faster_rcnn_nas': frcnn_nas.FasterRCNNNASFeatureExtractor, 'faster_rcnn_pnas': frcnn_pnas.FasterRCNNPNASFeatureExtractor, 'faster_rcnn_inception_resnet_v2': frcnn_inc_res.FasterRCNNInceptionResnetV2FeatureExtractor, 'faster_rcnn_inception_v2': frcnn_inc_v2.FasterRCNNInceptionV2FeatureExtractor, 'faster_rcnn_resnet50': frcnn_resnet_v1.FasterRCNNResnet50FeatureExtractor, 'fast
er_rcnn_resnet101': frcnn_resnet_v1.FasterRCNNResnet101FeatureExtractor, 'faster_rcnn_resnet152': frcnn_resnet_v1.FasterRCNNResnet152FeatureExtractor, } def build(model_config, is_training, add_summaries=True): """Builds a DetectionModel based on the model config. Args: model_config: A model.
proto object containing the config for the desired DetectionModel. is_training: True if this model is being built for training purposes. add_summaries: Whether to add tensorflow summaries in the model graph. Returns: DetectionModel based on the config. Raises: ValueError: On invalid meta architecture or model. """ if not isinstance(model_config, model_pb2.DetectionModel): raise ValueError('model_config not of type model_pb2.DetectionModel.') meta_architecture = model_config.WhichOneof('model') if meta_architecture == 'ssd': return _build_ssd_model(model_config.ssd, is_training, add_summaries) if meta_architecture == 'faster_rcnn': return _build_faster_rcnn_model(model_config.faster_rcnn, is_training, add_summaries) raise ValueError('Unknown meta architecture: {}'.format(meta_architecture)) def _build_ssd_feature_extractor(feature_extractor_config, is_training, freeze_batchnorm, reuse_weights=None): """Builds a ssd_meta_arch.SSDFeatureExtractor based on config. Args: feature_extractor_config: A SSDFeatureExtractor proto config from ssd.proto. is_training: True if this feature extractor is being built for training. freeze_batchnorm: Whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. reuse_weights: if the feature extractor should reuse weights. Returns: ssd_meta_arch.SSDFeatureExtractor based on config. Raises: ValueError: On invalid feature extractor type. """ feature_type = feature_extractor_config.type is_keras_extractor = feature_type in SSD_KERAS_FEATURE_EXTRACTOR_CLASS_MAP depth_multiplier = feature_extractor_config.depth_multiplier min_depth = feature_extractor_config.min_depth pad_to_multiple = feature_extractor_config.pad_to_multiple use_explicit_padding = feature_extractor_config.use_explicit_padding use_depthwise = feature_extractor_config.use_depthwise if is_keras_extractor: conv_hyperparams = hyperparams_builder.KerasLayerHyperparams( feature_extractor_config.conv_hyperparam
rnirmal/openstack-dashboard
django-openstack/django_openstack/middleware/keystone.py
Python
apache-2.0
2,747
0.000364
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2011 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Copyright 2011 Nebula, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicab
le law or agreed to in writing, software # distributed under the License is d
istributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from django.contrib import messages from django import shortcuts import openstackx import openstack class User(object): def __init__(self, token=None, user=None, tenant_id=None, admin=None, service_catalog=None, tenant_name=None): self.token = token self.username = user self.tenant_id = tenant_id self.tenant_name = tenant_name self.admin = admin self.service_catalog = service_catalog def is_authenticated(self): # TODO: deal with token expiration return self.token def is_admin(self): return self.admin def get_user_from_request(request): if 'user' not in request.session: return User() return User(token=request.session['token'], user=request.session['user'], tenant_id=request.session['tenant_id'], tenant_name=request.session['tenant'], admin=request.session['admin'], service_catalog=request.session['serviceCatalog']) class LazyUser(object): def __get__(self, request, obj_type=None): if not hasattr(request, '_cached_user'): request._cached_user = get_user_from_request(request) return request._cached_user class AuthenticationMiddleware(object): def process_request(self, request): request.__class__.user = LazyUser() def process_exception(self, request, exception): if type(exception) in [openstack.compute.exceptions.Forbidden, openstackx.api.exceptions.Forbidden]: # flush other error messages, which are collateral damage # when our token expires for message in messages.get_messages(request): pass messages.error(request, 'Your token has expired.\ Please log in again') return shortcuts.redirect('/auth/logout')
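The LazyUser descriptor above is a small lazy-attribute pattern: the user object is built on first access and cached on the request. A self-contained sketch of the same idea (class names here are illustrative, not the Django/keystone API):

class LazyAttr(object):
    def __get__(self, request, obj_type=None):
        if not hasattr(request, '_cached_user'):
            request._cached_user = object()   # stands in for get_user_from_request(request)
        return request._cached_user

class Request(object):
    user = LazyAttr()

r = Request()
assert r.user is r.user   # computed on first access, cached afterwards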
vnsofthe/odoo-dev
addons/ebiz_cn/ebiz.py
Python
agpl-3.0
45,688
0.013284
# -*- encoding: utf-8 -*- import time import logging from openerp import tools from dateutil.relativedelta import relativedelta from datetime import datetime, timedelta from openerp.tools.translate import _ from openerp.osv import fields,osv import json import hashlib from openerp.addons.ebiz_cn.top import setDefaultAppInfo from openerp.addons.ebiz_cn.top.api.
rest import ItemsOnsaleGe
tRequest from openerp.addons.ebiz_cn.top.api.rest import TradesSoldIncrementGetRequest from openerp.addons.ebiz_cn.top.api.rest import ItemSkusGetRequest from openerp.addons.ebiz_cn.top.api.rest import TradesSoldGetRequest from openerp.addons.ebiz_cn.top.api.rest import TradeGetRequest from openerp.addons.ebiz_cn.top.api.rest import TradeFullinfoGetRequest from openerp.addons.ebiz_cn.top.api.rest import AlipayUserAccountreportGetRequest from openerp.addons.ebiz_cn.top.api.rest import ItemQuantityUpdateRequest from openerp.addons.ebiz_cn.top.api.rest import LogisticsOfflineSendRequest _logger = logging.getLogger(__name__) class ebiz_shop(osv.osv): _name = 'ebiz.shop' _description = u"电商店铺" def _ebiz_platform(self, cr, uid, context=None): return self.get_platforms(cr, uid, context = context) _columns = { 'name': fields.char(u'店铺名称', size=16, required=True), 'code': fields.char(u'店铺前缀', size=8, required=True, help = u"系统会自动给该店铺的订单编号、客户昵称加上此前缀。通常同一个平台的店铺,前缀设置成一样"), 'platform': fields.selection(_ebiz_platform, u'电商平台', required=True, help = u"淘宝、京东等电商平台" ), 'categ_id': fields.many2one('product.category', string=u"商品默认分类", required=True), 'warehouse_id': fields.many2one('stock.warehouse', string=u"店铺仓", required=True), 'journal_id': fields.many2one('account.journal', string=u"默认销售账簿", required=True), 'post_product_id': fields.many2one('product.product', string=u"邮费", required=True), 'coupon_product_id': fields.many2one('product.product', string=u"优惠减款", required=True), 'gift_product_id': fields.many2one('product.product', string=u"赠品", ), 'appkey': fields.char(u'App Key', ), 'appsecret': fields.char(u'App Secret', ), 'sessionkey': fields.char(u'Session Key', ), 'apiurl': fields.char(u'API URL', ), 'authurl': fields.char(u'Auth URL', ), 'tokenurl': fields.char(u'Token URL', ), } def get_platforms(self, cr, uid, context=None): platforms = [('tb', u'淘宝天猫'), ('sb', u'淘宝沙箱'),] return platforms def search_product(self, cr, uid, ids, product_name = None, start_modified = None, end_modified = None, context=None): """ 1) 按商品名称,商品修改时间搜索店铺商品 2) start_modified、end_modified 都是UTC时间,需要加上8小时传给电商平台 """ shop_id = self.browse(cr, uid, ids[0], context= context) setDefaultAppInfo(shop_id.appkey, shop_id.appsecret) req = ItemsOnsaleGetRequest(shop_id.apiurl, 80) req.fields="approve_status,num_iid,title,nick, outer_id, modified" if product_name: req.q = product_name if start_modified: start_modified = (datetime.strptime(str(start_modified),'%Y-%m-%d %H:%M:%S',) + timedelta(hours=8)).strftime('%Y-%m-%d %H:%M:%S') req.start_modified = start_modified if end_modified: end_modified = (datetime.strptime(str(end_modified),'%Y-%m-%d %H:%M:%S',) + timedelta(hours=8)).strftime('%Y-%m-%d %H:%M:%S') req.end_modified = end_modified req.page_no = 1 req.page_size = 100 total_get = 0 total_results = 100 res = [] while total_get < total_results: resp= req.getResponse(shop_id.sessionkey) total_results = resp.get('items_onsale_get_response').get('total_results') if total_results > 0: res += resp.get('items_onsale_get_response').get('items').get('item') total_get += req.page_size req.page_no = req.page_no + 1 # # 时间需要减去8小时 for r in res: r['modified'] = (datetime.strptime(r['modified'],'%Y-%m-%d %H:%M:%S',) - timedelta(hours=8)).strftime('%Y-%m-%d %H:%M:%S') return res def create_product(self, cr, uid, product_vals, context = None): """ 1) 创建product.template 2) 如果商品有SKU,创建product.attribute, product.attribute.value,product.attribute.line 3) 创建product.product 4) 电商商品、SKU和ERP product.template、product.product的对应关系: 
如果没有SKU,则一个商品对应一个product.template、一个product.product,其中商品数字编码填入 product.template的num_iid,商家外部编码填入product.product的default_code,如果没有商家外部编码,则将num_iid填入default_code 如果有SKU,则一个商品对应一个product.template,其中商品数字编码填入product.template的num_iid。每个SKU对应一个product.product,SKU的商家外部编码填入product.product的default_code,SKU的sku_id填入product.product的sku_id """ def get_sku_properties(properties_name ): """SKU属性值格式 20000:3275069:品牌:盈讯;1753146:3485013:型号:F908;-1234:-5678:自定义属性1:属性值1 返回结果 {'品牌':盈讯, '型号':F908, '自定义属性1':属性值1} """ res = {} try: for vals in properties_name.split(';'): v = vals.split(':') res.update({v[2]: v[3] } ) except Exception, e: pass return res product_res = [] #创建Product Template vals_template = { 'name': product_vals['name'], 'num_iid': str(product_vals['num_iid']), 'type': product_vals['type'], 'categ_id': product_vals['categ_id'], 'cost_method': 'real', 'standard_price': 1.0, } skus = product_vals.get('sku', False) if not skus: vals_template.update({'default_code': product_vals['default_code'] } ) prt_ids = self.pool.get('product.product').create(cr, uid, vals_template, context = context) return [prt_ids] template_ids = self.pool.get('product.template').search(cr, uid, [('num_iid', '=', str(product_vals['num_iid']) )], context=context) if not template_ids: template_ids = self.pool.get('product.template').create(cr, uid, vals_template, context = context) else: template_ids = template_ids[0] #处理商品SKU attr_lines = {} for sku in skus: #创建 product.product prt_vals = { 'default_code': sku['outer_id'], 'sku_id': str(sku['sku_id']), 'product_tmpl_id': template_ids, 'attribute_value_ids': [], } #创建属性和属性值 product.attribute, product.attribute.value, #处理product.template上字段attribute_line_ids,对象product.attribute.line #处理product.product上字段attribute_value_ids properties = get_sku_properties(sku['properties_name'] ) for k in properties: attr_ids = self.pool.get('product.attribute').search(cr, uid, [('name', '=', k)], context = context) if attr_ids: attr_ids = attr_ids[0] else: attr_ids = self.pool.get('product.attribute').create(cr, uid, {'name': k }, context = context) attr_val_ids = self.pool.get('product.attribute.value').search(cr, uid, [('name', '=', properties[k]), ('attribute_id', '=', attr_ids)], context = context) if attr_val_ids: attr_val_ids = attr_val_ids[0] else: attr_val_ids = self.pool.get('product.attribute.value').create(cr, uid, {'name': properties[k], 'attribute_id': attr_ids }, context = context) prt_vals['attribute_value_ids'].append( (4, attr_val_ids) ) if attr_ids not in attr_lines: attr_lines[attr_ids] = {attr_val_ids: True} else: attr_lines[attr_ids][attr_val_ids] = True #创建product.product prt_domain = [] if prt_vals['default_code']: prt_domain = [ ('default_code', '=', prt_vals['default_code']) ]
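The SKU mapping described above hinges on parsing properties_name strings of the form shown in get_sku_properties; a standalone restatement of that parse (illustrative only, function name is made up):

def parse_sku_properties(properties_name):
    # "pid:vid:attribute-name:attribute-value;..." -> {attribute-name: attribute-value}
    res = {}
    for vals in properties_name.split(';'):
        parts = vals.split(':')
        if len(parts) >= 4:
            res[parts[2]] = parts[3]
    return res

assert parse_sku_properties(u"20000:3275069:品牌:盈讯;1753146:3485013:型号:F908") == \
    {u"品牌": u"盈讯", u"型号": u"F908"}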
pystorm/pystorm
pystorm/serializers/json_serializer.py
Python
apache-2.0
3,417
0.000585
"""JSON implementation of pystorm serializer""" from __future__ import absolute_import, print_function, unicode_literals import io import logging import simplejson as json from six import PY2 from ..exceptions import StormWentAwayError from .serializer import Serializer log = logging.getLogger(__name__) class JSONSerializer(Serializer): def __init__(self, input_stream, output_stream, reader_lock, writer_lock): super(JSONSerializer, self).__init__(
input_stream, output_stream, reader_lock, writer_lock ) self.input_stream = self._wrap_stream(input_stream) self.output_stream = self._wrap_stream(output_stream) @staticmethod def _wrap_stream(stream): """Returns a TextIOWrapper around the given stream that handles UTF-8 encoding/decoding. """ if hasattr(stream, "bu
ffer"): return io.TextIOWrapper(stream.buffer, encoding="utf-8") elif hasattr(stream, "readable"): return io.TextIOWrapper(stream, encoding="utf-8") # Python 2.x stdin and stdout are just files else: return io.open(stream.fileno(), mode=stream.mode, encoding="utf-8") def read_message(self): """The Storm multilang protocol consists of JSON messages followed by a newline and "end\n". All of Storm's messages (for either bolts or spouts) should be of the form:: '<command or task_id form prior emit>\\nend\\n' Command example, an incoming Tuple to a bolt:: '{ "id": "-6955786537413359385", "comp": "1", "stream": "1", "task": 9, "tuple": ["snow white and the seven dwarfs", "field2", 3]}\\nend\\n' Command example for a spout to emit its next Tuple:: '{"command": "next"}\\nend\\n' Example, the task IDs a prior emit was sent to:: '[12, 22, 24]\\nend\\n' The edge case of where we read ``''`` from ``input_stream`` indicating EOF, usually means that communication with the supervisor has been severed. """ msg = "" num_blank_lines = 0 while True: # readline will return trailing \n so that output is unambigious, we # should only have line == '' if we're at EOF with self._reader_lock: line = self.input_stream.readline() if line == "end\n": break elif line == "": raise StormWentAwayError() elif line == "\n": num_blank_lines += 1 if num_blank_lines % 1000 == 0: log.warn( "While trying to read a command or pending task " "ID, Storm has instead sent %s '\\n' messages.", num_blank_lines, ) continue msg = "{}{}\n".format(msg, line[0:-1]) try: return json.loads(msg) except Exception: log.error("JSON decode error for message: %r", msg, exc_info=True) raise def serialize_dict(self, msg_dict): """Serialize to JSON a message dictionary.""" serialized = json.dumps(msg_dict, namedtuple_as_object=False) if PY2: serialized = serialized.decode("utf-8") serialized = "{}\nend\n".format(serialized) return serialized
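The read_message docstring above describes Storm's multilang framing: a JSON payload spread over one or more lines, terminated by a line containing only "end". A simplified, self-contained sketch of reading one such frame from a text stream; it is not pystorm's actual reader (no reader lock, no blank-line keepalive handling, and a plain EOFError instead of StormWentAwayError):

import io
import json

def read_multilang_frame(stream):
    lines = []
    while True:
        line = stream.readline()
        if line == '':        # EOF: the peer went away
            raise EOFError('stream closed before "end" marker')
        if line == 'end\n':   # frame terminator
            break
        lines.append(line)
    return json.loads(''.join(lines))

if __name__ == '__main__':
    frame = io.StringIO('{"command": "next"}\nend\n')
    print(read_multilang_frame(frame))  # {'command': 'next'}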
CYBAI/servo
tests/wpt/web-platform-tests/tools/wpt/tests/test_run.py
Python
mpl-2.0
1,969
0
import tempfile import shutil import sys from unittest import mock import pytest from tools.wpt import run from tools import localpaths # noqa: F401 from wptrunner.browsers import product_list @pytest.fixture(scope="module") def venv(): from tools.wpt import virtualenv class Virtualenv(virtualenv.Virtualenv): def __init__(self): self.path = tempfile.mkdtemp() self.skip_virtualenv_setup = False def create(self): return def activate(self): return def start(self): return def install(self, *requirements): return def install_requirements(self, requirements_path): return venv = Virtualenv() yield venv shutil.rmtree(venv.path) @pytest.fixture(scope="module") def logger(): run.setup_logging({}) @pytest.mark.parametrize("platform", ["Windows", "Linux", "Darwin"]) d
ef test_check_environ_fail(platform): m_open = mock.mock_open(read_data=b"") with mock.patch.object(run, "open", m_open): with mock.patch.object(run.platform, "uname", return_value=(platform, "", "", "", "", "")):
with pytest.raises(run.WptrunError) as excinfo: run.check_environ("foo") assert "wpt make-hosts-file" in str(excinfo.value) @pytest.mark.parametrize("product", product_list) def test_setup_wptrunner(venv, logger, product): if product == "firefox_android": pytest.skip("Android emulator doesn't work on docker") parser = run.create_parser() kwargs = vars(parser.parse_args(["--channel=nightly", product])) kwargs["prompt"] = False # Hack to get a real existing path kwargs["binary"] = sys.argv[0] kwargs["webdriver_binary"] = sys.argv[0] if kwargs["product"] == "sauce": kwargs["sauce_browser"] = "firefox" kwargs["sauce_version"] = "63" run.setup_wptrunner(venv, **kwargs)
walkover/auto-tracking-cctv-gateway
gateway/firebase/fcm.py
Python
mit
1,072
0.000933
import logging import sqlite3 from pyfcm import FCMNotification def insert_token(token): try: con = sqlite3.connect('fcm.db') cur = con.cursor() cur.execute('CREATE TABLE IF NOT EXISTS tokens(token TEXT)') cur.execute('INSERT INTO tokens VALUES (?)', (token, )) con.commit() finally: if cur: cur.close() if con: con.close() def notify_all(message_title=None, message_body=None): con = sqlite3.connect('fcm.db') con.row_factory = lambda cursor, row: row[0]
cur = con.cursor() cur.execute('CREATE TABLE IF NOT EXISTS tokens(token TEXT)') cur.execute('SELECT * FROM tokens') registration_ids = [row for row in cur.fetchall()] if len(registration_ids) > 0: noti = FCMNotification('API-KEY') result = noti.notify_multiple_devices(registration_ids=registration_ids,
message_title=message_title, message_body=message_body) return result
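notify_all() above leans on sqlite3's row_factory hook so that fetchall() hands back bare token strings rather than 1-tuples. A self-contained illustration of that hook, using an in-memory database instead of fcm.db; the token value is made up:

import sqlite3

con = sqlite3.connect(':memory:')
con.row_factory = lambda cursor, row: row[0]   # unwrap single-column rows
cur = con.cursor()
cur.execute('CREATE TABLE tokens(token TEXT)')
cur.execute("INSERT INTO tokens VALUES ('fake-token-123')")
cur.execute('SELECT * FROM tokens')
print(cur.fetchall())  # ['fake-token-123'] instead of [('fake-token-123',)]
con.close()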
emgirardin/compassion-modules
sponsorship_tracking/models/__init__.py
Python
agpl-3.0
501
0
# -*- enco
ding: utf-8 -*- ############################################################################## # # Copyright (C) 2015 Compassion CH (http://www.compassion.ch) # Releasing children from poverty in Jesus' name # @author: David Coninckx <david@coninckx.com> # # The licence is in the file __openerp__.py # ############################################################################## from . import contracts from . import project_compassion from . import ir_ui_me
nu
MTG/dunya
andalusian/api_urls.py
Python
agpl-3.0
3,931
0.008395
# Copyright 2013,2014 Music Technology Group - Universitat Pompeu Fabra # # This file is part of Dunya # # Dunya is free software: you can redistribute it and/or modify it under the # terms of the GNU Affero General Public License as published by the Free Software # Foundation (FSF), either version 3 of the License, or (at your option) any later # version. # # This program is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. See the GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along with # this program. If not, see http://www.gnu.org/licenses/ from django.conf.urls import url from rest_framework.urlpatterns import format_suffix_patterns import andalusian.api mbid_match = r'(?P<mbid>[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12})' uuid_match = r'(?P<uuid>[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12})' urlpatterns = [ url(r'^orchestra$', andalusian.api.OrchestraList.as_view(), name='api-andalusian-raaga-list'), url(r'^orchestra/%s$' % mbid_match, andalusian.api.OrchestraDetail.as_view(), name='api-andalusian-raaga-detail'), url(r'^artist$', andalusian.api.ArtistList.as_view(), name='api-andalusian-taala-list'), url(r'^artist/%s$' % mbid_match, andalusian.api.ArtistDetail.as_view(), name='api-andalusian-taala-detail'), url(r'^album$', andalusian.api.AlbumList.as_view(), name='api-andalusian-instrument-list'), url(r'^album/%s$' % mbid_match, andalusian.api.AlbumDetail.as_view(), name='api-andalusian-instrument-detail'), url(r'^work$', andalusian.api.WorkList.as_view(), name='api-andalusian-work-list'), url(r'^work/%s$' % mbid_match, andalusian.api.WorkDetail.as_view(), name='api-andalusian-work-detail'), url(r'^genre$', andalusian.api.GenreList.as_view(), name='api-andalusian-genre-list'), url(r'^genre/(?P<pk>\d+)$', andalusian.api.GenreDetail.as_view(), name='api-andalusian-genre-detail'), url(r'^recording$', andalusian.api.RecordingList.as_view(), name='api-andalusian-recording-list'), url(r'^recording/%s$' % mbid_match, andalusian.api.RecordingDetail.as_view(), name='api-andalusian-recording-detail'), url(r'^recording/%s/lyric$' % mbid_match, andalusian.api.LyricDetail.as_view(), name='api-andalusian-lyric-detail'), url(r'^instrument$', andalusian.api.InstrumentList.as_view(), name='api-andalusian-instrument-list'), url(r'^instrument/%s$' % mbid_match, andalusian.api.InstrumentDetail.as_view(), name='api-andalusian-instrument-detail'), url(r'^tab$', andalusian.api.TabList.as_view(), name='api-andalusian-tab-list'), url(r'^tab/%s$' % uuid_match, andalusian.api.TabDetail.as_view(), name='api-andalusian-tab-detail'), url(r'^mizan$', andalusian.api.MizanList.as_view(), name='api-andalusian-mizan-list'), url(r'^mizan/%s$' % uuid_match, andalusian.api.MizanDetail.as_view(), name='api-andalusian-mizan-detail'), url(r'^nawba$', andalusian.api.NawbaList.as_view(), name='api-andalu
sian-nawba-list'), url(r'^nawba/%s$' % uuid_match, andalusian.api.NawbaDetail.as_view(), name='api-andalusian-nawba-detail'), url(r
'^form$', andalusian.api.FormList.as_view(), name='api-andalusian-form-list'), url(r'^form/%s$' % uuid_match, andalusian.api.FormDetail.as_view(), name='api-andalusian-form-detail'), url(r'^sanaa$', andalusian.api.SanaaList.as_view(), name='api-andalusian-sanaa-list'), url(r'^sanaa/(?P<pk>\d+)$', andalusian.api.SanaaDetail.as_view(), name='api-andalusian-sanaa-detail'), url(r'^poem$', andalusian.api.PoemList.as_view(), name='api-andalusian-poem-list'), url(r'^poem/(?P<pk>\d+)$', andalusian.api.PoemDetail.as_view(), name='api-andalusian-poem-detail'), ] urlpatterns = format_suffix_patterns(urlpatterns, allowed=['json', 'api'])
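The routes above key most detail views on a MusicBrainz-style identifier captured by the mbid_match / uuid_match regex. A quick standalone check of that pattern against a fabricated identifier:

import re

mbid_match = r'(?P<mbid>[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12})'
pattern = re.compile(r'^recording/%s$' % mbid_match)

m = pattern.match('recording/0383dadf-2a4e-4d10-a46a-e9e041da8eb3')  # made-up MBID
print(m.group('mbid') if m else 'no match')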
plotly/python-api
packages/python/plotly/plotly/validators/box/hoverlabel/_font.py
Python
mit
1,855
0.000539
import _plotly_utils.basevalidators class FontValidator(_plotly_utils.basevalidators.CompoundValidator): def __init__(self, plotly_name="font", parent_name="box.hoverlabel", **kwargs): super(FontValidator, self).__init__( plotly_name=plotly_name, parent_name=parent_name, data_class_str=kwargs.pop("data_class_str", "Font"), data_docs=kwargs.pop( "data_docs", """ color colorsrc Sets the source reference on Chart Studio Cloud for color . family
HTML font family - the typeface that will be applied by the web browser. The web browser will only be able to apply a font if it is available on the system which it operates. Provide multiple font families, separated by commas, to indicate the preference in which to apply fonts if they aren't available on the system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise) generates images on a server, where only a select number of fonts are installed and supported. These include "Arial", "Balto", "Courier New", "Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans Narrow", "Raleway", "Times New Roman". familysrc Sets the source reference on Chart Studio Cloud for family . size sizesrc Sets the source reference on Chart Studio Cloud for size . """, ), **kwargs )
stack-of-tasks/rbdlpy
tutorial/lib/python2.7/site-packages/OpenGL/GLES1/OES/read_format.py
Python
lgpl-3.0
750
0.009333
'''OpenGL extension OES.read_format This module customises the behaviour of the OpenGL.raw.GLES1.OES.read_format to provide a more Python-friendly API The official definition of this extension is available here: http:
//www.opengl.org/registry/specs/OES/read_format.txt ''' from OpenGL import platform, constant, arrays from OpenGL import extensions, wrapper import ctypes from OpenGL.raw.GLES1 import _types, _glgets from OpenGL.raw.GLES1.OES.read_format import * from OpenGL.raw.GLES1.OES.read_format import _EXTENSION_NAME def glInitReadFormatOES(): '''Return boolean indicating whether this extension is available''' from OpenGL import extensions return extensions.hasGLExtension( _EXTENSION_NAME ) ### END A
UTOGENERATED SECTION
dchabot/ophyd
ophyd/__init__.py
Python
bsd-3-clause
946
0.014799
import logging logger = logging.getLogger(__name__) logger.addHandler(logging.NullHandler()) from . import * # Signals from .signal import (Signal, EpicsSignal, EpicsSignalRO, DerivedSignal) # Positioners from .positioner import (PositionerBase, SoftPositioner) from .epics_motor import EpicsMotor from .pv_pos
itioner import (PVPositioner, PVPositionerPC) from .pseudopos import (PseudoPositioner, PseudoSingle) # Devices from .scaler import EpicsScaler from .device import (Device, Component, FormattedComponent, DynamicDeviceComponent) from .status import Statu
sBase from .mca import EpicsMCA, EpicsDXP # Areadetector-related from .areadetector import * from ._version import get_versions from .commands import (mov, movr, set_pos, wh_pos, set_lm, log_pos, log_pos_diff, log_pos_mov) from .utils.startup import setup as setup_ophyd __version__ = get_versions()['version'] del get_versions
litzler/marioSokoBan
edit.py
Python
gpl-3.0
8,847
0.006107
import pygame from pygame.locals import * # pour les constan
tes touches... from constantes import * from fichiers import * from general import * from aide import * def edit(screen, levelNumber ,mode, lang, langu, levelFinal): motionX = 0 motionY =
0 alsoMario = 0 carte = [[int for lgn in range(NB_BLOCS_HAUTEUR)]for col in range(NB_BLOCS_LARGEUR)] restMario = 0 levelWord = '' clicGaucheEnCours = False clicDroitEnCours = False saved = False objectPos = pygame.Rect(0,0,0,0) exemplePos = pygame.Rect(0,0,0,0) # charger images mur = pygame.image.load(SOURCE_IMG + 'mur.jpg').convert() mur50 = pygame.image.load(SOURCE_IMG + 'mur50.jpg').convert() caisse = pygame.image.load(SOURCE_IMG + 'caisse.jpg').convert() caisse50 = pygame.image.load(SOURCE_IMG + 'caisse50.jpg').convert() caisse_ok = pygame.image.load(SOURCE_IMG + 'caisse_ok.jpg').convert() caisse_ok50 = pygame.image.load(SOURCE_IMG + 'caisse_ok50.jpg').convert() objectif = pygame.image.load(SOURCE_IMG + 'objectif.png').convert_alpha() objectif50 = pygame.image.load(SOURCE_IMG + 'objectif50.png').convert_alpha() mario = pygame.image.load(SOURCE_IMG + 'mario_bas.gif').convert_alpha() mario50 = pygame.image.load(SOURCE_IMG + 'mario_bas50.gif').convert_alpha() quadrillage = pygame.image.load(SOURCE_IMG + 'quadrillage.png').convert_alpha() # objet par défaut objet = MUR # load map chargeCarte(carte, levelNumber) # search mario for i in range(NB_BLOCS_LARGEUR): for j in range(NB_BLOCS_HAUTEUR): if carte[i][j] ==MARIO: alsoMario += 1 # white Bar whiteBar = pygame.Surface((screen.get_width(), 60), screen.get_flags()) whiteBar.fill(WHITE) # police police = pygame.font.Font('angelina.ttf', 20) # define sourceFile default pathFile = printLang(lang) # 'fr' ou 'en' sourceFile = SOURCE_FILE + pathFile + '/edit.lvl' # './files/'fr' ou 'en'/edit.lvl' # H: Help Level: Saved ESC: Exit ou H: Aide Niveau: Sauve ESC: Quitter # nombre de lignes lignes = compteLignes(sourceFile) tableau = [Text() for i in range(lignes)] # initialise tableau en fr ou en initialiseEditTable(sourceFile,lignes,tableau) levelWord = tableau[1].data tableau[1].data = levelWord + ' ' + str(levelNumber) tableau[1].partie = police.render(tableau[1].data, True, BLUE) # event continuer = True while(continuer): # check if there is mario on the map if not initialize the boolean if(objet == MARIO and alsoMario != 0): for i in range(NB_BLOCS_LARGEUR): for j in range(NB_BLOCS_LARGEUR): if carte[i][j]==MARIO: restMario += 1 if restMario == 0: alsoMario = 0 restMario=0 for event in pygame.event.get(): if event.type == QUIT or (event.type == KEYDOWN and event.key == K_ESCAPE): continuer = False # sortie de la boucle if event.type == KEYDOWN: if event.key == K_ESCAPE: continuer = False elif event.key == K_1 or event.key == K_KP1: objet = MUR elif event.key == K_2 or event.key == K_KP2: objet = CAISSE elif event.key == K_3 or event.key == K_KP3: objet = OBJECTIF elif event.key == K_4 or event.key == K_KP4: objet = MARIO elif event.key == K_5 or event.key == K_KP5: objet = CAISSE_OK elif event.key == K_h and lang == EN: aide(screen,mode,lang,langu) elif event.key == K_a and lang == FR: aide(screen,mode,lang,langu) elif event.key == K_s: saved = True sauveCarte(carte,levelNumber) elif event.key == K_PAGEUP: if levelNumber <= levelFinal: levelNumber += 1 if levelNumber == levelFinal+ 1: carte = [[MUR for lgn in range(NB_BLOCS_HAUTEUR)]for col in range(NB_BLOCS_LARGEUR)] tableau[1].data = levelWord + ' ' + str(levelNumber) tableau[1].partie = police.render(tableau[1].data, True, BLUE) break else: # add level number to tableau[1] tableau[1].data = levelWord + ' ' + str(levelNumber) tableau[1].partie = police.render(tableau[1].data, True, BLUE) chargeCarte(carte, levelNumber) elif event.key == K_PAGEDOWN: if levelNumber > 1: levelNumber -=1 # add level 
number to tableau[1] tableau[1].data = levelWord + ' ' + str(levelNumber) tableau[1].partie = police.render(tableau[1].data, True, BLUE) chargeCarte(carte, levelNumber) if event.type == MOUSEBUTTONDOWN: motionY, motionX = event.pos if motionX <= 408 and motionY <= 408: if event.button == RIGHT: clicDroitEnCours = True carte[motionX // TAILLE_BLOC][motionY // TAILLE_BLOC] = VIDE if event.button == LEFT: clicGaucheEnCours = True if objet == MARIO and alsoMario != 0: # mario can be put only once. continue else: carte[motionX // TAILLE_BLOC][motionY // TAILLE_BLOC] = objet if objet == MARIO: alsoMario +=1 if event.type == MOUSEBUTTONUP: if event.button == LEFT: clicGaucheEnCours = False elif event.button == RIGHT: clicDroitEnCours = False if event.type == MOUSEMOTION: motionX, motionY = event.pos exemplePos.x = motionX + 20 exemplePos.y = motionY + 20 # screen screen.fill(BLACK) # Ecran tout noir # affichage carte for lgn in range (NB_BLOCS_HAUTEUR): for col in range (NB_BLOCS_LARGEUR): objectPos.x = col * TAILLE_BLOC objectPos.y = lgn * TAILLE_BLOC if carte[lgn][col] == MUR: screen.blit(mur, objectPos) elif carte[lgn][col] == CAISSE: screen.blit(caisse,objectPos) elif carte[lgn][col] == CAISSE_OK: screen.blit(caisse_ok,objectPos) elif carte[lgn][col] == OBJECTIF: screen.blit(objectif,objectPos) elif carte[lgn][col] == MARIO: screen.blit(mario, objectPos) screen.blit(quadrillage, (0, 0)) # whiteBar objectPos.x = 0 objectPos.y = screen.get_height() - whiteBar.get_height() screen.blit(whiteBar,objectPos) # text objectPos.x = 10 objectPos.y = (screen.get_height() - whiteBar.get_height()) + 5 screen.blit(tableau[0].partie,objectPos) objectPos.x = 100 screen.blit(tableau[1].partie,objectPos) if saved: objectPos.x = 200 screen.blit(tableau[2].partie,objectPos) objectPos.x = (screen.get_width() - tableau[3].partie.get_width()) - 10 screen.blit(tableau[3].partie,objectPos) # blit exemple if objet == MUR: screen.blit(mur50, exemplePos) elif objet == CAISSE: screen.blit(caisse50, exemplePos) elif objet == CAISSE_OK: screen.blit(caisse_ok50, exemplePos) elif objet == OBJECTIF: scre
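The editor above maps mouse clicks onto the level grid with integer division by TAILLE_BLOC. A tiny standalone sketch of that mapping; TAILLE_BLOC = 34 is only an assumption here (12 cells of 34 px covering the 408 px board), the real constant lives in constantes.py:

TAILLE_BLOC = 34  # assumed cell size in pixels; see constantes.py for the real value

def cell_from_pixel(x, y):
    # same integer division the editor applies to event.pos
    return x // TAILLE_BLOC, y // TAILLE_BLOC

print(cell_from_pixel(0, 0))      # (0, 0)  top-left cell
print(cell_from_pixel(407, 200))  # (11, 5) last column, sixth row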
murataydos/popy
setup.py
Python
gpl-2.0
402
0
# -*- coding: utf-8 -*- from distutils.core import setup setup( name='popy', description='Parser for GNU Po files', long_description=open('README.rst').read(), version='0.3.0', packages=['popy'], author='M
urat Aydos', author_email='murataydos@yandex.com', url='https://github.com/murataydos/popy', license='MIT', zip_safe=
False, include_package_data=True )
fboers/jumegX
jumeg_test.py
Python
bsd-3-clause
1,802
0.008324
#!/usr/bin/env python import jumeg import os.path raw_fname = "109925_CAU01A_100715_0842_2_c,rfDC-raw.fif" if not os.path.isfile(raw_fname): print "Please find the test file at the below location on the meg_store2 network drive - \ cp /data/meg_store2/fif_data/jumeg_test_data/109925_CAU01A_100715_0842_2_c,rfDC-raw.fif ." # Function to check and explain the file naming standards #jumeg.jumeg_utils.check_jumeg
_standards(raw_fname) # Function to apply noise reducer jumeg.jumeg_noise_reducer.noise_reducer(raw_fname, verbose=True) # Fi
lter functions #jumeg.jumeg_preprocessing.apply_filter(raw_fname) fclean = raw_fname[:raw_fname.rfind('-raw.fif')] + ',bp1-45Hz-raw.fif' # Evoked functions #jumeg.jumeg_preprocessing.apply_average(fclean) # ICA functions #jumeg.jumeg_preprocessing.apply_ica(fclean) fica_name = fclean[:fclean.rfind('-raw.fif')] + '-ica.fif' # Perform ECG/EOG rejection using ICA #jumeg.jumeg_preprocessing.apply_ica_cleaning(fica_name) #jumeg.jumeg_preprocessing.apply_ica_cleaning(fica_name, unfiltered=True) # OCARTA cleaning from jumeg.decompose import ocarta ocarta_obj = ocarta.JuMEG_ocarta() ocarta_obj.fit(fclean, unfiltered=False, verbose=True) # CTPS functions #jumeg.jumeg_preprocessing.apply_ctps(fica_name) fctps_name = '109925_CAU01A_100715_0842_2_c,rfDC,bp1-45Hz,ctps-trigger.npy' #jumeg.jumeg_preprocessing.apply_ctps_select_ic(fctps_name) # Function recompose brain response components only fname_ctps_ics = '109925_CAU01A_100715_0842_2_c,rfDC,bp1-45Hz,ctps-trigger-ic_selection.txt' #jumeg.jumeg_preprocessing.apply_ica_select_brain_response(fname_ctps_ics) # Function to process empty file empty_fname = '109925_CAU01A_100715_0844_2_c,rfDC-empty.fif' #jumeg.jumeg_preprocessing.apply_create_noise_covariance(empty_fname, verbose=True)
JoshStegmaier/django-nimbus
nimbus/views/generic.py
Python
mit
1,761
0.003407
from django.views.generic import DetailView, ListView from django.views.generic.edit import CreateView, UpdateView from .mixins import GenerateActionMixin class DetailViewWithActionStream(GenerateActionMixin, DetailView): def dispatch(self, request, *args, **kwargs): if not self.request.user.is_anonymous(): self.generate_action() return super(DetailViewWithActionStream, self).dispatch(request, *args, **kwargs) def get_action_actor(self, *args, **kwargs): return self.request.user def get_action_verb(self, *args, **kwargs): return 'viewed' def get_action_action_object(self, *args, **kwargs): return self.get_object() class CreateViewWithActionStream(Gen
erateActionMixin, CreateView): def form_valid(self, form): to_return = super(CreateViewWithActionStream, self)
.form_valid(form) if not self.request.user.is_anonymous(): self.generate_action() return to_return def get_action_actor(self, *args, **kwargs): return self.request.user def get_action_verb(self, *args, **kwargs): return 'added' def get_action_action_object(self, *args, **kwargs): return self.object class UpdateViewWithActionStream(GenerateActionMixin, UpdateView): def form_valid(self, form): to_return = super(UpdateViewWithActionStream, self).form_valid(form) if not self.request.user.is_anonymous(): self.generate_action() return to_return def get_action_actor(self, *args, **kwargs): return self.request.user def get_action_verb(self, *args, **kwargs): return 'updated' def get_action_action_object(self, *args, **kwargs): return self.get_object()
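The three views above all follow the same template-method pattern: the (not shown) GenerateActionMixin calls small get_action_* hooks that each view overrides. A plain-Python sketch of that contract; it is an assumption about the mixin's shape, not its actual implementation:

class GenerateActionSketch(object):
    """Stand-in for GenerateActionMixin: collects the actor/verb/object hooks."""
    def generate_action(self):
        return {
            'actor': self.get_action_actor(),
            'verb': self.get_action_verb(),
            'action_object': self.get_action_action_object(),
        }

class ViewedArticle(GenerateActionSketch):
    def get_action_actor(self):
        return 'alice'
    def get_action_verb(self):
        return 'viewed'
    def get_action_action_object(self):
        return 'article-42'

print(ViewedArticle().generate_action())
# {'actor': 'alice', 'verb': 'viewed', 'action_object': 'article-42'}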
MaterialsDiscovery/PyChemia
pychemia/population/relaxstructures.py
Python
mit
19,699
0.002944
import random import uuid from math import gcd import numpy as np from ._population import Population from pychemia import Composition, Structure, pcm_log from pychemia.analysis import StructureAnalysis, StructureChanger, StructureMatch from pychemia.analysis.splitting import SplitMatch from pychemia.utils.mathematics import unit_vector from pychemia.utils.periodic import atomic_number, covalent_radius from pymongo import ASCENDING from pychemia.db import get_database from pychemia.crystal import CrystalSymmetry class RelaxStructures(Population): def evaluate_entry(self, entry_id): pass def __init__(self, name, composition=None, tag='global', target_forces=1E-3, value_tol=1E-2, distance_tolerance=0.3, min_comp_mult=2, max_comp_mult=8, pcdb_source=None, pressure=0.0, target_stress=None, target_diag_stress=None, target_nondiag_stress=None): """ Defines a population of PyChemia Structures, The 'name' of the database is used to create the MongoDB database and the structures are uniform in composition. A specific 'tag' could be attached to differentiate the other instances running concurrently. The 'delta' argument is the scaling factor for changers and mixers. In the case of populations supported on PyChemia databases the 'new' will erase the database :param name: The name of the population. ie the name of the database :param composition: The composition uniform for all the members :param tag: A tag to differentiate different instances running concurrently :return: A new StructurePopulation object """ if composition is not None: self.composition = Composition(composition) else: self.composition = None self.tag = tag self.target_forces = target_forces self.value_tol = value_tol self.min_comp_mult = min_comp_mult self.max_comp_mult = max_comp_mult self.pcdb_source = pcdb_source self.pressure = pressure if target_stress is None: self.target_stress = target_forces else: self.target_stress = target_stress if target_diag_stress is None: self.target_diag_stress = self.target_stress else: self.target_diag_stress = target_diag_stress if target_diag_stress is None: self.target_nondiag_stress = self.target_stress else: self.target_nondiag_stress = target_nondiag_stress self.name = name Population.__init__(self, name, tag, distance_tolerance=distance_tolerance) if self.pcdb_source is not None: self.sources = {} for i in range(min_comp_mult, max_comp_mult+1): self.sources[i] = [] for entry in self.pcdb_source.entries.find({'structure.natom': i*self.composition.natom, 'structure.nspecies': self.composition.nspecies}, {'_id': 1}): self.sources[i].append(entry['_id']) def recover(self): data = self.get_population_info() if data is not None: self.distance_tolerance = data['distance_tol'] self.value_tol = data['value_tol'] self.name = data['name'] self.target_forces = data['target_forces'] def get_structure(self, entry_id): entry = self.get_entry(entry_id) return Structure.from_dict(entry['structure']) @staticmethod def new_identifier(): return str(uuid.uuid4())[-12:] def new_entry(self, structure, active=True): properties = {'forces': None, 'stress': None, 'energy': None} status = {self.tag: active, 'tag': self.tag} entry = {'structure': structure.to_dict, 'properties': properties, 'status': status} entry_id = self.insert_entry(entry) pcm_log.debug('Added new entry: %s with tag=%s: %s' % (str(entry_id), self.tag, str(active))) return entry_id def get_max_force_stress(self, entry_id): entry = self.get_entry(entry_id, projection={'properties': 1}) max_force = None max_diag_stress = None max_nondiag_stress = None 
if entry is not None and entry['properties'] is not None: properties = entry['properties'] if 'forces' in properties and 'stress' in properties: if properties['forces'] is not None and properties['stress'] is not None:
forces = np.array(entry['properties']['forces']) stress = np.array(entry['properties']['stress']) max_force = np.max(np.apply_along_axis(np.linalg.norm, 1, forces)) max_diag_stress = np.max(np.abs(stress[:3])) max_nondiag_stress = np.max(np.abs(stress[4:])) return max_force, max_diag_stress, max_non
diag_stress def is_evaluated(self, entry_id): max_force, max_diag_stress, max_nondiag_stress = self.get_max_force_stress(entry_id) if max_force is None or max_diag_stress is None or max_nondiag_stress is None: return False elif max_force < self.target_forces and max_diag_stress < self.target_diag_stress + self.pressure: if max_nondiag_stress < self.target_nondiag_stress: return True else: return False else: return False def add_random(self, random_probability=0.3): """ Add one random structure to the population """ entry_id = None structure = Structure() if self.composition is None: raise ValueError('No composition associated to this population') factor = np.random.randint(self.min_comp_mult, self.max_comp_mult + 1) comp = self.composition.composition.copy() # print("Initial composition: %s" % comp) # print(Composition(comp)) # print(Composition(comp).symbols) for i in comp: comp[i] *= factor new_comp = Composition(comp) while True: rnd = random.random() condition = {'structure.nspecies': new_comp.nspecies, 'structure.natom': new_comp.natom} if self.pcdb_source is None: rnd = 0 elif len(self.sources[factor]) == 0: rnd = 0 if self.pcdb_source is None or rnd < random_probability: pcm_log.debug('Random Structure') structure = Structure.random_cell(new_comp, method='stretching', stabilization_number=5, nparal=5, periodic=True) break else: pcm_log.debug('From source') entry_id = self.sources[factor][np.random.randint(0, len(self.sources[factor]))] structure = self.pcdb_source.get_structure(entry_id) print("chosen structure from database =", structure) sym = CrystalSymmetry(structure) scale_factor = float(np.max(covalent_radius(new_comp.species)) / np.max(covalent_radius(structure.species))) reduce_scale = scale_factor ** (1. / 3) # WIH msg = 'Mult: %d natom: %d From source: %s Spacegroup: %d Scaling: %7.3f' print(msg % (factor, structure.natom, structure.formula, sym.number(), scale_factor)) # structure.set_cell(np.dot(scale_factor * np.eye(3), structure.cell)) # WIH structure.set_cell(np.dot(reduce_scale * np.eye(3), structure.cell)) # WIH print("symbols before change = ", structure.symbols) structure.symbols = new_comp.symbols print("symbols after change = ", structure.symbols) self.sources[factor].remove(entry_id) break return self.new_entry(structure), entry_id def check_duplicates(self, ids): """ Computes dup
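get_max_force_stress() above reduces the per-atom force array to its largest Euclidean norm and splits the stored stress vector into diagonal and off-diagonal parts before is_evaluated() compares them with the targets. A self-contained numeric illustration with made-up values; the exact slice boundaries depend on the stress-vector convention in use:

import numpy as np

forces = np.array([[0.01, 0.00, 0.000],
                   [0.00, 0.02, 0.000],
                   [0.00, 0.00, 0.005]])
stress = np.array([0.1, 0.2, 0.05, 0.01, 0.02, 0.03])  # assumed: 3 diagonal then shear terms

max_force = np.max(np.apply_along_axis(np.linalg.norm, 1, forces))
max_diag_stress = np.max(np.abs(stress[:3]))
max_nondiag_stress = np.max(np.abs(stress[3:]))

print(max_force, max_diag_stress, max_nondiag_stress)  # 0.02 0.2 0.03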
ben-jones/centinel
centinel/vpn/openvpn.py
Python
mit
2,675
0
#!/usr/bin/python # openvpn.py: library to handle starting and stopping openvpn instances import subprocess import threading import time class OpenVPN(): def __init__(self, config_file=None, auth_file=None, timeout=10): self.started = False self.stopped = False self.error = False self.notifications = "" self.auth_file = auth_file self.config_file = config_file self.thread = threading.Thread(target=self._invoke_openvpn) self.thread.setDaemon(1) self.timeout = timeout def _invoke_openvpn(self): if self.auth_file is None: cmd = ['sudo', 'openvpn', '--script-security', '2', '--config', self.config_file] else: cmd = ['sudo', 'openvpn', '--script-security', '2', '--config', self.config_file, '--auth-user-pass', self.auth_file] self.process = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) self.kill_switch = self.process.terminate self.starting = True while True: line
= self.process.stdout.readline().strip() if not line: break self.output_callback(line, self.process.terminate) def output_callback(self, line, kill_switch): """Set status of openvpn according to what we process""" self.notifications += line + "\n" if "Initialization
Sequence Completed" in line: self.started = True if "ERROR:" in line: self.error = True if "process exiting" in line: self.stopped = True def start(self, timeout=None): """Start openvpn and block until the connection is opened or there is an error """ if not timeout: timeout = self.timeout self.thread.start() start_time = time.time() while start_time + timeout > time.time(): self.thread.join(1) if self.error or self.started: break if self.started: print "openvpn started" else: print "openvpn not started" print self.notifications def stop(self, timeout=None): """Stop openvpn""" if not timeout: timeout = self.timeout self.kill_switch() self.thread.join(timeout) if self.stopped: print "stopped" else: print "not stopped" print self.notifications
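A hypothetical driver for the OpenVPN wrapper above: start() blocks until either "Initialization Sequence Completed" appears in the log or the timeout expires. The paths are placeholders, and actually running it requires openvpn installed plus sudo rights:

def connect_briefly(config_path='/etc/openvpn/client.ovpn',   # placeholder path
                    auth_path='/etc/openvpn/auth.txt'):       # placeholder path
    vpn = OpenVPN(config_file=config_path, auth_file=auth_path, timeout=30)
    vpn.start()                  # blocks until connected, error, or timeout
    try:
        return vpn.started       # True only if the tunnel actually came up
    finally:
        if vpn.started:
            vpn.stop()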
pynag/pynag
examples/Parsers/get_service_info.py
Python
gpl-2.0
495
0.010101
#!/usr/bin/python from __future__ impo
rt absolute_import from __future__ import print_function import sys if len(sys.argv) != 3: sys.stderr.write("Usage: %s 'Host Name' 'Service Description'\n" % (sys.argv[0])) sys.exit(2) ## This is for the custom nagios module sys.path.insert(1, '../') from pynag.Parsers import config ## Create the plugin option nc = config('/etc/nagios/nagios.cfg') nc.parse
() service = nc.get_service(sys.argv[1],sys.argv[2]) print(nc.print_conf(service))
nihilus/qb-sync
ext_gdb/sync.py
Python
gpl-3.0
16,046
0.002306
# # Copyright (C) 2012-2014, Quarkslab. # # This file is part of qb-sync. # # qb-sync is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http:/
/www.gnu.org/licenses/>. # #!/usr/bin/env python # -*- coding: utf-8 -*- import os import re import sys import time import socket import errno import base64 import tempfile import threading import gdb try: import configparser except ImportError: import ConfigParser as configparser VERBOSE = 0 HOST = "localhost" PORT = 9100 TIMER_PERIOD = 0.2 # function gdb_execute courtesy of StalkR # Wrapper when gdb.execute(cmd, to_string=True) does not work
def gdb_execute(cmd): f = tempfile.NamedTemporaryFile() gdb.execute("set logging file %s" % f.name) gdb.execute("set logging redirect on") gdb.execute("set logging overwrite") gdb.execute("set logging on") try: gdb.execute(cmd) except Exception as e: gdb.execute("set logging off") f.close() raise e gdb.execute("set logging off") s = open(f.name, "r").read() f.close() return s def get_pid(): inferiors = gdb.inferiors() for inf in gdb.inferiors(): if inf.is_valid(): return inf.pid raise Exception("get_pid(): failed to find program's pid") def get_maps(verbose=True): "Return list of maps (start, end, permissions, file name) via /proc" pid = get_pid() if pid is False: if verbose: print("Program not started") return [] maps = [] mapping = gdb_execute('info proc mappings') try: for line in mapping.splitlines(): e = [x for x in line.strip().split() if x != ''] if (not e) or (len(e) < 5): continue else: if not e[0].startswith('0x'): continue name = (' ').join(e[4:]) e = e[:4] + [name] start, end, size, offset, name = e maps.append([int(start, 16), int(end, 16), int(size, 16), name]) except Exception as e: print(e) print("[sync] failed to parse info proc mappings") return maps def get_mod_by_addr(maps, addr): for mod in maps: if (addr > mod[0]) and (addr < mod[1]): return [mod[0], mod[3]] return None def get_mod_by_name(maps, name): for mod in maps: if os.path.basename(mod[3]) == name: return [mod[0], mod[3]] return None def get_pc(): try: pc_str = str(gdb.parse_and_eval("$pc")) except Exception as e: # debugger may not be running: 'No registers': return None return int((pc_str.split(" ")[0]), 16) class Tunnel(): def __init__(self, host): try: self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.sock.connect((host, PORT)) except socket.error as msg: self.sock.close() self.sock = None self.sync = False print("[sync] Tunnel initialization error: %s" % msg) return None self.sync = True def is_up(self): return (self.sock != None and self.sync == True) def poll(self): if not self.is_up(): return None self.sock.setblocking(False) try: msg = self.sock.recv(4096).decode() except socket.error as e: err = e.args[0] if (err == errno.EAGAIN or err == errno.EWOULDBLOCK): return '\n' else: self.close() return None self.sock.setblocking(True) return msg def send(self, msg): if not self.sock: print("[sync] tunnel_send: tunnel is unavailable (did you forget to sync ?)") return try: self.sock.send(msg.encode()) except socket.error as msg: print(msg) self.sync = False self.close() print("[sync] tunnel_send error: %s" % msg) def close(self): if self.is_up(): self.send("[notice]{\"type\":\"dbg_quit\",\"msg\":\"dbg disconnected\"}\n") if self.sock: try: self.sock.close() except socket.error as msg: print("[sync] tunnel_close error: %s" % msg) self.sync = False self.sock = None # run commands # from https://sourceware.org/gdb/onlinedocs/gdb/Basic-Python.html#Basic-Python # GDB is not thread-safe. If your Python program uses multiple threads, # you must be careful to only call GDB-specific functions in the GDB thread. # post_event ensures this. 
class Runner(): def __init__(self, batch): self.batch = batch def __call__(self): for cmd in self.batch: if (cmd == ''): continue gdb.execute(cmd, True, False) # periodically poll socket in a dedicated thread class Poller(threading.Thread): def __init__(self, sync): threading.Thread.__init__(self) self.evt_enabled = threading.Event() self.evt_enabled.clear() self.evt_stop = threading.Event() self.evt_stop.clear() self.sync = sync def run(self): while True: if self.evt_stop.is_set(): break self.evt_enabled.wait() if not self.sync.tunnel: break if self.sync.tunnel.is_up(): self.poll() time.sleep(TIMER_PERIOD) def poll(self): msg = self.sync.tunnel.poll() if msg: batch = [cmd.strip() for cmd in msg.split('\n') if cmd] if batch: gdb.post_event(Runner(batch)) else: gdb.post_event(Runner(['syncoff'])) self.stop() def enable(self): self.evt_enabled.set() def disable(self): self.evt_enabled.clear() def stop(self): self.evt_stop.set() class Sync(gdb.Command): def __init__(self): gdb.Command.__init__(self, "sync", gdb.COMMAND_OBSCURE, gdb.COMPLETE_NONE) self.pid = None self.maps = None self.base = None self.offset = None self.tunnel = None self.poller = None gdb.events.exited.connect(self.exit_handler) gdb.events.cont.connect(self.cont_handler) gdb.events.stop.connect(self.stop_handler) gdb.events.new_objfile.connect(self.newobj_handler) print("[sync] commands added") def identity(self): f = tempfile.NamedTemporaryFile() gdb.execute("shell uname -svm > %s" % f.name) id = open(f.name, 'r').read() f.close() return id.strip() def mod_info(self, addr): if not self.maps: self.maps = get_maps() if not self.maps: print("[sync] failed to get maps") return None return get_mod_by_addr(self.maps, addr) def locate(self): offset = get_pc() if not offset: print("<not running>") return if not self.pid: self.pid = get_pid() if not self.pid: print("[sync] failed to get pid") return else: print("[sync] pid: %s" % self.pid) self.offset = offset mod = self.mod_info(self.offset) if mod: if VERBOSE >= 2: print("[sync] mod found") print(mod) base, sym = mod if self.base != base: self.tunnel.send("[notice]{\"type\":\"module
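The module above resolves addresses against the memory map parsed from "info proc mappings". A small illustration of the two lookup helpers defined earlier in the file, using a fabricated map instead of a live process:

sample_maps = [
    [0x400000, 0x452000, 0x52000, '/usr/bin/cat'],
    [0x7f0000000000, 0x7f0000200000, 0x200000, '/lib/x86_64-linux-gnu/libc-2.27.so'],
]
print(get_mod_by_addr(sample_maps, 0x401234))        # [4194304, '/usr/bin/cat']
print(get_mod_by_name(sample_maps, 'libc-2.27.so'))  # base address and path of the libc entry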
Kyziridis/recommender_system
helpers/Time.py
Python
gpl-3.0
51
0.019608
import time
def start():
    """Return the current time in seconds since the epoch, typically used as a start timestamp."""
    return time.time()
atljohnsen/adlcoursebuilder
modules/review/stats.py
Python
apache-2.0
5,775
0.000173
# Copyright 2012 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Classes for displaying peer review analytics.""" __author__ = 'Sean Lip (sll@google.com)' import os from common import safe_dom from controllers.utils import ApplicationHandler from controllers.utils import HUMAN_READABLE_TIME_FORMAT import jinja2 from models import courses from models import jobs from models import transforms from models import utils from modules.review import peer class ReviewStatsAggregator(object): """Aggregates peer review statistics.""" def __init__(self): # This dict records, for each unit, how many submissions have a given # number of completed reviews. The format of each key-value pair is # unit_id: {num_reviews: count_of_submissions} self.counts_by_completed_reviews = {} def visit(self, review_summary): unit_id = review_summary.unit_id if unit_id not in self.counts_by_completed_reviews: self.counts_by_completed_reviews[unit_id] = {} count = review_summary.completed_count if count not in self.counts_by_completed_reviews[unit_id]: self.counts_by_completed_reviews[unit_id][count] = 1 else: self.counts_by_completed_reviews[unit_id][count] += 1 class ComputeReviewStats(jobs.DurableJob): """A job for computing peer review statistics.""" def run(self): """Computes peer review statistics.""" stats = ReviewStatsAggregator() mapper = utils.QueryMapper( peer.ReviewSummary.all(), batch_size=500, report_every=1000) mapper.run(stats.visit) completed_arrays_by_unit = {} for unit_id in stats.counts_by_completed_reviews: max_completed_reviews = max( stats.counts_by_completed_reviews[unit_id].keys()) completed_reviews_array = [] for i in range(max_completed_reviews + 1): if i in stats.counts_by_completed_reviews[unit_id]: completed_reviews_array.append( stats.counts_by_completed_reviews[unit_id][i]) else: completed_reviews_array.append(0) completed_arrays_by_unit[unit_id] = completed_reviews_array return {'counts_by_completed_reviews': completed_arrays_by_unit} class PeerReviewStatsHandler(ApplicationHandler): """Shows peer review analytics on the dashboard.""" # The key used in the statistics dict that generates the dashboard page. # Must be unique. name = 'peer_review_stats' # The class that generates the data to be displayed. 
stats_computer = ComputeReviewStats def get_markup(self, job): """Returns Jinja markup for peer review statistics.""" errors = [] stats_calculated = False update_message = safe_dom.Text('') course = courses.Course(self) serialized_units = [] if not job: update_message = safe_dom.Text( 'Peer review statistics have not been calculated yet.') else: if job.status_code == jobs.STATUS_CODE_COMPLETED: stats = transforms.loads(job.output) stats_calculated = True for unit in course.get_peer_reviewed_units(): if unit.unit_id in stats['counts_by_completed_reviews']: unit_stats = ( stats['counts_by_completed_reviews'][unit.unit_id]) serialized_units.append({ 'stats': unit_stats, 'title': unit.title, 'unit_id': unit.unit_id, }) update_message = safe_dom.Text(""" Peer review statistics were last updated at %s in about %s second(s).""" % ( job.updated_on.strftime(HUMAN_READABLE_TIME_FORMAT), job.execution_time_sec)) elif job.status_code == jobs.STATUS_CODE_FAILED: update_message = safe_dom.NodeList().append( safe_dom.Text(""" There was an error updating peer review statistics. Here is the message:""") ).append( safe_dom.Element('br') ).append(
safe_dom.Element('blockquote').add_child(
safe_dom.Element('pre').add_text('\n%s' % job.output))) else: update_message = safe_dom.Text(""" Peer review statistics update started at %s and is running now. Please come back shortly.""" % job.updated_on.strftime( HUMAN_READABLE_TIME_FORMAT)) return jinja2.utils.Markup(self.get_template( 'stats.html', [os.path.dirname(__file__)] ).render({ 'errors': errors, 'serialized_units': serialized_units, 'serialized_units_json': transforms.dumps(serialized_units), 'stats_calculated': stats_calculated, 'update_message': update_message, }, autoescape=True))
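ComputeReviewStats above turns the per-unit {completed_review_count: submissions} mapping into a dense array indexed by review count. A standalone sketch of that flattening, with fabricated summaries standing in for peer.ReviewSummary entities:

counts = {}
for unit_id, completed in [('u1', 0), ('u1', 2), ('u1', 2), ('u1', 1)]:
    per_unit = counts.setdefault(unit_id, {})
    per_unit[completed] = per_unit.get(completed, 0) + 1

arrays = {}
for unit_id, by_count in counts.items():
    size = max(by_count) + 1
    arrays[unit_id] = [by_count.get(i, 0) for i in range(size)]

print(arrays)  # {'u1': [1, 1, 2]}: one submission with 0 reviews, one with 1, two with 2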
olebole/astrometry.net
util/starutil_numpy.py
Python
bsd-3-clause
18,610
0.010586
# This file is part of the Astrometry.net suite. # Licensed under a 3-clause BSD style license - see LICENSE from numpy import * import datetime import numpy as np from functools import reduce arcsecperrad = 3600. * 180. / np.pi axistilt = 23.44 # degrees def ra_normalize(ra): return np.mod(ra, 360.) def ra_ranges_overlap(ralo, rahi, ra1, ra2): ''' Given two ranges, [ralo,rahi], [ra1,ra2], returns True if they overlap.''' import numpy as np x1 = np.cos(np.deg2rad(ralo)) y1 = np.sin(np.deg2rad(ralo)) x2 = np.cos(np.deg2rad(rahi)) y2 = np.sin(np.deg2rad(rahi)) x3 = np.cos(np.deg2rad(ra1)) y3 = np.sin(np.deg2rad(ra1)) x4 = np.cos(np.deg2rad(ra2)) y4 = np.sin(np.deg2rad(ra2)) #cw31 = x1*y3 - x3*y1 cw32 = x2*y3 - x3*y2 cw41 = x1*y4 - x4*y1 #cw42 = x2*y4 - x4*y2 #print('3:', cw31, cw32) #print('4:', cw41, cw42) return np.logical_and(cw32 <= 0, cw41 >= 0) # def transform(long, lat, poleTo, poleFrom): (alphaGP,deltaGP) = deg2rad(poleFrom[0]), deg2rad(poleFrom[1]) lCP = deg2rad(poleTo[0]) alpha = deg2rad(long) delta = deg2rad(lat) ra = rad2deg(lCP - arctan2(sin(alpha - alphaGP), tan(delta) * cos(deltaGP) - cos(alpha - alphaGP) * sin(deltaGP))) dec = rad2deg(arcsin((sin(deltaGP)*sin(delta) + cos(deltaGP)*cos(delta)*cos(alpha - alphaGP)))) ra = ra_normalize(ra) return ra,dec # Galactic (l,b) to equatorial (ra,dec). # Lifted from LSST's afw.coord.Coord class by Steve Bickerton. def lbtoradec(l, b): # aka 'GalacticPoleInFk5' poleTo = (192.8595, 27.12825) # aka 'Fk5PoleInGalactic' poleFrom = (122.9320, 27.12825) return transform(l, b, poleTo, poleFrom) galactictoradec = lbtoradec def eclipticPoleInclination(epoch): T = (epoch - 2000.0) / 100.0 eclincl = (23.0 + 26.0/60.0 + (21.448 - 46.82*T - 0.0006*T*T - 0.0018*T*T*T)/3600.0) return eclincl # Thanks to Steve Bickerton in lsst.afw.Coord : EclipticCoord::toFk5 def ecliptictoradec(a, b, epoch=2000.): eclincl = eclipticPoleInclination(epoch) eclipticPoleInFk5 = (270.0, 90.0 - eclincl) fk5PoleInEcliptic = (90.0, 90.0 - eclincl) return transform(a, b, eclipticPoleInFk5, fk5PoleInEcliptic) # Thanks to Steve Bickerton in lsst.afw.Coord : Fk5Coord::toEcliptic def radectoecliptic(ra, dec, epoch=2000.): eclincl = eclipticPoleInclination(epoch) eclPoleInEquatorial = (270.0, 90.0 - eclincl) equPoleInEcliptic = (90.0, 90.0 - eclincl) return transform(ra, dec, equPoleInEcliptic, eclPoleInEquatorial) # scalars (racenter, deccenter) in deg # scalar radius in deg # arrays (ra,dec) in deg # returns array of booleans def points_within_radius(racenter, deccenter, radius, ra, dec): return radecdotproducts(racenter, deccenter, ra, dec)
>= cos(deg2rad(radius)) def points_within_radius_range(racenter, deccenter, radiuslo, radiushi, ra, dec): d = radecdotproducts(racenter, deccenter, ra, dec) return (d <= cos(deg2rad(radiuslo))) * (d >= cos(deg2rad(radiushi))) # scalars (racenter, deccenter) in deg # arrays (ra,dec) in deg # returns array of cosines def radecdotproducts(racenter, deccenter, ra, dec): xyzc = radectoxyz(racenter, d
eccenter).T xyz = radectoxyz(ra, dec) return dot(xyz, xyzc)[:,0] # RA, Dec in degrees: scalars or 1-d arrays. # returns xyz of shape (N,3) def radectoxyz(ra_deg, dec_deg): ra = deg2rad(ra_deg) dec = deg2rad(dec_deg) cosd = cos(dec) xyz = vstack((cosd * cos(ra), cosd * sin(ra), sin(dec))).T assert(xyz.shape[1] == 3) return xyz # RA,Dec in degrees # returns (dxyz_dra, dxyz_ddec) def derivatives_at_radec(ra_deg, dec_deg): ra = deg2rad(ra_deg) dec = deg2rad(dec_deg) cosd = cos(dec) sind = sin(dec) cosra = cos(ra) sinra = sin(ra) return (180./pi * vstack((cosd * -sinra, cosd * cosra, 0)).T, 180./pi * vstack((-sind * cosra, -sind * sinra, cosd)).T) def xyztoradec(xyz): ''' Converts positions on the unit sphere to RA,Dec in degrees. 'xyz' must be a numpy array, either of shape (3,) or (N,3) Returns a tuple (RA,Dec). If 'xyz' is a scalar, RA,Dec are scalars. If 'xyz' is shape (N,3), RA,Dec are shape (N,). >>> xyztoradec(array([1,0,0])) (0.0, 0.0) >>> xyztoradec(array([ [1,0,0], [0,1,0], [0,0,1]])) (array([ 0., 90., 0.]), array([ 0., 0., 90.])) >>> xyztoradec(array([0,1,0])) (90.0, 0.0) >>> xyztoradec(array([0,0,1])) (0.0, 90.0) ''' if len(xyz.shape) == 1: # HACK! rs,ds = xyztoradec(xyz[newaxis,:]) return (rs[0], ds[0]) (nil,three) = xyz.shape assert(three == 3) ra = arctan2(xyz[:,1], xyz[:,0]) ra += 2*pi * (ra < 0) dec = arcsin(xyz[:,2] / norm(xyz)[:,0]) return (rad2deg(ra), rad2deg(dec)) ##################### # RA,Decs in degrees. Both pairs can be arrays. def distsq_between_radecs(ra1, dec1, ra2, dec2): xyz1 = radectoxyz(ra1, dec1) xyz2 = radectoxyz(ra2, dec2) # (n,3) (m,3) s0 = xyz1.shape[0] s1 = xyz2.shape[0] d2 = zeros((s0,s1)) for s in range(s0): d2[s,:] = sum((xyz1[s,:] - xyz2)**2, axis=1) if s0 == 1 and s1 == 1: d2 = d2[0,0] elif s0 == 1: d2 = d2[0,:] elif s1 == 1: d2 = d2[:,0] return d2 # RA,Decs in degrees. def distsq_between_radecs(ra1, dec1, ra2, dec2): ''' Computes the distance-square on the unit sphere between two (arrays of) RA,Decs. ''' xyz1 = radectoxyz(ra1, dec1) xyz2 = radectoxyz(ra2, dec2) # (n,3) (m,3) s0 = xyz1.shape[0] s1 = xyz2.shape[0] d2 = zeros((s0,s1)) for s in range(s0): d2[s,:] = sum((xyz1[s,:] - xyz2)**2, axis=1) if s0 == 1 and s1 == 1: d2 = d2[0,0] elif s0 == 1: d2 = d2[0,:] elif s1 == 1: d2 = d2[:,0] return d2 # RA,Decs in degrees. def arcsec_between(ra1, dec1, ra2, dec2): ''' Computes the angle between two (arrays of) RA,Decs. >>> from numpy import round >>> print round(arcsec_between(0, 0, 1, 0), 6) 3600.0 >>> print round(arcsec_between(array([0, 1]), array([0, 0]), 1, 0), 6) [ 3600. 0.] >>> print round(arcsec_between(1, 0, array([0, 1]), array([0, 0])), 6) [ 3600. 0.] >>> print round(arcsec_between(array([0, 1]), array([0, 0]), array([0, 1]), array([0, 0])), 6) [[ 0. 3600.] [ 3600. 0.]] ''' return distsq2arcsec(distsq_between_radecs(ra1,dec1,ra2,dec2)) def degrees_between(ra1, dec1, ra2, dec2): return arcsec2deg(arcsec_between(ra1, dec1, ra2, dec2)) def deg2distsq(deg): return rad2distsq(deg2rad(deg)) def deg2dist(deg): return rad2dist(deg2rad(deg)) def rad2dist(r): return sqrt(rad2distsq(r)) def rad2distsq(r): # inverse of distsq2arc; cosine law. return 2.0 * (1.0 - cos(r)); def distsq2rad(dist2): return arccos(1. - dist2 / 2.) 
def distsq2arcsec(dist2): return rad2arcsec(distsq2rad(dist2)) def distsq2deg(dist2): return rad2deg(distsq2rad(dist2)) def rad2deg(r): return 180.0*r/pi def rad2arcsec(r): return 648000.0*r/pi def arcsec2rad(a): return a*pi/648000.0 def arcsec2deg(a): return rad2deg(arcsec2rad(a)) # x can be an array of shape (N,D) # returns an array of shape (N,1) def norm(x): if len(x.shape) == 2: return sqrt(sum(x**2, axis=1))[:,newaxis] else: return sqrt(sum(x**2)) vector_norm = norm # proper motion (dl, db, dra, or ddec) in mas/yr # dist in kpc # returns velocity in km/s def pmdisttovelocity(pm, dist): # (pm in deg/yr) * (dist in kpc) to (velocity in km/s) pmfactor = 1/3.6e6 * pi/180. * 0.977813952e9 return pm * dist * pmfactor # ra, dec in degrees # pmra = d(RA*cos(Dec))/dt, pmdec = dDec/dt, in deg/yr or mas/yr # returns (l,b, pml,pmb) in degrees and [the same units as pmra,pmdec] # pml is d(l*cos(b))/dt def pm_radectolb(ra, dec, pmra, pmdec): (l1, b1) = radectolb(ra, dec) # the Jo
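The conversion helpers above rely on the chord-length identity for unit vectors: two directions separated by angle r have squared chord distance 2*(1 - cos r), which is why distsq2rad() and rad2distsq() invert each other. A quick numerical check:

import numpy as np

r = np.deg2rad(1.0)                        # one degree
d2 = 2.0 * (1.0 - np.cos(r))               # rad2distsq(r)
back = np.arccos(1.0 - d2 / 2.0)           # distsq2rad(d2)
print(np.rad2deg(back))                    # ~1.0, the round trip recovers the angle
print(np.sqrt(d2), 2.0 * np.sin(r / 2.0))  # chord length computed two equivalent ways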
YuxuanLing/trunk
trunk/code/study/python/Fluent-Python-example-code/15-context-mngr/mirror_gen.py
Python
gpl-3.0
1,453
0
""" A "mirroring" ``stdout`` context manager. While active, the context manager reverses text output to ``stdout``:: # BEGIN MIRROR_GEN_DEMO_1 >>> f
rom mirror_gen import looking_glass >>> with looking_glass() as what: # <1> ... print('Alice, Kitty and Snowdrop') ... print(
what) ... pordwonS dna yttiK ,ecilA YKCOWREBBAJ >>> what 'JABBERWOCKY' # END MIRROR_GEN_DEMO_1 This exposes the context manager operation:: # BEGIN MIRROR_GEN_DEMO_2 >>> from mirror_gen import looking_glass >>> manager = looking_glass() # <1> >>> manager # doctest: +ELLIPSIS <contextlib._GeneratorContextManager object at 0x...> >>> monster = manager.__enter__() # <2> >>> monster == 'JABBERWOCKY' # <3> eurT >>> monster 'YKCOWREBBAJ' >>> manager # doctest: +ELLIPSIS >...x0 ta tcejbo reganaMtxetnoCrotareneG_.biltxetnoc< >>> manager.__exit__(None, None, None) # <4> >>> monster 'JABBERWOCKY' # END MIRROR_GEN_DEMO_2 """ # BEGIN MIRROR_GEN_EX import contextlib @contextlib.contextmanager # <1> def looking_glass(): import sys original_write = sys.stdout.write # <2> def reverse_write(text): # <3> original_write(text[::-1]) sys.stdout.write = reverse_write # <4> yield 'JABBERWOCKY' # <5> sys.stdout.write = original_write # <6> # END MIRROR_GEN_EX
GuessWhoSamFoo/pandas
pandas/tests/extension/test_period.py
Python
bsd-3-clause
4,336
0
import numpy as np import pytest from pandas._libs.tslib import iNaT from pandas.core.dtypes.dtypes import PeriodDtype import pandas as pd from pandas.core.arrays import PeriodArray from pandas.tests.extension import base @pytest.fixture def dtype(): return PeriodDtype(freq='D') @pytest.fixture def data(dtype): return PeriodArray(np.arange(1970, 2070), freq=dtype.freq) @pytest.fixture def data_for_sorting(dtype): return PeriodArray([2018, 2019, 2017], freq=dtype.freq) @pytest.fixture def data_missing(dtype): return PeriodArray([iNaT, 2017], freq=dtype.freq) @pytest.fixture def data_missing_for_sorting(dtype): return PeriodArray([2018, iNaT, 2017], freq=dtype.freq) @pytest.fixture def data_for_grouping(dtype): B = 2018 NA = iNaT A = 2017 C = 2019 return PeriodArray([B, B, NA, NA, A, A, B, C], freq=dtype.freq) @pytest.fixture def na_value(): return pd.NaT class BasePeriodTests(object): pass class TestPeriodDtype(BasePeriodTests, base.BaseDtypeTests): pass class TestConstructors(BasePeriodTests, base.BaseConstructorsTests): pass class TestGetitem(BasePeriodTests, base.BaseGetitemTests): pass class TestMethods(BasePeriodTests, base.BaseMethodsTests): def test_combine_add(self, data_repeated): # Period + Period is not defined. pass class TestInterface(BasePeriodTests, base.BaseInterfaceTests): pass class TestArithmeticOps(BasePeriodTests, base.BaseArithmeticOpsTests): implements = {'__sub__', '__rsub__'} def test_arith_series_with_scalar(self, data, all_arithmetic_operators): # we implement substitution... if all_arithmetic_operators in self.implements: s = pd.Series(data) self.check_opname(s, all_arithmetic_operators, s.iloc[0], exc=None) else: # ... but not the rest. super(TestArithmeticOps, self).test_arith_series_with_scalar( data, all_arithmetic_operators ) def test_arith_series_with_array(self, data, all_arithmetic_operators): if all_arithmetic_operators in self.implements: s = pd.Series(data) self.check_opname(s, all_arithmetic_operators, s.iloc[0], exc=None) else: # ... but not the rest. super(TestArithmeticOps, self).test_arith_series_with_scalar( data, all_arithmetic_operators ) def _check_divmod_op(self, s, op, other, exc=NotImplementedError): super(TestArithmeticOps, self)._check_divmod_op( s, op, other, exc=TypeError ) def test_add_series_with_extension_array(self, data): # we don't implement + for Period s = pd.Series(data) msg = (r"unsupported operand type\(s\) for \+: " r"\'PeriodArray\' and \'PeriodArray\'") with pytest.raises(TypeError, match=msg): s + da
ta def test_error(self): pass def test_
direct_arith_with_series_returns_not_implemented(self, data): # Override to use __sub__ instead of __add__ other = pd.Series(data) result = data.__sub__(other) assert result is NotImplemented class TestCasting(BasePeriodTests, base.BaseCastingTests): pass class TestComparisonOps(BasePeriodTests, base.BaseComparisonOpsTests): def _compare_other(self, s, data, op_name, other): # the base test is not appropriate for us. We raise on comparison # with (some) integers, depending on the value. pass class TestMissing(BasePeriodTests, base.BaseMissingTests): pass class TestReshaping(BasePeriodTests, base.BaseReshapingTests): pass class TestSetitem(BasePeriodTests, base.BaseSetitemTests): pass class TestGroupby(BasePeriodTests, base.BaseGroupbyTests): pass class TestPrinting(BasePeriodTests, base.BasePrintingTests): pass class TestParsing(BasePeriodTests, base.BaseParsingTests): @pytest.mark.parametrize('engine', ['c', 'python']) def test_EA_types(self, engine, data): expected_msg = r'.*must implement _from_sequence_of_strings.*' with pytest.raises(NotImplementedError, match=expected_msg): super(TestParsing, self).test_EA_types(engine, data)
srajag/nova
nova/tests/integrated/v3/test_evacuate.py
Python
apache-2.0
3,892
0.002569
# Copyright 2012 Nebula, Inc. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from nova.compute import api as compute_api from nova.compute import manager as compute_manager from nova.servicegroup import api as service_group_api from nova.tests.integrated.v3 import test_servers class EvacuateJsonTest(test_servers.ServersSampleBase): extension_name = "os-evacuate" def _test_evacuate(self, req_subs, server_req, server_resp, expected_resp_code): self.uuid = self._post_server() def fake_service_is_up(self, service): """Simulate validation of instance host is down.""" return False def fake_service_get_by_compute_host(self, context, host): """Simulate that given host is a valid host.""" return { 'host_name': host, 'service': 'compute', 'zone': 'nova' } def fake_check_instance_exists(self, context, instance): """Simulate validation of instance does not exist.""" return False self.stubs.Set(service_group_api.API, 'service_is_up', fake_service_is_up) self.stubs.Set(compute_api.HostAPI, 'service_get_by_compute_host', fake_service_get_by_compute_host) self.stubs.Set(compute_manager.ComputeManager, '_check_instance_exists', fake_check_instance_exists) response = self._do_post('servers/%s/action' % self.uuid, server_req, req_subs) subs = self._get_regexes() self._verify_response(server_resp, subs, response, expected_resp_code) @mock.patch('nova.conductor.manager.ComputeTaskManager.rebuild_instance') def test_server_evacuate(self, rebuild_mock): # Note (wingwj): The host can't be the same one req_subs = { 'host': 'testHost', "adminPass": "MySecretPass", "onSharedStorage": 'False' } self._test_evacuate(req_subs, 'server-evacuate-req', 'server-evacuate-resp', 202) rebuild_mock.assert_called_once_with(mock.ANY, instance=mock.ANY, orig_image_ref=mock.ANY, image_ref=mock.ANY, injected_files=mock.ANY, new_pass="MySecretPass", ori
g_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY, on_shared_storage=False, preserve_ephemeral=mock.ANY, host='testHost') @mock.patch('nova.conductor.manager.ComputeTaskM
anager.rebuild_instance') def test_server_evacuate_find_host(self, rebuild_mock): req_subs = { "adminPass": "MySecretPass", "onSharedStorage": 'False' } self._test_evacuate(req_subs, 'server-evacuate-find-host-req', 'server-evacuate-find-host-resp', 202) rebuild_mock.assert_called_once_with(mock.ANY, instance=mock.ANY, orig_image_ref=mock.ANY, image_ref=mock.ANY, injected_files=mock.ANY, new_pass="MySecretPass", orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY, on_shared_storage=False, preserve_ephemeral=mock.ANY, host=None)
pcrews/drizzle-ci-salt
test-install.py
Python
gpl-3.0
3,724
0.005908
#!/usr/bin/python2.7 # # This file is part of drizzle-ci # # Copyright (c) 2013 Sharan Kumar M # # drizzle-ci is free software: you ca
n redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # drizzle-ci is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied
warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with drizzle-ci. If not, see <http://www.gnu.org/licenses/>. # # # ========================== # Test script for drizzle-ci # ========================== # imports import logging import os import re import signal import subprocess import sys # configuring paths path = {} path['root'] = os.getcwd() path['state'] = '/srv/salt' path['pillar'] = '/srv/pillar' # configuring variables logging.basicConfig(format='%(levelname)s:%(message)s',level=logging.INFO) log = logging.getLogger(__name__) copy = 'sudo cp -r {0} {1}' top_file = '''base: '*': - {0} ''' # functions def process_command_line(): ''' A function to return the command line arguments as a dictionary of items ''' opt = {} argv = sys.argv[1:] if len(argv) is 0: opt['minion'] = ['*'] opt['state'] = ['drizzle-dbd','drizzle','jenkins','nova','salt','sysbench','users'] return opt for arg in argv: key = arg.split('=')[0][2:] opt[key] = arg.split('=')[1].split(',') return opt def keyboard_interrupt(signal_type,handler): ''' This function handles the keyboard interrupt ''' log.info('\t\tPressed CTRL+C') log.info('\t\texiting...') exit(0) # processing the command line and kick start! opt = process_command_line() signal.signal(signal.SIGINT,keyboard_interrupt) log.info('\t\tsetting up the environment') # setting up the environment cmd = copy.format(path['state']+'/top.sls',path['state']+'/top.sls.bak') os.system(cmd) cmd = copy.format(path['root']+'/salt',path['state']) os.system(cmd) cmd = copy.format(path['root']+'/pillar', path['pillar']) os.system(cmd) # refreshing pillar data log.info('\t\tsetting up pillar data') for minion in opt['minion']: subprocess.Popen(['sudo','salt',minion,'saltutil.refresh_pillar'],stdout=subprocess.PIPE) # processing each state log.info('\n\t\t==================================================') log.info('\t\tstate minion status ') log.info('\t\t==================================================') for state in opt['state']: top_data = top_file.format(state) with open(path['state']+'/top.sls', 'w') as top_sls: top_sls.write(top_data) for minion in opt['minion']: output = subprocess.Popen(['sudo', 'salt', minion, 'state.highstate'], stdout=subprocess.PIPE) result, error = output.communicate() if error is not None: logging.info('ERROR') logging.info(error) failure = re.search(r'Result:\s+False',result) if failure is not None: status = 'FAILURE' else: status = 'OK' log.info('\t\t'+state.ljust(20)+minion.ljust(20)+status.ljust(10)) # restoring the original top.sls and cleaning up.. log.info('\t\t==================================================') log.info('\n\t\tcleaning up...') cmd = 'sudo mv {0} {1}'.format(path['state']+'/top.sls.bak', path['state']+'/top.sls') os.system(cmd) log.info('\t\tsuccessfully executed')
tparks5/tor-stem
docs/_static/example/event_listening.py
Python
lgpl-3.0
5,286
0.015513
import curses import functools from stem.control import EventType, Controller from stem.util import str_tools # colors that curses can handle COLOR_LIST = { "red": curses.COLOR_RED, "green": curses.COLOR_GREEN, "yellow": curses.COLOR_YELLOW, "blue": curses.COLOR_BLUE, "cyan": curses.COLOR_CYAN, "magenta": curses.COLOR_MAGENTA, "black": curses.COLOR_BLACK, "white": curses.COLOR_WHITE, } GRAPH_WIDTH = 40 GRAPH_HEIGHT = 8 DOWNLOAD_COLOR = "green" UPLOAD_COLOR = "blue" def main(): with Controller.from_port(port = 9051) as controller: controller.authenticate() try: # This makes curses initialize and call draw_bandwidth_graph() with a # reference to the screen, followed by additional arguments (in this # case just the controller). curses.wrapper(draw_bandwidth_graph, controller) except KeyboardInterrupt: pass # the user hit ctrl+c def draw_bandwidth_graph(stdscr, controller): window = Window(stdscr) # (downloaded, uploaded) tuples for the last 40 seconds bandwidth_rates = [(0, 0)] * GRAPH_WIDTH # Making a partial that wraps the window and bandwidth_rates with a function # for Tor to call when it gets a BW event. This causes the 'window' and # 'bandwidth_rates' to be provided as the first two arguments whenever # 'bw_event_handler()' is called. bw_event_handler = functools.partial(_handle_bandwidth_event, window, bandwidth_rates) # Registering this listener with Tor. Tor reports a BW event each second. controller.add_event_listener(bw_event_handler, EventType.BW) # Pause the main thread until the user hits any key... and no, don't you dare # ask where the 'any'
key is. :P stdscr.getch() def _handle_bandwidth_event(window, bandwidth_rates, event): # callback for when tor provides us with a BW event bandwidth_rates.insert(0, (event.read, event.written)) bandwidth_rates = bandwidth_rates[:GRAPH_WIDTH] # truncate old values _render_graph(window, bandwidth_rates) def _render_graph(window, bandwidth_rates): window.erase() download_rates = [entry[0] for entry in bandwidth_rates]
upload_rates = [entry[1] for entry in bandwidth_rates] # show the latest values at the top label = "Downloaded (%s/s):" % str_tools.size_label(download_rates[0], 1) window.addstr(0, 1, label, DOWNLOAD_COLOR, curses.A_BOLD) label = "Uploaded (%s/s):" % str_tools.size_label(upload_rates[0], 1) window.addstr(0, GRAPH_WIDTH + 7, label, UPLOAD_COLOR, curses.A_BOLD) # draw the graph bounds in KB max_download_rate = max(download_rates) max_upload_rate = max(upload_rates) window.addstr(1, 1, "%4i" % (max_download_rate / 1024), DOWNLOAD_COLOR) window.addstr(GRAPH_HEIGHT, 1, " 0", DOWNLOAD_COLOR) window.addstr(1, GRAPH_WIDTH + 7, "%4i" % (max_upload_rate / 1024), UPLOAD_COLOR) window.addstr(GRAPH_HEIGHT, GRAPH_WIDTH + 7, " 0", UPLOAD_COLOR) # draw the graph for col in range(GRAPH_WIDTH): col_height = GRAPH_HEIGHT * download_rates[col] / max(max_download_rate, 1) for row in range(col_height): window.addstr(GRAPH_HEIGHT - row, col + 6, " ", DOWNLOAD_COLOR, curses.A_STANDOUT) col_height = GRAPH_HEIGHT * upload_rates[col] / max(max_upload_rate, 1) for row in range(col_height): window.addstr(GRAPH_HEIGHT - row, col + GRAPH_WIDTH + 12, " ", UPLOAD_COLOR, curses.A_STANDOUT) window.refresh() class Window(object): """ Simple wrapper for the curses standard screen object. """ def __init__(self, stdscr): self._stdscr = stdscr # Mappings of names to the curses color attribute. Initially these all # reference black text, but if the terminal can handle color then # they're set with that foreground color. self._colors = dict([(color, 0) for color in COLOR_LIST]) # allows for background transparency try: curses.use_default_colors() except curses.error: pass # makes the cursor invisible try: curses.curs_set(0) except curses.error: pass # initializes colors if the terminal can handle them try: if curses.has_colors(): color_pair = 1 for name, foreground in COLOR_LIST.items(): background = -1 # allows for default (possibly transparent) background curses.init_pair(color_pair, foreground, background) self._colors[name] = curses.color_pair(color_pair) color_pair += 1 except curses.error: pass def addstr(self, y, x, msg, color = None, attr = curses.A_NORMAL): # Curses throws an error if we try to draw a message that spans out of the # window's bounds (... seriously?), so doing our best to avoid that. if color is not None: if color not in self._colors: recognized_colors = ", ".join(self._colors.keys()) raise ValueError("The '%s' color isn't recognized: %s" % (color, recognized_colors)) attr |= self._colors[color] max_y, max_x = self._stdscr.getmaxyx() if max_x > x and max_y > y: try: self._stdscr.addstr(y, x, msg[:max_x - x], attr) except: pass # maybe an edge case while resizing the window def erase(self): self._stdscr.erase() def refresh(self): self._stdscr.refresh() if __name__ == '__main__': main()
TacticalGoat/reddit
FlairTimer/flairtimer.py
Python
mit
3,977
0.026402
#/u/GoldenSights import praw import time import datetime import sqlite3 '''USER CONFIGURATION''' APP_ID = "" APP_SECRET = "" APP_URI = "" APP_REFRESH = "" # https://www.reddit.com/comments/3cm1p8/how_to_make_your_bot_use_oauth2/ USERAGENT = "" #This is a short description of what the bot does. For example "/u/GoldenSights' Newsletter Bot". SUBREDDIT = "GoldTesting" #This is the sub or list of subs to scan for new posts. For a single sub, use "sub1". For multiple subreddits, use "sub1+sub2+sub3+..." MAXPOSTS = 60 #This is how many posts you want to retrieve all at once. PRAW can download 100 at a time. WAIT = 30 #This is how many seconds you will wait between cycles. The bot is completely inactive during this time. DELAY = 86400 #This is the time, IN SECONDS, which the post will hold the active flair IGNOREMODS = False #Do you want the bot to ignore posts made by moderators? Use True or False (With capitals! No quotations!) IGNORESELFPOST = False #Do you want the bot to ignore selfposts? IGNORELINK = True #Do you want the bot to ignore linkposts? FLAIRACTIVE = "Active" CSSACTIVE = "active" #The flair text and css class assigned to unsolved posts. TITLEREQS = ['[',']'] #Every part of this list must be included in the title '''All done!''' WAITS = str(WAIT) try: import bot USERAGENT = bot.getaG() except ImportError: pass sql = sqlite3.connect('sql.db') print('Loaded SQL Database') cur = sql.cursor() cur.execute('CREATE TABLE IF NOT EXISTS oldposts(id TEXT)') print('Loaded Oldposts') sql.commit() r = praw.Reddit(USERAGENT) r.set_oauth_app_info(APP_ID, APP_SECRET, APP_URI) r.refresh_access_information(APP_REFRESH) def getTime(bool): timeNow = datetime.datetime.now(datetime.timezone.utc) timeUnix = timeNow.timestamp() if bool == False: return timeNow else: return timeUnix def scan(): print('Scanning ' + SUBREDDIT) subreddit = r.get_subreddit(SUBREDDIT) moderators = subreddit.get_moderators() mods = [] for moderator in moderators: mods.append(moderator.name) posts = subreddit.get_new(limit=MAXPOSTS) for post in posts: ctimes = [] pid = post.id ptitle = post.title.lower() try: pauthor = post.author.name except AttributeError: pauthor = '[deleted]' ptime = post.created_utc cur.execute('SELECT * FROM oldposts WHERE id=?', [pid]) if not cur.fetchone(): if (post.is_self == True and IGNORESELFPOST == False) or (post.is_self == False and IGNORELINK == False): if pauthor not in mods or IGNOREMODS == False: if all(char.lower() in ptitle for char in TITLEREQS): try: flair = post.link_flair_text.lower() except AttributeError:
flair = '' if flair == '': print(pid + ': No Flair') now = getTime(True) if (now - ptime) > DELAY: print('\tOld. Ignoring') cur.execute('INSERT INTO oldposts VALUES(?)', [pid]) else: print('\tAssigning Active Flair') post.set_flair(fla
ir_text=FLAIRACTIVE,flair_css_class=CSSACTIVE) elif flair == FLAIRACTIVE.lower(): print(pid + ': Active') now = getTime(True) if (now-ptime) > DELAY: print('\tOld. Removing Flair') post.set_flair(flair_text="",flair_css_class="") cur.execute('INSERT INTO oldposts VALUES(?)', [pid]) else: print('\tActive for ' + ('%.0f' % (DELAY-(now-ptime))) + ' more seconds') else: print(pid + ': Does not contain titlereq') cur.execute('INSERT INTO oldposts VALUES(?)', [pid]) if pauthor in mods and IGNOREMODS == True: print(pid + ', ' + pauthor + ': Ignoring Moderator') cur.execute('INSERT INTO oldposts VALUES(?)', [pid]) else: print(pid + ', ' + pauthor + ': Ignoring post') cur.execute('INSERT INTO oldposts VALUES(?)', [pid]) sql.commit() while True: try: scan() except Exception as e: print('An error has occured:', str(e)) sql.commit() print('Running again in ' + WAITS + ' seconds.\n') time.sleep(WAIT)
sonofeft/LazGUI
lazgui/laz_gui.py
Python
gpl-3.0
8,605
0.020686
#!/usr/bin/env python # -*- coding: ascii -*- r""" LazGUI helps to create Lazarus Pascal GUI project. LazGUI will place all of the required files for the Lazarus project into a subdirectory by project name. The project can be built using "lazbuild" that comes with a Lazarus install, or by opening the <project_name>.lpi file with the Lazarus IDE. LazGUI Copyright (C) 2016 Charlie Taylor This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. ----------------------- """ import os, sys import shutil here = os.path.abspath(os.path.dirname(__file__)) ref_proj_files = os.path.join( here, 'ref_proj_files' ) #print 'ref_proj_files =',ref_proj_files from lpi_wrapper import LPI_File from lps_wrapper import LPS_File from lpr_wrapper import LPR_File # for multi-file projects see LICENSE file for authorship info # for single file projects, insert following information __author__ = 'Charlie Taylor' __copyright__ = 'Copyright (c) 2016 Charlie Taylor' __license__ = 'GPL-3' exec( open(os.path.join( here,'_version.py' )).read() ) # creates local __version__ variable __email__ = "cet@appliedpython.com" __status__ = "3 - Alpha" # "3 - Alpha", "4 - Beta", "5 - Production/Stable" # # import statements here. (built-in first, then 3rd party, then yours) # # Code goes below. # Adjust docstrings to suite your taste/requirements. # class LazarusGUI(object): """LazGUI helps to create Lazarus Pascal GUI project.""" def __init__(self, project_name='project1', form1_obj=None, data_file_ext='proj_dat'): """Inits LazarusGUI""" self.project_name = str(project_name) self.data_file_ext= data_file_ext self.form_name_set = set() # save set of form names in lower case self.formL = [] if form1_obj is not None: self.add_form( form1_obj ) def add_form(self, form_obj): form_name = form_obj.form_name form_obj.set_laz_gui_obj( self ) # Don't allow duplicate form names while form_name.lower() in self.form_name_set: form_name = form_name + str( (len(self.formL) + 1) ) self.form_name_set.add( form_name.lower() ) self.formL.append( form_obj ) def save_project_files(self, path_name='', over_write_OK=False): if len(self.formL)==0: print 'Can NOT create project... No Forms have been added.' return targ_abs_path = os.path.abspath( path_name ) if os.path.isfile( targ_abs_path ): print 'Can NOT create project... The provided path_name is an existing file.' print 'Need to provide a directory name.' print 'Existing file =',targ_abs_path return if os.path.isdir( targ_abs_path ): if over_write_OK: print 'Using existing directory for Lazarus project.' print 'path_name =',targ_abs_path else: print 'Can NOT create project... The provided directory already exists.' print 'Enter a new directory name OR set parameter "over_write_OK=True".' 
print 'Existing directory =',targ_abs_path return else: os.mkdir( targ_abs_path ) print "created new Lazarus project directory:",targ_abs_path form1 = self.formL[0] lpi_obj = LPI_File( project_name=self.project_name, form1_name=form1.form_name ) lps_obj = LPS_File( project_name=self.project_name, form1_name=form1.form_name ) lpr_obj = LPR_File( project_name=self.project_name, form1_name=form1.form_name ) for f in self.formL[1:]: lpi_obj.add_form( new_form_name=f.form_name ) lps_obj.add_form( new_form_name=f.form_name ) lpr_obj.add_form( new_form_name=f.form_name ) # copy I/O Variable Get/Set, and required menu History files for copy_fname in ['get_set_io_var.pas', 'HistoryFiles.pas', 'HistoryLazarus.lrs']: src_fname = os.path.join( ref_proj_files, copy_fname ) targ_fname = os.path.join( targ_abs_path, copy_fname ) print 'Copying',src_fname,' --> ',targ_fname shutil.copy(src_fname, targ_fname) # Create Resource File src_fname = os.path.join( ref_proj_files, 'project1.res' ) targ_fname = os.path.join( targ_abs_path, '%s.res'%self.project_name ) print 'Copying',src_fname,' --> ',targ_fname shutil.copy(src_fname, targ_fname) # Create Icon src_fname = os.path.join( ref_proj_files, 'project1.ico' ) targ_fname = os.path.join( targ_abs_path, '%s.ico'%self.project_name ) print 'Copying',src_fname,' --> ',targ_fname shutil.copy(src_fname, targ_fname) # Create *.lpi file (i.e. ProjectOptions, Units, CompilerOptions, Debugging) targ_fname = os.path.join( targ_abs_path, '%s.lpi'%self.project_name ) print 'Saving --> ',targ_fname with open(targ_fname, 'w') as f: f.write( lpi_obj.file_contents() ) # Create *.lps file (i.e. ProjectSession, Units, PathDelim) targ_fname = os.path.join( targ_abs_path, '%s.lps'%self.project_name ) print 'Saving --> ',targ_fname with open(targ_fname, 'w') as f: f.write( lps_obj.file_contents() ) # Create *.lpr file (i.e. Pascal source for overall project) targ_fname = os.path.join( targ_abs
_path, '%s.lpr'%self.project_name ) print 'Saving --> ',targ_fname with open(targ_fname, 'w') as f: f.write( lpr_obj.file_contents() ) # Create *.pas and *.lfm for each of the Form units for form in self.formL: targ_fname = os.path.join( targ_abs_path, '%s.pas'%form.unit_name.lower()
) print 'Saving --> ',targ_fname with open(targ_fname, 'w') as f: f.write( form.pas_file_contents() ) targ_fname = os.path.join( targ_abs_path, '%s.lfm'%form.unit_name.lower() ) print 'Saving --> ',targ_fname with open(targ_fname, 'w') as f: f.write( form.lfm_file_contents() ) # Create *.bat file to compile and run project targ_fname = os.path.join( targ_abs_path, '%s.bat'%self.project_name ) print 'Saving --> ',targ_fname with open(targ_fname, 'w') as f: f.write( BAT_FILE_TEMPLATE.format( **self.__dict__ ) ) BAT_FILE_TEMPLATE = """rem delete any existing EXE file del {project_name}.exe lazbuild {project_name}.lpi rem Now try to run the EXE file {project_name}.exe """ if __name__ == '__main__': from form import Form from button import Button from labeled_edit import LabeledEdit from layout import Layout from layout import VStackPanel, HStackPanel Lay = VStackPanel(Left=10, Height=0, Top=10, Width=0, TopMargin=10, RightMargin=10, BottomMargin=10, LeftMargin=10) for i in xrange(3): B = Lay.add_widget( Button( widget_name='DoSompin_%i'%i, Left=41+i*5, Height=25, Top=42+i*5, Width=75+i*5, Caption=None, has_OnClick=True) ) print '#%i) bbox ='%i, B.BBox Lay.add_widget(LabeledEdit( label_text='Enter Diameter', widget_name='GetDiam', initial_value='4.56789012345678905678901234567890',
ardi69/pyload-0.4.10
pyload/plugin/account/StahnuTo.py
Python
gpl-3.0
991
0.008073
# -*- coding: utf-8 -*- import re from pyload.plugin.Account import Account class StahnuTo(Account): __name = "StahnuTo" __type = "account" __version = "0.05" __description = """StahnuTo account plugin""" __license = "GPLv3" __authors = [("zoidberg", "zoidberg@mujmail.cz")] def loadAccountInfo(self, user, req): html = req.load(
"http://www.st
ahnu.to/") m = re.search(r'>VIP: (\d+.*)<', html) trafficleft = self.parseTraffic(m.group(1)) if m else 0 return {"premium": trafficleft > 512, "trafficleft": trafficleft, "validuntil": -1} def login(self, user, data, req): html = req.load("http://www.stahnu.to/login.php", post={"username": user, "password": data['password'], "submit": "Login"}, decode=True) if not '<a href="logout.php">' in html: self.wrongPassword()
google-research/lag
libml/train_sr.py
Python
apache-2.0
5,712
0.002276
# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Super-resolution Training model. This an import only file to provide training helpers. """ import functools import os import numpy as np import tensorflow as tf from absl import flags from libml import utils, layers from libml.data import as_iterator from libml.train import Model, FLAGS, ModelPro flags.DEFINE_integer('scale', 4, 'Scale by which to increase resolution.') flags.DEFINE_string('downscaler', 'average', 'Downscaling method [average, bicubic].') class EvalSessionPro: def __init__(self, model, checkpoint_dir, **params): self.graph = tf.Graph() with self.graph.as_default(): self.global_step = tf.train.get_or_create_global_step() self.ops = model(**params) ckpt = utils.find_latest_checkpoint(checkpoint_dir, 'stage*/model.ckpt-*.meta') self.sess = tf.train.SingularMonitoredSession(checkpoint_filename_with_path=ckpt) class SRES(Model): """Super-Resolution base class.""" def __init__(self, train_dir, scale, downscaler, **kwargs): self.scale = scale self.downscaler = downscaler Model.__init__(self, train_dir, scale=scale, downscaler=downscaler, **kwargs) def experiment_name(self, **kwargs): args = [x + str(y) for x, y in sorted(kwargs.items()) if x not in {'scale', 'downscaler'}] return os.path.join('%s%dX' % (self.downscaler, self.scale), '_'.join([self.__class__.__name__] + args)) @property def log_scale(self): return utils.ilog2(self.scale) def downscale(self, x, scale=None, order=layers.NCHW): scale = scale or self.scale if scale <= 1: return x if self.downscaler == 'average': return layers.downscale2d(x, scale, order) elif self.downscaler == 'bicubic': return layers.bicubic_downscale2d(x, scale, order) else: raise ValueError('Unknown downscaler "%s"' % self.downscaler) def train_step(self, data, ops): x = next(data) self.sess.run(ops.train_op, feed_dict={ops.x: x['x']}) def make_samples(self, dataset, input_op, sres_op, batch=1, width=8, height=16, feed_extra=None): if 'test_hires' not in self.tmp: with dataset.graph.as_default(): it = iter(as_iterator(dataset.test.batch(width * height).take(1).repeat(), dataset.sess)) self.tmp.test_hires = next(it)['x'] hires = self.tmp.test_hires.copy() with tf.Graph().as_default(), tf.Session() as sess_new: lores = sess_new.run(self.downscale(hires)) pixelated = sess_new.run(layers.upscale2d(lores, self.scale)) images = np.concatenate( [ self.tf_sess.run(sres_op, feed_dict={ input_op: lores[x:x + batch], **(feed_extra or {})}) for x in range(0, lores.shape[0], batch) ], axis=0) images = images.clip(-1, 1) images = np.concatenate([hires, pixelated, images], axis=3) images = utils.images_to_grid(images.reshape((height, width) + images.shape[1:])) return images def add_summaries(self, dataset, ops, feed_extra=None, **kwargs): del kwargs feed_extra = feed_extra.copy() if feed_extra else {} if 'noise' in ops: feed_extra[ops.noise] = 0 def gen_images(): samples = self.make_samples(dataset, ops.y, ops.sres_op, FLAGS.batch, feed_extra=feed_extra) # Prevent summary scaling, force 
offset/ratio = 0/1 samples[-1, -1] = (-1, 0, 1) return samples samples = tf.py_func(gen_images, [], [tf.float32]) tf.summary.image('samples', samples) def model(self, latent, **kwargs): raise NotImplementedError class SRESPro(ModelPro, SRES): """Progressive Super-Resolution Setup."""
def eval_mode(self, dataset): assert self.eval is None log_scale = utils.ilog2(self.scal
e) model = functools.partial(self.model, dataset=dataset, total_steps=1, lod_start=log_scale, lod_stop=log_scale, lod_max=log_scale) self.eval = EvalSessionPro(model, self.checkpoint_dir, **self.params) print('Eval model %s at global_step %d' % (self.__class__.__name__, self.eval.sess.run(self.eval.global_step))) return self.eval def train_step(self, data, lod, ops): x = next(data) self.sess.run(ops.train_op, feed_dict={ops.x: x['x'], ops.lod: lod}) def add_summaries(self, dataset, ops, lod_fn, **kwargs): del kwargs def gen_images(): feed_extra = {ops.lod: lod_fn()} if 'noise' in ops: feed_extra[ops.noise] = 0 samples = self.make_samples(dataset, ops.y, ops.sres_op, FLAGS.batch, feed_extra=feed_extra) # Prevent summary scaling, force offset/ratio = 0/1 samples[-1, -1] = (-1, 0, 1) return samples samples = tf.py_func(gen_images, [], [tf.float32]) tf.summary.image('samples', samples)
facebookresearch/ParlAI
parlai/agents/random_candidate/random_candidate.py
Python
mit
2,698
0
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Simple agent which chooses a random label. Chooses from the label candidates if they are available. If candidates are not available, it repeats the label. """ from typing import Optional from parlai.core.params import ParlaiParser from parlai.core.opt import Opt import random from parlai.core.agents import Agent class RandomCandidateAgent(Agent): """ Agent returns random candidate if available or repeats the label. """ @classmethod def add_cmdline_args( cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None ) -> ParlaiParser: """ Add command line arguments for this agent. """ parser = parser.add_argument_group('RandomCandidateAgent Arguments') parser.add_argument( '--label_candidates_file', type=str, default=None, help='file of candidate responses to choose from', ) return parser def __init__(self, opt, shared=None): """ Initialize this agent. """ super().__init__(opt) self.id = 'RandomCandidateAgent' random.seed(42) if opt.get('label_candidates_file'): f = open(opt.get('label_candidates_file')) self.label_candidates = f.read().split('\n') def act(self): """ Generate response to last seen observation. Replies with a randomly selected candidate if label_candidates or a candidate file are available. Otherwise, replies with the label if they are available. Otherwise, replies with generic hardcoded responses if the agent has not observed any messages or if there are no replies to suggest. :returns: message dict with reply """ obs = self.observation if obs is None: return {'text': 'Nothing to reply to yet.'} reply = {} reply['id'] = self.getID() label_candidates = obs.get('label_candidates') if hasattr(self, 'label_candidates'): # override label candidates with candidate file if set label_candidates = self.label_candidates
if label_candidates: label_candidates = list(label_candidates) random.shuffle(label_candidates) reply['text_candidates'] = label_candidates reply['text'] = label_candidates[0] else:
# reply with I don't know. reply['text'] = "I don't know." return reply
cntnboys/410Lab6
bookmarks/main/views.py
Python
apache-2.0
1,325
0.020377
from django.shortcuts import render from django.shortcuts import render_to_response from djan
go.template import RequestContext from django.shortcuts import redirect from main.models import Link from main.models import Tag # Create your views here. def index(request): context = RequestContext(request) links = Link.objects.all() return render_to_response('main/index.html', {'links': links}, context) def tags(request): context = RequestContext(request) tags = Tag.objects.all() return render_to_response('main/tag
s.html', {'tags': tags}, context) def tag(request, tag_name): context = RequestContext(request) the_tag = Tag.objects.get(name=tag_name) links=the_tag.link_set.all() return render_to_response('main/index.html',{'links':links, 'tag_name': '#' + tag_name}, context) def add_link(request): context = RequestContext(request) if request.method == 'POST': url = request.POST.get("url","") tags = request.POST.get("tags","") title = request.POST.get("title","") tags = tags.split(',') l = Link.objects.get_or_create(title=title, url=url)[0] for x in tags: l.tags.add(Tag.objects.get_or_create(name=x)[0]) return redirect(index)
WPI-CS4341/CSP
main.py
Python
mit
5,541
0.002888
""" Written by Harry Liu (yliu17) and Tyler Nickerson (tjnickerson) """ import sys import os.path import pprint from classes.bag import Bag from classes.item import Item from classes.constraint import Constraint from classes.csp import CSP from classes.solver import Solver def main(): # Read command line arguments args = sys.argv[1:] # More than 1 argument supplied if len(args) > 1: # Get data inputfilename inputfilename = args[0] # Bags bags = {} # Items items = {} # Section tracker current_section = 0 # Read each line and add to the examples and output lists if os.path.isfile(inputfilename): with open(inputfilename, "r") as infile: for line in infile: # If the line is a comment, increment the section counter if line[:5].strip() == "#####": current_section += 1 else: # Split the line and remove all tabs, newlines, etc. s = [x.strip() for x in line.split(" ")] if current_section == 1: # Items name = s[0] weight = s[1] items[name] = Item(name, weight) elif current_section == 2: # Bags name = s[0] capacity = s[1] bags[name] = Bag(name,
capacity) elif current_section == 3: # Fitting limits lower_bound = s[0] upper_bound = s[1] for b in bags: constraint = Constraint(
Constraint.BAG_FIT_LIMIT, bags=[bags[b]], min_items=lower_bound, max_items=upper_bound) bags[b].constraints.append(constraint) elif current_section == 4: # Unary inclusive name = s[0] require_bags = [bags[k] for k in s[1:]] constraint = Constraint(Constraint.UNARY_CONSTRAINT_IN_BAGS, items=[ items[name]], bags=require_bags) items[name].constraints.append(constraint) elif current_section == 5: # Unary exclusive name = s[0] reject_bags = [bags[k] for k in s[1:]] constraint = Constraint(Constraint.UNARY_CONSTRAINT_NOT_IN_BAGS, items=[ items[name]], bags=reject_bags) items[name].constraints.append(constraint) elif current_section == 6: # Binary equals item1 = s[0] item2 = s[1] constraint = Constraint(Constraint.BINARY_CONSTRAINT_EQUALITY, items=[ items[item1], items[item2]]) for i in [item1, item2]: items[i].constraints.append(constraint) elif current_section == 7: # Binary not equals item1 = s[0] item2 = s[1] constraint = Constraint(Constraint.BINARY_CONSTRAINT_INEQUALITY, items=[ items[item1], items[item2]]) for i in [item1, item2]: items[i].constraints.append(constraint) elif current_section == 8: # Binary inclusive item1 = s[0] item2 = s[1] value1 = s[2] value2 = s[3] constraint = Constraint(Constraint.BINARY_CONSTRAINT_INCLUSIVITY, items=[ items[item1], items[item2]], bags=[bags[value1], bags[value2]]) items[item1].constraints.append(constraint) items[item2].constraints.append(constraint) csp = CSP(items, bags) solver = Solver() solution = solver.solve(csp) # Output the solution outputfilename = args[1] with open(outputfilename, 'w') as infile: if solution is not None: keys = list(solution.keys()) keys.sort() for bag in keys: total_weight = sum(items[x].weight for x in solution[bag]) infile.write(bag + " " + " ".join(solution[bag]) + "\n") infile.write ("number of items: " + str(len(solution[bag])) + "\n") infile.write ("total weight " + str(total_weight) + "/" + str(bags[bag].capacity) + "\n") infile.write ("wasted capacity: " + str(bags[bag].capacity - total_weight) + "\n") else: infile.write ("No solution!\n") else: # Throw error when cannot open file print("Input file does not exist.") else: # Show usage when not providing enough argument print("Usage: python main.py <inputfilename> <outputfilename") if __name__ == "__main__": main()
johnyf/gr1experiments
examples/bunny_many_goals/make_instances.py
Python
bsd-3-clause
2,595
0
#!/usr/bin/env python """Dump instances for bunny, in Promela and SlugsIn.""" import argparse import itertools im
port pprint import logging import re from tugs import utils log = logging.getLogger(__name__) INPUT_FILE = 'bunny.pml' PROMELA_PATH = 'pml/bunny_many_goals_{i}.txt' SLUGSIN_PATH = 'slugsin/bunny_many_goals_{i}.txt' def dump_promela(n, m): """Dump instances of Promela.""" for i in xrange(n, m): code = make_promela(i) promela_file = PR
OMELA_PATH.format(i=i) with open(promela_file, 'w') as f: f.write(code) log.info('dumped Promela for {i} masters'.format(i=i)) def dump_slugsin(n, m): for i in xrange(n, m): promela_file = PROMELA_PATH.format(i=i) with open(promela_file, 'r') as f: pml_code = f.read() slugsin_code = utils.translate_promela_to_slugsin(pml_code) slugsin_file = SLUGSIN_PATH.format(i=i) with open(slugsin_file, 'w') as f: f.write(slugsin_code) log.info('dumped SlugsIn for {i} masters'.format(i=i)) def make_promela(n): """Return Promela code for instance with size `n`.""" fname = INPUT_FILE with open(fname, 'r') as f: s = f.read() # set number of cells newline = '#define H {n}'.format(n=n) code = re.sub('#define H.*', newline, s) newline = '#define W {m}'.format(m=n-1) code = re.sub('#define W.*', newline, code) # add multiple weak fairness assumptions code += form_progress(n) return code def form_progress(n): """Return conjunction of LTL formulae for progress.""" g0 = ('[]<>((x == 0) && (y == {k}))'.format(k=k) for k in xrange(n)) g1 = ('[]<>((x == {n}) && (y == {k}))'.format(k=k, n=n) for k in xrange(n)) c = itertools.chain(g0, g1) prog = ' && '.join(c) return 'assert ltl { ' + prog + ' }' def main(): # log fh = logging.FileHandler('code_generator_log.txt', mode='w') log.addHandler(fh) log.setLevel(logging.DEBUG) # tugs log log1 = logging.getLogger('tugs.utils') log1.addHandler(fh) log1.setLevel(logging.DEBUG) # record env versions = utils.snapshot_versions() log.info(pprint.pformat(versions)) # args p = argparse.ArgumentParser() p.add_argument('--min', type=int, help='from this # of masters') p.add_argument('--max', type=int, help='to this # of masters') args = p.parse_args() n = args.min m = args.max + 1 dump_promela(n, m) dump_slugsin(n, m) if __name__ == '__main__': main()
multidis/bitQuant02
setup.py
Python
mit
464
0.043103
try: from setuptools import setup except ImportError: from distutils.core import setup
config = { 'description':'End to end solution for bitcoin data gathering, backtesting, and live trading', 'author': 'ross palmer', 'url':'http://rosspalmer.gi
thub.io/bitQuant/', 'license':'MIT', 'version': '0.2.10', 'install_requires': ['SQLAlchemy','pandas','numpy','scipy','PyMySQL'], 'packages': ['bitquant'], 'scripts': [], 'name':'bitquant' } setup(**config)
mscuthbert/abjad
abjad/tools/topleveltools/graph.py
Python
gpl-3.0
2,398
0.000834
# -*- encoding: utf-8 -*- import os import subprocess def graph( expr, image_format='pdf', layout='dot', graph_attributes=None, node_attributes=None, edge_attributes=None, **kwargs ): r'''Graphs `expr` with graphviz and opens resulting image in the default image viewer. :: >>> rtm_syntax = '(3 ((2 (2 1)) 2))' >>> rhythm_tree = rhythmtreetools.RhythmTreeParser()(rtm_syntax)[0] >>> print(rhythm_tree.pretty_rtm_format) (3 ( (2 ( 2 1)) 2)) :: >>> topleveltools.graph(rhythm_tree) # doctest: +SKIP Returns none. ''' from abjad import abjad_configuration from abjad.tools import systemtools if isinstance(expr, str): graphviz_format = expr else: assert hasattr(expr, '__graph__')
graphviz_graph = expr.__graph__(**kwargs) if gr
aph_attributes: graphviz_graph.attributes.update(graph_attributes) if node_attributes: graphviz_graph.node_attributes.update(node_attributes) if edge_attributes: graphviz_graph.edge_attributes.update(edge_attributes) graphviz_format = str(graphviz_graph) assert image_format in ('pdf', 'png') valid_layouts = ( 'circo', 'dot', 'fdp', 'neato', 'osage', 'sfdp', 'twopi', ) assert layout in valid_layouts message = 'cannot find `{}` command-line tool.' message = message.format(layout) message += ' Please download Graphviz from graphviz.org.' assert systemtools.IOManager.find_executable(layout), message ABJADOUTPUT = abjad_configuration['abjad_output_directory'] systemtools.IOManager._ensure_directory_existence(ABJADOUTPUT) dot_path = os.path.join( ABJADOUTPUT, systemtools.IOManager.get_next_output_file_name(file_extension='dot'), ) img_path = os.path.join(ABJADOUTPUT, dot_path.replace('dot', 'pdf')) with open(dot_path, 'w') as f: f.write(graphviz_format) command = '{} -v -T{} {} -o {}' command = command.format(layout, image_format, dot_path, img_path) subprocess.call(command, shell=True) pdf_viewer = abjad_configuration['pdf_viewer'] ABJADOUTPUT = abjad_configuration['abjad_output_directory'] systemtools.IOManager.open_file(img_path, pdf_viewer)
cedadev/ndg_xacml
ndg/xacml/parsers/etree/subjectmatchreader.py
Python
bsd-3-clause
979
0.003064
"""NDG XACML ElementTree based reader for subject match type NERC DataGrid """ __author__ = "P J Kershaw" __date__ = "16/03/10" __copyright__ = "(C) 2010 Science and Technology Facilities Council" __contac
t__ = "Philip.Kershaw@stfc.ac.uk" __license__ = "BSD - see LICENSE file in top-level directory" __contact__ = "Philip.Kershaw@stfc.ac.uk" __revision__ = "$Id$" from ndg.xacml.core.match import SubjectMatch from ndg.xacml.core.attributedesignator import SubjectAttributeDesignator from ndg.xacml.parsers.etree.matchreader import MatchReaderBase class SubjectMatchReader(MatchReaderBase): """ElementTree based parser for XACML SubjectMatch @cv
ar TYPE: XACML class type that this reader will read values into @type TYPE: abc.ABCMeta @cvar ATTRIBUTE_DESIGNATOR_TYPE: type for attribute designator sub-elements @type ATTRIBUTE_DESIGNATOR_TYPE: abc.ABCMeta """ TYPE = SubjectMatch ATTRIBUTE_DESIGNATOR_TYPE = SubjectAttributeDesignator
Kami/libcloud
libcloud/test/dns/test_auroradns.py
Python
apache-2.0
13,377
0.00015
# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and import sys import json from libcloud.common.types import ProviderError from libcloud.dns.drivers.auroradns import AuroraDNSDriver from libcloud.dns.drivers.auroradns import AuroraDNSHealthCheckType from libcloud.dns.types import RecordType from libcloud.dns.types import ZoneDoesNotExistError from libcloud.dns.types import ZoneAlreadyExistsError from libcloud.dns.types import RecordDoesNotExistError from libcloud.dns.base import Zone from libcloud.test import LibcloudTestCase from libcloud.test import MockHttp from libcloud.test import unittest from libcloud.test.file_fixtures import DNSFileFixtures from libcloud.test.secrets import DNS_PARAMS_AURORADNS from libcloud.utils.py3 import httplib class AuroraDNSDriverTests(LibcloudTestCase): def setUp(self): AuroraDNSDriver.connectionCls.conn_class = AuroraDNSDriverMockHttp AuroraDNSDriverMockHttp.type = None self.driver = AuroraDNSDriver(*DNS_PARAMS_AURORADNS) def test_403_status_code(self): AuroraDNSDriverMockHttp.type = "HTTP_FORBIDDEN" with self.assertRaises(ProviderError) as ctx: self.driver.list_zones() self.assertEqual(ctx.exception.value, "Authorization failed") self.assertEqual(ctx.exception.http_code, 403) def test_merge_extra_data(self): rdata = { 'name': 'localhost', 'type': RecordType.A, 'content': '127.0.0.1' } params = {'ttl': 900, 'prio': 0, 'health_check_id': None, 'disabled': False} for param in params: extra = { param: params[param] } data = self.driver._AuroraDNSDriver__merge_extra_data(rdata, extra) self.assertEqual(data['content'], '127.0.0.1') self.assertEqual(data['type'], RecordType.A) self.assertEqual(data[param], params[param]) self.assertEqual(data['name'], 'localhost') def test_res_to_record(self): res = {'id': 2, 'name': 'www', 'type': 'AAAA', 'content': '2001:db8:100', 'created': 1234, 'modified': 2345, 'disabled': False, 'ttl': 1800, 'prio': 10} zone = Zone(id=1, domain='example.com', type=None, ttl=60, driver=self.driver) record = self.driver._AuroraDNSDriver__res_to_record(zone, res) self.assertEqual(res['name'], record.name) self.assertEqual(res['ttl'], record.extra['ttl']) self.assertEqual(res['prio'], record.extra['priority']) self.assertEqual(res['type'], record.type) self.assertEqual(res['content'], record.data) self.assertEqual(zone, record.zone) self.assertEqual(self.driver, record.driver) def test_record_types(self): types = self.driver.list_record_types() self.assertEqual(len(types), 12) self.assertTrue(RecordType.A in types) self.assertTrue(RecordType.AAAA in types) self.assertTrue(RecordType.MX in types) self.assertTrue(RecordType.NS in types) self.assertTrue(RecordType.SOA in types) self.assertTrue(RecordType.TXT in types) self.assertTrue(RecordType.CNAME in types) self.assertTrue(RecordType.SRV in types) self.assertTrue(RecordType.DS in types) 
self.assertTrue(RecordType.SSHFP in types) self.assertTrue(RecordType.PTR in types) self.assertTrue(RecordType.TLSA in types) def test_list_zones(self): zones = self.driver.list_zones() self.assertEqual(len(zones), 2) for zone in zones: self.assertTrue(zone.domain.startswith('auroradns')) def test_create_zone(self): zone = self.driver.create_zone('example.com') self.assertEqual(zone.domain, 'example.com') def test_get_zone(self): zone = self.driver.get_zone('example.com') self.assertEqual(zone.domain, 'example.com') self.assertEqual(zone.id, 'ffb62570-8414-4578-a346-526b44e320b7') def test_delete_zone(self): zone = self.driver.get_zone('example.com') self.assertTrue(self.driver.delete_zone(zone)) def test_create_record(self): zone = self.driver.get_zone('example.com') record = zone.create_record(name='localhost', type=RecordType.A, data='127.0.0.1', extra={'ttl': 900}) self.assertEqual(record.id, '5592f1ff') self.assertEqual(record.name, 'localhost') self.assertEqual(record.data, '127.0.0.1') self.assertEqual(record.type, RecordType.A) self.assertEqual(record.extra['ttl'], 900) def test_get_record(self): zone = self.driver.get_zone('example.com') record = self.driver.get_record(zone.id, '5592f1ff') self.assertEqual(record.id, '5592f1ff') self.assertEqual(record.name, 'localhost') self.assertEqual(record.data, '127.0.0.1') self.assertEqual(record.type, RecordType.A) self.assertEqual(record.extra['ttl'], 900) self.assertEqual(record.extra['priority'], None) def test_update_record(self): ttl = 900 zone = self.driver.get_zone('example.com') record = self.driver.get_record(zone.id, '5592f1ff') record = record.update(extra={'ttl': ttl}) self.assertEqual(record.extra['ttl'], ttl) def test_delete_record(self): zone = self.driver.get_zone('example.com') record = self.driver.get_record(zone.id, '5592f1ff') self.assertTrue(record.delete()) def test_list_records(self): zone = self.driver.get_zone('example.com') for record in zone.list_records(): self.assertEqual(record.extra['ttl'], 3600) self.assertEqual(record.extra['disabled'], False) def test_get_zone_non_exist(self): try: self.driver.get_zone('nonexists.example.com') self.fail('expected a ZoneDoesNotExistError') except ZoneDoesNotExistError: pass except Exception: raise def test_delete_zone_non_exist(self): try: self.driver.delete_zone(Zone(id=1, domain='nonexists.example.com', type='NATIVE', driver=AuroraDNSDriver, ttl=3600))
self.fail('expected a ZoneDoesNotExistError') except ZoneDoesNotExistError: pass except Exception: raise def test_create_zone_already_exist(self): try: self.driver.create_zone('exists.example.com') self.fail('expected a ZoneAl
readyExistsError') except ZoneAlreadyExistsError: pass except Exception: raise def test_list_records_non_exist(self): try: self.driver.list_records(Zone(id=1, domain='nonexists.example.com', type='NATIVE', driver=AuroraDNSDriver, ttl=3600)) self.fail('expected a ZoneDoesNotExistError') except ZoneDoesNotExistError: pass except Exception: raise
jorge-marques/shoop
shoop_tests/front/test_simple_search.py
Python
agpl-3.0
1,843
0.00217
# -*- coding: utf-8 -*- # This file is part of Shoop. # # Copyright (c) 2012-2015, Shoop Ltd. All rights reserved. # # This source code is licensed under the AGPLv3 license found in the # LICENSE file in the root directory of this source tree. from django.utils import translation import pytest from shoop.front.apps.simple_search.views import get_search_product_ids, SearchView from shoop.testing.factories import get_default_product, get_default_shop, create_product from shoop.testing.utils import apply_request_middleware UNLIKELY_STRING = "TJiCrQWaGChYNathovfViXPWO" NO_RESULTS_FOUND_STRING = "No result
s found" @pytest.mark.django_db def test_simple_search_get_ids_works(rf): prod = get_default_product() bit = prod.name[:5] request = rf.get("/") assert prod.pk in get_search_product_ids(request, bit) assert prod.pk in get_search_product_ids(request, bit) #
Should use cache @pytest.mark.django_db def test_simple_search_view_works(rf): view = SearchView.as_view() prod = create_product(sku=UNLIKELY_STRING, shop=get_default_shop()) query = prod.name[:8] # This test is pretty cruddy. TODO: Un-cruddify this test. resp = view(apply_request_middleware(rf.get("/"))) assert query not in resp.rendered_content resp = view(apply_request_middleware(rf.get("/", {"q": query}))) assert query in resp.rendered_content @pytest.mark.django_db def test_simple_search_no_results(rf): with translation.override("xx"): # use built-in translation get_default_shop() view = SearchView.as_view() resp = view(apply_request_middleware(rf.get("/", {"q": UNLIKELY_STRING}))) assert NO_RESULTS_FOUND_STRING in resp.rendered_content resp = view(apply_request_middleware(rf.get("/"))) assert NO_RESULTS_FOUND_STRING not in resp.rendered_content
joshuahellier/PhDStuff
codes/thesisCodes/kmc/customAnalysis/DensHist.py
Python
mit
2,799
0.005359
import sys import numpy import math from foldyFloatList import foldyFloatList class OOBError(Exception): def __init__(self, value): self.value = value def __str__(self): return repr(self.value) from KMCLib.PluginInterfaces.KMCAnalysisPlugin import KMCAnalysisPlugin from KMCLib.Utilities.CheckUtilities import checkSequenceOfPositiveIntegers from KMCLib.Utilities.CheckUtilities import checkPositiveFloat from KMCLib.Utilities.CheckUtilities import checkPositiveInteger from KMCLib.Exceptions.Error import Error from KMCLib.Backend.Backend import MPICommons class DensHist(KMCAnalysisPlugin): def __init__(self, spec=None, inProc=None, outProc=None): self.__spec = spec msg = "The 'inProc' parameter must be given as a list of relevant input processes." self.__inProc = checkSequenceOfPositiveIntegers(inProc, msg) msg = "The 'outProc' parameter m
ust be given as a list of relevant output processes." self.__outProc = checkSequenceOfPositiveIntegers(outProc, msg) self.__initTime = 0.0 self.__lastTime = 0.0 self.__currentTime = 0.0 def setup(self, step, time, configuration): self.__initTime = time typeList = configuration.types() self.__histSize = len(typeList) self.__histogram = [] for i in range(0, self.__histSize): self.__histogram.append(
foldyFloatList()) total = 0 for i in typeList: if i in self.__spec: total += 1 self.__currTot = total self.__lastTime = time self.__currentTime = time def registerStep(self, step, time, configuration): self.__currentTime = time if configuration.latestEventProcess() in self.__inProc: self.__currTot += 1 if configuration.latestEventProcess() in self.__outProc: self.__currTot -= 1 if self.__currTot < 0 or self.__currTot > self.__histSize: raise OOBError(0) self.__histogram[self.__currTot].addValue(self.__currentTime - self.__lastTime) self.__lastTime = time def finalize(self): self.__lastTime = self.__currentTime self.__finalHist = [] totalWeight = foldyFloatList() for data in self.__histogram: temp = data.extractSum() totalWeight.addValue(temp) self.__finalHist.append(temp) ovTot = totalWeight.extractSum() for index in range(0, self.__histSize): self.__finalHist[index] = self.__finalHist[index]/ovTot def printResults(self, stream=sys.stdout): if MPICommons.isMaster(): for index in range(0, self.__histSize): stream.write(str(index)+" "+"{:.6E}".format(self.__finalHist[index])+"\n")
danielplohmann/apiscout
apiscout/db_builder/DatabaseBuilder.py
Python
bsd-2-clause
11,496
0.003392
#!/usr/bin/python ######################################################################## # Copyright (c) 2017 # Daniel Plohmann <daniel.plohmann<at>mailbox<dot>org> # All rights reserved. ######################################################################## # # This file is part of apiscout # # apiscout is free software: you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see # <http://www.gnu.org/licenses/>. # ######################################################################## import argparse import json import logging from operator import attrgetter import os import re import sys import platform import ctypes import pefile import config from ThreadedCommand import ThreadedCommand LOG = logging.getLogger(__name__) logging.basicConfig(level=logging.DEBUG, format="%(asctime)-15s %(message)s") def get_system_info(): platform_info = platform.uname() version_info = sys.getwindowsversion() if sys.version > '3': os_name = "%s %s %s (%s)" % (platform_info.system, platform_info.release, version_info.service_pack, platform_info.machine) os_version = platform_info.version else: os_name = "%s %s %s (%s)" % (platform_info[0], platform_info[2], version_info[4], platform_info[4]) os_version = platform_info[3] return os_name, os_version # courtesy of http://stackoverflow.com/a/16076661 def loword(dword): return dword & 0x0000ffff def hiword(dword): return dword >> 16 def get_product_version(pe): try: ms = pe.VS_FIXEDFILEINFO.ProductVersionMS ls = pe.VS_FIXEDFILEINFO.ProductVersionLS return "%d.%d.%d.%d" % (hiword(ms), loword(ms), hiword(ls), loword(ls)) except AttributeError: return "0.0.0.0" def check_aslr(): # first check for a potentially rebased user32.dll from ctypes import windll from ctypes import wintypes check_dlls = ["user32.dll", "kernel32.dll", "ntdll.dll"] offsets = [] is_aslr = False windll.kernel32.GetModuleHandleW.restype = wintypes.HMODULE windll.kernel32.GetModuleHandleW.argtypes = [wintypes.LPCWSTR] windll.kernel32.GetModuleFileNameW.restype = wintypes.DWORD windll.kernel32.GetModuleFileNameW.argtypes = [wintypes.HANDLE, wintypes.LPWSTR, wintypes.DWORD] for dll_name in check_dlls: h_module_base = windll.kernel32.GetModuleHandleW(dll_name) # next get the module's file path module_path = ctypes.create_unicode_buffer(255) windll.kernel32.GetModuleFileNameW(h_module_base, module_path, 255) # then the ImageBase from python.exe file pe = pefile.PE(module_path.value) pe_header_base_addr = pe.OPTIONAL_HEADER.ImageBase offsets.append(pe_header_base_addr - h_module_base) for dll_name, offset in zip(check_dlls, offsets): LOG.debug("Memory vs. 
File ImageBase offset (%s): 0x%x", dll_name, offset) is_aslr |= offset != 0 return is_aslr class DatabaseBuilder(object): def _extractPeExports(self, filepath): try: pe = pefile.PE(filepath) if hasattr(pe, "DIRECTORY_ENTRY_EXPORT"): dll_entry = {} dll_entry["base_address"] = pe.OPTIONAL_HEADER.ImageBase dll_entry["bitness"] = 32 if pe.FILE_HEADER.Machine == 0x14c else 64 dll_entry["version"] = get_product_version(pe) dll_entry["filepath"] = filepath dll_entry["aslr_offset"] = 0 dll_entry["exports"] = [] min_addr = sys.maxsize max_addr = 0 for exp in sorted(pe.DIRECTORY_ENTRY_EXPORT.symbols, key=attrgetter("address")): export_info = {} min_addr = min(pe.OPTIONAL_HEADER.ImageBase + exp.address, min_addr) max_addr = max(pe.OPTIONAL_HEADER.ImageBase + exp.address, max_addr) export_info["address"] = exp.address if exp.name == None: export_info["name"] = "None" else: export_info["name"] = exp.name.decode("utf-8") export_info["ordinal"] = exp.ordinal dll_entry["exports"].append(export_info) return dll_entry except Exception as exc: return None def _buildDllKey(self, dll_info): filename = os.path.basename(dll_info["filepath"]) return "{}_{}_{}_0x{:x}".format(dll_info["bitness"], dll_info["version"], filename, dll_info["base_address"]) def _isInFilter(self, target_dll, filter_dlls): # since we want to maintain compatibility with Python 2.7, we can't casefold - upper+lower should suffice though. for check_dll in filter_dlls: if target_dll.upper().lower() == check_dll.upper().lower(): return True return False def extractRecursively(self, paths, filter_dlls=False): api_count = 0 pe_count = 0 duplicate_count = 0 skipped_count = 0 num_hit_dlls = 0 api_db = {"dlls": {}} if paths is None: paths = config.DEFAULT_FOLDERS for base in paths: if not os.path.isdir(base): LOG.warn("%s is not a directory, skipping...", base) continue for root, _, files in os.walk(base): for fn in files: if filter_dlls and not self._isInFilter(fn, config.DLL_FILTER): skipped_count += 1 continue elif not (fn.lower().endswith(".dll") or fn.lower().endswith(".drv") or fn.lower().endswith(".mui")): continue pe_count += 1 LOG.info("processing: %s %s", root, fn) dll_summary = self._extractPeExports(root + os.sep + fn)
if dll_summary is not None: dll_key = self._buildDllKey(dll_summary) if dll_key not in api_db["dlls"]: api_db["dlls"][dll_key] = dll_summary num_hit_dlls += 1
api_count += len(dll_summary["exports"]) LOG.info("APIs: %d", len(dll_summary["exports"])) else: duplicate_count += 1 LOG.info("PEs examined: %d (%d duplicates, %d skipped)", pe_count, duplicate_count, skipped_count) LOG.info("Successfully evaluated %d DLLs with %d APIs", num_hit_dlls, api_count) api_db["os_name"], api_db["os_version"] = get_system_info() api_db["aslr_offsets"] = False api_db["num_dlls"] = num_hit_dlls api_db["num_apis"] = api_count api_db["crawled_paths"] = paths api_db["filtered"] = filter_dlls return api_db def extractAslrOffsets(self, api_db): LOG.info("Now check for ASLR...") if check_aslr(): LOG.info(" looks like ASLR is active, let's extract some offsets!") num_offsets = {32: 0, 64: 0} for dll_key in api_db["dlls"]: dll = api_db["dlls"][dll_key] if dll["bitness"] in [32, 64]: offset = self.getAslrOffsetForDll(dll) dll["aslr_offset"] = offset if offset: num_offsets[dll["bitness"]] += 1 LOG.info(
XertroV/nvbclient
nvbclient/tests.py
Python
mit
1,501
0.000666
import unittest import transaction from pyramid import testing from .models import DBSession class TestMyViewSuccessCondition(unittest.TestCase): def setUp(self): self.config = testing.setUp() from sqlalchemy import create_engine engine = create_engine('sqlite://') from .models import ( Base, MyModel, ) DBSession.configure(bind=engine) Base.metadata.create_all(engine) with transaction.manager: model = MyModel(name='one', value=55) DBSession.add(model) def tearDown(self): DBSession.remove() testing.tearDown() def test_passing_view(self): from .views import my_view request = testing.DummyRequest() info = my_vi
ew(request) self.assertEqual(info['one'].name, 'one') self.assertEqual(info['project'], 'nvb-client') class TestMyViewFailureCondition(unittest.TestCase): def setUp(self): self.config = testing.setUp() from sqlalchemy import create_engine engine = create_engine('sqlite://') from .models import ( Base, MyModel, ) DBSession.configure(bind=engine) def tearDown(self): DBSes
sion.remove() testing.tearDown() def test_failing_view(self): from .views import my_view request = testing.DummyRequest() info = my_view(request) self.assertEqual(info.status_int, 500)
suutari/shoop
shuup_tests/front/test_addressbook.py
Python
agpl-3.0
6,230
0.001605
import pytest
import re

from django.contrib.auth import get_
user_model
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse

from shuup.core.models import SavedAddress
from shuup.core.models import get_person_contact
from shuup.core.models._contacts import get_company_contact
fr
om shuup.testing.factories import get_default_shop, get_address
from shuup_tests.utils import SmartClient
from shuup_tests.utils.fixtures import regular_user, REGULAR_USER_PASSWORD, REGULAR_USER_USERNAME

regular_user = regular_user  # noqa


def default_address_data():
    return {
        "saved_address-title": "Fakerr",
        "saved_address-role": "1",
        "saved_address-status": "1",
        "address-name": "Derpy Test",
        "address-street": "Derp-street",
        "address-city": "Los Angeles",
        "address-region_code": "CA",
        "address-postal_code": "90000",
        "address-country": "US",
    }


def initialize_test(regular_user, person=True):
    client = SmartClient()
    get_default_shop()
    if person:
        contact = get_person_contact(regular_user)
    else:
        contact = get_company_contact(regular_user)
    client.login(username=REGULAR_USER_USERNAME, password=REGULAR_USER_PASSWORD)
    return client, contact


@pytest.mark.django_db
def test_addressbook_no_address(regular_user):
    client, contact = initialize_test(regular_user)
    addressbook_url = reverse("shuup:address_book")
    response, soup = client.response_and_soup(addressbook_url)
    assert not len(soup(text="Name:"))


@pytest.mark.django_db
def test_addressbook_has_addresses(regular_user):
    client, contact = initialize_test(regular_user)
    address = get_address()
    address.save()
    billing_name = address.name
    contact.default_billing_address = address
    contact.save()
    addressbook_url = reverse("shuup:address_book")
    soup = client.soup(addressbook_url)
    assert len(soup(text="Name:")) == 1
    elems = [p for p in soup.find_all("p") if p.text == "Name: %s" % billing_name]
    assert len(elems) == 1

    address = get_address(**{"name": "Kek Bur"})
    address.save()
    shipping_name = address.name
    contact.default_shipping_address = address
    contact.save()
    soup = client.soup(addressbook_url)
    elems = [p for p in soup.find_all("p") if p.text == "Name: %s" % billing_name]
    assert len(elems) == 1
    assert len(soup(text="Name:")) == 2
    elems = [p for p in soup.find_all("p") if p.text == "Name: %s" % shipping_name]
    assert len(elems) == 1


@pytest.mark.django_db
def test_addressbook_has_saved_addresses(regular_user):
    client, contact = initialize_test(regular_user)
    address = get_address()
    address.save()
    address_title = "TestAddress"
    sa = SavedAddress.objects.create(owner=contact, address=address, title=address_title)
    addressbook_url = reverse("shuup:address_book")
    soup = client.soup(addressbook_url)
    elems = [h for h in soup.find_all("h2") if h.text.strip() == address_title]
    assert len(elems) == 1
    assert len(soup(text="Name:")) == 1

    second_address_title = "TestAddress2"
    sa = SavedAddress.objects.create(owner=contact, address=address, title=second_address_title)
    soup = client.soup(addressbook_url)
    elems = [h for h in soup.find_all("h2") if h.text.strip() == second_address_title]
    assert len(elems) == 1
    assert len(soup(text="Name:")) == 2


@pytest.mark.django_db
def test_addressbook_addresses_create_and_edit(regular_user):
    client, contact = initialize_test(regular_user)
    new_address_url = reverse("shuup:address_book_new")
    soup = client.soup(new_address_url)
    data = default_address_data()
    response, soup = client.response_and_soup(new_address_url, data, "post")
    assert response.status_code == 302
    assert SavedAddress.objects.count() == 1
    assert SavedAddress.objects.first().owner == contact

    addressbook_url = reverse("shuup:address_book")
    soup = client.soup(addressbook_url)
    elems = [h for h in soup.find_all("h2") if h.text.strip() == data.get("saved_address-title")]
    assert len(elems) == 1
    assert len(soup(text="Name:")) == 1

    new_title = "Test Title"
    soup = client.soup(new_address_url)
    data.update({"saved_address-title": new_title})
    response, soup = client.response_and_soup(new_address_url, data, "post")
    assert response.status_code == 302
    assert SavedAddress.objects.count() == 2
    sa = SavedAddress.objects.last()
    assert sa.owner == contact
    assert sa.title == new_title

    soup = client.soup(addressbook_url)
    elems = [h for h in soup.find_all("h2") if h.text.strip() == new_title]
    assert len(elems) == 1
    assert len(soup(text="Name:")) == 2

    # edit old
    updated_title = "Updated Title"
    edit_url = reverse("shuup:address_book_edit", kwargs={"pk": sa.pk})
    soup = client.soup(edit_url)
    data.update({"saved_address-title": updated_title})
    response, soup = client.response_and_soup(edit_url, data, "post")
    assert response.status_code == 302
    assert SavedAddress.objects.count() == 2
    sa = SavedAddress.objects.last()
    assert sa.owner == contact
    assert sa.title == updated_title

    soup = client.soup(addressbook_url)
    elems = [h for h in soup.find_all("h2") if h.text.strip() == updated_title]
    assert len(elems) == 1
    assert len(soup(text="Name:")) == 2


@pytest.mark.django_db
def delete_address(regular_user):
    client, contact = initialize_test(regular_user)
    address = get_address()
    address.save()
    sa = SavedAddress.objects.create(owner=contact, address=address)
    delete_url = reverse("shuup:address_book_delete", kwargs={"pk":sa.pk})
    response, soup = client.response_and_soup(delete_url)
    assert response.status_code == 302
    assert "Cannot remove address" not in soup

    user = User.objects.create_user('john', 'doe@example.com', 'doepassword')
    contact2 = get_person_contact(user)
    address2 = get_address()
    address2.save()
    sa2 = SavedAddress.objects.create(owner=contact2, address=address2)
    response, soup = client.response_and_soup(delete_url)
    assert response.status_code == 302
    assert "Cannot remove address" in soup
hidat/audio_pipeline
audio_pipeline/test/References.py
Python
mit
3,286
0.018564
format_testing_audio = "audio_pipeline\\test\\test_files\\audio\\tag_test_files"
write_testing_audio = "audio_pipeline\\test\\test_files\\audio\\write_test_files"

release_mbids = "audio_pipeline/test/test_files/test_mbids/release_mbids.json"
artist_mbids = "audio_pipeline/test/test_files/test_mbids/artist_mbids.json"

mb_dir = "audio_pipeline/test/test_files/mb_lookups"

t1_tags = {'tracktotal': 12, 'album': 'Who Killed...... The Zutons?', 'encoder settings': '-compression-level-5', 'encoder': '(FLAC 1.2.1)', 'albumartist': 'The Zutons', 'label': 'Deltasonic', 'date': '2004-04-19', 'source': 'CD (Lossless)', 'discnumber': 1, 'accurateripdiscid': '012-0011f4ba-00a8233b-8809700c-4', 'batchid': '50024', 'encoded by': 'dBpoweramp Release 14.4', 'title': 'Confusion', 'accurateripresult': 'AccurateRip: Accurate (confidence 62) [37DEB629]', 'artist': 'The Zutons', 'tracknumber': 4, 'disctotal': 1, 'genre': 'Rock', 'mbid': '5560ffa9-3824-44f4-b2bf-a96ae4864187', 'length': '0:07', 'item_code': '8b3b7f33-4e8c-4146-90b7-96611863d133', 'obscenity': 'RED DOT', 'send to enco': 'yes', 'rotation status': 'heavy', 'style': 'Bluegrass'}

picard_tags = {'tracknumber': 6, 'totaltracks': 13, 'encoded by': 'dBpoweramp Release 14.4', 'media': 'CD', 'source': 'CD (Lossless)', 'releasestatus': 'official', 'script': 'Latn', 'accurateripresult': 'AccurateRip: Not in database 7CF59426',
'musicbrainz_trackid': '89715e73-cfa8-487f-8aa1-18c3b7d965b9', 'releasecountry': 'GB', 'mbid': '232775fc-277d-46e5-af86-5e01764abe5a', 'musicbrainz_releasetrackid': 'fe85af54-9982-34cc-9e0a-8d4d13a12350', 'disctotal': 1, 'artist': 'Rudi Zygadlo', 'discnumber': 1, 'artists': 'Rudi Zygadlo',
'albumartistsort': 'Zygadlo, Rudi', 'musicbrainz_albumartistid': '48f12b43-153e-42c3-b67c-212372cbfe2b', 'releasetype': 'album', 'batchid': '50024', 'accurateripdiscid': '013-0014462a-00cb7579-bf0a3e0d-6', 'tracktotal': 13, 'catalognumber': 'ZIQ320CD', 'artistsort': 'Zygadlo, Rudi', 'encoder': '(FLAC 1.2.1)', 'musicbrainz_releasegroupid': '06d97cd5-75a4-4ec8-afe3-1127b688c6ee', 'musicbrainz_artistid': '48f12b43-153e-42c3-b67c-212372cbfe2b', 'totaldiscs': 1, 'album': 'Tragicomedies', 'originaldate': '2012-09-17', 'label': 'Planet Mu', 'date': '2012-09-17', 'title': 'The Domino Quivers', 'albumartist': 'Rudi Zygadlo', 'encoder settings': '-compression-level-5', 'originalyear': '2012', 'length': '0:07', 'item_code': '89715e73-cfa8-487f-8aa1-18c3b7d965b9', 'obscenity': 'RED DOT'}

unknown_tags = {'accurateripresult': 'AccurateRip: Not in database 7A470C62', 'source': 'CD (Lossless) >> Perfect (Lossless) m4a', 'artist': 'Unknown Artist', 'disctotal': 1, 'tracktotal': 12, 'accurateripdiscid': '012-0010ae26-009c5221-8e08ec0c-4', 'encoded by': 'dBpoweramp Release 14.4', 'encoder': '(FLAC 1.2.1)', 'title': 'Track04', 'tracknumber': 4, 'discnumber': 1, 'length': '0:07'}

empty_tags = {}
tgquintela/ElectionsTools
ElectionsTools/cases/previous_elections_spain_analysis.py
Python
mit
2,527
0.004353
import numpy as np
import pandas as pd

from ElectionsTools.Seats_assignation import DHondt_assignation
from previous_elections_spain_parser import *

import os

pathfiles = '../data/spain_previous_elections_results/provincia/'
pathfiles = '/'.join(os.path.realpath(__file__).split('/')[:-1]+[pathfiles])

fles = [pathfiles+'PROV_02_197706_1.xlsx', pathfiles+'PROV_02_197903_1.xlsx',
        pathfiles+'PROV_02_198210_1.xlsx', pathfiles+'PROV_02_198606_1.xlsx',
        pathfiles+'PROV_02_198910_1.xlsx', pathfiles+'PROV_02_199306_1.xlsx',
        pathfiles+'PROV_02_199603_1.xlsx', pathfiles+'PROV_02_200003_1.xlsx',
        pathfiles+'PROV_02_200403_1.xlsx', pathfiles+'PROV_02_200803_1.xlsx',
        pathfiles+'PROV_02_201111_1.xlsx']

years = [1977, 1979, 1982, 1986, 1989, 1993, 1996, 2000, 2004, 2008, 2011]


def compute_diputes_DHont(filename):
    ## 1. Parse
    circ, parties, votes, diputes = parse_data_elecciones_esp(filename)
    circ_com, votes_com, dips_com = collapse_by_col(circ, votes, diputes, 0)
    circ_sp, votes_sp, dips_sp = collapse_by_col(circ, votes, diputes, None)
    votes_sp = votes_sp.reshape(1,len(parties))

    ## 2. Assignation objects
    assign = DHondt_assignation(diputes.sum(1))
    assign1 = DHondt_assignation(dips_com.sum(1))
    assign2 = DHondt_assignation(np.array([dips_sp.sum(0)]))

    ## 3
. Compute assignations
    d, price = assign.assignation(pd.DataFrame(votes, columns=
parties))
    d1, price1 = assign1.assignation(pd.DataFrame(votes_com, columns=parties))
    d2, price2 = assign2.assignation(pd.DataFrame(votes_sp, columns=parties))

    return d, d1, d2, parties


def prepare2export(d, d1, d2, parties):
    logi = np.logical_or(np.logical_or(d.sum(0)>0, d1.sum(0)>0), d2.sum(0)>0)
    parties = [parties[i] for i in np.where(logi)[0]]
    d, d1, d2 = d[:, logi].sum(0), d1[:, logi].sum(0), d2[:, logi].sum(0)
    return d, d1, d2, parties


def compute_all_year(year):
    filename = fles[years.index(year)]
    d, d1, d2, parties = compute_diputes_DHont(filename)
    exp_d, exp_d1, exp_d2, exp_parties = prepare2export(d, d1, d2, parties)
    return exp_d, exp_d1, exp_d2, exp_parties


def compute_table_all_years(year):
    d1, d2, d3, cols = compute_all_year(year)
    d1, d2, d3 = pd.DataFrame(d1), pd.DataFrame(d2), pd.DataFrame(d3)
    ind = ['Dhont_estado', 'Dhont_comunidad', 'Dhont_provincia']
    exp = pd.concat([d1.T, d2.T, d3.T], axis=0)
    exp.columns = cols
    exp.index = ind
    return exp
timothyclemansinsea/smc
src/smc-build/smc-ansible/export-stripe-to-marketing-project.py
Python
gpl-3.0
667
0.005997
#!/usr/bin
/env python3

# this copies over all files in admin0:~/stripe/ to the ~/stripe folder in the statistics project

import sys
import os
sys.path.insert(0, os.path.expanduser("~/bin/"))
os.chdir(os.path.join(os.environ['SMC_ROOT'], "smc-build/smc-ansible"))

# host of statistics project
from smc_rethinkdb import project_host
host = project_host("7561f68d-3d97-4530-b97e-68af2fb4ed13")

src = os.path.expanduser("~/stripe/")

# push to the project via ansible and set the p
ermissions
os.system('ansible %s -m copy -a "src=%s dest=/projects/7561f68d-3d97-4530-b97e-68af2fb4ed13/stripe/ owner=1078872008 group=1078872008 mode=u=rw,go=" -become' % (host, src))
myisabella/LearnPythonTheHardWay
ex19.py
Python
mit
712
0.002809
# Functions and Variables

def cheese_and_cra
ckers(cheese_count, boxes_of_crackers):
    print "You have %d cheeses!" % cheese_count
    print "You have %d boxes of crackers!" % boxes_of_crackers
    print "Man that's enough for a party!"
    print
"Get a blanket.\n" print "We can just give the function numbers directly:" cheese_and_crackers(20, 30) print "OR, we can use variable from our script:" amount_of_cheese = 10 amount_of_crackers = 50 cheese_and_crackers(amount_of_cheese, amount_of_crackers) print "We can even do math inside too:" cheese_and_crackers(10 + 20, 5 + 6) print "And we can combine the two, variables and math:" cheese_and_crackers(amount_of_cheese + 100, amount_of_crackers + 1000)
the-zebulan/CodeWars
tests/beta_tests/test_number_to_bytes.py
Python
mit
827
0
import unittest

from katas.beta.nu
mber_to_bytes import to_bytes


class NumberToBytesTestCase(unittest.TestCase):
    def test_equals(self):
        self.assertEqual(to_bytes(0), ['00000000'])

    def test_equals_2(self):
        self.assertEqual(to_bytes(1), ['00000001'])

    def test_equals_3(self):
        self.assertEqual(to_bytes(257), ['00000001', '00000001'])

    def test_equals_4(sel
f):
        self.assertEqual(to_bytes(0x101), ['00000001', '00000001'])

    def test_equals_5(self):
        self.assertEqual(to_bytes(0x000000000101), ['00000001', '00000001'])

    def test_equals_6(self):
        self.assertEqual(to_bytes(0xffff), ['11111111', '11111111'])

    def test_equals_7(self):
        self.assertEqual(to_bytes(0x1020304), ['00000001', '00000010', '00000011', '00000100'])
OVERLOADROBOTICA/OVERLOADROBOTICA.github.io
mail/formspree-master/formspree/__init__.py
Python
mit
77
0
# -*- coding: utf-8 -*-
from app import create_app

form
s_app = create_ap
p()