| repo_name (stringlengths 5–100) | path (stringlengths 4–231) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 6–947k) | score (float64, 0–0.34) | prefix (stringlengths 0–8.16k) | middle (stringlengths 3–512) | suffix (stringlengths 0–8.17k) |
|---|---|---|---|---|---|---|---|---|
| Sw4T/Warband-Development | mb_warband_module_system_1166/Module_system 1.166/compiler.py | Python | mit | 91,472 | 0.028982 |
import sys
sys.dont_write_bytecode = True
from traceback import format_exc as formatted_exception, extract_stack
from inspect import currentframe as inspect_currentframe, getmembers as inspect_getmembers
from os.path import split as path_split, exists as path_exists
from copy import deepcopy
get_globals = globals
get_locals = locals
headers_package = path_exists('./headers')
try:
if headers_package:
from headers.header_common import *
else:
from header_common import *
except:
print('\nError importing header_common.py file:\n\n%s' % formatted_exception())
if 'wait' in sys.argv: raw_input('Press Enter to finish>')
exit()
def parse_int(value):
if isinstance(value, list) or isinstance(value, tuple): return map(parse_int, value)
#if isinstance(value, VARIABLE) and value.is_static and (value.module is not None) and (value.module[5]): return long(value) & 0xffffffff
try:
if value.is_static and value.module[5]: return value.__long__() & 0xffffffff
except: pass
return long(value)
# Standard W.R.E.C.K. exception class. Used to differentiate Python-generated exceptions from internal exceptions, and to aggregate error information on different levels.
class MSException(Exception):
def formatted(self):
output = []
for index in xrange(len(self.args)):
prefix = ' ' * index
messages = self.args[index].strip().split('\n')
for message in messages:
output.append(prefix)
output.append(message.strip())
output.append('\n')
return ''.join(output)
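# Illustrative sketch (not part of the original compiler): each positional
# argument of MSException becomes one output line, indented one extra space
# per nesting level by formatted().
def _demo_msexception():
    try:
        raise MSException('compiling module_items.py', 'processing element 12', 'illegal value for flags')
    except MSException as error:
        print(error.formatted())
        # compiling module_items.py
        #  processing element 12
        #   illegal value for flags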
# Generic class which is used to quietly replace some pipe-joined lists in the game (particularly, item property list and troop attribute/skill/proficiency lists)
class AGGREGATE(dict):
def __or__(self, other):
if not other: return self
result = AGGREGATE(self)
for key, value in other.iteritems():
if type(value) == float: result[key] = max(result.get(key, 0.0), value)
else: result[key] = result.get(key, 0) | value
#result.update(other)
return result
__ror__ = __radd__ = __add__ = __or__
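# Illustrative sketch (not part of the original compiler): merging AGGREGATE
# maps ORs integer fields together and keeps the maximum of float fields,
# mirroring how the game's pipe-joined property lists combine.
def _demo_aggregate():
    merged = AGGREGATE({'flags': 0x1, 'weight': 2.5}) | {'flags': 0x4, 'weight': 1.0}
    return merged  # AGGREGATE({'flags': 0x5, 'weight': 2.5})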
# Standard operations to unparse certain vanilla bitmasks if they've been fed into the compiler
def unparse_item_aggregate(value):
if isinstance(value, AGGREGATE): return value
return AGGREGATE({
'weight': get_weight(value),
'head': get_head_armor(value),
'body': get_body_armor(value),
'leg': get_leg_armor(value),
'diff': get_difficulty(value),
'hp': get_hit_points(value) & 0x3ff, # patch for Native compiler glitch
'speed': get_speed_rating(value),
'msspd': get_missile_speed(value),
'size': get_weapon_length(value),
'qty': get_max_ammo(value),
'swing': get_swing_damage(value),
'thrust': get_thrust_damage(value),
'abundance': get_abundance(value),
})
def unparse_attr_aggregate(value):
if isinstance(value, AGGREGATE): return value
return AGGREGATE({
'str': value & 0xFF,
'agi': (value >> 8) & 0xFF,
'int': (value >> 16) & 0xFF,
'cha': (value >> 24) & 0xFF,
'level': (value >> level_bits) & level_mask
})
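# Illustrative sketch (not part of the original compiler): round-tripping a
# hand-packed attribute bitmask; 'level' comes out 0 here, assuming
# level_bits (imported from header_common) sits above the four attribute bytes.
def _demo_attr_roundtrip():
    packed = 10 | (12 << 8) | (9 << 16) | (7 << 24)
    return unparse_attr_aggregate(packed)
    # AGGREGATE({'str': 10, 'agi': 12, 'int': 9, 'cha': 7, 'level': 0})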
def unparse_wp_aggregate(value):
if isinstance(value, AGGREGATE): return value
return AGGREGATE([(i, (value >> (10*i)) & 0x3FF) for i in xrange(num_weapon_proficiencies)])
def unparse_terrain_aggregate(value):
value = str(value).lower()
if value[0:2] == '0x':
value = value[2:]
return AGGREGATE({
'terrain_seed': int('0x0%s' % value[-4:], 16),
'river_seed': int('0x0%s' % value[-12:-8], 16),
'flora_seed': int('0x0%s' % value[-20:-16], 16),
'size_x': int('0x0%s' % value[-29:-24], 16) & 0x3ff,
'size_y': (int('0x0%s' % value[-29:-24], 16) >> 10) & 0x3ff,
'valley': (int('0x0%s' % value[-39:-32], 16) >> 0) & 0x7f,
'hill_height': (int('0x0%s' % value[-39:-32], 16) >> 7) & 0x7f,
'ruggedness': (int('0x0%s' % value[-39:-32], 16) >> 14) & 0x7f,
'vegetation': (int('0x0%s' % value[-39:-32], 16) >> 21) & 0x7f,
'terrain': int('0x0%s' % value[-40:-39], 16),
'polygon_size': (int('0x0%s' % value[-41:-40], 16) & 0x3) + 2,
'disable_grass': (int('0x0%s' % value[-41:-40], 16) >> 2) & 0x1,
'shade_occlude': (int('0x0%s' % value[-32:-31], 16) >> 2) & 0x1,
'place_river': (int('0x0%s' % value[-32:-31], 16) >> 3) & 0x1,
'deep_water': (int('0x0%s' % value[-16:-15], 16) >> 3) & 0x1,
})
else:
value = long(value)
return AGGREGATE({
'terrain_seed': value & 0xffffffff,
'river_seed': (value >> 32) & 0x7fffffff,
'flora_seed': (value >> 64) & 0xffffffff,
'deep_water': (value >> 63) & 0x1,
})
# Basic W.R.E.C.K. class. Represents any valid game reference or object, including entity references, local & global variables, registers of all types, and mathematical expressions including all of the aforementioned data.
class VARIABLE(object):
operations = set(['+', '-', '*', '/', '%', '**', '<<', '>>', '&', '|', '^', 'neg', 'abs', 'val'])
references = None
is_expression = False
is_static = True
module = None
name = None
value = None
operation = None
operands = None
def __init__(self, module = None, name = None, value = None, operation = None, operands = None, static = True):
self.module = module
self.name = name
self.references = set()
if operation:
self.operation = operation
self.operands = operands
self.is_expression = True
if static:
for operand in operands:
if isinstance(operand, VARIABLE) and not operand.is_static: static = False
if operation not in VARIABLE.operations: raise SyntaxError('Illegal MSC expression: %r' % self)
else:
            self.value = value
self.is_static = static
def __add__(self, other): return VARIABLE(operands = [self, other], operation = '+')
def __sub__(self, other): return VARIABLE(operands = [self, other], operation = '-')
    def __mul__(self, other): return VARIABLE(operands = [self, other], operation = '*')
def __div__(self, other): return VARIABLE(operands = [self, other], operation = '/')
def __mod__(self, other): return VARIABLE(operands = [self, other], operation = '%')
def __pow__(self, other): return VARIABLE(operands = [self, other], operation = '**')
def __lshift__(self, other): return VARIABLE(operands = [self, other], operation = '<<')
def __rshift__(self, other): return VARIABLE(operands = [self, other], operation = '>>')
def __and__(self, other): return VARIABLE(operands = [self, other], operation = '&')
def __or__(self, other): return VARIABLE(operands = [self, other], operation = '|')
def __radd__(self, other): return VARIABLE(operands = [other, self], operation = '+')
def __rsub__(self, other): return VARIABLE(operands = [other, self], operation = '-')
def __rmul__(self, other): return VARIABLE(operands = [other, self], operation = '*')
def __rdiv__(self, other): return VARIABLE(operands = [other, self], operation = '/')
def __rmod__(self, other): return VARIABLE(operands = [other, self], operation = '%')
def __rpow__(self, other): return VARIABLE(operands = [other, self], operation = '**')
def __rlshift__(self, other): return VARIABLE(operands = [other, self], operation = '<<')
def __rrshift__(self, other): return VARIABLE(operands = [other, self], operation = '>>')
def __rand__(self, other): return VARIABLE(operands = [other, self], operation = '&')
def __ror__(self, other): return VARIABLE(operands = [other, self], operation = '|')
def __neg__(self): return VARIABLE(operands = [self], operation = 'neg')
def __pos__(self): return self
def __abs__(self): return VARIABLE(operands = [self], operation = 'abs')
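    # Illustrative note (not part of the original source): these overloads
    # never compute a number; an expression such as VARIABLE(name='x') * 2 + 1
    # builds a nested VARIABLE tree that is only folded to an integer later,
    # via __long__, once every referenced entity has a resolved value.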
def formatted_name(self):
if self.is_expression: return '<expr>'
if self.module is None: return '?.%s' % self.name
return '%s.%s' % (self.module[2], self.name)
def __str__(self):
return str(self.__long__())
def __repr__(self):
if self.is_expression:
if len(self.operands) == 1:
result = '%s(%r)' % (self.operation, self.operands[0])
else:
operands = [(('(%r)' if (isinstance(op, VARIABLE) and op.is_expression and (len(op.operands) > 1)) else '%r') % op) for op in self.operands]
result = (' %s ' % self.operation).join(operands)
else:
if self.is_static:
value = '?' if self.value is None else str(self.value)
result = '%s[#%s]' % (self.formatted_name(), value)
else:
va
| projectatomic/atomic-reactor | atomic_reactor/plugin.py | Python | bsd-3-clause | 24,497 | 0.002449 |
"""
Copyright (c) 2015 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
definition of the plugin system
plugins are supposed to be run when an image is built and we need to extract some information
"""
from __future__ import absolute_import
import copy
import logging
import os
import sys
import traceback
import imp
import datetime
import inspect
import time
from six import PY2
from collections import namedtuple
from atomic_reactor.build import BuildResult
from atomic_reactor.util import process_substitutions, exception_message
from dockerfile_parse import DockerfileParser
MODULE_EXTENSIONS = ('.py', '.pyc', '.pyo')
logger = logging.getLogger(__name__)
class AutoRebuildCanceledException(Exception):
"""Raised if a plugin cancels autorebuild"""
def __init__(self, plugin_key, msg):
self.plugin_key = plugin_key
self.msg = msg
def __str__(self):
return 'plugin %s canceled autorebuild: %s' % (self.plugin_key, self.msg)
class PluginFailedException(Exception):
""" There was an error during plugin execution """
class BuildCanceledException(Exception):
"""Build was canceled"""
class InappropriateBuildStepError(Exception):
"""Requested build step is not appropriate"""
class Plugin(object):
""" abstract plugin class """
# unique plugin identification
# output of this plugin can be found in results specified with this key,
# same thing goes for input: use this key for providing input for this plugin
key = None
# by default, if plugin fails (raises exc), execution continues
is_allowed_to_fail = True
def __init__(self, *args, **kwargs):
"""
constructor
"""
self.log = logging.getLogger("atomic_reactor.plugins." + self.key)
self.args = args
self.kwargs = kwargs
def __str__(self):
return "%s" % self.key
def __repr__(self):
return "Plugin(key='%s')" % self.key
def run(self):
"""
        each plugin has to implement this method -- it is what actually runs the plugin
response from a build plugin is kept and used in json result response like this:
results[plugin.key] = plugin.run()
input plugins should emit build json with this method
"""
raise NotImplementedError()
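# Illustrative sketch (not part of atomic-reactor): a minimal plugin that
# satisfies the contract above; the runner would keep its return value as
# results['hello_world'].
class HelloWorldPlugin(Plugin):
    key = 'hello_world'

    def run(self):
        self.log.info("saying hello")
        return {'greeting': 'hello'}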
class BuildPlugin(Plugin):
"""
abstract plugin class: base for build plugins, it is
flavored with ContainerTasker and BuildWorkflow instances
"""
def __init__(self, tasker, workflow, *args, **kwargs):
"""
constructor
:param tasker: ContainerTasker instance
:param workflow: DockerBuildWorkflow instance
:param args: arguments from user input
:param kwargs: keyword arguments from user input
"""
self.tasker = tasker
self.workflow = workflow
super(BuildPlugin, self).__init__(*args, **kwargs)
def is_in_orchestrator(self):
"""
Check if the configuration this plugin is part of is for
an orchestrator build or a worker build.
:return: True if orchestrator build, False if worker build
"""
return self.workflow.is_orchestrator_build()
class PluginsRunner(object):
def __init__(self, plugin_class_name, plugins_conf, *args, **kwargs):
"""
constructor
:param plugin_class_name: str, name of plugin class to filter (e.g. 'PreBuildPlugin')
:param plugins_conf: list of dicts, configuration for plugins
"""
self.plugins_results = getattr(self, "plugins_results", {})
self.plugins_conf = plugins_conf or []
self.plugin_files = kwargs.get("plugin_files", [])
self.plugin_classes = self.load_plugins(plugin_class_name)
self.available_plugins = self.get_available_plugins()
def load_plugins(self, plugin_class_name):
"""
load all available plugins
:param plugin_class_name: str, name of plugin class (e.g. 'PreBuildPlugin')
:return: dict, bindings for plugins of the plugin_class_name class
"""
        # imp.find_module('atomic_reactor') doesn't work
plugins_dir = os.path.join(os.path.dirname(__file__), 'plugins')
logger.debug("loading plugins from dir '%s'", plugins_dir)
files = [os.path.join(plugins_dir, f)
for f in os.listdir(plugins_dir)
if f.endswith(".py")]
if self.plugin_files:
logger.debug("loading additional plugins from files '%s'", self.plugin_files)
files += self.plugin_files
plugin_class = globals()[plugin_class_name]
plugin_classes = {}
for f in files:
module_name = os.path.basename(f).rsplit('.', 1)[0]
# Do not reload plugins
if module_name in sys.modules:
f_module = sys.modules[module_name]
else:
try:
logger.debug("load file '%s'", f)
f_module = imp.load_source(module_name, f)
except (IOError, OSError, ImportError, SyntaxError) as ex:
logger.warning("can't load module '%s': %s", f, ex)
continue
for name in dir(f_module):
binding = getattr(f_module, name, None)
try:
# if you try to compare binding and PostBuildPlugin, python won't match them
# if you call this script directly b/c:
# ! <class 'plugins.plugin_rpmqa.PostBuildRPMqaPlugin'> <= <class
# '__main__.PostBuildPlugin'>
# but
# <class 'plugins.plugin_rpmqa.PostBuildRPMqaPlugin'> <= <class
# 'atomic_reactor.plugin.PostBuildPlugin'>
is_sub = issubclass(binding, plugin_class)
except TypeError:
is_sub = False
if binding and is_sub and plugin_class.__name__ != binding.__name__:
plugin_classes[binding.key] = binding
return plugin_classes
def create_instance_from_plugin(self, plugin_class, plugin_conf):
"""
        create an instance of a plugin using the plugin class and the configuration passed in for it
:param plugin_class: plugin class
:param plugin_conf: dict, configuration for plugin
:return:
"""
plugin_instance = plugin_class(**plugin_conf)
return plugin_instance
    def on_plugin_failed(self, plugin=None, exception=None):
pass
def save_plugin_timestamp(self, plugin, timestamp):
pass
def save_plugin_duration(self, plugin, duration):
pass
def get_available_plugins(self):
"""
check requested plugins availability
and handle missing plugins
:return: list of namedtuples, runnable plugins data
"""
available_plugins = []
        PluginData = namedtuple('PluginData', 'name, plugin_class, conf, is_allowed_to_fail')
for plugin_request in self.plugins_conf:
plugin_name = plugin_request['name']
try:
plugin_class = self.plugin_classes[plugin_name]
except KeyError:
if plugin_request.get('required', True):
msg = ("no such plugin: '%s', did you set "
"the correct plugin type?") % plugin_name
exc = PluginFailedException(msg)
self.on_plugin_failed(plugin_name, exc)
logger.error(msg)
raise exc
else:
# This plugin is marked as not being required
logger.warning("plugin '%s' requested but not available",
plugin_name)
continue
plugin_is_allowed_to_fail = plugin_request.get('is_allowed_to_fail',
getattr(plugin_class,
"is_
| anish/buildbot | master/buildbot/status/__init__.py | Python | gpl-2.0 | 714 | 0 |
from buildbot.status import build
from buildbot.status import builder
from buildbot.status import buildrequest
from buildbot.status import buildset
from buildbot.status import master
# styles.Versioned requires this, as it keys the version numbers on the fully
# qualified class name; see master/buildbot/test/regressions/test_unpickling.py
build.BuildStatus.__module__ = 'buildbot.status.builder'
# add all of these classes to builder; this is a form of late binding to allow
# circular module references among the status modules
builder.BuildSetStatus = buildset.BuildSetStatus
builder.Status = master.Status
builder.BuildStatus = build.BuildStatus
builder.BuildRequestStatus = buildrequest.BuildRequestStatus
| sonaht/ansible | lib/ansible/modules/network/ios/ios_logging.py | Python | gpl-3.0 | 9,798 | 0.000714 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
#
# This file is part of Ansible by Red Hat
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'core'}
DOCUMENTATION = """
---
module: ios_logging
version_added: "2.4"
author: "Trishna Guha (@trishnag)"
short_description: Manage logging on network devices
description:
- This module provides declarative management of logging
    on Cisco IOS devices.
options:
dest:
description:
- Destination of the logs.
    choices: ['on', 'host', 'console', 'monitor', 'buffered']
name:
description:
- If value of C(dest) is I(file) it indicates file-name,
      for I(user) it indicates username and for I(host) it indicates
the host name to be notified.
size:
description:
      - Size of buffer. The acceptable value is in the range from 4096 to
4294967295 bytes.
facility:
description:
- Set logging facility.
level:
description:
- Set logging severity levels.
collection:
description: List of logging definitions.
purge:
description:
- Purge logging not defined in the collections parameter.
default: no
state:
description:
- State of the logging configuration.
default: present
choices: ['present', 'absent']
"""
EXAMPLES = """
- name: configure host logging
ios_logging:
dest: host
name: 172.16.0.1
state: present
- name: remove host logging configuration
ios_logging:
dest: host
name: 172.16.0.1
state: absent
- name: configure console logging level and facility
ios_logging:
dest: console
facility: local7
level: debugging
state: present
- name: enable logging to all
ios_logging:
dest : on
- name: configure buffer size
ios_logging:
dest: buffered
size: 5000
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always
type: list
sample:
- logging facility local7
- logging host 172.16.0.1
"""
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ios import get_config, load_config
from ansible.module_utils.ios import ios_argument_spec, check_args
def validate_size(value, module):
if value:
        if not 4096 <= value <= 4294967295:
module.fail_json(msg='size must be between 4096 and 4294967295')
else:
return value
def map_obj_to_commands(updates, module):
commands = list()
want, have = updates
for w in want:
dest = w['dest']
name = w['name']
size = w['size']
facility = w['facility']
level = w['level']
state = w['state']
del w['state']
if state == 'absent' and w in have:
if dest == 'host':
commands.append('no logging host {}'.format(name))
elif dest:
commands.append('no logging {}'.format(dest))
else:
module.fail_json(msg='dest must be among console, monitor, buffered, host, on')
if facility:
commands.append('no logging facility {}'.format(facility))
if state == 'present' and w not in have:
if facility:
commands.append('logging facility {}'.format(facility))
if dest == 'host':
commands.append('logging host {}'.format(name))
elif dest == 'on':
commands.append('logging on')
elif dest == 'buffered' and size:
commands.append('logging buffered {}'.format(size))
else:
dest_cmd = 'logging {}'.format(dest)
if level:
dest_cmd += ' {}'.format(level)
commands.append(dest_cmd)
return commands
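# Illustrative sketch (not part of the original module): one desired entry
# diffed against an empty device state yields the CLI line the module would
# push; `module` is only consulted on the error path, so None suffices here.
def _demo_map_obj_to_commands():
    want = [{'dest': 'buffered', 'name': None, 'size': '5000',
             'facility': None, 'level': None, 'state': 'present'}]
    return map_obj_to_commands((want, []), None)  # ['logging buffered 5000']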
def parse_facility(line):
match = re.search(r'logging facility (\S+)', line, re.M)
if match:
facility = match.group(1)
else:
facility = 'local7'
return facility
def parse_size(line, dest):
size = None
if dest == 'buffered':
match = re.search(r'logging buffered (\S+)', line, re.M)
if match:
try:
int_size = int(match.group(1))
except ValueError:
int_size = None
if int_size:
if isinstance(int_size, int):
size = str(match.group(1))
else:
size = str(4096)
return size
def parse_name(line, dest):
    if dest == 'host':
match = re.search(r'logging host (\S+)', line, re.M)
if match:
            name = match.group(1)
else:
name = None
return name
def parse_level(line, dest):
level_group = ('emergencies', 'alerts', 'critical', 'errors', 'warnings',
'notifications', 'informational', 'debugging')
if dest == 'host':
level = 'debugging'
else:
match = re.search(r'logging {} (\S+)'.format(dest), line, re.M)
if match:
if match.group(1) in level_group:
level = match.group(1)
else:
level = 'debugging'
else:
level = 'debugging'
return level
def map_config_to_obj(module):
obj = []
dest_group = ('console', 'host', 'monitor', 'buffered', 'on')
data = get_config(module, flags=['| section logging'])
for line in data.split('\n'):
        match = re.search(r'logging (\S+)', line, re.M)
        # skip lines that carry no 'logging' statement instead of crashing
        if not match:
            continue
        if match.group(1) in dest_group:
            dest = match.group(1)
        else:
            dest = None
obj.append({'dest': dest,
'name': parse_name(line, dest),
'size': parse_size(line, dest),
'facility': parse_facility(line),
'level': parse_level(line, dest)})
return obj
def map_params_to_obj(module):
obj = []
if 'aggregate' in module.params and module.params['aggregate']:
for c in module.params['aggregate']:
d = c.copy()
if d['dest'] != 'host':
d['name'] = None
if 'state' not in d:
d['state'] = module.params['state']
if 'facility' not in d:
d['facility'] = module.params['facility']
if 'level' not in d:
d['level'] = module.params['level']
if d['dest'] == 'buffered':
if 'size' in d:
d['size'] = str(validate_size(d['size'], module))
elif 'size' not in d:
d['size'] = str(4096)
else:
pass
if d['dest'] != 'buffered':
d['size'] = None
obj.append(d)
else:
if module.params['dest'] != 'host':
module.params['name'] = None
if module.params['dest'] == 'buffered':
if not module.params['size']:
module.params['size'] = str(4096)
else:
module.params['size'] = None
if module.params['size'] is None:
obj.append({
'dest': module.params['dest'],
'name': module.params['name'],
'size': module.params['size'],
'facility': module.params['facility'],
'level': module.params['level'],
'state': module.params['state']
})
else:
obj.append({
| berkeley-stat159/project-iota | code/utils/conv_response/combine_convo_point_script.py | Python | bsd-3-clause | 1,565 | 0.008946 |
import matplotlib.pyplot as plt
import numpy as np
from sys import argv
f1 = argv[1] # task001_run001
block_convo = np.array([])
full_convo = np.array([])
block_num = [1,4,5]
full_num = range(1,7,1)
"""
block_list = ['task001_run001/cond001.txt', 'task001_run001/cond004.txt', 'task001_run001/cond005.txt']
"""
block_list = []
for i in block_num:
block_list.append(f1 + '/cond00' + str(i) + '.txt')
"""
full_list = ['task001_run001/cond001.txt', 'task001_run001/cond002.txt', 'task001_run001/cond003.txt', 'task001_run001/cond004.txt',
             'task001_run001/cond005.txt', 'task001_run001/cond006.txt']
"""
full_list = []
for i in full_num:
full_list.append(f1 + '/cond00' + str(i) + '.txt')
for i in block_list:
block_convo = np.append(block_convo, np.loadtxt('../../../data/sub001/onsets/' + i))
for i in full_list:
full_convo = np.append(full_convo, np.loadtxt('../../../data/sub001/onsets/' + i))
block_time = block_convo[range(0, len(block_convo), 3)]
block_val = block_convo[range(2, len(block_convo), 3)]
full_time = full_convo[range(0, len(full_convo), 3)]
full_val = full_convo[range(2, len(full_convo), 3)]
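# Note (an assumption about the onset-file layout, not stated in the script):
# each cond00*.txt row holds an (onset time, duration, amplitude) triple, so
# the flattened arrays above are sliced with stride 3 -- offset 0 for times
# and offset 2 for amplitudes.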
plt.figure(0)
plt.plot(block_time, block_val, '.')
plt.xlabel('Experiment Time')
plt.ylabel('Study condition amplitude')
plt.title('Block Model Condition')
plt.savefig('../../../data/convo/' + f1 + '_block_points.png')
plt.figure(1)
plt.plot(full_time, full_val, '.')
plt.xlabel('Experiment Time')
plt.ylabel('Study condition amplitude')
plt.title('Full Model Condition')
plt.savefig('../../../data/convo/' + f1 + '_full_points.png')
| SymbiFlow/fasm | update_version.py | Python | isc | 3,846 | 0 |
#!/usr/bin/env python3
import platform
import subprocess
import sys
VERSION_FILE = 'fasm/version.py'
VERSION_FILE_TEMPLATE = '''\
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2017-2022 F4PGA Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
# ** WARNING **
# This file is auto-generated by the update_version.py script.
# ** WARNING **
version_str = "{version}"
version_tuple = {version_tuple}
try:
from packaging.version import Version as V
pversion = V("{version}")
except ImportError:
pass
git_hash = "{git_hash}"
git_describe = "{git_describe}"
git_msg = """\\
{git_msg}
"""
'''
GIT = 'git'
if platform.system() == 'Windows':
GIT = 'git.exe'
def get_hash():
cmd = [GIT, 'rev-parse', 'HEAD']
try:
        return subprocess.check_output(cmd).decode('utf-8').strip()
except OSError:
print(cmd)
raise
def get_describe():
cmd = [
GIT, 'describe', '--tags', 'HEAD', '--match', 'v*', '--exclude', '*-r*'
]
try:
return subprocess.check_output(cmd).decode('utf-8').strip()
except OSError:
print(cmd)
raise
def get_msg():
cmd = [GIT, 'log', '-1', 'HEAD']
try:
data = subprocess.check_output(cmd).decode('utf-8')
except OSError:
print(cmd)
raise
return '\n'.join(line.rstrip() for line in data.split('\n'))
def create_version_tuple(git_describe):
"""
>>> t = '''\\
... v0.0
... v0.0.0
... v1.0.1-265-g5f0c7a7
... v0.0-7004-g1cf70ea2
... '''
>>> for d in t.splitlines():
... v = create_version_tuple(d)
... print((create_version_str(v), v))
('0.0', (0, 0, None))
('0.0.0', (0, 0, 0, None))
('1.0.1.post265', (1, 0, 1, 265))
('0.0.post7004', (0, 0, 7004))
"""
vtag = git_describe.strip()
if vtag.startswith('v'):
vtag = vtag[1:]
vbits = vtag.split('.')
vpost = [None]
if '-' in vbits[-1]:
vend = vbits.pop(-1).split('-')
assert len(vend) == 3, (vtag, vbits, vend)
assert len(vend[0]) > 0, (vtag, vbits, vend)
vbits.append(vend.pop(0))
vpost = [int(vend.pop(0))]
assert vend[-1].startswith('g'), (vtag, vbits, vend, vpost)
vbits = [int(i) for i in vbits]
vbits.extend(vpost)
return tuple(vbits)
def create_version_str(version_tuple):
vbits = [str(i) for i in version_tuple]
if version_tuple[-1] is None:
vbits.pop(-1)
else:
vbits[-1] = 'post' + vbits[-1]
return '.'.join(vbits)
def update_version_py(args):
output = VERSION_FILE_TEMPLATE.format(**args)
old = ''
try:
with open(VERSION_FILE) as f:
old = f.read()
except IOError as e:
print(e)
if old != output:
with open(VERSION_FILE, 'w') as f:
f.write(output)
print('Updated {}'.format(VERSION_FILE))
def main(args):
git_hash = get_hash()
git_describe = get_describe()
git_msg = get_msg()
version_tuple = create_version_tuple(git_describe)
version = create_version_str(version_tuple)
update_version_py(locals())
return 0
if __name__ == "__main__":
import doctest
failure_count, test_count = doctest.testmod()
if failure_count > 0:
sys.exit(-1)
sys.exit(main(sys.argv))
| cpausmit/Kraken | filefi/024/writeCfg.py | Python | mit | 6,916 | 0.005928 |
#!/usr/bin/env python
"""
Re-write config file and optionally convert to python
"""
__revision__ = "$Id: writeCfg.py,v 1.1 2011/09/19 21:41:44 paus Exp $"
__version__ = "$Revision: 1.1 $"
import getopt
import imp
import os
import pickle
import sys
import xml.dom.minidom
from random import SystemRandom
from ProdCommon.CMSConfigTools.ConfigAPI.CfgInterface import CfgInterface
import FWCore.ParameterSet.Types as CfgTypes
MyRandom = SystemRandom()
class ConfigException(Exception):
"""
Exceptions raised by writeCfg
"""
def __init__(self, msg):
Exception.__init__(self, msg)
self._msg = msg
return
def __str__(self):
return self._msg
def main(argv) :
"""
writeCfg
- Read in existing, user supplied pycfg or pickled pycfg file
- Modify job specific parameters based on environment variables and arguments.xml
- Write out pickled pycfg file
required parameters: none
optional parameters:
--help : help
--debug : debug statements
"""
# defaults
inputFileNames = None
parentFileNames = None
debug = False
_MAXINT = 900000000
try:
opts, args = getopt.getopt(argv, "", ["debug", "help"])
except getopt.GetoptError:
print main.__doc__
sys.exit(2)
try:
CMSSW = os.environ['CMSSW_VERSION']
parts = CMSSW.split('_')
CMSSW_major = int(parts[1])
CMSSW_minor = int(parts[2])
CMSSW_patch = int(parts[3])
except (KeyError, ValueError):
msg = "Your environment doesn't specify the CMSSW version or specifies it incorrectly"
raise ConfigException(msg)
# Parse command line options
for opt, arg in opts :
if opt == "--help" :
print main.__doc__
sys.exit()
elif opt == "--debug" :
debug = True
# Parse remaining parameters
try:
fileName = args[0]
outFileName = args[1]
except IndexError:
print main.__doc__
sys.exit()
# Read in Environment, XML and get optional Parameters
nJob = int(os.environ.get('NJob', '0'))
preserveSeeds = os.environ.get('PreserveSeeds','')
incrementSeeds = os.environ.get('IncrementSeeds','')
# Defaults
maxEvents = 0
skipEvents = 0
firstEvent = -1
compHEPFirstEvent = 0
firstRun = 0
# FUTURE: Remove firstRun
firstLumi = 0
dom = xml.dom.minidom.parse(os.environ['RUNTIME_AREA']+'/arguments.xml')
for elem in dom.getElementsByTagName("Job"):
if nJob == int(elem.getAttribute("JobID")):
if elem.getAttribute("MaxEvents"):
maxEvents = int(elem.getAttribute("MaxEvents"))
if elem.getAttribute("SkipEvents"):
skipEvents = int(elem.getAttribute("SkipEvents"))
if elem.getAttribute("FirstEvent"):
firstEvent = int(elem.getAttribute("FirstEvent"))
if elem.getAttribute("FirstRun"):
firstRun = int(elem.getAttribute("FirstRun"))
if elem.getAttribute("FirstLumi"):
firstLumi = int(elem.getAttribute("FirstLumi"))
generator = str(elem.getAttribute('Generator'))
inputFiles = str(elem.getAttribute('InputFiles'))
parentFiles = str(elem.getAttribute('ParentFiles'))
lumis = str(elem.getAttribute('Lumis'))
# Read Input python config file
handle = open(fileName, 'r')
try: # Nested form for Python < 2.5
try:
print "Importing .py file"
cfo = imp.load_source("pycfg", fileName, handle)
cmsProcess = cfo.process
except Exception, ex:
msg = "Your pycfg file is not valid python: %s" % str(ex)
raise ConfigException(msg)
finally:
handle.close()
cfg = CfgInterface(cmsProcess)
# Set parameters for job
print "Setting parameters"
inModule = cfg.inputSource
if maxEvents:
cfg.maxEvents.setMaxEventsInput(maxEvents)
if skipEvents:
inModule.setSkipEvents(skipEvents)
# Set "skip events" for various generators
if generator == 'comphep':
cmsProcess.source.CompHEPFirstEvent = CfgTypes.int32(firstEvent)
elif generator == 'lhe':
cmsProcess.source.skipEvents = CfgTypes.untracked(CfgTypes.uint32(firstEvent))
cmsProcess.source.firstEvent = CfgTypes.untracked(CfgTypes.uint32(firstEvent+1))
elif firstEvent != -1: # (Old? Madgraph)
        cmsProcess.source.firstEvent = CfgTypes.untracked(CfgTypes.uint32(firstEvent))
if inputFiles:
inputFileNames = inputFiles.split(',')
inModule.setFileNames(*inputFileNames)
# handle parent files if needed
if parentFiles:
parentFileNames = parentFiles.split(',')
        inModule.setSecondaryFileNames(*parentFileNames)
if lumis:
if CMSSW_major < 3: # FUTURE: Can remove this check
print "Cannot skip lumis for CMSSW 2_x"
else:
lumiRanges = lumis.split(',')
inModule.setLumisToProcess(*lumiRanges)
# Pythia parameters
if (firstRun):
inModule.setFirstRun(firstRun)
if (firstLumi):
inModule.setFirstLumi(firstLumi)
# Check if there are random #'s to deal with
if cfg.data.services.has_key('RandomNumberGeneratorService'):
print "RandomNumberGeneratorService found, will attempt to change seeds"
from IOMC.RandomEngine.RandomServiceHelper import RandomNumberServiceHelper
ranGenerator = cfg.data.services['RandomNumberGeneratorService']
randSvc = RandomNumberServiceHelper(ranGenerator)
incrementSeedList = []
preserveSeedList = []
if incrementSeeds:
incrementSeedList = incrementSeeds.split(',')
if preserveSeeds:
preserveSeedList = preserveSeeds.split(',')
# Increment requested seed sets
for seedName in incrementSeedList:
curSeeds = randSvc.getNamedSeed(seedName)
newSeeds = [x+nJob for x in curSeeds]
randSvc.setNamedSeed(seedName, *newSeeds)
preserveSeedList.append(seedName)
# Randomize remaining seeds
randSvc.populate(*preserveSeedList)
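        # Note (not in the original source): incremented seed names were
        # appended to preserveSeedList above, and populate() reseeds every
        # engine *except* those named -- so preserved and incremented seeds
        # survive while all remaining engines get fresh random seeds.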
# Write out new config file
outFile = open(outFileName,"w")
outFile.write("import FWCore.ParameterSet.Config as cms\n")
outFile.write("import pickle\n")
outFile.write("pickledCfg=\"\"\"%s\"\"\"\n" % pickle.dumps(cmsProcess))
outFile.write("process = pickle.loads(pickledCfg)\n")
outFile.close()
if (debug):
print "writeCfg output (May not be exact):"
print "import FWCore.ParameterSet.Config as cms"
print cmsProcess.dumpPython()
if __name__ == '__main__' :
exit_status = main(sys.argv[1:])
sys.exit(exit_status)
| crezefire/angle | src/tests/deqp_support/generate_case_lists.py | Python | bsd-3-clause | 1,684 | 0.003563 |
#!/usr/bin/python
#
# Copyright 2015 The ANGLE Project Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# generate_case_lists.py:
# Helper script for updating the dEQP case list files, stored in the repo.
# Generally only used when the dEQP config changes, or when we roll dEQP.
import subprocess
import sys
import os
import shutil
import gzip
# TODO(jmadill): other platforms
os_suffix = '.exe'
build_dir = os.path.join('build', 'Debug_x64')
def run_deqp(deqp_exe):
subprocess.call([deqp_exe, '--deqp-runmode=txt-caselist', '--deqp-gl-context-type=null'])
# This stuff is all hard-coded for now. If we need more versatility we can
# make some options into command line arguments with default values.
script_dir = os.path.dirname(sys.argv[0])
path_to_deqp_exe = os.path.join('..', '..', build_dir)
deqp_data_path = os.path.join('third_party', 'deqp', 'data')
os.chdir(os.path.join(script_dir, '..'))
run_deqp(os.path.join(path_to_deqp_exe, 'angle_deqp_gles2_tests' + os_suffix))
run_deqp(os.path.join(path_to_deqp_exe, 'angle_deqp_gles3_tests' + os_suffix))
run_deqp(os.path.join(path_to_deqp_exe, 'angle_deqp_egl_tests' + os_suffix))
def compress_case_list(case_file):
with open(os.path.join(deqp_data_path, case_file + '.txt')) as in_handle:
data = in_handle.read()
in_handle.close()
with gzip.open(os.path.join('deqp_support', case_file + '.txt.gz'), 'wb') as out_handle:
out_handle.write(data)
out_handle.close()
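# Note (assumption: the '#!/usr/bin/python' shebang above means Python 2):
# `data` read from the text file is a byte string, so writing it straight to
# the gzip handle opened in 'wb' works; Python 3 would need data.encode()
# or opening the case list in binary mode.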
compress_case_list('dEQP-GLES2-cases')
compress_case_list('dEQP-GLES3-cases')
compress_case_list('dEQP-EGL-cases')
| alfa-jor/addon | plugin.video.alfa/lib/python_libtorrent/python_libtorrent/functions.py | Python | gpl-3.0 | 9,266 | 0.012089 |
#-*- coding: utf-8 -*-
'''
python-libtorrent for Kodi (script.module.libtorrent)
Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
import os, sys
import xbmc, xbmcgui, xbmcaddon
from net import HTTP
from core import filetools ### Alfa
__libbaseurl__ = "https://github.com/DiMartinoXBMC/script.module.libtorrent/raw/master/python_libtorrent"
#__settings__ = xbmcaddon.Addon(id='script.module.libtorrent')
#__version__ = __settings__.getAddonInfo('version')
#__plugin__ = __settings__.getAddonInfo('name') + " v." + __version__
#__icon__=os.path.join(xbmc.translatePath('special://home'), 'addons',
# 'script.module.libtorrent', 'icon.png')
#__settings__ = xbmcaddon.Addon(id='plugin.video.alfa') ### Alfa
__version__ = '1.1.17' ### Alfa
__plugin__ = "python-libtorrent v.1.1.7" ### Alfa
__icon__=os.path.join(xbmc.translatePath('special://home'), 'addons',
'plugin.video.alfa', 'icon.png') ### Alfa
#__language__ = __settings__.getLocalizedString ### Alfa
#from python_libtorrent.platform_pulsar import get_platform, get_libname ### Alfa
from lib.python_libtorrent.python_libtorrent.platform_pulsar import get_platform, get_libname ### Alfa
def log(msg):
try:
xbmc.log("### [%s]: %s" % (__plugin__,msg,), level=xbmc.LOGNOTICE )
except UnicodeEncodeError:
xbmc.log("### [%s]: %s" % (__plugin__,msg.encode("utf-8", "ignore"),), level=xbmc.LOGNOTICE )
except:
xbmc.log("### [%s]: %s" % (__plugin__,'ERROR LOG',), level=xbmc.LOGNOTICE )
def getSettingAsBool(setting):
__settings__ = xbmcaddon.Addon(id='plugin.video.alfa') ### Alfa
return __settings__.getSetting(setting).lower() == "true"
class LibraryManager():
def __init__(self, dest_path, platform):
self.dest_path = dest_path
self.platform = platform
self.root=os.path.dirname(os.path.dirname(__file__))
        ver1, ver2, ver3 = platform['version'].split('.') ### Alfa: rest of the method
try:
ver1 = int(ver1)
ver2 = int(ver2)
except:
pass
if ver1 >= 1 and ver2 >= 2:
global __libbaseurl__
__libbaseurl__ = 'https://github.com/alfa-addon/alfa-repo/raw/master/downloads/libtorrent'
def check_exist(self):
for libname in get_libname(self.platform):
if not filetools.exists(os.path.join(self.dest_path,libname)):
return False
return True
def check_update(self):
need_update=False
for libname in get_libname(self.platform):
if libname!='liblibtorrent.so':
self.libpath = os.path.join(self.dest_path, libname)
self.sizepath=os.path.join(self.root, self.platform['system'], self.platform['version'], libname+'.size.txt')
size=str(os.path.getsize(self.libpath))
size_old=open( self.sizepath, "r" ).read()
if size_old!=size:
need_update=True
return need_update
def update(self):
if self.check_update():
for libname in get_libname(self.platform):
self.libpath = os.path.join(self.dest_path, libname)
filetools.remove(self.libpath)
self.download()
def download(self):
__settings__ = xbmcaddon.Addon(id='plugin.video.alfa') ### Alfa
filetools.mkdir(self.dest_path)
for libname in get_libname(self.platform):
dest = os.path.join(self.dest_path, libname)
log("try to fetch %s" % libname)
url = "%s/%s/%s/%s.zip" % (__libbaseurl__, self.platform['system'], self.platform['version'], libname)
if libname!='liblibtorrent.so':
try:
self.http = HTTP()
self.http.fetch(url, download=dest + ".zip", progress=False) ### Alfa
log("%s -> %s" % (url, dest))
xbmc.executebuiltin('XBMC.Extract("%s.zip","%s")' % (dest, self.dest_path), True)
filetools.remove(dest + ".zip")
except:
text = 'Failed download %s!' % libname
xbmc.executebuiltin("XBMC.Notification(%s,%s,%s,%s)" % (__plugin__,text,750,__icon__))
else:
filetools.copy(os.path.join(self.dest_path, 'libtorrent.so'), dest, silent=True) ### Alfa
dest_alfa = os.path.join(xbmc.translatePath(__settings__.getAddonInfo('Path')), \
'lib', libname) ### Alfa
filetools.copy(dest, dest_alfa, silent=True) ### Alfa
dest_alfa = os.path.join(xbmc.translatePath(__settings__.getAddonInfo('Profile')), \
'custom_code', 'lib', libname) ### Alfa
filetools.copy(dest, dest_alfa, silent=True) ### Alfa
return True
    def android_workaround(self, new_dest_path): ### Alfa (entire method)
import subprocess
for libname in get_libname(self.platform):
libpath=os.path.join(self.dest_path, libname)
size=str(os.path.getsize(libpath))
new_libpath=os.path.join(new_dest_path, libname)
if filetools.exists(new_libpath):
new_size=str(os.path.getsize(new_libpath))
if size != new_size:
filetools.remove(new_libpath)
if filetools.exists(new_libpath):
try:
command = ['su', '-c', 'rm', '%s' % new_libpath]
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output_cmd, error_cmd = p.communicate()
log('Comando ROOT: %s' % str(command))
                except:
                    log('Sin PERMISOS ROOT: %s' % str(command))
if not filetools.exists(new_libpath):
log('Deleted: (%s) %s -> (%s) %s' %(size, libpath, new_size, new_libpath))
if not filetools.exists(new_libpath):
filetools.copy(libpath, new_libpath, silent=True) ### ALFA
log('Copying... %s -> %s' %(libpath, new_libpath))
if not filetools.exists(new_libpath):
try:
command = ['su', '-c', 'cp', '%s' % libpath, '%s' % new_libpath]
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output_cmd, er
| RobbieClarken/python3-microstacknode | tests/test_display.py | Python | gpl-3.0 | 2,175 | 0.002299 |
#!/usr/bin/env python3
import os
import sys
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, parentdir)
import time
import unittest
import microstacknode.hardware.display.ssd1306
from microstacknode.hardware.display.font import (FourByFiveFont,
BlockFont)
from microstacknode.hardware.display.sprite import (Sprite,
CharSprite,
StringSprite)
class TestSSD1306(unittest.TestCase):
# def setUp(self):
# self.display = microstacknode.hardware.display.ssd1306.SSD1306()
# self.display.init()
# @unittest.skip('')
# def test_character_printing(self):
# char_sprite = CharSprite('a', FourByFiveFont())
# self.display.draw_sprite(0, 0, char_sprite)
# @unittest.skip('')
# def test_character_printing(self):
# str_sprite = StringSprite('ALPHABET', 'R', MinecraftiaFont())
# self.display.draw_sprite(0, 0, str_sprite)
# @unittest.skip('')
# def test_rectangle(self):
# sprite = Sprite(10, 16)
# sprite.draw_rectangle(0, 0, 10, 16, 1)
# sprite.draw_rectangle(1, 1, 5, 5)
# self.display.draw_sprite(0, 0, sprite)
# # time.sleep(1)
# # sprite.invert_vertical()
# # self.display.draw_sprite(0, 0, sprite)
# # time.sleep(1)
# # sprite.invert_horizontal()
# # self.display.draw_sprite(0, 0, sprite)
# time.sleep(1)
    #     sprite.rotate90(3)
# self.display.clear_display()
# self.display.draw_sprite(0, 0, sprite)
# # time.sleep(1)
# # sprite.rotate90()
# # self.display.clear_display()
# # self.display.draw_sprite(0, 0, sprite)
# # time.sleep(1)
# # sprite.rotate90()
# # self.display.clear_display()
# # self.display.draw_sprite(0, 0, sprite)
def test_set_pixel(self):
        with microstacknode.hardware.display.ssd1306.SSD1306() as ssd1306:
ssd1306.set_pixel(0, 0, 1)
if __name__ == "__main__":
unittest.main()
| NicolasPresta/ReconoBook | reconobook_train.py | Python | mit | 5,180 | 0.002511 |
# coding=utf-8
# ==============================================================================
"""Entrenamiento del modelo"""
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import reconobook_modelo
import reconobook_eval
from reconobook_dataset import ReconoBookData
from datetime import datetime
import os.path
import time
import numpy as np
import shutil
import config
# ==============================================================================
FLAGS = tf.app.flags.FLAGS
# ==============================================================================
def train(dataset):
with tf.Graph().as_default():
        # Define the variable that holds the current step.
global_step = tf.Variable(0, trainable=False)
        # Get images and labels.
images, labels = reconobook_modelo.train_inputs(dataset, FLAGS.train_batch_size)
        # Given the images, compute the probability of each image belonging to each class.
logits = reconobook_modelo.inference(images, FLAGS.keep_drop_prob)
        # Compute the loss.
loss = reconobook_modelo.loss(logits, labels)
        # Define the training step
train_op = reconobook_modelo.train(loss, global_step)
        # Create a saver that will store our model
saver = tf.train.Saver(tf.global_variables(), max_to_keep=FLAGS.saver_max_to_keep)
# Build an initialization operation to run below.
init = tf.global_variables_initializer()
        # Define the general session configuration
config = tf.ConfigProto()
config.log_device_placement = FLAGS.log_device_placement
config.allow_soft_placement = FLAGS.allow_soft_placement
        # Create the session
sess = tf.Session(config=config)
sess.run(init)
        # Start the reader queues
tf.train.start_queue_runners(sess=sess)
        # Create the op that saves the summary for later visualization in TensorBoard
run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
summary_op = tf.summary.merge_all()
summary_writer = tf.summary.FileWriter(FLAGS.summary_dir_train, sess.graph)
for step in range(FLAGS.train_max_steps):
start_time = time.time()
sess.run([train_op],
run_metadata=run_metadata,
options=run_options)
duration = time.time() - start_time
            # Print progress
if step % FLAGS.steps_to_imprimir_avance == 0:
num_examples_per_step = FLAGS.train_batch_size
examples_per_sec = num_examples_per_step / duration
sec_per_batch = float(duration)
loss_value = sess.run(loss)
format_str = '%s: step %d, loss = %.2f (%.1f examples/sec; %.3f sec/batch)'
print(format_str % (datetime.now(), step, loss_value, examples_per_sec, sec_per_batch))
            # Save the summary so it can be viewed in TensorBoard
if step % FLAGS.steps_to_guardar_summary == 0:
summary_str = sess.run(summary_op)
summary_writer.add_run_metadata(run_metadata, 'step%d' % step)
summary_writer.add_summary(summary_str, step)
print("---> Guardado Summary Train ")
            # Save the model in its current state and evaluate it on the 3 data sets
if step % FLAGS.steps_to_guardar_checkpoint == 0 or (step + 1) == FLAGS.train_max_steps:
checkpoint_path = os.path.join(FLAGS.checkpoint_dir, 'model.ckpt')
saver.save(sess, checkpoint_path, global_step=step)
print("---> Guardado Checkpoint")
print("--- ---- ---- ---- ---")
reconobook_eval.evaluate('train', FLAGS.eval_num_examples_mini)
print("--- ---- ---- ---- ---")
reconobook_eval.evaluate('validation', FLAGS.eval_num_examples_mini)
print("--- ---- ---- ---- ---")
reconobook_eval.evaluate('test', FLAGS.eval_num_examples_mini)
print("--- ---- ---- ---- ---")
print("---> Guardado Summary Eval ")
def main(_):
dataset = ReconoBookData(subset='train')
assert dataset.data_files()
    # create the summary_dir_train directory if it doesn't exist; if it does, delete it and create it again
if not os.path.exists(FLAGS.summary_dir_train):
        os.mkdir(FLAGS.summary_dir_train)
else:
shutil.rmtree(FLAGS.summary_dir_train)
os.mkdir(FLAGS.summary_dir_train)
    # create the checkpoint_dir directory if it doesn't exist; if it does, delete it and create it again
if not os.path.exists(FLAGS.checkpoint_dir):
os.mkdir(FLAGS.checkpoint_dir)
else:
shutil.rmtree(FLAGS.checkpoint_dir)
os.mkdir(FLAGS.checkpoint_dir)
train(dataset)
if __name__ == '__main__':
tf.app.run()
| MyRobotLab/pyrobotlab | service/OculusRift.py | Python | apache-2.0 | 73 | 0.027397 |
# start the service
oculusrift = Runtime.start("oculusrift","OculusRift")
| brownplt/k3 | dj-resume/resume/generate.py | Python | apache-2.0 | 3,738 | 0.016854 |
import resume.models as rmod
import random
import logging
from django.http import HttpResponse
from datetime import date
logger = logging.getLogger('default')
def generate(request):
cs_objs = rmod.Department.objects.filter(shortname='cs')
if len(cs_objs) == 0:
logger.info('created cs dept')
cs = rmod.Department(name='Computer Science', shortname='cs', lastChange=0,\
        headerImage='', logoImage='', resumeImage='', headerBgImage='',\
brandColor='blue', contactName='Donald Knuth', contactEmail='test@example.com',\
techEmail='tech@example.com')
cs.save()
else:
logger.info('used pre-existing cs dept')
cs = cs_objs[0]
    ct_objs = rmod.ComponentType.objects.filter(short='ta')
if len(ct_objs) == 0:
logger.info('created component type')
ct = rmod.ComponentType(type='contactlong', name='type a', short='ta', department=cs)
ct.save()
else:
logger.info('used existing component type')
ct = ct_objs[0]
ct_objs = rmod.ComponentType.objects.filter(short='stmt')
if len(ct_objs) == 0:
logger.info('created component type')
ct = rmod.ComponentType(type='statement', name='Research Statement', short='stmt', department=cs)
ct.save()
else:
logger.info('used existing component type')
ct = ct_objs[0]
auth_objs = rmod.AuthInfo.objects.all()
if len(auth_objs) == 0:
return HttpResponse("No auth_info objects to use")
auth = auth_objs[0]
pos_objs = rmod.ApplicantPosition.objects.filter(name='pos1')
if len(pos_objs) == 0:
logger.info('created app position')
pos = rmod.ApplicantPosition(department=cs, name='pos1', shortform='p1',\
autoemail=False)
pos.save()
else:
logger.info('used existing app position')
pos = pos_objs[0]
a_objs = rmod.Applicant.objects.filter(auth=auth)
if len(a_objs) == 0:
logger.error('ERROR: created applicant')
a = rmod.Applicant(auth=auth, firstname='john', lastname='doe', country='usa',\
department=cs, position=pos)
a.save()
else:
logger.info('used existing applicant')
a = a_objs[0]
c_objs = rmod.Component.objects.filter(applicant=a)
if len(c_objs) == 0:
logger.info('created component')
c = rmod.Component(applicant=a, type=ct, value='component 1', lastSubmitted=0,\
department=cs)
c.save()
else:
logger.info('used existing component')
c = c_objs[0]
reviewer_objs = rmod.Reviewer.objects.filter(auth=auth)
if len(reviewer_objs) == 0:
logger.info('created reviewer')
reviewer = rmod.Reviewer(auth=auth, department=cs)
reviewer.save()
else:
logger.info('used existing reviewer')
reviewer = reviewer_objs[0]
review_objs = rmod.Review.objects.filter(applicant=a)
if len(review_objs) == 0:
logger.info('created review')
review = rmod.Review(applicant=a, reviewer=reviewer, advocate='advocate',\
comments='this shit sucks', draft=False, department=cs)
review.save()
else:
logger.info('used existing review')
review = review_objs[0]
area_objs = rmod.Area.objects.filter(department=cs)
if len(area_objs) < 2:
a = rmod.Area(name='area two', abbr='a2', department=cs)
a.save()
a = rmod.Area(name='area one', abbr='a1', department=cs)
a.save()
score_cats = rmod.ScoreCategory.objects.filter(department=cs)
if len(score_cats) == 0:
sc = rmod.ScoreCategory(name='Awesomeness Level', shortform='AL', department=cs)
sc.save()
else:
sc = score_cats[0]
score_vals = rmod.ScoreValue.objects.filter(department=cs)
if len(score_vals) == 0:
for i in range(5):
sv = rmod.ScoreValue(category=sc, number=i, explanation='%d level of awesome' % i,\
department=cs)
sv.save()
return HttpResponse('OK')
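# Illustrative sketch (not part of the original view): each filter-then-create
# block above could be collapsed with Django's get_or_create; the defaults
# shown here are partial, so the full field set used in generate() would
# still be needed in practice.
def _demo_get_or_create_dept():
    cs, created = rmod.Department.objects.get_or_create(
        shortname='cs',
        defaults={'name': 'Computer Science', 'lastChange': 0})
    return cs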
| vakaras/nmadb-registration | src/nmadb_registration/views.py | Python | lgpl-3.0 | 4,119 | 0 |
from django.contrib import admin
from django.db import transaction
from django.core import urlresolvers
from django.utils.translation import ugettext as _
from django import shortcuts
from django.contrib import messages
from annoying.decorators import render_to
from nmadb_registration import forms, models
@admin.site.admin_view
@render_to('admin/file-form.html')
@transaction.atomic
def import_schools(request):
""" Imports schools.
"""
if request.method == 'POST':
form = forms.ImportTitleOnlyForm(request.POST, request.FILES)
if form.is_valid():
counter = 0
for sheet in form.cleaned_data['spreadsheet']:
for row in sheet:
school = models.School()
school.id = row[u'id']
school.title = row[u'title']
school.save()
counter += 1
msg = _(u'{0} schools successfully imported.').format(counter)
messages.success(request, msg)
return shortcuts.redirect(
'admin:nmadb_registration_school_changelist')
else:
form = forms.ImportTitleOnlyForm()
return {
'admin_index_url': urlresolvers.reverse('admin:index'),
'app_url': urlresolvers.reverse(
'admin:app_list',
kwargs={'app_label': 'nmadb_registration'}),
'app_label': _(u'NMADB Registration'),
'form': form,
}
@admin.site.admin_view
@render_to('admin/file-form.html')
@transaction.atomic
def import_sections(request):
""" Imports sections.
"""
if request.method == 'POST':
form = forms.ImportTitleOnlyForm(request.POST, request.FILES)
if form.is_valid():
counter = 0
for sheet in form.cleaned_data['spreadsheet']:
for row in sheet:
section = models.Section()
section.id = row[u'id']
section.title = row[u'title']
section.save()
counter += 1
msg = _(u'{0} sections successfully imported.').format(counter)
messages.success(request, msg)
return shortcuts.redirect(
'admin:nmadb_registration_section_changelist')
else:
form = forms.ImportTitleOnlyForm()
return {
'admin_index_url': urlresolvers.reverse('admin:index'),
'app_url': urlresolvers.reverse(
'admin:app_list',
kwargs={'app_label': 'nmadb_registration'}),
'app_label': _(u'NMADB Registration'),
'form': form,
}
@admin.site.admin_view
@render_to('admin/file-form.html')
@transaction.atomic
def import_municipalities(request):
""" Imports municipalities.
"""
if request.method == 'POST':
form = forms.ImportMunicipalitiesForm(request.POST, request.FILES)
if form.is_valid():
counter = 0
for sheet in form.cleaned_data['spreadsheet']:
for row in sheet:
municipality = models.Municipality()
municipality.id = row[u'id']
municipality.town = row[u'town']
municipality.municipality_type = (
row[u'municipality_type'] or u'')
                    municipality.code = row[u'code']
municipality.save()
counter += 1
msg = _(u'{0} municipalities successfully imported.'
).format(counter)
messages.success(request, msg)
return shortcuts.redirect(
'admin:nmadb_registration_municipality_changelist')
else:
form = forms.ImportMunicipalitiesForm()
return {
'admin_index_url': urlresolvers.reverse('admin:index'),
'app_url': urlresolvers.reverse(
'admin:app_list',
kwargs={'app_label': 'nmadb_registration'}),
'app_label': _(u'NMADB Registration'),
'form': form,
}
| BadSingleton/pyside2 | tests/signals/signal2signal_connect_test.py | Python | lgpl-2.1 | 3,340 | 0.004491 |
# -*- coding: utf-8 -*-
''' Test case for signal to signal connections.'''
import unittest
from PySide2.QtCore import *
def cute_slot():
pass
class TestSignal2SignalConnect(unittest.TestCase):
'''Test case for signal to signal connections'''
def setUp(self):
#Set up the basic resources needed
self.sender = QObject()
self.forwarder = QObject()
self.args = None
self.called = False
def tearDown(self):
#Delete used resources
try:
del self.sender
except:
pass
try:
del self.forwarder
except:
pass
del self.args
def callback_noargs(self):
#Default callback without arguments
self.called = True
def callback_args(self, *args):
#Default callback with arguments
if args == self.args:
self.called = True
else:
raise TypeError("Invalid arguments")
def callback_qobject(self, *args):
#Default callback for QObject as argument
if args[0].objectName() == self.args[0]:
self.called = True
else:
raise TypeError("Invalid arguments")
def testSignalWithoutArguments(self):
QObject.connect(self.sender, SIGNAL("destroyed()"),
self.forwarder, SIGNAL("forward()"))
QObject.connect(self.forwarder, SIGNAL("forward()"),
self.callback_noargs)
del self.sender
self.assert_(self.called)
    def testSignalWithOnePrimitiveTypeArgument(self):
QObject.connect(self.sender, SIGNAL("mysignal(int)"),
self.forwarder, SIGNAL("mysignal(int)"))
QObject.connect(self.forwarder, SIGNAL("mysignal(int)"),
self.callback_args)
self.args = (19,)
        self.sender.emit(SIGNAL('mysignal(int)'), *self.args)
self.assert_(self.called)
def testSignalWithMultiplePrimitiveTypeArguments(self):
QObject.connect(self.sender, SIGNAL("mysignal(int,int)"),
self.forwarder, SIGNAL("mysignal(int,int)"))
QObject.connect(self.forwarder, SIGNAL("mysignal(int,int)"),
self.callback_args)
self.args = (23, 29)
self.sender.emit(SIGNAL('mysignal(int,int)'), *self.args)
self.assert_(self.called)
def testSignalWithOneStringArgument(self):
QObject.connect(self.sender, SIGNAL("mysignal(QString)"),
self.forwarder, SIGNAL("mysignal(QString)"))
QObject.connect(self.forwarder, SIGNAL("mysignal(QString)"),
self.callback_args)
self.args = ('myargument',)
self.sender.emit(SIGNAL('mysignal(QString)'), *self.args)
self.assert_(self.called)
def testSignalWithOneQObjectArgument(self):
QObject.connect(self.sender, SIGNAL('destroyed(QObject*)'),
self.forwarder, SIGNAL('forward(QObject*)'))
QObject.connect(self.forwarder, SIGNAL('forward(QObject*)'),
self.callback_qobject)
obj_name = 'sender'
self.sender.setObjectName(obj_name)
self.args = (obj_name, )
del self.sender
self.assert_(self.called)
if __name__ == '__main__':
unittest.main()
| Yinan-Zhang/RichCSpace | alphashape/Homotopy.py | Python | mit | 6,570 | 0.049772 |
"""
This class determines whether two paths belong to the same homotopy class, using Constraint Satisfaction Programming techniques.
"""
__author__ = 'Yinan Zhang'
__revision__ = '$Revision$'
import pdb
import sys, os, math, time, pygame, time, copy
sys.path.append('../basics/math')
sys.path.append('../basics/algorithm')
from numpy.linalg import matrix_rank
from hyper_geometry import *
from l1_geometry import *
from Triangle import *
from Contraction import *
from priority_queue import *
class HomotopyCSP:
    '''Constraint Satisfaction Programming on homotopy of sphere unions'''
def __init__(self, spheres, graph, triangle_set, edge_tri_dict, sphere_tri_dict):
self.spheres = spheres
self.graph = graph
self.triangle_set = triangle_set
self.edge_tri_dict = edge_tri_dict
self.sphere_tri_dict = sphere_tri_dict
pass;
def neighbor_spheres( self, union, used_spheres ):
for sphere in union.get_spheres():
used_spheres[sphere] = 1;
neighbors = []; # neighbor spheres of the union.
for sphere in union.get_spheres(): # loop over each component sphere
curr_neighbors = self.graph[sphere];# find its neighbors in the graph
for curr_neighbor in curr_neighbors:
if not used_spheres.has_key(curr_neighbor): # such that each neighbor is not used
neighbors.append(curr_neighbor);
print "Union neighbor spheres: {0}".format( len(neighbors) );
return neighbors;
def greedy( self, union1, union2, all_spheres, surface = None ):
        ''' Greedily grow the union of spheres the two paths pass through,
            such that the growth will not increase the 1st betti number.
        '''
def center( union1, union2 ):
center = None;
for s in union1.spheres:
if center is None:
center = copy.copy( s.center );
else:
center += s.center;
for s in union2.spheres:
center += s.center;
center /= ( len(union1.spheres) + len(union2.spheres) );
return center;
def dist(sphere, union):
'''min_dist from a sphere to spheres on a union'''
min_dist = 100000000;
for s in union.get_spheres():
dist = (s.center - sphere.center).r();
if dist <= min_dist:
min_dist = dist;
return min_dist;
def heur_dist( sphere, union1, union2 ):
'''returns the heuristic of the sphere'''
return dist(sphere, union1) + dist(sphere, union2);
def heur_cent( sphere, center ):
'''returns the heuristic of the sphere'''
return (sphere.center - center).r();
pdb.set_trace();
union_center = center(union1, union2);
union1.render( surface, (200,200,00) );
pygame.display.update();
union2.render( surface, (200,000,200) );
pygame.display.update();
used_spheres = {};
for sphere in union1.get_spheres():
used_spheres[sphere] = 1;
for sphere in union2.get_spheres():
used_spheres[sphere] = 1;
union1cp = copy.copy( union1 );
union = union1.merge(union2, self.edge_tri_dict, self.sphere_tri_dict);
neighbors = self.neighbor_spheres(union, used_spheres)
heuristic = PriorityQueue();
if len(neighbors) == 0:
            pass;  # TODO: handle the case where the union has no neighbor spheres
for neighbor in neighbors:
if not used_spheres.has_key(neighbor) and all_spheres.has_key(neighbor):
heuristic.push( neighbor, heur_dist(neighbor, union1, union2) );
#heuristic.push(neighbor, heur_cent(neighbor, union_center));
pdb.set_trace();
while not heuristic.isEmpty() and not len(union.spheres) == len(all_spheres.keys()):
choice = heuristic.pop()
pygame.draw.circle( surface, (255,0,0), (int(choice.center[0]), int(choice.center[1])), int(choice.radius), 2 );
pygame.display.update()
#time.sleep(1);
old_betti = union.betti_number(self.edge_tri_dict)
print "old betti number: {0}".format( old_betti )
good = union.add_sphere_betti(choice, old_betti, self.edge_tri_dict, self.sphere_tri_dict, surface);
if old_betti == 0 or good == None:
print "Same homotopy class";
return;
if good:
union.render( surface, (200,200,200) );
time.sleep(1);
used_spheres[choice] = 1;
temp = Component( choice );
new_neighbors = self.neighbor_spheres(temp, used_spheres)
for neighbor in new_neighbors:
if not used_spheres.has_key(neighbor) and all_spheres.has_key(neighbor):
heuristic.push( neighbor, heur_dist(neighbor, union1, union2) );
#heuristic.push(neighbor, heur_cent(neighbor, union_center));
betti = union.betti_number(self.edge_tri_dict);
print betti
if betti != 0:
print "Different Homotopy Classes"
else:
print "Same homotopy class"
def CSP( self, all_spheres, path1, path2, surf ):
'''Given n variables( spheres ) s[1..n] with available value 0 and 1.
        Solve the constraint satisfaction problem of assigning variables such that:
        1. either the assigned spheres have betti number 0
        2. or there are no other spheres that can be added without increasing the betti number'''
path1cp = copy.copy(path1);
component = path1cp.merge(path2, self.edge_tri_dict, self.sphere_tri_dict);
assigned = {}
for s in component.spheres.keys():
assigned[s] = 1;
return self.CSP_helper(all_spheres, assigned, component, surf)
def CSP_helper( self, all_spheres, assigned, component, surf ):
        def dist(sphere, union):
            '''min_dist from a sphere to spheres on a union'''
            min_dist = 100000000;
            for s in union.get_spheres():
                d = (s.center - sphere.center).r();
                if d <= min_dist:
                    min_dist = d;
            return min_dist;
        def heur( sphere, path1, path2 ):
            '''returns the heuristic of the sphere'''
            return dist(sphere, path1) + dist(sphere, path2);
def select_next_unassigned(idx):
'''returns the an unassigned variable'''
spheres = all_spheres.keys();
for i in range(idx, len(spheres)):
s = spheres[i]
if not assigned.has_key(s) and component.intersects(s):
idx += 1;
return s, idx;
idx += 1;
            return None, idx;
pdb.set_trace();
old_betti = component.betti_number( self.edge_tri_dict );
print old_betti
if old_betti == 0:
return True; # Same homotopy
idx = 0;
while True:
pdb.set_trace()
component.render(surf, (150, 180, 230));
pygame.display.update();
var, idx = select_next_unassigned(idx);
if var == None:
return False;
if component.add_sphere_betti(var, old_betti, self.edge_tri_dict, self.sphere_tri_dict, surf):
component.render(surf, (150, 180, 230));pygame.display.update();
assigned[var] = 1;
if not self.CSP_helper(all_spheres, assigned, component, surf):
component.remove(var, old_betti, self.edge_tri_dict, self.sphere_tri_dict, True);
else:
return True;
pass;
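# --- Hedged usage sketch, not from the original source ---
# HomotopyCSP expects precomputed adjacency data; every name below
# (spheres, graph, triangle_set, edge_tri_dict, sphere_tri_dict,
# all_spheres, path1, path2, surface) is a placeholder for structures
# built elsewhere in this project.
#
# csp = HomotopyCSP(spheres, graph, triangle_set, edge_tri_dict, sphere_tri_dict)
# if csp.CSP(all_spheres, path1, path2, surface):
#     print "Same homotopy class"
# else:
#     print "Different homotopy classes"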
Stanford-Online/edx-platform | cms/djangoapps/contentstore/utils.py | Python | agpl-3.0 | 18,481 | 0.002705
"""
Common utility functions useful throughout the contentstore
"""
import logging
from datetime import datetime
from django.conf import settings
from django.urls import reverse
from django.utils.translation import ugettext as _
from opaque_keys.edx.keys import CourseKey, UsageKey
from pytz import UTC
from six import text_type
from django_comment_common.models import assign_default_role
from django_comment_common.utils import seed_permissions_roles
from openedx.core.djangoapps.site_configuration.models import SiteConfiguration
from student import auth
from student.models import CourseEnrollment
from student.roles import CourseInstructorRole, CourseStaffRole
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.exceptions import ItemNotFoundError
from xmodule.partitions.partitions_service import get_all_partitions_for_course
log = logging.getLogger(__name__)
def add_instructor(course_key, requesting_user, new_instructor):
"""
Adds given user as instructor and staff to the given course,
after verifying that the requesting_user has permission to do so.
"""
# can't use auth.add_users here b/c it requires user to already have Instructor perms in this course
CourseInstructorRole(course_key).add_users(new_instructor)
auth.add_users(requesting_user, CourseStaffRole(course_key), new_instructor)
def initialize_permissions(course_key, user_who_created_course):
"""
Initializes a new course by enrolling the course creator as a student,
and initializing Forum by seeding its permissions and assigning default roles.
"""
# seed the forums
seed_permissions_roles(course_key)
# auto-enroll the course creator in the course so that "View Live" will work.
CourseEnrollment.enroll(user_who_created_course, course_key)
# set default forum roles (assign 'Student' role)
assign_default_role(course_key, user_who_created_course)
def remove_all_instructors(course_key):
"""
Removes all instructor and staff users from the given course.
"""
staff_role = CourseStaffRole(course_key)
staff_role.remove_users(*staff_role.users_with_role())
instructor_role = CourseInstructorRole(course_key)
instructor_role.remove_users(*instructor_role.users_with_role())
def delete_course(course_key, user_id, keep_instructors=False):
"""
Delete course from module store and if specified remove user and
groups permissions from course.
"""
_delete_course_from_modulestore(course_key, user_id)
if not keep_instructors:
_remove_instructors(course_key)
def _delete_course_from_modulestore(course_key, user_id):
"""
Delete course from MongoDB. Deleting course will fire a signal which will result into
deletion of the courseware associated with a course_key.
"""
module_store = modulestore()
with module_store.bulk_operations(course_key):
module_store.delete_course(course_key, user_id)
def _remove_instructors(course_key):
"""
In the django layer, remove all the user/groups permissions associated with this course
"""
print 'removing User permissions from course....'
try:
remove_all_instructors(course_key)
except Exception as err:
log.error("Error in deleting course groups for {0}: {1}".format(course_key, err))
def get_lms_link_for_item(location, preview=False):
"""
Returns an LMS link to the course with a jump_to to the provided location.
:param location: the location to jump to
:param preview: True if the preview version of LMS should be returned. Default value is false.
"""
assert isinstance(location, UsageKey)
# checks LMS_BASE value in site configuration for the given course_org_filter(org)
# if not found returns settings.LMS_BASE
lms_base = SiteConfiguration.get_value_for_org(
location.org,
"LMS_BASE",
settings.LMS_BASE
)
if lms_base is None:
return None
if preview:
# checks PREVIEW_LMS_BASE value in site configuration for the given course_org_filter(org)
# if not found returns settings.FEATURES.get('PREVIEW_LMS_BASE')
lms_base = SiteConfiguration.get_value_for_org(
location.org,
"PREVIEW_LMS_BASE",
settings.FEATURES.get('PREVIEW_LMS_BASE')
)
return u"//{lms_base}/courses/{course_key}/jump_to/{location}".format(
lms_base=lms_base,
course_key=text_type(location.course_key),
location=text_type(location),
)
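# Hedged illustration (not part of the original module): with LMS_BASE
# resolving to "lms.example.com", get_lms_link_for_item returns a
# protocol-relative URL of the form:
#   //lms.example.com/courses/<course_key>/jump_to/<usage_key>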
# pylint: disable=invalid-name
def get_lms_link_for_certificate_web_view(user_id, course_key, mode):
"""
Returns the url to the certificate web view.
"""
assert isinstance(course_key, CourseKey)
# checks LMS_BASE value in SiteConfiguration against course_org_filter if not found returns settings.LMS_BASE
lms_base = SiteConfiguration.get_value_for_org(course_key.org, "LMS_BASE", settings.LMS_BASE)
if lms_base is None:
return None
return u"//{certificate_web_base}/certificates/user/{user_id}/course/{course_id}?preview={mode}".format(
certificate_web_base=lms_base,
user_id=user_id,
course_id=unicode(course_key),
mode=mode
)
# pylint: disable=invalid-name
def is_currently_visible_to_students(xblock):
"""
Returns true if there is a published version of the xblock that is currently visible to students.
This means that it has a release date in the past, and the xblock has not been set to staff only.
"""
try:
published = modulestore().get_item(xblock.location, revision=ModuleStoreEnum.RevisionOption.published_only)
# If there's no published version then the xblock is clearly not visible
except ItemNotFoundError:
return False
# If visible_to_staff_only is True, this xblock is not visible to students regardless of start date.
if published.visible_to_staff_only:
return False
# Check start date
if 'detached' not in published._class_tags and published.start is not None:
return datetime.now(UTC) > published.start
# No start date, so it's always visible
return True
def has_children_visible_to_specific_partition_groups(xblock):
"""
Returns True if this xblock has children that are limited to specific user partition groups.
Note that this method is not recursive (it does not check grandchildren).
"""
if not xblock.has_children:
return False
for child in xblock.get_children():
if is_visible_to_specific_partition_groups(child):
return True
return False
def is_visible_to_specific_partition_groups(xblock):
"""
Returns True if this xblock has visibility limited to specific user partition groups.
"""
if not xblock.group_access:
return False
for partition in get_user_partition_info(xblock):
if any(g["selected"] for g in partition["groups"]):
return True
return False
def find_release_date_source(xblock):
"""
Finds the ancestor of xblock that set its release date.
"""
# Stop searching at the section level
if xblock.category == 'chapter':
return xblock
parent_location = modulestore().get_parent_location(xblock.location,
revision=ModuleStoreEnum.RevisionOption.draft_preferred)
# Orphaned xblocks set their own release date
if not parent_location:
return xblock
parent = modulestore().get_item(parent_location)
if parent.start != xblock.start:
return xblock
else:
return find_release_date_source(parent)
def find_staff_lock_source(xblock):
"""
    Returns the xblock responsible for setting this xblock's staff lock, or None if the xblock is not staff locked.
If this xblock is explicitly locked, return it, otherwise find the ancestor which sets this xblock's staff lock.
"""
# Stop searching if this xblock has explicitly set its own staff lock
if xblock.fields['visible_to_staff_only'].is_set_on(xblock):
        return xblock
GripQA/client-tools | jira-access/jira_descr.py | Python | apache-2.0 | 2,675 | 0.006729
#!/usr/bin/python3
"""jira_descr.py queries JIRA for issue description info
The information includes, values for:
- issue type
- status
- resolution
- priority
The information is retrieved and formatted for nice printing. This is a
utility for configuring the JIRA access for a new project.
Source documentation: https://docs.atlassian.com/jira/REST/latest/#d2e1750
Copyright 2015 Grip QA
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__author__ = "Dean Stevens"
__copyright__ = "Copyright 2015, Grip QA"
__license__ = "Apache License, Version 2.0"
__status__ = "Prototype"
__version__ = "0.01"
import sys
import textwrap
from grip_import import ERR_LABEL
from grip_import import get_basename_arg
from grip_import import load_config
from grip_import import get_rest
def dump_data(jsn):
    wrapper = textwrap.TextWrapper(initial_indent="    ",
                                   subsequent_indent=' ' * 16)
for i in jsn:
o_str = "{0} -- {1}".format(i['name'], i['description'])
for l in wrapper.wrap(o_str):
print(l)
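# Hedged aside, not part of the original script: a self-contained preview of
# the wrapping style dump_data applies to "name -- description" pairs; the
# sample text below is invented.
def _demo_wrap():
    import textwrap
    wrapper = textwrap.TextWrapper(initial_indent="    ",
                                   subsequent_indent=' ' * 16)
    sample = "Bug -- A problem which impairs or prevents the product from working"
    for line in wrapper.wrap(sample):
        print(line)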
def descr_main(config):
"""Main function for retrieving and displaying Java issue information
Uses REST APIs to GET a project's names for various issue types, states and
priorities. Formats the output for easier reading.
Args:
config - configuration object
Returns:
No return value
"""
labels = ["issuetype"
,"status"
,"resolution"
,"priority"
]
api = config.jira_rest_api
auth = (config.username, config.password)
for l in labels:
print("\n\n{0}:".format(l))
url = "{0}{1}{2}".format(config.server, api, l)
jsn = get_rest(url, auth)
dump_data(jsn)
if __name__ == '__main__':
basename = get_basename_arg(__file__, sys.argv)
if basename is not None:
cfg_path = basename + ".cfg"
config = load_config(cfg_path)
if config is not None:
descr_main(config)
else:
err_str = ("{0}Failed to load configuration file: '{1}' "
"Exiting...")
print(err_str.format(ERR_LABEL, cfg_path))
Rostlab/nalaf | tests/features/test_simple.py | Python | apache-2.0 | 1,582 | 0.004425
import unittest
from nalaf.structures.data import Dataset, Document, Part, Token
from nalaf.features.simple import SimpleFeatureGenerator, SentenceMarkerFeatureGenerator
class TestSimpleFeatureGenerator(unittest.TestCase):
def setUp(self):
part = Part('Word1 word2 word3. Word4 word5 word6.')
part.sentences = [[Token('Word1', 0), Token('word2', 6), Token('word3', 12)],
[Token('Word4', 19), Token('word5', 25), Token('word6', 31)]]
self.dataset = Dataset()
self.dataset.documents['doc_1'] = Document()
self.dataset.documents['doc_1'].parts['part_1'] = part
self.simple_generator = SimpleFeatureGenerator()
self.sentence_generator = SentenceMarkerFeatureGenerator()
def test_simple_generate(self):
self.simple_generator.generate(self.dataset)
features = [token.features for token in self.dataset.tokens()]
expected = iter([{'word[0]': 'Word1'}, {'word[0]': 'word2'}, {'word[0]': 'word3'},
{'word[0]': 'Word4'}, {'word[0]': 'word5'}, {'word[0]': 'word6'}])
for feature in features:
self.assertEqual(feature, next(expected))
def test_sentence_generate(self):
        self.sentence_generator.generate(self.dataset)
features = [token.features for token in self.dataset.tokens()]
expected = iter([{'BOS[0]': 1}, {}, {'EOS[0]': 1}, {'BOS[0]': 1}, {}, {'EOS[0]': 1}])
for feature in features:
self.assertEqual(feature, next(expected))
if __name__ == '__main__':
unittest.main()
pombredanne/python-npm | setup.py | Python | mit | 472 | 0.004237
from setuptools import setup, find_packages
import npm
setup(
name='npm',
version=npm.VERSION,
packages=find_packages(exclude=('tests',)),
    description='Python bindings and utils for npm.',
long_description='Documentation at https://github.com/markfinger/python-npm',
install_requires=[
'optional-django==0.1.0',
],
author='Mark Finger',
author_email='markfinger@gmail.com',
url='https://github.com/markfinger/python-npm',
)
italopaiva/your.car | yourcar/telegram_bot/migrations/0001_initial.py | Python | bsd-2-clause | 818 | 0.002445
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-06 22:06
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='UserBotConversation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('chat', models.BigIntegerField(unique=True)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
bencastan/LPTHW | ex32.py | Python | gpl-3.0 | 736 | 0.012228
the_count = [1, 2, 3, 4, 5]
fruits = ['apples', 'oranges', 'pears', 'apricots']
change = [1, 'pennies', 2, 'dimes', 3, 'quarters']
# this first kind of for-loop goes through a list
for number in the_count:
print "This is the count %d" % number
# same as above
for fruit in fruits:
print "A fruit of type: %s" % fruit
# also we can go through mixed lists
# notice we have to use %r since we don't know what is in it
for i in change:
print "I got %r" %i
# we can also build lists, first start with an empty one.
elements = []
# then use the range function to do 0 to 5 counts
for i in range(0, 6):
print "Adding %d to the list." % i
elements.append(i)
# now we can print them out
for i in elements:
print "Element was: %d " %i
roadmapper/ansible | lib/ansible/modules/network/aci/aci_firmware_group_node.py | Python | gpl-3.0 | 6,497 | 0.001847
#!/usr/bin/python
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: aci_firmware_group_node
short_description: This module adds and removes nodes from the firmware group
version_added: "2.8"
description:
    - This module adds/deletes a node to the firmware group. This module assigns one node at a time.
options:
group:
description:
- This is the name of the firmware group
type: str
required: true
node:
description:
- The node to be added to the firmware group - the value equals the NodeID
type: str
required: true
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
type: str
default: present
choices: [ absent, present, query ]
name_alias:
version_added: '2.10'
description:
- The alias for the current object. This relates to the nameAlias field in ACI.
type: str
extends_documentation_fragment:
- aci
author:
- Steven Gerhart (@sgerhart)
'''
EXAMPLES = '''
- name: add firmware group node
aci_firmware_group_node:
host: "{{ inventory_hostname }}"
username: "{{ user }}"
password: "{{ pass }}"
validate_certs: no
group: testingfwgrp
node: 1001
state: present
- name: Remove firmware group node
aci_firmware_group_node:
host: "{{ inventory_hostname }}"
username: "{{ user }}"
password: "{{ pass }}"
validate_certs: no
group: testingfwgrp
node: 1001
state: absent
'''
RETURN = '''
current:
description: The existing configuration from the APIC after the module has finished
returned: success
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
error:
description: The error information as returned from the APIC
returned: failure
type: dict
sample:
{
"code": "122",
"text": "unknown managed object class foo"
}
raw:
description: The raw output returned by the APIC REST API (xml or json)
returned: parse error
type: str
sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>'
sent:
description: The actual/minimal configuration pushed to the APIC
returned: info
type: list
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment"
}
}
}
previous:
description: The original configuration from the APIC before the module has started
returned: info
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
proposed:
description: The assembled configuration from the user-provided parameters
returned: info
type: dict
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"name": "production"
}
}
}
filter_string:
description: The filter string used for the request
returned: failure or debug
type: str
sample: ?rsp-prop-include=config-only
method:
description: The HTTP method used for the request to the APIC
returned: failure or debug
type: str
sample: POST
response:
description: The HTTP response from the APIC
returned: failure or debug
type: str
sample: OK (30 bytes)
status:
description: The HTTP status from the APIC
returned: failure or debug
type: int
sample: 200
url:
description: The HTTP url used for the request to the APIC
returned: failure or debug
type: str
sample: https://10.11.12.13/api/mo/uni/tn-production.json
'''
import json
from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
from ansible.module_utils.basic import AnsibleModule
def main():
argument_spec = aci_argument_spec()
argument_spec.update(
group=dict(type='str', aliases=['group']), # Not required for querying all objects
node=dict(type='str', aliases=['node']),
state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
name_alias=dict(type='str'),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=[
['state', 'absent', ['node', 'group']],
['state', 'present', ['node', 'group']],
],
)
state = module.params.get('state')
group = module.params.get('group')
node = module.params.get('node')
name_alias = module.params.get('name_alias')
aci = ACIModule(module)
aci.construct_url(
root_class=dict(
aci_class='firmwareFwGrp',
aci_rn='fabric/fwgrp-{0}'.format(group),
target_filter={'name': group},
module_object=group,
),
subclass_1=dict(
aci_class='fabricNodeBlk',
aci_rn='nodeblk-blk{0}-{0}'.format(node),
target_filter={'name': node},
module_object=node,
),
)
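    # Hedged illustration, not from the original module: with group
    # "testingfwgrp" and node "1001" (as in EXAMPLES above), the two rn
    # fragments above combine into a DN along the lines of
    #   uni/fabric/fwgrp-testingfwgrp/nodeblk-blk1001-1001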
aci.get_existing()
if state == 'present':
aci.payload(
aci_class='fabricNodeBlk',
class_config=dict(
from_=node,
to_=node,
nameAlias=name_alias,
),
)
aci.get_diff(aci_class='fabricNodeBlk')
aci.post_config()
elif state == 'absent':
aci.delete_config()
aci.exit_json()
if __name__ == "__main__":
main()
dasbruns/netzob | src/netzob/Common/Models/Grammar/States/PrismaState.py | Python | gpl-3.0 | 5,647 | 0.007793
#-*- coding: utf-8 -*-
#+---------------------------------------------------------------------------+
#| 01001110 01100101 01110100 01111010 01101111 01100010 |
#| |
#| Netzob : Inferring communication protocols |
#+---------------------------------------------------------------------------+
#| Copyright (C) 2015 Christian Bruns |
#| This program is free software: you can redistribute it and/or modify |
#| it under the terms of the GNU General Public License as published by |
#| the Free Software Foundation, either version 3 of the License, or |
#| (at your option) any later version. |
#| |
#| This program is distributed in the hope that it will be useful, |
#| but WITHOUT ANY WARRANTY; without even the implied warranty of |
#| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
#| GNU General Public License for more details. |
#| |
#| You should have received a copy of the GNU General Public License |
#| along with this program. If not, see <http://www.gnu.org/licenses/>. |
#+---------------------------------------------------------------------------+
#| @url : http://www.netzob.org |
#| @contact : contact@netzob.org |
#| @sponsors : Amossys, http://www.amossys.fr |
#| Supélec, http://www.rennes.supelec.fr/ren/rd/cidre/ |
#+---------------------------------------------------------------------------+
#+---------------------------------------------------------------------------+
#| File contributors : |
#| - Christian Bruns <christian.bruns1 (a) stud.uni-goettingen.de> |
#+---------------------------------------------------------------------------+
#+---------------------------------------------------------------------------+
#| Standard library imports |
#+---------------------------------------------------------------------------+
#+---------------------------------------------------------------------------+
#| Related third party imports |
#+---------------------------------------------------------------------------+
import random
#+---------------------------------------------------------------------------+
#| Local application imports |
#+---------------------------------------------------------------------------+
from netzob.Common.Models.Grammar.States.State import State
from netzob.Common.Utils.Decorators import typeCheck, NetzobLogger
@NetzobLogger
class PrismaState(State):
""" Performs like original State; incorporates features of invalidating a Transitions Symbol (if it is to faulty)
and even the Transition itself, if it has no Symbols left. Also removes itself, if no Transitions are left.
"""
def __init__(self, name=None):
super(PrismaState, self).__init__(name=name)
self.active = False
self.trans = []
self.usedTransitions = []
self.invalid = False
def executeAsInitiator(self, abstractionLayer):
if abstractionLayer is None:
raise TypeError("AbstractionLayer cannot be None")
self.active = True
# Pick the next transition
nextTransition = self.pickNextTransition()
if nextTransition is None:
self.active = False
raise Exception("No transition to execute, we stop here.")
# Execute picked transition as an initiator
try:
nextState = nextTransition.executeAsInitiator(abstractionLayer)
except Exception, e:
self.active = False
raise e
if nextState is None:
self.active = False
raise Exception("The execution of transition {0} on state {1} did not return the next state.".format(str(nextTransition), self.name))
self.active = False
return nextState
def pickNextTransition(self):
""" Advanced picking method; incorporates features of deleting Symbols from Transitions, Transitions from
current State and current State itself. Picks Transitions cyclically.
:return: the Transition to be executed
"""
flag = True
while flag:
pos = list(set(self.trans)-set(self.usedTransitions))
c = random.choice(pos)
# is endState invalid?
if c.endState.invalid:
# remove transition to it
self.trans.remove(c)
else:
flag = False
self.usedTransitions.append(c)
            if c.invalid:
self.trans.remove(c)
if len(self.trans) == 0:
self.invalid = True
                    if self.name.split('|')[-1] == 'START':
exit()
# if c in self.trans:
if len(self.trans) <= len(self.usedTransitions):
self.usedTransitions = []
return c
def setTransitions(self, transitions):
self.trans = transitions
@property
def transitions(self):
return self.trans
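# --- Hedged usage sketch, not part of the original file ---
# A PrismaState cycles through its transitions and prunes invalid ones as it
# goes; t1, t2, t3 below are placeholders for real Netzob transitions.
#
# state = PrismaState(name='S0|START')
# state.setTransitions([t1, t2, t3])
# nextTransition = state.pickNextTransition()  # random pick among unused ones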
tedlaz/pyted | pykoinoxrista/u_dbcon.py | Python | gpl-3.0 | 5,047 | 0
# -*- coding: utf-8 -*-
import sqlite3
import os
from logger import log
from collections import OrderedDict as odi
CREATE, INSERT, UPDATE, DELETE, SCRIPT, SELECT = range(6)
SQL_CREATE = 'sql_create.sql'
class Sqlcon(object):
def __new__(cls, action, db, sql):
if not action:
return None
if not db:
return None
return super(Sqlcon, cls).__new__(cls)
def __init__(self, action, db, sql=''):
self.action = action
self.db = db
self.sql = sql
self.con = None
self.cur = None
def run(self):
if self.action == CREATE:
return self._create()
elif self.action == INSERT:
return self._insert()
elif self.action == UPDATE:
return self._update()
elif self.action == DELETE:
return self._delete()
elif self.action == SCRIPT:
return self._script()
elif self.action == SELECT:
return self._select()
else:
return self._other()
def _connect(self):
try:
self.con = sqlite3.connect(self.db)
self.cur = self.con.cursor()
self.connected = True
return True
except sqlite3.Error as sqe:
log.error('Connection error : %s' % sqe)
return False
def __del__(self):
        if not self.con:
            print('no connection')
            return
self.cur.close()
self.con.close()
self.cur = None
self.con = None
def _create(self):
if os.path.exists(self.db):
log.error('file %s exists. Exiting' % self.db)
return False
if not os.path.exists(SQL_CREATE):
            log.error('file %s not exists. Exiting' % SQL_CREATE)
            return False
with open(SQL_CREATE) as filesql:
sql_create = filesql.read()
rval = False
if self._connect():
try:
self.cur.executescript(sql_create)
self.con.commit()
rval = True
except sqlite3.Error as sqe:
log.error('Script error : %s' % sqe)
self.con.rollback()
os.remove(self.db)
            finally:
                # self._close()
                if rval:
                    log.info('database %s created' % self.db)
return rval
def _insert(self):
if not len(self.sql) > 6:
return 0
if not self.sql[:6].upper() == 'INSERT':
return 0
return 1
def _update(self):
if not len(self.sql) > 6:
return 0
        if not self.sql[:6].upper() == 'UPDATE':
            return 0
        return 'update'
def _delete(self):
if not len(self.sql) > 6:
return 0
        if not self.sql[:6].upper() == 'DELETE':
            return 0
        return 'delete'
def _script(self):
if not os.path.exists(self.db):
log.error('_script: db file %s not exists. Exiting' % self.db)
return False
rval = False
if self._connect():
try:
self.cur.executescript(self.sql)
self.con.commit()
rval = True
except sqlite3.Error as sqe:
log.error('_script : %s' % sqe)
self.con.rollback()
if rval:
log.info('Script execution completed. No errors !!!')
return rval
def _select(self):
return 'select'
def _other(self):
return 'other'
class tst(object):
def __new__(cls, param, pas):
if param:
return super(tst, cls).__new__(cls)
else:
return None
def __init__(self, param, pas):
self.param = param
self.pas = pas
class Dbm(object):
def __init__(self, db):
self.db = db
self.conn = None
self.cur = None
self._connect()
def _connect(self):
self.conn = sqlite3.connect(self.db)
self.conn.execute('pragma foreign_keys = on')
self.conn.commit()
self.cur = self.conn.cursor()
def select(self, sql):
self.cur.execute(sql)
self.conn.commit()
columnNames = [t[0] for t in self.cur.description]
data = self.cur.fetchall()
listdict = []
for row in data:
tdic = odi()
for i, col in enumerate(row):
tdic[columnNames[i]] = col
listdict.append(tdic)
return listdict
def select_one(self, table, id):
sql = "SELECT * FROM %s WHERE id=%s" % (table, id)
return self.select(sql)[0]
def __del__(self):
if self.cur:
self.cur.close()
if self.conn:
self.conn.close()
print('cur and con closed')
if __name__ == '__main__':
# for row in Dbm('tst.sql3').select("select * from dia"):
# print(row)
# for row in Dbm('tst.sql3').select("select * from ej"):
# print(row)
print(Dbm('tst.sql3').select_one('dia', 100).values())
# dbm.qprint("select * from ej")
brandonmburroughs/food2vec | dat/RecipesScraper/RecipesScraper/settings.py | Python | mit | 864 | 0.002315
"""
Scrapy settings for RecipesScraper project.
"""
# Names
BOT_NAME = 'RecipesScraper'
SPIDER_MODULES = ['RecipesScraper.spiders']
NEWSPIDER_MODULE = 'RecipesScraper.spiders'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Disable cookies (enabled by default)
COOKIES_ENABLED = False
# Configure item pipelines
ITEM_PIPELINES = {
'RecipesScraper.pipelines.JsonPipeline': 300,
}
# Enable and configure the AutoThrottle extension (disabled by default)
AUTOTHROTTLE_ENABLED = True
# The initial download delay
AUTOTHROTTLE_START_DELAY = 3
# The maximum download delay to be set in case of high latencies
AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
AUTOTHROTTLE_TARGET_CONCURRENCY = 2.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
peterwilletts24/Python-Scripts | plot_scripts/EMBRACE/plot_from_pp_geop_height_by_day_dkbhu.py | Python | mit | 12,998 | 0.018926
"""
Load pp, plot and save
8km difference
"""
import os, sys
#%matplotlib inline
#%pylab inline
import matplotlib
matplotlib.use('Agg')
# Must be before importing matplotlib.pyplot or pylab!
from matplotlib import rc
from matplotlib.font_manager import FontProperties
from matplotlib import rcParams
from mpl_toolkits.basemap import Basemap
rc('font', family = 'serif', serif = 'cmr10')
rc('text', usetex=True)
rcParams['text.usetex']=True
rcParams['text.latex.unicode']=True
rcParams['font.family']='serif'
rcParams['font.serif']='cmr10'
import matplotlib.pyplot as plt
#from matplotlib import figure
import matplotlib as mpl
import matplotlib.cm as mpl_cm
import numpy as np
import iris
import iris.coords as coords
import iris.quickplot as qplt
import iris.plot as iplt
import iris.coord_categorisation
import iris.unit as unit
import cartopy.crs as ccrs
import cartopy.io.img_tiles as cimgt
import matplotlib.ticker as mticker
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
import datetime
from mpl_toolkits.basemap import cm
import imp
from textwrap import wrap
import re
import iris.analysis.cartography
import math
from dateutil import tz
#import multiprocessing as mp
import gc
import types
import pdb
save_path='/nfs/a90/eepdw/Figures/EMBRACE/'
model_name_convert_title = imp.load_source('util', '/nfs/see-fs-01_users/eepdw/python_scripts/modules/model_name_convert_title.py')
unrotate = imp.load_source('util', '/nfs/see-fs-01_users/eepdw/python_scripts/modules/unrotate_pole.py')
#pp_file = ''
plot_diags=['408']
#plot_diags=['sp_hum']
plot_levels = [925, 850, 700, 500]
#experiment_ids = ['dkmbq', 'dklyu']
experiment_ids = ['djznw', 'djzny', 'djznq', 'djzns', 'dklwu', 'dklzq'] # All minus large 2
#Experiment_ids = ['djzny', 'djznq', 'djzns', 'dkjxq', 'dklyu', 'dkmbq', 'dklwu', 'dklzq', 'dkbhu', 'djznu', 'dkhgu' ] # All 12
#experiment_ids = ['djzny', 'djznq', 'djzns', 'dkjxq', 'dklwu', 'dklzq', 'dkbhu',] # All 12
#experiment_ids = ['dkbhu', 'dkjxq']
experiment_ids = ['dkmbq', 'dklyu', 'djznw', 'djzny', 'djznq', 'djzns', 'dklwu', 'dklzq'] # All minus large 2
experiment_ids = ['dkbhu']
pp_file_path = '/nfs/a90/eepdw/Data/EMBRACE/'
degs_crop_top = 1.7
degs_crop_bottom = 2.5
from iris.coord_categorisation import add_categorised_coord
# def add_hour_of_day(cube, coord, name='hour'):
# add_categorised_coord(cube, name, coord,
# lambda coord, x: coord.units.num2date(x).hour)
figprops = dict(figsize=(8,8), dpi=100)
#cmap=cm.s3pcpn_l
u = unit.Unit('hours since 1970-01-01 00:00:00',calendar='gregorian')
dx, dy = 10, 10
divisor=10 # for lat/lon rounding
lon_high = 101.866
lon_low = 64.115
lat_high = 33.
lat_low =-6.79
lon_low_tick=lon_low -(lon_low%divisor)
lon_high_tick=math.ceil(lon_high/divisor)*divisor
lat_low_tick=lat_low - (lat_low%divisor)
lat_high_tick=math.ceil(lat_high/divisor)*divisor
def main():
for p_level in plot_levels:
# Set pressure height contour min/max
if p_level == 925:
clev_min = 0.
clev_max = 810.
elif p_level == 850:
clev_min = 1435.
clev_max = 1530.
elif p_level == 700:
clev_min = 3090.
clev_max = 3155.
elif p_level == 500:
clev_min = 5800.
clev_max = 5890.
else:
print 'Contour min/max not set for this pressure level'
# Set potential temperature min/max
if p_level == 925:
clevpt_min = 300.
clevpt_max = 312.
elif p_level == 850:
clevpt_min = 302.
clevpt_max = 310.
elif p_level == 700:
clevpt_min = 312.
clevpt_max = 320.
elif p_level == 500:
clevpt_min = 325.
clevpt_max = 332.
else:
print 'Potential temperature min/max not set for this pressure level'
# Set specific humidity min/max
if p_level == 925:
clevsh_min = 0.012
clevsh_max = 0.020
elif p_level == 850:
clevsh_min = 0.007
clevsh_max = 0.017
elif p_level == 700:
clevsh_min = 0.002
clevsh_max = 0.010
elif p_level == 500:
clevsh_min = 0.001
clevsh_max = 0.005
else:
print 'Specific humidity min/max not set for this pressure level'
#clevs_col = np.arange(clev_min, clev_max)
clevs_lin = np.arange(clev_min, clev_max, 256)
p_level_constraint = iris.Constraint(pressure=p_level)
for plot_diag in plot_diags:
for experiment_id in experiment_ids:
expmin1 = experiment_id[:-1]
# For each day in cube
height_pp_file = '%s_408_on_p_levs_mean_by_day.pp' % (experiment_id)
height_pfile = '%s%s/%s/%s' % (pp_file_path, expmin1, experiment_id, height_pp_file)
cube = iris.load_cube(height_pfile, p_level_constraint)
#print pcube
#print height_cube
time_coords = cube.coord('time')
#add_hour_of_day(pcube, pcube.coord('time'))
#add_hour_of_day(height_cube, height_cube.coord('time'))
iris.coord_categorisation.add_day_of_year(cube, time_coords, name='day_of_year')
#pcube.remove_coord('time')
#cube_diff.remove_coord('time')
#height_cube.remove_coord('time')
#height_cube_diff.remove_coord('time')
#p_cube_difference = iris.analysis.maths.subtract(pcube, cube_diff, dim='hour')
#height_cube_difference = iris.analysis.maths.subtract(height_cube, height_cube_diff, dim='hour')
#pdb.set_trace()
#del height_cube, pcube, height_cube_diff, cube_diff
for t, time_cube in enumerate(cube.slices(['grid_latitude', 'grid_longitude'])):
#pdb.set_trace()
                # Get time of averages for plot title
h = u.num2date(np.array(time_cube.coord('time').points, dtype=float)[0]).strftime('%d%b')
#Convert to India time
from_zone = tz.gettz('UTC')
to_zone = tz.gettz('Asia/Kolkata')
h_utc = u.num2date(np.array(time_cube.coord('day_of_year').points, dtype=float)[0]).replace(tzinfo=from_zone)
h_local = h_utc.astimezone(to_zone).strftime('%H%M')
fig = plt.figure(**figprops)
cmap=plt.cm.RdBu_r
ax = plt.axes(projection=ccrs.PlateCarree(), extent=(lon_low,lon_high,lat_low+degs_crop_bottom,lat_high-degs_crop_top))
m =\
Basemap(llcrnrlon=lon_low,llcrnrlat=lat_low,urcrnrlon=lon_high,urcrnrlat=lat_high, rsphere = 6371229)
#pdb.set_trace()
lat = cube.coord('grid_latitude').points
lon = cube.coord('grid_longitude').points
cs = cube.coord_system('CoordSystem')
lons, lats = np.meshgrid(lon, lat)
lons, lats = iris.analysis.cartography.unrotate_pole\
(lons,lats, cs.grid_north_pole_longitude, cs.grid_north_pole_latitude)
x,y = m(lons,lats)
# if plot_diag=='temp':
# min_contour = clevpt_min
# max_contour = clevpt_max
# cb_label='K'
# main_title='8km Explicit model (dklyu) minus 8km parametrised model geopotential height (grey contours), potential temperature (colours),\
#
JohnMnemonick/UralsCoin | qa/pull-tester/pull-tester.py | Python | mit | 8,944 | 0.007044
#!/usr/bin/python
# Copyright (c) 2013 The Bitsend Core developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import json
from urllib import urlopen
import requests
import getpass
from string import Template
import sys
import os
import subprocess
class RunError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
def run(command, **kwargs):
fail_hard = kwargs.pop("fail_hard", True)
# output to /dev/null by default:
kwargs.setdefault("stdout", open('/dev/null', 'w'))
kwargs.setdefault("stderr", open('/dev/null', 'w'))
command = Template(command).substitute(os.environ)
if "TRACE" in os.environ:
if 'cwd' in kwargs:
print("[cwd=%s] %s"%(kwargs['cwd'], command))
else: print(command)
try:
process = subprocess.Popen(command.split(' '), **kwargs)
process.wait()
except KeyboardInterrupt:
process.terminate()
raise
if process.returncode != 0 and fail_hard:
raise RunError("Failed: "+command)
return process.returncode
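# Hedged aside, not part of the original script: run() expands ${NAME}
# placeholders from os.environ via string.Template before splitting on
# spaces, so with BUILD_DIR=/mnt/build the call below would execute
# "git -C /mnt/build status" and raise RunError on a non-zero exit:
# run("git -C ${BUILD_DIR} status")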
def checkout_pull(clone_url, commit, out):
# Init
build_dir=os.environ["BUILD_DIR"]
run("umount ${CHROOT_COPY}/proc", fail_hard=False)
run("rsync --delete -apv ${CHROOT_MASTER}/ ${CHROOT_COPY}")
run("rm -rf ${CHROOT_COPY}${SCRIPTS_DIR}")
run("cp -a ${SCRIPTS_DIR} ${CHROOT_COPY}${SCRIPTS_DIR}")
# Merge onto upstream/master
run("rm -rf ${BUILD_DIR}")
run("mkdir -p ${BUILD_DIR}")
run("git clone ${CLONE_URL} ${BUILD_DIR}")
run("git remote add pull "+clone_url, cwd=build_dir, stdout=out, stderr=out)
run("git fetch pull", cwd=build_dir, stdout=out, stderr=out)
if run("git merge "+ commit, fail_hard=False, cwd=build_dir, stdout=out, stderr=out) != 0:
return False
run("chown -R ${BUILD_USER}:${BUILD_GROUP} ${BUILD_DIR}", stdout=out, stderr=out)
run("mount --bind /proc ${CHROOT_COPY}/proc")
return True
def commentOn(commentUrl, success, inMerge, needTests, linkUrl):
common_message = """
This test script verifies pulls every time they are updated. It, however, dies sometimes and fails to test properly. If you are waiting on a test, please check timestamps to verify that the test.log is moving at http://jenkins.bluematt.me/pull-tester/current/
Contact BlueMatt on freenode if something looks broken."""
# Remove old BitsendPullTester comments (I'm being lazy and not paginating here)
recentcomments = requests.get(commentUrl+"?sort=created&direction=desc",
auth=(os.environ['GITHUB_USER'], os.environ["GITHUB_AUTH_TOKEN"])).json
for comment in recentcomments:
if comment["user"]["login"] == os.environ["GITHUB_USER"] and common_message in comment["body"]:
requests.delete(comment["url"],
auth=(os.environ['GITHUB_USER'], os.environ["GITHUB_AUTH_TOKEN"]))
if success == True:
if needTests:
message = "Automatic sanity-testing: PLEASE ADD TEST-CASES, though technically passed. See " + linkUrl + " for binaries and test log."
else:
message = "Automatic sanity-testing: PASSED, see " + linkUrl + " for binaries and test log."
post_data = { "body" : message + common_message}
elif inMerge:
post_data = { "body" : "Automatic sanity-testing: FAILED MERGE, see " + linkUrl + " for test log." + """
This pull does not merge cleanly onto current master""" + common_message}
else:
post_data = { "body" : "Automatic sanity-testing: FAILED BUILD/TEST, see " + linkUrl + " for binaries and test log." + """
This could happen for one of several reasons:
1. It changes build scripts in a way that makes them incompatible with the automated testing scripts (please tweak those patches in qa/pull-tester)
2. It adds/modifies tests which test network rules (thanks for doing that), which conflicts with a patch applied at test time
3. It does not build on either Linux i386 or Win32 (via MinGW cross compile)
4. The test suite fails on either Linux i386 or Win32
5. The block test-cases failed (lookup the first bNN identifier which failed in https://github.com/TheBlueMatt/test-scripts/blob/master/FullBlockTestGenerator.java)
If you believe this to be in error, please ping BlueMatt on freenode or TheBlueMatt here.
""" + common_
|
message}
resp = requests.post(commentUrl, json.dumps(post_data), auth=(os.environ['GITHUB_USER'], os.environ["GITHUB_AUTH_TOKEN"]))
def testpull(number, comment_url, clone_url, commit):
print("Testing pull %d: %s : %s"%(number, clone_url,commit))
dir = os.environ["RESULTS_DIR"] + "/" + commit + "/"
print(" ouput to %s"%dir)
if os.path.exists(dir):
os.system("rm -r " + dir)
os.makedirs(dir)
currentdir = os.environ["RESULTS_DIR"] + "/current"
os.system("rm -r "+currentdir)
os.system("ln -s " + dir + " " + currentdir)
out = open(dir + "test.log", 'w+')
resultsurl = os.environ["RESULTS_URL"] + commit
checkedout = checkout_pull(clone_url, commit, out)
if checkedout != True:
print("Failed to test pull - sending comment to: " + comment_url)
commentOn(comment_url, False, True, False, resultsurl)
open(os.environ["TESTED_DB"], "a").write(commit + "\n")
return
run("rm -rf ${CHROOT_COPY}/${OUT_DIR}", fail_hard=False);
run("mkdir -p ${CHROOT_COPY}/${OUT_DIR}", fail_hard=False);
run("chown -R ${BUILD_USER}:${BUILD_GROUP} ${CHROOT_COPY}/${OUT_DIR}", fail_hard=False)
script = os.environ["BUILD_PATH"]+"/qa/pull-tester/pull-tester.sh"
script += " ${BUILD_PATH} ${MINGW_DEPS_DIR} ${SCRIPTS_DIR}/BitsenddComparisonTool_jar/BitsenddComparisonTool.jar 0 6 ${OUT_DIR}"
returncode = run("chroot ${CHROOT_COPY} sudo -u ${BUILD_USER} -H timeout ${TEST_TIMEOUT} "+script,
fail_hard=False, stdout=out, stderr=out)
run("mv ${CHROOT_COPY}/${OUT_DIR} " + dir)
run("mv ${BUILD_DIR} " + dir)
if returncode == 42:
print("Successfully tested pull (needs tests) - sending comment to: " + comment_url)
commentOn(comment_url, True, False, True, resultsurl)
elif returncode != 0:
print("Failed to test pull - sending comment to: " + comment_url)
commentOn(comment_url, False, False, False, resultsurl)
else:
print("Successfully tested pull - sending comment to: " + comment_url)
commentOn(comment_url, True, False, False, resultsurl)
open(os.environ["TESTED_DB"], "a").write(commit + "\n")
def environ_default(setting, value):
if not setting in os.environ:
os.environ[setting] = value
if getpass.getuser() != "root":
print("Run me as root!")
sys.exit(1)
if "GITHUB_USER" not in os.environ or "GITHUB_AUTH_TOKEN" not in os.environ:
print("GITHUB_USER and/or GITHUB_AUTH_TOKEN environment variables not set")
sys.exit(1)
environ_default("CLONE_URL", "https://github.com/bitsend/bitsend.git")
environ_default("MINGW_DEPS_DIR", "/mnt/w32deps")
environ_default("SCRIPTS_DIR", "/mnt/test-scripts")
environ_default("CHROOT_COPY", "/mnt/chroot-tmp")
environ_default("CHROOT_MASTER", "/mnt/chroot")
environ_default("OUT_DIR", "/mnt/out")
environ_default("BUILD_PATH", "/mnt/bitsend")
os.environ["BUILD_DIR"] = os.environ["CHROOT_COPY"] + os.environ["BUILD_PATH"]
environ_default("RESULTS_DIR", "/mnt/www/pull-tester")
environ_default("RESULTS_URL", "http://jenkins.bluematt.me/pull-tester/")
environ_default("GITHUB_REPO", "bitsend/bitsend")
environ_default("TESTED_DB", "/mnt/commits-tested.txt")
environ_default("BUILD_USER", "matt")
environ_default("BUILD_GROUP", "matt")
environ_default("TEST_TIMEOUT", str(60*60*2))
print("Optional usage: pull-tester.py 2112")
f = open(os.environ["TESTED_DB"])
tested = set( line.rstrip() for line in f.readlines() )
f.close()
if len(sys.argv) > 1:
pull = requests.get("https://api.github.com/repos/"+os.environ["GITHUB_REPO"]+"/pulls/"+sys.argv[1],
auth=(os.environ['GITHUB_USER'], os.environ["GITHUB_AUTH_
bollwyvl/ipytangle | ipytangle/__init__.py | Python | bsd-3-clause | 5,335 | 0
import inspect
from IPython.utils.traitlets import (
Any,
CInt,
CBool,
CFloat,
Dict,
Tuple,
link,
)
from IPython.html.widgets import Widget
from IPython.html.widgets.widget_selection import _Selection
from .widgets import Tangle
__all__ = ["Tangle", "tangle"]
function = type(lambda: 0)
class AutoTangle(Tangle):
def __init__(self, *args, **kwargs):
super(AutoTangle, self).__init__(*args, **kwargs)
for key, widget_traitlet in self._links:
link((self, key), widget_traitlet)
for key, fn_subscribed in self._derived.items():
self._subscribe(key, fn_subscribed)
def _subscribe(self, key, fn_subscribed):
fn, subscribed = fn_subscribed
def _handler():
handler_kwargs = {sub: getattr(self, sub) for sub in subscribed}
setattr(self, key, fn(**handler_kwargs))
self.on_trait_change(_handler, name=subscribed)
def _refresh(self):
for key, fn_subscribed in self._derived.items():
fn, subscribed = fn_subscribed
for sub in subscribed:
val = getattr(self, sub)
self._notify_trait(sub, val, val)
return self
def _get_primitive(value):
if isinstance(value, int):
return CInt
elif isinstance(value, bool):
return CBool
elif isinstance(value, float):
return CFloat
elif isinstance(value, dict):
return Dict
def _link_widget(key, value, class_attrs):
widget_cls = value.__class__
traitlet_cls = getattr(widget_cls, "value").__class__
if isinstance(value, _Selection):
label_key = "{}_label".format(key)
options_key = "{}_label_options".format(key)
class_attrs["_links"] += [
(label_key, (value, "selected_label")),
]
class_attrs["_dlinks"] += [
(key, (value, "value")),
(options_key, (value, "_options_labels")),
]
label_cls = getattr(widget_cls, "selected_label").__class__
class_attrs[label_key] = label_cls(value.selected_label, sync=True)
options_cls = getattr(widget_cls, "_options_labels").__class__
class_attrs[options_key] = options_cls(value._options_labels,
sync=True)
else:
class_attrs["_links"].append((key, (value, "value")))
class_attrs[key] = traitlet_cls(sync=True)
return class_attrs
def tangle(*args, **kwargs):
"""
Shortcut to create a new, custom Tangle model. Use instead of directly
subclassing `Tangle`.
A new, custom Widget class is created, with each of `kwargs` as a traitlet.
Returns an instance of the new class with default values.
`kwargs` options
- primitive types (int, bool, float) will be created as casting versions
(`CInt`, `CBool`, `CFloat`)
- a `list` will be created as an `Enum`
- a `Widget` instance will create a link to that widget's `value`
- a `tuple` `(widget_instance, "traitlet")` will create a `link`
- functions will be `inspect`ed to find their argument names subscribed for
update... this uses `inspect`, won't work with `*` magic
- a `tuple` `(function, default)` will be created as the type (as
above)
"""
class_attrs = {
"_links": [],
"_dlinks": [],
"_derived": {}
}
for value in args:
if isinstance(value, function):
# we'll just go ahead and assume this was made by `interact`
if hasattr(value, "widget") and hasattr(value.widget, "children"):
for child in value.widget.children:
_link_widget(child.description, child, class_attrs)
for key, value in kwargs.items():
traitlet_cls = _get_primitive(value)
traitlet_args = [value]
traitlet_kwargs = {
"sync": True
}
handled = False
if traitlet_cls is not None:
pass
elif isinstance(value, list):
traitlet_cls = Any
traitlet_args = [value[0]]
class_attrs["{}_options".format(key)] = Tuple(value, sync=True)
elif isinstance(value, Widget):
_link_widget(key, value, class_attrs)
handled = True
elif isinstance(value, tuple):
if isinstance(value[0], Widget):
widget, traitlet = value
widget_cls = widget.__class__
traitlet_args = []
                traitlet_cls = getattr(widget_cls, traitlet).__class__
class_attrs["_links"].append((key, value))
elif hasattr(value[1], "__call__"):
example, fn = value
traitlet_args = [example]
traitlet_cls = _get_primitive(example)
subscribed = inspect.getargspec(fn).args
class_attrs["_derived"][key] = (fn, subscribed)
if not handled:
if traitlet_cls is None:
raise ValueError("Didn't understand {}: {}".format(key, value))
class_attrs[key] = traitlet_cls(*traitlet_args, **traitlet_kwargs)
new_class = type(
'DynamicAutoTangle{}'.format(id(class_attrs)),
(AutoTangle,),
class_attrs
)
inst = new_class()
return inst._refresh()
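# --- Hedged usage sketch, not from the original package ---
# Per the docstring above: primitives become casting traitlets, and a
# (default, function) tuple becomes a derived value that recomputes when the
# function's named arguments change. The names below are invented.
#
# model = tangle(cookies=3,
#                calories=(150.0, lambda cookies: cookies * 50.0))
# model.cookies = 4   # model.calories recomputes to 200.0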
angr/angr | angr/engines/pcode/arch/ArchPcode_dsPIC33E_LE_24_default.py | Python | bsd-2-clause | 4,846 | 0.000619
###
### This file was automatically generated
###
from archinfo.arch import register_arch, Endness, Register
from .common import ArchPcode
class ArchPcode_dsPIC33E_LE_24_default(ArchPcode):
name = 'dsPIC33E:LE:24:default'
pcode_arch = 'dsPIC33E:LE:24:default'
description = 'dsPIC33E'
bits = 24
ip_offset = 0x2e
sp_offset = 0x1e
bp_offset = sp_offset
instruction_endness = Endness.LE
register_list = [
Register('w1w0', 4, 0x0),
Register('w0', 2, 0x0),
Register('w0byte', 1, 0x0),
Register('w1', 2, 0x2),
Register('w1byte', 1, 0x2),
Register('w3w2', 4, 0x4),
Register('w2', 2, 0x4),
Register('w2byte', 1, 0x4),
Register('w3', 2, 0x6),
Register('w3byte', 1, 0x6),
Register('w5w4', 4, 0x8),
Register('w4', 2, 0x8),
Register('w4byte', 1, 0x8),
Register('w5', 2, 0xa),
Register('w5byte', 1, 0xa),
Register('w7w6', 4, 0xc),
Register('w6', 2, 0xc),
        Register('w6byte', 1, 0xc),
Register('w7', 2, 0xe),
Register('w7byte', 1, 0xe),
Register('w9w8', 4, 0x10),
Register('w8', 2, 0x10),
Register('w8byte', 1, 0x10),
Register('w9', 2, 0x12),
Register('w9byte', 1, 0x12),
Register('w11w10', 4, 0x14),
Register('w10', 2, 0x14),
Register('w10byte', 1, 0x14),
Register('w11', 2, 0x16),
Register('w11byte', 1, 0x16),
Register('w13w12', 4, 0x18),
Register('w12', 2, 0x18),
Register('w12byte', 1, 0x18),
Register('w13', 2, 0x1a),
Register('w13byte', 1, 0x1a),
Register('w15w14', 4, 0x1c),
Register('w14', 2, 0x1c),
Register('w14byte', 1, 0x1c),
Register('w15', 2, 0x1e),
Register('w15byte', 1, 0x1e),
Register('splim', 2, 0x20),
Register('acca', 6, 0x22),
Register('accal', 2, 0x22),
Register('accah', 2, 0x24),
Register('accau', 2, 0x26),
Register('accb', 6, 0x28),
Register('accbl', 2, 0x28),
Register('accbh', 2, 0x2a),
Register('accbu', 2, 0x2c),
Register('pc', 3, 0x2e, alias_names=('ip',)),
Register('dsrpag', 2, 0x32),
Register('dswpag', 2, 0x34),
Register('rcount', 2, 0x36),
Register('corcon', 2, 0x44),
Register('modcon', 2, 0x46),
Register('xmodsrt', 2, 0x48),
Register('xmodend', 2, 0x4a),
Register('ymodsrt', 2, 0x4c),
Register('ymodend', 2, 0x4e),
Register('xbrev', 2, 0x50),
Register('disicnt', 2, 0x52),
Register('tblpag', 1, 0x54),
Register('shadow_w0', 2, 0x0),
Register('shadow_w1', 2, 0x2),
Register('shadow_w2', 2, 0x4),
Register('shadow_w3', 2, 0x6),
Register('srl', 1, 0x400),
Register('srh', 1, 0x401),
Register('srh_oa', 1, 0x600),
Register('srh_ob', 1, 0x601),
Register('srh_sa', 1, 0x602),
Register('srh_sb', 1, 0x603),
Register('srh_oab', 1, 0x604),
Register('srh_sab', 1, 0x605),
Register('srh_da', 1, 0x606),
Register('srh_dc', 1, 0x607),
Register('srl_ipl2', 1, 0x608),
Register('srl_ipl1', 1, 0x609),
Register('srl_ipl0', 1, 0x60a),
Register('srl_ra', 1, 0x60b),
Register('srl_n', 1, 0x60c),
Register('srl_ov', 1, 0x60d),
Register('srl_z', 1, 0x60e),
Register('srl_c', 1, 0x60f),
Register('disi', 1, 0x610),
Register('shadow_srh_dc', 1, 0x611),
Register('shadow_srl_n', 1, 0x612),
Register('shadow_srl_ov', 1, 0x613),
Register('shadow_srl_z', 1, 0x614),
Register('shadow_srl_c', 1, 0x615),
Register('dostart', 3, 0x800),
Register('dostart1', 3, 0x803),
Register('dostart2', 3, 0x806),
Register('dostart3', 3, 0x809),
Register('doend', 3, 0x80c),
Register('doend1', 3, 0x80f),
Register('doend2', 3, 0x812),
Register('doend3', 3, 0x815),
Register('dostart_shadow', 3, 0x818),
Register('doend_shadow', 3, 0x81b),
Register('wdtcount', 2, 0xa00),
Register('wdtprescalara', 2, 0xa02),
Register('wdtprescalarb', 2, 0xa04),
Register('corcon_var', 1, 0xc00),
Register('corcon_ipl3', 1, 0xc01),
Register('corcon_psv', 1, 0xc02),
Register('corcon_sfa', 1, 0xc03),
Register('corcon_dl', 1, 0xc04),
Register('dcount', 2, 0x1000),
Register('dcount1', 2, 0x1002),
Register('dcount2', 2, 0x1004),
Register('dcount3', 2, 0x1006),
Register('skipnextflag', 1, 0x1200),
Register('contextreg', 4, 0x1400)
]
register_arch(['dspic33e:le:24:default'], 24, Endness.LE, ArchPcode_dsPIC33E_LE_24_default)
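# Hedged aside, not part of the generated file: once registered, the arch can
# be retrieved through archinfo's generic lookup, e.g.:
# import archinfo
# arch = archinfo.arch_from_id('dsPIC33E:LE:24:default')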
ansible/tower-cli | docs/source/cli_ref/examples/inventory_script_example.py | Python | apache-2.0 | 327 | 0.003058
#!/usr/bin/env python
import json
inv = {
'_meta': {
'hostvars': {}
},
'hosts': []
}
for num in range(0, 3):
host = u"host-%0.2d" % num
inv['hosts'].append(host)
    inv['_meta']['hostvars'][host] = dict(ansible_ssh_host='127.0.0.1', ansible_connection='local')
print(json.dumps(inv, indent=2))
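# Hedged illustration, not part of the original script: the emitted JSON has
# the shape Ansible expects from a dynamic inventory source, e.g.:
# {
#   "_meta": {"hostvars": {"host-00": {"ansible_ssh_host": "127.0.0.1",
#                                      "ansible_connection": "local"}, ...}},
#   "hosts": ["host-00", "host-01", "host-02"]
# }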
swiftstack/swift | test/unit/proxy/test_sysmeta.py | Python | apache-2.0 | 21,779 | 0
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from six.moves.urllib.parse import quote
import unittest
import os
from tempfile import mkdtemp
import shutil
from swift.common.middleware.copy import ServerSideCopyMiddleware
from swift.common.storage_policy import StoragePolicy
from swift.common.swob import Request
from swift.common.utils import mkdirs, split_path
from swift.common.wsgi import WSGIContext
from swift.obj import server as object_server
from swift.proxy import server as proxy
import swift.proxy.controllers
from swift.proxy.controllers.base import get_object_info
from test.unit import FakeMemcache, debug_logger, FakeRing, \
fake_http_connect, patch_policies, skip_if_no_xattrs
class FakeServerConnection(WSGIContext):
'''Fakes an HTTPConnection to a server instance.'''
def __init__(self, app):
super(FakeServerConnection, self).__init__(app)
self.data = b''
def getheaders(self):
return self._response_headers
def read(self, amt=None):
try:
return next(self.resp_iter)
except StopIteration:
return b''
def getheader(self, name, default=None):
result = self._response_header_value(name)
return result if result else default
def getresponse(self):
environ = {'REQUEST_METHOD': self.method}
req = Request.blank(self.path, environ, headers=self.req_headers,
body=self.data)
self.data = b''
self.resp = self._app_call(req.environ)
self.resp_iter = iter(self.resp)
if self._response_headers is None:
self._response_headers = []
status_parts = self._response_status.split(' ', 1)
self.status = int(status_parts[0])
self.reason = status_parts[1] if len(status_parts) == 2 else ''
return self
def getexpect(self):
class ContinueResponse(object):
status = 100
return ContinueResponse()
def send(self, data):
self.data += data
def close(self):
pass
def __call__(self, ipaddr, port, device, partition, method, path,
headers=None, query_string=None):
self.path = quote('/' + device + '/' + str(partition) + path)
self.method = method
self.req_headers = headers
return self
def get_http_connect(account_func, container_func, object_func):
    '''Returns an http_connect function that delegates to
entity-specific http_connect methods based on request path.
'''
def http_connect(ipaddr, port, device, partition, method, path,
headers=None, query_string=None):
a, c, o = split_path(path, 1, 3, True)
if o:
func = object_func
elif c:
func = container_func
else:
func = account_func
resp = func(ipaddr, port, device, partition, method, path,
headers=headers, query_string=query_string)
return resp
return http_connect
@patch_policies([StoragePolicy(0, 'zero', True,
object_ring=FakeRing(replicas=1))])
class TestObjectSysmeta(unittest.TestCase):
'''Tests object sysmeta is correctly handled by combination
of proxy server and object server.
'''
def _assertStatus(self, resp, expected):
self.assertEqual(resp.status_int, expected,
'Expected %d, got %s'
% (expected, resp.status))
def _assertInHeaders(self, resp, expected):
for key, val in expected.items():
self.assertIn(key, resp.headers,
'Header %s missing from %s' % (key, resp.headers))
self.assertEqual(val, resp.headers[key],
'Expected header %s:%s, got %s:%s'
% (key, val, key, resp.headers[key]))
def _assertNotInHeaders(self, resp, unexpected):
for key, val in unexpected.items():
self.assertNotIn(key, resp.headers,
'Header %s not expected in %s'
% (key, resp.headers))
def setUp(self):
skip_if_no_xattrs()
self.app = proxy.Application(None,
logger=debug_logger('proxy-ut'),
account_ring=FakeRing(replicas=1),
container_ring=FakeRing(replicas=1))
self.copy_app = ServerSideCopyMiddleware(self.app, {})
self.tmpdir = mkdtemp()
self.testdir = os.path.join(self.tmpdir,
'tmp_test_object_server_ObjectController')
mkdirs(os.path.join(self.testdir, 'sda', 'tmp'))
conf = {'devices': self.testdir, 'mount_check': 'false'}
self.obj_ctlr = object_server.ObjectController(
conf, logger=debug_logger('obj-ut'))
http_connect = get_http_connect(fake_http_connect(200),
fake_http_connect(200),
FakeServerConnection(self.obj_ctlr))
self.orig_base_http_connect = swift.proxy.controllers.base.http_connect
self.orig_obj_http_connect = swift.proxy.controllers.obj.http_connect
swift.proxy.controllers.base.http_connect = http_connect
swift.proxy.controllers.obj.http_connect = http_connect
def tearDown(self):
shutil.rmtree(self.tmpdir)
swift.proxy.controllers.base.http_connect = self.orig_base_http_connect
swift.proxy.controllers.obj.http_connect = self.orig_obj_http_connect
original_sysmeta_headers_1 = {'x-object-sysmeta-test0': 'val0',
'x-object-sysmeta-test1': 'val1'}
original_sysmeta_headers_2 = {'x-object-sysmeta-test2': 'val2'}
changed_sysmeta_headers = {'x-object-sysmeta-test0': '',
'x-object-sysmeta-test1': 'val1 changed'}
new_sysmeta_headers = {'x-object-sysmeta-test3': 'val3'}
original_meta_headers_1 = {'x-object-meta-test0': 'meta0',
'x-object-meta-test1': 'meta1'}
original_meta_headers_2 = {'x-object-meta-test2': 'meta2'}
changed_meta_headers = {'x-object-meta-test0': '',
'x-object-meta-test1': 'meta1 changed'}
new_meta_headers = {'x-object-meta-test3': 'meta3'}
bad_headers = {'x-account-sysmeta-test1': 'bad1'}
# these transient_sysmeta headers get changed...
original_transient_sysmeta_headers_1 = \
{'x-object-transient-sysmeta-testA': 'A'}
# these transient_sysmeta headers get deleted...
original_transient_sysmeta_headers_2 = \
{'x-object-transient-sysmeta-testB': 'B'}
# these are replacement transient_sysmeta headers
changed_transient_sysmeta_headers = \
{'x-object-transient-sysmeta-testA': 'changed_A'}
new_transient_sysmeta_headers_1 = {'x-object-transient-sysmeta-testC': 'C'}
new_transient_sysmeta_headers_2 = {'x-object-transient-sysmeta-testD': 'D'}
def test_PUT_sysmeta_then_GET(self):
path = '/v1/a/c/o'
env = {'REQUEST_METHOD': 'PUT'}
hdrs = dict(self.original_sysmeta_headers_1)
hdrs.update(self.original_meta_headers_1)
hdrs.update(self.bad_headers)
hdrs.update(self.original_transient_sysmeta_headers_1)
req = Request.blank(path, environ=env, headers=hdrs, body=b'x')
resp = req.get_response(self.app)
self._assertStatus(resp, 201)
req = Request.blank(path, environ={})
resp = req.get_response(self.app)
self._assertStatus(resp, 200)
|
axbaretto/beam
|
sdks/python/.tox/py27gcp/lib/python2.7/site-packages/google/cloud/_testing.py
|
Python
|
apache-2.0
| 3,140
| 0
|
# Copyright 2014 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Shared testing utilities."""
# Avoid the grpc and google.cloud.grpc collision.
from __future__ import absolute_import
class _Monkey(object):
# context-manager for replacing module names in the scope of a test.
def __init__(self, module, **kw):
self.module = module
if len(kw) == 0: # pragma: NO COVER
raise ValueError('_Monkey was used with nothing to monkey-patch')
self.to_restore = {key: getattr(module, key) for key in kw}
for key, value in kw.items():
setattr(module, key, value)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
for key, value in self.to_restore.items():
setattr(self.module, key, value)
class _NamedTemporaryFile(object):
def __init__(self, suffix=''):
import os
import tempfile
filehandle, self.name = tempfile.mkstemp(suffix=suffix)
os.close(filehandle)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
import os
os.remove(self.name)
def _tempdir_maker():
import contextlib
import shutil
import tempfile
@contextlib.contextmanager
def _tempdir_mgr():
temp_dir = tempfile.mkdtemp()
yield temp_dir
shutil.rmtree(temp_dir)
return _tempdir_mgr
_tempdir = _tempdir_maker()
del _tempdir_maker
class _GAXBaseAPI(object):
_random_gax_error = False
def __init__(self, **kw):
self.__dict__.update(kw)
def _make_grpc_error(self, status_code, trailing=None):
from grpc._channel import _RPCState
from google.cloud.exceptions import GrpcRendezvous
details = 'Some error details.'
exc_state = _RPCState((), None, trailing, status_code, details)
return GrpcRendezvous(exc_state, None, None, None)
def _make_grpc_not_found(self):
from grpc import StatusCode
return self._make_grpc_error(StatusCode.NOT_FOUND)
def _make_grpc_failed_precondition(self):
from grpc import StatusCode
        return self._make_grpc_error(StatusCode.FAILED_PRECONDITION)
def _make_grpc_deadline_exceeded(self):
from grpc import StatusCode
return self._make_grpc_error(StatusCode.DEADLINE_EXCEEDED)
class _GAXPageIterator(object):
def __init__(self, *pages, **kwargs):
        self._pages = iter(pages)
self.page_token = kwargs.get('page_token')
def next(self):
import six
return six.next(self._pages)
__next__ = next
|
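The _Monkey helper above is a plain context manager, so a test wraps the patched code in a with-block; attributes are swapped on entry and restored on exit even if the body raises. A minimal usage sketch:
import math

# Replace math.pi for the duration of the block; _Monkey records the
# original attribute values and puts them back in __exit__.
with _Monkey(math, pi=3):
    assert math.pi == 3
assert math.pi == 3.141592653589793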
EmanueleCannizzaro/scons
|
src/engine/SCons/Tool/mssdk.py
|
Python
|
mit
| 1,834
| 0.001636
|
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/mssdk.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
"""engine.SCons.Tool.mssdk
Tool-specific initialization for Microsoft SDKs, both Platform
SDKs and Windows SDKs.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
from MSCommon import mssdk_exists, \
mssdk_setup_env
def generate(env):
"""Add construction variables for an MS SDK to an Environment."""
mssdk_setup_env(env)
def exists(env):
return mssdk_exists()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
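In normal use this module is not imported directly; the tool name is passed to an Environment and SCons calls generate(env)/exists(env) itself. A sketch (assuming SCons is installed and an MS SDK is present on the machine):
from SCons.Environment import Environment

# Requesting the tool by name triggers mssdk.generate(env), which calls
# mssdk_setup_env(env) to populate the SDK-related construction variables.
env = Environment(tools=['mssdk'])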
adamcandy/qgis-plugins-meshing
|
dev/tests/gaussian_bump.py
|
Python
|
lgpl-2.1
| 4,413
| 0.009517
|
#!/usr/bin/env python
##########################################################################
#
# QGIS-meshing plugins.
#
# Copyright (C) 2012-2013 Imperial College London and others.
#
# Please see the AUTHORS file in the main source directory for a
# full list of copyright holders.
#
# Dr Adam S. Candy, adam.candy@imperial.ac.uk
# Applied Modelling and Computation Group
# Department of Earth Science and Engineering
# Imperial College London
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation,
# version 2.1 of the License.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
#
##########################################################################
import numpy
import argparse
import os
import math
from Scientific.IO import NetCDF
def main():
parser = argparse.ArgumentParser(
prog="gaussian_bump",
description="""Create a Gaussian bump in a netcdf file"""
)
parser.add_argument(
'-v',
'--verbose',
action='store_true',
help="Verbose output: mainly progress reports.",
default=False
)
parser.add_argument(
'-d',
'--domain',
help="Domain size. Defualt is 1000x1000m",
default=1000.0,
type=float
)
parser.add_argument(
'-b',
'--bumpheight',
help="Distance between seabed and top of bump. Default is 100m",
default=100,
type=float
)
parser.add_argument(
'-r',
'--resolution',
help="Resolution of output netcdf file. Default is 10m",
default=10.0,
type=float
)
parser.add_argument(
'--shift',
help="Shift the bump in the 'north-south' direction, wrapping along the top/bottom",
default = 0,
type=float
)
parser.add_argument(
'--spread',
help="Spread of Gaussian",
default = 100.0,
type=float
)
parser.add_argument(
'output_file',
metavar='output_file',
nargs=1,
help='The output netcdf file'
)
args = parser.parse_args()
verbose = args.verbose
output_file = args.output_file[0]
domain_size = args.domain
bump_height = args.bumpheight
resolution = args.resolution
shift = args.shift
spread = args.spread
nPoints = int(domain_size / resolution)
shift = int(shift/resolution)
if (verbose):
print nPoints, shift
# generate regular grid
X, Y = numpy.meshgrid(numpy.linspace(0.0, domain_size, nPoints), numpy.linspace(0.0, domain_size, nPoints))
Z = numpy.zeros((nPoints,nPoints))
#for each point calculate the Gaussian
centre = domain_size/2.0
for i in range(0,len(X)):
for j in range(0,len(X[0])):
r = ((X[i][j]-centre)**2/(2.0*spread**2) + (Y[i][j]-centre)**2/(2.0*spread**2))
Z[i][j] = bump_height * math.exp(-1.0*r)
if (not shift == 0.0):
Z = numpy.roll(Z, shift, 0)
f = NetCDF.NetCDFFile(output_file, 'w')
xDim = f.createDimension("X", nPoints)
yDim = f.createDimension("Y", nPoints)
x = f.createVariable("X","d",("X",))
y = f.createVariable("Y","d",("Y",))
zVar = f.createVariable("Z","d",("X","Y"))
x.assignValue(X[0,0:nPoints])
y.assignValue(Y[0:nPoints,0])
zVar.assignValue(Z)
f.close()
os.system('grdreformat '+output_file+' '+output_file)
os.system('rm -f 1_contour.* 50_contour.*')
os.system('gdal_contour -fl 1.0 NETCDF:"'+output_file+'":z 1_contour.shp')
    os.system('gdal_contour -fl 50.0 NETCDF:"'+output_file+'":z 50_contour.shp')
if __name__ == "__main__":
    main()
|
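The bump formula above is Z = H * exp(-r) with r = dx**2/(2*s**2) + dy**2/(2*s**2), so the peak equals the requested bump height and the surface falls to exp(-0.5) of it one spread away along an axis. A quick numeric check of that claim:
import math

H, s = 100.0, 100.0  # bump height and spread, matching the script defaults
z_centre = H * math.exp(-0.0)
z_one_spread = H * math.exp(-(s ** 2) / (2.0 * s ** 2))
assert z_centre == H
assert abs(z_one_spread - H * math.exp(-0.5)) < 1e-12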
auspex/sonyutilities
|
iterator.py
|
Python
|
gpl-3.0
| 1,536
| 0.013672
|
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import (division, absolute_import, print_function)
__license__ = 'GPL v3'
__copyright__ = '2014, Derek Broughton <auspex@pointerstop.ca>'
__docformat__ = 'restructuredtext en'
from calibre.ebooks.oeb.iterator.book import EbookIterator as _iterator
import os
class EbookIterator(_iterator):
def __init__(self, pathtoebook, log=None):
super(EbookIterator, self).__init__(pathtoebook, log=log)
self.__enter__(only_input_plugin=True, read_anchor_map=False)
def convert_from_sony_bookmark(self, bookmark, title=''):
filename,pos = bookmark.split('#point')
pos = pos.strip(u'(\x00)').split('/')
# Adobe Digital Editions doesn't count the tags correctly
if pos[1] == '1':
pos[1] = '2'
pos = '/'.join(pos)
prefix = self._tdir.tdir
path = os.path.join(prefix,filename)
spine_num = self.spine.index(path)
bm = dict(title=title, type='cfi', spine=spine_num, pos=pos)
return bm
    def convert_to_sony_bookmark(self, bm):
        prefix = self._tdir.tdir+'/'
filename = self.spine[bm['spine']].rpartition(prefix)[2]
pos = bm['pos'].split('/')
# ADE doesn't count the <HEAD> tag
if pos[1] == '2':
pos[1] = '1'
        bookmark = "%s#point(%s)" % (filename, '/'.join(pos))
return bookmark
|
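The two converters above are intended to be inverses of each other, modulo the off-by-one tag-count fixups for Adobe Digital Editions. The string handling can be sketched without a real book (the bookmark text here is illustrative):
# A Sony bookmark is '<spine file>#point(<path>)'; splitting it mirrors
# the first lines of convert_from_sony_bookmark().
bookmark = u'text/part0001.html#point(/1/2/10:3)'
filename, pos = bookmark.split('#point')
assert filename == u'text/part0001.html'
assert pos.strip(u'(\x00)') == u'/1/2/10:3'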
cpina/science-cruise-data-management
|
ScienceCruiseDataManagement/main/management/commands/exportgpstracks.py
|
Python
|
mit
| 8,461
| 0.004728
|
from django.core.management.base import BaseCommand, CommandError
from ship_data.models import GpggaGpsFix
import datetime
from main import utils
import csv
import os
from django.db.models import Q
import glob
from main.management.commands import findgpsgaps
gps_bridge_working_intervals = None
# This file is part of https://github.com/cpina/science-cruise-data-management
#
# This project was programmed in a hurry without any prior Django experience,
# while circumnavigating the Antarctic on the ACE expedition, without proper
# Internet access, with 150 scientists using the system and doing at the same
# cruise other data management and system administration tasks.
#
# Sadly there aren't unit tests and we didn't have time to refactor the code
# during the cruise, which is really needed.
#
# Carles Pina (carles@pina.cat) and Jen Thomas (jenny_t152@yahoo.co.uk), 2016-2017.
class Command(BaseCommand):
help = 'Outputs the track in CSV format.'
def add_arguments(self, parser):
parser.add_argument('output_directory', type=str, help="Will delete existing files that started on the same start date")
parser.add_argument('start', type=str, help="Start of the GPS data. Format: YYYYMMDD")
parser.add_argument('end', type=str, help="End of the GPS data. Format: YYYYMMDD or 'yesterday'")
def handle(self, *args, **options):
generate_all_tracks(options['output_directory'], options['start'], options['end'])
def generate_all_tracks(output_directory, start, end):
global gps_bridge_working_intervals
gps_gaps = findgpsgaps.FindDataGapsGps("GPS Bridge1", start, end)
gps_bridge_working_intervals = gps_gaps.find_gps_missings()
generate_fast(output_directory, 3600, "1hour", start, end)
generate_fast(output_directory, 300, "5min", start, end)
generate_fast(output_directory, 60, "1min", start, end)
generate_fast(output_directory, 1, "1second", start, end)
def generate_fast(output_directory, seconds, file_suffix, start, end):
"""
This method uses Mysql datetime 'ends with' instead of doing individual queries
for each 'seconds'. It's faster but harder to find gaps in the data.
"""
first_date = datetime.datetime.strptime(start, "%Y%m%d")
first_date = utils.set_utc(first_date)
if end == "yesterday":
last_date = utils.last_midnight()
else:
last_date = datetime.datetime.strptime(end, "%Y%m%d")
last_date = utils.set_utc(last_date)
starts_file_format = first_date.strftime("%Y%m%d")
ends_file_format = last_date.strftime("%Y%m%d")
filename = "track_{}_{}_{}.csv".format(starts_file_format, ends_file_format, file_suffix)
files_to_delete = glob.glob(os.path.join(output_directory, "track_{}_*_{}.csv".format(starts_file_format,
file_suffix)))
print("Will start processing:", filename)
file_path = os.path.join(output_directory, filename)
if file_path in files_to_delete:
files_to_delete.remove(file_path) # In case that this script is re-generating the file
file = open(file_path + ".tmp", "w")
csv_writer = csv.writer(file)
csv_writer.writerow(["date_time", "latitude", "longitude"])
one_day = datetime.timedelta(days=1)
current_day = first_date
while current_day <= last_date:
process_day(current_day, seconds, csv_writer)
current_day += one_day
delete_files(files_to_delete)
file.close()
os.rename(file_path + ".tmp", file_path)
def process_day(date_time_process, seconds, csv_writer):
date_time_process_tomorrow = date_time_process + datetime.timedelta(days=1)
today_filter = Q(date_time__gte=date_time_process) & Q(date_time__lt=date_time_process_tomorrow)
if seconds == 1:
query_set = GpggaGpsFix.objects.filter(today_filter).order_by('date_time')
elif seconds == 60:
query_set = GpggaGpsFix.objects.filter(today_filter).filter(date_time__contains=':01.').order_by('date_time')
elif seconds == 300:
query_set = GpggaGpsFix.objects.filter(today_filter).filter(Q(date_time__contains=':00:01.') |
Q(date_time__contains=':05:01.') |
Q(date_time__contains=':10:01.') |
Q(date_time__contains=':15:01.') |
Q(date_time__contains=':20:01.') |
Q(date_time__contains=':25:01.') |
Q(date_time__contains=':30:01.') |
Q(date_time__contains=':35:01.') |
Q(date_time__contains=':40:01.') |
Q(date_time__contains=':45:01.') |
Q(date_time__contains=':50:01.') |
Q(date_time__contains=':55:01.')).order_by('date_time')
elif seconds == 3600:
query_set = GpggaGpsFix.objects.filter(today_filter).filter(date_time__contains=':00:01').order_by('date_time')
else:
        assert False  # need to add an if case for this
# 64: GPS Bridge
# 63: GPS Trimble
query_set = query_set.filter(utils.filter_out_bad_values())
previous_date_time_string = ""
for gps_info in query_set.iterator():
date_time_string = gps_info.date_time.strftime("%Y-%m-%d %H:%M:%S")
if date_time_string == previous_date_time_string:
continue
if which_gps(date_time_string) == "GPS Bridge1":
if gps_info.device_id == 64:
l = [gps_info.date_time.strftime("%Y-%m-%d %H:%M:%S"),
"{:.4f}".format(gps_info.latitude),
"{:.4f}".format(gps_info.longitude)]
# print(l)
csv_writer.writerow(l)
                previous_date_time_string = date_time_string
else:
if gps_info.device_id == 63:
                l = [gps_info.date_time.strftime("%Y-%m-%d %H:%M:%S"),
                     "{:.4f}".format(gps_info.latitude),
                     "{:.4f}".format(gps_info.longitude)]
# print(l)
csv_writer.writerow(l)
previous_date_time_string = date_time_string
def delete_files(files):
for file in files:
print("Deleting file:", file)
os.remove(file)
def generate_method_1(output_directory, seconds, file_suffix):
"""
This method does a query every 'seconds'. Very slow, could be used to find gaps easily on the data.
    As it is now it is difficult to decide which GPS the fix comes from.
"""
time_delta = datetime.timedelta(seconds=seconds)
first_date = GpggaGpsFix.objects.earliest().date_time
last_date = GpggaGpsFix.objects.latest().date_time
filename = "track_{}_{}_{}.csv".format(first_date.strftime("%Y%m%d"), last_date.strftime("%Y%m%d"), file_suffix)
print("Will start processing:", filename)
file_path = os.path.join(output_directory, filename)
file = open(file_path, "w")
csv_writer = csv.writer(file)
csv_writer.writerow(["date_time", "latitude", "longitude"])
current_date = first_date
previous_date = current_date
while current_date < last_date:
location = utils.ship_location(current_date)
if location.date_time != previous_date:
if location.date_time is not None and location.latitude is not None and location.longitude is not None:
csv_writer.writerow([location.date_time.strftime("%Y-%m-%d %H:%M:%S"), "{:.4f}".format(location.latitude), "{:.4f}".format(location.longitude)])
if location.date_time is None:
print("No data for:", current_date)
if previous_date.day != current_date.day:
print("Generating CSV GPS info:", current_date)
previous_date = current_date
current_date = current_date + time_delta
def which_gps(date_time_string):
    """Return the GPS name to use at this timestamp (sketch).

    Assumption: gps_bridge_working_intervals, filled in by
    generate_all_tracks(), is an iterable of (start, end) timestamp-string
    pairs during which "GPS Bridge1" was operating; any other time falls
    back to the Trimble GPS.
    """
    for start, end in gps_bridge_working_intervals:
        if start <= date_time_string <= end:
            return "GPS Bridge1"
    return "GPS Trimble"
|
alienlike/hypertextual
|
hypertextual/models/breadcrumb.py
|
Python
|
agpl-3.0
| 100
| 0.01
|
class Breadcrumb:
    def __init__(self, text, url):
        self.text = text
        self.url = url
|
UManPychron/pychron
|
pychron/hardware/environmental_probe.py
|
Python
|
apache-2.0
| 1,717
| 0.000582
|
# ===============================================================================
# Copyright 2014 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
# ============= standard library imports ========================
# ============= local library imports ==========================
from __future__ import absolute_import
from pychron.hardware.core.core_device import CoreDevice
class TempHumMicroServer(CoreDevice):
"""
http://www.omega.com/Manuals/manualpdf/M3861.pdf
iServer MicroServer
tested with iTHX-W
"""
scan_func = 'read_temperature'
def read_temperature(self, **kw):
v = self.ask('*SRTF', timeout=1.0, **kw)
return self._parse_response(v)
def read_humidity(self, **kw):
v = self.ask('*SRH', timeout=1.0, **kw)
return self._parse_response(v)
def _parse_response(self, v):
try:
return float(v)
except (AttributeError, ValueError, TypeError):
return self.get_random_value()
# ============= EOF =============================================
|
ryanjw/co-occurrence_python
|
first_attempt.py
|
Python
|
gpl-2.0
| 1,125
| 0.022222
|
# need to pass it a file, where data starts, path to write
# things to import
import sys
import pandas
import scipy
import numpy
from scipy import stats
from scipy.stats import t
# arguments being passed
path_of_file=sys.argv[1]
last_metadata_column=int(sys.argv[2])
path_to_write=sys.argv[3]
# spearman p calc based on two tailed t-test
def spearmanp(r,n):
tstat=r*numpy.sqrt((n-2)/(1-r**2))
return t.cdf(-abs(tstat),n-2)*2
# read in the data
df=pandas.read_table(path_of_file,index_col=False)
# remove metadata columns
df_data_only=df.drop(df.columns[[range(0,last_metadata_column)]],axis=1)
#make correlation matrix
df_corr_matrix=df_data_only.corr(method="spearman")
#make column based on rows (called indexes in python)
df_corr_matrix["otus"]=df_corr_matrix.index
#melt dataframe but maintain indices now called otus
df_melt=pandas.melt(df_corr_matrix,id_vars="otus")
# remove NAs or NaNs which are result of non-existent otus (all 0 values)
df_melt=df_melt[numpy.isfinite(df_melt.value)]
df_melt['p.value']=spearmanp(df_melt.value,df_data_only.shape[0])
#write the file
df_melt.to_csv(path_to_write,index=False)
|
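The spearmanp() helper above should agree with the two-tailed p-value scipy derives from the same t-approximation. A small cross-check (the helper is repeated here so the snippet runs standalone):
import numpy
from scipy.stats import spearmanr
from scipy.stats import t

def spearmanp(r, n):
    tstat = r * numpy.sqrt((n - 2) / (1 - r ** 2))
    return t.cdf(-abs(tstat), n - 2) * 2

x = numpy.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
y = numpy.array([2.0, 1.0, 4.0, 3.0, 6.0, 5.0])
r, p_scipy = spearmanr(x, y)
assert abs(spearmanp(r, len(x)) - p_scipy) < 1e-9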
Lothiraldan/ZeroServices
|
tests/utils.py
|
Python
|
mit
| 3,193
| 0.000313
|
import asyncio
try:
from unittest.mock import Mock, create_autospec
except ImportError:
from mock import Mock, create_autospec
from uuid import uuid4
from functools import wraps
from copy import copy
from unittest import TestCase as unittestTestCase
from zeroservices.exceptions import ServiceUnavailable
from zeroservices.resources import (ResourceCollection, Resource,
is_callable, ResourceService)
from zeroservices.medium import BaseMedium
from zeroservices.medium.memory import MemoryMedium
from zeroservices.discovery.memory import MemoryDiscoveryMedium
from zeroservices.memory import MemoryCollection, MemoryResource
from zeroservices import BaseService
from zeroservices.query import match
class TestCase(unittestTestCase):
def assertItemsEqual(self, *args):
if hasattr(self, 'assertCountEqual'):
return self.assertCountEqual(*args)
return super(TestCase, self).assertItemsEqual(*args)
def assertDictIsSubset(self, subset, superset):
for item in subset.items():
self.assertIn(item, superset.items())
def test_medium():
return Mock(spec_set=BaseMedium)
class TestResource(MemoryResource):
@is_callable
    def custom_action(self, *args, **kwargs):
return 42
class TestCollection(MemoryCollection):
resource_class = TestResource
@is_callable
def custom_action(self, *args, **kwargs):
return 42
def sample_collection(sample_resource_name):
return TestCollection(sample_resource_name)
class TestService(BaseService):
def __init__(self, *args, node_infos=None, **kwargs):
super().__init__(*args, **kwargs)
self.on_message_mock = Mock()
self.on_event_mock = Mock()
self.node_infos = node_infos or {}
def service_info(self):
        base_infos = copy(self.node_infos)
        base_infos.update(super().service_info())
return base_infos
@asyncio.coroutine
def on_message(self, *args, **kwargs):
return self.on_message_mock(*args, **kwargs)
@asyncio.coroutine
def on_event(self, *args, **kwargs):
return self.on_event_mock(*args, **kwargs)
def _create_test_service(name, node_infos, loop):
medium = MemoryMedium(loop, MemoryDiscoveryMedium)
service = TestService(name, medium, node_infos=node_infos)
return service
class TestResourceService(ResourceService):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.on_event_mock = Mock()
@asyncio.coroutine
def on_event(self, *args, **kwargs):
return self.on_event_mock(*args, **kwargs)
def _create_test_resource_service(name, loop):
medium = MemoryMedium(loop, MemoryDiscoveryMedium)
service = TestResourceService(name, medium)
return service
def _async_test(f):
@wraps(f)
def wrapper(self, *args, **kwargs):
if not self.loop.is_running():
coro = asyncio.coroutine(f)
future = coro(self, *args, **kwargs)
self.loop.run_until_complete(asyncio.wait_for(future, 2, loop=self.loop))
else:
return f(self, *args, **kwargs)
return wrapper
|
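The _async_test decorator above lets a plain generator-style test run to completion on the TestCase's loop with a 2-second timeout. A usage sketch (names illustrative; assumes the loop is created in setUp, as the wrapper expects a self.loop attribute):
class ExampleTimerTest(TestCase):

    def setUp(self):
        self.loop = asyncio.new_event_loop()

    def tearDown(self):
        self.loop.close()

    @_async_test
    def test_sleep_completes(self):
        # Wrapped by asyncio.coroutine and driven via run_until_complete.
        yield from asyncio.sleep(0, loop=self.loop)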
agoravoting/election-orchestra
|
public_api.py
|
Python
|
agpl-3.0
| 8,209
| 0.001707
|
# -*- coding: utf-8 -*-
#
# SPDX-FileCopyrightText: 2013-2021 Agora Voting SL <contact@nvotes.com>
#
# SPDX-License-Identifier: AGPL-3.0-only
#
import pickle
import base64
import json
import re
from datetime import datetime
from flask import Blueprint, request, make_response, abort
from frestq.utils import loads, dumps
from frestq.tasks import SimpleTask, TaskError
from frestq.app import app, db
from models import Election, Authority, QueryQueue
from create_election.performer_jobs import check_election_data
from taskqueue import queue_task, apply_task, dequeue_task
public_api = Blueprint('public_api', __name__)
def error(status, message=""):
if message:
data = json.dumps(dict(message=message))
else:
data=""
return make_response(data, status)
@public_api.route('/dequeue', methods=['GET'])
def dequeue():
try:
dequeue_task()
except Exception as e:
return make_response(dumps(dict(status=e.message)), 202)
return make_response(dumps(dict(status="ok")), 202)
@public_api.route('/election', methods=['POST'])
def post_election():
'''
POST /election
Creates an election, with the given input data. This involves communicating
with the different election authorities to generate the joint public key.
Example request:
POST /election
{
"id": 1110,
"title": "Votación de candidatos",
"description": "Selecciona los documentos político, ético y organizativo con los que Podemos",
"director": "wadobo-auth1",
"authorities": "openkratio-authority",
"layout": "pcandidates-election",
"presentation": {
"share_text": "lo que sea",
"theme": "foo",
"urls": [
{
"title": "",
"url": ""
}
],
"theme_css": "whatever"
},
"end_date": "2013-12-09T18:17:14.457000",
"start_date": "2013-12-06T18:17:14.457000",
"questions": [
{
"description": "",
"layout": "pcandidates-election",
"max": 1,
"min": 0,
"num_winners": 1,
"title": "Secretaría General",
"randomize_answer_order": true,
"tally_type": "plurality-at
|
-large",
"answer_total_votes_percentage": "over-total-valid-votes",
"answers": [
{
"id": 0,
"category": "Equipo de Enfermeras",
"details": "",
"sort_order": 1,
"urls": [
{
"title": "",
"url": ""
}
],
"text": "Fulanita de tal",
}
]
}
],
"authorities": [
{
"name": "Asociación Sugus GNU/Linux",
"orchestra_url": "https://sugus.eii.us.es/orchestra",
"ssl_cert": "-----BEGIN CERTIFICATE-----\nMIIFATCCA+mgAwIBAgIQAOli4NZQEWpKZeYX25jjwDANBgkqhkiG9w0BAQUFADBz\n8YOltJ6QfO7jNHU9jh/AxeiRf6MibZn6fvBHvFCrVBvDD43M0gdhMkVEDVNkPaak\nC7AHA/waXZ2EwW57Chr2hlZWAkwkFvsWxNt9BgJAJJt4CIVhN/iau/SaXD0l0t1N\nT0ye54QPYl38Eumvc439Yd1CeVS/HYbP0ISIfpNkkFA5TiQdoA==\n-----END CERTIFICATE-----"
},
{
"name": "Agora Ciudadana",
"orchestra_url": "https://agoravoting.com:6874/orchestra",
"ssl_cert": "-----BEGIN CERTIFICATE-----\nMIIFATCCA+mgAwIBAgIQAOli4NZQEWpKZeYX25jjwDANBgkqhkiG9w0BAQUFADBz\n8YOltJ6QfO7jNHU9jh/AxeiRf6MibZn6fvBHvFCrVBvDD43M0gdhMkVEDVNkPaak\nC7AHA/waXZ2EwW57Chr2hlZWAkwkFvsWxNt9BgJAJJt4CIVhN/iau/SaXD0l0t1N\nT0ye54QPYl38Eumvc439Yd1CeVS/HYbP0ISIfpNkkFA5TiQdoA==\n-----END CERTIFICATE-----"
},
{
"name": "Wadobo Labs",
"orchestra_url": "https://wadobo.com:6874/orchestra",
"ssl_cert": "-----BEGIN CERTIFICATE-----\nMIIFATCCA+mgAwIBAgIQAOli4NZQEWpKZeYX25jjwDANBgkqhkiG9w0BAQUFADBz\n8YOltJ6QfO7jNHU9jh/AxeiRf6MibZn6fvBHvFCrVBvDD43M0gdhMkVEDVNkPaak\nC7AHA/waXZ2EwW57Chr2hlZWAkwkFvsWxNt9BgJAJJt4CIVhN/iau/SaXD0l0t1N\nT0ye54QPYl38Eumvc439Yd1CeVS/HYbP0ISIfpNkkFA5TiQdoA==\n-----END CERTIFICATE-----"
}
]
}
On success, response is empty with status 202 Accepted and returns something
like:
{
"task_id": "ba83ee09-aa83-1901-bb11-e645b52fc558",
}
When the election finally gets processed, the callback_url is called with a
POST containing the protInfo.xml file generated jointly by each
authority, following this example response:
{
"status": "finished",
"reference": {
"election_id": "d9e5ee09-03fa-4890-aa83-2fc558e645b5",
"action": "POST /election"
},
"session_data": [{
"session_id": "deadbeef-03fa-4890-aa83-2fc558e645b5",
"publickey": ["<pubkey codified in hexadecimal>"]
}]
}
Note that this protInfo.xml will contain the election public key, but
also some other information. In particular, it's worth noting that
the http and hint servers' urls for each authority could change later,
if election-orchestra needs it.
If there was an error, then the callback will be called following this
example format:
{
"status": "error",
"reference": {
"session_id": "d9e5ee09-03fa-4890-aa83-2fc558e645b5",
"action": "POST /election"
},
"data": {
"message": "error message"
}
}
'''
data = request.get_json(force=True, silent=True)
d = base64.b64encode(pickle.dumps(data)).decode('utf-8')
queueid = queue_task(task='election', data=d)
return make_response(dumps(dict(queue_id=queueid)), 202)
@public_api.route('/tally', methods=['POST'])
def post_tally():
'''
POST /tally
Tallies an election, with the given input data. This involves communicating
with the different election authorities to do the tally.
Example request:
POST /tally
{
"election_id": 111,
"callback_url": "https://127.0.0.1:5000/public_api/receive_tally",
"votes_url": "https://127.0.0.1:5000/public_data/vota4/encrypted_ciphertexts",
"votes_hash": "ni:///sha-256;f4OxZX_x_FO5LcGBSKHWXfwtSx-j1ncoSt3SABJtkGk"
}
On success, response is empty with status 202 Accepted and returns something
like:
{
"task_id": "ba83ee09-aa83-1901-bb11-e645b52fc558",
}
When the election finally gets processed, the callback_url is called with POST
similar to the following example:
{
"status": "finished",
"reference": {
"election_id": "d9e5ee09-03fa-4890-aa83-2fc558e645b5",
"action": "POST /tally"
},
"data": {
"votes_url": "https://127.0.0.1:5000/public_data/vota4/tally.tar.bz2",
"votes_hash": "ni:///sha-256;f4OxZX_x_FO5LcGBSKHWXfwtSx-j1ncoSt3SABJtkGk"
}
}
If there was an error, then the callback will be called following this
example format:
{
"status": "error",
"reference": {
"election_id": "d9e5ee09-03fa-4890-aa83-2fc558e645b5",
"action": "POST /tally"
},
"data": {
"message": "error message"
}
}
'''
# first of all, parse input data
data = request.get_json(force=True, silent=True)
d = base64.b64encode(pickle.dumps(data)).decode('utf-8')
queueid = queue_task(task='tally', data=d)
return make_response(dumps(dict(queue_id=queueid)), 202)
@public_api.route('/receive_election', methods=['POST'])
def receive_election():
'''
This is a test route to be able to test that callbacks are correctly sent
'''
print("ATTENTION received election callback: ")
print(request.get_json(force=True, silent=True))
return make_response("", 202)
@public_api.route('/receive_tally', methods=['POST'])
def receive_tally():
'''
This is a test route to be able to test that callbacks are correctly sent
'''
print("ATTENTION received tally callback: ")
print(request.get_json(force=True, silent=True))
    return make_response("", 202)
|
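Both endpoints accept JSON and answer 202 with a queue id, so a client interaction is a single POST per the docstrings above. A sketch using the requests library (an assumption; any HTTP client works), reusing the documented tally payload:
import requests

payload = {
    "election_id": 111,
    "callback_url": "https://127.0.0.1:5000/public_api/receive_tally",
    "votes_url": "https://127.0.0.1:5000/public_data/vota4/encrypted_ciphertexts",
    "votes_hash": "ni:///sha-256;f4OxZX_x_FO5LcGBSKHWXfwtSx-j1ncoSt3SABJtkGk",
}
resp = requests.post("https://127.0.0.1:5000/public_api/tally",
                     json=payload, verify=False)
assert resp.status_code == 202  # the body carries the queue_id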
ygravrand/pyconfr2015
|
breizhcamp2016/ex2_multi_process/fast_food.py
|
Python
|
mit
| 429
| 0
|
# Encoding: utf-8
import sys
import time
from flask import Flask
app = Flask(__name__)
def kitchen_work():
time.sleep(5)
@app.route('/')
def fast_food_host():
    print 'Order sent to the kitchen, waiting...'
sys.stdout.flush()
kitchen_work()
print 'Burger received from kitchen!'
sys.stdout.flush()
return 'Here\'s your order.'
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5000)
|
jasonljc/enterprise-price-monitor
|
django_monitor/price_monitor/spider/data_loader.py
|
Python
|
mit
| 3,067
| 0.005217
|
from price_monitor.models import Site, Price, SiteQuery
import json
from os import path
import os
import logging
logging.basicConfig(level=logging.DEBUG, filename="logfile", filemode="a+",
format="%(asctime)-15s %(levelname)-8s %(message)s")
logger = logging.getLogger(__name__)
class DataLoader(object):
def __init__(self):
self.staging_path = 'price_monitor/spider/enterprise/data/staging'
def check_staging(self):
        '''Load files from staging path if there are any
'''
file_list = os.listdir(self.staging_path)
if not file_list:
logger.info('No file found in staging path %s'%self.staging_path)
else:
for basename in file_list:
file_path = path.join(self.staging_path, basename)
logger.info('Loading file %s'%file_path)
self.load_from_staging(file_path)
def load_from_staging(self, file_path):
with open(file_path) as fp:
lines = fp.read().split('\n')
for line in lines:
if line:
self.load_line(line)
def load_line(self, line):
def build_site(d):
site_set = Site.objects.filter(site_location=d['location'])
if not site_set:
logger.info('Saving new location %s'%d['location'])
site = Site(site_location=d['location'])
site.save()
return Site.objects.get(site_location=d['location'])
def build_site_query(d, site):
            site_query_set = SiteQuery.objects.filter(search_time=d['search_time'], site_id=site)
if not site_query_set:
start_date = '%s%s'%(d['start_date_month'], d['start_date_input'].zfill(2))
end_date = '%s%s'%(d['end_date_month'], d['end_date_input'].zfill(2))
site_query = SiteQuery(search_time=d['search_time'],
search_criteria=d['search_criteria'],
start_date=start_date,
start_time=d['start_date_time'],
end_date=end_date,
end_time=d['end_date_time'],
optional_code=d['optional_code'],
site_id=site)
site_query.save()
return SiteQuery.objects.get(search_time=d['search_time'], site_id=site)
def build_price(d, site_query):
price = Price(site_query=site_query,
car_class=d['car_class'],
car_price=d['car_price'],
car_total_price=d['car_total_price'])
price.save()
d_ = json.loads(line)
# Save Site
site_ = build_site(d_)
# Save SiteQuery
site_query_ = build_site_query(d_, site_)
# Save Price
build_price(d_, site_query_)
|
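Each staging file is newline-delimited JSON, one query per line, with the keys the build_* helpers above read. A sketch of one such line (illustrative values; actually loading it needs the Django models configured):
import json

line = json.dumps({
    'location': 'SFO',
    'search_time': '2017-01-01T10:00:00',
    'search_criteria': 'economy',
    'start_date_month': '201701', 'start_date_input': '2',
    'end_date_month': '201701', 'end_date_input': '5',
    'start_date_time': '10:00', 'end_date_time': '18:00',
    'optional_code': '',
    'car_class': 'Economy', 'car_price': '25.00',
    'car_total_price': '30.00',
})
# DataLoader().load_line(line)  # would upsert Site/SiteQuery and add a Price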
jamespcole/home-assistant
|
homeassistant/components/edp_redy/sensor.py
|
Python
|
apache-2.0
| 3,481
| 0
|
"""Support for EDP re:dy sensors."""
import logging
from homeassistant.const import POWER_WATT
from homeassistant.helpers.entity import Entity
from . import EDP_REDY, EdpRedyDevice
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['edp_redy']
# Load power in watts (W)
ATTR_ACTIVE_POWER = 'active_power'
async def async_setup_platform(
hass, config, async_add_entities, discovery_info=None):
"""Perform the setup for re:dy devices."""
from edp_redy.session import ACTIVE_POWER_ID
session = hass.data[EDP_REDY]
devices = []
# Create sensors for modules
for device_json in session.modules_dict.values():
if 'HA_POWER_METER' not in device_json['Capabilities']:
continue
devices.append(EdpRedyModuleSensor(session, device_json))
# Create a sensor for global active power
devices.append(EdpRedySensor(session, ACTIVE_POWER_ID, "Power Home",
'mdi:flash', POWER_WATT))
async_add_entities(devices, True)
class EdpRedySensor(EdpRedyDevice, Entity):
"""Representation of a EDP re:dy generic sensor."""
def __init__(self, session, sensor_id, name, icon, unit):
"""Initialize the sensor."""
super().__init__(session, sensor_id, name)
self._icon = icon
self._unit = unit
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def icon(self):
"""Return the icon to use in the frontend."""
return self._icon
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this sensor."""
return self._unit
async def async_update(self):
"""Parse the data for this sensor."""
if self._id in self._session.values_dict:
self._state = self._session.values_dict[self._id]
self._is_available = True
else:
self._is_available = False
class EdpRedyModuleSensor(EdpRedyDevice, Entity):
"""Representation of a EDP re:dy module sensor."""
def __init__(self, session, device_json):
"""Initialize the sensor."""
super().__init__(session, device_json['PKID'],
"Power {0}".format(device_json['Name']))
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def icon(self):
"""Return the icon to use in the frontend."""
return 'mdi:flash'
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this sensor."""
return POWER_WATT
async def async_update(self):
"""Parse the data for this sensor."""
if self._id in self._session.modules_dict:
device_json = self._session.modules_dict[self._id]
self._parse_data(device_json)
else:
self._is_available = False
def _parse_data(self, data):
"""Parse data received from the server."""
        super()._parse_data(data)
_LOGGER.debug("Sensor data: %s", str(data))
for state_var in data['StateVars']:
if state_var['Name'] == 'ActivePower':
try:
self._state = float(state_var['Value']) * 1000
except ValueError:
_LOGGER.error("Could not parse power for %s", self._id)
self._state = 0
self._is_available = False
|
HybridF5/jacket
|
jacket/api/compute/openstack/compute/flavors_extraspecs.py
|
Python
|
apache-2.0
| 6,329
| 0
|
# Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
import webob
from jacket.api.compute.openstack import common
from jacket.api.compute.openstack.compute.schemas import flavors_extraspecs
from jacket.api.compute.openstack import extensions
from jacket.api.compute.openstack import wsgi
from jacket.api.compute import validation
from jacket.compute import exception
from jacket.i18n import _
from jacket.compute import utils
ALIAS = 'os-flavor-extra-specs'
authorize = extensions.os_compute_authorizer(ALIAS)
class FlavorExtraSpecsController(wsgi.Controller):
"""The flavor extra specs API controller for the OpenStack API."""
def __init__(self, *args, **kwargs):
super(FlavorExtraSpecsController, self).__init__(*args, **kwargs)
def _get_extra_specs(self, context, flavor_id):
flavor = common.get_flavor(context, flavor_id)
return dict(extra_specs=flavor.extra_specs)
# NOTE(gmann): Max length for numeric value is being checked
# explicitly as json schema cannot have max length check for numeric value
def _check_extra_specs_value(self, specs):
for key, value in six.iteritems(specs):
try:
if isinstance(value, (six.integer_types, float)):
value = six.text_type(value)
utils.check_string_length(value, 'extra_specs value',
max_length=255)
except exception.InvalidInput as error:
raise webob.exc.HTTPBadRequest(
explanation=error.format_message())
@extensions.expected_errors(404)
def index(self, req, flavor_id):
"""Returns the list of extra specs for a given flavor."""
context = req.environ['compute.context']
authorize(context, action='index')
return self._get_extra_specs(context, flavor_id)
# NOTE(gmann): Here should be 201 instead of 200 by v2.1
# +microversions because the flavor extra specs has been created
# completely when returning a response.
@extensions.expected_errors((400, 404, 409))
@validation.schema(flavors_extraspecs.create)
def create(self, req, flavor_id, body):
context = req.environ['compute.context']
authorize(context, action='create')
specs = body['extra_specs']
self._check_extra_specs_value(specs)
flavor = common.get_flavor(context, flavor_id)
try:
flavor.extra_specs = dict(flavor.extra_specs, **specs)
flavor.save()
except exception.FlavorExtraSpecUpdateCreateFailed as e:
raise webob.exc.HTTPConflict(explanation=e.format_message())
except exception.FlavorNotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
return body
@extensions.expected_errors((400, 404, 409))
@validation.schema(flavors_extraspecs.update)
def update(self, req, flavor_id, id, body):
context = req.environ['compute.context']
authorize(context, action='update')
self._check_extra_specs_value(body)
if id not in body:
            expl = _('Request body and URI mismatch')
raise webob.exc.HTTPBadRequest(explanation=expl)
flavor = common.get_flavor(context, flavor_id)
try:
flavor.extra_specs = dict(flavor.extra_specs, **body)
            flavor.save()
except exception.FlavorExtraSpecUpdateCreateFailed as e:
raise webob.exc.HTTPConflict(explanation=e.format_message())
except exception.FlavorNotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
return body
@extensions.expected_errors(404)
def show(self, req, flavor_id, id):
"""Return a single extra spec item."""
context = req.environ['compute.context']
authorize(context, action='show')
flavor = common.get_flavor(context, flavor_id)
try:
return {id: flavor.extra_specs[id]}
except KeyError:
msg = _("Flavor %(flavor_id)s has no extra specs with "
"key %(key)s.") % dict(flavor_id=flavor_id,
key=id)
raise webob.exc.HTTPNotFound(explanation=msg)
# NOTE(gmann): Here should be 204(No Content) instead of 200 by v2.1
# +microversions because the flavor extra specs has been deleted
# completely when returning a response.
@extensions.expected_errors(404)
def delete(self, req, flavor_id, id):
"""Deletes an existing extra spec."""
context = req.environ['compute.context']
authorize(context, action='delete')
flavor = common.get_flavor(context, flavor_id)
try:
del flavor.extra_specs[id]
flavor.save()
except (exception.FlavorExtraSpecsNotFound,
exception.FlavorNotFound) as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
except KeyError:
msg = _("Flavor %(flavor_id)s has no extra specs with "
"key %(key)s.") % dict(flavor_id=flavor_id,
key=id)
raise webob.exc.HTTPNotFound(explanation=msg)
class FlavorsExtraSpecs(extensions.V21APIExtensionBase):
"""Flavors extra specs support."""
name = 'FlavorExtraSpecs'
alias = ALIAS
version = 1
def get_resources(self):
extra_specs = extensions.ResourceExtension(
'os-extra_specs',
FlavorExtraSpecsController(),
parent=dict(member_name='flavor', collection_name='flavors'))
return [extra_specs]
def get_controller_extensions(self):
return []
|
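On the wire, the ResourceExtension above mounts the controller under each flavor, so the CRUD surface looks like the sketch below (flavor id 42 is hypothetical; paths assume the standard /flavors prefix and auth headers are omitted):
# POST   /flavors/42/os-extra_specs                 -> create(), echoes the body
create_body = {'extra_specs': {'hw:cpu_policy': 'dedicated'}}
# GET    /flavors/42/os-extra_specs                 -> index(), {'extra_specs': {...}}
# GET    /flavors/42/os-extra_specs/hw:cpu_policy   -> show()
show_body = {'hw:cpu_policy': 'dedicated'}
# PUT    /flavors/42/os-extra_specs/hw:cpu_policy   -> update(), key must match body
# DELETE /flavors/42/os-extra_specs/hw:cpu_policy   -> delete()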
CALlanoR/virtual_environments
|
medical_etls/part1/etls/utils.py
|
Python
|
apache-2.0
| 607
| 0.011532
|
import os
import csv
def get_value_or_default(value, default=None):
result = value.strip()
if len(result) == 0:
result = default
return result
def read_csv_file(csv_file_name,
                  delimiter,
                  quote_char='"',
skip_header=True,
encoding='latin-1'):
print(csv_file_name)
fd = open(file=csv_file_name, mode='r', encoding=encoding)
    csv_reader = csv.reader(fd, delimiter=delimiter, quotechar=quote_char)
if skip_header:
next(csv_reader)
for row in csv_reader:
yield row
fd.close()
|
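read_csv_file() is a generator, so rows stream one at a time and the file handle closes once iteration finishes. A usage sketch with a throwaway file:
import os
import tempfile

path = os.path.join(tempfile.mkdtemp(), 'sample.csv')
with open(path, 'w', encoding='latin-1') as f:
    f.write('id;name\n1;alice\n2; \n')

rows = list(read_csv_file(path, delimiter=';'))  # header row is skipped
assert rows == [['1', 'alice'], ['2', ' ']]
assert get_value_or_default(rows[1][1], default='unknown') == 'unknown'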
banwagong-news/ssbc
|
workers/clean_rubbish_res.py
|
Python
|
gpl-2.0
| 1,384
| 0.011561
|
#!/usr/bin/env python
#coding: utf8
import sys
import MySQLdb as mdb
import MySQLdb.cursors
SRC_HOST = '127.0.0.1'
SRC_USER = 'root'
SRC_PASS = ''
DATABASE_NAME = ''
DST_HOST = '127.0.0.1'
DST_USER = 'root'
DST_PASS = ''
src_conn = mdb.connect(SRC_HOST, SRC_USER, SRC_PASS, DATABASE_NAME, charset='utf8', cursorclass=MySQLdb.cursors.DictCursor)
src_curr = src_conn.cursor()
src_curr.execute('SET NAMES utf8')
dst_conn = mdb.connect(DST_HOST, DST_USER, DST_PASS, 'rt_main', port=9306, charset='utf8')
dst_curr = dst_conn.cursor()
dst_curr.execute('SET NAMES utf8')
def delete(resname):
    onetimecount = 20
    while True:
        ret = dst_curr.execute('select id from rt_main where match(\'*%s*\') limit %s'%(resname,onetimecount))
        if ret <= 0:
print 'done'
break
result = list(dst_curr.fetchall())
for id in iter(result):
src_curr.execute('select info_hash from search_hash where id = %s'%(id))
info_hash = src_curr.fetchall()
for hash in iter(info_hash):
src_curr.execute('delete from search_filelist where info_hash = \'%s\''%(hash['info_hash']))
src_curr.execute('delete from search_hash where id = %s'%(id))
            dst_curr.execute('delete from rt_main where id = %s'%(id))
if __name__ == '__main__':
delete(sys.argv[1])
|
ucrcsedept/galah
|
galah/db/models/classes.py
|
Python
|
apache-2.0
| 145
| 0.02069
|
from mongoengine import *
class Class(Document):
name = StringField(required = True)
meta = {
"allow_inheritance": False
}
|
monal94/digits-scikit-learn
|
main.py
|
Python
|
mit
| 2,459
| 0.001627
|
# Import 'datasets' from 'sklearn'
import numpy as np
from sklearn import datasets
from sklearn.decomposition import PCA, RandomizedPCA
import matplotlib.pyplot as plt
# Load in the 'digits' data
digits = datasets.load_digits()
# Print the 'digits' data
print(digits)
# Print the keys
print(digits.keys())
# Print out the data
print(digits.data)
# Print out the target
print(digits.target)
# Print out the description of 'digits' data
print(digits.DESCR)
# Get the digits data
digits_data = digits.data
# Print the digits data
print(digits_data.shape)
# Get the target digits
digits_target = digits.target
# Print target data shape
print(digits_target.shape)
# Get the number of unique labels
number_digits = len(np.unique(digits_target))
# Print unique values
print(number_digits)
# Isolate the 'images'
digits_images = digits.images
# Inspect the shape
print(digits_images.shape)
# Figure size (width, height) in inches
fig = plt.figure(figsize=(6, 6))
# Adjust the subplots
fig.subplots_adjust(left=0, right=1, bottom=0, top=1, hspace=0.05, wspace=0.05)
# For each of the 64 images
for i in range(64):
# Initialize the subplots: add a subplot in the grid of 8 by 8, at the i+1-th position
ax = fig.add_subplot(8, 8, i + 1, xticks=[], yticks=[])
# Display an image at the i-th position
ax.imshow(digits.images[i], cmap=plt.cm.binary, interpolation='nearest')
    # label the image with the target value
ax.text(0, 7, str(digits.target[i]))
# Show the plot
plt.show()
# Create a Randomized PCA model that takes two components
randomized_pca = RandomizedPCA(n_components=2)
# Fit and transform the data to the model
reduced_data_rpca = randomized_pca.fit_transform(digits.data)
# Create a regular PCA model
pca = PCA(n_components=2)
# Fit and transform the data to the model
reduced_data_pca = pca.fit_transform(digits.data)
# Inspect the shape
print(reduced_data_pca.shape)
# Print out the data
print(reduced_data_rpca)
print(reduced_data_pca)
colors = ['black', 'blue', 'purple', 'yellow', 'white', 'red', 'lime', 'cyan', 'orange', 'gray']
for i in range(len(colors)):
x = reduced_data_rpca[:, 0][digits.target == i]
y = reduced_data_rpca[:, 1][digits.target == i]
plt.scatter(x, y, c=colors[i])
plt.legend(digits.target_names, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.xlabel('First Principal Component')
plt.ylabel('Second Principal Component')
plt.title("PCA Scatter Plot")
plt.show()
|
LockScreen/Backend
|
venv/lib/python2.7/site-packages/awscli/customizations/codedeploy/systems.py
|
Python
|
mit
| 7,661
| 0.000131
|
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import ctypes
import os
import subprocess
DEFAULT_CONFIG_FILE = 'codedeploy.onpremises.yml'
class System:
UNSUPPORTED_SYSTEM_MSG = (
'Only Ubuntu Server, Red Hat Enterprise Linux Server and '
'Windows Server operating systems are supported.'
)
def __init__(self, params):
self.session = params.session
self.s3 = self.session.create_client(
's3',
region_name=params.region
)
    def validate_administrator(self):
raise NotImplementedError('validate_administrator')
def install(self, params):
raise NotImplementedError('install')
def uninstall(self, params):
raise NotImplementedError('uninstall')
class Windows(System):
CONFIG_DIR = r'C:\ProgramData\Amazon\CodeDeploy'
CONFIG_FILE = 'conf.onpremises.yml'
CONFIG_PATH = r'{0}\{1}'.format(CONFIG_DIR, CONFIG_FILE)
INSTALLER = 'codedeploy-agent.msi'
def validate_administrator(self):
if not ctypes.windll.shell32.IsUserAnAdmin():
raise RuntimeError(
'You must run this command as an Administrator.'
)
def install(self, params):
if 'installer' in params:
self.INSTALLER = params.installer
process = subprocess.Popen(
[
'powershell.exe',
'-Command', 'Stop-Service',
'-Name', 'codedeployagent'
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
(output, error) = process.communicate()
not_found = (
"Cannot find any service with service name 'codedeployagent'"
)
if process.returncode != 0 and not_found not in error:
raise RuntimeError(
'Failed to stop the AWS CodeDeploy Agent:\n{0}'.format(error)
)
response = self.s3.get_object(Bucket=params.bucket, Key=params.key)
with open(self.INSTALLER, 'wb') as f:
f.write(response['Body'].read())
subprocess.check_call(
[
r'.\{0}'.format(self.INSTALLER),
'/quiet',
'/l', r'.\codedeploy-agent-install-log.txt'
],
shell=True
)
subprocess.check_call([
'powershell.exe',
'-Command', 'Restart-Service',
'-Name', 'codedeployagent'
])
process = subprocess.Popen(
[
'powershell.exe',
'-Command', 'Get-Service',
'-Name', 'codedeployagent'
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
(output, error) = process.communicate()
if "Running" not in output:
raise RuntimeError(
'The AWS CodeDeploy Agent did not start after installation.'
)
def uninstall(self, params):
process = subprocess.Popen(
[
'powershell.exe',
'-Command', 'Stop-Service',
'-Name', 'codedeployagent'
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
(output, error) = process.communicate()
not_found = (
"Cannot find any service with service name 'codedeployagent'"
)
if process.returncode == 0:
self._remove_agent()
elif not_found not in error:
raise RuntimeError(
'Failed to stop the AWS CodeDeploy Agent:\n{0}'.format(error)
)
def _remove_agent(self):
process = subprocess.Popen(
[
'wmic',
'product', 'where', 'name="CodeDeploy Host Agent"',
'call', 'uninstall', '/nointeractive'
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
(output, error) = process.communicate()
if process.returncode != 0:
raise RuntimeError(
'Failed to uninstall the AWS CodeDeploy Agent:\n{0}'.format(
error
)
)
class Linux(System):
CONFIG_DIR = '/etc/codedeploy-agent/conf'
CONFIG_FILE = DEFAULT_CONFIG_FILE
CONFIG_PATH = '{0}/{1}'.format(CONFIG_DIR, CONFIG_FILE)
INSTALLER = 'install'
def validate_administrator(self):
if os.geteuid() != 0:
raise RuntimeError('You must run this command as sudo.')
def install(self, params):
if 'installer' in params:
self.INSTALLER = params.installer
self._update_system(params)
self._stop_agent(params)
response = self.s3.get_object(Bucket=params.bucket, Key=params.key)
with open(self.INSTALLER, 'wb') as f:
f.write(response['Body'].read())
subprocess.check_call(
['chmod', '+x', './{0}'.format(self.INSTALLER)]
)
credentials = self.session.get_credentials()
environment = os.environ.copy()
environment['AWS_REGION'] = params.region
environment['AWS_ACCESS_KEY_ID'] = credentials.access_key
environment['AWS_SECRET_ACCESS_KEY'] = credentials.secret_key
if credentials.token is not None:
environment['AWS_SESSION_TOKEN'] = credentials.token
subprocess.check_call(
['./{0}'.format(self.INSTALLER), 'auto'],
env=environment
)
def uninstall(self, params):
process = self._stop_agent(params)
if process.returncode == 0:
self._remove_agent(params)
def _update_system(self, params):
raise NotImplementedError('preinstall')
def _remove_agent(self, params):
raise NotImplementedError('remove_agent')
def _stop_agent(self, params):
process = subprocess.Popen(
['service', 'codedeploy-agent', 'stop'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
(output, error) = process.communicate()
if process.returncode != 0 and params.not_found_msg not in error:
raise RuntimeError(
'Failed to stop the AWS CodeDeploy Agent:\n{0}'.format(error)
)
return process
class Ubuntu(Linux):
def _update_system(self, params):
subprocess.check_call(['apt-get', '-y', 'update'])
subprocess.check_call(['apt-get', '-y', 'install', 'ruby2.0'])
def _remove_agent(self, params):
subprocess.check_call(['dpkg', '-r', 'codedeploy-agent'])
def _stop_agent(self, params):
params.not_found_msg = 'codedeploy-agent: unrecognized service'
return Linux._stop_agent(self, params)
class RHEL(Linux):
def _update_system(self, params):
subprocess.check_call(['yum', '-y', 'install', 'ruby'])
def _remove_agent(self, params):
subprocess.check_call(['yum', '-y', 'erase', 'codedeploy-agent'])
def _stop_agent(self, params):
params.not_found_msg = 'Redirecting to /bin/systemctl stop codedeploy-agent.service'
return Linux._stop_agent(self, params)
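# Editorial sketch (not part of the original file): one plausible way the
# System subclasses above could be selected at runtime. The probing below is
# an assumption; the real command wires the platform choice up elsewhere, and
# the Windows class name is inferred from the MSI-based install() tail above.
def pick_system_class(platform_name):
    # MSI/PowerShell handling lives in Windows; the Linux flavours differ
    # only in package manager and service tooling, as the subclasses show.
    if platform_name.startswith('win'):
        return Windows
    if os.path.exists('/etc/debian_version'):
        return Ubuntu
    return RHEL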
|
Shea690901/mudrpc
|
python/lib/PickleRPC/setup.py
|
Python
|
isc
| 690
| 0.004348
|
from setuptools import setup, find_packages
import sys, os
version = '0.0'
setup(name='PickleRPC',
version=version,
description="RPC using pickle",
long_description="""\
""",
classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
keywords='',
author='Alexander Motzkau',
author_email='gnomi@unitopia.de',
url='',
license='',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
include_package_data=True,
zip_safe=False,
install_requires=[
# -*- Extra requirements: -*-
],
entry_points="""
# -*- Entry points: -*-
""",
)
|
akx/coffin
|
coffin/views/generic/__init__.py
|
Python
|
bsd-3-clause
| 35
| 0
|
from django.views.generic import *
|
litex-hub/pythondata-cpu-blackparrot
|
pythondata_cpu_blackparrot/system_verilog/black-parrot/external/basejump_stl/testing/bsg_cache/regression_non_blocking/test_block_ld2.py
|
Python
|
bsd-3-clause
| 747
| 0.024096
|
import sys
import random
from test_base import *
class TestBlockLD2(TestBase):
def generate(self):
self.clear_tag()
for n in range(50000):
      tag = random.randint(0, 15)
      index = random.randint(0,self.sets_p-1)
      taddr = self.get_addr(tag,index)
      op = random.randint(0,2)
      if op == 0:
self.send_block_st(taddr)
elif op == 1:
self.send_block_ld(taddr)
else:
self.send_aflinv(taddr)
self.tg.done()
def send_block_st(self, addr):
base_addr = addr - (addr % (self.block_size_in_words_p*4))
for i in range(self.block_size_in_words_p):
self.send_sw(base_addr+(i*4))
# main()
if __name__ == "__main__":
t = TestBlockLD2()
t.generate()
|
RedHatEMEA/aws-ose3
|
target/reip.py
|
Python
|
apache-2.0
| 18,095
| 0.003426
|
#!/usr/bin/python
import OpenSSL.crypto
import argparse
import base64
import glob
import k8s
import os
import shutil
import yaml
def sn():
sn = int(open("/etc/origin/master/ca.serial.txt").read(), 16)
sntext = "%X" % (sn + 1)
if len(sntext) % 2:
sntext = "0" + sntext
open("/etc/origin/master/ca.serial.txt", "w").write(sntext)
return sn
def make_cert(fn, o, cn, san, eku):
ca_cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
open("/etc/origin/master/ca.crt").read())
ca_key = OpenSSL.crypto.load_privatekey(OpenSSL.crypto.FILETYPE_PEM,
open("/etc/origin/master/ca.key").read())
key = OpenSSL.crypto.PKey()
key.generate_key(OpenSSL.crypto.TYPE_RSA, 2048)
cert = OpenSSL.crypto.X509()
cert.set_version(2)
cert.set_serial_number(sn())
if o:
cert.get_subject().O = o
cert.get_subject().CN = cn
cert.gmtime_adj_notBefore(-60 * 60)
cert.gmtime_adj_notAfter((2 * 365 * 24 - 1) * 60 * 60)
cert.set_issuer(ca_cert.get_subject())
cert.set_pubkey(key)
cert.add_extensions([
OpenSSL.crypto.X509Extension("keyUsage", True, "digitalSignature, keyEncipherment"),
OpenSSL.crypto.X509Extension("extendedKeyUsage", False, eku),
OpenSSL.crypto.X509Extension("basicConstraints", True, "CA:FALSE")
])
if san:
cert.add_extensions([
OpenSSL.crypto.X509Extension("subjectAltName", False, san)
])
cert.sign(ca_key, "sha256")
with os.fdopen(os.open("%s.key" % fn, os.O_WRONLY | os.O_CREAT, 0600),
"w") as f:
f.write(OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM,
key))
with open("%s.crt" % fn, "w") as f:
f.write(OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM,
cert))
f.write(OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM,
ca_cert))
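# Illustrative call (editorial; it mirrors the usage in do_master_config
# below): this writes etcd.server.key (opened with mode 0600) and
# etcd.server.crt, with the CA certificate appended after the leaf:
#   make_cert("/etc/origin/master/etcd.server", None, "172.30.0.1", san, "serverAuth")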
def do_master_config():
# update master-config.yaml
f = "/etc/origin/master/master-config.yaml"
y = yaml.load(open(f, "r").read())
y["assetConfig"]["loggingPublicURL"] = "https://kibana." + args.subdomain + "/"
y["assetConfig"]["masterPublicURL"] = "https://" + args.public_hostname + ":8443"
y["assetConfig"]["metricsPublicURL"] = "https://hawkular-metrics." + args.subdomain + "/hawkular/metrics"
y["assetConfig"]["publicURL"] = "https://" + args.public_hostname + ":8443/console/"
    y["corsAllowedOrigins"] = ["127.0.0.1",
                               "localhost",
                               "172.30.0.1",
                               "kubernetes",
                               "kubernetes.default",
                               "kubernetes.default.svc",
                               "kubernetes.default.svc.cluster.local",
                               "openshift",
                               "openshift.default",
                               "openshift.default.svc",
                               "openshift.default.svc.cluster.local",
args.private_ip,
args.private_hostname,
args.public_ip,
args.public_hostname
]
y["etcdClientInfo"]["urls"] = ["https://" + args.private_hostname + ":4001"]
y["etcdConfig"]["address"] = args.private_hostname + ":4001"
y["etcdConfig"]["peerAddress"] = args.private_hostname + ":7001"
y["kubernetesMasterConfig"]["masterIP"] = args.private_ip
y["masterPublicURL"] = "https://" + args.public_hostname + ":8443"
y["oauthConfig"]["assetPublicURL"] = "https://" + args.public_hostname + ":8443/console/"
y["oauthConfig"]["masterPublicURL"] = "https://" + args.public_hostname + ":8443"
y["oauthConfig"]["masterURL"] = "https://" + args.private_hostname + ":8443"
y["routingConfig"]["subdomain"] = "apps." + args.subdomain
open(f, "w").write(yaml.dump(y, default_flow_style=False))
# rebuild SSL certs
for cert in ["etcd.server", "master.server"]:
make_cert("/etc/origin/master/" + cert, None, "172.30.0.1",
", ".join(["DNS:kubernetes",
"DNS:kubernetes.default",
"DNS:kubernetes.default.svc",
"DNS:kubernetes.default.svc.cluster.local",
"DNS:openshift",
"DNS:openshift.default",
"DNS:openshift.default.svc",
"DNS:openshift.default.svc.cluster.local",
"DNS:" + args.public_hostname,
"DNS:" + args.private_hostname,
"DNS:172.30.0.1",
"DNS:" + args.public_ip,
"DNS:" + args.private_ip,
"IP:172.30.0.1",
"IP:" + args.public_ip,
"IP:" + args.private_ip]), "serverAuth")
# rebuild service kubeconfig files
ca = base64.b64encode(open("/etc/origin/master/ca.crt").read())
private_hostname_ = args.private_hostname.replace(".", "-")
public_hostname_ = args.public_hostname.replace(".", "-")
for kc in ["admin", "openshift-master", "openshift-registry", "openshift-router"]:
y = {"apiVersion": "v1",
"kind": "Config",
"preferences": {},
"clusters": [{"name": public_hostname_ + ":8443",
"cluster": {"certificate-authority-data": ca,
"server": "https://" + args.public_hostname + ":8443"}},
{"name": private_hostname_ + ":8443",
"cluster": {"certificate-authority-data": ca,
"server": "https://" + args.private_hostname + ":8443"}}],
"users": [{"name": "system:" + kc + "/" + private_hostname_ + ":8443",
"user": {"client-certificate-data": base64.b64encode(open("/etc/origin/master/" + kc + ".crt").read()),
"client-key-data": base64.b64encode(open("/etc/origin/master/" + kc + ".key").read())}}],
"contexts": [{"name": "default/" + public_hostname_ + ":8443/system:" + kc,
"context": {"cluster": public_hostname_ + ":8443",
"namespace": "default",
"user": "system:" + kc + "/" + private_hostname_ + ":8443"}},
{"name": "default/" + private_hostname_ + ":8443/system:" + kc,
"context": {"cluster": private_hostname_ + ":8443",
"namespace": "default",
"user": "system:" + kc + "/" + private_hostname_ + ":8443"}}],
"current-context": "default/" + private_hostname_ + ":8443/system:" + kc}
open("/etc/origin/master/" + kc + ".kubeconfig", "w").write(yaml.dump(y, default_flow_style=False))
# rebuild root's kubeconfig file
shutil.copy("/etc/origin/master/admin.kubeconfig", "/root/.kube/config")
def do_node_config():
# update node-config.yaml
f = "/etc/origin/node/node-config.yaml"
y = yaml.load(open(f, "r").read())
y["masterKubeConfig"] = "system:node:" + args.private_hostname + ".kubeconfig"
y["nodeIP"] = args.private_ip
y["nodeName"] = args.private_hostname
open(f, "w").write(yaml.dump(y, default_flow_style=False))
# remove old node SSL certs and kubeconfig files
for f in glob.glob("/etc/origin/node/system:node:*"):
os.unlink(f)
# rebuild node SSL certs
make_cert("/etc/origin/node/server", None, "172.30.0.1",
", ".join(["DNS:kubernetes",
"DNS:kubernetes.default",
"DNS:kubernetes.default.svc",
|
georgemarshall/django
|
django/contrib/admin/templatetags/admin_list.py
|
Python
|
bsd-3-clause
| 18,018
| 0.001665
|
import datetime
from django.contrib.admin.templatetags.admin_urls import add_preserved_filters
from django.contrib.admin.utils import (
display_for_field, display_for_value, label_for_field, lookup_field,
)
from django.contrib.admin.views.main import (
ALL_VAR, ORDER_VAR, PAGE_VAR, SEARCH_VAR,
)
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.template import Library
from django.template.loader import get_template
from django.templatetags.static import static
from django.urls import NoReverseMatch
from django.utils import formats
from django.utils.html import format_html
from django.utils.safestring import mark_safe
from django.utils.text import capfirst
from django.utils.translation import gettext as _
from .base import InclusionAdminNode
register = Library()
DOT = '.'
@register.simple_tag
def paginator_number(cl, i):
"""
Generate an individual page index link in a paginated list.
"""
if i == DOT:
return '… '
elif i == cl.page_num:
return format_html('<span class="this-page">{}</span> ', i + 1)
else:
return format_html(
'<a href="{}"{}>{}</a> ',
cl.get_query_string({PAGE_VAR: i}),
mark_safe(' class="end"' if i == cl.paginator.num_pages - 1 else ''),
i + 1,
)
def pagination(cl):
"""
Generate the series of links to the pages in a paginated list.
"""
paginator, page_num = cl.paginator, cl.page_num
pagination_required = (not cl.show_all or not cl.can_show_all) and cl.multi_page
if not pagination_required:
page_range = []
else:
ON_EACH_SIDE = 3
ON_ENDS = 2
# If there are 10 or fewer pages, display links to every page.
        # Otherwise, do some fancy pagination.
if paginator.num_pages <= 10:
page_range = range(paginator.num_pages)
else:
# Insert "smart" pagination links, so that there are always ON_ENDS
# links at either end of the list of pages, and there are always
# ON_EACH_SIDE links at either end of the "current page" link.
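            # Worked example (editorial, not in the original source): with
            # num_pages=20 and page_num=10 this yields
            # [0, 1, DOT, 7, 8, 9, 10, 11, 12, 13, DOT, 18, 19],
            # rendered one-based as: 1 2 ... 8 9 10 11 12 13 14 ... 19 20.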
page_range = []
if page_num > (ON_EACH_SIDE + ON_ENDS):
page_range += [
*range(0, ON_ENDS), DOT,
*range(page_num - ON_EACH_SIDE, page_num + 1),
]
else:
page_range.extend(range(0, page_num + 1))
if page_num < (paginator.num_pages - ON_EACH_SIDE - ON_ENDS - 1):
page_range += [
*range(page_num + 1, page_num + ON_EACH_SIDE + 1), DOT,
*range(paginator.num_pages - ON_ENDS, paginator.num_pages)
]
else:
page_range.extend(range(page_num + 1, paginator.num_pages))
need_show_all_link = cl.can_show_all and not cl.show_all and cl.multi_page
return {
'cl': cl,
'pagination_required': pagination_required,
'show_all_url': need_show_all_link and cl.get_query_string({ALL_VAR: ''}),
'page_range': page_range,
'ALL_VAR': ALL_VAR,
'1': 1,
}
@register.tag(name='pagination')
def pagination_tag(parser, token):
return InclusionAdminNode(
parser, token,
func=pagination,
template_name='pagination.html',
takes_context=False,
)
def result_headers(cl):
"""
Generate the list column headers.
"""
ordering_field_columns = cl.get_ordering_field_columns()
for i, field_name in enumerate(cl.list_display):
text, attr = label_for_field(
field_name, cl.model,
model_admin=cl.model_admin,
return_attr=True
)
is_field_sortable = cl.sortable_by is None or field_name in cl.sortable_by
if attr:
            field_name = _coerce_field_name(field_name, i)
# Potentially not sortable
# if the field is the action checkbox: no sorting and special class
if field_name == 'action_checkbox':
            yield {
                "text": text,
                "class_attrib": mark_safe(' class="action-checkbox-column"'),
"sortable": False,
}
continue
admin_order_field = getattr(attr, "admin_order_field", None)
# Set ordering for attr that is a property, if defined.
if isinstance(attr, property) and hasattr(attr, 'fget'):
admin_order_field = getattr(attr.fget, 'admin_order_field', None)
if not admin_order_field:
is_field_sortable = False
if not is_field_sortable:
# Not sortable
yield {
'text': text,
'class_attrib': format_html(' class="column-{}"', field_name),
'sortable': False,
}
continue
# OK, it is sortable if we got this far
th_classes = ['sortable', 'column-{}'.format(field_name)]
order_type = ''
new_order_type = 'asc'
sort_priority = 0
# Is it currently being sorted on?
is_sorted = i in ordering_field_columns
if is_sorted:
order_type = ordering_field_columns.get(i).lower()
sort_priority = list(ordering_field_columns).index(i) + 1
th_classes.append('sorted %sending' % order_type)
new_order_type = {'asc': 'desc', 'desc': 'asc'}[order_type]
# build new ordering param
o_list_primary = [] # URL for making this field the primary sort
o_list_remove = [] # URL for removing this field from sort
o_list_toggle = [] # URL for toggling order type for this field
def make_qs_param(t, n):
return ('-' if t == 'desc' else '') + str(n)
for j, ot in ordering_field_columns.items():
if j == i: # Same column
param = make_qs_param(new_order_type, j)
# We want clicking on this header to bring the ordering to the
# front
o_list_primary.insert(0, param)
o_list_toggle.append(param)
# o_list_remove - omit
else:
param = make_qs_param(ot, j)
o_list_primary.append(param)
o_list_toggle.append(param)
o_list_remove.append(param)
if i not in ordering_field_columns:
o_list_primary.insert(0, make_qs_param(new_order_type, i))
yield {
"text": text,
"sortable": True,
"sorted": is_sorted,
"ascending": order_type == "asc",
"sort_priority": sort_priority,
"url_primary": cl.get_query_string({ORDER_VAR: '.'.join(o_list_primary)}),
"url_remove": cl.get_query_string({ORDER_VAR: '.'.join(o_list_remove)}),
"url_toggle": cl.get_query_string({ORDER_VAR: '.'.join(o_list_toggle)}),
"class_attrib": format_html(' class="{}"', ' '.join(th_classes)) if th_classes else '',
}
def _boolean_icon(field_val):
icon_url = static('admin/img/icon-%s.svg' % {True: 'yes', False: 'no', None: 'unknown'}[field_val])
return format_html('<img src="{}" alt="{}">', icon_url, field_val)
def _coerce_field_name(field_name, field_index):
"""
Coerce a field_name (which may be a callable) to a string.
"""
if callable(field_name):
if field_name.__name__ == '<lambda>':
return 'lambda' + str(field_index)
else:
return field_name.__name__
return field_name
def items_for_result(cl, result, form):
"""
Generate the actual list of data.
"""
def link_in_col(is_first, field_name, cl):
if cl.list_display_links is None:
return False
if is_first and not cl.list_display_links:
return True
return field_name in cl.list_display_links
first = True
pk = cl.lookup_opts.pk.attname
for field_index, field_name in enumerate(cl.list_display):
empty_value_display = cl.model_admin.get_empty_value_display()
row_classes = ['field
|
trafferty/utils
|
python/buildVideoXML.py
|
Python
|
gpl-2.0
| 3,204
| 0.006554
|
#!/Library/Frameworks/Python.framework/Versions/Current/bin/python
import os
from os.path import join, getsize
from random import randint
def addEntry (XMLFile, finfo, dirs, NASPath):
#finfo[1].replace(' ', '_')
    finfo[1] = finfo[1].replace('.', '_', finfo[1].count('.') - 1)
title = finfo[1].split('.')[0]
root = ''
genre = 'Tom and Frederika'
pathlist = finfo[0].split('/')
for pathchunk in pathlist:
for dirname in dirs:
if pathchunk == dirname:
genre = dirname
imageRoot = ''
for pathchunk in pathlist:
if pathchunk.find('videos') == -1:
imageRoot = imageRoot + pathchunk + '/'
else:
imageRoot = imageRoot + 'videos/images/'
break
imageFile = imageRoot + title + '.jpg'
if os.path.exists(imageFile):
imageFile = 'images/' + title + '.jpg'
else:
imageFile = 'images/FAM%d.jpg' % randint(1,116)
XMLFile.write("<movie>\n")
XMLFile.write("<num>" + str(finfo[2]) + "</num>\n")
XMLFile.write("<origtitle>" + title + "</origtitle>\n")
XMLFile.write("<year>2009</year>\n")
XMLFile.write("<genre>" + genre + "</genre>\n")
XMLFile.write("<mpaa>Rated G</mpaa>\n")
XMLFile.write("<director></director>\n")
XMLFile.write("<actors></actors>\n")
XMLFile.write("<description></description>\n")
XMLFile.write("<path>" + NASPath + "</path>\n")
XMLFile.write("<length>110</length>\n")
XMLFile.write("<videocodec>MP4</videocodec>\n")
XMLFile.write("<poster>" + imageFile + "</poster>\n")
XMLFile.write("</movie>\n\n")
#------ End of addEntry
videosDir = '/Volumes/Volume_1-1/media/videos'
#videosDir = './videos'
videoXMLFileName = videosDir + '/videos.xml'
NASRoot = "Y:\\media\\videos\\"
allfiles = []
allDirs = []
print 'Reading in files from ' + videosDir;
for root, dirs, files in os.walk(videosDir):
for dirname in dirs:
allDirs.append(dirname)
for name in files:
if (name.find('mp4') > -1 or name.find('MP4') > -1) and name.find('._') == -1:
allfiles.append([root, name, len(allfiles)])
if (name.find('mkv') > -1 or name.find('MKV') > -1) and name.find('._') == -1:
allfiles.append([root, name, len(allfiles)])
if (name.find('avi') > -1 or name.find('AVI') > -1) and name.find('._') == -1:
allfiles.append([root, name, len(allfiles)])
videoXMLFile = open(videoXMLFileName, 'w')
videoXMLFile.write("<xml>\n")
videoXMLFile.write("<viddb>\n")
videoXMLFile.write("<movies>" + str(len(allfiles)) +"</movies>\n\n")
print '...read in ' + str(len(allfiles)) + ' files'
print 'Building XML media file at ' + videoXMLFileName
for finfo in allfiles:
pathlist = finfo[0].split('/')
NASPath = NASRoot
for pathchunk in pathlist[5:]:
NASPath = NASPath + pathchunk + "\\"
NASPath = NASPath + finfo[1]
#print NASPath + " - " + finfo[0] + "/" + finfo[1]
addEntry (videoXMLFile, finfo, allDirs, NASPath)
videoXMLFile.write("</viddb>\n")
videoXMLFile.write("</xml>\n")
videoXMLFile.close()
print 'Built XML media file for ' + str(len(allfiles)) + ' movies'
|
shutej/tapcfg
|
drivers/osx/tuntap/test/tuntap/tuntap_tests.py
|
Python
|
lgpl-2.1
| 4,009
| 0.012472
|
# Copyright (c) 2011 Mattias Nissler <mattias.nissler@gmx.de>
#
# Redistribution and use in source and binary forms, with or without modification, are permitted
# provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of
# conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of
# conditions and the following disclaimer in the documentation and/or other materials provided
# with the distribution.
# 3. The name of the author may not be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
import itertools
import re
import sys
import unittest
from tuntap.packet_codec import TunPacketCodec, TunAFPacketCodec, TapPacketCodec
from tuntap.packet_reader import BlockingPacketSource, SelectPacketSource
from tuntap.test_char_dev import TestTunCharDev, TestTapCharDev
from tuntap.test_interface import TestTunInterface, TestTapInterface
from tuntap.test_ip import TestIp, TestIp6, TestMulticast, TestMulticast6
class FilteringTestSuite(unittest.TestSuite):
def __init__(self, filter):
super(FilteringTestSuite, self).__init__()
self._matcher = re.compile(filter or '.*')
def __iter__(self):
return itertools.ifilter(lambda test : self._matcher.search(str(test)),
super(FilteringTestSuite, self).__iter__())
def loadTestsFromTestCase(testCaseClass, *args, **kwargs):
testCaseNames = unittest.getTestCaseNames(testCaseClass, 'test_')
return unittest.TestSuite(map(lambda n : testCaseClass(n, *args, **kwargs), testCaseNames))
def main(argv):
# Parse the command line.
parser = argparse.ArgumentParser(description = 'Run tuntap unit tests.')
parser.add_argument('--tests', type = str, nargs = '?', default = None,
help = 'tests to run')
parser.add_argument('--verbosity', type = int, nargs = '?', default = 2,
help = 'verbosity level')
options = parser.parse_args(argv[1:])
# Gather tests and run them.
loader = unittest.TestLoader()
suite = FilteringTestSuite(options.tests)
suite.addTests(loadTestsFromTestCase(TestTunCharDev))
suite.addTests(loadTestsFromTestCase(TestTapCharDev))
suite.addTests(loadTestsFromTestCase(TestTunInterface))
suite.addTests(loadTestsFromTestCase(TestTapInterface))
codecs = (TunPacketCodec, TunAFPacketCodec, TapPacketCodec)
sources = (SelectPacketSource, BlockingPacketSource)
tests = (TestIp, TestIp6, TestMulticast, TestMulticast6)
for (test, codec, source) in [ (test, codec, source) for test in tests
for codec in codecs
for source in sources ]:
suite.addTests(loadTestsFromTestCase(test, lambda af, addr: codec(af, addr, source)))
runner = unittest.TextTestRunner(stream = sys.stderr,
descriptions = True,
verbosity = options.verbosity)
runner.run(suite)
if __name__ == '__main__':
main(sys.argv)
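# Example invocation (editorial; flags as declared in the argparse setup above):
#   python tuntap_tests.py --tests TestIp --verbosity 1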
|
Tha-Robert/kademlia
|
kademlia/storage.py
|
Python
|
mit
| 2,552
| 0.000784
|
import time
from itertools import izip
from itertools import imap
from itertools import takewhile
import operator
from collections import OrderedDict
from zope.interface import implements
from zope.interface import Interface
class IStorage(Interface):
"""
Local storage for this node.
"""
def __setitem__(key, value):
"""
Set a key to the given value.
"""
def __getitem__(key):
"""
Get the given key. If item doesn't exist, raises C{KeyError}
"""
def get(key, default=None):
"""
        Get given key. Returns a tuple (exists, value); if not found, exists is False.
"""
def iteritemsOlderThan(secondsOld):
"""
        Return an iterator over (key, value) tuples for items older than the given secondsOld.
"""
def iteritems():
"""
        Get the iterator for this storage; it should yield tuples of (key, value).
"""
class ForgetfulStorage(object):
implements(IStorage)
def __init__(self, ttl=604800):
"""
By default, max age is a week.
"""
self.data = OrderedDict()
self.ttl = ttl
def __setitem__(self, key, value):
if key in self.data:
del self.data[key]
self.data[key] = (time.time(), value)
self.cull()
def cull(self):
for k, v in self.iteritemsOlderThan(self.ttl):
self.data.popitem(last=False)
def get(self, key, default=None):
self.cull()
        if key in self.data:
            return True, self[key]
        return False, default
def __getitem__(self, key):
self.cull()
return self.data[key][1]
    def __iter__(self):
self.cull()
return iter(self.data)
def __repr__(self):
self.cull()
return repr(self.data)
def iteritemsOlderThan(self, secondsOld):
minBirthday = time.time() - secondsOld
zipped = self._tripleIterable()
matches = takewhile(lambda r: minBirthday >= r[1], zipped)
return imap(operator.itemgetter(0, 2), matches)
def _tripleIterable(self):
ikeys = self.data.iterkeys()
ibirthday = imap(operator.itemgetter(0), self.data.itervalues())
ivalues = imap(operator.itemgetter(1), self.data.itervalues())
return izip(ikeys, ibirthday, ivalues)
def iteritems(self):
self.cull()
ikeys = self.data.iterkeys()
ivalues = imap(operator.itemgetter(1), self.data.itervalues())
return izip(ikeys, ivalues)
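# Editorial usage sketch (not part of the original module): exercises the
# (exists, value) contract documented in IStorage.get above.
if __name__ == '__main__':
    store = ForgetfulStorage(ttl=60)
    store['greeting'] = 'hello'
    print store.get('greeting')        # -> (True, 'hello')
    print store.get('absent', 'n/a')   # -> (False, 'n/a')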
|
aarongarrett/inspyred
|
examples/advanced/parallel_evaluation_pp_example.py
|
Python
|
mit
| 1,728
| 0.011574
|
from random import Random
from time import time
import inspyred
import math
# Define an additional "necessary" function for the evaluator
# to see how it must be handled when using pp.
def my_squaring_function(x):
return x**2
def generate_rastrigin(random, args):
size = args.get('num_inputs', 10)
return [random.uniform(-5.12, 5.12) for i in range(size)]
def evaluate_rastrigin(candidates, args):
fitness = []
for cs in candidates:
        fit = 10 * len(cs) + sum([(my_squaring_function(x - 1) -
10 * math.cos(2 * math.pi * (x - 1)))
for x in cs])
fitness.append(fit)
return fitness
def main(prng=None, display=False):
if prng is None:
prng = Random()
prng.seed(time())
ea = inspyred.ec.DEA(prng)
if display:
ea.observer = inspyred.ec.observers.stats_observer
ea.terminator = inspyred.ec.terminators.evaluation_termination
final_pop = ea.evolve(generator=generate_rastrigin,
evaluator=inspyred.ec.evaluators.parallel_evaluation_pp,
pp_evaluator=evaluate_rastrigin,
pp_dependencies=(my_squaring_function,),
pp_modules=("math",),
pop_size=8,
bounder=inspyred.ec.Bounder(-5.12, 5.12),
maximize=False,
max_evaluations=256,
num_inputs=3)
if display:
best = max(final_pop)
print('Best Solution: \n{0}'.format(str(best)))
return ea
if __name__ == '__main__':
main(display=True)
|
dh1tw/pyhamtools
|
docs/source/conf.py
|
Python
|
mit
| 8,402
| 0.006308
|
# -*- coding: utf-8 -*-
#
# pyhamtools documentation build configuration file, created by
# sphinx-quickstart on Thu Apr 24 01:00:39 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
from pyhamtools.version import __version__, __release__
sys.path.insert(0,"/Users/user/projects/pyhamtools/pyhamtools")
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinxcontrib.napoleon',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pyhamtools'
copyright = u'2019, Tobias Wellnitz, DH1TW'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = __release__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pyhamtoolsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'pyhamtools.tex', u'pyhamtools Documentation',
u'Tobias Wellnitz, DH1TW', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pyhamtools', u'pyhamtools Documentation',
[u'Tobias Wellnitz, DH1TW'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'pyhamtools', u'pyhamtools Documentation',
u'Tobias Wellnitz, DH1TW', 'pyhamtools', 'Toolkit for ham radio software developers',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#tex
|
sputnick-dev/weboob
|
modules/hellobank/perso/transactions.py
|
Python
|
agpl-3.0
| 4,962
| 0.007457
|
# -*- coding: utf-8 -*-
# Copyright(C) 2013 Christophe Lampin
# Copyright(C) 2009-2012 Romain Bignon
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
import re
from weboob.deprecated.browser import Page
from weboob.tools.capabilities.bank.transactions import FrenchTransaction
class Transaction(FrenchTransaction):
PATTERNS = [(re.compile(u'^(?P<category>CHEQUE)(?P<text>.*)'), FrenchTransaction.TYPE_CHECK),
                (re.compile('^(?P<category>FACTURE CARTE) DU (?P<dd>\d{2})(?P<mm>\d{2})(?P<yy>\d{2}) (?P<text>.*?)( CA?R?T?E? ?\d*X*\d*)?$'),
FrenchTransaction.TYPE_CARD),
(re.compile('^(?P<category>(PRELEVEMENT|TELEREGLEMENT|TIP)) (?P<text>.*)'),
FrenchTransaction.TYPE_ORDER),
(re.compile('^(?P<category>ECHEANCEPRET)(?P<text>.*)'), FrenchTransaction.TYPE_LOAN_PAYMENT),
(re.compile('^(?P<category>RETRAIT DAB) (?P<dd>\d{2})/(?P<mm>\d{2})/(?P<yy>\d{2})( (?P<HH>\d+)H(?P<MM>\d+))? (?P<text>.*)'),
FrenchTransaction.TYPE_WITHDRAWAL),
(re.compile('^(?P<category>VIR(EMEN)?T? ((RECU|FAVEUR) TIERS|SEPA RECU)?)( /FRM)?(?P<text>.*)'),
FrenchTransaction.TYPE_TRANSFER),
(re.compile('^(?P<category>REMBOURST) CB DU (?P<dd>\d{2})(?P<mm>\d{2})(?P<yy>\d{2}) (?P<text>.*)'),
FrenchTransaction.TYPE_PAYBACK),
(re.compile('^(?P<category>REMBOURST)(?P<text>.*)'), FrenchTransaction.TYPE_PAYBACK),
(re.compile('^(?P<category>COMMISSIONS)(?P<text>.*)'), FrenchTransaction.TYPE_BANK),
(re.compile('^(?P<text>(?P<category>REMUNERATION).*)'), FrenchTransaction.TYPE_BANK),
(re.compile('^(?P<category>REMISE CHEQUES)(?P<text>.*)'), FrenchTransaction.TYPE_DEPOSIT),
]
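    # Illustrative match (editorial): a raw label such as "CHEQUE 1234567"
    # hits the first pattern above and is typed TYPE_CHECK, with
    # category "CHEQUE" and text " 1234567".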
class AccountHistory(Page):
def get_operations(self):
for tr in self.document.xpath('//table[@id="tableCompte"]//tr'):
if len(tr.xpath('td[@class="debit"]')) == 0:
continue
id = tr.find('td').find('input').attrib['id'].lstrip('_')
op = Transaction(id)
op.parse(date=tr.findall('td')[1].text,
raw=tr.findall('td')[2].text.replace(u'\xa0', u''))
debit = tr.xpath('.//td[@class="debit"]')[0].text
credit = tr.xpath('.//td[@class="credit"]')[0].text
op.set_amount(credit, debit)
yield op
def iter_coming_operations(self):
i = 0
for tr in self.document.xpath('//table[@id="tableauOperations"]//tr'):
if 'typeop' in tr.attrib:
tds = tr.findall('td')
if len(tds) != 3:
continue
text = tds[1].text or u''
text = text.replace(u'\xa0', u'')
for child in tds[1].getchildren():
if child.text:
text += child.text
if child.tail:
text += child.tail
i += 1
operation = Transaction(i)
operation.parse(date=tr.attrib['dateop'],
raw=text)
operation.set_amount(tds[2].text)
yield operation
def get_next_page(self):
others = self.document.xpath('//span[@class="OthersPage"]')
current = self.document.xpath('//span[@class="currentPage"]/strong')
if len(current) < 1:
return None
current = current[0].text
for other in others:
if (other.text <= current):
continue
else:
return other.text
return None
def get_flowExecutionKey(self):
flowExecutionKey = self.document.xpath('//input[@name="_flowExecutionKey"]/@value')
if len(flowExecutionKey) > 0:
return flowExecutionKey[0]
else:
return None
def get_IBAN(self):
return self.document.xpath('//a[@class="lien_perso_libelle"]')[0].attrib['id'][5:28]
class AccountComing(AccountHistory):
pass
|
emilybache/texttest-runner
|
src/main/python/lib/default/batch/batchutils.py
|
Python
|
mit
| 1,956
| 0.005624
|
import plugins, datetime, time, os
class BatchVersionFilter:
def __init__(self, batchSession):
self.batchSession = batchSession
def verifyVersions(self, app):
badVersion = self.findUnacceptableVersion(app)
if badVersion is not None:
raise plugins.TextTestError, "unregistered version '" + badVersion + "' for " + self.batchSession + " session."
def findUnacceptableVersion(self, app):
if app.getCompositeConfigValue("batch_use_version_filtering", self.batchSession) != "true":
return
allowedVersions = app.getCompositeConfigValue("batch_version", self.batchSession)
for version in app.versions:
if len(version) > 0 and version not in allowedVersions and not version.startswith("copy_"):
return version
def calculateBatchDate():
    # Batch mode uses a standardised date that gives a consistent answer for night-jobs.
# Hence midnight is a bad cutover point. The day therefore starts and ends at 8am :)
    timeToUse = plugins.globalStartTime - datetime.timedelta(hours=8)
return timeToUse.strftime("%d%b%Y")
def parseFileName(fileName, diag):
versionStr = fileName[5:-5]
components = versionStr.split("_")
diag.info("Parsing file with components " + repr(components))
for index, component in enumerate(components[1:]):
try:
diag.info("Trying to parse " + component + " as date")
date = time.strptime(component, "%d%b%Y")
version = "_".join(components[:index + 1])
tag = "_".join(components[index + 2:]) or component
return version, date, tag
except ValueError:
pass
return None, None, None
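# Editorial walk-through (the file naming below is an assumption): for a name
# like "batch1.0_13Feb2014_night.html", fileName[5:-5] gives
# "1.0_13Feb2014_night", so components is ["1.0", "13Feb2014", "night"] and
# the function returns version "1.0", the parsed date, and tag "night".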
def convertToUrl(path, fileMapping):
for filePath, httpPath in fileMapping.items():
if path.startswith(filePath):
return path.replace(filePath, httpPath)
return "file://" + os.path.abspath(path)
|
Com-Mean/MLinAcition
|
chapter9/treeExplore.py
|
Python
|
gpl-3.0
| 1,329
| 0.009916
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#########################################################################
# File Name: treeExplore.py
# Author: lpqiu
# mail: qlp_1018@126.com
# Created Time: Saturday, 13 September 2014, 21:42:58
#########################################################################
from numpy import *
from tkinter import *
import regTrees
def reDraw(tolS, tolN):
pass
def drawNewTree():
pass
def treeExplore():
root = Tk()
    Label(root, text="Plot Place Holder").grid(row=0, columnspan=3)
    Label(root, text="tolN").grid(row=1, column=0)
    tolNEntry = Entry(root)
    tolNEntry.grid(row=1, column=1)
    tolNEntry.insert(0, '10')
    Label(root, text='tolS').grid(row=2, column=0)
tolSEntry = Entry(root)
tolSEntry.grid(row=2, column=1)
    tolSEntry.insert(0, '1.0')
Button(root, text='ReDraw', command=drawNewTree).grid(row=1, column=2,\
rowspan=3)
chkBtnVar = IntVar()
chkBtn = Checkbutton(root, text='Model Tree', variable=chkBtnVar)
chkBtn.grid(row=3, column=0, columnspan=2)
    reDraw.rawDat = mat(regTrees.loadDataSet('sine.txt'))
reDraw.testDat = arange(min(reDraw.rawDat[:,0]), \
max(reDraw.rawDat[:,0]), 0.01)
    reDraw(1.0, 10)
root.mainloop()
if __name__=="__main__":
treeExplore()
|
allisson/django-tiny-rest
|
tiny_rest/tests/test_authorization.py
|
Python
|
mit
| 3,377
| 0
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.test import TestCase, RequestFactory
from django.contrib.auth import get_user_model, authenticate
from django.contrib.auth.models import AnonymousUser
import json
import status
from tiny_rest.views import APIView
from tiny_rest.authorization import (
IsAuthenticatedMixin, IsAuthenticatedOrReadOnlyMixin
)
User = get_user_model()
class IsAuthenticatedAPIView(IsAuthenticatedMixin, APIView):
def list(self, request, *args, **kwargs):
return self.response(
data={'is_authenticated': request.user.is_authenticated()}
)
class IsAuthenticatedOrReadOnlyAPIView(IsAuthenticatedOrReadOnlyMixin,
APIView):
def list(self, request, *args, **kwargs):
return self.response(
data={'is_authenticated': request.user.is_authenticated()}
)
def create(self, request, *args, **kwargs):
return self.response(
data={'is_authenticated': request.user.is_authenticated()}
)
class BaseTestCase(TestCase):
def setUp(self):
        self.factory = RequestFactory()
self.user = User.objects.create_user(
            'user', 'user@email.com', '123456'
)
class TestIsAuthenticatedAPIView(BaseTestCase):
def test_authenticate(self):
request = self.factory.get('/')
request.user = authenticate(username='user', password='123456')
response = IsAuthenticatedAPIView.as_view()(request)
data = json.loads(response.content.decode())
self.assertTrue(data['is_authenticated'])
request.user = AnonymousUser()
response = IsAuthenticatedAPIView.as_view()(request)
data = json.loads(response.content.decode())
self.assertEqual(data['error'], 'Not Authorized')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
class TestIsAuthenticatedOrReadOnlyAPIView(BaseTestCase):
def test_authenticate(self):
request = self.factory.get('/')
request.user = AnonymousUser()
response = IsAuthenticatedOrReadOnlyAPIView.as_view()(request)
data = json.loads(response.content.decode())
self.assertFalse(data['is_authenticated'])
self.assertEqual(response.status_code, status.HTTP_200_OK)
request = self.factory.post('/')
request.user = AnonymousUser()
response = IsAuthenticatedOrReadOnlyAPIView.as_view()(request)
data = json.loads(response.content.decode())
self.assertEqual(data['error'], 'Not Authorized')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
request = self.factory.get('/')
request.user = authenticate(username='user', password='123456')
response = IsAuthenticatedOrReadOnlyAPIView.as_view()(request)
data = json.loads(response.content.decode())
self.assertTrue(data['is_authenticated'])
self.assertEqual(response.status_code, status.HTTP_200_OK)
request = self.factory.post('/')
request.user = authenticate(username='user', password='123456')
response = IsAuthenticatedOrReadOnlyAPIView.as_view()(request)
data = json.loads(response.content.decode())
self.assertTrue(data['is_authenticated'])
self.assertEqual(response.status_code, status.HTTP_200_OK)
|
YoQuieroSaber/yournextrepresentative
|
candidates/management/commands/candidates_delete_party_images.py
|
Python
|
agpl-3.0
| 1,053
| 0.00095
|
from candidates.models import PopItPerson
from candidates.popit import PopItApiMixin, popit_unwrap_pagination
from django.core.management.base import BaseCommand
class Command(PopItApiMixin, BaseCommand):
def handle(self, **options):
for o in popit_unwrap_pagination(
self.api.organizations,
per_page=100,
embed='membership.person'
):
if o['classification'] != 'Party':
continue
print o['name']
for image in o.get('images', []):
print " DELETE", image['_id']
self.api.organizations(o['id']).image(image['_id']).delete()
# The person pages get party images via the
            # membership.organization embed, so invalidate the cache
# entries for any person who's a member of this party:
for membership in o.get('memberships', []):
person = PopItPerson.create_from_dict(membership['person_id'])
person.invalidate_cache_entries()
|
jasonwee/asus-rt-n14uhp-mrtg
|
src/lesson_mathematics/fractions_limit_denominator.py
|
Python
|
apache-2.0
| 249
| 0
|
import fractions
import math
print('PI =', math.pi)
f_pi = fractions.Fraction(str(math.pi))
print('No limit =', f_pi)
for i in [1, 6, 11, 60, 70, 90, 100]:
limited = f_pi.limit_denominator(i)
    print('{0:8} = {1}'.format(i, limited))
|
nuagenetworks/vspk-python
|
vspk/v6/nugatewayslocation.py
|
Python
|
bsd-3-clause
| 17,817
| 0.008868
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .fetchers import NUPermissionsFetcher
from .fetchers import NUMetadatasFetcher
from .fetchers import NUGlobalMetadatasFetcher
from bambou import NURESTObject
class NUGatewaysLocation(NURESTObject):
""" Represents a GatewaysLocation in the VSD
Notes:
Gateway location details
"""
__rest_name__ = "gatewayslocation"
__resource_name__ = "gatewayslocations"
## Constants
CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL"
CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE"
def __init__(self, **kwargs):
""" Initializes a GatewaysLocation instance
Notes:
                You can specify all parameters when calling this method.
A special argument named `data` will enable you to load the
object from a Python dictionary
Examples:
>>> gatewayslocation = NUGatewaysLocation(id=u'xxxx-xxx-xxx-xxx', name=u'GatewaysLocation')
>>> gatewayslocation = NUGatewaysLocation(data=my_dict)
"""
super(NUGatewaysLocation, self).__init__()
# Read/Write Attributes
self._last_updated_by = None
self._last_updated_date = None
self._latitude = None
self._address = None
self._ignore_geocode = None
self._time_zone_id = None
self._embedded_metadata = None
self._entity_scope = None
self._locality = None
self._longitude = None
self._country = None
self._creation_date = None
self._associated_entity_name = None
self._associated_entity_type = None
self._state = None
self._owner = None
self._external_id = None
self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="last_updated_date", remote_name="lastUpdatedDate", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="latitude", remote_name="latitude", attribute_type=float, is_required=False, is_unique=False)
self.expose_attribute(local_name="address", remote_name="address", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="ignore_geocode", remote_name="ignoreGeocode", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="time_zone_id", remote_name="timeZoneID", attribute_type=str, is_required=True, is_unique=False)
self.expose_attribute(local_name="embedded_metadata", remote_name="embeddedMetadata", attribute_type=list, is_required=False, is_unique=False)
self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL'])
self.expose_attribute(local_name="locality", remote_name="locality", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="longitude", remote_name="longitude", attribute_type=float, is_required=False, is_unique=False)
self.expose_attribute(local_name="country", remote_name="country", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="creation_date", remote_name="creationDate", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="associated_entity_name", remote_name="associatedEntityName", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="associated_entity_type", remote_name="associatedEntityType", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="state", remote_name="state", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="owner", remote_name="owner", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True)
# Fetchers
self.permissions = NUPermissionsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self._compute_args(**kwargs)
# Properties
@property
def last_updated_by(self):
""" Get last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
return self._last_updated_by
@last_updated_by.setter
def last_updated_by(self, value):
""" Set last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
self._last_updated_by = value
@property
def last_updated_date(self):
""" Get last_updated_date value.
Notes:
Time stamp when this object was last updated.
This attribute is named `lastUpdatedDate` in VSD API.
"""
return self._last_updated_date
@last_updated_date.setter
def last_updated_date(self, value):
""" Set last_updated_date value.
Notes:
Time stamp when this object was last updated.
This attribute is named `lastUpdatedDate` in VSD API.
"""
self._last_updated_date = value
@property
def latitude(self):
""" Get latitude value.
Notes:
Latitude in decimal format.
"""
return self._latitude
@latitude.setter
def latitude(self, value):
""" Set latitude value.
Notes:
Latitude in decimal format.
"""
self._latitude = value
@property
def address(self):
""" Get address value.
Notes:
Formatted address including property number, street name, su
|
tseaver/gcloud-python
|
monitoring/google/cloud/monitoring_v3/gapic/enums.py
|
Python
|
apache-2.0
| 21,475
| 0.003679
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrappers for protocol buffer enum types."""
import enum
class ComparisonType(enum.IntEnum):
"""
Specifies an ordering relationship on two arguments, here called left and
right.
Attributes:
COMPARISON_UNSPECIFIED (int): No ordering relationship is specified.
COMPARISON_GT (int): The left argument is greater than the right argument.
COMPARISON_GE (int): The left argument is greater than or equal to the right argument.
COMPARISON_LT (int): The left argument is less than the right argument.
COMPARISON_LE (int): The left argument is less than or equal to the right argument.
COMPARISON_EQ (int): The left argument is equal to the right argument.
COMPARISON_NE (int): The left argument is not equal to the right argument.
"""
COMPARISON_UNSPECIFIED = 0
COMPARISON_GT = 1
COMPARISON_GE = 2
COMPARISON_LT = 3
COMPARISON_LE = 4
COMPARISON_EQ = 5
COMPARISON_NE = 6
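    # Editorial note: in an alerting condition the time series value is the
    # left argument, so "value > 0.9" pairs COMPARISON_GT with a 0.9 threshold.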
class ServiceTier(enum.IntEnum):
"""
The tier of service for a Stackdriver account. Please see the
`service tiers documentation <https://cloud.google.com/monitoring/accounts/tiers>`_
for more details.
Attributes:
SERVICE_TIER_UNSPECIFIED (int): An invalid sentinel value, used to indicate that a tier has not
been provided explicitly.
SERVICE_TIER_BASIC (int): The Stackdriver Basic tier, a free tier of service that provides basic
features, a moderate allotment of logs, and access to built-in metrics.
A number of features are not available in this tier. For more details,
see `the service tiers documentation <https://cloud.google.com/monitoring/accounts/tiers>`_.
SERVICE_TIER_PREMIUM (int): The Stackdriver Premium tier, a higher, more expensive tier of service
that provides access to all Stackdriver features, lets you use Stackdriver
        with AWS accounts, and has larger allotments for logs and metrics. For
more details, see `the service tiers documentation <https://cloud.google.com/monitoring/accounts/tiers>`_.
"""
SERVICE_TIER_UNSPECIFIED = 0
SERVICE_TIER_BASIC = 1
SERVICE_TIER_PREMIUM = 2
class UptimeCheckRegion(enum.IntEnum):
"""
The regions from which an uptime check can be run.
Attributes:
REGION_UNSPECIFIED (int): Default value if no region is specified. Will result in uptime checks
running from all regions.
USA (int): Allows checks to run from locations within the United States of America.
EUROPE (int): Allows checks to run from locations within the continent of Europe.
SOUTH_AMERICA (int): Allows checks to run from locations within the continent of South
America.
        ASIA_PACIFIC (int): Allows checks to run from locations within the Asia Pacific area (ex:
Singapore).
"""
REGION_UNSPECIFIED = 0
USA = 1
EUROPE = 2
SOUTH_AMERICA = 3
ASIA_PACIFIC = 4
class GroupResourceType(enum.IntEnum):
"""
The supported resource types that can be used as values of
group_resource.resource_type. gae_app and uptime_url are not allowed
because group checks on App Engine modules and URLs are not allowed.
Attributes:
RESOURCE_TYPE_UNSPECIFIED (int): Default value (not valid).
INSTANCE (int): A group of instances (could be either GCE or AWS_EC2).
AWS_ELB_LOAD_BALANCER (int): A group of AWS load balancers.
"""
RESOURCE_TYPE_UNSPECIFIED = 0
INSTANCE = 1
AWS_ELB_LOAD_BALANCER = 2
class LabelDescriptor(object):
class ValueType(enum.IntEnum):
"""
Value types that can be used as label values.
Attributes:
STRING (int): A variable-length string. This is the default.
BOOL (int): Boolean; true or false.
INT64 (int): A 64-bit signed integer.
"""
STRING = 0
BOOL = 1
INT64 = 2
class Aggregation(object):
class Aligner(enum.IntEnum):
"""
The Aligner describes how to bring the data points in a single
time series into temporal alignment.
Attributes:
ALIGN_NONE (int): No alignment. Raw data is returned. Not valid if cross-time
series reduction is requested. The value type of the result is
the same as the value type of the input.
ALIGN_DELTA (int): Align and convert to delta metric type. This alignment is valid
for cumulative metrics and delta metrics. Aligning an existing
delta metric to a delta metric requires that the alignment
period be increased. The value type of the result is the same
as the value type of the input.
One can think of this aligner as a rate but without time units; that
is, the output is conceptually (second_point - first_point).
ALIGN_RATE (int): Align and convert to a rate. This alignment is valid for
cumulative metrics and delta metrics with numeric values. The output is a
gauge metric with value type
``DOUBLE``.
One can think of this aligner as conceptually providing the slope of
the line that passes through the value at the start and end of the
window. In other words, this is conceptually ((y1 - y0)/(t1 - t0)),
and the output unit is one that has a \"/time\" dimension.
If, by rate, you are looking for percentage change, see the
``ALIGN_PERCENT_CHANGE`` aligner option.
ALIGN_INTERPOLATE (int): Align by interpolating between adjacent points around the
period boundary. This alignment is valid for gauge
metrics with numeric values. The value type of the result is the same
as the value type of the input.
ALIGN_NEXT_OLDER (int): Align by shifting the oldest data point before the period
boundary to the boundary. This alignment is valid for gauge
metrics. The value type of the result is the same as the
value type of the input.
ALIGN_MIN (int): Align time series via aggregation. The resulting data point in
the alignment period is the minimum of all data points in the
period. This alignment is valid for gauge and delta metrics with numeric
values. The value type of the result is the same as the value
type of the input.
ALIGN_MAX (int): Align time series via aggregation. The resulting data point in
the alignment period is the maximum of all data points in the
period. This alignment is valid for gauge and delta metrics with numeric
values. The value type of the result is the same as the value
type of the input.
ALIGN_MEAN (int): Align time series via aggregation. The resulting data point in
the alignment period is the average or arithmetic mean of all
data points in the period. This alignment is valid for gauge and delta
metrics with numeric values. The value type of the output is
``DOUBLE``.
ALIGN_COUNT (int): Align time series via aggregation. The resulting data point in
the alignment period is the count of all data points in the
period. This alignment is valid for gauge and delta metrics with numeric
or Boolean values. The value type of the output is
``INT64``.
ALIGN_SUM (int): Align time series via aggregation. The resulting data point in
the alignment period is the sum of all data points in the
period. This alignment is valid for gauge and delta metrics with numeric
joke2k/faker | faker/providers/person/or_IN/__init__.py | Python | mit | 35,828 | 0
from .. import Provider as PersonProvider
class Provider(PersonProvider):
formats_female = (
"{{first_name_female}} {{last_name}}",
"{{first_name_unisex}} {{last_name}}",
"{{prefix_female}} {{first_name_unisex}} {{last_name}}",
"{{prefix_female}} {{first_name_female}} {{last_name}}",
)
formats_male = (
"{{first_name_male}} {{last_name}}",
"{{first_name_male}} {{middle_name}} {{last_name}}",
"{{first_name_unisex}} {{middle_name}} {{last_name}}",
"{{prefix_male}} {{first_name_male}} {{last_name}}",
)
formats = formats_female + formats_male
# All the names are extracted from Odia Wikipedia by Soumendra Kumar Sahoo.
# 1. https://or.wikipedia.org/s/1duk and
# 2. https://or.wikipedia.org/s/3vz
first_names_female = (
"ଅଜୟନ୍ତୀ",
"ଅଞ୍ଜଳି",
"ଅନିଶା",
"ଅନୀତା",
"ଅନୁ",
"ଅନୁପ୍ରିୟା",
"ଅନୁଭା",
"ଅପରାଜିତା",
"ଅମିତା",
"ଅମିୟବାଳା",
"ଅର୍ଚ୍ଚିତା",
"ଅର୍ପିତା",
"ଅସୀମା",
"ଆଞ୍ଚଲ",
"ଆନିଷା",
"ଆମେଲି",
"ଇନ୍ଦୁ",
"ଇନ୍ଦୁରାଣୀ",
"ଇନ୍ଦ୍ରାଣୀ",
"ଇରାନି",
"ଇଲା",
"ଉଷସୀ",
"ଉଷା",
"ଏଲିନା",
"କନକଲତା",
"କବିତା",
"କମଳା",
"କଲ୍ୟାଣୀ",
"କାଜଲ",
"କୁମୁଦ",
"କୁସୁମ",
"କୋଏଲ",
"ଗାର୍ଗୀ",
"ଗାୟତ୍ରୀବାଳା",
"ଗୀତା",
"ଗୁନ୍ ଗୁନ୍",
"ଗୌରୀ",
"ଗ୍ଲୋରିଆ",
"ଚନ୍ଦ୍ରମା",
"ଛବି",
"ଜିନା",
"ଜ୍ୟୋତିର୍ମୟୀ",
"ଜ୍ୟୋତ୍ସ୍ନା",
"ଜୟନ୍ତୀ",
"ଝରଣା",
"ଝିଲିକ୍",
"ଟୁକୁନି",
"ତନ୍ଦ୍ରା",
"ତମନ୍ନା",
"ତୃପ୍ତି",
"ତ୍ରିପୁରା",
"ଦୀପା",
"ଦୀପ୍ତିରେଖା",
"ଦେବଯାନୀ",
"ଦେବୀ",
"ନନ୍ଦିତା",
"ନନ୍ଦିନୀ",
"ନମିତା",
"ନମ୍ରତା",
"ନଳିନୀ",
"ନାଜିଆ",
"ନିକିତା",
"ନିବେଦିତା",
"ନିର୍ମଳା",
"ନିହାରିକା",
"ନୀତୁ",
"ନୈନା",
"ପଦ୍ମିନୀ",
"ପାର୍ବତୀ",
"ପିଙ୍କି",
"ପୁନମ",
"ପୁପୁଲ",
"ପୁଷ୍ପା",
"ପ୍ରକୃତି",
"ପ୍ରତିଜ୍ଞା",
"ପ୍ରମିଳା",
"ପ୍ରିୟଙ୍କା",
"ପ୍ରିୟମ୍ବଦା",
"ପ୍ରିୟା",
"ପ୍ରେମଲତା",
"ଫୁଲମଣି",
"ବନଜା",
"ବନ୍ଦିତା",
"ବବ୍ଲି",
"ବର୍ଣ୍ଣାଳୀ",
"ବର୍ଷା",
"ବାସନ୍ତି",
"ବାସନ୍ତୀ",
"ବିଜୟଲକ୍ଷ୍ମୀ",
"ବିଜୟିନୀ",
"ବିଦୁସ୍ମିତା",
"ବିନୋଦିନୀ",
"ବିରଜା",
"ବିଷ୍ଣୁପ୍ରିୟା",
"ବୀଣା",
"ବୈଶାଳୀ",
"ଭଗବତୀ",
"ଭବାନୀ",
"ଭାନୁମତୀ",
"ଭାସ୍ୱତୀ",
"ଭୂମିକା",
"ମଙ୍ଗଳା",
"ମଞ୍ଜୁଲତା",
"ମଞ୍ଜୁଳା",
"ମଣିମାଳା",
"ମନ୍ଦାକିନୀ",
"ମମତା",
"ମହାଶ୍ୱେତା",
"ମାଧୁରୀ",
"ମାମିନା",
"ମିନତି",
"ମିନାକ୍ଷୀ",
"ମେଘନା",
"ମେଘା",
"ଯଶୋଦା",
"ରଚନା",
"ରଜନୀ",
"ରଞ୍ଜିତା",
"ରତ୍ନପ୍ରଭା",
"ରଶ୍ମୀରେଖା",
"ରାକ୍ଷୀ",
"ରାଜଶ୍ରୀ",
"ରାଧାରାଣୀ",
"ରାଲି",
"ରାସମଞ୍ଜରୀ",
"ରାସେଶ୍ୱରୀ",
"ରିନା",
"ରିୟା",
"ରୀତା",
"ରୀତାରାଣୀ",
"ରୁକ୍ମଣୀ",
"ରୁନୁ",
"ରୋଜା",
"ରୋଷନୀ",
"ରୋସନାରା",
"ଲକ୍ଷ୍ମୀ",
"ଲକ୍ଷ୍ମୀପ୍ରିୟା",
"ଲତିକା",
"ଲିପି",
"ଲିପିକା",
"ଲିପ୍ସା",
"ଲୀଳା",
"ଲେଖା",
"ଲେସ୍ଲି",
"ଶିବାନୀ",
"ଶୀତଲ",
"ଶୁଭଶ୍ରୀ",
"ଶେଫାଳୀ",
"ଶୈରିନ୍ଦ୍ରୀ",
"ଶ୍ରୀମତି",
"ଶ୍ରୀମତୀ",
"ସଂଘମିତ୍ରା",
"ସଞ୍ଚିତା",
"ସନ୍ମିରା",
"ସରସ୍ୱତୀ",
"ସସ୍ମିତା",
"ସାବିତ୍ରୀ",
"ସିପ୍ରା",
"ସୀମାରାଣୀ",
"ସୁଚିତ୍ରା",
"ସୁଜାତା",
"ସୁନନ୍ଦା",
"ସୁପ୍ରିୟା",
"ସୁମନୀ",
"ସୁରମା",
"ସୋନିକା",
"ସୋଫିଆ",
"ସୌଦାମିନୀ",
"ସୌମ୍ୟା",
"ସ୍ନିଗ୍ଧା",
"ସ୍ନେହାଙ୍ଗିନୀ",
"ସ୍ମିତା",
"ସ୍ୱାଗତିକା",
)
first_names_unisex = (
"ଅଶ୍ୱିନୀ",
"ଅଶ୍ୱିନୀ",
"କବି",
"ଗୀତା",
"ଜ୍ୟୋତି",
"ଦୁର୍ଗା",
"ଦେବୀ",
"ପଦ୍ମ",
"ପୁପୁଲ",
"ପ୍ରିୟଦର୍ଶୀ",
"ମକର",
"ମଙ୍ଗଳା",
"ମୌସଦୀ",
"ରତି",
"ରଶ୍ମି",
"ଶାନ୍ତି",
"ସିମନ୍",
"ସୁଧାଂଶୁମାଳିନୀ",
"ସୁମନ",
"ସ୍ନିତି",
)
first_names_male = (
"ଅଂଶୁମାନ",
"ଅକ୍ଷୟ",
"ଅଖିଳ",
"ଅଗସ୍ତି",
"ଅଙ୍ଗଦ",
"ଅଚ୍ୟୁତାନନ୍ଦ",
"ଅଜିତ",
"ଅଜୟ",
"ଅତନୁ",
"ଅଦ୍ୱୈତ",
"ଅଧିରାଜ",
"ଅନନ୍ତ",
"ଅନାଦି",
"ଅନାଦୀ",
"ଅନିରୁଦ୍ଧ",
"ଅନିଲ",
"ଅନୀଲ",
"ଅନୁଭବ",
"ଅନ୍ତର୍ଯ୍ୟାମୀ",
"ଅପୂର୍ବ",
"ଅଭିନ୍ନ",
"ଅଭିମନ୍ୟୁ",
"ଅଭିରାମ",
"ଅଭିଷେକ",
"ଅଭୟ",
"ଅମର",
"ଅମରନାଥ",
"ଅମରେନ୍ଦ୍ର",
"ଅମିନୂଲ",
"ଅମ୍ଳାନ",
"ଅରକ୍ଷିତ",
"ଅରବିନ୍ଦ",
"ଅରିନ୍ଦମ",
"ଅରୁଣ",
"ଅର୍କ",
"ଅର୍ଜୁନ",
"ଅଲେଖ",
"ଅଶୋକ",
"ଅଶ୍ରୁମୋଚନ",
"ଅସୀତ",
"ଆକାଶ",
"ଆକୁଳାନନ୍ଦ",
"ଆଦିତ୍ୟ",
"ଆନନ୍ଦ",
"ଆପଲସ୍ୱାମୀ",
"ଆରତି",
"ଆର୍ଯ୍ୟନ",
"ଆଲୋକ",
"ଆଶ୍ରିତ",
"ଆସଫ",
"ଇତିସ",
"ଇନ୍ଦ୍ରମଣି",
"ଇରାଶିଷ",
"ଇଶ୍ୱର",
"ଉତ୍କଳ",
"ଉତ୍ତମ",
"ଉତ୍ସବ",
"ଉଧାର",
"ଉପେନ୍ଦ୍ର",
"ଉପେନ୍ଦ୍ରନାଥ",
"ଉମାକାନ୍ତ",
"ଉମାବଲ୍ଲଭ",
"ଉମାଶଙ୍କର",
"ଓଡ଼ିଆ",
"ଓମପ୍ରକାଶ",
"ଓମ୍",
"କନକବର୍ଦ୍ଧନ",
"କପିଳ",
"କମଳାକାନ୍ତ",
"କରୁଣାକର",
"କରେନ୍ଦ୍ର",
"କଳିଙ୍ଗ",
"କଳ୍ପତରୁ",
"କହ୍ନେଇ",
"କାଙ୍ଗାଳି",
"କାଙ୍ଗୋଇ",
"କାର୍ତ୍ତିକ",
"କାର୍ତ୍ତିକେଶ୍ୱର",
"କାଳନ୍ଦ
|
ୀ",
"କାଳିଆ",
"କାଳୁଖଣ୍ଡାୟତ",
"କାଶୀନାଥ",
"କାହ୍ନୁ",
"କାହ୍ନୁରାମ",
"କିରଣ",
"କିଶୋରଚନ୍ଦ୍ର",
"କିଶୋରୀମଣି",
"କୁଞ୍ଜବିହାରୀ",
"କୁଣାଳ",
"କୁନା",
"କୁମୁଦ",
"କୁଳମଣି",
"କୃଷ୍ଣ",
"କୃଷ୍ଣଚନ୍ଦ୍ର",
"କେଦାର",
"କେଦାରନାଥ",
"କେଶବ",
"କୈଳାଶ",
"କୈଳାସ",
"କ୍ଷୀରୋଦ",
"କ୍ଷେତ୍ର",
"ଖଗେଶ୍ୱର",
"ଖାରବେଳ",
"ଗଙ୍ଗାଧର",
"ଗଣେଶରାମ",
"ଗଣେଶ୍ୱର",
"ଗଦାଧର",
"ଗିରିଜା",
"ଗିରିଶ",
"ଗିରୀଶ",
"ଗୁରୁ",
"ଗୁରୁକୃଷ୍ଣ",
"ଗୁରୁଚରଣ",
"ଗୈାତମ",
"ଗୋକୁଳାନନ୍ଦ",
"ଗୋପନାରାୟଣ",
"ଗୋପାଳ",
"ଗୋପାଳବଲ୍ଲଭ",
"ଗୋପୀନାଥ",
"ଗୋବିନ୍ଦ",
"ଗୋଲକ",
"ଗୌତମ",
"ଗୌର",
"ଗୌରହରି",
"ଘଣ୍ଟେଶ୍ୱର",
"ଘନଶ୍ୟାମ",
"ଘାସିରାମ",
"ଚକ୍ରଧର",
"ଚକ୍ରମଣି",
"ଚନ୍ଦନ",
"ଚନ୍ଦ୍ରମଣି",
"ଚନ୍ଦ୍ରଶେଖର",
"ଚନ୍ଦ୍ରସେନ",
"ଚିତରଂଜନ",
"ଚିତ୍ତରଞ୍ଜନ",
"ଚିନ୍ତାମଣି",
"ଚିନ୍ମୟ",
"ଚିରଂଜୀବ",
"ଚୈତନ୍ୟ",
"ଛତିଶ",
"ଛୋଟରାୟ",
"ଜଗତେଶ୍ୱର",
"ଜଗଦାନନ୍ଦ",
"ଜଗଦିଶ",
"ଜଗନ୍ନାଥ",
"ଜଗବନ୍ଧୁ",
"ଜନାର୍ଦନ",
"ଜର୍ଜ",
"ଜଲାଲ",
"ଜିତୁ",
"ଜୀବନ",
"ଜୀବନାନନ୍ଦ",
"ଜ୍ଞାନ",
"ଜ୍ୟୋତି",
"ଜ୍ୟୋତିନ୍ଦ୍ର",
"ଜ୍ୟୋତିପ୍ରକାଶ",
"ଜ୍ୟୋତିରିନ୍ଦ୍ର",
"ଜୟକୃଷ୍ଣ",
"ଜୟଦେବ",
"ଜୟନାରାୟଣ",
"ଜୟନ୍ତ",
"ଜୟରାମ",
"ଜୟୀରାମ",
"ଝିନ୍ନ",
"ତନ୍ମୟ",
"ତପନ",
"ତପୁ",
"ତାନସେନ",
"ତାରାପ୍ରସାଦ",
"ତୁଷାରକାନ୍ତି",
"ତ୍ରିନାଥ",
"ତ୍ରିଲୋଚନ",
"ଦାମୋଦର",
"ଦାଶରଥୀ",
"ଦିଗମ୍ବର",
"ଦିନେଶ",
"ଦିବାକରନାଥ",
"ଦିବ୍ୟଶଙ୍କର",
"ଦିଲୀପ",
"ଦିଲ୍ଲୀପ",
"ଦୀନବନ୍ଧୁ",
"ଦୀପକ",
"ଦୀପ୍ତିରଞ୍ଜନ",
"ଦୁଃଖୀରାମ",
"ଦୁଃଶାସନ",
"ଦୁତିଅ",
"ଦୁର୍ଯ୍ୟୋଧନ",
"ଦୁର୍ଲଭ",
"ଦୁଷ୍ମନ୍ତ",
"ଦେବଦାସ",
"ଦେବନାରାୟଣ",
"ଦେବରାଜ",
"ଦେବାଶିଷ",
"ଦେବୀରଞ୍ଜନ",
"ଦେବୁ",
"ଦେବେନ",
"ଦେବେନ୍ଦ୍ର",
"ଦେବେନ୍ଦ୍ରନାଥ",
"ଦେବେଶ",
"ଦୈତାରି",
"ଦୈତାରୀ",
"ଦୋଳଗୋବିନ୍ଦ"
AtteqCom/zsl | src/zsl/db/helpers/sorter.py | Python | mit | 3,601 | 0.003888
"""
:mod:`zsl.db.helpers.sorter`
----------------------------
"""
from __future__ import unicode_literals
from builtins import object, zip
from sqlalchemy import asc, desc
DEFAULT_SORT_ORDER = 'ASC'
# If changed, look at the 'if order == "DESC"' condition in apply_sorter.
class Sorter(object):
"""
Helper class for applying ordering criteria to query.
"""
def __init__(self, sorter, mappings=None):
"""
sorter = {'sortby': string, 'sort': string}
sortby - string of comma-separated column names by which you want to order
sort - string of comma-separated values 'ASC'/'DESC' (order direction) which
set order direction to corresponding columns from sorter['sortby'] string
notes: - if 'sortby' key is not in sorter, no sorting will be applied to query
- if 'sort' key is not in sorter, DEFAULT_SORT_ORDER will be applied to
all columns from sorter['sortby']
- if sorter['sort'] == 'ASC' / 'DESC' (contains only one order direction),
this direction will be applied to all columns from sorter['sortby']
- if you want to order only by one column, simply put
sorter['sortby'] = '<column_name>' - without comma at the end of
the string
mappings dict - maps column names from sorter['sortby'] to column attributes names of
objects (see example)
                   - if a column name from sorter['sortby'] is equal to the name
                     of the column attribute, it doesn't have to be mentioned in mappings
Example:
sorter = {'sortby': 'firstname,state,sport', 'sort': 'ASC'}
mappings = {
'state': (State, 'name_sk'),
'sport': (Sport, 'name'),
}
"""
        if mappings is None:
            mappings = {}
if 'sortby' in sorter:
self._fields = sorter['sortby'].split(',')
if 'sort' in sorter:
self._orders = sorter['sort'].split(',')
if len(self._orders) == 1:
self._orders *= len(self._fields)
elif len(self._orders) != len(self._fields):
                raise Exception(
                    'zsl.db.helpers.Sorter: Number of order settings is neither zero, one, '
                    'nor equal to the number of sortby columns.')
else:
self._orders = [DEFAULT_SORT_ORDER] * len(self._fields)
self._enabled = True
else:
self._enabled = False
self._mappings = mappings
def is_enabled(self):
return self._enabled
def get_fields(self):
return self._fields
def get_orders(self):
return self._orders
def apply_sorter(self, q, cls):
if self.is_enabled():
sorter_settings = []
for field, order in zip(self.get_fields(), self.get_orders()):
if field in self._mappings:
(cls, mapped_field) = self._mappings[field]
attr = getattr(cls, mapped_field)
else:
attr = getattr(cls, field)
if order == "DESC": # If changed, look at the DEFAULT_SORT_ORDER definition.
sorter_settings.append(desc(attr))
else:
sorter_settings.append(asc(attr))
return q.order_by(*sorter_settings)
else:
return q
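
# --- Usage sketch (illustrative, not part of the original module). ---
# Assumes SQLAlchemy is installed; `User` is a stand-in model defined only
# for this demo.
if __name__ == '__main__':
    from sqlalchemy import Column, Integer, String, create_engine
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import sessionmaker

    Base = declarative_base()

    class User(Base):
        __tablename__ = 'users'
        id = Column(Integer, primary_key=True)
        firstname = Column(String)

    session = sessionmaker(bind=create_engine('sqlite://'))()
    sorter = Sorter({'sortby': 'firstname', 'sort': 'DESC'})
    # Emits: SELECT ... FROM users ORDER BY users.firstname DESC
    print(sorter.apply_sorter(session.query(User), User))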
gundalow/ansible-modules-core | network/nxos/nxos_udld_interface.py | Python | gpl-3.0 | 15,900 | 0.001195
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: nxos_udld_interface
version_added: "2.2"
short_description: Manages UDLD interface configuration params.
description:
- Manages UDLD interface configuration params.
extends_documentation_fragment: nxos
author:
- Jason Edelman (@jedelman8)
notes:
- Feature UDLD must be enabled on the device to use this module.
options:
mode:
description:
- Manages UDLD mode for an interface.
required: true
choices: ['enabled','disabled','aggressive']
interface:
description:
            - FULL name of the interface, i.e. Ethernet1/1.
required: true
state:
description:
- Manage the state of the resource.
required: false
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
# ensure Ethernet1/1 is configured to be in aggressive mode
- nxos_udld_interface:
interface: Ethernet1/1
mode: aggressive
state: present
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
# Remove the aggressive config only if it's currently in aggressive mode and then disable udld (switch default)
- nxos_udld_interface:
interface: Ethernet1/1
mode: aggressive
state: absent
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
# ensure Ethernet1/1 has aggressive mode enabled
- nxos_udld_interface:
interface: Ethernet1/1
mode: enabled
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"mode": "enabled"}
existing:
description:
- k/v pairs of existing configuration
type: dict
sample: {"mode": "aggressive"}
end_state:
description: k/v pairs of configuration after module execution
returned: always
type: dict
sample: {"mode": "enabled"}
updates:
description: command sent to the device
returned: always
type: list
sample: ["interface ethernet1/33",
"no udld aggressive ; no udld disable"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
import json
# COMMON CODE FOR MIGRATION
import re
from ansible.module_utils.basic import get_exception
from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
from ansible.module_utils.shell import ShellError
try:
from ansible.module_utils.nxos import get_module
except ImportError:
from ansible.module_utils.nxos import NetworkModule
def to_list(val):
if isinstance(val, (list, tuple)):
return list(val)
elif val is not None:
return [val]
else:
return list()
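# e.g. (a sketch of the helper above):
#   to_list('x') -> ['x'],  to_list((1, 2)) -> [1, 2],  to_list(None) -> []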
class CustomNetworkConfig(NetworkConfig):
def expand_section(self, configobj, S=None):
if S is None:
S = list()
S.append(configobj)
for child in configobj.children:
if child in S:
continue
self.expand_section(child, S)
return S
def get_object(self, path):
for item in self.items:
if item.text == path[-1]:
parents = [p.text for p in item.parents]
if parents == path[:-1]:
return item
def to_block(self, section):
return '\n'.join([item.raw for item in section])
def get_section(self, path):
try:
section = self.get_section_objects(path)
return self.to_block(section)
except ValueError:
return list()
def get_section_objects(self, path):
if not isinstance(path, list):
path = [path]
obj = self.get_object(path)
if not obj:
raise ValueError('path does not exist in config')
return self.expand_section(obj)
def add(self, lines, parents=None):
"""Adds one or lines of configuration
"""
ancestors = list()
offset = 0
obj = None
## global config command
if not parents:
for line in to_list(lines):
item = ConfigLine(line)
item.raw = line
if item not in self.items:
self.items.append(item)
else:
for index, p in enumerate(parents):
try:
i = index + 1
obj = self.get_section_objects(parents[:i])[0]
ancestors.append(obj)
except ValueError:
# add parent to config
offset = index * self.indent
obj = ConfigLine(p)
obj.raw = p.rjust(len(p) + offset)
if ancestors:
obj.parents = list(ancestors)
ancestors[-1].children.append(obj)
self.items.append(obj)
ancestors.append(obj)
# add child objects
for line in to_list(lines):
# check if child already exists
for child in ancestors[-1].children:
if child.text == line:
break
else:
offset = len(parents) * self.indent
item = ConfigLine(line)
item.raw = line.rjust(len(line) + offset)
item.parents = ancestors
ancestors[-1].children.append(item)
self.items.append(item)
def get_network_module(**kwargs):
try:
return get_module(**kwargs)
except NameError:
return NetworkModule(**kwargs)
def get_config(module, include_defaults=False):
config = module.params['config']
if not config:
try:
config = module.get_config()
except AttributeError:
defaults = module.params['include_defaults']
config = module.config.get_config(include_defaults=defaults)
return CustomNetworkConfig(indent=2, contents=config)
def load_config(module, candidate):
config = get_config(module)
commands = candidate.difference(config)
commands = [str(c).strip() for c in commands]
save_config = module.params['save']
result = dict(changed=False)
if commands:
if not module.check_mode:
try:
module.configure(commands)
except AttributeError:
module.config(commands)
if save_config:
try:
module.config.save_config()
except AttributeError:
module.execute(['copy running-config startup-config'])
result['changed'] = True
result['updates'] = commands
return result
# END OF COMMON CODE
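# Illustrative flow (a sketch, not from the original module): build a
# candidate config, diff it against the device, and push only the delta:
#
#   candidate = CustomNetworkConfig(indent=2)
#   candidate.add(['udld aggressive'], parents=['interface Ethernet1/1'])
#   result = load_config(module, candidate)
#   result.get('updates')  # commands actually sent (present only on change)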
def execute_config_command(commands, module):
try:
module.configure(commands)
except ShellError:
clie = get_exception()
        module.fail_json(msg='Error sending CLI commands',
                         error=str(clie), commands=commands)
except AttributeError:
try:
commands.insert(0, 'configure')
module.cli.add_commands(commands, output='config')
module.cli.run_commands()
except ShellError:
            clie = get_exception()
Star2Billing/cdr-stats | cdr_stats/import_cdr/models.py | Python | mpl-2.0 | 2,641 | 0.002272
#
# CDR-Stats License
# http://www.cdr-stats.org
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (C) 2011-2015 Star2Billing S.L.
#
# The Initial Developer of the Original Code is
# Arezqui Belaid <info@star2billing.com>
#
from __future__ import unicode_literals
from django.db import models
from postgres.fields import json_field
class CDRImport(models.Model):
id = models.AutoField(primary_key=True)
switch = models.CharField(max_length=80)
cdr_source_type = models.IntegerField(blank=True, null=True)
callid = models.CharField(max_length=80)
caller_id_number = models.CharField(max_length=80)
caller_id_name = models.CharField(max_length=80)
destination_number = models.CharField(max_length=80)
dialcode = models.CharField(max_length=10, blank=True)
state = models.CharField(max_length=5, blank=True)
channel = models.CharField(max_length=80, blank=True)
starting_date = models.DateTimeField()
duration = models.IntegerField()
billsec = models.IntegerField()
progresssec = models.IntegerField(blank=True, null=True)
    answersec = models.IntegerField(blank=True, null=True)
waitsec = models.IntegerField(blank=True, null=True)
hangup_cause_id = models.IntegerField(blank=True, null=True)
hangup_cause = models.CharField(max_length=80, blank=True)
    direction = models.IntegerField(blank=True, null=True)
country_code = models.CharField(max_length=3, blank=True)
accountcode = models.CharField(max_length=40, blank=True)
buy_rate = models.DecimalField(max_digits=10, decimal_places=5, blank=True, null=True)
buy_cost = models.DecimalField(max_digits=12, decimal_places=5, blank=True, null=True)
sell_rate = models.DecimalField(max_digits=10, decimal_places=5, blank=True, null=True)
sell_cost = models.DecimalField(max_digits=12, decimal_places=5, blank=True, null=True)
imported = models.BooleanField()
# Postgresql >= 9.4 Json field
extradata = json_field.JSONField(blank=True)
def __unicode__(self):
return '[%s] %s - dur:%d' % (self.id, self.destination_number, self.duration)
class Meta:
# Remove `managed = False` lines if you wish to allow Django to create, modify,
# and delete the table
managed = False
verbose_name = "CDR Import"
verbose_name_plural = "CDRs Import"
db_table = 'cdr_import'
# Manually selecting a database for a QuerySet
# CDRImport.objects.using('import_cdr').all()
j831/zulip | zerver/tests/test_bots.py | Python | apache-2.0 | 34,708 | 0.001441
from __future__ import absolute_import
from __future__ import print_function
import filecmp
import os
import ujson
from django.core import mail
from django.http import HttpResponse
from django.test import override_settings
from mock import patch
from typing import Any, Dict, List
from zerver.lib.actions import do_change_stream_invite_only
from zerver.models import get_realm, get_stream, \
Realm, Stream, UserProfile, get_user
from zerver.lib.test_classes import ZulipTestCase, UploadSerializeMixin
from zerver.lib.test_helpers import (
avatar_disk_path, get_test_image_file, tornado_redirected_to_list,
)
class BotTest(ZulipTestCase, UploadSerializeMixin):
def assert_num_bots_equal(self, count):
# type: (int) -> None
result = self.client_get("/json/bots")
self.assert_json_success(result)
json = ujson.loads(result.content)
self.assertEqual(count, len(json['bots']))
def create_bot(self, **extras):
# type: (**Any) -> Dict[str, Any]
bot_info = {
'full_name': 'The Bot of Hamlet',
'short_name': 'hambot',
}
bot_info.update(extras)
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
return ujson.loads(result.content)
def test_bot_domain(self):
# type: () -> None
self.login(self.example_email('hamlet'))
self.create_bot()
self.assertTrue(UserProfile.objects.filter(email='hambot-bot@zulip.testserver').exists())
# The other cases are hard to test directly, since we don't allow creating bots from
# the wrong subdomain, and because 'testserver.example.com' is not a valid domain for the bot's email.
        # So we just test the Realm.get_bot_domain function.
realm = get_realm('zulip')
with self.settings(REALMS_HAVE_SUBDOMAINS=True):
self.assertEqual(realm.get_bot_domain(), 'zulip.testserver')
Realm.objects.exclude(string_id='zulip').update(deactivated=True)
self.assertEqual(realm.get_bot_domain(), 'testserver')
def deactivate_bot(self):
# type: () -> None
result = self.client_delete("/json/bots/hambot-bot@zulip.testserver")
self.assert_json_success(result)
def test_add_bot_with_bad_username(self):
# type: () -> None
self.login(self.example_email('hamlet'))
self.assert_num_bots_equal(0)
bot_info = dict(
full_name='My bot name',
short_name='@',
)
result = self.client_post("/json/bots", bot_info)
self.assert_json_error(result, 'Bad name or username')
self.assert_num_bots_equal(0)
def test_add_bot_with_no_name(self):
# type: () -> None
self.login(self.example_email('hamlet'))
self.assert_num_bots_equal(0)
bot_info = dict(
full_name='a',
short_name='bot',
)
result = self.client_post("/json/bots", bot_info)
self.assert_json_error(result, 'Name too short!')
self.assert_num_bots_equal(0)
def test_add_bot(self):
# type: () -> None
self.login(self.example_email('hamlet'))
self.assert_num_bots_equal(0)
events = [] # type: List[Dict[str, Any]]
with tornado_redirected_to_list(events):
result = self.create_bot()
self.assert_num_bots_equal(1)
email = 'hambot-bot@zulip.testserver'
realm = get_realm('zulip')
        bot = get_user(email, realm)
event = [e for e in events if e['event']['type'] == 'realm_bot'][0]
self.assertEqual(
dict(
type='realm_bot',
op='add',
bot=dict(email='hambot-bot@zulip.testserver',
user_id=bot.id,
                     full_name='The Bot of Hamlet',
is_active=True,
api_key=result['api_key'],
avatar_url=result['avatar_url'],
default_sending_stream=None,
default_events_register_stream=None,
default_all_public_streams=False,
owner=self.example_email('hamlet'))
),
event['event']
)
users_result = self.client_get('/json/users')
members = ujson.loads(users_result.content)['members']
bots = [m for m in members if m['email'] == 'hambot-bot@zulip.testserver']
self.assertEqual(len(bots), 1)
bot = bots[0]
self.assertEqual(bot['bot_owner'], self.example_email('hamlet'))
self.assertEqual(bot['user_id'], get_user(email, realm).id)
def test_add_bot_with_username_in_use(self):
# type: () -> None
self.login(self.example_email('hamlet'))
self.assert_num_bots_equal(0)
result = self.create_bot()
self.assert_num_bots_equal(1)
bot_info = dict(
full_name='Duplicate',
short_name='hambot',
)
result = self.client_post("/json/bots", bot_info)
self.assert_json_error(result, 'Username already in use')
def test_add_bot_with_user_avatar(self):
# type: () -> None
email = 'hambot-bot@zulip.testserver'
realm = get_realm('zulip')
self.login(self.example_email('hamlet'))
self.assert_num_bots_equal(0)
with get_test_image_file('img.png') as fp:
self.create_bot(file=fp)
profile = get_user(email, realm)
            # Make sure the avatar image we've uploaded matches the avatar image on the server
self.assertTrue(filecmp.cmp(fp.name,
os.path.splitext(avatar_disk_path(profile))[0] +
".original"))
self.assert_num_bots_equal(1)
self.assertEqual(profile.avatar_source, UserProfile.AVATAR_FROM_USER)
self.assertTrue(os.path.exists(avatar_disk_path(profile)))
def test_add_bot_with_too_many_files(self):
# type: () -> None
self.login(self.example_email('hamlet'))
self.assert_num_bots_equal(0)
with get_test_image_file('img.png') as fp1, \
get_test_image_file('img.gif') as fp2:
bot_info = dict(
full_name='whatever',
short_name='whatever',
file1=fp1,
file2=fp2,
)
result = self.client_post("/json/bots", bot_info)
self.assert_json_error(result, 'You may only upload one file at a time')
self.assert_num_bots_equal(0)
def test_add_bot_with_default_sending_stream(self):
# type: () -> None
email = 'hambot-bot@zulip.testserver'
realm = get_realm('zulip')
self.login(self.example_email('hamlet'))
self.assert_num_bots_equal(0)
result = self.create_bot(default_sending_stream='Denmark')
self.assert_num_bots_equal(1)
self.assertEqual(result['default_sending_stream'], 'Denmark')
profile = get_user(email, realm)
self.assertEqual(profile.default_sending_stream.name, 'Denmark')
def test_add_bot_with_default_sending_stream_not_subscribed(self):
# type: () -> None
email = 'hambot-bot@zulip.testserver'
realm = get_realm('zulip')
self.login(self.example_email('hamlet'))
self.assert_num_bots_equal(0)
result = self.create_bot(default_sending_stream='Rome')
self.assert_num_bots_equal(1)
self.assertEqual(result['default_sending_stream'], 'Rome')
profile = get_user(email, realm)
self.assertEqual(profile.default_sending_stream.name, 'Rome')
def test_bot_add_subscription(self):
# type: () -> None
"""
        Calling POST /json/users/me/subscriptions should successfully add
        streams to the list of subscriptions and confirm that the right
        number of events are generated.
When 'principals' has a bot, no notification message event or invitation email
is sent when add_subscriptions_backend is called in the
ilique/webpushkin | pushkin/migrations/0025_auto_20160616_1637.py | Python | mit | 495 | 0.00202
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-06-16 16:37
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pushkin', '0024_authparam_secret'),
]
operations = [
migrations.AlterField(
model_name='command',
name='arguments',
field=models.ManyToManyField(blank=True, null=True, to='pushkin.CommandArgument'),
),
]
Micronaet/micronaet-migration | script6_7/migrate.py | Python | agpl-3.0 | 52,936 | 0.026269
#!/usr/bin/python
# coding=utf-8
###############################################################################
#
# Micronaet S.r.l., Migration script for PostgreSQL
# Copyright (C) 2002-2013 Micronaet SRL (<http://www.micronaet.it>).
# All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
import sys
import os
import ConfigParser
from openerp.openerp import server
from openerp.objects import table
import pdb
# -----------------------------------------------------------------------------
# Set up parameters (for connection to Open ERP Database)
# -----------------------------------------------------------------------------
config = ConfigParser.ConfigParser()
config_file = os.path.expanduser(
os.path.join("~", "etl", "minerals", "openerp.6.cfg"))
config.read([config_file])
host6 = config.get('dbaccess', 'server')
dbname6 = config.get('dbaccess', 'dbname')
orm_user6 = config.get('dbaccess', 'user')
orm_pwd6 = config.get('dbaccess', 'pwd')
orm_port6 = config.get('dbaccess', 'port')
pg_user6 = config.get('postgres', 'user')
pg_pwd6 = config.get('postgres', 'pwd')
pg_port6 = config.get('postgres', 'port')
verbose6 = config.get('import_mode', 'verbose')
config_file = os.path.expanduser(
os.path.join("~", "etl", "minerals", "openerp.7.cfg"))
config.read([config_file])
host7 = config.get('dbaccess', 'server')
dbname7 = config.get('dbaccess', 'dbname')
orm_user7 = config.get('dbaccess', 'user')
orm_pwd7 = config.get('dbaccess', 'pwd')
orm_port7 = config.get('dbaccess', 'port')
pg_user7 = config.get('postgres', 'user')
pg_pwd7 = config.get('postgres', 'pwd')
pg_port7 = config.get('postgres', 'port')
verbose7 = config.get('import_mode', 'verbose')
# Tables v. 60:
o6 = server(
dbname = dbname6, host = host6, verbose = verbose6,
orm_user = orm_user6, orm_password = orm_pwd6, orm_port = orm_port6,
pg_user = pg_user6, pg_password = pg_pwd6, pg_port = pg_port6)
# Tables v. 70:
o7 = server(
dbname = dbname7, host = host7, verbose = verbose7,
orm_user = orm_user7, orm_password = orm_pwd7, orm_port = orm_port7,
pg_user = pg_user7, pg_password = pg_pwd7, pg_port = pg_port7)
# Database dictionary for convert elements:
berkeley_tables = {}
# -----------------------------------------------------------------------------
# Migration tables
# -----------------------------------------------------------------------------
print "*" * 50, "\n", " " * 12, "Start Migration:\n", "*" * 50
# **************************** Useful objects *********************************
# Objects that are linked into almost all other objects
# Usually manually linked or only_create elements
# *****************************************************************************
# Operation: manually mapping
# Particularity: 1st static association
res_company = table(
name = 'res.company',
key = 'name',
o6 = o6,
o7 = o7,
mapping_databases = berkeley_tables,
static = {
1: 1, # Default
},
)
#res_company.migrate()
# Operation: manually mapping
res_users = table(
name = 'res.users',
key = 'name',
o6 = o6,
o7 = o7,
mapping_databases = berkeley_tables,
static = {
1: 1, # admin
3: 6, # ravelli
4: 9, # armando
5: 10, # vittoriana
6: 8, # alberto
},
)
#res_users.migrate()
# Operation: manually mapping
account_analytic_journal = table(
name = 'account.analytic.journal',
key = 'name',
o6 = o6,
o7 = o7,
mapping_databases = berkeley_tables,
static = {
1: 1, # default
},
)
#account_analytic_journal.migrate()
# Operation: manually mapping
#
# |==> account.account.template >> on property_account_expense_categ type many2one
# |==> account.account.template >> on property_account_expense type many2one
# |==> account.account.template >> on property_account_receivable type many2one
# |==> account.account.template >> on property_account_payable type many2one
# |==> account.account.template >> on property_reserve_and_surplus_account type many2one
# |==> account.tax.code.template >> on tax_code_root_id type many2one
# |==> account.account.template >> on property_account_income_categ type many2one
# |==> account.account.template >> on property_account_income type many2one
# |==> account.account.template >> on bank_account_view_id type many2one
# |==> account.account.template >> on account_root_id type many2one
#
# | > (account.tax.template >> on tax_template_ids type one2many)
account_chart_template = table(
name = 'account.chart.template',
key = 'name',
o6 = o6,
o7 = o7,
mapping_databases = berkeley_tables,
static = {
1: 2, # Italian Chart
},
)
#account_chart_template.migrate()
# Operation: mapping ID
#
# |==> stock.location >> on lot_output_id type many2one
# |==> stock.location >> on lot_stock_id type many2one
# |==> res.partner.address >> on partner_address_id type many2one
# |==> res.company >> on company_id type many2one
# |==> stock.location >> on lot_input_id type many2one
stock_warehouse = table(
name = 'stock.warehouse',
key = 'name',
o6 = o6,
o7 = o7,
mapping_databases = berkeley_tables,
static = {
1: 1, # Company
},
)
#stock_warehouse.migrate()
# Operation: manually mapping
account_fiscal_position = table(
name = 'account.fiscal.position',
key = 'name',
o6 = o6,
o7 = o7,
mapping_databases = berkeley_tables,
static = {
1: 1, # Italian
2: 2, # Extra CEE
3: 3, # Intra CEE
},
)
#account_fiscal_position.migrate()
# Operation: manually mapping
# |==> account.chart.template >> on chart_template_id type many2one
#
# | > (account.fiscal.position.tax.template >> on tax_ids type one2many)
# | > (account.fiscal.position.account.template >> on account_ids type one2many)
account_fiscal_position_template = table(
name = 'account.fiscal.position.template',
key = 'name',
o6 = o6,
o7 = o7,
mapping_databases = berkeley_tables,
static = {
1: 3, # Italian
2: 4, # Extra CEE
3: 5, # Intra CEE
},
)
#account_fiscal_position_template.migrate()
# Operation: association ID (simple obj)
# Particularity: manual mapping for no key
#
account_account_type = table(
name = 'account.account.type',
key = 'code', # not true!! (see asset)
o6 = o6,
o7 = o7,
mapping_databases = berkeley_tables,
static = {
        1: 2, # receivable
2: 3, # payable
3: 1, # view
4: 8, # income
5: 9, # expense
        6: 14, # tax
        7: 5, # cash
        8: 6, # asset
        9: 4, # bank
10: 15, # equity
},
)
#account_account_type.migrate()
# Operation: manually mapping
#
# | > (account.payment.term.line >> on line_ids type one2many)
account_payment_term = table(
name = 'account.payment.term',
key = 'name',
o6 = o6,
o7 = o7,
mapping_databases = berkeley_tables,
static = {
1: 3, # Default
},
)
#account_payment_term.migrate()
# Operation: manually mapping
# |==> res.currency
LukeBaal/PublicProjects | python tree diagrams/bin_tree.py | Python | mit | 3,330 | 0.009309
import pygame as pyg  # assumed dependency: the code below uses pyg.font and pyg.draw

pyg.init()  # pyg.font must be initialised before Tree.FONT is defined below

BLACK = [0, 0, 0]  # line colour used by Tree.draw


class Leaf():
def __init__(self, screen, xy_pos=[0, 0], value="", radius=15, color=[0, 0, 0]):
self.screen = screen
self.color = color
self.value = value
self.radius = radius
self.width = 1
        self.x = xy_pos[0]
        self.y = xy_pos[1]
self.top_y = self.y - self.radius
self.bot_y = self.y + self.radius
self.left = None
self.right = None
# ----------------------SET METHODS-------------------------
def set_color(self, color):
self.color = color
def set_left(self, left):
self.left = left
def set_right(self, right):
self.right = right
# ----------------------GET METHODS--------------------
def get_value(self):
return self.value
def get_left(self):
return self.left
def get_right(self):
return self.right
def get_pos(self):
return self.x, self.y
def get_x(self):
return self.x
def get_top_y(self):
return self.top_y
class Tree():
root = None
active = None
leaf_list = []
# Font info
FONT = pyg.font.SysFont('Calibri', 22, True, False)
def text(self, txt="", color=[0, 0, 0]):
return self.FONT.render(txt, True, color)
    def __init__(self, screen=None):
        self.screen = screen  # surface handed to new leaves created by add()
        self.root = None
        self.active = None
        self.leaf_list = []  # per-instance list (the class-level one is shared)
def is_empty(self):
return self.root is None
def set_active(self, leaf):
self.active = leaf
def get_active(self):
return self.active
def get_leaf_list(self):
return self.leaf_list
def add(self, xy=[0, 0], value=""):
# If key from numpad, ignore square brackets
if value[0] == "[":
value = value[1].upper()
else:
value = value[0].upper()
        # Initialize new Leaf object and append it to leaf_list
        leaf = Leaf(self.screen, xy, value)
        self.leaf_list.append(leaf)
        # The first leaf becomes both the root and the active leaf
        if self.root is None:
            self.root = leaf
            self.active = leaf
            return
# Compare x coords to determine if new leaf is
# the left or right child of the active leaf
if self.active.get_x() >= leaf.get_x() and self.active.get_left() is None:
self.active.set_left(leaf)
elif self.active.get_right() is None:
self.active.set_right(leaf)
# Print all leaves
def draw(self):
for leaf in self.leaf_list:
pyg.draw.circle(leaf.screen, leaf.color, [leaf.x, leaf.y], leaf.radius, leaf.width)
            leaf.screen.blit(self.text(leaf.value, leaf.color), [leaf.x - 5, leaf.y - 7])
            if leaf.left is not None:
                pyg.draw.line(leaf.screen, BLACK, [leaf.left.get_x(), leaf.left.get_top_y()], [leaf.x, leaf.bot_y], 1)
            if leaf.right is not None:
                pyg.draw.line(leaf.screen, BLACK, [leaf.right.get_x(), leaf.right.get_top_y()], [leaf.x, leaf.bot_y], 1)
    def to_string(self, cursor=None, out=""):
        # Default arguments cannot reference self, so the root is resolved here.
        if self.is_empty():
            return out
        if cursor is None:
            cursor = self.root
        if cursor.get_left() is not None:
            out += "(" + self.to_string(cursor.get_left())
        out += cursor.get_value()
        if cursor.get_right() is not None:
            out += self.to_string(cursor.get_right()) + ")"
        return out
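
if __name__ == '__main__':
    # Minimal demo (a sketch): render a three-leaf tree in a pygame window.
    # Coordinates and values are arbitrary demo data.
    screen = pyg.display.set_mode([400, 300])
    tree = Tree(screen)
    tree.add([200, 50], "a")   # root
    tree.add([120, 130], "b")  # left child (x <= root x)
    tree.add([280, 130], "c")  # right child
    screen.fill([255, 255, 255])
    tree.draw()
    pyg.display.flip()
    print(tree.to_string())    # -> (BAC)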
vakila/de-stress | testproject/testapp/migrations/0001_initial.py | Python | mit | 1,334 | 0.002249
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Exercise',
fields=[
('id', models.AutoField(serialize=False, auto_created=True, help_text='', verbose_name='ID', primary_key=True)),
('name', models.CharField(max_length=20, help_text='')),
('text', models.CharField(max_length=200, help_text='')),
('date_created', models.DateTimeField(help_text='')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Lesson',
fields=[
('id', models.AutoField(serialize=False, auto_created=True, help_text='', verbose_name='ID', primary_key=True)),
('name', models.CharField(max_length=20, help_text='')),
('description', models.TextField(help_text='')),
('date_created', models.DateTimeField(help_text='')),
                ('exercises', models.ManyToManyField(help_text='', to='testapp.Exercise')),
],
options={
},
bases=(models.Model,),
),
]
miku/siskin | siskin/test_conversions.py | Python | gpl-3.0 | 11,642 | 0.001117
import json
import pymarc
from siskin.conversions import (de_listify, imslp_xml_to_marc, osf_to_intermediate)
def test_imslp_xml_to_marc():
example = """<?xml version="1.0"?>
<document docID="imslpvalsskramstadhans">
<localClass localClassName="col">imslp</localClass>
<localClass localClassName="vifa">vifamusik</localClass>
<identifier identifierEncodingSchema="originalID">valsskramstadhans</identifier>
<creator>
<mainForm>Skramstad, Hans</mainForm>
</creator>
<title>Vals for pianoforte</title>
<subject>
<mainForm>Romantic</mainForm>
</subject>
<music_arrangement_of>Piano</music_arrangement_of>
<url urlEncodingSchema="originalDetailView">http://imslp.org/wiki/Vals_(Skramstad,_Hans)</url>
<vifatype>Internetressource</vifatype>
<fetchDate>2018-04-25T00:00:00.01Z</fetchDate>
<vifaxml><![CDATA[<document docID="imslpvalsskramstadhans"><localClass
localClassName="col">imslp</localClass><localClass
localClassName="vifa">vifamusik</localClass><identifier
identifierEncodingSchema="originalID">valsskramstadhans</identifier><creator><mainForm>Skramstad,
Hans</mainForm></creator><title>Vals for
pianoforte</title><subject><mainForm>Romantic</mainForm></subject><music_arrangement_of>Piano</music_arrangement_of><url
urlEncodingSchema="originalDetailView">http://imslp.org/wiki/Vals_(Skramstad,_Hans)</url><vifatype>Internetressource</vifatype></document>]]></vifaxml>
</document>
"""
result = imslp_xml_to_marc(example)
assert result is not None
assert isinstance(result, pymarc.Record)
assert result["001"].value() == "finc-15-dmFsc3NrcmFtc3RhZGhhbnM"
assert result["100"]["a"] == "Skramstad, Hans"
assert result["245"]["a"] == "Vals for pianoforte"
assert result["856"]["u"] == "http://imslp.org/wiki/Vals_(Skramstad,_Hans)"
def test_de_listify():
cases = (
(None, None),
("", ""),
([], None),
({1, 2, 3}, 1),
([1, 2, 3], 1),
)
for v, expected in cases:
assert de_listify(v) == expected
def test_osf_to_intermediate():
cases = (
(None, None),
({}, None),
(json.loads("""
{
"id": "egcsk",
"type": "preprints",
"attributes": {
"date_created": "2021-07-19T07:32:33.252615",
"date_modified": "2021-07-19T07:42:12.725457",
"date_published": "2021-07-19T07:41:43.501204",
"original_publication_date": "2021-02-28T17:00:00",
"doi": null,
"title": "Konsep Allah Dalam Teologi Proses",
"description": "Penulisan karya ilmiah ini dikhususkan untuk membahas mengenai Allah yang dirumuskan dalam teologi proses, yang dicetuskan oleh Alfred Whitehead. Dalam bagian bagian ini penulis menyajikan konsep Allah dalam teologi proses dan bagaimana tanggapan terhadap konsep tersebut secara Alkitabiah Metode penelitian, penulis menggunakan pendekatan metode penelitian kualitatif analisis deskriptif, dengan pendekatan literatur dan tergolong dalam penelitian perpustakaan. Konsep Allah menurut teologi proses adalah Allah yang berproses, tidak berpribadi dan tidak memiliki kedaulatan absolut. Namun pandangan tentang Allah dalam teologi proses adalah suatu kumpulan pengalaman pribadi dan dijadikan sebagai suatu konsep dalam pemikiran manusia. Tanggapan tersebut menunjukan perbandingan dari pola pikir teologi proses mengenai Allah y
|
ang menyimpang dan mengarahkan seseorang dalam memahami konsep Allah yang benar sesuai dengan pernyataan Allah m",
"is_published": true,
"is_preprint_orphan": false,
"license_record": {
"copyright_holders": [
""
],
"year":
|
"2021"
},
"tags": [
"Gambar",
"Respon",
"Teologi Proses",
"Tuhan"
],
"preprint_doi_created": "2021-07-19T07:42:12.695116",
"date_withdrawn": null,
"current_user_permissions": [],
"public": true,
"reviews_state": "accepted",
"date_last_transitioned": "2021-07-19T07:41:43.501204",
"has_coi": false,
"conflict_of_interest_statement": null,
"has_data_links": "no",
"why_no_data": null,
"data_links": [],
"has_prereg_links": "no",
"why_no_prereg": null,
"prereg_links": [],
"prereg_link_info": "",
"subjects": [
[
{
"id": "584240da54be81056cecaab4",
"text": "Arts and Humanities"
},
{
"id": "584240da54be81056cecaa9c",
"text": "Religion"
},
{
"id": "584240da54be81056cecaaf5",
"text": "Christianity"
}
]
]
},
"relationships": {
"contributors": {
"links": {
"related": {
"href": "https://api.osf.io/v2/preprints/egcsk/contributors/",
"meta": {}
}
}
},
"bibliographic_contributors": {
"links": {
"related": {
"href": "https://api.osf.io/v2/preprints/egcsk/bibliographic_contributors/",
"meta": {}
}
}
},
"citation": {
"links": {
"related": {
"href": "https://api.osf.io/v2/preprints/egcsk/citation/",
"meta": {}
}
},
"data": {
"id": "egcsk",
"type": "preprints"
}
},
"identifiers": {
"links": {
"related": {
"href": "https://api.osf.io/v2/preprints/egcsk/identifiers/",
"meta": {}
}
}
},
"node": {
"links": {
"related": {
"href": "https://api.osf.io/v2/nodes/uka4p/",
"meta": {}
},
"self": {
"href": "https://api.osf.io/v2/preprints/egcsk/relationships/node/",
"meta": {}
}
},
"data": {
"id": "uka4p",
"type": "nodes"
}
},
"license": {
"links": {
"related": {
"href": "https://api.osf.io/v2/licenses/563c1cf88c5e4a3877f9e96a/",
"meta": {}
}
},
"data": {
"id": "563c1cf88c5e4a3877f9e96a",
"type": "licenses"
}
},
"provider": {
"links": {
"related": {
"href": "https://api.osf.io/v2/providers/preprints/osf/",
"meta": {}
}
},
"data": {
"id": "osf",
"type": "preprint-providers"
}
},
"files": {
"links": {
"related": {
"href": "https://api.osf.io/v2/preprints/egcsk/files/",
"meta": {}
carvalhomb/tsmells | lib/Cheetah/src/Parser.py | Python | gpl-2.0 | 101,244 | 0.006568
#!/usr/bin/env python
# $Id: Parser.py,v 1.135 2007/11/16 18:26:01 tavis_rudd Exp $
"""Parser classes for Cheetah's Compiler
Classes:
ParseError( Exception )
_LowLevelParser( Cheetah.SourceReader.SourceReader ), basically a lexer
_HighLevelParser( _LowLevelParser )
Parser === _HighLevelParser (an alias)
Meta-Data
================================================================================
Author: Tavis Rudd <tavis@damnsimple.com>
Version: $Revision: 1.135 $
Start Date: 2001/08/01
Last Revision Date: $Date: 2007/11/16 18:26:01 $
"""
__author__ = "Tavis Rudd <tavis@damnsimple.com>"
__revision__ = "$Revision: 1.135 $"[11:-2]
import os
import sys
import re
from re import DOTALL, MULTILINE
from types import StringType, ListType, TupleType, ClassType, TypeType
import time
from tokenize import pseudoprog
import inspect
import new
import traceback
from Cheetah.SourceReader import SourceReader
from Cheetah import Filters
from Cheetah import ErrorCatchers
from Cheetah.Unspecified import Unspecified
from Cheetah.Macros.I18n import I18n
# re tools
_regexCache = {}
def cachedRegex(pattern):
if pattern not in _regexCache:
_regexCache[pattern] = re.compile(pattern)
return _regexCache[pattern]
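# e.g. (a sketch): repeated calls reuse one compiled pattern object:
#   cachedRegex(r'\d+') is cachedRegex(r'\d+')   # -> True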
def escapeRegexChars(txt,
escapeRE=re.compile(r'([\$\^\*\+\.\?\{\}\[\]\(\)\|\\])')):
"""Return a txt with all special regular expressions chars escaped."""
return escapeRE.sub(r'\\\1' , txt)
def group(*choices): return '(' + '|'.join(choices) + ')'
def nongroup(*choices): return '(?:' + '|'.join(choices) + ')'
def namedGroup(name, *choices): return '(?P<' + name + '>' + '|'.join(choices) + ')'
def any(*choices): return apply(group, choices) + '*'
def maybe(*choices): return apply(group, choices) + '?'
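# For example, the helpers above expand like so:
#   group('a', 'b')           -> '(a|b)'
#   nongroup('a', 'b')        -> '(?:a|b)'
#   namedGroup('x', 'a', 'b') -> '(?P<x>a|b)'
#   any('a', 'b')             -> '(a|b)*'
#   maybe('a')                -> '(a)?'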
##################################################
## CONSTANTS & GLOBALS ##
NO_CACHE = 0
STATIC_CACHE = 1
REFRESH_CACHE = 2
SET_LOCAL = 0
SET_GLOBAL = 1
SET_MODULE = 2
##################################################
## Tokens for the parser ##
#generic
identchars = "abcdefghijklmnopqrstuvwxyz" \
"ABCDEFGHIJKLMNOPQRSTUVWXYZ_"
namechars = identchars + "0123456789"
#operators
powerOp = '**'
unaryArithOps = ('+', '-', '~')
binaryArithOps = ('+', '-', '/', '//','%')
shiftOps = ('>>','<<')
bitwiseOps = ('&','|','^')
assignOp = '='
augAssignOps = ('+=','-=','/=','*=', '**=','^=','%=',
'>>=','<<=','&=','|=', )
assignmentOps = (assignOp,) + augAssignOps
compOps = ('<','>','==','!=','<=','>=', '<>', 'is', 'in',)
booleanOps = ('and','or','not')
operators = (powerOp,) + unaryArithOps + binaryArithOps \
+ shiftOps + bitwiseOps + assignmentOps \
+ compOps + booleanOps
delimeters = ('(',')','{','}','[',']',
',','.',':',';','=','`') + augAssignOps
keywords = ('and', 'del', 'for', 'is', 'raise',
'assert', 'elif', 'from', 'lambda', 'return',
'break', 'else', 'global', 'not', 'try',
'class', 'except', 'if', 'or', 'while',
'continue', 'exec', 'import', 'pass',
'def', 'finally', 'in', 'print',
)
single3 = "'''"
double3 = '"""'
tripleQuotedStringStarts = ("'''", '"""',
"r'''", 'r"""', "R'''", 'R"""',
"u'''", 'u"""', "U'''", 'U"""',
"ur'''", 'ur"""', "Ur'''", 'Ur"""',
"uR'''", 'uR"""', "UR'''", 'UR"""')
tripleQuotedStringPairs = {"'''": single3, '"""': double3,
"r'''": single3, 'r"""': double3,
"u'''": single3, 'u"""': double3,
"ur'''": single3, 'ur"""': double3,
"R'''": single3, 'R"""': double3,
"U'''": single3, 'U"""': double3,
"uR'''": single3, 'uR"""': double3,
"Ur'''": single3, 'Ur"""': double3,
"UR'''": single3, 'UR"""': double3,
}
closurePairs= {')':'(',']':'[','}':'{'}
closurePairsRev= {'(':')','[':']','{':'}'}
##################################################
## Regex chunks for the parser ##
tripleQuotedStringREs = {}
def makeTripleQuoteRe(start, end):
start = escapeRegexChars(start)
end = escapeRegexChars(end)
    return re.compile(r'(?:' + start + r').*?' + r'(?:' + end + r')', re.DOTALL)
for start, end in tripleQuotedStringPairs.items():
tripleQuotedStringREs[start] = makeTripleQuoteRe(start, end)
WS = r'[ \f\t]*'
EOL = r'\r\n|\n|\r'
EOLZ = EOL + r'|\Z'
escCharLookBehind = nongroup(r'(?<=\A)',r'(?<!\\)')
nameCharLookAhead = r'(?=[A-Za-z_])'
identRE=re.compile(r'[a-zA-Z_][a-zA-Z_0-9]*')
EOLre=re.compile(r'(?:\r\n|\r|\n)')
specialVarRE=re.compile(r'([a-zA-Z_]+)@') # for matching specialVar comments
# e.g. ##author@ Tavis Rudd
unicodeDirectiveRE = re.compile(
r'(?:^|\r\n|\r|\n)\s*#\s{0,5}unicode[:\s]*([-\w.]*)\s*(?:\r\n|\r|\n)', re.MULTILINE)
encodingDirectiveRE = re.compile(
r'(?:^|\r\n|\r|\n)\s*#\s{0,5}encoding[:\s]*([-\w.]*)\s*(?:\r\n|\r|\n)', re.MULTILINE)
escapedNewlineRE = re.compile(r'(?<!\\)\\n')
directiveNamesAndParsers = {
# importing and inheritance
'import':None,
'from':None,
'extends': 'eatExtends',
'implements': 'eatImplements',
'super': 'eatSuper',
# output, filtering, and caching
'slurp': 'eatSlurp',
'raw': 'eatRaw',
'include': 'eatInclude',
'cache': 'eatCache',
'filter': 'eatFilter',
'echo': None,
'silent': None,
'call': 'eatCall',
'arg': 'eatCallArg',
'capture': 'eatCapture',
# declaration, assignment, and deletion
'attr': 'eatAttr',
'def': 'eatDef',
'block': 'eatBlock',
'@': 'eatDecorator',
'defmacro': 'eatDefMacro',
'closure': 'eatClosure',
'set': 'eatSet',
'del': None,
# flow control
'if': 'eatIf',
'while': None,
'for': None,
'else': None,
'elif': None,
'pass': None,
'break': None,
'continue': None,
'stop': None,
'return': None,
'yield': None,
# little wrappers
'repeat': None,
'unless': None,
# error handling
'assert': None,
'raise': None,
'try': None,
'except': None,
'finally': None,
'errorCatcher': 'eatErrorCatcher',
    # instructions to the parser and compiler
'breakpoint': 'eatBreakPoint',
'compiler': 'eatCompiler',
'compiler-settings': 'eatCompilerSettings',
# misc
'shBang': 'eatShbang',
'encoding': 'eatEncoding',
'end': 'eatEndDirective',
}
endDirectiveNamesAndHandlers = {
'def': 'handleEndDef', # has short-form
'block': None, # has short-form
'closure': None, # has short-form
'cache': None, # has short-form
'call': None, # has short-form
'capture': None, # has short-form
'filter': None,
'errorCatcher':None,
'while': None, # has short-form
'for': None, # has short-form
'if': None, # has short-form
'try': None, # has short-form
'repeat': None, # has short-form
'unless': None, # has short-form
}
##################################################
## CLASSES ##
# @@TR: SyntaxError doesn't call exception.__str__ for some reason!
#class ParseError(SyntaxError):
class ParseError(ValueError):
def __init__(self, stream, msg='Invalid Syntax', extMsg='', lineno=None, col=None):
self.stream = stream
if stream.pos() >= len(stream):
stream.setPos(len(stream) -1)
self.msg = msg
self.extMsg = extMsg
self.lineno = lineno
self.col = col
def __str__(self):
return self.report()
def report(self):
stream = self.stream
if stream.filename():
f = " in file %s" % stream.filename()
else:
f = ''
report = ''
if sel
Diaoul/subliminal | subliminal/__init__.py | Python | mit | 818 | 0.002445
# -*- coding: utf-8 -*-
__title__ = 'subliminal'
__version__ = '2.1.0'
__short_version__ = '.'.join(__version__.split('.')[:2])
__author__ = 'Antoine Bertin'
__license__ = 'MIT'
__copyright__ = 'Copyright 2016, Antoine Bertin'
import logging
from .core import (AsyncProviderPool, ProviderPool, check_video, download_best_subtitles, download_subtitles,
list_subtitles, refine, save_subtitles, scan_video, scan_videos)
from .cache import region
from .exceptions import Error, ProviderError
from .extensions import provider_manager, refiner_manager
from .providers import Provider
from .score import compute_score, get_scores
from .subtitle import SUBTITLE_EXTENSIONS, Subtitle
from .video import VIDEO_EXTENSIONS, Episode, Movie, Video
logging.getLogger(__name__).addHandler(logging.NullHandler())
Sheeo/pygit2 | setup.py | Python | gpl-2.0 | 6,812 | 0.000441
# -*- coding: utf-8 -*-
# coding: UTF-8
#
# Copyright 2010-2015 The pygit2 contributors
#
# This file is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License, version 2,
# as published by the Free Software Foundation.
#
# In addition to the permissions in the GNU General Public License,
# the authors give you unlimited permission to link the compiled
# version of this file into combinations with other programs,
# and to distribute those combinations without any restriction
# coming from the use of this file. (The General Public License
# restrictions do apply in other respects; for example, they cover
# modification of the file, and distribution when not linked into
# a combined executable.)
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301, USA.
"""Setup file for pygit2."""
# Import from the future
from __future__ import print_function
# Import from the Standard Library
import codecs
from distutils.command.build import build
from distutils.command.sdist import sdist
from distutils import log
import os
from os import getenv, listdir, pathsep
from os.path import abspath, isfile
from setuptools import setup, Extension, Command
import shlex
from subprocess import Popen, PIPE
import sys
import unittest
# Get cffi major version
try:
import cffi
except ImportError:
cffi_major_version = None
else:
cffi_major_version = cffi.__version_info__[0]
# Import stuff from pygit2/_utils.py without loading the whole pygit2 package
sys.path.insert(0, 'pygit2')
from _build import __version__, get_libgit2_paths
if cffi_major_version == 0:
from _run import ffi, preamble, C_KEYWORDS
ffi.verify(preamble, **C_KEYWORDS)
del sys.path[0]
# Python 2 support
# See https://github.com/libgit2/pygit2/pull/180 for a discussion about this.
# Using six isn't an option here yet, we don't necessarily have six installed
if sys.version_info[0] == 2:
u = lambda s: unicode(s, 'utf-8')
else:
u = str
libgit2_bin, libgit2_include, libgit2_lib = get_libgit2_paths()
pygit2_exts = [os.path.join('src', name) for name in listdir('src')
if name.endswith('.c')]
class TestCommand(Command):
"""Command for running unittests without install."""
user_options = [("args=", None, '''The command args string passed to
unittest framework, such as
--args="-v -f"''')]
def initialize_options(self):
self.args = ''
def finalize_options(self):
pass
def run(self):
self.run_command('build')
bld = self.distribution.get_command_obj('build')
        # Add build_lib to sys.path so that unittest can find DLLs and libs
sys.path = [abspath(bld.build_lib)] + sys.path
test_argv0 = [sys.argv[0] + ' test --args=']
        # For transferring args to unittest, we have to split the args ourselves,
        # so that a command like:
#
# python setup.py test --args="-v -f"
#
        # can be executed, and the parameter '-v -f' can be transferred to
# unittest properly.
test_argv = test_argv0 + shlex.split(self.args)
unittest.main(None, defaultTest='test.test_suite', argv=test_argv)
class sdist_files_from_git(sdist):
def get_file_list(self):
popen = Popen(['git', 'ls-files'], stdout=PIPE, stderr=PIPE,
universal_newlines=True)
stdoutdata, stderrdata = popen.communicate()
if popen.returncode != 0:
print(stderrdata)
sys.exit()
for line in stdoutdata.splitlines():
# Skip hidden files at the root
if line[0] == '.':
continue
self.filelist.append(line)
# Ok
self.filelist.sort()
self.filelist.remove_duplicates()
self.write_manifest()
classifiers = [
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Topic :: Software Development :: Version Control"]
with codecs.open('README.rst', 'r', 'utf-8') as readme:
long_description = readme.read()
cmdclass = {
'test': TestCommand,
'sdist': sdist_files_from_git,
}
# On Windows, we install the git2.dll too.
class BuildWithDLLs(build):
def _get_dlls(self):
# return a list of (FQ-in-name, relative-out-name) tuples.
ret = []
bld_ext = self.distribution.get_command_obj('build_ext')
compiler_type = bld_ext.compiler.compiler_type
libgit2_dlls = []
if compiler_type == 'msvc':
libgit2_dlls.append('git2.dll')
elif compiler_type == 'mingw32':
libgit2_dlls.append('libgit2.dll')
look_dirs = [libgit2_bin] + getenv("PATH", "").split(pathsep)
target = abspath(self.build_lib)
for bin in libgit2_dlls:
for look in look_dirs:
f = os.path.join(look, bin)
if isfile(f):
ret.append((f, target))
break
else:
log.warn("Could not find required DLL %r to include", bin)
log.debug("(looked in %s)", look_dirs)
return ret
def run(self):
build.run(self)
for s, d in self._get_dlls():
self.copy_file(s, d)
# On Windows we package up the dlls with the plugin.
if os.name == 'nt':
cmdclass['build'] = BuildWithDLLs
extra_args = {
    'ext_modules': [
Extension('_pygit2', pygit2_exts, libraries=['git2'],
include_dirs=[libgit2_include],
library_dirs=[libgit2_lib]),
# FFI is added in the build step
],
}
if cffi_major_version == 0:
extra_args['ext_modules'].append(ffi.verifier.get_extension())
else:
extra_args['cffi_modules'] = ['pygit2/_run.py:ffi']
setup(
name='pygit2',
description='Python bindings for libgit2.',
keywords='git',
version=__version__,
url='http://github.com/libgit2/pygit2',
classifiers=classifiers,
license='GPLv2 with linking exception',
maintainer=u('J. David Ibáñez'),
maintainer_email='jdavid.ibp@gmail.com',
long_description=long_description,
packages=['pygit2'],
package_data={'pygit2': ['decl.h']},
setup_requires=['cffi'],
install_requires=['cffi', 'six'],
zip_safe=False,
cmdclass=cmdclass,
**extra_args)
rohitranjan1991/home-assistant | tests/components/cpuspeed/test_config_flow.py | Python | mit | 2,642 | 0
"""Tests for the CPU Speed config flow."""
from unittest.mock import AsyncMock, MagicMock
from homeassistant.components.cpuspeed.const import DOMAIN
from homeassistant.config_entries import SOURCE_USER
from homeassistant.core import HomeAssistant
from homeassistant.data_entry_flow import (
RESULT_TYPE_ABORT,
RESULT_TYPE_CREATE_ENTRY,
RESULT_TYPE_FORM,
)
from tests.common import MockConfigEntry
async def test_full_user_flow(
hass: HomeAssistant,
mock_cpuinfo_config_flow: MagicMock,
mock_setup_entry: AsyncMock,
) -> None:
"""Test the full user configuration flow."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
assert result.get("type") == RESULT_TYPE_FORM
assert result.get("step_id") == SOURCE_USER
assert "flow_id" in result
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={},
)
assert result2.get("type") == RESULT_TYPE_CREATE_ENTRY
assert result2.get("title") == "CPU Speed"
assert result2.get("data") == {}
assert len(mock_setup_entry.mock_calls) == 1
assert len(mock_cpuinfo_config_flow.mock_calls) == 1
async def test_already_configured(
hass: HomeAssistant,
mock_cpuinfo_config_flow: MagicMock,
mock_setup_entry: AsyncMock,
mock_config_entry: MockConfigEntry,
) -> None:
"""Test we abort if already configured."""
mock_config_entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
assert result.get("type") == RESULT_TYPE_ABORT
assert result.get("reason") == "already_configured"
assert len(mock_setup_entry.mock_calls) == 0
assert len(mock_cpuinfo_config_flow.mock_calls) == 0
async def test_not_compatible(
hass: HomeAssistant,
mock_cpuinfo_config_flow: MagicMock,
mock_setup_entry: AsyncMock,
) -> None:
"""Test we abort the configuration flow when incompatible."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
assert result.get("type") == RESULT_TYPE_FORM
assert result.get("step_id") == SOURCE_USER
assert "flow_id" in result
mock_cpuinfo_config_flow.return_value = {}
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={},
)
assert result2.get("type") == RESULT_TYPE_ABORT
assert result2.get("reason") == "not_compatible"
assert len(mock_setup_entry.mock_calls) == 0
assert len(mock_cpuinfo_config_flow.mock_calls) == 1
|
ryandoherty/RaceCapture_App
|
autosportlabs/uix/gauge/bargraphgauge.py
|
Python
|
gpl-3.0
| 1,453
| 0.015141
|
import kivy
kivy.require('1.9.1')
from kivy.uix.anchorlayout import AnchorLayout
from kivy.uix.relativelayout import RelativeLayout
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.stencilview import StencilView
from fieldlabel import FieldLabel
from kivy.properties import NumericProperty, ListProperty
from kivy.app import Builder
from kivy.graphics import Color, Rectangle
from utils import *
from random import random as r
Builder.load_file('autosportlabs/uix/gauge/bargraphgauge.kv')
class BarGraphGauge(AnchorLayout):
minval = NumericProperty(0)
maxval = NumericProperty(100)
value = NumericProperty(0)
color = ListProperty([1, 1, 1, 0.5])
def __init__(self, **kwargs):
super(BarGraphGauge, self).__init__(**kwargs)
def on_minval(self, instance, value):
self._refresh_value()
def on_maxval(self, instance, value):
self._refresh_value()
def on_value(self, instance, value):
self._refresh_value()
def _refresh_value(self):
stencil = self.ids.stencil
value = self.value
minval = self.minval
maxval = self.maxval
channel_range = (maxval - minval)
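        # normalise value into [0, 1]; float() also guards against
        # integer division if the properties hold ints on Python 2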
        pct = 0 if channel_range == 0 else (float(value - minval) / channel_range)
width = self.width * pct
stencil.width = width
self.ids.value.text = str(value)
|
SoftwearDevelopment/spynl
|
spynl/main/serial/cli.py
|
Python
|
mit
| 1,287
| 0.000777
|
"""Command-line tool to test the (de)serialisation live."""
from sys import stdin, stdout
from argparse import ArgumentParser
from spynl.main.serial import negotiate_content_type, loads, dumps
def main():
"""main function for converting between formats"""
parser = ArgumentParser(description='Convert between data formats.')
parser.add_argument(
'--output',
metavar='FILENAME',
dest='output_filename',
default=None,
help='filename where output is stored, STDOUT by default',
)
parser.add_argument(
'--output-type',
metavar='TYPE',
dest='output_type',
default='json',
help='output type e.g. JSON or XML etc.',
)
    parser.add_argument(
'--input-type',
metavar='TYPE',
dest='input_type',
default=None,
help='suggested input type. See --output-type',
)
args = parser.parse_args()
request = stdin.read()
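    # sniff the serialisation format from the payload itself unless
    # --input-type was given, then re-serialise as the requested output type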
request = loads(request, negotiate_content_type(request, args.input_type))
response = dumps(request, negotiate_content_type('', args.output_type))
output = open(args.output_filename, 'w') if args.output_filename else stdout
output.write(response + '\n')
if args.output_filename:
output.close()
|
all-of-us/raw-data-repository
|
rdr_service/lib_fhir/fhirclient_4_0_0/models/medicationstatement.py
|
Python
|
bsd-3-clause
| 8,384
| 0.005725
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.0-a53ec6ee1b (http://hl7.org/fhir/StructureDefinition/MedicationStatement) on 2019-05-07.
# 2019, SMART Health IT.
from . import domainresource
class MedicationStatement(domainresource.DomainResource):
""" Record of medication being taken by a patient.
A record of a medication that is being consumed by a patient. A
MedicationStatement may indicate that the patient may be taking the
medication now or has taken the medication in the past or will be taking
the medication in the future. The source of this information can be the
patient, significant other (such as a family member or spouse), or a
clinician. A common scenario where this information is captured is during
the history taking process during a patient visit or stay. The medication
information may come from sources such as the patient's memory, from a
    prescription bottle, or from a list of medications the patient, clinician
    or other party maintains.
The primary difference between a medication statement and a medication
administration is that the medication administration has complete
administration information and is based on actual administration
information from the person who administered the medication. A medication
statement is often, if not always, less specific. There is no required
date/time when the medication was administered, in fact we only know that a
source has reported the patient is taking this medication, where details
such as time, quantity, or rate or even medication product may be
incomplete or missing or less precise. As stated earlier, the medication
statement information may come from the patient's memory, from a
prescription bottle or from a list of medications the patient, clinician or
other party maintains. Medication administration is more formal and is not
missing detailed information.
"""
resource_type = "MedicationStatement"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.basedOn = None
""" Fulfils plan, proposal or order.
List of `FHIRReference` items (represented as `dict` in JSON). """
self.category = None
""" Type of medication usage.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.context = None
""" Encounter / Episode associated with MedicationStatement.
Type `FHIRReference` (represented as `dict` in JSON). """
self.dateAsserted = None
""" When the statement was asserted?.
Type `FHIRDate` (represented as `str` in JSON). """
self.derivedFrom = None
""" Additional supporting information.
List of `FHIRReference` items (represented as `dict` in JSON). """
self.dosage = None
""" Details of how medication is/was taken or should be taken.
List of `Dosage` items (represented as `dict` in JSON). """
self.effectiveDateTime = None
""" The date/time or interval when the medication is/was/will be taken.
Type `FHIRDate` (represented as `str` in JSON). """
self.effectivePeriod = None
""" The date/time or interval when the medication is/was/will be taken.
Type `Period` (represented as `dict` in JSON). """
self.identifier = None
""" External identifier.
List of `Identifier` items (represented as `dict` in JSON). """
self.informationSource = None
""" Person or organization that provided the information about the
taking of this medication.
Type `FHIRReference` (represented as `dict` in JSON). """
self.medicationCodeableConcept = None
""" What medication was taken.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.medicationReference = None
""" What medication was taken.
Type `FHIRReference` (represented as `dict` in JSON). """
self.note = None
""" Further information about the statement.
List of `Annotation` items (represented as `dict` in JSON). """
self.partOf = None
""" Part of referenced event.
List of `FHIRReference` items (represented as `dict` in JSON). """
self.reasonCode = None
""" Reason for why the medication is being/was taken.
List of `CodeableConcept` items (represented as `dict` in JSON). """
self.reasonReference = None
""" Condition or observation that supports why the medication is
being/was taken.
List of `FHIRReference` items (represented as `dict` in JSON). """
self.status = None
""" active | completed | entered-in-error | intended | stopped | on-
hold | unknown | not-taken.
Type `str`. """
self.statusReason = None
""" Reason for current status.
List of `CodeableConcept` items (represented as `dict` in JSON). """
self.subject = None
""" Who is/was taking the medication.
Type `FHIRReference` (represented as `dict` in JSON). """
super(MedicationStatement, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(MedicationStatement, self).elementProperties()
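        # each tuple: (name, json_name, type, is_list, of_many, not_optional)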
js.extend([
("basedOn", "basedOn", fhirreference.FHIRReference, True, None, False),
("category", "category", codeableconcept.CodeableConcept, False, None, False),
("context", "context", fhirreference.FHIRReference, False, None, False),
("dateAsserted", "dateAsserted", fhirdate.FHIRDate, False, None, False),
("derivedFrom", "derivedFrom", fhirreference.FHIRReference, True, None, False),
("dosage", "dosage", dosage.Dosage, True, None, False),
("effectiveDateTime", "effectiveDateTime", fhirdate.FHIRDate, False, "effective", False),
("effectivePeriod", "effectivePeriod", period.Period, False, "effective", False),
("identifier", "identifier", identifier.Identifier, True, None, False),
("informationSource", "informationSource", fhirreference.FHIRReference, False, None, False),
("medicationCodeableConcept", "medicationCodeableConcept", codeableconcept.CodeableConcept, False, "medication", True),
("medicationReference", "medicationReference", fhirreference.FHIRReference, False, "medication", True),
("note", "note", annotation.Annotation, True, None, False),
("partOf", "partOf", fhirreference.FHIRReference, True, None, False),
("reasonCode", "reasonCode", codeableconcept.CodeableConcept, True, None, False),
("reasonReference", "reasonReference", fhirreference.FHIRReference, True, None, False),
("status", "status", str, False, None, True),
("statusReason", "statusReason", codeableconcept.CodeableConcept, True, None, False),
("subject", "subject", fhirreference.FHIRReference, False, None, True),
])
return js
import sys
try:
from . import annotation
except ImportError:
annotation = sys.modules[__package__ + '.annotation']
try:
from . import codeableconcept
except ImportError:
codeableconcept = sys.modules[__package__ + '.codeableconcept']
try:
from . import dosage
except ImportError:
dosage = sys.modules[__package__ + '.dosage']
try:
from . import fhirdate
except ImportError:
fhirdate = sys.modules[__package__ + '.fhirdate']
try:
from . import fhirreference
except ImportError:
fhirreference = sys.modules[__package__ + '.fhirreference']
try:
    from . import identifier
except ImportError:
    identifier = sys.modules[__package__ + '.identifier']
try:
    from . import period
except ImportError:
    period = sys.modules[__package__ + '.period']
|
kevintumbo/Checkpoint2_Bucketlist
|
tests/base_test.py
|
Python
|
mit
| 2,589
| 0.011587
|
from bucketlist import create_app, db
from bucketlist.models import User, Bucketlist, Item
import json
import unittest
class BaseTestCase(unittest.TestCase):
def setUp(self):
""" this function creates the base test"""
self.app = create_app(config_name="development")
self.client = self.app.test_client
self.user1 = {"username":"ktumbo",
"email":"ktumbo@gmail.com",
"password":"password"
}
self.bucketlist = {"name":"Work goals",
"description":"Things To achieve at work",
"owner_id": 1
}
self.bucketlists2 = {"name":"Life Goals",
"description":"Things To Achieve in Life",
"owner_id": 1
}
self.bucketlists3 = {"name":"IEBC Goals",
"description":"Things IEBC needs to achieve",
"owner_id": 1
}
self.item1 = {"item_name":"Be A Python and Js Ninja",
"item_description":"Be a pro in flask, Django, Angular, React and vue ",
"owner_id": 1,
"bucketlist_id": 1
}
self.item2 = {"item_name":"Be a rockstar",
"item_description":"Learn how to play slipknot songs proficiently",
"owner_id":1,
"bucketlist_id":1
}
with self.app.app_context():
db.create_all()
# register and log in user
base_response = self.client().post('/api/v1.0/auth/register', data=self.user1)
self.user_login = {
"email": "ktumbo@gmail.com",
"password": "password"
}
base_result = self.client().post('/api/v1.0/auth/login', data=self.user_login)
        access_token = json.loads(base_result.data.decode())['access_token']
self.my_header = dict(Authorization="Bearer " + access_token)
# create bucketlist
bucket_response = self.client().post('/api/v1.0/bucketlists/',
data=self.bucketlists3,
headers=self.my_header)
def tearDown(self):
""" removes resources once tests have run """
with self.app.app_context():
db.session.remove()
db.drop_all()
|
kenorb/BitTorrent
|
twisted/web/test/test_xml.py
|
Python
|
gpl-3.0
| 23,878
| 0.003015
|
# -*- test-case-name: twisted.web.test.test_xml -*-
#
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
#
"""Some fairly inadequate testcases for Twisted XML support."""
from __future__ import nested_scopes
from twisted.trial.unittest import TestCase
from twisted.web import sux
from twisted.web import microdom
from twisted.web import domhelpers
class Sux0r(sux.XMLParser):
def __init__(self):
self.tokens = []
def getTagStarts(self):
return [token for token in self.tokens if token[0] == 'start']
def gotTagStart(self, name, attrs):
self.tokens.append(("start", name, attrs))
def gotText(self, text):
self.tokens.append(("text", text))
class SUXTest(TestCase):
def testBork(self):
s = "<bork><bork><bork>"
ms = Sux0r()
ms.connectionMade()
ms.dataReceived(s)
self.failUnlessEqual(len(ms.getTagStarts()),3)
class MicroDOMTest(TestCase):
def testCaseSensitiveSoonCloser(self):
        s = """
<HTML><BODY>
<P ALIGN="CENTER">
<A HREF="http://www.apache.org/"><IMG SRC="/icons/apache_pb.gif"></A>
</P>
<P>
This is an insane set of text nodes that should NOT be gathered under
the A tag above.
</P>
</BODY></HTML>
"""
d = microdom.parseString(s, beExtremelyLenient=1)
l = domhelpers.findNodesNamed(d.documentElement, 'a')
n = domhelpers.gatherTextNodes(l[0],1).replace(' ',' ')
self.assertEquals(n.find('insane'), -1)
def testUnEntities(self):
s = """
<HTML>
This HTML goes between Stupid <=CrAzY!=> Dumb.
</HTML>
"""
d = microdom.parseString(s, beExtremelyLenient=1)
n = domhelpers.gatherTextNodes(d)
self.assertNotEquals(n.find('>'), -1)
def testEmptyError(self):
self.assertRaises(sux.ParseError, microdom.parseString, "")
def testTameDocument(self):
s = """
<test>
<it>
<is>
<a>
test
</a>
</is>
</it>
</test>
"""
d = microdom.parseString(s)
self.assertEquals(
domhelpers.gatherTextNodes(d.documentElement).strip() ,'test')
def testAwfulTagSoup(self):
s = """
<html>
<head><title> I send you this message to have your advice!!!!</titl e
</headd>
<body bgcolor alink hlink vlink>
<h1><BLINK>SALE</blINK> TWENTY MILLION EMAILS & FUR COAT NOW
FREE WITH `ENLARGER'</h1>
YES THIS WONDERFUL AWFER IS NOW HERER!!!
<script LANGUAGE="javascript">
function give_answers() {
if (score < 70) {
alert("I hate you");
}}
</script><a href=/foo.com/lalal name=foo>lalal</a>
</body>
</HTML>
"""
d = microdom.parseString(s, beExtremelyLenient=1)
l = domhelpers.findNodesNamed(d.documentElement, 'blink')
self.assertEquals(len(l), 1)
def testScriptLeniency(self):
s = """
<script>(foo < bar) and (bar > foo)</script>
<script language="javascript">foo </scrip bar </script>
<script src="foo">
<script src="foo">baz</script>
<script /><script></script>
"""
d = microdom.parseString(s, beExtremelyLenient=1)
self.assertEquals(d.firstChild().firstChild().firstChild().data,
"(foo < bar) and (bar > foo)")
self.assertEquals(d.firstChild().getElementsByTagName("script")[1].firstChild().data,
"foo </scrip bar ")
def testScriptLeniencyIntelligence(self):
# if there is comment or CDATA in script, the autoquoting in bEL mode
# should not happen
s = """<script><!-- lalal --></script>"""
self.assertEquals(microdom.parseString(s, beExtremelyLenient=1).firstChild().toxml(), s)
s = """<script><![CDATA[lalal]]></script>"""
self.assertEquals(microdom.parseString(s, beExtremelyLenient=1).firstChild().toxml(), s)
s = """<script> // <![CDATA[
lalal
//]]></script>"""
self.assertEquals(microdom.parseString(s, beExtremelyLenient=1).firstChild().toxml(), s)
def testPreserveCase(self):
s = '<eNcApSuLaTe><sUxor></sUxor><bOrk><w00T>TeXt</W00t></BoRk></EnCaPsUlAtE>'
s2 = s.lower().replace('text', 'TeXt')
# these are the only two option permutations that *can* parse the above
d = microdom.parseString(s, caseInsensitive=1, preserveCase=1)
d2 = microdom.parseString(s, caseInsensitive=1, preserveCase=0)
# caseInsensitive=0 preserveCase=0 is not valid, it's converted to
# caseInsensitive=0 preserveCase=1
d3 = microdom.parseString(s2, caseInsensitive=0, preserveCase=1)
d4 = microdom.parseString(s2, caseInsensitive=1, preserveCase=0)
d5 = microdom.parseString(s2, caseInsensitive=1, preserveCase=1)
# this is slightly contrived, toxml() doesn't need to be identical
# for the documents to be equivalent (i.e. <b></b> to <b/>),
# however this assertion tests preserving case for start and
# end tags while still matching stuff like <bOrk></BoRk>
self.assertEquals(d.documentElement.toxml(), s)
self.assert_(d.isEqualToDocument(d2), "%r != %r" % (d.toxml(), d2.toxml()))
self.assert_(d2.isEqualToDocument(d3), "%r != %r" % (d2.toxml(), d3.toxml()))
# caseInsensitive=0 on the left, NOT perserveCase=1 on the right
## XXX THIS TEST IS TURNED OFF UNTIL SOMEONE WHO CARES ABOUT FIXING IT DOES
#self.failIf(d3.isEqualToDocument(d2), "%r == %r" % (d3.toxml(), d2.toxml()))
self.assert_(d3.isEqualToDocument(d4), "%r != %r" % (d3.toxml(), d4.toxml()))
self.assert_(d4.isEqualToDocument(d5), "%r != %r" % (d4.toxml(), d5.toxml()))
def testDifferentQuotes(self):
s = '<test a="a" b=\'b\' />'
d = microdom.parseString(s)
e = d.documentElement
self.assertEquals(e.getAttribute('a'), 'a')
self.assertEquals(e.getAttribute('b'), 'b')
def testLinebreaks(self):
s = '<test \na="a"\n\tb="#b" />'
d = microdom.parseString(s)
e = d.documentElement
self.assertEquals(e.getAttribute('a'), 'a')
self.assertEquals(e.getAttribute('b'), '#b')
def testMismatchedTags(self):
for s in '<test>', '<test> </tset>', '</test>':
self.assertRaises(microdom.MismatchedTags, microdom.parseString, s)
def testComment(self):
s = "<bar><!--<foo />--></bar>"
d = microdom.parseString(s)
e = d.documentElement
self.assertEquals(e.nodeName, "bar")
c = e.childNodes[0]
self.assert_(isinstance(c, microdom.Comment))
self.assertEquals(c.value, "<foo />")
c2 = c.cloneNode()
self.assert_(c is not c2)
self.assertEquals(c2.toxml(), "<!--<foo />-->")
def testText(self):
d = microdom.parseString("<bar>xxxx</bar>").documentElement
text = d.childNodes[0]
self.assert_(isinstance(text, microdom.Text))
self.assertEquals(text.value, "xxxx")
clone = text.cloneNode()
self.assert_(clone is not text)
self.assertEquals(clone.toxml(), "xxxx")
def testEntities(self):
nodes = microdom.parseString("<b>&AB;</b>").documentElement.childNodes
self.assertEquals(len(nodes), 2)
self.assertEquals(nodes[0].data, "&")
self.assertEquals(nodes[1].data, "AB;")
self.assertEquals(nodes[0].cloneNode().toxml(), "&")
for n in nodes:
self.assert_(isinstance(n, microdom.EntityReference))
def testCData(self):
s = '<x><![CDATA[</x>\r\n & foo]]></x>'
cdata = microdom.parseString(s).documentElement.childNodes[0]
self.assert_(isinstance(cdata, microdom.CDATASection))
self.assertEquals(cdata.data, "</x>\r\n & foo")
self.assertEquals(cdata.cloneNode().toxml(), "<![CDA
|
Fillll/reddit2telegram
|
reddit2telegram/channels/~inactive/comedynecrophilia/app.py
|
Python
|
mit
| 155
| 0.006452
|
# encoding:utf-8
subreddit = 'comedynecrophilia'
t_channel = '@comedynecrophilia'
def send_post(submission, r2t):
return r2t.send_simple(submission)
|
google-code-export/los-cocos
|
test/test_recorder.py
|
Python
|
bsd-3-clause
| 1,317
| 0.023538
|
from __future__ import division, print_function, unicode_literals
# This code is so you can run the samples without installing the package
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
#
# this test is not suitable for autotest because it uses a special clock
# that clashes with the clock used to autotest. So no testinfo here.
tags = "recorder"
import cocos
from cocos.director import director
from cocos.actions import JumpTo, JumpBy
from cocos.sprite import Sprite
import pyglet
class TestLayer(cocos.layer.Layer):
def __init__(self):
super( TestLayer, self ).__init__()
        x,y = director.get_window_size()
self.sprite = Sprite( 'grossini.png', (x//5, y//3*2) )
self.add( self.sprite )
self.sprite.do( JumpTo( (x//5*4, 100), 100, 10, 6 ) )
self.sprite2 = Sprite( 'grossini.png', (x//5, y//3) )
self.add( self.sprite2 )
self.sprite2.do( JumpBy( (x//5*4, 100), 100, 10, 6 ) )
description = """
records 6 seconds, snapshots in the tmp subdir
"""
def main():
print(description)
director.set_recorder(25, "tmp/frame-%d.png", 6)
director.init()
test_layer = TestLayer ()
main_scene = cocos.scene.Scene (test_layer)
director.run (main_scene)
if __name__ == '__main__':
main()
|
abreen/socrates.py
|
filetypes/jflapfile.py
|
Python
|
gpl-2.0
| 1,386
| 0.002165
|
from filetypes.basefile import BaseFile
from filetypes.plainfile import PlainFile
from filetypes.plainfile import ReviewTest
import filetypes
class JFLAPReviewTest(ReviewTest):
def __init__(self, dict_, file_type):
super().__init__(dict_, file_type)
def run(self, path):
"""A JFLAP review test calls the ReviewTest run() method but
suppresses printing the file.
"""
return super().run(path, False)
class JFLAPFile(PlainFile):
yaml_type = 'jflap'
extensions = ['jff']
supported_tests = PlainFile.supported_tests.copy()
supported_tests.append(JFLAPReviewTest)
def __init__(self, dict_):
BaseFile.__init__(self, dict_)
if 'tests' in dict_:
for t in dict_['tests']:
test_cls = filetypes.find_test_class(JFLAPFile.yaml_type,
t['type'])
                self.tests.append(test_cls(t, JFLAPFile.yaml_type))
def run_tests(self):
results = []
for t in self.tests:
result = t.run(self.path)
if result:
if type(result) is list:
for r in result:
results.append(r)
else:
results.append(result)
return results
def __str__(self):
return self.path + " (JFLAP file)"
|
qiyuangong/leetcode
|
python/006_ZigZag_Conversion.py
|
Python
|
mit
| 1,447
| 0.000691
|
class Solution(object):
# def convert(self, s, numRows):
# """
# :type s: str
# :type numRows: int
# :rtype: str
# """
# ls = len(s)
# if ls <= 1 or numRows == 1:
# return s
# temp_s = []
# for i in range(numRows):
# temp_s.append(['']*(ls / 2))
# inter = numRows - 1
# col, row = 0, 0
# for i, ch in enumerate(s):
# flag = True
# if (i / inter) % 2 == 1:
# # print i
# flag = False
# if flag:
# temp_s[row][col] = ch
# row += 1
# else:
# temp_s[row][col] = ch
# col += 1
# row -= 1
# result = ''
# for i in range(numRows):
# result += ''.join(temp_s[i])
# return result
def convert(self, s, numRows):
# https://leetcode.com/discuss/90908/easy-python-o-n-solution-94%25-with-explanations
if numRows == 1:
return s
# calculate period
p = 2 * (numRows - 1)
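        # e.g. numRows = 3 -> p = 4: offsets 0,1,2,3 within a period map to
        # rows 0,1,2,1 (offsets past the midpoint fold back towards row 0)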
result = [""] * numRow
|
s
for i in xrange(len(s)):
floor = i % p
if floor >= p//2:
floor = p - floor
result[floor] += s[i]
return "".join(result)
if __name__ == '__main__':
# begin
s = Solution()
print s.convert("PAYPALISHIRING", 3)
|
joelwilson/caniflymykite
|
geonames.py
|
Python
|
gpl-3.0
| 1,582
| 0.001264
|
import json
import os
import requests
USERNAME = os.environ['GEONAMES_USER']
BASE_URL = 'http://api.geonames.org'
def search(term, user=USERNAME):
'''Returns a dict of results for a search to geonames.'''
r = requests.get(BASE_URL + '/searchJSON',
params={'q': term,
'username': user,
'country': 'US',
'featureClass': 'P'})
    return json.loads(r.text)['geonames'] if r.ok else None
def weather(lat, lon, user=USERNAME):
'''Returns a dict of current weather conditions of the station
closest to lat, lon.
'''
r = requests.get(BASE_URL + '/findNearByWeatherJSON',
params={'lat': lat,
'lng': lon,
'username': user})
jsontext = json.loads(r.text)
if r.ok:
try:
return jsontext['weatherObservation']
except KeyError:
return None
    return None
def nearestplace(lat, lon, user=USERNAME):
    '''Returns a dict of attributes of the closest geographical place.'''
r = requests.get(BASE_URL + '/findNearbyPlaceNameJSON',
params={'lat': lat,
'lng': lon,
'username': user,
'country': 'US',
'featureClass': 'P'})
jsontext = json.loads(r.text)
if r.ok:
try:
return jsontext['geonames'][0]
except KeyError:
return None
return None
|
jwhui/openthread
|
tools/otci/otci/command_handlers.py
|
Python
|
bsd-3-clause
| 9,542
| 0.001782
|
#!/usr/bin/env python3
#
# Copyright (c) 2020, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import logging
import queue
import re
import threading
import time
from abc import abstractmethod
from typing import Any, Callable, Optional, Union, List, Pattern
from .connectors import OtCliHandler
from .errors import ExpectLineTimeoutError, CommandError
from .utils import match_line
class OTCommandHandler:
"""This abstract class defines interfaces of a OT Command Handler."""
@abstractmethod
def execute_command(self, cmd: str, timeout: float) -> List[str]:
"""Method execute_command should execute the OT CLI command within a timeout (in seconds) and return the
command output as a list of lines.
Note: each line SHOULD NOT contain '\r\n' at the end. The last line of output should be 'Done' or
'Error <code>: <msg>' following OT CLI conventions.
"""
pass
@abstractmethod
def close(self):
"""Method close should close the OT Command Handler."""
pass
@abstractmethod
def wait(self, duration: float) -> List[str]:
"""Method wait should wait for a given duration and return the OT CLI output during this period.
Normally, OT CLI does not output when it's not executing any command. But OT CLI can also output
asynchronously in some cases (e.g. `Join Success` when Joiner joins successfully).
"""
pass
@abstractmethod
def set_line_read_callback(self, callback: Optional[Callable[[str], Any]]):
"""Method set_line_read_callback should register a callback that will be called for every line
output by the OT CLI.
This is useful for handling asynchronous command output while still being able to execute
other commands.
"""
pass
def shell(self, cmd: str, timeout: float) -> List[str]:
raise NotImplementedError("shell command is not supported on %s" % self.__class__.__name__)
class OtCliCommandRunner(OTCommandHandler):
__PATTERN_COMMAND_DONE_OR_ERROR = re.compile(
r'(Done|Error|Error \d+:.*|.*: command not found)$') # "Error" for spinel-cli.py
__PATTERN_LOG_LINE = re.compile(r'((\[(NONE|CRIT|WARN|NOTE|INFO|DEBG)\])'
r'|(-.*-+: )' # e.g. -CLI-----:
r')')
"""regex used to filter logs"""
__ASYNC_COMMANDS = {'scan', 'ping', 'discover'}
def __init__(self, otcli: OtCliHandler, is_spinel_cli=False):
self.__otcli: OtCliHandler = otcli
self.__is_spinel_cli = is_spinel_cli
self.__expect_command_echoback = not self.__is_spinel_cli
self.__line_read_callback = None
self.__pending_lines = queue.Queue()
self.__should_close = threading.Event()
self.__otcli_reader = threading.Thread(target=self.__otcli_read_routine)
self.__otcli_reader.setDaemon(True)
self.__otcli_reader.start()
def __repr__(self):
return repr(self.__otcli)
def execute_command(self, cmd, timeout=10) -> List[str]:
assert not self.__should_close.is_set(), "OT CLI is already closed."
self.__otcli.writeline(cmd)
if cmd in ('reset', 'factoryreset'):
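            # reset produces no 'Done' line; give the device time to reboot,
            # then issue a harmless command so subsequent reads stay in sync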
self.wait(3)
self.__otcli.writeline('extaddr')
self.wait(1)
return []
if self.__expect_command_echoback:
self.__expect_line(timeout, cmd)
output = self.__expect_line(timeout,
OtCliCommandRunner.__PATTERN_COMMAND_DONE_OR_ERROR,
asynchronous=cmd.split()[0] in OtCliCommandRunner.__ASYNC_COMMANDS)
return output
def wait(self, duration: float) -> List[str]:
self.__otcli.wait(duration)
output = []
try:
while True:
line = self.__pending_lines.get_nowait()
output.append(line)
except queue.Empty:
pass
return output
def close(self):
self.__should_close.set()
self.__otcli.close()
self.__otcli_reader.join()
def set_line_read_callback(self, callback: Optional[Callable[[str], Any]]):
self.__line_read_callback = callback
#
# Private methods
#
def __expect_line(self, timeout: float, expect_line: Union[str, Pattern], asynchronous=False) -> List[str]:
output = []
if not asynchronous:
while True:
try:
line = self.__pending_lines.get(timeout=timeout)
except queue.Empty:
raise ExpectLineTimeoutError(expect_line)
output.append(line)
if match_line(line, expect_line):
break
else:
done = False
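            # asynchronous commands (scan/ping/discover) emit output over
            # time, so poll in one-second slices until the end marker appears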
while not done and timeout > 0:
lines = self.wait(1)
timeout -= 1
for line in lines:
output.append(line)
if match_line(line, expect_line):
done = True
break
if not done:
raise ExpectLineTimeoutError(expect_line)
return output
def __otcli_read_routine(self):
while not self.__should_close.is_set():
try:
line = self.__otcli.readline()
except Exception:
if self.__should_close.is_set():
break
else:
raise
logging.debug('%s: %r', self.__otcli, line)
if line is None:
break
if line.startswith('> '):
line = line[2:]
if self.__line_read_callback is not None:
self.__line_read_callback(line)
logging.debug('%s: %s', self.__otcli, line)
if not OtCliCommandRunner.__PATTERN_LOG_LINE.match(line):
self.__pending_lines.put(line)
class OtbrSshCommandRunner(OTCommandHandler):
def __init__(self, host, port, username, password, sudo):
import paramiko
self.__host = host
self.__port = port
self.__sudo = sudo
self.__ssh = paramiko.SSHClient()
self.__ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self.__line_read_callback = None
try:
self.__ssh.connect(host,
port=port,
username=username,
password=password,
allow_agent=False,
look_for_keys=False)
|
h01ger/voctomix
|
voctocore/lib/controlserver.py
|
Python
|
mit
| 5,365
| 0
|
import logging
from queue import Queue
from gi.repository import GObject
from lib.commands import ControlServerCommands
from lib.tcpmulticonnection import TCPMultiConnection
from lib.response import NotifyResponse
class ControlServer(TCPMultiConnection):
def __init__(self, pipeline):
'''Initialize server and start listening.'''
self.log = logging.getLogger('ControlServer')
super().__init__(port=9999)
self.command_queue = Queue()
self.commands = ControlServerCommands(pipeline)
def on_accepted(self, conn, addr):
'''Asynchronous connection listener.
Starts a handler for each connection.'''
self.log.debug('setting gobject io-watch on connection')
GObject.io_add_watch(conn, GObject.IO_IN, self.on_data, [''])
def on_data(self, conn, _, leftovers, *args):
'''Asynchronous connection handler.
Pushes data from socket into command queue linewise'''
close_after = False
try:
while True:
try:
leftovers.append(conn.recv(4096).decode(errors='replace'))
if len(leftovers[-1]) == 0:
self.log.info("Socket was closed")
leftovers.pop()
close_after = True
break
except UnicodeDecodeError as e:
continue
except BlockingIOError:
pass
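            # stitch buffered chunks together and process complete lines;
            # any trailing partial line is pushed back into leftovers below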
data = "".join(leftovers)
del leftovers[:]
lines = data.split('\n')
for line in lines[:-1]:
self.log.debug("got line: %r", line)
line = line.strip()
# 'quit' = remote wants us to close the connection
if line == 'quit' or line == 'exit':
self.log.info("Client asked us to close the Connection")
self.close_connection(conn)
return False
self.log.debug('re-starting on_loop scheduling')
GObject.idle_add(self.on_loop)
self.command_queue.put((line, conn))
if close_after:
self.close_connection(conn)
return False
if lines[-1] != '':
self.log.debug("remaining %r", lines[-1])
leftovers.append(lines[-1])
return True
def on_loop(self):
'''Command handler. Processes commands in the command queue whenever
nothing else is happening (registered as GObject idle callback)'''
self.log.debug('on_loop called')
if self.command_queue.empty():
self.log.debug('command_queue is empty again, '
'stopping on_loop scheduling')
return False
line, requestor = self.command_queue.get()
words = line.split()
if len(words) < 1:
self.log.debug('command_queue is empty again, '
'stopping on_loop scheduling')
return True
command = words[0]
args = words[1:]
self.log.info("processing command %r with args %s", command, args)
response = None
try:
# deny calling private methods
if command[0] == '_':
self.log.info('private methods are not callable')
raise KeyError()
command_function = self.commands.__class__.__dict__[command]
except KeyError as e:
self.log.info("received unknown command %s", command)
response = "error unknown command %s\n" % command
else:
try:
responseObject = command_function(self.commands, *args)
except Exception as e:
message = str(e) or "<no message>"
response = "error %s\n" % message
else:
if isinstance(responseObject, NotifyResponse):
responseObject = [responseObject]
if isinstance(responseObject, list):
for obj in responseObject:
signal = "%s\n" % str(obj)
for conn in self.currentConnections:
self._schedule_write(conn, signal)
else:
response = "%s\n" % str(responseObject)
finally:
if response is not None and requestor in self.currentConnections:
self._schedule_write(requestor, response)
return False
def _schedule_write(self, conn, message):
queue = self.currentConnections[conn]
self.log.debug('re-starting on_write[%u] scheduling', conn.fileno())
GObject.io_add_watch(conn, GObject.IO_OUT, self.on_write)
queue.put(message)
def on_write(self, conn, *args):
self.log.debug('on_write[%u] called', conn.fileno())
try:
queue = self.currentConnections[conn]
except KeyError:
return False
if queue.empty():
self.log.debug('write_queue[%u] is empty again, '
'stopping on_write scheduling',
conn.fileno())
return False
message = queue.get()
try:
conn.send(message.encode())
except Exception as e:
self.log.warning('failed to send message', exc_info=True)
return True
|
manojngb/Crazyfly_simple_lift
|
src/cfclient/utils/input/inputinterfaces/__init__.py
|
Python
|
gpl-2.0
| 3,515
| 0
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# || ____ _ __
# +------+ / __ )(_) /_______________ _____ ___
# | 0xBC | / __ / / __/ ___/ ___/ __ `/_ / / _ \
# +------+ / /_/ / / /_/ /__/ / / /_/ / / /_/ __/
# || || /_____/_/\__/\___/_/ \__,_/ /___/\___/
#
# Copyright (C) 2014 Bitcraze AB
#
# Crazyflie Nano Quadcopter Client
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
"""
Find all the available input interfaces and try to initialize them.
"""
import os
import glob
import logging
from ..inputreaderinterface import InputReaderInterface
__author__ = 'Bitcraze AB'
__all__ = ['InputInterface']
logger = logging.getLogger(__name__)
found_interfaces = [os.path.splitext(os.path.basename(f))[0] for f in
glob.glob(os.path.dirname(__file__) + "/[A-Za-z]*.py")]
if len(found_interfaces) == 0:
found_interfaces = [os.path.splitext(os.path.basename(f))[0] for
f in glob.glob(os.path.dirname(__file__) +
"/[A-Za-z]*.pyc")]
logger.info("Found interfaces: {}".format(found_interfaces))
initialized_interfaces = []
available_interfaces = []
for interface in found_interfaces:
try:
module = __import__(interface, globals(), locals(), [interface], 1)
main_name = getattr(module, "MODULE_MAIN")
initialized_interfaces.append(getattr(module, main_name)())
logger.info("Successfully initialized [{}]".format(interface))
except Exception as e:
logger.info("Could not initialize [{}]: {}".format(interface, e))
def devices():
# Todo: Support rescanning and adding/removing devices
if len(available_interfaces) == 0:
for reader in initialized_interfaces:
devs = reader.devices()
for dev in devs:
available_interfaces.append(InputInterface(
dev["name"], dev["id"], reader))
return available_interfaces
class InputInterface(InputReaderInterface):
def __init__(self, dev_name, dev_id, dev_reader):
super(InputInterface, self).__init__(dev_name, dev_id, dev_reader)
# These devices cannot be mapped and configured
self.supports_mapping = False
# Ask the reader if it wants to limit
# roll/pitch/yaw/thrust for all devices
self.limit_rp = dev_reader.limit_rp
self.limit_thrust = dev_reader.limit_thrust
self.limit_yaw = dev_reader.limit_yaw
def open(self):
self._reader.open(self.id)
def close(self):
self._reader.close(self.id)
def read(self, include_raw=False):
mydata = self._reader.read(self.id)
# Merge interface returned data into InputReader Data Item
for key in list(mydata.keys()):
self.data.set(key, mydata[key])
return self.data
|
andreasplesch/QGIS-X3D-Processing
|
scripts/generate_X3D_Shape.py
|
Python
|
gpl-3.0
| 495
| 0.032323
|
##X3D=group
##X3D Shape node from Appearance and Geometry=name
##X3D_Geometry_file=file
##appearance=string <Appearance><Material></Material></Appearance>
##output_X3D_Shape_file=output file
out=open(output_X3D_Shape_file,'w')
geofile=open(X3D_Geometry_file,'r')
# no error checking, use elementtree later
out.write( '<Shape>\n' )
out.write( appearance+'\n' )
for g in geofile: out.write(g) # just write incrementally since may be large
out.write( '</Shape>\n')
geofile.close()
out.close()
|
khchine5/xl
|
lino_xl/lib/ledger/models.py
|
Python
|
bsd-2-clause
| 32,456
| 0.003143
|
# -*- coding: UTF-8 -*-
# Copyright 2008-2018 Rumma & Ko Ltd
# License: BSD (see file COPYING for details)
"""Database models for this plugin.
"""
from __future__ import unicode_literals, print_function
import six
from builtins import str
import logging
logger = logging.getLogger(__name__)
import datetime
from dateutil.relativedelta import relativedelta
from django.db import models
from atelier.utils import last_day_of_month
from lino.api import dd, rt, _
from lino import mixins
from lino.utils import mti
from etgen.html import E
from lino.mixins.periods import DateRange
from lino.modlib.users.mixins import UserAuthored
from lino.modlib.printing.mixins import PrintableType
from lino.modlib.checkdata.choicelists import Checker
from lino_xl.lib.accounts.utils import DEBIT, CREDIT, ZERO
from lino_xl.lib.accounts.fields import DebitOrCreditField
from lino_xl.lib.contacts.choicelists import PartnerEvents
from lino.modlib.system.choicelists import ObservedEvent
from .utils import get_due_movements, check_clearings_by_partner
from .choicelists import (FiscalYears, VoucherTypes, VoucherStates,
PeriodStates, JournalGroups, TradeTypes)
from .mixins import ProjectRelated, VoucherNumber, JournalRef, PeriodRangeObservable
from .roles import VoucherSupervisor
# from .mixins import FKMATCH
from .ui import *
class LedgerInfo(dd.Model):
class Meta:
app_label = 'ledger'
allow_cascaded_delete = 'user'
user = dd.OneToOneField('users.User', primary_key=True)
entry_date = models.DateField(
_("Last entry date"), null=True, blank=True)
@classmethod
def get_for_user(cls, user):
try:
return cls.objects.get(user=user)
except cls.DoesNotExist:
return cls(user=user)
@dd.python_2_unicode_compatible
class Journal(mixins.BabelNamed,
mixins.Sequenced,
mixins.Referrable,
PrintableType):
class Meta:
app_label = 'ledger'
verbose_name = _("Journal")
verbose_name_plural = _("Journals")
trade_type = TradeTypes.field(blank=True)
voucher_type = VoucherTypes.field()
journal_group = JournalGroups.field()
auto_check_clearings = models.BooleanField(
_("Check clearing"), default=True)
auto_fill_suggestions = models.BooleanField(
_("Fill suggestions"), default=True)
force_sequence = models.BooleanField(
_("Force chronological sequence"), default=False)
account = dd.ForeignKey('accounts.Account', blank=True, null=True)
partner = dd.ForeignKey('contacts.Company', blank=True, null=True)
printed_name = dd.BabelCharField(
_("Printed document designation"), max_length=100, blank=True)
dc = DebitOrCreditField(_("Primary booking direction"))
yearly_numbering = models.BooleanField(
_("Yearly numbering"), default=True)
must_declare = models.BooleanField(default=True)
# invert_due_dc = models.BooleanField(
# _("Invert booking direction"),
# help_text=_("Whether to invert booking direction of due movement."),
# default=True)
def get_doc_model(self):
"""The model of vouchers in this Journal.
"""
# print self,DOCTYPE_CLASSES, self.doctype
return self.voucher_type.model
#~ return DOCTYPES[self.doctype][0]
def get_doc_report(self):
return self.voucher_type.table_class
#~ return DOCTYPES[self.doctype][1]
def get_voucher(self, year=None, number=None, **kw):
cl = self.get_doc_model()
kw.update(journal=self, accounting_period__year=year, number=number)
return cl.objects.get(**kw)
def create_voucher(self, **kw):
"""Create an instance of this Journal's voucher model
(:meth:`get_doc_model`).
"""
cl = self.get_doc_model()
kw.update(journal=self)
try:
doc = cl()
# ~ doc = cl(**kw) # wouldn't work. See Django ticket #10808
#~ doc.journal = self
for k, v in kw.items():
setattr(doc, k, v)
#~ print 20120825, kw
except TypeError:
#~ print 20100804, cl
raise
doc.on_create(None)
#~ doc.full_clean()
#~ doc.save()
return doc
def get_allowed_accounts(self, **kw):
if self.trade_type:
return self.trade_type.get_allowed_accounts(**kw)
# kw.update(chart=self.chart)
return rt.models.accounts.Account.objects.filter(**kw)
def get_next_number(self, voucher):
# ~ self.save() # 20131005 why was this?
cl = self.get_doc_model()
flt = dict()
if self.yearly_numbering:
flt.update(accounting_period__year=voucher.accounting_period.year)
d = cl.objects.filter(journal=self, **flt).aggregate(
models.Max('number'))
number = d['number__max']
#~ logger.info("20121206 get_next_number %r",number)
if number is None:
return 1
return number + 1
def __str__(self):
# s = super(Journal, self).__str__()
s = dd.babelattr(self, 'name')
if self.ref:
s += " (%s)" % self.ref
#~ return '%s (%s)' % (d.BabelNamed.__unicode__(self),self.ref or self.id)
return s
#~ return self.ref +'%s (%s)' % mixins.BabelNamed.__unicode__(self)
#~ return self.id +' (%s)' % mixins.BabelNamed.__unicode__(self)
def save(self, *args, **kw):
#~ self.before_save()
r = super(Journal, self).save(*args, **kw)
self.after_save()
return r
def after_save(self):
pass
def full_clean(self, *args, **kw):
if self.dc is None:
if self.trade_type:
self.dc = self.trade_type.dc
elif self.account:
self.dc = self.account.type.dc
else:
self.dc = DEBIT # cannot be NULL
if not self.name:
self.name = self.id
#~ if not self.pos:
#~ self.pos = self.__class__.objects.all().count() + 1
super(Journal, self).full_clean(*args, **kw)
def disable_voucher_delete(self, doc):
# print "pre_delete_voucher", doc.number, self.get_next_number()
if self.force_sequence:
if doc.number + 1 != self.get_next_number(doc):
return _("%s is not the last voucher in journal"
% str(doc))
def get_template_groups(self):
"""Here we override the class method by an instance method. This
means that we must also override all other methods of
Printable who call the *class* method. This is currently only
:meth:`template_choices`.
"""
return [self.voucher_type.model.get_template_group()]
@dd.chooser(simple_values=True)
def template_choices(cls, build_method, voucher_type):
# Overrides PrintableType.template_choices to not use the class
# method `get_template_groups`.
if not voucher_type:
return []
#~ print 20131006, voucher_type
template_groups = [voucher_type.model.get_template_group()]
return cls.get_template_choices(build_method, template_groups)
#
#
#
@dd.python_2_unicode_compatible
class AccountingPeriod(DateRange, mixins.Referrable):
class Meta:
app_label = 'ledger'
verbose_name = _("Accounting period")
verbose_name_plural = _("Accounting periods")
ordering = ['ref']
preferred_foreignkey_width = 10
state = PeriodStates.field(default=PeriodStates.as_callable('open'))
year = FiscalYears.field(blank=True)
remark = models.CharField(_("Remark"), max_length=250, blank=True)
@classmethod
def get_available_periods(cls, entry_date):
"""Return a queryset of peruiods available for booking."""
if entry_date is None: # added 20160531
entry_date = dd.today()
fkw = dict(start_date__lte=entry_date, end_date__gte=entry_date)
return rt.models.ledger.AccountingPeriod.objects.filter(**fkw)
@classmethod
|
KMFleischer/PyEarthScience
|
Visualization/PyNGL/vectors_simple_PyNGL.py
|
Python
|
mit
| 3,506
| 0.042499
|
"""
PyEarthScience: PyNGL vector example
- vectors on map plot
- rectilinear grid (lat/lon)
09.10.15 kmf
"""
import Ngl,Nio
#-- define variables
diri = "/Users/k204045/NCL/general/data/new_data/" #-- data directory
fname = "rectilinear_grid_2D.nc" #-- data file name
minval = 250. #-- minimum contour level
maxval = 315 #-- maximum contour level
inc = 5. #-- contour level spacing
#-- open file and read variables
f = Nio.open_file(diri + fname,"r") #-- open data file
temp = f.variables["tsurf"][0,::-1,:] #-- first time step, reverse latitude
u = f.variables["u10"][0,::-1,:] #-- first time step, reverse latitude
v = f.variables["v10"][0,::-1,:] #-- first time step, reverse latitude
lat = f.variables["lat"][::-1] #-- reverse latitudes
lon = f.variables["lon"][:] #-- all longitudes
nlon = len(lon) #-- number of longitudes
nlat = len(lat) #-- number of latitudes
#-- open a workstation
wkres = Ngl.Resources() #-- generate an resources object for workstation
wks_type = "x11" #-- graphics output type
wks = Ngl.open_wks(wks_type,"plot_vector_PyNGL",wkres)
#-- create 1st plot: vectors on global map
res = Ngl.Resources()
res.vfXCStartV = float(lon[0]) #-- minimum longitude
res.vfXCEndV = float(lon[len(lon[:])-1]) #-- maximum longitude
res.vfYCStartV = float(lat[0]) #-- minimum latitude
res.vfYCEndV = float(lat[len(lat[:])-1]) #-- maximum latitude
res.tiMainString = "~F25~Wind velocity vectors" #-- title string
res.tiMainFontHeightF = 0.024 #-- decrease title font size
res.mpLimitMode = "Corners" #-- select a sub-region
res.mpLeftCornerLonF = float(lon[0]) #-- left longitude value
res.mpRightCornerLonF = float(lon[len(lon[:])-1]) #-- right longitude value
res.mpLeftCornerLatF = float(lat[0]) #-- left latitude value
res.mpRightCornerLatF = float(lat[len(lat[:])-1]) #-- right latitude value
res.mpGridSpacingF = 30 #-- map grid spacing
res.mpPerimOn = True #-- turn on map perimeter
res.vpXF = 0.1 #-- viewport x-position
res.vpYF = 0.92 #-- viewport y-position
res.vpWidthF = 0.75 #-- viewport width
res.vpHeightF = 0.75 #-- viewport height
res.vcMinFracLengthF = 0.15 #-- increase length of vectors
res.vcMinMagnitudeF = 0.0007 #-- increase length of vectors
res.vcRefLengthF = 0.025 #-- set reference vector length
res.vcRefMagnitudeF = 20.0 #-- set reference magnitude value
res.vcLineArrowThicknessF = 1.0 #-- make vector lines thicker (default: 1.0)
map1 = Ngl.vector_map(wks,u[::2,::2],v[::2,::2],res) #-- draw a vector plot
#-- the end
Ngl.end()
|
rhd/meson
|
mesonbuild/modules/qt4.py
|
Python
|
apache-2.0
| 7,453
| 0.00161
|
# Copyright 2015 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from .. import mlog
from .. import build
from ..mesonlib import MesonException, Popen_safe
from ..dependencies import Qt4Dependency
from . import ExtensionModule
import xml.etree.ElementTree as ET
from . import ModuleReturnValue
class Qt4Module(ExtensionModule):
tools_detected = False
def _detect_tools(self, env, method):
if self.tools_detected:
return
mlog.log('Detecting Qt4 tools')
# FIXME: We currently require Qt4 to exist while importing the module.
# We should make it gracefully degrade and not create any targets if
# the import is marked as 'optional' (not implemented yet)
kwargs = {'required': 'true', 'modules': 'Core', 'silent': 'true', 'method': method}
qt4 = Qt4Dependency(env, kwargs)
# Get all tools and then make sure that they are the right version
self.moc, self.uic, self.rcc = qt4.compilers_detect()
# Moc, uic and rcc write their version strings to stderr.
# Moc and rcc return a non-zero result when doing so.
# What kind of an idiot thought that was a good idea?
if self.moc.found():
stdout, stderr = Popen_safe(self.moc.get_command() + ['-v'])[1:3]
stdout = stdout.strip()
stderr = stderr.strip()
if 'Qt Meta' in stderr:
moc_ver = stderr
else:
raise MesonException('Moc preprocessor is not for Qt 4. Output:\n%s\n%s' %
(stdout, stderr))
mlog.log(' moc:', mlog.green('YES'), '(%s, %s)' %
(self.moc.get_path(), moc_ver.split()[-1]))
else:
mlog.log(' moc:', mlog.red('NO'))
if self.uic.found():
stdout, stderr = Popen_safe(self.uic.get_command() + ['-v'])[1:3]
stdout = stdout.strip()
stderr = stderr.strip()
if 'version 4.' in stderr:
uic_ver = stderr
else:
raise MesonException('Uic compiler is not for Qt4. Output:\n%s\n%s' %
(stdout, stderr))
mlog.log(' uic:', mlog.green('YES'), '(%s, %s)' %
(self.uic.get_path(), uic_ver.split()[-1]))
else:
mlog.log(' uic:', mlog.red('NO'))
if self.rcc.found():
stdout, stderr = Popen_safe(self.rcc.get_command() + ['-v'])[1:3]
stdout = stdout.strip()
stderr = stderr.strip()
if 'version 4.' in stderr:
rcc_ver = stderr
else:
raise MesonException('Rcc compiler is not for Qt 4. Output:\n%s\n%s' %
(stdout, stderr))
mlog.log(' rcc:', mlog.green('YES'), '(%s, %s)'
% (self.rcc.get_path(), rcc_ver.split()[-1]))
else:
mlog.log(' rcc:', mlog.red('NO'))
self.tools_detected = True
def parse_qrc(self, state, fname):
abspath = os.path.join(state.environment.source_dir, state.subdir, fname)
relative_part = os.path.split(fname)[0]
try:
tree = ET.parse(abspath)
root = tree.getroot()
result = []
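            # a .qrc file looks like <RCC><qresource><file>...</file></qresource></RCC>;
            # root[0] is the first <qresource> element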
for child in root[0]:
if child.tag != 'file':
mlog.warning("malformed rcc file: ", os.path.join(state.subdir, fname))
break
else:
result.append(os.path.join(state.subdir, relative_part, child.text))
return result
except Exception:
return []
def preprocess(self, state, args, kwargs):
rcc_files = kwargs.pop('qresources', [])
if not isinstance(rcc_files, list):
rcc_files = [rcc_files]
ui_files = kwargs.pop('ui_files', [])
if not isinstance(ui_files, list):
ui_files = [ui_files]
moc_headers = kwargs.pop('moc_headers', [])
if not isinstance(moc_headers, list):
moc_headers = [moc_headers]
moc_sources = kwargs.pop('moc_sources', [])
if not isinstance(moc_sources, list):
moc_sources = [moc_sources]
sources = kwargs.pop('sources', [])
if not isinstance(sources, list):
sources = [sources]
sources += args[1:]
method = kwargs.get('method', 'auto')
self._detect_tools(state.environment, method)
err_msg = "{0} sources specified and couldn't find {1}, " \
"please check your qt4 installation"
if len(moc_headers) + len(moc_sources) > 0 and not self.moc.found():
raise MesonException(err_msg.format('MOC', 'moc-qt4'))
if len(rcc_files) > 0:
if not self.rcc.found():
raise MesonException(err_msg.format('RCC', 'rcc-qt4'))
qrc_deps = []
for i in rcc_files:
qrc_deps += self.parse_qrc(state, i)
if len(args) > 0:
name = args[0]
else:
basename = os.path.split(rcc_files[0])[1]
name = 'qt4-' + basename.replace('.', '_')
rcc_kwargs = {'input': rcc_files,
                      'output': name + '.cpp',
'command': [self.rcc, '-o', '@OUTPUT@', '@INPUT@'],
'depend_files': qrc_deps}
res_target = build.CustomTarget(name, state.subdir, rcc_kwargs)
sources.append(res_target)
if len(ui_files) > 0:
if not self.uic.found():
raise MesonException(err_msg.format('UIC', 'uic-qt4'))
ui_kwargs = {'output': 'ui_@BASENAME@.h',
'arguments': ['-o', '@OUTPUT@', '@INPUT@']}
ui_gen = build.Generator([self.uic], ui_kwargs)
ui_output = ui_gen.process_files('Qt4 ui', ui_files, state)
sources.append(ui_output)
if len(moc_headers) > 0:
moc_kwargs = {'output': 'moc_@BASENAME@.cpp',
'arguments': ['@INPUT@', '-o', '@OUTPUT@']}
moc_gen = build.Generator([self.moc], moc_kwargs)
moc_output = moc_gen.process_files('Qt4 moc header', moc_headers, state)
sources.append(moc_output)
if len(moc_sources) > 0:
moc_kwargs = {'output': '@BASENAME@.moc',
'arguments': ['@INPUT@', '-o', '@OUTPUT@']}
moc_gen = build.Generator([self.moc], moc_kwargs)
moc_output = moc_gen.process_files('Qt4 moc source', moc_sources, state)
sources.append(moc_output)
return ModuleReturnValue(sources, sources)
def initialize():
mlog.warning('rcc dependencies will not work properly until this upstream issue is fixed:',
mlog.bold('https://bugreports.qt.io/browse/QTBUG-45460'))
return Qt4Module()
|