| column | dtype | range |
|---|---|---|
| repo_name | string | lengths 5–100 |
| path | string | lengths 4–231 |
| language | string | 1 distinct value |
| license | string | 15 distinct values |
| size | int64 | 6–947k |
| score | float64 | 0–0.34 |
| prefix | string | lengths 0–8.16k |
| middle | string | lengths 3–512 |
| suffix | string | lengths 0–8.17k |
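Each row stores one Python source file split into contiguous `prefix`, `middle`, and `suffix` slices (a fill-in-the-middle style layout), so the full file text can be recovered by concatenating the three fields in order. Below is a minimal sketch of that reassembly, assuming rows are available as plain Python mappings keyed by the column names above; the `sample_row` values are illustrative stand-ins, not verbatim dataset content.

```python
def reassemble(row):
    """Rebuild the full file text from one dataset row.

    Assumes row is a mapping whose 'prefix', 'middle' and 'suffix'
    entries are contiguous string slices of the original file.
    """
    return row["prefix"] + row["middle"] + row["suffix"]


# Illustrative example only (shortened, hypothetical field values).
sample_row = {
    "repo_name": "lv10/bestbuyapi",
    "path": "bestbuyapi/api/bulk.py",
    "prefix": "import json\n",
    "middle": "import zipfile\n",
    "suffix": "from io import BytesIO\n",
}
print(reassemble(sample_row))  # prints the reassembled source text
```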

lv10/bestbuyapi | bestbuyapi/api/bulk.py | Python | mit | 3,123 | 0.000961
import json
import zipfile
from io import BytesIO
from ..constants import BULK_API
from ..api.base import BestBuyCore
from ..utils.exceptions import BestBuyBulkAPIError
class BestBuyBulkAPI(BestBuyCore):
def _api_name(self):
return BULK_API
def archive(self, name, file_format):
"""BestBuy generates Bulk files (archives) daily at 9:00 AM CST.
:params:
:name (str): Archive type. The type supported by BestBuy's API are:
- products
- stores
- reviews
- categories
- storeAvailability
:file_format (str): File format in which the archive is to be downloaded.
- {xml or json}: Products, Reviews, Stores, and Categories
- {tsv} for Store Availability
:returns: Unzipped files from Best Buy's API response
:rType: dict
BestBuy bulk docs:
- https://developer.bestbuy.com/documentation/bulkDownload-api
"""
payload = {"query": f"{name}.{file_format}.zip", "params": {}}
response = self._call(payload)
return self._load_zipped_response(response, file_format)
def archive_subset(self, subset, file_format):
"""Bulk files (archives) are generated every day at 9 AM by BestBuy.
:params:
:name (str): Archive type. The archive type supported are:
- productsActive
- productsInactive (Currently empty or deprecated)
- productsMusic (Currently empty or deprecated)
- productsMovie (Currently empty or deprecated)
- productsHardgood (Currently empty or deprecated)
- productsBundle (Currently empty or deprecated)
- productsGame (Currently empty or deprecated)
- productsSoftware (Currently empty or deprecated)
- productsBlackTie (Currently empty or deprecated)
- productsMarketplace (Currently empty or deprecated)
- productsDigital (Currently empty or deprecated)
:file_format (str): File format in which the archive is to be downloaded.
- xml
- json
BestBuy product subsets bulk docs:
- https://developer.bestbuy.com/documentation/bulkDownload-api
"""
payload = {"query": f"subsets/{subset}.{file_format}.zip", "params": {}}
response = self._call(payload)
return self._load_zipped_response(response, file_format)
def _load_zipped_response(self, zipped_response, file_format):
if zipfile.is_zipfile(BytesIO(zipped_response)):
with zipfile.ZipFile(BytesIO(zipped_response), "r") as z:
out = {}
for filename in z.namelist():
with z.open(filename) as f:
data = f.read()
if file_format == "json":
out[filename] = json.loads(data)
else:
out[filename] = data
return out

thopiekar/Uranium | UM/Math/Ray.py | Python | lgpl-3.0 | 767 | 0.006519
# Copyright (c) 2015 Ultimaker B.V.
# Uranium is released under the terms of the LGPLv3 or higher.
from UM.Math.Vector import Vector
class Ray:
def __init__(self, origin = Vector(), direction = Vector()):
self._origin = origin
self._direction = direction
self._inverse_direction = 1.0 / direction
@property
def origin(self):
return self._origin
@property
def direction(self):
return self._direction
@property
def inverseDirection(self):
return self._inverse_direction
def getPointAlongRay(self, distance):
return self._origin + (self._direction * distance)
def __repr__(self):
return "Ray(origin = {0}, direction = {1})".format(self._origin, self._direction)

mozilla/BanHammer | BanHammer/blacklist/views/zlb.py | Python | bsd-3-clause | 9,627 | 0.006232
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.core.exceptions import ObjectDoesNotExist
from django.views.decorators.cache import never_cache
from django.http import HttpResponse, HttpResponseRedirect
from session_csrf import anonymous_csrf
from ..models import ZLB, ZLBVirtualServer, ZLBVirtualServerRule, ZLBVirtualServerProtection
from ..models import ZLBRule, ZLBProtection, Offender, ZLBVirtualServerPref
from ..forms import ZLBForm, VirtualServerConfirm
from BanHammer.blacklist.management import zeus
import BanHammer.blacklist.tasks as tasks
from BanHammer import settings
@anonymous_csrf
@never_cache
def index(request, zlb=None, action=None):
request.session['order_by'] = request.GET.get('order_by', 'hostname')
request.session['order'] = request.GET.get('order', 'asc')
order_by = request.session.get('order_by', 'address')
order = request.session.get('order', 'asc')
zlbs = ZLB.objects.all()
if order_by == 'created_date':
zlbs = sorted(list(zlbs), key=lambda zlb: zlb.created_date)
elif order_by == 'updated_date':
zlbs = sorted(list(zlbs), key=lambda zlb: zlb.updated_date)
elif order_by == 'name':
zlbs = sorted(list(zlbs), key=lambda zlb: zlb.name)
elif order_by == 'hostname':
zlbs = sorted(list(zlbs), key=lambda zlb: zlb.hostname)
elif order_by == 'datacenter':
zlbs = sorted(list(zlbs), key=lambda zlb: zlb.datacenter)
if order == 'desc':
zlbs.reverse()
data = {'zlbs': zlbs}
if action == 'update':
data['zlb'] = zlb
data['action'] = 'update'
data['testing_env'] = settings.TESTING_ENV
return render_to_response(
'zlb/index.html',
data,
context_instance = RequestContext(request)
)
@anonymous_csrf
def new(request):
if request.method == 'POST':
form = ZLBForm(request.POST)
if form.is_valid():
name = form.cleaned_data['name']
hostname = form.cleaned_data['hostname']
datacenter = form.cleaned_data['datacenter']
doc_url = form.cleaned_data['doc_url']
login = form.cleaned_data['login']
password = form.cleaned_data['password']
comment = form.cleaned_data['comment']
zlb = ZLB(
name=name,
hostname=hostname,
datacenter=datacenter,
doc_url=doc_url,
login=login,
password=password,
comment=comment,
)
zlb.save()
return HttpResponseRedirect('/zlbs')
else:
form = ZLBForm()
return render_to_response(
'zlb/new.html',
{'form': form},
context_instance = RequestContext(request)
)
@anonymous_csrf
def edit(request, id):
if request.method == 'POST':
form = ZLBForm(request.POST)
if form.is_valid():
zlb = ZLB.objects.get(id=id)
zlb.name = form.cleaned_data['name']
zlb.hostname = form.cleaned_data['hostname']
zlb.datacenter = form.cleaned_data['datacenter']
zlb.doc_url = form.cleaned_data['doc_url']
zlb.comment = form.cleaned_data['comment']
zlb.login = form.cleaned_data['login']
if form.cleaned_data['password']:
zlb.password = form.cleaned_data['password']
zlb.save()
return HttpResponseRedirect('/zlbs')
else:
initial = ZLB.objects.get(id=id)
initial = initial.__dict__
id = initial['id']
initial['password'] = ''
form = ZLBForm(initial)
return render_to_response(
'zlb/edit.html',
{'form': form, 'id': id},
context_instance = RequestContext(request)
)
@anonymous_csrf
def delete(request, id):
zlb = ZLB.objects.get(id=id)
zlb.delete()
return HttpResponseRedirect('/zlbs')
@anonymous_csrf
@never_cache
def show(request, id):
zlb = ZLB.objects.get(id=id)
if zlb.updating:
return render_to_response(
'zlb/updating.html',
{'zlb': zlb,},
context_instance = RequestContext(request)
)
vs = ZLBVirtualServer.objects.filter(zlb_id=zlb.id)
prefs_o = ZLBVirtualServerPref.objects.filter(zlb=zlb)
prefs = {}
for p in prefs_o:
prefs[p.vs_name] = p
pr = {}
rul = {}
return render_to_response(
'zlb/show.html',
{'zlb': zlb,
'prefs': prefs,
'vs': vs,
'testing_env': settings.TESTING_ENV,},
context_instance = RequestContext(request)
)
@anonymous_csrf
@never_cache
def update(request, id):
tasks.update_zlb.delay(id)
zlb = ZLB.objects.get(id=id)
return HttpResponseRedirect('/zlbs')
def _parse_addr(addresses):
addr_list = addresses.split(', ')
addresses = []
for addr in addr_list:
network = addr.split('/')
addr = network[0]
if len(network) == 2:
cidr = network[1]
else:
cidr = None
if cidr:
offender = Offender.objects.filter(address=addr, cidr=cidr)
else:
offender = Offender.objects.filter(address=addr)
if offender.count() != 0:
addresses.append(offender[0])
else:
addresses.append(addr)
return addresses
@anonymous_csrf
def index_protection(request, zlb_id):
zlb = ZLB.objects.get(id=zlb_id)
protections = ZLBProtection.objects.filter(zlb_id=zlb_id)
for p in protections:
p.allowed_addresses = _parse_addr(p.allowed_addresses)
p.banned_addresses = _parse_addr(p.banned_addresses)
p.virtual_servers = ZLBVirtualServerProtection.objects.filter(zlb_id=zlb_id, protection_id=p.id)
return render_to_response(
'zlb/protections.html',
{'zlb': zlb,
'protections': protections,},
context_instance = RequestContext(request)
)
@anonymous_csrf
def index_rules(request, zlb_id):
zlb = ZLB.objects.get(id=zlb_id)
rules = ZLBRule.objects.filter(zlb_id=zlb_id)
for rule in rules:
rule.virtual_servers = ZLBVirtualServerRule.objects.filter(zlb_id=zlb_id, rule_id=rule.id)
return render_to_response(
'zlb/rules.html',
{'zlb': zlb,
'rules': rules,},
context_instance = RequestContext(request)
)
@never_cache
@anonymous_csrf
def virtual_server(request, zlb_id, vs_id):
if request.method == 'POST':
form = VirtualServerConfirm(request.POST)
if form.is_valid():
confirm = form.cleaned_data['confirm']
vs = ZLBVirtualServer.objects.get(id=vs_id)
pref = ZLBVirtualServerPref.objects.filter(zlb_id=zlb_id,vs_name=vs.name)
if pref.count() == 0:
p = ZLBVirtualServerPref(
zlb_id=zlb_id,
vs_name=vs.name,
favorite=False,
confirm=confirm,
)
p.save()
else:
pref = pref[0]
pref.confirm = confirm
pref.save()
return HttpResponseRedirect('/zlb/%s/virtual_server/%s' % (zlb_id, vs_id))
else:
form = VirtualServerConfirm()
zlb = ZLB.objects.get(id=zlb_id)
virtual_server = ZLBVirtualServer.objects.get(id=vs_id)
prefs = ZLBVirtualServerPref.objects.filter(zlb=zlb,vs_name=virtual_server.name)
rules = ZLBVirtualServerRule.objects.filter(virtualserver=virtual_server)
protections = ZLBVirtualServerProtection.objects.filter(virtualserver=virtual_server)
for p in protections:
p.protection.allowed_addresses = _parse_addr(p.protection.allowed_addresses)
p.protection.banned_addresses = _parse_addr(p.protection.banned_addresses)
return render_to_response(
'zlb/virtual_server.html',
{'zlb': zlb,
'virtual_server': virtual_server,
'prefs': prefs,
'rules': rules,
'protections': protections,
'form'

igor-toga/local-snat | neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py | Python | apache-2.0 | 41,399 | 0.000121
#!/usr/bin/env python
# Copyright 2012 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
#
# Performs per host Linux Bridge configuration for Neutron.
# Based on the structure of the OpenVSwitch agent in the
# Neutron OpenVSwitch Plugin.
import sys
import netaddr
from neutron_lib import constants
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_service import service
from oslo_utils import excutils
from six import moves
from neutron._i18n import _LE, _LI, _LW
from neutron.agent.linux import bridge_lib
from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils
from neutron.agent import securitygroups_rpc as sg_rpc
from neutron.common import config as common_config
from neutron.common import exceptions
from neutron.common import profiler as setup_profiler
from neutron.common import topics
from neutron.common import utils as n_utils
from neutron.plugins.common import constants as p_const
from neutron.plugins.common import utils as p_utils
from neutron.plugins.ml2.drivers.agent import _agent_manager_base as amb
from neutron.plugins.ml2.drivers.agent import _common_agent as ca
from neutron.plugins.ml2.drivers.agent import config as cagt_config # noqa
from neutron.plugins.ml2.drivers.l2pop.rpc_manager \
import l2population_rpc as l2pop_rpc
from neutron.plugins.ml2.drivers.linuxbridge.agent import arp_protect
from neutron.plugins.ml2.drivers.linuxbridge.agent.common import config # noqa
from neutron.plugins.ml2.drivers.linuxbridge.agent.common \
import constants as lconst
from neutron.plugins.ml2.drivers.linuxbridge.agent.common \
import utils as lb_utils
LOG = logging.getLogger(__name__)
LB_AGENT_BINARY = 'neutron-linuxbridge-agent'
BRIDGE_NAME_PREFIX = "brq"
MAX_VLAN_POSTFIX_LEN = 5
VXLAN_INTERFACE_PREFIX = "vxlan-"
class LinuxBridgeManager(amb.CommonAgentManagerBase):
def __init__(self, bridge_mappings, interface_mappings):
super(LinuxBridgeManager, self).__init__()
self.bridge_mappings = bridge_mappings
self.interface_mappings = interface_mappings
self.validate_interface_mappings()
self.validate_bridge_mappings()
self.ip = ip_lib.IPWrapper()
# VXLAN related parameters:
self.local_ip = cfg.CONF.VXLAN.local_ip
self.vxlan_mode = lconst.VXLAN_NONE
if cfg.CONF.VXLAN.enable_vxlan:
device = self.get_local_ip_device()
self.validate_vxlan_group_with_local_ip()
self.local_int = device.name
self.check_vxlan_support()
def validate_interface_mappings(self):
for physnet, interface in self.interface_mappings.items():
if not ip_lib.device_exists(interface):
LOG.error(_LE("Interface %(intf)s for physical network %(net)s"
" does not exist. Agent terminated!"),
{'intf': interface, 'net': physnet})
sys.exit(1)
def validate_bridge_mappings(self):
for physnet, bridge in self.bridge_mappings.items():
if not ip_lib.device_exists(bridge):
LOG.error(_LE("Bridge %(brq)s for physical network %(net)s"
" does not exist. Agent terminated!"),
{'brq': bridge, 'net': physnet})
sys.exit(1)
def validate_vxlan_group_with_local_ip(self):
if not cfg.CONF.VXLAN.vxlan_group:
return
try:
ip_addr = netaddr.IPAddress(self.local_ip)
# Ensure the configured group address/range is valid and multicast
group_net = netaddr.IPNetwork(cfg.CONF.VXLAN.vxlan_group)
if not group_net.is_multicast():
raise ValueError()
if not ip_addr.version == group_net.version:
raise ValueError()
except (netaddr.core.AddrFormatError, ValueError):
LOG.error(_LE("Invalid VXLAN Group: %(group)s, must be an address "
"or network (in CIDR notation) in a multicast "
"range of the same address family as local_ip: "
"%(ip)s"),
{'group': cfg.CONF.VXLAN.vxlan_group,
'ip': self.local_ip})
sys.exit(1)
def get_local_ip_device(self):
"""Return the device with local_ip on the host."""
device = self.ip.get_device_by_ip(self.local_ip)
if not device:
LOG.error(_LE("Tunneling cannot be enabled without the local_ip "
"bound to an interface on the host. Please "
"configure local_ip %s on the host interface to "
"be used for tunneling and restart the agent."),
self.local_ip)
sys.exit(1)
return device
def get_existing_bridge_name(self, physical_network):
if not physical_network:
return None
return self.bridge_mappings.get(physical_network)
@staticmethod
def get_bridge_name(network_id):
if not network_id:
LOG.warning(_LW("Invalid Network ID, will lead to incorrect "
"bridge name"))
bridge_name = BRIDGE_NAME_PREFIX + \
network_id[:lconst.RESOURCE_ID_LENGTH]
return bridge_name
def get_subinterface_name(self, physical_interface, vlan_id):
if not vlan_id:
LOG.warning(_LW("Invalid VLAN ID, will lead to incorrect "
"subinterface name"))
vlan_postfix = '.%s' % vlan_id
# For the vlan subinterface name prefix we use:
# * the physical_interface, if len(physical_interface) +
# len(vlan_postifx) <= 15 for backward compatibility reasons
# Example: physical_interface = eth0
# prefix = eth0.1
# prefix = eth0.1111
#
# * otherwise a unique hash per physical_interface to help debugging
# Example: physical_interface = long_interface
# prefix = longHASHED.1
# prefix = longHASHED.1111
#
# Remark: For some physical_interface values, the used prefix can be
# both, the physical_interface itself or a hash, depending
# on the vlan_postfix length.
# Example: physical_interface = mix_interface
# prefix = mix_interface.1 (backward compatible)
# prefix = mix_iHASHED.1111
if (len(physical_interface) + len(vlan_postfix) >
constants.DEVICE_NAME_MAX_LEN):
physical_interface = p_utils.get_interface_name(
physical_interface, max_len=(constants.DEVICE_NAME_MAX_LEN -
MAX_VLAN_POSTFIX_LEN))
return "%s%s" % (physical_interface, vlan_postfix)
@staticmethod
def get_tap_device_name(interface_id):
return lb_utils.get_tap_device_name(interface_id)
def get_vxlan_device_name(self, segmentation_id):
if 0 <= int(segmentation_id) <= p_const.MAX_VXLAN_VNI:
return VXLAN_INTERFACE_PREFIX + str(segmentation_id)
else:
LOG.warning(_LW("Invalid Segmentation ID: %s, will lead to "
"incorrect vxlan device name"), segmentation_id)
def get_vxlan_group(self, segmentation_id):
net = netaddr.IPNetwork(cfg.CONF.VXLAN.vxlan_group)
# Map the segmentation ID to (one of) the group address(es)
return str(net.network +
(int(segmentation_id) & int(

plivo/plivo-python | tests/xml/test_recordElement.py | Python | mit | 1,989 | 0.003017
from unittest import TestCase
from plivo import plivoxml
from tests import PlivoXmlTestCase
class RecordElementTest(TestCase, PlivoXmlTestCase):
def test_set_methods(self):
expected_response = '<Response><Record action="https://foo.example.com" callbackMethod="GET" ' \
'callbackUrl="https://foo.example.com" fileFormat="wav" finishOnKey="#" ' \
'maxLength="10" method="GET" playBeep="false" recordSession="false" ' \
'redirect="false" startOnDialAnswer="false" timeout="100" transcriptionMethod="GET" ' \
'transcriptionType="hybrid" transcriptionUrl="https://foo.example.com"/>' \
'</Response>'
action = 'https://foo.example.com'
method = 'GET'
fileFormat = 'wav'
redirect = False
timeout = 100
maxLength = 10
recordSession = False
startOnDialAnswer = False
playBeep = False
finishOnKey = '#'
transcriptionType = 'hybrid'
transcriptionUrl = 'https://foo.example.com'
transcriptionMethod = 'GET'
callbackUrl = 'https://foo.example.com'
callbackMethod = 'GET'
element = plivoxml.ResponseElement()
response = element.add(
plivoxml.RecordElement().set_action(action).set_method(method)
.set_file_format(fileFormat).set_redirect(redirect).set_timeout(
timeout).set_max_length(maxLength).set_play_beep(playBeep)
.set_finish_on_key(finishOnKey).set_record_session(recordSession).
set_start_on_dial_answer(startOnDialAnswer).set_transcription_type(
transcriptionType).set_transcription_url(transcriptionUrl)
.set_transcription_method(transcriptionMethod).set_callback_url(
callbackUrl).set_callback_method(callbackMethod)).to_string(False)
self.assertXmlEqual(response, expected_response)

julianwang/cinder | cinder/db/sqlalchemy/migrate_repo/versions/004_volume_type_to_uuid.py | Python | apache-2.0 | 5,948 | 0
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from migrate import ForeignKeyConstraint
from oslo_log import log as logging
from sqlalchemy import Integer, MetaData, String, Table
from cinder.i18n import _LI
LOG = logging.getLogger(__name__)
def upgrade(migrate_engine):
"""Convert volume_type_id to UUID."""
meta = MetaData()
meta.bind = migrate_engine
volumes = Table('volumes', meta, autoload=True)
volume_types = Table('volume_types', meta, autoload=True)
extra_specs = Table('volume_type_extra_specs', meta, autoload=True)
fkey_remove_list = [volumes.c.volume_type_id,
volume_types.c.id,
extra_specs.c.volume_type_id]
for column in fkey_remove_list:
fkeys = list(column.foreign_keys)
if fkeys:
fkey_name = fkeys[0].constraint.name
fkey = ForeignKeyConstraint(columns=[column],
refcolumns=[volume_types.c.id],
name=fkey_name)
try:
fkey.drop()
except Exception:
if migrate_engine.url.get_dialect().name.startswith('sqlite'):
pass
else:
raise
volumes.c.volume_type_id.alter(String(36))
volume_types.c.id.alter(String(36))
extra_specs.c.volume_type_id.alter(String(36))
vtype_list = list(volume_types.select().execute())
for t in vtype_list:
new_id = str(uuid.uuid4())
volumes.update().\
where(volumes.c.volume_type_id == t['id']).\
values(volume_type_id=new_id).execute()
extra_specs.update().\
where(extra_specs.c.volume_type_id == t['id']).\
values(volume_type_id=new_id).execute()
volume_types.update().\
where(volume_types.c.id == t['id']).\
values(id=new_id).execute()
for column in fkey_remove_list:
fkeys = list(column.foreign_keys)
if fkeys:
fkey_name = fkeys[0].constraint.name
fkey = ForeignKeyConstraint(columns=[column],
refcolumns=[volume_types.c.id],
name=fkey_name)
try:
fkey.create()
LOG.info(_LI('Created foreign key %s'), fkey_name)
except Exception:
if migrate_engine.url.get_dialect().name.startswith('sqlite'):
pass
else:
raise
def downgrade(migrate_engine):
"""Convert volume_type from UUID back to int."""
meta = MetaData()
meta.bind = migrate_engine
volumes = Table('volumes', meta, autoload=True)
volume_types = Table('volume_types', meta, autoload=True)
extra_specs = Table('volume_type_extra_specs', meta, autoload=True)
fkey_remove_list = [volumes.c.volume_type_id,
volume_types.c.id,
extra_specs.c.volume_type_id]
for column in fkey_remove_list:
fkeys = list(column.foreign_keys)
if fkeys:
fkey_name = fkeys[0].constraint.name
fkey = ForeignKeyConstraint(columns=[column],
refcolumns=[volume_types.c.id],
name=fkey_name)
try:
fkey.drop()
except Exception:
if migrate_engine.url.get_dialect().name.startswith('sqlite'):
pass
else:
raise
vtype_list = list(volume_types.select().execute())
new_id = 1
for t in vtype_list:
volumes.update().\
where(volumes.c.volume_type_id == t['id']).\
values(volume_type_id=new_id).execute()
extra_specs.update().\
where(extra_specs.c.volume_type_id == t['id']).\
values(volume_type_id=new_id).execute()
volume_types.update().\
where(volume_types.c.id == t['id']).\
values(id=new_id).execute()
new_id += 1
if migrate_engine.name == 'postgresql':
# NOTE(e0ne): PostgreSQL can't cast string to int automatically
table_column_pairs = [('volumes', 'volume_type_id'),
('volume_types', 'id'),
('volume_type_extra_specs', 'volume_type_id')]
sql = 'ALTER TABLE {0} ALTER COLUMN {1} ' + \
'TYPE INTEGER USING {1}::numeric'
for table, column in table_column_pairs:
migrate_engine.execute(sql.format(table, column))
else:
volumes.c.volume_type_id.alter(Integer)
volume_types.c.id.alter(Integer)
extra_specs.c.volume_type_id.alter(Integer)
for column in fkey_remove_list:
fkeys = list(column.foreign_keys)
if fkeys:
fkey_name = fkeys[0].constraint.name
fkey = ForeignKeyConstraint(columns=[column],
refcolumns=[volume_types.c.id],
name=fkey_name)
try:
fkey.create()
LOG.info(_LI('Created foreign key %s'), fkey_name)
except Exception:
if migrate_engine.url.get_dialect().name.startswith('sqlite'):
pass
else:
raise

ivaxer/tipsip | tipsip/tests/test_header.py | Python | isc | 2,901 | 0.002413
from twisted.trial import unittest
from tipsip.header import Headers
from tipsip.header import Header, AddressHeader, ViaHeader
class HeadersTest(unittest.TestCase):
def test_construct(self):
aq = self.assertEqual
at = self.assertTrue
h = Headers({'Subject': 'lunch'}, f='John', to='abacaba')
h['TO'] = 'Carol'
aq(h['Subject'], 'lunch')
aq(h['from'], 'John')
aq(h['t'], 'Carol')
r = str(h)
for line in r.split('\r\n'):
at(line in ['Subject: lunch', 'From: John', 'To: Carol'])
def test_manipulation(self):
aq = self.assertEqual
at = self.assertTrue
h = Headers()
h['f'] = "from header"
h['to'] = "to header"
at('FROM' in h)
at('To' in h)
to = h.pop('t')
aq(to, "to header")
at(h.has_key('From'))
class HeaderTest(unittest.TestCase):
def test_construct(self):
aq = self.assertEqual
at = self.assertTrue
h = Header('active', params={'expires': '3600'})
aq(str(h), 'active ;expires=3600')
class AddressHeaderTest(unittest.TestCase):
def test_parsing(self):
aq = self.assertEqual
v = AddressHeader.parse('<sips:bob@192.0.2.4>;expires=60')
aq(str(v.uri), 'sips:bob@192.0.2.4')
aq(v.params['expires'], '60')
aq(v.display_name, '')
v = AddressHeader.parse('<sip:server10.biloxi.com;lr>')
aq(str(v.uri), 'sip:server10.biloxi.com;lr')
aq(v.params, {})
aq(v.display_name, '')
v = AddressHeader.parse('The Operator <sip:operator@cs.columbia.edu>;tag=287447')
aq(str(v.uri), 'sip:operator@cs.columbia.edu')
aq(v.display_name, 'The Operator')
aq(v.params, {'tag': '287447'})
v = AddressHeader.parse('sip:echo@example.com')
aq(str(v.uri), 'sip:echo@example.com')
class ViaHeaderTest(unittest.TestCase):
def test_construct(self):
aq = self.assertEqual
v = ViaHeader(transport='UDP', host='192.168.0.1', port='5060', params={'received': '8.8.8.8'})
aq(str(v), 'SIP/2.0/UDP 192.168.0.1:5060 ;received=8.8.8.8')
def test_parsing(self):
aq = self.assertEqual
at = self.assertTrue
v = ViaHeader.parse('SIP/2.0/UDP 127.0.0.1:21375;branch=z9hG4bK-d8754z-2f9c4f090fc81b1f-1---d8754z-;rport')
aq(v.version, 'SIP/2.0')
aq(v.transport, 'UDP')
aq(v.host, '127.0.0.1')
aq(v.port, '21375')
aq(v.params['branch'], 'z9hG4bK-d8754z-2f9c4f090fc81b1f-1---d8754z-')
at('rport' in v.params)
v = ViaHeader.parse('SIP/2.0/UDP pc33.atlanta.com:5066;branch=z9hG4bK776asdhds')
aq(v.port, '5066')
def test_serialize(self):
aq = self.assertEqual
v = ViaHeader.parse('SIP/2.0/UDP 127.0.0.1:21375;rport')
aq(str(v), 'SIP/2.0/UDP 127.0.0.1:21375 ;rport')

singhj/locality-sensitive-hashing | utils/levenshtein.py | Python | mit | 745 | 0.016107
# Reference: http://hetland.org/coding/python/levenshtein.py
def levenshtein(a,b):
"Calculates the Levenshtein distance between a and b."
n, m = len(a), len(b)
if n > m:
# Make sure n <= m, to use O(min(n,m)) space
a,b = b,a
n,m = m,n
current = range(n+1)
for i in range(1,m+1):
previous, current = current, [i]+[0]*n
for j in range(1,n+1):
add, delete = previous[j]+1, current[j-1]+1
change = previous[j-1]
if a[j-1] != b[i-1]:
change = change + 1
current[j] = min(add, delete, change)
return current[n]
if __name__=="__main__":
from sys import argv
print levenshtein(argv[1],argv[2])

matthieu-meaux/DLLM | examples/broken_wing/test_broken_wing.py | Python | gpl-2.0 | 1,720 | 0.005814
# -*-mode: python; py-indent-offset: 4; tab-width: 8; coding: iso-8859-1 -*-
# DLLM (non-linear Differentiated Lifting Line Model, open source software)
#
# Copyright (C) 2013-2015 Airbus Group SAS
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# https://github.com/matthieu-meaux/DLLM.git
#
# @author : Matthieu MEAUX
#
# Imports
from MDOTools.OC.operating_condition import OperatingCondition
from DLLM.DLLMGeom.wing_broken import Wing_Broken
from DLLM.DLLMKernel.DLLMSolver import DLLMSolver
OC=OperatingCondition('cond1', atmospheric_model='ISA')
OC.set_Mach(0.8)
OC.set_AoA(3.0)
OC.set_altitude(10000.)
OC.set_T0_deg(15.)
OC.set_P0(101325.)
OC.set_humidity(0.)
OC.compute_atmosphere()
wing_param=Wing_Broken('broken_wing',n_sect=20)
wing_param.import_BC_from_file('input_parameters.par')
wing_param.build_linear_airfoil(OC, AoA0=0.0, set_as_ref=True)
wing_param.build_airfoils_from_ref()
wing_param.update()
wing_param.plot()
DLLM = DLLMSolver('test', wing_param, OC, verbose=1)
DLLM.run_direct()
DLLM.run_post()
DLLM.run_adjoint()

ASCIT/donut-python | donut/constants.py | Python | mit | 2,372 | 0.000422
"""Sto
|
re various constants here"""
from enum import Enum
# Maximum file upload size (in bytes).
MAX_CONTENT_LENGTH = 1 * 1024 * 1024 * 1024
# Authentication/account creation constants
PWD_HASH_ALGORITHM = 'pbkdf2_sha256'
SALT_SIZE = 24
MIN_USERNAME_LENGTH = 2
MAX_USERNAME_LENGTH = 32
MIN_PASSWORD_LENGTH = 8
MAX_PASSWORD_LENGTH = 1024
HASH_ROUNDS = 100000
PWD_RESET_KEY_LENGTH = 32
# Length of time before recovery key expires, in minutes.
PWD_RESET_KEY_EXPIRATION = 1 * 24 * 60
CREATE_ACCOUNT_KEY_LENGTH = 32
class Gender(Enum):
"""Value of members.gender if member's gender is unknown"""
NO_GENDER = None
"""Value of members.gender if member is female"""
FEMALE = 0
"""Value of members.gender if member is male"""
MALE = 1
CONTACTS = {
'Administration': [{
'name': 'Kevin Gilmartin',
'role': 'Dean of Undergraduate Students',
'email': 'kmg@hss.caltech.edu'
}, {
'name': 'Lesley Nye',
'role': 'Dean of Undergraduate Students',
'email': 'lnye@caltech.edu'
}, {
'name': 'Kristin Weyman',
'role': 'Associate Dean of Undergraduate Students',
'email': 'kweyman@caltech.edu'
}, {
'name': 'Beth Larranaga',
'role': 'Office Manager',
'email': 'rosel@caltech.edu'
}, {
'name': 'Sara Loredo',
'role': 'Office Assistant',
'email': 'sara@caltech.edu'
}],
'Student Life': [{
'name':
'Tom Mannion',
'role':
'Senior Director, Student Activities and Programs',
'email':
'mannion@caltech.edu'
}, {
'name': 'Joe Shepherd',
'role': 'Vice President for Student Affairs',
'email': 'joseph.e.shepherd@caltech.edu'
}, {
'name':
'Felicia Hunt',
'role':
'Assistant Vice President for Student Affairs and Residential Experience',
'email':
'fhunt@caltech.edu'
}, {
'name': 'Maria Katsas',
'role': 'Director of Housing',
'email': 'maria@caltech.edu'
}, {
'name':
'Allie McIntosh',
'role':
'Community Educator and Deputy Title IX Coordinator',
'email':
'allie@caltech.edu'
}, {
'name': 'Jaime Reyes',
'role': 'Acting Director of Dining Services',
'email': 'reyes@caltech.edu'
}]
}

brousch/saythis2 | tts_engines/osx_say.py | Python | mit | 888 | 0.003378
import os
import re
import subprocess
from utils import whereis_exe
class osx_voice():
def __init__(self, voice_line):
mess = voice_line.split(' ')
cleaned = [ part for part in mess if len(part)>0 ]
self.name = cleaned[0]
self.locality = cleaned[1]
self.desc = cleaned[2].replace('# ', '')
def __str__(self):
return self.name + ' ' + self.locality + ' ' + self.desc
def fetch_voices():
osx_voices = []
if whereis_exe("say"):
voices_raw = os.popen("say -v ?").read()
voice_lines = voices_raw.split('\n')
for line in voice_lines:
try:
osx_voices.append(osx_voice(line))
except IndexError:
pass
return osx_voices
def speak(text, voice, rate):
if whereis_exe("say"):
subprocess.call(["say", text, "-v", voice, "-r", rate])

NlGG/experiments | Experimental Games on Networks/otree_code/network/models.py | Python | mit | 3,635 | 0.009134
# -*- coding: utf-8 -*-
# <standard imports>
from __future__ import division
import random
import otree.models
import otree.constants
from otree.db import models
from otree import widgets
from otree.common import Currency as c, currency_range, safe_json
from otree.constants import BaseConstants
from otree.models import BaseSubsession, BaseGroup, BasePlayer
# </standard imports>
from numpy import *
author = 'NIGG'
doc = """
Your app description
"""
class Constants():
name_in_url = 'network'
players_per_group = 5
num_rounds = 40
places = ['A', 'B', 'C', 'D', 'E']
# define more constants here
class Subsession(BaseSubsession):
def before_session_starts(self):
Group.network_histry = []
for i in range(Constants.num_rounds):
for group in self.get_groups():
Group.network_tp = group.network_type()
Group.network_group = group.network()
players = group.get_players()
random.shuffle(players)
group.set_players(players)
Group.network_histry.append([ Group.network_group[0], Group.network_group[1]])
class Group(BaseGroup):
# <built-in>
subsession = models.ForeignKey(Subsession)
# </built-in>
networktype = models.CharField()
def network_type(self):
network_type_group = ['Blue network', 'Red network', 'Brown network']
network_type = random.choice(network_type_group)
return network_type
def network(self):
network_type = str(self.network_tp)
if network_type == 'Blue network':
network = {'A':['B'], 'B':['A', 'C', 'E'], 'C':['B', 'D', 'E'], 'D':['C', 'E'], 'E':['B', 'C', 'D']}
elif network_type == 'Red network':
network = {'A':['B'], 'B':['A', 'C'], 'C':['B', 'D', 'E'], 'D':['C', 'E'], 'E':['C', 'D']}
else:
network = {'A':['B'], 'B':['A', 'C', 'E'], 'C':['B', 'D'], 'D':['C', 'E'], 'E':['B', 'D']}
network_group = [network_type, network]
return network_group
def set_payoffs(self):
network_group = self.network_histry[self.subsession.round_number-1]
self.network_type = network_group[0]
self.network = network_group[1]
player = [0 for i in range(Constants.players_per_group)]
active = [0 for i in range(Constants.players_per_group)]
i = 0
for role in Constants.places:
player[i] = self.get_player_by_role(role)
assign_nghb = self.network[role]
for other_role in assign_nghb:
if self.get_player_by_role(other_role).decision == 'ACTIVE':
active[i] += 1
player[i].payoff = float(100*active[i]/3)
player[i].num_active = active[i]
i += 1
class Player(otree.models.BasePlayer):
# <built-in>
group = models.ForeignKey(Group, null=True)
subsession = models.ForeignKey(Subsession)
# </built-in>
decision = models.CharField(
choices=['ACTIVE', 'INACTIVE'],
doc="""このプレイヤーの選択は""",
widget=widgets.RadioSelect()
)
nghb = models.PositiveIntegerField()
num_active = models.PositiveIntegerField()
def other_player(self):
return self.get_others_in_group()[0]
def role(self):
return Constants.places[self.id_in_group - 1]
def num_nghb(self):
return len(Group.network_histry[self.subsession.round_number-1][1][self.role()])

UnrememberMe/pants | testprojects/src/python/interpreter_selection/python_3_selection_testing/test_py2.py | Python | apache-2.0 | 554 | 0.012635
# coding=utf-8
# Copyright 2017 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import sys
from interpreter_selection.python_3_selection_testing.main_py2 import main
def test_main():
print(sys.executable)
# Note that ascii exists as a built-in in Python 3 and
# does not exist in Python 2
ret = main()
assert ret == None

xtao/code | vilya/views/api/repos/__init__.py | Python | bsd-3-clause | 4,261 | 0
# -*- coding: utf-8 -*-
import json
from vilya.libs import api_errors
from vilya.models.project import CodeDoubanProject
from vilya.views.api.utils import RestAPIUI, api_require_login, jsonize
from vilya.views.api.repos.product import ProductUI
from vilya.views.api.repos.summary import SummaryUI
from vilya.views.api.repos.intern import InternUI
from vilya.views.api.repos.default_branch import DefaultBranchUI
from vilya.views.api.repos.commits import CommitsUI
from vilya.views.api.repos.post_receive import PostReceiveUI
from vilya.views.api.repos.git2svn import GIT2SVNUI
from vilya.views.api.repos.svn2git import SVN2GITUI
from vilya.views.api.repos.pulls import PullsUI
from vilya.views.api.repos.issues import IssuesUI
from vilya.views.api.repos.contents import ContentsUI
from vilya.views.api.repos.push import PushUI
from vilya.views.api.repos.watchers import WatchersUI
_q_exports = []
def _q_lookup(request, name):
return RepositoryUI(name)
def _q_access(request):
request.response.set_content_type('application/json; charset=utf-8')
class RepositoryUI(object):
_q_exports = [
'lang_stats', 'forks', 'pulls', 'summary',
'committers', 'name', 'owner', 'product',
'intern_banned', 'default_branch', 'commits',
'post_receive', 'svn2git', 'git2svn', 'issues',
'contents', 'can_push', 'watchers'
]
def __init__(self, name):
self.name = name
self.repo = CodeDoubanProject.get_by_name(self.name)
def __call__(self, request):
return self._q_index(request)
@jsonize
def _q_index(self, request):
if not self.repo:
raise api_errors.NotFoundError("repo")
return {}
def _q_access(self, request):
self.method = request.method
def _q_lookup(self, request, part):
name = "%s/%s" % (self.name, part)
if not CodeDoubanProject.exists(name):
raise api_errors.NotFoundError("repo")
return RepositoryUI(name)
@jsonize
def lang_stats(self, request):
if not self.repo:
raise api_errors.NotFoundError
if self.method == 'POST':
language = request.get_form_var('language', '')
languages = request.get_form_var('languages', '[]')
try:
languages = json.loads(languages)
except ValueError:
raise api_errors.NotJSONError
self.repo.language = language
self.repo.languages = languages
return {}
else:
return dict(language=self.repo.language,
languages=self.repo.languages)
@property
def forks(self):
return ForksUI(self.repo)
@property
def pulls(self):
return PullsUI(self.repo)
@property
def product(self):
return ProductUI(self.repo)
@property
def summary(self):
return SummaryUI(self.repo)
@property
def intern_banned(self):
return InternUI(self.repo)
@property
def can_push(self):
return PushUI(self.repo)
@property
def default_branch(self):
return DefaultBranchUI(self.repo)
@property
def commits(self):
return CommitsUI(self.repo)
@property
def post_receive(self):
return PostReceiveUI(self.repo)
@property
def svn2git(self):
return SVN2GITUI(self.repo)
@property
def git2svn(self):
return GIT2SVNUI(self.repo)
@property
def issues(self):
return IssuesUI(self.repo)
@property
def contents(self):
return ContentsUI(self.repo)
@property
def watchers(self):
return WatchersUI(self.repo)
class ForksUI(RestAPIUI):
_q_exports = []
_q_methods = ['get', 'post']
def __init__(self, repo):
self.repo = repo
@api_require_login
def post(self, request):
repo = self.repo
fork_repo = repo.new_fork(self.user.name)
if not fork_repo:
# FIXME: repository exists
return []
return fork_repo.as_dict()
def get(self, request):
fork_repos = self.repo.get_forked_projects()
return [project.get_info(without_commits=True)
for project in fork_repos]

paluh/django-tz | django_tz/middleware.py | Python | bsd-2-clause | 1,766 | 0.002831
import datetime
import pytz
from django.conf import settings
from django.utils.cache import patch_vary_headers
from django.utils.translation import trans_real
from . import global_tz
from .forms import TimeZoneForm
from .utils import guess_tz_from_lang
def get_tz_from_request(request):
if hasattr(request, 'session'):
session_name = getattr(settings, 'TIMEZONE_SESSION_NAME', 'django_timezone')
tz = request.session.get(session_name, None)
if tz and isinstance(tz, datetime.tzinfo):
return tz
cookie_name = getattr(settings, 'TIMEZONE_COOKIE_NAME', 'TIMEZONE')
form = TimeZoneForm({'timezone': request.COOKIES.get(cookie_name, None)})
if form.is_valid():
return form.cleaned_data['timezone']
return None
class GlobalTimezoneMiddleware(object):
"""
This middleware guesses timezone from language and sets it in current
thread global cache.
"""
def get_tz(self, request):
raise NotImplementedError()
def process_request(self, request):
tz = self.get_tz(request)
if tz:
global_tz.activate(tz)
def process_response(self, request, response):
global_tz.deactivate()
return response
class TimezoneFromLangMiddleware(GlobalTimezoneMiddleware):
"""
Not very smart middelware which guesses timezone from request lang setting.
"""
def get_tz(self, request):
tz = get_tz_from_request(request)
if tz:
return tz
accept_lang = request.META.get('HTTP_ACCEPT_LANGUAGE', '')
langs = trans_real.parse_accept_lang_header(accept_lang)
for lang, unused in langs:
tz = guess_tz_from_lang(lang)
if tz:
break
return tz

titusjan/astviewer | astviewer/misc.py | Python | mit | 6,180 | 0.004854
""" Miscellaneous routines and constants.
"""
import logging, sys, traceback
import os.path
import astviewer.qtpy
import astviewer.qtpy._version as qtpy_version
from astviewer.version import DEBUGGING, PROGRAM_NAME, PROGRAM_VERSION, PYTHON_VERSION
from astviewer.qtpy import QtCore, QtWidgets
logger=logging.getLogger(__name__)
QT_API = astviewer.qtpy.API
QT_API_NAME = astviewer.qtpy.API_NAME
QTPY_VERSION = '.'.join(map(str, qtpy_version.version_info))
ABOUT_MESSAGE = ("{}: {}\n\nPython: {}\n{} (api={})"
.format(PROGRAM_NAME, PROGRAM_VERSION, PYTHON_VERSION, QT_API_NAME, QT_API))
def program_directory():
""" Returns the program directory where this program is installed
"""
return os.path.abspath(os.path.dirname(__file__))
def icons_directory():
""" Returns the program directory where this program is installed
"""
return os.path.join(program_directory(), 'icons')
###########
# Logging #
###########
def logging_basic_config(level):
""" Setup basic config logging. Useful for debugging to quickly setup a useful logger"""
fmt = '%(filename)25s:%(lineno)-4d : %(levelname)-7s: %(message)s'
logging.basicConfig(level=level, format=fmt)
# pylint: disable=redefined-outer-name
def log_dictionary(dictionary, msg='', logger=None, level='debug', item_prefix=' '):
""" Writes a log message with key and value for each item in the dictionary.
:param dictionary: the dictionary to be logged
:type dictionary: dict
:param name: An optional message that is logged before the contents
:type name: string
:param logger: A logging.Logger object to log to. If not set, the 'main' logger
|
is used.
:type logger: logging.Logger or a string
:param level: log level. String or int as described in the logging module documentation.
Default: 'debug'.
:type level: string or int
:param item_prefix: String that will be prefixed to each line. Default: two spaces.
:type item_prefix: string
"""
level_nr = logging.getLevelName(level.upper())
if logger is None:
logger = logging.getLogger('main')
if msg :
logger.log(level_nr, "Logging dictionary: {}".format(msg))
if not dictionary:
logger.log(level_nr,"{}<empty dictionary>".format(item_prefix))
return
max_key_len = max([len(k) for k in dictionary.keys()])
for key, value in sorted(dictionary.items()):
logger.log(level_nr, "{0}{1:<{2}s} = {3}".format(item_prefix, key, max_key_len, value))
# pylint: enable=redefined-outer-name
#################
# Type checking #
#################
def class_name(obj):
""" Returns the class name of an object"""
return obj.__class__.__name__
def check_class(obj, target_class, allow_none = False):
""" Checks that the obj is a (sub)type of target_class.
Raises a TypeError if this is not the case.
"""
if not isinstance(obj, target_class):
if not (allow_none and obj is None):
raise TypeError("obj must be a of type {}, got: {}"
.format(target_class, type(obj)))
############
# Qt stuff #
############
class ResizeDetailsMessageBox(QtWidgets.QMessageBox):
""" Message box that enlarges when the 'Show Details' button is clicked.
Can be used to better view stack traces. I could't find how to make a resizeable message
box but this it the next best thing.
Taken from:
http://stackoverflow.com/questions/2655354/how-to-allow-resizing-of-qmessagebox-in-pyqt4
"""
def __init__(self, detailsBoxWidth=700, detailBoxHeight=300, *args, **kwargs):
""" Constructor
:param detailsBoxWidht: The width of the details text box (default=700)
:param detailBoxHeight: The heights of the details text box (default=700)
"""
super(ResizeDetailsMessageBox, self).__init__(*args, **kwargs)
self.detailsBoxWidth = detailsBoxWidth
self.detailBoxHeight = detailBoxHeight
def resizeEvent(self, event):
""" Resizes the details box if present (i.e. when 'Show Details' button was clicked)
"""
result = super(ResizeDetailsMessageBox, self).resizeEvent(event)
details_box = self.findChild(QtWidgets.QTextEdit)
if details_box is not None:
#details_box.setFixedSize(details_box.sizeHint())
details_box.setFixedSize(QtCore.QSize(self.detailsBoxWidth, self.detailBoxHeight))
return result
def handleException(exc_type, exc_value, exc_traceback):
""" Causes the application to quit in case of an unhandled exception (as God intended)
Shows an error dialog before quitting when not in debugging mode.
"""
traceback.format_exception(exc_type, exc_value, exc_traceback)
logger.critical("Bug: uncaught {}".format(exc_type.__name__),
exc_info=(exc_type, exc_value, exc_traceback))
if DEBUGGING:
sys.exit(1)
else:
# Constructing a QApplication in case this hasn't been done yet.
if not QtWidgets.qApp:
_app = QtWidgets.QApplication()
#msgBox = QtWidgets.QMessageBox()
msgBox = ResizeDetailsMessageBox()
msgBox.setText("Bug: uncaught {}".format(exc_type.__name__))
msgBox.setInformativeText(str(exc_value))
lst = traceback.format_exception(exc_type, exc_value, exc_traceback)
msgBox.setDetailedText("".join(lst))
msgBox.setIcon(QtWidgets.QMessageBox.Warning)
msgBox.exec_()
sys.exit(1)
def get_qsettings():
""" Creates a QSettings object for this application.
We do not set the application and organization in the QApplication object to
prevent side-effects in case the AstViewer is imported.
"""
return QtCore.QSettings("titusjan.nl", PROGRAM_NAME)
def get_qapplication_instance():
""" Returns the QApplication instance. Creates one if it doesn't exist.
"""
app = QtWidgets.QApplication.instance()
if app is None:
app = QtWidgets.QApplication(sys.argv)
check_class(app, QtWidgets.QApplication)
return app

LINKIWI/modern-paste | app/uri/main.py | Python | mit | 62 | 0
from base_uri import URI
class HomeURI(URI):
path = '/'

acsone/server-tools | base_name_search_improved/__init__.py | Python | agpl-3.0 | 99 | 0
# -*- coding: utf-8 -*-
from .hooks import post_init_hook
from . import models
from . import tests

spectralDNS/shenfun | demo/laguerre_dirichlet_poisson1D.py | Python | bsd-2-clause | 1,587 | 0.00252
r"""
Solve Poisson equation in 1D with homogeneous Dirichlet bcs on the domain [0, inf)
\nabla^2 u = f,
The equation to solve for a Laguerre basis is
(\nabla u, \nabla v) = -(f, v)
"""
import os
import sys
from sympy import symbols, sin, exp, lambdify
import numpy as np
from shenfun import inner, grad, TestFunction, TrialFunction, \
Array, Function, FunctionSpace, dx
assert len(sys.argv) == 2, 'Call with one command-line argument'
assert isinstance(int(sys.argv[-1]), int)
# Use sympy to compute a rhs, given an analytical solution
x = symbols("x", real=True)
ue = sin(2*x)*exp(-x)
fe = ue.diff(x, 2)
# Size of discretization
N = int(sys.argv[-1])
SD = FunctionSpace(N, 'Laguerre', bc=(0, 0))
u = TrialFunction(SD)
v = TestFunction(SD)
# Get f on quad points
fj = Array(SD, buffer=fe)
# Compute right hand side of Poisson equation
f_hat = Function(SD)
f_hat = inner(v, -fj, output_array=f_hat)
# Get left hand side of Poisson equation
#A = inner(v, -div(grad(u)))
A = inner(grad(v), grad(u))
f_hat = A.solve(f_hat)
uj = f_hat.backward()
uh = uj.forward()
# Compare with analytical solution
ua = Array(SD, buffer=ue)
print("Error=%2.16e" %(np.linalg.norm(uj-ua)))
print("Error=%2.16e" %(np.sqrt(dx(uj-ua)**2)))
assert np.allclose(uj, ua, atol=1e-5)
point = np.array([0.1, 0.2])
p = SD.eval(point, f_hat)
assert np.allclose(p, lambdify(x, ue)(point), atol=1e-5)
if 'pytest' not in os.environ:
import matplotlib.pyplot as plt
xx = np.linspace(0, 16, 100)
plt.plot(xx, lambdify(x, ue)(xx), 'r', xx, uh.eval(xx), 'bo', markersize=2)
plt.show()

vitan/blaze | blaze/data/core.py | Python | bsd-3-clause | 6,508 | 0.000154
from __future__ import absolute_import, division, print_function
from itertools import chain
from dynd import nd
import datashape
from datashape.internal_utils import IndexCallable
from datashape import discover
from functools import partial
from ..dispatch import dispatch
from blaze.expr import Projection, Field
from blaze.expr import Expr, UnaryOp
from .utils import validate, coerce, coerce_to_ordered, ordered_index
from ..utils import partition_all
__all__ = ['DataDescriptor', 'discover', 'compute_up']
def isdimension(ds):
return isinstance(ds, (datashape.Var, datashape.Fixed))
class DataDescriptor(object):
"""
Standard interface to data storage
Data descriptors provide read and write access to common data storage
systems like csv, json, HDF5, and SQL.
They provide Pythonic iteration over these resources as well as efficient
chunked access with DyND arrays.
Data Descriptors implement the following methods:
__iter__ - iterate over storage, getting results as Python objects
chunks - iterate over storage, getting results as DyND arrays
extend - insert new data into storage (if possible.)
Consumes a sequence of core Python objects
extend_chunks - insert new data into storage (if possible.)
Consumes a sequence of DyND arrays
as_dynd - load entire dataset into memory as a DyND array
"""
def extend(self, rows):
""" Extend data with many rows
"""
rows = iter(rows)
row = next(rows)
rows = chain([row], rows)
if not validate(self.schema, row):
raise ValueError('Invalid data:\n\t %s \nfor dshape \n\t%s' %
(str(row), self.schema))
if isinstance(row, dict):
rows = map(partial(coerce_to_ordered, self.schema), rows)
self._extend(rows)
def extend_chunks(self, chunks):
def dtype_of(chunk):
return str(len(chunk) * self.schema)
self._extend_chunks((nd.array(chunk, type=dtype_of(chunk))
for chunk in chunks))
def _extend_chunks(self, chunks):
self.extend((row for chunk in chunks
for row in nd.as_py(chunk, tuple=True)))
def chunks(self, **kwargs):
def dshape(chunk):
return str(len(chunk) * self.dshape.subshape[0])
for chunk in self._chunks(**kwargs):
yield nd.array(chunk, type=dshape(chunk))
def _chunks(self, blen=100):
return partition_all(blen, iter(self))
def as_dynd(self):
return self.dynd[:]
def as_py(self):
if isdimension(self.dshape[0]):
return tuple(self)
else:
return tuple(nd.as_py(self.as_dynd(), tuple=True))
def __array__(self):
return nd.as_numpy(self.as_dynd())
def __getitem__(self, key):
return self.get_py(key)
@property
def dynd(self):
return IndexCallable(self.get_dynd)
def get_py(self, key):
key = ordered_index(key, self.dshape)
subshape = self.dshape._subshape(key)
if hasattr(self, '_get_py'):
result = self._get_py(key)
elif hasattr(self, '_get_dynd'):
result = self._get_dynd(key)
else:
raise AttributeError("Data Descriptor defines neither "
"_get_py nor _get_dynd. Can not index")
return coerce(subshape, result)
def get_dynd(self, key):
key = ordered_index(key, self.dshape)
subshape = self.dshape._subshape(key)
if hasattr(self, '_get_dynd'):
result = self._get_dynd(key)
elif hasattr(self, '_get_py'):
result = nd.array(self._get_py(key), type=str(subshape))
else:
raise AttributeError("Data Descriptor defines neither "
"_get_py nor _get_dynd. Can not index")
# Currently nd.array(result, type=discover(result)) is oddly slower
# than just nd.array(result) , even though no type coercion should be
# necessary. As a short-term solution we check if this is the case and
# short-circuit the `type=` call
# This check can be deleted once these two run at similar speeds
ds_result = discover(result)
if (subshape == ds_result or
(isdimension(subshape[0]) and isdimension(ds_result[0]) and
subshape.subshape[0] == subshape.subshape[0])):
return nd.array(result)
else:
return nd.array(result, type=str(subshape))
def __iter__(self):
if not isdimension(self.dshape[0]):
raise TypeError("Data Descriptor not iterable, has dshape %s" %
self.dshape)
schema = self.dshape.subshape[0]
try:
seq = self._iter()
except NotImplementedError:
seq = iter(nd.as_py(self.as_dynd(), tuple=True))
if not isdimension(self.dshape[0]):
yield coerce(self.dshape, nd.as_py(self.as_dynd(), tuple=True))
else:
for block in partition_all(100, seq):
x = coerce(len(block) * schema, block)
for row in x:
yield row
def _iter(self):
raise NotImplementedError()
_dshape = None
@property
def dshape(self):
return datashape.dshape(self._dshape or datashape.Var() * self.schema)
_schema = None
@property
def schema(self):
if self._schema:
return datashape.dshape(self._schema)
if isdimension(self.dshape[0]):
return self.dshape.subarray(1)
raise TypeError('Datashape is not indexable to schema\n%s' %
self.dshape)
@property
def columns(self):
rec = self.schema[0]
if isinstance(rec, datashape.Record):
return rec.names
else:
raise TypeError('Columns attribute only valid on tabular '
'datashapes of records, got %s' % self.dshape)
@dispatch((Expr, UnaryOp), DataDescriptor)
def compute_up(t, ddesc, **kwargs):
return compute_up(t, iter(ddesc)) # use Python streaming by default
@dispatch(Projection, DataDescriptor)
def compute_up(t, ddesc, **kwargs):
return ddesc[:, t.fields]
@dispatch(Field, DataDescriptor)
def compute_up(t, ddesc, **kwargs):
return ddesc[:, t.fields[0]]
@dispatch(DataDescriptor)
def discover(dd):
return dd.dshape

tensorflow/adanet | adanet/core/evaluator.py | Python | apache-2.0 | 4,624 | 0.00519
"""An AdaNet evaluator implementation in Tensorflow using a single graph.
Copyright 2018 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from absl import logging
from adanet import tf_compat
import numpy as np
import tensorflow.compat.v2 as tf
# TODO: Remove uses of Evaluator once AdaNet Ranker is implemented.
class Evaluator(object):
"""Evaluates candidate ensemble performance."""
class Objective(object):
"""The Evaluator objective for the metric being optimized.
Two objectives are currently supported:
- MINIMIZE: Lower is better for the metric being optimized.
- MAXIMIZE: Higher is better for the metric being optimized.
"""
MINIMIZE = "minimize"
MAXIMIZE = "maximize"
def __init__(self,
input_fn,
metric_name="adanet_loss",
objective=Objective.MINIMIZE,
steps=None):
"""Initializes a new Evaluator instance.
Args:
input_fn: Input function returning a tuple of: features - Dictionary of
string feature name to `Tensor`. labels - `Tensor` of labels.
metric_name: The name of the evaluation metrics to use when choosing the
best ensemble. Must refer to a valid evaluation metric.
objective: Either `Objective.MINIMIZE` or `Objective.MAXIMIZE`.
steps: Number of steps for which to evaluate the ensembles. If an
`OutOfRangeError` occurs, evaluation stops. If set to None, will iterate
the dataset until all inputs are exhausted.
Returns:
An :class:`adanet.Evaluator` instance.
"""
self._input_fn = input_fn
self._steps = steps
self._metric_name = metric_name
self._objective = objective
if objective == self.Objective.MINIMIZE:
self._objective_fn = np.nanargmin
elif objective == self.Objective.MAXIMIZE:
self._objective_fn = np.nanargmax
else:
raise ValueError(
"Evaluator objective must be one of MINIMIZE or MAXIMIZE.")
@property
def input_fn(self):
"""Return the input_fn."""
return self._input_fn
@property
def steps(self):
"""Return the number of evaluation steps."""
return self._steps
@property
def metric_name(self):
"""Returns the name of the metric being optimized."""
return self._metric_name
@property
def objective_fn(self):
"""Returns a fn which selects the best metric based on the objective."""
return self._objective_fn
def evaluate(self, sess, ensemble_metrics):
"""Evaluates the given AdaNet objectives on the data from `input_fn`.
The candidates are fed the same batches of features and labels as
provided by `input_fn`, and their losses are computed and summed over
`steps` batches.
Args:
sess: `Session` instance with most recent variable values loaded.
ensemble_metrics: A list dictionaries of `tf.metrics` for each candidate
ensemble.
Returns:
List of evaluated metrics.
"""
evals_completed = 0
if self.steps is None:
logging_frequency = 1000
elif self.steps < 10:
logging_frequency = 1
else:
logging_frequency = math.floor(self.steps / 10.)
objective_metrics = [em[self._metric_name] for em in ensemble_metrics]
sess.run(tf_compat.v1.local_variables_initializer())
while True:
if self.steps is not None and evals_completed == self.steps:
break
try:
evals_completed += 1
if (evals_completed % logging_frequency == 0 or
self.steps == evals_completed):
logging.info("Ensemble evaluation [%d/%s]", evals_completed,
self.steps or "??")
sess.run(objective_metrics)
except tf.errors.OutOfRangeError:
logging.info("Encountered end of input after %d evaluations",
evals_completed)
break
# Evaluating the first element is idempotent for metric tuples.
return sess.run([metric[0] for metric in objective_metrics])

KredekPth/Kurs_django | rental/models.py | Python | mit | 564 | 0.031915
from django.db import models
from django.contrib.auth.models import User
from datetime import datetime
from django.utils.timezone import now
from shelf.models import BookItem
# Create your models here.
class Rental(models.Model):
who = models.ForeignKey(User)
what = models.ForeignKey(BookItem)
when = models.DateTimeField(
|
default = datetime.now)
returned = models.DateTimeField(null = True, blank = True)
def __str__(
|
self):
        return "{User},{Book},{rent},{ret}".format(User = self.who,Book = self.what,rent=self.when,ret = self.returned)
|
pik/pypayd
|
pypayd/config.py
|
Python
|
mit
| 1,595
| 0.013166
|
#Defaults - overridable via pypayd.conf or command-line arguments
DEFAULT_KEYPATH = '0/0/1'
DEFAULT_TICKER = 'dummy'
DEFAULT_CURRENCY = 'USD'
DEFAULT_WALLET_FILE = 'wallet.txt'
DEFAULT_WALLET_PASSWORD = "foobar"
DEFAULT_MNEMONIC_TYPE = "electrumseed"
DB = None
DEFAULT_DB = "pypayd.db"
DEFAULT_TESTNET_DB = "pypayd_testnet.db"
#Pypay server settings
RPC_HOST ='127.0.0.1'
RPC_PORT = 3080
VERSION = 0.1
AUTH_REQUIRED = True
#Blockchain
TESTNET = False
BLOCKCHAIN_CONNECT = 'http://localhost:
|
3001' #'https://test-insight.bitpay.com' #None
LOCAL_BLOCKCHAIN = False
BLOCKCHAIN_SERVICE = 'insight'
#generate a new address for every order if gen_new == True
GEN_NEW = False
#delay between requests to the blockchain service for new transactions
POLLING_DELAY = 30
#maximum time a leaf (address) is used t
|
o process orders before a new one is generated
MAX_LEAF_LIFE = 604800
#maximum number of transactions per address before a new one is generated
MAX_LEAF_TX = 9999
#maximum amount of time an order received for generated amount will be considered valid
ORDER_LIFE = 86400
#time from last order creation, after which an address is considered stale and no longer polled
LEAF_POLL_LIFE = ORDER_LIFE*2
#log file settings
LOG = None
MAX_LOG_SIZE = 16*1024*1024
UPDATE_ON_CONFIRM = 6 #can also take a list, such as [6, 20, 100]
DATA_DIR = ""
DB = None
KEYPATH = None
LAST_USED_KEYPATH = None
RPC_USER = 'user'
RPC_PASSWORD= 'password'
# INTERNAL
STATE = {"last_order_updates": {"order_id":None, "timestamp": None}}
# UNUSED
ZMQ_BIND = None
ZMQ_FEED = False
SOCKETIO_BIND = None
SOCKETIO_FEED = False
|
rupak0577/ginga
|
ginga/tests/test_colors.py
|
Python
|
bsd-3-clause
| 4,797
| 0.0271
|
#
# Unit Tests for the colors.py functions
#
# Rajul Srivastava (rajul09@gmail.com)
#
import unittest
import logging
import numpy as np
import ginga.colors
class TestError(Exception):
pass
class TestColors(unittest.TestCase):
def setUp(self):
self.logger = logging.getLogger("TestColors")
self.color_list_length = len(ginga.colors.color_dict)
    # Tests for the lookup_color() function
def test_lookup_color_white_tuple(self):
expected = (1.0, 1.0, 1.0)
actual = ginga.colors.lookup_color("white", "tuple")
assert np.allclose(expected, actual)
def test_lookup_color_black_tuple(self):
expected = (0.0, 0.0, 0.0)
actual = ginga.colors.lookup_color("black", "tuple")
assert np.allclose(expected, actual)
def test_lookup_color_white_hash(self):
expected = "#ffffff"
actual = ginga.colors.lookup_color("white", "hash")
assert expected == actual
def test_lookup_color_black_black(self):
expected = "#000000"
actual = ginga.colors.lookup_color("black", "hash")
assert expected == actual
def test_lookup_color_yellow_tuple(self):
expected = (1.0, 1.0, 0.0)
actual = ginga.colors.lookup_color("yellow")
assert np.allclose(expected, actual)
def test_lookup_color_unknown(self):
self.assertRaises(KeyError, ginga.colors.lookup_color, "unknown_color")
def test_lookup_color_raise_exception_unknown_key(self):
self.assertRaises(KeyError, ginga.colors.lookup_color, "unknown_key")
def test_lookup_color_raise_exception_unknown_format(self):
self.assertRaises(ValueError, ginga.colors.lookup_color, "white", "unknown_format")
# Tests for the get_colors() function
def test_get_colors_len(self):
expected = self.color_list_length
actual = len(ginga.colors.get_colors())
assert expected == actual
def test_add_and_get_colors_len(self):
ginga.colors.add_color("test_color_white", (0.0, 0.0, 0.0))
expected = self.color_list_length + 1
actual = len(ginga.colors.get_colors())
assert expected == actual
ginga.colors.remove_color("test_c
|
olor_white")
    # Tests for the add_color() and remove_color() functions
def test_add_and_remove_color_len(self):
ginga.colors.add_color("test_color_white", (0.0, 0.0, 0.0))
expected = self.color_list_length + 1
actual = len(ginga.colors.color_dict)
assert expected == actual
expected = len(ginga.colors.color_dict)
actual = len(ginga.colors.color_list)
assert expecte
|
d == actual
ginga.colors.remove_color("test_color_white")
expected = self.color_list_length
actual = len(ginga.colors.color_dict)
assert expected == actual
expected = len(ginga.colors.color_dict)
actual = len(ginga.colors.color_list)
assert expected == actual
def test_add_and_remove_color_rbg(self):
ginga.colors.add_color("test_color_white", (0.0, 0.0, 0.0))
expected = (0.0, 0.0, 0.0)
actual = ginga.colors.lookup_color("test_color_white")
assert np.allclose(expected, actual)
ginga.colors.remove_color("test_color_white")
self.assertRaises(KeyError, ginga.colors.remove_color, "test_color_white")
def test_add_color_wrong_rbg_type(self):
self.assertRaises(TypeError, ginga.colors.add_color, "white", "string_wrong_format")
def test_add_color_wrong_rbg_values(self):
self.assertRaises(ValueError, ginga.colors.add_color, "test_color", (-1.0, 0.0, 0.0))
def test_add_color_wrong_tuple_length(self):
self.assertRaises(ValueError, ginga.colors.add_color, "test_color", (0.0, 0.0))
def test_remove_color_unknown(self):
self.assertRaises(KeyError, ginga.colors.remove_color, "unknown_color")
# Tests for recalc_color_list() function
def test_recalc_color_list(self):
ginga.colors.color_dict["test_color_white"] = (0.0, 0.0, 0.0)
expected = len(ginga.colors.color_dict) - 1
actual = len(ginga.colors.color_list)
assert expected == actual
ginga.colors.recalc_color_list()
expected = len(ginga.colors.color_dict)
actual = len(ginga.colors.color_list)
assert expected == actual
del ginga.colors.color_dict["test_color_white"]
expected = len(ginga.colors.color_dict) + 1
actual = len(ginga.colors.color_list)
assert expected == actual
ginga.colors.recalc_color_list()
expected = len(ginga.colors.color_dict)
actual = len(ginga.colors.color_list)
assert expected == actual
# Tests for scan_rgbtxt_buf() function
def test_scan_rgbtxt_buf(self):
test_rgb_lines = '''
255 255 255 white
0 0 0 black
255 0 0 red
0 255 0 green
0 0 255 blue
'''
result = ginga.colors.scan_rgbtxt_buf(test_rgb_lines)
assert isinstance(result, dict)
expected = 5
actual = len(result)
assert expected == actual
expected = (1.0, 1.0, 1.0)
actual = result["white"]
assert np.allclose(expected, actual)
def tearDown(self):
pass
if __name__ == '__main__':
unittest.main()
#END
|
mlperf/training_results_v0.7
|
Google/benchmarks/resnet/implementations/resnet-cloud-TF2.0-tpu-v3-32/resnet_imagenet_main.py
|
Python
|
apache-2.0
| 12,959
| 0.007408
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Runs a ResNet model on the ImageNet dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import app
from absl import flags
from absl import logging
import tensorflow as tf
from tf2_common.utils.flags import core as flags_core
from tf2_common.utils.logs import logger
from tf2_common.utils.misc import distribution_utils
from tf2_common.utils.misc import keras_utils
from tf2_common.utils.misc import model_helpers
from tf2_common.utils.mlp_log import mlp_log
from tf2_resnet import common
from tf2_resnet import imagenet_preprocessing
from tf2_resnet import resnet_model
import tensorflow_model_optimization as tfmot
def run(flags_obj):
"""Run ResNet ImageNet training and eval loop using native Keras APIs.
Args:
flags_obj: An object containing parsed flag values.
Raises:
ValueError: If fp16 is passed as it is not currently supported.
NotImplementedError: If some features are not currently supported.
Returns:
Dictionary of training and eval stats.
"""
mlp_log.mlperf_print('init_start', None)
common.print_flags(flags_obj)
keras_utils.set_session_config(
enable_eager=flags_obj.enable_eager,
enable_xla=flags_obj.enable_xla)
# Execute flag override logic for better model performance
if flags_obj.tf_gpu_thread_mode:
keras_utils.set_gpu_thread_mode_and_count(
per_gpu_thread_count=flags_obj.per_gpu_thread_count,
gpu_thread_mode=flags_obj.tf_gpu_thread_mode,
num_gpus=flags_obj.num_gpus,
datasets_num_private_threads=flags_obj.datasets_num_private_threads)
common.set_cudnn_batchnorm_mode()
dtype = flags_core.get_tf_dtype(flags_obj)
if dtype == tf.float16:
loss_scale = flags_core.get_loss_scale(flags_obj, default_for_fp16=128)
policy = tf.compat.v2.keras.mixed_precision.experimental.Policy(
'mixed_float16', loss_scale=loss_scale)
tf.compat.v2.keras.mixed_precision.experimental.set_policy(policy)
if n
|
ot keras_utils.is_v2_0():
|
raise ValueError('--dtype=fp16 is not supported in TensorFlow 1.')
elif dtype == tf.bfloat16:
policy = tf.compat.v2.keras.mixed_precision.experimental.Policy(
'mixed_bfloat16')
tf.compat.v2.keras.mixed_precision.experimental.set_policy(policy)
data_format = flags_obj.data_format
if data_format is None:
data_format = ('channels_first'
if tf.test.is_built_with_cuda() else 'channels_last')
tf.keras.backend.set_image_data_format(data_format)
# Configures cluster spec for distribution strategy.
_ = distribution_utils.configure_cluster(flags_obj.worker_hosts,
flags_obj.task_index)
strategy = distribution_utils.get_distribution_strategy(
distribution_strategy=flags_obj.distribution_strategy,
num_gpus=flags_obj.num_gpus,
all_reduce_alg=flags_obj.all_reduce_alg,
num_packs=flags_obj.num_packs,
tpu_address=flags_obj.tpu,
tpu_zone=flags_obj.tpu_zone if flags_obj.tpu else None)
if strategy:
    # flags_obj.enable_get_next_as_optional controls whether to enable
    # get_next_as_optional behavior in DistributedIterator. If true, the last
    # partial batch can be supported.
strategy.extended.experimental_enable_get_next_as_optional = (
flags_obj.enable_get_next_as_optional
)
strategy_scope = distribution_utils.get_strategy_scope(strategy)
# pylint: disable=protected-access
if flags_obj.use_synthetic_data:
distribution_utils.set_up_synthetic_data()
input_fn = common.get_synth_input_fn(
height=imagenet_preprocessing.DEFAULT_IMAGE_SIZE,
width=imagenet_preprocessing.DEFAULT_IMAGE_SIZE,
num_channels=imagenet_preprocessing.NUM_CHANNELS,
num_classes=flags_obj.num_classes,
dtype=dtype,
drop_remainder=True)
else:
distribution_utils.undo_set_up_synthetic_data()
input_fn = imagenet_preprocessing.input_fn
# When `enable_xla` is True, we always drop the remainder of the batches
# in the dataset, as XLA-GPU doesn't support dynamic shapes.
# drop_remainder = flags_obj.enable_xla
# Current resnet_model.resnet50 input format is always channel-last.
  # We use the keras_applications MobileNet model, whose input format depends
  # on the Keras backend image data format.
  # This use_keras_image_data_format flag indicates whether the image
  # preprocessor output format should match the Keras backend image data
  # format or just be channel-last.
use_keras_image_data_format = (flags_obj.model == 'mobilenet')
train_input_dataset = input_fn(
is_training=True,
data_dir=flags_obj.data_dir,
batch_size=flags_obj.batch_size,
parse_record_fn=imagenet_preprocessing.get_parse_record_fn(
use_keras_image_data_format=use_keras_image_data_format),
datasets_num_private_threads=flags_obj.datasets_num_private_threads,
dtype=dtype,
drop_remainder=flags_obj.drop_train_remainder,
tf_data_experimental_slack=flags_obj.tf_data_experimental_slack,
training_dataset_cache=flags_obj.training_dataset_cache,
)
eval_input_dataset = None
if not flags_obj.skip_eval:
eval_input_dataset = input_fn(
is_training=False,
data_dir=flags_obj.data_dir,
batch_size=flags_obj.batch_size,
parse_record_fn=imagenet_preprocessing.get_parse_record_fn(
use_keras_image_data_format=use_keras_image_data_format),
dtype=dtype,
drop_remainder=flags_obj.drop_eval_remainder)
steps_per_epoch, train_epochs = common.get_num_train_iterations(flags_obj)
mlp_log.mlperf_print('global_batch_size', flags_obj.batch_size)
mlp_log.mlperf_print('num_train_examples',
imagenet_preprocessing.NUM_IMAGES['train'])
mlp_log.mlperf_print('num_eval_examples',
imagenet_preprocessing.NUM_IMAGES['validation'])
learning_rate_schedule_fn = None
with strategy_scope:
optimizer, learning_rate_schedule_fn = common.get_optimizer(
flags_obj=flags_obj,
steps_per_epoch=steps_per_epoch,
train_steps=train_epochs * steps_per_epoch)
if flags_obj.fp16_implementation == 'graph_rewrite':
# Note: when flags_obj.fp16_implementation == "graph_rewrite", dtype as
# determined by flags_core.get_tf_dtype(flags_obj) would be 'float32'
# which will ensure tf.compat.v2.keras.mixed_precision and
# tf.train.experimental.enable_mixed_precision_graph_rewrite do not double
# up.
optimizer = tf.train.experimental.enable_mixed_precision_graph_rewrite(
optimizer)
if flags_obj.model == 'resnet50_v1.5':
resnet_model.change_keras_layer(flags_obj.use_tf_keras_layers)
model = resnet_model.resnet50(num_classes=flags_obj.num_classes)
elif flags_obj.model == 'mobilenet':
# TODO(kimjaehong): Remove layers attribute when minimum TF version
      # supports 2.0 layers by default.
model = tf.keras.applications.mobilenet.MobileNet(
weights=None, classes=flags_obj.num_classes, layers=tf.keras.layers)
if flags_obj.pretrained_filepath:
model.load_weights(flags_obj.pretrained_filepath)
if flags_obj.pruning_method == 'polynomial_decay':
if dtype != tf.float32:
raise NotImplementedError(
'Pruning is currently only supported on dtype=tf.float32.')
pruning_params = {
'pruning_schedule':
|
bepo13/destinydb-stl-generator-v0
|
src/DestinyModelGenStl.py
|
Python
|
mit
| 6,240
| 0.005769
|
import numpy
scale = 1000
def unit(v):
return (v / numpy.linalg.norm(v))
def angle(v1, v2):
v1_u = unit(v1)
v2_u = unit(v2)
angle = numpy.arccos(numpy.dot(v1_u, v2_u))
if numpy.isnan(angle):
if (v1_u == v2_u).all():
return 0.0
else:
return numpy.pi
return angle
def generate(models, solidName, fileName):
# Find the minimum positional vector for all models
positionMin = numpy.array([0, 0, 0, 0], dtype='float')
for model in models:
for geometry in model.geometry.geometry:
for mesh in geometry.meshes.meshes:
positions = mesh.positions
for v in positions:
if v[0] < positionMin[0]:
positionMin[0] = v[0]
if v[1] < positionMin[1]:
positionMin[1] = v[1]
if v[2] < positionMin[2]:
positionMin[2] = v[2]
if v[3] < positionMin[3]:
positionMin[3] = v[3]
# Translate position coordinates to always be positive
positionMin *= -1
#Open file
with open(fileName, 'w') as f:
print("Writing "+fileName+"...")
# Write name header
f.write("solid "+solidName+"\n")
# Iterate through all models
for model in models:
# Write positional vectors (once to start)
for geometry in model.geometry.geometry:
for mesh in geometry.meshes.meshes:
indices = mesh.indices.data
positions = mesh.positions
normals = mesh.normals
parts = mesh.parts.data
# Loop through all the parts in the mesh
for i, part in enumerate(parts):
# Check if this part has been duplicated
ignore = False
for j in range(i):
if (parts[i].indexStart == parts[j].indexStart) or (parts[i].indexCount == parts[j].indexCount):
ignore = True
                        # Skip anything meeting one of the following:
# duplicate part
# levelOfDetail greater than one
# diffuseTexture.contains("target_reticles")
if ignore or part.levelOfDetail > 1 or ("target_reticles" in part.diffuseTexture):
continue
start = part.indexStart
count = part.indexCount
# Process indices in sets of 3
if part.primitive == 3:
increment = 3
# Process indices as triangle strip
elif part.primitive == 5:
increment = 1
count -= 2
j = 0
while j < count:
# Skip if any two of the indices match (ignoring lines)
if (indices[start+j+0] == indices[start+j+1]) or (indices[start+j+0] == indices[start+j+2]) or (indices[start+j+1] == indices[start+j+2]):
j += 1
continue
# Calculate the average normal
n = (normals[indices[start+j+0]] + normals[indices[start+j+1]] + normals[indices[start+j+2]])[0:3]
# Calculate normal of vertices
v1 = positions[indices[start+j+1]][0:3] - positions[indices[start+j+0]][0:3]
v2 = positions[indices[start+j+2]][0:3] - positions[indices[start+j+1]][0:3]
n2 = numpy.cross(v1, v2)
# Calculate the angle between the two normals
# Reverse the vertices orientation if the angle is > pi/2 (90*)
a = angle(unit(n), unit(n2))
if a > numpy.pi/2:
flip = True
else:
flip = False
# Write the normal and loop start to file
# the normal doesn't matter for this, the order of vertices does
f.write("facet normal 0.0 0.0 0.0\n")
f.write(" outer loop\n")
if flip:
# write the three vertices to the file in reverse order
k = 3
while k > 0:
v = positions[indices[start+j+(k-1)]]
v = (v + positionMin) * scale
f.write(" vertex "+str(v[0])+" "+str(v[1])+" "+str(v[2])+"\n")
k -= 1
else:
# write the three v
|
ertices
|
to the file in forward order
for k in range(3):
v = positions[indices[start+j+k]]
v = (v + positionMin) * scale
f.write(" vertex "+str(v[0])+" "+str(v[1])+" "+str(v[2])+"\n")
# Write the loop and normal end to file
f.write(" endloop\n")
f.write("endfacet\n")
j += increment
else:
# Skip this if it ever happens
continue
f.close()
return
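# --- Illustrative sketch (editor's addition; not part of the original file).
# Shows the angle() helper used above to decide whether a facet's vertex
# order must be flipped: perpendicular vectors give roughly pi/2, the same
# threshold generate() compares against.
def _angle_demo():
    v1 = numpy.array([1.0, 0.0, 0.0])
    v2 = numpy.array([0.0, 1.0, 0.0])
    return angle(v1, v2)  # approximately numpy.pi / 2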
|
ray6/sdn
|
actualSDN.py
|
Python
|
mit
| 14,660
| 0.007572
|
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import CONFIG_DISPATCHER, MAIN_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_3
from ryu.ofproto import ether
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet
from ryu.lib.packet import ether_types
from ryu.lib.packet import arp, ipv4
from ryu.topology.api import get_switch, get_link, get_host
from ryu.topology import event, switches
import networkx as nx
from ryu.lib import hub
class actualSDN_switch(app_manager.RyuApp):
OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
def __init__(self, *args, **kwargs):
super(actualSDN_switch, self).__init__(*args, **kwargs)
self.vtable = {}
# default vlan table
self.vtable = {'00:00:00:00:00:01':'1',
'00:00:00:00:00:02':'1',
'00:00:00:00:00:03':'1'}
self.mac_to_ip = {} # mac <-> ip
self.ip_to_mac = {} # ip <-> mac
self.mac_to_port = {} # host in which port
self.stable = {} #dpid<->datapath
self.default_datapath = None
self.default_ev = None
self.host_enter = 0 # host enter number
self.switch_enter = 0 # switch enter number
self.mac_to_dp = {} # mac <-> datapath
self.switches = [] #all switches' dpid
self.switches_dp = [] #all switches' datapath
# self.path_db = [] # store shortest path
# monitor init
self.datapaths={} # all datapaths
self.monitor_thread = hub.spawn(self._monitor)
self.bandwidth = {}
#networkx init
self.topology_api_app = self
self.directed_Topo = nx.DiGraph()
@set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
def switch_features_handler(self, ev):
datapath = ev.msg.datapath
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
match = parser.OFPMatch()
self.datapaths[datapath.id] = datapath
self.default_datapath = datapath
actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,
ofproto.OFPCML_NO_BUFFER)]
self.add_flow(datapath, 0, match, actions)
        # read the mac_table (valid users) and put the information into mac_to_ip and ip_to_mac
with open('./mac_table.txt') as f:
line = f.readlines()
line = [x.strip('\n') for x in line]
for content in line:
tmp = content.split(',')
mac = tmp[0]
ip = tmp[1]
self.mac_to_ip[mac] = ip
self.ip_to_mac[ip] = mac
#self.host_num = len(self.ip_to_mac)
self.host_num = 3
    # _monitor, _request_stats and _port_stats_reply_handler are the three functions used when monitoring the traffic
def _monitor(self):
while True:
for dp in self.datapaths.values():
self._request_stats(dp)
hub.sleep(3)
def _request_stats(self, datapath):
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
req = parser.OFPPortStatsRequest(datapath, 0 , ofproto.OFPP_ANY)
datapath.send_msg(req)
@set_ev_cls(ofp_event.EventOFPPortStatsReply, MAIN_DISPATCHER)
def _port_stats_reply_handler(self, ev):
body = ev.msg.body
parser = ev.msg.datapath.ofproto_parser
self.logger.info('datapath port '
'rx-pkts rx-bytes '
'tx-pkts tx-bytes bandwidth')
self.logger.info('---------------- -------- '
'-------- -------- '
'-------- -------- --------')
for stat in sorted(body):
if stat.port_no < 7:
index = str(ev.msg.datapath.id) + '-' + str(stat.port_no)
if index not in self.bandwidth:
self.bandwidth[index] = 0
transfer_bytes = stat.rx_bytes + stat.tx_bytes
speed = (transfer_bytes - self.bandwidth[index]) / 3
self.logger.info('%016x %8x %8d %8d %8d %8d %8d\n',
ev.msg.datapath.id, stat.port_no,
stat.rx_packets, stat.rx_bytes,
stat.tx_packets, stat.tx_bytes, speed)
self.bandwidth[index] = transfer_bytes
def add_flow(self, datapath, priority, match, actions, buffer_id=None):
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
buffer_id = ofproto.OFP_NO_BUFFER
inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,actions)]
if buffer_id:
mod = parser.OFPFlowMod(datapath=datapath, buffer_id=buffer_id,
priority=priority, match=match,
instructions=inst)
else:
mod = parser.OFPFlowMod(datapath=datapath, priority=priority,
match=match, instructions=inst)
datapath.send_msg(mod)
print('add flow!!')
# delete flow
def del_flow(self, datapath, match):
ofproto = datapath.ofproto
ofproto_parser = datapath.ofproto_parser
mod = ofproto_parser.OFPFlowMod(datapath=datapath,
command= ofproto.OFPFC_DELETE,out_port=ofproto.OFPP_ANY, out_group=ofproto.OFPG_ANY,match=match)
datapath.send_msg(mod)
print('del flow')
    # When the src is already in the topology but changes port, this function is run to delete the flows related to the src.
def ShortestPathDeleteFlow(self, datapath, *args):
if datapath==None:
return
ofproto = datapath.ofproto
ofproto_parser = datapath.ofproto_parser
#print('stable',self.stable)
for key, value in self.stable.items():
for arg in args:
match = ofproto_parser.OFPMatch(eth_dst=arg)
self.del_flow(value, match)
match = ofproto_parser.OFPMatch(eth_src=arg)
self.del_flow(value, match)
print('SP del flow end')
# handle arp package
def _handle_arp(self, datapath, in_port, pkt_ethernet, arp_pkt):
if arp_pkt.opcode != arp.ARP_REQUEST:
return
if self.ip_to_mac.get(arp_pkt.dst_ip) == None:
return
        #Look up the target hardware address from the ip_to_mac table.
get_mac = self.ip_to_mac[arp_pkt.dst_ip]
#target_ip_addr = arp_pkt.dst_ip
pkt = packet.Packet()
#Create ethernet packet
pkt.add_protocol(ethernet.ethernet(ethertype=ether.ETH_TYPE_ARP,dst=pkt_ethernet.src,src=get_mac))
#Create ARP Reply packet
pkt.add_protocol(arp.arp(opcode=arp.ARP_REPLY,
src_mac=get_mac
|
,
src_ip=arp_pkt.dst_ip,
dst_mac=arp_pkt.src_mac,
dst_ip=arp_pkt.src_ip))
self._send_packet(datapath, in_port, pkt)
pr
|
int('arp', get_mac, pkt_ethernet.src,)
    # add host to the directed topo
def AddHost(self, dpid, host, in_port):
#Add host into directed_topo
self.directed_Topo.add_node(host)
#Add edge switch's port to src host
self.directed_Topo.add_edge(dpid, host, {'port':in_port})
#Add edge host to switch
self.directed_Topo.add_edge(host, dpid)
return
@set_ev_cls(event.EventSwitchEnter)
def get_topology_data(self, ev):
#Topo information of switch
self.switch_enter += 1
#Get Switch List
switch_list = get_switch(self.topology_api_app, None)
self.switches = [switch.dp.id for switch in switch_list]
self.switches_dp = [switch.dp for switch in switch_list]
#Add switch dpid into Directed Topology
self.directed_Topo.add_nodes_from(self.switches)
#Get Link List
links_list = get_link(self.topology_api_app, None)
#When all Link enter
if self.switch_enter == len(self.switches):
links = [(link.src.dpi
|
transistorfet/nerve
|
nerve/http/servers/wsgi.py
|
Python
|
gpl-3.0
| 3,372
| 0.007711
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import nerve
import os
import cgi
import traceback
import urllib.parse
import json  # needed by the application/json branch below
class WSGIHandler (nerve.Server):
def __init__(self, **config):
super().__init__(**config)
def __call__(self, environ, start_response):
#nerve.logs.redirect(environ['wsgi.errors'])
#for (key, value) in sorted(environ.items()):
# print(key, value, file=environ['wsgi.errors'])
reqtype = environ['REQUEST_METHOD']
scheme = environ['REQUEST_SCHEME'] if 'REQUEST_SCHEME' in environ else ''
servername = environ['SERVER_NAME']
path = environ['PATH_INFO']
querystring = environ['QUERY_STRING']
uri = urllib.parse.urlunsplit( (scheme, servername, path, querystring, '') )
getvars = nerve.core.delistify(cgi.parse_qs(querystring))
try:
contentlen = int(environ.get('CONTENT_LENGTH', 0))
except (ValueError):
contentlen = 0
contents = environ['wsgi.input'].read(contentlen).decode('utf-8')
if 'CONTENT_TYPE' in environ:
(mimetype, pdict) = cgi.parse_header(environ['CONTENT_TYPE'])
else:
mimetype = None # empty post doesn't provide a content-type.
if mimetype == None:
postvars = { }
elif mimetype == 'multipart/form-data':
postvars = nerve.core.delistify(cgi.parse_multipart(self.rfile, pdict))
elif mimetype == 'application/x-www-form-urlencoded':
postvars = nerve.core.delistify(urllib.parse.parse_qs(contents, keep_blank_values=True))
elif mimetype == 'application/json':
postvars = json.loads(contents)
else:
raise Exception("unrecognized content-type in POST " + self.path + " (" + mimetype + ")")
postvars.update(getvars)
headers = { }
for (key, value) in environ.it
|
ems():
if key.startswith('HTTP_'):
name = key[5:].lower().replace('_', '-')
headers[name] = value
request = nerve.Request(self, None, reqtype, uri,
|
postvars, headers=headers)
controller = self.make_controller(request)
controller.handle_request(request)
redirect = controller.get_redirect()
error = controller.get_error()
headers = controller.get_headers()
mimetype = controller.get_mimetype()
output = controller.get_output()
if redirect:
status = '302 Found'
headers += [ ('Location', redirect) ]
elif error:
if type(error) is nerve.NotFoundError:
status = '404 Not Found'
else:
status = '500 Internal Server Error'
else:
status = '200 OK'
if isinstance(output, str):
output = bytes(output, 'utf-8')
if mimetype:
headers += [ ('Content-Type', mimetype) ]
if output:
headers += [ ('Content-Length', str(len(output))) ]
else:
headers += [ ('Content-Length', '0') ]
#print(path, file=environ['wsgi.errors'])
#for (key, value) in sorted(headers):
# print(key, value, file=environ['wsgi.errors'])
start_response(status, headers)
#nerve.logs.redirect(None)
yield output if output is not None else b''
|
CptSpaceToaster/memegen
|
memegen/services/template.py
|
Python
|
mit
| 1,885
| 0.000531
|
import logging
from ._base import Service
from ..domain import Template
log = logging.getLogger(__name__)
class TemplateService(Service):
def __init__(self,
|
template_store, **kwargs):
super().__init__(**kwargs
|
)
self.template_store = template_store
def all(self):
"""Get all templates."""
templates = self.template_store.filter()
return templates
def find(self, key):
"""Find a template with a matching key."""
key = Template.strip(key)
# Find an exact match
template = self.template_store.read(key)
if template:
return template
# Else, find an alias match
for template in self.all():
if key in template.aliases_stripped:
return template
# Else, no match
raise self.exceptions.not_found
    def validate(self):
        """Ensure all templates are valid and conflict-free."""
templates = self.all()
keys = {template.key: template for template in templates}
for template in templates:
log.info("checking template '%s' ...", template)
if not template.validate():
return False
for alias in template.aliases:
log.info("checking alias '%s' -> '%s' ...", alias, template.key)
if alias not in template.aliases_lowercase:
msg = "alias '%s' should be lowercase characters or dashes"
log.error(msg, alias)
return False
try:
existing = keys[alias]
except KeyError:
keys[alias] = template
else:
msg = "alias '%s' already used in template: %s"
log.error(msg, alias, existing)
return False
return True
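# --- Illustrative sketch (editor's addition; not part of the original file).
# find() tries an exact key match first, then falls back to alias matches and
# otherwise raises the service's not_found exception; a hypothetical caller
# could wrap that behaviour like this:
def _find_or_none(service, key):
    try:
        return service.find(key)
    except Exception:  # stands in for service.exceptions.not_found
        return None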
|
dhaase-de/dh-python-dh
|
dh/thirdparty/tqdm/_tqdm_gui.py
|
Python
|
mit
| 13,510
| 0
|
"""
GUI progressbar decorator for iterators.
Includes a default (x)range iterator printing to stderr.
Usage:
>>> from tqdm_gui import tgrange[, tqdm_gui]
>>> for i in tgrange(10): #same as: for i in tqdm_gui(xrange(10))
... ...
"""
# future division is important to divide integers and get as
# a result precise floating numbers (instead of truncated int)
from __future__ import division, absolute_import
# import compatibility functions and utilities
import sys
from time import time
from ._utils import _range
# to inherit from the tqdm class
from ._tqdm import tqdm
__author__ = {"github.com/": ["casperdcl", "lrq3000"]}
__all__ = ['tqdm_gui', 'tgrange']
class tqdm_gui(tqdm): # pragma: no cover
"""
Experimental GUI version of tqdm!
"""
@classmethod
def write(cls, s, file=None, end="\n"):
"""
Print a message via tqdm_gui (just an alias for print)
"""
if file is None:
file = sys.stdout
# TODO: print text on GUI?
file.write(s)
file.write(end)
def __init__(self, *args, **kwargs):
import matplotlib as mpl
import matplotlib.pyplot as plt
from collections import deque
kwargs['gui'] = True
super(tqdm_gui, self).__init__(*args, **kwargs)
# Initialize the GUI display
if self.disable or not kwargs['gui']:
return
self.fp.write('Warning: GUI is experimental/alpha\n')
self.mpl = mpl
self.plt = plt
self.sp = None
# Remember if external environment uses toolbars
self.toolbar = self.mpl.rcParams['toolbar']
self.mpl.rcParams['toolbar'] = 'None'
self.mininterval = max(self.mininterval, 0.5)
self.fig, ax = plt.subplots(figsize=(9, 2.2))
# self.fig.subplots_adjust(bottom=0.2)
if self.total:
self.xdata = []
self.ydata = []
self.zdata = []
else:
self.xdata = deque([])
self.ydata = deque([])
self.zdata = deque([])
self.line1, = ax.plot(self.xdata, self.ydata, color='b')
self.line2, = ax.plot(self.xdata, self.zdata, color='k')
ax.set_ylim(0, 0.001)
if self.total:
ax.set_xlim(0, 100)
ax.set_xlabel('percent')
self.fig.legend((self.line1, self.
|
line2), ('cur', 'est'),
loc='center right')
# progressbar
|
self.hspan = plt.axhspan(0, 0.001,
xmin=0, xmax=0, color='g')
else:
# ax.set_xlim(-60, 0)
ax.set_xlim(0, 60)
ax.invert_xaxis()
ax.set_xlabel('seconds')
ax.legend(('cur', 'est'), loc='lower left')
ax.grid()
# ax.set_xlabel('seconds')
ax.set_ylabel((self.unit if self.unit else 'it') + '/s')
if self.unit_scale:
plt.ticklabel_format(style='sci', axis='y',
scilimits=(0, 0))
ax.yaxis.get_offset_text().set_x(-0.15)
# Remember if external environment is interactive
self.wasion = plt.isinteractive()
plt.ion()
self.ax = ax
def __iter__(self):
# TODO: somehow allow the following:
# if not self.gui:
# return super(tqdm_gui, self).__iter__()
iterable = self.iterable
if self.disable:
for obj in iterable:
yield obj
return
# ncols = self.ncols
mininterval = self.mininterval
maxinterval = self.maxinterval
miniters = self.miniters
dynamic_miniters = self.dynamic_miniters
unit = self.unit
unit_scale = self.unit_scale
ascii = self.ascii
start_t = self.start_t
last_print_t = self.last_print_t
last_print_n = self.last_print_n
n = self.n
# dynamic_ncols = self.dynamic_ncols
smoothing = self.smoothing
avg_time = self.avg_time
bar_format = self.bar_format
plt = self.plt
ax = self.ax
xdata = self.xdata
ydata = self.ydata
zdata = self.zdata
line1 = self.line1
line2 = self.line2
for obj in iterable:
yield obj
# Update and print the progressbar.
# Note: does not call self.update(1) for speed optimisation.
n += 1
delta_it = n - last_print_n
# check the counter first (avoid calls to time())
if delta_it >= miniters:
cur_t = time()
delta_t = cur_t - last_print_t
if delta_t >= mininterval:
elapsed = cur_t - start_t
# EMA (not just overall average)
if smoothing and delta_t:
avg_time = delta_t / delta_it \
if avg_time is None \
else smoothing * delta_t / delta_it + \
(1 - smoothing) * avg_time
# Inline due to multiple calls
total = self.total
# instantaneous rate
y = delta_it / delta_t
# overall rate
z = n / elapsed
# update line data
xdata.append(n * 100.0 / total if total else cur_t)
ydata.append(y)
zdata.append(z)
# Discard old values
# xmin, xmax = ax.get_xlim()
# if (not total) and elapsed > xmin * 1.1:
if (not total) and elapsed > 66:
xdata.popleft()
ydata.popleft()
zdata.popleft()
ymin, ymax = ax.get_ylim()
if y > ymax or z > ymax:
ymax = 1.1 * y
ax.set_ylim(ymin, ymax)
ax.figure.canvas.draw()
if total:
line1.set_data(xdata, ydata)
line2.set_data(xdata, zdata)
try:
poly_lims = self.hspan.get_xy()
except AttributeError:
self.hspan = plt.axhspan(0, 0.001, xmin=0,
xmax=0, color='g')
poly_lims = self.hspan.get_xy()
poly_lims[0, 1] = ymin
poly_lims[1, 1] = ymax
poly_lims[2] = [n / total, ymax]
poly_lims[3] = [poly_lims[2, 0], ymin]
if len(poly_lims) > 4:
poly_lims[4, 1] = ymin
self.hspan.set_xy(poly_lims)
else:
t_ago = [cur_t - i for i in xdata]
line1.set_data(t_ago, ydata)
line2.set_data(t_ago, zdata)
ax.set_title(self.format_meter(
n, total, elapsed, 0,
self.desc, ascii, unit, unit_scale,
1 / avg_time if avg_time else None, bar_format),
fontname="DejaVu Sans Mono", fontsize=11)
plt.pause(1e-9)
# If no `miniters` was specified, adjust automatically
# to the maximum iteration rate seen so far.
if dynamic_miniters:
if maxinterval and delta_t > maxinterval:
# Set miniters to correspond to maxinterval
miniters = delta_it * maxinterval / delta_t
elif mininterval and delta_t:
# EMA-weight miniters to converge
# towards the timeframe of mininterval
miniters = smoothing * delta_it * mininterval \
/ delta_t + (1 - smoothing) * miniters
else:
|
mlperf/training_results_v0.6
|
Fujitsu/benchmarks/resnet/implementations/mxnet/3rdparty/tvm/topi/tests/python_cpp/test_topi_pooling.py
|
Python
|
apache-2.0
| 5,140
| 0.006226
|
"""Test code for pooling"""
import numpy as np
import tvm
import topi
import math
from topi.util import get_const_tuple
pool_code = {
"avg": 0,
"max": 1
}
def verify_pool(n, ic, ih, kh, sh, padding, pool_type, ceil_mode, count_include_pad=True):
iw = ih
kw = kh
sw = sh
pt, pl, pb, pr = padding
A = tvm.placeholder((n, ic, ih, iw), name='A')
B = topi.cpp.nn.pool(A, [kh, kw], [sh, sw], padding,
pool_code[pool_type], ceil_mode, "NCHW", count_include_pad)
B = topi.cpp.nn.relu(B)
dtype = A.dtype
bshape = get_const_tuple(B.shape)
ashape = get_const_tuple(A.shape)
if ceil_mode:
assert bshape[2] == int(math.ceil(float(ashape[2] - kh + pt + pb) / sh) + 1)
assert bshape[3] == int(math.ceil(float(ashape[3] - kw + pl + pr) / sw) + 1)
else:
assert bshape[2] == int(math.floor(float(ashape[2] - kh + pt + pb) / sh) + 1)
assert bshape[3] == int(math.floor(float(ashape[3] - kw + pl + pr) / sw) + 1)
a_np = np.random.uniform(size=(n, ic, ih, iw)).astype(dtype)
pad_np = np.zeros(shape=(n, ic, ih+pt+pb, iw+pl+pr)).astype(dtype)
no_zero = (range(n), range(ic), (range(pt, ih+pt)), (range(pl, iw+pl)))
pad_np[np.ix_(*no_zero)] = a_np
_, oc, oh, ow = get_const_tuple(B.shape)
b_np = np.zeros(shape=(n, oc, oh, ow)).astype(dtype)
if pool_type == 'avg':
for i in range(oh):
for j in range(ow):
if count_include_pad:
b_np[:,:,i,j] = np.mean(pad_np[:, :, i*sh:i*sh+kh, j*sw:j*sw+kw], axis=(2,3))
else:
pad_count = np.sum(pad_np[:, :, i*sh:i*sh+kh, j*sw:j*sw+kw] > 0, axis=(2,3))
b_np[:,:,i,j] = np.sum(pad_np[:, :, i*sh:i*sh+kh, j*sw:j*sw+kw], axis=(2,3)) / np.maximum(pad_count, 1)
elif pool_type =='max':
for i in range(oh):
for j in range(ow):
b_np[:,:,i,j] = np.max(pad_np[:, :, i*sh:i*sh+kh, j*sw:j*sw+kw], axis=(2,3))
b_np = np.maximum(b_np, 0.0)
def check_device(device):
ctx = tvm.context(device, 0)
if not ctx.exist:
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
target = topi.cpp.TEST_create_target(device)
if device == "llvm":
s = topi.cpp.generic.default_schedule(target, [B], False)
else:
s = topi.cpp.cuda.schedule_pool(target, [B])
a = tvm.nd.array(a_np, ctx)
b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=dtype), ctx)
f = tvm.build(s, [A, B], device)
f(a, b)
np.testing.assert_allclose(b.asnumpy(), b_np, rtol=1e-5)
for device in ['cuda', 'opencl', 'metal', 'rocm']:
check_device(device)
def test_pool():
verify_pool(1, 256, 32, 2, 2, [0, 0, 0
|
, 0], 'avg', False, True)
verify_pool(1, 256, 31, 3, 3, [1, 2, 1, 2], 'avg', False, True
|
)
verify_pool(1, 256, 32, 2, 2, [1, 2, 1, 2], 'avg', False, False)
verify_pool(1, 256, 31, 4, 4, [3, 3, 3, 3], 'avg', False, False)
verify_pool(1, 256, 31, 4, 4, [0, 0, 0, 0], 'avg', False, False)
verify_pool(1, 256, 32, 2, 2, [0, 0, 0, 0], 'max', False)
verify_pool(1, 256, 31, 3, 3, [2, 1, 2, 1], 'max', False)
verify_pool(1, 256, 31, 3, 3, [2, 1, 2, 1], 'max', True)
verify_pool(1, 256, 31, 3, 3, [2, 1, 0, 3], 'avg', False, True)
verify_pool(1, 256, 32, 2, 2, [0, 3, 2, 1], 'avg', False, False)
verify_pool(1, 256, 31, 3, 3, [1, 0, 3, 2], 'max', False)
verify_pool(1, 256, 31, 3, 3, [3, 2, 1, 0], 'max', True)
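# --- Illustrative sketch (editor's addition; not part of the original file).
# The shape assertions in verify_pool follow the usual pooling formula; worked
# for the first case above (ih=32, kh=2, sh=2, no padding, floor mode):
#   oh = floor((32 - 2 + 0 + 0) / 2) + 1 = 15 + 1 = 16
def _expected_output_dim(ih, kh, pt, pb, sh, ceil_mode=False):
    round_fn = math.ceil if ceil_mode else math.floor
    return int(round_fn(float(ih - kh + pt + pb) / sh) + 1)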
def verify_global_pool(n, c, h, w, pool_type):
A = tvm.placeholder((n, c, h, w), name='A')
B = topi.cpp.nn.global_pool(A, pool_code[pool_type])
B = topi.cpp.nn.relu(B)
a_np = np.random.uniform(size=get_const_tuple(A.shape)).astype(A.dtype)
if pool_type == 'avg':
b_np = np.mean(a_np, axis=(2,3), keepdims=True)
elif pool_type =='max':
b_np = np.max(a_np, axis=(2,3), keepdims=True)
b_np = np.maximum(b_np, 0.0)
def check_device(device):
if not tvm.module.enabled(device):
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
target = topi.cpp.TEST_create_target(device)
if device == "llvm":
s = topi.cpp.generic.default_schedule(target, [B], False)
else:
s = topi.cpp.cuda.schedule_global_pool(target, [B])
ctx = tvm.context(device, 0)
a = tvm.nd.array(a_np, ctx)
b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), ctx)
f = tvm.build(s, [A, B], device)
f(a, b)
np.testing.assert_allclose(b.asnumpy(), b_np, rtol=1e-5)
for device in ['cuda', 'opencl', 'metal', 'rocm']:
check_device(device)
def test_global_pool():
verify_global_pool(1, 1024, 7, 7, 'avg')
verify_global_pool(4, 1024, 7, 7, 'avg')
verify_global_pool(1, 1024, 7, 7, 'max')
verify_global_pool(4, 1024, 7, 7, 'max')
if __name__ == "__main__":
test_pool()
test_global_pool()
|
111pontes/ydk-py
|
cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_infra_objmgr_cfg.py
|
Python
|
apache-2.0
| 90,489
| 0.018323
|
""" Cisco_IOS_XR_infra_objmgr_cfg
This module contains a collection of YANG definitions
for Cisco IOS\-XR infra\-objmgr package configuration.
This module contains definitions
for the following management objects\:
object\-group\: Object\-group configuration
Copyright (c) 2013\-2016 by Cisco Systems, Inc.
All rights reserved.
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYModelError
class EndPortEnum(Enum):
"""
EndPortEnum
End port
.. data:: echo = 7
Echo (7)
.. data:: discard = 9
Discard (9)
.. data:: daytime = 13
Daytime (13)
.. data:: chargen = 19
Character generator (19)
.. data:: ftp_data = 20
FTP data connections (used infrequently, 20)
.. data:: ftp = 21
File Transfer Protocol (21)
.. data:: ssh = 22
Secure Shell (22)
.. data:: telnet = 23
Telnet (23)
.. data:: smtp = 25
Simple Mail Transport Protocol (25)
.. data:: time = 37
Time (37)
.. data:: nicname = 43
Nicname (43)
.. data:: tacacs = 49
TAC Access Control System (49)
.. data:: domain = 53
Domain Name Service (53)
.. data:: gopher = 70
Gopher (70)
.. data:: finger = 79
Finger (79)
.. data:: www = 80
World Wide Web (HTTP, 80)
.. data:: host_name = 101
NIC hostname server (101)
.. data:: pop2 = 109
Post Office Protocol v2 (109)
.. data:: pop3 = 110
Post Office Protocol v3 (110)
.. data:: sun_rpc = 111
Sun Remote Procedure Call (111)
.. data:: ident = 113
Ident Protocol (113)
.. data:: nntp = 119
Network News Transport Protocol (119)
.. data:: bgp = 179
Border Gateway Protocol (179)
.. data:: irc = 194
Internet Relay Chat (194)
.. data:: pim_auto_rp = 496
PIM Auto-RP (496)
.. data:: exec_ = 512
Exec (rsh, 512)
.. data:: login = 513
Login (rlogin, 513)
.. data:: cmd = 514
Remote commands (rcmd, 514)
.. data:: lpd = 515
Printer service (515)
.. data:: uucp = 540
Unix-to-Unix Copy Program (540)
.. data:: klogin = 543
Kerberos login (543)
.. data:: kshell = 544
Kerberos shell (544)
.. data:: talk = 517
Talk (517)
.. data:: ldp = 646
LDP session connection attempts (MPLS, 646)
"""
echo = 7
discard = 9
daytime = 13
chargen = 19
ftp_data = 20
ftp = 21
ssh = 22
telnet = 23
smtp = 25
time = 37
nicname = 43
tacacs = 49
domain = 53
gopher = 70
finger = 79
www = 80
host_name = 101
pop2 = 109
pop3 = 110
sun_rpc = 111
ident = 113
nntp = 119
bgp = 179
irc = 194
pim_auto_rp = 496
exec_ = 512
login = 513
cmd = 514
lpd = 515
uucp = 540
klogin = 543
kshell = 544
talk = 517
ldp = 646
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_objmgr_cfg as meta
return meta._meta_table['EndPortEnum']
class PortEnum(Enum):
"""
PortEnum
Port
.. data:: echo = 7
Echo (7)
.. data:: discard = 9
Discard (9)
.. data:: daytime = 13
Daytime (13)
.. data:: chargen = 19
Character generator (19)
.. data:: ftp_data = 20
FTP data connections (used infrequently, 20)
.. data:: ftp = 21
File Transfer Protocol (21)
.. data:: ssh = 22
Secure Shell (22)
.. data:: telnet = 23
Telnet (23)
.. data:: smtp = 25
Simple Mail Transport Protocol (25)
.. data:: time = 37
Time (37)
.. data:: nicname = 43
Nicname (43)
.. data:: tacacs = 49
TAC Access Control System (49)
.. data:: domain = 53
Domain Name Service (53)
.. data:: gopher = 70
Gopher (70)
.. data:: finger = 79
Finger (79)
.. data:: www = 80
World Wide Web (HTTP, 80)
.. data:: host_name = 101
NIC hostname server (101)
.. data:: pop2 = 109
Post Office Protocol v2 (109)
.. data:: pop3 = 110
Post Office Protocol v3 (110)
.. data:: sun_rpc = 111
Sun Remote Procedure Call (111)
.. data:: ident = 113
Ident Protocol (113)
.. data:: nntp = 119
Network News Transport Protocol (119)
.. data:: bgp = 179
|
Border Gateway Protocol (179)
.. data:: irc = 194
Internet Relay Chat (194)
.. data:: pim_auto_rp = 496
PIM Auto-RP (496)
.. data
|
:: exec_ = 512
Exec (rsh, 512)
.. data:: login = 513
Login (rlogin, 513)
.. data:: cmd = 514
Remote commands (rcmd, 514)
.. data:: lpd = 515
Printer service (515)
.. data:: uucp = 540
Unix-to-Unix Copy Program (540)
.. data:: klogin = 543
Kerberos login (543)
.. data:: kshell = 544
Kerberos shell (544)
.. data:: talk = 517
Talk (517)
.. data:: ldp = 646
LDP session connection attempts (MPLS, 646)
"""
echo = 7
discard = 9
daytime = 13
chargen = 19
ftp_data = 20
ftp = 21
ssh = 22
telnet = 23
smtp = 25
time = 37
nicname = 43
tacacs = 49
domain = 53
gopher = 70
finger = 79
www = 80
host_name = 101
pop2 = 109
pop3 = 110
sun_rpc = 111
ident = 113
nntp = 119
bgp = 179
irc = 194
pim_auto_rp = 496
exec_ = 512
login = 513
cmd = 514
lpd = 515
uucp = 540
klogin = 543
kshell = 544
talk = 517
ldp = 646
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_objmgr_cfg as meta
return meta._meta_table['PortEnum']
class PortOperatorEnum(Enum):
"""
PortOperatorEnum
Port operator
.. data:: equal = 0
Match packets on ports equal to entered port
number
.. data:: not_equal = 1
Match packets on ports not equal to entered
port number
.. data:: greater_than = 2
Match packets on ports greater than entered
port number
.. data:: less_than = 3
Match packets on ports less than entered port
number
"""
equal = 0
not_equal = 1
greater_than = 2
less_than = 3
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_objmgr_cfg as meta
return meta._meta_table['PortOperatorEnum']
class StartPortEnum(Enum):
"""
StartPortEnum
Start port
.. data:: echo = 7
Echo (7)
.. data:: discard = 9
Discard (9)
.. data:: daytime = 13
Daytime (13)
.. data:: chargen = 19
Character generator (19)
.. data:: ftp_data = 20
FTP data connections (used infrequently, 20)
.. data:: ftp = 21
File Transfer Protocol (21)
.. data:: ssh = 22
Secure Shell (22)
.. data:: telnet = 23
Telnet (23)
.. data:: smtp = 25
Simple Mail Transport Protocol (25)
.. data:: time = 37
Time (37)
.. data:: nicname = 43
Nicname (43)
.. data:: tacacs = 49
TAC Access Control System (49)
.. data:: domain = 53
Domain Name Service (53)
.. data:: gopher = 70
Gopher (70)
.. data:: finger = 79
Finger (79)
.. data:: www = 80
World Wide Web (HTTP, 80)
.. data:: host_name = 101
NIC hostname server (101)
.. data:: pop2 = 109
Post Office Protocol v2 (109)
.. data:: pop3 = 110
Post Office Protocol v3 (110)
.. data:: sun_rpc = 111
Sun Remote Procedure Call (111)
.. data:: ident = 113
Ident Protocol (113)
.. data:: nntp = 119
Network News Transport Protocol (119)
.. data:: bgp = 179
Border Gateway Protocol (179)
.. data:: irc = 194
Internet Relay Chat (194)
.. data:: pim_auto_rp = 496
PIM Auto-RP (496)
.. data:: exec_ = 512
|
KWierso/treeherder
|
tests/log_parser/test_performance_parser.py
|
Python
|
mpl-2.0
| 1,056
| 0
|
import json
from treeherder.log_parser.parsers import (EmptyPerformanceData,
PerformanceParser)
def test_performance_log_parsing_malformed_perfherder_data():
"""
If we have malformed perfherder data lines, we should just ignore
them and still be able to parse the valid ones
"""
parser = PerformanceParser()
# invalid json
parser.parse_line("PERFHERDER
|
_DATA: {oh noes i am not valid json}", 1)
try:
# Empty performance data
parser.parse_line("PERFHERDER_DATA: {}", 2)
except EmptyPerformanceData:
pass
valid_perfherder_data = {
"framework": {"name": "talos"}, "suites": [{
"name": "basic_compositor_video",
"subtests": [{
"name": "240p.120fps.mp4_scale_fullscreen_startup",
|
"value": 1234
}]
}]
}
parser.parse_line('PERFHERDER_DATA: {}'.format(
json.dumps(valid_perfherder_data)), 3)
assert parser.get_artifact() == [valid_perfherder_data]
|
magus424/powerline
|
powerline/lib/debug.py
|
Python
|
mit
| 3,036
| 0.027339
|
#!/usr/bin/env python
# vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)
import gc
import sys
from types import FrameType
from itertools import chain
# From http://code.activestate.com/recipes/523004-find-cyclical-references/
def print_cycles(objects, outstream=sys.stdout, show_progress=False):
'''Find reference cycles
:param list objects:
A list of objects to find cycles in. It is often useful to pass in
gc.garbage to find the cycles that are preventing some objects from
being garbage collected.
:param file outstream:
The stream for output.
:param bool show_progress:
If True, print the number of objects reached as they are found.
'''
def print_path(path):
for
|
i, step in enumerate(path):
# next "wraps around"
next = path[(i + 1) % len(path)]
outstream.write("
|
%s -- " % str(type(step)))
written = False
if isinstance(step, dict):
for key, val in step.items():
if val is next:
outstream.write("[%s]" % repr(key))
written = True
break
if key is next:
outstream.write("[key] = %s" % repr(val))
written = True
break
elif isinstance(step, (list, tuple)):
for i, item in enumerate(step):
if item is next:
outstream.write("[%d]" % i)
written = True
elif getattr(type(step), '__getattribute__', None) in (object.__getattribute__, type.__getattribute__):
for attr in chain(dir(step), getattr(step, '__dict__', ())):
if getattr(step, attr, None) is next:
try:
outstream.write('%r.%s' % (step, attr))
except TypeError:
outstream.write('.%s' % (step, attr))
written = True
break
if not written:
outstream.write(repr(step))
outstream.write(" ->\n")
outstream.write("\n")
def recurse(obj, start, all, current_path):
if show_progress:
outstream.write("%d\r" % len(all))
all[id(obj)] = None
referents = gc.get_referents(obj)
for referent in referents:
# If we've found our way back to the start, this is
# a cycle, so print it out
if referent is start:
try:
outstream.write('Cyclic reference: %r\n' % referent)
except TypeError:
try:
outstream.write('Cyclic reference: %i (%r)\n' % (id(referent), type(referent)))
except TypeError:
outstream.write('Cyclic reference: %i\n' % id(referent))
print_path(current_path)
# Don't go back through the original list of objects, or
# through temporary references to the object, since those
# are just an artifact of the cycle detector itself.
elif referent is objects or isinstance(referent, FrameType):
continue
# We haven't seen this object before, so recurse
elif id(referent) not in all:
recurse(referent, start, all, current_path + (obj,))
for obj in objects:
# We are not interested in non-powerline cyclic references
try:
if not type(obj).__module__.startswith('powerline'):
continue
except AttributeError:
continue
recurse(obj, obj, {}, ())
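# --- Illustrative usage sketch (editor's addition; not part of the original
# file). Per the docstring, gc.garbage is the typical argument: it holds the
# objects the collector could not free, so any powerline-related cycles in it
# get printed.
def _debug_powerline_cycles():
	gc.collect()
	print_cycles(gc.garbage, outstream=sys.stderr, show_progress=True)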
|
gpodder/mygpo
|
mygpo/administration/group.py
|
Python
|
agpl-3.0
| 1,212
| 0.00165
|
from datetime import datetime
from collections import defaultdict
DEFAULT_RELEASE = datetime(1970, 1, 1)
_SORT_KEY = lambda eps: eps[0].released or DEFAULT_RELEASE
class PodcastGrouper(object):
"""Groups episodes of two podcasts based on certain features
The results are sorted by release timestamp"""
DEFAULT_RELEASE = datetime(1970, 1, 1)
def __init__(self, podcasts):
if not podcasts or (None in podcasts):
raise ValueError("podcasts must not be None")
self.podcasts = podcasts
def __get_episodes(self):
episodes = {}
for podcast in self.podcasts:
episodes.update(dict((e.id, e.id) for e in podcast.episode_set.all()))
return episodes
|
def group(self, get_features):
episodes = self.__get_episodes()
episode_groups = defaultdict(list)
episode_features = map(get_features, episodes.items())
for features, episode_id in episode_features:
|
episode = episodes[episode_id]
episode_groups[features].append(episode)
# groups = sorted(episode_groups.values(), key=_SORT_KEY)
groups = episode_groups.values()
return enumerate(groups)
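# --- Illustrative sketch (editor's addition; not part of the original file).
# group() expects `get_features` to map an item of the internal episode
# mapping to a (features, episode_id) pair; grouping every episode under its
# own id is the trivial case and yields singleton groups.
def _features_by_id(item):
    episode_id, _ = item
    return (episode_id, episode_id)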
|
okfnepal/election-nepal
|
fabfile.py
|
Python
|
mit
| 21,828
| 0.000321
|
from __future__ import print_function, unicode_literals
from future.builtins import open
import os
import re
import sys
from contextlib import contextmanager
from functools import wraps
from getpass import getpass, getuser
from glob import glob
from importlib import import_module
from posixpath import join
from mezzanine.utils.conf import real_project_name
from fabric.api import abort, env, cd, prefix, sudo as _sudo, run as _run, \
hide, task, local
from fabric.context_managers import settings as fab_settings
from fabric.contrib.console import confirm
from fabric.contrib.files import exists, upload_template
from fabric.contrib.project import rsync_project
from fabric.colors import yellow, green, blue, red
from fabric.decorators import hosts
################
# Config setup #
################
env.proj_app = real_project_name("electionNepal")
conf = {}
if sys.argv[0].split(os.sep)[-1] in ("fab", "fab-script.py"):
# Ensure we import settings from the current dir
try:
conf = import_module("%s.settings" % env.proj_app).FABRIC
try:
conf["HOSTS"][0]
except (KeyError, ValueError):
raise ImportError
except (ImportError, AttributeError):
print("Aborting, no hosts defined.")
exit()
env.db_pass = conf.get("DB_PASS", None)
env.admin_pass = conf.get("ADMIN_PASS", None)
env.user = conf.get("SSH_USER", getuser())
env.password = conf.get("SSH_PASS", None)
env.key_filename = conf.get("SSH_KEY_PATH", None)
env.hosts = conf.get("HOSTS", [""])
env.proj_name = conf.get("PROJECT_NAME", env.proj_app)
env.venv_home = conf.get("VIRTUALENV_HOME", "/home/%s/.virtualenvs" % env.user)
env.venv_path = join(env.venv_home, env.proj_name)
env.proj_path = "/home/%s/mezzanine/%s" % (env.user, env.proj_name)
env.manage = "%s/bin/python %s/manage.py" % (env.venv_path, env.proj_path)
env.domains = conf.get("DOMAINS", [conf.get("LIVE_HOSTNAME", env.hosts[0])])
env.domains_nginx = " ".join(env.domains)
env.domains_regex = "|".join(env.domains)
env.domains_python = ", ".join(["'%s'" % s for s in env.domains])
env.ssl_disabled = "#" if len(env.domains) > 1 else ""
env.vcs_tools = ["git", "hg"]
env.deploy_tool = conf.get("DEPLOY_TOOL", "rsync")
env.reqs_path = conf.get("REQUIREMENTS_PATH", None)
env.locale = conf.get("LOCALE", "en_US.UTF-8")
env.num_workers = conf.get("NUM_WORKERS",
"multiprocessing.cpu_count() * 2 + 1")
env.secret_key = conf.get("SECRET_KEY", "")
env.nevercache_key = conf.get("NEVERCACHE_KEY", "")
# Remote git repos need to be "bare" and reside separated from the project
if env.deploy_tool == "git":
env.repo_path = "/home/%s/git/%s.git" % (env.user, env.proj_name)
else:
env.repo_path = env.proj_path
##################
# Template setup #
##################
# Each template gets uploaded at deploy time, only if their
# contents have changed, in which case the reload command is
# also run.
templates = {
"nginx": {
"local_path": "deploy/nginx.conf.template",
"remote_path": "/etc/nginx/sites-enabled/%(proj_name)s.conf",
"reload_command": "service nginx restart",
},
"supervisor": {
"local_path": "deploy/supervisor.conf.template",
"remote_path": "/etc/supervisor/conf.d/%(proj_name)s.conf",
"reload_command": "supervisorctl update gunicorn_%(proj_name)s",
},
"cron": {
"local_path": "deploy/crontab.template",
"remote_path": "/etc/cron.d/%(proj_name)s",
"owner": "root",
"mode": "600",
},
"gunicorn": {
"local_path": "deploy/gunicorn.conf.py.template",
"remote_path": "%(proj_path)s/gunicorn.conf.py",
},
"settings": {
"local_path": "deploy/local_settings.py.template",
"remote_path": "%(proj_path)s/%(proj_app)s/local_settings.py",
},
}
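# --- Illustrative note (editor's addition; not part of the original file).
# Adding another managed template is just another entry in the dict above; the
# paths and service name here are hypothetical placeholders.
# templates["memcached"] = {
#     "local_path": "deploy/memcached.conf.template",
#     "remote_path": "/etc/memcached.conf",
#     "reload_command": "service memcached restart",
# }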
######################################
# Context for virtualenv and project #
######################################
@contextmanager
def virtualenv():
"""
Runs commands within the project's virtualenv.
"""
with cd(env.venv_path):
with prefix("source %s/bin/activate" % env.venv_path):
yield
@contextmanager
def project():
"""
Runs commands within the project's directory.
"""
with virtualenv():
with cd(env.proj_path):
yield
@contextmanager
def update_changed_requirements():
"""
Checks for changes in the requirements file across an update,
and gets new requirements if changes have occurred.
"""
reqs_path = join(env.proj_path, env.reqs_path)
get_reqs = lambda: run("cat %s" % reqs_path, show=False)
old_reqs = get_reqs() if env.reqs_path else ""
yield
if old_reqs:
|
new_reqs = get_reqs()
if old_reqs == new_reqs:
# Unpinned requirements should always be checked.
for req in new_reqs.split("\n"):
if req.startswith("-e"):
if "@" not in req:
# Editable requirement without pinned commit.
break
elif req.strip() and not req.startswith("#"):
if not set(">=<") & set(req):
|
# PyPI requirement without version.
break
else:
# All requirements are pinned.
return
pip("-r %s/%s" % (env.proj_path, env.reqs_path))
###########################################
# Utils and wrappers for various commands #
###########################################
def _print(output):
print()
print(output)
print()
def print_command(command):
_print(blue("$ ", bold=True) +
yellow(command, bold=True) +
red(" ->", bold=True))
@task
def run(command, show=True, *args, **kwargs):
"""
    Runs a shell command on the remote server.
"""
if show:
print_command(command)
with hide("running"):
return _run(command, *args, **kwargs)
@task
def sudo(command, show=True, *args, **kwargs):
"""
Runs a command as sudo on the remote server.
"""
if show:
print_command(command)
with hide("running"):
return _sudo(command, *args, **kwargs)
def log_call(func):
@wraps(func)
    def logged(*args, **kwargs):
        header = "-" * len(func.__name__)
        _print(green("\n".join([header, func.__name__, header]), bold=True))
        return func(*args, **kwargs)
return logged
def get_templates():
"""
Returns each of the templates with env vars injected.
"""
injected = {}
for name, data in templates.items():
injected[name] = dict([(k, v % env) for k, v in data.items()])
return injected
def upload_template_and_reload(name):
"""
    Uploads a template only if it has changed, and if so, reloads the
    related service.
"""
template = get_templates()[name]
local_path = template["local_path"]
if not os.path.exists(local_path):
project_root = os.path.dirname(os.path.abspath(__file__))
local_path = os.path.join(project_root, local_path)
remote_path = template["remote_path"]
reload_command = template.get("reload_command")
owner = template.get("owner")
mode = template.get("mode")
remote_data = ""
if exists(remote_path):
with hide("stdout"):
remote_data = sudo("cat %s" % remote_path, show=False)
with open(local_path, "r") as f:
local_data = f.read()
# Escape all non-string-formatting-placeholder occurrences of '%':
local_data = re.sub(r"%(?!\(\w+\)s)", "%%", local_data)
if "%(db_pass)s" in local_data:
env.db_pass = db_pass()
local_data %= env
clean = lambda s: s.replace("\n", "").replace("\r", "").strip()
if clean(remote_data) == clean(local_data):
return
upload_template(local_path, remote_path, env, use_sudo=True, backup=False)
if owner:
sudo("chown %s %s" % (owner, remote_path))
if mode:
sudo("chmod %s %s" % (mode, remote_path))
if reload_command:
sudo(reload_command)
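# Hedged usage sketch (added; not part of the original fabfile): pushing every
# template defined in the `templates` dict above. Each file is only uploaded,
# chown/chmod'ed, and its service reloaded when the rendered contents differ
# from what is already on the server.
@task
def example_push_configs():
    for name in get_templates():
        upload_template_and_reload(name)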
def rsync_upload():
"""
Uploads the project with rsync exclu
|
nylas/sync-engine
|
migrations/versions/034_cascade_folder_deletes_to_imapuid.py
|
Python
|
agpl-3.0
| 4,899
| 0.000408
|
"""cascade folder deletes to imapuid
Otherwise, since this fk is NOT NULL, deleting a folder which has associated
imapuids still existing will cause a database IntegrityError. Only the mail
sync engine does such a thing. Nothing else should be deleting folders,
hard or soft.
This also fixes a problem where if e.g. someone disables their Spam folder
from showing up in Gmail IMAP, the server will crash trying to delete that
folder because the account.spam_folder_id constraint fails.
Revision ID: 350a08df27ee
Revises: 1eab2619cc4f
Create Date: 2014-05-25 01:40:21.762119
"""
# revision identifiers, used by Alembic.
revision = '350a08df27ee'
down_revision = '1eab2619cc4f'
from alembic import op
def upgrade():
op.drop_constraint('imapuid_ibfk_3', 'imapuid', type_='foreignkey')
op.create_foreign_key('imapuid_ibfk_3', 'imapuid', 'folder',
['folder_id'], ['id'], ondelete='CASCADE')
op.drop_constraint('account_ibfk_2', 'account', type_='foreignkey')
op.create_foreign_key('account_ibfk_2', 'account', 'folder',
['inbox_folder_id'], ['id'], ondelete='SET NULL')
op.drop_constraint('account_ibfk_3', 'account', type_='foreignkey')
op.create_foreign_key('account_ibfk_3', 'account', 'folder',
['sent_folder_id'], ['id'], ondelete='SET NULL')
op.drop_constraint('account_ibfk_4', 'account', type_='foreignkey')
op.create_foreign_key('account_ibfk_4', 'account', 'folder',
['drafts_folder_id'], ['id'], ondelete='SET NULL')
op.drop_constraint('account_ibfk_5', 'account', type_='foreignkey')
op.create_foreign_key('account_ibfk_5', 'account', 'folder',
['spam_folder_id'], ['id'], ondelete='SET NULL')
op.drop_constraint('account_ibfk_6', 'account', type_='foreignkey')
op.create_foreign_key('account_ibfk_6', 'account', 'folder',
['trash_folder_id'], ['id'], ondelete='SET NULL')
op.drop_constraint('account_ibfk_7', 'account', type_='foreignkey')
op.create_foreign_key('account_ibfk_7', 'account', 'folder',
['archive_folder_id'], ['id'], ondelete='SET NULL')
op.drop_constraint('account_ibfk_8', 'account', type_
|
='foreignkey')
op.create_foreign_key('account_ibfk_8', 'accou
|
nt', 'folder',
['all_folder_id'], ['id'], ondelete='SET NULL')
op.drop_constraint('account_ibfk_9', 'account', type_='foreignkey')
op.create_foreign_key('account_ibfk_9', 'account', 'folder',
['starred_folder_id'], ['id'], ondelete='SET NULL')
# for some reason this was left out of migration 024, so might not exist
try:
op.drop_constraint('account_ibfk_10', 'account', type_='foreignkey')
except:
pass
op.create_foreign_key('account_ibfk_10', 'account', 'folder',
['important_folder_id'], ['id'], ondelete='SET NULL')
def downgrade():
op.drop_constraint('imapuid_ibfk_3', 'imapuid', type_='foreignkey')
op.create_foreign_key('imapuid_ibfk_3', 'imapuid', 'folder',
['folder_id'], ['id'])
op.drop_constraint('account_ibfk_2', 'account', type_='foreignkey')
op.create_foreign_key('account_ibfk_2', 'account', 'folder',
['inbox_folder_id'], ['id'])
op.drop_constraint('account_ibfk_3', 'account', type_='foreignkey')
op.create_foreign_key('account_ibfk_3', 'account', 'folder',
['sent_folder_id'], ['id'])
op.drop_constraint('account_ibfk_4', 'account', type_='foreignkey')
op.create_foreign_key('account_ibfk_4', 'account', 'folder',
['drafts_folder_id'], ['id'])
op.drop_constraint('account_ibfk_5', 'account', type_='foreignkey')
op.create_foreign_key('account_ibfk_5', 'account', 'folder',
['spam_folder_id'], ['id'])
op.drop_constraint('account_ibfk_6', 'account', type_='foreignkey')
op.create_foreign_key('account_ibfk_6', 'account', 'folder',
['trash_folder_id'], ['id'])
op.drop_constraint('account_ibfk_7', 'account', type_='foreignkey')
op.create_foreign_key('account_ibfk_7', 'account', 'folder',
['archive_folder_id'], ['id'])
op.drop_constraint('account_ibfk_8', 'account', type_='foreignkey')
op.create_foreign_key('account_ibfk_8', 'account', 'folder',
['all_folder_id'], ['id'])
op.drop_constraint('account_ibfk_9', 'account', type_='foreignkey')
op.create_foreign_key('account_ibfk_9', 'account', 'folder',
['starred_folder_id'], ['id'])
op.drop_constraint('account_ibfk_10', 'account', type_='foreignkey')
op.create_foreign_key('account_ibfk_10', 'account', 'folder',
['important_folder_id'], ['id'])
|
LibraryBox-Dev/LibraryBox-core
|
piratebox_origin/piratebox/piratebox/python_lib/messages.py
|
Python
|
gpl-3.0
| 1,109
| 0.038774
|
import string
import socket
import base64
import sys
class message:
def __init__(self, name="generate" ):
if name == "generate":
self.name=socket.gethostname()
else:
self.name=name
self.type="gc"
self.decoded=""
def set ( self, content=" " ):
base64content = base64.b64encode ( content )
self.decoded="piratebox;"+ self.type + ";01;" + self.name + ";" + base64content
def get ( self ):
# TODO Split decoded part
message_parts = string.split ( self.decoded , ";" )
if message_parts[0] != "piratebox":
return None
b64_content_part = message_parts[4]
content = base64.b64decode ( b64_content_part )
return content
def get_sendername (self):
ret
|
urn self.name
def get_message ( self ):
return self.decoded
def set_message ( self , decoded):
|
self.decoded = decoded
class shoutbox_message(message):
def __init__(self, name="generate" ):
message.__init__( self , name)
self.type="sb"
|
k0pernicus/giwyn
|
giwyn/lib/gitconf/commands.py
|
Python
|
gpl-3.0
| 1,268
| 0.005521
|
import giwyn.lib.settings.settings
from git import *
def list_git_projects():
print("List of git projects:")
#end="" -> avoid last '\n' character
for git_object in giwyn.lib.settings.settings.GIT_OBJECTS:
print(git_object)
def push_ready_projects():
print("Repository to push...")
any_repo_to_push = False
for git_project in giwyn.lib.settings.sett
|
ings.GIT_OBJECTS:
if git_project.current_status == "TO PUSH":
print("Pushing {0} in the current branch...".format(git_project.entry))
git_project.git_object.remote().push()
any_repo_to_push = True
if not any_repo_to_push:
print("There is no repository to push yet!")
def pull_ready_projects():
print("Repository to pull...")
any_repo_to_pull = False
for git_project in giwy
|
n.lib.settings.settings.GIT_OBJECTS:
if git_project.current_status == "CLEAN":
print("Try to pull {0}, from the current branch...".format(git_project.entry))
#Pull from origin
if git_project.git_object.remotes != []:
git_project.git_object.remotes.origin.pull()
any_repo_to_pull = True
if not any_repo_to_pull:
print("There is no repository to pull yet!")
|
DataCanvasIO/pyDataCanvas
|
datacanvas/dataset/parser/parser.py
|
Python
|
apache-2.0
| 435
| 0
|
# -*- coding: utf-8 -*-
from ..common import get_module_class
|
class Parser(object):
@staticmethod
def get(parser_name):
clazz = get_module_class(parser_name, __name__)
return clazz()
def loads(self, content):
return content
def dumps(self, content):
return content
def load(self, f):
return NotImplemented
def dump(self, content,
|
f):
return NotImplemented
|
dirjud/pickup
|
event/models.py
|
Python
|
gpl-2.0
| 3,032
| 0.01715
|
from django.db import models
from django.utils import timezone
import pytz
import datetime
def hash(n):
n = int(n)
return ((0x0000FFFF & n)<<16) + ((0xFFFF0000 & n)>>16)
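# Hedged worked example (added note): hash() swaps the low and high 16-bit
# halves of its argument, e.g. hash(1) == 0x00010000 == 65536 and
# hash(65536) == 1, so for values below 2**32 it is its own inverse.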
class EventInstance(object):
def __init__(self, event, event_time, date):
self.date = date.date()
self.time = date.time()
self.event= event
self.event_time = event_time
self.attending = Signup.objects.filter(event=event, date=self.date, status=Signup.ATTENDING)
self.not_attending = Signup.objects.filter(event=event, date=self.date, status=Signup.NOT_ATTENDING)
def get_date_id(self):
return "%4d_%02d_%02d" % (self.date.year, self.date.month, self.date.day)
class Event(models.Model):
name = models.CharField(max_length=100)
timezone = models.CharField(max_length=50, choices=[(x,x) for x in pytz.all_timezones ], default="US/Mountain")
description = models.TextField()
location_lat = models.FloatField()
location_lon = models.FloatField()
addr = models.CharField(max_length=200)
city = models.CharField(max_length=100)
state = models.CharField(max_length=5)
zip = models.CharField(max_length=20)
contact_emails = models.CharField(max_length=500, help_text='Comma separated list of email addresses')
def __unicode__(self):
return self.name
def get_next(self):
timezone.activate(pytz.timezone(self.timezone))
now = timezone.now().date()
events = [ EventInstance(self, d, d.get_next(now)) for d in self.times.all() ]
events.sort(key=lambda x:x.date)
return events
class EventTime(models.Model):
DAY_CHOICES = (
(0, "Monday", ),
(1, "Tuesday", ),
(2, "Wednesday",),
(3, "Thursday", ),
(4, "Friday", ),
(5, "Saturday", ),
(6, "Sunday", ),
|
)
event= models.ForeignKey(Event, related_name="times")
day = models.IntegerField(choices=DAY_CHOICES)
time = models.TimeField()
|
def get_next(self, now):
dow = now.weekday()
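        # Hedged worked example (added note): if `now` falls on a Friday
        # (dow == 4) and self.day is Monday (0), then (0 - 4) % 7 == 3, so
        # next_date below lands on the coming Monday.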
td = datetime.timedelta(days=(self.day - dow) % 7)
next_date = now + td
return datetime.datetime.combine(next_date, self.time)
class Signup(models.Model):
ATTENDING = 0
NOT_ATTENDING = 1
status_choices = (
( ATTENDING , "I'm In", ),
( NOT_ATTENDING, "I'm Out", ),
)
event = models.ForeignKey(Event, related_name="signups")
date = models.DateField()
name = models.CharField(max_length=100)
status= models.IntegerField(choices=status_choices, blank=False, default=ATTENDING)
def hash(self):
return hash(self.pk)
class Comment(models.Model):
class Meta:
ordering = ["-timestamp"]
event = models.ForeignKey(Event, related_name="comments")
name = models.CharField(max_length=100)
comment = models.TextField()
timestamp = models.DateTimeField(auto_now_add=True)
|
themaxx75/lapare-bijoux
|
lapare.ca/lapare/apps/www/migrations/0011_auto_20151022_2037.py
|
Python
|
bsd-3-clause
| 375
| 0
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Mig
|
ration):
dependencies = [
('www', '0010_expo_info_url'),
]
operations = [
migrations.RenameField(
model_name='expo',
old_name='info_url',
new_
|
name='url',
),
]
|
rodrigobersan/X-Serv-18.1-Practica1
|
project/acorta/models.py
|
Python
|
gpl-2.0
| 219
| 0.004566
|
from __future__ import unicode_literals
from django.db import models
# Create your models here.
clas
|
s Urls(models.Model):
longurl = models.CharField(max
|
_length=256)
shorturl = models.CharField(max_length=128)
|
alexandergraul/pvs-bot
|
launcher.py
|
Python
|
gpl-3.0
| 1,561
| 0.000641
|
import RoleManagement
import bot_logger
import purger
async def run_op(client, message, bot_log):
levels = {
'admin': ['admin'],
'high': ['admin', 'moderator', 'panda bat'],
'medium': ['trial moderator', 'moderator', 'admin', 'pa
|
nda bat'],
'low': ['@everyone']
}
ops = {'+': [RoleManagement.assign_role, 'low'],
'-': [RoleManagement.remove_role, 'low'],
'reduce': [RoleManagement.reduce_roles, 'high'],
|
'timein': [RoleManagement.timein_user, 'medium'],
'timeout': [RoleManagement.timeout_user, 'medium'],
'verify': [RoleManagement.verify_rank, 'low'],
'purge': [purger.purge_channel, 'high'],
'count': [RoleManagement.count_users, 'medium'],
}
# unwrap message into operation and arguments
operation = message.content[1:]
try:
operation, _ = operation.split(maxsplit=1)
except ValueError:
if operation == 'purge':
pass
else:
return None
# check if operation exists
if operation in ops.keys():
op = ops[operation]
else:
return None
success = False
required_roles = levels[op[1]]
for r in message.author.roles:
if r.name.lower() in required_roles:
await op[0](client, message, bot_log)
success = True
break
if success is not True:
        await client.send_message(message.channel,
                                  "Failed running `{}`".format(operation))
|
grangier/django-11599
|
tests/modeltests/custom_pk/models.py
|
Python
|
bsd-3-clause
| 5,234
| 0.001911
|
# -*- coding: utf-8 -*-
"""
14. Using a custom primary key
By default, Django adds an ``"id"`` field to each model. But you can override
this behavior by explicitly adding ``primary_key=True`` to a field.
"""
from django.conf import settings
from django.db import models, transaction, IntegrityError
from fields import MyAutoField
class Employee(models.Model):
employee_code = models.IntegerField(primary_key=True, db_column = 'code')
first_name = models.CharField(max_length=20)
last_name = models.CharField(max_length=20)
class Meta:
ordering = ('last_name', 'first_name')
def __unicode__(self):
return u"%s %s" % (self.first_name, self.last_name)
class Business(models.Model):
name = models.CharField(max_length=20, primary_key=True)
employees = models.ManyToManyField(Employee)
class Meta:
verbose_name_plural = 'businesses'
def __unicode__(self):
return self.name
class Bar(models.Model):
id = MyAutoField(primary_key=True, db_index=True)
def __unicode__(self):
return repr(self.pk)
class Foo(models.Model):
bar = models.ForeignKey(Bar)
__test__ = {'API_TESTS':"""
>>> dan = Employee(employee_code=123, first_name='Dan', last_name='Jones')
>>> dan.save()
>>> Employee.objects.all()
[<Employee: Dan Jones>]
>>> fran = Employee(employee_code=456, first_name='Fran', last_name='Bones')
>>> fran.save()
>>> Employee.objects.all()
[<Employee: Fran Bones>, <Employee: Dan Jones>]
>>> Employee.objects.get(pk=123)
<Employee: Dan Jones>
>>> Employee.objects.get(pk=456)
<Employee: Fran Bones>
>>> Employee.objects.get(pk=42)
Traceback (most recent call last):
...
DoesNotExist: Employee matching query does not exist.
# Use the name of the primary key, rather than pk.
>>> Employee.objects.get(employee_code__exact=123)
<Employee: Dan Jones>
# pk can be used as a substitute for the primary key.
>>> Employee.objects.filter(pk__in=[123, 456])
[<Employee: Fran Bones>, <Employee: Dan Jones>]
# The primary key can be accessed via the pk property on the model.
>>> e = Employee.objects.get(pk=123)
>>> e.pk
123
# Or we can use the real attribute name for the primary key:
>>> e.employee_code
123
# Fran got married and changed her last name.
>>> fran = Employee.objects.get(pk=456)
>>> fran.last_name = 'Jones'
>>> fran.save()
>>> Employee.objects.filter(last_name__exact='Jones')
[<Employee: Dan Jones>, <Employee: Fran Jones>]
>>> emps = Employee.objects.in_bulk([123, 456])
>>> emps[123]
<Employee: Dan Jones>
>>> b = Business(name='Sears')
>>> b.save()
>>> b.employees.add(dan, fran)
>>> b.employees.all()
[<Employee: Dan Jones>, <Employee: Fran Jones>]
>>> fran.business_set.all()
[<Business: Sears>]
>>> Business.objects.in_bulk(['Sears'])
{u'Sears': <Business: Sears>}
>>>
|
Business.objects.filter(name__exact='Sears')
[<Business: Sears>]
>>> Business.objects.filter(pk='Sears')
[<Business: Sears>]
# Queries across tables, involving
|
primary key
>>> Employee.objects.filter(business__name__exact='Sears')
[<Employee: Dan Jones>, <Employee: Fran Jones>]
>>> Employee.objects.filter(business__pk='Sears')
[<Employee: Dan Jones>, <Employee: Fran Jones>]
>>> Business.objects.filter(employees__employee_code__exact=123)
[<Business: Sears>]
>>> Business.objects.filter(employees__pk=123)
[<Business: Sears>]
>>> Business.objects.filter(employees__first_name__startswith='Fran')
[<Business: Sears>]
# Primary key may be unicode string
>>> bus = Business(name=u'jaźń')
>>> bus.save()
# The primary key must also obviously be unique, so trying to create a new
# object with the same primary key will fail.
>>> try:
... sid = transaction.savepoint()
... Employee.objects.create(employee_code=123, first_name='Fred', last_name='Jones')
... transaction.savepoint_commit(sid)
... except Exception, e:
... if isinstance(e, IntegrityError):
... transaction.savepoint_rollback(sid)
... print "Pass"
... else:
... print "Fail with %s" % type(e)
Pass
# Regression for #10785 -- Custom fields can be used for primary keys.
>>> new_bar = Bar.objects.create()
>>> new_foo = Foo.objects.create(bar=new_bar)
# FIXME: This still doesn't work, but will require some changes in
# get_db_prep_lookup to fix it.
# >>> f = Foo.objects.get(bar=new_bar.pk)
# >>> f == new_foo
# True
# >>> f.bar == new_bar
# True
>>> f = Foo.objects.get(bar=new_bar)
>>> f == new_foo
True
>>> f.bar == new_bar
True
"""}
# SQLite lets objects be saved with an empty primary key, even though an
# integer is expected. So we can't check for an error being raised in that case
# for SQLite. Remove it from the suite for this next bit.
if settings.DATABASE_ENGINE != 'sqlite3':
__test__["API_TESTS"] += """
# The primary key must be specified, so an error is raised if you try to create
# an object without it.
>>> try:
... sid = transaction.savepoint()
... Employee.objects.create(first_name='Tom', last_name='Smith')
... print 'hello'
... transaction.savepoint_commit(sid)
... print 'hello2'
... except Exception, e:
... if isinstance(e, IntegrityError):
... transaction.savepoint_rollback(sid)
... print "Pass"
... else:
... print "Fail with %s" % type(e)
Pass
"""
|
takeshineshiro/horizon
|
openstack_dashboard/dashboards/project/access_and_security/tabs.py
|
Python
|
apache-2.0
| 5,203
| 0
|
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import tabs
from neutronclient.common import exceptions as neutron_exc
from openstack_dashboard.api import keystone
from openstack_dashboard.api import network
from openstack_dashboard.api import nova
from openstack_dashboard.dashboards.project.access_and_security.\
api_access.tables import EndpointsTable
from openstack_dashboard.dashboards.project.access_and_security.\
floating_ips.tables import FloatingIPsTable
from openstack_dashboard.dashboards.project.access_and_security.\
keypairs.tables import KeypairsTable
from openstack_dashboard.dashboards.project.access_and_security.\
security_groups.tables import SecurityGroupsTable
class SecurityGroupsTab(tabs.TableTab):
table_classes = (SecurityGroupsTable,)
name = _("Security Groups")
slug = "security_groups_tab"
template_name = "horizon/common/_detail_table.html"
permissions = ('openstack.services.compute',)
def get_security_groups_data(self):
try:
security_groups = network.security_group_list(self.request)
except neutron_exc.ConnectionFailed:
security_groups = []
exceptions.handle(self.request)
except Exception:
security_groups = []
exceptions.handle(self.request,
_('Unable to retrieve security groups.'))
return sorted(security_groups, key=lambda group: group.name)
class KeypairsTab(tabs.TableTab):
table_classes = (KeypairsTable,)
name = _("Key Pairs")
slug = "keypairs_tab"
template_name = "horizon/common/_detail_table.html"
permissions = ('openstack.services.compute',)
def get_keypairs_data(self):
try:
keypairs = nova.keypair_list(self.request)
except Exception:
keypairs = []
exceptions.handle(self.request,
_('Unable to retrieve key pair list.'))
return keypairs
class FloatingIPsTab(tabs.TableTab):
table_classes = (FloatingIPsTable,)
name = _("Floating IPs")
slug = "floating_ips_tab"
template_name = "horizon/common/_detail_table.html"
permissions = ('openstack.services.compute',)
def get_floating_ips_data(self):
try:
floating_ips = network.tenant_floating_ip_list(self.request)
except neutron_exc.ConnectionFailed:
floating_ips = []
exceptions.handle(self.request)
except Exception:
floating_ips = []
exceptions.handle(self.request,
_('Unable to retrieve floating IP addresses.'))
try:
floating_ip_pools = network.floating_ip_pools_list(self.request)
except neutron_exc.ConnectionFailed:
floating_ip_pools = []
exceptions.handle(self.request)
except Exception:
floating_ip_pools = []
exceptions.handle(self.request,
_('Unable to retrieve floating IP pools.'))
pool_dict = dict([(obj.id, obj.name) for obj in floating_ip_pools])
instances = []
try:
instances, has_more = nova.server_list(self.request)
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve instance list.'))
instances_dict = dict([(obj.id, obj.name) for obj in instances])
for ip in floating_ips:
ip.instance_name = instances_dict.get(ip.instance_id)
ip.pool_name = pool_dict.get(ip.pool, ip.pool)
return floating_ips
def allowed(self, request):
return network.floating_ip_supported(request)
class APIAccessTab(tabs.TableTab):
table_classes = (EndpointsTable,)
|
name = _("API Access")
slug = "api_access_tab"
template_name = "horizon/common/_detail_table.html"
def get_endpoints_data(self):
services = []
for i, service in enumerate(self.request.user.service_catalog):
service['id'] = i
services.append(
keystone.Service(service, self.request.user.services_region))
return services
class AccessAndSecurityTabs(tabs.TabGroup):
slug = "access_security_tabs"
|
tabs = (SecurityGroupsTab, KeypairsTab, FloatingIPsTab, APIAccessTab)
sticky = True
|
lmazuel/azure-sdk-for-python
|
azure-batch/azure/batch/models/pool_evaluate_auto_scale_parameter.py
|
Python
|
mit
| 1,498
| 0
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class PoolEvaluateAutoScaleParameter(Model):
"""Options for evaluating an automatic scaling formula on a pool.
:param auto_scale_formul
|
a: The formula for the desired number of compute
nodes in the pool. The formula is validated and its results calculated,
but it is not applied to the pool. To apply the formula to the pool,
'Enable automatic scaling on a pool'. For more information about
speci
|
fying this formula, see Automatically scale compute nodes in an Azure
Batch pool
(https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling).
:type auto_scale_formula: str
"""
_validation = {
'auto_scale_formula': {'required': True},
}
_attribute_map = {
'auto_scale_formula': {'key': 'autoScaleFormula', 'type': 'str'},
}
def __init__(self, auto_scale_formula):
super(PoolEvaluateAutoScaleParameter, self).__init__()
self.auto_scale_formula = auto_scale_formula
|
tps12/Tec-Nine
|
rock/sedimentary.py
|
Python
|
gpl-3.0
| 2,309
| 0.004764
|
def deposit(materials, life, sea, climate):
contributions = []
depositkeys = set()
for m in materials:
t = 0
i = len(m.substance) - 1
sources = []
keys = set()
while t < m.total:
dt = m.total - t
layer = m.substance[i]
if layer['thickness'] >= dt:
sources.append({ 'rock': layer['rock'], 'thickness': dt })
else:
sources.append(layer)
keys = keys.union(sources[-1]['rock'].keys())
t += sources[-1]['thickness']
i -= 1
rock = { 'type': 'S', 'name': 'S' }
for k in keys:
if k not in rock:
# weight attributes by thickness
rock[k] = sum([float(s['thickness']) * s['rock'][k]
if k in s['rock'] else 0
for s in sources])/m.total
depositkeys = depositkeys.union(rock.keys())
contributions.append({ 'rock': rock, 'thickness': m.amount })
rock = { 'type': 'S', 'toughness': 0, 'name': None }
thickness = sum([c['thickness'] for c in contributions])
for k in depositkeys:
if k not in rock:
# weigh
|
t attributes by thickness
rock[k] = sum([float(c['thickness']) * c['rock'][k]
if k in c['rock'] else 0
for c in contributions])/thickness
rock['clasticity'] = rock['clasticity'] * 2 if 'clasticity' in rock else 1
if life:
if sea:
rock['calcity'] = max(0, min(1, float(climate.temperature - 18)/25))
if rock['calcity'] > 0.9
|
9:
rock['name'] = 'chalk'
elif rock['calcity'] > 0.75:
rock['name'] = 'limestone'
elif climate.koeppen[0] == u'C' and climate.temperature < 18:
rock['bogginess'] = max(0, (climate.precipitation - 0.75) * 4)
if rock['name'] is None:
grain = 1e-3/float(rock['clasticity'])
if grain < 4e-6:
name = 'claystone'
elif grain < 60e-6:
name = 'siltstone'
elif grain < 2e-3:
name = 'sandstone'
else:
name = 'conglomerate'
rock['name'] = name
return { 'rock': rock, 'thickness': thickness }
|
svanschalkwyk/datafari
|
windows/python/Lib/test/test_io.py
|
Python
|
apache-2.0
| 120,213
| 0.001406
|
"""Unit tests for the io module."""
# Tests of io are scattered over the test suite:
# * test_bufio - tests file buffering
# * test_memoryio - tests BytesIO and StringIO
# * test_fileio - tests FileIO
# * test_file - tests the file interface
# * test_io - tests everything else in the io module
# * test_univnewlines - tests universal newline support
# * test_largefile - tests operations on a file greater than 2**32 bytes
# (only enabled with -ulargefile)
################################################################################
# ATTENTION TEST WRITERS!!!
################################################################################
# When writing tests for io, it's important to test both the C and Python
# implementations. This is usually done by writing a base test that refers to
# the type it is testing as a attribute. Then it provides custom subclasses to
# test both implementations. This file has lots of examples.
################################################################################
from __future__ import print_function
from __fu
|
ture__ import unicode_literals
import os
import sys
import time
import array
import random
import unittest
import weakref
import warnings
import abc
import signal
import errno
from itertools import cycle, count
from collections import deque
from UserList import UserList
from test import test_support as support
import contextlib
import codecs
import io # C implementation of io
import _pyio as pyio
|
# Python implementation of io
try:
import threading
except ImportError:
threading = None
try:
import fcntl
except ImportError:
fcntl = None
__metaclass__ = type
bytes = support.py3k_bytes
def _default_chunk_size():
"""Get the default TextIOWrapper chunk size"""
with io.open(__file__, "r", encoding="latin1") as f:
return f._CHUNK_SIZE
class MockRawIOWithoutRead:
"""A RawIO implementation without read(), so as to exercise the default
RawIO.read() which calls readinto()."""
def __init__(self, read_stack=()):
self._read_stack = list(read_stack)
self._write_stack = []
self._reads = 0
self._extraneous_reads = 0
def write(self, b):
self._write_stack.append(bytes(b))
return len(b)
def writable(self):
return True
def fileno(self):
return 42
def readable(self):
return True
def seekable(self):
return True
def seek(self, pos, whence):
return 0 # wrong but we gotta return something
def tell(self):
return 0 # same comment as above
def readinto(self, buf):
self._reads += 1
max_len = len(buf)
try:
data = self._read_stack[0]
except IndexError:
self._extraneous_reads += 1
return 0
if data is None:
del self._read_stack[0]
return None
n = len(data)
if len(data) <= max_len:
del self._read_stack[0]
buf[:n] = data
return n
else:
buf[:] = data[:max_len]
self._read_stack[0] = data[max_len:]
return max_len
def truncate(self, pos=None):
return pos
class CMockRawIOWithoutRead(MockRawIOWithoutRead, io.RawIOBase):
pass
class PyMockRawIOWithoutRead(MockRawIOWithoutRead, pyio.RawIOBase):
pass
class MockRawIO(MockRawIOWithoutRead):
def read(self, n=None):
self._reads += 1
try:
return self._read_stack.pop(0)
except:
self._extraneous_reads += 1
return b""
class CMockRawIO(MockRawIO, io.RawIOBase):
pass
class PyMockRawIO(MockRawIO, pyio.RawIOBase):
pass
class MisbehavedRawIO(MockRawIO):
def write(self, b):
return MockRawIO.write(self, b) * 2
def read(self, n=None):
return MockRawIO.read(self, n) * 2
def seek(self, pos, whence):
return -123
def tell(self):
return -456
def readinto(self, buf):
MockRawIO.readinto(self, buf)
return len(buf) * 5
class CMisbehavedRawIO(MisbehavedRawIO, io.RawIOBase):
pass
class PyMisbehavedRawIO(MisbehavedRawIO, pyio.RawIOBase):
pass
class CloseFailureIO(MockRawIO):
closed = 0
def close(self):
if not self.closed:
self.closed = 1
raise IOError
class CCloseFailureIO(CloseFailureIO, io.RawIOBase):
pass
class PyCloseFailureIO(CloseFailureIO, pyio.RawIOBase):
pass
class MockFileIO:
def __init__(self, data):
self.read_history = []
super(MockFileIO, self).__init__(data)
def read(self, n=None):
res = super(MockFileIO, self).read(n)
self.read_history.append(None if res is None else len(res))
return res
def readinto(self, b):
res = super(MockFileIO, self).readinto(b)
self.read_history.append(res)
return res
class CMockFileIO(MockFileIO, io.BytesIO):
pass
class PyMockFileIO(MockFileIO, pyio.BytesIO):
pass
class MockNonBlockWriterIO:
def __init__(self):
self._write_stack = []
self._blocker_char = None
def pop_written(self):
s = b"".join(self._write_stack)
self._write_stack[:] = []
return s
def block_on(self, char):
"""Block when a given char is encountered."""
self._blocker_char = char
def readable(self):
return True
def seekable(self):
return True
def writable(self):
return True
def write(self, b):
b = bytes(b)
n = -1
if self._blocker_char:
try:
n = b.index(self._blocker_char)
except ValueError:
pass
else:
if n > 0:
# write data up to the first blocker
self._write_stack.append(b[:n])
return n
else:
# cancel blocker and indicate would block
self._blocker_char = None
return None
self._write_stack.append(b)
return len(b)
class CMockNonBlockWriterIO(MockNonBlockWriterIO, io.RawIOBase):
BlockingIOError = io.BlockingIOError
class PyMockNonBlockWriterIO(MockNonBlockWriterIO, pyio.RawIOBase):
BlockingIOError = pyio.BlockingIOError
class IOTest(unittest.TestCase):
def setUp(self):
support.unlink(support.TESTFN)
def tearDown(self):
support.unlink(support.TESTFN)
def write_ops(self, f):
self.assertEqual(f.write(b"blah."), 5)
f.truncate(0)
self.assertEqual(f.tell(), 5)
f.seek(0)
self.assertEqual(f.write(b"blah."), 5)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(b"Hello."), 6)
self.assertEqual(f.tell(), 6)
self.assertEqual(f.seek(-1, 1), 5)
self.assertEqual(f.tell(), 5)
self.assertEqual(f.write(bytearray(b" world\n\n\n")), 9)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(b"h"), 1)
self.assertEqual(f.seek(-1, 2), 13)
self.assertEqual(f.tell(), 13)
self.assertEqual(f.truncate(12), 12)
self.assertEqual(f.tell(), 13)
self.assertRaises(TypeError, f.seek, 0.0)
def read_ops(self, f, buffered=False):
data = f.read(5)
self.assertEqual(data, b"hello")
data = bytearray(data)
self.assertEqual(f.readinto(data), 5)
self.assertEqual(data, b" worl")
self.assertEqual(f.readinto(data), 2)
self.assertEqual(len(data), 5)
self.assertEqual(data[:2], b"d\n")
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.read(20), b"hello world\n")
self.assertEqual(f.read(1), b"")
self.assertEqual(f.readinto(bytearray(b"x")), 0)
self.assertEqual(f.seek(-6, 2), 6)
self.assertEqual(f.read(5), b"world")
self.assertEqual(f.read(0), b"")
self.assertEqual(f.readinto(bytearray()), 0)
self.assertEqual(f.seek(-6, 1), 5)
self.assertEqual(f.read(5), b" worl")
self.assertEqual(f.tell(), 10)
self.assertRaises(TypeError,
|
SoundGoof/NIPAP
|
nipap/nipap/errors.py
|
Python
|
mit
| 1,301
| 0.000769
|
class NipapError(Exception):
""" NIPAP base error class.
"""
|
error_code = 1000
class NipapInputError(NipapError):
""" Erroneous input.
A general input error.
"""
error_code = 1100
class NipapMissingInputError(NipapInputError):
""" Missing input.
Most input is passed in dicts, this could mean a missing key in a dict.
"""
error_code = 1110
class NipapExtraneousInputError(NipapInputError):
""" Extraneous input.
Most input is passed in dicts, this could mean an unknown key in a dict.
"""
|
error_code = 1120
class NipapNoSuchOperatorError(NipapInputError):
""" A non existent operator was specified.
"""
error_code = 1130
class NipapValueError(NipapError):
""" Something wrong with a value
For example, trying to send an integer when an IP address is expected.
"""
error_code = 1200
class NipapNonExistentError(NipapError):
""" A non existent object was specified
For example, try to get a prefix from a pool which doesn't exist.
"""
error_code = 1300
class NipapDuplicateError(NipapError):
""" The passed object violates unique constraints
For example, create a VRF with a name of an already existing one.
"""
error_code = 1400
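# Hedged usage sketch (added; not part of the original module): callers can
# catch the NipapError base class and map error_code back to an error family,
# e.g. 11xx for input problems, 1200 for value problems, 1400 for duplicates.
if __name__ == "__main__":
    try:
        raise NipapExtraneousInputError("unknown key 'foo' in query spec")
    except NipapError as exc:
        print("%s (error_code=%d)" % (exc, exc.error_code))  # error_code=1120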
|
Azure/azure-sdk-for-python
|
sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2019_07_01/operations/_snapshots_operations.py
|
Python
|
mit
| 46,204
| 0.004697
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
JSONType = Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_create_or_update_request_initial(
subscription_id: str,
resource_group_name: str,
snapshot_name: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2019-07-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"snapshotName": _SERIALIZER.url("snapshot_name", snapshot_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_update_request_initial(
subscription_id: str,
resource_group_name: str,
snapshot_name: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2019-07-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"snapshotName": _SERIALIZER.url("snapshot_name", snapshot_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PATCH",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_get_request(
subscription_id: str,
resource_group_name: str,
snapshot_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2019-07-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"snapshotName": _SERIALIZER.url("snapshot_name", snapshot_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_delete_request_initial(
subscription_id: str,
resource_group_name: str,
snapshot_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2019-07-01"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"s
|
napshotName": _SERIALIZER.url("snapshot_name", snapshot_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type:
|
Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
return HttpRequest(
method="DELETE",
url=url,
params=query_parameters,
**kwargs
)
def build_list_by_resource_group_request(
subscription_id: str,
resource_group_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2019-07-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_list_request(
subscription_id: st
|
T-002/pycast
|
pycast/common/decorators.py
|
Python
|
mit
| 2,987
| 0.002009
|
# !/usr/bin/env python
# -*- coding: UTF-8 -*-
# Copyright (c) 2012-2015 Christian Schwarz
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
def optimized(fn):
"""Decorator that will call the optimized c++ version
    of a pycast function if available rather than the
original pycast function
:param function fn: original pycast function
:return: return the wrapped function
|
:rtype: function
"""
def _optimized(self, *args, **kwargs):
""" This method calls the pycastC function if
optimization is enabled and the pycastC function
is available.
:param: PyCastObject self: reference to the calling object.
Needs to be passed to the pycastC function,
|
            so that all its members are available.
:param: list *args: list of arguments the function is called with.
:param: dict **kwargs: dictionary of parameter names and values the function has been called with.
:return result of the function call either from pycast or pycastC module.
:rtype: function
"""
if self.optimizationEnabled:
class_name = self.__class__.__name__
module = self.__module__.replace("pycast", "pycastC")
try:
imported = __import__("%s.%s" % (module, class_name), globals(), locals(), [fn.__name__])
function = getattr(imported, fn.__name__)
return function(self, *args, **kwargs)
except ImportError:
print "[WARNING] Could not enable optimization for %s, %s" % (fn.__name__, self)
return fn(self, *args, **kwargs)
else:
return fn(self, *args, **kwargs)
setattr(_optimized, "__name__", fn.__name__)
setattr(_optimized, "__repr__", fn.__repr__)
setattr(_optimized, "__str__", fn.__str__)
setattr(_optimized, "__doc__", fn.__doc__)
return _optimized
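# Hedged usage sketch (added; not part of the original module): a method on a
# class exposing an `optimizationEnabled` attribute can be wrapped with
# @optimized; with optimization disabled (as here) the pure Python body runs,
# otherwise dispatch goes to the matching pycastC module. The classes below
# are made up for illustration only.
if __name__ == "__main__":
    class _FakePyCastObject(object):
        optimizationEnabled = False
    class ExampleSmoother(_FakePyCastObject):
        @optimized
        def smooth(self, values):
            return list(values)
    print ExampleSmoother().smooth([1, 2, 3])   # -> [1, 2, 3]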
|
davy39/eric
|
ThirdParty/Pygments/pygments/plugin.py
|
Python
|
gpl-3.0
| 1,903
| 0
|
# -*- coding: utf-8 -*-
"""
pygments.plugin
~~~~~~~~~~~~~~~
Pygments setuptools plugin interface. The methods defined
here also work if setuptools isn't installed but they just
return nothing.
lexer plugins::
[pygments.lexers]
yourlexer = yourmodule:YourLexer
formatter plugins::
[pygments.formatters]
yourformatter = yourformatter:YourFormatter
/.ext = yourformatter:YourFormatter
As you can see, you can define extensions for the formatter
with a leading slash.
syntax plugins::
[pygments.styles]
yourstyle = yourstyle:YourStyle
filter plugin::
[pygments.filter]
yourfilter = yourfilter:YourFilter
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import unicode_literals
try:
import pkg_resources
except ImportErr
|
or:
pkg_resources = None
LEXER_ENTRY_POINT = 'pygments.lexers'
FORMATTER_ENTRY_POINT = 'pygments.formatters'
STYLE_ENTRY_POINT = 'pygments.styles'
FILTER_ENTRY_POINT = 'pygments.filters'
def find_plugin_lexers():
if pkg_resources is None:
return
for entrypoint in pkg_resources.iter_entry_points(LEXER_ENTRY_POINT):
yield entrypoint.load()
def find_plugin_formatters():
if pkg_resources is None:
return
for entrypoint in pkg_res
|
ources.iter_entry_points(FORMATTER_ENTRY_POINT):
yield entrypoint.name, entrypoint.load()
def find_plugin_styles():
if pkg_resources is None:
return
for entrypoint in pkg_resources.iter_entry_points(STYLE_ENTRY_POINT):
yield entrypoint.name, entrypoint.load()
def find_plugin_filters():
if pkg_resources is None:
return
for entrypoint in pkg_resources.iter_entry_points(FILTER_ENTRY_POINT):
yield entrypoint.name, entrypoint.load()
|
s-knibbs/py-web-player
|
pywebplayer/discover.py
|
Python
|
gpl-2.0
| 2,070
| 0.000483
|
import os
import time
from config import ComponentBase
from transcode import Transcoder
class MediaDiscovery(ComponentBase):
DURATION_FORMAT = '%H:%M:%S'
MAX_DEPTH = 4
def __init__(self, library):
super(MediaDiscovery, self).__init__()
self.library = library
def search(self, paths, depth=0):
"""Search the given paths for media files"""
num_items = 0
sub_paths = []
tcoder = Transcoder()
if len(paths) == 0 or depth >= self.MAX_DEPTH:
return 0
for path in paths:
try:
for entry in os.listdir(path):
abspath = os.path.join(path, entry)
if os.path.isdir(abspath):
sub_paths.append(abspath)
continue
name, ext = os.path.splitext(entry)
ext = ext[1:]
if ext in tcoder.MIME_MAP:
info = tcoder.get_media_info(abspath)
if info is None:
continue
size = os.stat(abspath).st_size
length = self._duration_to_secs(info['dur
|
ation'])
self.library.insert(name, abspath, length, size,
tcoder.MIME_MAP[ext], info, ignore_duplicates=True)
|
num_items += 1
except OSError as e:
self.logger.warning(str(e))
self.library.save()
return self.search(sub_paths, depth + 1) + num_items
def _duration_to_secs(self, duration):
"""Converts a duration string into seconds"""
# TODO - Support sub second precision
ts = time.strptime(duration, self.DURATION_FORMAT)
return ts.tm_hour * 3600 + ts.tm_min * 60 + ts.tm_sec
def start_watching(self):
"""Watch the filesystem for any new media files
and add them to the database automatically.
"""
pass
# TODO - Implement file system watching
|
vuolter/pyload
|
src/pyload/core/network/cookie_jar.py
|
Python
|
agpl-3.0
| 1,007
| 0.000993
|
# -*- coding: utf-8 -*-
import time
from datetime import timedelta
class CookieJar:
def __init__(self, pluginname, account=None):
self.cookies = {}
self.plugin = pluginname
self.account = account
def add_cookies(self, clist):
for c in clist:
name = c.split("\t")[5]
self.cookies[name] = c
def get_cookies(self):
return list(self.cookies.values()
|
)
def parse_cookie(self, name):
if name in self.co
|
okies:
return self.cookies[name].split("\t")[6]
else:
return None
def get_cookie(self, name):
return self.parse_cookie(name)
def set_cookie(
self,
domain,
name,
value,
path="/",
exp=time.time() + timedelta(hours=744).total_seconds(), #: 31 days retention
):
self.cookies[
name
] = f".{domain}\tTRUE\t{path}\tFALSE\t{exp}\t{name}\t{value}"
def clear(self):
self.cookies = {}
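# Hedged usage sketch (added; not part of the original module): each cookie is
# stored as one tab-separated Netscape-style line, so get_cookie() simply
# reads the value field (index 6) back out of that line.
if __name__ == "__main__":
    jar = CookieJar("exampleplugin")
    jar.set_cookie("example.com", "session", "abc123")
    print(jar.get_cookie("session"))   # -> abc123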
|
caulagi/hubot-py-wtf
|
test/code/bad.py
|
Python
|
mit
| 46
| 0.021739
|
# -*- coding:
|
utf-8 -*-
#!/usr/bin/env
|
python
|
ElessarWebb/dummy
|
src/dummy/utils/argparser.py
|
Python
|
mit
| 1,745
| 0.05616
|
import logging
from dummy.models import Test
from dummy.utils import git
from dummy.storage import StorageProvider
from dummy import config
logger = logging.getLogger( __name__ )
def discover_targets( args ):
targets = []
if args.alltargets:
for t in config.TARGETS.keys():
targets.append( t )
elif len( args.target ) == 0:
targets.append( config.DEFAULT_TARGET )
else:
for target in args.
|
target:
targets.append( target )
return targets
def discover_tests( args ):
tests = []
# try to find the suites and append the testnames
# of the suite
for name in args.suite:
logger.info( "Loading tests from suite `%s`" % name )
# make sure to have a valid test suite name
try:
suite = config.SUITES[ name ]
for descr in suite:
for fname in Test.glob( descr ):
logger.debug( "
|
Adding test `%s` to tests." % fname )
tests.append( Test( fname ))
except KeyError:
logger.error( "We looked, but a test suite with name `%s` was not found." % name )
# queue the named tests
for names in [ Test.glob( name ) for name in args.tests ]:
for name in names:
tests.append( Test( name ))
# expand excludes using globbing
excludes = []
for ex in args.exclude:
excludes += Test.glob( ex )
# unqueue excluded tests
tests = [ t for t in tests if t.name not in excludes ]
# unqueue tests that already have results
# if complement option is given
if args.complement:
targets = discover_targets( args )
commit = args.commit or git.describe()
# assume tested
filtered = []
for test in tests:
tested = True
for t in targets:
if not StorageProvider.exists( commit, t, test ):
tested = False
if not tested:
filtered.append( test )
tests = filtered
return tests
|
osrf/docker_templates
|
docker_templates/library.py
|
Python
|
apache-2.0
| 2,737
| 0.000731
|
# Copyright 2015-2016 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import git
import os
import string
def latest_commit_sha(repo, path):
"""That the last commit sha for a given path in repo"""
log_message = repo.git.log("-1", path)
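    # Hedged note (added): repo.git.log("-1", path) returns the default git log
    # header format, whose first line reads "commit <sha>"; splitting that line
    # on spaces therefore yields the sha at index 1.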
commit_sha = log_message.split('\n')[0].split(' ')[1]
return commit_sha
def parse_manifest(manifest, repo, repo_name):
# For each release
for release_name, release_data in list(manifest['release_names'].items()):
print('release_name: ', release_name)
# For each os supported
at_least_one_tag = False
for os_name, os_data in list(release_data['os_names'].items()):
print('os_name: ', os_name)
# For each os code name supported
for os_code_name, os_code_data in list(os_data['os_code_names'].items()):
print('os_code_name: ', os_code_name)
if os_code_data['tag_names']:
at_least_one_tag = True
for tag_name, tag_data in os_code_data['tag_names'].items():
print('tag_name: ', tag_name)
tags = []
for alias_pattern in tag_data['aliases']:
alias_template = string.Template(alias_pattern)
alias = alias_template.substitute(
release_name=release_name,
os_name=os_name,
os_code_name=os_code_name)
tags.append(alias)
commit_path = os.path.join(
repo_name, release_name,
os_name, os_code_name, tag_name)
commit_sha = latest_commit_sha(repo, commit_path)
print('ta
|
gs: ', tags)
tag_data['Tags'] = tags
tag_data['Architectures'] = os_code_data['archs']
tag_data['GitComm
|
it'] = commit_sha
tag_data['Directory'] = commit_path
if not at_least_one_tag:
del manifest['release_names'][release_name]
return manifest
|
ddico/odoo
|
addons/sale_expense/models/product_template.py
|
Python
|
agpl-3.0
| 1,032
| 0.001938
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, models
class ProductTemplate(models.Model):
_inherit = 'product.template'
def _default_visible_expense_policy(self):
visibility = self.user_has_groups('hr_expense.group_hr_expense_user')
return visibility or super(ProductTemplate, self)._default_visible_expense_policy()
@api.depends('can_be_expensed')
def _compute_visible_expense_policy(self):
expense_products = self.filtered(lambda p: p.can_be_expensed)
for produc
|
t_template in self - expense_products:
product_template.visible_expense_policy = False
super(ProductTemplate, expense_products)._compu
|
te_visible_expense_policy()
visibility = self.user_has_groups('hr_expense.group_hr_expense_user')
for product_template in expense_products:
if not product_template.visible_expense_policy:
product_template.visible_expense_policy = visibility
|
odahoda/noisicaa
|
noisicaa/builtin_nodes/pianoroll_track/track_ui.py
|
Python
|
gpl-2.0
| 53,058
| 0.001809
|
#!/usr/bin/python3
# @begin:license
#
# Copyright (c) 2015-2019, Benjamin Niemann <pink@odahoda.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @end:license
import fractions
import functools
import logging
import os.path
from typing import Any, Dict, List, Set, Sequence, Tuple
from PyQt5.QtCore import Qt
from PyQt5 import QtCore
from PyQt5 import QtGui
from PyQt5 import QtWidgets
from noisicaa.core.typing_extra import
|
down_cast
from noisicaa import constants
from noisicaa import audioproc
from noisicaa import core
from noisicaa import music
from noisicaa.ui import ui_base
from noisicaa.ui import clipboard
from noisicaa.ui import pianoroll
from noisicaa.ui import s
|
lots
from noisicaa.ui import int_dial
from noisicaa.ui.track_list import tools
from noisicaa.ui.track_list import base_track_editor
from noisicaa.ui.track_list import time_view_mixin
from noisicaa.builtin_nodes.pianoroll import processor_messages
from . import model
from . import clipboard_pb2
logger = logging.getLogger(__name__)
class PianoRollToolMixin(tools.ToolBase): # pylint: disable=abstract-method
track = None # type: PianoRollTrackEditor
def activateSegment(self, segment: 'SegmentEditor') -> None:
pass
def activated(self) -> None:
for segment in self.track.segments:
self.activateSegment(segment)
super().activated()
def __changeRowHeight(
self,
delta: int,
label: QtWidgets.QLabel,
increase_button: QtWidgets.QToolButton,
decrease_button: QtWidgets.QToolButton
) -> None:
tr = self.track
pos = (tr.yOffset() + tr.height() / 2) / tr.gridHeight()
tr.setGridYSize(
max(tr.MIN_GRID_Y_SIZE, min(tr.MAX_GRID_Y_SIZE, tr.gridYSize() + delta)))
tr.setYOffset(
max(0, min(tr.gridHeight() - tr.height(),
int(pos * tr.gridHeight() - tr.height() / 2))))
label.setText("%dpx" % tr.gridYSize())
increase_button.setEnabled(tr.gridYSize() < tr.MAX_GRID_Y_SIZE)
decrease_button.setEnabled(tr.gridYSize() > tr.MIN_GRID_Y_SIZE)
def buildContextMenu(self, menu: QtWidgets.QMenu, evt: QtGui.QContextMenuEvent) -> None:
view_menu = menu.addMenu("View")
increase_row_height_button = QtWidgets.QToolButton()
increase_row_height_button.setObjectName('incr-row-height')
increase_row_height_button.setAutoRaise(True)
increase_row_height_button.setIcon(QtGui.QIcon(
os.path.join(constants.DATA_DIR, 'icons', 'zoom-in.svg')))
increase_row_height_button.setEnabled(self.track.gridYSize() < self.track.MAX_GRID_Y_SIZE)
decrease_row_height_button = QtWidgets.QToolButton()
decrease_row_height_button.setObjectName('decr-row-height')
decrease_row_height_button.setAutoRaise(True)
decrease_row_height_button.setIcon(QtGui.QIcon(
os.path.join(constants.DATA_DIR, 'icons', 'zoom-out.svg')))
decrease_row_height_button.setEnabled(self.track.gridYSize() > self.track.MIN_GRID_Y_SIZE)
row_height_label = QtWidgets.QLabel("%dpx" % self.track.gridYSize())
increase_row_height_button.clicked.connect(functools.partial(
self.__changeRowHeight,
1, row_height_label, increase_row_height_button, decrease_row_height_button))
decrease_row_height_button.clicked.connect(functools.partial(
self.__changeRowHeight,
-1, row_height_label, increase_row_height_button, decrease_row_height_button))
row_height_widget = QtWidgets.QWidget()
l = QtWidgets.QHBoxLayout()
l.setContentsMargins(10, 2, 10, 2)
l.setSpacing(4)
l.addWidget(QtWidgets.QLabel("Row height:"))
l.addWidget(decrease_row_height_button)
l.addWidget(row_height_label)
l.addWidget(increase_row_height_button)
l.addStretch(1)
row_height_widget.setLayout(l)
row_height_action = QtWidgets.QWidgetAction(self)
row_height_action.setDefaultWidget(row_height_widget)
view_menu.addAction(row_height_action)
current_channel_menu = menu.addMenu("Current MIDI Channel")
for ch in range(16):
current_channel_menu.addAction(
self.track.set_current_channel_actions[ch])
def contextMenuEvent(self, evt: QtGui.QContextMenuEvent) -> None:
menu = QtWidgets.QMenu(self.track)
menu.setObjectName('context-menu')
self.buildContextMenu(menu, evt)
menu.popup(evt.globalPos())
evt.accept()
class ArrangeSegmentsTool(PianoRollToolMixin, tools.ToolBase):
def __init__(self, **kwargs: Any) -> None:
super().__init__(
type=tools.ToolType.PIANOROLL_ARRANGE_SEGMENTS,
group=tools.ToolGroup.EDIT,
**kwargs)
self.__action = None # type: str
self.__resize_segment = None # type: SegmentEditor
self.__drag_segments = None # type: List[SegmentEditor]
self.__handle_offset = None # type: int
self.__ref_time = None # type: audioproc.MusicalTime
self.__time = None # type: audioproc.MusicalTime
self.__select_all_action = QtWidgets.QAction(self)
self.__select_all_action.setObjectName('select-all')
self.__select_all_action.setText("Select All")
self.__select_all_action.setShortcut('ctrl+a')
self.__select_all_action.setShortcutContext(Qt.WidgetWithChildrenShortcut)
self.__select_all_action.triggered.connect(self.__selectAll)
self.__clear_selection_action = QtWidgets.QAction(self)
self.__clear_selection_action.setObjectName('clear-selection')
self.__clear_selection_action.setText("Clear Selection")
self.__clear_selection_action.setShortcut('ctrl+shift+a')
self.__clear_selection_action.setShortcutContext(Qt.WidgetWithChildrenShortcut)
self.__clear_selection_action.triggered.connect(self.__clearSelection)
self.__add_segment_action = QtWidgets.QAction(self)
self.__add_segment_action.setObjectName('add-segment')
self.__add_segment_action.setText("Add Segment")
self.__add_segment_action.setIcon(QtGui.QIcon(
os.path.join(constants.DATA_DIR, 'icons', 'list-add.svg')))
self.__add_segment_action.setShortcut('ins')
self.__add_segment_action.setShortcutContext(Qt.WidgetWithChildrenShortcut)
self.__add_segment_action.triggered.connect(self.__createSegment)
self.__delete_segment_action = QtWidgets.QAction(self)
self.__delete_segment_action.setObjectName('delete-segment')
self.__delete_segment_action.setText("Delete Segment(s)")
self.__delete_segment_action.setIcon(QtGui.QIcon(
os.path.join(constants.DATA_DIR, 'icons', 'list-remove.svg')))
self.__delete_segment_action.setShortcut('del')
self.__delete_segment_action.setShortcutContext(Qt.WidgetWithChildrenShortcut)
self.__delete_segment_action.triggered.connect(self.__deleteSegments)
def iconName(self) -> str:
return 'pianoroll-arrange-segments'
def keySequence(self) -> QtGui.QKeySequence:
return QtGui.QKeySequence('a')
def activateSegment(self, segment: 'SegmentEditor') -> None:
segment.setAttribute(Qt.WA_TransparentForMouseEvents, True)
segment.setReadOnly(True)
def activated(self) -> None:
self.track.addAction(self.__select_all_action)
self.track.addAc
|
tantalor/megaera
|
megaera/local.py
|
Python
|
mit
| 1,305
| 0.010728
|
"""Accessors for an app's local configuration
The local configuration is loaded from a YAML file. The default
configuration is "local.yaml", in the app's root.
An app's local configuration can change depending on the current
environment, i.e., development and production.
For example,
pirate: ninja
robot:
dev: zombie
prod: monkey
In development, this app's local config will be,
{'pirate': 'ninja', 'robot': 'zombie'}
In production, the app's local config will be,
{'pirate': 'ninja', 'robot': 'monkey'}
"""
import yaml
import os
from env import branch
from google.appengine.api import memcache
def config(filename='local.yaml'):
"""Return the config (dict) for the current environment."""
cachekey = 'config:%s' % filename
# check
|
memcache
try:
config = memcache.get(cachekey)
if config:
return config
except AssertionError: pass
if os.path.exists(filename):
config = yaml.load(file(filename).read())
# branch each value by environment
config = dict([(key, branch(value)) for k
|
ey, value in config.iteritems()])
try:
memcache.set(cachekey, config)
except AssertionError: pass
return config
return dict()
def config_get(key):
"""Return the value for the given key from the default config."""
return config()[key]
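# --- Illustrative usage sketch (not part of the original module) ---
# Assumes a local.yaml like the one shown in the module docstring and an
# App Engine runtime (google.appengine.api.memcache and env.branch).
if __name__ == '__main__':
  # full config dict, with per-environment values already branched
  print(config())
  # single keys: 'robot' resolves to 'zombie' on dev and 'monkey' on prod
  print(config_get('pirate'))
  print(config_get('robot'))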
|
adykstra/mne-python
|
tutorials/stats-sensor-space/plot_stats_cluster_time_frequency.py
|
Python
|
bsd-3-clause
| 4,873
| 0
|
"""
=========================================================================
Non-parametric between conditions cluster statistic on single trial power
=========================================================================
This script shows how to compare clusters in time-frequency
power estimates between conditions. It uses a non-parametric
statistical procedure based on permutations and cluster
level statistics.
The procedure consists of:
  - extracting epochs for 2 conditions
  - computing single trial power estimates
  - baseline correcting the power estimates (power ratios)
  - computing stats to see if the power estimates are significantly
    different between conditions.
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.time_frequency import tfr_morlet
from mne.stats import permutation_cluster_test
from mne.datasets import sample
print(__doc__)
###############################################################################
# Set parameters
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_raw-eve.fif'
tmin, tmax = -0.2, 0.5
# Setup for reading the raw data
raw = mne.io.read_raw_fif(raw_fname)
events = mne.read_events(event_fname)
include = []
raw.info['bads'] += ['MEG 2443', 'EEG 053'] # bads + 2 more
# picks MEG gradiometers
picks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=True,
stim=False, include=include, exclude='bads')
ch_name = 'MEG 1332' # restrict example to one channel
# Load condition 1
reject = dict(grad=4000e-13, eog=150e-6)
event_id = 1
epochs_condition_1 = mne.Epochs(raw, events, event_id, tmin, tmax,
picks=picks, baseline=(None, 0),
reject=reject, preload=True)
epochs_condition_1.pick_channels([ch_name])
# Load condition 2
event_id = 2
epochs_condition_2 = mne.Epochs(raw, events, event_id, tmin, tmax,
picks=picks, baseline=(None, 0),
reject=reject, preload=True)
epochs_condition_2.pick_channels([ch_name])
###############################################################################
# Factor to downsample the temporal dimension of the TFR computed by
# tfr_morlet. Decimation occurs after frequency decomposition and can
# be used to reduce memory usage (and possibly computational time of downstream
# operations such as nonparametric statistics) if you don't need high
# spectrotemporal resolution.
decim = 2
freqs = np.arange(7, 30, 3) # define frequencies of interest
n_cycles = 1.5
tfr_epochs_1 = tfr_morlet(epochs_condition_1, freqs,
n_cycles=n_cycles, decim=decim,
return_itc=False, average=False)
tfr_epochs_2 = tfr_morlet(epochs_condition_2, freqs,
n_cycles=n_cycles, decim=decim,
return_itc=False, average=False)
tfr_epochs_1.apply_baseline(mode='ratio', baseline=(None, 0))
tfr_epochs_2.apply_baseline(mode='ratio', baseline=(None, 0))
epochs_power_1 = tfr_epochs_1.data[:, 0, :, :] # only 1 channel as 3D matrix
epochs_power_2 = tfr_epochs_2.data[:, 0, :, :] # only 1 channel as 3D matrix
###############################################################################
# Compute statistic
# -----------------
threshold = 6.0
T_obs, clusters, cluster_p_values, H0 = \
permutation_cluster_test([epochs_power_1, epochs_power_2],
n_permutations=100, threshold=threshold, tail=0)
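# Added note (see the MNE documentation for the authoritative description):
# T_obs is the observed test-statistic map over (frequencies, times),
# clusters is a list of masks/slices identifying each cluster in that map,
# cluster_p_values gives one permutation p-value per cluster, and H0 holds
# the maximum cluster statistic from each permutation.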
###############################################################################
# View time-frequency plots
# -------------------------
times = 1e3 * epochs_condition_1.times # change unit to ms
evoked_condition_1 = epochs_condition_1.average()
evoked_condition_2 = epochs_condition_2.average()
plt.figure()
plt.subplots_adjust(0.12, 0.08, 0.96, 0.94, 0.2, 0.43)
plt.subplot(2, 1, 1)
# Create new stats image with only significant clusters
T_obs_plot = np.nan * np.ones_like(T_obs)
for c, p_val in zip(clusters, cluster_p_values):
if p_val <= 0.05:
T_obs_plot[c] = T_obs[c]
plt.imshow(T_obs,
extent=[times[0], times[-1], freqs[0], freqs[-1]],
aspect='auto', origin='lower', cmap='gray')
plt.imshow(T_obs_plot,
extent=[times[0], times[-1], freqs[0], freqs[-1]],
aspect='auto', origin='lower', cmap='RdBu_r')
plt.xlabel('Time (ms)')
plt.ylabel('Frequency (Hz)')
plt.title('Induced power (%s)' % ch_name)
ax2 = plt.subplot(2, 1, 2)
evoked_contrast = mne.combine_evoked([evoked_condition_1, evoked_condition_2],
|
weights=[1, -1])
evoked_contrast.plot(axes=ax2, time_unit='s')
plt.s
|
how()
|
pignacio/chorddb
|
chorddb/terminal/__init__.py
|
Python
|
gpl-3.0
| 190
| 0
|
#! /usr/bin/env python
# -*- c
|
oding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import logging
from .render import render_tablature
|
__all__ = ['render_tablature']
|
mishpat/human-capital-search
|
humancapitalsearch.py
|
Python
|
mit
| 4,080
| 0.007108
|
import functools
import os
import numpy as np
import pygmo as pg
from simulation import simulate, statistical
from solving import value_function_list
from util import constants as cs, param_type
class HumanCapitalSearchProblem(object):
def fitness(self, params_nparray, gradient_eval=False):
params_paramtype = param_type.transform_array_to_paramstype(params_nparray)
optimal_value_funct
|
ions = value_function_list.backwards_iterate(params_paramtype)
# if display_plot
|
s:
# plotting.plot_valuefunctions(optimal_value_functions)
panel_data = simulate.simulate_data(params_paramtype, optimal_value_functions)
# if save_panel_data:
# np.savetxt('simulated_data.csv', panel_data.T, delimiter=',')
simulated_coefficients, _, _, _ = statistical.calculate_coefficients(panel_data)
criterion_value = calculate_criterion(
simulated_coefficients, cs.data_coeffs, cs.weights)
if not gradient_eval:
print('within_val {0}:'.format(os.getpid()), repr(params_nparray), repr(np.array([criterion_value])))
return np.array([criterion_value])
def get_name(self):
return 'Human Capital Search Problem'
def get_bounds(self):
lowerbounds, upperbounds, _ = param_type.gen_initial_point()
return (lowerbounds, upperbounds)
def gradient(self, x):
grad_fitness = functools.partial(self.fitness, gradient_eval=True)
return pg.estimate_gradient(grad_fitness, x)
def calculate_criterion(simulate_coefficients,
data_coefficients, weights):
cutoff = 155
try:
squared_coeff_diffs = (data_coefficients[:cutoff]
- simulate_coefficients[:cutoff])**2
return np.sum(squared_coeff_diffs * weights[:cutoff]**2)
except ValueError:
return 1000000.0
return None
def convert_hours_to_seconds(hours):
seconds = 60 * 60 * hours
return int(seconds)
if __name__ == '__main__':
x0 = np.array([ 3.50002199e-03, 6.51848176e-03, 1.51129690e-02,
5.44408669e-01, 4.00993663e-01, 6.55844833e-02,
6.07802957e+00, 1.60167206e+00, 5.01869425e+00,
4.72961572e+00, 9.38466921e+00, 5.05588161e+00,
8.19033636e+00, 2.75929445e+00, 2.85635433e+00,
1.75737616e+00, 7.80585097e-01, 7.75955256e-01,
7.84082645e-01, 7.84472240e-01, 7.88595353e-01,
7.56837829e-01, 7.95899147e-01, 1.00607895e-01,
9.54173933e-02, 1.01830970e-01, 2.35455817e-01,
4.34618429e-01, 5.05177886e-01, 4.97754216e-01,
1.33424724e+00, 1.33335481e+00, 5.14048248e-01,
5.31256998e-01, 2.72639929e-02, 2.06973106e-01,
7.44039604e-01, 3.35103286e-01, 8.64058736e-01,
2.01314260e-01, 7.48161453e-01, 1.98923666e-01,
7.49378943e-01, 1.96135026e-01, 7.52297629e-01,
2.03848678e-01, 7.48561095e-01, 1.98618489e-01,
9.17364498e-01, 1.97851509e-01, 7.42171336e-01,
1.98302575e-01, 7.41711271e-01]) #array([ 149.86359966])
hc_problem = HumanCapitalSearchProblem()
hc_problem.fitness(x0)
np.set_printoptions(threshold=10000)
np.random.seed(128)
prob = pg.problem(hc_problem)
pop = pg.population(prob=prob, size=0)
pop.push_back(x0, f=[149.86359966])
archi = pg.archipelago(pop=pop)
methods = ["bobyqa", 'neldermead', 'praxis', "newuoa_bound", "sbplx"]
algo_list = []
for method in methods:
nl= pg.nlopt(method)
nl.stopval = 1e-8
nl.ftol_rel = 1e-8
nl.xtol_rel = 1e-8
nl.xtol_abs = 1e-8
nl.ftol_abs = 1e-8
nl.maxtime = convert_hours_to_seconds(12)
algo = pg.algorithm(nl)
archi.push_back(algo=algo, pop=pop)
print(archi)
archi.evolve()
import time
while archi.status is pg.core._evolve_status.busy:
time.sleep(120)
print(archi)
archi.wait_check()
print(archi.get_champions_x())
print(archi.get_champions_f())
|
lofar-astron/PyBDSF
|
bdsf/sourcecounts.py
|
Python
|
gpl-3.0
| 12,587
| 0.025582
|
"""Sourcecounts
s is flux in Jy and n is the number of sources brighter than s per steradian
"""
import numpy as N
s=N.array([ 9.9999997e-05, 0.00010328281, 0.00010667340, 0.00011017529, 0.00011379215, 0.00011752774, 0.00012138595, \
0.00012537083, 0.00012948645, 0.00013373725, 0.00013812761, 0.00014266209, 0.00014734542, 0.00015218249, 0.00015717837, \
0.00016233824, 0.00016766752, 0.00017317173, 0.00017885664, 0.00018472817, 0.00019079246, 0.00019705582, 0.00020352470, \
0.00021020604, 0.00021710672, 0.00022423393, 0.00023159511, 0.00023919797, 0.00024705040, 0.00025516062, 0.00026353705, \
0.00027218851, 0.00028112394, 0.00029035273, 0.00029988447, 0.00030972913, 0.00031989696, 0.00033039862, 0.00034124497, \
0.00035244724, 0.00036401744, 0.00037596744, 0.00038830977, 0.00040105727, 0.00041422324, 0.00042782145, 0.00044186602, \
0.00045637166, 0.00047135353, 0.00048682719, 0.00050280854, 0.00051931484, 0.00053636299, 0.00055397081, 0.00057215663, \
0.00059093948, 0.00061033899, 0.00063037529, 0.00065106933, 0.00067244272, 0.00069451780, 0.00071731757, 0.00074086577, \
0.00076518703, 0.00079030672, 0.00081625103, 0.00084304705, 0.00087072275, 0.000
|
89930650, 0.00092882907, 0.00095932081, \
0.00099081360, 0.0010233402, 0.0010569346, 0.0010916317, 0.0011274681, 0.0011644807, 0.0012027085, 0.0012421905, \
0.0012829694, 0.0013250869, 0.0013685870, 0.0014135153, 0.0014599183, 0.0015078448, 0.0015573446, 0.0016084694, \
0.0016612725, 0.0017158090, 0.0017721358
|
, 0.0018303118, 0.0018903976, 0.0019524558, 0.0020165513, 0.0020827511, \
0.0021511239, 0.0022217415, 0.0022946771, 0.0023700071, 0.0024478103, 0.0025281659, 0.0026111610, 0.0026968806, \
0.0027854142, 0.0028768543, 0.0029712960, 0.0030688383, 0.0031695808, 0.0032736324, 0.0033810998, 0.0034920950, \
0.0036067341, 0.0037251366, 0.0038474260, 0.0039737299, 0.0041041803, 0.0042389128, 0.0043780687, 0.0045217923, \
0.0046702349, 0.0048235501, 0.0049818982, 0.0051454445, 0.0053143604, 0.0054888208, 0.0056690089, 0.0058551119, \
0.0060473247, 0.0062458473, 0.0064508831, 0.0066626542, 0.0068813767, 0.0071072797, 0.0073405989, 0.0075815772, \
0.0078304661, 0.0080875214, 0.0083530201, 0.0086272340, 0.0089104511, 0.0092029646, 0.0095050810, 0.0098171150, \
0.010139393, 0.010472251, 0.010816036, 0.011171106, 0.011537833, 0.011916599, 0.012307799, 0.012711842, 0.013129148, \
0.013560154, 0.014005309, 0.014465077, 0.014939931, 0.015430382, 0.015936933, 0.016460113, 0.017000468, 0.017558562, \
0.018134978, 0.018730316, 0.019345198, 0.019980265, 0.020636180, 0.021313628, 0.022013316, 0.022735972, 0.023482339, \
0.024253221, 0.025049411, 0.025871737, 0.026721058, 0.027598262, 0.028504262, 0.029440004, 0.030406466, 0.031404655, \
0.032435611, 0.033500414, 0.034600168, 0.035736032, 0.036909178, 0.038120817, 0.039372254, 0.040664773, 0.041999724, \
0.043378498, 0.044802535, 0.046273317, 0.047792386, 0.049361322, 0.050981764, 0.052655403, 0.054383982, 0.056169309, \
0.058013245, 0.059917714, 0.061884668, 0.063916229, 0.066014484, 0.068181612, 0.070419893, 0.072731644, 0.075119294, \
0.077585325, 0.080132306, 0.082762904, 0.085479856, 0.088286005, 0.091184273, 0.094177686, 0.097269312, 0.10046248, \
0.10376048, 0.10716674, 0.11068483, 0.11431842, 0.11807128, 0.12194734, 0.12595065, 0.13008538, 0.13435584, 0.13876650, \
0.14332195, 0.14802694, 0.15288639, 0.15790530, 0.16308904, 0.16844295, 0.17397262, 0.17968382, 0.18558250, 0.19167484, \
0.19796717, 0.20446607, 0.21117832, 0.21811092, 0.22527111, 0.23266634, 0.24030435, 0.24819310, 0.25634068, 0.26475587, \
0.27344733, 0.28242409, 0.29169556, 0.30127138, 0.31116158, 0.32137644, 0.33192664, 0.34282318, 0.35407743, 0.36570114, \
0.37770644, 0.39010584, 0.40291208, 0.41613895, 0.42980003, 0.44390959, 0.45848233, 0.47353345, 0.48907870, 0.50513422, \
0.52171689, 0.53884387, 0.55653316, 0.57480311, 0.59367281, 0.61316204, 0.63329101, 0.65408045, 0.67555267, 0.69772983, \
0.72063506, 0.74429214, 0.76872587, 0.79396176, 0.82002604, 0.84694600, 0.87474972, 0.90346611, 0.93312526, 0.96375805, \
0.99539644, 1.0280730, 1.0618227, 1.0966804, 1.1326823, 1.1698662, 1.2082708, 1.2479361, 1.2889036, 1.3312160, 1.3749173, \
1.4200534, 1.4666711, 1.5148191, 1.5645479, 1.6159091, 1.6689565, 1.7237452, 1.7803327, 1.8387777, 1.8991414, 1.9614867, \
2.0258787, 2.0923846, 2.1610713, 2.2320154, 2.3052883, 2.3809667, 2.4591296, 2.5398583, 2.6232371, 2.7093532, 2.7982962, \
2.8901591, 2.9850378, 3.0830312, 3.1842413, 3.2887743, 3.3967385, 3.5082474, 3.6234167, 3.7423668, 3.8652217, 3.9921098, \
4.1231637, 4.2585196, 4.3983188, 4.5427074, 4.6918364, 4.8458605, 5.0049415, 5.1692443, 5.3389411, 5.5142026, 5.6952238, \
5.8821878, 6.0752892, 6.2747297, 6.4807177, 6.6934676, 6.9132018, 7.1401496, 7.3745475, 7.6166406, 7.8666806, 8.1249294, \
8.3916559, 8.6671391, 8.9516649, 9.2455320, 9.5490456, 9.8625231, 10.186292, 10.520689, 10.866064, 11.222776, 11.591200, \
11.971718, 12.364727, 12.770638, 13.189876, 13.622874, 14.070073, 14.531968, 15.009026, 15.501744, 16.010639, 16.536238, \
17.079092, 17.639769, 18.218849, 18.816940, 19.434666, 20.072670, 20.731619, 21.412201, 22.115124, 22.841122, 23.590954, \
24.365402, 25.165274, 25.991404, 26.844654, 27.725914, 28.636105, 29.576176, 30.547108, 31.549913, 32.585640, 33.655365, \
34.760208, 35.901321, 37.079857, 38.297119, 39.554344, 40.852840, 42.193966, 43.579117, 45.009739, 46.487324, 48.013420, \
49.589611, 51.217548, 52.898926, 54.635498, 56.429081, 58.281548, 60.194820, 62.170906, 64.211861, 66.319824, 68.496979, \
70.745613, 73.068062, 75.466751, 77.944183, 80.502945, 83.145714, 85.875237, 88.694359, 91.606033, 94.613190, 97.719162, \
100.92711, 104.24036, 107.66238, 111.19673, 114.84712, 118.61734, 122.51133, 126.53315, 130.68700, 134.97722, 139.40826, \
143.98479, 148.71155, 153.59348, 158.63567, 163.84338, 169.22206, 174.77731, 180.51492, 186.44090, 192.56142, 198.88284, \
205.41180, 212.15511, 219.11977, 226.31306, 233.74251, 241.41557, 249.34081, 257.52621, 265.98032, 274.71198, 283.73026, \
293.04462, 302.66473, 312.60065, 322.86276, 333.46173, 344.40869, 355.71500, 367.39246, 379.45328, 391.91003, 404.77573, \
418.06375, 431.78802, 445.96283, 460.60297, 475.72372, 491.34085, 507.47067, 524.13000, 541.33624, 559.10730, 577.46179, \
596.41876, 615.99811, 636.21954, 657.10541, 678.67700, 700.95673, 723.96783, 747.73438, 772.28113, 797.63373, 823.81854, \
850.86298, 878.79529, 907.64453, 937.44080, 968.21527, 1000.0000])
n=N.array([ 3.7709775e+10, 3.6065767e+10, 3.4493432e+10, 3.2989649e+10, 3.1551425e+10, 3.0175900e+10, \
2.8860342e+10, 2.7602137e+10, \
2.6398808e+10, 2.5247922e+10, 2.4147204e+10, 2.3094475e+10, 2.2087643e+10, 2.1124704e+10, 2.0203747e+10, 1.9322939e+10, \
1.8480527e+10, 1.7674846e+10, 1.6904289e+10, 1.6167328e+10, 1.5462490e+10, 1.4788384e+10, 1.4143675e+10, 1.3527065e+10, \
1.2937335e+10, 1.2373316e+10, 1.1833886e+10, 1.1317971e+10, 1.0824550e+10, 1.0352640e+10, 9.9013028e+09, 9.4696428e+09, \
9.0568028e+09, 8.6619587e+09, 8.2843305e+09, 7.9231647e+09, 7.5777439e+09, 7.2473825e+09, 6.9314243e+09, 6.6292444e+09, \
6.3402342e+09, 6.0638244e+09, 5.7994639e+09, 5.5466291e+09, 5.3048166e+09, 5.0735457e+09, 4.8523587e+09, 4.6408141e+09, \
4.4384916e+09, 4.2449897e+09, 4.0599278e+09, 3.8829297e+09, 3.7136481e+09, 3.5517468e+09, 3.3969042e+09, 3.2488120e+09, \
3.1071754e+09, 2.9717143e+09, 2.8421588e+09, 2.7182515e+09, 2.5997458e+09, 2.4864064e+09, 2.3780086e+09, 2.2743360e+09, \
2.1751834e+09, 2.0803535e+09, 1.9896579e+09, 1.9029162e+09, 1.8199575e+09, 1.7406141e+09, 1.6647299e+09, 1.5921536e+09, \
1.5227420e+09, 1.4563558e+09, 1.3928644e+09, 1.3321405e+09, 1.2740643e+09, 1.2185199e+09, 1.1653979e+09, 1.1145907e+09, \
1.0659987e+09, 1.0195252e+09, 9.7507763e+08, 9.3256806e+08, 8.9191149e+08, 8.5302746e+08, 8.1583853e+08, 7.8027117e+08, \
7.4625421e+08, 7.1372032e+08, 6.8260474e+08, 6.5284576e+08, 6.2438406e+08, 5.9716326e+08, 5.7112922e+08, 5.4623008e+08, \
5.2241651e+08, 4.9964106e+08, 4.7785866e+08, 4.5702573e+08, 4.3710147e+08, 4.1804544e+08, 3.9982026e+08, 3.8238954e+08, \
3.6571878e+08, 3.4977482e+08, 3.3452595e+08, 3.1994208e+08, 3.0599382e+08, 2.9265363e+08, 2.7989501e+08, 2.6769266e+08, \
2.5602224e+08, 2.4486062e+08, 2.3418562e
|
MitchTalmadge/Emoji-Tools
|
src/main/resources/PythonScripts/fontTools/unicode.py
|
Python
|
gpl-3.0
| 1,057
| 0.037843
|
from __future__ import print_function, division, absolute
|
_import
from fontTools.misc.py23 import *
def _makeunicod
|
es(f):
import re
lines = iter(f.readlines())
unicodes = {}
for line in lines:
if not line: continue
num, name = line.split(';')[:2]
if name[0] == '<': continue # "<control>", etc.
num = int(num, 16)
unicodes[num] = name
return unicodes
class _UnicodeCustom(object):
def __init__(self, f):
if isinstance(f, basestring):
f = open(f)
self.codes = _makeunicodes(f)
def __getitem__(self, charCode):
try:
return self.codes[charCode]
except KeyError:
return "????"
class _UnicodeBuiltin(object):
def __getitem__(self, charCode):
try:
# use unicodedata backport to python2, if available:
# https://github.com/mikekap/unicodedata2
import unicodedata2 as unicodedata
except ImportError:
import unicodedata
try:
return unicodedata.name(unichr(charCode))
except ValueError:
return "????"
Unicode = _UnicodeBuiltin()
def setUnicodeData(f):
global Unicode
Unicode = _UnicodeCustom(f)
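# --- Illustrative usage note (not part of the original module) ---
# Unicode acts like a mapping from code point to character name, falling
# back to "????" for unknown code points, e.g. Unicode[0x0041] gives
# 'LATIN CAPITAL LETTER A'. setUnicodeData() swaps in a custom table parsed
# from a UnicodeData.txt-style file; the path below is a made-up example:
#     setUnicodeData('path/to/UnicodeData.txt')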
|
ATNF/askapsdp
|
Tools/Dev/rbuild/askapdev/rbuild/utils/get_svn_revision.py
|
Python
|
gpl-2.0
| 1,823
| 0.002743
|
# @file get_svn_revision.py
# Fetch the subversion revision number from the repository
#
# @copyright (c) 2006,2014 CSIRO
# Australia Telescope National Facility (ATNF)
# Commonwealth Scientific and Industrial Research Organisation (CSIRO)
# PO Box 76, Epping NSW 1710, Australia
# atnf-enquiries@csiro.au
#
# This file is part of the ASKAP software distribution.
#
# The ASKAP software distribution is free software: you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the License
# or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, In
|
c., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
#
# @author Robert Crida <robert.crida@ska.ac.za>
#
from runcmd import runcmd
from get_vcs_type import is_git
import os
def get_svn_revision():
try:
(stdout, stderr, returncode) = runcmd('svnversion', shell=True)
if returncode == 0 and stdout and stdout[0].isdigit():
return stdout.rstrip()
else:
|
if is_git():
return get_git_revision()
return "unknown"
except:
return "unknown"
def get_git_revision():
try:
(stdout, stderr, returncode) = runcmd('git describe --tags --always', shell=True)
if returncode == 0:
return stdout.rstrip()
else:
return "unknown"
except:
return "unknown"
|
Georacer/last_letter
|
rqt_dashboard/src/rqt_dashboard/dashboard.py
|
Python
|
gpl-3.0
| 16,075
| 0.029425
|
import os
import rospy, rospkg
import sys
import math
import yaml
from itertools import izip_longest
from operator import add, sub
from qt_gui.plugin import Plugin
from python_qt_binding import loadUi
from python_qt_binding.QtWidgets import QWidget
from PyQt5 import QtGui, QtWidgets, QtCore
from rqt_plot.rosplot import ROSData, RosPlotException
from std_msgs.msg import Float32
from last_letter_msgs.msg import RefCommands
class DashboardGrid(QtWidgets.QWidget):
def __init__(self):
super(DashboardGrid, self).__init__()
self.setFixedSize(900, 900)
self.setWindowTitle('UAV Dashboard')
self.setAutoFillBackground(True)
prefix = '{}'.format(rospy.get_namespace())
rospack = rospkg.RosPack()
filename = rospack.get_path('last_letter') + '/data/parameters/aircraft' + rospy.get_namespace() + 'dashboard.yaml'
data = yaml.load(open(filename).read())
gauges = []
self.line = QtWidgets.QHBoxLayout()
self.commands = RefCommands()
initPos = rospy.get_param('init/position',[0])
self.commands.altitude = -initPos[-1]
initVa = rospy.get_param('init/velLin',[0,0,0])
self.commands.airspeed = math.sqrt(initVa[0]*initVa[0] + initVa[1]*initVa[1] + initVa[2]*initVa[2])
self.pub = rospy.Publisher('refCommands', RefCommands, queue_size=1)
self.pubTimer = QtCore.QTimer()
self.pubTimer.timeout.connect(self.publishCommands)
self
|
.pubTimer.start(1000)
for name in sorted(data.keys()): #sort based on name
values = data[name]
# print 'Adding: {}'.format(name)
values['topic'] = prefix + values['topic']
values['warning'] = zip(*[iter(values['warning'])]*2)
values['danger'] = zip(*[iter(values['dang
|
er'])]*2)
gauges.append(GaugeSimple(**values))
gauges[-1].marker_set.connect(self.onClick)
grouped_gauges = list(izip_longest(*(iter(gauges),)*3))
for i in xrange(len(grouped_gauges)):
setattr(self, 'column{}'.format(i), QtWidgets.QVBoxLayout())
curr_column = getattr(self, 'column{}'.format(i))
for g in grouped_gauges[i]:
if g is not None:
curr_column.addWidget(g)
# curr_column.append(g.topic)
self.line.addLayout(curr_column)
self.setLayout(self.line)
def onClick(self,comList):
member = comList[0]
value = comList[1]
if member == 'Roll':
self.commands.euler.x = value*math.pi/180
if member == 'Yaw':
self.commands.euler.z = value*math.pi/180
if member == 'Pitch':
self.commands.euler.y = value*math.pi/180
elif member == 'Airspeed':
self.commands.airspeed = value
elif member == 'Geometric Altitude':
self.commands.altitude = value
def publishCommands(self):
self.commands.header.stamp = rospy.Time.now()
self.pub.publish(self.commands)
self.pubTimer.start(1000)
#By Fadi
class GaugeSimple(QtWidgets.QWidget):
''' Gauge pointer movement:
        minimum -> maximum values: clockwise rotation
        the maximum value must be greater than the minimum value
'''
marker_set = QtCore.pyqtSignal(list)
def __init__(self, topic='/HUD', length=300.0, end_angle=300.0, min=0.0, max=100.0, main_points=11,
warning=[], danger=[], multiplier='', units='', description=''):
super(GaugeSimple, self).__init__()
self.setFixedSize(300, 300)
self.setWindowTitle('A Magnificent Gauge')
self.setAutoFillBackground(True)
self.redraw_interval = 40
self.update_plot_timer = QtCore.QTimer()
self.update_plot_timer.timeout.connect(self.update_plot)
self._start_time = rospy.get_time()
self._rosdata = ROSData(topic, self._start_time)
self.min = min
self.curr_value = min
self.value_uncap = 0
self.max = max
self.length = length
self.main_points = main_points
self.start_angle = (end_angle + length) % 360
self.end_angle = end_angle % 360
self.is_circle = self.start_angle == self.end_angle
self.gauge_ticks = []
self.bounding_rect = QtCore.QRectF(25.0, 25.0, 250.0, 250.0)
self.center = QtCore.QPointF(150.0, 150.0)
self.warning = warning #zones
self.danger = danger #zones
self.center_radius = 5.0
self.margin = 12
self.units = units
self.multiplier = multiplier
self.description = description
self.update_plot_timer.start(self.redraw_interval)
#Various ui colors
self.marker_tick_color = QtGui.QPen(QtGui.QColor('#FF9900'), 1.8)
self.ui_color = QtGui.QPen(QtCore.Qt.green, 2.5)
self.ui_color_tick = QtGui.QPen(QtCore.Qt.green, 1.5)
self.gauge_color = QtGui.QPen(QtCore.Qt.lightGray, 2)
self.warning_color = QtGui.QPen(QtCore.Qt.yellow, 2)
self.warning_bg = QtGui.QRadialGradient(self.center, self.width()/3)
self.warning_bg.setColorAt(0.0, QtCore.Qt.yellow)
self.warning_bg.setColorAt(1.0, QtCore.Qt.black)
self.danger_color = QtGui.QPen(QtCore.Qt.red, 2)
self.danger_bg = QtGui.QRadialGradient(self.center, self.width()/3)
self.danger_bg.setColorAt(0.0, QtCore.Qt.red)
self.danger_bg.setColorAt(1.0, QtCore.Qt.black)
self.current_bg = QtCore.Qt.black
self.create_gauge()
def detect_safe_zones(self):
r = [(self.min, self.max)]
unsafe = sorted(self.warning+self.danger, key=lambda i:i[0])
for i in unsafe:
temp = []
for y in r:
if i[0] > y[1] or i[1] < y[0]:
temp.append(y)
elif i[0]==y[0] and i[1]==y[1]:
continue
elif i[0]>y[0] and i[1]<y[1]:
temp.append((y[0], i[0]))
temp.append((i[1], y[1]))
elif i[0]>y[0] and i[1]==y[1]:
temp.append((i[0], i[1]))
elif i[0]==y[0] and i[1]<y[1]:
temp.append((i[1], y[1]))
r = temp
return r
def create_gauge(self):
def text_width(text):
font = QtGui.QFont()
metrics = QtGui.QFontMetrics(font)
return metrics.width(text)
#Main points
divisor = self.main_points
if self.start_angle != self.end_angle:
divisor -= 1
angle_step = self.length/divisor
value_step = abs(self.max-self.min)/divisor
#Gauge main line(the circular path)
#Safe zones
zones = map(self.val2deg_tuple, self.detect_safe_zones())
self.gauge_safe = []
for zone in zones:
path = QtGui.QPainterPath()
path.arcMoveTo(self.bounding_rect, self.start_angle-zone[0])
path.arcTo(self.bounding_rect, self.start_angle-zone[0], -(zone[1]-zone[0]))
self.gauge_safe.append(path)
#Warning zones
warning_zones = map(self.val2deg_tuple, self.warning)
self.gauge_warning = []
for w in warning_zones:
path = QtGui.QPainterPath()
path.arcMoveTo(self.bounding_rect, self.start_angle-w[0])
path.arcTo(self.bounding_rect, self.start_angle-w[0], -(w[1]-w[0]))
self.gauge_warning.append(path)
#Danger zones
danger_zones = map(self.val2deg_tuple, self.danger)
self.gauge_danger = []
for d in danger_zones:
path = QtGui.QPainterPath()
path.arcMoveTo(self.bounding_rect, self.start_angle-d[0])
path.arcTo(self.bounding_rect, self.start_angle-d[0], -(d[1]-d[0]))
self.gauge_danger.append(path)
#Initial gauge position
self.set_gauge(self.curr_value)
for i in xrange(self.main_points):
#Find the point on the curve
angle = self.start_angle -i*angle_step
value = self.min + i*value_step
p = QtGui.QPainterPath()
p.arcMoveTo(self.bounding_rect, angle)
x, y = p.currentPosition().x(), p.currentPosition().y()
x_new = x*0.9 + self.center.x()*0.1
y_new = y*0.9 + self.center.y()*0.1
x_text = x*0.8 + self.center.x()*0.2 - (text_width(str(round(value, 1)))-10)/2
y_text = y*0.8 + self.center.y()*0.2 + 4
tick_path = QtGui.QPainterPath()
tick_path.moveTo(x_new, y_new)
tick_path.lineTo(x, y)
            # Store the label position, value and tick path for this gauge point
self.gauge_ticks.append([QtCore.QPointF(x_text, y_text), value, tick_path])
#Store the tick_length for the marker area
self.tick_length = math.sqrt((x-x_new)**2+(y-y_new)**2)
def val2deg(self, value):
return self.length*((value-self.min)/abs(self.max-self.min))
def val2deg_tuple(self, t):
return map(self.val2deg, t)
def deg2val(self, degrees):
        # Convert the given degrees relative to the start_angle to the respective value
return abs(self.max-self.min)*(degrees/self.length)+self.min
def mouseReleaseEvent(self, e):
self.mouseMoveEvent(e)
def mouseMoveEvent(self, e):
        # marker_line and marker_value don't exist before the first call of this function
click_pos = e.posF()
x_coeff = (click_pos.x() - self.center.x())**2
y_coeff = (click_pos.y() - self.c
|
ActiveState/code
|
recipes/Python/576734_C_struct_decorator/recipe-576734.py
|
Python
|
mit
| 1,507
| 0.033842
|
import ctypes
class C_struct:
"""Decorator to convert the given class into a C struct."""
# contains a dict of all known translatable types
types = ctypes.__dict__
@classmethod
def register_type(cls, typenam
|
e, obj):
"""Adds the new class to the dict of understood types."""
cls.types[typename] = obj
def __call__(self, cls):
"""Converts the given class into a C struct.
Usage:
>>> @C_struct()
... class Account:
... first_name = "c_char_p"
... last_name = "c_char_p"
... b
|
alance = "c_float"
...
>>> a = Account()
>>> a
<cstruct.Account object at 0xb7c0ee84>
A very important note: while it *is* possible to
instantiate these classes as follows:
>>> a = Account("Geremy", "Condra", 0.42)
This is strongly discouraged, because there is at
present no way to ensure what order the field names
will be read in.
"""
# build the field mapping (names -> types)
fields = []
for k, v in vars(cls).items():
# don't wrap private variables
if not k.startswith("_"):
# if its a pointer
if v.startswith("*"):
field_type = ctypes.POINTER(self.types[v[1:]])
else:
field_type = self.types[v]
new_field = (k, field_type)
fields.append(new_field)
# make our bases tuple
bases = (ctypes.Structure,) + tuple((base for base in cls.__bases__))
# finish up our wrapping dict
class_attrs = {"_fields_": fields, "__doc__": cls.__doc__}
# now create our class
return type(cls.__name__, bases, class_attrs)
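# --- Illustrative usage sketch (not part of the original recipe) ---
# Shows register_type() and the "*typename" pointer syntax handled in
# __call__ above; the Point class and its field names are made up here.
if __name__ == "__main__":
    # make POINTER(c_int) available under a short name
    C_struct.register_type("c_int_p", ctypes.POINTER(ctypes.c_int))

    @C_struct()
    class Point:
        x = "c_int"
        y = "c_int"
        neighbour = "*c_int"   # "*name" becomes ctypes.POINTER(c_int)
        scratch = "c_int_p"    # resolved via the type registered above

    p = Point()
    p.x, p.y = 3, 4
    print(p.x, p.y)            # behaves like a normal ctypes.Structure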
|
svenstaro/OpenShadingLanguage
|
testsuite/texture-withderivs/run.py
|
Python
|
bsd-3-clause
| 133
| 0.015038
|
#!/usr/bi
|
n/env python
command += testshade("-g 256 256 --center -od uint8 -o Cout out.tif
|
test")
outputs = [ "out.txt", "out.tif" ]
|
ThiagoGarciaAlves/erpnext
|
erpnext/accounts/report/sales_register/sales_register.py
|
Python
|
agpl-3.0
| 7,220
| 0.024931
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import flt
from frappe import msgprint, _
def execute(filters=None):
if not filters: filters = {}
invoice_list = get_invoices(filters)
columns, income_accounts, tax_accounts = get_columns(invoice_list)
if not invoice_list:
msgprint(_("No record found"))
return columns, invoice_list
invoice_income_map = get_invoice_income_map(invoice_list)
invoice_income_map, invoice_tax_map = get_invoice_tax_map(invoice_list,
invoice_income_map, income_accounts)
invoice_so_dn_map = get_invoice_so_dn_map(invoice_list)
customer_map = get_customer_deatils(invoice_list)
data = []
for inv in invoice_list:
# invoice details
sales_order = list(set(invoice_so_dn_map.get(inv.name, {}).get("sales_order", [])))
delivery_note = list(set(invoice_so_dn_map.get(inv.name, {}).get("delivery_note", [])))
row = [inv.name, inv.posting_date, inv.customer, inv.customer_name,
customer_map.get(inv.customer, {}).get("customer_group"),
customer_map.get(inv.customer, {}).get("territory"),
inv.debit_to, inv.project_name, inv.remarks, ", ".join(sales_order), ", ".join(delivery_note)]
# map income values
base_net_total = 0
for income_acc in income_accounts:
income_amount = flt(invoice_income_map.get(inv.name, {}).get(income_acc))
base_net_total += income_amount
row.append(income_amount)
# net total
row.append(base_net_total or inv.base_net_total)
# tax account
total_tax = 0
for tax_acc in tax_accounts:
if tax_acc not in income_accounts:
tax_amount = flt(invoice_tax_map.get(inv.name, {}).get(tax_acc))
total_tax += tax_amount
row.append(tax_amount)
# total tax, grand total, outstanding amount & rounded total
row += [total_tax, inv.base_grand_total, inv.base_rounded_total, inv.outstanding_amount]
data.append(row)
return columns, data
def get_columns(invoice_list):
"""return columns based on filters"""
columns = [
_("Invoice") + ":Link/Sales Invoice:120", _("Posting Date") + ":Date:80", _("Customer Id") + "::120",
_("Customer Name") + "::120", _("Customer Group") + ":Link/Customer Group:120", _("Territory") + ":Link/Territory:80",
_("Receivable Account") + ":Link/Account:120", _("Project") +":Link/Project:80", _("Remarks") + "::150",
_("Sales Order") + ":Link/Sales Order:100", _("Delivery Note") + ":Link/Delivery Note:100"
]
income_accounts = tax_accounts = income_columns = tax_columns = []
if invoice_list:
income_accounts = frappe.db.sql_list("""select distinct income_account
from `tabSales Invoice Item` where docstatus = 1 and parent in (%s)
order by income_account""" %
', '.join(['%s']*len(invoice_
|
list)), tuple([inv.name for inv in invoice_list]))
tax_accounts = frappe.db.sql_list("""select distinct account_head
from `tabSales Taxes and Charges` where parenttype = 'Sales Invoice'
and docstatus = 1 and ifnull(base_tax_amount_after_discount_amount, 0) != 0
and parent in (%s) order by account_head""" %
', '.join(['%s']*len(invoice_list)), tuple(
|
[inv.name for inv in invoice_list]))
income_columns = [(account + ":Currency:120") for account in income_accounts]
for account in tax_accounts:
if account not in income_accounts:
tax_columns.append(account + ":Currency:120")
columns = columns + income_columns + [_("Net Total") + ":Currency:120"] + tax_columns + \
[_("Total Tax") + ":Currency:120", _("Grand Total") + ":Currency:120",
_("Rounded Total") + ":Currency:120", _("Outstanding Amount") + ":Currency:120"]
return columns, income_accounts, tax_accounts
def get_conditions(filters):
conditions = ""
if filters.get("company"): conditions += " and company=%(company)s"
if filters.get("customer"): conditions += " and customer = %(customer)s"
if filters.get("from_date"): conditions += " and posting_date >= %(from_date)s"
if filters.get("to_date"): conditions += " and posting_date <= %(to_date)s"
return conditions
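# Added note, with hypothetical filter values: for
# filters = {"company": "ACME", "from_date": "2015-01-01"} the string built
# above is " and company=%(company)s and posting_date >= %(from_date)s";
# the %(...)s placeholders are filled from `filters` by frappe.db.sql below.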
def get_invoices(filters):
conditions = get_conditions(filters)
return frappe.db.sql("""select name, posting_date, debit_to, project_name, customer,
customer_name, remarks, base_net_total, base_grand_total, base_rounded_total, outstanding_amount
from `tabSales Invoice`
where docstatus = 1 %s order by posting_date desc, name desc""" %
conditions, filters, as_dict=1)
def get_invoice_income_map(invoice_list):
income_details = frappe.db.sql("""select parent, income_account, sum(base_net_amount) as amount
from `tabSales Invoice Item` where parent in (%s) group by parent, income_account""" %
', '.join(['%s']*len(invoice_list)), tuple([inv.name for inv in invoice_list]), as_dict=1)
invoice_income_map = {}
for d in income_details:
invoice_income_map.setdefault(d.parent, frappe._dict()).setdefault(d.income_account, [])
invoice_income_map[d.parent][d.income_account] = flt(d.amount)
return invoice_income_map
def get_invoice_tax_map(invoice_list, invoice_income_map, income_accounts):
tax_details = frappe.db.sql("""select parent, account_head,
sum(base_tax_amount_after_discount_amount) as tax_amount
from `tabSales Taxes and Charges` where parent in (%s) group by parent, account_head""" %
', '.join(['%s']*len(invoice_list)), tuple([inv.name for inv in invoice_list]), as_dict=1)
invoice_tax_map = {}
for d in tax_details:
if d.account_head in income_accounts:
if invoice_income_map[d.parent].has_key(d.account_head):
invoice_income_map[d.parent][d.account_head] += flt(d.tax_amount)
else:
invoice_income_map[d.parent][d.account_head] = flt(d.tax_amount)
else:
invoice_tax_map.setdefault(d.parent, frappe._dict()).setdefault(d.account_head, [])
invoice_tax_map[d.parent][d.account_head] = flt(d.tax_amount)
return invoice_income_map, invoice_tax_map
def get_invoice_so_dn_map(invoice_list):
si_items = frappe.db.sql("""select parent, sales_order, delivery_note, so_detail
from `tabSales Invoice Item` where parent in (%s)
and (ifnull(sales_order, '') != '' or ifnull(delivery_note, '') != '')""" %
', '.join(['%s']*len(invoice_list)), tuple([inv.name for inv in invoice_list]), as_dict=1)
invoice_so_dn_map = {}
for d in si_items:
if d.sales_order:
invoice_so_dn_map.setdefault(d.parent, frappe._dict()).setdefault(
"sales_order", []).append(d.sales_order)
delivery_note_list = None
if d.delivery_note:
delivery_note_list = [d.delivery_note]
elif d.sales_order:
delivery_note_list = frappe.db.sql_list("""select distinct parent from `tabDelivery Note Item`
where docstatus=1 and so_detail=%s""", d.so_detail)
if delivery_note_list:
invoice_so_dn_map.setdefault(d.parent, frappe._dict()).setdefault("delivery_note", delivery_note_list)
return invoice_so_dn_map
def get_customer_deatils(invoice_list):
customer_map = {}
customers = list(set([inv.customer for inv in invoice_list]))
for cust in frappe.db.sql("""select name, territory, customer_group from `tabCustomer`
where name in (%s)""" % ", ".join(["%s"]*len(customers)), tuple(customers), as_dict=1):
customer_map.setdefault(cust.name, cust)
return customer_map
|
cshallue/models
|
research/object_detection/meta_architectures/faster_rcnn_meta_arch_test.py
|
Python
|
apache-2.0
| 17,423
| 0.004018
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.meta_architectures.faster_rcnn_meta_arch."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from object_detection.meta_architectures import faster_rcnn_meta_arch_test_lib
class FasterRCNNMetaArchTest(
faster_rcnn_meta_arch_test_lib.FasterRCNNMetaArchTestBase,
parameterized.TestCase):
def test_postprocess_second_stage_only_inference_mode_with_masks(self):
model = self._build_model(
is_training=False, number_of_stages=2, second_stage_batch_size=6)
batch_size = 2
total_num_padded_proposals = batch_size * model.max_num_proposals
proposal_boxes = tf.constant(
[[[1, 1, 2, 3],
[0, 0, 1, 1],
[.5, .5, .6, .6],
4*[0], 4*[0], 4*[0], 4*[0], 4*[0]],
[[2, 3, 6, 8],
[1, 2, 5, 3],
4*[0], 4*[0], 4*[0], 4*[0], 4*[0], 4*[0]]], dtype=tf.float32)
num_proposals = tf.constant([3, 2], dtype=tf.int32)
refined_box_encodings = tf.zeros(
[total_num_padded_proposals, model.num_classes, 4], dtype=tf.float32)
class_predictions_with_background = tf.ones(
[total_num_padded_proposals, model.num_classes+1], dtype=tf.float32)
image_shape = tf.constant([batch_size, 36, 48, 3], dtype=tf.int32)
mask_height = 2
mask_width = 2
mask_predictions = 30. * tf.ones(
[total_num_padded_proposals, model.num_classes,
mask_height, mask_width], dtype=tf.float32)
exp_detection_masks = np.array([[[[1, 1], [1, 1]],
[[1, 1], [1, 1]],
[[1, 1], [1, 1]],
[[1, 1], [1, 1]],
[[1, 1], [1, 1]]],
[[[1, 1], [1, 1]],
[[1, 1], [1, 1]],
[[1, 1], [1, 1]],
[[1, 1], [1, 1]],
[[0, 0], [0, 0]]]])
_, true_image_shapes = model.preprocess(tf.zeros(image_shape))
detections = model.postprocess({
'refined_box_encodings': refined_box_encodings,
'class_predictions_with_background': class_predictions_with_background,
'num_proposals': num_proposals,
'proposal_boxes': proposal_boxes,
'image_shape': image_shape,
'mask_predictions': mask_predictions
}, true_image_shapes)
with self.test_session() as sess:
detections_out = sess.run(detections)
self.assertAllEqual(detections_out['detection_boxes'].shape, [2, 5, 4])
self.assertAllClose(detections_out['detection_scores'],
[[1, 1, 1, 1, 1], [1, 1, 1, 1, 0]])
self.assertAllClose(detections_out['detection_classes'],
[[0, 0, 0, 1, 1], [0, 0, 1, 1, 0]])
self.assertAllClose(detections_out['num_detections'], [5, 4])
self.assertAllClose(detections_out['detection_masks'],
exp_detection_masks)
      self.assertTrue(np.amax(detections_out['detection_masks']) <= 1.0)
      self.assertTrue(np.amin(detections_out['detection_masks']) >= 0.0)
def test_postprocess_second_stage_only_inference_mode_with_shared_boxes(self):
model = self._build_model(
is_training=False, number_of_stages=2, second_stage_batch_size=6)
batch_size = 2
total_num_padded_proposals = batch_size * model.max_num_proposals
proposal_boxes = tf.constant(
[[[1, 1, 2, 3],
[0, 0, 1, 1],
[.5, .5, .6, .6],
4*[0], 4*[0], 4*[0], 4*[0], 4*[0]],
[[2, 3, 6, 8],
[1, 2, 5, 3],
4*[0], 4*[0], 4*[0], 4*[0], 4*[0], 4*[0]]], dtype=tf.float32)
num_proposals = tf.constant([3, 2], dtype=tf.int32)
# This has 1 box instead of one for each class.
refined_box_encodings = tf.zeros(
[total_num_padded_proposals, 1, 4], dtype=tf.float32)
class_predictions_with_background = tf.ones(
[total_num_padded_proposals, model.num_classes+1], dtype=tf.float32)
image_shape = tf.constant([batch_size, 36, 48, 3], dtype=tf.int32)
_, true_image_shapes = model.preprocess(tf.zeros(image_shape))
detections = model.postprocess({
'refined_box_encodings': refined_box_encodings,
'class_predictions_with_background': class_predictions_with_background,
'num_proposals': num_proposals,
'proposal_boxes': proposal_boxes,
'image_shape': image_shape,
}, true_image_shapes)
with self.test_session() as sess:
detections_out = sess.run(detections)
self.assertAllEqual(detections_out['detection_boxes'].shape, [2, 5, 4])
self.assertAllClose(detections_out['detection_scores'],
[[1, 1, 1, 1, 1], [1, 1, 1, 1, 0]])
self.assertAllClose(detections_out['detection_classes'],
[[0, 0, 0, 1, 1], [0, 0, 1, 1, 0]])
self.assertAllClose(detections_out['num_detections'], [5, 4])
@parameterized.parameters(
{'masks_are_class_agnostic': False},
{'masks_are_class_agnostic': True},
)
def test_predict_correct_shapes_in_inference_mode_three_stages_with_masks(
self, masks_are_class_agnostic):
batch_size = 2
image_size = 10
max_num_proposals = 8
initial_crop_size = 3
maxpool_stride = 1
input_shapes = [(batch_size, image_size, image_size, 3),
(None, image_s
|
ize, image_size, 3),
(batch_size, None, None, 3),
(None, None, None, 3)]
expected_num_anchors = image_size * image_size * 3 * 3
expected_shapes = {
'rpn_box_predictor_features':
(2, image_size, image_size, 512),
'rpn_features_to_crop': (2, image_size, image_size, 3),
'image_shape': (4,),
'rpn_box_encodings': (2, expected_num_anchors, 4),
|
'rpn_objectness_predictions_with_background':
(2, expected_num_anchors, 2),
'anchors': (expected_num_anchors, 4),
'refined_box_encodings': (2 * max_num_proposals, 2, 4),
'class_predictions_with_background': (2 * max_num_proposals, 2 + 1),
'num_proposals': (2,),
'proposal_boxes': (2, max_num_proposals, 4),
'proposal_boxes_normalized': (2, max_num_proposals, 4),
'box_classifier_features':
self._get_box_classifier_features_shape(image_size,
batch_size,
max_num_proposals,
initial_crop_size,
maxpool_stride,
3)
}
for input_shape in input_shapes:
test_graph = tf.Graph()
with test_graph.as_default():
model = self._build_model(
is_training=False,
number_of_stages=3,
second_stage_batch_size=2,
predict_masks=True,
masks_are_class_agnostic=masks_are_class_agnostic)
preprocessed_inputs = tf.placeholder(tf.float32, shape=input_shape)
_, true_image_shapes = model.preprocess(preprocessed_inputs)
result_tensor_dict = model.predict(preprocessed_inputs,
true_image_shapes)
init_op = tf.global_variables_initializer()
with self.test_session(graph=test_graph) as sess:
sess.run(init_op)
tensor_dict_out = sess.run(resul
|
ygol/odoo
|
addons/sale_stock/wizard/sale_order_cancel.py
|
Python
|
agpl-3.0
| 553
| 0.003617
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models
class SaleOrderCancel(models.TransientModel):
_inherit = 'sale.order.cancel'
display_delivery_alert = fields.Boolean('Delivery Alert', compute='_compute_display_delivery_alert')
@api.
|
depends('order_id')
def _compute_display_delivery_alert(self):
for wizard in self:
wizard.display_delivery_alert = bool(any(picking.state == 'done' for picking in wizard.order_id.picking_ids)
|
)
|
DeepThoughtTeam/tensorflow
|
tensorflow/python/ops/constant_op.py
|
Python
|
apache-2.0
| 7,338
| 0.004361
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache L
|
icense, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the
|
License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""## Constant Value Tensors
TensorFlow provides several operations that you can use to generate constants.
@@zeros
@@zeros_like
@@ones
@@ones_like
@@fill
@@constant
## Sequences
@@linspace
@@range
## Random Tensors
TensorFlow has several ops that create random tensors with different
distributions. The random ops are stateful, and create new random values each
time they are evaluated.
The `seed` keyword argument in these functions acts in conjunction with
the graph-level random seed. Changing either the graph-level seed using
[`set_random_seed`](../../api_docs/python/constant_op.md#set_random_seed) or the
op-level seed will change the underlying seed of these operations. Setting
neither graph-level nor op-level seed results in a random seed for all
operations.
See [`set_random_seed`](../../api_docs/python/constant_op.md#set_random_seed)
for details on the interaction between operation-level and graph-level random
seeds.
### Examples:
```python
# Create a tensor of shape [2, 3] consisting of random normal values, with mean
# -1 and standard deviation 4.
norm = tf.random_normal([2, 3], mean=-1, stddev=4)
# Shuffle the first dimension of a tensor
c = tf.constant([[1, 2], [3, 4], [5, 6]])
shuff = tf.random_shuffle(c)
# Each time we run these ops, different results are generated
sess = tf.Session()
print(sess.run(norm))
print(sess.run(norm))
# Set an op-level seed to generate repeatable sequences across sessions.
c = tf.constant([[1, 2], [3, 4], [5, 6]])
sess = tf.Session()
norm = tf.random_normal(c, seed=1234)
print(sess.run(norm))
print(sess.run(norm))
```
Another common use of random values is the initialization of variables. Also see
the [Variables How To](../../how_tos/variables/index.md).
```python
# Use random uniform values in [0, 1) as the initializer for a variable of shape
# [2, 3]. The default type is float32.
var = tf.Variable(tf.random_uniform([2, 3]), name="var")
init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)
print(sess.run(var))
```
@@random_normal
@@truncated_normal
@@random_uniform
@@random_shuffle
@@set_random_seed
"""
# Must be separate from array_ops to avoid a cyclic dependency.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.python.platform
import numpy as np
from tensorflow.core.framework import attr_value_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
def constant(value, dtype=None, shape=None, name="Const"):
"""Creates a constant tensor.
The resulting tensor is populated with values of type `dtype`, as
specified by arguments `value` and (optionally) `shape` (see examples
below).
The argument `value` can be a constant value, or a list of values of type
`dtype`. If `value` is a list, then the length of the list must be less
than or equal to the number of elements implied by the `shape` argument (if
specified). In the case where the list length is less than the number of
elements specified by `shape`, the last element in the list will be used
to fill the remaining entries.
The argument `shape` is optional. If present, it specifies the dimensions
of the resulting tensor. If not present, then the tensor is a scalar (0-D)
if `value` is a scalar, or 1-D otherwise.
If the argument `dtype` is not specified, then the type is inferred from
the type of `value`.
For example:
```python
# Constant 1-D Tensor populated with value list.
tensor = tf.constant([1, 2, 3, 4, 5, 6, 7]) => [1 2 3 4 5 6 7]
# Constant 2-D tensor populated with scalar value -1.
tensor = tf.constant(-1.0, shape=[2, 3]) => [[-1. -1. -1.]
[-1. -1. -1.]]
```
Args:
value: A constant value (or list) of output type `dtype`.
dtype: The type of the elements of the resulting tensor.
shape: Optional dimensions of resulting tensor.
name: Optional name for the tensor.
Returns:
A Constant Tensor.
"""
g = ops.get_default_graph()
tensor_value = attr_value_pb2.AttrValue()
tensor_value.tensor.CopyFrom(
tensor_util.make_tensor_proto(value, dtype=dtype, shape=shape))
dtype_value = attr_value_pb2.AttrValue(type=tensor_value.tensor.dtype)
const_tensor = g.create_op(
"Const", [], [dtype_value.type],
attrs={"value": tensor_value, "dtype": dtype_value}, name=name).outputs[0]
return const_tensor
@ops.RegisterShape("Const")
def _ConstantShape(op):
return [tensor_shape.TensorShape(
[d.size for d in op.get_attr("value").tensor_shape.dim])]
def _constant_tensor_conversion_function(v, dtype=None, name=None,
as_ref=False):
_ = as_ref
return constant(v, dtype=dtype, name=name)
ops.register_tensor_conversion_function(
(list, tuple), _constant_tensor_conversion_function, 100)
ops.register_tensor_conversion_function(
np.ndarray, _constant_tensor_conversion_function, 100)
ops.register_tensor_conversion_function(
np.generic, _constant_tensor_conversion_function, 100)
ops.register_tensor_conversion_function(
object, _constant_tensor_conversion_function, 200)
def _tensor_shape_tensor_conversion_function(s, dtype=None, name=None,
as_ref=False):
_ = as_ref
if not s.is_fully_defined():
raise ValueError(
"Cannot convert a partially known TensorShape to a Tensor: %s" % s)
if dtype is not None:
if dtype not in (dtypes.int32, dtypes.int64):
raise TypeError("Cannot convert a TensorShape to dtype: %s" % dtype)
else:
dtype = dtypes.int32
if name is None:
name = "shape_as_tensor"
return constant(s.as_list(), dtype=dtype, name=name)
ops.register_tensor_conversion_function(
tensor_shape.TensorShape, _tensor_shape_tensor_conversion_function, 100)
def _dimension_tensor_conversion_function(d, dtype=None, name=None,
as_ref=False):
_ = as_ref
if d.value is None:
raise ValueError("Cannot convert an unknown Dimension to a Tensor: %s" % d)
if dtype is not None:
if dtype not in (dtypes.int32, dtypes.int64):
      raise TypeError("Cannot convert a Dimension to dtype: %s" % dtype)
else:
dtype = dtypes.int32
if name is None:
name = "shape_as_tensor"
return constant(d.value, dtype=dtype, name=name)
ops.register_tensor_conversion_function(
tensor_shape.Dimension, _dimension_tensor_conversion_function, 100)
|
keflavich/pyspeckit-obsolete
|
pyspeckit/spectrum/readers/txt_reader.py
|
Python
|
mit
| 5,045
| 0.010109
|
"""
==========================
PySpecKit ASCII Reader
==========================
Routines for reading in ASCII format spectra. If atpy is not installed,
will use a very simple routine for reading in the data.
.. moduleauthor:: Adam Ginsburg <adam.g.ginsburg@gmail.com>
.. moduleauthor:: Jordan Mirocha <mirochaj@gmail.com>
"""
try:
import atpy
atpyOK = True
except ImportError:
atpyOK = False
from .. import units
import numpy as np
from pyspeckit.specwarnings import warn
import readcol
def open_1d_txt(filename, xaxcol=0, datacol=1, errorcol=2,
text_reader='simple', atpytype='ascii', **kwargs):
"""
Attempt to read a 1D spectrum from a text file assuming wavelength as the
first column, data as the second, and (optionally) error as the third.
Reading can be done either with atpy or a 'simple' reader. If you have an
IPAC, CDS, or formally formatted table, you'll want to use atpy.
If you have a simply formatted file of the form, e.g.
# name name
# unit unit
data data
data data
kwargs are passed to atpy.Table
"""
if text_reader in ('simple','readcol') or not atpyOK:
if not atpyOK:
warn("WARNING: atpy not installed; will use simple reader instead.")
|
if text_reader == 'simple':
data, error, XAxis, T = simple_txt(filename, xaxcol = xaxcol,
datacol = datacol, errorcol = errorcol, **kwargs)
elif text
|
_reader == 'readcol':
Tlist = readcol.readcol(filename, twod=False, **kwargs)
XAxis = units.SpectroscopicAxis(Tlist[xaxcol])
data = Tlist[datacol]
error = Tlist[errorcol]
T = dummy_class()
            Tdict = readcol.readcol(filename, asDict=True, **kwargs)
            # hdr/colunits are not returned by readcol; derive the column
            # names from the dict keys and fall back to unknown units
            # (note that the key order of this dict is not guaranteed)
            hdr = list(Tdict.keys())
            colunits = ['unknown'] * len(hdr)
            T.data = dummy_class()
            T.data.dtype = dummy_class()
            T.data.dtype.names = hdr
            T.columns = {}
            T.columns[T.data.dtype.names[xaxcol]] = dummy_class()
            T.columns[T.data.dtype.names[xaxcol]].unit = colunits[xaxcol]
            T.columns[T.data.dtype.names[datacol]] = dummy_class()
            T.columns[T.data.dtype.names[datacol]].unit = colunits[datacol]
elif text_reader in ('atpy','asciitable'):
T = atpy.Table(filename, type=atpytype, masked=True, **kwargs)
xarr = T.data[T.data.dtype.names[xaxcol]]
data = T.data[T.data.dtype.names[datacol]]
if len(T.columns) > errorcol:
error = T.data[T.data.dtype.names[errorcol]]
else:
# assume uniform, zero error
error = data*0
if 'xunits' in T.keywords:
xunits = T.keywords['xunits']
else:
xunits = 'unknown'
XAxis = units.SpectroscopicAxis(xarr,xunits)
# Need this in Spectrum class to correctly parse header
T.xaxcol = xaxcol
T.datacol = datacol
return data, error, XAxis, T
def simple_txt(filename, xaxcol=0, datacol=1, errorcol=2, skiplines=0, **kwargs):
"""
Very simple method for reading columns from ASCII file.
"""
f = open(filename, 'r')
hdr = None
colunits = []
coldata = []
for ii, line in enumerate(f):
# Ignore blank lines
if not line.strip():
continue
# Possibly read in header
if line.split()[0][0] == '#':
if (ii) == (0+skiplines):
hdr = line[1:].split()
if (ii) == (1+skiplines):
colunits = line[1:].split()
continue
if ii < skiplines:
continue
coldata.append(line.split())
for j, element in enumerate(coldata[-1]):
try:
coldata[-1][j] = float(element)
except ValueError:
coldata[-1][j] = str(element)
f.close()
coldata = zip(*coldata)
if not colunits:
colunits = ['unknown'] * len(coldata)
if not hdr:
hdr = ['unknown'] * len(coldata)
N = len(hdr)
# Prepare to return data
data = coldata[datacol]
xarr = coldata[xaxcol]
if errorcol > len(coldata) - 1:
error = np.array(data)*0
else:
error = coldata[errorcol]
if len(error) != len(data):
raise ValueError("Data and Error lengths do not match.")
XAxis = units.SpectroscopicAxis(xarr, colunits[xaxcol])
# Create atPy style Table instance
T = dummy_class()
T.data = dummy_class()
T.data.dtype = dummy_class()
T.data.dtype.names = hdr
T.columns = {}
T.columns[T.data.dtype.names[xaxcol]] = dummy_class()
T.columns[T.data.dtype.names[xaxcol]].unit = colunits[xaxcol]
T.columns[T.data.dtype.names[datacol]] = dummy_class()
T.columns[T.data.dtype.names[datacol]].unit = colunits[datacol]
return np.array(data), np.array(error), XAxis, T
class dummy_class:
def __init__(self):
pass
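# A minimal usage sketch (added for illustration, not part of the original
# module): simple_txt expects a whitespace-separated file, optionally with
# "# name ..." and "# unit ..." header lines. The filename is hypothetical.
if __name__ == "__main__":
    data, error, xarr, T = simple_txt('spectrum.txt', xaxcol=0, datacol=1,
                                      errorcol=2)
    print(len(xarr), data.mean(), error.max())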
|
kallimachos/archive
|
andpygame/android_example.py
|
Python
|
gpl-3.0
| 1,612
| 0.001861
|
import pygame
# Import the android module. If we can't import it, set it to None - this
# lets us test it, and check to see if we want android-specific behavior.
try:
import android
except ImportError:
android = None
# Event constant.
TIMEREVENT = pygame.USEREVENT
# The FPS the game runs at.
FPS = 30
# Color constants.
RED = (255, 0, 0, 255)
GREEN = (0, 255, 0, 255)
def main():
pygame.init()
# Set the screen size.
screen = pygame.display.set_mode((480, 800))
# Map the back button to the escape key.
if android:
android.init()
android.map_key(android.KEYCODE_BACK, pygame.K_ESCAPE)
# Use a timer to control FPS.
pygame.time.set_timer(TIMEREVENT, 1000 / FPS)
# The color of the screen.
color = RED
while True:
ev = pygame.event.wait()
# Android-specific:
if android:
if android.check_pause():
android.wait_for_resume()
# Draw the screen based on the timer.
if ev.type == TIMEREVENT:
screen.fill(color)
pygame.display.flip()
# When the touchscreen is pressed, change the color to green.
elif ev.type == pygame.MOUSEBUTTONDOWN:
color = GREEN
# When it's released, change the color to RED.
elif ev.type == pygame.MOUSEBUTTONUP:
color = RED
# When the user hits back, ESCAPE is sent. Handle it and end
# the game.
elif ev.type == pygame.KEYDOWN and ev.key == pygame.K_ESCAPE:
break
# This isn't run on Android.
if __name__ == "__main__":
main()
|
jeremiah-c-leary/vhdl-style-guide
|
vsg/tests/iteration_scheme/test_rule_300.py
|
Python
|
gpl-3.0
| 1,279
| 0.003909
|
import os
import unittest
from vsg.rules import iteration_scheme
from vsg import vhdlFile
from vsg.tests import utils
sTestDir = os.path.dirname(__file__)
lFile, eError =vhdlFile.utils.read_vhdlfile(os.path.join(sTestDir,'rule_300_test_input.vhd'))
dIndentMap = utils.read_indent_file()
lExpected = []
lExpected.append('')
utils.read_file(os.path.join(sTestDir, 'rule_300_test_input.fixed.vhd'), lExpected)
class test_iteration_scheme_rule(unittest.TestCase):
def setUp(self):
self.oFile = vhdlFile.vhdlFile(lFile)
self.assertIsNone(eError)
self.oFile.set_indent_map(dIndentMap)
def test_rule_300(self):
oRule = iteration_scheme.rule_300()
self.assertTrue(oRule)
self.assertEqual(oRule.name, 'iteration_scheme')
        self.assertEqual(oRule.identifier, '300')
lExpected = [13, 17]
oRule.analyze(self.oFile)
self.assertEqual(lExpected, utils.extract_violation_lines_from_violation_object(oRule.violations))
def test_fix_rule_300(self):
oRule = iteration_scheme.rule_300()
oRule.fix(self.oFile)
lActual = self.oFile.get_lines()
self.assertEqual(lExpected, lActual)
oRule.analyze(self.oFile)
        self.assertEqual(oRule.violations, [])
|
anselmobd/fo2
|
src/logistica/forms.py
|
Python
|
mit
| 5,361
| 0.000187
|
from datetime import datetime, timedelta
from pprint import pprint
from django import forms
from utils.functions import shift_years
from .models import (
NfEntrada,
PosicaoCarga,
)
class NotafiscalChaveForm(forms.Form):
chave = forms.CharField(
widget=forms.TextInput())
class NotafiscalRelForm(forms.Form):
def data_ini():
return (datetime.now().replace(day=1)-timedelta(days=1)).replace(day=1)
data_de = forms.DateField(
label='Data do Faturamento: De',
initial=data_ini,
widget=forms.DateInput(attrs={'type': 'date',
'autofocus': 'autofocus'}))
data_ate = forms.DateField(
label='Até', required=False,
widget=forms.DateInput(attrs={'type': 'date'}))
uf = forms.CharField(
        label='UF', max_length=2, min_length=2, required=False,
widget=forms.TextInput(attrs={'size': 2}))
nf = forms.CharField(
label='Número da NF', required=False,
widget=forms.TextInput(attrs={'type': 'number'}))
transportadora = forms.CharField(
label='Transportadora', required=False,
help_text='Sigla da transportadora.',
        widget=forms.TextInput())
cliente = forms.CharField(
label='Cliente', required=False,
help_text='Parte do nome ou início do CNPJ.',
widget=forms.TextInput())
pedido = forms.CharField(
label='Pedido Tussor', required=False,
widget=forms.TextInput(attrs={'type': 'number'}))
ped_cliente = forms.CharField(
label='Pedido de cliente', required=False,
widget=forms.TextInput(attrs={'type': 'string'}))
CHOICES = [('N', 'Não filtra'),
('C', 'Com data de saída informada'),
('S', 'Sem data de saída')]
data_saida = forms.ChoiceField(
label='Quanto a data de saída', choices=CHOICES, initial='S')
CHOICES = [('T', 'Todos (Sim ou Não)'),
('S', 'Sim'),
('N', 'Não')]
entregue = forms.ChoiceField(
choices=CHOICES, initial='T')
CHOICES = [('N', 'Número da nota fiscal (decrescente)'),
('P', 'Número do pedido (crescente)'),
('A', 'Atraso (maior primeiro)')]
ordem = forms.ChoiceField(
label='Ordem de apresentação', choices=CHOICES, initial='A')
CHOICES = [('V', 'Apenas NF de venda e ativas (não canceladas)'),
('T', 'Totas as notas fiscais')]
listadas = forms.ChoiceField(
label='Notas listadas', choices=CHOICES, initial='V')
posicao = forms.ModelChoiceField(
label='Posição', required=False,
queryset=PosicaoCarga.objects.all().order_by('id'),
empty_label='--Todas--')
CHOICES = [('-', 'Todas'),
('a', 'Atacado'),
('v', 'Varejo'),
('o', 'Outras')]
tipo = forms.ChoiceField(
choices=CHOICES, initial='-')
por_pagina = forms.IntegerField(
label='NF por página', required=True, initial=100,
widget=forms.TextInput(attrs={'type': 'number'}))
page = forms.IntegerField(
required=False, widget=forms.HiddenInput())
def clean_uf(self):
uf = self.cleaned_data['uf'].upper()
data = self.data.copy()
data['uf'] = uf
self.data = data
return uf
def clean_data_de(self):
data_de = self.cleaned_data['data_de']
if data_de:
if data_de.year < 100:
data_de = shift_years(2000, data_de)
return data_de
class NfPosicaoForm(forms.Form):
data = forms.DateField(
label='Data de movimento da carga',
help_text='Só pode ficar vazia de posição form "Entregue ao apoio".',
initial=datetime.now(), required=False,
widget=forms.DateInput(attrs={'type': 'date',
'autofocus': 'autofocus'}))
posicao = forms.ModelChoiceField(
label='Posição', required=False,
queryset=PosicaoCarga.objects.all().order_by('id'),
initial=2, empty_label='--Todas--')
class EntradaNfForm(forms.ModelForm):
cadastro = forms.CharField(
label='CNPJ',
widget=forms.TextInput(
attrs={'size': 20, 'autofocus': 'autofocus'}))
emissor = forms.CharField(
widget=forms.TextInput(attrs={'size': 80}))
descricao = forms.CharField(
widget=forms.TextInput(attrs={'size': 80}))
transportadora = forms.CharField(
widget=forms.TextInput(attrs={'size': 60}))
motorista = forms.CharField(
widget=forms.TextInput(attrs={'size': 60}))
class EntradaNfSemXmlForm(EntradaNfForm):
class Meta:
model = NfEntrada
fields = [
'cadastro', 'numero', 'emissor', 'descricao', 'volumes',
'chegada', 'transportadora', 'motorista', 'placa',
'responsavel'
]
class ListaForm(forms.Form):
numero = forms.CharField(
label='Número da NF', required=False,
widget=forms.TextInput(attrs={
'type': 'number',
'size': 8,
'autofocus': 'autofocus',
}))
data = forms.DateField(
label='Data de chegada', required=False,
widget=forms.DateInput(attrs={'type': 'date'}))
pagina = forms.IntegerField(
required=False, widget=forms.HiddenInput())
|
hansroh/aquests
|
aquests/protocols/dns/pydns/__init__.py
|
Python
|
mit
| 2,174
| 0.00276
|
# -*- encoding: utf-8 -*-
# $Id: __init__.py,v 1.8.2.10 2012/02/03 23:04:01 customdesigned Exp $
#
# This file is part of the pydns project.
# Homepage: http://pydns.sourceforge.net
#
# This code is covered by the standard Python License. See LICENSE for details.
#
# __init__.py for DNS class.
__version__ = '2.3.6'
from . import Type,Opcode,Status,Class
from .Base import DnsRequest, DNSError
from .Lib import DnsResult
from .Base import *
from .Lib import *
Error=DNSError
from .lazy import *
Request = DnsRequest
Result = DnsResult
#
# $Log: __init__.py,v $
# Revision 1.8.2.10  2012/02/03 23:04:01  customdesigned
# Release 2.3.6
#
# Revision 1.8.2.9 2011/03/16 20:06:39 customdesigned
# Refer to explicit LICENSE file.
#
# Revision 1.8.2.8 2011/03/03 21:57:15 customdesigned
# Release 2.3.5
#
# Revision 1.8.2.7 2009/06/09 18:05:29 customdesigned
# Release 2.3.4
#
# Revision 1.8.2.6 2008/08/01 04:01:25 customdesigned
# Release 2.3.3
#
# Revision 1.8.2.5 2008/07/28 02:11:07 customdesigned
# Bump version.
#
# Revision 1.8.2.4 2008/07/28 00:17:10 customdesigned
# Randomize source ports.
#
# Revision 1.8.2.3 2008/07/24 20:10:55 customdesigned
# Randomize tid in requests, and check in response.
#
# Revision 1.8.2.2 2007/05/22 21:06:52 customdesigned
# utf-8 in __init__.py
#
# Revision 1.8.2.1 2007/05/22 20:39:20 customdesigned
# Release 2.3.1
#
# Revision 1.8 2002/05/06 06:17:49 anthonybaxter
# found that the old README file called itself release 2.2. So make
# this one 2.3...
#
# Revision 1.7 2002/05/06 06:16:15 anthonybaxter
# make some sort of reasonable version string. releasewards ho!
#
# Revision 1.6 2002/03/19 13:05:02 anthonybaxter
# converted to class based exceptions (there goes the python1.4 compatibility :)
#
# removed a quite gross use of 'eval()'.
#
# Revision 1.5 2002/03/19 12:41:33 anthonybaxter
# tabnannied and reindented everything. 4 space indent, no tabs.
# yay.
#
# Revision 1.4 2001/11/26 17:57:51 stroeder
# Added __version__
#
# Revision 1.3 2001/08/09 09:08:55 anthonybaxter
# added identifying header to top of each file
#
# Revision 1.2 2001/07/19 06:57:07 anthony
# cvs keywords added
#
#
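# A minimal usage sketch (added for illustration, not part of the original
# module). It assumes the classic pydns flow for the names re-exported above:
# discover the local resolvers, then issue a request. The hostname is only an
# example and a reachable DNS server is required.
if __name__ == "__main__":
    DiscoverNameServers()   # assumed to be re-exported via "from .Base import *"
    response = Request(name='example.com', qtype='A').req()
    print([answer['data'] for answer in response.answers])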
|
tanglei528/nova
|
nova/virt/disk/vfs/guestfs.py
|
Python
|
apache-2.0
| 8,034
| 0
|
# Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from eventlet import tpool
from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.virt.disk.vfs import api as vfs
LOG = logging.getLogger(__name__)
guestfs = None
class VFSGuestFS(vfs.VFS):
"""This class implements a VFS module that uses the libguestfs APIs
to access the disk image. The disk image is never mapped into
the host filesystem, thus avoiding any potential for symlink
attacks from the guest filesystem.
"""
def __init__(self, imgfile, imgfmt='raw', partition=None):
super(VFSGuestFS, self).__init__(imgfile, imgfmt, partition)
global guestfs
if guestfs is None:
guestfs = __import__('guestfs')
self.handle = None
def setup_os(self):
if self.partition == -1:
self.setup_os_inspect()
else:
self.setup_os_static()
def setup_os_static(self):
LOG.debug(_("Mount guest OS image %(imgfile)s partition %(part)s"),
{'imgfile': self.imgfile, 'part': str(self.partition)})
if self.partition:
self.handle.mount_options("", "/dev/sda%d" % self.partition, "/")
else:
self.handle.mount_options("", "/dev/sda", "/")
def setup_os_inspect(self):
LOG.debug(_("Inspecting guest OS image %s"), self.imgfile)
roots = self.handle.inspect_os()
if len(roots) == 0:
raise exception.NovaException(_("No operating system found in %s")
% self.imgfile)
if len(roots) != 1:
LOG.debug(_("Multi-boot OS %(roots)s") % {'roots': str(roots)})
raise exception.NovaException(
_("Multi-boot operating system found in %s") %
self.imgfile)
self.setup_os_root(roots[0])
def setup_os_root(self, root):
LOG.debug(_("Inspecting guest OS root filesystem %s"), root)
mounts = self.handle.inspect_get_mountpoints(root)
if len(mounts) == 0:
raise exception.NovaException(
_("No mount points found in %(root)s of %(imgfile)s") %
{'root': root, 'imgfile': self.imgfile})
# the root directory must be mounted first
mounts.sort(key=lambda mount: mount[0])
root_mounted = False
for mount in mounts:
LOG.debug(_("Mounting %(dev)s at %(dir)s") %
{'dev': mount[1], 'dir': mount[0]})
try:
self.handle.mount_options("", mount[1], mount[0])
root_mounted = True
except RuntimeError as e:
msg = _("Error mounting %(device)s to %(dir)s in image"
" %(imgfile)s with libguestfs (%(e)s)") % \
{'imgfile': self.imgfile, 'device': mount[1],
'dir': mount[0], 'e': e}
if root_mounted:
LOG.debug(msg)
else:
raise exception.NovaException(msg)
def setup(self):
LOG.debug(_("Setting up appliance for %(imgfile)s %(imgfmt)s") %
{'imgfile': self.imgfile, 'imgfmt': self.imgfmt})
try:
self.handle = tpool.Proxy(guestfs.GuestFS(close_on_exit=False))
except TypeError as e:
if 'close_on_exit' in str(e):
# NOTE(russellb) In case we're not using a version of
# libguestfs new enough to support the close_on_exit parameter,
# which was added in libguestfs 1.20.
self.handle = tpool.Proxy(guestfs.GuestFS())
else:
raise
try:
self.handle.add_drive_opts(self.imgfile, format=self.imgfmt)
self.handle.launch()
self.setup_os()
self.handle.aug_init("/", 0)
except RuntimeError as e:
# explicitly teardown instead of implicit close()
# to prevent orphaned VMs in cases when an implicit
# close() is not enough
self.teardown()
raise exception.NovaException(
_("Error mounting %(imgfile)s with libguestfs (%(e)s)") %
{'imgfile': self.imgfile, 'e': e})
except Exception:
# explicitly teardown instead of implicit close()
# to prevent orphaned VMs in cases when an implicit
# close() is not enough
self.teardown()
raise
def teardown(self):
LOG.debug(_("Tearing down appliance"))
try:
try:
self.handle.aug_close()
except RuntimeError as e:
LOG.warn(_("Failed to close augeas %s"), e)
try:
self.handle.shutdown()
except AttributeError:
# Older libguestfs versions haven't an explicit shutdown
pass
except RuntimeError as e:
LOG.warn(_("Failed to shutdown appliance %s"), e)
try:
self.handle.close()
except AttributeError:
# Older libguestfs versions haven't an explicit close
pass
except RuntimeError as e:
LOG.warn(_("Failed to close guest handle %s"), e)
finally:
# dereference object and implicitly close()
self.handle = None
@staticmethod
def _canonicalize_path(path):
if path[0] != '/':
return '/' + path
return path
def make_path(self, path):
LOG.debug(_("Make directory path=%s"), path)
path = self._canonicalize_path(path)
self.handle.mkdir_p(path)
def append_file(self, path, content):
LOG.debug(_("Append file path=%s"), path)
path = self._canonicalize_path(path)
self.handle.write_append(path, content)
def replace_file(self, path, content):
LOG.debug(_("Replace file path=%s"), path)
path = self._canonicalize_path(path)
self.handle.write(path, content)
def read_file(self, path):
LOG.debug(_("Read file path=%s"), path)
path = self._canonicalize_path(path)
return self.handle.read_file(path)
def has_file(self, path):
LOG.debug(_("Has file path=%s"), path)
path = self._canonicalize_path(path)
try:
self.handle.stat(path)
return True
except RuntimeError:
return False
def set_permissions(self, path, mode):
LOG.debug(_("Set permissions path=%(path)s mode=%(mode)s"),
{'path': path, 'mode': mode})
path = self._canonicalize_path(path)
self.handle.chmod(mode, path)
def set_ownership(self, path, user, group):
LOG.debug(_("Set ownership path=%(path)s "
"user=%(user)s group=%(group)s"),
{'path': path, 'user': user, 'group': group})
path = self._canonicalize_path(path)
uid = -1
gid = -1
if user is not None:
uid = int(self.handle.aug_get(
"/files/etc/passwd/" + user + "/uid"))
if group is not None:
gid = int(self.handle.aug_get(
"/files/etc/group/" + group + "/gid"))
LOG.debug(_("chown uid=%(uid)d gid=%(gid)s"),
{'uid': uid, 'gid': gid})
self.handle.chown(uid, gid, path)
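# A minimal usage sketch (added for illustration, not part of the original
# module): mount a disk image via libguestfs, read one file, then tear down.
# The image path is hypothetical and libguestfs must be available.
if __name__ == "__main__":
    vfs = VFSGuestFS('/var/lib/nova/instances/demo/disk', imgfmt='qcow2',
                     partition=-1)   # -1 asks setup_os_inspect() to find the OS
    vfs.setup()
    try:
        if vfs.has_file('/etc/hostname'):
            print(vfs.read_file('/etc/hostname'))
    finally:
        vfs.teardown()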
|
LibreTime/libretime
|
shared/tests/logging_test.py
|
Python
|
agpl-3.0
| 1,238
| 0.000808
|
from pathlib import Path
import pytest
from loguru import logger
from libretime_shared.logging import (
DEBUG,
INFO,
create_task_logger,
level_from_name,
setup_logger,
)
@pytest.mark.parametrize(
"name,level_name,level_no",
[
("error", "error", 40),
("warning", "warning", 30),
("info", "info", 20),
("debug", "debug", 10),
("trace", "trace", 5),
],
)
def test_level_from_name(name, level_name, level_no):
level = level_from_name(name)
assert level.name == level_name
assert level.no == level_no
def test_level_from_name_invalid():
with pytest.raises(ValueError):
level_from_name("invalid")
def test_setup_logger(tmp_path: Path):
log_filepath = tmp_path / "test.log"
extra_log_filepath = tmp_path / "extra.log"
setup_logger(INFO, log_filepath)
extra_logger = create_task_logger(DEBUG, extra_log_filepath, True)
logger.info("test info")
extra_logger.info("extra info")
logger.debug("test debug")
extra_logger.complete()
logger.complete()
assert len(log_filepath.read_text(encoding="utf-8").splitlines()) == 1
assert len(extra_log_filepath.read_text(encoding="utf-8").splitlines()) == 1
|
Nat1405/newer-nifty
|
setup.py
|
Python
|
mit
| 1,483
| 0.003372
|
# Based on STScI's JWST calibration pipeline.
from __future__ import print_function
import os
import subprocess
import sys
from setuptools import setup, find_packages, Extension, Command
from glob import glob
# Open the README as the package long description
readme = open('README.rst', 'r')
README_TEXT = readme.read()
readme.close()
NAME = 'Nifty4NIFS'
SCRIPTS = glob('scripts/*')
PACKAGE_DATA = {
'': ['*.dat', '*.cfg', '*.fits', '*.txt']
}
setup(
name=NAME,
version="1.0b5",
author='ncomeau',
author_email='ncomeau@gemini.edu',
description='The Gemini NIFS data reduction pipeline.',
long_description = README_TEXT,
url='http://www.gemini.edu',
license='MIT',
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: MacOS X',
'Intended Audience :: Science/Research',
'Intended Audience :: Education',
'License :: OSI Approved :: MIT License',
'Operating System :: MacOS',
'Programming Language :: Python :: 2.7',
'Topic :: Scientific/Engineering :: Astronomy',
'Topic :: Scientific/Engineering :: Physics',
],
keywords='Gemini NIFS nifs pipeline reduction data IRAF iraf PYRAF pyraf astronomy integral field spectroscopy ifs ifu',
python_requires='~=2.7',
scripts=SCRIPTS, # TODO(nat): Update this to use entry_points instead of scripts for better cross-platform performance
packages=find_packages(),
package_data=PACKAGE_DATA
)
|
bezhermoso/home
|
lib/ansible/runner/action_plugins/assemble.py
|
Python
|
gpl-3.0
| 4,340
| 0.002995
|
# (c) 2013, Michael DeHaan <michael.dehaan@gmail.com>
# Stephen Fromm <sfromm@gmail.com>
# Brian Coca <briancoca+dev@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
import base64  # used below to decode base64-encoded content returned by slurp
import os
import os.path
import pipes
import shutil
import tempfile
from ansible import utils
from ansible.runner.return_data import ReturnData
class ActionModule(object):
TRANSFERS_FILES = True
def __init__(self, runner):
self.runner = runner
def _assemble_from_fragments(self, src_path, delimiter=None):
''' assemble a file from a directory of fragments '''
tmpfd, temp_path = tempfile.mkstemp()
tmp = os.fdopen(tmpfd,'w')
delimit_me = False
for f in sorted(os.listdir(src_path)):
fragment = "%s/%s" % (src_path, f)
if delimit_me and delimiter:
tmp.write(delimiter)
if os.path.isfile(fragment):
tmp.write(file(fragment).read())
delimit_me = True
tmp.close()
return temp_path
def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
# load up options
options = {}
if complex_args:
options.update(complex_args)
options.update(utils.parse_kv(module_args))
src = options.get('src', None)
dest = options.get('dest', None)
delimiter = options.get('delimiter', None)
remote_src = options.get('remote_src', True)
if src is None or dest is None:
result = dict(failed=True, msg="src and dest are required")
return ReturnData(conn=conn, comm_ok=False, result=result)
if remote_src:
return self.runner._execute_module(conn, tmp, 'assemble', module_args, inject=inject, complex_args=complex_args)
# Does all work assembling the file
path = self._assemble_from_fragments(src, delimiter)
pathmd5 = utils.md5s(path)
remote_md5 = self.runner._remote_md5(conn, tmp, dest)
if pathmd5 != remote_md5:
resultant = file(path).read()
if self.runner.diff:
dest_result = self.runner._execute_module(conn, tmp, 'slurp', "path=%s" % dest, inject=inject, persist_files=True)
if 'content' in dest_result.result:
dest_contents = dest_result.result['content']
if dest_result.result['encoding'] == 'base64':
dest_contents = base64.b64decode(dest_contents)
else:
raise Exception("unknown encoding, failed: %s" % dest_result.result)
xfered = self.runner._transfer_str(conn, tmp, 'src', resultant)
            # fix file permissions when the copy is done as a different user
if self.runner.sudo and self.runner.sudo_user != 'root':
self.runner._low_level_exec_command(conn, "chmod a+r %s" % xfered, tmp)
# run the copy module
            module_args = "%s src=%s dest=%s original_basename=%s" % (module_args, pipes.quote(xfered), pipes.quote(dest), pipes.quote(os.path.basename(src)))
if self.runner.noop_on_check(inject):
return ReturnData(conn=conn, comm_ok=True, result=dict(changed=True), diff=dict(before_header=dest, after_header=src, after=resultant))
else:
res = self.runner._execute_module(conn, tmp, 'copy', module_args, inject=inject)
res.diff = dict(after=resultant)
return res
else:
module_args = "%s src=%s dest=%s original_basename=%s" % (module_args, pipes.quote(xfered), pipes.quote(dest), pipes.quote(os.path.basename(src)))
return self.runner._execute_module(conn, tmp, 'file', module_args, inject=inject)
|
ahmedshabib/evergreen-gainsight-hack
|
sentiment Analyser/samr/data.py
|
Python
|
mit
| 93
| 0
|
from collections import namedtuple
Datapoint = namedtuple("Datapoint", "phrase sentiment")
|
j16sdiz/hangups
|
hangups/test/test_channel.py
|
Python
|
mit
| 2,184
| 0.000936
|
import pytest
from hangups import channel
@pytest.mark.parametrize('input_,expected', [
(b'79\n[[0,["c","98803CAAD92268E8","",8]\n]\n,[1,[{"gsid":"7tCoFHumSL-IT6BHpCaxLA"}]]\n]\n',
('98803CAAD92268E8', '7tCoFHumSL-IT6BHpCaxLA')
),
])
def test_parse_sid_response(input_, expected):
assert channel._parse_sid_response(input_) == expected
@pytest.mark.parametrize('input_,expected', [
# '€' is 3 bytes in UTF-8.
('€€'.encode()[:6], '€€'),
('€€'.encode()[:5], '€'),
('€€'.encode()[:4], '€'),
('€€'.encode()[:3], '€'),
('€€'.encode()[:2], ''),
('€€'.encode()[:1], ''),
('€€'.encode()[:0], ''),
])
def test_best_effort_decode(input_, expected):
assert channel._best_effort_decode(input_) == expected
def test_simple():
p = channel.PushDataParser()
assert list(p.get_submissions('10\n01234567893\nabc'.encode())) == [
'0123456789',
'abc',
]
def test_truncated_message():
p = channel.PushDataParser()
assert list(p.get_submissions('12\n012345678'.encode())) == []
def test_truncated_length():
p = channel.PushDataParser()
assert list(p.get_submissions('13'.encode())) == []
def test_malformed_length():
    p = channel.PushDataParser()
# TODO: could detect errors like these with some extra work
assert list(p.get_submissions('11\n0123456789\n5e\n"abc"'.encode())) == [
        '0123456789\n'
]
def test_incremental():
p = channel.PushDataParser()
assert list(p.get_submissions(''.encode())) == []
assert list(p.get_submissions('5'.encode())) == []
assert list(p.get_submissions('\n'.encode())) == []
assert list(p.get_submissions('abc'.encode())) == []
assert list(p.get_submissions('de'.encode())) == ['abcde']
assert list(p.get_submissions(''.encode())) == []
def test_unicode():
p = channel.PushDataParser()
# smile is actually 2 code units
assert list(p.get_submissions('3\na😀'.encode())) == ['a😀']
def test_split_characters():
p = channel.PushDataParser()
assert list(p.get_submissions(b'1\n\xe2\x82')) == []
assert list(p.get_submissions(b'\xac')) == ['€']
|
drpngx/tensorflow
|
tensorflow/contrib/data/python/ops/unique.py
|
Python
|
apache-2.0
| 2,748
| 0.005459
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unique element dataset transformations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.data.python.ops import contrib_op_loader # pylint: disable=unused-import
from tensorflow.contrib.data.python.ops import gen_dataset_ops
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
def unique():
"""Creates a `Dataset` from another `Dataset`, discarding duplicates.
Use this transformation to produce a dataset that contains one instance of
each unique element in the input. For example:
```python
dataset = tf.data.Dataset.from_tensor_slices([1, 37, 2, 37, 2, 1])
# Using `unique()` will drop the duplicate elements.
dataset = dataset.apply(tf.contrib.data.unique()) # ==> { 1, 37, 2 }
```
Returns:
A `Dataset` transformation function, which can be passed to
@{tf.data.Dataset.apply}.
"""
def _apply_fn(dataset):
return _UniqueDataset(dataset)
return _apply_fn
class _UniqueDataset(dataset_ops.Dataset):
"""A `Dataset` contains the unique elements from its input."""
def __init__(self, input_dataset):
"""See `unique()` for details."""
super(_UniqueDataset, self).__init__()
self._input_dataset = input_dataset
if input_dataset.output_types not in (dtypes.int32, dtypes.int64,
dtypes.string):
raise TypeError(
"`tf.contrib.data.unique()` only supports inputs with a single "
"`tf.int32`, `tf.int64`, or `tf.string` component.")
def _as_variant_tensor(self):
return gen_dataset_ops.unique_dataset(
self._input_dataset._as_variant_tensor(), # pylint: disable=protected-access
**dataset_ops.flat_structure(self))
@property
def output_classes(self):
return self._input_dataset.output_classes
@property
def output_shapes(self):
return self._input_dataset.output_shapes
@property
def output_types(self):
return self._input_dataset.output_types
|
Thuruv/pilgrim
|
blood/forms.py
|
Python
|
mit
| 537
| 0.007449
|
from django import forms
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout,Submit
from .models import Details, Feedback
from crispy_forms.bootstrap import TabHolder, Tab
from crispy_forms.bootstrap import AppendedText, PrependedText, FormActions
class AddmeForm(forms.ModelForm):
class Meta:
model = Details
exclude = ['']
"""Forms for the ``feedback_form`` app."""
class FeedbackForm(forms.ModelForm):
class Meta:
model = Feedback
fields = ('email', 'message')
|
mrknow/filmkodi
|
script.mrknow.urlresolver/lib/urlresolver9/lib/net.py
|
Python
|
apache-2.0
| 12,168
| 0.002959
|
'''
common XBMC Module
Copyright (C) 2011 t0mm0
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import random
import cookielib
import gzip
import re
import StringIO
import urllib
import urllib2
import socket
import time
import kodi
# Set Global timeout - Useful for slow connections and Putlocker.
socket.setdefaulttimeout(10)
BR_VERS = [
['%s.0' % i for i in xrange(18, 50)],
['37.0.2062.103', '37.0.2062.120', '37.0.2062.124', '38.0.2125.101', '38.0.2125.104', '38.0.2125.111', '39.0.2171.71', '39.0.2171.95', '39.0.2171.99', '40.0.2214.93', '40.0.2214.111',
'40.0.2214.115', '42.0.2311.90', '42.0.2311.135', '42.0.2311.152', '43.0.2357.81', '43.0.2357.124', '44.0.2403.155', '44.0.2403.157', '45.0.2454.101', '45.0.2454.85', '46.0.2490.71',
'46.0.2490.80', '46.0.2490.86', '47.0.2526.73', '47.0.2526.80', '48.0.2564.116', '49.0.2623.112', '50.0.2661.86'],
['11.0'],
['8.0', '9.0', '10.0', '10.6']]
WIN_VERS = ['Windows NT 10.0', 'Windows NT 7.0', 'Windows NT 6.3', 'Windows NT 6.2', 'Windows NT 6.1', 'Windows NT 6.0', 'Windows NT 5.1', 'Windows NT 5.0']
FEATURES = ['; WOW64', '; Win64; IA64', '; Win64; x64', '']
RAND_UAS = ['Mozilla/5.0 ({win_ver}{feature}; rv:{br_ver}) Gecko/20100101 Firefox/{br_ver}',
'Mozilla/5.0 ({win_ver}{feature}) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/{br_ver} Safari/537.36',
'Mozilla/5.0 ({win_ver}{feature}; Trident/7.0; rv:{br_ver}) like Gecko',
'Mozilla/5.0 (compatible; MSIE {br_ver}; {win_ver}{feature}; Trident/6.0)']
def get_ua():
try: last_gen = int(kodi.get_setting('last_ua_create'))
except: last_gen = 0
if not kodi.get_setting('current_ua') or last_gen < (time.time() - (7 * 24 * 60 * 60)):
index = random.randrange(len(RAND_UAS))
versions = {'win_ver': random.choice(WIN_VERS), 'feature': random.choice(FEATURES), 'br_ver': random.choice(BR_VERS[index])}
user_agent = RAND_UAS[index].format(**versions)
        # log_utils.log('Creating New User Agent: %s' % (user_agent), log_utils.LOGDEBUG)
kodi.set_setting('current_ua', user_agent)
kodi.set_setting('last_ua_create', str(int(time.time())))
else:
user_agent = kodi.get_setting('current_ua')
return user_agent
class Net:
'''
    This class wraps :mod:`urllib2` and provides an easy way to make http
requests while taking care of cookies, proxies, gzip compression and
character encoding.
Example::
from addon.common.net import Net
net = Net()
response = net.http_GET('http://xbmc.org')
print response.content
'''
_cj = cookielib.LWPCookieJar()
_proxy = None
_user_agent = 'Mozilla/5.0 (Windows NT 6.3; rv:36.0) Gecko/20100101 Firefox/36.0'
_http_debug = False
def __init__(self, cookie_file='', proxy='', user_agent='', http_debug=False):
'''
Kwargs:
cookie_file (str): Full path to a file to be used to load and save
cookies to.
proxy (str): Proxy setting (eg.
``'http://user:pass@example.com:1234'``)
user_agent (str): String to use as the User Agent header. If not
supplied the class will use a default user agent (chrome)
http_debug (bool): Set ``True`` to have HTTP header info written to
the XBMC log for all requests.
'''
if cookie_file:
self.set_cookies(cookie_file)
if proxy:
self.set_proxy(proxy)
if user_agent:
self.set_user_agent(user_agent)
self._http_debug = http_debug
self._update_opener()
def set_cookies(self, cookie_file):
'''
Set the cookie file and try to load cookies from it if it exists.
Args:
cookie_file (str): Full path to a file to be used to load and save
cookies to.
'''
try:
self._cj.load(cookie_file, ignore_discard=True)
self._update_opener()
return True
except:
return False
def get_cookies(self):
'''Returns A dictionary containing all cookie information by domain.'''
return self._cj._cookies
def save_cookies(self, cookie_file):
'''
Saves cookies to a file.
Args:
cookie_file (str): Full path to a file to save cookies to.
'''
self._cj.save(cookie_file, ignore_discard=True)
def set_proxy(self, proxy):
'''
Args:
proxy (str): Proxy setting (eg.
``'http://user:pass@example.com:1234'``)
'''
self._proxy = proxy
self._update_opener()
def get_proxy(self):
'''Returns string containing proxy details.'''
return self._proxy
def set_user_agent(self, user_agent):
'''
Args:
user_agent (str): String to use as the User Agent header.
'''
self._user_agent = user_agent
def get_user_agent(self):
'''Returns user agent string.'''
return self._user_agent
def _update_opener(self):
'''
Builds and installs a new opener to be used by all future calls to
:func:`urllib2.urlopen`.
'''
if self._http_debug:
http = urllib2.HTTPHandler(debuglevel=1)
else:
http = urllib2.HTTPHandler()
if self._proxy:
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self._cj),
urllib2.ProxyHandler({'http':
self._proxy}),
urllib2.HTTPBasicAuthHandler(),
http)
else:
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self._cj),
urllib2.HTTPBasicAuthHandler(),
http)
urllib2.install_opener(opener)
def http_GET(self, url, headers={}, compression=True):
'''
Perform an HTTP GET request.
Args:
url (str): The URL to GET.
Kwargs:
headers (dict): A dictionary describing any headers you would like
to add to the request. (eg. ``{'X-Test': 'testing'}``)
compression (bool): If ``True`` (default), try to use gzip
compression.
Returns:
An :class:`HttpResponse` object containing headers and other
meta-information about the page and the page content.
'''
return self._fetch(url, headers=headers, compression=compression)
def http_POST(self, url, form_data, headers={}, compression=True):
'''
Perform an HTTP POST request.
Args:
url (str): The URL to POST.
form_data (dict): A dictionary of form data to POST.
Kwargs:
headers (dict): A dictionary describing any headers you would like
to add to the request. (eg. ``{'X-Test': 'testing'}``)
compression (bool): If ``True`` (default), try to use gzip
compression.
Returns:
An :class:`HttpResponse` object containing headers and other
meta-information about the page and the page content.
'''
return self._fetch(url, form_data, headers=headers, compression=compression)
def http_HEAD(self, url, headers={}):
'''
Perform an HTTP HEAD request.
Args:
url (str): The URL to GET.
|
mlperf/training_results_v0.7
|
Fujitsu/benchmarks/resnet/implementations/implementation_open/mxnet/3rdparty/tvm/vta/python/vta/testing/simulator.py
|
Python
|
apache-2.0
| 2,565
| 0.00039
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Utilities to start simulator."""
import ctypes
import json
import tvm
from ..environment import get_env
from ..libinfo import find_libvta
def _load_sw():
"""Load software library, assuming they are simulator."""
lib_sw = find_libvta("libvta", optional=True)
if not lib_sw:
return []
try:
return [ctypes.CDLL(lib_sw[0], ctypes.RTLD_GLOBAL)]
except OSError:
return []
def _load_all():
"""Load hardware library for tsim."""
lib = _load_sw()
env = get_env()
if env.TARGET == "tsim":
lib = find_libvta("libvta_hw", optional=True)
f = tvm.get_global_func("vta.tsim.init")
m = tvm.module.load(lib[0], "vta-tsim")
f(m)
return lib
def enabled():
"""Check if simulator is enabled."""
f = tvm.get_global_func("vta.simulator.profiler_clear", True)
return f is not None
def clear_stats():
"""Clear profiler statistics."""
env = get_env()
if env.TARGET == "sim":
f = tvm.get_global_func("vta.simulator.profiler_clear", True)
else:
f = tvm.get_global_func("vta.tsim.profiler_clear", True)
if f:
f()
def stats():
"""Get profiler statistics
Returns
-------
stats : dict
Current profiler statistics
"""
env = get_env()
if env.TARGET == "sim":
x = tvm.get_global_func("vta.simulator.profiler_status")()
else:
x = tvm.get_global_func("vta.tsim.profiler_status")()
return json.loads(x)
# debug flag to skip execution.
DEBUG_SKIP_EXEC = 1
def debug_mode(flag):
"""Set debug mode
    Parameters
----------
flag : int
The debug flag, 0 means clear all flags.
"""
tvm.get_global_func("vta.simulator.profiler_debug_mode")(flag)
LIBS = _load_all()
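# A minimal usage sketch (added for illustration, not part of the original
# module): wrap a workload with clear_stats()/stats() to read the profiler
# counters. The workload itself is elided here.
if __name__ == "__main__":
    if enabled():
        clear_stats()
        # ... run a VTA kernel or simulation workload here ...
        print(stats())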
|
vespian/inventory_tool
|
inventory_tool/object/ippool.py
|
Python
|
apache-2.0
| 8,065
| 0.00062
|
#!/usr/bin/env python3
# Copyright (c) 2014 Pawel Rozlach, Brainly.com sp. z o.o.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import logging
from inventory_tool.exception import MalformedInputException, GenericException
# For Python3 < 3.3, ipaddress module is available as an extra module,
# under a different name:
try:
from ipaddress import ip_address
from ipaddress import ip_network
from ipaddress import IPv4Address
from ipaddress import IPv6Address
ipaddress_name_network = "network_address"
ipaddress_name_broadcast = "broadcast_address"
except ImportError:
from ipaddr import IPAddress as ip_address
from ipaddr import IPNetwork as ip_network
from ipaddr import IPv4Address
from ipaddr import IPv6Address
ipaddress_name_network = "network"
ipaddress_name_broadcast = "broadcast"
class IPPool:
"""IP pool representation and manipulation
This class takes care of managing ip pools available, and this includes:
- assigning and releasing IPs, both manually and automatically
- booking and canceling IPs for special use
- serialization of IP pools for storage in YAML documents
- human readable representation of ip pools
"""
__slots__ = ['_network', '_allocated', '_reserved']
def __init__(self, network, allocated=[], reserved=[]):
"""Init IPPool
Args:
network: network from which ip addresses should be allocated
            allocated: list of ip address strings that are already allocated
reserved: list of ip address strings that should not be available
for allocation.
Raises:
ValueError: ip address or network is invalid or malformed.
"""
self._network = ip_network(network)
self._allocated = [ip_address(x) for x in allocated]
self._reserved = [ip_address(x) for x in reserved]
def get_hash(self):
"""Extract data from object in a way suitable for serializing
Returns:
Method returns data necessary for re-initializing the same object in
a form suitable for serialization using YAML/JSON. Normally, this
object contains other objects which can not be easily serialized or
are not very readable after serializing.
"""
tmp = {"network": str(self._network),
"allocated": sorted([str(x) for x in self._allocated]),
"reserved": sorted([str(x) for x in self._reserved]),
}
return tmp
def allocate(self, ip=None):
"""Allocate an IP from the pool.
Method allocates next free adress from the pool if ip is None, or
marks given ip as already allocated
Args:
ip: either None or ipaddress.ip_address object
Returns:
An ip that has been allocated. In case when "ip" argument is not
none, then the object pointed by it is returned.
Raises:
MalformedInputException - user provided data is invalid
GenericException - pool has run out of free ip adresses
"""
if ip is not None:
if ip not in self._network:
msg = "Attempt to allocate IP from outside of the pool: "
msg += "{0} is not in {1}.".format(ip, self._network)
raise MalformedInputException(msg)
if ip in self._allocated:
msg = "Attempt to allocate already allocated IP: " + str(ip)
raise MalformedInputException(msg)
elif ip in self._reserved:
msg = "Attempt to allocate from reserved pool: " + str(ip)
raise MalformedInputException(msg)
else:
self._allocated.append(ip)
return ip
else:
for candidate in [x for x in self._network
if x != self._network.__getattribute__(ipaddress_name_broadcast) and
x != self._network.__getattribute__(ipaddress_name_network)]:
if candidate not in self._allocated and \
candidate not in self._reserved:
logging.info(
"IP {0} has been auto-assigned.".format(candidate))
self._allocated.append(candidate)
return candidate
msg = "The pool has run out of free ip addresses."
raise GenericException(msg)
def release(self, ip):
"""Mark given IP as free, available for allocation.
Args:
ip: ip to deallocate
Raises:
MalformedInputException: provided ip has not been alocated yet.
"""
if ip in self._allocated:
self._allocated.remove(
|
ip_address(ip))
else:
msg = "An attempt to release an ip {0} ".format(ip)
msg += "which has not been allocated yet."
raise MalformedInputException(msg)
def release_all(self):
"""Mark all ip addresses in the pool as available"""
self._allocated = []
def overlaps(self, other):
"""Check if IP pools overlap
Args:
other: ip pool to check for overlap with this pool
"""
return self._network.overlaps(other._network)
def book(self, ip):
"""Prevent IP from being allocated.
Marks given IP as reserved/unavailable for allocation.
Args:
ip: ip to book.
Raises:
MalformedInputException: ip does not belong to this pool
"""
if ip not in self._network:
msg = "IP {0} does not belong to network {1}".format(ip, self._network)
raise MalformedInputException(msg)
elif ip in self._reserved:
msg = "IP {0} has already been booked".format(ip)
raise MalformedInputException(msg)
else:
self._reserved.append(ip)
def cancel(self, ip):
"""Remove reservation of an IP address
Marks given IP as available for allocation.
Args:
ip: ip to release
Raises:
MalformedInputException: ip has not been reserved yet.
"""
if ip in self._reserved:
self._reserved.remove(ip)
else:
msg = "IP {0} has not been reserved yet".format(ip)
raise MalformedInputException(msg)
def __contains__(self, other):
"""Check if ip belongs to the pool.
Args:
other: ip, either as a string or an ipaddress.ip_address object
to check the membership for.
"""
if isinstance(other, str):
tmp = ip_address(other)
return tmp in self._network
elif isinstance(other, IPv4Address) or \
isinstance(other, IPv6Address):
return other in self._network
else:
msg = "Could not determine membership of the object {0}".format(other)
raise MalformedInputException(msg)
def __str__(self):
"""Present object in human-readable form"""
msg = "Network: {0}\n".format(self._network)
msg += "Allocated:\n"
if self._allocated:
for tmp in self._allocated:
msg += "\t- {0}\n".format(tmp)
else:
msg += "\t<None>\n"
msg += "Reserved:\n"
if self._reserved:
for tmp in self._reserved:
msg += "\t- {0}\n".format(tmp)
else:
msg += "\t<None>\n"
return msg
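# A minimal usage sketch (added for illustration, not part of the original
# module): the pool and addresses below are made up. allocate() without an
# argument hands out the next address that is neither allocated nor reserved.
if __name__ == "__main__":
    pool = IPPool("192.168.0.0/29", reserved=["192.168.0.1"])
    pool.book(ip_address("192.168.0.2"))        # keep .2 out of auto-assignment
    first = pool.allocate()                     # .1 and .2 are skipped -> .3
    pool.allocate(ip_address("192.168.0.4"))    # explicit allocation
    pool.release(first)
    print(pool.get_hash())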
|
christi3k/zulip
|
zerver/tests/test_realm.py
|
Python
|
apache-2.0
| 10,099
| 0.000594
|
from __future__ import absolute_import
from __future__ import print_function
import ujson
from django.http import HttpResponse
from mock import patch
from typing import Any, Dict, List, Text, Union
from zerver.lib.actions import (
do_change_is_admin,
do_set_realm_property,
do_deactivate_realm,
)
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.test_helpers import tornado_redirected_to_list
from zerver.models import get_realm, get_user_profile_by_email, Realm
class RealmTest(ZulipTestCase):
def assert_user_profile_cache_gets_new_name(self, email, new_realm_name):
# type: (Text, Text) -> None
user_profile = get_user_profile_by_email(email)
self.assertEqual(user_profile.realm.name, new_realm_name)
def test_do_set_realm_name_caching(self):
# type: () -> None
"""The main complicated thing about setting realm names is fighting the
cache, and we start by populating the cache for Hamlet, and we end
by checking the cache to ensure that the new value is there."""
self.example_user('hamlet')
realm = get_realm('zulip')
new_name = u'Zed You Elle Eye Pea'
do_set_realm_property(realm, 'name', new_name)
self.assertEqual(get_realm(realm.string_id).name, new_name)
self.assert_user_profile_cache_gets_new_name('hamlet@zulip.com', new_name)
def test_update_realm_name_events(self):
# type: () -> None
realm = get_realm('zulip')
new_name = u'Puliz'
events = [] # type: List[Dict[str, Any]]
with tornado_redirected_to_list(events):
do_set_realm_property(realm, 'name', new_name)
event = events[0]['event']
self.assertEqual(event, dict(
type='realm',
op='update',
property='name',
value=new_name,
))
def test_update_realm_description_events(self):
# type: () -> None
realm = get_realm('zulip')
new_description = u'zulip dev group'
events = [] # type: List[Dict[str, Any]]
with tornado_redirected_to_list(events):
do_set_realm_property(realm, 'description', new_description)
event = events[0]['event']
self.assertEqual(event, dict(
type='realm',
op='update',
property='description',
value=new_description,
))
def test_update_realm_description(self):
# type: () -> None
email = 'iago@zulip.com'
self.login(email)
realm = get_realm('zulip')
new_description = u'zulip dev group'
data = dict(description=ujson.dumps(new_description))
events = [] # type: List[Dict[str, Any]]
with tornado_redirected_to_list(events):
            result = self.client_patch('/json/realm', data)
self.assert_json_success(result)
realm = get_realm('zulip')
self.assertEqual(realm.description, new_description)
event = events[0]['event']
self.assertEqual(event, dict(
type='realm',
op='update',
            property='description',
value=new_description,
))
def test_realm_description_length(self):
# type: () -> None
new_description = u'A' * 1001
data = dict(description=ujson.dumps(new_description))
# create an admin user
email = 'iago@zulip.com'
self.login(email)
result = self.client_patch('/json/realm', data)
self.assert_json_error(result, 'Realm description is too long.')
realm = get_realm('zulip')
self.assertNotEqual(realm.description, new_description)
def test_admin_restrictions_for_changing_realm_name(self):
# type: () -> None
new_name = 'Mice will play while the cat is away'
user_profile = self.example_user('othello')
email = user_profile.email
self.login(email)
do_change_is_admin(user_profile, False)
req = dict(name=ujson.dumps(new_name))
result = self.client_patch('/json/realm', req)
self.assert_json_error(result, 'Must be a realm administrator')
def test_unauthorized_name_change(self):
# type: () -> None
data = {'full_name': 'Sir Hamlet'}
user_profile = self.example_user('hamlet')
email = user_profile.email
self.login(email)
do_set_realm_property(user_profile.realm, 'name_changes_disabled', True)
url = '/json/settings/change'
result = self.client_post(url, data)
self.assertEqual(result.status_code, 200)
# Since the setting fails silently, no message is returned
self.assert_in_response("", result)
def test_do_deactivate_realm_clears_user_realm_cache(self):
# type: () -> None
"""The main complicated thing about deactivating realm names is
updating the cache, and we start by populating the cache for
Hamlet, and we end by checking the cache to ensure that his
realm appears to be deactivated. You can make this test fail
by disabling cache.flush_realm()."""
self.example_user('hamlet')
realm = get_realm('zulip')
do_deactivate_realm(realm)
user = self.example_user('hamlet')
self.assertTrue(user.realm.deactivated)
def test_do_deactivate_realm_on_deactived_realm(self):
# type: () -> None
"""Ensure early exit is working in realm deactivation"""
realm = get_realm('zulip')
self.assertFalse(realm.deactivated)
do_deactivate_realm(realm)
self.assertTrue(realm.deactivated)
do_deactivate_realm(realm)
self.assertTrue(realm.deactivated)
def test_change_realm_default_language(self):
# type: () -> None
new_lang = "de"
realm = get_realm('zulip')
self.assertNotEqual(realm.default_language, new_lang)
# we need an admin user.
email = 'iago@zulip.com'
self.login(email)
req = dict(default_language=ujson.dumps(new_lang))
result = self.client_patch('/json/realm', req)
self.assert_json_success(result)
realm = get_realm('zulip')
self.assertEqual(realm.default_language, new_lang)
# Test to make sure that when invalid languages are passed
# as the default realm language, correct validation error is
# raised and the invalid language is not saved in db
invalid_lang = "invalid_lang"
req = dict(default_language=ujson.dumps(invalid_lang))
result = self.client_patch('/json/realm', req)
self.assert_json_error(result, "Invalid language '%s'" % (invalid_lang,))
realm = get_realm('zulip')
self.assertNotEqual(realm.default_language, invalid_lang)
class RealmAPITest(ZulipTestCase):
def setUp(self):
# type: () -> None
user_profile = self.example_user('cordelia')
email = user_profile.email
self.login(email)
do_change_is_admin(user_profile, True)
def set_up_db(self, attr, value):
# type: (str, Any) -> None
realm = get_realm('zulip')
setattr(realm, attr, value)
realm.save()
def update_with_api(self, name, value):
# type: (str, Union[Text, int, bool]) -> Realm
result = self.client_patch('/json/realm', {name: ujson.dumps(value)})
self.assert_json_success(result)
return get_realm('zulip') # refresh data
def do_test_realm_update_api(self, name):
# type: (str) -> None
"""Test updating realm properties.
If new realm properties have been added to the Realm model but the
test_values dict below has not been updated, this will raise an
assertion error.
"""
bool_tests = [False, True] # type: List[bool]
test_values = dict(
add_emoji_by_admins_only=bool_tests,
create_stream_by_admins_only=bool_tests,
default_language=[u'de', u'en'],
description=[u'Realm description', u'New description'],
email_changes_disabled=bool_tests,
invite_required=bool_tes
|
makinacorpus/django
|
django/utils/datastructures.py
|
Python
|
bsd-3-clause
| 14,882
| 0.001344
|
import copy
from django.utils import six
class MergeDict(object):
"""
A simple class for creating new "virtual" dictionaries that actually look
up values in more than one dictionary, passed in the constructor.
If a key appears in more than one of the given dictionaries, only the
first occurrence will be used.
"""
def __init__(self, *dicts):
self.dicts = dicts
def __bool__(self):
return any(self.dicts)
def __nonzero__(self):
return type(self).__bool__(self)
def __getitem__(self, key):
for dict_ in self.dicts:
try:
return dict_[key]
except KeyError:
pass
raise KeyError(key)
def __copy__(self):
return self.__class__(*self.dicts)
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
# This is used by MergeDicts of MultiValueDicts.
def getlist(self, key):
for dict_ in self.dicts:
if key in dict_:
return dict_.getlist(key)
return []
def _iteritems(self):
seen = set()
for dict_ in self.dicts:
for item in six.iteritems(dict_):
k = item[0]
if k in seen:
continue
seen.add(k)
yield item
def _iterkeys(self):
for k, v in self._iteritems():
yield k
def _itervalues(self):
for k, v in self._iteritems():
yield v
if six.PY3:
items = _iteritems
keys = _iterkeys
values = _itervalues
else:
iteritems = _iteritems
iterkeys = _iterkeys
itervalues = _itervalues
def items(self):
return list(self.iteritems())
def keys(self):
return list(self.iterkeys())
def values(self):
return list(self.itervalues())
def has_key(self, key):
for dict_ in self.dicts:
if key in dict_:
return True
return False
__contains__ = has_key
__iter__ = _iterkeys
def copy(self):
"""Returns a copy of this object."""
return self.__copy__()
def __str__(self):
'''
Returns something like
"{'key1': 'val1', 'key2': 'val2', 'key3': 'val3'}"
instead of the generic "<object meta-data>" inherited from object.
'''
return str(dict(self.items()))
def __repr__(self):
'''
Returns something like
MergeDict({'key1': 'val1', 'key2': 'val2'}, {'key3': 'val3'})
instead of generic "<object meta-data>" inherited from object.
'''
dictreprs = ', '.join(repr(d) for d in self.dicts)
return '%s(%s)' % (self.__class__.__name__, dictreprs)
class SortedDict(dict):
"""
A dictionary that keeps its keys in the order in which they're inserted.
"""
def __new__(cls, *args, **kwargs):
instance = super(SortedDict, cls).__new__(cls, *args, **kwargs)
instance.keyOrder = []
return instance
def __init__(self, data=None):
if data is None or isinstance(data, dict):
data = data or []
super(SortedDict, self).__init__(data)
self.keyOrder = list(data) if data else []
else:
super(SortedDict, self).__init__()
super_set = super(SortedDict, self).__setitem__
for key, value in data:
# Take the ordering from first key
if key not in self:
self.keyOrder.append(key)
# But override with last value in data (dict() does this)
super_set(key, value)
def __deepcopy__(self, memo):
return self.__class__([(key, copy.deepcopy(value, memo))
for key, value in self.items()])
def __copy__(self):
# The Python's default copy implementation will alter the state
# of self. The reason for this seems complex but is likely related to
# subclassing dict.
return self.copy()
def __setitem__(self, key, value):
if key not in self:
self.keyOrder.append(key)
super(SortedDict, self).__setitem__(key, value)
def __delitem__(self, key):
super(SortedDict, self).__delitem__(key)
self.keyOrder.remove(key)
def __iter__(self):
return iter(self.keyOrder)
def __reversed__(self):
return reversed(self.keyOrder)
def pop(self, k, *args):
        result = super(SortedDict, self).pop(k, *args)
try:
self.keyOrder.remove(k)
        except ValueError:
# Key wasn't in the dictionary in the first place. No problem.
pass
return result
def popitem(self):
result = super(SortedDict, self).popitem()
self.keyOrder.remove(result[0])
return result
def _iteritems(self):
for key in self.keyOrder:
yield key, self[key]
def _iterkeys(self):
for key in self.keyOrder:
yield key
def _itervalues(self):
for key in self.keyOrder:
yield self[key]
if six.PY3:
items = _iteritems
keys = _iterkeys
values = _itervalues
else:
iteritems = _iteritems
iterkeys = _iterkeys
itervalues = _itervalues
def items(self):
return [(k, self[k]) for k in self.keyOrder]
def keys(self):
return self.keyOrder[:]
def values(self):
return [self[k] for k in self.keyOrder]
def update(self, dict_):
for k, v in six.iteritems(dict_):
self[k] = v
def setdefault(self, key, default):
if key not in self:
self.keyOrder.append(key)
return super(SortedDict, self).setdefault(key, default)
def copy(self):
"""Returns a copy of this object."""
# This way of initializing the copy means it works for subclasses, too.
return self.__class__(self)
def __repr__(self):
"""
Replaces the normal dict.__repr__ with a version that returns the keys
in their sorted order.
"""
return '{%s}' % ', '.join(['%r: %r' % (k, v) for k, v in six.iteritems(self)])
def clear(self):
super(SortedDict, self).clear()
self.keyOrder = []
class MultiValueDictKeyError(KeyError):
pass
class MultiValueDict(dict):
"""
A subclass of dictionary customized to handle multiple values for the
same key.
>>> d = MultiValueDict({'name': ['Adrian', 'Simon'], 'position': ['Developer']})
>>> d['name']
'Simon'
>>> d.getlist('name')
['Adrian', 'Simon']
>>> d.getlist('doesnotexist')
[]
>>> d.getlist('doesnotexist', ['Adrian', 'Simon'])
['Adrian', 'Simon']
>>> d.get('lastname', 'nonexistent')
'nonexistent'
>>> d.setlist('lastname', ['Holovaty', 'Willison'])
This class exists to solve the irritating problem raised by cgi.parse_qs,
which returns a list for every key, even though most Web forms submit
single name-value pairs.
"""
def __init__(self, key_to_list_mapping=()):
super(MultiValueDict, self).__init__(key_to_list_mapping)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__,
super(MultiValueDict, self).__repr__())
def __getitem__(self, key):
"""
Returns the last data value for this key, or [] if it's an empty list;
raises KeyError if not found.
"""
try:
list_ = super(MultiValueDict, self).__getitem__(key)
except KeyError:
raise MultiValueDictKeyError("Key %r not found in %r" % (key, self))
try:
return list_[-1]
except IndexError:
return []
def __setitem__(self, key, value):
super(MultiValueDict, self).__setitem__(key, [value])
def __copy__(self):
return self.__class__([
(k, v[:])
for k, v in self.lists()
])
def
ekopylova/tcga-1 | python_scripts/cgc_create_tcga_workflow_task.py | Python | bsd-3-clause | 20,620 | 0.000533
#!/usr/bin/env python
#-----------------------------------------------------------------------------
# Copyright (c) 2016--, Evguenia Kopylova, Jad Kanbar, SevenBridges dev team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
"""
Create tasks for tcga-workflow-fasta-input-full-kraken-test workflow.
"""
from __future__ import print_function
import logging, yaml
import click
import sevenbridges as sb
from sevenbridges.errors import SbgError
from os.path import join, splitext, basename
from collections import OrderedDict
import re
def load_config(yaml_fp):
"""Load CGC API configuration file.
Parameters
----------
yaml_fp: str
Filepath to CGC API configuration file
Return
------
logger: logger instance
Log
"""
try:
fp = open(yaml_fp)
config = yaml.load(fp)
except:
raise SbgError('%s file missing!' % yaml_fp)
logger = logging.getLogger('log')
log_handler = logging.FileHandler(config['log_file'])
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
log_handler.setFormatter(formatter)
logger.addHandler(log_handler)
logger.setLevel(logging.DEBUG)
return logger, config
def create_task_workflow_cgc(local_mapping_fp,
all_files,
task_name,
api,
config,
logger):
"""Create CGC task for tcga_fasta_input_disease_type_workflow workflow.
Parameters
----------
local_mapping_fp: str
Filepath to master QIIME mapping file
all_files: list
TCGA file IDs
task_name: str
CGC task name
api: SevenBridges API instance
Api
config: dict
YAML configuration file
logger: logger instance
Log
Returns
-------
all_files: list
TCGA file IDs
total_size_gb: float
Total size of all TCGA files
"""
project = config['project']
# Upload local mapping file to project
try:
api.files.upload(project=project, path=local_mapping_fp)
# File already exists
except SbgError as e:
logger.error(
"Could not upload file, trying to query for it", exc_info=e)
pass
# Retrieve File object for mapping file
local_mapping_file = list(
api.files.query(
project=project, names=[basename(local_mapping_fp)]).all())
if len(local_mapping_file) > 1:
raise ValueError(
'List contains >1 files: %s' % len(local_mapping_file))
# Retrieve File objects for all bacterial and viral database files.
# We're not calling files directly by their ID because this can change,
# whereas file names are expected to stay the same.
input_index_files = list(api.files.query(
project=project,
names=['bacterial_database.idx',
'bacterial_nodes.dmp',
'bacterial_names.dmp',
'bacterial_database.kdb',
'viral_database.idx',
'viral_names.dmp',
'viral_nodes.dmp',
'viral_database.kdb']).all())
inputs = {}
inputs['qiime_mapping_file'] = local_mapping_file[0]
inputs['fasta_file_input'] = all_files
for _file in input_index_files:
name = _file.name
if name == 'bacterial_database.idx':
inputs['bacterial_database_idx'] = _file
elif name == 'bacterial_nodes.dmp':
inputs['bacterial_nodes_dmp'] = _file
elif name == 'bacterial_names.dmp':
inputs['bacterial_names_dmp'] = _file
elif name == 'bacterial_database.kdb':
inputs['bacterial_database_kdb'] = _file
elif name == 'viral_database.idx':
inputs['viral_database_idx'] = _file
elif name == 'viral_names.dmp':
inputs['viral_names_dmp'] = _file
elif name == 'viral_nodes.dmp':
inputs['viral_nodes_dmp'] = _file
elif name == 'viral_database.kdb':
inputs['viral_database_kdb'] = _file
else:
raise ValueError(
"File %s not assigned to any input argument." % name)
task_name = "workflow_%s" % task_name
my_project = api.projects.get(id = config['project'])
try:
api.tasks.create(name=task_name,
project=my_project.id,
app=config['app-workflow'],
inputs=inputs,
description=task_name)
except SbgError as e:
logger.error("Draft task was not created!", exc_info=e)
raise SbgError("Draft task was not created!")
# Initialize files array and total size
all_files = []
total_size_gb = 0.0
return all_files, total_size_gb
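# Hedged usage sketch (not part of the original script): the function above
# expects an authenticated SevenBridges API client plus the YAML config and
# logger returned by load_config(). The 'api-url' and 'token' config keys are
# illustrative assumptions, not keys confirmed by this script.
#
#     logger, config = load_config('config.yaml')
#     api = sb.Api(url=config['api-url'], token=config['token'])
#     create_task_workflow_cgc(mapping_fp, files, 'task_001', api, config, logger)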
def generate_mapping_file(mapping_fp,
all_files,
config,
total_tasks_created,
output_dp,
sampleID_count,
logger,
fasta_files_workflow):
"""Create mini mapping file based on defined sample IDs.
Parameters
----------
mapping_fp: str
Filepath to master QIIME mapping file
all_files: list
List of CGC file IDs for which to generate mini-mapping file
config: dict
YAML configuration file
total_tasks_created: int
        Number of tasks created
output_dp: str
Output directory path
sampleID_count: int
Begin naming sample IDs from this integer
logger: logger instance
Log
fasta_files_workflow: list
FASTA file names
Returns
-------
output_fp: str
Filepath to mini-mapping file
sampleID_count: int
Updated sampleID count start
all_files: list
List of updated CGC file IDs (duplicates removed)
"""
disease_type = config['disease'].split()
filename = "%s_cgc_qiime_mapping_file_%s.txt" % (
'_'.join(disease_type), total_tasks_created)
output_fp = join(output_dp, filename)
all_files_names = [file.name for file in all_files]
all_files_names_added = []
with open(output_fp, 'w') as output_f:
with open(mapping_fp) as mapping_f:
for line in mapping_f:
if line.startswith('#SampleID'):
output_f.write(line)
else:
line = line.strip().split('\t')
# FASTA file name
filename = line[4]
if filename in all_files_names:
# update sampleID count
output_f.write('s%s\t' % sampleID_count)
sampleID_count += 1
output_f.write('\t'.join(line[1:]))
output_f.write('\n')
all_files_names_added.append(filename)
files_not_added = set(all_files_names) - set(all_files_names_added)
        all_files_updated = list(all_files)
# At least one FASTA file analyzed not found in mapping file
if len(files_not_added) > 0:
logger.error(
'Following files missing in mapping file:\n')
# Check missing files are duplicates of those that have been added
files_accounted_for = 0
for _file in files_not_added:
# Remove prefix _*_ which signifies duplicate
regex = re.compile('_._')
prefix = _file[0:3]
if re.match(regex, prefix):
original_file_name = _file[3:]
if original_file_name not in fasta_files_workflow:
                    logger.error('\t%s, [status] missing file' % _file)
else:
files_accounted_for += 1
logger.info('\t%s, [status] duplicate' % _file)
# File does not have prefix _*_ which signifies it is not a
# duplicate
else:
logg
ifwe/wxpy | src/tests/wxPythonTests/testGauge.py | Python | mit | 2,410 | 0.005809
"""Unit tests for wx.Gauge.
Methods yet to test:
__init__, Create, Pulse"""
import unittest
import wx
import wxtest
import testControl
class GaugeTest(testControl.ControlTest):
def setUp(self):
self.app = wx.PySimpleApp()
self.frame = wx.Frame(parent=None)
self.testControl = wx.Gauge(parent=self.frame)
# C++ docs state:
# This method is not implemented (returns 0) for most platforms.
def testBezelFace(self):
"""SetBezelFace, GetBezelFace"""
if wxtest.PlatformIsMac() or wxtest.PlatformIsGtk() or \
wxtest.PlatformIsWindows():
for i in range(self.testControl.GetRange()):
self.testControl.SetBezelFace(i)
self.assertEquals(0, self.testControl.GetBezelFace())
else:
# this can't happen.
# TODO: what platforms does it work on?
raise EnvironmentError("Current platform not represented in wxtest")
def testIsVertical(self):
"""IsVertical"""
vert = wx.Gauge(self.frame, style=wx.GA_VERTICAL)
horiz = wx.Gauge(self.frame, style=wx.GA_HORIZONTAL)
self.assert_(not self.testControl.IsVertical()) # default
self.assert_(vert.IsVertical())
self.assert_(not horiz.IsVertical())
def testRange(self):
"""SetRange, GetRange"""
for i in range(0,1000,10):
self.testControl.SetRange(i)
self.assertEquals(i, self.testControl.GetRange())
|
# C++ docs state:
# This method is not implemented (returns 0) for most platforms.
    def testShadowWidth(self):
        """SetShadowWidth, GetShadowWidth"""
if wxtest.PlatformIsMac() or wxtest.PlatformIsGtk() or \
wxtest.PlatformIsWindows():
for i in range(self.testControl.GetRange()):
self.testControl.SetShadowWidth(i)
self.assertEquals(0, self.testControl.GetShadowWidth())
else:
# this can't happen.
# TODO: what platforms does it work on?
raise EnvironmentError("Current platform not represented in wxtest")
def testValue(self):
"""SetValue, GetValue"""
for i in range(self.testControl.GetRange()):
self.testControl.SetValue(i)
self.assertEquals(i, self.testControl.GetValue())
if __name__ == '__main__':
unittest.main()
flaviovdf/tag_assess | src/scripts/PrecisionRecall.py | Python | bsd-3-clause | 4,909 | 0.010593
#!/usr/bin/env python
# -*- encoding: utf-8
from __future__ import division, print_function
from tagassess.dao.helpers import FilteredUserItemAnnotations
from tagassess.dao.pytables.annotations import AnnotReader
from tagassess.index_creator import create_occurrence_index
from tagassess.probability_estimates.precomputed import PrecomputedEstimator
import os
import plac
import sys
def get_baselines(annot_filter, reader, user_to_tags):
annotations = annot_filter.annotations(reader.iterate())
user_to_item = create_occurrence_index(annotations, 'user', 'item')
annotations = annot_filter.annotations(reader.iterate())
item_to_tags = create_occurrence_index(annotations, 'item', 'tag')
overlap = {}
for user in user_to_tags:
for item in user_to_item:
for tag in item_to_tags[item]:
if (user, tag) not in overlap:
overlap[user, tag] = 0
if tag not in user_to_tags[user]:
overlap[user, tag] += 1
idf = {}
annotations = annot_filter.annotations(reader.iterate())
for annot in annotations:
tag = annot['tag']
if tag not in idf:
idf[tag] = 0
idf[tag] += 1
for tag in idf.keys():
idf[tag] = 1.0 / idf[tag]
return idf, overlap
def run_exp(user_validation_tags, user_test_tags, user_test_items, est,
annot_filter, reader):
user_to_tags = {}
for user in est.get_valid_users():
#Remove validation tags. The script focuses on test tags
tags_to_compute = []
tags = est.tags_for_user(user)
for tag in tags:
if tag not in user_validation_tags[user]:
tags_to_compute.append(tag)
user_to_tags[user] = tags_to_compute
annotations = annot_filter.annotations(reader.iterate())
tag_to_items = create_occurrence_index(annotations, 'tag', 'item')
# item_to_tags = create_occurrence_index(annotations, 'item', 'tag')
print('#user', 'tag', 'precision', 'recall', 'hidden')
for user in est.get_valid_users():
tags = user_to_tags[user]
for tag in tags:
hidden = tag in user_test_tags[user]
relevant = user_test_items[user]
retrieved = tag_to_items[tag]
intersect = retrieved.intersection(relevant)
precision = len(intersect) / len(retrieved)
recall = len(intersect) / len(relevant)
# tags_for_relevant = set()
# for item in relevant:
# tags_for_relevant.update(item_to_tags[item])
print(user, tag, precision, recall, hidden)
def load_dict_from_file(fpath):
'''Loads dictionary from file'''
return_val = {}
with open(fpath) as in_file:
for line in in_file:
spl = line.split('-')
key = int(spl[0].strip())
value = set(int(x.strip()) for x in spl[1].split())
return_val[key] = value
return return_val
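# Clarifying note on the assumed input format: each line is expected to look
# like "<key> - <id> <id> ...", for example a line such as
#     7 - 101 102 105
# parses to {7: {101, 102, 105}}.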
def load_train_test_validation(cross_val_folder):
'''Loads cross validation dictionaries used for the experiment'''
filter_fpath = os.path.join(cross_val_folder, 'user_item_filter.dat')
user_items_to_filter = load_dict_from_file(filter_fpath)
val_tags_fpath = os.path.join(cross_val_folder, 'user_val_tags.dat')
user_validation_tags = load_dict_from_file(val_tags_fpath)
test_tags_fpath = os.path.join(cross_val_folder, 'user_test_tags.dat')
user_test_tags = load_dict_from_file(test_tags_fpath)
test_items_fpath = os.path.join(cross_val_folder, 'user_test_items.dat')
user_test_items = load_dict_from_file(test_items_fpath)
return user_items_to_filter, user_validation_tags, user_test_tags, \
user_test_items
@plac.annotations(
db_fpath = plac.Annotation('H5 database file', type=str),
db_name = plac.Annotation('H5 database name', type=str),
cross_val_folder = plac.Annotation('Folder with cross validation files',
type=str),
probs_folder = plac.Annotation('Probabilities Folder', type=str))
def main(db_fpath, db_name, cross_val_folder, probs_folder):
#get cross validation dicts
user_items_to_filter, user_validation_tags, user_test_tags, \
user_test_items = load_train_test_validation(cross_val_folder)
with AnnotReader(db_fpath) as reader:
reader.change_table(db_name)
annot_filter = FilteredUserItemAnnotations(user_items_to_filter)
est = PrecomputedEstimator(probs_folder)
run_exp(user_validation_tags, user_test_tags, user_test_items, est,
annot_filter, reader)
if __name__ == '__main__':
sys.exit(plac.call(main))
1flow/1flow | oneflow/core/migrations/0101_auto__add_historicalarticle.py | Python | agpl-3.0 | 62,707 | 0.007926
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'HistoricalArticle'
db.create_table(u'core_historicalarticle', (
(u'baseitem_ptr_id', self.gf('django.db.models.fields.IntegerField')(db_index=True, null=True, blank=True)),
(u'id', self.gf('django.db.models.fields.IntegerField')(db_index=True, blank=True)),
('polymorphic_ctype_id', self.gf('django.db.models.fields.IntegerField')(db_index=True, null=True, blank=True)),
('duplicate_of_id', self.gf('django.db.models.fields.IntegerField')(db_index=True, null=True, blank=True)),
('duplicate_status', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
('language_id', self.gf('django.db.models.fields.IntegerField')(db_index=True, null=True, blank=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=1024)),
('slug', self.gf('django.db.models.fields.CharField')(max_length=1024, null=True, blank=True)),
('user_id', self.gf('django.db.models.fields.IntegerField')(db_index=True, null=True, blank=True)),
('is_restricted', self.gf('django.db.models.fields.BooleanField')(default=False)),
('date_created', self.gf('django.db.models.fields.DateTimeField')(db_index=True, blank=True)),
('date_updated', self.gf('django.db.models.fields.DateTimeField')(blank=True)),
('date_published', self.gf('django.db.models.fields.DateTimeField')(db_index=True, null=True, blank=True)),
('default_rating', self.gf('django.db.models.fields.FloatField')(default=0.0, blank=True)),
('text_direction', self.gf('django.db.models.fields.CharField')(default=u'ltr', max_length=3, db_index=True)),
('origin', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
('url', self.gf('django.db.models.fields.URLField')(max_length=512, db_index=True)),
('comments_feed_url', self.gf('django.db.models.fields.URLField')(max_length=512, null=True, blank=True)),
('url_absolute', self.gf('django.db.models.fields.BooleanField')(default=False)),
('url_error', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('is_orphaned', self.gf('django.db.models.fields.BooleanField')(default=False)),
('image_url', self.gf('django.db.models.fields.URLField')(max_length=200, null=True, blank=True)),
('excerpt', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('content', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('content_type', self.gf('django.db.models.fields.IntegerField')(db_index=True, null=True, blank=True)),
('content_error', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('word_count', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
(u'history_id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
(u'history_date', self.gf('django.db.models.fields.DateTimeField')()),
(u'history_user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['base.User'], null=True)),
(u'history_type', self.gf('django.db.models.fields.CharField')(max_length=1)),
))
db.send_create_signal(u'core', ['HistoricalArticle'])
def backwards(self, orm):
# Deleting model 'HistoricalArticle'
db.delete_table(u'core_historicalarticle')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'base.user': {
'Meta': {'object_name': 'User'},
'address_book': ('json_field.fields.JSONField', [], {'default': '[]', 'blank': 'True'}),
'avatar': ('django.db.models.fields.files.ImageField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'avatar_url': ('django.db.models.fields.URLField', [], {'max_length': '384', 'null': 'True', 'blank': 'True'}),
'data': ('jsonfield.fields.JSONField', [], {'default': '{}', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '254', 'db_index': 'True'}),
'email_announcements': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
'hash_codes': ('jsonfield.fields.JSONField', [], {'default': "{'unsubscribe': '3a9b390dc65a4381a1adb78af170cdcd'}", 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'register_data': ('jsonfield.fields.JSONField', [], {'default': '{}', 'blank': 'True'}),
'sent_emails': ('jsonfield.fields.JSONField', [], {'default': '{}', 'blank': 'True'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '254', 'db_index': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'core.article': {
'Meta': {'object_name': 'Article', '_ormbases': ['core.BaseItem']},
u'baseitem_ptr': ('django.db.models.fields.related.OneToOneField'
samuelcolvin/pydantic | pydantic/color.py | Python | mit | 16,607 | 0.001505
"""
Color definitions are used as per CSS3 specification:
http://www.w3.org/TR/css3-color/#svg-color
A few colors have multiple names referring to the same color, e.g. `grey` and `gray` or `aqua` and `cyan`.
In these cases the LAST color when sorted alphabetically takes precedence,
e.g. Color((0, 255, 255)).as_named() == 'cyan' because "cyan" comes after "aqua".
"""
import math
import re
from colorsys import hls_to_rgb, rgb_to_hls
from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple, Union, cast
from .errors import ColorError
from .utils import Representation, almost_equal_floats
if TYPE_CHECKING:
from .typing import CallableGenerator, ReprArgs
ColorTuple = Union[Tuple[int, int, int], Tuple[int, int, int, float]]
ColorType = Union[ColorTuple, str]
HslColorTuple = Union[Tuple[float, float, float], Tuple[float, float, float, float]]
class RGBA:
"""
Internal use only as a representation of a color.
"""
__slots__ = 'r', 'g', 'b', 'alpha', '_tuple'
def __init__(self, r: float, g: float, b: float, alpha: Optional[float]):
self.r = r
self.g = g
self.b = b
self.alpha = alpha
self._tuple: Tuple[float, float, float, Optional[float]] = (r, g, b, alpha)
def __getitem__(self, item: Any) -> Any:
return self._tuple[item]
# these are not compiled here to avoid import slowdown, they'll be compiled the first time they're used, then cached
r_hex_short = r'\s*(?:#|0x)?([0-9a-f])([0-9a-f])([0-9a-f])([0-9a-f])?\s*'
r_hex_long = r'\s*(?:#|0x)?([0-9a-f]{2})([0-9a-f]{2})([0-9a-f]{2})([0-9a-f]{2})?\s*'
_r_255 = r'(\d{1,3}(?:\.\d+)?)'
_r_comma = r'\s*,\s*'
r_rgb = fr'\s*rgb\(\s*{_r_255}{_r_comma}{_r_255}{_r_comma}{_r_255}\)\s*'
_r_alpha = r'(\d(?:\.\d+)?|\.\d+|\d{1,2}%)'
r_rgba = fr'\s*rgba\(\s*{_r_255}{_r_comma}{_r_255}{_r_comma}{_r_255}{_r_comma}{_r_alpha}\s*\)\s*'
_r_h = r'(-?\d+(?:\.\d+)?|-?\.\d+)(deg|rad|turn)?'
_r_sl = r'(\d{1,3}(?:\.\d+)?)%'
r_hsl = fr'\s*hsl\(\s*{_r_h}{_r_comma}{_r_sl}{_r_comma}{_r_sl}\s*\)\s*'
r_hsla = fr'\s*hsl\(\s*{_r_h}{_r_comma}{_r_sl}{_r_comma}{_r_sl}{_r_comma}{_r_alpha}\s*\)\s*'
# colors where the two hex characters are the same, if all colors match this the short version of hex colors can be used
repeat_colors = {int(c * 2, 16) for c in '0123456789abcdef'}
rads = 2 * math.pi
class Color(Representation):
__slots__ = '_original', '_rgba'
def __init__(self, value: ColorType) -> None:
self._rgba: RGBA
self._original: ColorType
if isinstance(value, (tuple, list)):
self._rgba = parse_tuple(value)
elif isinstance(value, str):
self._rgba = parse_str(value)
elif isinstance(value, Color):
self._rgba = value._rgba
value = value._original
else:
raise ColorError(reason='value must be a tuple, list or string')
# if we've got here value must be a valid color
self._original = value
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
field_schema.update(type='string', format='color')
def original(self) -> ColorType:
"""
Original value passed to Color
"""
return self._original
def as_named(self, *, fallback: bool = False) -> str:
if self._rgba.alpha is None:
rgb = cast(Tuple[int, int, int], self.as_rgb_tuple())
try:
return COLORS_BY_VALUE[rgb]
except KeyError as e:
if fallback:
return self.as_hex()
else:
raise ValueError('no named color found, use fallback=True, as_hex() or as_rgb()') from e
else:
return self.as_hex()
def as_hex(self) -> str:
"""
        Hex string representing the color; can be 3, 4, 6 or 8 characters depending on whether
        a "short" representation of the color is possible and whether there's an alpha channel.
"""
values = [float_to_255(c) for c in self._rgba[:3]]
if self._rgba.alpha is not None:
values.append(float_to_255(self._rgba.alpha))
as_hex = ''.join(f'{v:02x}' for v in values)
if all(c in repeat_colors for c in values):
as_hex = ''.join(as_hex[c] for c in range(0, len(as_hex), 2))
return '#' + as_hex
def as_rgb(self) -> str:
"""
Color as an rgb(<r>, <g>, <b>) or rgba(<r>, <g>, <b>, <a>) string.
"""
if self._rgba.alpha is None:
return f'rgb({float_to_255(self._rgba.r)}, {float_to_255(self._rgba.g)}, {float_to_255(self._rgba.b)})'
else:
return (
f'rgba({float_to_255(self._rgba.r)}, {float_to_255(self._rgba.g)}, {float_to_255(self._rgba.b)}, '
f'{round(self._alpha_float(), 2)})'
)
def as_rgb_tuple(self, *, alpha: Optional[bool] = None) -> ColorTuple:
"""
Color as an RGB or RGBA tuple; red, green and blue are in the range 0 to 255, alpha if included is
in the range 0 to 1.
:param alpha: whether to include the alpha channel, options are
None - (default) include alpha only if it's set (e.g. not None)
True - always include alpha,
False - always omit alpha,
"""
r, g, b = [float_to_255(c) for c in self._rgba[:3]]
if alpha is None:
if self._rgba.alpha is None:
return r, g, b
else:
return r, g, b, self._alpha_float()
elif alpha:
return r, g, b, self._alpha_float()
else:
# alpha is False
return r, g, b
def as_hsl(self) -> str:
"""
Color as an hsl(<h>, <s>, <l>) or hsl(<h>, <s>, <l>, <a>) string.
"""
if self._rgba.alpha is None:
h, s, li = self.as_hsl_tuple(alpha=False) # type: ignore
return f'hsl({h * 360:0.0f}, {s:0.0%}, {li:0.0%})'
else:
h, s, li, a = self.as_hsl_tuple(alpha=True) # type: ignore
return f'hsl({h * 360:0.0f}, {s:0.0%}, {li:0.0%}, {round(a, 2)})'
def as_hsl_tuple(self, *, alpha: Optional[bool] = None) -> HslColorTuple:
"""
Color as an HSL or HSLA tuple, e.g. hue, saturation, lightness and optionally alpha; all elements are in
        the range 0 to 1.
NOTE: this is HSL as used in HTML and most other places, not HLS as used in python's colorsys.
:param alpha: whether to include the alpha channel, options are
None - (default) include alpha only if it's set (e.g. not None)
True - always include alpha,
False - always omit alpha,
"""
h, l, s = rgb_to_hls(self._rgba.r, self._rgba.g, self._rgba.b)
if alpha is None:
if self._rgba.alpha is None:
return h, s, l
else:
return h, s, l, self._alpha_float()
if alpha:
return h, s, l, self._alpha_float()
else:
# alpha is False
return h, s, l
def _alpha_float(self) -> float:
return 1 if self._rgba.alpha is None else self._rgba.alpha
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
yield cls
def __str__(self) -> str:
return self.as_named(fallback=True)
def __repr_args__(self) -> 'ReprArgs':
return [(None, self.as_named(fallback=True))] + [('rgb', self.as_rgb_tuple())] # type: ignore
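# Illustrative usage sketch (not part of the original module), consistent with
# the behaviour documented above:
#
#     Color((0, 255, 255)).as_named()    # 'cyan' (see module docstring)
#     Color('#ff0000').as_rgb_tuple()    # (255, 0, 0)
#     Color((255, 0, 0)).as_hex()        # '#f00' -- short form, every channel repeats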
def parse_tuple(value: Tuple[Any, ...]) -> RGBA:
"""
Parse a tuple or list as a color.
"""
if len(value) == 3:
r, g, b = [parse_color_value(v) for v in value]
return RGBA(r, g, b, None)
elif len(value) == 4:
r, g, b = [parse_color_value(v) for v in value[:3]]
return RGBA(r, g, b, parse_float_alpha(value[3]))
else:
raise ColorError(reason='tuples must have length 3 or 4')
def parse_str(value: str) -> RGBA:
"""
Parse a string to an RGBA tuple, trying the following formats (in this order):
* named color, see COLORS_BY_NAME below
* hex short eg. `<prefix>fff` (prefix can be `#`, `0x`
keyvank/pyglare | pyglare/image/frame.py | Python | mit | 343 | 0.061224
class Frame:
def __init__(self,width,height,color):
self.width = width
self.height = height
self.data = []
for h in range(height):
row = []
for w in range(width):
				row.append(color)
			self.data.append(row)
def clear(self,color):
for h in range(self.height):
for w in range(self.width):
				self.data[h][w] = color
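# Hedged usage sketch (not part of the original file): Frame stores a
# height x width grid of whatever color value it is given; the color type is
# not defined in this module.
#
#     frame = Frame(4, 3, (0, 0, 0))    # 3 rows of 4 entries, all "black"
#     frame.clear((255, 255, 255))      # overwrite every entry in place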
gem/oq-hazardlib | openquake/hazardlib/const.py | Python | agpl-3.0 | 4,670 | 0.000428
# The Hazard Library
# Copyright (C) 2012-2017 GEM Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Module :mod:`openquake.hazardlib.const` defines various constants.
"""
class ConstantContainer(object):
"""
Class that doesn't support instantiation.
>>> ConstantContainer()
Traceback (most recent call last):
...
AssertionError: do not create objects ConstantContainer, \
use class properties instead
"""
def __init__(self):
raise AssertionError('do not create objects %s, '
'use class properties instead'
% type(self).__name__)
class TRT(ConstantContainer):
"""
Container for constants that define some of the common Tectonic Region
Types.
"""
# Constant values correspond to the NRML schema definition.
ACTIVE_SHALLOW_CRUST = 'Active Shallow Crust'
STABLE_CONTINENTAL = 'Stable Shallow Crust'
SUBDUCTION_INTERFACE = 'Subduction Interface'
SUBDUCTION_INTRASLAB = 'Subduction IntraSlab'
VOLCANIC = 'Volcanic'
GEOTHERMAL = 'Geothermal'
INDUCED = 'Induced'
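    # Illustrative note (not part of the original module): the values above are
    # plain class attributes, read without instantiating TRT, e.g.
    #     TRT.ACTIVE_SHALLOW_CRUST    # 'Active Shallow Crust'
    # instantiation is deliberately blocked by ConstantContainer.__init__.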
class IMC(ConstantContainer):
"""
The intensity measure component is the component of interest
of ground shaking for an
:mod:`intensity measure <openquake.hazardlib.imt>`.
"""
#: The horizontal component.
HORIZONTAL = 'Horizontal'
#: The median horizontal component.
MEDIAN_HORIZONTAL = 'Median horizontal'
#: Usually defined as the geometric average of the maximum
#: of the two horizontal components (which may not occur
#: at the same time).
AVERAGE_HORIZONTAL = 'Average horizontal'
#: An orientation-independent alternative to :attr:`AVERAGE_HORIZONTAL`.
#: Defined at Boore et al. (2006, Bull. Seism. Soc. Am. 96, 1502-1511)
#: and is used for all the NGA GMPEs.
GMRotI50 = 'Average Horizontal (GMRotI50)'
#: The geometric mean of the records rotated into the most adverse
#: direction for the structure.
GMRotD100 = "Average Horizontal (GMRotD100)"
#: An orientation-independent alternative to :attr:`AVERAGE_HORIZONTAL`.
#: Defined at Boore et al. (2006, Bull. Seism. Soc. Am. 96, 1502-1511)
#: and is used for all the NGA GMPEs.
RotD50 = 'Average Horizontal (RotD50)'
#:
RotD100 = 'Horizontal Maximum Direction (RotD100)'
#: A randomly chosen horizontal component.
RANDOM_HORIZONTAL = 'Random horizontal'
#: The largest value obtained from two perpendicular horizontal
#: components.
GREATER_OF_TWO_HORIZONTAL = 'Greater of two horizontal'
#: The vertical component.
VERTICAL = 'Vertical'
#: "Vectorial addition: a_V = sqrt(max|a_1(t)|^2 + max|a_2(t)|^2)).
#: This means that the maximum ground amplitudes occur simultaneously on
#: the two horizontal components; this is a conservative assumption."
#: p. 53 of Douglas (2003, Earth-Sci. Rev. 61, 43-104)
VECTORIAL = 'Square root of sum of squares of peak horizontals'
#: "the peak square root of the sum of squares of two orthogonal
#: horizontal components in the time domain"
#: p. 880 of Kanno et al. (2006, Bull. Seism. Soc. Am. 96, 879-897)
PEAK_SRSS_HORIZONTAL = 'Peak square root of sum of squares of horizontals'
class StdDev(ConstantContainer):
"""
GSIM standard deviation represents ground shaking variability at a site.
"""
#: Standard deviation representing ground shaking variability
#: within different events.
    INTER_EVENT = 'Inter event'
#: Standard deviation representing ground shaking variability
#: within a single event.
INTRA_EVENT = 'Intra event'
#: Total standard deviation, defined as the square root of the sum
#: of inter- and intra-event squared standard deviations, represents
#: the total ground shaking variability, and is the only one that
#: is used for calculating a probability of intensity exceedance
#: (see
    #: :meth:`openquake.hazardlib.gsim.base.GroundShakingIntensityModel.get_poes`).
TOTAL = 'Total'
toomastahves/math-pg | pkmkt2_code/task6.py | Python | unlicense | 625 | 0.0096
from sympy import symbols, diff, N, Matrix
import numpy as np
from task4 import get_euler_dt
X1, X2, X3 = symbols('X1 X2 X3')
def get_vorticity_tensor(eq1, eq2, eq3):
vkl = get_euler_dt(eq1, eq2, eq3)
wkl = 0.5*(vkl - np.transpose(vkl))
return N(Matrix(wkl), 2)
def get_vorticity_components(eq1, eq2, eq3):
    wkl = get_vorticity_tensor(eq1, eq2, eq3) # Tuple, indexes from 0 to 8
w1 = wkl[7] - wkl[5]
w2 = wkl[6] - wkl[2]
w3 = wkl[3] - wkl[1]
return [w1, w2, w3]
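# Clarifying note (added): get_vorticity_tensor returns W = 1/2 * (L - L^T),
# where L is the velocity gradient from get_euler_dt; the flattened 3x3 Matrix
# is indexed row-major, so the components above are
# w1 = W[2,1] - W[1,2], w2 = W[2,0] - W[0,2], w3 = W[1,0] - W[0,1].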
#from testdata import eq1, eq2, eq3
#print(get_vorticity_tensor(eq1, eq2, eq3))
#print(get_vorticity_components(eq1, eq2, eq3))