| Column | Type | Range |
|---|---|---|
| repo_name | string | 5–100 chars |
| path | string | 4–231 chars |
| language | string | 1 class |
| license | string | 15 classes |
| size | int64 | 6–947k |
| score | float64 | 0–0.34 |
| prefix | string | 0–8.16k chars |
| middle | string | 3–512 chars |
| suffix | string | 0–8.17k chars |

Each record below is shown as one metadata row (repo_name | path | language | license | size | score) followed by the file text, with the prefix, middle, and suffix cells joined back together.
| browning/shows | shows/manage.py | Python | mit | 254 | 0 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "shows.settings.local")
    from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
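For context, execute_from_command_line dispatches on the argv list it receives, so this file behaves like any stock Django manage.py. A minimal equivalent call, assuming the same settings module is importable:

import os
from django.core.management import execute_from_command_line
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "shows.settings.local")
execute_from_command_line(['manage.py', 'check'])  # same effect as: python manage.py check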
| Peter-Lavigne/Project-Euler | p081.py | Python | mit | 699 | 0 |
# https://projecteuler.net/problem=81
from projecteuler.FileReader import file_to_2D_array_of_ints
# this problem uses a similar solution to problem 18, "Maximum Path Sum 1."
# this problem uses a diamond instead of a pyramid
matrix = file_to_2D_array_of_ints("p081.txt", ",")
y_max = len(matrix) - 1
x_max = len(matrix[0]) - 1
for y in range(y_max, -1, -1):
for x in range(x_max, -1, -1):
if y == y_max and x == x_max:
continue
elif y == y_max:
matrix[y][x] += matrix[y][x + 1]
elif x == x_max:
            matrix[y][x] += matrix[y + 1][x]
else:
matrix[y][x] += min(matrix[y][x + 1], matrix[y + 1][x])
print(matrix[0][0])
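A worked trace of the same recurrence, using a hypothetical 2x2 matrix in place of the p081.txt data:

matrix = [[1, 3],
          [2, 1]]
# Working backwards from the bottom-right corner:
#   matrix[1][0] = 2 + 1 = 3    (last row: can only move right)
#   matrix[0][1] = 3 + 1 = 4    (last column: can only move down)
#   matrix[0][0] = 1 + min(4, 3) = 4
# so the minimal top-left to bottom-right path sum is 4 (1 -> 2 -> 1).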
| shubham0d/SmartClass | SmartClass/userhome/migrations/0003_auto_20161015_0037.py | Python | gpl-3.0 | 584 | 0 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('userhome', '0002_auto_20161014_2208'),
]
operations = [
migrations.AddField(
model_name='notice',
name='Path',
field=models.CharField(max_length=50, null=True),
),
migrations.AddField(
model_name='notice',
name='Topic',
field=models.CharField(default=b'New Update', max_length=100),
),
]
| blakehawkins/SheetSync | tests/test_backup.py | Python | mit | 1,701 | 0.014109 |
# -*- coding: utf-8 -*-
"""
Test the "backup" function, which saves sheet data to file.
"""
import sheetsync
import time, os
CLIENT_ID = os.environ['SHEETSYNC_CLIENT_ID']
CLIENT_SECRET = os.environ['SHEETSYNC_CLIENT_SECRET']
TESTS_FOLDER_KEY = os.environ.get("SHEETSYNC_FOLDER_KEY")
SHEET_TO_BE_BACKED_UP = "1-HpLBDvGS5V8pIXR9GSqseeciWyy41I1uyLhzyAjPq4"
def test_backup():
print ('setup_function: Retrieve OAuth2.0 credentials.')
creds = sheetsync.ia_credentials_helper(CLIENT_ID, CLIENT_SECRET,
credentials_cache_file='credentials.json',
cache_key='default')
print ('Open a spreadsheet, back it up to named file.')
target = sheetsync.Sheet(credentials=creds,
document_key = SHEET_TO_BE_BACKED_UP,
worksheet_name = 'Simpsons',
key_column_headers = ['Character'],
header_row_ix=1)
backup_name = 'backup test: %s' % int(time.time())
backup_key = target.backup(backup_name, folder_name="sheetsync backups")
backup_sheet = sheetsync.Sheet(credentials=creds,
document_key = backup_key,
worksheet_name = 'Simpsons',
key_column_headers = ['Character'],
header_row_ix=1)
backup_data = backup_sheet.data()
assert "Bart Simpson" in backup_data
assert backup_data["Bart Simpson"]["Voice actor"] == "Nancy Cartwright"
print ('teardown_function Delete test spreadsheet')
    backup_sheet.drive_service.files().delete(fileId=backup_sheet.document_key).execute()
| shakamunyi/neutron-vrrp | neutron/tests/unit/openvswitch/test_ovs_neutron_agent.py | Python | apache-2.0 | 71,665 | 0.000251 |
# Copyright (c) 2012 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import sys
import mock
import netaddr
from oslo.config import cfg
import testtools
from neutron.agent.linux import async_process
from neutron.agent.linux import ip_lib
from neutron.agent.linux import ovs_lib
from neutron.agent.linux import utils
from neutron.common import constants as n_const
from neutron.openstack.common import log
from neutron.plugins.common import constants as p_const
from neutron.plugins.openvswitch.agent import ovs_neutron_agent
from neutron.plugins.openvswitch.common import constants
from neutron.tests import base
NOTIFIER = ('neutron.plugins.openvswitch.'
'ovs_neutron_plugin.AgentNotifierApi')
OVS_LINUX_KERN_VERS_WITHOUT_VXLAN = "3.12.0"
FAKE_MAC = '00:11:22:33:44:55'
FAKE_IP1 = '10.0.0.1'
FAKE_IP2 = '10.0.0.2'
class CreateAgentConfigMap(base.BaseTestCase):
def test_create_agent_config_map_succeeds(self):
self.assertTrue(ovs_neutron_agent.create_agent_config_map(cfg.CONF))
def test_create_agent_config_map_fails_for_invalid_tunnel_config(self):
# An ip address is required for tunneling but there is no default,
# verify this for both gre and vxlan tunnels.
cfg.CONF.set_override('tunnel_types', [p_const.TYPE_GRE],
group='AGENT')
with testtools.ExpectedException(ValueError):
ovs_neutron_agent.create_agent_config_map(cfg.CONF)
cfg.CONF.set_override('tunnel_types', [p_const.TYPE_VXLAN],
group='AGENT')
with testtools.ExpectedException(ValueError):
ovs_neutron_agent.create_agent_config_map(cfg.CONF)
def test_create_agent_config_map_enable_tunneling(self):
# Verify setting only enable_tunneling will default tunnel_type to GRE
cfg.CONF.set_override('tunnel_types', None, group='AGENT')
cfg.CONF.set_override('enable_tunneling', True, group='OVS')
cfg.CONF.set_override('local_ip', '10.10.10.10', group='OVS')
        cfgmap = ovs_neutron_agent.create_agent_config_map(cfg.CONF)
self.assertEqual(cfgmap['tunnel_types'], [p_const.TYPE_GRE])
def test_create_agent_config_map_fails_no_local_ip(self):
# An ip address is required for tunneling but there is no default
cfg.CONF.set_override('enable_tunneling', True, group='OVS')
with testtools.ExpectedException(ValueError):
ovs_neutron_agent.create_agent_config_map(cfg.CONF)
def test_create_agent_config_map_fails_for_invalid_tunnel_type(self):
cfg.CONF.set_override('tunnel_types', ['foobar'], group='AGENT')
with testtools.ExpectedException(ValueError):
ovs_neutron_agent.create_agent_config_map(cfg.CONF)
def test_create_agent_config_map_multiple_tunnel_types(self):
cfg.CONF.set_override('local_ip', '10.10.10.10', group='OVS')
cfg.CONF.set_override('tunnel_types', [p_const.TYPE_GRE,
p_const.TYPE_VXLAN], group='AGENT')
cfgmap = ovs_neutron_agent.create_agent_config_map(cfg.CONF)
self.assertEqual(cfgmap['tunnel_types'],
[p_const.TYPE_GRE, p_const.TYPE_VXLAN])
def test_create_agent_config_map_enable_distributed_routing(self):
self.addCleanup(cfg.CONF.reset)
# Verify setting only enable_tunneling will default tunnel_type to GRE
cfg.CONF.set_override('enable_distributed_routing', True,
group='AGENT')
cfgmap = ovs_neutron_agent.create_agent_config_map(cfg.CONF)
self.assertEqual(cfgmap['enable_distributed_routing'], True)
class TestOvsNeutronAgent(base.BaseTestCase):
def setUp(self):
super(TestOvsNeutronAgent, self).setUp()
notifier_p = mock.patch(NOTIFIER)
notifier_cls = notifier_p.start()
self.notifier = mock.Mock()
notifier_cls.return_value = self.notifier
cfg.CONF.set_default('firewall_driver',
'neutron.agent.firewall.NoopFirewallDriver',
group='SECURITYGROUP')
kwargs = ovs_neutron_agent.create_agent_config_map(cfg.CONF)
class MockFixedIntervalLoopingCall(object):
def __init__(self, f):
self.f = f
def start(self, interval=0):
self.f()
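        # Test double: stands in for oslo's FixedIntervalLoopingCall so the
        # agent's periodic task runs exactly once, synchronously, during setUp.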
with contextlib.nested(
mock.patch('neutron.plugins.openvswitch.agent.ovs_neutron_agent.'
'OVSNeutronAgent.setup_integration_br',
return_value=mock.Mock()),
mock.patch('neutron.plugins.openvswitch.agent.ovs_neutron_agent.'
'OVSNeutronAgent.setup_ancillary_bridges',
return_value=[]),
mock.patch('neutron.agent.linux.ovs_lib.OVSBridge.'
'create'),
mock.patch('neutron.agent.linux.ovs_lib.OVSBridge.'
'set_secure_mode'),
mock.patch('neutron.agent.linux.ovs_lib.OVSBridge.'
'get_local_port_mac',
return_value='00:00:00:00:00:01'),
mock.patch('neutron.agent.linux.utils.get_interface_mac',
return_value='00:00:00:00:00:01'),
mock.patch('neutron.agent.linux.ovs_lib.'
'get_bridges'),
mock.patch('neutron.openstack.common.loopingcall.'
'FixedIntervalLoopingCall',
new=MockFixedIntervalLoopingCall)):
self.agent = ovs_neutron_agent.OVSNeutronAgent(**kwargs)
self.agent.tun_br = mock.Mock()
self.agent.sg_agent = mock.Mock()
def _mock_port_bound(self, ofport=None, new_local_vlan=None,
old_local_vlan=None):
port = mock.Mock()
port.ofport = ofport
net_uuid = 'my-net-uuid'
fixed_ips = [{'subnet_id': 'my-subnet-uuid',
'ip_address': '1.1.1.1'}]
if old_local_vlan is not None:
self.agent.local_vlan_map[net_uuid] = (
ovs_neutron_agent.LocalVLANMapping(
old_local_vlan, None, None, None))
with contextlib.nested(
mock.patch('neutron.agent.linux.ovs_lib.OVSBridge.'
'set_db_attribute', return_value=True),
mock.patch('neutron.agent.linux.ovs_lib.OVSBridge.'
'db_get_val', return_value=str(old_local_vlan)),
mock.patch.object(self.agent.int_br, 'delete_flows')
) as (set_ovs_db_func, get_ovs_db_func, delete_flows_func):
self.agent.port_bound(port, net_uuid, 'local', None, None,
fixed_ips, "compute:None", False)
get_ovs_db_func.assert_called_once_with("Port", mock.ANY, "tag")
if new_local_vlan != old_local_vlan:
set_ovs_db_func.assert_called_once_with(
"Port", mock.ANY, "tag", str(new_local_vlan))
if ofport != -1:
delete_flows_func.assert_called_once_with(in_port=port.ofport)
else:
self.assertFalse(delete_flows_func.called)
else:
self.assertFalse(set_ovs_db_func.called)
self.assertFalse(delete_flows_func.called)
def _setup_for_dvr_test(self, ofport=10):
self._port = mock.Mock()
self._port.ofport = ofport
self._port.vif_id = "1234-5678-90"
self.agent.enable_distributed_routing = True
self.agent.enable_tunneling = True
self.agent.patch_tun_ofport = 1
self.agent.patch
| ctames/conference-host | webApp/urls.py | Python | mit | 1,850 | 0.007027 |
from django.conf.urls import patterns, include, url
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
admin.autodiscover()
import views
urlpatterns = patterns('',
url(r'^pis', views.pis),
url(r'^words', views.words, { 'titles': False }),
url(r'^projects', views.projects),
    url(r'^posters', views.posters),
url(r'^posterpresenters', views.posterpresenters),
url(r'^pigraph', views.pigraph),
url(r'^institutions', views.institutions),
url(r'^institution/(?P<institutionid>\d+)', views.institution),
url(r'^profile/$', views.profile),
url(r'^schedule/(?P<email>\S+)', views.schedule),
url(r'^ratemeeting/(?P<rmid>\d+)/(?P<email>\S+)', views.ratemeeting),
url(r'^submitrating/(?P<rmid>\d+)/(?P<email>\S+)', views.submitrating),
url(r'^feedback/(?P<email>\S+)', views.after),
url(r'^breakouts', views.breakouts),
url(r'^breakout/(?P<bid>\d+)', views.breakout),
url(r'^about', views.about),
url(r'^buginfo', views.buginfo),
url(r'^allrms', views.allrms),
url(r'^allratings', views.allratings),
url(r'^login', views.login),
url(r'^logout', views.logout),
url(r'^edit_home_page', views.edit_home_page),
url(r'^pi/(?P<userid>\d+)', views.pi), # , name = 'pi'),
url(r'^pi/(?P<email>\S+)', views.piEmail), # , name = 'pi'),
url(r'^project/(?P<abstractid>\S+)', views.project, name = 'project'),
url(r'^scope=(?P<scope>\w+)/(?P<url>.+)$', views.set_scope),
url(r'^active=(?P<active>\d)/(?P<url>.+)$', views.set_active),
url(r'^admin/', include(admin.site.urls)),
(r'', include('django_browserid.urls')),
url(r'^$', views.index, name = 'index'),
) + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
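For orientation, each named capture group in these patterns is handed to the view as a string keyword argument; a hypothetical sketch of the matching view signature (not the repo's actual code):

def institution(request, institutionid):
    # institutionid is the captured string, e.g. '42' for /institution/42
    ...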
| ProjectSWGCore/NGECore2 | scripts/object/tangible/wearables/boots/item_entertainer_boots_02_01.py | Python | lgpl-3.0 | 366 | 0.02459 |
import sys
def setup(core, object):
object.setStfFilename('static_item_n')
    object.setStfName('item_entertainer_boots_02_01')
object.setDetailFilename('static_item_d')
object.setDetailName('item_entertainer_boots_02_01')
object.setIntAttribute('cat_stat_mod_bonus.@stat_n:agility_modified', 3)
    object.setStringAttribute('class_required', 'Entertainer')
return
| kepbod/usefullib | test/bpkm.py | Python | mit | 2,744 | 0.001822 |
#!/usr/bin/env python3
'''
bpkm.py - Calculate BPKM.
author: Xiao-Ou Zhang
version: 0.2.0
'''
import sys
sys.path.insert(0, '/picb/rnomics1/xiaoou/program/usefullib/python')
from map import mapto
from subprocess import Popen, PIPE
import os
def calculatebpkm(chrom, sta, end, bam, total=0, length=0, getsegment=False):
'''
calculatebpkm(chrom, sta, end, bam, total, length, getsegment) -> bpkm
Calculate BPKM.
'''
sta = int(sta)
end = int(end)
total = int(total)
length = int(length)
if sta == end:
return 0
read_segments = []
    with Popen(['samtools', 'view', bam, '{}:{}-{}'.format(chrom, sta, end)],
stdout=PIPE) as proc:
for line in proc.stdout:
str_line = line.decode('utf-8')
pos = str_line.split()[3]
cigar = str_line.split()[5]
segment = readsplit(pos, cigar)
read_segments.extend(segment)
if not read_segments:
return 0
mapped_read_segments = mapto(read_segments, [[sta, end]])
if not getsegment:
base = 0
for segment in mapped_read_segments:
base += segment[1] - segment[0]
return (base * pow(10, 9)) / (total * length * (end - sta))
else:
return mapped_read_segments
def readsplit(pos1, cigar):
'''
readsplit(pos, cigar) -> interval
Split reads.
'''
pos2, num = (int(pos1), '')
interval = []
for i in cigar:
if 48 <= ord(i) <= 57:
num += i
continue
elif i == 'M' or i == 'D':
pos2 += int(num)
num = ''
continue
elif i == 'I':
num = ''
continue
elif i == 'N':
interval.append([int(pos1), pos2])
pos1 = pos2 + int(num)
pos2 = pos1
num = ''
interval.append([int(pos1), pos2])
return interval
if __name__ == '__main__':
if len(sys.argv) != 4:
print('bpkm.py *.bed *.bam length')
sys.exit(0)
name = os.path.splitext(os.path.split(sys.argv[1])[1])[0]
bam, length = sys.argv[2:5]
size = 0
with Popen(['samtools', 'idxstats', bam], stdout=PIPE) as proc:
for line in proc.stdout:
str_line = line.decode('utf-8')
size += int(str_line.split()[2])
with open(sys.argv[1], 'r') as f:
with open('{}.bpkm'.format(name), 'w') as outf:
for line in f:
chrom, sta, end, *remaining = line.split()
bpkm = calculatebpkm(chrom, sta, end, bam, size, length)
pos = '{}:{}-{}'.format(chrom, sta, end)
result = '\t'.join([pos, str(bpkm)] + remaining)
outf.write('{}\n'.format(result))
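A quick sanity check of readsplit's CIGAR handling above, using hypothetical values: a read aligned at position 100 with CIGAR '10M5N10M' (10 bases matched, 5 skipped, 10 matched) splits into two intervals.

assert readsplit('100', '10M5N10M') == [[100, 110], [115, 125]]
# The final score is an RPKM-style normalization:
#   bpkm = covered_bases * 10**9 / (total_reads * read_length * region_size)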
| ganeshgore/myremolab | server/src/test/unit/voodoo/gen/loader/test_SchemaChecker.py | Python | bsd-2-clause | 2,998 | 0.004004 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2005 onwards University of Deusto
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
# This software consists of contributions made by many individuals,
# listed below:
#
# Author: Pablo Orduña <pablo@ordunya.com>
#
import sys
import unittest
import voodoo.gen.loader.schema_checker as SchemaChecker
import voodoo.gen.exceptions.loader.LoaderErrors as LoaderErrors
class WrappedSchemaChecker(SchemaChecker.SchemaChecker):
    def __init__(self, xml_content, xsd_content):
self.__xml_content = xml_content
self.__xsd_content = xsd_content
def _read_xml_file(self, xmlfile):
return self.__xml_content
def _read_xsd_file(self, xsdfile):
return self.__xsd_content
SAMPLE_XML_SCHEMA = """<?xml version="1.0"?>
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema"
elementFormDefault="qualified"
>
<xs:element name="root_element">
<xs:complexType>
<xs:sequence>
<xs:element name="element" type="xs:string"/>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:schema>"""
VALID_XML_CONTENT = """<?xml version="1.0"?>
<root_element>
<element>This is a valid XML</element>
</root_element>"""
INVALID_WELL_FORMED_XML_CONTENT = """<?xml version="1.0"?>
<whatever>
<element>This is a well formed invalid XML</element>
</whatever>"""
INVALID_XML_CONTENT = """This is not a well formed xml"""
class SchemaCheckerTestCase(unittest.TestCase):
if SchemaChecker.LXML_AVAILABLE:
def test_correct_check_schema(self):
schema_checker = WrappedSchemaChecker(
VALID_XML_CONTENT,
SAMPLE_XML_SCHEMA
)
schema_checker.check_schema('whatever','whatever')
def test_invalid_well_formed(self):
schema_checker = WrappedSchemaChecker(
INVALID_WELL_FORMED_XML_CONTENT,
SAMPLE_XML_SCHEMA
)
self.assertRaises(
LoaderErrors.InvalidSyntaxFileConfigurationError,
schema_checker.check_schema,
'whatever',
'whatever'
)
def test_invalid_xml(self):
schema_checker = WrappedSchemaChecker(
INVALID_XML_CONTENT,
SAMPLE_XML_SCHEMA
)
self.assertRaises(
LoaderErrors.InvalidSyntaxFileConfigurationError,
schema_checker.check_schema,
'whatever',
'whatever'
)
else:
print >> sys.stderr, "SchemaChecker tests skipped since lxml is not available"
def suite():
return unittest.makeSuite(SchemaCheckerTestCase)
if __name__ == '__main__':
unittest.main()
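WrappedSchemaChecker overrides the two reader hooks so each test can inject XML and XSD strings directly. A minimal sketch of the validation these tests ultimately exercise, assuming lxml is installed (the SchemaChecker internals themselves are not shown in this record):

from lxml import etree
schema = etree.XMLSchema(etree.fromstring(SAMPLE_XML_SCHEMA.encode('utf-8')))
print(schema.validate(etree.fromstring(VALID_XML_CONTENT.encode('utf-8'))))               # True
print(schema.validate(etree.fromstring(INVALID_WELL_FORMED_XML_CONTENT.encode('utf-8'))))  # False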
| tdr130/sec | lib/xml_parser.py | Python | mit | 4,735 | 0.045829 |
#!/usr/bin/python
__VERSION__ = '0.1'
__AUTHOR__ = 'Galkan'
__DATE__ = '06.08.2014'
""" it is derived from https://github.com/argp/nmapdb/blob/master/nmapdb.py """
try:
import sys
import xml.dom.minidom
from xml.etree import ElementTree
except ImportError,e:
import sys
sys.stdout.write("%s\n" %e)
sys.exit(1)
class XmlParser:
def __init__(self, xml_file):
self.xml_file = xml_file
self.mac = {}
self.mac_list = {}
self.os = {}
self.os_list = {}
self.script_ret = {}
self.script = {}
self.script_list = []
self.port_service_ret = {}
self.port_service = {}
self.port_service_list = []
def parser(self, opt):
try:
root = xml.dom.minidom.parse(self.xml_file)
except Exception, err:
print >> sys.stderr, err
sys.exit(1)
for host in root.getElementsByTagName("host"):
try:
address = host.getElementsByTagName("address")[0]
ip = address.getAttribute("addr")
protocol = address.getAttribute("addrtype")
except:
pass
try:
mac_address = host.getElementsByTagName("address")[1]
mac = mac_address.getAttribute("addr")
mac_vendor = mac_address.getAttribute("vendor")
except:
mac = ""
mac_vendor = ""
mac_ret = mac + ":" + mac_vendor
self.mac_list[ip] = mac_ret
if opt == 1:
try:
os = host.getElementsByTagName("os")[0]
os_match = os.getElementsByTagName("osmatch")[0]
os_name = os_match.getAttribute("name")
os_accuracy = os_match.getAttribute("accuracy")
os_class = os.getElementsByTagName("osclass")[0]
os_family = os_class.getAttribute("osfamily")
os_gen = os_class.getAttribute("osgen")
except:
os_name = ""
os_accuracy = ""
os_family = ""
os_gen = ""
os_ret = os_family + ":" + os_name + ":" + os_accuracy
self.os_list[ip] = os_ret
elif opt == 2:
try:
ports = host.getElementsByTagName("ports")[0]
ports = ports.getElementsByTagName("port")
except:
continue
for port in ports:
port_number = port.getAttribute("portid")
protocol = port.getAttribute("protocol")
state_el = port.getElementsByTagName("state")[0]
state = state_el.getAttribute("state")
try:
service = port.getElementsByTagName("service")[0]
port_name = service.getAttribute("name")
product_descr = service.getAttribute("product")
product_ver = service.getAttribute("version")
product_extra = service.getAttribute("extrainfo")
except:
service = ""
port_name = ""
product_descr = ""
product_ver = ""
product_extra = ""
port_service_ret = port_number + ":" + state + ":" + protocol + ":" + port_name + ":" + product_descr + ":" + product_ver + ":" + product_extra
self.port_service_list.append(port_service_ret)
self.port_service[ip] = self.port_service_list
self.port_service_list = []
elif opt == 3:
for hostscript in host.getElementsByTagName("hostscript"):
for script in hostscript.getElementsByTagName("script"):
script_id = script.getAttribute("id")
script_output = script.getAttribute("output")
script_ret_1 = script_id + ":" + script_output
self.script_list.append(script_ret_1)
                ports = host.getElementsByTagName("ports")[0].getElementsByTagName("port")
for port in ports:
for script in port.getElementsByTagName("script"):
script_id = script.getAttribute("id")
script_output = script.getAttribute("output")
script_ret_2 = script_id + ":" + script_output
self.script_list.append(script_ret_2)
self.script[ip] = self.script_list
self.script_list = []
self.os["os"] = self.os_list
self.mac["mac"] = self.mac_list
self.script_ret["script"] = self.script
self.port_service_ret["port_service"] = self.port_service
return self.mac, self.os, self.port_service_ret, self.script_ret
##
### Main ...
##
if __name__ == "__main__":
    xml_parser = XmlParser(sys.argv[1])
    # parser() returns a tuple of result dicts: (mac, os, port_service, script)
    for result in xml_parser.parser(3):
        print result
| biomodels/MODEL1302010006 | setup.py | Python | cc0-1.0 | 377 | 0.005305 |
from setuptools import setup, find_packages
setup(name='MODEL1302010006',
version=20140916,
description='MODEL1302010006 from BioModels',
url='http://www.ebi.ac.uk/biomodels-main/MODEL1302010006',
maintainer='Stanley Gu',
      maintainer_url='stanleygu@gmail.com',
packages=find_packages(),
package_data={'': ['*.xml', 'README.md']},
)
| samgeen/Hamu | Utils/CommandLine.py | Python | mit | 464 | 0.006466 |
'''
Load options from the command line
Sam Geen, July 2013
'''
import sys
def Arg1(default=None):
'''
Read the first argument
default: Default value to return if no argument is found
Return: First argument in sys.argv (minus program name) or default if none
'''
if len(sys.argv) < 2:
        return default
else:
return sys.argv[1]
if __name__=="__main__":
print "Test Arg1():"
print Arg1()
print Arg1("Bumface")
| galactose/wviews | wview.py | Python | gpl-3.0 | 6,822 | 0.00044 |
"""
Wviews: Worldview Solver for Epistemic Logic Programs
Build 1.0 - Port from C++ -> Python.
Copyright (C) 2014 Michael Kelly
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from subprocess import Popen, PIPE, STDOUT
from itertools import product
from answer_set import parse_answer_sets, NO_MODEL_FOR_EVALUATED_PROGRAM
from program import parser
from program.atom import EpistemicModality
from program.grounder import Grounder
from program.program import LogicProgram
class WorldViews(object):
"""
This program reads in programs with lists of rules of the form:
[a-z+[a-z]*[(A-Z+[A-Z]*)] [v a-z*]] :- [[~|-]K|M[a-z]*,]*[[not]a-z*]
where a-zA-Z represents atoms, v represents disjunction,
',' represents conjunction, K and M are modal operators,
K 'Knows' and M 'Believes'
and ':-' representing implication (is true if the following is also true)
Returns:
- worldviews - the applicable worldviews derived from the rules.
"""
def __init__(self, file_name, dlv_path=None):
self.dlv_path = dlv_path
if not dlv_path:
self.dlv_path = 'dlv'
self.grounder = Grounder(parser.parse_program(file_name))
grounded_program_it = self.grounder.ground_program(parser.parse_program(file_name))
self.program_info = LogicProgram(None)
self.program_info.index_atoms(grounded_program_it)
@staticmethod
def check_atom_valuation(possible_world_view, atom):
"""
Given a possible world view and an epistemic atom which has an
applied valuation, return True if the valuation against the atom
was correct given the occurrences of the atom in the possible
world view and the modal operator associated with the atom.
Arguments:
* possible_world_view (set(str)) - a returned answer set,
{a_1, ..., a_n }
* atom (Atom) - an atom label, also containing its valuation
"""
universal_count = 0
answer_set_count = 0
exists = False
if atom.modality not in (EpistemicModality.KNOW,
EpistemicModality.BELIEVE):
raise Exception('Unknown epistemic modality')
for answer_set in possible_world_view:
if atom.valuation_string() not in answer_set:
answer_set_count += 1
continue
universal_count += 1
answer_set_count += 1
exists = True
if atom.modality == EpistemicModality.BELIEVE:
break
universal = (universal_count == answer_set_count)
if atom.modality == EpistemicModality.KNOW:
if (atom.epistemic_negation and universal) or \
(not atom.epistemic_negation and not universal):
return not atom.valuation
elif (not atom.epistemic_negation and universal) or \
(atom.epistemic_negation and not universal):
return atom.valuation
# negated belief
if (atom.epistemic_negation and exists) or \
(not atom.epistemic_negation and not exists):
return not atom.valuation
elif (atom.epistemic_negation and not exists) or \
(not atom.epistemic_negation and exists):
return atom.valuation
def check_valuation_validity(self, possible_world_view):
"""
goal: extract the current evaluation on the modal atom
- with the given binary evaluation, the epistemic atom and the found
worldview
- if the evaluation is satisfied by the worldview return true
- otherwise for any given epistemic atom and its evaluation, if one
fails the whole evaluation fails.
"""
e_atom_iter = self.program_info.epistemic_atom_cache.iteritems()
for _, epistemic_atom in e_atom_iter:
if not self.check_atom_valuation(
possible_world_view, epistemic_atom
):
return False
return True
@staticmethod
def get_valuation_string(epistemic_atom_count):
"""
Arguments:
* epistemic_atom_count (int) - How many epistemic atoms exist in the
program
"""
for valuation in product((True, False), repeat=epistemic_atom_count):
yield valuation
def generate_worldview(self):
# posOpt = self.optimisation_feasibilty(stat_struct)
prog_info = self.program_info
e_atom_cache = prog_info.epistemic_atom_cache
for valuation in self.get_valuation_string(len(e_atom_cache)):
# if self.evaluation_skip(posOpt, stat_struct, binModEval)
# continue
evaluated_program = prog_info.get_evaluated_program_and_apply_valuation(valuation)
raw_worldview = self.get_possible_worldview(evaluated_program)
world_view = parse_answer_sets(raw_worldview)
# checks returned set against original set.
if world_view != NO_MODEL_FOR_EVALUATED_PROGRAM and \
self.check_valuation_validity(world_view):
yield world_view
def get_possible_worldview(self, evaluated_program):
dlv_response = Popen(
[self.dlv_path, '-silent', '--'], stdout=PIPE, stdin=PIPE, stderr=STDOUT
)
raw_program = '\n'.join(evaluated_program)
response = dlv_response.communicate(input=raw_program)[0].split('\n')
for line in response:
if line:
yield line
@staticmethod
def translate_modality(atom_details):
"""
transModality:
PRE:
POST:
COMPLEXITY:
COMPLETED:
"""
mod = ''
if (atom_details & 0x1) == 1:
# 0x1 is atom negation
mod = '-'
if (atom_details & 0x2) == 2:
            # 0x2 is epistemic modality; if true it's knows, false if believes
mod = 'K' + mod
else:
mod = 'M' + mod
        # 0x4 is epistemic negation
if (atom_details & 0x4) == 4:
mod = '-' + mod
return mod
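A quick trace of the bit flags handled by translate_modality, with hypothetical values:

# atom_details = 0x2 (K, no negations)        -> 'K'
# atom_details = 0x3 (atom negation + K)      -> 'K-'
# atom_details = 0x6 (K + epistemic negation) -> '-K'
# atom_details = 0x1 (negated atom, M branch) -> 'M-'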
| kbase/transform | lib/biokbase/Transform/TextFileDecoder.py | Python | mit | 9,547 | 0.003771 |
#!/usr/bin/env python
import os
import codecs
def open_textdecoder(file=None, codec=None, chunkSize=1024):
fp = open(file, 'rb')
return TextFileDecoder(fp, codec, chunkSize)
# TODO look into inheriting from a stream for this class
class TextFileDecoder(object):
"""Class that wraps a file object and handles text decoding while
adjusting the file pointer to relative byte positions after
each read operation. Convenience methods for finding a string
in the file are also included."""
def __init__(self, file=None, codec=None, chunkSize=1024):
"""Constructor accepts a file object, a codec string, and an
optional chunk size used when reading bytes from the file."""
self._fp = file
        self._codec = codec
# use chunk size of 1KB
self._chunkSize = chunkSize
def close(self):
"""Calls file object close() method"""
self._fp.close()
def readline(self):
"""Reads chunks of bytes from the file, decoding each chunk until
EOL is found. Once EOL is found, the file pointer is set to
the byte immediately following the EOL character, and the decoded
text of the line is returned.
If the end of the file is reached first, the file read() method
will raise an IOError."""
startPosition = self._fp.tell()
chunkString = str()
while 1:
bytes = self._fp.read(self._chunkSize)
if not bytes:
return chunkString
nextChunk = codecs.decode(bytes, self._codec)
eol = nextChunk.find("\n")
if eol != -1:
chunkString += nextChunk[:eol + 1]
self._fp.seek(startPosition + len(codecs.encode(chunkString, self._codec)),0)
return chunkString
chunkString += nextChunk
def read(self, size):
"""Reads a fixed number of bytes from the file and decodes them,
resetting the file pointer to the byte immediately following the
last decoded character and returning the decoded text."""
startPosition = self._fp.tell()
byteString = self._fp.read(size)
if byteString == None:
return None
charString = codecs.decode(byteString, self._codec)
charBytes = len(codecs.encode(charString, self._codec))
self._fp.seek(startPosition + charBytes)
return charString
def tell(self):
"""Calls file tell() method."""
return self._fp.tell()
def seek(self, position, start=0):
"""Calls file seek() method."""
self._fp.seek(position, start)
def find(self, s, startPosition=0, lastPosition=-1, firstByte=False):
"""Find a string within the file, optionally constrained by a start
and end byte position in the file. The firstByte option is a
switch that returns by default the byte immediately following
the found string so that it can be read in. If firstByte is True,
        the position returned will be the byte immediately preceding
the found string. If the string is not found, returns -1.
The file pointer is reset to the position it was at before this
method was called before returning."""
filePosition = self._fp.tell()
self._fp.seek(0,2)
finalPosition = self._fp.tell()
if lastPosition < 0 and abs(lastPosition) < finalPosition:
lastPosition = finalPosition + lastPosition
elif lastPosition < 0 or lastPosition > finalPosition:
lastPosition = finalPosition
if startPosition < 0 and abs(startPosition) < lastPosition:
startPosition = lastPosition + startPosition
elif startPosition < 0:
startPosition = 0
if startPosition > lastPosition:
raise Exception("Start position greater than ending position!")
stringLength = len(s)
stringBytes = len(codecs.encode(s, self._codec))
if stringBytes > lastPosition - startPosition:
self._fp.seek(filePosition)
return -1
chunkSize = self._chunkSize
if stringBytes > chunkSize:
chunkSize = stringBytes * 4
if lastPosition - startPosition < chunkSize:
chunkSize = lastPosition - startPosition
offset = 0
while 1:
try:
self._fp.seek(startPosition + offset)
chunkString = codecs.decode(self._fp.read(chunkSize), self._codec)
except IOError, e:
self._fp.seek(filePosition)
return -1
# look for the first instance of this string
firstInstance = chunkString.find(s)
if firstInstance > -1:
# set the file position back to where it was before we began
self._fp.seek(filePosition)
offsetString = chunkString
# if the string is at the end we are done, otherwise we need to get everything on the end after our string
if s != chunkString[-stringLength:]:
if firstByte:
# calculate up to the start of the string
offsetString = chunkString[:firstInstance]
else:
# calculate up to the end of the string
offsetString = chunkString[:firstInstance + stringLength]
# calculate the bytes to the string
return startPosition + offset + len(codecs.encode(offsetString, self._codec))
elif startPosition + offset + chunkSize == lastPosition:
# we reached the end of the file and didn't find the string
self._fp.seek(filePosition)
return -1
# need to read further ahead
if startPosition + offset + chunkSize + chunkSize < lastPosition:
offset += chunkSize
else:
# the only part left is the end of the file
chunkSize = lastPosition - startPosition
def rfind(self, s, lastPosition=-1, startPosition=0, firstByte=False):
"""Same behavior as find method above, but searches in reverse from
the end of the file or the last file position provided."""
filePosition = self._fp.tell()
self._fp.seek(0,2)
finalPosition = self._fp.tell()
if lastPosition < 0 and abs(lastPosition) < finalPosition:
lastPosition = finalPosition + lastPosition
elif lastPosition < 0 or lastPosition > finalPosition:
lastPosition = finalPosition
if startPosition < 0 and abs(startPosition) < lastPosition:
startPosition = lastPosition + startPosition
elif startPosition < 0:
startPosition = 0
if startPosition > lastPosition:
raise Exception("Start position greater than ending position!")
stringLength = len(s)
stringBytes = len(codecs.encode(s, self._codec))
if stringBytes > lastPosition - startPosition:
self._fp.seek(filePosition)
return -1
chunkSize = self._chunkSize * 4
if stringBytes > chunkSize:
chunkSize = stringBytes * 4
if lastPosition - startPosition < chunkSize:
chunkSize = lastPosition - startPosition
offset = 0
while 1:
try:
self._fp.seek(lastPosition - offset - chunkSize)
chunkString = codecs.decode(self._fp.read(chunkSize), self._codec)
except IOError, e:
self._fp.seek(filePosition)
return -1
# look for the last instance of this string
lastInstance = chunkString.rfind(s)
if lastInstance > -1:
# set the file position back to where it was before we began
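A minimal usage sketch for the decoder above, assuming a UTF-8 text file at the hypothetical path 'notes.txt':

decoder = open_textdecoder(file='notes.txt', codec='utf-8')
line = decoder.readline()        # decoded text up to and including the first '\n'
offset = decoder.find('TODO')    # byte offset just past the match, or -1 if absent
decoder.close()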
| datawire/mdk | unittests/test_mdk.py | Python | apache-2.0 | 27,210 | 0.000919 |
"""
Tests for the MDK public API that are easier to do in Python.
"""
from time import time
from builtins import range
from past.builtins import unicode
from unittest import TestCase
from tempfile import mkdtemp
from collections import Counter
import configparser
import hypothesis.strategies as st
from hypothesis import given, assume
from mdk import MDKImpl
from mdk_runtime import fakeRuntime
from mdk_runtime.actors import _QuarkRuntimeLaterCaller
from mdk_discovery import (
ReplaceCluster, NodeActive, RecordingFailurePolicyFactory,
)
from mdk_protocol import Close, ProtocolError
from .common import (
create_node, SANDBOX_ENV, MDKConnector, create_mdk_with_faketracer,
)
class MDKInitializationTestCase(TestCase):
"""
Tests for top-level MDK API startup.
"""
def test_no_datawire_token(self):
"""
If DATAWIRE_TOKEN is not set neither the TracingClient nor the DiscoClient
are started.
"""
# Disable connecting to our Discovery server:
runtime = fakeRuntime()
runtime.getEnvVarsService().set("MDK_DISCOVERY_SOURCE", "synapse:path=" + mkdtemp())
# Start the MDK:
mdk = MDKImpl(runtime)
mdk.start()
# Do a bunch of logging:
session = mdk.session()
session.info("category", "hello!")
session.error("category", "ono")
session.warn("category", "gazoots")
session.critical("category", "aaaaaaa")
session.debug("category", "behold!")
# Time passes...
scheduleService = runtime.getScheduleService()
for i in range(10):
scheduleService.advance(1.0)
scheduleService.pump()
# No WebSocket connections made:
self.assertFalse(runtime.getWebSocketsService().fakeActors)
def add_bools(list_of_lists):
"""
Given recursive list that can contain other lists, return tuple of that plus
a booleans strategy for each list.
"""
l = []
def count(recursive):
l.append(1)
        for child in recursive:
if isinstance(child, list):
count(child)
count(list_of_lists)
return st.tuples(st.just(list_of_lists), st.tuples(*[st.sampled_from([True, False]) for i in l]))
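# Worked example (hypothetical input): for [["a"], "b"], count() sees two
# lists -- the outer one and ["a"] -- so the returned strategy draws pairs
# like ([["a"], "b"], (True, False)): the original structure plus one
# boolean per interaction that will be opened.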
class InteractionTestCase(TestCase):
"""Tests for the Session interaction API."""
    def init(self):
        """Initialize an empty environment."""
self.connector = MDKConnector(RecordingFailurePolicyFactory())
# Because we want to use blocking resolve() we need async message delivery:
self.connector.runtime.dispatcher.pump()
self.connector.runtime.dispatcher.callLater = _QuarkRuntimeLaterCaller()
self.runtime = self.connector.runtime
self.mdk = self.connector.mdk
self.disco = self.mdk._disco
# Create a session:
self.session = self.mdk.session()
def setUp(self):
self.init()
# Register some nodes:
self.node1 = create_node("a1", "service1")
self.node2 = create_node("a2", "service1")
self.node3 = create_node("b1", "service2")
self.node4 = create_node("b2", "service2")
self.all_nodes = set([self.node1, self.node2, self.node3, self.node4])
self.disco.onMessage(None, ReplaceCluster("service1", SANDBOX_ENV,
[self.node1, self.node2]))
self.disco.onMessage(None, ReplaceCluster("service2", SANDBOX_ENV,
[self.node3, self.node4]))
def assertPolicyState(self, policies, successes, failures):
"""
Assert that the given FailurePolicy instances has the given number of
success() and failure() calls.
"""
for policy in policies:
self.assertEqual((policy.successes, policy.failures),
(successes, failures))
def test_successfulInteraction(self):
"""
All nodes resolved within a successful interaction are marked as
succeeding to connect.
"""
self.session.start_interaction()
node = self.session.resolve("service1", "1.0")
another_node = self.session.resolve("service2", "1.0")
self.session.finish_interaction()
expected_successful = [self.disco.failurePolicy(node),
self.disco.failurePolicy(another_node)]
expected_nothing = list(self.disco.failurePolicy(n) for n in
self.all_nodes if
n.address not in [node.address, another_node.address])
self.assertPolicyState(expected_successful, 1, 0)
self.assertPolicyState(expected_nothing, 0, 0)
def test_failedInteraction(self):
"""All nodes resolved with a failing interaction are marked as failures."""
self.session.start_interaction()
node = self.session.resolve("service1", "1.0")
another_node = self.session.resolve("service2", "1.0")
self.session.fail_interaction("OHNO")
self.session.finish_interaction()
expected_failed = [self.disco.failurePolicy(node),
self.disco.failurePolicy(another_node)]
expected_nothing = list(self.disco.failurePolicy(n) for n in
self.all_nodes if
n.address not in [node.address, another_node.address])
self.assertPolicyState(expected_failed, 0, 1)
self.assertPolicyState(expected_nothing, 0, 0)
def test_failedResetsInteraction(self):
"""
Nodes resolved after a failing interaction are not marked as failed when
finish is called.
"""
self.session.start_interaction()
node = self.session.resolve("service1", "1.0")
self.session.fail_interaction("OHNO")
another_node = self.session.resolve("service2", "1.0")
self.session.finish_interaction()
expected_failed = [self.disco.failurePolicy(node)]
expected_succeeded = [self.disco.failurePolicy(another_node)]
expected_nothing = list(self.disco.failurePolicy(n) for n in
self.all_nodes if
n.address not in [node.address, another_node.address])
self.assertPolicyState(expected_failed, 0, 1)
self.assertPolicyState(expected_succeeded, 1, 0)
self.assertPolicyState(expected_nothing, 0, 0)
def test_finishedResetsInteraction(self):
"""
Each new interaction allows marking Nodes with new information.
"""
self.session.start_interaction()
node = self.session.resolve("service1", "1.0")
self.session.fail_interaction("OHNO")
self.session.finish_interaction()
self.session.start_interaction()
# Resolve same node again:
while True:
another_node = self.session.resolve("service1", "1.0")
if node.address == another_node.address:
break
self.session.finish_interaction()
self.assertPolicyState([self.disco.failurePolicy(node)], 1, 1)
@given(st.recursive(st.text(alphabet="abcd", min_size=1, max_size=3),
st.lists).flatmap(add_bools))
def test_nestedInteractions(self, values):
"""
Nested interactions operate independently of parent interactions.
:param values: a two-tuple composed of:
- a recursive list of unicode and other recursive lists - list start
means begin interaction, string means node resolve, list end means
finish interaction.
- list of False/True; True means failed interaction
"""
requested_interactions, failures = values
failures = iter(failures)
assume(not isinstance(requested_interactions, unicode))
self.init()
ws_actor = self.connector.expectSocket()
self.connector.connect(ws_actor)
failures = iter(failures)
created_services = {}
expected_success_nodes = Counter()
expected_failed_nodes = Counter()
def run_interaction(children):
s
| rspavel/spack | var/spack/repos/builtin/packages/py-sphinxcontrib-devhelp/package.py | Python | lgpl-2.1 | 793 | 0.003783 |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PySphinxcontribDevhelp(PythonPackage):
"""sphinxcontrib-devhelp is a sphinx extension which outputs
Devhelp document."""
homepage = "http
|
://sphinx-doc.org/"
url = "https://pypi.io/packages/source/s/sphinxcontrib-devhelp/sphinxcontrib-devhelp-1.0.1.tar.gz"
version('1.0.1', sha256='6c64b077937330a9128a4da74586e8c2130262f014689b4b89e2d08ee7294a34')
    depends_on('python@3.5:', type=('build', 'run'))
depends_on('py-setuptools', type='build')
def test(self):
# Requires sphinx, creating a circular dependency
pass
| yasserglez/arachne | tests/testresultqueue.py | Python | gpl-3.0 | 6,369 | 0.000628 |
# -*- coding: utf-8 -*-
import os
import sys
import shutil
import optparse
import unittest
TESTDIR = os.path.dirname(os.path.abspath(__file__))
SRCDIR = os.path.abspath(os.path.join(TESTDIR, os.path.pardir))
sys.path.insert(0, SRCDIR)
from arachne.error import EmptyQueue
from arachne.result import CrawlResult, ResultQueue
from arachne.task import CrawlTask
from arachne.url import URL
class TestResultQueue(unittest.TestCase):
def setUp(self):
self._db_home = os.path.join(TESTDIR, 'testresultqueue')
os.mkdir(self._db_home)
self._sites_info = {
'a78e6853355ad5cdc751ad678d15339382f9ed21':
{'url': URL('ftp://atlantis.uh.cu/')},
'7e019d6f671d336a0cc31f137ba034efb13fc327':
{'url': URL('ftp://andromeda.uh.cu/')},
'aa958756e769188be9f76fbdb291fe1b2ddd4777':
{'url': URL('ftp://deltha.uh.cu/')},
'd4af25db08f5fb6e768db027d51b207cd1a7f5d0':
{'url': URL('ftp://anduin.uh.cu/')},
'886b46f54bcd45d4dd5732e290c60e9639b0d101':
{'url': URL('ftp://tigris.uh.cu/')},
'ee5b017839d97507bf059ec91f1e5644a30b2fa6':
{'url': URL('ftp://lara.uh.cu/')},
'341938200f949daa356e0b62f747580247609f5a':
{'url': URL('ftp://nimbo.uh.cu/')},
'd64f2fc98d015a43da3be34668341e3ee6f79133':
{'url': URL('ftp://liverpool.reduh.uh.cu/')},
'0d3465f2b9fd5cf55748797c590ea621e3017a29':
{'url': URL('ftp://london.reduh.uh.cu/')},
'c5bcce5953866b673054f8927648d634a7237a9b':
{'url': URL('ftp://bristol.reduh.uh.cu/')},
}
self._results = []
self._results_per_site = 10
for site_id, info in self._sites_info.iteritems():
for name in (str(n) for n in xrange(self._results_per_site)):
task = CrawlTask(site_id, info['url'].join(name))
self._results.append(CrawlResult(task, True))
self._queue = ResultQueue(self._sites_info, self._db_home)
def test_length(self):
self.assertEquals(len(self._queue), 0)
for i, result in enumerate(self._results):
self._queue.put(result)
self.assertEquals(len(self._queue), i + 1)
num_results = len(self._results)
for i in xrange(num_results):
result = self._queue.get()
self._queue.report_done(result)
            self.assertEquals(len(self._queue), num_results - i - 1)
def test_populate(self):
self.assertRaises(EmptyQueue, self._queue.get)
self._populate_queue()
for result in self._results:
returned = self._queue.get()
self.assertEquals(str(returned.task.url), str(result.task.url))
self._queue.report_done(result)
self.assertRaises(EmptyQueue, self._queue.get)
def test_persistence(self):
self._populate_queue()
for i, result in enumerate(self._results):
if i % (self._results_per_site / 2) == 0:
# When a few results have been removed close the database to
# write all the results to disk and open it again.
self._queue.close()
self._queue = ResultQueue(self._sites_info, self._db_home)
returned = self._queue.get()
self.assertEquals(str(returned.task.url), str(result.task.url))
self._queue.report_done(returned)
def test_remove_site(self):
self._populate_queue()
self._queue.close()
# Remove a site. It should not return results from this site but it
# should keep the order of the other results in the queue.
del self._sites_info[self._sites_info.keys()[0]]
self._queue = ResultQueue(self._sites_info, self._db_home)
for result in self._results:
if result.task.site_id in self._sites_info:
returned = self._queue.get()
self.assertEquals(str(returned.task.url), str(result.task.url))
self._queue.report_done(returned)
self.assertEquals(len(self._queue), 0)
def test_report_done(self):
self._populate_queue()
self._clear_queue(remain=1)
result = self._queue.get()
self._queue.report_done(result)
self.assertEquals(len(self._queue), 0)
def test_report_error_one_result(self):
self._populate_queue()
self._clear_queue(remain=1)
result = self._queue.get()
self._queue.report_error(result)
returned = self._queue.get()
self.assertEquals(str(result.task.url), str(returned.task.url))
self._queue.report_done(returned)
def test_report_error_two_results(self):
self._populate_queue()
self._clear_queue(remain=2)
result = self._queue.get()
self._queue.report_error(result)
returned = self._queue.get()
self.assertTrue(str(result.task.url) != str(returned.task.url))
self._queue.report_done(returned)
returned = self._queue.get()
self.assertEquals(str(result.task.url), str(returned.task.url))
self._queue.report_done(returned)
def _clear_queue(self, remain=0):
# Remove results from the queue until the specified number of results
# (default 0) remains in the queue.
for i in xrange(len(self._queue) - remain):
self._queue.report_done(self._queue.get())
self.assertEquals(len(self._queue), remain)
def _populate_queue(self):
for result in self._results:
self._queue.put(result)
def tearDown(self):
if os.path.isdir(self._db_home):
self._queue.close()
shutil.rmtree(self._db_home)
def main():
parser = optparse.OptionParser()
parser.add_option('-v', dest='verbosity', default='2',
type='choice', choices=['0', '1', '2'],
help='verbosity level: 0 = minimal, 1 = normal, 2 = all')
options = parser.parse_args()[0]
module = os.path.basename(__file__)[:-3]
suite = unittest.TestLoader().loadTestsFromName(module)
runner = unittest.TextTestRunner(verbosity=int(options.verbosity))
result = runner.run(suite)
sys.exit(not result.wasSuccessful())
if __name__ == '__main__':
main()
| bugsnag/bugsnag-python | tests/fixtures/django1/notes/urls.py | Python | mit | 503 | 0 |
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.index),
url(r'unhandled-crash/', views.unhandled_crash, name='crash'),
url(r'unhandled-crash-chain/', views.unhandled_crash_chain),
url(r'unhandled-template-crash/',
views.unhandled_crash_in_template),
    url(r'handled-exception/', views.handle_notify),
url(r'handled-exception-custom/', views.handle_notify_custom_info),
url(r'crash-with-callback/', views.handle_crash_callback),
]
| talumbau/datashape | datashape/type_symbol_table.py | Python | bsd-2-clause | 4,644 | 0.001938 |
"""
A symbol table object to hold types for the parser.
"""
from __future__ import absolute_import, division, print_function
__all__ = ['TypeSymbolTable', 'sym']
import ctypes
from . import coretypes as ct
_is_64bit = (ctypes.sizeof(ctypes.c_void_p) == 8)
def _complex(tp):
"""Simple temporary type constructor for complex"""
if tp == ct.DataShape(ct.float32):
return ct.complex_float32
elif tp == ct.DataShape(ct.float64):
return ct.complex_float64
else:
        raise TypeError('Cannot construct a complex type with real component %s' % tp)
def _struct(names, dshapes):
"""Simple temporary type constructor for struct"""
return ct.Record(list(zip(names, dshapes)))
def _funcproto(args, ret):
"""Simple temporary type constructor for funcproto"""
return ct.Function(*(args + [ret]))
def _typevar_dim(name):
"""Simple temporary type constructor for typevar as a dim"""
# Note: Presently no difference between dim and dtype typevar
return ct.TypeVar(name)
def _typevar_dtype(name):
"""Simple temporary type constructor for typevar as a dtype"""
# Note: Presently no difference between dim and dtype typevar
return ct.TypeVar(name)
def _ellipsis(name):
return ct.Ellipsis(ct.TypeVar(name))
class TypeSymbolTable(object):
"""
This is a class which holds symbols for types and type constructors,
and is used by the datashape parser to build types during its parsing.
A TypeSymbolTable sym has four tables, as follows:
sym.dtype
Data type symbols with no type constructor.
sym.dtype_constr
Data type symbols with a type constructor. This may contain
symbols also in sym.dtype, e.g. for 'complex' and 'complex[float64]'.
sym.dim
Dimension symbols with no type constructor.
sym.dim_constr
Dimension symbols with a type constructor.
"""
__slots__ = ['dtype', 'dtype_constr', 'dim', 'dim_constr']
def __init__(self, bare=False):
        # Initialize all the symbol tables to empty dicts
self.dtype = {}
self.dtype_constr = {}
self.dim = {}
self.dim_constr = {}
if not bare:
self.add_default_types()
def add_default_types(self):
"""
Adds all the default datashape types to the symbol table.
"""
# data types with no type constructor
self.dtype.update([('bool', ct.bool_),
('int8', ct.int8),
('int16', ct.int16),
('int32', ct.int32),
('int64', ct.int64),
('intptr', ct.int64 if _is_64bit else ct.int32),
('int', ct.int32),
('uint8', ct.uint8),
('uint16', ct.uint16),
('uint32', ct.uint32),
('uint64', ct.uint64),
('uintptr', ct.uint64 if _is_64bit else ct.uint32),
('float32', ct.float32),
('float64', ct.float64),
('complex64', ct.complex64),
('complex128', ct.complex128),
('real', ct.float64),
('complex', ct.complex_float64),
('string', ct.string),
('json', ct.json),
                           ('date', ct.date_),
('time', ct.time_),
('datetime', ct.datetime_)])
# data types with a type constructor
self.dtype_constr.update([('complex', _complex),
('string', ct.String),
('struct', _struct),
('tuple', ct.Tuple),
('funcproto', _funcproto),
('typevar', _typevar_dtype),
('option', ct.Option),
('time', ct.Time),
('datetime', ct.DateTime),
('units', ct.Units)])
# dim types with no type constructor
self.dim.update([('var', ct.Var()),
('ellipsis', ct.Ellipsis())])
# dim types with a type constructor
self.dim_constr.update([('fixed', ct.Fixed),
('typevar', _typevar_dim),
('ellipsis', _ellipsis)])
# Create the default global type symbol table
sym = TypeSymbolTable()
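A short usage sketch against the symbols registered above (hypothetical lookups, using the same names as the code):

table = TypeSymbolTable()
print(table.dtype['int32'])            # ct.int32
print(table.dtype_constr['complex'])   # _complex, the parameterized constructor
print('fixed' in table.dim_constr)     # True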
| yxdong/ybk | ybk/lighttrade/trader.py | Python | mit | 2,160 | 0.00141 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import time
import yaml
import logging
import threading
from ybk.lighttrade.sysframe import Client as SysframeClient
log = logging.getLogger('trader')
configfile = open(os.path.join(os.path.dirname(__file__), 'trading.yaml'), encoding='utf-8')
config = yaml.load(configfile)
try:
accountfile = open(
os.path.join(os.path.dirname(__file__), 'accounts.yaml'))
account = yaml.load(accountfile)
except:
account = {}
lock = threading.RLock()
class Trader(object):
    """ Trade scheduling. """
traders = {}
    def __init__(self, exchange, username=None, password=None):
        """ Log in and cache the Trader object. """
with lock:
d = config[exchange]
if d['system'] == 'sysframe':
Client = SysframeClient
elif d['system'] == 'winner':
raise NotImplementedError
if username is None:
u = account[exchange][0]
username = u['username']
password = u['password']
if d.get('disabled'):
                raise ValueError('This exchange is disabled')
signature = (exchange, username, password)
if signature not in self.traders:
if not isinstance(d['tradeweb_url'], list):
d['tradeweb_url'] = [d['tradeweb_url']]
self.client = Client(front_url=d['front_url'],
tradeweb_url=d['tradeweb_url'])
setattr(self.client, 'exchange', exchange)
self.client.login(username, password)
                self.traders[signature] = self
else:
old = self.traders[signature]
self.client = old.client
self.client.keep_alive()
def __getattr__(self, key):
if key in self.__dict__:
return self.__dict__[key]
else:
return getattr(self.client, key)
@property
def server_time(self):
t0 = time.time()
        return t0 + self.client.time_offset + self.client.latency * 3
if __name__ == '__main__':
pass
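A worked example of the server_time estimate above, with hypothetical numbers: for time_offset = 2.0 s and latency = 0.1 s, a local reading t0 yields an estimated server clock of t0 + 2.0 + 0.1 * 3 = t0 + 2.3 seconds.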
| richardmin97/PaintTheWorld | Server/painttheworld/game.py | Python | gpl-3.0 | 8,968 | 0.005352 |
# painttheworld/game.py
#
# Represent and track the current game state.
import numpy as np
import datetime
import math
from painttheworld import constants
from painttheworld.constants import m1, m2, m3, m4, p1, p2, p3
''' Note that Latitude is North/South and Longitude is West/East'''
class GameState:
"""Keeps track of which teams have colored which areas of the map.
The map is a grid that's represented by a 2D array containing values
corresponding to which team controls that area/block of the map. Clients
perform the necessary GPS -> grid coordinate calculations and send their
game state updates in grid coordinates via the update() method.
TODO: might have to add coordinate transformations to our methods since
(0,0) is technically the center of our grid.
"""
def __init__(self, radius, gridsize):
"""Create a GameState object.
Args:
radius: the number of grid blocks from the center block in the
vertical/horizontal direction.
gridsize: The dimensions of a grid tile, in feet. This should be the
edge length
"""
size = 2*radius + 1
self.grid = np.zeros((size, size), dtype=np.int8)
self.radius = radius
self.gridsize = gridsize
self.user_count = 0
self.user_coords = []
self.user_grid = []
self.user_grid.extend([np.zeros((size, size), dtype=np.int8) for i in range(constants.lobby_size)])
def start_game(self):
"""Initialize the starting position of the grid.
        This calculates the center coordinate by averaging the longitudes and
        latitudes of all people (this might not work too well, as that's not
really how nautical miles work). Additionally, it sets the start time to
be 3 seconds from now.
"""
        self.center_coord = np.mean(self.user_coords, axis=0)
self.conversion_rates = self.conversion_rates(self.center_coord)
self.start_time = datetime.datetime.now() + datetime.timedelta(seconds=3)
self.end_time = self.start_time + datetime.timedelta(minutes=3)
def update(self, coord, team):
"""Update the game state array."""
x, y = coord
self.grid[x][y] = team
def project(self, lon, lat):
""" Casts a GPS coordinate onto the grid, which has it's central
locations defined by center_coord.
"""
        vert = GameState.haversine(self.center_coord[1], self.center_coord[0], self.center_coord[1], lat)  # longitude is east-west, we ensure that's the same
horiz = GameState.haversine(self.center_coord[1], self.center_coord[0], lon, self.center_coord[0])
""" Vectorizes the latitude. The degree ranges from -90 to 90.
This latitude conversion doesn't handle poles.
I'm not sure how to handle you playing the game at the north and south pole.
"""
if lat > self.center_coord[0]:
vert = -vert
""" Vectorizes the longitude. The degree ranges from -180 to 180.
There's three cases:
1. They're both in the same hemisphere (east/west)
2. They cross over the 0 degree line
3. They cross over the 180 degree line
Case (1):
Check for case 1 by ensuring that the signs are identical.
If the longitude of the location is less than the longitude of the cenral
location, that means that we need to move left in the array.
We change the sign to be negative.
Case (2) + (3):
There's two cases here, where the signs are differing.
To determine which line we're crossing, the absolute value of the difference
in Longitudes is taken. If the difference >180,
that implies that the 180 degree is being crossed. Otherwise, it's the 0 degree line.
Case (2):
In case (2), if the longitude of the central point is negative, the distance must be positive.
If the longitude of the central point is positive, the distance must be negative.
Case (3):
In case (3), if the longitude of the central point is negative, the distance must be negative.
If the longitude of the central point is positive, the distance must be positive.
"""
if np.sign(self.center_coord[1]) == np.sign(lon): # Case 1
if lon > self.center_coord[1]:
horiz = -horiz
if math.fabs(self.center_coord[1] - lon) < 180: # Case 2
if self.center_coord[1] >= 0:
horiz = -horiz
elif self.center_coord[1] < 0: # Case 3
horiz = -horiz
horiz = math.floor(horiz * 1000 / constants.gridsize)
vert = math.floor(vert * 1000 / constants.gridsize)
return np.add((self.radius + 1, self.radius + 1), (horiz, vert))
def add_user(self, lat, lon):
""" Adds a user and their starting location to the grid.
Returns the user id number assosciated with that user, as well as their
locations. If there are enough users to begin the game, it initializes
the game variables.
"""
if self.user_count < constants.lobby_size:
self.user_count += 1
self.user_coords.append((float(lat), float(lon)))
if self.user_count == constants.lobby_size:
self.start_game()
return self.user_count-1
else:
return -1
def update_user(self, id, lon, lat):
currtime = datetime.datetime.now()
if self.start_time < currtime < self.end_time:
gridloc = self.project(lon, lat)
out_of_bounds = not self.inside_grid(gridloc)
if not out_of_bounds:
self.grid[gridloc[0]][gridloc[1]] = constants.Team.findTeam(id)
returngrid = self.diff(self.user_grid[id], self.grid)
np.copyto(self.user_grid[id], self.grid)
return returngrid, out_of_bounds
else:
if self.start_time > currtime:
raise RuntimeError('Game hasn\'t started.')
else:
raise RuntimeError('Game over.')
def inside_grid(self, coord):
lowest_coord = (0,0)
highest_coord = (constants.radius*2 + 1, constants.radius*2 + 1)
lower_bound = np.all(np.greater_equal(coord, lowest_coord))
upper_bound = np.all(np.less_equal(coord, highest_coord))
return lower_bound and upper_bound
@staticmethod
def diff(a, b):
"""Calculate the deltas of two GameState objects.
a is the "older" GameState object
b is the "updated" GameState object
Returns:
List of coordinate/team pairings of the form ((x,y), team_color).
"""
diff = np.absolute(a - b)
coord = np.nonzero(diff)
val = diff[coord]
coord = map(tuple, np.transpose(coord)) # turn coord into (x,y) tuples
return list(zip(coord, val))
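    # Example (hypothetical 2x2 grids, not from the original): if
    # a = np.array([[0, 0], [0, 0]]) and b = np.array([[0, 2], [0, 0]]),
    # then diff(a, b) returns [((0, 1), 2)] -- the cell that changed and the
    # value it changed by.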
@staticmethod
    def conversion_rates(coord):
        """Calculates the conversion rates for 1 degree of latitude and
        longitude to a variety of measurements, returned in a dict.
        Args:
            coord: a tuple (latitude, longitude)
        Returns:
            dict of conversion rates for 1 degree of latitude/longitude to
            meters, feet, and miles
        """
        latitude = math.radians(coord[0])
        rates = {}
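        # Assumption (the coefficients are defined outside this excerpt):
        # m1..m4 and p1..p3 are presumably the standard series coefficients
        # for the length of one degree of latitude/longitude on the WGS84
        # ellipsoid, roughly m1=111132.92, m2=-559.82, m3=1.175, m4=-0.0023
        # and p1=111412.84, p2=-93.5, p3=0.118 (meters per degree).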
latlen = m1 + ( m2 * math.cos(2 * latitude) + \
m3 * math.cos(4 * latitude) + \
m4 * math.cos(6 * latitude) \
)
longlen = (p1 * math.cos(1 * latitude)) + \
(p2 * math.cos(3 * latitude)) + \
(p3 * math.cos(5 * latitude))
        rates['lat_meters'] = latlen
        rates['lat_feet'] = latlen * 3.28083333
        rates['lat_miles'] = rates['lat_feet'] / 5280
        rates['long_meters'] = longlen
        rates['long_feet'] = longlen * 3.28083333
|
SU-ECE-17-7/ibeis
|
ibeis/dev.py
|
Python
|
apache-2.0
| 32,707
| 0.003333
|
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
"""
DEV SCRIPT
This is a hacky script meant to be run mostly automatically with the option of
interactions.
dev.py is supposed to be a developer non-gui interface into the IBEIS software.
dev.py runs experiments and serves as a scratchpad for new code and quick scripts
TODO:
Test to find typical "good" descriptor scores. Find nearest neighbors and
    normalizers for each feature in a query image. Based on ground truth and
spatial verification mark feature matches as true or false. Visualize the
feature scores of good matches vs bad matches. Lowe shows the pdf of
correct matches and the PDF for incorrect matches. We should also show the
same thing.
Done:
Cache nearest neighbors so different parameters later in the pipeline dont
take freaking forever.
CommandLine:
python dev.py --wshow -t query --db PZ_MTEST --qaid 110 --cfg score_method:nsum prescore_method:nsum
python dev.py --wshow -t query --db PZ_MTEST --qaid 110
python dev.py --wshow -t query --db PZ_MTEST --qaid 110 --cfg fg_on=True
python dev.py --wshow -t query --db PZ_MTEST --qaid 110 --cfg
"""
# TODO: ADD COPYRIGHT TAG
from __future__ import absolute_import, division, print_function
import multiprocessing
import sys
#from ibeis._devscript import devcmd, DEVCMD_FUNCTIONS, DEVPRECMD_FUNCTIONS, DEVCMD_FUNCTIONS2, devcmd2
from ibeis._devscript import devcmd, devprecmd, DEVCMD_FUNCTIONS, DEVPRECMD_FUNCTIONS
import utool as ut
from utool.util_six import get_funcname
import utool
#from ibeis.algo.hots import smk
import plottool as pt
import ibeis
if __name__ == '__main__':
multiprocessing.freeze_support()
ibeis._preload()
#from ibeis.all_imports import * # NOQA
#utool.util_importer.dynamic_import(__name__, ('_devcmds_ibeis', None),
# developing=True)
from ibeis._devcmds_ibeis import * # NOQA
# IBEIS
from ibeis.init import main_helpers # NOQA
from ibeis.other import dbinfo # NOQA
from ibeis.expt import experiment_configs # NOQA
from ibeis.expt import harness # NOQA
from ibeis import params # NOQA
print, print_, printDBG, rrr, profile = utool.inject(__name__, '[dev]')
#------------------
# DEV DEVELOPMENT
#------------------
# This is where you write all of the functions that will become pristine
# and then go in _devcmds_ibeis.py
"""
./dev.py -e print_results --db PZ_Master1 -a varysize_pzm:dper_name=[1,2],dsize=1500 -t candidacy_k:K=1 --intersect_hack
./dev.py -e draw_rank_cdf -t baseline -a baseline --show --db PZ_Master1
./dev.py -e get_dbinfo --db PZ_Master1 --aid_list=baseline
./dev.py -e get_dbinfo --db PZ_MTEST
./dev.py -e get_dbinfo --db PZ_Master1 --aid_list=baseline --hackshow-unixtime --show
./dev.py -e get_dbinfo --db PZ_Master1 --hackshow-unixtime --show
"""
# Quick interface into specific registered doctests
REGISTERED_DOCTEST_EXPERIMENTS = [
('ibeis.expt.experiment_drawing', 'draw_case_timedeltas', ['timedelta_hist', 'timedelta_pie']),
('ibeis.expt.experiment_drawing', 'draw_match_cases', ['draw_cases', 'cases']),
('ibeis.expt.experiment_drawing', 'draw_casetag_hist', ['taghist']),
('ibeis.expt.old_storage', 'draw_results'),
('ibeis.expt.experiment_drawing', 'draw_rank_cdf', ['rank_cdf']),
('ibeis.other.dbinfo', 'get_dbinfo'),
('ibeis.other.dbinfo', 'latex_dbstats'),
('ibeis.other.dbinfo', 'show_image_time_distributions', ['db_time_hist']),
('ibeis.expt.experiment_drawing', 'draw_rank_surface', ['rank_surface']),
('ibeis.expt.experiment_helpers', 'get_annotcfg_list', ['print_acfg']),
('ibeis.expt.experiment_printres', 'print_results', ['printres', 'print']),
('ibeis.expt.experiment_printres', 'print_latexsum', ['latexsum']),
('ibeis.dbio.export_subset', 'export_annots'),
('ibeis.expt.experiment_drawing', 'draw_annot_scoresep', ['scores', 'scores_good', 'scores_all']),
]
def _exec_doctest_func(modname, funcname):
module = ut.import_modname(modname)
func = module.__dict__[funcname]
testsrc = ut.get_doctest_examples(func)[0][0]
exec(testsrc, globals(), locals())
def _register_doctest_precmds():
from functools import partial
for tup in REGISTERED_DOCTEST_EXPERIMENTS:
modname, funcname = tup[:2]
aliases = tup[2] if len(tup) == 3 else []
aliases += [funcname]
_doctest_func = partial(_exec_doctest_func, modname, funcname)
devprecmd(*aliases)(_doctest_func)
_register_doctest_precmds()
@devcmd('tune', 'autotune')
def tune_flann(ibs, qaid_list, daid_list=None):
r"""
CommandLine:
python dev.py -t tune --db PZ_MTEST
python dev.py -t tune --db GZ_ALL
python dev.py -t tune --db GIR_Tanya
python dev.py -t tune --db PZ_Master0
Example:
>>> # ENABLE_DOCTEST
>>> from ibeis._devscript import * # NOQA
>>> # build test data
>>> # execute function
>>> result = func_wrapper()
>>> # verify results
>>> print(result)
"""
all_aids = ibs.get_valid_aids()
vecs = np.vstack(ibs.get_annot_vecs(all_aids))
    print('Tuning flann for species={species}:'.format(species=ibs.get_database_species(all_aids)))
tuned_params = vt.tune_flann(vecs,
target_precision=.98,
build_weight=0.05,
memory_weight=0.00,
sample_fraction=0.1)
    return tuned_params
#tuned_params2 = vt.tune_flann(vecs,
# target_precision=.90,
# build_weight=0.001,
# memory_weight=0.00,
# sample_fraction=0.5)
#tuned_params2
@devcmd('incremental', 'inc')
def incremental_test(ibs, qaid_list, daid_list=None):
"""
Adds / queries new images one at a time to a clean test database.
Tests the complete system.
Args:
ibs (list) : IBEISController object
qaid_list (list) : list of annotation-ids to query
CommandLine:
python dev.py -t inc --db PZ_MTEST --qaid 1:30:3 --cmd
python dev.py --db PZ_MTEST --allgt --cmd
python dev.py --db PZ_MTEST --allgt -t inc
python dev.py -t inc --db PZ_MTEST --qaid 1:30:3 --cmd
python dev.py -t inc --db GZ_ALL --ninit 100 --noqcache
python dev.py -t inc --db PZ_MTEST --noqcache --interactive-after 40
        python dev.py -t inc --db PZ_Master0 --noqcache --interactive-after 10000 --ninit 400
Example:
>>> from ibeis.all_imports import * # NOQA
>>> ibs = ibeis.opendb('PZ_MTEST')
>>> qaid_list = ibs.get_valid_aids()
>>> daid_list = None
"""
from ibeis.algo.hots import automated_matcher
ibs1 = ibs
    num_initial = ut.get_argval('--ninit', type_=int, default=0)
return automated_matcher.incremental_test(ibs1, num_initial)
@devcmd('inspect')
def inspect_matches(ibs, qaid_list, daid_list):
print('<inspect_matches>')
from ibeis.gui import inspect_gui
return inspect_gui.test_review_widget(ibs, qaid_list, daid_list)
def get_ibslist(ibs):
print('[dev] get_ibslist')
ibs_GV = ibs
ibs_RI = ibs.clone_handle(nogravity_hack=True)
ibs_RIW = ibs.clone_handle(nogravity_hack=True, gravity_weighting=True)
ibs_list = [ibs_GV, ibs_RI, ibs_RIW]
return ibs_list
@devcmd('gv_scores')
def compgrav_draw_score_sep(ibs, qaid_list, daid_list):
print('[dev] compgrav_draw_score_sep')
ibs_list = get_ibslist(ibs)
for ibs_ in ibs_list:
draw_annot_scoresep(ibs_, qaid_list)
#--------------------
# RUN DEV EXPERIMENTS
#--------------------
#def run_registered_precmd(precmd_name):
# # Very hacky way to run just a single registered precmd
# for (func_aliases, func) in DEVPRECMD_FUNCTIONS:
# for aliases in func_aliases:
# ret = precmd_name in input_precmd_list
# if ret:
# func()
def run_devprecmds():
"""
Looks for pre-tests specified with the -t flag and runs them
"""
#input_precmd_list = params.args.tests[:]
input_prec
|
gustaveroussy/98drivers
|
scripts/kart_racer.py
|
Python
|
mit
| 1,836
| 0.052288
|
#!/usr/bin/env python3
import argparse
import csv
import tabix
def chromosom_sizes(hg19_size_file):
    ''' Return chromosome sizes, e.g. results["chr13"] = 234324 '''
results = {}
with open(hg19_size_file) as file:
reader = csv.reader(file, delimiter="\t")
for line in reader:
results[line[0]] = int(line[1])
return results
def kart_racer(sample, genom, base_speed = 0, deceleration = 1, acceleration = 1, allow_negative = False):
# get chromosom size
sizes = chromosom_sizes(genom)
# get tabix variant file
tabix_file = tabix.open(sample)
# current speed
speed = 0.0 + base_speed
# test on chromosom 17
chromosom = "chr17"
    size = sizes[chromosom]
# Loop over genoms
for pos in range(0, size):
        # count how many mutations occur at this position
count = len([c for c in tabix_file.query(chromosom, pos, pos + 2)])
if count > 0 :
speed += count * acceleration
else:
if speed > 0:
speed -= deceleration
else:
speed = 0.0
print(chromosom, pos, pos +1, speed, sep="\t")
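# Worked trace (hypothetical values, not from the original): with
# acceleration=1 and deceleration=0.5, variant counts [2, 0, 0, 1] at four
# successive positions drive the speed 2.0 -> 1.5 -> 1.0 -> 2.0.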
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Compute speed of mutation ",
usage="kart_racer.py file.bed.gz -g hg19.sizes -a 1 -b 0.1 "
)
parser.add_argument("sample", type=str, help="tabix file")
parser.add_argument("-g","--genom", type=str, help="genom size ")
parser.add_argument("-b","--base_speed", type=int, default = 0)
parser.add_argument("-d","--deceleration", type=float, default = 0.01, help="decrease speed by x each empty base")
parser.add_argument("-a","--acceleration", type=float, default = 1, help="accelerate by 1 each variant")
args = parser.parse_args()
kart_racer(args.sample, args.genom, args.base_speed , args.deceleration , args.acceleration, False )
|
tchakravarty/PythonExamples
|
Code/kirk2015/chapter3/bivariate_normal.py
|
Python
|
apache-2.0
| 582
| 0.008591
|
#==============================================================================
# purpose: bivariate normal distribution simulation using PyMC
# author: tirthankar chakravarty
# created: 1/7/15
# revised:
# comments:
# 1. install PyMC
# 2. not clear on why we are helping the sampler along. We want to sample from the
# bivariate
#==============================================================================
import random
import numpy as np
import matplotlib.pyplot as mpl
sample_size = int(5e5)  # np.zeros requires an integer shape
rho = 0.9
mean = [10, 20]
std_dev = [1, 1]
biv_random = np.zeros([sample_size, 2])
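
# Illustrative sketch (not in the original, which is truncated here): the same
# joint distribution can be drawn directly with NumPy, using the covariance
# implied by rho, as a sanity check against the MCMC approach.
cov = [[std_dev[0] ** 2, rho * std_dev[0] * std_dev[1]],
       [rho * std_dev[0] * std_dev[1], std_dev[1] ** 2]]
biv_random = np.random.multivariate_normal(mean, cov, size=sample_size)
mpl.hist2d(biv_random[:, 0], biv_random[:, 1], bins=100)
mpl.show()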
|
jddixon/pysloc
|
tests/test_haskell_comments.py
|
Python
|
mit
| 1,084
| 0
|
#!/usr/bin/env python3
# testHaskellComments.py
""" Test line counter for the Haskell programmig language. """
import unittest
from argparse import Namespace
from pysloc import count_lines_double_dash, MapHolder
class TestHaskellComments(unittest.TestCase):
""" Test line counter for the Haskell programmig language. """
def setUp(self):
pass
    def tearDown(self):
pass
# utility functions #############################################
# actual unit tests #############################################
def test_name_to_func_map(self):
"""
Verify line counts returned from known Haskell file are correct.
"""
test_file = 'tests/commentsForHaskell'
options = Namespace()
options.already = set()
options.ex_re = None
options.map_holder = MapHolder()
options.verbose = False
lines, sloc = count_lines_double_dash(test_file, options, 'occ')
self.assertEqual(lines, 27)
self.assertEqual(sloc, 10)
if __name__ == '__main__':
unittest.main()
|
neuroo/equip
|
equip/analysis/dataflow/lattice.py
|
Python
|
apache-2.0
| 1,624
| 0.011084
|
# -*- coding: utf-8 -*-
"""
equip.analysis.dataflow.lattice
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The base lattice implementation (mostly used as semi-lattice).
:copyright: (c) 2014 by Romain Gaucher (@rgaucher)
:license: Apache 2, see LICENSE for more details.
"""
class Lattice(object):
"""
Interface for a lattice element. Practically, we only use the semi-lattice
with the join (V) operator.
"""
def __init__(self):
pass
def init_state(self):
"""
Returns a new initial state.
"""
pass
def join_all(self, *states):
result_state = None
for state in states:
if result_state is None:
result_state = state
else:
result_state = self.join(result_state, state)
return result_state
def join(self, state1, state2):
"""
Returns the result of the V (supremum) between the two states.
"""
pass
    def meet_all(self, *states):
result_state = None
for state in states:
if result_state is None:
result_state = state
else:
result_state = self.meet(result_state, state)
return result_state
def meet(self, state1, state2):
"""
        Returns the result of the meet (infimum) between the two states.
"""
pass
def lte(self, state1, state2):
"""
This is the <= operator between two lattice elements (states) as defined by:
state1 <= state2 and state2 <= state1 <=> state1 == state2
"""
pass
def top(self):
"""
The top of the lattice.
"""
pass
def bottom(self):
"""
The bottom of the lattice.
"""
pass
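
# Illustrative sketch (not part of the original module): a concrete
# semi-lattice over sets, where join is union, meet is intersection, and
# bottom is the empty set. Shown only to make the interface above concrete;
# the class name and state representation here are hypothetical.
class SetLattice(Lattice):
    def init_state(self):
        return set()

    def join(self, state1, state2):
        return state1 | state2

    def meet(self, state1, state2):
        return state1 & state2

    def lte(self, state1, state2):
        return state1 <= state2

    def bottom(self):
        return set()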
|
Alberto-Beralix/Beralix
|
i386-squashfs-root/usr/lib/python2.7/dist-packages/google/protobuf/descriptor_pb2.py
|
Python
|
gpl-3.0
| 63
| 0.015873
|
../../../../../share/pyshared/google/protobuf/descriptor_pb2.py
|
jondelmil/brainiac
|
config/settings/local.py
|
Python
|
bsd-3-clause
| 1,961
| 0.00051
|
# -*- coding: utf-8 -*-
'''
Local settings
- Run in Debug mode
- Use console backend for emails
- Add Django Debug Toolbar
- Add django-extensions as app
'''
from .common import * # noqa
# DEBUG
# ------------------------------------------------------------------------------
DEBUG = env.bool('DJANGO_DEBUG', default=True)
TEMPLATES[0]['OPTIONS']['debug'] = DEBUG
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key only used for development and testing.
SECRET_KEY = env("DJANGO_SECRET_KEY", default='CHANGEME!!!f%--!hz6th_2^07!%1alq(_z+az5-siy#+l!)!mcc$zjkae=s0')
# Mail settings
# ------------------------------------------------------------------------------
EMAIL_HOST = 'localhost'
EMAIL_PORT = 1025
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND',
                    default='django.core.mail.backends.console.EmailBackend')
# CACHING
# ------------------------------------------------------------------------------
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': ''
}
}
# django-debug-toolbar
# ------------------------------------------------------------------------------
MIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware',)
INSTALLED_APPS += ('debug_toolbar', )
INTERNAL_IPS = ('127.0.0.1', '10.0.2.2',)
DEBUG_TOOLBAR_CONFIG = {
'DISABLE_PANELS': [
'debug_toolbar.panels.redirects.RedirectsPanel',
],
'SHOW_TEMPLATE_CONTEXT': True,
}
# django-extensions
# ------------------------------------------------------------------------------
INSTALLED_APPS += ('django_extensions', )
# TESTING
# ------------------------------------------------------------------------------
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# Your local stuff: Below this line define 3rd party library settings
|
asedunov/intellij-community
|
python/testData/completion/epydocTagsMiddle.after.py
|
Python
|
apache-2.0
| 32
| 0.03125
|
def foo(bar):
    """ @param """
|
plotly/plotly.py
|
packages/python/plotly/plotly/validators/surface/hoverlabel/font/_sizesrc.py
|
Python
|
mit
| 423
| 0.002364
|
import _plotly_utils.basevalidators
class SizesrcValidator(_plotly_utils.basevalidators.SrcValidator):
    def __init__(
        self, plotly_name="sizesrc", parent_name="surface.hoverlabel.font", **kwargs
):
super(SizesrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs
)
|
DevinDewitt/pyqt5
|
examples/quick/models/objectlistmodel/objectlistmodel.py
|
Python
|
gpl-3.0
| 3,424
| 0.00847
|
#!/usr/bin/env python
#############################################################################
##
## Copyright (C) 2013 Riverbank Computing Limited.
## Copyright (C) 2013 Digia Plc and/or its subsidiary(-ies).
##
## This file is part of the examples of PyQt.
##
## $QT_BEGIN_LICENSE:BSD$
## You may use this file under the terms of the BSD license as follows:
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of Digia Plc and its Subsidiary(-ies) nor the names
## of its contributors may be used to endorse or promote products derived
## from this software without specific prior written permission.
##
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
## $QT_END_LICENSE$
##
#############################################################################
from PyQt5.QtCore import pyqtProperty, pyqtSignal, QObject, QUrl
from PyQt5.QtGui import QGuiApplication
from PyQt5.QtQuick import QQuickView
import objectlistmodel_rc
class DataObject(QObject):
nameChanged = pyqtSignal()
@pyqtProperty(str, notify=nameChanged)
def name(self):
return self._name
@name.setter
def name(self, name):
if self._name != name:
self._name = name
self.nameChanged.emit()
colorChanged = pyqtSignal()
@pyqtProperty(str, notify=colorChanged)
def color(self):
return self._color
@color.setter
def color(self, color):
if self._color != color:
self._color = color
self.colorChanged.emit()
def __init__(self, name='', color='', parent=None):
super(DataObject, self).__init__(parent)
self._name = name
self._color = color
if __name__ == '__main__':
import sys
app = QGuiApplication(sys.argv)
dataList = [DataObject("Item 1", 'red'),
DataObject("Item 2", 'green'),
DataObject("Item 3", 'blue'),
DataObject("Item 4", 'yellow')]
view = QQuickView()
view.setResizeMode(QQuickView.SizeRootObjectToView)
ctxt = view.rootContext()
ctxt.setContextProperty('myModel', dataList)
view.setSource(QUrl('qrc:view.qml'))
view.show()
sys.exit(app.exec_())
|
sean-/ansible
|
lib/ansible/utils/display.py
|
Python
|
gpl-3.0
| 6,949
| 0.003022
|
# (c) 2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# FIXME: copied mostly from old code, needs py3 improvements
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import textwrap
import os
import random
import subprocess
import sys
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.utils.color import stringc
class Display:
def __init__(self, verbosity=0):
self.verbosity = verbosity
# list of all deprecation messages to prevent duplicate display
self._deprecations = {}
self._warns = {}
self._errors = {}
self.cowsay = None
self.noncow = os.getenv("ANSIBLE_COW_SELECTION",None)
self.set_cowsay_info()
def set_cowsay_info(self):
if not C.ANSIBLE_NOCOWS:
if os.path.exists("/usr/bin/cowsay"):
self.cowsay = "/usr/bin/cowsay"
elif os.path.exists("/usr/games/cowsay"):
self.cowsay = "/usr/games/cowsay"
elif os.path.exists("/usr/local/bin/cowsay"):
# BSD path for cowsay
self.cowsay = "/usr/local/bin/cowsay"
elif os.path.exists("/opt/local/bin/cowsay"):
# MacPorts path for cowsay
self.cowsay = "/opt/local/bin/cowsay"
if self.cowsay and self.noncow == 'random':
cmd = subprocess.Popen([self.cowsay, "-l"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(out, err) = cmd.communicate()
cows = out.split()
cows.append(False)
self.noncow = random.choice(cows)
def display(self, msg, color=None, stderr=False, screen_only=False, log_only=False):
msg2 = msg
if color:
msg2 = stringc(msg, color)
if not log_only:
if not stderr:
try:
print(msg2)
except UnicodeEncodeError:
print(msg2.encode('utf-8'))
else:
try:
print(msg2, file=sys.stderr)
except UnicodeEncodeError:
print(msg2.encode('utf-8'), file=sys.stderr)
if C.DEFAULT_LOG_PATH != '':
            msg = msg.lstrip("\n")
# FIXME: logger stuff needs to be implemented
#if not screen_only:
# if color == 'red':
# logger.error(msg)
# else:
# logger.info(msg)
def vv(self, msg, host=None):
return self.verbose(msg, host=host, caplevel=1)
def vvv(self, msg, host=None):
return self.verbose(msg, host=host, caplevel=2)
def vvvv(self, msg, host=None):
return self.verbose(msg, host=host, caplevel=3)
def vvvvv(self, msg, host=None):
return self.verbose(msg, host=host, caplevel=4)
def vvvvvv(self, msg, host=None):
return self.verbose(msg, host=host, caplevel=5)
def verbose(self, msg, host=None, caplevel=2):
# FIXME: this needs to be implemented
#msg = utils.sanitize_output(msg)
if self.verbosity > caplevel:
if host is None:
self.display(msg, color='blue')
else:
self.display("<%s> %s" % (host, msg), color='blue', screen_only=True)
def deprecated(self, msg, version, removed=False):
''' used to print out a deprecation message.'''
if not removed and not C.DEPRECATION_WARNINGS:
return
if not removed:
if version:
new_msg = "\n[DEPRECATION WARNING]: %s. This feature will be removed in version %s." % (msg, version)
else:
new_msg = "\n[DEPRECATION WARNING]: %s. This feature will be removed in a future release." % (msg)
new_msg = new_msg + " Deprecation warnings can be disabled by setting deprecation_warnings=False in ansible.cfg.\n\n"
else:
raise AnsibleError("[DEPRECATED]: %s. Please update your playbooks." % msg)
wrapped = textwrap.wrap(new_msg, 79)
new_msg = "\n".join(wrapped) + "\n"
if new_msg not in self._deprecations:
self.display(new_msg, color='purple', stderr=True)
self._deprecations[new_msg] = 1
def warning(self, msg):
new_msg = "\n[WARNING]: %s" % msg
wrapped = textwrap.wrap(new_msg, 79)
new_msg = "\n".join(wrapped) + "\n"
if new_msg not in self._warns:
self.display(new_msg, color='bright purple', stderr=True)
self._warns[new_msg] = 1
def system_warning(self, msg):
if C.SYSTEM_WARNINGS:
self.warning(msg)
def banner(self, msg, color=None):
'''
Prints a header-looking line with stars taking up to 80 columns
of width (3 columns, minimum)
'''
if self.cowsay:
try:
self.banner_cowsay(msg)
return
except OSError:
# somebody cleverly deleted cowsay or something during the PB run. heh.
pass
msg = msg.strip()
star_len = (80 - len(msg))
if star_len < 0:
star_len = 3
stars = "*" * star_len
self.display("\n%s %s" % (msg, stars), color=color)
def banner_cowsay(self, msg, color=None):
if ": [" in msg:
msg = msg.replace("[","")
if msg.endswith("]"):
msg = msg[:-1]
runcmd = [self.cowsay,"-W", "60"]
if self.noncow:
runcmd.append('-f')
runcmd.append(self.noncow)
runcmd.append(msg)
cmd = subprocess.Popen(runcmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(out, err) = cmd.communicate()
self.display("%s\n" % out, color=color)
def error(self, msg, wrap_text=True):
if wrap_text:
new_msg = "\n[ERROR]: %s" % msg
wrapped = textwrap.wrap(new_msg, 79)
new_msg = "\n".join(wrapped) + "\n"
else:
new_msg = msg
if new_msg not in self._errors:
self.display(new_msg, color='red', stderr=True)
self._errors[new_msg] = 1
|
emmanuelle/scikits.image
|
skimage/graph/setup.py
|
Python
|
bsd-3-clause
| 1,429
| 0.0007
|
#!/usr/bin/env python
from skimage._build import cython
import os.path
base_path = os.path.abspath(os.path.dirname(__file__))
def configuration(parent_package='', top_path=None):
    from numpy.distutils.misc_util import Configuration, get_numpy_include_dirs
config = Configuration('graph', parent_package, top_path)
config.add_data_dir('tests')
# This function tries to create C files from the given .pyx files. If
# it fails, try to build with pre-generated .c files.
cython(['_spath.pyx'], working_path=base_path)
cython(['_mcp.pyx'], working_path=base_path)
cython(['heap.pyx'], working_path=base_path)
config.add_extension('_spath', sources=['_spath.c'],
include_dirs=[get_numpy_include_dirs()])
config.add_extension('_mcp', sources=['_mcp.c'],
include_dirs=[get_numpy_include_dirs()])
config.add_extension('heap', sources=['heap.c'],
include_dirs=[get_numpy_include_dirs()])
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(maintainer='scikits-image Developers',
maintainer_email='scikits-image@googlegroups.com',
description='Graph-based Image-processing Algorithms',
url='https://github.com/scikits-image/scikits-image',
license='Modified BSD',
**(configuration(top_path='').todict())
)
|
electrumalt/electrum-doge
|
scripts/merchant/merchant.py
|
Python
|
gpl-3.0
| 9,336
| 0.008462
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 thomasv@gitorious
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import time, sys, socket, os
import threading
import urllib2
import json
import Queue
import sqlite3
import electrum_doge as electrum
electrum.set_verbosity(False)
import ConfigParser
config = ConfigParser.ConfigParser()
config.read("merchant.conf")
my_password = config.get('main','password')
my_host = config.get('main','host')
my_port = config.getint('main','port')
database = config.get('sqlite3','database')
received_url = config.get('callback','received')
expired_url = config.get('callback','expired')
cb_password = config.get('callback','password')
wallet_path = config.get('electrum','wallet_path')
xpub = config.get('electrum','xpub')
pending_requests = {}
num = 0
def check_create_table(conn):
global num
c = conn.cursor()
c.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='electrum_payments';")
data = c.fetchall()
if not data:
c.execute("""CREATE TABLE electrum_payments (address VARCHAR(40), amount FLOAT, confirmations INT(8), received_at TIMESTAMP, expires_at TIMESTAMP, paid INT(1), processed INT(1));""")
conn.commit()
c.execute("SELECT Count(address) FROM 'electrum_payments'")
num = c.fetchone()[0]
print "num rows", num
def row_to_dict(x):
return {
'id':x[0],
'address':x[1],
'amount':x[2],
'confirmations':x[3],
'received_at':x[4],
'expires_at':x[5],
'paid':x[6],
'processed':x[7]
}
# this process detects when addresses have received payments
def on_wallet_update():
for addr, v in pending_requests.items():
h = wallet.history.get(addr, [])
requested_amount = v.get('requested')
requested_confs = v.get('confirmations')
value = 0
for tx_hash, tx_height in h:
tx = wallet.transactions.get(tx_hash)
if not tx: continue
if wallet.verifier.get_confirmations(tx_hash) < requested_confs: continue
for o in tx.outputs:
o_address, o_value = o
if o_address == addr:
value += o_value
s = (value)/1.e8
print "balance for %s:"%addr, s, requested_amount
if s>= requested_amount:
print "payment accepted", addr
out_queue.put( ('payment', addr))
stopping = False
def do_stop(password):
global stopping
if password != my_password:
return "wrong password"
stopping = True
return "ok"
def process_request(amount, confirmations, expires_in, password):
global num
if password != my_password:
return "wrong password"
try:
amount = float(amount)
confirmations = int(confirmations)
expires_in = float(expires_in)
except Exception:
return "incorrect parameters"
account = wallet.default_account()
addr = account.get_address(0, num)
num += 1
out_queue.put( ('request', (addr, amount, confirmations, expires_in) ))
return addr
def do_dump(password):
if password != my_password:
return "wrong password"
conn = sqlite3.connect(database);
cur = conn.cursor()
# read pending requests from table
cur.execute("SELECT oid, * FROM electrum_payments;")
data = cur.fetchall()
return map(row_to_dict, data)
def getrequest(oid, password):
oid = int(oid)
conn = sqlite3.connect(database);
cur = conn.cursor()
# read pending requests from table
cur.execute("SELECT oid, * FROM electrum_payments WHERE oid=%d;"%(oid))
data = cur.fetchone()
return row_to_dict(data)
def send_command(cmd, params):
import jsonrpclib
server = jsonrpclib.Server('http://%s:%d'%(my_host, my_port))
try:
f = getattr(server, cmd)
except socket.error:
print "Server not running"
return 1
try:
out = f(*params)
except socket.error:
print "Server not running"
return 1
print json.dumps(out, indent=4)
return 0
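# Example invocation (hypothetical values), matching the __main__ dispatch at
# the bottom of this file: `python merchant.py request 0.1 2 60` asks the
# running daemon for a fresh address expecting 0.1 coins with 2 confirmations
# within 60 minutes.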
def db_thread():
conn = sqlite3.connect(database);
    # create table if needed
    check_create_table(conn)
    while not stopping:
cur = conn.cursor()
# read pending requests from table
cur.execute("SELECT address, amount, confirmations FROM electrum_payments WHERE paid IS NULL;")
data = cur.fetchall()
# add pending requests to the wallet
for item in data:
addr, amount, confirmations = item
if addr in pending_requests:
continue
else:
with wallet.lock:
print "subscribing to %s"%addr
pending_requests[addr] = {'requested':float(amount), 'confirmations':int(confirmations)}
wallet.synchronizer.subscribe_to_addresses([addr])
wallet.up_to_date = False
try:
cmd, params = out_queue.get(True, 10)
except Queue.Empty:
cmd = ''
if cmd == 'payment':
addr = params
# set paid=1 for received payments
print "received payment from", addr
cur.execute("update electrum_payments set paid=1 where address='%s'"%addr)
elif cmd == 'request':
# add a new request to the table.
addr, amount, confs, minutes = params
sql = "INSERT INTO electrum_payments (address, amount, confirmations, received_at, expires_at, paid, processed)"\
+ " VALUES ('%s', %f, %d, datetime('now'), datetime('now', '+%d Minutes'), NULL, NULL);"%(addr, amount, confs, minutes)
print sql
cur.execute(sql)
# set paid=0 for expired requests
cur.execute("""UPDATE electrum_payments set paid=0 WHERE expires_at < CURRENT_TIMESTAMP and paid is NULL;""")
# do callback for addresses that received payment or expired
cur.execute("""SELECT oid, address, paid from electrum_payments WHERE paid is not NULL and processed is NULL;""")
data = cur.fetchall()
for item in data:
oid, address, paid = item
paid = bool(paid)
headers = {'content-type':'application/json'}
data_json = { 'address':address, 'password':cb_password, 'paid':paid }
data_json = json.dumps(data_json)
url = received_url if paid else expired_url
if not url:
continue
req = urllib2.Request(url, data_json, headers)
try:
response_stream = urllib2.urlopen(req)
print 'Got Response for %s' % address
cur.execute("UPDATE electrum_payments SET processed=1 WHERE oid=%d;"%(oid))
except urllib2.HTTPError:
print "cannot do callback", data_json
except ValueError, e:
print e
print "cannot do callback", data_json
conn.commit()
conn.close()
print "database closed"
if __name__ == '__main__':
if len(sys.argv) > 1:
cmd = sys.argv[1]
params = sys.argv[2:] + [my_password]
ret = send_command(cmd, params)
sys.exit(ret)
# start network
c = electrum.SimpleConfig({'wallet_path':wallet_path})
daemon_socket = electrum.daemon.get_daemon(c,True)
network = electrum.NetworkProxy(daemon_socket,config)
network.start()
# wait until connected
while network.is_connecting():
|
srikary/sous-chef
|
modules/robotic_arm.py
|
Python
|
gpl-2.0
| 7,751
| 0.011869
|
from drivers.servo_driver import Servo
import submodules.stepper_axis as stepper_axis
from math import atan, degrees
import time
class RoboticArm:
# Dimensions of the Arm
vertical_offset_mm = 50
vertical_arm_mm = 100
horizontal_arm_mm = 100
level_arm_len= 20
claw_offset_to_center = 10
small_cup_positions = [(100, 0, 30, 30, 30),
(100, 0, 30, 30, 30),
(100, 0, 30, 30, 30),
(100, 0, 30, 30, 30),
(100, 0, 30, 30, 30),
(100, 0, 30, 30, 30)]
large_cup_positions = [(100, 0, 30, 30, 30),
(100, 0, 30, 30, 30),
(100, 0, 30, 30, 30),
(100, 0, 30, 30, 30),
(100, 0, 30, 30, 30),
(100, 0, 30, 30, 30)]
# Positions of the cooking utensils by size.
# Increasing index of size implies increasing diameter.
utensil_positions_size = [(100, 180, 30, 30, 30),
(100, 180, 30, 30, 30),
(100, 180, 30, 30, 30)]
    # Positions of the components when the arm is at base
base_pos = (100, 0, 30, 30, 30)
# Dimensions of the Rail
max_rail_translation_mm = 520
def __init__(self, rail_dir_pin, rail_step_pin, rail_enable_pin,
                 base_servo_channel,
vertical_servo_channel,
horizontal_servo_channel,
level_servo_channel,
tipping_servo_channel,
grasp_servo_channel):
self.base_servo = Servo(base_servo_channel)
self.vertical_servo = Servo(vertical_servo_channel)
self.horizontal_servo = Servo(horizontal_servo_channel)
self.level_servo = Servo(level_servo_channel)
self.tipping_servo = Servo(tipping_servo_channel)
self.grasp_servo = Servo(grasp_servo_channel)
self.rail = stepper_axis.StepperAxis(rail_dir_pin, rail_step_pin, rail_enable_pin,
RoboticArm.max_rail_translation_mm,
inc_clockwise=False)
self.move_to_base()
# Begin unfinished methods
def move_rail_rotate_base(self, dest_pos):
self.at_base = False
self.rail.move_to(max(0, min(RoboticArm.max_rail_translation_mm, dest_pos[1])))
curr_y_pos = self.rail.get_curr_pos_mm()
# Need to rotate base servo to point from (0,curr_pos[1]) to (dest_pos[0],dest_pos[1])
# degrees to rotate clockwise with 0 pointing along x axis towards containers
        angle = degrees(atan(float(curr_y_pos - dest_pos[1])
                             / float(0 - dest_pos[0])))
        if angle < 0 or angle > 180:
            raise ValueError("Invalid rotation angle: " + str(angle) +
                             " degrees. Current position: " + str(curr_y_pos) +
                             " Dest position: " + str(dest_pos))
        self.base_servo.move_to(angle)
def get_corrected_vertical_angle(vertical_angle):
vertical_servo_offset = 15
vertical_angle_corrected = vertical_angle - vertical_servo_offset
if vertical_angle_corrected < 0:
vertical_angle_corrected = vertical_angle_corrected - vertical_servo_offset
return vertical_angle_corrected
# All angles are in degrees.
def get_claw_x_from_servo_pos(base_angle, vertical_angle, horizontal_angle):
""" Gets the position of the claw from the positions of the base, vertical
and horizontal servos. """
vertical_angle_corrected = get_corrected_vertical_angle(vertical_angle)
def get_claw_y_from_servo_pos(base_angle, vertical_angle, horizontal_angle):
None
# End of unfinished methods
def claw_grasp(self):
self.grasp_servo.move_to(10)
def claw_release(self):
self.grasp_servo.move_to(55)
def pour(self):
""" Pours the container assuming that it is positioned over the vessel and
waits for a few seconds before straightening it up. """
self.tipping_servo.move_to(180)
# Wait a few seconds for the contents to drain out.
time.sleep(5)
self.straighten_tipping_servo()
def straighten_tipping_servo(self):
self.tipping_servo.move_to(0)
# Rail stepper and base servo have been adjusted as necessary. This function
# Adjusts the three servos Vertical, Horizontal and Level servos to get the
# required Z and X from to_pos.
def execute_move_claw_xz(self, to_pos):
from_pos = (self.vertical_servo.get_current_pos(),
self.horizontal_servo.get_current_pos(),
self.level_servo.get_current_pos())
pos_delta = (to_pos[2] - from_pos[0],
to_pos[3] - from_pos[1],
to_pos[4] - from_pos[2])
max_delta = max(abs(pos_delta[0]), abs(pos_delta[1]), abs(pos_delta[2]))
for i in range(1, int(max_delta + 1)):
# Update each servo proportionally
update = [(float(i)/max_delta) * x for x in pos_delta]
new_pos = [int(update[j] + from_pos[j]) for j in range(0, len(from_pos))]
self.vertical_servo.move_to(new_pos[0])
self.horizontal_servo.move_to(new_pos[1])
self.level_servo.move_to(new_pos[2])
        self.vertical_servo.move_to(to_pos[2])
        self.horizontal_servo.move_to(to_pos[3])
        self.level_servo.move_to(to_pos[4])
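        # Worked example (hypothetical angles, not from the original): moving
        # the three servos from (0, 0, 0) to targets (10, 20, 30) gives
        # max_delta = 30 interpolation steps; at step 15 the servos sit at
        # roughly (5, 10, 15), i.e. each covers its delta proportionally.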
def move_to_cup(self, is_small_cup, cup_num):
        if is_small_cup:
            if cup_num < 0 or cup_num > 5:
                raise ValueError("Invalid small cup number: " + str(cup_num))
        else:
            if cup_num < 0 or cup_num > 5:
                raise ValueError("Invalid large cup number: " + str(cup_num))
# Positions of the various servos.
if is_small_cup:
pos_for_cup = RoboticArm.small_cup_positions[cup_num]
else:
pos_for_cup = RoboticArm.large_cup_positions[cup_num]
# Move the arm away from stirrer before moving rail.
self.base_servo.move_to(pos_for_cup[1])
# Arm out of collision path with stirrer. So, we can move the rail.
self.rail.move_to(pos_for_cup[0])
# Position claw around the cup.
self.execute_move_claw_xz(pos_for_cup)
        self.at_base = False
def move_to_utensil(self, utensil_size):
if utensil_size < 0 or utensil_size > 2:
raise ValueError("Invalid utensil size:" + utensil_size)
# Positions of the various servos.
desired_servo_pos = RoboticArm.utensil_positions_size[utensil_size]
self.execute_move_claw_xz(desired_servo_pos)
self.rail.move_to(desired_servo_pos[0])
self.base_servo.move_to(desired_servo_pos[1])
self.at_base = False
def move_to_base(self):
self.execute_move_claw_xz(RoboticArm.base_pos)
self.base_servo.move_to(RoboticArm.base_pos[1])
self.rail.move_to(RoboticArm.base_pos[0])
self.rail.disable()
        self.at_base = True
def is_at_base(self):
return self.at_base
# API method exposed by the RoboticArm
def add_cup(self, is_small_cup, cup_num, utensil_size):
# Init
self.straighten_tipping_servo()
self.claw_release()
# Move and position around cup and grasp it
self.move_to_cup(is_small_cup, cup_num)
self.claw_grasp()
self.move_to_utensil(utensil_size)
# Pours and straightens.
self.pour()
self.move_to_cup(is_small_cup, cup_num)
self.claw_release()
self.move_to_base()
self.rail.disable()
def shutdown(self):
self.move_to_base()
if (__name__ == "__main__"):
arm = RoboticArm(6, 5,
0, # Base
1, # Vertical
2, # Horizontal
3, # Level/Tilt
4, # Tipping
5) # Claw
#arm.move_to_base()
#arm.add_cup(True, 1, 0)
#arm.add_cup(True, 2, 0)
#arm.add_cup(True, 3, 0)
#arm.add_cup(True, 4, 0)
#arm.add_cup(True, 5, 0)
|
sippy/b2bua
|
sippy/SipURL.py
|
Python
|
bsd-2-clause
| 12,172
| 0.011009
|
# Copyright (c) 2003-2005 Maxim Sobolev. All rights reserved.
# Copyright (c) 2006-2014 Sippy Software, Inc. All rights reserved.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from sippy.SipConf import SipConf
try:
from urllib import quote, unquote
except ImportError:
from urllib.parse import quote, unquote
RFC3261_USER_UNRESERVED = '&=+$,;?/#'
# Quote from RFC-3261:
# Several rules are incorporated from RFC 2396 [5] but are updated to
# make them compliant with RFC 2234
RFC3261_MARK = '-_.!~*\'()'
USERNAME_SAFE = RFC3261_USER_UNRESERVED + RFC3261_MARK
class SipURL(object):
scheme = None
username = None
userparams = None
password = None
host = None
port = None
headers = None
usertype = None
transport = None
ttl = None
maddr = None
method = None
tag = None
other = None
lr = False
def __init__(self, url = None, username = None, password = None, host = None, port = None, headers = None, \
usertype = None, transport = None, ttl = None, maddr = None, method = None, tag = None, other = None, \
userparams = None, lr = False, relaxedparser = False, scheme = "sip"):
self.original_uri = url
self.other = []
self.userparams = []
if url == None:
self.scheme = scheme
self.username = username
if userparams != None:
self.userparams = userparams
self.password = password
if host == None:
self.host = SipConf.my_address
self.port = SipConf.my_port
else:
self.host = host
self.port = port
self.headers = headers
self.usertype = usertype
self.transport = transport
self.ttl = ttl
self.maddr = maddr
self.method = method
self.tag = tag
if other != None:
self.other = other
self.lr = lr
return
parts = url.split(':', 1)
if len(parts) < 2:
# scheme is missing, assume sip:
parts.insert(0, 'sip')
parts[0] = parts[0].lower()
if parts[0] not in ('sip', 'sips', 'tel'):
raise ValueError('unsupported scheme: %s:' % parts[0])
self.scheme, url = parts
if self.scheme == 'tel':
if SipConf.autoconvert_tel_url:
self.convertTelURL(url, relaxedparser)
else:
raise ValueError('tel: scheme is not supported')
else:
self.parseSipURL(url, relaxedparser)
def convertTelURL(self, url, relaxedparser):
self.scheme = 'sip'
if relaxedparser:
self.host = ''
else:
self.host = SipConf.my_address
self.port = SipConf.my_port
parts = url.split(';')
self.username = unquote(parts[0])
if len(parts) > 1:
# parse userparams
self.userparams = []
for part in parts[1:]:
                # RFC 3261 suggests the user parameter keys should
                # be converted to lower case.
k, v = part.split('=')
self.userparams.append(k.lower() + '=' + v)
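    # Example (hypothetical number, not from the original): with
    # SipConf.autoconvert_tel_url enabled,
    # SipURL('tel:+12125551234;Phone-Context=example.com') becomes a sip:
    # URL with username '+12125551234' and userparams
    # ['phone-context=example.com'].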
def parseSipURL(self, url, relaxedparser):
ear = url.find('@') + 1
parts = url[ear:].split(';')
userdomain, params = url[0:ear] + parts[0], parts[1:]
if len(params) == 0 and '?' in userdomain[ear:]:
self.headers = {}
userdomain_suff, headers = userdomain[ear:].split('?', 1)
userdomain = userdomain[:ear] + userdomain_suff
for header in headers.split('&'):
k, v = header.split('=')
self.headers[k] = unquote(v)
if ear > 0:
userpass = userdomain[:ear - 1]
hostport = userdomain[ear:]
upparts = userpass.split(':', 1)
if len(upparts) > 1:
self.password = upparts[1]
uparts = upparts[0].split(';')
if len(uparts) > 1:
self.userparams = uparts[1:]
self.username = unquote(uparts[0])
else:
hostport = userdomain
parseport = None
        if relaxedparser and len(hostport) == 0:
            self.host = ''
elif hostport[0] == '[':
# IPv6 host
hpparts = hostport.split(']', 1)
self.host = hpparts[0] + ']'
if len(hpparts[1]) > 0:
hpparts = hpparts[1].split(':', 1)
if len(hpparts) > 1:
parseport = hpparts[1]
else:
# IPv4 host
hpparts = hostport.split(':', 1)
if len(hpparts) == 1:
self.host = hpparts[0]
else:
self.host = hpparts[0]
parseport = hpparts[1]
if parseport != None:
try:
self.port = int(parseport)
except Exception as e:
# Can't parse port number, check why
port = parseport.strip()
if len(port) == 0:
# Bug on the other side, work around it
print('WARNING: non-compliant URI detected, empty port number, ' \
'assuming default: "%s"' % str(self.original_uri))
elif port.find(':') > 0:
pparts = port.split(':', 1)
if pparts[0] == pparts[1]:
# Bug on the other side, work around it
print('WARNING: non-compliant URI detected, duplicate port number, ' \
'taking "%s": %s' % (pparts[0], str(self.original_uri)))
self.port = int(pparts[0])
else:
raise e
else:
raise e
if len(params) > 0:
last_param = params[-1]
arr = last_param.split('?', 1)
params[-1] = arr[0]
self.setParams(params)
if len(arr) == 2:
self.headers = {}
for header in arr[1].split('&'):
k, v = header.split('=')
self.headers[k] = unquote(v)
def setParams(self, params):
self.usertype = None
self.transport = None
self.ttl = None
self.maddr = None
self.method = None
self.tag = None
self.other = []
self.lr = False
for p in params:
nv = p.split('=', 1)
if len(nv) == 1:
if p == 'lr':
self.lr = True
else:
self.other.append(p)
continue
name, value = nv
if name == 'user':
self.usertype = value
elif name == 'transport':
self.transport =
|
Teekuningas/mne-python
|
mne/io/fieldtrip/fieldtrip.py
|
Python
|
bsd-3-clause
| 6,508
| 0
|
# -*- coding: UTF-8 -*-
# Authors: Thomas Hartmann <thomas.hartmann@th-ht.de>
# Dirk Gütlin <dirk.guetlin@stud.sbg.ac.at>
#
# License: BSD (3-clause)
import numpy as np
from .utils import _create_info, _set_tmin, _create_events, \
_create_event_metadata, _validate_ft_struct
from .. import RawArray
from ...epochs import EpochsArray
from ...evoked import EvokedArray
def read_raw_fieldtrip(fname, info, data_name='data'):
"""Load continuous (raw) data from a FieldTrip preprocessing structure.
This function expects to find single trial raw data (FT_DATATYPE_RAW) in
the structure data_name is pointing at.
.. warning:: FieldTrip does not normally store the original information
concerning channel location, orientation, type etc. It is
therefore **highly recommended** to provide the info field.
This can be obtained by reading the original raw data file
with MNE functions (without preload). The returned object
contains the necessary info field.
Parameters
----------
fname : str
Path and filename of the .mat file containing the data.
info : dict or None
The info dict of the raw data file corresponding to the data to import.
If this is set to None, limited information is extracted from the
FieldTrip structure.
data_name : str
Name of heading dict/ variable name under which the data was originally
saved in MATLAB.
Returns
-------
raw : instance of RawArray
A Raw Object containing the loaded data.
"""
from ...externals.pymatreader import read_mat
ft_struct = read_mat(fname,
ignore_fields=['previous'],
variable_names=[data_name])
# load data and set ft_struct to the heading dictionary
ft_struct = ft_struct[data_name]
_validate_ft_struct(ft_struct)
info = _create_info(ft_struct, info) # create info structure
data = np.array(ft_struct['trial']) # create the main data array
if data.ndim > 2:
data = np.squeeze(data)
if data.ndim == 1:
data = data[np.newaxis, ...]
if data.ndim != 2:
raise RuntimeError('The data you are trying to load does not seem to '
'be raw data')
raw = RawArray(data, info) # create an MNE RawArray
return raw
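# Illustrative usage (hypothetical file names, not from the original): read
# the original header with MNE first, then pass its info when importing:
#
#     info = mne.io.read_raw_brainvision('orig.vhdr', preload=False).info
#     raw = read_raw_fieldtrip('data.mat', info=info, data_name='data')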
def read_epochs_fieldtrip(fname, info, data_name='data',
trialinfo_column=0):
"""Load epoched data from a FieldTrip preprocessing structure.
This function expects to find epoched data in the structure data_name is
pointing at.
.. warning:: Only epochs with the same amount of channels and samples are
supported!
.. warning:: FieldTrip does not normally store the original information
concerning channel location, orientation, type etc. It is
therefore **highly recommended** to provide the info field.
This can be obtained by reading the original raw data file
with MNE functions (without preload). The returned object
contains the necessary info field.
Parameters
----------
fname : str
Path and filename of the .mat file containing the data.
info : dict or None
The info dict of the raw data file corresponding to the data to import.
If this is set to None, limited information is extracted from the
FieldTrip structure.
data_name : str
Name of heading dict/ variable name under which the data was originally
saved in MATLAB.
trialinfo_column : int
Column of the trialinfo matrix to use for the event codes.
Returns
-------
epochs : instance of EpochsArray
An EpochsArray containing the loaded data.
"""
from ...externals.pymatreader import read_mat
ft_struct = read_mat(fname,
ignore_fields=['previous'],
variable_names=[data_name])
# load data and set ft_struct to the heading dictionary
ft_struct = ft_struct[data_name]
_validate_ft_struct(ft_struct)
info = _create_info(ft_struct, info) # create info structure
    data = np.array(ft_struct['trial'])  # create the epochs data array
events = _create_events(ft_struct, trialinfo_column)
if events is not None:
metadata = _create_event_metadata(ft_struct)
else:
metadata = None
tmin = _set_tmin(ft_struct) # create start time
    epochs = EpochsArray(data=data, info=info, tmin=tmin,
                         events=events, metadata=metadata, proj=False)
return epochs
def read_evoked_fieldtrip(fname, info, comment=None,
data_name='data'):
"""Load evoked data from a FieldTrip timelocked structure.
This function expects to find timelocked data in the structure data_name is
pointing at.
.. warning:: FieldTrip does not normally store the original information
concerning channel location, orientation, type etc. It is
therefore **highly recommended** to provide the info field.
This can be obtained by reading the original raw data file
with MNE functions (without preload). The returned object
contains the necessary info field.
Parameters
----------
fname : str
Path and filename of the .mat file containing the data.
info : dict or None
The info dict of the raw data file corresponding to the data to import.
If this is set to None, limited information is extracted from the
FieldTrip structure.
comment : str
Comment on dataset. Can be the condition.
data_name : str
Name of heading dict/ variable name under which the data was originally
saved in MATLAB.
Returns
-------
evoked : instance of EvokedArray
An EvokedArray containing the loaded data.
"""
from ...externals.pymatreader import read_mat
ft_struct = read_mat(fname,
ignore_fields=['previous'],
variable_names=[data_name])
ft_struct = ft_struct[data_name]
_validate_ft_struct(ft_struct)
info = _create_info(ft_struct, info) # create info structure
data_evoked = ft_struct['avg'] # create evoked data
evoked = EvokedArray(data_evoked, info, comment=comment)
return evoked
|
abirafdirp/inventory
|
inventory/__init__.py
|
Python
|
bsd-3-clause
| 24
| 0
|
__author__ = 'abirafdi'
| ||
gruel/AphorismToTEI
|
tests/test_aphorism_to_xml.py
|
Python
|
bsd-3-clause
| 5,663
| 0.000883
|
import os
import sys
import pytest
from .conftest import Process, AphorismsToXMLException
file_path = os.path.realpath(__file__)
path = os.path.dirname(file_path)
sys.path.append(path)
path_testdata = os.path.join(path, 'test_files') + os.sep
# examples = os.path.join(path, '..', 'Examples', 'TextFiles') + os.sep
template_file = os.path.join(path, '..', 'exegis', 'template',
'xml_template.txt')
def test_divide_document():
comtoepi = Process()
# Read test introduction
with open(path_testdata + 'introduction.txt', 'r',
encoding="utf-8") as f:
introduction = f.read().strip()
# Read test title
with open(path_testdata + 'title.txt', 'r',
encoding="utf-8") as f:
title = f.read().strip()
# Read test text
with open(path_testdata + 'text.txt', 'r',
encoding="utf-8") as f:
text = f.readlines()
# Read test footnotes
with open(path_testdata + 'footnotes.txt', 'r',
encoding="utf-8") as f:
footnotes = f.readlines()
# Read full text file
with open(path_testdata +
'aphorism_with_intro_title_text_footnotes.txt', 'r',
encoding="utf-8") as f:
comtoepi._text = f.read().strip()
comtoepi.divide_document()
assert comtoepi._introduction == introduction
assert comtoepi._title == title
for i, line in enumerate(comtoepi._text.splitlines()):
assert line.strip() == text[i].strip()
for i, line in enumerate(comtoepi.footnotes.splitlines()):
assert line.strip() == footnotes[i].strip()
def test_divide_document_no_intro():
comtoepi = Process()
# Read test title
with open(path_testdata + 'title.txt', 'r',
encoding="utf-8") as f:
title = f.read().strip()
# Read test text
with open(path_testdata + 'text.txt', 'r',
encoding="utf-8") as f:
        text = f.readlines()
# Read test footnotes
with open(path_testdata + 'footnotes.txt', 'r',
encoding="utf-8") as f:
footnotes = f.readlines()
# Read full text file
with open(path_testdata +
'aphorism_no_intro_title_text_footnotes.txt', 'r',
encoding="utf-8") as f:
comtoepi._text = f.read().strip()
comtoepi.divide_document()
assert comtoepi._title == title
for i, line in enumerate(comtoepi._text.splitlines()):
assert line.strip() == text[i].strip()
for i, line in enumerate(comtoepi.footnotes.splitlines()):
assert line.strip() == footnotes[i].strip()
def test_divide_document_no_footnotes():
comtoepi = Process()
# Read full text file
with open(path_testdata +
'aphorism_no_intro_title_text_no_footnotes.txt', 'r',
encoding="utf-8") as f:
comtoepi._text = f.read().strip()
with pytest.raises(AphorismsToXMLException):
comtoepi.divide_document()
################# read_template ###################
def test_read_template_missing_template():
comtoepi = Process()
comtoepi.template_fname = 'ttttt'
with pytest.raises(AphorismsToXMLException):
comtoepi.read_template()
# with assertRaises(SystemExit) as cm:
# comtoepi.read_template()
# assertEqual(cm.exception.code, 1)
# #################### save_xml #########################
def test_treat_footnote():
comtoepi = Process()
comtoepi.footnotes = ['*1*ssss tttt ] conieci: '
'aaaa bbbb L5: om. Y']
comtoepi.treat_footnotes()
assert comtoepi.footnotes_app.footnotes is not None
assert comtoepi.footnotes_app.xml is not None
def test_main_open_document():
comtoepi = Process()
comtoepi.fname = path_testdata + 'aphorisms.txt'
comtoepi.main()
assert comtoepi._text is not None
def test_main_open_document_failed():
comtoepi = Process()
comtoepi.fname = path_testdata + 'do not exit'
with pytest.raises(AphorismsToXMLException):
comtoepi.main()
def test_main_division_failed():
comtoepi = Process()
comtoepi.fname = path_testdata + 'aphorisms_failed_division.txt'
with pytest.raises(AphorismsToXMLException):
comtoepi.main()
def test_main_no_point_commentaries():
"""test for coverage"""
comtoepi = Process()
comtoepi.fname = (path_testdata + 'aphorisms_no_point_commentaries.txt')
comtoepi.main()
#with pytest.raises(AphorismsToXMLException):
# comtoepi.main()
def test_main_references():
comtoepi = Process()
comtoepi.fname = (path_testdata + 'aphorisms_references_failed.txt')
comtoepi.main()
#with pytest.raises(AphorismsToXMLException):
# comtoepi.main()
def test_main_aphorism_point_number():
    '''Function to test that the program returns an error if the aphorism
    is wrongly numbered (1. or 1 is ok but .1 is not)
    '''
comtoepi = Process()
comtoepi.fname = (path_testdata + 'aphorisms_wrong_numeration.txt')
with pytest.raises(AphorismsToXMLException):
comtoepi.main()
# # ################# process_folder ###################
# Moved to driver:
# TODO: implement unittest for driver
# def test_process_folder(self):
# comtoepi.template_folder = path_testdata
# assertTrue(comtoepi.process_folder(path_testdata))
#
# def test_process_folder_raise_error_folder_not_present(self):
# folder = os.path.join('path_failed')
# assertRaises(AphorismsToXMLException,
# comtoepi.process_folder,
# folder)
# if __name__ == '__main__':
# pytest.main()
|
mobarski/sandbox
|
topic/tokens.py
|
Python
|
mit
| 455
| 0.035165
|
from contrib import *
import re
def tokenize(text):
tokens = re.findall('(?u)[\w.-]+',text)
tokens = [t for t in tokens if not re.match('[\d.-]+$',t)]
#tokens = [t for t in tokens if len(t)>2]
# TODO remove stopwords
return u' '.join(tokens)
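# Illustrative usage of tokenize() above (a sketch, not part of the original
# script): purely numeric tokens are filtered out, while hyphenated and
# dotted words survive:
#
#     tokenize(u'Hello 123 world-2 v1.0')   # -> u'Hello world-2 v1.0'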
## text = KV('data/text.db',5)
## tokens = KV('data/tokens.db',5)
text = KO('data/text')
tokens = KO('data/tokens')
for k,v in text.items():
print(k)
tokens[k] = tokenize(v.decode('utf8'))
tokens.sync()
|
kreopt/aioweb
|
aioweb/middleware/csrf/__init__.py
|
Python
|
mit
| 3,212
| 0.001868
|
from aiohttp import web
from aiohttp_session import get_session, SESSION_KEY as SESSION_COOKIE_NAME
from aioweb.middleware.csrf.templatetags import CsrfTag, CsrfRawTag
from aioweb.util import awaitable
from aioweb.modules.template.backends.jinja2 import APP_KEY as JINJA_APP_KEY
import random, string
from hashlib import sha256
CSRF_FIELD_NAME = 'csrftoken'
CSRF_SESSION_NAME = 'csrf_token'
CSRF_HEADER_NAME = 'X-Csrf-Token'
CSRF_COOKIE_NAME = 'Csrf-Token'
REASON_NO_CSRF_COOKIE = "CSRF cookie not set."
REASON_BAD_TOKEN = "CSRF token missing or incorrect."
CSRF_LENGTH = 128
CSRF_SALT_LENGTH = 6
CSRF_ALLOWED_CHARS = string.ascii_letters + string.digits
CSRF_TOKEN_SEPARATOR = '-'
def generate_csrf_secret():
return ''.join([random.choice(CSRF_ALLOWED_CHARS) for c in range(CSRF_LENGTH)])
def generate_salt():
return ''.join([random.choice(CSRF_ALLOWED_CHARS) for c in range(CSRF_SALT_LENGTH)])
async def get_secret(request):
"""
Returns the CSRF token required for a POST form. The token is an
alphanumeric value. A new token is created if one is not already set.
"""
session = await get_session(request)
if CSRF_SESSION_NAME in session and session[CSRF_SESSION_NAME]:
return session[CSRF_SESSION_NAME]
return await set_secret(request)
def make_token(salt, secret):
return "{}{}{}".format(salt, CSRF_TOKEN_SEPARATOR,
sha256("{}{}{}".format(salt, CSRF_TOKEN_SEPARATOR, secret).encode()).hexdigest())
async def get_token(request):
salt = generate_salt()
secret = await get_secret(request)
return make_token(salt, secret)
async def set_secret(request):
session = await get_session(request)
session[CSRF_SESSION_NAME] = generate_csrf_secret()
return session[CSRF_SESSION_NAME]
def validate_token(token, secret):
salt, hashed = token.split('-', maxsplit=1)
return token == make_token(salt, secret)
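# A minimal round-trip sketch of the salted-token scheme above (illustrative,
# using only the helpers defined in this module): any fresh salt produces a
# token that validates against the same per-session secret.
#
#     secret = generate_csrf_secret()
#     token = make_token(generate_salt(), secret)
#     assert validate_token(token, secret)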
async def middleware(app, handler):
async def middleware_handler(request):
setattr(request, 'csrf_token', await get_token(request))
try:
response = await awaitable(handler(request))
except web.HTTPException as e:
raise e
return response
return middleware_handler
def setup(app):
app[JINJA_APP_KEY].add_extension(CsrfTag)
app[JINJA_APP_KEY].add_extension(CsrfRawTag)
async def pre_dispatch(request, controller, actionName):
reason = None
check_ok = True
if request.method not in ('GET', 'HEAD', 'OPTIONS', 'TRACE'):
action = getattr(controller, actionName)
if not getattr(action, 'csrf_disabled', False):
check_ok = False
token = request.headers.get(CSRF_HEADER_NAME)
if not token:
data = await request.post()
token = data.get(CSRF_FIELD_NAME)
if token:
if validate_token(token, await get_secret(request)):
check_ok = True
else:
reason = REASON_BAD_TOKEN
else:
reason = REASON_NO_CSRF_COOKIE
if not check_ok:
raise web.HTTPForbidden(reason=reason)
|
ringemup/satchmo
|
satchmo/apps/tax/modules/percent/processor.py
|
Python
|
bsd-3-clause
| 2,581
| 0.008911
|
from decimal import Decimal
from livesettings import config_value
from tax.modules.base.processor import BaseProcessor
class Processor(BaseProcessor):
method="percent"
#def __init__(self, order=None, user=None):
# """
# Any preprocessing steps should go here
# For instance, copying the shipping and billing areas
# """
# super(Processor, self).__init__(order=order, user=user)
# self.order = order
# self.user = user
def by_orderitem(self, orderitem):
if orderitem.product.taxable:
price = orderitem.sub_total
return self.by_price(orderitem.product.taxClass, price)
else:
return Decimal("0.00")
def by_price(self, taxclass, price):
percent = config_value('TAX','PERCENT')
p = price * (percent/100)
return p
def by_product(self, product, quantity=Decimal('1')):
price = product.get_qty_price(quantity)
taxclass = product.taxClass
return self.by_price(taxclass, price)
def get_percent(self, *args, **kwargs):
return Decimal(config_value('TAX','PERCENT'))
def get_rate(self, *args, **kwargs):
return self.get_percent()/100
def shipping(self, subtotal=None):
if subtotal is None and self.order:
subtotal = self.order.shipping_sub_total
if subtotal:
subtotal = self.order.shipping_sub_total
if config_value('TAX','TAX_SHIPPING'):
percent = config_value('TAX','PERCENT')
t = subtotal * (percent/100)
else:
t = Decimal("0.00")
else:
t = Decimal("0.00")
return t
def process(self, order=None):
"""
Calculate the tax and return it
"""
if order:
self.order = order
else:
order = self.order
percent = config_value('TAX','PERCENT')
sub_total = Decimal("0.00")
for item in order.orderitem_set.filter(product__taxable=True):
sub_total += item.sub_total
itemtax = sub_total * (percent/100)
taxrates = {'%i%%' % percent : itemtax}
if config_value('TAX','TAX_SHIPPING'):
shipping = order.shipping_sub_total
sub_total += shipping
ship_tax = shipping * (percent/100)
taxrates['Shipping'] = ship_tax
tax = sub_total * (percent/100)
return tax, taxrates
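# Worked sketch (illustrative; assumes the config values PERCENT = 21 and
# TAX_SHIPPING disabled): a single taxable order item with
# sub_total == Decimal('100.00') yields tax == Decimal('21.00') and
# taxrates == {'21%': Decimal('21.00')}.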
|
kvark/claymore
|
etc/blender/io_kri/__init__.py
|
Python
|
apache-2.0
| 186
| 0
|
bl_info = {
'name': 'KRI common routines',
'author': 'Dzmitry Malyshau',
'version': (0, 1, 0),
'blender': (2, 6, 2),
'warning': '',
'category': 'Import-Export'
}
|
studio1247/gertrude
|
paques.py
|
Python
|
gpl-3.0
| 1,082
| 0.012015
|
# -*- coding: utf-8 -*-
## This file is part of Gertrude.
##
## Gertrude is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 3 of the License, or
## (at your option) any later version.
##
## Gertrude is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Gertrude; if not, see <http://www.gnu.org/licenses/>.
import datetime
def getPaquesDate(year):
if year < 1583:
m, n = 16, 6
else:
m, n = 24, 5
a, b, c = year % 19, year % 4, year % 7
d = (19 * a + m) % 30
e = (2 * b + 4 * c + 6 * d + n) % 7
if d + e < 10:
return datetime.date(year, 3, d + e + 22)
else:
return datetime.date(year, 4, d + e - 9)
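# Worked check of the Gauss Easter computation above (illustrative):
# getPaquesDate(2024) returns datetime.date(2024, 3, 31), since a=10, b=0,
# c=1 give d=4 and e=5, so d + e = 9 < 10 and the day is 4 + 5 + 22 = 31.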
|
gabrielloliveira/omc
|
omc/omc/urls.py
|
Python
|
mit
| 366
| 0.002732
|
from django.conf.urls import url, include
from django.conf import settings
from django.contrib import admin
from django.conf.urls.static import static
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'', include('blog.urls')),
url(r'^ckeditor/', include('ckeditor_uploader.urls')),
]+ static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
ericrrichards/rpgEngine
|
RpgEngine/RpgEngine/Scripts/Actions.py
|
Python
|
mit
| 242
| 0
|
class Actions:
@staticmethod
def Teleport(map, tileX, tileY):
def teleport(trigger, entity):
entity.TileX = tileX
entity.TileY = tileY
TeleportEntity(entity, map)
return teleport
|
mridang/django-eggnog
|
eggnog/migrations/0001_initial.py
|
Python
|
bsd-3-clause
| 1,561
| 0.007687
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Update'
db.create_table('eggnog_update', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('package', self.gf('django.db.models.fields.CharField')(unique=True, max_length=2000)),
('installed', self.gf('django.db.models.fields.CharField')(max_length=16)),
('available', self.gf('django.db.models.fields.CharField')(max_length=16)),
('checked', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
))
db.send_create_signal('eggnog', ['Update'])
def backwards(self, orm):
# Deleting model 'Update'
db.delete_table('eggnog_update')
models = {
'eggnog.update': {
'Meta': {'ordering': "['-checked']", 'object_name': 'Update'},
'available': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'checked': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'installed': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'package': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '2000'})
}
}
complete_apps = ['eggnog']
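# Illustrative usage note: as a South schema migration (pre-Django 1.7), this
# would be applied with the South-provided management command, e.g.:
#
#     python manage.py migrate eggnog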
|
semiautomaticgit/SemiAutomaticClassificationPlugin
|
core/utils.py
|
Python
|
gpl-3.0
| 380,258
| 0.03463
|
# -*- coding: utf-8 -*-
'''
/**************************************************************************************************************************
SemiAutomaticClassificationPlugin
The Semi-Automatic Classification Plugin for QGIS allows for the supervised classification of remote sensing images,
providing tools for the download, the preprocessing and postprocessing of images.
-------------------
begin : 2012-12-29
copyright : (C) 2012-2021 by Luca Congedo
email : ing.congedoluca@gmail.com
**************************************************************************************************************************/
/**************************************************************************************************************************
*
* This file is part of Semi-Automatic Classification Plugin
*
* Semi-Automatic Classification Plugin is free software: you can redistribute it and/or modify it under
* the terms of the GNU General Public License as published by the Free Software Foundation,
* version 3 of the License.
*
* Semi-Automatic Classification Plugin is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along with
* Semi-Automatic Classification Plugin. If not, see <http://www.gnu.org/licenses/>.
*
**************************************************************************************************************************/
'''
cfg = __import__(str(__name__).split('.')[0] + '.core.config', fromlist=[''])
# sound for Windows
try:
import winsound
except:
pass
class Utils:
def __init__(self):
pass
##################################
''' Download functions '''
##################################
# download html file
def downloadHtmlFileQGIS(self, url, url2 = None, timeOutSec = 1):
cfg.htmlW = url2
r = cfg.QNetworkRequestSCP(cfg.QtCoreSCP.QUrl(url))
cfg.reply = cfg.qgisCoreSCP.QgsNetworkAccessManager.instance().get(r)
cfg.reply.finished.connect(self.replyInTextBrowser)
cfg.utls.logCondition(str(__name__) + '-' + str(cfg.inspectSCP.stack()[0][3])+ ' ' + cfg.utls.lineOfCode())
return 'No'
# load reply in text browser
def replyInTextBrowser(self):
cfg.reply.deleteLater()
html2 = cfg.reply.readAll().data()
html = bytes.decode(html2)
# Github file not found
if '<h1>404</h1>' in html:
r = cfg.QNetworkRequestSCP(cfg.QtCoreSCP.QUrl(cfg.htmlW))
cfg.reply2 = cfg.qgisCoreSCP.QgsNetworkAccessManager.instance().get(r)
cfg.reply2.finished.connect(self.replyInTextBrowser2)
if len(html) > 0:
cfg.uidc.main_textBrowser.clear()
cfg.uidc.main_textBrowser.setHtml(html)
cfg.reply.finished.disconnect()
cfg.reply.abort()
cfg.reply.close()
cfg.utls.logCondition(str(__name__) + '-' + str(cfg.inspectSCP.stack()[0][3])+ ' ' + cfg.utls.lineOfCode())
# load reply in text browser
def replyInTextBrowser2(self):
cfg.reply2.deleteLater()
html2 = cfg.reply2.readAll().data()
html = bytes.decode(html2)
if len(html) > 0:
cfg.uidc.main_textBrowser.clear()
cfg.uidc.main_textBrowser.setHtml(html)
cfg.reply2.finished.disconnect()
cfg.reply2.abort()
cfg.reply2.close()
cfg.utls.logCondition(str(__name__) + '-' + str(cfg.inspectSCP.stack()[0][3])+ ' ' + cfg.utls.lineOfCode())
# reply Finish
def replyFinish(self):
cfg.replyP.deleteLater()
cfg.fileP = cfg.replyP.readAll()
cfg.utls.logCondition(str(__name__) + '-' + str(cfg.inspectSCP.stack()[0][3])+ ' ' + cfg.utls.lineOfCode())
# replyText
def replyText(self):
cfg.replyP.deleteLater()
cfg.htmlP = cfg.replyP.readAll()
cfg.utls.logCondition(str(__name__) + '-' + str(cfg.inspectSCP.stack()[0][3])+ ' ' + cfg.utls.lineOfCode())
# progress
def downloadProgress(self, value, total):
cfg.uiUtls.updateBar(self.progressP, '(' + str(value/1048576) + '/' + str(total/1048576) + ' MB) ' + self.urlP, cfg.QtWidgetsSCP.QApplication.translate('semiautomaticclassificationplugin', 'Downloading'))
if cfg.actionCheck == 'No':
cfg.replyP.finished.disconnect()
cfg.replyP.abort()
cfg.replyP.close()
# reply redirect
def replyRedirect(self):
cfg.replyR.deleteLater()
rA = cfg.replyR.attribute(cfg.QNetworkRequestSCP.RedirectionTargetAttribute)
if rA is not None:
cfg.replyRURL = rA.toString()
cfg.utls.logCondition(str(__name__) + '-' + str(cfg.inspectSCP.stack()[0][3])+ ' ' + cfg.utls.lineOfCode())
# reply redirect
def replyRedirect2(self):
cfg.replyR2.deleteLater()
rA = cfg.replyR2.attribute(cfg.QNetworkRequestSCP.RedirectionTargetAttribute)
if rA is not None:
cfg.replyRURL2 = rA.toString()
cfg.utls.logCondition(str(__name__) + '-' + str(cfg.inspectSCP.stack()[0][3])+ ' ' + cfg.utls.lineOfCode())
# reply redirect
def replyRedirect3(self):
cfg.replyR3.deleteLater()
rA = cfg.replyR3.attribute(cfg.QNetworkRequestSCP.RedirectionTargetAttribute)
if rA is not None:
cfg.replyRURL3 = rA.toString()
cfg.utls.logCondition(str(__name__) + '-' + str(cfg.inspectSCP.stack()[0][3])+ ' ' + cfg.utls.lineOfCode())
# connect with password QT
def passwordConnect(self, user, password, url, topLevelUrl, outputPath = None, progress = None, quiet = 'No', redirect = 'No'):
# logger
cfg.utls.logCondition(str(__name__) + '-' + str(cfg.inspectSCP.stack()[0][3])+ ' ' + cfg.utls.lineOfCode(), ' ' + url)
# auth
base64UP = cfg.base64SCP.encodestring(bytes(user + ':' + password, 'utf-8')[:-1])
h = bytes('Basic ', 'utf-8') + base64UP
hKey = cfg.QtCoreSCP.QByteArray(bytes('Authorization', 'utf-8') )
hValue = cfg.QtCoreSCP.QByteArray(h)
r = cfg.QNetworkRequestSCP(cfg.QtCoreSCP.QUrl(url))
r.setRawHeader(hKey, hValue)
qnamI = cfg.qgisCoreSCP.QgsNetworkAccessManager.instance()
if redirect != 'No':
cfg.replyR = qnamI.get(r)
cfg.replyR.finished.connect(self.replyRedirect)
# loop
eL = cfg.QtCoreSCP.QEventLoop()
cfg.replyR.finished.connect(eL.quit)
eL.exec_()
cfg.replyR.finished.disconnect(eL.quit)
cfg.replyR.finished.disconnect()
cfg.replyR.abort()
cfg.replyR.close()
r2 = cfg.QNetworkRequestSCP(cfg.QtCoreSCP.QUrl(cfg.replyRURL))
r2.setRawHeader(hKey, hValue)
cfg.replyR2 = qnamI.get(r2)
cfg.replyR2.finished.connect(self.replyRedirect2)
# loop
eL = cfg.QtCoreSCP.QEventLoop()
cfg.replyR2.finished.connect(eL.quit)
eL.exec_()
cfg.replyR2.finished.disconnect(eL.quit)
cfg.replyR2.finished.disconnect()
cfg.replyR2.abort()
cfg.replyR2.close()
r3 = cfg.QNetworkRequestSCP(cfg.QtCoreSCP.QUrl(cfg.replyRURL2))
r3.setRawHeader(hKey, hValue)
cfg.replyR3 = qnamI.get(r3)
cfg.replyR3.finished.connect(self.replyRedirect3)
# loop
eL = cfg.QtCoreSCP.QEventLoop()
cfg.replyR3.finished.connect(eL.quit)
eL.exec_()
cfg.replyR3.finished.disconnect(eL.quit)
cfg.replyR3.finished.disconnect()
cfg.replyR3.abort()
cfg.replyR3.close()
try:
if outputPath is None:
cfg.replyP = qnamI.get(r)
cfg.replyP.finished.connect(self.replyText)
# loop
eL = cfg.QtCoreSCP.QEventLoop()
cfg.replyP.finished.connect(eL.quit)
eL.exec_()
cfg.replyP.finished.disconnect(eL.quit)
cfg.replyP.finished.disconnect()
cfg.replyP.abort()
cfg.replyP.close()
return cfg.htmlP
else:
self.urlP = url
self.progressP = progress
cfg.replyP = qnamI.get(r)
cfg.replyP.finished.connect(self.replyFinish)
cfg.replyP.downloadProgress.connect(self.downloadProgress)
# loop
eL = cfg.QtCoreSCP.QEventLoop()
cfg.replyP.finished.connect(eL.quit)
eL.exec_()
cfg.replyP.finished.disconnect(eL.quit)
cfg.replyP.finished.disconnect()
cfg.replyP.abort()
cfg.replyP.close()
with open(outputPath, 'wb') as file:
file.write(cfg.fileP)
if cfg.actionCheck == 'No':
raise ValueError('Cancel action')
if cfg.osSCP.path.getsize(outputPath) > 500:
cfg.fileP = None
return 'Yes'
else:
if 'problem' in cfg.fileP:
cfg.fileP = None
return '
|
urashima9616/Leetcode_Python
|
Leet132_PalindromePartition3.py
|
Python
|
gpl-3.0
| 1,337
| 0.006731
|
"""
Given a string s, partition s such that every substring of the partition is a palindrome.
Return the minimum cuts needed for a palindrome partitioning of s.
For example, given s = "aab",
Return 1 since the palindrome partitioning ["aa","b"] could be produced using 1 cut.
DFS (depth-first search) solution
"""
class Solution(object):
def partition(self, s):
if not s:
return []
mincut = [len(s)]
res = self.PalindromePart(s, 0, mincut)
return mincut[0]
def PalindromePart(self, s, k, mincut):
if mincut[0] < k:
return [[]]
if not s:
return [[]]
if len(s) == 1:
return [[s]]
res = []
for i in xrange(len(s)):
# check palindrome
if s[:len(s)-i] == s[len(s)-i-1::-1]:
if i > 0:
for each in self.PalindromePart(s[len(s)-i:], k+1, mincut):
res.append([s[:len(s)-i]] + each)
mincut[0] = k + len(each) if mincut[0] > k + len(each) - 1 else mincut[0]
else:
each = []
res.append([s[:len(s)-i]] + each)
mincut[0] = k + len(each) if mincut[0] > k + len(each) - 1 else mincut[0]
return res
Solve = Solution()
print Solve.partition('a')
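# Sanity sketch (illustrative): Solve.partition('aab') evaluates to 1 (the
# string splits into ["aa", "b"] with one cut), while a full palindrome such
# as 'aba' needs 0 cuts.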
|
rolando/crochet
|
crochet/_util.py
|
Python
|
mit
| 366
| 0
|
"""
Utility functions and classes.
"""
from functools import wraps
def synchronized(method):
"""
Decorator that wraps a method with an acquire/release of self._lock.
"""
@wraps(method)
def synced(self, *args, **kwargs):
with self._lock:
return method(self, *args, **kwargs)
synced.synchronized = True
return synced
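# A minimal usage sketch (illustrative assumption: the decorated class must
# supply its own self._lock, as crochet's internals do):
if __name__ == '__main__':
    import threading

    class Counter(object):
        def __init__(self):
            self._lock = threading.Lock()
            self.n = 0

        @synchronized
        def bump(self):
            self.n += 1

    c = Counter()
    c.bump()
    assert c.n == 1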
|
davisp/python-spidermonkey
|
tests/test-python-ctor.py
|
Python
|
mit
| 736
| 0.006793
|
# Copyright 2009 Paul J. Davis <paul.joseph.davis@gmail.com>
#
# This file is part of the python-spidermonkey package released
# under the MIT license.
import t
touched = 0
class Foo(object):
def __init__(self):
self.bar = 2
def __del__(self):
global touched
touched = 1
@t.glbl("Foo", Foo)
def test_py_ctor_right_type(cx, glbl):
t.eq(isinstance(cx.execute("var f = new Foo(); f;"), Foo), True)
@t.glbl("Foo", Foo)
def test_py_ctor_attribute_acc(cx, glbl):
t.eq(cx.execute("var f = new Foo(); f;").bar, 2)
@t.glbl("Foo", Foo)
def test_py_dtor_called(cx, glbl):
t.eq(cx.execute('var f = {"baz": new Foo()}; f;').baz.bar, 2)
cx.execute("delete f.baz;")
cx.gc()
t.eq(touched, 1)
|
jpoullet2000/cgs-benchmarks
|
highlander-benchmarks/vcf_import.py
|
Python
|
apache-2.0
| 7,545
| 0.009145
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import urllib
import zlib
import zipfile
import math
import sys
from subprocess import *
import subprocess
# This script will:
# 1. Download the public database from the broad institute
# 2. Generate random vcf files thanks to the previous file thanks to databaseExpansion.jar
# 3. Import the vcf files generated to highlander thanks to dbBuilder.jar (made by Raphaël Helaers)
# 4. Delete the different files when they are uploaded as they are not needed anymore
# Configuration for the user
public_database_path = "data/Public_Database_v3"
public_database_link = "ftp://ftp.broadinstitute.org/pub/ExAC_release/release0.3/ExAC.r0.3.sites.vep.vcf.gz"
vcf_generation_jar_path = "dbgeneration.jar"
db_builder_path = "dbBuilder_fast2.jar"
vcf_destination_path = "data/"
ram_max = 2 # Number of Go the different jar can use
disk_max = 20 # Number of Go we can use to store the generated vcf files
threads_max = 5 # Number of threads allowed to do the vcf generation
# Configuration specific to the files to generate.
analyses = [('small', 200,'20_2015_04_01_benchmarks_small'), ('medium', 1000,'21_2015_04_01_benchmarks_medium'),('big',5000,'22_2015_04_01_benchmarks_big'),('huge',25000,'23_2015_04_01_benchmarks_huge')]
# If a problem occurred during a previous execution...
if os.path.isfile(public_database_path+".gz") and abs(os.path.getsize(public_database_path+".gz") - 3176043421) > 100*1024:
print("File compressed too small, we remove it.")
os.remove(public_database_path+".gz")
if os.path.isfile(public_database_path) and abs(os.path.getsize(public_database_path) - 23198476257) > 100*1024:
print("File uncompressed too small, we remove it.")
os.remove(public_database_path)
# 1. Download the public database from the broad institute if we don't have yet
if os.path.isfile(public_database_path+".gz") or os.path.isfile(public_database_path):
print("1. Public database from broad institute found, great!")
else:
try:
print("1.1. Public database from broad institute not found locally, we will download it, be patient... (Please check that the file is created and its size is increasing," \
+" otherwise remove spaces and similar characters from the path)")
urllib.urlretrieve(public_database_link, public_database_path+".gz")
except Exception as e:
print("1.1. A problem occurred while downloading the public database, launch the script again or investigate the error.")
print(e)
os.remove(public_database_path+".gz")
sys.exit(0)
# 1.2 Decompress the gzip file
if not os.path.isfile(public_database_path):
print("1.2. Decompress public database...")
try:
os.system('gzip -d '+public_database_path+'.gz')
except Exception as e:
print("1.2. A problem occurred during the decompression of the public database.")
print(e)
sys.exit(0)
# 2. Generate random vcf files thanks to the previous file thanks to databaseExpansion.jar
def jarExecution(*args):
env = dict(os.environ)
env['JAVA_OPTS'] = '-d64 -Xms'+str(ram_max-1)+'g -Xmx'+str(ram_max)+'g'
subprocess.call(['java', '-jar']+list(args), env=env)
#subprocess.call(['java', '-d64', '-Xms'+str(ram_max-2)+'g', '-Xmx'+str(ram_max)+'g', '-jar']+list(args))
def checkIfSampleDone(sample_name):
# Check if a sample is already done or not
samples = [line.strip() for line in open('samples_done.txt')]
found = False
sample_name = str(sample_name)
for sample in samples:
if sample and sample_name == sample:
found = True
break
return found
def sampleIsDone(sample_name):
if os.path.isfile('samples_done.txt'):
with open('samples_done.txt', 'a') as file:
file.write(str(sample_name)+'\r\n')
skipped_files = 0
starting_sample = 100
for analyse in analyses:
# We create an appropriate number of files by step
max_vcf_step = min(analyse[1], (disk_max*1024)/150)
max_vcf_step = 3
for first_sample in xrange(1, analyse[1], max_vcf_step):
max_vcf_step = min(max_vcf_step, analyse[1] - first_sample)
if checkIfSampleDone(starting_sample + first_sample) and checkIfSampleDone(starting_sample + first_sample + max_vcf_step - 1):
print("Samples ["+str(starting_sample+first_sample)+"; "+str(starting_sample+first_sample+max_vcf_step - 1)+"] already done, we go to the next interval.")
continue
print("2. Generate random vcf files for the analysis "+analyse[0]+": "+str(max_vcf_step)+" out of "+str(analyse[1])+" vcf.")
args = [''+vcf_generation_jar_path+'', "--o", public_database_path, "--d", vcf_destination_path+"r_"+analyse[0], "--s", str(max_vcf_step), "--f", "false", "--t", str(threads_max), "--i", str(starting_sample + first_sample)]
try:
#jarWrapper(*args)
jarExecution(*args)
except Exception as e:
print("2. A problem occurred during the vcf generation...")
print(e)
sys.exit(0)
# 3. Import the vcf files generated to highlander thanks to dbBuilder.jar (made by Raphaël Helaers)
print("3. Importing the different samples")
error = 0
for i in xrange(first_sample, first_sample+max_vcf_step):
path_to_file = vcf_destination_path+"r_"+analyse[0]+"_s"+str(max_vcf_step)+"_"+str(starting_sample + first_sample)+"."+str(i-first_sample)
if checkIfSampleDone(starting_sample + i):
print("Sample "+str(starting_sample+i)+" already done, we go to the next one.")
os.remove(path_to_file)
continue
if os.path.isfile("sql/lock"):
os.remove("sql/lock")
result = False
args = [''+db_builder_path+'', "--tool", "variants", "--sample", "NA"+str(starting_sample + i).zfill(5), "--project", analyse[2], "--analysis", analyse[0], "--vcfpath", ''+path_to_file+'']
try:
jarExecution(*args)
result = True
except Exception as e:
result = False
print(e)
if result is False:
error += 1
if error < 3:
print("3. Problem during the importation of the sample file '"+path_to_file+"' (attempt "+str(error)+"/3), we try again...")
i = i - 1
else:
print("3. Problem during the importation of the sample file '"+path_to_file+"' (attempt "+str(error)+"/3), we skip this vcf.")
skipped_files += 1
else:
error = 0
sampleIsDone(starting_sample + i)
# 4. Delete the current file just used as we will not use it again anymore
if os.path.isfile(path_to_file):
print("Delete the file...")
os.remove(path_to_file)
starting_sample += analyse[1] + 1
if skipped_files < 20:
print("----> It seems the import is done, with "+str(skipped_files)+" sample(s) skipped. Thank you.")
else:
print("----> It seems the import is done, but there were "+str(skipped_files)+" sample(s) skipped... Thank you anyway.")
|
cjcjameson/gpdb
|
gpMgmt/bin/gpcheckcat_modules/mirror_matching_check.py
|
Python
|
apache-2.0
| 1,789
| 0.003354
|
from gppylib.gparray import FAULT_STRATEGY_FILE_REPLICATION, get_gparray_from_config
class MirrorMatchingCheck:
def run_check(self, db_connection, logger):
logger.info('-----------------------------------')
logger.info('Checking mirroring_matching')
is_config_mirror_enabled = get_gparray_from_config().getFaultStrategy() == FAULT_STRATEGY_FILE_REPLICATION
# This query returns the mirroring status of all segments
mirroring_query = """SELECT gp_segment_id, mirror_existence_state FROM gp_dist_random('gp_persistent_relation_node') GROUP BY 1,2"""
segment_mirroring_result = db_connection.query(mirroring_query).getresult()
mismatching_segments = []
for (seg_id, mirror_state) in segment_mirroring_result:
is_segment_mirrored = mirror_state > 1
if mirror_state == 0:
continue # 0 is considered a match in either situation
if is_segment_mirrored != is_config_mirror_enabled:
mismatching_segments.append((seg_id, mirror_state))
if mismatching_segments:
logger.info('[FAIL] Mirroring mismatch detected')
logger.info("The GP configuration reports mirror enabling is: %s" % is_config_mirror_enabled)
logger.error("The following segments are mismatched in PT:")
logger.error("")
logger.error("Segment ID:\tmirror_existence_state:")
for (seg_id, mirror_existence_state) in mismatching_segments:
label = "Enabled" if mirror_existence_state > 1 else "Disabled"
logger.error("%i\t\t%i (%s)" % (seg_id, mirror_existence_state, label))
else:
logger.info('[OK] %s' % "mirroring_matching")
return mismatching_segments
|
zixiliuyue/pika
|
pika/connection.py
|
Python
|
bsd-3-clause
| 82,517
| 0.000594
|
"""Core connection objects"""
import ast
import sys
import collections
import copy
import logging
import math
import numbers
import platform
import warnings
if sys.version_info > (3,):
import urllib.parse as urlparse # pylint: disable=E0611,F0401
else:
import urlparse
from pika import __version__
from pika import callback
import pika.channel
from pika import credentials as pika_credentials
from pika import exceptions
from pika import frame
from pika import heartbeat
from pika import utils
from pika import spec
from pika.compat import (xrange, basestring, # pylint: disable=W0622
url_unquote, dictkeys, dict_itervalues,
dict_iteritems)
BACKPRESSURE_WARNING = ("Pika: Write buffer exceeded warning threshold at "
"%i bytes and an estimated %i frames behind")
PRODUCT = "Pika Python Client Library"
LOGGER = logging.getLogger(__name__)
class InternalCloseReasons(object):
"""Internal reason codes passed to the user's on_close_callback when the
connection is terminated abruptly, without reply code/text from the broker.
AMQP 0.9.1 specification cites IETF RFC 821 for reply codes. To avoid
conflict, the `InternalCloseReasons` namespace uses negative integers. These
are invalid for sending to the broker.
"""
SOCKET_ERROR = -1
BLOCKED_CONNECTION_TIMEOUT = -2
class Parameters(object): # pylint: disable=R0902
"""Base connection parameters class definition
:param bool backpressure_detection: `DEFAULT_BACKPRESSURE_DETECTION`
:param float|None blocked_connection_timeout:
`DEFAULT_BLOCKED_CONNECTION_TIMEOUT`
:param int channel_max: `DEFAULT_CHANNEL_MAX`
:param int connection_attempts: `DEFAULT_CONNECTION_ATTEMPTS`
:param credentials: `DEFAULT_CREDENTIALS`
:param int frame_max: `DEFAULT_FRAME_MAX`
:param int heartbeat: `DEFAULT_HEARTBEAT_TIMEOUT`
:param str host: `DEFAULT_HOST`
:param str locale: `DEFAULT_LOCALE`
:param int port: `DEFAULT_PORT`
:param float retry_delay: `DEFAULT_RETRY_DELAY`
:param float socket_timeout: `DEFAULT_SOCKET_TIMEOUT`
:param bool ssl: `DEFAULT_SSL`
:param dict ssl_options: `DEFAULT_SSL_OPTIONS`
:param str virtual_host: `DEFAULT_VIRTUAL_HOST`
"""
# Declare slots to protect against accidental assignment of an invalid
# attribute
__slots__ = (
'_backpressure_detection',
'_blocked_connection_timeout',
'_channel_max',
'_client_properties',
'_connection_attempts',
'_credentials',
'_frame_max',
'_heartbeat',
'_host',
'_locale',
'_port',
'_retry_delay',
'_socket_timeout',
'_ssl',
'_ssl_options',
'_virtual_host'
)
DEFAULT_USERNAME = 'guest'
DEFAULT_PASSWORD = 'guest'
DEFAULT_BACKPRESSURE_DETECTION = False
DEFAULT_BLOCKED_CONNECTION_TIMEOUT = None
DEFAULT_CHANNEL_MAX = pika.channel.MAX_CHANNELS
DEFAULT_CLIENT_PROPERTIES = None
DEFAULT_CREDENTIALS = pika_credentials.PlainCredentials(DEFAULT_USERNAME,
DEFAULT_PASSWORD)
DEFAULT_CONNECTION_ATTEMPTS = 1
DEFAULT_FRAME_MAX = spec.FRAME_MAX_SIZE
DEFAULT_HEARTBEAT_TIMEOUT = None # None accepts server's proposal
DEFAULT_HOST = 'localhost'
DEFAULT_LOCALE = 'en_US'
DEFAULT_PORT = 5672
DEFAULT_RETRY_DELAY = 2.0
DEFAULT_SOCKET_TIMEOUT = 0.25
DEFAULT_SSL = False
DEFAULT_SSL_OPTIONS = None
DEFAULT_SSL_PORT = 5671
DEFAULT_VIRTUAL_HOST = '/'
DEFAULT_HEARTBEAT_INTERVAL = DEFAULT_HEARTBEAT_TIMEOUT # DEPRECATED
def __init__(self):
self._backpressure_detection = None
self.backpressure_detection = self.DEFAULT_BACKPRESSURE_DETECTION
# If not None, blocked_connection_timeout is the timeout, in seconds,
# for the connection to remain blocked; if the timeout expires, the
# connection will be torn down, triggering the connection's
# on_close_callback
self._blocked_connection_timeout = None
self.blocked_connection_timeout = (
self.DEFAULT_BLOCKED_CONNECTION_TIMEOUT)
self._channel_max = None
self.channel_max = self.DEFAULT_CHANNEL_MAX
self._client_properties = None
self.client_properties = self.DEFAULT_CLIENT_PROPERTIES
self._connection_attempts = None
self.connection_attempts = self.DEFAULT_CONNECTION_ATTEMPTS
self._credentials = None
self.credentials = self.DEFAULT_CREDENTIALS
self._frame_max = None
self.frame_max = self.DEFAULT_FRAME_MAX
self._heartbeat = None
self.heartbeat = self.DEFAULT_HEARTBEAT_TIMEOUT
self._host = None
self.host = self.DEFAULT_HOST
self._locale = None
self.locale = self.DEFAULT_LOCALE
self._port = None
self.port = self.DEFAULT_PORT
self._retry_delay = None
self.retry_delay = self.DEFAULT_RETRY_DELAY
self._socket_timeout = None
self.socket_timeout = self.DEFAULT_SOCKET_TIMEOUT
self._ssl = None
self.ssl = self.DEFAULT_SSL
self._ssl_options = None
self.ssl_options = self.DEFAULT_SSL_OPTIONS
self._virtual_host = None
self.virtual_host = self.DEFAULT_VIRTUAL_HOST
def __repr__(self):
"""Represent the info about the instance.
:rtype: str
"""
return ('<%s host=%s port=%s virtual_host=%s ssl=%s>' %
(self.__class__.__name__, self.host, self.port,
self.virtual_host, self.ssl))
@property
def backpressure_detection(self):
"""
:returns: boolean indicating whether backpressure detection is
enabled. Defaults to `DEFAULT_BACKPRESSURE_DETECTION`.
"""
return self._backpressure_detection
@backpressure_detection.setter
def backpressure_detection(self, value):
"""
:param bool value: boolean indicating whether to enable backpressure
detection
"""
if not isinstance(value, bool):
raise TypeError('backpressure_detection must be a bool, '
'but got %r' % (value,))
self._backpressure_detection = value
@property
def blocked_connection_timeout(self):
"""
:returns: None or float blocked connection timeout. Defaults to
`DEFAULT_BLOCKED_CONNECTION_TIMEOUT`.
"""
return self._blocked_connection_timeout
@blocked_connection_timeout.setter
def blocked_connection_timeout(self, value):
"""
:param value: If not None, blocked_connection_timeout is the timeout, in
seconds, for the connection to remain blocked; if the timeout
expires, the connection will be torn down, triggering the
connection's on_close_callback
"""
if value is not None:
if not isinstance(value, numbers.Real):
raise TypeError('blocked_connection_timeout must be a Real '
'number, but got %r' % (value,))
if value < 0:
raise ValueError('blocked_connection_timeout must be >= 0, but '
'got %r' % (value,))
self._blocked_connection_timeout = value
@property
def channel_max(self):
"""
:returns: max preferred number of channels. Defaults to
`DEFAULT_CHANNEL_MAX`.
:rtype: int
"""
return self._channel_max
@channel_max.setter
def channel_max(self, value):
"""
:param int value: max preferred number of channels, between 1 and
`channel.MAX_CHANNELS`, inclusive
"""
if not isinstance(value, numbers.Integral):
raise TypeError('channel_max must be an int, but got %r' % (value,))
if value < 1 or value > pika.channel.MAX_CHANNELS:
raise ValueError('channel_max must be <= %i and > 0, but got %r' %
(pika.channel.MAX_C
|
talnoah/android_kernel_htc_dlx
|
virt/tools/perf/scripts/python/syscall-counts-by-pid.py
|
Python
|
gpl-2.0
| 1,927
| 0.033212
|
# system call counts, by pid
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts-by-pid.py [comm]\n";
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if (for_comm and common_comm != for_comm) or \
(for_pid and common_pid != for_pid ):
return
try:
syscalls[common_comm][common_pid][id] += 1
except TypeError:
syscalls[common_comm][common_pid][id] = 1
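# (Note on the idiom above, illustrative: autodict autovivifies missing keys,
# so the first increment of a new comm/pid/id leaf raises TypeError, which
# the except clause uses to seed the count at 1.)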
def print_syscall_totals():
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events by comm/pid:\n\n",
print "%-40s %10s\n" % ("comm [pid]/syscalls", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id, val in sorted(syscalls[comm][pid].iteritems(), \
key = lambda(k, v): (v, k), reverse = True):
print "  %-38s %10d\n" % (syscall_name(id), val),
|
henzk/django-productline
|
django_productline/startup.py
|
Python
|
mit
| 2,282
| 0.001753
|
from __future__ import unicode_literals
"""
product initialization stuff
"""
import os
import featuremonkey
from .composer import get_composer
from django_productline import compare_version
_product_selected = False
def select_product():
"""
binds the frozen context and the selected features
should be called only once - calls after the first call have
no effect
"""
global _product_selected
if _product_selected:
# tss already bound ... ignore
return
_product_selected = True
from django_productline import context, template
featuremonkey.add_import_guard('django.conf')
featuremonkey.add_import_guard('django.db')
os.environ['DJANGO_SETTINGS_MODULE'] = 'django_productline.settings'
contextfile = os.environ['PRODUCT_CONTEXT_FILENAME']
equationfile = os.environ['PRODUCT_EQUATION_FILENAME']
#bind context and compose features
context.bind_context(contextfile)
get_composer().select_equation(equationfile)
# after composition we are now able to bind composed template settings
template.bind_settings()
featuremonkey.remove_import_guard('django.conf')
featuremonkey.remove_import_guard('django.db')
import django
if compare_version(django.get_version(), '1.7') >= 0:
django.setup()
# force import of settings and urls
# better fail during initialization than on the first request
from django.conf import settings
from django.core.urlresolvers import get_resolver
# eager creation of URLResolver
get_resolver(None)
# make sure overextends tag is registered
from django.template.loader import get_template
from overextends import models
def get_wsgi_application():
"""
returns the wsgi application for the selected product
this function is called by featuredjango.wsgi to get the wsgi
application object
if you need to refine the wsgi application object e.g. to add
wsgi middleware please refine django.core.wsgi.get_wsgi_application directly.
"""
# make sure the product is selected before importing and constructing wsgi app
select_product()
# return (possibly refined) wsgi application
from django.core.wsgi import get_wsgi_application
return get_wsgi_application()
|
logston/ipy.io
|
docker/jupyter_notebook_config.py
|
Python
|
bsd-3-clause
| 77
| 0.012987
|
c = get_config()
c.NotebookApp.ip = '*'
c.NotebookApp.open_browser = False
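# Illustrative note: with ip set to '*' and open_browser disabled, the
# notebook server listens on all interfaces and does not try to launch a
# local browser -- the usual setup for running Jupyter inside a container.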
|
mganeva/mantid
|
Framework/PythonInterface/test/python/plugins/algorithms/CreateWorkspaceTest.py
|
Python
|
gpl-3.0
| 4,168
| 0.013196
|
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
from __future__ import (absolute_import, division, print_function)
import unittest
from mantid.api import MatrixWorkspace, AnalysisDataService
from mantid.simpleapi import CreateWorkspace
from testhelpers import run_algorithm
import numpy as np
class CreateWorkspaceTest(unittest.TestCase):
def test_create_with_1D_numpy_array(self):
x = np.array([1.,2.,3.,4.])
y = np.array([1.,2.,3.])
e = np.sqrt(np.array([1.,2.,3.]))
wksp = CreateWorkspace(DataX=x, DataY=y,DataE=e,NSpec=1,UnitX='TOF')
self.assertTrue(isinstance(wksp, MatrixWorkspace))
self.assertEquals(wksp.getNumberHistograms(), 1)
self.assertEquals(len(wksp.readY(0)), len(y))
self.assertEquals(len(wksp.readX(0)), len(x))
self.assertEquals(len(wksp.readE(0)), len(e))
for index in range(len(y)):
self.assertEquals(wksp.readY(0)[index], y[index])
self.assertEquals(wksp.readE(0)[index], e[index])
self.assertEquals(wksp.readX(0)[index], x[index])
# Last X value
self.assertEquals(wksp.readX(0)[len(x)-1], x[len(x)-1])
AnalysisDataService.remove("wksp")
def test_create_with_2D_numpy_array(self):
x = np.array([1.,2.,3.,4.])
y = np.array([[1.,2.,3.],[4.,5.,6.]])
e = np.sqrt(y)
wksp = CreateWorkspace(DataX=x, DataY=y,DataE=e,NSpec=2,UnitX='TOF')
self.assertTrue(isinstance(wksp, MatrixWorkspace))
self.assertEquals(wksp.getNumberHistograms(), 2)
for i in [0,1]:
for j in range(len(y[0])):
self.assertEquals(wksp.readY(i)[j], y[i][j])
self.assertEquals(wksp.readE(i)[j], e[i][j])
self.assertEquals(wksp.readX(i)[j], x[j])
# Last X value
self.assertEquals(wksp.readX(i)[len(x)-1], x[len(x)-1])
AnalysisDataService.remove("wksp")
def test_with_data_from_other_workspace(self):
wsname = 'LOQ'
x1 = np.array([1.,2.,3.,4.])
y1 = np.array([[1.,2.,3.],[4.,5.,6.]])
e1 = np.sqrt(y1)
loq = CreateWorkspace(DataX=x1, DataY=y1,DataE=e1,NSpec=2,UnitX='Wavelength')
x2 = loq.extractX()
y2 = loq.extractY()
e2 = loq.extractE()
wksp = CreateWorkspace(DataX=x2, DataY=y2,DataE=e2,NSpec=2,UnitX='Wavelength')
self.assertTrue(isinstance(wksp, MatrixWorkspace))
self.assertEquals(wksp.getNumberHistograms(), 2)
for i in [0,1]:
for j in range(len(y2[0])):
self.assertEquals(wksp.readY(i)[j], loq.readY(i)[j])
self.assertEquals(wksp.readE(i)[j], loq.readE(i)[j])
self.assertEquals(wksp.readX(i)[j], loq.readX(i)[j])
# Last X value
self.assertEquals(wksp.readX(i)[len(x2)-1], loq.readX(i)[len(x2)-1])
AnalysisDataService.remove("wksp")
def test_create_with_numerical_vertical_axis_values(self):
data = [1.,2.,3.]
axis_values = [5,6,7]
alg = run_algorithm("CreateWorkspace", DataX=data, DataY=data, NSpec=3,VerticalAxisUnit='MomentumTransfer',
VerticalAxisValues=axis_values,child=True)
wksp = alg.getProperty("OutputWorkspace").value
for i in range(len(axis_values)):
self.assertEquals(wksp.getAxis(1).getValue(i), axis_values[i])
def test_create_with_numpy_vertical_axis_values(self):
data = [1.,2.,3.]
axis_values = np.array([6.,7.,8.])
alg = run_algorithm("CreateWorkspace", DataX=data, DataY=data, NSpec=3,VerticalAxisUnit='MomentumTransfer',
VerticalAxisValues=axis_values,child=True)
wksp = alg.getProperty("OutputWorkspace").value
for i in range(len(axis_values)):
self.assertEquals(wksp.getAxis(1).getValue(i), axis_values[i])
if __name__ == '__main__':
unittest.main()
|
petervaro/tup
|
src/tup.py
|
Python
|
gpl-3.0
| 23,015
| 0.004519
|
## INFO ##
## INFO ##
# TODO: %o is not available in output, nor input strings, only in command
# TODO: !-macro and ^-flags should only be available
# at the beginning of a command
#-- CHEATSHEET ----------------------------------------------------------------#
# HOWTO: http://sublimetext.info/docs/en/reference/syntaxdefs.html
# REGEX: http://manual.macromates.com/en/regular_expressions
# Syntax Definition
syntax = {
'name': '{NAME}',
'comment': ('\n\t\tCopyright (C) 2015 - 2016 Peter Varo'
'\n\t\t<http://github.com/petervaro/tup>'
'\n'
'\n\t\tThis program is free software: you can redistribute it'
'\n\t\tand/or modify it under the terms of the GNU General'
'\n\t\tPublic License as published by the Free Software'
'\n\t\tFoundation, either version 3 of the License, or (at your'
'\n\t\toption) any later version.'
'\n'
'\n\t\tThis program is distributed in the hope that it will be'
'\n\t\tuseful, but WITHOUT ANY WARRANTY; without even the'
'\n\t\timplied warranty of MERCHANTABILITY or FITNESS FOR A'
'\n\t\tPARTICULAR PURPOSE. See the GNU General Public License'
'\n\t\tfor more details.'
'\n'
'\n\t\tYou should have received a copy of the GNU General Public'
'\n\t\tLicense along with this program, most likely a file in'
'\n\t\tthe root directory, called "LICENSE". If not, see'
'\n\t\t<http://www.gnu.org/licenses>.'
'\n\t'),
'scopeName': 'source.{SCOPE}',
'fileTypes': ['Tupfile', 'tup'],
# Patterns
'patterns':
[
#-- COMMENT -------------------------------------------------------------------#
{
# One-liner
'name' : 'comment.line.hashmark.{SCOPE}',
'begin': r'(?<!\\)^\s*#',
'patterns':
[
{
'name' : 'comment.line.hashmark.line_continuation.{SCOPE}',
'match': r'\\\s*\n'
}
],
'end': r'(?<!\\)\n'
},
#-- MACROS --------------------------------------------------------------------#
{
'name' : 'meta.macro.definition.{SCOPE}',
'begin': r'^\s*(!)(.+?)\s*(=)',
'beginCaptures':
{
1: {'name': 'keyword.operator.macro.prefix.{SCOPE}'},
2: {'name': 'entity.other.inherited-class.macro.variable.{SCOPE}'},
3: {'name': 'keyword.operator.macro.assignment.{SCOPE}'}
},
'patterns':
[
# Input
{
'name' : 'string.unquoted.input.{SCOPE}',
'begin': r'(?<==)',
'patterns':
[
# Group
{
'name' : 'keyword.operator.group.{SCOPE}',
'match': r'<(.+?)(?<!\|)>',
'captures':
{
1: {'name': 'storage.modifier.group.{SCOPE}'}
}
},
# Bin
{
'name' : 'keyword.operator.bin.{SCOPE}',
'match': r'{(.+?)}',
'captures':
{
1: {'name': 'storage.type.bin.{SCOPE}'}
}
},
# Command
{
'name' : 'string.unquoted.command.{SCOPE}',
'begin': r'(\|>)',
'beginCaptures':
{
1: {'name': 'keyword.operator.arrow.{SCOPE}'}
},
'patterns':
[
# Output
{
'name' : 'string.unquoted.command.{SCOPE}',
'begin': r'(\|>)',
'beginCaptures':
{
1: {'name': 'keyword.operator.arrow.{SCOPE}'}
},
'patterns':
[
{
'name' : 'keyword.operator.bin.{SCOPE}',
'begin': r'{(.+?)}',
'beginCaptures':
{
1: {'name': 'storage.type.bin.{SCOPE}'},
},
'patterns':
[
{
'name' : 'invalid.illegal.group_after_bin.{SCOPE}',
'match': r'<.+?>.*'
},
{
'name' : 'invalid.illegal.bin_after_bin.{SCOPE}',
'match': r'{.+?}.*'
},
{'include': '#io_string'},
],
'end': r'(?=(?<!\\)\n)'
},
{
'name' : 'keyword.operator.group.{SCOPE}',
'begin': r'<(.+?)(?<!\|)>',
'beginCaptures':
{
1: {'name': 'storage.modifier.group.{SCOPE}'}
},
'patterns':
[
{
'name' : 'invalid.illegal.group_after_group.{SCOPE}',
'match': r'<.+?>.*'
},
{
'name' : 'keyword.operator.bin.{SCOPE}',
'begin': r'{(.+?)}',
'beginCaptures':
{
1: {'name': 'storage.type.bin.{SCOPE}'}
},
'patterns':
[
{
'name' : 'invalid.illegal.group_after_bin.{SCOPE}',
'match': r'<.+?>.*'
},
{'include': '#io_string'},
],
'end': r'(?=(?<!\\)\n)'
},
{'include': '#io_string'},
],
'end': r'(?=(?<!\\)\n)'
|
exaile/exaile
|
plugins/minimode/__init__.py
|
Python
|
gpl-2.0
| 9,528
| 0.00063
|
# Copyright (C) 2009-2010 Mathias Brodala
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from gi.repository import Gtk
from xl import event, providers, settings
from xl.nls import gettext as _
from xlgui.accelerators import Accelerator
from xlgui.widgets import menu
from . import controls
from . import minimode_preferences
MINIMODE = None
def __migrate_fixed_controls():
"""
Makes sure fixed controls are selected,
mostly for migration from older versions
"""
option_name = 'plugin/minimode/selected_controls'
if settings.MANAGER.has_option(option_name):
selected_controls = settings.get_option(option_name)
if 'restore' not in selected_controls:
selected_controls += ['restore']
settings.set_option(option_name, selected_controls)
def enable(exaile):
"""
Enables the mini mode plugin
"""
__migrate_fixed_controls()
if exaile.loading:
event.add_callback(_enable, 'gui_loaded')
else:
_enable(None, exaile, None)
def _enable(event, exaile, nothing):
"""
Handles the deferred enable call
"""
global MINIMODE
MINIMODE = MiniMode(exaile)
def disable(exaile):
"""
Disables the mini mode plugin
"""
global MINIMODE
MINIMODE.destroy()
MINIMODE = None
def get_preferences_pane():
return minimode_preferences
class MiniMode(Gtk.Window):
"""
Mini Mode main window
"""
__gsignals__ = {'show': 'override'}
def __init__(self, exaile):
"""
Sets up the mini mode main window and
options to access it
"""
Gtk.Window.__init__(self)
self.set_title('Exaile Mini Mode')
self.set_resizable(False)
self.exaile_window = exaile.gui.main.window
controls.register()
self.box = controls.ControlBox()
self.box.set_spacing(3)
self.border_frame = Gtk.Frame()
self.border_frame.add(self.box)
self.add(self.border_frame)
self.accelerator = Accelerator(
'<Primary><Alt>M', _('Mini Mode'), self.on_menuitem_activate
)
self.menuitem = menu.simple_menu_item(
'minimode',
['clear-playlist'],
icon_name='exaile-minimode',
callback=self.accelerator,
)
providers.register('menubar-view-menu', self.menuitem)
providers.register('mainwindow-accelerators', self.accelerator)
self.mainbutton = Gtk.Button(label=_('Mini Mode'))
self.mainbutton.set_image(
Gtk.Image.new_from_icon_name('exaile-minimode', Gtk.IconSize.BUTTON)
)
self.mainbutton.connect('clicked', self.on_mainbutton_clicked)
action_area = exaile.gui.main.info_area.get_action_area()
action_area.pack_end(self.mainbutton, False, False, 6)
self.__active = False
self.__dirty = True
# XXX: Until defaults are implemented in xl.settings
self.__defaults = {
'plugin/minimode/always_on_top': True,
'plugin/minimode/show_in_panel': False,
'plugin/minimode/on_all_desktops': True,
'plugin/minimode/display_window_decorations': True,
'plugin/minimode/window_decoration_type': 'full',
'plugin/minimode/use_alpha': False,
'plugin/minimode/transparency': 0.3,
'plugin/minimode/horizontal_position': 10,
'plugin/minimode/vertical_position': 10,
}
exaile.gui.main.connect('main-visible-toggle', self.on_main_visible_toggle)
event.add_ui_callback(self.on_option_set, 'plugin_minimode_option_set')
self.on_option_set(
'plugin_minimode_option_set',
settings,
'plugin/minimode/button_in_mainwindow',
)
def destroy(self):
"""
Cleanups
"""
providers.unregister('mainwindow-accelerators', self.accelerator)
providers.unregister('menubar-view-menu', self.menuitem)
controls.unregister()
self.mainbutton.destroy()
self.set_active(False)
self.box.destroy()
Gtk.Window.destroy(self)
def set_active(self, active):
"""
Enables or disables the Mini Mode window
"""
if active == self.__active:
return
if active and not self.props.visible:
self.exaile_window.hide()
self.show_all()
elif not active and self.props.visible:
self.hide()
self.exaile_window.show()
self.__active = active
def do_show(self):
"""
Updates the appearance if
settings have been changed
"""
h = None
v = None
if self.__dirty:
for option, default in self.__defaults.items():
value = settings.get_option(option, default)
if option == 'plugin/minimode/always_on_top':
self.set_keep_above(value)
elif option == 'plugin/minimode/show_in_panel':
self.props.skip_taskbar_hint = not value
elif option == 'plugin/minimode/on_all_desktops':
if value:
self.stick()
else:
self.unstick()
elif option == 'plugin/minimode/display_window_decorations':
if value:
option = 'plugin/minimode/window_decoration_type'
value = settings.get_option(option, self.__defaults[option])
if value == 'full':
self.set_decorated(True)
self.border_frame.set_shadow_type(Gtk.ShadowType.NONE)
elif value == 'simple':
self.set_decorated(False)
self.border_frame.set_shadow_type(Gtk.ShadowType.OUT)
else:
self.set_decorated(False)
self.border_frame.set_shadow_type(Gtk.ShadowType.NONE)
elif option == 'plugin/minimode/use_alpha':
if value:
option = 'plugin/minimode/transparency'
opacity = 1 - settings.get_option(
option, self.__defaults[option]
)
self.set_opacity(opacity)
elif option == 'plugin/minimode/horizontal_position':
h = value
elif option == 'plugin/minimode/vertical_position':
v = value
self.__dirty = False
min_width, natural_width = self.get_preferred_width()
min_height, natural_height = self.get_preferred_height()
self.resize(natural_width, natural_height)
self.queue_draw()
Gtk.Window.do_show(self)
# GTK (or perhaps the theme?) likes to move the window to some
# random default position while showing it... so do these at the
# same time after show, otherwise it'll move on us
x, y = self.get_position()
if h is not None:
x = h
if v is not None:
y = v
self.move(x, y)
def do_configure_event(self, event):
"""
Stores the window position upon window movement
"""
settings.set_option('plugin/minimode/horizontal_position', event.x)
settings.set_option('plugin/minimode/vertical_position', ev
|
dablak/saved_searches
|
setup.py
|
Python
|
bsd-3-clause
| 792
| 0
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from distutils.core import setup
setup(
name='saved_searches',
version='2.0.0-alpha',
description='Saves user searches for integration with Haystack.',
author='Daniel Lindsley',
author_email='daniel@toastdriven.com',
url='http://github.com/toastdriven/saved_searches',
packages=[
'saved_searches',
'saved_searches.templatetags',
],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Utilities'
],
)
|
matematik7/STM
|
tests/test_filename.py
|
Python
|
mit
| 4,121
| 0.005156
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------
# tests/test_filename.py
#
# Test thumbnail file name generation.
# ----------------------------------------------------------------
# copyright (c) 2015 - Domen Ipavec
# Distributed under The MIT License, see LICENSE
# ----------------------------------------------------------------
from unittest import TestCase
from stm.configuration import Configuration
from stm.image import Image
import os, os.path
class Test_filename(TestCase):
def checkImage(self, input, output):
img = Image(self.conf)
img.loadImage(input)
self.assertEqual(img.getThumbnailName(), output)
def setUp(self):
self.conf = Configuration()
def removeFolders(self, l):
for f in l:
if os.path.exists(f):
try:
os.rmdir(f)
except:
pass
def tearDown(self):
self.conf = None
self.removeFolders(('thumbs', '/tmp/thumbs', 'test', '/tmp/test', 'test-folder', '/tmp/test-folder'))
def test_output_file(self):
self.conf.output = "output.jpg"
self.checkImage("input.png", "output.jpg")
def test_no_config(self):
self.checkImage("input.png", "thumbs/input.png")
self.checkImage("input.jpg", "thumbs/input.png")
self.checkImage("13 De_(com)čšž.test.jpg", "thumbs/13 De_(com)čšž.test.png")
self.checkImage("/tmp/input.jpg", "/tmp/thumbs/input.png")
def test_folder(self):
self.conf.folder = "test-folder"
self.checkImage("input.png", "test-folder/input.png")
self.checkImage("input.jpg", "test-folder/input.png")
self.checkImage("13 De_(com)čšž.test.jpg", "test-folder/13 De_(com)čšž.test.png")
self.checkImage("/tmp/input.jpg", "/tmp/test-folder/input.png")
def test_abs_folder(self):
self.conf.folder = "/tmp"
self.checkImage("input.png", "/tmp/input.png")
self.checkImage("input.jpg", "/tmp/input.png")
self.checkImage("13 De_(com)čšž.test.jpg", "/tmp/13 De_(com)čšž.test.png")
self.checkImage("/tmp/input.jpg", "/tmp/input.png")
def test_format_jpg(self):
self.conf.fileFormat = 'jpg'
self.checkImage("input.png", "thumbs/input.jpg")
self.checkImage("input.jpg", "thumbs/input.jpg")
self.checkImage("13 De_(com)čšž.test.jpg", "thumbs/13 De_(com)čšž.test.jpg")
self.checkImage("/tmp/input.jpg", "/tmp/thumbs/input.jpg")
def test_format_source(self):
self.conf.fileFormat = 'source'
self.checkImage("input.png", "thumbs/input.png")
self.checkImage("image.jpg", "thumbs/image.jpg")
self.checkImage("13 De_(com)čšž.test.jpg", "thumbs/13 De_(com)čšž.test.jpg")
self.checkImage("/tmp/input.png", "/tmp/thumbs/input.png")
def test_postfix(self):
self.conf.name_postfix = "_thumb"
self.checkImage("input.png", "input_thumb.png")
self.checkImage("input.jpg", "input_thumb.png")
self.checkImage("13 De_(com)čšž.test.jpg", "13 De_(com)čšž.test_thumb.png")
self.checkImage("/tmp/input.jpg", "/tmp/input_thumb.png")
def test_prefix(self):
self.conf.name_prefix = "thumb_"
self.checkImage("input.png", "thumb_input.png")
self.checkImage("input.jpg", "thumb_input.png")
self.checkImage("13 De_(com)čšž.test.jpg", "thumb_13 De_(com)čšž.test.png")
self.checkImage("/tmp/input.jpg", "/tmp/thumb_input.png")
def test_all(self):
self.conf.folder = "test"
self.conf.fileFormat = 'jpg'
self.conf.name_prefix = "thumb_"
self.conf.name_postfix = "_thumb"
self.checkImage("input.png", "test/thumb_input_thumb.jpg")
self.checkImage("input.jpg", "test/thumb_input_thumb.jpg")
self.checkImage("13 De_(com)čšž.test.jpg", "test/thumb_13 De_(com)čšž.test_thumb.jpg")
self.checkImage("/tmp/input.png", "/tmp/test/thumb_input_thumb.jpg")
|
accraze/python-twelve-tone
|
tests/test_twelve_tone.py
|
Python
|
bsd-2-clause
| 186
| 0
|
from click.testing import CliRunner
from twelve_tone.cli import main
def test_main():
runner = CliRunner()
result = runner.invoke(main, [])
assert result.exit_code == 0
|
icyflame/batman
|
pywikibot/__init__.py
|
Python
|
mit
| 26,823
| 0.000336
|
# -*- coding: utf-8 -*-
"""The initialization file for the Pywikibot framework."""
#
# (C) Pywikibot team, 2008-2015
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
__release__ = '2.0b3'
__version__ = '$Id$'
__url__ = 'https://www.mediawiki.org/wiki/Special:MyLanguage/Manual:Pywikibot'
import datetime
import math
import re
import sys
import threading
import json
if sys.version_info[0] > 2:
from queue import Queue
long = int
else:
from Queue import Queue
from warnings import warn
# Use pywikibot. prefix for all in-package imports; this is to prevent
# confusion with similarly-named modules in version 1 framework, for users
# who want to continue using both
from pywikibot import config2 as config
from pywikibot.bot import (
output, warning, error, critical, debug, stdout, exception,
input, input_choice, input_yn, inputChoice, handle_args, showHelp, ui, log,
calledModuleName, Bot, CurrentPageBot, WikidataBot,
# the following are flagged as deprecated on usage
handleArgs,
)
from pywikibot.exceptions import (
Error, InvalidTitle, BadTitle, NoPage, NoMoveTarget, SectionError,
SiteDefinitionError, NoSuchSite, UnknownSite, UnknownFamily,
UnknownExtension,
NoUsername, UserBlocked,
PageRelatedError, IsRedirectPage, IsNotRedirectPage,
PageSaveRelatedError, PageNotSaved, OtherPageSaveError,
LockedPage, CascadeLockedPage, LockedNoPage, NoCreateError,
EditConflict, PageDeletedConflict, PageCreatedConflict,
ServerError, FatalServerError, Server504Error,
CaptchaError, SpamfilterError, CircularRedirect, InterwikiRedirectPage,
WikiBaseError, CoordinateGlobeUnknownException,
)
from pywikibot.tools import PY2, UnicodeMixin, redirect_func
from pywikibot.i18n import translate
from pywikibot.data.api import UploadWarning
from pywikibot.diff import PatchManager
import pywikibot.textlib as textlib
import pywikibot.tools
textlib_methods = (
'unescape', 'replaceExcept', 'removeDisabledParts', 'removeHTMLParts',
'isDisabled', 'interwikiFormat', 'interwikiSort',
'getLanguageLinks', 'replaceLanguageLinks',
'removeLanguageLinks', 'removeLanguageLinksAndSeparator',
'getCategoryLinks', 'categoryFormat', 'replaceCategoryLinks',
'removeCategoryLinks', 'removeCategoryLinksAndSeparator',
'replaceCategoryInPlace', 'compileLinkR', 'extract_templates_and_params',
'TimeStripper',
)
__all__ = (
'config', 'ui', 'UnicodeMixin', 'translate',
'Page', 'FilePage', 'Category', 'Link', 'User',
'ItemPage', 'PropertyPage', 'Claim',
'html2unicode', 'url2unicode', 'unicode2html',
'stdout', 'output', 'warning', 'error', 'critical', 'debug',
'exception', 'input_choice', 'input', 'input_yn', 'inputChoice',
    'handle_args', 'handleArgs', 'showHelp', 'ui', 'log',
    'calledModuleName', 'Bot', 'CurrentPageBot', 'WikidataBot',
'Error', 'InvalidTitle', 'BadTitle', 'NoPage', 'NoMoveTarget',
'SectionError',
'SiteDefinitionError', 'NoSuchSite', 'UnknownSite', 'UnknownFamily',
'UnknownExtension',
'NoUsername', 'UserBlocked', 'UserActionRefuse',
'PageRelatedError', 'IsRedirectPage', 'IsNotRedirectPage',
'PageSaveRelatedError', 'PageNotSaved', 'OtherPageSaveError',
'LockedPage', 'CascadeLockedPage', 'LockedNoPage', 'NoCreateError',
'EditConflict', 'PageDeletedConflict', 'PageCreatedConflict',
'UploadWarning',
'ServerError', 'FatalServerError', 'Server504Error',
'CaptchaError', 'SpamfilterError', 'CircularRedirect',
'InterwikiRedirectPage',
'WikiBaseError', 'CoordinateGlobeUnknownException',
'QuitKeyboardInterrupt',
)
__all__ += textlib_methods
if PY2:
# T111615: Python 2 requires __all__ is bytes
globals()['__all__'] = tuple(bytes(item) for item in __all__)
for _name in textlib_methods:
target = getattr(textlib, _name)
wrapped_func = redirect_func(target)
globals()[_name] = wrapped_func
deprecated = redirect_func(pywikibot.tools.deprecated)
deprecate_arg = redirect_func(pywikibot.tools.deprecate_arg)
class Timestamp(datetime.datetime):
"""Class for handling MediaWiki timestamps.
This inherits from datetime.datetime, so it can use all of the methods
and operations of a datetime object. To ensure that the results of any
operation are also a Timestamp object, be sure to use only Timestamp
objects (and datetime.timedeltas) in any operation.
Use Timestamp.fromISOformat() and Timestamp.fromtimestampformat() to
create Timestamp objects from MediaWiki string formats.
As these constructors are typically used to create objects using data
    provided by site and page methods, some of which return a Timestamp
when previously they returned a MediaWiki string representation, these
methods also accept a Timestamp object, in which case they return a clone.
Use Site.getcurrenttime() for the current time; this is more reliable
than using Timestamp.utcnow().
"""
mediawikiTSFormat = "%Y%m%d%H%M%S"
ISO8601Format = "%Y-%m-%dT%H:%M:%SZ"
def clone(self):
"""Clone this instance."""
return self.replace(microsecond=self.microsecond)
@classmethod
def fromISOformat(cls, ts):
"""Convert an ISO 8601 timestamp to a Timestamp object."""
        # If inadvertently passed a Timestamp object, use replace()
# to create a clone.
if isinstance(ts, cls):
return ts.clone()
return cls.strptime(ts, cls.ISO8601Format)
@classmethod
def fromtimestampformat(cls, ts):
"""Convert a MediaWiki internal timestamp to a Timestamp object."""
        # If inadvertently passed a Timestamp object, use replace()
# to create a clone.
if isinstance(ts, cls):
return ts.clone()
return cls.strptime(ts, cls.mediawikiTSFormat)
def isoformat(self):
"""
Convert object to an ISO 8601 timestamp accepted by MediaWiki.
datetime.datetime.isoformat does not postfix the ISO formatted date
with a 'Z' unless a timezone is included, which causes MediaWiki
~1.19 and earlier to fail.
"""
return self.strftime(self.ISO8601Format)
toISOformat = redirect_func(isoformat, old_name='toISOformat',
class_name='Timestamp')
def totimestampformat(self):
"""Convert object to a MediaWiki internal timestamp."""
return self.strftime(self.mediawikiTSFormat)
def __str__(self):
"""Return a string format recognized by the API."""
return self.isoformat()
def __add__(self, other):
"""Perform addition, returning a Timestamp instead of datetime."""
newdt = super(Timestamp, self).__add__(other)
if isinstance(newdt, datetime.datetime):
return Timestamp(newdt.year, newdt.month, newdt.day, newdt.hour,
newdt.minute, newdt.second, newdt.microsecond,
newdt.tzinfo)
else:
return newdt
def __sub__(self, other):
"""Perform substraction, returning a Timestamp instead of datetime."""
newdt = super(Timestamp, self).__sub__(other)
if isinstance(newdt, datetime.datetime):
return Timestamp(newdt.year, newdt.month, newdt.day, newdt.hour,
newdt.minute, newdt.second, newdt.microsecond,
newdt.tzinfo)
else:
return newdt
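# Editor's note: a minimal usage sketch of the Timestamp class above; the
# timestamp strings are made-up sample values, not taken from the source.
#
#     ts = Timestamp.fromISOformat('2015-06-01T12:00:00Z')
#     assert ts == Timestamp.fromtimestampformat('20150601120000')
#     later = ts + datetime.timedelta(hours=1)  # arithmetic stays a Timestamp
#     assert isinstance(later, Timestamp)
#     assert str(later) == '2015-06-01T13:00:00Z'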
class Coordinate(object):
"""
Class for handling and storing Coordinates.
For now its just being used for DataSite, but
in the future we can use it for the GeoData extension.
"""
def __init__(self, lat, lon, alt=None, precision=None, globe='earth',
typ="", name="", dim=None, site=None, entity=''):
"""
Represent a geo coordinate.
@param lat: Latitude
@type lat: float
@param lon: Longitude
@type lon: float
@param alt: Altitute? TODO FIXME
@param precision: precision
@type precision: float
@param globe: Which globe the p
|
lgarren/spack
|
var/spack/repos/builtin/packages/r-affyio/package.py
|
Python
|
lgpl-2.1
| 1,756
| 0.001139
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RAffyio(RPackage):
"""Routines for parsing Affymetrix data files based upon file format
information. Primary focus is on accessing the CEL and CDF file
formats."""
homepage = "https://bioconductor.org/packages/affyio/"
url = "https://bioconductor.org/packages/3.5/bioc/src/contrib/affyio_1.46.0.tar.gz"
version('1.46.0', 'e1f7a89ae16940aa29b998a4dbdc0ef9')
depends_on('r-zlibbioc', type=('build', 'run'))
depends_on('r@3.4.0:3.4.9', when='@1.46.0')
|
riverbird/djangoweblog
|
tinymce/widgets.py
|
Python
|
gpl-2.0
| 6,397
| 0.00297
|
# Copyright (c) 2008 Joost Cassee
# Licensed under the terms of the MIT License (see LICENSE.txt)
"""
This TinyMCE widget was copied and extended from this code by John D'Agostino:
http://code.djangoproject.com/wiki/CustomWidgetsTinyMCE
"""
from __future__ import unicode_literals
#import tinymce.settings
import demo.tinymce.settings
from demo import tinymce  # bind 'tinymce' so the tinymce.settings references below resolve
from django import forms
from django.conf import settings
from django.contrib.admin import widgets as admin_widgets
from django.core.urlresolvers import reverse
from django.forms.widgets import flatatt
from django.utils.html import escape
from django.utils.datastructures import SortedDict
from django.utils.safestring import mark_safe
from django.utils.translation import get_language, ugettext as _
try:
import json
except ImportError:
from django.utils import simplejson as json
try:
from django.utils.encoding import smart_text as smart_unicode
except ImportError:
try:
from django.utils.encoding import smart_unicode
except ImportError:
from django.forms.util import smart_unicode
class TinyMCE(forms.Textarea):
"""
TinyMCE widget. Set settings.TINYMCE_JS_URL to set the location of the
javascript file. Default is "MEDIA_URL + 'js/tiny_mce/tiny_mce.js'".
You can customize the configuration with the mce_attrs argument to the
constructor.
In addition to the standard configuration you can set the
'content_language' parameter. It takes the value of the 'language'
parameter by default.
In addition to the default settings from settings.TINYMCE_DEFAULT_CONFIG,
this widget sets the 'language', 'directionality' and
    'spellchecker_languages' parameters by default. The first is derived from
the current Django language, the others from the 'content_language'
parameter.
"""
def __init__(self, content_language=None, attrs=None, mce_attrs=None):
super(TinyMCE, self).__init__(attrs)
if mce_attrs is None:
mce_attrs = {}
self.mce_attrs = mce_attrs
if content_language is None:
content_language = mce_attrs.get('language', None)
self.content_language = content_language
def render(self, name, value, attrs=None):
if value is None: value = ''
value = smart_unicode(value)
final_attrs = self.build_attrs(attrs)
final_attrs['name'] = name
assert 'id' in final_attrs, "TinyMCE widget attributes must contain 'id'"
mce_config = tinymce.settings.DEFAULT_CONFIG.copy()
mce_config.update(get_language_config(self.content_language))
if tinymce.settings.USE_FILEBROWSER:
mce_config['file_browser_callback'] = "djangoFileBrowser"
mce_config.update(self.mce_attrs)
        if 'mode' not in mce_config:
mce_config['mode'] = 'exact'
if mce_config['mode'] == 'exact':
mce_config['elements'] = final_attrs['id']
mce_config['strict_loading_mode'] = 1
# Fix for js functions
js_functions = {}
for k in ('paste_preprocess','paste_postprocess'):
if k in mce_config:
js_functions[k] = mce_config[k]
del mce_config[k]
mce_json = json.dumps(mce_config)
pos = final_attrs['id'].find('__prefix__')
if pos != -1:
mce_json = mce_json.replace('"%s"' % final_attrs['id'], 'elements')
for k in js_functions:
index = mce_json.rfind('}')
mce_json = mce_json[:index]+', '+k+':'+js_functions[k].strip()+mce_json[index:]
html = ['<textarea%s>%s</textarea>' % (flatatt(final_attrs), escape(value))]
if tinymce.settings.USE_COMPRESSOR:
compressor_config = {
'plugins': mce_config.get('plugins', ''),
'themes': mce_config.get('theme', 'advanced'),
'languages': mce_config.get('language', ''),
'diskcache': True,
'debug': False,
}
compressor_json = json.dumps(compressor_config)
html.append('<script type="text/javascript">tinyMCE_GZ.init(%s)</script>' % compressor_json)
if pos != -1:
html.append('''<script type="text/javascript">
setTimeout(function () {
var id = '%s';
if (typeof(window._tinymce_inited) == 'undefined') {
window._tinymce_inited = [];
}
if (typeof(window._tinymce_inited[id]) == 'undefined') {
window._tinymce_inited[id] = true;
} else {
var elements = id.replace(/__prefix__/, parseInt(document.getElementById('%sTOTAL_FORMS').value) - 1);
if (document.getElementById(elements)) {
tinymce.init(%s);
}
}
}, 0);
</script>''' % (final_attrs['id'], final_attrs['id'][0:pos], mce_json))
else:
html.append('<script type="text/javascript">tinyMCE.init(%s)</script>' % mce_json)
return mark_safe('\n'.join(html))
def _media(self):
if tinymce.settings.USE_COMPRESSOR:
js = [reverse('tinymce-compressor')]
else:
js = [tinymce.settings.JS_URL]
if tinymce.settings.USE_FILEBROWSER:
js.append(reverse('tinymce-filebrowser'))
return forms.Media(js=js)
media = property(_media)
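# Editor's note: a minimal, hypothetical usage sketch of the widget above;
# the form and field names are invented for illustration.
#
#     class ArticleForm(forms.Form):
#         body = forms.CharField(widget=TinyMCE(
#             content_language='de',
#             mce_attrs={'theme': 'advanced', 'width': '100%'},
#         ))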
class AdminTinyMCE(TinyMCE, admin_widgets.AdminTextareaWidget):
pass
def get_language_config(content_language=None):
language = get_language()[:2]
if content_language:
content_language = content_language[:2]
else:
content_language = language
config = {}
config['language'] = language
lang_names = SortedDict()
for lang, name in settings.LANGUAGES:
if lang[:2] not in lang_names: lang_names[lang[:2]] = []
lang_names[lang[:2]].append(_(name))
sp_langs = []
for lang, names in lang_names.items():
if lang == content_language:
default = '+'
else:
default = ''
sp_langs.append('%s%s=%s' % (default, ' / '.join(names), lang))
config['spellchecker_languages'] = ','.join(sp_langs)
if content_language in settings.LANGUAGES_BIDI:
config['directionality'] = 'rtl'
else:
config['directionality'] = 'ltr'
if tinymce.settings.USE_SPELLCHECKER:
config['spellchecker_rpc_url'] = reverse('tinymce.views.spell_check')
return config
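# Editor's note: a hypothetical example of the config built above. With
# settings.LANGUAGES = (('en', 'English'), ('de', 'German')), an active 'de'
# UI language and content_language='de', get_language_config('de') returns
# roughly:
#
#     {'language': 'de',
#      'spellchecker_languages': 'English=en,+German=de',
#      'directionality': 'ltr'}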
|
JoeGlancy/micropython
|
examples/reverb.py
|
Python
|
mit
| 1,345
| 0.006691
|
import audio
def from_file(file, frame):
ln = -1
while ln:
ln = file.readinto(frame)
yield frame
def reverb_gen(src, buckets, reflect, fadeout):
bucket_count = len(buckets)
bucket = 0
for frame in src:
echo = buckets[bucket]
echo *= reflect
echo += frame
yield echo
buckets[bucket] = echo
bucket += 1
if bucket == bucket_count:
bucket = 0
while fadeout:
fadeout -= 1
echo = buckets[bucket]
echo *= reflect
yield echo
buckets[bucket] = echo
        bucket += 1
if bucket == bucket_count:
bucket = 0
def reverb(src, delay, reflect):
#Do all allocation up front, so we don't need to do any in the generator.
bucket_count = delay>>2
buckets = [ None ] * bucket_count
for i in range(bucket_count):
buckets[i] = audio.AudioFrame()
    vol = 1.0
fadeout = 0
while vol > 0.05:
fadeout += bucket_count
vol *= reflect
return reverb_gen(src, buckets, reflect, fadeout)
def play_file(name, delay=80, reflect=0.5):
#Do allocation here, as we can't do it in an interrupt.
frame = audio.AudioFrame()
with open(name) as file:
gen = from_file(file, frame)
r = reverb(gen, delay, reflect)
audio.play(r)
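# Editor's note: a minimal usage sketch; the file name is a made-up example
# and is assumed to hold raw frames readable into an audio.AudioFrame.
#
#     play_file('sample.raw', delay=120, reflect=0.6)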
|
ptMuta/python-node
|
pynode/runners/BabelRunner.py
|
Python
|
mit
| 2,294
| 0.003487
|
from Naked.toolshed.shell import muterun
from pynode.exceptions import NodeExecutionFailedException
from pynode.runners.Runner import Runner
class BabelRunner(Runner):
def __init__(self, ignore=None, extensions=None, presets=None, plugins=None):
babel_arguments = ''
if ignore is not None:
babel_arguments += '-i ' + ignore + ' '
if extensions is not None:
babel_arguments += '-x ' + extensions + ' '
if presets is not None:
babel_arguments += '--presets ' + presets + ' '
if plugins is not None:
babel_arguments += '--plugins ' + plugins + ' '
self.babel_arguments = babel_arguments
def execute_babel_node(self, expression_or_path, arguments):
try:
if len(arguments) > 0:
js_command = 'babel-node ' + self.babel_arguments + expression_or_path + ' ' + arguments
else:
js_command = 'babel-node ' + self.babel_arguments + expression_or_path
return muterun(js_command) # return result of execute_babel_node() of node.js file
except Exception as e:
raise e
def execute(self, script, *args):
result = self.execute_babel_node('-e "' + script + '"', self.args_to_string(args))
if result.exitcode == 0:
return result.stdout.decode('utf-8').strip()
else:
raise NodeExecutionFailedException(result.stderr)
def execute_silent(self, script, *args):
result = self.execute_babel_node('-e "' + script + '"', self.args_to_string(args))
if result.exitcode == 0:
return True
else:
raise NodeExecutionFailedException(result.stderr)
def execute_script(self, script_path, *args):
        result = self.execute_babel_node(script_path, self.args_to_string(args))
if result.exitcode == 0:
return result.stdout.decode('utf-8').strip()
else:
raise NodeExecutionFailedException(result.stderr)
def execute_script_silent(self, script_path, *args):
        result = self.execute_babel_node(script_path, self.args_to_string(args))
if result.exitcode == 0:
return True
else:
raise NodeExecutionFailedException(result.stderr)
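# Editor's note: a minimal usage sketch; assumes babel-node and the named
# preset are installed, and the script is illustrative only.
#
#     runner = BabelRunner(presets='es2015')
#     print(runner.execute('console.log(1 + 1)'))  # -> '2'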
|
hzdg/django-google-search
|
googlesearch/__init__.py
|
Python
|
mit
| 662
| 0
|
from django.conf import settings
""" Your GSE API key """
GOOGLE_SEARCH_API_KEY = getattr(settings, 'GOOGLE_SEARCH_API_KEY', None)
""" The ID of the Google Custom Search
|
Engine """
GOOGLE_SEARCH_ENGINE_ID = getattr(settings, 'GOOGLE_SEARCH_ENGINE_ID', None)
""" The API version. Defaults to 'v1' """
GOOGLE_SEARCH_API_VERSION = getattr(
settings, 'GOOGLE_SEARCH_API_VERSION', 'v1')
""" The number of search results to show per page """
GOOGLE_SEARCH_RESULTS_PER_PAGE = getattr(
settings, 'GOOGLE_SEARCH_RESULTS_PER_PAGE', 10)
""" The maximum number of pages to display """
GOOGLE_SEARCH_MAX_PAGES = getattr(settings, 'GOOGLE_SEARCH_MAX_PAGES', 10)
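# Editor's note: a hypothetical example of overriding these defaults from a
# Django settings module; the key and engine ID below are placeholders.
#
#     # settings.py
#     GOOGLE_SEARCH_API_KEY = 'your-api-key'
#     GOOGLE_SEARCH_ENGINE_ID = '012345678901234567890:abcdefghijk'
#     GOOGLE_SEARCH_RESULTS_PER_PAGE = 20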
|
h2so5/Twemoji4Android
|
nototools/opentype_data.py
|
Python
|
mit
| 2,449
| 0.000817
|
#!/usr/bin/python
#
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""OpenType-related data."""
__author__ = 'roozbeh@google.com (Roozbeh Pournader)'
import unicode_data
OMPL = {}
def _set_ompl():
"""Set up OMPL.
    OMPL is defined to be the list of mirrored pairs in Unicode 5.1:
http://www.microsoft.com/typography/otspec/ttochap1.htm#ltrrtl
"""
global OMPL
unicode_data.load_data()
bmg_data = unicode_data._bidi_mirroring_glyph_data
OMPL = {char:bmg for (char, bmg) in bmg_data.items()
if float(unicode_data.age(char)) <= 5.1}
ZWSP = [0x200B]
JOINERS = [0x200C, 0x200D]
BIDI_MARKS = [0x200E, 0x200F]
DOTTED_CIRCLE = [0x25CC]
# From the various script-specific specs at
# http://www.microsoft.com/typography/SpecificationsOverview.mspx
SPECIAL_CHARACTERS_NEEDED = {
'Arab': JOINERS + BIDI_MARKS + DOTTED_CIRCLE,
'Beng': ZWSP + JOINERS + DOTTED_CIRCLE,
'Bugi': ZWSP + JOINERS + DOTTED_CIRCLE,
'Deva': ZWSP + JOINERS + DOTTED_CIRCLE,
'Gujr': ZWSP + JOINERS + DOTTED_CIRCLE,
'Guru': ZWSP + JOINERS + DOTTED_CIRCLE,
# Hangul may not need the special characters:
# https://code.google.com/p/noto/issues/detail?id=147#c2
# 'Hang': ZWSP + JOINERS,
'Hebr': BIDI_MARKS + DOTTED_CIRCLE,
'Java': ZWSP + JOINERS + DOTTED_CIRCLE,
'Khmr': ZWSP + JOINERS + DOTTED_CIRCLE,
'Knda': ZWSP + JOINERS + DOTTED_CIRCLE,
'Laoo': ZWSP + DOTTED_CIRCLE,
'Mlym': ZWSP + JOINERS + DOTTED_CIRCLE,
'Mymr': ZWSP + JOINERS + DOTTED_CIRCLE,
'Orya': ZWSP + JOINERS + DOTTED_CIRCLE,
'Sinh': ZWSP + JOINERS + DOTTED_CIRCLE,
'Syrc': JOINERS + BIDI_MARKS + DOTTED_CIRCLE,
'Taml': ZWSP + JOINERS + DOTTED_CIRCLE,
'Telu': ZWSP + JOINERS + DOTTED_CIRCLE,
'Thaa': BIDI_MARKS + DOTTED_CIRCLE,
'Thai': ZWSP + DOTTED_CIRCLE,
'Tibt': ZWSP + JOINERS + DOTTED_CIRCLE,
}
if not OMPL:
_set_ompl()
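# Editor's note: a minimal usage sketch -- look up the special characters a
# font for a given script is expected to support:
#
#     SPECIAL_CHARACTERS_NEEDED.get('Arab', [])
#     # -> [0x200C, 0x200D, 0x200E, 0x200F, 0x25CC]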
|
zeroc0d3/docker-lab
|
vim/rootfs/usr/lib/python2.7/dist-packages/powerline/segments/vim/__init__.py
|
Python
|
mit
| 24,099
| 0.024775
|
# vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)
import os
import re
import csv
import sys
from collections import defaultdict
try:
import vim
except ImportError:
vim = object()
from powerline.bindings.vim import (vim_get_func, getbufvar, vim_getbufoption,
buffer_name, vim_getwinvar,
register_buffer_cache, current_tabpage,
list_tabpage_buffers_segment_info)
from powerline.theme import requires_segment_info, requires_filesystem_watcher
from powerline.lib import add_divider_highlight_group
from powerline.lib.vcs import guess
from powerline.lib.humanize_bytes import humanize_bytes
from powerline.lib import wraps_saveargs as wraps
from powerline.segments.common.vcs import BranchSegment, StashSegment
from powerline.segments import with_docstring
from powerline.lib.unicode import string, unicode
try:
from __builtin__ import xrange as range
except ImportError:
pass
vim_funcs = {
'virtcol': vim_get_func('virtcol', rettype='int'),
'getpos': vim_get_func('getpos'),
'fnamemodify': vim_get_func('fnamemodify', rettype='bytes'),
'line2byte': vim_get_func('line2byte', rettype='int'),
'line': vim_get_func('line', rettype='int'),
}
vim_modes = {
'n': 'NORMAL',
'no': 'N-OPER',
'v': 'VISUAL',
'V': 'V-LINE',
'^V': 'V-BLCK',
's': 'SELECT',
'S': 'S-LINE',
'^S': 'S-BLCK',
'i': 'INSERT',
'ic': 'I-COMP',
'ix': 'I-C_X ',
'R': 'RPLACE',
'Rv': 'V-RPLC',
'Rc': 'R-COMP',
'Rx': 'R-C_X ',
'c': 'COMMND',
'cv': 'VIM-EX',
'ce': 'NRM-EX',
'r': 'PROMPT',
'rm': '-MORE-',
'r?': 'CNFIRM',
'!': '!SHELL',
}
# TODO Remove cache when needed
def window_cached(func):
cache = {}
@requires_segment_info
@wraps(func)
def ret(segment_info, **kwargs):
window_id = segment_info['window_id']
if segment_info['mode'] == 'nc':
return cache.get(window_id)
else:
if getattr(func, 'powerline_requires_segment_info', False):
r = func(segment_info=segment_info, **kwargs)
else:
r = func(**kwargs)
cache[window_id] = r
return r
return ret
@requires_segment_info
def mode(pl, segment_info, override=None):
'''Return the current vim mode.
	If the mode (as returned by the ``mode()`` VimL function, see ``:h mode()``
	in Vim) consists of multiple characters and is not known to powerline,
	it will fall back to the mode with the last character(s) ignored.
:param dict override:
dict for overriding default mode strings, e.g. ``{ 'n': 'NORM' }``
'''
mode = segment_info['mode']
if mode == 'nc':
return None
while mode:
try:
if not override:
return vim_modes[mode]
try:
return override[mode]
except KeyError:
return vim_modes[mode]
except KeyError:
mode = mode[:-1]
return 'BUG'
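# Editor's note, illustrating the fallback above: an unknown multi-character
# mode such as 'vs' is not in vim_modes, so the last character is dropped and
# the 'v' entry ('VISUAL') is returned; if trimming exhausts the string, the
# function returns 'BUG'.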
@window_cached
@requires_segment_info
def visual_range(pl, segment_info, CTRL_V_text='{rows} x {vcols}', v_text_oneline='C:{vcols}', v_text_multiline='L:{rows}', V_text='L:{rows}'):
'''Return the current visual selection range.
:param str CTRL_V_text:
Text to display when in block visual or select mode.
:param str v_text_oneline:
		Text to display when in characterwise visual or select mode, assuming
		the selection occupies only one line.
	:param str v_text_multiline:
		Text to display when in characterwise visual or select mode, assuming
		the selection occupies more than one line.
:param str V_text:
Text to display when in linewise visual or select mode.
All texts are format strings which are passed the following parameters:
========= =============================================================
Parameter Description
========= =============================================================
sline Line number of the first line of the selection
eline Line number of the last line of the selection
scol Column number of the first character of the selection
ecol Column number of the last character of the selection
svcol Virtual column number of the first character of the selection
	evcol     Virtual column number of the last character of the selection
rows Number of lines in the selection
cols Number of columns in the selection
vcols Number of virtual columns in the selection
========= =============================================================
'''
sline, scol, soff = [int(v) for v in vim_funcs['getpos']('v')[1:]]
eline, ecol, eoff = [int(v) for v in vim_funcs['getpos']('.')[1:]]
svcol = vim_funcs['virtcol']([sline, scol, soff])
evcol = vim_funcs['virtcol']([eline, ecol, eoff])
rows = abs(eline - sline) + 1
cols = abs(ecol - scol) + 1
vcols = abs(evcol - svcol) + 1
return {
'^': CTRL_V_text,
's': v_text_oneline if rows == 1 else v_text_multiline,
'S': V_text,
'v': v_text_oneline if rows == 1 else v_text_multiline,
'V': V_text,
}.get(segment_info['mode'][0], '').format(
sline=sline, eline=eline,
scol=scol, ecol=ecol,
svcol=svcol, evcol=evcol,
rows=rows, cols=cols, vcols=vcols,
)
@requires_segment_info
def modified_indicator(pl, segment_info, text='+'):
'''Return a file modified indicator.
:param string text:
text to display if the current buffer is modified
'''
return text if int(vim_getbufoption(segment_info, 'modified')) else None
@requires_segment_info
def tab_modified_indicator(pl, segment_info, text='+'):
'''Return a file modified indicator for tabpages.
:param string text:
text to display if any buffer in the current tab is modified
Highlight groups used: ``tab_modified_indicator`` or ``modified_indicator``.
'''
for buf_segment_info in list_tabpage_buffers_segment_info(segment_info):
if int(vim_getbufoption(buf_segment_info, 'modified')):
return [{
'contents': text,
'highlight_groups': ['tab_modified_indicator', 'modified_indicator'],
}]
return None
@requires_segment_info
def paste_indicator(pl, segment_info, text='PASTE'):
'''Return a paste mode indicator.
:param string text:
text to display if paste mode is enabled
'''
return text if int(vim.eval('&paste')) else None
@requires_segment_info
def readonly_indicator(pl, segment_info, text='RO'):
'''Return a read-only indicator.
:param string text:
text to display if the current buffer is read-only
'''
return text if int(vim_getbufoption(segment_info, 'readonly')) else None
SCHEME_RE = re.compile(b'^\\w[\\w\\d+\\-.]*(?=:)')
@requires_segment_info
def file_scheme(pl, segment_info):
'''Return the protocol part of the file.
Protocol is the part of the full filename just before the colon which
starts with a latin letter and contains only latin letters, digits, plus,
period or hyphen (refer to `RFC3986
<http://tools.ietf.org/html/rfc3986#section-3.1>`_ for the description of
	URI scheme). If there is no such thing, ``None`` is returned, effectively
	removing the segment.
.. note::
Segment will not check whether there is ``//`` just after the
colon or if there is at least one slash after the scheme. Reason: it is
not always present. E.g. when opening file inside a zip archive file
name will look like :file:`zipfile:/path/to/archive.zip::file.txt`.
``file_scheme`` segment will catch ``zipfile`` part here.
'''
name = buffer_name(segment_info)
if not name:
return None
match = SCHEME_RE.match(name)
if match:
return match.group(0).decode('ascii')
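# Editor's note: illustrative inputs for the scheme extraction above (the
# paths are made-up examples):
#
#     b'zipfile:/path/to/archive.zip::file.txt'  ->  'zipfile'
#     b'/ordinary/local/file.txt'                ->  None (no scheme)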
@requires_segment_info
def file_directory(pl, segment_info, remove_scheme=True, shorten_user=True, shorten_cwd=True, shorten_home=False):
'''Return file directory (head component of the file path).
:param bool remove_scheme:
Remove scheme part from the segment name, if present. See documentation
of file_scheme segment for the description of what scheme is. Also
removes the colon.
:param bool shorten_user:
Shorten ``$HOME`` directory to :file:`~/`. Does not work for files with
scheme.
:param bool shorten_cwd:
Shorten current directory to :file:`./`. Does not work for files with
scheme present.
:param bool shorten_home:
Shorten all directories in :file:`/home/` to :file:`~user/` instead of
:file:`/home/user/`. Does not work for files with scheme pr
|
HuygensING/bioport-repository
|
bioport_repository/tests/test_common.py
|
Python
|
gpl-3.0
| 2,361
| 0.008895
|
##########################################################################
# Copyright (C) 2009 - 2014 Huygens ING & Gerbrandy S.R.L.
#
# This file is part of bioport.
#
# bioport is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with this program. If not, see
# <http://www.gnu.org/licenses/gpl-3.0.html>.
##########################################################################
import os
#import sys
import unittest
import shutil
import datetime
from bioport_repository.common import to_date, format_date
class CommonTestCase(unittest.TestCase):
def test_to_date(self):
self.assertEqual(to_date('2000'), datetime.datetime(2000, 1, 1, 0, 0))
self.assertEqual(to_date('2000-02'), datetime.datetime(2000, 2, 1, 0, 0))
self.assertEqual(to_date('2000-02-03'), datetime.datetime(2000, 2, 3, 0, 0))
self.assertEqual(to_date('2001-02', round='up'), datetime.datetime(2001, 2, 28, 0, 0))
#2000 is a leap year
self.assertEqual(to_date('2000-02', round='up'), datetime.datetime(2000, 2, 29, 0, 0))
self.assertEqual(to_date('2000-12', round='up'), datetime.datetime(2000, 12, 31, 0, 0))
self.assertEqual(to_date('2000', round='up'), datetime.datetime(2000, 12, 31, 0, 0))
self.assertEqual(to_date('0200', round='up'), datetime.datetime(200, 12, 31, 0, 0))
        self.assertEqual(to_date('1200', ), datetime.datetime(1200, 1, 1, 0, 0))
def test_format_date(self):
        d = datetime.datetime(1700, 3, 2)
self.assertEqual(format_date(d), '1700-03-02 00:00')
d = datetime.datetime(1, 3, 2)
self.assertEqual(format_date(d), '0001-03-02 00:00')
def test_suite():
return unittest.TestSuite((
unittest.makeSuite(CommonTestCase, 'test'),
))
if __name__=='__main__':
unittest.main(defaultTest='test_suite')
|
mattclark/osf.io
|
tests/test_registrations/test_registration_approvals.py
|
Python
|
apache-2.0
| 12,424
| 0.003139
|
import datetime
import mock
from django.utils import timezone
from nose.tools import * # noqa
from tests.base import fake, OsfTestCase
from osf_tests.factories import (
EmbargoFactory, NodeFactory, ProjectFactory,
RegistrationFactory, UserFactory, UnconfirmedUserFactory
)
from framework.exceptions import PermissionsError
from osf.exceptions import (
InvalidSanctionRejectionToken, InvalidSanctionApprovalToken, NodeStateError,
)
from osf.utils import tokens
from osf.models.sanctions import (
Sanction,
PreregCallbackMixin,
RegistrationApproval,
)
from framework.auth import Auth
from osf.models import Contributor, SpamStatus
from osf.utils.permissions import ADMIN
DUMMY_TOKEN = tokens.encode({
'dummy': 'token'
})
class RegistrationApprovalModelTestCase(OsfTestCase):
def setUp(self):
super(RegistrationApprovalModelTestCase, self).setUp()
self.user = UserFactory()
self.project = ProjectFactory(creator=self.user)
self.registration = RegistrationFactory(project=self.project)
self.embargo = EmbargoFactory(user=self.user)
self.valid_embargo_end_date = timezone.now() + datetime.timedelta(days=3)
def test__require_approval_saves_approval(self):
initial_count = RegistrationApproval.objects.all().count()
self.registration._initiate_approval(
self.user
)
assert_equal(RegistrationApproval.objects.all().count(), initial_count + 1)
def test__initiate_approval_does_not_create_tokens_for_unregistered_admin(self):
unconfirmed_user = UnconfirmedUserFactory()
Contributor.objects.create(node=self.registration, user=unconfirmed_user)
self.registration.add_permission(unconfirmed_user, ADMIN, save=True)
assert_equal(Contributor.objects.get(node=self.registration, user=unconfirmed_user).permission, ADMIN)
approval = self.registration._initiate_approval(
self.user
)
assert_true(self.user._id in approval.approval_state)
assert_false(unconfirmed_user._id in approval.approval_state)
def test__initiate_approval_adds_admins_on_child_nodes(self):
project_admin = UserFactory()
project_non_admin = UserFactory()
child_admin = UserFactory()
child_non_admin = UserFactory()
grandchild_admin = UserFactory()
project = ProjectFactory(creator=project_admin)
project.add_contributor(project_non_admin, auth=Auth(project.creator), save=True)
child = NodeFactory(creator=child_admin, parent=project)
child.add_contributor(child_non_admin, auth=Auth(child.creator), save=True)
grandchild = NodeFactory(creator=grandchild_admin, parent=child) # noqa
registration = RegistrationFactory(project=project)
approval = registration._initiate_approval(registration.creator)
assert_in(project_admin._id, approval.approval_state)
assert_in(child_admin._id, approval.approval_state)
assert_in(grandchild_admin._id, approval.approval_state)
assert_not_in(project_non_admin._id, approval.approval_state)
assert_not_in(child_non_admin._id, approval.approval_state)
def test_require_approval_from_non_admin_raises_PermissionsError(self):
self.registration.remove_permission(self.user, ADMIN)
self.registration.save()
self.registration.reload()
with assert_raises(PermissionsError):
self.registration.require_approval(self.user)
def test_invalid_approval_token_raises_InvalidSanctionApprovalToken(self):
self.registration.require_approval(
self.user
)
self.registration.save()
assert_true(self.registration.is_pending_registration)
invalid_approval_token = 'not a real token'
with assert_raises(InvalidSanctionApprovalToken):
self.registration.registration_approval.approve(self.user, invalid_approval_token)
assert_true(self.registration.is_pending_registration)
def test_non_admin_approval_token_raises_PermissionsError(self):
non_admin = UserFactory()
self.registration.require_approval(
self.user,
)
self.registration.save()
assert_true(self.registration.is_pending_registration)
approval_token = self.registration.registration_approval.approval_state[self.user._id]['approval_token']
with assert_raises(PermissionsError):
self.registration.registration_approval.approve(non_admin, approval_token)
assert_true(self.registration.is_pending_registration)
def test_approval_adds_to_parent_projects_log(self):
initial_project_logs = self.registration.registered_from.logs.count()
self.registration.require_approval(
self.user
)
self.registration.save()
approval_token = self.registration.registration_approval.approval_state[self.user._id]['approval_token']
self.registration.registration_approval.approve(self.user, approval_token)
# adds initiated, approved, and registered logs
assert_equal(self.registration.registered_from.logs.count(), initial_project_logs + 3)
def test_one_approval_with_two_admins_stays_pending(self):
admin2 = UserFactory()
Contributor.objects.create(node=self.registration, user=admin2)
self.registration.add_permission(admin2, ADMIN, save=True)
self.registration.require_approval(
self.user
)
self.registration.save()
# First admin approves
approval_token = self.registration.registration_approval.approval_state[self.user._id]['approval_token']
self.registration.registration_approval.approve(self.user, approval_token)
assert_true(self.registration.is_pending_registration)
num_of_approvals = sum([val['has_approved'] for val in self.registration.registration_approval.approval_state.values()])
assert_equal(num_of_approvals, 1)
# Second admin approves
approval_token = self.registration.registration_approval.approval_state[admin2._id]['approval_token']
self.registration.registration_approval.approve(admin2, approval_token)
assert_false(self.registration.is_pending_registration)
        num_of_approvals = sum([val['has_approved'] for val in self.registration.registration_approval.approval_state.values()])
assert_equal(num_of_approvals, 2)
def test_invalid_rejection_token_raises_InvalidSanctionRejectionToken(self):
self.registration.require_approval(
self.user
)
self.registration.save()
assert_true(self.registration.is_pending_registration)
with assert_raises(InvalidSanctionRejectionToken):
self.registration.registration_approval.reject(self.user, fake.sentence())
assert_true(self.registration.is_pending_registration)
def test_non_admin_rejection_token_raises_PermissionsError(self):
non_admin = UserFactory()
self.registration.require_approval(
self.user
)
self.registration.save()
assert_true(self.registration.is_pending_registration)
rejection_token = self.registration.registration_approval.approval_state[self.user._id]['rejection_token']
with assert_raises(PermissionsError):
self.registration.registration_approval.reject(non_admin, rejection_token)
assert_true(self.registration.is_pending_registration)
def test_one_disapproval_cancels_registration_approval(self):
self.registration.require_approval(
self.user
)
self.registration.save()
assert_true(self.registration.is_pending_registration)
rejection_token = self.registration.registration_approval.approval_state[self.user._id]['rejection_token']
self.registration.registration_approval.reject(self.user, rejection_token)
assert_equal(self.registration.registration_approval.state, Sanction.REJECTED)
assert_false(self.registration.is_pending_registration)
def test_disapproval_adds_to_parent_projects_log(self)
|
botswana-harvard/edc-lab
|
old/lab_clinic_api/classes/edc_lab_results.py
|
Python
|
gpl-2.0
| 4,109
| 0.003651
|
from django.shortcuts import render_to_response
from django.template.loader import render_to_string
from lis.specimen.lab_result_item.classes import ResultItemFlag
from lis.exim.lab_import_lis.classes import LisDataImporter
from lis.exim.lab_import_dmis.classes import Dmis
from ..models import Result, Order, ResultItem
class EdcLabResults(object):
""" Accesses local lab data by subject."""
def update(self, subject_identifier):
""" Updates the local lab data with that from the Lis. """
dmis = Dmis('lab_api')
dmis.import_from_dmis(subject_identifier=subject_identifier)
lis_data_importer = LisDataImporter('lab_api')
last_updated = lis_data_importer.update_from_lis(subject_identifier=subject_identifier)
return last_updated
def render(self, subject_identifier, update=False):
""" Renders local lab data for the subject's dashboard."""
template = 'result_status_bar.html'
last_updated = None
if update:
last_updated = self.update(subject_identifier)
resulted = Result.objects.filter(
order__aliquot__receive__registered_subject__subject_identifier=subject_identifier).order_by(
'-order__aliquot__receive__drawn_datetime')
if update:
for result in resulted:
for result_item in ResultItem.objects.filter(result=result):
if result_item.result_item_value_as_float:
(result_item.reference_range, result_item.reference_flag,
                         result_item.grade_range, result_item.grade_flag) = ResultItemFlag().calculate(result_item)
result_item.save()
ordered = Order.objects.filter(
aliquot__receive__registered_subject__subject_identifier=subject_identifier).exclude(
order_identifier__in=[result.order.order_identifier for result in resulted]).order_by(
'-aliquot__receive__drawn_datetime')
        return render_to_string(template, {'resulted': resulted, 'ordered': ordered, 'last_updated': last_updated})
def results_template(self, subject_identifier, update=False):
"""This method is a refactor of the above render method except that it renders to response"""
template = "result_status_bar.html"
return render_to_response(template, self.context_data(subject_identifier, update))
def context_data(self, subject_identifier, update=False):
resulted = self._query_resulted(subject_identifier)
self._update_result_items(resulted)
context = {'last_updated': self._last_updated(subject_identifier, update)}
context['ordered'] = self._query_ordered(subject_identifier, resulted)
context['resulted'] = self._query_resulted(subject_identifier)
return context
def _query_resulted(self, subject_identifier):
criteria = {'order__aliquot__receive__registered_subject__subject_identifier': subject_identifier}
return Result.objects.filter(**criteria).order_by('-order__aliquot__receive__drawn_datetime')
def _query_ordered(self, subject_identifier, resulted):
return Order.objects.filter(
aliquot__receive__registered_subject__subject_identifier=subject_identifier).exclude(
order_identifier__in=[result.order.order_identifier for result in resulted]).order_by(
'-aliquot__receive__drawn_datetime')
def _last_updated(self, subject_identifier, update=False):
return self.update(subject_identifier) if update else None
def _update_result_items(self, resulted):
for result in resulted:
for result_item in ResultItem.objects.filter(result=result):
self._update_result_item(result_item)
def _update_result_item(self, result_item):
if result_item.result_item_value_as_float:
(result_item.reference_range, result_item.reference_flag,
result_item.grade_range, result_item.grade_flag) = ResultItemFlag().calculate(result_item)
result_item.save()
|
zhhf/charging
|
charging/tests/unit/test_db_plugin.py
|
Python
|
apache-2.0
| 180,785
| 0.000111
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import copy
import os
import mock
from oslo.config import cfg
from testtools import matchers
import webob.exc
import neutron
from neutron.api import api_common
from neutron.api.extensions import PluginAwareExtensionManager
from neutron.api.v2 import attributes
from neutron.api.v2.attributes import ATTR_NOT_SPECIFIED
from neutron.api.v2.router import APIRouter
from neutron.common import config
from neutron.common import constants
from neutron.common import exceptions as n_exc
from neutron.common.test_lib import test_config
from neutron import context
from neutron.db import api as db
from neutron.db import db_base_plugin_v2
from neutron.db import models_v2
from neutron.manager import NeutronManager
from neutron.openstack.common import importutils
from neutron.tests import base
from neutron.tests.unit import test_extensions
from neutron.tests.unit import testlib_api
DB_PLUGIN_KLASS = 'neutron.db.db_base_plugin_v2.NeutronDbPluginV2'
ROOTDIR = os.path.dirname(os.path.dirname(__file__))
ETCDIR = os.path.join(ROOTDIR, 'etc')
def optional_ctx(obj, fallback):
if not obj:
return fallback()
@contextlib.contextmanager
def context_wrapper():
yield obj
return context_wrapper()
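# Editor's note: a minimal illustration of the helper above; the names
# 'existing_network' and 'self.network' are hypothetical:
#
#     with optional_ctx(existing_network, self.network) as net:
#         ...  # net is existing_network if truthy, else a new self.network()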
def etcdir(*p):
return os.path.join(ETCDIR, *p)
def _fake_get_pagination_helper(self, request):
return api_common.PaginationEmulatedHelper(request, self._primary_key)
def _fake_get_sorting_helper(self, request):
return api_common.SortingEmulatedHelper(request, self._attr_info)
class NeutronDbPluginV2TestCase(testlib_api.WebTestCase):
fmt = 'json'
resource_prefix_map = {}
def setUp(self, plugin=None, service_plugins=None,
ext_mgr=None):
super(NeutronDbPluginV2TestCase, self).setUp()
cfg.CONF.set_override('notify_nova_on_port_status_changes', False)
        # Make sure the extensions matching the plugin are loaded for each test
PluginAwareExtensionManager._instance = None
# Save the attributes map in case the plugin will alter it
# loading extensions
# Note(salvatore-orlando): shallow copy is not good enough in
# this case, but copy.deepcopy does not seem to work, since it
# causes test failures
self._attribute_map_bk = {}
for item in attributes.RESOURCE_ATTRIBUTE_MAP:
self._attribute_map_bk[item] = (attributes.
RESOURCE_ATTRIBUTE_MAP[item].
copy())
self._tenant_id = 'test-tenant'
if not plugin:
plugin = DB_PLUGIN_KLASS
# Create the default configurations
args = ['--config-file', etcdir('neutron.conf.test')]
        # If test_config specifies some config-file, use it as well
for config_file in test_config.get('config_files', []):
args.extend(['--config-file', config_file])
config.parse(args=args)
# Update the plugin
self.setup_coreplugin(plugin)
cfg.CONF.set_override(
'service_plugins',
[test_config.get(key, default)
for key, default in (service_plugins or {}).iteritems()]
)
cfg.CONF.set_override('base_mac', "12:34:56:78:90:ab")
cfg.CONF.set_override('max_dns_nameservers', 2)
cfg.CONF.set_override('max_subnet_host_routes', 2)
cfg.CONF.set_override('allow_pagination', True)
cfg.CONF.set_override('allow_sorting', True)
self.api = APIRouter()
        # Set the default status
self.net_create_status = 'ACTIVE'
self.port_create_status = 'ACTIVE'
def _is_native_bulk_supported():
plugin_obj = NeutronManager.get_plugin()
native_bulk_attr_name = ("_%s__native_bulk_support"
% plugin_obj.__class__.__name__)
return getattr(plugin_obj, native_bulk_attr_name, False)
self._skip_native_bulk = not _is_native_bulk_supported()
def _is_native_pagination_support():
native_pagination_attr_name = (
"_%s__native_pagination_support" %
NeutronManager.get_plugin().__class__.__name__)
return (cfg.CONF.allow_pagination and
getattr(NeutronManager.get_plugin(),
native_pagination_attr_name, False))
self._skip_native_pagination = not _is_native_pagination_support()
def _is_native_sorting_support():
native_sorting_attr_name = (
"_%s__native_sorting_support" %
NeutronManager.get_plugin().__class__.__name__)
return (cfg.CONF.allow_sorting and
getattr(NeutronManager.get_plugin(),
native_sorting_attr_name, False))
self._skip_native_sorting = not _is_native_sorting_support()
if ext_mgr:
self.ext_api = test_extensions.setup_extensions_middleware(ext_mgr)
def tearDown(self):
self.api = None
self._deserializers = None
self._skip_native_bulk = None
self._skip_native_pagination = None
        self._skip_native_sorting = None
self.ext_api = None
# NOTE(jkoelker) for a 'pluggable' framework, Neutron sure
# doesn't like when the plugin changes ;)
db.clear_db()
cfg.CONF.reset()
# Restore the original attribute map
attributes.RESOURCE_ATTRIBUTE_MAP = self._attribute_map_bk
super(NeutronDbPluginV2TestCase, self).tearDown()
def _req(self, method, resource, data=None, fmt=None, id=None, params=None,
action=None, subresource=None, sub_id=None):
fmt = fmt or self.fmt
path = '/%s.%s' % (
'/'.join(p for p in
(resource, id, subresource, sub_id, action) if p),
fmt
)
prefix = self.resource_prefix_map.get(resource)
if prefix:
path = prefix + path
content_type = 'application/%s' % fmt
body = None
if data is not None: # empty dict is valid
body = self.serialize(data)
return testlib_api.create_request(path, body, content_type, method,
query_string=params)
def new_create_request(self, resource, data, fmt=None, id=None,
subresource=None):
return self._req('POST', resource, data, fmt, id=id,
subresource=subresource)
def new_list_request(self, resource, fmt=None, params=None,
subresource=None):
return self._req(
'GET', resource, None, fmt, params=params, subresource=subresource
)
def new_show_request(self, resource, id, fmt=None,
subresource=None, fields=None):
if fields:
params = "&".join(["fields=%s" % x for x in fields])
else:
params = None
return self._req('GET', resource, None, fmt, id=id,
params=params, subresource=subresource)
def new_delete_request(self, resource, id, fmt=None, subresource=None,
sub_id=None):
return self._req(
'DELETE',
resource,
None,
fmt,
id=id,
subresource=subresource,
sub_id=sub_id
)
def new_update_request(self, resource, data, id, f
|
jethrogb/episoder
|
test/episode.py
|
Python
|
gpl-3.0
| 2,607
| 0.018028
|
# episoder, https://code.ott.net/episoder
# -*- coding: utf8 -*-
#
# Copyright (C) 2004-2020 Stefan Ott. All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from datetime import date
from unittest import TestCase, TestSuite, TestLoader
from pyepisoder.database import Episode, Show
class EpisodeTest(TestCase):
def test_construct(self):
ep = Episode(u"First", 3, 8, date(2017, 1, 1), u"0XOR", 117)
self.assertEqual(ep.show_id, None)
self.assertEqual(ep.episode, 8)
self.assertEqual(ep.airdate, date(2017, 1, 1))
self.assertEqual(ep.season, 3)
self.assertEqual(ep.title, u"First")
self.assertEqual(ep.totalnum, 117)
		self.assertEqual(ep.prodnum, u"0XOR")
def test_str_and_repr(self):
show = Show(u"TvShow", u"")
ep = Episode(u"First", 1, 1, date(2017, 1, 1), u"http://", 1)
ep.show = show
self.assertEqual(str(ep), "TvShow 1x01: First")
self.assertEqual(repr(ep), 'Episode(u"First", 1, 1, '
'date(2017, 1, 1), u"http://", 1)')
def test_equality(self):
ep1 = Episode(u"First", 1, 1, date(2017, 1, 1), u"http://", 1)
ep1.show_id = 1
ep2 = Episode(u"Second", 2, 2, dat
|
e(2017, 1, 1), u"http://", 1)
ep2.show_id = 2
self.assertNotEqual(ep1, ep2)
ep1.show_id = 2
self.assertNotEqual(ep1, ep2)
ep1.season = 2
self.assertNotEqual(ep1, ep2)
ep1.episode = 2
self.assertEqual(ep1, ep2)
ep1.season = 1
self.assertNotEqual(ep1, ep2)
ep1.season = 2
ep1.show_id = 1
self.assertNotEqual(ep1, ep2)
def test_sorting(self):
ep1 = Episode(u"A", 1, 1, date(2017, 1, 1), u"", 1)
ep2 = Episode(u"D", 2, 2, date(2017, 1, 1), u"", 1)
ep3 = Episode(u"E", 3, 1, date(2017, 1, 1), u"", 1)
ep4 = Episode(u"B", 1, 2, date(2017, 1, 1), u"", 1)
ep5 = Episode(u"C", 2, 1, date(2017, 1, 1), u"", 1)
episodes = sorted([ep1, ep2, ep3, ep4, ep5])
self.assertEqual(episodes, [ep1, ep4, ep5, ep2, ep3])
def test_suite():
suite = TestSuite()
loader = TestLoader()
suite.addTests(loader.loadTestsFromTestCase(EpisodeTest))
return suite
|
riusksk/riufuzz
|
tools/coverage/Utilities/Download.py
|
Python
|
apache-2.0
| 8,118
| 0.011333
|
import requests
import time
import string
import os.path
import urllib2
import sys
import getopt
from time import gmtime, strftime
#variables
class Downloader:
extension = "pdf"
signature = [0x25, 0x50, 0x44, 0x46]
searchChars = ['a', 'a']
outputDir = "downloaded_"
downloaded = []
successCount = 0
maxPerSearch = 500
last = 0
lastStatus = 0
def loadArguments(self, argv):
options, rem = getopt.getopt(argv, 'x:s:q:o:m:', ['extension=', 'signature=', 'search=', 'output=', 'max='])
for opt, arg in options:
if opt in ('-x'):
self.extension = arg
elif opt in ('-s'):
self.signature=[]
for x in range(len(arg)/2):
self.signature.append(int(arg[(x*2):(x*2+2)], 16))
elif opt in ('-q'):
self.searchChars=[]
for x in range(len(arg)):
self.searchChars.append(arg[x])
if opt in ('-o'):
self.outputDir = arg
if opt in ('-m'):
self.maxPerSearch = int(arg)
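    # Editor's note: an illustrative example of the parsing above -- passing
    # "-x pdf -s 25504446" yields extension 'pdf' and signature
    # [0x25, 0x50, 0x44, 0x46] (the bytes of '%PDF').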
def currentStatusReport(self):
if len(self.downloaded) % 10 != 0 or len(self.downloaded) == self.lastStatus:
return
self.lastStatus = len(self.downloaded)
if not os.path.isdir(self.outputDir + self.extension):
print strftime("%Y-%m-%d %H:%M:%S", gmtime()) + " --- TOTAL: " + str(len(self.downloaded))+ " DOWNLOADED: 0"
else:
print strftime("%Y-%m-%d %H:%M:%S", gmtime()) + " --- TOTAL: " + str(len(self.downloaded))+ " DOWNLOADED: " + str(len(os.listdir(self.outputDir + self.extension)))
def loadList(self):
if os.path.isfile("list_" + self.extension + ".txt"):
with open("list_" + self.extension + ".txt") as f:
for line in f:
self.downloaded.append(line.strip())
if os.path.isdir(self.outputDir + self.extension):
self.successCount = len(os.listdir(self.outputDir + self.extension))
def readStatus(self):
if os.path.isfile("status" + self.extension + "_" + str(len(self.searchChars)) + ".txt"):
with open("status" + self.extension + "_" + str(len(self.searchChars)) + ".txt") as f:
x = 0
for line in f:
if x<len(self.searchChars):
self.searchChars[x] = line.strip()
x += 1
def start(self):
self.loadList()
self.readStatus()
self.search()
def downloadFile(self, url):
fDir=self.outputDir + self.extension
local_file = None
if not os.path.isdir(fDir):
os.makedirs(fDir)
try:
f = urllib2.urlopen(url, timeout=10)
for x in range(len(self.signature)):
if ord(f.read(1))!=self.signature[x]:
f.close()
raise
local_file=open("%s/file%08d.%s" % (fDir, self.successCount, self.extension), "wb")
for x in range(len(self.signature)):
local_file.write(chr(self.signature[x]))
local_file.write(f.read())
local_file.close()
f.close()
except KeyboardInterrupt:
raise
except:
if local_file != None:
local_file.close()
for x in xrange(10):
try:
if os.path.isfile("%s/file%08d.%s" % (fDir, self.successCount, self.extension)):
                        os.remove("%s/file%08d.%s" % (fDir, self.successCount, self.extension))
break
except:
if x==9:
raise
time.sleep(1)
return
self.successCount += 1
def signatureText(self):
result = ""
for x in range(len(self.signature)):
result += "%0.2X" % self.signature[x]
return result
def searchCharsText(self):
result = ""
for x in range(len(self.searchChars)):
result += self.searchChars[x]
return result
def search(self):
if self.extension == None or self.extension == "":
print "ERROR: No extension specified!"
return
if len(self.signature) == 0:
print "WARNING: No signature specified - THERE WILL BE LOT OF FALSE RESULTS :("
print "Starting with search"
print "---------------------"
print "Extension: " + self.extension
print "Signature: " + self.signatureText()
print "Starting search base: " + self.searchCharsText()
print "Output dir: " + self.outputDir + self.extension
print "Max results per search: " + str(self.maxPerSearch)
self.searchReal("")
def searchReal(self, chars):
if len(chars) < len(self.searchChars):
for char in string.ascii_lowercase:
self.searchReal(chars + char)
return
for x in range(len(self.searchChars)):
if ord(chars[x])<ord(self.searchChars[x]):
return
for x in range(len(self.searchChars)):
self.searchChars[x]='a'
f = open("list_" + self.extension + ".txt", "a")
f_s = open("status" + self.extension + "_" + str(len(self.searchChars)) + ".txt", "w")
for x in range(len(chars)):
f_s.write(chars[x]+"\n")
f_s.close()
num = 0
blocked = True
print '---' + chars + '---'
while num < self.maxPerSearch:
r = 0
while True:
try:
if num == 0:
r=requests.get('http://www.google.ee/search?hl=en&q=filetype%3A' + self.extension + '+' + chars + '&num=100&btnG=Google+Search')
else:
r=requests.get('http://www.google.ee/search?hl=en&q=filetype%3A' + self.extension + '+' + chars + '&num=100&start=' + str(num))
break
except:
r=0
pos=r.content.find('<a href="')
while pos != -1:
pos2_a=r.content.find('"', pos+16)
pos2_b=r.content.find('&', pos+16)
if pos2_a == -1:
pos2 = pos2_b
elif pos2_b == -1:
pos2 = pos2_a
else:
pos2 = min (pos2_a, pos2_b)
if pos2 == -1:
                break
url = r.content[pos+16:pos2]
if url.find('.google.') == -1 and url.startswith('http'):
blocked = False
if url not in self.downloaded:
self.downloadFile(url)
self.downloaded.append(url)
f.write(url + "\n")
pos_a=r.content.find('<a href="', pos+1)
pos_b=r.content.find('a href="/url?q=', pos+1)
if pos_a == -1:
pos = pos_b
elif pos_b == -1:
pos = pos_a
else:
pos=min(pos_a, pos_b)
self.currentStatusReport()
if len(self.downloaded)==self.last:
if num == 0:
time.sleep(15)
break
else:
self.last = len(self.downloaded)
num = num + 100
time.sleep(5)
print "Total: " + str(len(self.downloaded))
if blocked:
print "Come on Google!!! You are arming my research when you block me! Will wait for 2 hours :("
time.sleep(7200)
obj = Downloader()
obj.loadArguments(sys.argv[1:])
obj.start()
|
4dsolutions/Python5
|
qrays.py
|
Python
|
mit
| 11,110
| 0.016022
|
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 4 09:07:22 2016
Vectors and Qvectors use the same metric i.e. the
xyz vector and corresponding ivm vector always have
the same length.
In contrast, the tetravolume.py modules in some cases
assumes that volume and area use R-edge cubes and triangles
for XYZ units respectively, and D-edge tetrahedrons
and triangles for IVM units of volume and area. See
the docstring for more details.
@author: K. Urner, 4D Solutions, (M) MIT License
Oct 8, 2021: remove gmpy2 dependency
Sep 19, 2021: remove autoconvert to floating point when initializing Vector
Sep 19, 2021: make xyz Vector a property of Qvector vs. a method
Sep 06, 2019: have type(self)() instead of Qvector() return outcomes
May 25, 2019: add area methods based on cross product
Jun 20, 2018: make Qvectors sortable, hashable
Jun 11, 2016: refactored to make Qvector and Vector each standalone
Aug 29, 2000: added extra-class, class dependent methods for
dot and cross as alternative syntax
July 8,2000: added method for rotation around any axis vector
May 27,2000: shortened several methods thanks to Peter Schneider-Kamp
May 24,2000: added unit method, tweaks
May 8, 2000: slight tweaks re rounding values
May 7, 2000: enhanced the Qvector subclass with native
length, dot, cross methods -- keeps coords as a 4-tuple
-- generalized Vector methods to accommodate 4-tuples
if Qvector subclass, plus now returns vector of whatever
type invokes method (i.e. Qvector + Qvector = Qvector)
Mar 23, 2000:
added spherical coordinate subclass
added quadray coordinate subclass
Mar 5, 2000: added angle function
June 6, 2020: spherical coordinates debug, working on blender integration
"""
from math import radians, degrees, cos, sin, acos
import math
from operator import add, sub, mul, neg
from collections import namedtuple
XYZ = namedtuple("xyz_vector", "x y z")
IVM = namedtuple("ivm_vector", "a b c d")
root2 = 2.0**0.5
class Vector:
def __init__(self, arg):
"""Initialize a vector at an (x,y,z)"""
self.xyz = XYZ(*arg)
def __repr__(self):
return repr(self.xyz)
@property
def x(self):
return self.xyz.x
@property
def y(self):
return self.xyz.y
@property
def z(self):
return self.xyz.z
def __mul__(self, scalar):
"""Return vector (self) * scalar."""
newcoords = [scalar * dim for dim in self.xyz]
return type(self)(newcoords)
__rmul__ = __mul__ # allow scalar * vector
def __truediv__(self,scalar):
"""Return vector (self) * 1/scalar"""
return self.__mul__(1.0/scalar)
def __add__(self,v1):
"""Add a vector to this vector, return a vector"""
newcoords = map(add, v1.xyz, self.xyz)
return type(self)(newcoords)
def __sub__(self,v1):
"""Subtract vector from this vector, return a vector"""
return self.__add__(-v1)
def __neg__(self):
"""Return a vector, the negative of this one."""
return type(self)(tuple(map(neg, self.xyz)))
def unit(self):
return self.__mul__(1.0/self.length())
def dot(self,v1):
"""
Return scalar dot product of this with another vector.
"""
return sum(map(mul , v1.xyz, self.xyz))
def cross(self,v1):
"""
Return the vector cross product of this with another vector
"""
newcoords = (self.y * v1.z - self.z * v1.y,
self.z * v1.x - self.x * v1.z,
self.x * v1.y - self.y * v1.x )
return type(self)(newcoords)
def area(self,v1):
"""
xyz area of a parallelogram with these edge lengths
"""
return self.cross(v1).length()
def length(self):
"""Return this vector's length"""
return self.dot(self) ** 0.5
def angle(self,v1):
"""
Return angle between self and v1, in decimal degrees
"""
costheta = round(self.dot(v1)/(self.length() * v1.length()),10)
theta = degrees(acos(costheta))
return round(theta,10)
def rotaxis(self,vAxis,deg):
"""
Rotate around vAxis by deg
realign rotation axis with Z-axis, realign self accordingly,
rotate by deg (counterclockwise) around Z, resume original
orientation (undo realignment)
"""
r,phi,theta = vAxis.spherical()
newv = self.rotz(-theta).roty(phi)
newv = newv.rotz(-deg)
newv = newv.roty(-phi).rotz(theta)
return type(self)(newv.xyz)
def rotx(self, deg):
rad = radians(deg)
newy = cos(rad) * self.y - sin(rad) * self.z
newz = sin(rad) * self.y + cos(rad) * self.z
newxyz = [round(p ,8) for p in (self.x , newy, newz)]
return type(self)(newxyz)
def roty(self, deg):
rad = radians(deg)
newx = cos(rad) * self.x - sin(rad) * self.z
newz = sin(rad) * self.x + cos(rad) * self.z
newxyz = [round(p ,8) for p in (newx, self.y, newz)]
return type(self)(newxyz)
def rotz(self, deg):
rad = radians(deg)
newx = cos(rad) * self.x - sin(rad) * self.y
newy = sin(rad) * self.x + cos(rad) * self.y
newxyz = [round(p ,8) for p in (newx , newy, self.z)]
return type(self)(newxyz)
def spherical(self):
"""Return (r,phi,theta) spherical coords based
on current (x,y,z)"""
r = self.length()
if self.x == 0:
if self.y ==0: theta = 0.0
elif self.y < 0: theta = -90.0
else: theta = 90.0
else:
theta = degrees(math.atan(self.y/self.x))
if self.x < 0 and self.y == 0: theta = 180
# theta is positive so turn more than 180
elif self.x < 0 and self.y < 0: theta = 180 + theta
# theta is negative so turn less than 180
elif self.x < 0 and self.y > 0: theta = 180 + theta
if r == 0:
phi=0.0
else:
phi = degrees(acos(self.z/r))
return (r, phi, theta)
def quadray(self):
"""return (a, b, c, d) quadray based on current (x, y, z)"""
x, y, z = self.xyz
k = 2/root2
a = k * ((x >= 0)* ( x) + (y >= 0) * ( y) + (z >= 0) * ( z))
b = k * ((x < 0)* (-x) + (y < 0) * (-y) + (z >= 0) * ( z))
c = k * ((x < 0)* (-x) + (y >= 0) * ( y) + (z < 0) * (-z))
d = k * ((x >= 0)* ( x) + (y < 0) * (-y) + (z < 0) * (-z))
return Qvector((a, b, c, d))
class Qvector:
"""Quadray vector"""
def __init__(self, arg):
"""Initialize a vector at an (x,y,z)"""
self.coords = self.norm(arg)
def __repr__(self):
return repr(self.coords)
def norm(self, arg):
"""Normalize such that 4-tuple all non-negative members."""
return IVM(*tuple(map(sub, arg, [min(arg)] * 4)))
def norm0(self):
"""Normalize such that sum of 4-tuple members = 0"""
q = self.coords
return IVM(*tuple(map(sub, q, [sum(q)/4.0] * 4)))
@property
def a(self):
return self.coords.a
@property
def b(self):
return self.coords.b
@property
def c(self):
return self.coords.c
@property
def d(self):
return self.coords.d
def __eq__(self, other):
return self.coords == other.coords
def __lt__(self, other):
return self.coords < other.coords
def __gt__(self, other):
return self.coords > other.coords
def __hash__(self):
return hash(self.coords)
def __mul__(self, scalar):
"""Return vector (self) * scalar."""
newcoords = [scalar * dim for dim in self.coords]
return type(self)(newcoords)
__rmul__ = __mul__ # allow scalar * vector
def __truediv__(self,scalar):
"""Return vector (self) * 1/scalar"""
return self.__mul__(1.0/scalar)
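# A minimal usage sketch (illustrative; exercises only the API defined above):
#   v = Vector((1, 0, 0))
#   w = Vector((0, 1, 0))
#   v.angle(w)         # -> 90.0
#   v.cross(w).xyz     # -> xyz_vector(x=0, y=0, z=1)
#   v.quadray()        # -> the equivalent Qvector; same length by design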
|
tassmjau/duktape
|
tests/perf/test-hex-encode.py
|
Python
|
mit
| 341
| 0.043988
|
import math
import random
def test():
tmp1 = []
tmp2 = []
print('build')
for i in xrange(1024):
tmp1.append(chr(int(math.floor(random.random() * 256))))
tmp1 = ''.join(tmp1)
for i in xrange(1024):
tmp2.append(tmp1)
tmp2 = ''.join(tmp2)
print(len(tmp2))
print('run')
for i in xrange(5000):
res = tmp2.encode('hex')
test()
|
|
KirovVerst/qproject
|
deploy_config_example.py
|
Python
|
mit
| 276
| 0
|
DO_TOKEN = ''
SSH_KEY_PUB_PATH = '/home/user/.ssh/id_rsa.pub'
# deploy all containers to one droplet
ONE_DROPLET_NAME = 'qproject-all'
ONE_DROPLET_IMAGE = 'docker-16-04'
ONE_DROPLET_SIZE = '512mb'
# deploy all containers to multiple virtual machines
SWARM_WORKER_NUMBER = 1
|
bucketzxm/wechat_template
|
movie/views.py
|
Python
|
gpl-3.0
| 2,018
| 0.000991
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from django.shortcuts import render, HttpResponse
from django.views.decorators.csrf import csrf_exempt
import hashlib
import xml.etree.ElementTree as ET
import time
from config import TOKEN
# Create your views here.
TOKEN = TOKEN
@csrf_exempt
def index(request):
if request.method == "GET":
global TOKEN
signature = request.GET.get('signature', None)
timestamp = request.GET.get('timestamp', None)
nonce = request.GET.get('nonce', None)
echoStr = request.GET.get('echostr', None)
token = TOKEN
tmpList = [token, timestamp, nonce]
tmpList.sort()
tmp_str = "%s%s%s" % tuple(tmpList)
tmp_str = hashlib.sha1(tmp_str).hexdigest()
if tmp_str == signature:
return HttpResponse(echoStr)
else:
return HttpResponse('Error')
elif request.method == "POST":
xml_msg = request.body
response = HttpResponse(response_msg(xml_msg), content_type="application/xml")
return response
MSG_TYPE_TEXT = "text"
def response_msg(msg):
tree = ET.fromstring(msg)
msg = parse_xml(tree)
res = ""
if msg['MsgType'] == MSG_TYPE_TEXT:
reply_content = "Hello"
res = get_reply_xml(msg, reply_content)
return res
def get_reply_xml(msg, reply_content):
template = '''
<xml>
<ToUserName><![CDATA[%s]]></ToUserName>
<FromUserName><![CDATA[%s]]></FromUserName>
<CreateTime>%s</CreateTime>
<MsgType><![CDATA[%s]]></MsgType>
<Content><![CDATA[%s]]></Content>
</xml>
'''
res = template % (msg['FromUserName'], msg['ToUserName'], str(int(time.time())), 'text', reply_content)
return res
def parse_xml(root_elm):
"""
:param root_elm:
:return: msg dict
"""
msg = {}
if root_elm.tag == 'xml':
for child in root_elm:
msg[child.tag] = child.text
return msg
|
koba-z33/nand2tetris
|
projects/python/assembler/n2tassembler/commandline.py
|
Python
|
gpl-3.0
| 3,587
| 0
|
from .commandtype import CommandType
from .commandlineerror import CommandLineError
class CommandLine():
"""アセンブラコマンドラインオブジェクト
"""
def __init__(self, line_no: int, raw_data: str):
"""コンストラクタ
Parameters
----------
line_no : int
行番号
raw_data : str
行生データ
"""
self.__line_no = line_no
self.__raw_data = raw_data
self.__data = self.raw_data.split('//', 1)[0].strip()
@property
def line_no(self) -> int:
"""行番号
Returns
-------
int
行番号
"""
return self.__line_no
@property
def raw_data(self) -> str:
"""行生データ
Returns
-------
str
行生データ
"""
return self.__raw_data
@property
def data(self) -> str:
"""コマンドデータ
Returns
-------
str
コマンドデータ
"""
return self.raw_data.split('//', 1)[0].strip()
@property
def command_type(self) -> CommandType:
"""コマンドタイプ
Returns
-------
CommandType
コマンドタイプ
"""
if len(self.__data) == 0:
return CommandType.BLANK_LINE
if self.__data[0] == '@':
return CommandType.A_COMMAND
elif self.__data[0] == '(':
return CommandType.L_COMMAND
else:
return CommandType.C_COMMAND
@property
def symbol(self) -> str:
"""シンボル
Returns
-------
str
シンボル
Raises
------
CommandLineError
A命令、ラベル以外のコマンドの時に発生
"""
if self.command_type == CommandType.A_COMMAND:
return self.data[1:]
elif self.command_type == CommandType.L_COMMAND:
return self.data[1:-1]
else:
raise CommandLineError(str(self))
@property
def dest(self) -> str:
"""destニーモニック
Returns
-------
str
destニーモニック
"""
return self.__sepalate_mnemonic()[0]
@property
def comp(self) -> str:
"""compニーモニック
Returns
-------
str
compニーモニック
"""
return self.__sepalate_mnemonic()[1]
@property
def jump(self) -> str:
"""jumpニーモニック
Returns
-------
str
jumpニーモニック
"""
return self.__sepalate_mnemonic()[2]
def __sepalate_mnemonic(self) -> tuple:
"""ニーモニック分割
Returns
-------
tuple
分割されたニーモニック
"""
if self.command_type != CommandType.C_COMMAND:
raise CommandLineError(str(self))
pos_e: int = self.data.find('=')
pos_s: int = self.data.find(';')
if pos_e == -1:
dest = 'null'
else:
dest = self.data[0:pos_e]
if pos_s == -1:
comp = self.data[pos_e + 1:]
else:
comp = self.data[pos_e + 1:pos_s]
if pos_s == -1:
jump = 'null'
else:
jump = self.data[pos_s + 1:]
return (dest, comp, jump)
def __str__(self):
return 'LineNo {} : {}'.format(self.line_no, self.__raw_data)
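# A minimal usage sketch (hypothetical assembly line, not from the original
# project's test data):
#   line = CommandLine(12, 'D=M+1;JGT  // loop guard')
#   line.command_type                    # -> CommandType.C_COMMAND
#   (line.dest, line.comp, line.jump)    # -> ('D', 'M+1', 'JGT')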
|
w1ll1am23/home-assistant
|
homeassistant/components/intent/__init__.py
|
Python
|
apache-2.0
| 2,573
| 0.001555
|
"""The Intent integration."""
import voluptuous as vol
from homeassistant.components import http
from homeassistant.components.http.data_validator import RequestDataValidator
from homeassistant.const import SERVICE_TOGGLE, SERVICE_TURN_OFF, SERVICE_TURN_ON
from homeassistant.core import DOMAIN as HA_DOMAIN, HomeAssistant
from homeassistant.helpers import config_validation as cv, integration_platform, intent
from .const import DOMAIN
async def async_setup(hass: HomeAssistant, config: dict):
"""Set up the Intent component."""
hass.http.register_view(IntentHandleView())
await integration_platform.async_process_integration_platforms(
hass, DOMAIN, _async_process_intent
)
hass.helpers.intent.async_register(
intent.ServiceIntentHandler(
intent.INTENT_TURN_ON, HA_DOMAIN, SERVICE_TURN_ON, "Turned {} on"
)
)
hass.helpers.intent.async_register(
intent.ServiceIntentHandler(
intent.INTENT_TURN_OFF, HA_DOMAIN, SERVICE_TURN_OFF, "Turned {} off"
)
)
hass.helpers.intent.async_register(
intent.ServiceIntentHandler(
intent.INTENT_TOGGLE, HA_DOMAIN, SERVICE_TOGGLE, "Toggled {}"
)
)
return True
async def _async_process_intent(hass: HomeAssistant, domain: str, platform):
"""Process the intents of an integration."""
await platform.async_setup_intents(hass)
class IntentHandleView(http.HomeAssistantView):
"""View to handle intents from JSON."""
url = "/api/intent/handle"
name = "api:intent:handle"
@RequestDataValidator(
vol.Schema(
{
vol.Required("name"): cv.string,
vol.Optional("data"): vol.Schema({cv.string: object}),
}
)
)
async def post(self, request, data):
"""Handle intent with name/data."""
hass = request.app["hass"]
try:
intent_name = data["name"]
slots = {
key: {"value": value} for key, value in data.get("data", {}).items()
}
intent_result = await intent.async_handle(
hass, DOMAIN, intent_name, slots, "", self.context(request)
)
except intent.IntentHandleError as err:
intent_result = intent.IntentResponse()
intent_result.async_set_speech(str(err))
if intent_result is None:
intent_result = intent.IntentResponse()
intent_result.async_set_speech("Sorry, I couldn't handle that")
return self.json(intent_result)
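# A minimal example request body for POST /api/intent/handle, matching the
# schema validated above (intent name and slot values are illustrative):
#   {"name": "HassTurnOn", "data": {"name": "kitchen light"}}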
|
pedro2d10/SickRage-FR
|
sickbeard/providers/bluetigers.py
|
Python
|
gpl-3.0
| 5,574
| 0.003409
|
# coding=utf-8
# Author: raver2046 <raver2046@gmail.com>
#
# URL: https://sickrage.github.io
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import re
from requests.utils import dict_from_cookiejar
import traceback
from sickbeard import logger, tvcache
from sickbeard.bs4_parser import BS4Parser
from sickrage.providers.torrent.TorrentProvider import TorrentProvider
class BlueTigersProvider(TorrentProvider): # pylint: disable=too-many-instance-attributes
def __init__(self):
TorrentProvider.__init__(self, "BLUETIGERS")
self.username = None
self.password = None
self.ratio = None
self.token = None
self.cache = tvcache.TVCache(self, min_time=10) # Only poll BLUETIGERS every 10 minutes max
self.urls = {
'base_url': 'https://www.bluetigers.ca/',
'search': 'https://www.bluetigers.ca/torrents-search.php',
'login': 'https://www.bluetigers.ca/account-login.php',
'download': 'https://www.bluetigers.ca/torrents-details.php?id=%s&hit=1',
}
self.search_params = {
"c16": 1, "c10": 1, "c130": 1, "c131": 1, "c17": 1, "c18": 1, "c19": 1
}
self.url = self.urls['base_url']
def login(self):
if any(dict_from_cookiejar(self.session.cookies).values()):
return True
login_params = {
'username': self.username,
'password': self.password,
'take_login': '1'
}
response = self.get_url(self.urls['login'], post_data=login_params, timeout=30)
if not response:
check_login = self.get_url(self.urls['base_url'], timeout=30)
if re.search('account-logout.php', check_login):
return True
else:
logger.log(u"Unable to connect to provider", logger.WARNING)
return False
if re.search('account-login.php', response):
logger.log(u"Invalid username or password. Check your settings", logger.WARNING)
return False
return True
def search(self, search_strings, age=0, ep_obj=None): # pylint: disable=too-many-locals
results = []
if not self.login():
return results
for mode in search_strings:
items = []
logger.log(u"Search Mode: %s" % mode, logger.DEBUG)
for search_string in search_strings[mode]:
if mode != 'RSS':
logger.log(u"Search string: {search}".format(search=search_string.decode('utf-8')),
logger.DEBUG)
self.search_params['search'] = search_string
data = self.get_url(self.urls['search'], params=self.search_params)
if not data:
continue
try:
with BS4Parser(data, 'html5lib') as html:
result_linkz = html.findAll('a', href=re.compile("torrents-details"))
if not result_linkz:
logger.log(u"Data returned from provider do not contains any torrent", logger.DEBUG)
continue
if result_linkz:
for link in result_linkz:
title = link.text
download_url = self.urls['base_url'] + link['href']
download_url = download_url.replace("torrents-details", "download")
# FIXME
size = -1
seeders = 1
leechers = 0
if not title or not download_url:
continue
# Filter unseeded torrent
# if seeders < self.minseed or leechers < self.minleech:
# if mode != 'RSS':
# logger.log(u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format(title, seeders, leechers), logger.DEBUG)
# continue
item = title, download_url, size, seeders, leechers
if mode != 'RSS':
logger.log(u"Found result: %s " % title, logger.DEBUG)
items.append(item)
except Exception:
logger.log(u"Failed parsing provider. Traceback: %s" % traceback.format_exc(), logger.ERROR)
# For each search mode sort all the items by seeders if available
items.sort(key=lambda tup: tup[3], reverse=True)
results += items
return results
def seed_ratio(self):
return self.ratio
provider = BlueTigersProvider()
|
xujun10110/DIE
|
DIE/Lib/ParsedValue.py
|
Python
|
mit
| 1,350
| 0.002222
|
MAX_SCORE = 10
class ParsedValue():
"""
Possible run-time value.
The value data might either be definite or guessed.
"""
def __init__(self, data, description, score=0, raw=None, type_=None):
"""
Ctor
@param data: The data`s human-readable representation.
@param description: A description string for the value data type
@param score: score is a (0-10) value indicating the probability of the value.
score of 0 (default) means the value is certain
"""
self.data = data
self.description = description
self.type = type_
self.raw = raw # TODO: Validate value is a string (representing hex values)
# If score cannot be validated set its value to 10 (Guessed).
if self._validate_score(score):
self.score = score
else:
self.score = MAX_SCORE
def _validate_score(self, score):
"""
Validate that score value is in range 0-10.
@param score: Score value to validate
@return: True if score is valid, otherwise False.
"""
return 0 <= score <= MAX_SCORE
def is_guessed(self):
"""
Check if the value is guessed
@return: True if the value is guessed, otherwise False
"""
return not self.score == 0
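# A minimal usage sketch (illustrative values only):
#   guessed = ParsedValue('0x41', 'ASCII character', score=3, raw='41')
#   guessed.is_guessed()    # -> True, any nonzero score marks a guess
#   definite = ParsedValue('main', 'symbol name')
#   definite.is_guessed()   # -> False, score 0 means the value is certain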
|
mgodek/music_recommendation_system
|
matrixFactor.py
|
Python
|
gpl-3.0
| 3,252
| 0.009533
|
###############################################################################
import numpy
import time
###############################################################################
def matrixFactorization(R, P, Q, K, epochMax=1000, alpha=0.0002, beta=0.02):
Q = Q.T
for step in xrange(epochMax):
for i in xrange(len(R)):
for j in xrange(len(R[i])):
if R[i][j] > 0:
eij = R[i][j] - numpy.dot(P[i,:],Q[:,j])
for k in xrange(K):
P[i][k] = P[i][k] + alpha * (2 * eij * Q[k][j] - beta * P[i][k])
Q[k][j] = Q[k][j] + alpha * (2 * eij * P[i][k] - beta * Q[k][j])
eR = numpy.dot(P,Q)
e = 0
for i in xrange(len(R)):
for j in xrange(len(R[i])):
if R[i][j] > 0:
e = e + pow(R[i][j] - numpy.dot(P[i,:],Q[:,j]), 2)
for k in xrange(K):
e = e + (beta/2) * ( pow(P[i][k],2) + pow(Q[k][j],2) )
if e < 0.001:
break
return P, Q.T
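# A minimal usage sketch on toy data (illustrative; not part of the original
# pipeline). Zero cells in R are treated as unknown and become predictions
# in the reconstructed matrix:
#   R = numpy.array([[5., 3., 0.], [4., 0., 1.], [1., 1., 0.]])
#   P = numpy.random.rand(3, 2)
#   Q = numpy.random.rand(3, 2)
#   nP, nQ = matrixFactorization(R, P, Q, 2, epochMax=200)
#   approx = numpy.dot(nP, nQ.T)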
###############################################################################
def createUserRow(utp):
userRow = numpy.zeros(shape=(1, utp.nextSongIndex), dtype=float)
for songId in utp.user_feed_artists_tracks + utp.user_feed_tracks:
if songId in utp.echoSongIdToIdxMap:
userRow[0][utp.echoSongIdToIdxMap[songId]] = 20 # hardcoded estimated playcount for current user
# else:
# print("Missing other user preference on %s" % songId)
return userRow
###############################################################################
def createPreferenceMatrix(utp):
R = numpy.zeros(shape=(utp.nextUserIndex+1, utp.nextSongIndex), dtype=float)
print( "Create preference matrix R %d %d" %(utp.nextUserIndex+1, utp.nextSongIndex) )
t0 = time.clock()
for userId in utp.user_track_like:
for songId in utp.user_track_like[userId]:
R[userId][songId] = utp.user_track_like[userId][songId]
print("Time spent %s" % str(time.clock() - t0))
# add user row as last
R[utp.nextUserIndex][:] = createUserRow(utp)
return R
###############################################################################
def matrixFactorize(utp):
R = createPreferenceMatrix(utp)
N = len(R)
M = len(R[0])
K = 2
P = numpy.random.rand(N,K).astype('f')
Q = numpy.random.rand(M,K).astype('f')
print("Matrix factorization")
t0 = time.clock()
nP, nQ = matrixFactorization(R, P, Q, K)
print("Time spent %s" % str(time.clock() - t0))
# print(nP)
# print(nQ)
nR = numpy.dot(nP, nQ.T)
print(nR[utp.nextUserIndex][:]) # last user is the goal user
songIdx = 0
utp.user_recommendations_idxs.clear()
for element in nR[utp.nextUserIndex]:
#print(element)
#print(songIdx)
if element > 1: # greater than threshold TODO ask user?
utp.user_recommendations_idxs[songIdx] = element
else:
print("Skip songIdx %d with value %d" %(songIdx, element))
songIdx += 1
print(utp.user_recommendations_idxs)
###############################################################################
|
kashif/scikit-learn
|
sklearn/metrics/tests/test_pairwise.py
|
Python
|
bsd-3-clause
| 25,509
| 0
|
import numpy as np
from numpy import linalg
from scipy.sparse import dok_matrix, csr_matrix, issparse
from scipy.spatial.distance import cosine, cityblock, minkowski, wminkowski
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.externals.six import iteritems
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.metrics.pairwise import manhattan_distances
from sklearn.metrics.pairwise import linear_kernel
from sklearn.metrics.pairwise import chi2_kernel, additive_chi2_kernel
from sklearn.metrics.pairwise import polynomial_kernel
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics.pairwise import laplacian_kernel
from sklearn.metrics.pairwise import sigmoid_kernel
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics.pairwise import cosine_distances
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_distances_argmin_min
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.metrics.pairwise import PAIRWISE_KERNEL_FUNCTIONS
from sklearn.metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from sklearn.metrics.pairwise import PAIRED_DISTANCES
from sklearn.metrics.pairwise import check_pairwise_arrays
from sklearn.metrics.pairwise import check_paired_arrays
from sklearn.metrics.pairwise import paired_distances
from sklearn.metrics.pairwise import paired_euclidean_distances
from sklearn.metrics.pairwise import paired_manhattan_distances
from sklearn.preprocessing import normalize
def test_pairwise_distances():
# Test the pairwise_distance helper function.
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
S = pairwise_distances(X, metric="euclidean")
S2 = euclidean_distances(X)
assert_array_almost_equal(S, S2)
# Euclidean distance, with Y != X.
Y = rng.random_sample((2, 4))
S = pairwise_distances(X, Y, metric="euclidean")
S2 = euclidean_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
S2 = pairwise_distances(X_tuples, Y_tuples, metric="euclidean")
assert_array_almost_equal(S, S2)
# "cityblock" uses sklearn metric, cityblock (function) is scipy.spatial.
S = pairwise_distances(X, metric="cityblock")
S2 = pairwise_distances(X, metric=cityblock)
assert_equal(S.shape[0], S.shape[1])
assert_equal(S.shape[0], X.shape[0])
assert_array_almost_equal(S, S2)
# The manhattan metric should be equivalent to cityblock.
S = pairwise_distances(X, Y, metric="manhattan")
S2 = pairwise_distances(X, Y, metric=cityblock)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Low-level function for manhattan can divide in blocks to avoid
# using too much memory during the broadcasting
S3 = manhattan_distances(X, Y, size_threshold=10)
assert_array_almost_equal(S, S3)
# Test cosine as a string metric versus cosine callable
# "cosine" uses sklearn metric, cosine (function) is scipy.spatial
S = pairwise_distances(X, Y, metric="cosine")
S2 = pairwise_distances(X, Y, metric=cosine)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Test with sparse X and Y,
# currently only supported for Euclidean, L1 and cosine.
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
S = pairwise_distances(X_sparse, Y_sparse, metric="euclidean")
S2 = euclidean_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
S = pairwise_distances(X_sparse, Y_sparse, metric="cosine")
S2 = cosine_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
S = pairwise_distances(X_sparse, Y_sparse.tocsc(), metric="manhattan")
S2 = manhattan_distances(X_sparse.tobsr(), Y_sparse.tocoo())
assert_array_almost_equal(S, S2)
S2 = manhattan_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with scipy.spatial.distance metric, with a kwd
kwds = {"p": 2.0}
S = pairwise_distances(X, Y, metric="minkowski", **kwds)
S2 = pairwise_distances(X, Y, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# same with Y = None
kwds = {"p": 2.0}
S = pairwise_distances(X, metric="minkowski", **kwds)
S2 = pairwise_distances(X, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# Test that scipy distance metrics throw an error if sparse matrix given
assert_raises(TypeError, pairwise_distances, X_sparse, metric="minkowski")
assert_raises(TypeError, pairwise_distances, X, Y_sparse,
metric="minkowski")
# Test that a value error is raised if the metric is unknown
assert_raises(ValueError, pairwise_distances, X, Y, metric="blah")
def test_pairwise_precomputed():
for func in [pairwise_distances, pairwise_kernels]:
# Test correct shape
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), metric='precomputed')
# with two args
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), np.zeros((4, 4)),
metric='precomputed')
# even if shape[1] agrees (although thus second arg is spurious)
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), np.zeros((4, 3)),
metric='precomputed')
# Test not copied (if appropriate dtype)
S = np.zeros((5, 5))
S2 = func(S, metric="precomputed")
assert_true(S is S2)
# with two args
S = np.zeros((5, 3))
S2 = func(S, np.zeros((3, 3)), metric="precomputed")
assert_true(S is S2)
# Test always returns float dtype
S = func(np.array([[1]], dtype='int'), metric='precomputed')
assert_equal('f', S.dtype.kind)
# Test converts list to array-like
S = func([[1]], metric='precomputed')
assert_true(isinstance(S, np.ndarray))
def check_pairwise_parallel(func, metric, kwds):
rng = np.random.RandomState(0)
for make_data in (np.array, csr_matrix):
X = make_data(rng.random_sample((5, 4)))
Y = make_data(rng.random_sample((3, 4)))
try:
S = func(X, metric=metric, n_jobs=1, **kwds)
except (TypeError, ValueError) as exc:
# Not all metrics support sparse input
# ValueError may be triggered by bad callable
if make_data is csr_matrix:
assert_raises(type(exc), func, X, metric=metric,
n_jobs=2, **kwds)
continue
else:
raise
S2 = func(X, metric=metric, n_jobs=2, **kwds)
assert_array_almost_equal(S, S2)
S = func(X, Y, metric=metric, n_jobs=1, **kwds)
S2 = func(X, Y, metric=metric, n_jobs=2, **kwds)
assert_array_almost_equal(S, S2)
def test_pairwise_parallel():
wminkowski_kwds = {'w': np.arange(1, 5).astype('double'), 'p': 1}
metrics = [(pairwise_distances, 'euclidean', {}),
(pairwise_distances, wminkowski, wminkowski_kwds),
(pairwise_distances, 'wminkowski', wminkowski_kwds),
(pairwise_kernels, 'polynomial', {'degree': 1}),
(pairwise_kernels, callable_rbf_kernel, {'gamma': .1}),
]
for func, metric, kwds in metrics:
yield check_pairwise_parallel, func, metric, kwds
|
inovtec-solutions/OpenERP
|
openerp/addons/lunch/report/report_lunch_order.py
|
Python
|
agpl-3.0
| 2,799
| 0.009289
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import tools
from openerp.osv import fields,osv
class report_lunch_order(osv.osv):
_name = "report.lunch.order.line"
_description = "Lunch Ord
|
ers Statistics"
_auto = False
_rec_name = 'date'
_columns = {
'date': fields.date('Date Order', readonly=True, select=True),
'year': fields.char('Year', size=4, readonly=True),
'month':fields.selection([('01','January'), ('02','February'), ('03','March'), ('04','April'),
('05','May'), ('06','June'), ('07','July'), ('08','August'), ('09','September'),
('10','October'), ('11','November'), ('12','December')], 'Month',readonly=True),
'day': fields.char('Day', size=128, readonly=True),
'user_id': fields.many2one('res.users', 'User Name'),
'price_total':fields.float('Total Price', readonly=True),
'note' : fields.text('Note',size=256,readonly=True),
}
_order = 'date desc'
def init(self, cr):
tools.drop_view_if_exists(cr, 'report_lunch_order_line')
cr.execute("""
create or replace view report_lunch_order_line as (
select
min(lo.id) as id,
lo.user_id as user_id,
lo.date as date,
to_char(lo.date, 'YYYY') as year,
to_char(lo.date, 'MM') as month,
to_char(lo.date, 'YYYY-MM-DD') as day,
lo.note as note,
sum(lp.price) as price_total
from
lunch_order_line as lo
left join lunch_product as lp on (lo.product_id = lp.id)
group by
lo.date,lo.user_id,lo.note
)
""")
report_lunch_order()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
Juanlu001/xlwings
|
xlwings/utils.py
|
Python
|
apache-2.0
| 2,171
| 0.000921
|
from __future__ import division
import datetime as dt
missing = object()
try:
import numpy as np
except ImportError:
np = None
def int_to_rgb(number):
"""Given an integer, return the rgb"""
number = int(number)
r = number % 256
g = (number // 256) % 256
b = (number // (256 * 256)) % 256
return r, g, b
def rgb_to_int(rgb):
"""Given an rgb, return an int"""
return rgb[0] + (rgb[1] * 256) + (rgb[2] * 256 * 256)
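# Quick sanity check (illustrative): the two helpers are inverses for any
# 24-bit colour value:
#   int_to_rgb(255)            # -> (255, 0, 0)
#   rgb_to_int((255, 0, 0))    # -> 255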
def get_duplicates(seq):
seen = set()
duplicates = set(x for x in seq if x in seen or seen.add(x))
return duplicates
def np_datetime_to_datetime(np_datetime):
ts = (np_datetime - np.datetime64('1970-01-01T00:00:00Z')) / np.timedelta64(1, 's')
dt_datetime = dt.datetime.utcfromtimestamp(ts)
return dt_datetime
class VBAWriter(object):
class Block(object):
def __init__(self, writer, start):
self.writer = writer
self.start = start
def __enter__(self):
self.writer.writeln(self.start)
self.writer._indent += 1
def __exit__(self, exc_type, exc_val, exc_tb):
self.writer._indent -= 1
#self.writer.writeln(self.end)
def __init__(self, f):
self.f = f
self._indent = 0
self._freshline = True
def block(self, template, **kwargs):
return VBAWriter.Block(self, template.format(**kwargs))
def start_block(self, template, **kwargs):
self.writeln(template, **kwargs)
self._indent += 1
def end_block(self, template, **kwargs):
self.writeln(template, **kwargs)
self._indent -= 1
def write(self, template, **kwargs):
if self._freshline:
self.f.write('\t' * self._indent)
self._freshline = False
if kwargs:
template = template.format(**kwargs)
self.f.write(template)
if template[-1] == '\n':
self._freshline = True
def write_label(self, label):
self._indent -= 1
self.write(label + ':\n')
self._indent += 1
def writeln(self, template, **kwargs):
self.write(template + '\n', **kwargs)
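# A minimal usage sketch (hypothetical; writes to an in-memory buffer):
#   import io
#   out = io.StringIO()
#   w = VBAWriter(out)
#   with w.block('Sub {name}()', name='Demo'):
#       w.writeln('MsgBox "hi"')
#   w.writeln('End Sub')
#   out.getvalue()   # -> 'Sub Demo()\n\tMsgBox "hi"\nEnd Sub\n'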
|
liveblog/liveblog
|
server/liveblog/blogs/blog.py
|
Python
|
agpl-3.0
| 4,946
| 0.002022
|
import pymongo
from bson.objectid import ObjectId
from eve.utils import date_to_str
from html5lib.html5parser import ParseError
from lxml.html.html5parser import fragments_fromstring, HTMLParser
from superdesk.utc import utcnow
from superdesk import get_resource_service
from liveblog.posts.mixins import AuthorsMixin
from liveblog.posts.utils import get_associations
def is_valid_html(html):
try:
fragments_fromstring(html.encode('utf-8'), parser=HTMLParser(strict=True))
except ParseError:
return False
return True
class Blog(AuthorsMixin):
"""
Utility class to fetch blog data directly from mongo collections.
"""
order_by = ('_updated', '_created', 'order')
sort = ('asc', 'desc')
ordering = {
'newest_first': ('_created', 'desc'),
'oldest_first': ('_created', 'asc'),
'editorial': ('order', 'desc')
}
default_ordering = 'newest_first'
default_order_by = '_created'
default_sort = 'desc'
default_page = 1
default_page_limit = 25
max_page_limit = 100
def __init__(self, blog):
if isinstance(blog, (str, ObjectId)):
blog = get_resource_service('client_blogs').find_one(_id=blog, req=None)
self._blog = blog
self._posts_service = get_resource_service('client_blog_posts')
def _posts_lookup(self, sticky=None, highlight=None, all_posts=False, deleted=False, tags=[]):
filters = [
{'blog': self._blog['_id']}
]
# only return all post if parameter is specified. Otherwise get only open posts and not deleted
# also avoid sending "scheduled posts" by default (published_date in future)
if not all_posts:
filters.append({'post_status': 'open'})
if not deleted:
filters.append({'deleted': False})
filters.append({'published_date': {'$lte': date_to_str(utcnow())}})
if sticky:
filters.append({'sticky': True})
else:
filters.append({'sticky': False})
if highlight:
filters.append({'lb_highlight': True})
if len(tags) > 0:
filters.append({'tags': {'$in': tags}})
return {'$and': filters}
def get_ordering(self, label):
try:
order_by, sort = self.ordering[label]
return order_by, sort
except KeyError:
return self.default_order_by, self.default_sort
def check_html_markup(self, original_text):
div_wrapped = '<div>{}</div>'.format(original_text)
if not is_valid_html(original_text) and is_valid_html(div_wrapped):
original_text = div_wrapped
return original_text
def posts(self, **kwargs):
"""
Builds a query with the given parameters and hit mongodb to retrive the data
Uses `find` method from resource service. If wrap parameter is provided, the return
value it's a dictionary ala `python-eve` style data structure
Supported kwargs: sticky, highlight, ordering, page, limit, wrap, all_posts, deleted, tags
"""
sticky = kwargs.get('sticky', None)
highlight = kwargs.get('highlight', None)
ordering = kwargs.get('ordering', None)
page = kwargs.get('page', self.default_page)
limit = kwargs.get('limit', self.default_page_limit)
wrap = kwargs.get('wrap', False)
all_posts = kwargs.get('all_posts', False)
deleted = kwargs.get('deleted', False)
tags = kwargs.get('tags', [])
order_by, sort = self.get_ordering(ordering or self.default_ordering)
lookup = self._posts_lookup(sticky, highlight, all_posts, deleted, tags)
results = self._posts_service.find(lookup)
total = results.count()
# Get sorting direction: 'asc' maps to ascending, anything else to descending.
if sort == 'asc':
    sort = pymongo.ASCENDING
else:
    sort = pymongo.DESCENDING
# Fetch posts, do pagination and sorting.
skip = limit * (page - 1)
results = results.skip(skip).limit(limit).sort(order_by, sort)
posts = [x for x in results if 'groups' in x]
related_items = self._posts_service._related_items_map(posts)
for post in posts:
for assoc in get_associations(post):
ref_id = assoc.get('residRef', None)
if ref_id:
assoc['item'] = related_items[ref_id]
original_text = assoc['item'].get('text')
assoc['item']['text'] = self.check_html_markup(original_text)
# Enrich documents
self.complete_posts_info(posts)
if wrap:
# Wrap in python-eve style data structure
return {
'_items': posts,
'_meta': {
'page': page,
'total': total,
'max_results': limit
}
}
return posts
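# A minimal usage sketch (assumes a configured superdesk app context and a
# valid blog _id; parameter values are illustrative):
#   blog = Blog(blog_id)
#   page = blog.posts(ordering='oldest_first', page=1, limit=10, wrap=True)
#   page['_meta']['total']   # -> total number of matching posts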
|
waterblue13/tensor2tensor
|
tensor2tensor/models/cycle_gan.py
|
Python
|
apache-2.0
| 4,931
| 0.005678
|
# coding=utf-8
# Copyright 2017 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cycle GAN."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
from tensor2tensor.layers import common_layers
from tensor2tensor.models import transformer_vae
from tensor2tensor.utils import registry
from tensor2tensor.utils import t2t_model
import tensorflow as tf
def reconstruct_loss(x, gt, hparams, reuse=None):
pred = tf.layers.dense(x, hparams.vocab_size, name="softmax", reuse=reuse)
xent, w = common_layers.padded_cross_entropy(pred, gt, 0.0)
return xent / w
def discriminator(x, compress, hparams, name, reuse=None):
with tf.variable_scope(name, reuse=reuse):
x = tf.stop_gradient(2 * x) - x # Reverse gradient.
if compress:
x = transformer_vae.compress(x, None, hparams, "compress")
else:
x = transformer_vae.residual_conv(x, 1, hparams, "compress_rc")
y = tf.reduce_mean(x, axis=1)
return tf.tanh(tf.layers.dense(y, 1, name="reduce"))
def discriminate_loss(x, y, compress, hparams, name):
with tf.variable_scope(name):
d1 = discriminator(x, compress, hparams, "discriminator")
d2 = discriminator(y, compress, hparams, "discriminator", reuse=True)
dloss = tf.reduce_mean(tf.abs(d1 - d2))
return - dloss
def split_on_batch(x):
batch_size = tf.shape(x)[0]
i = batch_size // 2
return x[:i, :, :, :], x[i:2*i, :, :, :]
def cycle_gan_internal(inputs, targets, _, hparams):
"""Cycle GAN, main step used for training."""
with tf.variable_scope("cycle_gan"):
# Embed inputs and targets.
inputs_orig, targets_orig = tf.to_int32(inputs), tf.to_int32(targets)
inputs = common_layers.embedding(
inputs_orig, hparams.vocab_size, hparams.hidden_size, "embed")
targets = common_layers.embedding(
targets_orig, hparams.vocab_size, hparams.hidden_size,
"embed", reuse=True)
# Split the batch into input-input and target-target parts.
inputs1, _ = split_on_batch(inputs)
_, targets2 = split_on_batch(targets)
# Define F and G, called inp2tgt and tgt2inp here.
def inp2tgt(x, reuse=False):
return transformer_vae.residual_conv(x, 1, hparams, "inp2tgt", reuse)
def tgt2inp(x, reuse=False):
return transformer_vae.residual_conv(x, 1, hparams, "tgt2inp", reuse)
# Input-input part.
inp1_tgt = inp2tgt(inputs1)
inp1_back = tgt2inp(inp1_tgt)
# Target-target part.
tgt2_inp = tgt2inp(targets2, reuse=True)
tgt2_back = inp2tgt(tgt2_inp, reuse=True)
# Reconstruction losses.
inp1_orig, _ = split_on_batch(inputs_orig)
_, tgt2_orig = split_on_batch(targets_orig)
inp1_loss = reconstruct_loss(
inp1_back, tf.squeeze(inp1_orig, axis=3), hparams)
tgt2_loss = reconstruct_loss(
tgt2_back, tf.squeeze(tgt2_orig, axis=3), hparams, reuse=True)
# Discriminator losses.
dloss1 = discriminate_loss(inputs1, tgt2_inp, True, hparams, "inp_disc")
dloss2 = discriminate_loss(targets2, inp1_tgt, True, hparams, "tgt_disc")
# Reconstruct targets from inputs.
tgt = inp2tgt(inputs, reuse=True)
tgt = tf.layers.dense(tgt, hparams.vocab_size, name="softmax", reuse=True)
# We use the reconstruction only for tracking progress, no gradients here!
tgt = tf.stop_gradient(tf.expand_dims(tgt, axis=2))
losses = {"input_input": hparams.cycle_loss_multiplier * inp1_loss,
"target_target": hparams.cycle_loss_multiplier * tgt2_loss,
"input_disc": dloss1,
"target_disc": dloss2}
return tgt, losses
@registry.register_model
class CycleGAN(t2t_model.T2TModel):
def model_fn_body(self, features):
return cycle_gan_internal(
features["inputs"], features["targets"], features["target_space_id"],
self._hparams)
@registry.register_hparams
def cycle_gan_small():
"""Set of hyperparameters."""
hparams = transformer_vae.transformer_ae_small()
hparams.batch_size = 2048
hparams.input_modalities = "inputs:symbol:identity"
hparams.target_modality = "symbol:identity"
hparams.weight_decay = 3.0
hparams.learning_rate = 0.05
hparams.kl_warmup_steps = 5000
hparams.learning_rate_warmup_steps = 3000
hparams.add_hparam("vocab_size", 32) # Vocabulary size, need to set here.
hparams.add_hparam("cycle_loss_multiplier", 2.0)
return hparams
|
bruno1951/bruno1951-cmis-cs2
|
play.py
|
Python
|
cc0-1.0
| 414
| 0.007246
|
x = raw_input(" Take your wand out:")
y = raw_input(" You're a wizard youngone: ")
z = raw_input(" Please come with me and become a wizard: ")
p = raw_input(" No, I a
|
m not a liar: " )
print str(x) + " and repeat " + str(y) + "I have never seen such potential in such a young boy" + str(z) + "Young one, you will be taken care of very well, theres nothing to be afraid of, I promise" + str(p) + "Come, it is time"
|
AntonGagin/GSAS_USE
|
patchSystErrors/modifiedOld/GSASIIstrMain.py
|
Python
|
gpl-3.0
| 85,029
| 0.01797
|
# -*- coding: utf-8 -*-
'''
*GSASIIstrMain: main structure routine*
---------------------------------------
'''
########### SVN repository information ###################
# $Date: 2018-07-13 22:44:01 +0300 (Fri, 13 Jul 2018) $
# $Author: toby $
# $Revision: 3471 $
# $URL: https://subversion.xray.aps.anl.gov/pyGSAS/trunk/GSASIIstrMain.py
|
$
# $Id: GSASIIstrMain.py 3471 2018-07-13 19:44:01Z toby $
########### SVN repository information ###################
from __future__ import division, print_function
import platform
import sys
import os.path as ospath
import time
import math
import copy
if '2' in platform.python_version_tuple()[0]:
import cPickle
else:
import pickle as cPickle
import numpy as np
import numpy.linalg as nl
import scipy.optimize as so
import GSASIIpath
GSASIIpath.SetBinaryPath()
GSASIIpath.SetVersionNumber("$Revision: 3471 $")
import GSASIIlattice as G2lat
import GSASIIspc as G2spc
import GSASIImapvars as G2mv
import GSASIImath as G2mth
import GSASIIstrIO as G2stIO
import GSASIIstrMath as G2stMth
import GSASIIobj as G2obj
sind = lambda x: np.sin(x*np.pi/180.)
cosd = lambda x: np.cos(x*np.pi/180.)
tand = lambda x: np.tan(x*np.pi/180.)
asind = lambda x: 180.*np.arcsin(x)/np.pi
acosd = lambda x: 180.*np.arccos(x)/np.pi
atan2d = lambda y,x: 180.*np.arctan2(y,x)/np.pi
ateln2 = 8.0*math.log(2.0)
DEBUG = True
# </ Anton Gagin
#import scipy as sp
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import config_example
from scipy import interpolate
from scipy import linalg
from scipy.integrate import quad
from scipy.interpolate import interp1d
import numpy.ma as ma
def RefineCore(Controls,Histograms,Phases,restraintDict,rigidbodyDict,parmDict,varyList,
calcControls,pawleyLookup,ifSeq,printFile,dlg,GPXfile):
'''Core optimization routines, shared between SeqRefine and Refine
:returns: 5-tuple of ifOk (bool), Rvals (dict), result, covMatrix, sig
'''
# print 'current',varyList
# for item in parmDict: print item,parmDict[item] ######### show dict just before refinement
G2mv.Map2Dict(parmDict,varyList)
ifPrint = True
if ifSeq:
ifPrint = False
Rvals = {}
histoList = Histograms.keys()
histoList.sort()
# we'll need it later, but this cycle has to be done before the first refinement
nHist = 0
hIdList = []
for histogram in histoList:
Histogram = Histograms[histogram]
hId = Histogram['hId']
hIdList.append(hId)
nHist = nHist + 1
config_example.xyFWHM = [[0]*(max(hIdList)+1), [0]*(max(hIdList)+1)]
# Anton Gagin />
while True:
begin = time.time()
values = np.array(G2stMth.Dict2Values(parmDict, varyList))
if np.any(np.isnan(values)):
raise G2obj.G2Exception('ERROR - nan found in LS parameters - use Calculate/View LS parms to locate')
# test code to compute GOF and save for external repeat
#args = ([Histograms,Phases,restraintDict,rigidbodyDict],parmDict,varyList,calcControls,pawleyLookup,dlg)
#print '*** before fit chi**2',np.sum(G2stMth.errRefine(values,*args)**2)
#fl = open('beforeFit.cpickle','wb')
#cPickle.dump(values,fl,1)
#cPickle.dump(args[:-1],fl,1)
#fl.close()
Ftol = Controls['min dM/M']
Xtol = Controls['SVDtol']
Factor = Controls['shift factor']
if 'Jacobian' in Controls['deriv type']:
result = so.leastsq(G2stMth.errRefine,values,Dfun=G2stMth.dervRefine,full_output=True,
ftol=Ftol,col_deriv=True,factor=Factor,
args=([Histograms,Phases,restraintDict,rigidbodyDict],parmDict,varyList,calcControls,pawleyLookup,dlg))
ncyc = int(result[2]['nfev']/2)
elif 'analytic Hessian' in Controls['deriv type']:
Lamda = Controls.get('Marquardt',-3)
maxCyc = Controls['max cyc']
result = G2mth.HessianLSQ(G2stMth.errRefine,values,Hess=G2stMth.HessRefine,ftol=Ftol,xtol=Xtol,maxcyc=maxCyc,Print=ifPrint,lamda=Lamda,
args=([Histograms,Phases,restraintDict,rigidbodyDict],parmDict,varyList,calcControls,pawleyLookup,dlg))
ncyc = result[2]['num cyc']+1
Rvals['lamMax'] = result[2]['lamMax']
Controls['Marquardt'] = -3 #reset to default
elif 'Hessian SVD' in Controls['deriv type']:
maxCyc = Controls['max cyc']
result = G2mth.HessianSVD(G2stMth.errRefine,values,Hess=G2stMth.HessRefine,ftol=Ftol,xtol=Xtol,maxcyc=maxCyc,Print=ifPrint,
args=([Histograms,Phases,restraintDict,rigidbodyDict],parmDict,varyList,calcControls,pawleyLookup,dlg))
ncyc = result[2]['num cyc']+1
else: #'numeric'
result = so.leastsq(G2stMth.errRefine,values,full_output=True,ftol=Ftol,epsfcn=1.e-8,factor=Factor,
args=([Histograms,Phases,restraintDict,rigidbodyDict],parmDict,varyList,calcControls,pawleyLookup,dlg))
ncyc = 1
if len(varyList):
ncyc = int(result[2]['nfev']/len(varyList))
# table = dict(zip(varyList,zip(values,result[0],(result[0]-values))))
# for item in table: print item,table[item] #useful debug - are things shifting?
runtime = time.time()-begin
Rvals['SVD0'] = result[2].get('SVD0',0)
Rvals['converged'] = result[2].get('Converged')
Rvals['DelChi2'] = result[2].get('DelChi2',-1.)
Rvals['chisq'] = np.sum(result[2]['fvec']**2)
G2stMth.Values2Dict(parmDict, varyList, result[0])
G2mv.Dict2Map(parmDict,varyList)
Rvals['Nobs'] = Histograms['Nobs']
Rvals['Rwp'] = np.sqrt(Rvals['chisq']/Histograms['sumwYo'])*100. #to %
Rvals['GOF'] = np.sqrt(Rvals['chisq']/(Histograms['Nobs']-len(varyList)))
printFile.write(' Number of function calls: %d No. of observations: %d No. of parameters: %d User rejected: %d Sp. gp. extinct: %d\n'% \
(result[2]['nfev'],Histograms['Nobs'],len(varyList),Histograms['Nrej'],Histograms['Next']))
printFile.write(' Refinement time = %8.3fs, %8.3fs/cycle, for %d cycles\n'%(runtime,runtime/ncyc,ncyc))
printFile.write(' wR = %7.2f%%, chi**2 = %12.6g, GOF = %6.2f\n'%(Rvals['Rwp'],Rvals['chisq'],Rvals['GOF']))
sig = len(varyList)*[None,]
if 'None' in str(type(result[1])) and ifSeq: #this bails out of a sequential refinement on singular matrix
IfOK = False
covMatrix = []
print ('**** Refinement failed - singular matrix ****')
if 'Hessian' in Controls['deriv type']:
num = len(varyList)-1
for i,val in enumerate(np.flipud(result[2]['psing'])):
if val:
print ('Bad parameter: '+varyList[num-i])
else:
Ipvt = result[2]['ipvt']
for i,ipvt in enumerate(Ipvt):
if not np.sum(result[2]['fjac'],axis=1)[i]:
print ('Bad parameter: '+varyList[ipvt-1])
break
IfOK = True
try:
covMatrix = result[1]*Rvals['GOF']**2
sig = np.sqrt(np.diag(covMatrix))
if np.any(np.isnan(sig)) or not sig.shape:
print ('*** Least squares aborted - some invalid esds possible ***')
# table = dict(zip(varyList,zip(values,result[0],(result[0]-values)/sig)))
# for item in table: print item,table[item] #useful debug - are things shifting?
break #refinement succeeded - finish up!
except TypeError: #result[1] is None on singular matrix or LinAlgError
IfOK = False
if not len(varyList):
covMatrix = []
break
print ('**** Refinement failed - singular matrix ****')
if 'Hessian' in Controls['deriv type']:
num = len(varyList)-1
fo
|
wanderer2/pymc3
|
pymc3/glm/__init__.py
|
Python
|
apache-2.0
| 89
| 0
|
from . import families
from .glm import glm, linear_component, plot_posterior_predictive
|
marrow/wsgi.objects
|
examples/wsgify.py
|
Python
|
mit
| 521
| 0.003839
|
#!/usr/bin/env python
# encoding: utf-8
from __future__ import unicode_literals
from pprint import pformat
from marrow.server.http import HTTPServer
from marrow.wsgi.objects.decorator import wsgify
@wsgify
def hello(request):
resp = request.response
resp.mime = "text/plain"
resp.body = "%r\n\n%s\n\n%s" % (request, request, pformat(request.__dict__))
if __name__ == '__main__':
import logging
logging.basicConfig(level=logging.DEBUG)
HTTPServer(None, 8080, application=hello).start()
|
grafeas/client-python
|
grafeas/models/api_project_repo_id.py
|
Python
|
apache-2.0
| 4,162
| 0.00024
|
# coding: utf-8
"""
An API to insert and retrieve metadata on cloud artifacts.
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v1alpha1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ApiProjectRepoId(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'project_id': 'str',
'repo_name': 'str'
}
attribute_map = {
'project_id': 'project_id',
'repo_name': 'repo_name'
}
def __init__(self, project_id=None, repo_name=None): # noqa: E501
"""ApiProjectRepoId - a model defined in Swagger""" # noqa: E501
self._project_id = None
self._repo_name = None
self.discriminator = None
if project_id is not None:
self.project_id = project_id
if repo_name is not None:
self.repo_name = repo_name
@property
def project_id(self):
"""Gets the project_id of this ApiProjectRepoId. # noqa: E501
The ID of the project. # noqa: E501
:return: The project_id of this ApiProjectRepoId. # noqa: E501
:rtype: str
"""
return self._project_id
@project_id.setter
def project_id(self, project_id):
"""Sets the project_id of this ApiProjectRepoId.
The ID of the project. # noqa: E501
:param project_id: The project_id of this ApiProjectRepoId. # noqa: E501
:type: str
"""
self._project_id = project_id
@property
def repo_name(self):
"""Gets the repo_name of this ApiProjectRepoId. # noqa: E501
The name of the repo. Leave empty for the default repo. # noqa: E501
:return: The repo_name of this ApiProjectRepoId. # noqa: E501
:rtype: str
"""
return self._repo_name
@repo_name.setter
def repo_name(self, repo_name):
"""Sets the repo_name of this ApiProjectRepoId.
The name of the repo. Leave empty for the default repo. # noqa: E501
:param repo_name: The repo_name of this ApiProjectRepoId. # noqa: E501
:type: str
"""
self._repo_name = repo_name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ApiProjectRepoId, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ApiProjectRepoId):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
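# A minimal usage sketch (illustrative values):
#   repo = ApiProjectRepoId(project_id='my-project', repo_name='my-repo')
#   repo.to_dict()   # -> {'project_id': 'my-project', 'repo_name': 'my-repo'}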
|
SUSE/azure-sdk-for-python
|
azure-mgmt-compute/azure/mgmt/compute/compute/v2017_03_30/models/virtual_machine_agent_instance_view.py
|
Python
|
mit
| 1,705
| 0.001173
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class VirtualMachineAgentInstanceView(Model):
"""The instance view of the VM Agent running on the virtual machine.
:param vm_agent_version: The VM Agent full version.
:type vm_agent_version: str
:param extension_handlers: The virtual machine extension handler instance
view.
:type extension_handlers: list of
:class:`VirtualMachineExtensionHandlerInstanceView
<azure.mgmt.compute.compute.v2017_03_30.models.VirtualMachineExtensionHandlerInstanceView>`
:param statuses: The resource status information.
:type statuses: list of :class:`InstanceViewStatus
<azure.mgmt.compute.compute.v2017_03_30.models.InstanceViewStatus>`
"""
_attribute_map = {
'vm_agent_version': {'key': 'vmAgentVersion', 'type': 'str'},
'extension_handlers': {'key': 'extensionHandlers', 'type': '[VirtualMachineExtensionHandlerInstanceView]'},
'statuses': {'key': 'statuses', 'type': '[InstanceViewStatus]'},
}
def __init__(self, vm_agent_version=None, extension_handlers=None, statuses=None):
self.vm_agent_version = vm_agent_version
self.extension_handlers = extension_handlers
self.statuses = statuses
|
pet1330/strands_qsr_lib
|
qsr_lib/dbg/dbg_world_qsr_trace_slicing_methods.py
|
Python
|
mit
| 4,049
| 0.004199
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, division
from qsrlib.qsrlib import QSRlib, QSRlib_Request_Message, QSRlib_Response_Message
from qsrlib_io.world_trace import Object_State, World_Trace
def print_world_trace(world_trace):
for t in world_trace.get_sorted_timestamps():
print("-t:", t)
for oname, o in world_trace.trace[t].objects.items():
print("%s\t%f\t%f\t%f\t%f\t%f\t%f" % (oname, o.x, o.y, o.z, o.xsize, o.ysize, o.xsize))
def pretty_print_world_qsr_trace(which_qsr, qsrlib_response_message):
print(which_qsr, "request was made at ", str(qsrlib_response_message.req_made_at)
+ " and received at " + str(qsrlib_respo
|
nse_message.req_received_at)
+ " and fini
|
shed at " + str(qsrlib_response_message.req_finished_at))
print("---")
print("Response is:")
for t in qsrlib_response_message.qsrs.get_sorted_timestamps():
foo = str(t) + ": "
for k, v in zip(qsrlib_response_message.qsrs.trace[t].qsrs.keys(),
qsrlib_response_message.qsrs.trace[t].qsrs.values()):
foo += str(k) + ":" + str(v.qsr) + "; "
print(foo)
def print_world_state(world_state):
for oname, o in world_state.objects.items():
print("%s\t%f\t%f\t%f\t%f\t%f\t%f" % (oname, o.x, o.y, o.z, o.xsize, o.ysize, o.xsize))
if __name__ == "__main__":
world = World_Trace()
o1 = [Object_State(name="o1", timestamp=0, x=1., y=1., xsize=5., ysize=8.),
Object_State(name="o1", timestamp=1, x=1., y=2., xsize=5., ysize=8.),
Object_State(name="o1", timestamp=2, x=1., y=3., xsize=5., ysize=8.),
Object_State(name="o1", timestamp=3, x=1., y=4., xsize=5., ysize=8.),
Object_State(name="o1", timestamp=4, x=1., y=5., xsize=5., ysize=8.)]
o2 = [Object_State(name="o2", timestamp=0, x=11., y=1., xsize=5., ysize=8.),
Object_State(name="o2", timestamp=1, x=11., y=2., xsize=5., ysize=8.),
Object_State(name="o2", timestamp=2, x=11., y=3., xsize=5., ysize=8.),
Object_State(name="o2", timestamp=3, x=11., y=4., xsize=5., ysize=8.),
Object_State(name="o2", timestamp=4, x=11., y=5., xsize=5., ysize=8.)]
world.add_object_state_series(o1)
world.add_object_state_series(o2)
#### WORLD_QSR_TRACE DBG
# which_qsr = "rcc2"
which_qsr = ["mos", "rcc2", "cardir"]
qsrlib = QSRlib()
qsrlib_request_message = QSRlib_Request_Message(which_qsr, world)
qsrlib_response_message = qsrlib.request_qsrs(qsrlib_request_message)
qsrs = qsrlib_response_message.qsrs
print(">> original")
pretty_print_world_qsr_trace(which_qsr, qsrlib_response_message)
print()
# last_state = qsrs.get_last_state(return_by_reference=True)
# t = last_state.timestamp
# last_state.qsrs["o1,o2"].qsr["rcc2"] = "c"
# foo = str(t) + ": "
# for k, v in zip(last_state.qsrs.keys(), last_state.qsrs.values()):
# foo += str(k) + ":" + str(v.qsr) + "; "
# print(foo)
# qsrs_new = qsrs.get_for_objects(["o1,o2"])
# qsrs_new = qsrs.get_for_objects(["o1"])
# qsrs_new = qsrs.get_at_timestamp_range(0, 2)
# qsrs_new = qsrs.get_for_objects_at_timestamp_range(1, 3, ["o1,o2"])
qsrs_new = qsrs.get_for_objects_at_timestamp_range(1, 3, ["o1"])
# qsrs_new = qsrs.get_for_qsrs(["mos", "rcc2"])
# qsrs_new.trace[2].qsrs["o1,o2"].qsr["rcc2"] = "c"
# qsrs_new.trace[2].qsrs["o1"].qsr["mos"] = "whatever"
qsrlib_response_message_new = QSRlib_Response_Message(qsrs_new,
qsrlib_response_message.req_made_at,
qsrlib_response_message.req_received_at,
qsrlib_response_message.req_finished_at)
print(">> new")
pretty_print_world_qsr_trace(which_qsr, qsrlib_response_message_new)
print()
print(">> original")
pretty_print_world_qsr_trace(which_qsr, qsrlib_response_message)
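    # Hedged check (an assumption about qsrlib internals, not verified here):
    # the combined slice used above should match chaining the two simpler
    # slices from the commented examples.
    chained = qsrs.get_at_timestamp_range(1, 3).get_for_objects(["o1"])
    assert chained.get_sorted_timestamps() == qsrs_new.get_sorted_timestamps()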
|
eclee25/flu-SDI-exploratory-age
|
scripts/OR_urbanmetro_v6-7-13.py
|
Python
|
mit
| 11,462
| 0.02661
|
#!/usr/bin/python
##############################################
###Python template
###Author: Elizabeth Lee
###Date: 6/9/13
###Function:
#### 1) create scatter of OR by zipcode vs. urban metro RUCC avg 2013
###Import data: zipcode_bysseas_cl2.csv
###Command Line: python
##############################################
### notes ###
### packages ###
import matplotlib
import csv
import numpy as np
import matplotlib.pyplot as plt
from pylab import *
## local packages ##
import ORgenerator_v060713 as od
### data structures ###
child1, adult1, zip3_sdi, snum_sdi = [],[],[],[] # attack rates for children and adults for total by zipcode, zip3s from sdi data, season number in sdi data
y1 = [] # odds ratios for total cases by zipcode
zipdict, rucc_bin = {},[] # dictionary of zip3 and rural-urban categorization, list of rucc 1-3 bins that correspond with order of zip3s in sdi data
cs1, cs2, cs3, cs4, cs5, cs6, cs7, cs8, cs9, cs10 = [],[],[],[],[],[],[],[],[],[] # childlist for seasons 1-10
as1, as2, as3, as4, as5, as6, as7, as8, as9, as10 = [],[],[],[],[],[],[],[],[],[] # adultlist for seasons 1-10
ys1, ys2, ys3, ys4, ys5, ys6, ys7, ys8, ys9, ys10 = [],[],[],[],[],[],[],[],[],[] # OR for seasons 1-10
rbs1, rbs2, rbs3, rbs4, rbs5, rbs6, rbs7, rbs8, rbs9, rbs10 = [],[],[],[],[],[],[],[],[],[] # rucc_mn_bin for seasons 1-10
z3s1, z3s2, z3s3, z3s4, z3s5, z3s6, z3s7, z3s8, z3s9, z3s10 = [],[],[],[],[],[],[],[],[],[] # zip3_sdi for seasons 1-10
sns1, sns2, sns3, sns4, sns5, sns6, sns7, sns8, sns9, sns10 = [],[],[],[],[],[],[],[],[],[] # season number from sdi data for dataset broken into seasons 1-10
### parameters ###
### functions ###
# create a dictionary of zip3, rural-urban categorization as key, value
def createzipdict(csvreadfile, dictname):
ct=0
for row in csvreadfile:
if ct==0:
ct+=1
continue
else:
            dictname[str(row[0])] = int(row[3])
### import data ###
zORin=open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/R_export/zipcode_bysseas_cl2.csv','r') # use to calculate OR by zip3
zOR=csv.reader(zORin, delimiter=',')
zOR1in=open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/R_export/zipcode_cl2_s1.csv','r') # use to calculate OR by zip3
zOR1=csv.reader(zOR1in, delimiter=',')
zOR2in=open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/R_export/zipcode_cl2_s2.csv','r') # use to calculate OR by zip3
zOR2=csv.reader(zOR2in, delimiter=',')
zOR3in=open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/R_export/zipcode_cl2_s3.csv','r') # use to calculate OR by zip3
zOR3=csv.reader(zOR3in, delimiter=',')
zOR4in=open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/R_export/zipcode_cl2_s4.csv','r') # use to calculate OR by zip3
zOR4=csv.reader(zOR4in, delimiter=',')
zOR5in=open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/R_export/zipcode_cl2_s5.csv','r') # use to calculate OR by zip3
zOR5=csv.reader(zOR5in, delimiter=',')
zOR6in=open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/R_export/zipcode_cl2_s6.csv','r') # use to calculate OR by zip3
zOR6=csv.reader(zOR6in, delimiter=',')
zOR7in=open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/R_export/zipcode_cl2_s7.csv','r') # use to calculate OR by zip3
zOR7=csv.reader(zOR7in, delimiter=',')
zOR8in=open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/R_export/zipcode_cl2_s8.csv','r') # use to calculate OR by zip3
zOR8=csv.reader(zOR8in, delimiter=',')
zOR9in=open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/R_export/zipcode_cl2_s9.csv','r') # use to calculate OR by zip3
zOR9=csv.reader(zOR9in, delimiter=',')
zOR10in=open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/R_export/zipcode_cl2_s10.csv','r') # use to calculate OR by zip3
zOR10=csv.reader(zOR10in, delimiter=',')
RUCCavgin=open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/Rural_Urban/R_export/zip3_RUCC2013avg_crosswalk.csv','r') # categorization of urban/rural by zip3
RUCCavg=csv.reader(RUCCavgin, delimiter=',')
### program ###
### analyze all zip3-season data together to see if there are patterns
createzipdict(RUCCavg, zipdict)
od.importer_zip3(zOR, adult1, child1, 3, 4, zip3_sdi, 2, snum_sdi, zipdict, rucc_bin)
print "rucc_binlen:", len(rucc_bin)
print "child1len:", len(child1), "adult1len:", len(adult1)
# adult1 index 101 and 104, adultlist == 0
# child1 index 2162, childlist == 0
od.ORgen(y1, child1, adult1)
print "y1len:", len(y1)
# OR vs. urban rural code (all seasons together)
rulab = ['populous urban metro area', 'small metro area', 'rural non-metro area']
xaxjitter = [x + np.random.uniform(-0.4, 0.4, 1) for x in rucc_bin]
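# note: np.random.uniform(-0.4, 0.4, 1) returns a length-1 ndarray; the scalar
# form np.random.uniform(-0.4, 0.4) would jitter identically for plotting.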
print "length x-axis jitter:",len(xaxjitter)
plt.scatter(xaxjitter, y1, marker='o', color = 'black', label= "zipcode prefix")
# plt.scatter(domsubtypeplot, y3a, marker='o', color = 'red', label= "severe cases")
# plt.scatter(domsubtypeplot, y3b, marker='o', color = 'green', label= "milder cases")
# for num, subtype, OR in zip(seasonnum, rucc_bin, y1):
# plt.annotate(num, xy = (subtype, OR), xytext = (10,0), textcoords = 'offset points')
xlab=[1,2,3]
plt.ylabel('Odds ratio of attack rate, child:adult (zip3 popn normalized)')
plt.xlabel('Urban metro categorization')
plt.legend(loc="upper right")
plt.xticks(xlab, rulab)
plt.show()
# urban areas tend to have larger ranges of ORs
# number of zip3s: 396 populous urban metro - 322 smaller urban metro - 167 rural
#### analyze ORs by season
od.importer_zip3(zOR1, as1, cs1, 3, 4, z3s1, 2, sns1, zipdict, rbs1)
print "rucc_binlen:", len(rbs1)
print "childlen:", len(cs1), "adultlen:", len(as1)
# adult1 index 101 and 104, adultlist == 0
# child1 index 2162, childlist == 0
od.ORgen(ys1, cs1, as1)
print "ylen:", len(ys1)
od.importer_zip3(zOR2, as2, cs2, 3, 4, z3s2, 2, sns2, zipdict, rbs2)
od.importer_zip3(zOR3, as3, cs3, 3, 4, z3s3, 2, sns3, zipdict, rbs3)
od.importer_zip3(zOR4, as4, cs4, 3, 4, z3s4, 2, sns4, zipdict, rbs4)
od.importer_zip3(zOR5, as5, cs5, 3, 4, z3s5, 2, sns5, zipdict, rbs5)
od.importer_zip3(zOR6, as6, cs6, 3, 4, z3s6, 2, sns6, zipdict, rbs6)
od.importer_zip3(zOR7, as7, cs7, 3, 4, z3s7, 2, sns7, zipdict, rbs7)
od.importer_zip3(zOR8, as8, cs8, 3, 4, z3s8, 2, sns8, zipdict, rbs8)
od.importer_zip3(zOR9, as9, cs9, 3, 4, z3s9, 2, sns9, zipdict, rbs9)
od.importer_zip3(zOR10, as10, cs10, 3, 4, z3s10, 2, sns10, zipdict, rbs10)
od.ORgen(ys2, cs2, as2)
od.ORgen(ys3, cs3, as3)
od.ORgen(ys4, cs4, as4)
od.ORgen(ys5, cs5, as5)
od.ORgen(ys6, cs6, as6)
od.ORgen(ys7, cs7, as7)
od.ORgen(ys8, cs8, as8)
od.ORgen(ys9, cs9, as9)
od.ORgen(ys10, cs10, as10)
# OR vs. urban rural code by season
rulab = ['populous urban metro area', 'small metro area', 'rural non-metro area']
xaxjs1 = [x + np.random.uniform(-0.4, 0.4, 1) for x in rbs1]
print "ys1:",len(ys1),"length x-axis jitter:",len(xaxjs1)
xaxjs2 = [x + np.random.uniform(-0.4, 0.4, 1) for x in rbs2]
print "ys2:",len(ys2),"length x-axis jitter:",len(xaxjs2)
xaxjs3 = [x + np.random.uniform(-0.4, 0.4, 1) for x in rbs3]
print "ys3:",len(ys3),"length x-axis jitter:",len(xaxjs3)
xaxjs4 = [x + np.random.uniform(-0.4, 0.4, 1) for x in rbs4]
print "ys4",len(ys4),"length x-axis jitter:",len(xaxjs4)
xaxjs5 = [x + np.random.uniform(-0.4, 0.4, 1) for x in rbs5]
print "ys5:",len(ys5),"length x-axis jitter:",len(xaxjs5)
xaxjs6 = [x + np.random.uniform(-0.4, 0.4, 1) for x in rbs6]
print "ys6:",len(ys6),"length x-axis jitter:",len(xaxjs6)
xaxjs7 = [x + np.random.uniform(-0.4, 0.4, 1) for x in rbs7]
print "ys7:",len(ys7),"length x-axis jitter:",len(xaxjs7)
xaxjs8 = [x + np.random.uniform(-0.4, 0.4, 1) for x in rbs8]
print "ys8:",len(ys8),"length x-axis jitter:",len(xaxjs8)
xaxjs9 = [x + np.random.uniform(-0.4, 0.4, 1) for x in rbs9]
print "ys9:",len(ys9),"length x-axis jitter:",len(xaxjs9)
xaxjs10 = [x + np.random.uniform(-0.4, 0.4, 1) for x in rbs10]
print "ys10:",len(ys10),"length x-axis jitter:",len(x
|
axjs10)
plt.scatter(xaxjs1, ys1, marker='o', color = 'grey', label= "Season 1")
plt.scatter(xaxjs2, ys2, marker='o', color = 'black', label= "Season 2")
plt.scatter(xa
|
xjs3, ys3, marker='o', color = 'red', label= "Season 3")
plt.scatter(xaxjs4, ys4, marker='o', color = 'orange', label= "Season 4")
plt.scatter(xaxjs5, ys5, marker='o', color = 'gold', label= "Season 5")
plt.scatter(xaxjs6,
|
guykisel/inline-plz
|
inlineplz/linters/coala.py
|
Python
|
isc
| 1,918
| 0.000521
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import os
import shutil
import sys
import dirtyjson as json
from ..decorators import linter
from ..parsers.base import ParserBase
@linter(
name="coala",
install=[
["pipx", "install", "--spec", "coala-bears", "coala"],
[sys.executable, "-m", "pip", "install", "-U", "coala-bears"],
],
help_cmd=["coala", "-h"],
run=["coala", "-C", "--json", "--log-json", "--limit-files", "5000"],
rundefault=["coala", "-C", "--json", "--log-json", "--limit-files", "5000"],
dotfiles=[".coafile"],
language="all",
autorun=True,
run_per_file=False,
concurrency=1,
)
class CoalaParser(ParserBase):
"""Parse json coala output."""
def install(self):
if not any(
dotfile.strip() in os.listdir(os.getcwd())
for dotfile in self.config.get("dotfiles")
):
config_dir = os.path.abspath(
os.path.join(os.path.dirname(__file__), "config")
)
dotfile_name = self.config.get("dotfiles")[0]
shutil.copyfile(
os.path.join(config_dir, dotfile_name),
os.path.join(os.getcwd(), dotfile_name),
)
def parse(self, output):
messages = set()
lint_data = [
msg
for category in json.loads(output).get("results", {}).values()
for msg in category
]
for msgdata in lint_data:
try:
|
msgbody = msgdata["message"]
                for affected in msgdata.get("affected_code", []):
                    path = affected.get("file")
                    line = affected.get("start", {}).get("line")
                    messages.add((path, line, msgbody))
except (ValueError, KeyError):
print("Invalid message: {0}".format(msgdata))
return messa
|
ges
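# Illustrative only: a minimal round-trip of parse() against a hand-built
# payload shaped like the coala --json output handled above (the ParserBase
# constructor is assumed to take no arguments).
if __name__ == "__main__":
    sample = (
        '{"results": {"default": [{"message": "line too long",'
        ' "affected_code": [{"file": "a.py", "start": {"line": 3}}]}]}}'
    )
    print(CoalaParser().parse(sample))  # expected: {("a.py", 3, "line too long")}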
|