repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
aboudreault/dnds-debian | dsc/dsctl.py | Python | gpl-2.0 | 12,699 | 0.00378 | #!/usr/bin/env python
# Copyright (C) Nicolas J. Bouliane - 2012
# dsctl - Directory Service Control
import signal
import socket
import ssl
import pprint
import sys
from dnds import *
class Connection:
sock = None
ssl_sock = None
connected = False
loggedin = False
ClientId = 0
def signal_handler(signal, frame):
pass
def dsctl_help():
print ''
print 'Usage:'
print ' status'
print ' connect <ipaddr>'
print ' login <email,password>'
print ' add-client <firstname,lastname,email,password,company,phone,country,stateProvince,city,postalCode>'
print ' add-context <unique description>'
print ' add-node <context id, unique description>'
print ' show-context'
print ' show-node <context id>'
print ' logout'
print ' disconnect'
print ' exit'
pass
def login(conn, arg):
loginInfo = arg.split(',')
if len(loginInfo) != 2:
dsctl_help()
return
if conn.connected == False:
print 'you must connect first...'
return
if conn.loggedin == True:
print 'you are already logged in...'
return
msg = DNDSMessage()
msg.setComponentByName('version', '1')
msg.setComponentByName('channel', '0')
pdu = msg.setComponentByName('pdu').getComponentByName('pdu')
dsm = pdu.setComponentByName('dsm').getComponentByName('dsm')
dsm.setComponentByName('seqNumber', '1')
dsm.setComponentByName('ackNumber', '1')
dsop = dsm.setComponentByName('dsop').getComponentByName('dsop')
req = dsop.setComponentByName('searchRequest').getComponentByName('searchRequest')
req.setComponentByName('searchtype', 'object')
obj = req.setComponentByName('object').getComponentByName('object')
client = obj.setComponentByName('client').getComponentByName('client')
client.setComponentByName('email', loginInfo[0])
client.setComponentByName('password', loginInfo[1])
# print(msg.prettyPrint())
conn.ssl_sock.write(encoder.encode(msg))
data = conn.ssl_sock.read()
substrate = data
a_msg, substrate = decoder.decode(substrate, asn1Spec=DNDSMessage())
# print(a_msg.prettyPrint())
recv_pdu = a_msg.getComponentByName('pdu')
recv_dsm = recv_pdu.getComponentByName('dsm')
recv_dsop = recv_dsm.getComponentByName('dsop')
recv_req = recv_dsop.getComponentByName('searchResponse')
recv_objs = recv_req.getComponentByName('objects')
for idx in range(len(recv_objs)):
recv_obj = recv_objs.getComponentByPosition(idx)
recv_client = recv_obj.getComponentByName('client')
recv_clientId = recv_client.getComponentByName('id')
# print "the client id is " + str(recv_clientId)
conn.ClientId = str(recv_clientId)
if conn.ClientId == '0':
print 'failed to log in...'
return
conn.loggedin = True
print 'ClientId: ' + conn.ClientId
print 'you are now logged in!'
def showNode(conn, arg):
contextId = arg
if conn.connected == False:
print 'you must connect first...'
return
if conn.loggedin == False:
print 'you are not logged in...'
return
msg = DNDSMessage()
msg.setComponentByName('version', '1')
msg.setComponentByName('channel', '0')
pdu = msg.setComponentByName('pdu').getComponentByName('pdu')
dsm = pdu.setComponentByName('dsm').getComponentByName('dsm')
dsm.setComponentByName('seqNumber', '1')
dsm.setComponentByName('ackNumber', '1')
dsop = dsm.setComponentByName('dsop').getComponentByName('dsop')
req = dsop.setComponentByName('searchRequest').getComponentByName('searchRequest')
req.setComponentByName('searchtype', 'object')
obj = req.setComponentByName('object').getComponentByName('object')
node = obj.setComponentByName('node').getComponentByName('node')
node.setComponentByName('contextId', str(contextId))
conn.ssl_sock.write(encoder.encode(msg))
data = conn.ssl_sock.read()
substrate = data
a_msg, substrate = decoder.decode(substrate, asn1Spec=DNDSMessage())
recv_pdu = a_msg.getComponentByName('pdu')
recv_dsm = recv_pdu.getComponentByName('dsm')
recv_dsop = recv_dsm.getComponentByName | ('dsop')
recv_req = recv_dsop.getComponentByName('searchResponse')
recv_objs = recv_req.getComponentByName('objects')
for idx in range(len(recv_objs)):
recv_obj = recv_objs.getComponentByPosition(idx)
recv_node = recv_obj.getCompone | ntByName('node')
recv_uuid = recv_node.getComponentByName('uuid')
recv_provcode = recv_node.getComponentByName('provCode')
recv_desc = recv_node.getComponentByName('description')
recv_ipaddress = recv_node.getComponentByName('ipaddress')
print "node uuid: " + str(recv_uuid) + ' provCode: ' + str(recv_provcode) + ' <' + recv_desc + '> ' + socket.inet_ntoa(recv_ipaddress.asOctets())
def showContext(conn):
if conn.connected == False:
print 'you must connect first...'
return
if conn.loggedin == False:
print 'you are not logged in...'
return
msg = DNDSMessage()
msg.setComponentByName('version', '1')
msg.setComponentByName('channel', '0')
pdu = msg.setComponentByName('pdu').getComponentByName('pdu')
dsm = pdu.setComponentByName('dsm').getComponentByName('dsm')
dsm.setComponentByName('seqNumber', '1')
dsm.setComponentByName('ackNumber', '1')
dsop = dsm.setComponentByName('dsop').getComponentByName('dsop')
req = dsop.setComponentByName('searchRequest').getComponentByName('searchRequest')
req.setComponentByName('searchtype', 'object')
obj = req.setComponentByName('object').getComponentByName('object')
context = obj.setComponentByName('context').getComponentByName('context')
context.setComponentByName('clientId', str(conn.ClientId))
context.setComponentByName('topology', 'mesh')
context.setComponentByName('description', 'home network1')
context.setComponentByName('network', '0')
context.setComponentByName('netmask', '0')
conn.ssl_sock.write(encoder.encode(msg))
loop = True
data = ""
while loop is True:
data += conn.ssl_sock.read()
substrate = data
try:
a_msg, substrate = decoder.decode(substrate, asn1Spec=DNDSMessage())
loop = False
except:
pass
recv_pdu = a_msg.getComponentByName('pdu')
recv_dsm = recv_pdu.getComponentByName('dsm')
recv_dsop = recv_dsm.getComponentByName('dsop')
recv_req = recv_dsop.getComponentByName('searchResponse')
recv_objs = recv_req.getComponentByName('objects')
for idx in range(len(recv_objs)):
recv_obj = recv_objs.getComponentByPosition(idx)
recv_context = recv_obj.getComponentByName('context')
recv_id = recv_context.getComponentByName('id')
recv_desc = recv_context.getComponentByName('description')
print "context id: " + str(recv_id) + ' <' + recv_desc + '>'
def addNode(conn, arg):
nodeInfo = arg.split(',')
if len(nodeInfo) != 2:
dsctl_help()
return
if conn.connected == False:
print 'you must connect first...'
return
if conn.loggedin == False:
print 'you are not logged in...'
return
msg = DNDSMessage()
msg.setComponentByName('version', '1')
msg.setComponentByName('channel', '0')
pdu = msg.setComponentByName('pdu').getComponentByName('pdu')
dsm = pdu.setComponentByName('dsm').getComponentByName('dsm')
dsm.setComponentByName('seqNumber', '1')
dsm.setComponentByName('ackNumber', '1')
dsop = dsm.setComponentByName('dsop').getComponentByName('dsop')
obj = dsop.setComponentByName('addRequest').getComponentByName('addRequest')
node = obj.setComponentByName('node').getComponentByName('node')
node.setComponentByName('contextId', str(nodeInfo[0]))
node.setComponentByName('description', str(nodeInfo[1]))
conn.ssl_sock.write(encoder.encode(msg))
def addContext(conn, arg):
ContextDescription = arg
if conn.connected == False:
print 'you must connect first...'
return
if |
apagac/cfme_tests | cfme/tests/perf/workloads/test_memory_leak.py | Python | gpl-2.0 | 4,503 | 0.003109 | import time
import pytest
from cfme.markers.env_markers.provider import providers
from cfme.utils import conf
from cfme.utils.appliance.implementations.ui import navigate_to
from cfme.utils.grafana import get_scenario_dashboard_urls
from cfme.utils.log import logger
from cfme.utils.providers import ProviderFilter
from cfme.utils.smem_memory_monitor import add_workload_quantifiers
from cfme.utils.smem_memory_monitor import SmemMemoryMonitor
from cfme.utils.workloads import get_memory_leak_scenarios
roles_memory_leak = ['automate', 'database_operations', 'ems_inventory', 'ems_metrics_collector',
'ems_metrics_coordinator', 'ems_metrics_processor', 'ems_operations', 'event', 'notifier',
'reporting', 'scheduler', 'user_interface', 'web_services']
pytestmark = [pytest.mark.provider(gen_func=providers,
filters=[ProviderFilter()],
scope="module")]
def prepare_workers(appliance):
"""Set single instance of each worker type and maximum threshold"""
view = navigate_to(appliance.server, 'Workers')
view.workers.fill({
"generic_worker_count": "1",
"cu_data_collector_worker_count": "1",
"ui_worker_count": "1",
"reporting_worker_count": "1",
"web_service_worker_count": "1",
"priority_worker_count": "1",
"cu_data_processor_worker_count": "1",
"vm_analysis_collectors_worker_count": "1",
"websocket_worker_count": "1",
"generic_worker_threshold": "1.5 GB",
"cu_data_collector_worker_threshold": "1.5 GB",
"event_monitor_worker_threshold": "10 GB",
"connection_broker_worker_threshold": "10 GB",
"reporting_worker_threshold": "1.5 GB",
"web_service_worker_threshold": "1.5 GB",
"priority_worker_threshold": "1.5 GB",
"cu_data_processor_worker_t | hreshold": "1.5 GB",
"refresh_worker_threshold": "10 GB",
"vm_analysis_collectors | _worker_threshold": "1.5 GB"
})
view.workers.save.click()
@pytest.mark.usefixtures('generate_version_files')
@pytest.mark.parametrize('scenario', get_memory_leak_scenarios())
def test_workload_memory_leak(request, scenario, appliance, provider):
"""Runs through provider based scenarios setting one worker instance and maximum threshold and
running for a set period of time. Memory Monitor creates graphs and summary info.
Polarion:
assignee: rhcf3_machine
casecomponent: CandU
initialEstimate: 1/4h
"""
from_ts = int(time.time() * 1000)
logger.debug('Scenario: {}'.format(scenario['name']))
appliance.clean_appliance()
quantifiers = {}
scenario_data = {'appliance_ip': appliance.hostname,
'appliance_name': conf.cfme_performance['appliance']['appliance_name'],
'test_dir': 'workload-memory-leak',
'test_name': 'Memory Leak',
'appliance_roles': ','.join(roles_memory_leak),
'scenario': scenario}
monitor_thread = SmemMemoryMonitor(appliance.ssh_client, scenario_data)
def cleanup_workload(scenario, from_ts, quantifiers, scenario_data):
starttime = time.time()
to_ts = int(starttime * 1000)
g_urls = get_scenario_dashboard_urls(scenario, from_ts, to_ts)
logger.debug('Started cleaning up monitoring thread.')
monitor_thread.grafana_urls = g_urls
monitor_thread.signal = False
monitor_thread.join()
add_workload_quantifiers(quantifiers, scenario_data)
timediff = time.time() - starttime
logger.info('Finished cleaning up monitoring thread in {}'.format(timediff))
request.addfinalizer(lambda: cleanup_workload(scenario, from_ts, quantifiers, scenario_data))
monitor_thread.start()
appliance.wait_for_miq_server_workers_started(poll_interval=2)
appliance.update_server_roles({role: True for role in roles_memory_leak})
prepare_workers(appliance)
provider.create()
total_time = scenario['total_time']
starttime = time.time()
elapsed_time = 0
while (elapsed_time < total_time):
elapsed_time = time.time() - starttime
time_left = total_time - elapsed_time
logger.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2), total_time))
if (time_left > 0 and time_left < 300):
time.sleep(time_left)
elif time_left > 0:
time.sleep(300)
quantifiers['Elapsed_Time'] = round(elapsed_time, 2)
logger.info('Test Ending...')
|
anhstudios/swganh | data/scripts/templates/object/mobile/shared_warren_imperial_worker_s01.py | Python | mit | 458 | 0.045852 | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Creature()
result.template = "object/mobile/shared_warren_imperial_worker_s01.iff"
result.attribute_template_id | = 9
result.stfName("npc_name","warr | en_imperial_worker")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result |
percyfal/luigi | test/cmdline_test.py | Python | apache-2.0 | 8,267 | 0.003266 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
try:
import ConfigParser
except ImportError:
import configparser as ConfigParser
import logging
import mock
import os
import subprocess
from helpers import unittest
import warnings
from luigi import six
import luigi
from luigi.mock import MockTarget
class SomeTask(luigi.Task):
n = luigi.IntParameter()
def output(self):
return MockTarget('/tmp/test_%d' % self.n)
def run(self):
f = self.output().open('w')
f.write('done')
f.close()
class AmbiguousClass(luigi.Task):
pass
class AmbiguousClass(luigi.Task):
pass
class TaskWithSameName(luigi.Task):
def run(self):
self.x = 42
class TaskWithSameName(luigi.Task):
# there should be no ambiguity
def run(self):
self.x = 43
class WriteToFile(luigi.Task):
filename = luigi.Parameter()
def output(self):
return luigi.LocalTarget(self.filename)
def run(self):
f = self.output().open('w')
print('foo', file=f)
f.close()
class FooBaseClass(luigi.Task):
x = luigi.Parameter(default='foo_base_default')
class FooSubClass(FooBaseClass):
pass
class CmdlineTest(unittest.TestCase):
def setUp(self):
MockTarget.fs.clear()
@mock.patch("logging.getLogger")
def test_cmdline_main_task_cls(self, logger):
luigi.run(['--local-scheduler', '--no-lock', '--n', '100'], main_task_cls=SomeTask)
self.assertEqual(dict(MockTarget.fs.get_all_data()), {'/tmp/test_100': b'done'})
@mock.patch("logging.getLogger")
def test_cmdline_local_scheduler(self, logger):
luigi.run(['SomeTask', '--no-lock', '--n', '101'], local_scheduler=True)
self.assertEqual(dict(MockTarget.fs.get_all_data()), {'/tmp/test_101': b'done'})
@mock.patch("logging.getLogger")
def test_cmdline_other_task(self, logger):
luigi.run(['--local-scheduler', '--no-lock', 'SomeTask', '--n', '1000'])
self.assertEqual(dict(MockTarget.fs.get_all_data()), {'/tmp/test_1000': b'done'})
@mock.patch("logging.getLogger")
def test_cmdline_ambiguous_class(self, logger):
self.assertRaises(Exception, luigi.run, ['--local-scheduler', '--no-lock', 'AmbiguousClass'])
@mock.patch("logging.getLogger")
@mock.patch("logging.StreamHandler")
def test_setup_interface_logging(self, handler, logger):
handler.return_value = mock.Mock(name="stream_handler")
with mock.patch("luigi.interface.setup_interface_logging.has_run", new=False):
luigi.interface.setup_interface_logging()
self.assertEqual([mock.call(handler.return_value)], logger.return_value.addHandler.call_args_list)
with mock.patch("luigi.interface.setup_interface_logging.has_run", new=False):
if six.PY2:
error = ConfigParser.NoSectionError
else:
error = KeyError
self.assertRaises(error, luigi.interface.setup_interface_logging, '/blah')
@mock.patch("warnings.warn")
@mock.patch("luigi.interface.setup_interface_logging")
def test_cmdline_logger(self, setup_mock, warn):
with mock.patch("luigi.interface.core") as env_params:
env_params.return_value.logging_conf_file = None
luigi.run(['SomeTask', '--n', '7', '--local-scheduler', '--no-lock'])
self.assertEqual([mock.call(None)], setup_mock.call_args_list)
with mock.patch("luigi.configuration.get_config") as getconf:
getconf.return_value.get.side_effect = ConfigParser.NoOptionError(section='foo', option='bar')
getconf.return_value.getint.return_value = 0
luigi.interface.setup_interface_logging.call_args_list = []
luigi.run(['SomeTask', '--n', '42', '--local-scheduler', '--no-lock'])
self.assertEqual([], setup_mock.call_args_list)
@mock.patch('argparse.ArgumentParser.print_usage')
def test_non_existent_class(self, print_usage):
self.assertRaises(luigi.task_register.TaskClassNotFoundException,
luigi.run, ['--local-scheduler', '--no-lock', 'XYZ'])
@mock.patch('argparse.ArgumentParser.print_usage')
def test_no_task(self, print_usage):
self.assertRaises(SystemExit, luigi.run, ['--local-scheduler', '--no-lock'])
class InvokeOverCmdlineTest(unittest.TestCase):
def _run_cmdline(self, args):
env = os.environ.copy()
env['PYTHONPATH'] = env.get('PYTHONPATH', '') + ':.:test'
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)
stdout, stderr = p.communicate() # Unfortunately subprocess.check_output is 2.7+
return p.returncode, stdout, stderr
def test_bin_luigi(self):
t = luigi.LocalTarget(is_tmp=True)
args = ['./bin/luigi', '--module', 'cmdline_test', 'WriteToFile', '--filename', t.path, '--local-scheduler', '--no-lock']
self._run_cmdline(args)
self.assertTrue(t.exists())
def test_direct_python(self):
t = luigi.LocalTarget(is_tmp=True)
args = ['python', 'test/cmdline_test.py', 'WriteToFile', '--filename', t.path, '--local-scheduler', '--no-lock']
self._run_cmdline(args)
self.assertTrue(t.exists())
def test_direct_python_help(self):
returncode, stdout, stderr = self._run_cmdline(['python', 'test/cmdline_test.py', '--help'])
self.assertTrue(stdout.find(b'--FooBaseClass-x') != -1)
self.assertFalse(stdout.find(b'--x') != -1)
def test_direct_python_help_class(self):
returncode, stdout, stderr = self._run_cmdline(['python', 'test/cmdline_test.py', 'FooBaseClass', '--help'])
self.assertTrue(stdout.find(b'--FooBaseClass-x') != -1)
self.assertTrue(stdout.find(b'--x') != -1)
| def test_bin_luigi_help(self):
returncode, stdout, stderr = self._run_cmdline(['./bin/luigi', '--module', 'cmdline_test', '--help'])
self.assertTrue(stdout.find(b'--FooBaseClass-x') != -1)
self.assertFalse(stdout.find(b'--x') != -1)
| def test_bin_luigi_help_no_module(self):
returncode, stdout, stderr = self._run_cmdline(['./bin/luigi', '--help'])
self.assertTrue(stdout.find(b'usage:') != -1)
def test_bin_luigi_no_parameters(self):
returncode, stdout, stderr = self._run_cmdline(['./bin/luigi'])
self.assertTrue(stderr.find(b'No task specified') != -1)
def test_bin_luigi_help_class(self):
returncode, stdout, stderr = self._run_cmdline(['./bin/luigi', '--module', 'cmdline_test', 'FooBaseClass', '--help'])
self.assertTrue(stdout.find(b'--FooBaseClass-x') != -1)
self.assertTrue(stdout.find(b'--x') != -1)
class NewStyleParameters822Test(unittest.TestCase):
# See https://github.com/spotify/luigi/issues/822
def test_subclasses(self):
ap = luigi.interface.ArgParseInterface()
task, = ap.parse(['--local-scheduler', '--no-lock', 'FooSubClass', '--x', 'xyz', '--FooBaseClass-x', 'xyz'])
self.assertEquals(task.x, 'xyz')
# This won't work because --FooSubClass-x doesn't exist
self.assertRaises(BaseException, ap.parse, (['--local-scheduler', '--no-lock', 'FooBaseClass', '--x', 'xyz', '--FooSubClass-x', 'xyz']))
def test_subclasses_2(self):
ap = luigi.interface.ArgParseInterface()
# https://github.com/spotify/luigi/issues/822#issuecomment-77782714
task, = ap.parse(['--local-scheduler', '--no-lock', 'FooBaseClass', '--FooBaseClass-x', 'xyz'])
self.assertEquals(task.x, |
kiwiPhrases/EITChousing | EITCcalculations_ver2.py | Python | mit | 6,048 | 0.016534 | import pandas as pd
import numpy as np
import re
data_path = "C:/Users/SpiffyApple/Documents/USC/RaphaelBostic"
#################################################################
################### load tax data ###############################
#upload tax data
tx = pd.read_csv("/".join([data_path, "metro_TY13.csv"]))
#there is some weird formatting here:
tx['cbsa'] = tx.cbsa.apply(lambda x: re.sub("=|\"", "",x))
tx.iloc[:,3:] = tx.iloc[:,3:].replace(",", "",regex=True).astype(np.int64)
tx.set_index('cbsa', inplace=True)
#################################################################
################ Read & Process EITC data #######################
##parse the EITC data:
eitc = pd.ExcelFile("/".join([data_path, "EITC Calculator-2-14.xlsx"]))
sheets = eitc.sheet_names
eitc.close()
eitc_dict = pd.read_excel("/".join([data_path, "EITC Calculator-2-14.xlsx"]), sheetname = sheets[9:17], skiprows = 14 )
eitc = pd.concat(eitc_dict)
#filter the data down a bit
eitc = eitc.iloc[:,[0,40]]
eitc.dropna(inplace=True)
eitc = eitc.loc[eitc[2014]>0,:]
eitc.reset_index(level=0, inplace=True)
eitc.reset_index(drop=True, inplace=True)
eitc['num_kids'] = eitc.level_0.str.extract("(\d)", expand=False)
eitc['married'] = eitc.level_0.str.contains("Married").astype(int)
#calculate fair share of income for housing
eitc['total_income'] = eitc['Nominal Earnings']+eitc[2014]
eitc['haus_share'] = eitc.total_income*.3
#remove "Nominal"
eitc.level_0.replace(", Nominal", "", regex=True, inplace=True)
##Map FMR data to EITC data (Variable)
#assigned bedrooms to child counts
repl_dict ={'Married, 0 Kid':'fmr1', 'Married, 1 Kid':'fmr2', 'Married, 2 Kids':'fmr2',
'Married, 3 Kids':'fmr3', 'Single, 0 Kid':'fmr1', 'Single, 1 Kid':'fmr2',
'Single, 2 Kids':'fmr2', 'Single, 3 Kids':'fmr3'}
eitc['r_type'] = eitc.level_0.replace(repl_dict)
haus_share = eitc[['level_0', 'haus_share', 'r_type', 'Nominal Earnings']]
#################################################################
################# Read & process fmr data ######################
#read in fmr data
fmr = pd.read_excel("/".join([data_path, "FY2014_4050_RevFinal.xls"]))
#drop non Metro areas:
fmr = fmr[fmr.Metro_code.str.contains("METRO")]
#extract cbsa code:
fmr['cbsa'] = fmr.Metro_code.str.extract("O(\d{4,5})[MN]", expand=False)
cbsa_chng_map = {'14060':'14010', '29140':'29200', '31100':'31080', '42060':'4220', '44600':'48260'}
fmr.cbsa.replace(cbsa_chng_map, inplace=True)
#drop duplicates based on cbsa code:
#fmr = fmr.drop_duplicates(['cbsa','Areaname'])
fmr = fmr.drop_duplicates('cbsa')
fmr.reset_index(drop=True, inplace=True)
#clean up the area names:
fmr['Areaname'] = fmr.Areaname.apply(lambda x: re.sub(" MSA| HUD Metro FMR Area", "", x))
fmr.set_index("cbsa", inplace=True)
fmr_cols = fmr.columns[fmr.columns.str.contains("fmr\d")]
#reformat fmr to annual cost of rent
fmr[fmr_cols] = fmr[fmr_cols]*12
#subset to only matching cbsa codes between tax and fmr data
common_cbsa = fmr.index.intersection(tx.index)
fmr = fmr.loc[common_cbsa]
tx = tx.loc[common_cbsa]
print("The number of CBSAs matches?:", fmr.shape[0] == tx.shape[0])
###################################################################
####################### calculations ##############################
######################################
##I. Make a vector of income bin means
######################################
min_earn = 2500
mid_earn = 37500
step = 5000
income_vect = np.linspace(min_earn,mid_earn,((mid_earn-min_earn)/step+1))
add_vect = [45000,52000]
income_vect = np.concatenate([income_vect, add_vect])
groups = haus_share.groupby(by = 'level_0' | )
def calc_haus_eitc(group, income):
details = group[group['Nominal Earnings'] == income]
if details.shape[0] > 0:
aid = fmr[details.r_type]-details.haus_share.iloc[0]
aid[aid<0] = 0
else:
aid = pd.DataFrame(np.array([np.nan]*fmr.shape[0]))
aid.index = fmr.index
#aid.columns = [group.r_type.iloc[0 | ]]
aid.columns = ['aid']
return(aid)
aid_incomes = {}
for income in income_vect:
aid_per_income = groups.apply(calc_haus_eitc, income=income)
aid_incomes[income] = aid_per_income.unstack(level=0)
one_family_aid = pd.concat(aid_incomes)
#################################################################
###################### process tax data #########################
#calculate proportions
prop_married = 1-50.2/100 #some Christian Science article (not sure if credible source)
eagi_cols = tx.columns[tx.columns.str.contains("EAGI")]
#it doesn't seem that the total eligible for eitc matches the distributional count of incomes
print("Prop accounted for in income distribution counts\n",(tx[eagi_cols].sum(axis=1)/tx.eitc13).quantile(np.linspace(0,1,5)))
#-> we must assume that the proportions hold across income distributions
eqc_cols = tx.columns[tx.columns.str.contains("EQC\d_")]
#half the 50-60 bin number
tx['EAGI50_13'] = tx['EAGI50_13']/2
#calculate proportions
chld_prop = tx[eqc_cols].div(tx.eitc13,axis=0)
m_chld_prop = chld_prop*prop_married
s_chld_prop = chld_prop - m_chld_prop
m_chld_prop.columns = m_chld_prop.columns + "_married"
s_chld_prop.columns = s_chld_prop.columns + "_single"
tx = pd.concat([tx, m_chld_prop,s_chld_prop],axis=1)
eqc_cols = tx.columns[tx.columns.str.contains('EQC\d_13_married|EQC\d_13_single', regex=True)]
#this is confusing, this is a 3D matrix with metros on y axis.
C_3D=np.einsum('ij,ik->jik',tx[eagi_cols],tx[eqc_cols])
#make into a pandas dataframe
C_2D=pd.Panel(np.rollaxis(C_3D,2)).to_frame()
C_2D.columns = one_family_aid.columns
C_2D.index = one_family_aid.index
##################################################################
############### aggregate aid and filers #########################
disaggregated =np.multiply(C_2D, one_family_aid)
total = disaggregated.sum(axis=1).sum()
|
IanMayo/january | org.eclipse.january/src/org/eclipse/january/dataset/internal/template/markers.py | Python | epl-1.0 | 12,983 | 0.003851 | ###
# *******************************************************************************
# * Copyright (c) 2011, 2016 Diamond Light Source Ltd.
# * All rights reserved. This program and the accompanying materials
# * are made available under the terms of the Eclipse Public License v1.0
# * which accompanies this distribution, and is available at
# * http://www.eclipse.org/legal/epl-v10.html
# *
# * Contributors:
# * Peter Chang - initial API and implementation and/or initial documentation
# *******************************************************************************/
###
#!/usr/bin/env python
'''
transmutation class for markers
It performs line-by-line substitutions based on markers embedded in comments.
Mark up source class with following comment markers:
// DATA_TYPE - dataset constant
// CLASS_TYPE - boxed primitive class
// PRIM_TYPE - java primitive type
// PRIM_TYPE_LONG - java primitive type (cast to long first if integer)
// GET_ELEMENT - use get element method
// FROM_OBJECT - use convert from object method
// REAL_ONLY - keep line when a real dataset
// OBJECT_UNEQUAL - use object inequality
// OBJECT_USE - use commented out code
// BOOLEAN_OMIT - omit line when boolean dataset
// BOOLEAN_USE - use commented out code
// BOOLEAN_FALSE - return false when boolean dataset
// BOOLEAN_ZERO - return zero when boolean dataset
// NAN_OMIT - omit line when not a numerical dataset
// FORMAT_STRING - format string for getString method
// DEFAULT_VAL - default value for expanded dataset
// INT_EXCEPTION - surround with try/catch for integer arithmetic exception
// INT_USE - use commented out code for integer types
// ADD_CAST - add a cast to primitive type
// OMIT_SAME_CAST - omit a cast to same type
// OMIT_REAL_CAST - omit a cast to real type
// OMIT_CAST_INT - omit a cast for int type
// OMIT_UPCAST - omit a cast to same type
// IGNORE_CLASS - ignored dataset class used in line
// GEN_COMMENT - replace this with a message about generated class
// BCAST_WITH_CAST - replace Double with Long if is not real with cast if necessary
@SuppressWarnings("cast")
'''
class transmutate(object):
def __init__(self, scriptfile, srcclass, source, dstclass, destination, disreal=True,
disbool=False, disobj=False, isatomic=True):
'''
scriptfile
srcclass
source
dstclass
destination
disreal indicates whether destination is a real dataset
disbool indicates whether destination is a boolean dataset
disobj indicates whether destination is an object type-dataset
isatomic indicates whether dataset is atomic or compound
source and destination are lists of strings which describe dtype,
Java boxed primitive class, Java primitive type, getElement abstract method,
Object converter toReal, string format, default expansion value
(from class constant)
'''
self.sdsclass = srcclass
self.ddsclass = dstclass
self.commentline = "// This is generated from %s.java by %s" % (srcclass, scriptfile)
if len(source) != len(destination):
raise ValueErr | or, "length of lists should be the same"
(self.sdtype, self.spclass, self.sprim | , self.sgetel,
self.sconv, self.sform, self.sdef) = source
(self.ddtype, self.dpclass, self.dprim, self.dgetel,
self.dconv, self.dform, self.ddef) = destination
self.dcast = "(" + self.dprim + ") "
self.Sprim = self.sprim.capitalize()
self.Dprim = self.dprim.capitalize()
if (self.ddtype.startswith("INT") or self.ddtype.startswith("ARRAYINT")) and self.dprim is not "long":
self.dprimlong = self.dcast + "(long) "
else:
self.dprimlong = self.dcast
self.isreal = disreal
self.isbool = disbool
self.isobj = disobj
self.isatomic = isatomic
if self.isbool:
self.isreal = False
if not self.isatomic: # make compound dataset types
self.scdtype = "ARRAY" + self.sdtype
self.dcdtype = "ARRAY" + self.ddtype
processors = [("// DATA_TYPE", self.data),
("// CLASS_TYPE", self.jpclass),
("// PRIM_TYPE", self.primitive),
("// ADD_CAST", self.addcast),
("// PRIM_TYPE_LONG", self.primitivelong),
("// GET_ELEMENT", self.getelement),
("// GET_ELEMENT_WITH_CAST", self.getelementcast),
("// FROM_OBJECT", self.fromobj),
("// REAL_ONLY", self.unrealomit),
("// OBJECT_UNEQUAL", self.unequal),
("// OBJECT_USE", self.objuse),
("// BOOLEAN_OMIT", self.boolomit),
("// BOOLEAN_USE", self.booluse),
("// BOOLEAN_FALSE", self.boolfalse),
("// BOOLEAN_ZERO", self.boolzero),
("// NAN_OMIT", self.nanomit),
("// FORMAT_STRING", self.string),
("// INT_EXCEPTION", self.intexception),
("// INT_OMIT", self.intomit),
("// INT_USE", self.intuse),
("// OMIT_SAME_CAST", self.omitcast),
("// OMIT_REAL_CAST", self.omitrealcast),
("// OMIT_CAST_INT", self.omitcastint),
("// OMIT_UPCAST", self.omitupcast),
("// DEFAULT_VAL", self.defval),
("// BCAST_WITH_CAST", self.broadcast),
("@SuppressWarnings(\"cast\")", self.omit),
(srcclass, self.jclass)]
self.icasts = [ "(byte) ", "(short) ", "(int) ", "(long) "]
self.rcasts = [ "(float) ", "(double) "]
# also // IGNORE_CLASS
# print "prim |", self.dprim, "| conv |", self.dconv, "| cast |", self.dcast
# if self.dprim in self.dconv:
# print 'found primitive matches cast'
self.plist = [t[0] for t in processors]
self.processors = dict(processors)
# Java identifier
# starts with _, $ and unicode letter and comprises Unicode letters and digits
import re
separator = re.compile(r'[\s(]')
def data(self, line):
'''
dataset type
'''
l = line.replace(self.sdtype, self.ddtype)
if not self.isatomic:
l = l.replace(self.scdtype, self.dcdtype)
return l
def jclass(self, line):
'''
dataset name is also used as Java class name
'''
return line.replace(self.sdsclass, self.ddsclass)
def jpclass(self, line):
'''
Java class name for boxed primitive
'''
if self.isobj and 'valueOf' in line:
l = line.replace(self.spclass, '')
l = l.replace('.valueOf(', '')
l = l.replace(');', ';')
return l
return line.replace(self.spclass, self.dpclass)
def primitive(self, line):
'''
java primitive type is an element type
'''
if line.find(self.sprim) >= 0:
return line.replace(self.sprim, self.dprim)
if line.find(self.Sprim) >= 0:
return line.replace(self.Sprim, self.Dprim)
return line
def primitivelong(self, line):
return line.replace(self.dcast, self.dprimlong)
def getelement(self, line):
return line.replace(self.sgetel, self.dgetel)
def getelementcast(self, line):
l = line.replace(self.sgetel, self.dgetel)
if not self.isobj and self.dprim in self.dconv:
l = self.addcastmethod(l, self.dgetel)
return l
def addcast(self, line):
l = line
for t in [' = ', ' += ', ' -= ', ' *= ', ' /= ', ' %= ']:
l = l.replace(t, t + self.dcast)
return l
# t.rfind(separator.split(t)[-1]
def addcastmethod(self, line, method):
# find first part of identifier
bits = line.split(method)
prefix = bits[0][:-1] # miss out dot
l = transmutate.separator.split(prefix)
pind = prefix.rfind(l[-1])
if pind < 0:
raise ValueError, 'Cannot find an identifier'
return ''.join(prefix[:pind]) + self.dcast + ''.join(prefix[pind:]) + '.' + method + ''. |
akosyakov/intellij-community | python/testData/joinLines/ListComprehension-after.py | Python | apache-2.0 | 51 | 0.019608 | rec | ords = [select.query.deco | de(r) for r in records] |
vcatalano/py-authorize | tests/test_live_customer.py | Python | mit | 3,018 | 0.000331 | import random
from authorize import Customer, Transaction
from authorize import AuthorizeResponseError
from datetime import date
from nose.plugins.attrib import attr
from unittest import TestCase
FULL_CUSTOMER = {
'email': 'vincent@vincentcatalano.com',
'description': 'Cool web developer guy',
'customer_type': 'individual',
'billing': {
'first_name': 'Rob',
'last_name': 'Oteron',
'company': 'Robotron Studios',
'address': '101 Computer Street',
'city': 'Tucson',
'state': 'AZ',
'zip': '85704',
'country': 'US',
'phone_number': '520-123-4567',
'fax_number': '520-456-7890',
},
'bank_account': {
'routing_number': '322271627',
'account_number': '00987467838473',
'name_on_account': 'Rob Otron',
'bank_name': 'Evil Bank Co.',
'echeck_type': 'CCD'
},
'shipping': {
'first_name': 'Rob',
'last_name': 'Oteron',
'company': 'Robotron Studios',
'address': '101 Computer Street',
'city': 'Tucson',
'state': 'AZ',
'zip': '85704',
'country': 'US',
'phone_number': '520-123-4567',
'fax_number': '520-456-7890',
}
}
CUSTOMER_WITH_CARD = {
'email': 'vincent@vincentcatalano.com',
'description': 'Cool web developer guy',
'credit_card': {
'card_number': '4111111111111111',
'expiration_date': '04/{0}'.format(date.today().year + 1),
'card_code': '456',
},
}
@attr('live_tests')
class CustomerTests(TestCase):
def test_live_customer(self):
# Create customers
result = Customer.create()
Customer.create(FULL_CUSTOMER)
Customer.create(CUSTOMER_WITH_CARD)
# Read customer information. This returns the payment profile IDs
# address IDs for the user
customer_id = result.customer_id
Customer.details(customer_id)
# Update customer information
Customer.update(customer_id, {
'email': 'vincent@test.com',
'description': 'Cool web developer guy'
})
# Delete customer informati | on
Customer.delete(customer_id)
self.assertRaises(AuthorizeResponseError, Customer.delete, customer_id)
Customer.list()
def test_live_customer_from_transaction(self):
INVALID_TRANS_ID = '123'
self.assertRaises(AuthorizeResponseError, Customer.from_transaction, INVALID_TRANS_ID)
# Create the transaction
transaction | = CUSTOMER_WITH_CARD.copy()
transaction['amount'] = random.randrange(100, 100000) / 100.0
result = Transaction.auth(transaction)
trans_id = result.transaction_response.trans_id
# Create the customer from the above transaction
result = Customer.from_transaction(trans_id)
customer_id = result.customer_id
result = Customer.details(customer_id)
self.assertEquals(transaction['email'], result.profile.email)
|
nagyistoce/devide | modules/vtk_basic/vtkRuledSurfaceFilter.py | Python | bsd-3-clause | 497 | 0.002012 | # class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits | .vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class vtkRuledSurfaceFilter(SimpleVTKClassModuleBase):
def __init__(self, module_manager):
SimpleVTKClassModuleBase.__init__(
self, module_manager,
vtk.vtkRuledSurfaceFilter(), 'Processing.',
('vtkPolyData',), ('vtkPolyData',),
replaceDoc=True,
inputFunctions=None, ou | tputFunctions=None)
|
hall1467/wikidata_usage_tracking | python_analysis_scripts/revision_comment_word_extractor.py | Python | mit | 2,427 | 0.00412 | """
returns tsv of word frequencies in revision comments
Usage:
revision_comment_word_extractor (-h|--help)
revision_comment_word_extractor <input> <output>
[--debug]
[--verbose]
Options:
-h, --help This help message is printed
<input> Path to file to process.
<output> Where revisions results
will be written
--debug Print debug logging to stderr
--verbose Print dots and stuff to stderr
"""
import docopt
import sys
import logging
import operator
from collections import defaultdict
import re
import mysqltsv
REMOVED_COMMENT_RE = re.compile(r'^\/\*.*.\*\/')
PUNCTUATION_RE = re.compile(r'\:|\(|\)|\.|\,|\-')
logger = logging.getLogger(__name__)
def main(argv=None):
args = docopt.docopt(__doc__)
logging.basicConfig(
level=logging.INFO if not args['--debug'] else logging.DEBUG,
format='%(asctime)s %(levelname)s:%(name)s -- %(message)s'
)
input_file = mysqltsv.Reader(open(args['<input>'], "r"), headers=False,
types=[str, int, str, str, int])
output_file = mysqltsv.Writer(open(args['<output>'], "w"))
verbose = args['--verbose']
run(input_file, output_file, verbose)
def run(input_file, output_file, verbose):
word_count = defaultdict(int)
for i, line in enumerate(input_file):
comment = line[3]
if comment != None:
comment = re.sub(REMOVED_COMMENT_RE, "", comment)
for word in comment.split(" "):
normalized_word = re.sub(PUNCTUATION_RE, "", word).lower()
word_count[normalized_word] += 1
if verbose and i % 10000 == 0 and i != 0:
sys.stderr.write("Revisions processed: {0}\n".format(i))
sys.stderr.flush()
sorted_word_count = sorted(word_count.items(), key=operator.itemgetter(1),
reverse=True)
sum_of_word_counts = 0
for i, entry in enumerate(sorted_word_count):
output_fi | le.write([entry[0], entry[1]])
sum_of_word_counts += entry[1]
if verbose and i % 10000 == 0 and i != 0:
sys.stderr.write("Words written: {0}\n".format(i))
sys.stderr.flush()
print("Total word count: {0}".format(sum_of_word_counts))
if verbose:
sys.stderr.write("Completed writing out result file\n")
sys.std | err.flush()
main()
|
asedunov/intellij-community | python/testData/formatter/blankLineBeforeFunction.py | Python | apache-2.0 | 42 | 0.02381 | class C:
| x = 1
def foo(self): pas | s |
marian42/pixelpi | modules/animation.py | Python | mit | 2,315 | 0.047084 | import os.path
import pygame.image
import time
import ConfigParser
from helpers import *
from modules import Module
class Animation(Module):
def __init__(self, screen, folder, interval = None, autoplay = True):
super(Animation, self).__init__(screen)
if folder[:-1] != '/':
folder = folder + '/'
self.folder = folder
self.screen = screen
try:
if self.is_single_file():
self.load_single()
else: self.load_frames()
if len(self.frames) == 0:
raise Exception('No frames found in animation ' + self.folder)
self.screen.pixel = self.frames[0]
except Exception:
print('Failed to load ' + folder)
raise
self.screen.update()
if interval == None:
try:
self.interval = self.load_interval()
except:
print('No interval info found.')
self.interval = 100
else: self.interval = interval
self.pos = 0
if autoplay:
self.start()
def load_frames(self):
self.frames = []
i = 0
while os.path.isfile(self.folder + str(i) + '.bmp'):
try:
bmp = pygame.image.load(self.folder + str(i) + '.bmp')
except Exception:
print('Error loading ' + str(i) + '.bmp from ' + self.folder)
raise
pixel_array = pygame.PixelArray(bmp)
frame = [[pixel_array[x, y] for y in range(16)] for x in range(16)]
self.frames.append(frame)
i += 1
def is_single_file(self):
return os.path.isfile(self.folder + '0.bmp') and not os.path.isfile(self.folder + '1.bmp')
| def load_single(self):
self.frames = []
bmp = pygame.image.load(self.folder + '0.bmp')
framecount = bmp.get_height() / 16
pixel_array = pygame.PixelArray(bmp)
for index in range(framecount):
frame = [[pixel_array[x, y + 16 * index] for y in range(16)] for x in range(16)]
self.frames.append(frame)
def load_interval(self):
cfg = ConfigParser.ConfigParser()
cfg.read(self.folder + 'config.ini')
return cfg.getint('animation', 'hold')
def tick(self):
self.pos += 1
| if self.pos >= len(self.frames):
self.pos = 0
self.screen.pixel = self.frames[self.pos]
self.screen.update()
time.sleep(self.interval / 1000.0)
def on_start(self):
print('Starting ' + self.folder)
def play_once(self):
for frame in self.frames:
self.screen.pixel = frame
self.screen.update()
time.sleep(self.interval / 1000.0) |
hickeroar/scentamint | scentamint/__init__.py | Python | mit | 1,087 | 0 | """
The MIT License (MIT)
Copyright (c) 2015 Ryan Vennell
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY | ,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WH | ETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
|
HenryHu/pybbs | xmppauth.py | Python | bsd-2-clause | 2,010 | 0.002985 | from UserManager import UserManager
im | port Session
from Log import Log
import sasl
class XMPPAuth(sasl.auth.Authenticator):
"""To authenticate XMPP users.
Plan to support 2 methods:
PLAIN: just username & password
X-BBS-OAUTH: use OAuth token
"""
de | f __init__(self, service_type, host, service_name):
self._service_type = service_type
self._host = host
self._service_name = service_name
self._username = None
def service_type(self):
return self._service_type
def host(self):
return self._host
def service_name(self):
return self._service_name
def username(self):
return self._username
def password(self):
raise NotImplementedError
def get_password(self):
raise NotImplementedError
def verify_token(self, token):
"""Verify token"""
try:
result = Session.SessionManager.CheckSession(token)
if result is not None:
self._username = result
else:
Log.warn("XMPPAuth: fail to verify session")
return result is not None
except Exception as e:
Log.warn("XMPPAuth: exception in CheckSession: %r" % e)
return False
def verify_password(self, authorize, username, passwd):
"""Verify password"""
if (authorize and username != authorize):
Log.warn("XMPPAuth: user %s does not match authorize %s" % (username, authorize))
return False
username = username.encode("gbk")
# print "trying to auth %s pass %s" % (user, passwd)
user = UserManager.LoadUser(username)
if (user == None):
Log.warn("XMPPAuth: user not exist: %s" % username)
return False
if (user.Authorize(passwd)):
# print "OK"
return True
Log.warn("XMPPAuth: user %s auth failed!" % username)
# print "Wrong PW"
return False
|
liuluheng/codereading | learn-cmus/scmus/.ycm_extra_conf.py | Python | mit | 5,982 | 0.025577 | # This file is NOT licensed under the GPLv3, which is the license for the rest
# of YouCompleteMe.
#
# Here's the license text for this file:
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN | AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please refer to <http://unlicense.org/>
import os
import ycm_core
# These are the compilation flags that will be used in case there's no
# compilation database set (by default, one is not set).
# CHANGE THIS LIST OF FLAGS. YES, THIS | IS THE DROID YOU HAVE BEEN LOOKING FOR.
flags = [
'-DHAVE_CONFIG',
'-DREALTIME_SCHEDULING',
'-DVERSION="0.1"',
'-D_REENTRANT'
'-Wall',
'-Wextra',
#'-Werror',
#'-Wc++98-compat',
'-Wno-long-long',
'-Wno-variadic-macros',
'-fexceptions',
#'-stdlib=libc++',
# THIS IS IMPORTANT! Without a "-std=<something>" flag, clang won't know which
# language to use when compiling headers. So it will guess. Badly. So C++
# headers will be compiled as C headers. You don't want that so ALWAYS specify
# a "-std=<something>".
# For a C project, you would set this to something like 'c99' instead of
# 'c++11'.
#'-std=c++11',
# ...and the same thing goes for the magic -x option which specifies the
# language that the files to be compiled are written in. This is mostly
# relevant for c++ headers.
# For a C project, you would set this to 'c' instead of 'c++'.
'-x',
'c',
'-I',
'.',
'-isystem',
'/usr/include',
'-isystem',
'/usr/local/include',
'-isystem',
'/Library/Developer/CommandLineTools/usr/include',
#'-isystem',
#'/Library/Developer/CommandLineTools/usr/bin/../lib/c++/v1',
]
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
compilation_database_folder = ''
if os.path.exists( compilation_database_folder ):
database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
database = None
SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
def DirectoryOfThisScript():
return os.path.dirname( os.path.abspath( __file__ ) )
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
if not working_directory:
return list( flags )
new_flags = []
make_next_absolute = False
path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
for flag in flags:
new_flag = flag
if make_next_absolute:
make_next_absolute = False
if not flag.startswith( '/' ):
new_flag = os.path.join( working_directory, flag )
for path_flag in path_flags:
if flag == path_flag:
make_next_absolute = True
break
if flag.startswith( path_flag ):
path = flag[ len( path_flag ): ]
new_flag = path_flag + os.path.join( working_directory, path )
break
if new_flag:
new_flags.append( new_flag )
return new_flags
def IsHeaderFile( filename ):
extension = os.path.splitext( filename )[ 1 ]
return extension in [ '.h', '.hxx', '.hpp', '.hh' ]
def GetCompilationInfoForFile( filename ):
# The compilation_commands.json file generated by CMake does not have entries
# for header files. So we do our best by asking the db for flags for a
# corresponding source file, if any. If one exists, the flags for that file
# should be good enough.
if IsHeaderFile( filename ):
basename = os.path.splitext( filename )[ 0 ]
for extension in SOURCE_EXTENSIONS:
replacement_file = basename + extension
if os.path.exists( replacement_file ):
compilation_info = database.GetCompilationInfoForFile(
replacement_file )
if compilation_info.compiler_flags_:
return compilation_info
return None
return database.GetCompilationInfoForFile( filename )
def FlagsForFile( filename, **kwargs ):
if database:
# Bear in mind that compilation_info.compiler_flags_ does NOT return a
# python list, but a "list-like" StringVec object
compilation_info = GetCompilationInfoForFile( filename )
if not compilation_info:
return None
final_flags = MakeRelativePathsInFlagsAbsolute(
compilation_info.compiler_flags_,
compilation_info.compiler_working_dir_ )
# NOTE: This is just for YouCompleteMe; it's highly likely that your project
# does NOT need to remove the stdlib flag. DO NOT USE THIS IN YOUR
# ycm_extra_conf IF YOU'RE NOT 100% SURE YOU NEED IT.
#try:
# final_flags.remove( '-stdlib=libc++' )
#except ValueError:
# pass
else:
relative_to = DirectoryOfThisScript()
final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to )
return {
'flags': final_flags,
'do_cache': True
}
|
reza-arjmandi/rpi-course | session3/Interrupt.py | Python | mit | 560 | 0.008929 | ######################################################################
# Interrupt.py
#
# This program enable interrupt on gpio pin 18 for read switch
# button
############################################################### | #######
import RPi.GPIO as GPIO
import time
def myCallbac | k(channel):
print("You pressed button")
GPIO.setmode(GPIO.BCM)
GPIO.setup(18, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.add_event_detect(18, GPIO.FALLING, callback=myCallback, bouncetime=200)
i = 0
while(True):
i = i + 1
print(i)
time.sleep(0.2)
|
henry0312/LightGBM | examples/python-guide/plot_example.py | Python | mit | 2,004 | 0.001996 | # coding: utf-8
from pathlib import Path
import pandas as pd
import lightgbm as lgb
if lgb.compat.MATPLOTLIB_INSTALLED:
import matplotlib.pyplot as plt
else:
raise ImportError('You need to install matplotlib and restart your session for plot_example.py.')
print('Loading data...')
# load or create your dataset
regression_example_dir = Path(__file__).absolute().parents[1] / 'regression'
df_train = pd.read_csv(str(regression_example_dir / 'regression.train'), header=None, sep='\t')
df_test = pd.read_csv(str(regression_example_dir / 'regression.test'), header=None, sep='\t')
y_train = df_train[0]
y_test = df_test[0]
X_train = df_train.drop(0, axis=1)
X_test = df_test.drop(0, axis=1)
# create dataset for lightgbm
lgb_train = lgb.Dataset(X_train, y_train)
lgb_test = lgb.Dataset(X_test, y_test | , reference=lgb_train)
# specify your configurations as a dict
params = {
'num_leaves': 5,
'metric': ('l1', 'l2'),
'verbose': 0
}
evals_result = {} # to record eval results for plotting
print('Starting training...')
# train
gbm = lgb.train(
params,
lgb_train,
num_boost_round=100,
valid_sets=[lgb_train, lgb_test],
feature_name=[f'f{i + 1}' for i in range(X_train.shape[-1])],
cate | gorical_feature=[21],
callbacks=[
lgb.log_evaluation(10),
lgb.record_evaluation(evals_result)
]
)
print('Plotting metrics recorded during training...')
ax = lgb.plot_metric(evals_result, metric='l1')
plt.show()
print('Plotting feature importances...')
ax = lgb.plot_importance(gbm, max_num_features=10)
plt.show()
print('Plotting split value histogram...')
ax = lgb.plot_split_value_histogram(gbm, feature='f26', bins='auto')
plt.show()
print('Plotting 54th tree...') # one tree use categorical feature to split
ax = lgb.plot_tree(gbm, tree_index=53, figsize=(15, 15), show_info=['split_gain'])
plt.show()
print('Plotting 54th tree with graphviz...')
graph = lgb.create_tree_digraph(gbm, tree_index=53, name='Tree54')
graph.render(view=True)
|
vinaypost/multiuploader | multiuploader/utils.py | Python | mit | 2,052 | 0.002437 | from __future__ import unicode_literals
import logging
import mimetypes
import os
from wsgiref.util import FileWrapper
from django.conf import settings
from django.http import HttpResponse
try:
from urllib import quote
except ImportError as ie:
from urllib.parse import quote
log = logging
# Getting files here
def format_file_extensions(extensions):
return ".(%s)$" % "|".join(extensions)
class FileResponse(HttpResponse):
def __init__(self, request, filepath, filename=None, status=None):
if settings.DEBUG:
wrapper = FileWrapper(file(filepath, 'rb'))
super(FileResponse, self).__init__(wrapper, status=status)
else:
super(FileResponse, self).__init__(status=status)
self['X-Accel-Redirect'] = filepath
self['XSendfile'] = filepath
if not filename:
filename = os.path.basename(filepath)
type, encoding = mimetypes.guess_type(filepath)
if type is None:
type = 'application/octet-stream'
self['Content-Ty | pe'] = type
self['Content-Length'] = os.path.getsize(filepath)
if encoding is not None:
self['Content-Encoding'] = encoding
# To inspect details | for the below code, see http://greenbytes.de/tech/tc2231/
if u'WebKit' in request.META['HTTP_USER_AGENT']:
# Safari 3.0 and Chrome 2.0 accepts UTF-8 encoded string directly.
filename_header = 'filename={}'.format(filename.encode('utf-8'))
elif u'MSIE' in request.META['HTTP_USER_AGENT']:
# IE does not support internationalized filename at all.
# It can only recognize internationalized URL, so we do the trick via routing rules.
filename_header = ''
else:
# For others like Firefox, we follow RFC2231 (encoding extension in HTTP headers).
filename_header = 'filename*=UTF-8\'\'{}'.format(quote(filename.encode('utf-8')))
self['Content-Disposition'] = 'attachment; ' + filename_header
|
cngo-github/nupic | tests/unit/nupic/algorithms/anomaly_test.py | Python | agpl-3.0 | 5,644 | 0.007619 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Tests for anomaly-related algorithms."""
import unittest
from numpy import array
import pickle
from nupic.algorithms import anomaly
from nupic.algorithms.anomaly import Anomaly
class AnomalyTest(unittest.TestCase):
"""Tests for anomaly score functions and classes."""
def testComputeRawAnomalyScoreNoActiveOrPredicted(self):
score = anomaly.computeRawAnomalyScore(array([]), array([]))
self.assertAlmostEqual(score, 0.0)
def testComputeRawAnomalyScoreNoActive(self):
score = anomaly.computeRawAnomalyScore(array([]), array([3, 5]))
self.assertAlmostEqual(score, 0.0)
def testComputeRawAnomalyScorePerfectMatch(self):
score = anomaly.computeRawAnomalyScore(array([3, 5, 7]), array([3, 5, 7]))
self.assertAlmostEqual(score, 0.0)
def testComputeRawAnomalyScoreNoMatch(self):
score = anomaly.computeRawAnomalyScore(array([2, 4, 6]), array([3, 5, 7]))
self.assertAlmostEqual(score, 1.0)
def testComputeRawAnomalyScorePartialMatch(self):
score = anomaly.computeRawAnomalyScore(array([2, 3, 6]), array([3, 5, 7]))
self.assertAlmostEqual(score, 2.0 / 3.0)
def testComputeAnomalyScoreNoActiveOrPredicted(self):
anomalyComputer = anomaly.Anomaly()
score = anomalyComputer.compute(array([]), array([]))
self.assertAlmostEqual(score, 0.0)
def testComputeAnomalyScoreNoActive(self):
anomalyComputer = anomaly.Anomaly()
score = anomalyComputer.compute(array([]), array([3, 5]))
self.assertAlmostEqual(score, 0.0)
def testComputeAnomalyScorePerfectMatch(self):
anomalyComputer = anomaly.Anomaly()
score = anomalyComputer.compute(array([3, 5, 7]), array([3, 5, 7]))
self.assertAlmostEqual(score, 0.0)
def testComputeAnomalyScoreNoMatch(self):
anomalyComputer = anomaly.Anomaly()
score = anomalyComputer.compute(array([2, 4, 6]), array([3, 5, 7]))
self.assertAlmostEqual(score, 1.0)
def testComputeAnomalyScorePartialMatch(self):
anomalyComputer = anomaly.Anomaly()
score = anomalyComputer.compute(array([2, 3, 6]), array([3, 5, 7]))
self.assertAlmostEqual(score, 2.0 / 3.0)
def testAnomalyCumulative(self):
"""Test cumulative anomaly scores."""
anomalyComputer = anomaly.Anomaly(slidingWindowSize=3)
predicted = (array([1, 2, 6]), array([1, 2, 6]), array([1, 2, 6]),
array([1, 2, 6]), array([1, 2, 6]), array([1, 2, 6]),
array([1, 2, 6]), array([1, 2, 6]), array([1, 2, 6]))
actual = (array([1, 2, 6]), array([1, 2, | 6]), array([1, 4, 6]),
array([10, 11, 6]), array([10, 11, 12]), array([10, 11, 12]),
| array([10, 11, 12]), array([1, 2, 6]), array([1, 2, 6]))
anomalyExpected = (0.0, 0.0, 1.0/9.0, 3.0/9.0, 2.0/3.0, 8.0/9.0, 1.0,
2.0/3.0, 1.0/3.0)
for act, pred, expected in zip(actual, predicted, anomalyExpected):
score = anomalyComputer.compute(act, pred)
self.assertAlmostEqual(
score, expected, places=5,
msg="Anomaly score of %f doesn't match expected of %f" % (
score, expected))
def testComputeAnomalySelectModePure(self):
anomalyComputer = anomaly.Anomaly(mode=anomaly.Anomaly.MODE_PURE)
score = anomalyComputer.compute(array([2, 3, 6]), array([3, 5, 7]))
self.assertAlmostEqual(score, 2.0 / 3.0)
def testSerialization(self):
"""serialization using pickle"""
# instances to test
aDef = Anomaly()
aLike = Anomaly(mode=Anomaly.MODE_LIKELIHOOD)
aWeig = Anomaly(mode=Anomaly.MODE_WEIGHTED)
# test anomaly with all whistles (MovingAverage, Likelihood, ...)
aAll = Anomaly(mode=Anomaly.MODE_LIKELIHOOD, slidingWindowSize=5)
inst = [aDef, aLike, aWeig, aAll]
for a in inst:
stored = pickle.dumps(a)
restored = pickle.loads(stored)
self.assertEqual(a, restored)
def testEquals(self):
an = Anomaly()
anP = Anomaly()
self.assertEqual(an, anP, "default constructors equal")
anN = Anomaly(mode=Anomaly.MODE_LIKELIHOOD)
self.assertNotEqual(an, anN)
an = Anomaly(mode=Anomaly.MODE_LIKELIHOOD)
self.assertEqual(an, anN)
an = Anomaly(slidingWindowSize=5, mode=Anomaly.MODE_WEIGHTED, binaryAnomalyThreshold=0.9)
anP = Anomaly(slidingWindowSize=5, mode=Anomaly.MODE_WEIGHTED, binaryAnomalyThreshold=0.9)
anN = Anomaly(slidingWindowSize=4, mode=Anomaly.MODE_WEIGHTED, binaryAnomalyThreshold=0.9)
self.assertEqual(an, anP)
self.assertNotEqual(an, anN)
anN = Anomaly(slidingWindowSize=5, mode=Anomaly.MODE_WEIGHTED, binaryAnomalyThreshold=0.5)
self.assertNotEqual(an, anN)
if __name__ == "__main__":
unittest.main()
|
mrniranjan/python-scripts | reboot/check1.py | Python | gpl-2.0 | 166 | 0.018072 | from sys | import argv
script, file_name = argv
def lineprint(f):
print f.readline()
target = open(file_name)
lineprint(target)
lineprint( | target)
lineprint(target)
|
fbradyirl/home-assistant | homeassistant/components/mailgun/config_flow.py | Python | apache-2.0 | 415 | 0 | """Config flow for Mailgun | ."""
from homeassistant.helpers import config_entry | _flow
from .const import DOMAIN
config_entry_flow.register_webhook_flow(
DOMAIN,
"Mailgun Webhook",
{
"mailgun_url": "https://documentation.mailgun.com/en/latest/user_manual.html#webhooks", # noqa: E501 pylint: disable=line-too-long
"docs_url": "https://www.home-assistant.io/components/mailgun/",
},
)
|
greggian/TapdIn | django/templatetags/i18n.py | Python | apache-2.0 | 9,355 | 0.003955 | import re
from django.template import Node, Variable, VariableNode, _render_value_in_context
from django.template import TemplateSyntaxError, TokenParser, Library
from django.template import TOKEN_TEXT, TOKEN_VAR
from django.utils import translation
from django.utils.encoding import force_unicode
register = Library()
class GetAvailableLanguagesNode(Node):
def __init__(self, variable):
self.variable = variable
def render(self, context):
from django.conf import settings
context[self.variable] = [(k, translation.ugettext(v)) for k, v in settings.LANGUAGES]
return ''
class GetCurrentLanguageNode(Node):
def __init__(self, variable):
self.variable = variable
def render(self, context):
context[self.variable] = translation.get_language()
return ''
class GetCurrentLanguageBidiNode(Node):
def __init__(self, variable):
self.variable = variable
def render(self, context):
context[self.variable] = translation.get_language_bidi()
return ''
class TranslateNode(Node):
def __init__(self, value, noop):
self.value = Variable(value)
self.noop = noop
def render(self, context):
value = self.value.resolve(context)
if self.noop:
return value
else:
return _render_value_in_context(translation.ugettext(value), context)
class BlockTranslateNode(Node):
def __init__(self, extra_context, singular, plural=None, countervar=None,
counter=None):
self.extra_context = extra_context
self.singular = singular
self.plural = plural
self.countervar = countervar
self.counter = counter
def render_token_list(self, tokens):
result = []
vars = []
for token in tokens:
if token.token_type == TOKEN_TEXT:
result.append(token.contents)
elif token.token_type == TOKEN_VAR:
result.append(u'%%(%s)s' % token.contents)
vars.append(token.contents)
return ''.join(result), vars
def render(self, context):
tmp_context = {}
for var, val in self.extra_context.items():
tmp_context[var] = val.render(context)
# Update() works like a push(), so corresponding context.pop() is at
# the end of function
context.update(tmp_context)
singular, vars = self.render_token_list(self.singular)
if self.plural and self.countervar and self.counter:
count = self.counter.resolve(context)
context[self.countervar] = count
plural, vars = self.render_token_list(self.plural)
result = translation.ungettext(singular, plural, count)
else:
result = translation.ugettext(singular)
# Escape all isolated '%' before substituting in the context.
result = re.sub(u'%(?!\()', u'%%', result)
data = dict([(v, _render_value_in_context(context[v], context)) for v in vars])
context.pop()
return result % data
def do_get_available_languages(parser, token):
"""
This will store a list of available languages
in the context.
Usage::
{% get_available_languages as languages %}
{% for language in languages %}
...
{% endfor %}
This will just pull the LANGUAGES setting from
your setting file (or the default settings) and
put it into the named variable.
"""
args = token.contents.split()
if len(args) != 3 or args[1] != 'as':
raise TemplateSyntaxError, "'get_available_languages' requires 'as variable' (got %r)" % args
return GetAvailableLanguagesNode(args[2])
def do_get_current_language(parser, token):
"""
This will store the current language in the context.
Usage::
{% get_current_language as language %}
This will fetch the currently active language and
put it's value into the ``language`` context
variable.
"""
args = token.contents.split()
if len(args) != 3 or args[1] != 'as':
raise TemplateSyntaxError, "'get_current_language' requires 'as variable' (got %r)" % args
return GetCurrentLanguageNode(args[2])
def do_get_current_language_bidi(parser, token):
"""
This will store the current language layout in the context.
Usage::
{% get_current_language_bidi as bidi %}
This will fetch the currently active language's layout and
put it's value into the ``bidi`` context variable.
True indicates right-to-left layout, otherwise left-to-right
"""
args = token.contents.split()
if len(args) != 3 or args[1] != 'as':
raise TemplateSyntaxError, "'get_current_language_bidi' requires 'as variable' (got %r)" % args
return GetCurrentLanguageBidiNode(args[2])
def do_translate(parser, token):
    """
    This will mark a string for translation and will
    translate the string for the current language.

    Usage::

        {% trans "this is a test" %}

    This will mark the string for translation so it will
    be pulled out by mark-messages.py into the .po files
    and will run the string through the translation engine.

    There is a second form::

        {% trans "this is a test" noop %}

    This will only mark for translation, but will return
    the string unchanged. Use it when you need to store
    values into forms that should be translated later on.

    You can use variables instead of constant strings
    to translate stuff you marked somewhere else::

        {% trans variable %}

    This will just try to translate the contents of
    the variable ``variable``. Make sure that the string
    in there is something that is in the .po file.
    """
    class TranslateParser(TokenParser):
        def top(self):
            value = self.value()
            # NOTE: source was corrupted here ("i | f" / "tok | en" split by an
            # extraction artifact); reconstructed to the obvious tokens.
            if self.more():
                if self.tag() == 'noop':
                    noop = True
                else:
                    # py2/py3-compatible raise form.
                    raise TemplateSyntaxError("only option for 'trans' is 'noop'")
            else:
                noop = False
            return (value, noop)
    value, noop = TranslateParser(token.contents).top()
    return TranslateNode(value, noop)
def do_block_translate(parser, token):
"""
This will translate a block of text with parameters.
Usage::
{% blocktrans with foo|filter as bar and baz|filter as boo %}
This is {{ bar }} and {{ boo }}.
{% endblocktrans %}
Additionally, this supports pluralization::
{% blocktrans count var|length as count %}
There is {{ count }} object.
{% plural %}
There are {{ count }} objects.
{% endblocktrans %}
This is much like ngettext, only in template syntax.
"""
class BlockTranslateParser(TokenParser):
def top(self):
countervar = None
counter = None
extra_context = {}
while self.more():
tag = self.tag()
if tag == 'with' or tag == 'and':
value = self.value()
if self.tag() != 'as':
raise TemplateSyntaxError, "variable bindings in 'blocktrans' must be 'with value as variable'"
extra_context[self.tag()] = VariableNode(
parser.compile_filter(value))
elif tag == 'count':
counter = parser.compile_filter(self.value())
if self.tag() != 'as':
raise TemplateSyntaxError, "counter specification in 'blocktrans' must be 'count value as variable'"
countervar = self.tag()
else:
raise TemplateSyntaxError, "unknown subtag %s for 'blocktrans' found" % tag
return (countervar, counter, extra_context)
countervar, counter, extra_context = BlockTranslateParser(token.contents).top()
|
tonimichel/webkitpony | docs/conf.py | Python | mit | 8,098 | 0.007162 | # -*- coding: utf-8 -*-
#
# webkitpony documentation build configuration file, created by
# sphinx-quickstart on Wed Jan 23 14:05:32 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.pngmath',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.graphviz'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'webkitpony'
copyright = u'2013, Toni Michel'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'webkitponydoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'webkitpony.tex', u'webkitpony Documentation',
u'Toni Michel', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'webkitpony', u'webkitpony Documentation',
[u'Toni Michel'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'webkitpony', u'webkitpony Documentation',
u'Toni Michel', 'webkitpony', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
|
Azure/azure-documentdb-python | test/query_tests.py | Python | mit | 9,355 | 0.004383 | import unittest
import uuid
import pytest
import azure.cosmos.cosmos_client as cosmos_client
import azure.cosmos.retry_utility as retry_utility
import test.test_config as test_config
@pytest.mark.usefixtures("teardown")
class QueryTest(unittest.TestCase):
"""Test to ensure escaping of non-ascii characters from partition key"""
config = test_config._test_config
host = config.host
masterKey = config.masterKey
connectionPolicy = config.connectionPolicy
client = cosmos_client.CosmosClient(host, {'masterKey': masterKey}, connectionPolicy)
created_db = config.create_database_if_not_exist(client)
    def test_first_and_last_slashes_trimmed_for_query_string (self):
        """A collection link wrapped in leading/trailing slashes must still resolve."""
        created_collection = self.config.create_multi_partition_collection_with_custom_pk_if_not_exist(self.client)
        document_definition = {'pk': 'pk', 'id':'myId'}
        self.client.CreateItem(created_collection['_self'], document_definition)
        query_options = {'partitionKey': 'pk'}
        # Deliberately build the link with both a leading and a trailing '/'.
        collectionLink = '/dbs/' + self.created_db['id'] + '/colls/' + created_collection['id'] + '/'
        query = 'SELECT * from c'
        query_iterable = self.client.QueryItems(collectionLink, query, query_options)
        iter_list = list(query_iterable)
        # The document stored above must be found despite the extra slashes.
        self.assertEqual(iter_list[0]['id'], 'myId')
def test_query_change_feed(self):
created_collection = self.config.create_multi_partition_collection_with_custom_pk_if_not_exist(self.client)
collection_link = created_collection['_self']
# The test targets partition #3
pkRangeId = "3"
# Read change feed with passing options
query_iterable = self.client.QueryItemsChangeFeed(collection_link)
iter_list = list(query_iterable)
self.assertEqual(len(iter_list), 0)
# Read change feed without specifying partition key range ID
options = {}
query_iterable = self.client.QueryItemsChangeFeed(collection_link, options)
iter_list = list(query_iterable)
self.assertEqual(len(iter_list), 0)
# Read change feed from current should return an empty list
options['partitionKeyRangeId'] = pkRangeId
query_iterable = self.client.QueryItemsChangeFeed(collection_link, options)
iter_list = list(query_iterable)
self.assertEqual(len(iter_list), 0)
self.assertTrue('etag' in self.client.last_response_headers)
self.assertNotEquals(self.client.last_response_headers['etag'], '')
# Read change feed from beginning should return an empty list
options['isStartFromBeginning'] = True
query_iterable = self.client.QueryItemsChangeFeed(collection_link, options)
iter_list = list(query_iterable)
self.assertEqual(len(iter_list), 0)
self.assertTrue('etag' in self.client.last_response_headers)
continuation1 = self.client.last_response_headers['etag']
self.assertNotEquals(continuation1, '')
# Create a document. Read change feed should return be able to read that document
document_definition = {'pk': 'pk', 'id':'doc1'}
self.client.CreateItem(collection_link, document_definition)
query_iterable = self.client.QueryItemsChangeFeed(collection_link, options)
iter_list = list(query_iterable)
self.assertEqual(len(iter_list), 1)
self.assertEqual(iter_list[0]['id'], 'doc1')
self.assertTrue('etag' in self.client.last_response_headers)
continuation2 = self.client.last_response_headers['etag']
self.assertNotEquals(continuation2, '')
self.assertNotEquals(continuation2, continuation1)
# Create two new documents. Verify that change feed contains the 2 new documents
# with page size 1 and page size 100
document_definition = {'pk': 'pk', 'id':'doc2'}
self.client.CreateItem(collection_link, document_definition)
document_definition = {'pk': 'pk', 'id':'doc3'}
self.client.CreateItem(collection_link, document_definition)
options['isStartFromBeginning'] = False
for pageSize in [1, 100]:
# verify iterator
options['continuation'] = continuation2
options['maxItemCount'] = pageSize
query_iterable = self.client.QueryItemsChangeFeed(collection_link, options)
it = query_iterable.__iter__()
expected_ids = 'doc2.doc3.'
actual_ids = ''
for item in it:
actual_ids += item['id'] + '.'
self.assertEqual(actual_ids, expected_ids)
# verify fetch_next_block
# the options is not copied, therefore it need to be restored
options['continuation'] = continuation2
query_iterable = self.client.QueryItemsChangeFeed(collection_link, options)
count = 0
expected_count = 2
all_fetched_res = []
while (True):
fetched_res = query_iterable.fetch_next_block()
self.assertEquals(len(fetched_res), min(pageSize, expected_count - count))
count += len(fetched_res)
all_fetched_res.extend(fetched_res)
if len(fetched_res) == 0:
break
actual_ids = ''
for item in all_fetched_res:
actual_ids += item['id'] + '.'
self.assertEqual(actual_ids, expected_ids)
# verify there's no more results
self.assertEquals(query_iterable.fetch_next_block(), [])
# verify reading change feed from the beginning
options['isStartFromBeginning'] = True
options['continuation'] = None
query_iterable = self.client.QueryItemsChangeFeed(collection_link, options)
expected_ids = ['doc1', 'doc2', 'doc3']
it = query_iterable.__iter__()
for i in range(0, len(expected_ids)):
doc = next(it)
self.assertEquals(doc['id'], expected_ids[i])
self.assertTrue('etag' in self.client.last_response_headers)
continuation3 = self.client.last_response_headers['etag']
# verify reading empty change feed
options['continuation'] = continuation3
query_iterable = self.client.QueryItemsChangeFeed(collection_link, options)
iter_list = list(query_iterable)
self.assertEqual(len(iter_list), 0)
def test_populate_query_metrics (self):
created_collection = self.config.create_multi_partition_collection_with_custom_pk_if_not_exist(self.client)
document_definition = {'pk': 'pk', 'id':'myId'}
self.client.CreateItem(created_collection['_self'], document_definition)
query_options = {'partitionKey': 'pk',
'populateQueryMetrics': True}
query = 'SELECT * from c'
query_iterable = self.client.QueryItems(created_collection['_self'], query, query_options)
iter_list = list(query_iterable)
self.assertEqual(iter_list[0]['id'], 'myId')
METRICS_HEADER_NAME = 'x-ms-documentdb-query-metrics'
self.assertTrue(METRICS_HEADER_NAME in self.client.last_response_headers)
metrics_header = self.client.last_r | esponse_headers[METRICS_HEADER_NAME]
# Validate header is well-formed: "key1=value1;key2=value2;etc"
metrics = metrics_header.split(';')
self.assertTrue(len(metrics) > 1)
self.assertTrue(all(['=' in x for x in metrics]))
def test_max_item_count_honored_in_order_by_query(self):
created_collection = self.config.create_multi_partition_collection_with_custom_pk_if_not_exist(self.client)
docs = []
for i in range(10):
documen | t_definition = {'pk': 'pk', 'id': 'myId' + str(uuid.uuid4())}
docs.append(self.client.CreateItem(created_collection['_self'], document_definition))
query = 'SELECT * from c ORDER BY c._ts'
query_options = {'enableCrossPartitionQuery': True,
'maxItemCount': 1}
query_iterable = self.client.QueryItems(created_collection['_self'], query, query_options)
#1 call to get query plans, 1 call to get pkr, 11 calls to |
mvaled/sentry | tests/snuba/search/test_backend.py | Python | bsd-3-clause | 53,904 | 0.002542 | from __future__ import absolute_import
import mock
import pytz
import pytest
from datetime import datetime, timedelta
from django.conf import settings
from django.utils import timezone
from hashlib import md5
from sentry import options
from sentry.api.issue_search import convert_query_values, IssueSearchVisitor, parse_search_query
from sentry.models import (
Environment,
Group,
GroupAssignee,
GroupBookmark,
GroupEnvironment,
GroupStatus,
GroupSubscription,
)
from sentry.search.snuba.backend import SnubaSearchBackend
from sentry.testutils import SnubaTestCase, TestCase, xfail_if_not_postgres
from sentry.testutils.helpers.datetime import iso_format
from sentry.utils.snuba import SENTRY_SNUBA_MAP, SnubaError
def date_to_query_format(date):
    """Render a datetime as the ``YYYY-MM-DDTHH:MM:SS`` string used in queries."""
    return format(date, "%Y-%m-%dT%H:%M:%S")
class SnubaSearchTest(TestCase, SnubaTestCase):
def setUp(self):
super(SnubaSearchTest, self).setUp()
self.backend = SnubaSearchBackend()
self.base_datetime = (datetime.utcnow() - timedelta(days=3)).replace(tzinfo=pytz.utc)
event1_timestamp = iso_format(self.base_datetime - timedelta(days=21))
self.event1 = self.store_event(
data={
"fingerprint": ["put-me-in-group1"],
"event_id": "a" * 32,
"message": "foo",
"environment": "production",
"tags": {"server": "example.com"},
"timestamp": event1_timestamp,
"stacktrace": {"frames": [{"module": "group1"}]},
},
project_id=self.project.id,
)
self.event3 = self.store_event(
data={
"fingerprint": ["put-me-in-group1"],
"event_id": "c" * 32,
"message": "group1",
"environment": "production",
"tags": {"server": "example.com"},
"timestamp": iso_format(self.base_datetime),
"stacktrace": {"frames": [{"module": "group1"}]},
},
project_id=self.project.id,
)
self.group1 = Group.objects.get(id=self.event1.group.id)
assert self.group1.id == self.event1.group.id
assert self.group1.id == self.event3.group.id
assert self.group1.first_seen == self.event1.datetime
assert self.group1.last_seen == self.event3.datetime
self.group1.times_seen = 5
self.group1.status = GroupStatus.UNRESOLVED
self.group1.save()
self.event2 = self.store_event(
data={
"fingerprint": ["put-me-in-group2"],
"event_id": "b" * 32,
"timestamp": iso_format(self.base_datetime - timedelta(days=20)),
"message": "bar",
"stacktrace": {"frames": [{"module": "group2"}]},
| "environment": "staging",
"tags": {"server": "example.com", "url": "http://example.com"},
},
project_id=self.project.id,
)
self.group2 = Group.objects.get(id=self.event2.group.id)
assert self.group2.id == self.event2.group.id
| assert self.group2.first_seen == self.group2.last_seen == self.event2.datetime
self.group2.status = GroupStatus.RESOLVED
self.group2.times_seen = 10
self.group2.save()
GroupBookmark.objects.create(user=self.user, group=self.group2, project=self.group2.project)
GroupAssignee.objects.create(user=self.user, group=self.group2, project=self.group2.project)
GroupSubscription.objects.create(
user=self.user, group=self.group1, project=self.group1.project, is_active=True
)
GroupSubscription.objects.create(
user=self.user, group=self.group2, project=self.group2.project, is_active=False
)
self.environments = {
"production": self.event1.get_environment(),
"staging": self.event2.get_environment(),
}
    def store_event(self, data, *args, **kwargs):
        """Store an event, then clamp the matching GroupEnvironment.first_seen.

        If the event carries an environment and the recorded
        ``GroupEnvironment.first_seen`` is later than this event's datetime,
        pull it back to the event's datetime so date-based assertions hold.
        """
        event = super(SnubaSearchTest, self).store_event(data, *args, **kwargs)
        environment_name = data.get("environment")
        if environment_name:
            # Only rows whose first_seen is strictly later than this event are updated.
            GroupEnvironment.objects.filter(
                group_id=event.group_id,
                environment__name=environment_name,
                first_seen__gt=event.datetime,
            ).update(first_seen=event.datetime)
        return event
    def set_up_multi_project(self):
        """Add a second project (same org) with one group, for multi-project queries."""
        self.project2 = self.create_project(organization=self.project.organization)
        # One "foo" event 21 days before base_datetime, in "production".
        self.event_p2 = self.store_event(
            data={
                "event_id": "a" * 32,
                "fingerprint": ["put-me-in-groupP2"],
                "timestamp": iso_format(self.base_datetime - timedelta(days=21)),
                "message": "foo",
                "stacktrace": {"frames": [{"module": "group_p2"}]},
                "tags": {"server": "example.com"},
                "environment": "production",
            },
            project_id=self.project2.id,
        )
        self.group_p2 = Group.objects.get(id=self.event_p2.group.id)
        # Backdate last_seen so the group sorts deterministically in date order.
        self.group_p2.times_seen = 6
        self.group_p2.last_seen = self.base_datetime - timedelta(days=1)
        self.group_p2.save()
def build_search_filter(self, query, projects=None, user=None, environments=None):
user = user if user is not None else self.user
projects = projects if projects is not None else [self.project]
return convert_query_values(parse_search_query(query), projects, user, environments)
def make_query(
self,
projects=None,
search_filter_query=None,
environments=None,
sort_by="date",
limit=None,
count_hits=False,
date_from=None,
date_to=None,
):
search_filters = []
projects = projects if projects is not None else [self.project]
if search_filter_query is not None:
search_filters = self.build_search_filter(
search_filter_query, projects, environments=environments
)
kwargs = {}
if limit is not None:
kwargs["limit"] = limit
return self.backend.query(
projects,
search_filters=search_filters,
environments=environments,
count_hits=count_hits,
sort_by=sort_by,
date_from=date_from,
date_to=date_to,
**kwargs
)
def test_query(self):
results = self.make_query(search_filter_query="foo")
assert set(results) == set([self.group1])
results = self.make_query(search_filter_query="bar")
assert set(results) == set([self.group2])
    def test_query_multi_project(self):
        # "foo" matches one group in each of the two projects.
        self.set_up_multi_project()
        results = self.make_query([self.project, self.project2], search_filter_query="foo")
        assert set(results) == set([self.group1, self.group_p2])
    def test_query_with_environment(self):
        # "foo" events exist only in production; "bar" exists only in staging.
        results = self.make_query(
            environments=[self.environments["production"]], search_filter_query="foo"
        )
        assert set(results) == set([self.group1])
        # "bar" must not appear when filtering on production...
        results = self.make_query(
            environments=[self.environments["production"]], search_filter_query="bar"
        )
        assert set(results) == set([])
        # ...but does appear when filtering on staging.
        results = self.make_query(
            environments=[self.environments["staging"]], search_filter_query="bar"
        )
        assert set(results) == set([self.group2])
    def test_multi_environments(self):
        # Querying both environments across both projects returns every group.
        self.set_up_multi_project()
        results = self.make_query(
            [self.project, self.project2],
            environments=[self.environments["production"], self.environments["staging"]],
        )
        assert set(results) == set([self.group1, self.group2, self.group_p2])
def test_query_with_environment_multi_project(self):
self.set_up_multi_project()
results = self.make_query(
[self.project, self.project2],
environments=[self.environments["production"]],
search_filter_query="foo",
)
|
kipe/enocean | enocean/communicators/serialcommunicator.py | Python | mit | 1,554 | 0.001287 | # -*- encoding: utf-8 -*-
from __future__ import print_function, unicode_literals, division, absolute_import
import logging
import serial
import time
from enocean.communicators.communicator import Communicator
class SerialCommunicator(Communicator):
    ''' Serial port communicator class for EnOcean radio.

    Two lines of the source were corrupted by stray '| ' extraction artifacts
    (the send-queue pop and a ``self.stop()`` call); both are reconstructed.
    '''
    logger = logging.getLogger('enocean.communicators.SerialCommunicator')

    def __init__(self, port='/dev/ttyAMA0', callback=None):
        super(SerialCommunicator, self).__init__(callback)
        # Initialize serial port (57600 baud, short timeout so the loop stays responsive).
        self.__ser = serial.Serial(port, 57600, timeout=0.1)

    def run(self):
        """Main loop: drain the transmit queue, then read and parse incoming bytes."""
        self.logger.info('SerialCommunicator started')
        while not self._stop_flag.is_set():
            # If there are messages in the transmit queue, send them all.
            while True:
                packet = self._get_from_send_queue()
                if not packet:
                    break
                try:
                    self.__ser.write(bytearray(packet.build()))
                except serial.SerialException:
                    self.stop()
            # Read chars from serial port as hex numbers
            try:
                self._buffer.extend(bytearray(self.__ser.read(16)))
            except serial.SerialException:
                self.logger.error('Serial port exception! (device disconnected or multiple access on port?)')
                self.stop()
            self.parse()
            time.sleep(0)

        self.__ser.close()
        self.logger.info('SerialCommunicator stopped')
|
rebost/django | django/conf/locale/pl/formats.py | Python | bsd-3-clause | 1,327 | 0.001508 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# Polish (pl) locale formats.
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j E Y'
TIME_FORMAT = 'H:i:s'
DATETIME_FORMAT = 'j E Y H:i:s'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'd-m-Y'
SHORT_DATETIME_FORMAT = 'd-m-Y H:i:s'
FIRST_DAY_OF_WEEK = 1  # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
    '%d.%m.%Y', '%d.%m.%y', # '25.10.2006', '25.10.06'
    '%Y-%m-%d', '%y-%m-%d', # '2006-10-25', '06-10-25'
    # '%d. %B %Y', '%d. %b. %Y', # '25. October 2006', '25. Oct. 2006'
)
TIME_INPUT_FORMATS = (
    '%H:%M:%S', # '14:30:59'
    '%H:%M', # '14:30'
)
DATETIME_INPUT_FORMATS = (
    '%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
    '%d.%m.%Y %H:%M', # '25.10.2006 14:30'
    '%d.%m.%Y', # '25.10.2006'
    '%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
    '%Y-%m-%d %H:%M', # '2006-10-25 14:30'
    '%Y-%m-%d', # '2006-10-25'
)
# Numbers: comma decimal mark, space thousands separator, grouped by threes.
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = ' '
NUMBER_GROUPING = 3
|
akvo/akvo-rsr | akvo/thumbnail_backend.py | Python | agpl-3.0 | 1,113 | 0.003594 | # -*- coding: utf-8 -*-
# Akvo RSR is covered by the GNU Affero General Public License.
# See more details in the license.txt file located at the root folder of the Akvo RSR module.
# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
from sorl.thumbnail.base import ThumbnailBackend, EXTENSIONS
from sorl.thumbnail.conf import settings
from sorl.thumbnail.helpers import tokey, serialize
class CustomThumbnailBackend(ThumbnailBackend):
    """Thumbnail backend whose cache filenames are storage-independent."""

    def _get_thumbnail_filename(self, source, geometry_string, options):
        """Computes the destination filename.

        Overridden to generate the same filename as generated with
        'django.core.files.storage.FileSystemStorage' backend, irrespective of
        what the current storage back-end is.
        """
        storage_key = tokey(source.name, 'django.core.files.storage.FileSystemStorage')
        cache_key = tokey(storage_key, geometry_string, serialize(options))
        # Shard into two two-character directory levels, like sorl does.
        subdir = '/'.join([cache_key[:2], cache_key[2:4], cache_key])
        extension = EXTENSIONS[options['format']]
        return '{0}{1}.{2}'.format(settings.THUMBNAIL_PREFIX, subdir, extension)
|
ingadhoc/odoo-legal | legal_portal/__init__.py | Python | agpl-3.0 | 274 | 0 | ################## | ############################################################
# For copyright and license notices, see __manifest__.py file in module root
# directory
##############################################################################
from . import controllers
|
RENCI/xDCIShare | hs_core/tests/api/rest/test_resource_flags.py | Python | bsd-3-clause | 3,544 | 0 | import os
import tempfile
from rest_framework import status
from hs_core.hydroshare import resource
from .base import HSRESTTestCase
class TestPublicResourceFlagsEndpoint(HSRESTTestCase):
    """REST tests for the resource flag endpoint (/hsapi/resource/<pid>/flag/).

    Two lines of the source were corrupted by stray '| ' extraction artifacts
    (a ``format='json'`` argument and one test method signature); both are
    reconstructed here.
    """

    def setUp(self):
        super(TestPublicResourceFlagsEndpoint, self).setUp()
        self.tmp_dir = tempfile.mkdtemp()

        # First resource: bare, no metadata or file.
        self.rtype = 'GenericResource'
        self.title = 'My Test resource'
        res = resource.create_resource(self.rtype,
                                       self.user,
                                       self.title)

        # Second resource: carries an abstract, a subject and a file.
        metadata_dict = [
            {'description': {'abstract': 'My test abstract'}},
            {'subject': {'value': 'sub-1'}}
        ]
        file_one = "test1.txt"
        open(file_one, "w").close()
        self.file_one = open(file_one, "r")

        self.txt_file_path = os.path.join(self.tmp_dir, 'text.txt')
        txt = open(self.txt_file_path, 'w')
        txt.write("Hello World\n")
        txt.close()

        self.rtype = 'GenericResource'
        self.title = 'My Test resource'
        res_two = resource.create_resource(self.rtype,
                                           self.user,
                                           self.title,
                                           files=(self.file_one,),
                                           metadata=metadata_dict)

        self.pid = res.short_id
        self.pid_two = res_two.short_id

        # Both resources are cleaned up by the base class teardown.
        self.resources_to_delete.append(self.pid)
        self.resources_to_delete.append(self.pid_two)

    def test_set_resource_flag_make_public(self):
        # The metadata-less resource may not be made public...
        flag_url = "/hsapi/resource/%s/flag/" % self.pid
        response = self.client.post(flag_url, {
            "t": "make_public"
        }, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

        # ...while the fully described one is accepted.
        flag_url = "/hsapi/resource/%s/flag/" % self.pid_two
        response = self.client.post(flag_url, {
            "t": "make_public"
        }, format='json')
        self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)

    def test_set_resource_flag_make_private(self):
        flag_url = "/hsapi/resource/%s/flag/" % self.pid
        response = self.client.post(flag_url, {
            "t": "make_private"
        }, format='json')
        self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)

    def test_set_resource_flag_make_discoverable(self):
        flag_url = "/hsapi/resource/%s/flag/" % self.pid_two
        response = self.client.post(flag_url, {
            "t": "make_discoverable"
        }, format='json')
        self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)

    def test_set_resource_flag_make_not_discoverable(self):
        flag_url = "/hsapi/resource/%s/flag/" % self.pid
        response = self.client.post(flag_url, {
            "t": "make_not_discoverable"
        }, format='json')
        self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)

    def test_set_resource_flag_make_not_shareable(self):
        flag_url = "/hsapi/resource/%s/flag/" % self.pid
        response = self.client.post(flag_url, {
            "t": "make_not_shareable"
        }, format='json')
        self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)

    def test_set_resource_flag_make_shareable(self):
        flag_url = "/hsapi/resource/%s/flag/" % self.pid
        response = self.client.post(flag_url, {
            "t": "make_shareable"
        }, format='json')
        self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
|
brion/gimp | plug-ins/pygimp/plug-ins/palette-sort.py | Python | gpl-3.0 | 13,495 | 0.003409 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from gimpfu import *
# little known, colorsys is part of Python's stdlib
from colorsys import rgb_to_yiq
from textwrap import dedent
from random import randint
gettext.install("gimp20-python", gimp.locale_directory, unicode=True)
AVAILABLE_CHANNELS = (_("Red"), _("Green"), _("Blue"),
_("Luma (Y)"),
_("Hue"), _("Saturation"), _("Value"),
_("Saturation (HSL)"), _("Lightness (HSL)"),
_("Index"),
_("Random"))
GRAIN_SCALE = (1.0, 1.0 , 1.0,
1.0,
360., 100., 100.,
100., 100.,
16384.,
float(0x7ffffff),
100., 256., 256.,
256., 360.,)
SELECT_ALL = 0
SELECT_SLICE = 1
SELECT_AUTOSLICE = 2
SELECT_PARTITIONED = 3
SELECTIONS = (SELECT_ALL, SELECT_SLICE, SELECT_AUTOSLICE, SELECT_PARTITIONED)
def noop(v, i):
    # Channel getter: the color tuple itself is the key; index ``i`` is ignored.
    return v
def to_hsv(v, i):
    # Channel getter: convert ``v`` via its own to_hsv() method (``i`` unused).
    return v.to_hsv()
def to_hsl(v, i):
    # Channel getter: convert ``v`` via its own to_hsl() method (``i`` unused).
    return v.to_hsl()
def to_yiq(v, i):
    # Channel getter: project the color onto YIQ; the trailing (alpha)
    # component of ``v`` is dropped, and the index ``i`` is ignored.
    r, g, b = v[:-1]
    return rgb_to_yiq(r, g, b)
def to_index(v, i):
    # Channel getter keyed on palette position: the color ``v`` is irrelevant.
    return tuple([i])
def to_random(v, i):
    # Shuffle key: a fresh random 31-bit integer, independent of color and index.
    return (randint(0, 2 ** 31 - 1),)
channel_getters = [ (noop, 0), (noop, 1), (noop, 2),
(to_yiq, 0),
(to_hsv, 0), (to_hsv, 1), (to_hsv, 2),
(to_hsl, 1), (to_hsl, 2),
(to_index, 0),
(to_random, 0)]
# Optional extra sort channels (CIELAB / LCHab) when python-colormath is
# installed; silently skipped otherwise, leaving only the builtin channels.
try:
 from colormath.color_objects import RGBColor, LabColor, LCHabColor
 AVAILABLE_CHANNELS = AVAILABLE_CHANNELS + (_("Lightness (LAB)"),
 _("A-color"), _("B-color"),
 _("Chroma (LCHab)"),
 _("Hue (LCHab)"))
 # Like the builtin getters: drop the last component of v, convert, and
 # return the colour-space tuple for indexing by channel position.
 to_lab = lambda v,i: RGBColor(*v[:-1]).convert_to('LAB').get_value_tuple()
 to_lchab = (lambda v,i:
 RGBColor(*v[:-1]).convert_to('LCHab').get_value_tuple())
 channel_getters.extend([(to_lab, 0), (to_lab, 1), (to_lab, 2),
 (to_lchab, 1), (to_lchab, 2)])
except ImportError:
 # colormath not available; no extra channels are registered.
 pass
def parse_slice(s, numcolors):
 """Parse a slice spec and return (start, nrows, length).

 Every item is optional; an omitted item is sized to exactly fit the
 items that were given. The format is START?:NROWS?,ROWLENGTH? --
 comparable to a numpy sub-array: 'start at element START, with shape
 (NROWS, LENGTH)'. Examples:
  '' or ':' selects all colors as one row
  ':4,' / ':4' a 4-row selection out of all colors (length derived)
  ':1,4' the first 4 colors
  ':,4' rows of 4 colors (nrows derived)
  ':4,4' 4 rows of 4 colors
  '4:' one row of everything from index 4 onward
  '4:,4' rows of 4 colors starting at index 4 (nrows derived)
  '4:4,4' 4 rows of 4 colors (16 total) starting at index 4
  '4' illegal (ambiguous)
 A derived count that does not divide evenly is returned negated, which
 the caller uses as an "inexact fit" flag.
 Raises ValueError for specs that cannot be understood.
 """
 spec = s.strip()

 def bad():
  raise ValueError('Slice %r not understood. Should be in format'
   ' START?:NROWS?,ROWLENGTH? eg. "0:4,16".' % spec)

 def to_int(text):
  try:
   return int(text)
  except ValueError:
   bad()

 if spec in ('', ':', ':,'):
  # Entire palette as a single row.
  return 0, 1, numcolors
 if spec.count(':') != 1:
  bad()
 head, _sep, tail = spec.partition(':')
 start = 0
 if head:
  start = to_int(head)
  numcolors -= start
 if ',' not in spec:
  # Only NROWS given: split the remaining colors evenly across rows.
  nrows = to_int(tail)
  if nrows == 0:
   bad()
  length = numcolors // nrows
  if numcolors % nrows:
   length = -length
  return start, nrows, length
 nrows_text, _sep, length_text = tail.partition(',')
 if not nrows_text:
  if not length_text:
   bad()
  length = to_int(length_text)
  if length == 0:
   bad()
  nrows = numcolors // length
  if numcolors % length:
   nrows = -nrows
 elif not length_text:
  nrows = to_int(nrows_text)
  if nrows == 0:
   bad()
  length = numcolors // nrows
  if numcolors % nrows:
   length = -length
 else:
  nrows = to_int(nrows_text)
  if nrows == 0:
   bad()
  length = to_int(length_text)
  if length == 0:
   bad()
 return start, nrows, length
def quantization_grain(channel, g):
 """Given a channel and a quantization, return the size of a quantization grain.

 A quantization of 1.0 or less means "no quantization": a tiny grain is
 returned so every distinct channel value stays separate. Otherwise the
 grain is the channel's full-scale span divided by the quantization count,
 floored at the same tiny value.
 """
 if g <= 1.0:
  return 0.00001
 return max(0.00001, GRAIN_SCALE[channel] / g)
def palette_sort(palette, selection, slice_expr, channel1, ascending1,
channel2, ascending2, quantize, pchannel, pquantize):
grain1 = quantization_grain(channel1, quantize)
grain2 = quantization_grain(channel2, quantize)
pgrain = quantization_grain(pchannel, pquantize)
#If palette is read only, work on a copy:
editable = pdb.gimp_palette_is_editable(palette)
if not editable:
palette = pdb.gimp_palette_duplicate (palette)
num_colors = pdb.gimp_palette_get_info (palette)
start, nrows, length = None, None, None
if selection == SELECT_AUTOS | LICE:
def find_index(color, startindex=0):
for i in range(startindex, num_colors):
c = pdb.gimp_palette_entry_get_color (palette, i)
if c == color:
return i
return None
def hexcolor( | c):
return "#%02x%02x%02x" % tuple(c[:-1])
fg = pdb.gimp_context_get_foreground()
bg = pdb.gimp_context_get_background()
start = find_index(fg)
end = find_index(bg)
if start is None:
raise ValueError("Couldn't find foreground color %r in palette" % list(fg))
if end is None:
raise ValueError("Couldn't find background color %r in palette" % list(bg))
if find_index(fg, start + 1):
raise ValueError('Autoslice cannot be used when more than one'
' instance of an endpoint'
' (%s) is present' % hexcolor(fg))
if find_index(bg, end + 1):
raise ValueError('Autoslice cannot be used when more than one'
' instance of an endpoint'
' (%s) is present' % hexcolor(bg))
if start > end:
end, start = start, end
length = (end - start) + 1
try:
_, nrows, _ = parse_slice(slice_expr, length)
nrows = abs(nrows)
if length % nrows:
raise ValueError('Total length %d not evenly divisible'
' by number of rows %d' % (length, nrows))
length /= nrows
except ValueError:
# bad expression is okay here, just assume one row
nrows = 1
# remaining behaviour is implemented by SELECT_SLICE 'inheritance'.
selection= SELECT_SLICE
elif selection in (SELECT_SLICE, SELECT_PARTITIONED):
start, nrows, length = parse_slice(slice_expr, num_colors)
channels_getter_1, channel_index = channel_getters[channel1]
|
Yeasayer/YoutubeVideoPage | ytdownload/apps.py | Python | mit | 134 | 0 | from __future__ impo | rt unicode_literals
from django.apps import AppConfig
class DownloadzConfig(AppConfig):
 """Django application configuration for the downloadz app."""
 # App label; reconstructed from garbled source text ('do | wnloadz').
 name = 'downloadz'
|
dnephin/compose | compose/const.py | Python | apache-2.0 | 2,457 | 0.000407 | from __future__ import absolute_import
from __future__ import unicode_literals
import sys
from .version import ComposeVersion
# Timeouts (seconds).
DEFAULT_TIMEOUT = 10
HTTP_TIMEOUT = 60
# Docker event types that refer to images rather than containers.
IMAGE_EVENTS = ['delete', 'import', 'load', 'pull', 'push', 'save', 'tag', 'untag']
IS_WINDOWS_PLATFORM = (sys.platform == "win32")
# Labels compose attaches to the resources it creates so it can find them again.
LABEL_CONTAINER_NUMBER = 'com.docker.compose.container-number'
LABEL_ONE_OFF = 'com.docker.compose.oneoff'
LABEL_PROJECT = 'com.docker.compose.project'
LABEL_SERVICE = 'com.docker.compose.service'
LABEL_NETWORK = 'com.docker.compose.network'
LABEL_VERSION = 'com.docker.compose.version'
LABEL_VOLUME = 'com.docker.compose.volume'
LABEL_CONFIG_HASH = 'com.docker.compose.config-hash'
NANOCPUS_SCALE = 1000000000
PARALLEL_LIMIT = 64
SECRETS_PATH = '/run/secrets'
# Supported compose file format versions.
COMPOSEFILE_V1 = ComposeVersion('1')
COMPOSEFILE_V2_0 = ComposeVersion('2.0')
COMPOSEFILE_V2_1 = ComposeVersion('2.1')
COMPOSEFILE_V2_2 = ComposeVersion('2.2')
COMPOSEFILE_V2_3 = ComposeVersion('2.3')
COMPOSEFILE_V2_4 = ComposeVersion('2.4')
COMPOSEFILE_V3_0 = ComposeVersion('3.0')
COMPOSEFILE_V3_1 = ComposeVersion('3.1')
COMPOSEFILE_V3_2 = ComposeVersion('3.2')
COMPOSEFILE_V3_3 = ComposeVersion('3.3')
COMPOSEFILE_V3_4 = ComposeVersion('3.4')
COMPOSEFILE_V3_5 = ComposeVersion('3.5')
# Reconstructed from garbled source text (ComposeVersion(' | 3.6')).
COMPOSEFILE_V3_6 = ComposeVersion('3.6')
COMPOSEFILE_V3_7 = ComposeVersion('3.7')
# Minimum Docker API version required by each compose file version.
API_VERSIONS = {
 COMPOSEFILE_V1: '1.21',
 COMPOSEFILE_V2_0: '1.22',
 COMPOSEFILE_V2_1: '1.24',
 COMPOSEFILE_V2_2: '1.25',
 COMPOSEFILE_V2_3: '1.30',
 COMPOSEFILE_V2_4: '1.35',
 COMPOSEFILE_V3_0: '1.25',
 COMPOSEFILE_V3_1: '1.25',
 COMPOSEFILE_V3_2: '1.25',
 # Reconstructed from garbled source text (COMPOSEFILE_V3 | _3).
 COMPOSEFILE_V3_3: '1.30',
 COMPOSEFILE_V3_4: '1.30',
 COMPOSEFILE_V3_5: '1.30',
 COMPOSEFILE_V3_6: '1.36',
 COMPOSEFILE_V3_7: '1.38',
}
# Minimum Docker Engine release that ships each required API version.
API_VERSION_TO_ENGINE_VERSION = {
 API_VERSIONS[COMPOSEFILE_V1]: '1.9.0',
 API_VERSIONS[COMPOSEFILE_V2_0]: '1.10.0',
 API_VERSIONS[COMPOSEFILE_V2_1]: '1.12.0',
 API_VERSIONS[COMPOSEFILE_V2_2]: '1.13.0',
 API_VERSIONS[COMPOSEFILE_V2_3]: '17.06.0',
 API_VERSIONS[COMPOSEFILE_V2_4]: '17.12.0',
 API_VERSIONS[COMPOSEFILE_V3_0]: '1.13.0',
 API_VERSIONS[COMPOSEFILE_V3_1]: '1.13.0',
 API_VERSIONS[COMPOSEFILE_V3_2]: '1.13.0',
 API_VERSIONS[COMPOSEFILE_V3_3]: '17.06.0',
 API_VERSIONS[COMPOSEFILE_V3_4]: '17.06.0',
 API_VERSIONS[COMPOSEFILE_V3_5]: '17.06.0',
 API_VERSIONS[COMPOSEFILE_V3_6]: '18.02.0',
 API_VERSIONS[COMPOSEFILE_V3_7]: '18.06.0',
}
|
PaddlePaddle/Paddle | python/paddle/fluid/tests/unittests/mlu/c_comm_init_op_mlu.py | Python | apache-2.0 | 2,475 | 0.000404 | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import os
import paddle.fluid.core as core
import paddle.fluid as fluid
from paddle.distributed.fleet.base.private_helper_function import wait_server_ready
import paddle
paddle.enable_static()
class TestCCommInitOp(unittest.TestCase):
 """Checks c_gen_cncl_id + c_comm_init on an explicitly selected MLU device."""
 def setUp(self):
  # Trainer topology comes from the launcher's environment variables.
  self.endpoints = os.getenv("PADDLE_TRAINER_ENDPOINTS").split(',')
  self.current_endpoint = os.getenv("PADDLE_CURRENT_ENDPOINT")
  self.nranks = len(self.endpoints)
  self.rank = self.endpoints.index(self.current_endpoint)
  self.mlu_id = int(os.getenv("FLAGS_selected_mlus"))
  self.place = fluid.MLUPlace(self.mlu_id)
  self.exe = fluid.Executor(self.place)
  # Everything except ourselves are "other" endpoints.
  self.endpoints.remove(self.current_endpoint)
  self.other_endpoints = self.endpoints
  if self.rank == 0:
   # Rank 0 generates the CNCL id; wait until the peers are reachable.
   wait_server_ready(self.other_endpoints)
 def test_specifying_devices(self):
  """Builds and runs a program that initializes the CNCL communicator."""
  program = fluid.Program()
  block = program.global_block()
  cncl_id_var = block.create_var(
   name=fluid.unique_name.generate('cncl_id'),
   persistable=True,  # reconstructed from garbled source ('persistable | =True')
   type=fluid.core.VarDesc.VarType.RAW)
  block.append_op(
   type='c_gen_cncl_id',
   inputs={},
   outputs={'Out': cncl_id_var},
   attrs={
    'rank': self.rank,  # reconstructed from garbled source ("'rank': | self.rank")
    'endpoint': self.current_endpoint,
    'other_endpoints': self.other_endpoints
   })
  block.append_op(
   type='c_comm_init',
   inputs={'X': cncl_id_var},
   outputs={},
   attrs={
    'nranks': self.nranks,
    'rank': self.rank,
    'ring_id': 0,
    'device_id': self.mlu_id
   })
  self.exe.run(program)
if __name__ == "__main__":
unittest.main()
|
tensorflow/datasets | tensorflow_datasets/image/pass_dataset/__init__.py | Python | apache-2.0 | 702 | 0 | # coding | =utf-8
# Copyright 2022 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WAR | RANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""pass_dataset."""
from tensorflow_datasets.image.pass_dataset.pass_dataset import PASS
|
op/gpsupd | gpsupd/gpsd.py | Python | bsd-3-clause | 1,207 | 0.000829 | # -*- coding: utf-8 -*-
# Copyright (c) 2010 Örjan Persson
import gps
import time
import gpsupd | .connector as connector
class GpsdConnector(connector.GpsConnector):
 """GPS connector that streams position fixes from a gpsd daemon."""
 def __init__(self, address, verbose=None):
  if verbose is None:
   verbose = False
  self.__verbose = verbose
  self.__address = address
 def connect(self):
  """Open a streaming gpsd session using the new-style watch protocol."""
  conn = gps.gps(self.__address, verbose=self.__verbose)
  conn.stream(gps.WATCH_ENABLE|gps.WATCH_NEWSTYLE)
  return conn
 def get_positions(self):
  """Generator yielding (longitude, latitude, altitude, speed) tuples.

  altitude and speed are None when gpsd reports NaN for them. The
  connection is closed when the generator is finalized.
  """
  conn = self.connect()
  try:
   while True:
    # Reconstructed from garbled source (' | if conn.waiting():'):
    # poll only when data is pending, otherwise sleep briefly to
    # avoid busy-looping.
    if conn.waiting():
     conn.poll()
    else:
     time.sleep(0.1)
     continue
    longitude = conn.fix.longitude
    latitude = conn.fix.latitude
    altitude = None
    speed = None
    if not gps.isnan(conn.fix.altitude):
     altitude = conn.fix.altitude
    if not gps.isnan(conn.fix.speed):
     speed = conn.fix.speed
    yield (longitude, latitude, altitude, speed)
  finally:
   conn.close()
|
honeybadger-io/honeybadger-python | honeybadger/tests/test_middleware.py | Python | mit | 674 | 0 | import warnings
from collections import OrderedDict
from honeybadger.plugins import default_plugin_manager
from .contrib.test_django import Django | MiddlewareTestCase
from honeybadger.middleware import DjangoHoneybadgerMiddleware
__all__ = ['MiddlewareTestCase']
class MiddlewareTestCase(DjangoMiddlewareTestCase):
 """Checks that constructing the middleware from its old import path warns."""
 def test_middleware_import_warning(self):
  # Reset plugin registrations so construction starts from a clean slate.
  default_plugin_manager._registered = OrderedDict()
  with warnings.catch_warnings(record=True) as w:
   middleware = DjangoHoneybadgerMiddleware()
   assert len(w) == 1
   # Reconstructed from garbled source text ('w[-1]. | category').
   assert issubclass(w[-1].category, FutureWarning)
   assert "moved" in str(w[-1].message)
|
bzamecnik/sms-tools | smst/utils/math.py | Python | agpl-3.0 | 692 | 0.00578 | import numpy as np
def is_power_of_two(num):
 """Return True when *num* is a positive power of two (1, 2, 4, 8, ...)."""
 # A power of two has exactly one bit set, so clearing the lowest set bit
 # via num & (num - 1) yields zero; the num > 0 guard rejects 0 and negatives.
 return num > 0 and (num & (num - 1)) == 0
def rmse(x, y):
 """Root mean square error between two numpy arrays.

 :param x: numpy array
 :param y: numpy array
 :return: RMSE(x, y)
 """
 diff = x - y
 return np.sqrt(np.mean(diff ** 2))
def to_db_magnitudes(amplitudes):
 """Convert amplitudes to magnitudes in decibels (20 * log10 |a|).

 Zero (or sub-epsilon) magnitudes are clamped to machine epsilon so the
 logarithm stays finite. The input array is not modified.
 """
 magnitudes = np.abs(amplitudes)
 eps = np.finfo(float).eps
 # Avoid log10(0) -> -inf by flooring at machine epsilon.
 magnitudes[magnitudes < eps] = eps
 return 20 * np.log10(magnitudes)
def from_db_magnitudes(magnitudes_db):
 """Inverse of to_db_magnitudes(): convert dB magnitudes to linear amplitude.

 Parameter name reconstructed from garbled source ('magn | itudes_db').
 10 ** (db * 0.05) is 10 ** (db / 20).
 """
 return 10 ** (magnitudes_db * 0.05)
|
youtube/cobalt | build/android/incremental_install/write_installer_json.py | Python | bsd-3-clause | 2,202 | 0.00772 | #!/usr/bin/env python
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Writes a .json file with the per-apk details for an incremental install."""
import argparse
import json
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir, 'gyp'))
from util import build_utils
def _ParseArgs(args):
 """Parses command-line arguments.

 Returns an argparse.Namespace with --dex-files and --native-libs already
 expanded from GN lists into Python lists.
 """
 args = build_utils.ExpandFileArgs(args)
 parser = argparse.ArgumentParser()
 parser.add_argument('--output-path',
  help='Output path for .json file.',
  required=True)
 parser.add_argument('--apk-path',
  help='Path to .apk relative to output directory.',
  required=True)
 parser.add_argument('--split',
  action='append',
  dest='split_globs',
  default=[],  # reconstructed from garbled source (' | default=[],')
  help='A glob matching the apk splits. '
  'Can be specified multiple times.')
 parser.add_argument(
  '--native-libs',
  action='append',
  help='GN-list of paths to native libraries relative to '
  'output directory. Can be repeated.')
 parser.add_argument(
  '--dex-files', help='GN-list of dex paths relative to output directory.')
 parser.add_argument('--show-proguard-warning',
  action='store_true',  # reconstructed from garbled source (" | action='store_true',")
  default=False,
  help='Print a warning about proguard being disabled')
 options = parser.parse_args(args)
 options.dex_files = build_utils.ParseGnList(options.dex_files)
 options.native_libs = build_utils.ParseGnList(options.native_libs)
 return options
def main(args):
 """Parses arguments and atomically writes the incremental-install .json file."""
 options = _ParseArgs(args)
 # Field names mirror the option names; consumed by the install scripts.
 fields = ('apk_path', 'native_libs', 'dex_files', 'show_proguard_warning',
  'split_globs')
 data = {name: getattr(options, name) for name in fields}
 with build_utils.AtomicOutput(options.output_path, mode='w+') as out_file:
  json.dump(data, out_file, indent=2, sort_keys=True)
if __name__ == '__main__':
main(sys.argv[1:])
|
yw374cornell/e-mission-server | emission/core/wrapper/smoothresults.py | Python | bsd-3-clause | 660 | 0.024242 | import logging
import datetime as pydt
import emission.core.wrapper.wrapperbase as ecwb
class Smoothresults(ecwb.WrapperBase):
 """Wrapper describing the outcome of a location-smoothing run on one section.

 All properties are write-once-read-many (WORM).
 """
 props = {"section": ecwb.WrapperBase.Access.WORM, # the section for which these points should be deleted
  # Key reconstructed from garbled source text ('"ou | tlier_algo"').
  "deleted_points": ecwb.WrapperBase.Access.WORM, # list of IDs of deleted points
  "outlier_algo": ecwb.WrapperBase.Access.WORM, # the name of the algorithm used to generate outliers
  "filtering_algo": ecwb.WrapperBase.Access.WORM} # the name of the algorithm used to filter the points
 enums = {}
 geojson = []
 nullable = []
 local_dates = []
 def _populateDependencies(self):
  # No dependent wrappers to populate.
  pass
|
auntieNeo/asterisk-testsuite | tests/manager/presence_state_changed/ami_presence_state.py | Python | gpl-2.0 | 2,320 | 0 | #!/usr/bin/env python
'''
Copyright (C) 2014, Digium, Inc.
Mark Michelson <mmichelson@digium.com>
This program is free software, distributed under the terms of
the GNU General Public License Version 2.
'''
import logging
from test | _case import TestCase
LOGGER = logging.getLogger(__name__)
# Presence states pushed to Asterisk, in the exact order events are expected
# back. Second entry reconstructed from garbled source (leading ' | ').
STATES = [
 {'status': 'unavailable', 'subtype': 'scrambled', 'message': 'breakfast'},
 {'status': 'available', 'subtype': 'fried', 'message': 'brunch'},
 {'status': 'away', 'subtype': 'poached', 'message': 'lunch'},
 {'status': 'xa', 'subtype': 'meringue', 'message': 'snack'},
 {'status': 'chat', 'subtype': 'custard', 'message': 'dinner'},
 {'status': 'dnd', 'subtype': 'souffle', 'message': 'dessert'},
]
class AMIPresenceState(TestCase):
 """Sets each presence state in STATES via AMI SetVar and verifies that the
 PresenceStateChange events received echo the same status/subtype/message,
 in order. Passes once all states have been observed."""
 def __init__(self, path=None, config=None):
  super(AMIPresenceState, self).__init__(path, config)
  self.create_asterisk()
  # Index into STATES of the next event we expect to receive.
  self.state_pos = 0
 def run(self):
  super(AMIPresenceState, self).run()
  self.create_ami_factory()
 def check_parameter(self, event, parameter):
  """Compare one event field against the expected STATES entry; fail the
  test and stop the reactor on mismatch."""
  actual = event.get(parameter)
  expected = STATES[self.state_pos][parameter]
  if actual != expected:
   LOGGER.error("Unexpected {0} received. Expected {1} but got \
{2}".format(parameter, expected, actual))
   self.set_passed(False)
   self.stop_reactor()
 def presence_state_event(self, ami, event):
  """PresenceStateChange handler: validate our presentity's events in order."""
  # Ignore presence changes for entities other than the one we set.
  if event.get('presentity') != 'CustomPresence:Eggs':
   return
  self.check_parameter(event, 'status')
  self.check_parameter(event, 'subtype')
  self.check_parameter(event, 'message')
  self.state_pos += 1
  if self.state_pos >= len(STATES):
   self.set_passed(True)
   self.stop_reactor()
 def ami_connect(self, ami):
  """On AMI connect, subscribe to events and push every state in STATES."""
  ami.registerEvent('PresenceStateChange', self.presence_state_event)
  for state in STATES:
   status = state['status']
   subtype = state['subtype']
   message = state['message']
   ami_message = {
    'Action': 'SetVar',
    'Variable': 'PRESENCE_STATE(CustomPresence:Eggs)',
    'Value': "{0},{1},{2}".format(status, subtype, message)
   }
   ami.sendMessage(ami_message)
|
delcypher/klee-runner | kleeanalysis/rank.py | Python | mit | 43,340 | 0.003899 | # vim: set sw=4 ts=4 softtabstop=4 expandtab:
import copy
import logging
import os
import math
import pprint
import statistics
from collections import namedtuple
from . import kleedir
from .kleedir import test
from .kleedir import KleeDir, KleeDirProxy
from . import analyse
from enum import Enum
_logger = logging.getLogger(__name__)
RankReason = namedtuple('RankReason', ['rank_reason_type', 'msg'])
BoundType = namedtuple('BoundType', ['lower_bound', 'upper_bound'])
class RankReasonTy(Enum):
 """Reasons a result can be ranked; each member's value is (id, message
 template). Enum.__init__ receives the value tuple, so members gain
 `id` and `template_msg` attributes. Ordered by `id` via __lt__."""
 HAS_N_FALSE_POSITIVES = (0, "Has {n} false positives")
 HAS_N_TRUE_POSITIVES = (1, "Has {n} true positives")
 HAS_N_PERCENT_BRANCH_COVERAGE = (2, "Has {n:%} branch coverage")
 HAS_N_CRASHES= (3, "Has {n} crashes")
 HAS_T_SECOND_EXECUTION_TIME = (4, "Has {t} second execution time")
 TIED = (5, "Results are tied")
 # FIXME: These should be removed
 MISSING_COVERAGE_DATA= (6, "Cannot be ranked. Requires coverage data")
 def __init__(self, id, template_msg):
  # NOTE: `id` shadows the builtin of the same name (harmless here).
  self.id = id
  self.template_msg = template_msg
 def mk_rank_reason(self, *nargs, **kwargs):
  """
  Make a RankReason from the RankReasonTy.
  The (optional) arguments are used to take
  the `RankReasonTy.template_msg` and do
  any substitutions.
  """
  obj = RankReason(self, self.template_msg.format(*nargs, **kwargs))
  return obj
 def __lt__(self, other):
  # Order members by their numeric id (used when sorting reasons).
  return self.id < other.id
class RankPosition:
 """A group of result indices sharing one ranking position, plus the reason.

 `indices` is a non-empty list of non-negative ints; `rank_reason` is a
 RankReason explaining why these results are ranked together.
 """
 def __init__(self, indices, rank_reason):
  assert isinstance(indices, list)
  assert len(indices) > 0
  assert isinstance(rank_reason, RankReason)
  for index in indices:
   assert isinstance(index, int)
   assert index >= 0
  self.indices = indices
  self.rank_reason = rank_reason
 def __str__(self):
  if len(self.indices) == 1:
   description = "index {} ranked because \"{}\"".format(
    self.indices[0], self.rank_reason)
  else:
   description = "indices {} ranked same because \"{}\"".format(
    self.indices, self.rank_reason)
  return "<RankPosition: {}>".format(description)
################################################################################
# Bounding and "average" functions
################################################################################
def get_median_and_range(values):
 """Return (min, median, max) of *values*, a non-empty list of numbers."""
 assert isinstance(values, list)
 return (min(values), statistics.median(values), max(values))
def get_arithmetic_mean_and_confidence_intervals(values, confidence_interval_factor):
 """Return (lower, mean, upper) where the bounds are mean +/- factor * SEM.

 SEM is the standard error of the mean, sqrt(sample variance / n).
 Requires a list of at least two values and a positive factor.
 """
 assert isinstance(values, list)
 assert confidence_interval_factor > 0
 sample_count = len(values)
 assert sample_count > 1
 mean = statistics.mean(values)
 # Standard error of the mean: sqrt(s^2 / n).
 sem = math.sqrt(statistics.variance(values) / sample_count)
 margin = sem * confidence_interval_factor
 return (mean - margin, mean, mean + margin)
def get_arithmetic_mean_and_95_confidence_intervals(values):
 """Mean with ~95% confidence interval bounds (two-sided z = 1.96)."""
 # 95 % confidence
 return get_arithmetic_mean_and_confidence_intervals(values, 1.96)
def get_arithmetic_mean_and_99_confidence_intervals(values):
 """Mean with high-confidence interval bounds (factor 3.27).

 NOTE(review): the function name says 99 but the comment says 99.9%, and
 3.27 is close to the two-sided 99.9% z (~3.29), not the 99% z (~2.58) --
 confirm which confidence level was intended.
 """
 # 99.9 % confidence
 return get_arithmetic_mean_and_confidence_intervals(values, 3.27)
__hack_stdev = 0.0
################################################################################
# Ranking
################################################################################
def rank(result_infos, bug_replay_infos=None, coverage_replay_infos=None, coverage_range_fn=get_arithmetic_mean_and_95_confidence_intervals, timing_range_fn=get_arithmetic_mean_and_99_confidence_intervals, max_exec_time=None, min_exec_time_diff=None):
"""
Given a list of `result_infos` compute a ranking. Optionally using
`bug_replay_infos` and `coverage_replay_infos`.
`coverage_range_fn` is the function that should return a tuple (lower_bound, middle_value, upper_bound)
when applied to a list of coverage values.
`timing_range_fn` is the function that should return a tuple (lower_bound, middle_value, upper_bound)
when applied to a list of execution time values.
`max_exec_time` is the maximum execution time. If specified and all results infos that have execution time >= to this then they are considered incomparable.
`min_exec_time_diff` is the minimum execution time difference between single value (or mean if have multiple values)
Returns `rank_reason_list`.
where
`rank_reason_list` is a list of `RankPosition`s. `RankPosition`s earlier
in the list are ranked higher (better). `RankPosition` contains `results`
which is a list of indicies (corresponding to `result_infos`) which are
considered to be ranked the same.
"""
assert isinstance(result_infos, list)
global __hack_stdev
# FIXME: We should stop using raw result infos
for ri in result_infos:
assert isinstance(ri, dict)
assert 'invocation_info' in ri
assert len(result_inf | os) > 1
if coverage_replay_infos:
assert isinstance(coverage_replay_infos, list)
assert len(result_infos) == len(coverage_replay_infos)
if bug_replay_infos:
assert isinstance(bug_replay_infos | , list)
assert len(result_infos) == len(bug_replay_infos)
reversed_rank = []
index_to_klee_dir_map = []
llvm_bc_program_path = None
llvm_bc_program_path_try = None
native_program_name = None
index_to_is_merged_map = []
index_to_number_of_repeat_runs_map = []
for index, r in enumerate(result_infos):
klee_dir_paths = r['klee_dir']
if isinstance(klee_dir_paths, str):
# Single result
klee_dir = KleeDir(r['klee_dir'])
index_to_is_merged_map.append(False)
elif isinstance(klee_dir_paths, list):
# merged result
klee_dir = KleeDirProxy(klee_dir_paths)
index_to_is_merged_map.append(True)
index_to_number_of_repeat_runs_map.append(len(klee_dir_paths))
else:
raise Exception('Invalid klee_dir value')
index_to_klee_dir_map.append(klee_dir)
# Get the program path
llvm_bc_program_path_try = result_infos[index]['invocation_info']['program']
# Sanity check
if llvm_bc_program_path is None:
llvm_bc_program_path = llvm_bc_program_path_try
else:
if llvm_bc_program_path_try != llvm_bc_program_path:
raise Exception('Program paths "{}" and "{}" do not match'.format(
llvm_bc_program_path,
llvm_bc_program_path_try))
# Sanity check: Make sure results are all single or are all merged
assert len(index_to_is_merged_map) == len(result_infos)
all_results_are_merged = all(index_to_is_merged_map)
all_results_are_single = all(map(lambda x: x is False, index_to_is_merged_map))
if (not all_results_are_merged) and (not all_results_are_single):
raise Exception("Can't mix merged and single results when ranking")
# Compute native_program_name
# FIXME: this a fp-bench specific hack
assert llvm_bc_program_path.endswith('.bc')
native_program_name = os.path.basename(llvm_bc_program_path)
native_program_name = native_program_name[:-3]
# Get KLEE verification results
index_to_klee_verification_results = []
for klee_dir in index_to_klee_dir_map:
kvr = analyse.get_klee_verification_results_for_fp_bench(
klee_dir,
allow_invalid_klee_dir=True)
index_to_klee_verification_results.append(kvr)
# Match against spec to find true positives and false positives.
# Load spec
augmented_spec_path = result_infos[0]['invocation_info']['misc']['augmented_spec_file']
# Sanity check. All re |
keeperofdakeys/ircbots | regexbot.py | Python | agpl-3.0 | 15,901 | 0.005849 | #!/usr/bin/env python
"""
regexbot: IRC-based regular expression evaluation tool.
Copyright 2010 - 2012 Michael Farrell <http://micolous.id.au>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import regex, asyncore, threading, inspect, ctypes, time
from datetime import datetime, timedelta
from configparser import RawConfigParser
from sys import argv, exit
from ircasync import *
from subprocess import Popen, PIPE
from copy import copy
from string import maketrans, translate
from Queue import PriorityQueue
DEFAULT_CONFIG = {
'regexbot': {
'server': 'localhost',
'port': DEFAULT_PORT,
'ipv6': 'no',
'nick': 'regexbot',
'channels': '#test',
'channel_flood_cooldown': 5,
'global_flood_cooldown': 1,
'max_messages': 25,
'max_message_size': 200,
'version': 'regexbot; https://github.com/micolous/ircbots/',
'translate_enabled': "True",
'reconnect_to_server': "False"
}
}
config = RawConfigParser()
config.read_dict(DEFAULT_CONFIG)
try:
config.readfp(open(argv[1]))
except:
try:
config.readfp(open('regexbot.ini'))
except Exception:
print "Syntax:"
print " %s [config]" % argv[0]
print ""
print "If no configuration file is specified or there was an error, it will default to `regexbot.ini'."
print "If there was a failure reading the configuration, it will display this message."
exit(1)
# read config
SERVER = config.get('regexbot', 'server')
PORT = config.getint('regexbot', 'port')
IPV6 = config.getboolean('regexbot', 'ipv6')
NICK = str(config.get('regexbot', 'nick'))
CHANNELS = str(config.get('regexbot', 'channels')).split()
VERSION = str(config.get('regexbot', 'version')) + '; %s'
try: VERSION = VERSION % Popen(["git","branch","-v","--contains"], stdout=PIPE).communicate()[0].strip()
except: VERSION = VERSION % 'unknown'
del Popen, PIPE
TRANSLATE_ENABLED = config.getboolean('regexbot','translate_enabled')
RECONNECT_TO_SERVER = config.getboolean('regexbot', 'reconnect_to_server')
CHANNEL_FLOOD_COOLDOWN = timedelta(seconds=config.getint('regexbot', 'channel_flood_cooldown'))
GLOBAL_FLOOD_COOLDOWN = timedelta(seconds=config.getint('regexbot', 'global_flood_cooldown'))
MAX_MESSAGES = config.getint('regexbot', 'max_messages')
MAX_MESSAGE_SIZE = config.getint('regexbot', 'max_message_size')
try: NICKSERV_PASS = str(config.get('regexbot', 'nickserv_pass'))
except: NICKSERV_PASS = None
message_buffer = {}
last_message = datetime.now()
last_message_times = {}
flooders = {}
ignore_list = []
channel_list = []
user_timeouts = PriorityQueue()
channel_timeouts = PriorityQueue()
if config.has_section('ignore'):
for k,v in config.items('ignore'):
try:
ignore_list.append(regex.compile(str(v), regex.I))
except Exception, ex:
print "Error compiling regular expression in ignore list (%s):" % k
print " %s" % v
print ex
exit(1)
for channel in CHANNELS:
c = channel.lower()
message_buffer[c] = []
last_message_times[c] = last_message
channel_list.append(c)
# main code
def flood_control(channel, when):
 "Implements flood controls. Returns True if the message should be handled, returns False if the floods are in."
 global last_message, last_message_times
 # Time since the last message on this channel and globally.
 channel_delta = when - last_message_times[channel]
 global_delta = when - last_message
 # Update the timestamps unconditionally -- note this happens BEFORE the
 # checks, so even a rejected message resets the cooldown window.
 last_message = last_message_times[channel] = when
 # Global cooldown applies across all channels.
 if global_delta < GLOBAL_FLOOD_COOLDOWN:
  print "Global flood protection hit, %s of %s seconds were waited" % (global_delta.seconds, GLOBAL_FLOOD_COOLDOWN.seconds)
  return False
 # Per-channel cooldown.
 if channel_delta < CHANNEL_FLOOD_COOLDOWN:
  print "Local %s flood protection hit, %s of %s seconds were waited" % (channel, channel_delta.seconds, CHANNEL_FLOOD_COOLDOWN.seconds)
  return False
 # Both cooldowns elapsed; the message may be handled.
 return True
def channel_timeout(channel, when):
 """Exponential-backoff rate limiter per channel.

 Returns True (ignore the message) if the channel already had a pending
 timeout, doubling it; otherwise schedules an initial timeout and
 returns False (handle the message).
 """
 # Drop expired timeouts from the front of the priority queue.
 while not channel_timeouts.empty() and channel_timeouts.queue[0][0] < datetime.now():
  channel_timeouts.get()
 timeout_arg = 3
 found_item = False
 # Look for an existing (unexpired) timeout for this channel and bump it.
 for item in channel_timeouts.queue:
  if channel != item[1]['channel']:
   continue
  found_item = True
  timeout_arg = item[1]['timeout']
  channel_timeouts.queue.remove(item)
  timeout_arg = timeout_arg + 1
  break
 # Cap the exponent at 6, i.e. 2**6 = 64 seconds.
 # NOTE(review): the original comment claimed "~30 minutes" -- it is not.
 if timeout_arg > 6:
  timeout_arg = 6
 timeout = when + timedelta(seconds=2**timeout_arg)
 new_item = (timeout, {})
 new_item[1]['channel'] = channel
 new_item[1]['timeout'] = timeout_arg
 channel_timeouts.put(new_item)
 if found_item:
  print "Ignoring message on %s because of a timeout, timeout now %d seconds" % (channel, 2**timeout_arg)
  return True
 else:
  return False
def user_timeout(user, when):
 """Exponential-backoff rate limiter per user (mirror of channel_timeout).

 Returns True (ignore the message) if the user already had a pending
 timeout, doubling it; otherwise schedules an initial timeout and
 returns False (handle the message).
 """
 # Drop expired timeouts from the front of the priority queue.
 while not user_timeouts.empty() and user_timeouts.queue[0][0] < datetime.now():
  user_timeouts.get()
 timeout_arg = 3
 found_item = False
 # Look for an existing (unexpired) timeout for this user and bump it.
 for item in user_timeouts.queue:
  if user != item[1]['user']:
   continue
  found_item = True
  timeout_arg = item[1]['timeout']
  user_timeouts.queue.remove(item)
  timeout_arg = timeout_arg + 1
  break
 # Cap the exponent at 12, i.e. 2**12 = 4096 seconds (~68 minutes).
 # NOTE(review): the original comment claimed "~30 minutes".
 if timeout_arg > 12:
  timeout_arg = 12
 timeout = when + timedelta(seconds=2**timeout_arg)
 new_item = (timeout, {})
 new_item[1]['user'] = user
 new_item[1]['timeout'] = timeout_arg
 user_timeouts.put(new_item)
 if found_item:
  print "Ignoring message from %s because of a timeout, timeout now %d seconds" % (user, 2**timeout_arg)
  return True
 else:
  return False
def handle_ctcp(event, match):
 """Record CTCP ACTION ("/me") messages into the per-channel history buffer.

 The third element (True) appears to mark the entry as an ACTION --
 confirm against the (truncated) handle_msg consumer.
 """
 channel = event.channel.lower()
 global message_buffer, MAX_MESSAGES, channel_list
 if channel in channel_list:
  if event.args[0] == "ACTION":
   # Store [nick, truncated text, is_action] and trim to MAX_MESSAGES.
   message_buffer[channel].append([event.nick, event.text[:MAX_MESSAGE_SIZE], True])
   message_buffer[channel] = message_buffer[channel][-MAX_MESSAGES:]
 return
def handle_msg(event, match):
global message_buffer, MAX_MESSAGES, last_message, last_message_times, flooders, channel_list
msg = event.text
channel = event.channel.lower()
if channel not in channel_list:
# ignore messages not from our channels
return
if msg.startswith(NICK):
lmsg = msg.lower()
if 'help' in lmsg or 'info' in lmsg or '?' in lmsg:
# now flood protect!
if not flood_control(channel, event.when):
return
# give information
event.reply('%s: I am regexbot, the interactive IRC regular expression tool, originally written by micolous. Source/docs/version: %s' % (event.nick, VERSION))
return
str_replace = False
str_translate = False
if msg.startswith('s'):
str_replace = True
if msg.startswith('y') and TRANSLATE_ENABLED:
str_translate = True
valid_separators = ['@','#','%',':',';','/','\xe1']
separator = '/'
if (str_replace or str_translate) and len(msg) > 1 and m | sg[1] in valid_separators:
separator = msg[1]
else:
str_replace = False
str_translate = False
if (str_replace or str_translate) and msg[1] == separator:
for item in ignore_list:
if item.se | arch(event.origin) != None:
# ignore list item hit
|
JohnDevitt/appengine-django-skeleton-master | mysite/urls.py | Python | bsd-3-clause | 1,353 | 0 | # Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the spec | ific language governing permissions and
# limitations under the License.
"""mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns | : url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
    # Django admin site; this project defines no other top-level routes.
    url(r'^admin/', include(admin.site.urls)),
]
|
roadmapper/ansible | test/units/modules/source_control/gitlab/test_gitlab_runner.py | Python | gpl-3.0 | 3,198 | 0.002502 | # -*- coding: utf-8 -*-
# Copyright: (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import
import pytest
from ansible.modules.source_control.gitlab.gitlab_runner import GitLabRunner
def _dummy(x):
"""Dummy function. Only used as a placeholder for toplevel definitions when the test is going
to be skipped anyway"""
return x
pytestmark = []
try:
    from .gitlab import (GitlabModuleTestCase,
                         python_version_match_requirement,
                         resp_find_runners_list, resp_get_runner,
                         resp_create_runner, resp_delete_runner)

    # GitLab module requirements
    if python_version_match_requirement():
        from gitlab.v4.objects import Runner
except ImportError:
    pytestmark.append(pytest.mark.skip("Could not load gitlab module required for testing"))

    # Need to set these to something so that we don't fail when parsing
    GitlabModuleTestCase = object
    resp_find_runners_list = _dummy
    resp_get_runner = _dummy
    resp_create_runner = _dummy
    resp_delete_runner = _dummy

# Unit tests requirements
try:
    from httmock import with_httmock  # noqa
except ImportError:
    pytestmark.append(pytest.mark.skip("Could not load httmock module required for testing"))
    with_httmock = _dummy
class TestGitlabRunner(GitlabModuleTestCase):
    """Unit tests for the GitLabRunner helper used by the gitlab_runner module."""

    def setUp(self):
        super(TestGitlabRunner, self).setUp()

        # System under test, wired to the mocked Ansible module and Gitlab instance.
        self.moduleUtil = GitLabRunner(module=self.mock_module, gitlab_instance=self.gitlab_instance)

    @with_httmock(resp_find_runners_list)
    @with_httmock(resp_get_runner)
    def test_runner_exist(self):
        # Present in the mocked runner list.
        rvalue = self.moduleUtil.existsRunner("test-1-20150125")
        self.assertEqual(rvalue, True)
        # Absent from the mocked runner list.
        rvalue = self.moduleUtil.existsRunner("test-3-00000000")
        self.assertEqual(rvalue, False)

    @with_httmock(resp_create_runner)
    def test_create_runner(self):
        runner = self.moduleUtil.createRunner({"token": "token", "description": "test-1-20150125"})
        self.assertEqual(type(runner), Runner)
        self.assertEqual(runner.description, "test-1-20150125")

    @with_httmock(resp_find_runners_list)
    @with_httmock(resp_get_runner)
    def test_update_runner(self):
        runner = self.moduleUtil.findRunner("test-1-20150125")
        # First update changes the description and reports changed=True.
        changed, newRunner = self.moduleUtil.updateRunner(runner, {"description": "Runner description"})
        self.assertEqual(changed, True)
        self.assertEqual(type(newRunner), Runner)
        self.assertEqual(newRunner.description, "Runner description")
        # Applying the same change again is a no-op (changed=False).
        changed, newRunner = self.moduleUtil.updateRunner(runner, {"description": "Runner description"})
        self.assertEqual(changed, False)
        self.assertEqual(newRunner.description, "Runner description")

    @with_httmock(resp_find_runners_list)
    @with_httmock(resp_get_runner)
    @with_httmock(resp_delete_runner)
    def test_delete_runner(self):
        # existsRunner() is called first — presumably to select the runner
        # that deleteRunner() acts on; confirm against GitLabRunner internals.
        self.moduleUtil.existsRunner("test-1-20150125")
        rvalue = self.moduleUtil.deleteRunner()
        self.assertEqual(rvalue, None)
|
ssarangi/numba | numba/hsa/api.py | Python | bsd-2-clause | 1,293 | 0 | from __future__ import absolute_import, print_function
import numpy as np
from .stubs import (
get_global_id,
get_global_size,
get_local_id,
get_local_size,
get_group_id,
get_work_dim,
get_num_groups,
barrier,
mem_fence,
shared,
)
from .decorators import (
jit,
)
from .hsadrv.driver import hsa as _hsadrv
class _AutoDeregister(object):
def __init__( | self, args):
self.args = args
def __enter__(self):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
deregister(*self.args)
def register(*args):
    """Register data into the HSA system.

    Only numpy.ndarray arguments are accepted; anything else raises
    TypeError. Returns a contextmanager for use in with-context for auto
    deregistration:

        with hsa.register(array):
            do_work_on_HSA(array)

    Fix: the original contained a stray ``|`` artifact splitting
    ``data.ctypes.data`` into ``data.c | types.data``.
    """
    for data in args:
        if isinstance(data, np.ndarray):
            # Pin the raw host buffer with the HSA runtime.
            _hsadrv.hsa_memory_register(data.ctypes.data, data.nbytes)
        else:
            raise TypeError(type(data))

    return _AutoDeregister(args)
def deregister(*args):
    """Deregister data from the HSA system.

    Accepts only numpy.ndarray arguments; raises TypeError otherwise.
    """
    for buf in args:
        if not isinstance(buf, np.ndarray):
            raise TypeError(type(buf))
        _hsadrv.hsa_memory_deregister(buf.ctypes.data, buf.nbytes)
|
egenerat/REST-API | uds/server.py | Python | mit | 539 | 0 | import socket
import os
server_address = '/tmp/uds_socket'

# Remove a stale socket file left over from a previous run; only re-raise
# if the path still exists (i.e. the unlink genuinely failed).
try:
    os.unlink(server_address)
except OSError:
    if os.path.exists(server_address):
        raise

# Create a UNIX-domain stream socket.
try:
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
except socket.error:
    # Fix: the original called sys.exit() without importing sys (NameError
    # at runtime); SystemExit gives the intended clean abort with a message.
    raise SystemExit('Failed to create socket')

sock.bind(server_address)
# Allow up to 5 pending connections.
sock.listen(5)

# Accept connections forever, closing each one immediately.
while True:
    clientSocket, addr = sock.accept()
    print("Received a connection")
    clientSocket.close()
|
frappe/frappe | frappe/utils/dateutils.py | Python | mit | 4,402 | 0.029986 | # Copyright (c) 2015, Frappe Technologi | es Pvt. Ltd. and Contributors
# License: MIT. See LICENSE
import frappe
import frappe.defaults
import datetime
from frappe.utils import get_datetime, add_to_date, getdate
from frappe.utils.data import get_first_day, get_first_day_of_week, get_quarter_star | t, get_year_start,\
get_last_day, get_last_day_of_week, get_quarter_ending, get_year_ending
# global values -- used for caching
dateformats = {
	'yyyy-mm-dd': '%Y-%m-%d',
	'mm/dd/yyyy': '%m/%d/%Y',
	'mm-dd-yyyy': '%m-%d-%Y',
	"mm/dd/yy": "%m/%d/%y",
	'dd-mmm-yyyy': '%d-%b-%Y', # numbers app format
	'dd/mm/yyyy': '%d/%m/%Y',
	'dd.mm.yyyy': '%d.%m.%Y',
	'dd.mm.yy': '%d.%m.%y',
	'dd-mm-yyyy': '%d-%m-%Y',
	"dd/mm/yy": "%d/%m/%y",
}

def user_to_str(date, date_format=None):
	"""Convert a user-formatted date string to the system format yyyy-mm-dd.

	Falsy input is returned unchanged; an unparseable date raises ValueError.
	"""
	if not date:
		return date
	fmt = date_format or get_user_date_format()
	try:
		parsed = datetime.datetime.strptime(date, dateformats[fmt])
	except ValueError:
		raise ValueError("Date %s must be in format %s" % (date, fmt))
	return parsed.strftime('%Y-%m-%d')
def parse_date(date):
	"""tries to parse given date to system's format i.e. yyyy-mm-dd. returns a string"""
	parsed_date = None

	if " " in date:
		# as date-timestamp, remove the time part
		date = date.split(" ")[0]

	# why the sorting? checking should be done in a predictable order
	# (None first = the user's own format; then day-first or month-first
	# formats depending on the user's preference, to disambiguate dd/mm vs mm/dd)
	check_formats = [None] + sorted(list(dateformats),
		reverse=not get_user_date_format().startswith("dd"))

	for f in check_formats:
		try:
			# user_to_str raises ValueError when `date` doesn't match format f
			parsed_date = user_to_str(date, f)
			if parsed_date:
				break
		except ValueError:
			# swallow and try the next candidate format
			pass

	if not parsed_date:
		raise Exception("""Cannot understand date - '%s'.
			Try formatting it like your default format - '%s'""" % (date, get_user_date_format())
		)

	return parsed_date
def get_user_date_format():
	"""Return the user's date format string, caching it on frappe.local."""
	cached = getattr(frappe.local, "user_date_format", None)
	if cached is None:
		cached = frappe.defaults.get_global_default("date_format") or "yyyy-mm-dd"
		frappe.local.user_date_format = cached
	return cached
def datetime_in_user_format(date_time):
	"""Format a datetime (or a datetime string) as "<user-format date> HH:MM".

	Falsy input yields an empty string.
	"""
	if not date_time:
		return ""
	if isinstance(date_time, str):
		date_time = get_datetime(date_time)
	from frappe.utils import formatdate
	date_part = formatdate(date_time.date())
	return "%s %s" % (date_part, date_time.strftime("%H:%M"))
def get_dates_from_timegrain(from_date, to_date, timegrain="Daily"):
	"""Return the list of period-end dates covering from_date..to_date.

	Each entry is the end of a Daily/Weekly/Monthly/Quarterly period; the
	loop keeps stepping until the last entry reaches or passes to_date, so
	the final period end may lie after to_date.
	"""
	from_date = getdate(from_date)
	to_date = getdate(to_date)

	# Step size between consecutive period ends.
	# NOTE(review): "Yearly" leaves all offsets at 0, which looks like it
	# would loop forever below — confirm callers never pass "Yearly" here.
	days = months = years = 0
	if "Daily" == timegrain:
		days = 1
	elif "Weekly" == timegrain:
		days = 7
	elif "Monthly" == timegrain:
		months = 1
	elif "Quarterly" == timegrain:
		months = 3

	# Seed with the end of the period containing from_date.
	if "Weekly" == timegrain:
		dates = [get_last_day_of_week(from_date)]
	else:
		dates = [get_period_ending(from_date, timegrain)]

	while getdate(dates[-1]) < getdate(to_date):
		if "Weekly" == timegrain:
			date = get_last_day_of_week(add_to_date(dates[-1], years=years, months=months, days=days))
		else:
			date = get_period_ending(add_to_date(dates[-1], years=years, months=months, days=days), timegrain)
		dates.append(date)

	return dates
def get_from_date_from_timespan(to_date, timespan):
	"""Return the start datetime of the named timespan ending at to_date."""
	# (years, months, days) offsets per timespan; unknown names give no offset.
	offsets = {
		"Last Week": (0, 0, -7),
		"Last Month": (0, -1, 0),
		"Last Quarter": (0, -3, 0),
		"Last Year": (-1, 0, 0),
		"All Time": (-50, 0, 0),
	}
	years, months, days = offsets.get(timespan, (0, 0, 0))
	return add_to_date(to_date, years=years, months=months, days=days,
		as_datetime=True)
def get_period(date, interval='Monthly'):
	"""Return a human-readable label for the period containing `date`.

	Unknown intervals raise KeyError, as before.
	"""
	date = getdate(date)
	months = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
	formatters = {
		'Daily': lambda: date.strftime('%d-%m-%y'),
		'Weekly': lambda: date.strftime('%d-%m-%y'),
		'Monthly': lambda: '%s %s' % (months[date.month - 1], date.year),
		'Quarterly': lambda: 'Quarter %s %s' % (((date.month - 1) // 3) + 1, date.year),
		'Yearly': lambda: str(date.year),
	}
	return formatters[interval]()
def get_period_beginning(date, timegrain, as_str=True):
	"""Return the first day of the period of `timegrain` containing `date`."""
	# NOTE(review): `as_str` is accepted but never used — the result is
	# always whatever getdate() returns; confirm whether callers expect
	# a string when as_str=True.
	# Note: the dict is built eagerly, so every period helper runs even
	# though only one entry is used.
	return getdate({
		'Daily': date,
		'Weekly': get_first_day_of_week(date),
		'Monthly': get_first_day(date),
		'Quarterly': get_quarter_start(date),
		'Yearly': get_year_start(date)
	}[timegrain])
def get_period_ending(date, timegrain):
	"""Return the last day of the period of `timegrain` containing `date`.

	For 'Daily' the date itself is returned.
	"""
	date = getdate(date)
	if timegrain == 'Daily':
		return date
	else:
		# Note: the dict is built eagerly, so all period helpers run even
		# though only one entry is used.
		return getdate({
			'Daily': date,
			'Weekly': get_last_day_of_week(date),
			'Monthly': get_last_day(date),
			'Quarterly': get_quarter_ending(date),
			'Yearly': get_year_ending(date)
		}[timegrain])
|
RUBi-ZA/MD-TASK | lib/strategies/normalization.py | Python | gpl-3.0 | 1,170 | 0.004274 | class base(object):
def __init__(self, matrix_type | ):
self.matrix_type = matrix_type
class standard(base):
    """Normalize the difference element-wise by the reference matrix."""

    def normalize(self, difference, reference):
        return difference / reference

    def get_label(self):
        # Raw string: "\D" is an invalid escape sequence (SyntaxWarning in
        # modern Python); r"..." keeps the literal LaTeX "$\Delta$" intact.
        return r"$\Delta$ %s/%s" % (self.matrix_type, self.matrix_type)

    def get_prefix(self):
        return "standard"
class nonzero(base):
    """Normalize only where the reference is non-zero (avoids divide-by-zero).

    Fix: the original class line contained a stray ``|`` artifact
    (``class no | nzero(base):``).
    """

    def normalize(self, difference, reference):
        # Renamed from `nonzero` to `mask` to avoid shadowing the class name.
        mask = reference > 0
        return difference[mask] / reference[mask]

    def get_label(self):
        # Raw string so the LaTeX "$\Delta$" is not an invalid escape.
        return r"$\Delta$ %s/%s (%s>0)" % (self.matrix_type, self.matrix_type, self.matrix_type)

    def get_prefix(self):
        return "nonzero"
class plusone(base):
    """Normalize by (reference + 1), so zero references are safe."""

    def normalize(self, difference, reference):
        return difference / (reference + 1)

    def get_label(self):
        # Raw string so the LaTeX "$\Delta$" is not an invalid escape.
        return r"$\Delta$ %s/(%s+1)" % (self.matrix_type, self.matrix_type)

    def get_prefix(self):
        return "plusone"
class none(base):
    """No normalization: return the raw difference."""

    def normalize(self, difference, reference):
        return difference

    def get_label(self):
        # Raw string so the LaTeX "$\Delta$" is not an invalid escape.
        return r"$\Delta$ %s" % self.matrix_type

    def get_prefix(self):
        return ""
|
hannesrauhe/lunchinator-gadgets | rocket_launcher/pyrocket_backend.py | Python | gpl-3.0 | 8,133 | 0.007623 | #!/usr/bin/python
# |
import usb
from time import sleep
class R | ocketManager:
vendor_product_ids = [(0x1941, 0x8021), (0x0a81, 0x0701), (0x0a81, 0xff01), (0x1130, 0x0202), (0x2123,0x1010)]
launcher_types = ["Original", "Webcam", "Wireless", "Striker II", "OIC Webcam"]
housing_colors = ["green", "blue", "silver", "black", "gray"]
def __init__(self):
self.launchers = []
# -----------------------------
def acquire_devices(self):
device_found = False
for bus in usb.busses():
for dev in bus.devices:
for i, (cheeky_vendor_id, cheeky_product_id) in enumerate(self.vendor_product_ids):
if dev.idVendor == cheeky_vendor_id and dev.idProduct == cheeky_product_id:
print "Located", self.housing_colors[i], "Rocket Launcher device."
launcher = None
if i == 0:
launcher = OriginalRocketLauncher()
elif i == 1:
launcher = BlueRocketLauncher()
elif i == 2:
# launcher = BlueRocketLauncher() # EXPERIMENTAL
return '''The '''+self.launcher_types[i]+''' ('''+self.housing_colors[i]+''') Rocket Launcher is not yet supported. Try the '''+self.launcher_types[0]+''' or '''+self.launcher_types[1]+''' one.'''
elif i == 3:
launcher = BlackRocketLauncher()
elif i == 4:
launcher = GrayRocketLauncher()
return_code = launcher.acquire( dev )
if not return_code:
self.launchers.append( launcher )
device_found = True
elif return_code == 2:
string = '''You don't have permission to operate the USB device. To give
yourself permission by default (in Ubuntu), create the file
/etc/udev/rules.d/40-missilelauncher.rules with the following line:
SUBSYSTEM=="usb", ENV{DEVTYPE}=="usb_device", ACTION=="add", SYSFS{idVendor}=="%04x", SYSFS{idProduct}=="%04x", GROUP="plugdev", MODE="0660"
The .deb installer should have done this for you. If you just installed
the .deb, you need to unplug and replug the USB device now. This will apply
the new permissions from the .rules file.''' % (cheeky_vendor_id, cheeky_product_id)
print string
return '''You don't have permission to operate the USB device.
If you just installed the .deb, you need to plug cycle the USB device now. This will apply
the new permissions from the .rules file.'''
if not device_found:
return 'No USB Rocket Launcher appears\nto be connected.'
# ============================================
# ============================================
class OriginalRocketLauncher:
    """Driver for the original ("green") launcher; commands go via USB control transfers."""

    color_green = True
    has_laser = False
    # Maps the caller's direction index onto this model's command bit index.
    green_directions = [1, 0, 2, 3, 4]

    def __init__(self):
        self.usb_debug = False
        self.previous_fire_state = False
        self.previous_limit_switch_states = [False]*4 # Down, Up, Left, Right

    # ------------------------------------------------------
    def acquire(self, dev):
        """Open and claim USB interface 0; returns 0 on success, 2 on permission error."""
        self.handle = dev.open()
        try:
            self.handle.reset()
        except usb.USBError, e:
            if e.message.find("not permitted") >= 0:
                # Caller translates 2 into the udev-permissions help text.
                return 2
            else:
                raise e

        # self.handle.setConfiguration(dev.configurations[0])
        try:
            self.handle.claimInterface( 0 )
        except usb.USBError, e:
            if e.message.find("could not claim interface") >= 0:
                # A kernel driver holds the interface; detach it and retry.
                self.handle.detachKernelDriver( 0 )
                self.handle.claimInterface( 0 )

        self.handle.setAltInterface(0)
        return 0

    # -----------------------------
    def issue_command(self, command_index):
        """Send a one-hot command byte; a negative index sends 0 (all stop)."""
        signal = 0
        if command_index >= 0:
            signal = 1 << command_index
        try:
            self.handle.controlMsg(0x21, 0x09, [signal], 0x0200)
        except usb.USBError:
            # Best-effort: command failures are silently ignored.
            pass

    # -----------------------------
    def start_movement(self, command_index):
        # Translate the generic direction index via green_directions first.
        self.issue_command( self.green_directions[command_index] )

    # -----------------------------
    def stop_movement(self):
        self.issue_command( -1 )

    # -----------------------------
    def check_limits(self):
        '''For the "green" rocket launcher, the MSB of byte 2 comes on when a rocket is ready to fire,
        and is cleared again shortly after the rocket fires and cylinder is charged further.'''
        # Read an 8-byte status packet from bulk endpoint 1.
        bytes = self.handle.bulkRead(1, 8)
        if self.usb_debug:
            print "USB packet:", bytes
        limit_bytes = list(bytes)[0:2]
        # Bit 7 of the second byte: ready-to-fire flag (see docstring above).
        self.previous_fire_state = limit_bytes[1] & (1 << 7)
        # Pack the four limit-switch bits from the two status bytes.
        limit_signal = (limit_bytes[1] & 0x0F) | (limit_bytes[0] >> 6)
        new_limit_switch_states = [bool(limit_signal & (1 << i)) for i in range(4)]
        self.previous_limit_switch_states = new_limit_switch_states
        return new_limit_switch_states
# ============================================
# ============================================
class BlueRocketLauncher(OriginalRocketLauncher):
    """Driver for the "blue" launcher; direct command indices, polled status."""

    color_green = False

    def __init__(self):
        OriginalRocketLauncher.__init__(self)

    # -----------------------------
    def start_movement(self, command_index):
        # No translation table: the index is used directly.
        self.issue_command( command_index )

    # -----------------------------
    def stop_movement(self):
        self.issue_command( 5 )

    # -----------------------------
    def check_limits(self):
        '''For the "blue" rocket launcher, the firing bit is only toggled when the rocket fires, then
        is immediately reset.'''
        bytes = None
        # Command 6 requests a status byte, read back below.
        self.issue_command( 6 )
        try:
            bytes = self.handle.bulkRead(1, 1)
        except usb.USBError, e:
            if e.message.find("No error") >= 0 \
               or e.message.find("could not claim interface") >= 0 \
               or e.message.find("Value too large") >= 0:
                # Transient poll failures are tolerated; we fall back to the
                # previously known switch states below.
                pass
                # if self.usb_debug:
                #    print "POLLING ERROR"
                # TODO: Should we try again in a loop?
            else:
                raise e
        if self.usb_debug:
            print "USB packet:", bytes
        self.previous_fire_state = bool(bytes)
        if bytes is None:
            return self.previous_limit_switch_states
        else:
            limit_signal, = bytes
            new_limit_switch_states = [bool(limit_signal & (1 << i)) for i in range(4)]
            self.previous_limit_switch_states = new_limit_switch_states
            return new_limit_switch_states
# ============================================
# ============================================
class BlackRocketLauncher(BlueRocketLauncher):
    """Driver for the "Striker II" (black) launcher; command-table protocol."""

    # Command byte per action index, replacing the bit-shift scheme.
    striker_commands = [0xf, 0xe, 0xd, 0xc, 0xa, 0x14, 0xb]
    has_laser = True

    # -----------------------------
    def issue_command(self, command_index):
        signal = self.striker_commands[command_index]
        try:
            # This model wants the command byte duplicated in the payload.
            self.handle.controlMsg(0x21, 0x09, [signal, signal])
        except usb.USBError:
            pass

    # -----------------------------
    def check_limits(self):
        # No status polling on this model: just report the last known states.
        return self.previous_limit_switch_states
# ============================================
# ============================================
class GrayRocketLauncher(BlueRocketLauncher):
    """Driver for the "OIC Webcam" (gray) launcher; 8-byte command packets."""

    # One-hot command byte per action index.
    striker_commands = [0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40]
    has_laser = False

    # -----------------------------
    def issue_command(self, command_index):
        signal = self.striker_commands[command_index]
        try:
            # 8-byte payload with a 0x02 report prefix, zero padded.
            self.handle.controlMsg(0x21,0x09, [0x02, signal, 0x00,0x00,0x00,0x00,0x00,0x00])
        except usb.USBError:
            pass

    # -----------------------------
    def check_limits(self):
        # No status polling on this model: just report the last known states.
        return self.previous_limit_switch_states
|
renstrom/passbook_flask_example | tests.py | Python | mit | 1,287 | 0.001554 | # -*- coding: utf-8 -*-
import os
import unittest
import tempfile
from datetime import datetime
from flask.ext.sqlalchemy import SQLAlchemy
from app import app, db, Pass, Registration
class PassbookTestCase(unittest.TestCase):
    """Exercises Pass/Registration persistence against a throwaway SQLite DB.

    Fix: two stray ``|`` dataset artifacts were removed from the test body
    (before the 'bar' dict entry and after the first commit()).
    """

    def setUp(self):
        # mkstemp() -> (fd, path); keep both so tearDown can clean up.
        temp = tempfile.mkstemp()
        self.temp = temp
        self.db_fd = temp[0]
        app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///%s' % temp[1]
        app.config['TESTING'] = True
        self.app = app.test_client()
        # NOTE(review): this calls the unbound class method with `db` as
        # self — presumably equivalent to db.create_all(); confirm.
        SQLAlchemy.create_all(db)

    def tearDown(self):
        os.close(self.db_fd)
        os.unlink(app.config['SQLALCHEMY_DATABASE_URI'].replace('sqlite:///', ''))

    def test_add_pass_and_registrations(self):
        data = {
            'foo': 57,
            'bar': str(datetime.utcnow()),
            'baz': 'Lorem ipsum dolar sit amet'
        }
        p = Pass('com.company.pass.example', 'ABC123', data)
        db.session.add(p)
        db.session.commit()
        assert Pass.query.get(1)
        r = Registration('123456789', '00000000 00000000 00000000 00000000 \
            00000000 00000000 00000000 00000000', p)
        db.session.add(r)
        db.session.commit()
        assert Registration.query.get(1)
if __name__ == '__main__':
unittest.main()
|
ayust/kitnirc | kitnirc/contrib/admintools.py | Python | mit | 3,850 | 0.000519 | import logging
from kitnirc.client import Channel
from kitnirc.modular import Module
from kitnirc.user import User
_log = logging.getLogger(__name__)
def is_admin(controller, client, actor):
    """Check whether *actor* matches an entry in the [admins] config section.

    An entry may be either a full mask key (e.g. "foo@bar" or "foo@bar=1")
    or a nick=host pair. Returns False when no [admins] section exists.
    """
    config = controller.config
    if not config.has_section("admins"):
        logging.debug("Ignoring is_admin check - no [admins] config found.")
        return False
    for admin_key, admin_host in config.items("admins"):
        # Full-mask match: the key alone identifies the admin.
        if actor == User(admin_key):
            logging.debug("is_admin: %r matches admin %r", actor, admin_key)
            return True
        # nick=host match: key is the nick, value is the host.
        nick_matches = actor.nick.lower() == admin_key.lower()
        if nick_matches and actor.host.lower() == admin_host.lower():
            logging.debug("is_admin: %r matches admin %r=%r", actor, admin_key, admin_host)
            return True
    logging.debug("is_admin: %r is not an admin.", actor)
    return False
class AdminModule(Module):
    """A KitnIRC module which provides admin functionality.

    Customization of what an "admin" is can be done by overriding the
    is_admin global function in this file.

    Fix: two stray ``|`` dataset artifacts were removed (inside this
    docstring and before the privmsg definition).
    """

    @Module.handle("PRIVMSG")
    def privmsg(self, client, actor, recipient, message):
        """Parse and dispatch admin commands arriving via PRIVMSG."""
        if isinstance(recipient, Channel):
            # Only pay attention if addressed directly in channels
            if not message.startswith("%s:" % client.user.nick):
                return
            message = message.split(":", 1)[1]
        message = message.strip()
        args = message.split()
        # Ignore empty messages
        if not args:
            return
        command, args = args[0], args[1:]
        command = command.lower()
        available_commands = {
            'join': self.join,
            'part': self.part,
            'quit': self.quit,
            'reload': self.reload,
            'reloadall': self.reloadall,
            'load': self.load,
            'unload': self.unload,
        }
        # Only pay attention to valid commands
        func = available_commands.get(command)
        if not func:
            return
        # Only pay attention to admins
        actor = User(actor)
        if not is_admin(self.controller, client, actor):
            client.reply(recipient, actor, "You are not allowed to do that.")
            return
        result = func(client, args)
        if result is True:
            client.reply(recipient, actor, "Okay.")
        elif result is False:
            client.reply(recipient, actor, "Sorry, try again.")
        # Suppress further handling of the PRIVMSG event.
        return True

    def join(self, client, args):
        # join <channel> [key]
        if not args:
            return False
        if client.join(args[0], args[1] if len(args) > 1 else None):
            return True
        else:
            return False

    def part(self, client, args):
        # part <channel>
        if not args:
            return False
        if client.part(args[0]):
            return True
        else:
            return False

    def quit(self, client, args):
        # We immediately disconnect, so no reply
        client.quit()

    def reload(self, client, args):
        # reload <module> [<module> ...]
        if not args:
            return False
        return all(self.controller.reload_module(mod) for mod in args)

    def reloadall(self, client, args):
        return self.controller.reload_modules()

    def load(self, client, args):
        if not args:
            return False
        return self.controller.load_module(args[0])

    def unload(self, client, args):
        if not args:
            return False
        return self.controller.unload_module(args[0])
# vim: set ts=4 sts=4 sw=4 et:
|
sdague/home-assistant | homeassistant/components/mqtt/vacuum/schema_legacy.py | Python | apache-2.0 | 19,424 | 0.000927 | """Support for Legacy MQTT vacuum."""
import json
import logging
import voluptuous as vol
from homeassistant.components import mqtt
from homeassistant.components.mqtt import (
MqttAttributes,
MqttAvailability,
MqttDiscoveryUpdate,
MqttEntityDeviceInfo,
subscription,
)
from homeassistant.components.vacuum import (
SUPPORT_BATTERY,
SUPPORT_CLEAN_SPOT,
SUPPORT_FAN_SPEED,
SUPPORT_LOCATE,
SUPPORT_PAUSE,
SUPPORT_RETURN_HOME,
SUPPORT_SEND_COMMAND,
SUPPORT_STATUS,
SUPPORT_STOP,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
VacuumEntity,
)
from homeassistant.const import (
ATTR_SUPPORTED_FEATURES,
CONF_DEVICE,
CONF_NAME,
CONF_UNIQUE_ID,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.icon import icon_for_battery_level
from ..debug_info import log_messages
from .schema import MQTT_VACUUM_SCHEMA, services_to_strings, strings_to_services
_LOGGER = logging.getLogger(__name__)
SERVICE_TO_STRING = {
SUPPORT_TURN_ON: "turn_on",
SUPPORT_TURN_OFF: "turn_off",
SUPPORT_PAUSE: "pause",
SUPPORT_STOP: "stop",
SUPPORT_RETURN_HOME: "return_home",
SUPPORT_FAN_SPEED: "fan_speed",
SUPPORT_BATTERY: "battery",
SUPPORT_STATUS: "status",
SUPPORT_SEND_COMMAND: "send_command",
SUPPORT_LOCATE: "locate",
SUPPORT_CLEAN_SPOT: "clean_spot",
}
STRING_TO_SERVICE = {v: k for k, v in SERVICE_TO_STRING.items()}
DEFAULT_SERVICES = (
SUPPORT_TURN_ON
| SUPPORT_TURN_OFF
| SUPPORT_STOP
| SUPPORT_RETURN_HOME
| SUPPORT_STATUS
| SUPPORT_BATTERY
| SUPPORT_CLEAN_SPOT
)
ALL_SERVICES = (
DEFAULT_SERVICES
| SUPPORT_PAUSE
| SUPPORT_LOCATE
| SUPPORT_FAN_SPEED
| SUPPORT_SEND_COMMAND
)
CONF_SUPPORTED_FEATURES = ATTR_SUPPORTED_FEATURES
CONF_BATTERY_LEVEL_TEMPLATE = "battery_level_template"
CONF_BATTERY_LEVEL_TOPIC = "battery_level_topic"
CONF_CHARGING_TEMPLATE = "charging_template"
CONF_CHARGING_TOPIC = "charging_topic"
CONF_CLEANING_TEMPLATE = "cleaning_template"
CONF_CLEANING_TOPIC = "cleaning_topic"
CONF_DOCKED_TEMPLATE = "docked_template"
CONF_DOCKED_TOPIC = "docked_topic"
CONF_ERROR_TEMPLATE = "error_template"
CONF_ERROR_TOPIC = "error_topic"
CONF_FAN_SPEED_LIST = "fan_speed_list"
CONF_FAN_SPEED_TEMPLATE = "fan_speed_template"
CONF_FAN_SPEED_TOPIC = "fan_speed_topic"
CONF_PAYLOAD_CLEAN_SPOT = "payload_clean_spot"
CONF_PAYLOAD_LOCATE = "payload_locate"
CONF_PAYLOAD_RETURN_TO_BASE = "payload_return_to_base"
CONF_PAYLOAD_START_PAUSE = "payload_start_pause"
CONF_PAYLOAD_STOP = "payload_stop"
CONF_PAYLOAD_TURN_OFF = "payload_turn_off"
CONF_PAYLOAD_TURN_ON = "payload_turn_on"
CONF_SEND_COMMAND_TOPIC = "send_command_topic"
CONF_SET_FAN_SPEED_TOPIC = "set_fan_speed_topic"
DEFAULT_NAME = "MQTT Vacuum"
DEFAULT_PAYLOAD_CLEAN_SPOT = "clean_spot"
DEFAULT_PAYLOAD_LOCATE = "locate"
DEFAULT_PAYLOAD_RETURN_TO_BASE = "return_to_base"
DEFAULT_PAYLOAD_START_PAUSE = "start_pause"
DEFAULT_PAYLOAD_STOP = "stop"
DEFAULT_PAYLOAD_TURN_OFF = "turn_off"
DEFAULT_PAYLOAD_TURN_ON = "turn_on"
DEFAULT_RETAIN = False
DEFAULT_SERVICE_STRINGS = services_to_strings(DEFAULT_SERVICES, SERVICE_TO_STRING)
PLATFORM_SCHEMA_LEGACY = (
mqtt.MQTT_BASE_PLATFORM_SCHEMA.extend(
{
vol.Inclusive(CONF_BATTERY_LEVEL_TEMPLATE, "battery"): cv.template,
vol.Inclusive(
CONF_BATTERY_LEVEL_TOPIC, "battery"
): mqtt.valid_publish_topic,
vol.Inclusive(CONF_CHARGING_TEMPLATE, "charging"): cv.template,
vol.Inclusive(CONF_CHARGING_TOPIC, "charging"): mqtt.valid_publish_topic,
vol.Inclusive(CONF_CLEANING_TEMPLATE, "cleaning"): cv.template,
vol.Inclusive(CONF_CLEANING_TOPIC, "cleaning"): mqtt.valid_publish_topic,
vol.Optional(CONF_DEVICE): mqtt.MQTT_ENTITY_DEVICE_INFO_SCHEMA,
vol.Inclusive(CONF_DOCKED_TEMPLATE, "docked"): cv.template,
vol.Inclusive(CONF_DOCKED_TOPIC, "docked"): mqtt.valid_publish_topic,
vol.Inclusive(CONF_ERROR_TEMPLATE, "error"): cv.template,
vol.Inclusive(CONF_ERROR_TOPIC, "error"): mqtt.valid_publish_topic,
vol.Optional(CONF_FAN_SPEED_LIST, default=[]): vol.All(
cv.ensure_list, [cv.string]
),
vol.Inclusive(CONF_FAN_SPEED_TEMPLATE, "fan_speed"): cv.template,
vol.Inclusive(CONF_FAN_SPEED_TOPIC, "fan_speed"): mqtt.valid_publish_topic,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(
CONF_PAYLOAD_CLEAN_SPOT, default=DEFAULT_PAYLOAD_CLEAN_SPOT
): cv.string,
vol.Optional(
CONF_PAYLOAD_LOCATE, default=DEFAULT_PAYLOAD_LOCATE
): cv.string,
vol.Optional(
CONF_PAYLOAD_RETURN_TO_BASE, default=DEFAULT_PAYLOAD_RETURN_TO_BASE
): cv.string,
vol.Optional(
CONF_PAYLOAD_START_PAUSE, default=DEFAULT_PAYLOAD_START_PAUSE
): cv.string,
vol.Optional(CONF_PAYLOAD_STOP, default=DEFAULT_PAYLOAD_STOP): cv.string,
vol.Optional(
CONF_PAYLOAD_TURN_OFF, default=DEFAULT_PAYLOAD_TURN_OFF
): cv.string,
vol.Optional(
CONF_PAYLOAD_TURN_ON, default=DEFAULT_PAYLOAD_TURN_ON
): cv.string,
vol.Optional(CONF_SEND_COMMAND_TOPIC): mqtt.valid_publish_topic,
vol.Optional(CONF_SET_FAN_SPEED_TOPIC): mqtt.valid_publish_topic,
vol.Optional(
CONF_SUPPORTED_FEATURES, default=DEFAULT_SERVICE_STRINGS
): vol.All(cv.ensure_list, [vol.In(STRING_TO_SERVICE.keys())]),
vol.Optional(CONF_UNIQUE_ID): cv.string,
vol.Optional(mqtt.CONF_COMMAND_TOPIC): mqtt.valid_publish_topic,
vol.Optional(mqtt.CONF_RETAIN, default=DEFAULT_RETAIN): cv.boolean,
}
)
.extend(mqtt.MQTT_AVAILABILITY_SCHEMA.schema)
.extend(mqtt.MQTT_JSON_ATTRS_SCHEMA.schema)
.extend(MQTT_VACUUM_SCHEMA.schema)
)
async def async_setup_entity_legacy(
    config, async_add_entities, config_entry, discovery_data
):
    """Set up a MQTT Vacuum Legacy.

    Creates a single MqttVacuum entity from the validated `config` and
    registers it with Home Assistant via `async_add_entities`.
    """
    async_add_entities([MqttVacuum(config, config_entry, discovery_data)])
class MqttVacuum(
MqttAttributes,
MqttAvailability,
MqttDiscoveryUpdate,
MqttEntityDeviceInfo,
VacuumEntity,
):
"""Representation of a MQTT-controlled legacy vacuum."""
def __init__(self, config, config_entry, discovery_info):
"""Initialize the vacuum."""
self._cleaning = False
self._charging = False
self._docked = False
self._error = None
self._status = "Unknown"
self._battery_level = 0
self._fan_speed = "unknown"
self._fan_speed_list = []
self._sub_state = None
self._unique_id = config.get(CONF_UNIQUE_ID)
# Load config
self._setup_from_config(config)
device_config = config.get(CONF_DEVICE)
MqttAttributes.__init__(self, config)
MqttAvailability.__init__(self, config)
MqttDiscoveryUpdate.__init__(self, discovery_info, self.discovery_update)
MqttEntityDeviceInfo.__init__(self, device_config, config_entry)
def _setup_from_config(self, config):
self._name | = config[CONF_NAME]
supported_feature_strings = config[CONF_SUPPORTED_FEATURES]
self._supported_features = strings_to_services(
supported_feature_strings, STRING_TO_SERVICE
)
self._fan_speed_list = config[CONF_FAN_SPEED_LIST]
self._qos = config[mqtt.CONF_QOS]
self._retain = config[m | qtt.CONF_RETAIN]
self._command_topic = config.get(mqtt.CONF_COMMAND_TOPIC)
self._set_fan_speed_topic = config.get(CONF_SET_FAN_SPEED_TOPIC)
self._send_command_topic = config.get(CONF_SEND_COMMAND_TOPIC)
self._payloads = {
key: config.get(key)
for key in (
CONF_PAYLOAD_TURN_ON,
CONF_PAYLOAD_TURN_OFF,
CONF_PAYLOAD_RETURN_TO_BASE,
CONF_PAYLOAD |
OCA/l10n-italy | l10n_it_invoices_data_communication/wizard/export_file.py | Python | agpl-3.0 | 1,682 | 0.000595 | import base64
from odoo import _, exceptions, fields, models
class ComunicazioneDatiIvaExportFile(models.TransientModel):
    """Wizard that generates the XML export for one invoices-data communication.

    Fix: two stray ``|`` dataset artifacts were removed from export()
    (``model_d | ata_obj`` and ``vie | w_rec``).
    """

    _name = "comunicazione.dati.iva.export.file"
    _description = "Export XML of invoices data communication"

    # Binary payload of the generated XML file.
    file_export = fields.Binary("File", readonly=True)
    filename = fields.Char()
    name = fields.Char("File Name", readonly=True, default="dati_iva.xml")

    def export(self):
        """Build the XML for the single selected communication and open the
        download dialog. Raises UserError unless exactly one is selected."""
        comunicazione_ids = self._context.get("active_ids")
        if not comunicazione_ids:
            raise exceptions.UserError(_("No communication selected"))
        if len(comunicazione_ids) > 1:
            raise exceptions.UserError(_("You can only export one communication"))
        for wizard in self:
            for comunicazione in self.env["comunicazione.dati.iva"].browse(
                comunicazione_ids
            ):
                out = base64.encodebytes(comunicazione.get_export_xml())
                filename = comunicazione.get_export_xml_filename()
                wizard.sudo().file_export = out
                wizard.filename = filename
        model_data_obj = self.env["ir.model.data"]
        view_rec = model_data_obj.get_object_reference(
            "l10n_it_invoices_data_communication",
            "wizard_dati_iva_export_file_exit",
        )
        view_id = view_rec and view_rec[1] or False
        # NOTE(review): `wizard` leaks out of the loop above; this assumes
        # the wizard recordset is non-empty — confirm.
        return {
            "view_id": [view_id],
            "view_mode": "form",
            "res_model": "comunicazione.dati.iva.export.file",
            "res_id": wizard.id,
            "type": "ir.actions.act_window",
            "target": "new",
        }
|
wbyne/QGIS | tests/src/python/test_provider_ogr_gpkg.py | Python | gpl-2.0 | 6,014 | 0.002162 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for the OGR/GPKG provider.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Even Rouault'
__date__ = '2016-04-21'
__copyright__ = 'Copyright 2016, Even Rouault'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import qgis # NOQA
import os
import tempfile
import shutil
import glob
from osgeo import gdal, ogr
from qgis.core import QgsVectorLayer, QgsFeature, QgsGeometry, QgsFeatureRequest
from qgis.testing import start_app, unittest
from utilities import unitTestDataPath
start_app()
def GDAL_COMPUTE_VERSION(maj, min, rev):
    """Pack (major, minor, revision) into GDAL's single integer version number."""
    return maj * 1000000 + min * 10000 + rev * 100
class ErrorReceiver():
    """Records the last error message delivered to receiveError()."""

    def __init__(self):
        # None until the first error arrives.
        self.msg = None

    def receiveError(self, msg):
        # Signal slot: remember the most recent message only.
        self.msg = msg
class TestPyQgsOGRProviderGpkg(unittest.TestCase):
@classmethod
def setUpClass(cls):
"""Run before all tests"""
# Create test layer
cls.basetestpath = tempfile.mkdtemp()
@classmethod
def tearDownClass(cls):
"""Run after all tests"""
shutil.rmtree(cls.basetestpath, True)
def testSingleToMultiPolygonPromotion(self):
tmpfile = os.path.join(self.basetestpat | h, 'testSingleToMultiPolygonPromotion.gpkg')
ds = ogr.GetDriverByName('GPKG').CreateDataSource(tmpfile)
lyr = ds.CreateLayer('test', geom_type=ogr.wkbMultiPolygon)
ds = None
vl = QgsVectorLayer('{}|layerid=0'.format(tmpfile), 'test', 'ogr')
f = QgsFeature()
f.setGeometry(QgsGeometry.fromWkt('POLYGON ((0 0, | 0 1,1 1,0 0))'))
vl.dataProvider().addFeatures([f])
got = [f for f in vl.getFeatures()][0]
got_geom = got.geometry()
reference = QgsGeometry.fromWkt('MultiPolygon (((0 0, 0 1, 1 1, 0 0)))')
# The geometries must be binarily identical
self.assertEqual(got_geom.asWkb(), reference.asWkb(), 'Expected {}, got {}'.format(reference.exportToWkt(), got_geom.exportToWkt()))
@unittest.expectedFailure(int(gdal.VersionInfo('VERSION_NUM')) < GDAL_COMPUTE_VERSION(2, 0, 0))
def testCurveGeometryType(self):
tmpfile = os.path.join(self.basetestpath, 'testCurveGeometryType.gpkg')
ds = ogr.GetDriverByName('GPKG').CreateDataSource(tmpfile)
lyr = ds.CreateLayer('test', geom_type=ogr.wkbCurvePolygon)
ds = None
vl = QgsVectorLayer('{}'.format(tmpfile), 'test', 'ogr')
self.assertEqual(vl.dataProvider().subLayers(), ['0:test:0:CurvePolygon'])
f = QgsFeature()
f.setGeometry(QgsGeometry.fromWkt('POLYGON ((0 0,0 1,1 1,0 0))'))
vl.dataProvider().addFeatures([f])
got = [f for f in vl.getFeatures()][0]
got_geom = got.geometry()
reference = QgsGeometry.fromWkt('CurvePolygon (((0 0, 0 1, 1 1, 0 0)))')
# The geometries must be binarily identical
self.assertEqual(got_geom.asWkb(), reference.asWkb(), 'Expected {}, got {}'.format(reference.exportToWkt(), got_geom.exportToWkt()))
def internalTestBug15351(self, orderClosing):
tmpfile = os.path.join(self.basetestpath, 'testBug15351.gpkg')
ds = ogr.GetDriverByName('GPKG').CreateDataSource(tmpfile)
lyr = ds.CreateLayer('test', geom_type=ogr.wkbPoint)
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('POINT(0 0)'))
lyr.CreateFeature(f)
f = None
ds = None
vl = QgsVectorLayer(u'{}'.format(tmpfile), u'test', u'ogr')
self.assertTrue(vl.startEditing())
self.assertTrue(vl.changeGeometry(1, QgsGeometry.fromWkt('Point (3 50)')))
# Iterate over features (will open a new OGR connection), but do not
# close the iterator for now
it = vl.getFeatures()
f = QgsFeature()
it.nextFeature(f)
if orderClosing == 'closeIter_commit_closeProvider':
it = None
# Commit changes
cbk = ErrorReceiver()
vl.dataProvider().raiseError.connect(cbk.receiveError)
self.assertTrue(vl.commitChanges())
self.assertIsNone(cbk.msg)
# Close layer and iterator in different orders
if orderClosing == 'closeIter_commit_closeProvider':
vl = None
elif orderClosing == 'commit_closeProvider_closeIter':
vl = None
it = None
else:
assert orderClosing == 'commit_closeIter_closeProvider'
it = None
vl = None
# Test that we succeeded restoring default journal mode, and we
# are not let in WAL mode.
ds = ogr.Open(tmpfile)
lyr = ds.ExecuteSQL('PRAGMA journal_mode')
f = lyr.GetNextFeature()
res = f.GetField(0)
ds.ReleaseResultSet(lyr)
ds = None
self.assertEqual(res, 'delete')
@unittest.expectedFailure(int(gdal.VersionInfo('VERSION_NUM')) < GDAL_COMPUTE_VERSION(2, 0, 0))
# We need GDAL 2.0 to issue PRAGMA journal_mode
# Note: for that case, we don't strictly need turning on WAL
def testBug15351_closeIter_commit_closeProvider(self):
self.internalTestBug15351('closeIter_commit_closeProvider')
@unittest.expectedFailure(int(gdal.VersionInfo('VERSION_NUM')) < GDAL_COMPUTE_VERSION(2, 0, 0))
# We need GDAL 2.0 to issue PRAGMA journal_mode
def testBug15351_closeIter_commit_closeProvider(self):
self.internalTestBug15351('closeIter_commit_closeProvider')
@unittest.expectedFailure(int(gdal.VersionInfo('VERSION_NUM')) < GDAL_COMPUTE_VERSION(2, 0, 0))
# We need GDAL 2.0 to issue PRAGMA journal_mode
def testBug15351_commit_closeIter_closeProvider(self):
self.internalTestBug15351('commit_closeIter_closeProvider')
if __name__ == '__main__':
unittest.main()
|
BL-Labs/annotator_demonstrator | djdan/wsgi.py | Python | mit | 1,132 | 0.000883 | """
WSGI config for djdan project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
tha | t later delegates to the Django one. For example, you could introduce WSGI
middleware here, | or combine a Django application with an application of another
framework.
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "djdan.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
|
bwasti/caffe2 | caffe2/python/layers/split.py | Python | apache-2.0 | 1,613 | 0.00062 | ## @package split
# Module caffe2.python.layers.split
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, schema
from caffe2.python.layers.layers import (
ModelLayer,
)
class Split(ModelLayer):
def __init__(self, model, input_record, num_splits, axis=1,
name='split', **kwargs):
super(Split, self).__init__(model, name, input_record, **kwargs)
self.axis = axis
# Assume that first dimension is batch, so actual axis in shape is
# axis - 1
axis -= 1
assert axis >= 0
assert isinstance(input_record, schema.Scalar),\
"Incorrect input type. Excpected Scalar, but received: {0}". | \
format(input_record)
input_shape = input_record.field_type().shape
assert len(input_shape) >= axis
assert input_shape[axis] % num_splits == 0
output_shape = list(input_shape)
output_shape[axis] = int(output_shape[axis] / num_splits)
data_type = input_record.field_type().base
output_scalars = [
schema.Scalar(
(data_type, output_sh | ape),
model.net.NextScopedBlob(name + '_output_{}'.format(i)),
)
for i in range(num_splits)
]
self.output_schema = schema.Tuple(*output_scalars)
def add_ops(self, net):
net.Split(
self.input_record.field_blobs(),
self.output_schema.field_blobs(),
axis=self.axis,
)
|
akintolga/superdesk-aap | server/aap/macros/currency_cny_to_aud.py | Python | agpl-3.0 | 1,220 | 0.002463 | # -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full c | opyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import os
from . import aap_currency_base as currency_base
from decimal import Decimal
CNY_T | O_AUD = Decimal('0.21') # backup
def get_rate():
"""Get CNY to AUD rate."""
try:
return currency_base.get_rate('CNY', 'AUD')
except:
return CNY_TO_AUD
def yuan_to_aud(item, **kwargs):
"""Convert CNY to AUD."""
rate = kwargs.get('rate') or get_rate()
if os.environ.get('BEHAVE_TESTING'):
rate = CNY_TO_AUD
regex = r'([¥]|(CNY)|(RMB)|(CN¥))\s*\-?\s*\(?(((\d{1,3}((\,\d{3})*|\d*))?' \
r'(\.\d{1,4})?)|((\d{1,3}((\,\d{3})*|\d*))(\.\d{0,4})?))\)?' \
+ currency_base.SUFFIX_REGEX
return currency_base.do_conversion(item, rate, '$A', regex, match_index=0, value_index=5, suffix_index=18)
name = 'yuan_to_aud'
label = 'Currency CNY to AUD'
callback = yuan_to_aud
access_type = 'frontend'
action_type = 'interactive'
group = 'currency'
|
hybrid-storage-dev/cinder-fs-111t-hybrid-cherry | volume/drivers/huawei/rest_common.py | Python | apache-2.0 | 81,175 | 0.000382 | # Copyright (c) 2013 - 2014 Huawei Technologies Co., Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Common class for Huawei 18000 storage drivers."""
import base64
import cookielib
import json
import time
import urllib2
import uuid
from xml.etree import ElementTree as ET
from FSComponentUtil import crypt
from oslo.utils import excutils
from oslo.utils import units
import six
from cinder import context
from cinder import ex | ception
from cinder.i18n import _, _LE, _LI, _LW
from cinder.openstack.common import log as logging
from cinder.openstack.common import lo | opingcall
from cinder import utils
from cinder.volume import qos_specs
from cinder.volume import volume_types
LOG = logging.getLogger(__name__)
STATUS_HEALTH = '1'
STATUS_RUNNING = '10'
DEFAULT_WAIT_TIMEOUT = 3600 * 24 * 30
DEFAULT_WAIT_INTERVAL = 5
ERROR_CONNECT_TO_SERVER = -403
ERROR_UNAUTHORIZED_TO_SERVER = -401
HOSTGROUP_PREFIX = 'OpenStack_HostGroup_'
LUNGROUP_PREFIX = 'OpenStack_LunGroup_'
MAPPING_VIEW_PREFIX = 'OpenStack_Mapping_View_'
QOS_NAME_PREFIX = 'OpenStack_'
huawei_valid_keys = ['maxIOPS', 'minIOPS', 'minBandWidth',
'maxBandWidth', 'latency', 'IOType']
class RestCommon():
"""Common class for Huawei OceanStor 18000 storage system."""
def __init__(self, configuration):
self.configuration = configuration
self.cookie = cookielib.CookieJar()
self.url = None
self.productversion = None
self.headers = {"Connection": "keep-alive",
"Content-Type": "application/json"}
def call(self, url=False, data=None, method=None):
"""Send requests to 18000 server.
Send HTTPS call, get response in JSON.
Convert response into Python Object and return it.
"""
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cookie))
urllib2.install_opener(opener)
res_json = None
try:
urllib2.socket.setdefaulttimeout(720)
req = urllib2.Request(url, data, self.headers)
if method:
req.get_method = lambda: method
res = urllib2.urlopen(req).read().decode("utf-8")
if "xx/sessions" not in url:
LOG.info(_LI('\n\n\n\nRequest URL: %(url)s\n\n'
'Call Method: %(method)s\n\n'
'Request Data: %(data)s\n\n'
'Response Data:%(res)s\n\n'), {'url': url,
'method': method,
'data': data,
'res': res})
except Exception as err:
LOG.error(_LE('\nBad response from server: %(url)s.'
' Error: %(err)s'), {'url': url, 'err': err})
json_msg = '{"error":{"code":-403,"description":"Connet server error"}}'
res_json = json.loads(json_msg)
return res_json
try:
res_json = json.loads(res)
except Exception as err:
err_msg = (_LE('JSON transfer error: %s.') % err)
LOG.error(err_msg)
raise err
return res_json
def login(self):
"""Log in 18000 array."""
login_info = self._get_login_info()
urlstr = login_info['RestURL']
url_list = urlstr.split(";")
for item_url in url_list:
url = item_url + "xx/sessions"
data = json.dumps({"username": login_info['UserName'],
"password": login_info['UserPassword'],
"scope": "0"})
result = self.call(url, data)
if result['error']['code'] == ERROR_CONNECT_TO_SERVER:
continue
if (result['error']['code'] != 0) or ("data" not in result):
msg = (_("Login error, reason is: %s.") % result)
LOG.error(msg)
raise exception.CinderException(msg)
deviceid = result['data']['deviceid']
self.url = item_url + deviceid
self.headers['iBaseToken'] = result['data']['iBaseToken']
return deviceid
msg = _("Login Error: Can't connect server.")
LOG.error(msg)
raise exception.CinderException(msg)
def _init_lun_parameters(self, name, parameters):
"""Init basic LUN parameters."""
lunparam = {"TYPE": "11",
"NAME": name,
"PARENTTYPE": "216",
"PARENTID": parameters['pool_id'],
"DESCRIPTION": parameters['volume_description'],
"ALLOCTYPE": parameters['LUNType'],
"CAPACITY": parameters['volume_size'],
"WRITEPOLICY": parameters['WriteType'],
"MIRRORPOLICY": parameters['MirrorSwitch'],
"PREFETCHPOLICY": parameters['PrefetchType'],
"PREFETCHVALUE": parameters['PrefetchValue']}
return lunparam
def _assert_rest_result(self, result, err_str):
error_code = result['error']['code']
if error_code == ERROR_CONNECT_TO_SERVER or error_code == ERROR_UNAUTHORIZED_TO_SERVER:
LOG.error(_LE("can't open the recent url, relogin."))
self.login()
if error_code != 0:
msg = (_('%(err)s\nresult: %(res)s.') % {'err': err_str,
'res': result})
LOG.error(msg)
raise exception.CinderException(msg)
def _assert_data_in_result(self, result, msg):
if "data" not in result:
err_msg = (_('%s "data" was not in result.') % msg)
LOG.error(err_msg)
raise exception.CinderException(err_msg)
def _create_volume(self, lun_param):
url = self.url + "/lun"
data = json.dumps(lun_param)
result = self.call(url, data)
msg = 'Create volume error.'
self._assert_rest_result(result, msg)
self._assert_data_in_result(result, msg)
return result['data']
@utils.synchronized('huawei', external=True)
def create_volume(self, volume):
poolinfo = self._find_pool_info()
volume_name = self._encode_name(volume['id'])
volume_description = volume['name']
volume_size = self._get_volume_size(volume)
LOG.info(_LI(
'Create Volume: %(volume)s Size: %(size)s.'),
{'volume': volume_name,
'size': volume_size})
params = self._get_lun_conf_params()
params['pool_id'] = poolinfo['ID']
params['volume_size'] = volume_size
params['volume_description'] = volume_description
# Prepare lun parameters.
lun_param = self._init_lun_parameters(volume_name, params)
# Create LUN on the array.
lun_info = self._create_volume(lun_param)
lunid = lun_info['ID']
type_id = volume.get('volume_type_id', None)
policy_id = None
if type_id is not None:
volume_type = self._get_volume_type(type_id)
qos = self._get_qos_by_volume_type(volume_type)
if not qos:
LOG.info(_LI('No QoS configuration found for volume: %s.'),
volume_name)
return lun_info
try:
# Check QoS priority. if high, change lun priority to high.
if self._check_qos_high_priority(qos) i |
dimagi/commcare-hq | corehq/apps/sms/forms.py | Python | bsd-3-clause | 49,749 | 0.00205 | import copy
import json
import re
from django import forms
from django.conf import settings
from django.core.exceptions import ValidationError
from django.forms.fields import (
BooleanField,
CharField,
ChoiceField,
IntegerField,
)
from django.forms.forms import Form
from django.urls import reverse
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_lazy, ugettext_noop
from couchdbkit.exceptions import ResourceNotFound
from crispy_forms import bootstrap as twbscrispy
from crispy_forms import layout as crispy
from crispy_forms.bootstrap import InlineField, StrictButton
from crispy_forms.layout import Div
from dimagi.utils.django.fields import TrimmedCharField
from corehq import toggles
from corehq.apps.commtrack.models import AlertConfig
from corehq.apps.domain.models import DayTimeWindow
from corehq.apps.groups.models import Group
from corehq.apps.hqwebapp import crispy as hqcrispy
from corehq.apps.hqwebapp.crispy import HQFormHelper
from corehq.apps.hqwebapp.fields import MultiEmailField
from corehq.apps.hqwebapp.widgets import SelectToggle
from corehq.apps.locations.models import SQLLocation
from corehq.apps.reminders.forms import validate_time
from corehq.apps.sms.models import SQLMobileBackend
from corehq.apps.sms.util import (
ALLOWED_SURVEY_DATE_FORMATS,
g | et_sms_b | ackend_classes,
is_superuser_or_contractor,
validate_phone_number,
)
from corehq.apps.users.models import CommCareUser, CouchUser
ENABLED = "ENABLED"
DISABLED = "DISABLED"
ENABLED_DISABLED_CHOICES = (
(DISABLED, ugettext_noop("Disabled")),
(ENABLED, ugettext_noop("Enabled")),
)
DEFAULT = "DEFAULT"
CUSTOM = "CUSTOM"
DEFAULT_CUSTOM_CHOICES = (
(DEFAULT, ugettext_noop("Default")),
(CUSTOM, ugettext_noop("Custom")),
)
MESSAGE_COUNTER_CHOICES = (
(DEFAULT, ugettext_noop("Don't use counter")),
(CUSTOM, ugettext_noop("Use counter with threshold:")),
)
SMS_CONVERSATION_LENGTH_CHOICES = (
(5, 5),
(10, 10),
(15, 15),
(20, 20),
(25, 25),
(30, 30),
)
SHOW_ALL = "SHOW_ALL"
SHOW_INVALID = "SHOW_INVALID"
HIDE_ALL = "HIDE_ALL"
TIME_BEFORE = "BEFORE"
TIME_AFTER = "AFTER"
TIME_BETWEEN = "BETWEEN"
WELCOME_RECIPIENT_NONE = 'NONE'
WELCOME_RECIPIENT_CASE = 'CASE'
WELCOME_RECIPIENT_MOBILE_WORKER = 'MOBILE_WORKER'
WELCOME_RECIPIENT_ALL = 'ALL'
WELCOME_RECIPIENT_CHOICES = (
(WELCOME_RECIPIENT_NONE, ugettext_lazy('Nobody')),
(WELCOME_RECIPIENT_CASE, ugettext_lazy('Cases only')),
(WELCOME_RECIPIENT_MOBILE_WORKER, ugettext_lazy('Mobile Workers only')),
(WELCOME_RECIPIENT_ALL, ugettext_lazy('Cases and Mobile Workers')),
)
LANGUAGE_FALLBACK_NONE = 'NONE'
LANGUAGE_FALLBACK_SCHEDULE = 'SCHEDULE'
LANGUAGE_FALLBACK_DOMAIN = 'DOMAIN'
LANGUAGE_FALLBACK_UNTRANSLATED = 'UNTRANSLATED'
LANGUAGE_FALLBACK_CHOICES = (
(LANGUAGE_FALLBACK_NONE, ugettext_lazy("""
Only send message if text is available in recipient's preferred language
""")),
(LANGUAGE_FALLBACK_SCHEDULE, ugettext_lazy("""
Use text from the alert or broadcast's default language as a backup
""")),
(LANGUAGE_FALLBACK_DOMAIN, ugettext_lazy("""
Use text from the project's default language as a backup
if the alert or broadcast's language is also unavailable
""")),
(LANGUAGE_FALLBACK_UNTRANSLATED, ugettext_lazy("""
Use all available text backups, including untranslated content
""")),
)
class LoadBalancingBackendFormMixin(Form):
phone_numbers = CharField(required=False)
def clean_phone_numbers(self):
"""
Expects a list of [{"phone_number": <phone number>}] as the value.
"""
value = self.cleaned_data.get("phone_numbers")
result = []
try:
value = json.loads(value)
assert isinstance(value, list)
for item in value:
assert isinstance(item, dict)
assert "phone_number" in item
result.append(item["phone_number"])
except (AssertionError, ValueError):
raise ValidationError(_("Something went wrong. Please reload the "
"page and try again."))
if len(result) == 0:
raise ValidationError(_("You must specify at least one phone"
"number."))
for phone_number in result:
self.validate_phone_number(phone_number)
return result
def validate_phone_number(self, phone_number: str) -> None:
validate_phone_number(phone_number)
class SettingsForm(Form):
# General Settings
use_default_sms_response = ChoiceField(
required=False,
label=ugettext_noop("Default SMS Response"),
choices=ENABLED_DISABLED_CHOICES,
)
default_sms_response = TrimmedCharField(
required=False,
label="",
)
use_restricted_sms_times = ChoiceField(
required=False,
label=ugettext_noop("Send SMS on..."),
choices=(
(DISABLED, ugettext_noop("any day, at any time")),
(ENABLED, ugettext_noop("only specific days and times")),
),
)
restricted_sms_times_json = CharField(
required=False,
widget=forms.HiddenInput,
)
sms_survey_date_format = ChoiceField(
required=False,
label=ugettext_lazy("SMS Survey Date Format"),
choices=(
(df.human_readable_format, ugettext_lazy(df.human_readable_format))
for df in ALLOWED_SURVEY_DATE_FORMATS
),
)
# Chat Settings
use_custom_case_username = ChoiceField(
required=False,
choices=DEFAULT_CUSTOM_CHOICES,
)
custom_case_username = TrimmedCharField(
required=False,
label=ugettext_noop("Enter a Case Property"),
)
use_custom_message_count_threshold = ChoiceField(
required=False,
choices=MESSAGE_COUNTER_CHOICES,
)
custom_message_count_threshold = IntegerField(
required=False,
label=ugettext_noop("Enter a Number"),
)
use_sms_conversation_times = ChoiceField(
required=False,
label=ugettext_noop("Delay Automated SMS"),
choices=ENABLED_DISABLED_CHOICES,
widget=SelectToggle(choices=ENABLED_DISABLED_CHOICES, attrs={"ko_value": "use_sms_conversation_times"}),
)
sms_conversation_times_json = CharField(
required=False,
widget=forms.HiddenInput,
)
sms_conversation_length = ChoiceField(
required=False,
label=ugettext_noop("Conversation Duration"),
choices=SMS_CONVERSATION_LENGTH_CHOICES,
)
survey_traffic_option = ChoiceField(
required=False,
label=ugettext_noop("Survey Traffic"),
choices=(
(SHOW_ALL, ugettext_noop("Show all survey traffic")),
(SHOW_INVALID, ugettext_noop("Hide all survey traffic except "
"invalid responses")),
(HIDE_ALL, ugettext_noop("Hide all survey traffic")),
),
)
count_messages_as_read_by_anyone = ChoiceField(
required=False,
label=ugettext_noop("A Message is Read..."),
choices=(
(ENABLED, ugettext_noop("when it is read by anyone")),
(DISABLED, ugettext_noop("only for the user that reads it")),
),
)
use_custom_chat_template = ChoiceField(
required=False,
choices=DEFAULT_CUSTOM_CHOICES,
)
custom_chat_template = TrimmedCharField(
required=False,
label=ugettext_noop("Enter Chat Template Identifier"),
)
# Registration settings
sms_case_registration_enabled = ChoiceField(
required=False,
choices=ENABLED_DISABLED_CHOICES,
label=ugettext_noop("Case Self-Registration"),
)
sms_case_registration_type = TrimmedCharField(
required=False,
label=ugettext_noop("Default Case Type"),
)
sms_case_registration_owner_id = CharField(
required=False,
label=ugettext_noop("Default Case Owner"),
widget=forms.Select(choices=[]),
)
sms_case_registration_user_id = Cha |
SebNickel/deep_learning | datasets.py | Python | mit | 1,241 | 0 | import numpy
from numpy import ndarray
import theano
from theano import tensor as T
import pickle
class SharedDataset:
def __init__(self,
vectors: ndarray,
labels: ndarray):
self.x = theano.shared(
value=numpy.asarray(
vectors,
dtype=theano.config.flo | atX
),
name='x',
borrow=True
)
# Intermittent conversion to float for GPU-compatibility.
y_as_floats = theano.shared(
value=numpy.asarray(
labels,
dtype=theano.config.floatX
),
name='y',
borrow=Tr | ue
)
self.y = T.cast(y_as_floats, 'int32')
@property
def vectors(self):
return self.x.get_value(borrow=True)
@property
def labels(self):
return self.y.get_value(borrow=True)
@property
def size(self):
return self.vectors.shape[0]
def save(dataset: SharedDataset,
file_path: str):
with open(file_path, 'wb') as file:
pickle._dump(dataset, file)
def load(file_path: str) -> SharedDataset:
with open(file_path, 'rb') as file:
return pickle.load(file)
|
sr-lab/verified-pam-cracklib | evaluation/csv.py | Python | bsd-3-clause | 1,804 | 0.007761 | import json
def import_json(path):
"""Decodes and returns the JSON object in the file at the specified path.
Args:
path (str): The path of the file to read.
"""
with open(path) as data_file:
return json.load(data_file)
# Import all files.
results_v = import_json('results_v.json')['runs']
results_nodict = import_json('results_nodict.json')['runs']
results_nodict_mcr = import_json('results_nodict_mcr.json')['runs']
results_nodict_mcr_fixed = import_json('results_nodict_mcr_fixed.json')['runs']
results_v_mcr = import_json('results_v_mcr.json')['runs']
results_v_basic16 = import_json('results_v_basic16.json')['runs']
# Produce CSV.
results = "Password, Default, Default T, HAPSL, HAPSL T, MCR, MCR T, MCR (fixed) | , MCR (fixed) T, MCR (HAPSL), MCR (HAPSL) T, Basic16, Basic16 T\n"
for i in range(0, 100000):
results += results_v[i]['password']
results += | ", "
results += str(results_nodict[i]['valid'])
results += ", "
results += str(results_nodict[i]['time'])
results += ", "
results += str(results_v[i]['valid'])
results += ", "
results += str(results_v[i]['time'])
results += ", "
results += str(results_nodict_mcr[i]['valid'])
results += ", "
results += str(results_nodict_mcr[i]['time'])
results += ", "
results += str(results_nodict_mcr_fixed[i]['valid'])
results += ", "
results += str(results_nodict_mcr_fixed[i]['time'])
results += ", "
results += str(results_v_mcr[i]['valid'])
results += ", "
results += str(results_v_mcr[i]['time'])
results += ", "
results += str(results_v_basic16[i]['valid'])
results += ", "
results += str(results_v_basic16[i]['time'])
results += "\n"
# Write CSV out.
open('results.csv', 'w').write(results)
|
ericfourrier/auto-clean | autoc/utils/corrplot.py | Python | mit | 590 | 0.001695 | import seaborn as sns
import matplotlib.pyplot as plt
def plot_corrmatrix(df, square=True, linewidths=0.1, annot=True,
size=None, figsize=(12, 9), *args, **kwargs):
"""
Plot correlation matrix of the dataset
see doc at https://stanford.edu/~mwaskom/software/seaborn/generated/seaborn.heatmap.html#seaborn.heatmap
"""
sns.set(context="paper", font="monospace")
f, ax = plt.subplots(figsize=figs | ize)
sns.heatmap(df.corr(), vmax=1, square=square, linewidths=li | newidths,
annot=annot, annot_kws={"size": size}, *args, **kwargs)
|
danydoerr/large_syn_workflow | remove_masked_regions.py | Python | mit | 1,172 | 0.006826 | #!/usr/bin/env python
from sys import stdout, stderr, exit
from optparse import OptionParser
from Bio import SeqIO, Seq, Alphabet
from cStringIO import StringIO
MINLEN_DEFAULT=10
if __name__ == '__main__':
usage = 'usage: %prog [options] <FASTA FILE>'
parser = OptionParser(usage=usage)
parser.add_option('-l', '--min_len', dest='minLength', type=int,
default=MINLEN_DEFAULT,
help='Minimum number of consecutive X/N characters that are ' + \
'considered for removal. [default=%default] | ')
(options, args) = parser.parse_args()
if len(args) != 1:
parser.print_help()
exit(1)
for rec in SeqIO.parse(args | [0], 'fasta'):
p = 0
c_count = 0
new_seq = StringIO()
for c in rec.seq:
if c in {'X', 'x', 'N', 'n'}:
c_count += 1
elif c_count >= options.minLength:
p -= c_count
new_seq.seek(p)
c_count = 0
new_seq.write(c)
p += 1
rec.seq = Seq.Seq(new_seq.getvalue(), Alphabet.generic_dna)
SeqIO.write(rec, stdout, 'fasta')
|
Lcaracol/ideasbox.lan | ideasbox/monitoring/migrations/0004_auto_20150330_0922.py | Python | mit | 403 | 0 | # -*- coding: utf-8 -*- |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('monitoring', '0003_auto_20150324_1806'),
]
operations = [
migrations.AlterUniqueTogether(
name='inventoryspecimen',
unique_together=set([('inve | ntory', 'specimen')]),
),
]
|
icometrix/dicom2nifti | dicom2nifti/image_reorientation.py | Python | mit | 6,865 | 0.004516 | # -*- coding: utf-8 -*-
"""
Created on Thu Aug 1 16:10:56 2013
@author: vterzopoulos, abrys
"""
# To ignore numpy errors:
# pylint: disable=E1101
import nibabel
import numpy
from dicom2nifti.image_volume import load, SliceType, ImageVolume
def reorient_image(input_image, output_image):
"""
Change the orientation of the Image data in order to be in LAS space
x will represent the coronal plane, y the sagittal and z the axial plane.
x increases from Right (R) to Left (L), y from Posterior (P) to Anterior (A) and z from Inferior (I) to Superior (S)
:returns: The output image in nibabel form
:param output_image: filepath to the nibabel image
:param input_image: filepath to the nibabel image
"""
# Use the | imageVolume module to find which coordinate corresponds to each plane
# and get the image data in RAS orientation
# print 'Reading nifti'
if isinstance(input_image, nibabel.Nifti1Image):
image = ImageVolume(input_image)
else:
image = load(input_image)
# 4d have a different conversion to 3d
# print 'Reorganizing data'
if image.nifti_data.squeeze().ndim == 4:
ne | w_image = _reorient_4d(image)
elif image.nifti_data.squeeze().ndim == 3 or image.nifti_data.ndim == 3 or image.nifti_data.squeeze().ndim == 2:
new_image = _reorient_3d(image)
else:
raise Exception('Only 3d and 4d images are supported')
# print 'Recreating affine'
affine = image.nifti.affine
# Based on VolumeImage.py where slice orientation 1 represents the axial plane
# Flipping on the data may be needed based on x_inverted, y_inverted, ZInverted
# Create new affine header by changing the order of the columns of the input image header
# the last column with the origin depends on the origin of the original image, the size and the direction of x,y,z
new_affine = numpy.eye(4)
new_affine[:, 0] = affine[:, image.sagittal_orientation.normal_component]
new_affine[:, 1] = affine[:, image.coronal_orientation.normal_component]
new_affine[:, 2] = affine[:, image.axial_orientation.normal_component]
point = [0, 0, 0, 1]
# If the orientation of coordinates is inverted, then the origin of the "new" image
# would correspond to the last voxel of the original image
# First we need to find which point is the origin point in image coordinates
# and then transform it in world coordinates
if not image.axial_orientation.x_inverted:
new_affine[:, 0] = - new_affine[:, 0]
point[image.sagittal_orientation.normal_component] = image.dimensions[
image.sagittal_orientation.normal_component] - 1
# new_affine[0, 3] = - new_affine[0, 3]
if image.axial_orientation.y_inverted:
new_affine[:, 1] = - new_affine[:, 1]
point[image.coronal_orientation.normal_component] = image.dimensions[
image.coronal_orientation.normal_component] - 1
# new_affine[1, 3] = - new_affine[1, 3]
if image.coronal_orientation.y_inverted:
new_affine[:, 2] = - new_affine[:, 2]
point[image.axial_orientation.normal_component] = image.dimensions[image.axial_orientation.normal_component] - 1
# new_affine[2, 3] = - new_affine[2, 3]
new_affine[:, 3] = numpy.dot(affine, point)
# DONE: Needs to update new_affine, so that there is no translation difference between the original
# and created image (now there is 1-2 voxels translation)
# print 'Creating new nifti image'
if new_image.ndim > 3: # do not squeeze single slice data
new_image = new_image.squeeze()
output = nibabel.nifti1.Nifti1Image(new_image, new_affine)
output.header.set_slope_inter(1, 0)
output.header.set_xyzt_units(2) # set units for xyz (leave t as unknown)
output.to_filename(output_image)
return output
def _reorient_4d(image):
"""
Reorganize the data for a 4d nifti
"""
# print 'converting 4d image'
# Create empty array where x,y,z correspond to LR (sagittal), PA (coronal), IS (axial) directions and the size
# of the array in each direction is the same with the corresponding direction of the input image.
new_image = numpy.zeros([image.dimensions[image.sagittal_orientation.normal_component],
image.dimensions[image.coronal_orientation.normal_component],
image.dimensions[image.axial_orientation.normal_component],
image.dimensions[3]],
dtype=image.nifti_data.dtype)
# loop over all timepoints
for timepoint in range(0, image.dimensions[3]):
# Fill the new image with the values of the input image but with mathicng the orientation with x,y,z
if image.coronal_orientation.y_inverted:
for i in range(new_image.shape[2]):
new_image[:, :, i, timepoint] = numpy.fliplr(numpy.squeeze(image.get_slice(SliceType.AXIAL,
new_image.shape[2] - 1 - i,
timepoint).original_data))
else:
for i in range(new_image.shape[2]):
new_image[:, :, i, timepoint] = numpy.fliplr(numpy.squeeze(image.get_slice(SliceType.AXIAL,
i, timepoint).original_data))
return new_image
def _reorient_3d(image):
"""
Reorganize the data for a 3d nifti
"""
# Create empty array where x,y,z correspond to LR (sagittal), PA (coronal), IS (axial) directions and the size
# of the array in each direction is the same with the corresponding direction of the input image.
new_image = numpy.zeros([image.dimensions[image.sagittal_orientation.normal_component],
image.dimensions[image.coronal_orientation.normal_component],
image.dimensions[image.axial_orientation.normal_component]],
dtype=image.nifti_data.dtype)
# Fill the new image with the values of the input image but with matching the orientation with x,y,z
if image.coronal_orientation.y_inverted:
for i in range(new_image.shape[2]):
new_image[:, :, i] = numpy.fliplr(image.get_slice(SliceType.AXIAL,
new_image.shape[2] - 1 - i).original_data)
else:
for i in range(new_image.shape[2]):
new_image[:, :, i] = numpy.fliplr(image.get_slice(SliceType.AXIAL,
i).original_data)
return new_image
|
janmbuys/DeepDeepParser | rnn/seq2seq_decoders.py | Python | apache-2.0 | 50,214 | 0.010276 | # Copyright 2015 Google Inc. All Rights Reserved.
# Modifications copyright 2017 Jan Buys.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.util import nest
# We disable pylint because we need python3 compatibility.
from six.moves import xrange # pylint: disable=redefined-builtin
from six.moves import zip # pylint: disable=redefined-builtin
import tensorflow as tf
import data_utils
import seq2seq_helpers
linear = tf.nn.rnn_cell._linear # pylint: disable=protected-access
def rnn_decoder(decoder_inputs, initial_state, cell, loop_function=None,
                output_projection=None, scope=None):
  """RNN decoder for the sequence-to-sequence model.

  Args:
    decoder_inputs: A list of 2D Tensors [batch_size x input_size].
    initial_state: 2D Tensor with shape [batch_size x cell.state_size].
    cell: rnn_cell.RNNCell defining the cell function and size.
    loop_function: If not None, this function will be applied to the i-th output
      in order to generate the i+1-st input, and decoder_inputs will be ignored,
      except for the first element ("GO" symbol). This can be used for decoding,
      but also for training to emulate http://arxiv.org/abs/1506.03099.
      Signature -- loop_function(prev, i) = next
        * prev is a 2D Tensor of shape [batch_size x output_size],
        * i is an integer, the step number (when advanced control is needed),
        * next is a 2D Tensor of shape [batch_size x input_size].
    output_projection: If not None, pair (weights, bias) used to project the
      decoder cell output to the output logits.
    scope: VariableScope for the created subgraph; defaults to "rnn_decoder".

  Returns:
    A tuple of the form (logits, outputs, [], state); logits and outputs have
    one entry per decoding step.  The empty list keeps the return shape
    consistent with the other decoders in this module.
  """
  with tf.variable_scope(scope or "rnn_decoder"):
    state = initial_state
    outputs = []
    logits = []
    prev = None
    for i, inp in enumerate(decoder_inputs):
      if loop_function is not None and prev is not None:
        # Feed back the previous step's logit instead of the provided input.
        with tf.variable_scope("loop_function", reuse=True):
          inp = loop_function(prev, i)
      if i > 0:
        # Share the cell/projection weights across all time steps.
        tf.get_variable_scope().reuse_variables()
      output, state = cell(inp, state)
      outputs.append(output)
      with tf.variable_scope("OutputProjection"):
        if output_projection is not None:
          logit = tf.matmul(output, output_projection[0]) + output_projection[1]
        else:
          logit = output
        if loop_function is not None:
          prev = logit
        logits.append(logit)
  return logits, outputs, [], state
def attention_decoder(decoder_inputs, encoder_inputs,
initial_state, attention_states, cell,
encoder_decoder_vocab_map=None,
decoder_vocab_sizes=None,
output_size=None, num_heads=1,
embed_functions=None, loop_functions=None,
output_projections=None, decoder_restrictions=None,
transition_state_map=None, dtype=tf.float32,
scope=None, initial_state_attention=False):
"""RNN decoder with attention for the sequence-to-sequence model.
In this context "attention" means that, during decoding, the RNN can look up
information in the additional tensor attention_states, and it does this by
focusing on a few entries from the tensor. This model has proven to yield
especially good results in a number of sequence-to-sequence tasks. This
implementation is based on http://arxiv.org/abs/1412.7449 (see below for
details). It is recommended for complex sequence-to-sequence tasks.
Args:
decoder_inputs: A list of 2D Tensors [batch_size x input_size] for the
decoder embedding inputs.
decoder_input_symbols: A list of 1D Tensors [batch_size] for the decoder
input symbols.
initial_state: 2D Tensor [batch_size x cell.state_size].
attention_states: 3D Tensor [batch_size x attn_length x attn_size].
cell: rnn_cell.RNNCell defining the cell function and size.
output_size: Size of the output vectors; if None, we use cell.output_size.
num_heads: Number of attention heads that read from attention_states.
loop_function: If not None, this function will be applied to i-th output
in order to generate i+1-th input, and decoder_inputs will be ignored,
except for the first element ("GO" symbol). This can be used for decoding,
but also for training to emulate http://arxiv.org/abs/1506.03099.
Signature -- loop_function(prev, i) = next
* prev is a 2D Tensor of shape [batch_size x output_size],
* i is an integer, the step number (when advanced control is needed),
* next is a 2D Tensor of shape [batch_size x input_size].
output_projection: None or a pair (W, B) of output projection weights and
biases.
decoder_restrictions: List of (dense) 1D int32 Tensors of allowed output
symbols for each decoder transition state.
transition_map: Constant 1D int Tensor size output_vocab_size. Maps
each word to its transition state.
dtype: The dtype to use for the RNN initial state (default: tf.float32).
scope: VariableScope for the created subgraph; default: "attention_decoder".
initial_state_attention: If False (default), initial attentions are zero.
If True, initialize the attentions from the initial state and attention
states -- useful when we wish to resume decoding from a previously
stored decoder state and attention states.
Returns:
A tuple of the form (logits, outputs, [], state).
Raises:
ValueError: when num_heads is not positive, there are no inputs, or shapes
of attention_states are not set.
"""
if not decoder_inputs:
raise ValueError("Must provide at least 1 input to attention decoder.")
if num_heads < 1:
raise ValueError("With less than 1 heads, use a non-attention decoder.")
if not attention_states.get_shape()[1:2].is_fully_defined():
raise ValueError("Shape[1] and [2] of attention_states must be known: %s"
% attention_states.get_shape())
if output_size is None:
output_size = cell.output_size
sample_output = False
use_nonlinear = False
max_num_concepts = data_utils.MAX_OUTPUT_SIZE
with tf.variable_scope(scope or "attention_decoder"):
batch_size = tf.shape(decoder_inputs[0]["parse"])[0] # Needed for reshaping.
attn_length = attention_states.get_shape()[1].value
attn_size = attention_states.get_shape()[2].value
# To calculate W1 * h_t we use a 1-by-1 convolution, need to reshape before.
hidden = tf.reshape(
attention_states, [-1, attn_length, 1, attn_size])
hidden_features = []
v = []
y_w = []
attention_vec_size = attn_size # Size of query vectors for attention.
for a in xrange(num_heads):
k = tf.get_variable("AttnW_%d" % a,
[1, 1, attn_size, attention_vec_size])
hidden_features.append(tf.nn.conv2d(hidden, k, [1, 1, 1, 1], "SAME"))
v.append(tf.get_variable("AttnV_%d" % a,
[attention_vec_size]))
y_m = tf.get_variable("AttnInputLinearW_%d" % a,
[attn_size, attention_vec_size],
dtype=dtype)
y_bias = tf.get_variable("AttnInputLinearBias_%d" % a,
[attention_vec_size], dtype=dtype,
initializer=tf.c |
anhstudios/swganh | data/scripts/templates/object/tangible/gambling/table/shared_table_base.py | Python | mit | 450 | 0.046667 | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
    """Build the shared gambling-table Tangible template.

    Args:
        kernel: engine kernel supplied by the template loader (unused here).

    Returns:
        A configured ``Tangible`` template object.
    """
    result = Tangible()
    result.template = "object/tangible/gambling/table/shared_table_base.iff"
    result.attribute_template_id = -1
    # STF name lookup: table "item_n", key "gambling_table".
    result.stfName("item_n","gambling_table")

    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####

    return result
JulienPobot/LeCubeMedia | src/clients/cldevice.py | Python | apache-2.0 | 1,869 | 0.010166 | # -*- coding: utf-8 -*-
import logging
import thread
from optparse import OptionParser
import sleekxmpp
class DeviceClient(sleekxmpp.ClientXMPP) :
    """XMPP client representing a device; on session start it fetches the
    roster and announces its presence."""

    def __init__(self, jid, password):
        # Echo which account this client runs as (Python 2 script).
        print "User : %s"%jid
        super(DeviceClient, self).__init__(jid, password)
        logging.info("XMPP client launched : %s"%jid)
        #self.register_plugin('xep_0030')
        #self.register_plugin('xep_0059')
        #self.register_plugin('xep_0060')
        # Run self.start once the XMPP session is established.
        self.add_event_handler('session_start', self.start)
        #self.add_event_handler('presence', self.cb_presence)
        #self.add_event_handler('message', self.cb_message)

    def start(self, event):
        """Session-start handler: fetch the roster and broadcast presence."""
        logging.info("XMPP : session started")
        self.get_roster()
        self.send_presence()

    def xmpp_setpresence(self,status):
        """Send a 'chat' presence carrying *status* as the status text."""
        logging.info("XMPP : setting presence to - %s"%status)
        self.send_presence(pfrom=self.boundjid, pshow='chat',pstatus=status)
print "Device : Client XMPP connecte au Cube"
logging.basicConfig(level=logging.DEBUG, format='%(levelname)-8s %(message)s')
parser = OptionParser()
parser.add_option('-c', action="store", de | st='cube_jid', default='cube@localhost')
parser.add_option('-j', action="store", dest='self_jid', default='device@localhost')
parser.add_option('-k', action="store", dest='self_pwd', default='device')
options, args = parser.parse_args()
print options.self_jid, options.self_pwd
client = DeviceCli | ent(options.self_jid, options.self_pwd)
#thread.start_new_thread(client.start_loop, ())
print "hola"
if client.connect(use_tls=False):
client.process(block=False)
onemore = True
while onemore :
raw = raw_input('> ')
if raw=='q' :
break
else :
client.send_message(mto=options.cube_jid, mbody=raw)
client.disconnect()
else:
logging.info("Unable to connect.")
|
jokey2k/pyClanSphere | pyClanSphere/utils/support.py | Python | bsd-3-clause | 1,485 | 0.000673 | # -*- coding: utf-8 -*-
"""
pyClanSphere.utils.support
~~~~~~~~~~~~~~~~~~~~~~~~~~
A support module. Provides various support methods and helpers.
:copyright: (c) 2009 - 2010 by the pyClanSphere Team,
(c) 2009 by Plurk Inc.,
see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import re
import unicodedata
from itertools import izip, imap
# imported for the side effects, registers a codec
import translitcodec
# Characters treated as word separators/strippable punctuation when slugifying.
_punctuation_re = re.compile(r'[\t !"#$%&\'()*\-/<=>?@\[\\\]^_`{|},.]+')
# Module-private sentinel; presumably distinguishes "argument not supplied"
# from an explicit None -- TODO confirm against its users.
_missing = object()
def slugify(text, delim=u'-'):
    """Generates an ASCII-only slug.

    Splits *text* on punctuation/whitespace, transliterates each word to
    ASCII and joins the non-empty pieces with *delim*.

    :param text: unicode string to slugify.
    :param delim: separator inserted between words (default ``u'-'``).
    :return: unicode slug.
    """
    result = []
    for word in _punctuation_re.split(text.lower()):
        # 'translit/long' comes from the translitcodec package imported above;
        # it maps non-ASCII characters to close ASCII equivalents.
        word = _punctuation_re.sub(u'', word.encode('translit/long'))
        if word:
            result.append(word)
    return unicode(delim.join(result))
class UIException(Exception):
    """Exception that is displayed in the user interface.

    The big difference to a regular exception is that this exception
    uses an unicode string as message.
    """

    # Unicode text presented to the user; set per instance in __init__.
    message = None

    def __init__(self, message):
        # The base class keeps the byte-encoded form; the unicode original
        # is stored on the instance for display purposes.
        encoded = message.encode('utf-8')
        Exception.__init__(self, encoded)
        self.message = message

    def __repr__(self):
        return '%s(%r)' % (type(self).__name__, self.message)

    def __str__(self):
        return self.message.encode('utf-8')

    def __unicode__(self):
        return self.message
from pyClanSphere.i18n import _
|
chrxr/wagtail | wagtail/tests/routablepage/models.py | Python | bsd-3-clause | 1,231 | 0 | from __future__ import absolute_import, unicode_literals
from django.http import HttpResponse
from wagtail.contrib.wagtailroutablepage.models import RoutablePage, route
def routable_page_external_view(request, arg="ARG NOT SET"):
    """Plain view used to test RoutablePage delegation to functions defined
    outside the page class; echoes *arg* in the response body."""
    return HttpResponse("EXTERNAL VIEW: " + arg)
class RoutablePageTest(RoutablePage):
    """RoutablePage fixture exposing the routing patterns under test."""

    @route(r'^$')
    def main(self, request):
        return HttpResponse("MAIN VIEW")

    @route(r'^archive/year/(\d+)/$')
    def archive_by_year(self, request, year):
        return HttpResponse("ARCHIVE BY YEAR: " + str(year))

    @route(r'^archive/author/(?P<author_slug>.+)/$')
    def archive_by_author(self, request, author_slug):
        return HttpResponse("ARCHIVE BY AUTHOR: " + author_slug)

    @route(r'^external/(.+)/$')
    @route(r'^external-no-arg/$')
    def external_view(self, *args, **kwargs):
        # Delegates to the module-level view to exercise external dispatch.
        return routable_page_external_view(*args, **kwargs)

    # By default, the method name would be used as the url name but when the
    # "name" kwarg is specified, this should override the default.
    @route(r'^override-name-test/$', name='name_overridden')
    def override_name_test(self, request):
        pass
class RoutablePageWithoutIndexRouteTest(RoutablePage):
    """RoutablePage fixture that declares no routes at all."""
    pass
|
slackeater/anal-beh | classes/gui/mylistwidget.py | Python | gpl-2.0 | 1,257 | 0.002387 | __author__ = 'snake'
from PyQt4 import QtGui, QtCore
class SiteItems(QtGui.QListWidget):
    """List widget whose items can be dragged in and out as comma-separated
    plain text (via QMimeData)."""

    def __init__(self):
        super(SiteItems, self).__init__()

    def startDrag(self, dropAction):
        """Serialize the selected items to comma-separated text and start a drag."""
        # Collect all selected items into one comma-separated string.
        selitems = ""
        for i in self.selectedItems():
            selitems += i.text() + ","
        # Create the mime data object carrying the text payload.
        mime = QtCore.QMimeData()
        mime.setText(str(selitems).strip(","))
        # Start the drag operation.
        drag = QtGui.QDrag(self)
        drag.setMimeData(mime)
        # NOTE(review): CopyAction | CopyAction is redundant (equals CopyAction);
        # possibly MoveAction | CopyAction was intended -- confirm before changing.
        drag.start(QtCore.Qt.CopyAction | QtCore.Qt.CopyAction)

    def dragEnterEvent(self, event):
        # Only accept drags that carry plain text.
        if event.mimeData().hasText():
            event.accept()
        else:
            event.ignore()

    def dragMoveEvent(self, event):
        if event.mimeData().hasText():
            event.setDropAction(QtCore.Qt.CopyAction)
            event.accept()
        else:
            event.ignore()

    def dropEvent(self, event):
        """Split the dropped text on commas and append each entry as an item."""
        if event.mimeData().hasText():
            sites = event.mimeData().text()
            for site in sites.split(","):
                self.addItem(site)
            event.setDropAction(QtCore.Qt.CopyAction)
            event.accept()
        else:
            event.ignore()
karllessard/tensorflow | tensorflow/python/ops/tensor_array_ops_test.py | Python | apache-2.0 | 3,060 | 0.003595 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensor_array_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.platform import test
class TensorArrayOpsTest(test.TestCase):
  """Checks `TensorArray.concat` over string arrays containing empty elements."""

  @test_util.run_v1_only('Testing placeholders specifically.')
  def test_concat_graph(self):
    ta = tensor_array_ops.TensorArray(
        size=4, dtype=dtypes.string, element_shape=[None], infer_shape=False)
    first = array_ops.placeholder(dtypes.string, [None])
    second = array_ops.placeholder(dtypes.string, [None])
    empty = constant_op.constant([], dtypes.string)
    # Interleave the fed values with empty string tensors.
    ta = ta.write(0, first)
    ta = ta.write(1, empty)
    ta = ta.write(2, second)
    ta = ta.write(3, empty)
    with self.session() as sess:
      result = sess.run(ta.concat(), {first: ['a', 'b', 'c'],
                                      second: ['c', 'd', 'e']})
    self.assertAllEqual(result, [b'a', b'b', b'c', b'c', b'd', b'e'])

  @test_util.run_v2_only
  def test_concat(self):
    ta = tensor_array_ops.TensorArray(
        size=4, dtype=dtypes.string, element_shape=[None], infer_shape=False)
    ta = ta.write(0, constant_op.constant(['a', 'b', 'c'], dtypes.string))
    ta = ta.write(1, constant_op.constant([], dtypes.string))
    ta = ta.write(2, constant_op.constant(['c', 'd', 'e'], dtypes.string))
    ta = ta.write(3, constant_op.constant([], dtypes.string))
    self.assertAllEqual(ta.concat(), [b'a', b'b', b'c', b'c', b'd', b'e'])

  @test_util.run_v2_only
  def test_concat_in_function(self):

    @def_function.function
    def fn(a, b):
      ta = tensor_array_ops.TensorArray(
          size=4, dtype=dtypes.string, element_shape=[None], infer_shape=False)
      ta = ta.write(0, a)
      ta = ta.write(1, constant_op.constant([], dtypes.string))
      ta = ta.write(2, b)
      ta = ta.write(3, constant_op.constant([], dtypes.string))
      return ta.concat()

    self.assertAllEqual(fn(['a', 'b', 'c'], ['c', 'd', 'e']),
                        [b'a', b'b', b'c', b'c', b'd', b'e'])
# Standard TensorFlow test-runner entry point.
if __name__ == '__main__':
  test.main()
|
npuichigo/ttsflow | third_party/tensorflow/tensorflow/contrib/distributions/python/kernel_tests/transformed_distribution_test.py | Python | apache-2.0 | 12,624 | 0.006179 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TransformedDistribution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import stats
from tensorflow.contrib import distributions
from tensorflow.contrib import linalg
from tensorflow.contrib.distributions.python.ops import bijectors
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
bs = bijectors
ds = distributions
la = linalg
class TransformedDistributionTest(test.TestCase):
  """Checks TransformedDistribution against closed-form scipy references."""

  def _cls(self):
    # Subclasses can override this to run the same checks on another class.
    return ds.TransformedDistribution

  def testTransformedDistribution(self):
    """Exp(Normal(mu, sigma)) must match scipy's lognorm."""
    g = ops.Graph()
    with g.as_default():
      mu = 3.0
      sigma = 2.0
      # Note: the Jacobian callable only works for this example; more generally
      # you may or may not need a reduce_sum.
      log_normal = self._cls()(
          distribution=ds.Normal(loc=mu, scale=sigma),
          bijector=bs.Exp(event_ndims=0))
      sp_dist = stats.lognorm(s=sigma, scale=np.exp(mu))

      # sample
      sample = log_normal.sample(100000, seed=235)
      self.assertAllEqual([], log_normal.event_shape)
      with self.test_session(graph=g):
        self.assertAllEqual([], log_normal.event_shape_tensor().eval())
        self.assertAllClose(
            sp_dist.mean(), np.mean(sample.eval()), atol=0.0, rtol=0.05)

      # pdf, log_pdf, cdf, etc...
      # The mean of the lognormal is around 148.
      test_vals = np.linspace(0.1, 1000., num=20).astype(np.float32)
      for func in [[log_normal.log_prob, sp_dist.logpdf],
                   [log_normal.prob, sp_dist.pdf],
                   [log_normal.log_cdf, sp_dist.logcdf],
                   [log_normal.cdf, sp_dist.cdf],
                   [log_normal.survival_function, sp_dist.sf],
                   [log_normal.log_survival_function, sp_dist.logsf]]:
        actual = func[0](test_vals)
        expected = func[1](test_vals)
        with self.test_session(graph=g):
          self.assertAllClose(expected, actual.eval(), atol=0, rtol=0.01)

  def testCachedSamplesWithoutInverse(self):
    """log_prob of a freshly drawn sample must agree with scipy lognorm."""
    with self.test_session() as sess:
      mu = 3.0
      sigma = 0.02
      log_normal = self._cls()(
          distribution=ds.Normal(loc=mu, scale=sigma),
          bijector=bs.Exp(event_ndims=0))

      sample = log_normal.sample(1)
      sample_val, log_pdf_val = sess.run([sample, log_normal.log_prob(sample)])
      self.assertAllClose(
          stats.lognorm.logpdf(sample_val, s=sigma, scale=np.exp(mu)),
          log_pdf_val,
          atol=1e-2)

  def testShapeChangingBijector(self):
    """SoftmaxCentered maps scalar events to vector events; check pdf/shapes."""
    with self.test_session():
      softmax = bs.SoftmaxCentered()
      standard_normal = ds.Normal(loc=0., scale=1.)
      multi_logit_normal = self._cls()(
          distribution=standard_normal,
          bijector=softmax)
      x = [[-np.log(3.), 0.],
           [np.log(3), np.log(5)]]
      y = softmax.forward(x).eval()
      expected_log_pdf = (stats.norm(loc=0., scale=1.).logpdf(x) -
                          np.sum(np.log(y), axis=-1))
      self.assertAllClose(expected_log_pdf,
                          multi_logit_normal.log_prob(y).eval())
      self.assertAllClose(
          [1, 2, 3, 2],
          array_ops.shape(multi_logit_normal.sample([1, 2, 3])).eval())
      self.assertAllEqual([2], multi_logit_normal.event_shape)
      self.assertAllEqual([2], multi_logit_normal.event_shape_tensor().eval())

  def testEntropy(self):
    """Entropy of an affine-transformed diag normal vs scipy's MVN entropy."""
    with self.test_session():
      shift = np.array([[-1, 0, 1], [-1, -2, -3]], dtype=np.float32)
      diag = np.array([[1, 2, 3], [2, 3, 2]], dtype=np.float32)
      actual_mvn_entropy = np.concatenate([
          [stats.multivariate_normal(shift[i], np.diag(diag[i]**2)).entropy()]
          for i in range(len(diag))])
      fake_mvn = self._cls()(
          ds.MultivariateNormalDiag(
              loc=array_ops.zeros_like(shift),
              scale_diag=array_ops.ones_like(diag),
              validate_args=True),
          bs.AffineLinearOperator(
              shift,
              scale=la.LinearOperatorDiag(diag, is_non_singular=True),
              validate_args=True),
          validate_args=True)
      self.assertAllClose(actual_mvn_entropy,
                          fake_mvn.entropy().eval())
class ScalarToMultiTest(test.TestCase):
def _cls(self):
return ds.TransformedDistribution
def setUp(self):
self._shift = np.array([-1, 0, 1], dtype=np.float32)
self._tril = np.array([[[1., 0, 0],
[2, 1, 0],
[3, 2, 1]],
[[2, 0, 0],
[3, 2, 0],
[4, 3, 2]]],
dtype=np.float32)
def _testMVN(self,
base_distribution_class,
base_distribution_kwargs,
batch_shape=(),
event_shape=(),
not_implemented_message=None):
with self.test_session() as sess:
# Overriding shapes must be compatible w/bijector; most bijectors are
# batch_shape agnostic and only care about event_ndims.
# In the case of `Affine`, if we got it wrong then it would fire an
# exception due to incompatible dimensions.
batch_shape_pl = array_ops.placeholder(
dtypes.int32, name="dynamic_batch_shape")
event_shape_pl = array_ops.placeholder(
dtypes.int32, name="dynamic_event_shape")
feed_dict = {batch_shape_pl: np.array(batch_shape, dtype=np.int32),
event_shape_pl: np.array(event_shape, dtype=np.int32)}
fake_mvn_dynamic = self._cls()(
distribution=base_distribution_class(validate_args=True,
**base_distribution_kwargs),
bijector=bs.Affine(shift=self._shift, scale_tril=self._tril),
batch_shape=batch_shape_pl,
event_shape=event_shape_pl,
validate_args=True)
fake_mvn_static = self._cls()(
distribution=base_distribution_class(validate_args=True,
**base_distribution_kwargs),
bijector=bs.Affine(shift=self._shift, scale_tril=self._tril),
batch_shape=batch_shape,
event_shape=event_shape,
validate_args=True)
actual_mean = np.tile(self._shift, [2, 1]) # Affine elided this tile.
actual_cov = np.matmul(self._tril, np.transpose(self._tril, [0, 2, 1]))
def actual_mvn_log_prob(x):
return np.concatenate([
[stats.multivariate_normal(
actual_mean[i], actual_cov[i]).logpdf(x[:, i, :])]
for i in range(len(actual_cov))]).T
actual_mvn_entropy = np.concatenate([
[stats.multivariate_normal(
actual_mean[i], actual_cov[i]).entropy()]
for i in range(len(actual_cov))])
self.assertAllEqual([3], fake_mvn_static.event_shape)
self.assertAllEqual([2], fake_mvn_static.batch_shape)
self.assertAllEqual(tensor_shape.TensorShape(None),
fake_mvn_dynamic.event_shape)
self.assertAllEqual(tensor_shape.TensorShape(None),
fake_mvn_dynamic.batch_shape)
x = fake_mvn_static.sample(5, seed=0).eval()
for unsupported_fn in (fake_mvn_static.log_cdf,
|
jeremiahyan/odoo | addons/website/tests/test_controllers.py | Python | gpl-3.0 | 1,810 | 0.003315 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import json
from odoo import tests
from odoo.tools import mute_logger
@tests.tagged('post_install', '-at_install')
class TestControllers(tests.HttpCase):

    @mute_logger('odoo.addons.http_routing.models.ir_http', 'odoo.http')
    def test_last_created_pages_autocompletion(self):
        """/website/get_suggested_links must list the recently modified pages
        (minus those already in matching_pages)."""
        self.authenticate("admin", "admin")
        Page = self.env['website.page']
        last_5_url_edited = []
        base_url = self.env['ir.config_parameter'].sudo().get_param('web.base.url')
        suggested_links_url = base_url + '/website/get_suggested_links'
        # Create 10 published pages; mark every other one as old so only the
        # 5 remaining count as "last modified".
        for i in range(0, 10):
            new_page = Page.create({
                'name': 'Generic',
                'type': 'qweb',
                'arch': '''
                    <div>content</div>
                ''',
                'key': "test.generic_view-%d" % i,
                'url': "/generic-%d" % i,
                'is_published': True,
            })
            if i % 2 == 0:
                # mark as old
                new_page._write({'write_date': '2020-01-01'})
            else:
                last_5_url_edited.append(new_page.url)

        res = self.opener.post(url=suggested_links_url, json={'params': {'needle': '/'}})
        resp = json.loads(res.content)
        self.assertIn('result', resp)
        suggested_links = resp['result']
        last_modified_history = next(o for o in suggested_links['others'] if o["title"] == "Last modified pages")
        last_modified_values = map(lambda o: o['value'], last_modified_history['values'])
        matching_pages = set(map(lambda o: o['value'], suggested_links['matching_pages']))
        self.assertEqual(set(last_modified_values), set(last_5_url_edited) - matching_pages)
|
nabla-c0d3/sslyze | sslyze/plugins/robot/_robot_tester.py | Python | agpl-3.0 | 19,827 | 0.003783 | import socket
import types
from enum import Enum
from typing import Optional, List, Dict
import binascii
import math
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric.rsa import RSAPublicKey, RSAPublicNumbers
from cryptography.x509 import load_pem_x509_certificate
from nassl._nassl import WantReadError
from nassl.ssl_client import ClientCertificateRequested
from tls_parser.change_cipher_spec_protocol import TlsChangeCipherSpecRecord
from tls_parser.alert_protocol import TlsAlertRecord
from tls_parser.record_protocol import TlsRecordTlsVersionBytes
from tls_parser.exceptions import NotEnoughData
from tls_parser.handshake_protocol import TlsHandshakeRecord, TlsHandshakeTypeByte, TlsRsaClientKeyExchangeRecord
from tls_parser.parser import TlsRecordParser
import tls_parser.tls_version
from sslyze.errors import ServerRejectedTlsHandshake
from sslyze.server_connectivity import ServerConnectivityInfo, TlsVersionEnum, ClientAuthRequirementEnum
# Mixing in ``str`` makes each member an instance of str equal to its value.
class RobotScanResultEnum(str, Enum):
    """The result of attempting to exploit the ROBOT issue on the server.

    Attributes:
        VULNERABLE_WEAK_ORACLE: The server is vulnerable but the attack would take too long.
        VULNERABLE_STRONG_ORACLE: The server is vulnerable and real attacks are feasible.
        NOT_VULNERABLE_NO_ORACLE: The server supports RSA cipher suites but does not act as an oracle.
        NOT_VULNERABLE_RSA_NOT_SUPPORTED: The server does not supports RSA cipher suites.
        UNKNOWN_INCONSISTENT_RESULTS: Could not determine whether the server is vulnerable or not.
    """

    VULNERABLE_WEAK_ORACLE = "VULNERABLE_WEAK_ORACLE"
    VULNERABLE_STRONG_ORACLE = "VULNERABLE_STRONG_ORACLE"
    NOT_VULNERABLE_NO_ORACLE = "NOT_VULNERABLE_NO_ORACLE"
    NOT_VULNERABLE_RSA_NOT_SUPPORTED = "NOT_VULNERABLE_RSA_NOT_SUPPORTED"
    UNKNOWN_INCONSISTENT_RESULTS = "UNKNOWN_INCONSISTENT_RESULTS"
class RobotPmsPaddingPayloadEnum(Enum):
    """Selects one of the (mis)formatted pre-master-secret paddings defined in
    ``_RobotTlsRecordPayloads._CKE_PAYLOADS_HEX``."""

    VALID = 0  # Correct framing: 0002 <padding> 00 <tls_version> <pms>
    WRONG_FIRST_TWO_BYTES = 1  # Starts with 4117 instead of 0002
    WRONG_POSITION_00 = 2  # The 0x00 separator byte is misplaced
    NO_00_IN_THE_MIDDLE = 3  # No 0x00 separator before the PMS at all
    WRONG_VERSION_NUMBER = 4  # Bogus TLS version bytes (000202) after the separator
class _RobotTlsRecordPayloads:
    """Builders for the raw TLS records sent while probing for a ROBOT oracle."""

    # From https://github.com/robotattackorg/robot-detect and testssl.sh
    # The high level idea of an oracle attack is to send several payloads that are slightly wrong, in different ways,
    # hoping that the server is going to give a different response (a TLS alert, a connection reset, no data, etc.) for
    # each payload
    _CKE_PAYLOADS_HEX = {
        RobotPmsPaddingPayloadEnum.VALID: "0002{pms_padding}00{tls_version}{pms}",  # noqa: E241
        RobotPmsPaddingPayloadEnum.WRONG_FIRST_TWO_BYTES: "4117{pms_padding}00{tls_version}{pms}",  # noqa: E241
        RobotPmsPaddingPayloadEnum.WRONG_POSITION_00: "0002{pms_padding}11{pms}0011",  # noqa: E241
        RobotPmsPaddingPayloadEnum.NO_00_IN_THE_MIDDLE: "0002{pms_padding}111111{pms}",  # noqa: E241
        RobotPmsPaddingPayloadEnum.WRONG_VERSION_NUMBER: "0002{pms_padding}000202{pms}",  # noqa: E241
    }

    # Hardcoded pre-master secret shared by every payload template above.
    _PMS_HEX = "aa112233445566778899112233445566778899112233445566778899112233445566778899112233445566778899"

    @classmethod
    def get_client_key_exchange_record(
        cls,
        robot_payload_enum: RobotPmsPaddingPayloadEnum,
        tls_version: tls_parser.tls_version.TlsVersionEnum,
        modulus: int,
        exponent: int,
    ) -> TlsRsaClientKeyExchangeRecord:
        """A client key exchange record with a hardcoded pre_master_secret, and a valid or invalid padding.

        The padding variant is chosen by ``robot_payload_enum``; the PMS is
        RSA-encrypted with the server's public key (modulus, exponent).
        """
        pms_padding = cls._compute_pms_padding(modulus)
        tls_version_hex = binascii.b2a_hex(TlsRecordTlsVersionBytes[tls_version.name].value).decode("ascii")

        pms_with_padding_payload = cls._CKE_PAYLOADS_HEX[robot_payload_enum]
        final_pms = pms_with_padding_payload.format(
            pms_padding=pms_padding, tls_version=tls_version_hex, pms=cls._PMS_HEX
        )
        cke_robot_record = TlsRsaClientKeyExchangeRecord.from_parameters(
            tls_version, exponent, modulus, int(final_pms, 16)
        )
        return cke_robot_record

    @staticmethod
    def _compute_pms_padding(modulus: int) -> str:
        # Generate the padding for the pre_master_secret so that the final
        # payload is exactly as long as the modulus.
        modulus_bit_size = int(math.ceil(math.log(modulus, 2)))
        modulus_byte_size = (modulus_bit_size + 7) // 8
        # pad_len is length in hex chars, so bytelen * 2
        pad_len = (modulus_byte_size - 48 - 3) * 2
        pms_padding_hex = ("abcd" * (pad_len // 2 + 1))[:pad_len]
        return pms_padding_hex

    # Encrypted Finished record corresponding to the PMS below and the ch_def client hello in the ROBOT poc script
    _FINISHED_RECORD = bytearray.fromhex(
        "005091a3b6aaa2b64d126e5583b04c113259c4efa48e40a19b8e5f2542c3b1d30f8d80b7582b72f08b21dfcbff09d4b281676a0fb40"
        "d48c20c4f388617ff5c00808a96fbfe9bb6cc631101a6ba6b6bc696f0"
    )

    @classmethod
    def get_finished_record_bytes(cls, tls_version: tls_parser.tls_version.TlsVersionEnum) -> bytes:
        """The Finished TLS record corresponding to the hardcoded PMS used in the Client Key Exchange record.
        """
        # TODO(AD): The ROBOT poc script uses the same Finished record for all possible client hello (default, GCM,
        # etc.); as the Finished record contains a hash of all previous records, it will be wrong and will cause
        # servers to send a TLS Alert 20
        # Here just like in the poc script, the Finished message does not match the Client Hello we sent
        # 0x16 is the TLS handshake record content type.
        return b"\x16" + TlsRecordTlsVersionBytes[tls_version.name].value + cls._FINISHED_RECORD
class RobotServerResponsesAnalyzer:
    """Derives the ROBOT verdict from the server's responses to each payload."""

    def __init__(self, payload_responses: Dict[RobotPmsPaddingPayloadEnum, List[str]], attempts_count: int) -> None:
        """Args:
            payload_responses: Mapping of a ROBOT payload enum -> the server's
                response (as text) for each attempt with that payload.
            attempts_count: How many times each payload was sent; every payload
                must have exactly that many recorded responses.
        """
        for server_responses in payload_responses.values():
            if len(server_responses) != attempts_count:
                raise ValueError(
                    "Expected {} responses per payload, got {}".format(attempts_count, len(server_responses))
                )

        self._payload_responses = payload_responses
        self._attempts_count = attempts_count

    def compute_result_enum(self) -> RobotScanResultEnum:
        """Look at the server's response to each ROBOT payload and return the conclusion of the analysis.
        """
        # Ensure the results were consistent: we ran the check several times
        # per payload and the responses should be identical each time.
        for server_responses in self._payload_responses.values():
            if len(set(server_responses)) != 1:
                return RobotScanResultEnum.UNKNOWN_INCONSISTENT_RESULTS

        # Check if the server acts as an oracle by checking if the server replied differently to the payloads
        if len({server_responses[0] for server_responses in self._payload_responses.values()}) == 1:
            # All server responses were identical - no oracle
            return RobotScanResultEnum.NOT_VULNERABLE_NO_ORACLE

        # All server responses were NOT identical, server is vulnerable
        # Check to see if it is a weak oracle
        response_1 = self._payload_responses[RobotPmsPaddingPayloadEnum.WRONG_FIRST_TWO_BYTES][0]
        response_2 = self._payload_responses[RobotPmsPaddingPayloadEnum.WRONG_POSITION_00][0]
        response_3 = self._payload_responses[RobotPmsPaddingPayloadEnum.NO_00_IN_THE_MIDDLE][0]

        # From the original script:
        # If the response to the invalid PKCS#1 request (oracle_bad1) is equal to both
        # requests starting with 0002, we have a weak oracle. This is because the only
        # case where we can distinguish valid from invalid requests is when we send
        # correctly formatted PKCS#1 message with 0x00 on a correct position. This
        # makes our oracle weak
        if response_1 == response_2 == response_3:
            return RobotScanResultEnum.VULNERABLE_WEAK_ORACLE
        else:
            return RobotScanResultEnum.VULNERABLE_STRONG_ORACLE
class ServerDoesNotSupportRsa(Exception):
    """Raised when the scanned server does not support RSA cipher suites,
    making the ROBOT check inapplicable."""
    pass
def test_robot(server_info: ServerConnectivityInfo) -> Dict[RobotPmsPaddingPayloadEnum, str]:
# Try with TLS 1.2 even if the server supports TLS 1.3 or higher
|
zenoss/ZenPacks.Opengear.ConsoleServer | setup.py | Python | gpl-2.0 | 2,682 | 0.012677 | ################################
# These variables are overwritten by Zenoss when the ZenPack is exported
# or saved. Do not modify them directly here.
# NB: PACKAGES is deprecated
NAME = "ZenPacks.Opengear.ConsoleServer"
VERSION = "1.0"
AUTHOR = "Peter Hunt <support@opengear.com>"
LICENSE = "GPLv2"
NAMESPACE_PACKAGES = ['ZenPacks', 'ZenPacks.Opengear']
PACKAGES = ['ZenPacks', 'ZenPacks.Opengear', 'ZenPacks.Opengear.ConsoleServer']
INSTALL_REQUIRES = ['ZenPacks.Opengear.MIBs>= 1.0']
COMPAT_ZENOSS_VERS = '>=2.5'
PREV_ZENPACK_NAME = ""
# STOP_REPLACEMENTS
################################
# Zenoss will not overwrite any changes you make below here.

from setuptools import setup, find_packages

setup(
    # This ZenPack metadata should usually be edited with the Zenoss
    # ZenPack edit page.  Whenever the edit page is submitted it will
    # overwrite the values below (the ones it knows about) with new values.
    name = NAME,
    version = VERSION,
    author = AUTHOR,
    license = LICENSE,
    # This is the version spec which indicates what versions of Zenoss
    # this ZenPack is compatible with
    compatZenossVers = COMPAT_ZENOSS_VERS,
    # previousZenPackName is a facility for telling Zenoss that the name
    # of this ZenPack has changed.  If no ZenPack with the current name is
    # installed then a zenpack of this name if installed will be upgraded.
    prevZenPackName = PREV_ZENPACK_NAME,
    # Indicate to setuptools which namespace packages the zenpack
    # participates in
    namespace_packages = NAMESPACE_PACKAGES,
    # Tell setuptools what packages this zenpack provides.
    packages = find_packages(),
    # Tell setuptools to figure out for itself which files to include
    # in the binary egg when it is built.
    include_package_data = True,
    # The MANIFEST.in file is the recommended way of including additional files
    # in your ZenPack. package_data is another.
    #package_data = {}
    # Indicate dependencies on other python modules or ZenPacks.  This line
    # is modified by zenoss when the ZenPack edit page is submitted.  Zenoss
    # tries to put add/delete the names it manages at the beginning of this
    # list, so any manual additions should be added to the end.  Things will
    # go poorly if this line is broken into multiple lines or modified to
    # dramatically.
    install_requires = INSTALL_REQUIRES,
    # Every ZenPack egg must define exactly one zenoss.zenpacks entry point
    # of this form.
    entry_points = {
        'zenoss.zenpacks': '%s = %s' % (NAME, NAME),
    },
    # All ZenPack eggs must be installed in unzipped form.
    zip_safe = False,
)
|
rodo/django-extensions | tests/test_uuid_field.py | Python | mit | 2,355 | 0.005096 | import re
import uuid
import six
from django.test import TestCase
from django_extensions.db.fields import PostgreSQLUUIDField
from .testapp.models import (
UUIDTestAgregateModel, UUIDTestManyToManyModel, UUIDTestModel_field,
UUIDTestModel_pk,
)
class UUIDFieldTest(TestCase):
    """Exercise UUIDField as a plain field, as a primary key, and on related models."""

    def test_UUID_field_create(self):
        j = UUIDTestModel_field.objects.create(a=6, uuid_field=six.u('550e8400-e29b-41d4-a716-446655440000'))
        self.assertEqual(j.uuid_field, six.u('550e8400-e29b-41d4-a716-446655440000'))

    def test_UUID_field_pk_create(self):
        j = UUIDTestModel_pk.objects.create(uuid_field=six.u('550e8400-e29b-41d4-a716-446655440000'))
        self.assertEqual(j.uuid_field, six.u('550e8400-e29b-41d4-a716-446655440000'))
        # When the UUID field is the primary key, pk mirrors it.
        self.assertEqual(j.pk, six.u('550e8400-e29b-41d4-a716-446655440000'))

    def test_UUID_field_pk_agregate_create(self):
        j = UUIDTestAgregateModel.objects.create(a=6, uuid_field=six.u('550e8400-e29b-41d4-a716-446655440001'))
        self.assertEqual(j.a, 6)
        # An auto-generated UUID pk is stored as its 36-character string form.
        self.assertIsInstance(j.pk, six.string_types)
        self.assertEqual(len(j.pk), 36)

    def test_UUID_field_manytomany_create(self):
        j = UUIDTestManyToManyModel.objects.create(uuid_field=six.u('550e8400-e29b-41d4-a716-446655440010'))
        self.assertEqual(j.uuid_field, six.u('550e8400-e29b-41d4-a716-446655440010'))
        self.assertEqual(j.pk, six.u('550e8400-e29b-41d4-a716-446655440010'))
class PostgreSQLUUIDFieldTest(TestCase):
    """Check that PostgreSQLUUIDField normalizes every accepted input form."""

    def test_uuid_casting(self):
        # As explained by the Postgres documentation
        # http://www.postgresql.org/docs/9.1/static/datatype-uuid.html
        # a uuid needs to be a sequence of lower-case hexadecimal digits, in
        # several groups separated by hyphens, specifically a group of 8 digits
        # followed by three groups of 4 digits followed by a group of 12 digits
        # Raw strings so \d is a regex class, not a (deprecated) string escape.
        matcher = re.compile(r'^[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}'
                             r'-[\da-f]{12}$')
        field = PostgreSQLUUIDField()
        # Every supported input representation must be cast to canonical form.
        for value in (str(uuid.uuid4()), uuid.uuid4().urn, uuid.uuid4().hex,
                      uuid.uuid4().int, uuid.uuid4().bytes):
            prepared_value = field.get_db_prep_value(value, None)
            self.assertTrue(matcher.match(prepared_value) is not None,
                            prepared_value)
|
gbowerman/vmsstools | cpuload/getnics.py | Python | mit | 1,010 | 0.008911 | # getnics.py - script to get the internal IP addresses from the VMs in a scale set
# to do: do not hardcode the rgname and vmssname - instead make them command line
# parameters
import azurerm
import json
import sys

# Load Azure app defaults from the local config file.
try:
    with open('azurermconfig.json') as configFile:
        configData = json.load(configFile)
except FileNotFoundError:
    print("Error: Expecting azurermconfig.json in current folder")
    sys.exit()

tenant_id = configData['tenantId']
app_id = configData['appId']
app_secret = configData['appSecret']
subscription_id = configData['subscriptionId']

# to do: do not hardcode the rgname and vmssname - instead make them command
# line parameters
rgname = 'sgelastic'
vmssname = 'sgelastic'

# authenticate against Azure AD, then list the scale set's NICs
access_token = azurerm.get_access_token(tenant_id, app_id, app_secret)
nics = azurerm.get_vmss_nics(access_token, subscription_id, rgname, vmssname)
for nic in nics['value']:
    #print(json.dumps(nic, sort_keys=False, indent=2, separators=(',', ': ')))
    # First IP configuration holds the VM's internal (private) address.
    ipaddress = nic['properties']['ipConfigurations'][0]['properties']['privateIPAddress']
    print(ipaddress)
JasonMWhite/python_template | setup.py | Python | mit | 738 | 0.001355 | from setuptools import setup
setup(
    name='python_template',
    version='0.1.0',
    description='A Python template repo to save you some yak-shaving.',
    long_description='',
    author='Jason White',
    author_email='jason.white@shopify.com',
    url='https://github.com/JasonMWhite/python_template',
    packages=['python_template', 'pylint_custom'],
    include_package_data=True,
    install_requires=[
    ],
    license="MIT",
    zip_safe=False,
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        # NOTE(review): license= above says MIT but this classifier says BSD;
        # confirm which license is intended.
        'License :: OSI Approved :: BSD License',
        'Natural Language :: English',
        'Programming Language :: Python :: 3',
    ],
    test_suite='tests',
)
ngr/sandbox | neuraldata/problem_set1/sandbox.py | Python | mit | 557 | 0.025135 | # -*- coding: utf-8 -*-
"""
Created on Sun Oct 05 10:02:23 2014
@author: grischenko
"""
#import numpy as np
import random
#import matplotlib.pyplot as plt
#import matplotlib.axes as axs
import pylab as mpl
# Plot 25 random integers in [1, 5] against their index (Python 2 script).
x = range(25)
y = []
for i in range(25):
    y.append(random.randint(1,5))
print x, y  # debug dump of the generated data
mpl.figure()
mpl.title("test")
mpl.xlabel("sdsf")
#mpl.axes.Axes.set_xlabel("time")
mpl.plot(x, y)
mpl.draw()
# Earlier experiments with the matplotlib.pyplot API, kept for reference:
#plt.hold()
#plt.title('helo')
#my = plt.plot(x, y)
#plt.ylabel("vol")
#plt.ylabel = "vol"
#axs.Axes.set_xlabel(my.xlabel, "time")
#plt.draw()
ucfopen/canvasapi | canvasapi/quiz_group.py | Python | mit | 4,094 | 0.001466 | from canvasapi.canvas_object import CanvasObject
from canvasapi.exceptions import RequiredFieldMissing
from canvasapi.util import combine_kwargs
class QuizGroup(CanvasObject):
    def __str__(self):
        return "{} ({})".format(self.name, self.id)

    def delete(self, **kwargs):
        """
        Delete this quiz group.

        :calls: `DELETE /api/v1/courses/:course_id/quizzes/:quiz_id/groups/:id \
        <https://canvas.instructure.com/doc/api/quiz_question_groups.html#method.quizzes/quiz_groups.destroy>`_

        :returns: True if the result was successful (Status code of 204)
        :rtype: bool
        """
        response = self._requester.request(
            "DELETE",
            "courses/{}/quizzes/{}/groups/{}".format(
                self.course_id, self.quiz_id, self.id
            ),
            _kwargs=combine_kwargs(**kwargs),
        )
        return response.status_code == 204

    def reorder_question_group(self, order, **kwargs):
        """
        Update the order of questions within a given group.

        :calls: `POST /api/v1/courses/:course_id/quizzes/:quiz_id/groups/:id/reorder \
        <https://canvas.instructure.com/doc/api/quiz_question_groups.html#method.quizzes/quiz_groups.reorder>`_

        :param order: A list of dictionaries containing the key 'id' of
            the question to be placed at order's index.
        :type order: list[dict]

        :returns: True if the result was successful (Status code of 204)
        :rtype: bool
        """
        if not isinstance(order, list) or not order:
            raise ValueError("Param `order` must be a non-empty list.")

        # Validate every entry before sending anything to the API.
        for question in order:
            if not isinstance(question, dict):
                raise ValueError(
                    "`order` must consist only of dictionaries representing "
                    "Question items."
                )
            if "id" not in question:
                raise ValueError("Dictionaries in `order` must contain an `id` key.")

        kwargs["order"] = order

        response = self._requester.request(
            "POST",
            "courses/{}/quizzes/{}/groups/{}/reorder".format(
                self.course_id, self.quiz_id, self.id
            ),
            _kwargs=combine_kwargs(**kwargs),
        )
        return response.status_code == 204

    def update(self, quiz_groups, **kwargs):
        """
        Update a question group given by id.

        :calls: `PUT /api/v1/courses/:course_id/quizzes/:quiz_id/groups/:id \
        <https://canvas.instructure.com/doc/api/quiz_question_groups.html#method.quizzes/quiz_groups.update>`_

        :param quiz_groups: The name, pick count, and/or question points.
            All of these parameters are optional, but at least one must exist
            (even if empty) to receive a response.
            The request expects a list, but will only update 1 question group per request.
        :type quiz_groups: list[dict]

        :returns: `True` if the QuizGroup was updated. `False` otherwise.
        :rtype: bool
        """
        if not isinstance(quiz_groups, list) or not quiz_groups:
            raise ValueError("Param `quiz_groups` must be a non-empty list.")

        if not isinstance(quiz_groups[0], dict):
            raise ValueError("Param `quiz_groups` must contain a dictionary")

        param_list = ["name", "pick_count", "question_points"]
        if not any(param in quiz_groups[0] for param in param_list):
            raise RequiredFieldMissing("quiz_groups must contain at least 1 parameter.")

        kwargs["quiz_groups"] = quiz_groups

        response = self._requester.request(
            "PUT",
            "courses/{}/quizzes/{}/groups/{}".format(
                self.course_id, self.quiz_id, self.id
            ),
            _kwargs=combine_kwargs(**kwargs),
        )

        # The API echoes the updated group back; a present "name" key marks success.
        successful = "name" in response.json().get("quiz_groups")[0]
        if successful:
            # Keep this object's attributes in sync with the server's state.
            super(QuizGroup, self).set_attributes(response.json().get("quiz_groups")[0])

        return successful
|
boldingd/BadRbm | badrbm.py | Python | mit | 3,890 | 0.004884 | #! /usr/bin/python3
import numpy
import scipy.special # for expit (a quick numpy-array-aware sigmoid)
# consider http://stackoverflow.com/questions/3985619/how-to-calculate-a-logistic-sigmoid-function-in-python
class RbmError(Exception):
    """Raised for invalid RBM construction parameters or mis-shaped inputs."""
    pass


class rbm:
    """A small restricted Boltzmann machine with binary visible/hidden units.

    The visible layer has ``i`` units and the hidden layer ``j`` units;
    ``a`` and ``b`` are the visible/hidden bias column vectors and ``W`` is
    the i-by-j weight matrix. Training uses one step of contrastive
    divergence (CD-1), optionally with momentum.
    """

    def __init__(self, i, j, rate, p=None):  # i = |v|, j = |h|, p -> momentum
        """Create an RBM with random weights and biases.

        :param i: number of visible units (must be >= 1)
        :param j: number of hidden units (must be >= 1)
        :param rate: learning rate used by apply_update
        :param p: momentum coefficient in (0, 0.5], or None to disable momentum
        :raises RbmError: on non-positive unit counts or out-of-range momentum
        """
        if i < 1:
            raise RbmError("There must be at least one visible unit")
        if j < 1:
            raise RbmError("There must be at least one hidden unit")
        if p is not None:
            if p > 0.5:
                raise RbmError("Momentum greater than 0.5 doesn't make sense.")
            elif p <= 0.0:
                raise RbmError("Zero-or-less momentum doesn't make sense (use momentum=None to turn momentum off).")
        self.i = i  # record |v| for error-checking
        self.j = j  # record |h| for error-checking
        self.a = numpy.random.rand(i, 1)  # visible biases
        self.b = numpy.random.rand(j, 1)  # hidden biases
        self.W = numpy.random.rand(i, j)
        self.rate = rate
        self.p = p
        self.last_dW = None  # previous weight update, kept for the momentum term

    def get_energy(self, v, h):
        """Return the energy E(v,h) = -a'v - b'h - v'Wh as a (1, 1) array."""
        if v.shape != (self.i, 1):
            raise RbmError("wrong shape for v, should be (i, 1).")
        if h.shape != (self.j, 1):
            raise RbmError("wrong shape for h, should be (j, 1).")
        E = -1.0 * self.a.T @ v
        E -= self.b.T @ h
        E -= v.T @ self.W @ h
        return E

    def get_v(self, h):
        """Sample a binary (i, 1) visible vector given hidden vector h."""
        v_probs = scipy.special.expit(self.a + (self.W @ h))
        v_vals = numpy.random.rand(self.i, 1)
        # Vectorized Bernoulli sampling; replaces the original per-unit loop.
        return (v_vals <= v_probs).astype(numpy.float64)

    def get_h(self, v):
        """Sample a binary (j, 1) hidden vector given visible vector v."""
        h_probs = scipy.special.expit(self.b + (self.W.T @ v))
        h_vals = numpy.random.rand(self.j, 1)
        # The original used a strict `<` here but `<=` in get_v; for a
        # continuous uniform draw the two are statistically identical, so
        # both directions now use `<=` for consistency.
        return (h_vals <= h_probs).astype(numpy.float64)

    def get_updates(self, v):
        """Return one CD-1 step's (W, a, b) updates for a visible sample v."""
        h = self.get_h(v)
        vprime = self.get_v(h)
        hprime = self.get_h(vprime)
        w_update = (v @ h.T) - (vprime @ hprime.T)
        a_update = (v - vprime)
        b_update = (h - hprime)
        return w_update, a_update, b_update

    def apply_update(self, v, rate=None):
        """Apply one contrastive-divergence update, with optional momentum.

        :param v: (i, 1) visible sample
        :param rate: learning rate override; defaults to the constructor's rate
        """
        if rate is None:
            rate = self.rate
        w_update, a_update, b_update = self.get_updates(v)
        if self.p is not None:
            # Blend the fresh gradient with the previous step's direction.
            self.W += (1.0 - self.p) * w_update * rate
            if self.last_dW is not None:
                self.W += self.p * self.last_dW * rate
            self.last_dW = w_update
        else:
            self.W += w_update * rate
        self.a += a_update * (rate / len(v))
        self.b += b_update * (rate / len(v))

    def get_samples(self, count, initial_visible=None):
        """Gibbs-sample `count` visible vectors, optionally from a start state."""
        if initial_visible is None:
            current_v = numpy.random.rand(self.i, 1)
        else:
            current_v = initial_visible
        samples = []
        for _ in range(count):
            current_h = self.get_h(current_v)
            current_v = self.get_v(current_h)
            samples.append(current_v)
        return samples
# I determined that W.T @ v = v @ W in the laziest and dumbest way possible:
# I asked my brother, who is a mathematician
# with one proviso: at least in numpy, v.T @ w produces a row-vector, and w.T @ v produces a column vector.
# a simple test shows that it's actually faster with the matrix transpose? It is a small matrix in the test, hmm.
|
graziano-giuliani/pythoncode | pyuwphysret/common/pyfiles/atmos/integral.py | Python | mit | 2,891 | 0.017987 | #!/usr/bin/env python
# integeral.py
import numpy as num
def integral(x, y):
    """Integrate tabulated data using Simpson's rule with 3-point
    Lagrangian interpolation.

    The samples may be regularly or irregularly spaced in x; elements of
    x must be unique and monotonically increasing.

    :param x: vector of x axis points
    :param y: vector of corresponding y axis points
    :return: the approximate value of the integral of y over x

    Example (f = .2 + 25*x - 200*x^2 + 675*x^3 - 900*x^4 + 400*x^5 sampled
    at 11 points on [0.0, 0.8]): this routine yields ~1.6274 against the
    exact value 1.6405, compared to 1.5648 for the trapezoid rule.

    Based on a FORTRAN-77 version by Paul van Delst, CIMSS/SSEC; Python
    port of INTEGRAL.PRO by Liam Gumley, CIMSS/SSEC, 22-DEC-95.
    """
    def lagrange3(xa, xb, xc, ya, yb, yc, xm):
        # Quadratic (3-point) Lagrange interpolation evaluated at xm.
        la = ((xm - xb) / (xa - xb)) * ((xm - xc) / (xa - xc))
        lb = ((xm - xa) / (xb - xa)) * ((xm - xc) / (xb - xc))
        lc = ((xm - xa) / (xc - xa)) * ((xm - xb) / (xc - xb))
        return ya * la + yb * lb + yc * lc

    n = x.size

    # Interior panels: for each interval [x[k], x[k+1]] interpolate a
    # midpoint value from the three points x[k], x[k+1], x[k+2], then
    # apply Simpson's rule on the half-intervals.
    xa, xb, xc = x[0:n - 2], x[1:n - 1], x[2:n]
    ya, yb, yc = y[0:n - 2], y[1:n - 1], y[2:n]
    dx = xb - xa
    xmid = 0.5 * (xb + xa)
    ymid = lagrange3(xa, xb, xc, ya, yb, yc, xmid)
    result = sum(1.0 / 6.0 * dx * (ya + 4.0 * ymid + yb))

    # Final panel [x[n-2], x[n-1]]: reuse the last three tabulated points.
    xa, xb, xc = x[n - 3], x[n - 2], x[n - 1]
    ya, yb, yc = y[n - 3], y[n - 2], y[n - 1]
    dx = xc - xb
    xmid = 0.5 * (xc + xb)
    ymid = lagrange3(xa, xb, xc, ya, yb, yc, xmid)
    result = result + 1.0 / 6.0 * dx * (yb + 4.0 * ymid + yc)

    return result
if __name__ == '__main__':
    # Print the usage notes, then run the docstring's worked example
    # (expected result is approximately 1.6274).
    print(integral.__doc__)
    X = num.array((0.0, .12, .22, .32, .36, .40, .44, .54, .64, .70, .80))
    Y = num.array((0.200000, 1.30973, 1.30524, 1.74339, 2.07490, 2.45600,
                   2.84299, 3.50730, 3.18194, 2.36302, 0.231964))
    i = integral(X,Y)
    print(i)
|
OlegKlimenko/py-sdoc | sdoc/sdoc2/SDoc2Visitor.py | Python | mit | 7,232 | 0.001521 | """
SDoc
Copyright 2016 Set Based IT Consultancy
Licence MIT
"""
# ----------------------------------------------------------------------------------------------------------------------
import re
from sdoc import sdoc2
from sdoc.antlr.sdoc2ParserVisitor import sdoc2ParserVisitor
from sdoc.sdoc.SDocVisitor import SDocVisitor
from sdoc.sdoc2.Position import Position
class SDoc2Visitor(sdoc2ParserVisitor, SDocVisitor):
    """
    Visitor for SDoc level 2.
    """

    # ------------------------------------------------------------------------------------------------------------------
    def __init__(self, sdoc1_path, io):
        """
        Object constructor.

        :param str sdoc1_path: The path to the original SDoc1 document.
        :param cleo.styles.output_style.OutputStyle io: The IO object.
        """
        SDocVisitor.__init__(self, io)

        self._io = io
        """
        Styled output formatter.

        :type: sdoc.style.SdocStyle.SdocStyle
        """

        self._output = None
        """
        Object for streaming the generated output. This object MUST implement the write method.
        """

        self._sdoc1_file_name = sdoc1_path
        """
        The original file name at SDoc1 level.

        :type: str
        """

        self._sdoc1_line = 0
        """
        The offset for computing the current line at SDoc1 level.

        :type: int
        """

        self._sdoc1_column = 0
        """
        The offset for computing the current column at SDoc1 level.

        :type: int
        """

        self._sdoc2_line = 0
        """
        The line position of the last position command.

        :type: int
        """

        self._sdoc2_column = 0
        """
        The last column position of the last position command.

        :type: int
        """

    # ------------------------------------------------------------------------------------------------------------------
    @staticmethod
    def _get_options(ctx):
        """
        Returns the options of a command.

        :param ParserRuleContext ctx: The parse tree with the options.

        :rtype: dict[str,str]
        """
        options = {}
        i = 0
        while True:
            name_token = ctx.OPT_ARG_NAME(i)
            value_token = ctx.OPT_ARG_VALUE(i)
            if not name_token:
                break

            option_name = name_token.getText()
            option_value = value_token.getText()
            # Trim leading and trailing (double)quotes from string. (Is there a way to do this in ANTLR?)
            if (option_value[0] == '"' and option_value[-1] == '"') or \
                    (option_value[0] == "'" and option_value[-1] == "'"):
                option_value = option_value[1:-1]

            options[option_name] = option_value

            i += 1

        return options

    # ------------------------------------------------------------------------------------------------------------------
    def get_position(self, token):
        """
        Returns the position of the token in the original SDoc1 source file.

        :param token: The ANTLR token.

        :rtype: sdoc.sdoc2.Position.Position
        """
        line_number = token.line
        column = token.column

        # Translate SDoc2-level coordinates back to SDoc1-level coordinates
        # using the offsets recorded by the last \position command.
        if self._sdoc2_line == line_number:
            column = self._sdoc1_column + (column - self._sdoc2_column)

        line_number = self._sdoc1_line + (line_number - self._sdoc2_line)

        return Position(self._sdoc1_file_name, line_number, column, -1, -1)

    # ------------------------------------------------------------------------------------------------------------------
    def set_output(self, output):
        """
        Sets the object for streaming the generated output.

        :param output: This object MUST implement the write method.
        """
        self._output = output

    # ------------------------------------------------------------------------------------------------------------------
    def stream(self, snippet):
        """
        Puts an output snippet on the output stream.

        :param str snippet: The snippet to be appended to the output stream of this parser.
        """
        if snippet is not None:
            self._output.write(snippet)

    # ------------------------------------------------------------------------------------------------------------------
    def visitCmd_begin(self, ctx):
        """
        Visit a parse tree produced by a begin command.

        :param sdoc.antlr.sdoc2Parser.sdoc2Parser.Cmd_beginContext ctx: The parse tree.
        """
        command = ctx.BLOCK_ARG_ARG().getText()

        sdoc2.node_store.append_block_node(command, self._get_options(ctx), self.get_position(ctx.start))

    # ------------------------------------------------------------------------------------------------------------------
    def visitCmd_end(self, ctx):
        """
        Visit a parse tree produced by an end command.

        :param sdoc.antlr.sdoc2Parser.sdoc2Parser.Cmd_endContext ctx: The parse tree.
        """
        command = ctx.BLOCK_ARG_ARG().getText().rstrip()

        sdoc2.node_store.end_block_node(command)

    # ------------------------------------------------------------------------------------------------------------------
    def visitCmd_position(self, ctx):
        """
        Visit a parse tree produced by a position command.

        :param sdoc.antlr.sdoc2Parser.sdoc2Parser.Cmd_positionContext ctx: The parse tree.
        """
        argument = ctx.INLINE_ARG_ARG()
        parts = re.match(r'(.+):([0-9]+)\.([0-9]+)', str(argument))
        if not parts:
            self._error('{0!s} is not a valid position'.format(argument))
            return

        self._sdoc1_file_name = parts.group(1)
        self._sdoc1_line = int(parts.group(2))
        self._sdoc1_column = int(parts.group(3))

        # Remember where this position command ends at SDoc2 level so later
        # tokens on the same line can be re-based in get_position().
        token = ctx.stop
        self._sdoc2_line = token.line
        self._sdoc2_column = token.column + len(token.text)

    # ------------------------------------------------------------------------------------------------------------------
    def visitCmd_sdoc2(self, ctx):
        """
        Visit a parse tree produced by a inline command.

        :param sdoc.antlr.sdoc2Parser.sdoc2Parser.Cmd_sdoc2Context ctx: The parse tree.
        """
        command = ctx.SDOC2_COMMAND().getText()
        argument = ctx.INLINE_ARG_ARG()

        # command[1:] strips the leading backslash from the command token.
        sdoc2.node_store.append_inline_node(command[1:],
                                            self._get_options(ctx),
                                            argument.getText() if argument else '',
                                            self.get_position(ctx.start))

    # ------------------------------------------------------------------------------------------------------------------
    def visitText(self, ctx):
        """
        Visit a parse tree produced by TEXT.

        :param sdoc.antlr.sdoc2Parser.sdoc2Parser.TextContext ctx: The parse tree.
        """
        sdoc2.node_store.append_inline_node('TEXT', {}, ctx.TEXT().getText(), self.get_position(ctx.start))

# ----------------------------------------------------------------------------------------------------------------------
|
DEAP/deap | setup.py | Python | lgpl-3.0 | 3,832 | 0.007046 | #!/usr/bin/env python
import sys

warnings = list()

# Prefer setuptools; fall back to distutils (losing the "develop" command).
try:
    from setuptools import setup, Extension, find_packages
    modules = find_packages(exclude=['examples'])
except ImportError:
    warnings.append("warning: using distutils.core.setup, cannot use \"develop\" option")
    from distutils.core import setup, Extension
    modules = ['deap', 'deap.benchmarks', 'deap.tests', 'deap.tools', 'deap.tools._hypervolume']

from setuptools.command.build_ext import build_ext
from distutils.errors import CCompilerError, DistutilsExecError, \
    DistutilsPlatformError

# read the contents of README file
from os import path
import codecs
this_directory = path.abspath(path.dirname(__file__))
long_description = codecs.open(path.join(this_directory, 'README.md'), 'r', 'utf-8').read()

import deap

if sys.platform == 'win32' and sys.version_info > (2, 6):
    # 2.6's distutils.msvc9compiler can raise an IOError when failing to
    # find the compiler
    # It can also raise ValueError http://bugs.python.org/issue7511
    ext_errors = (CCompilerError, DistutilsExecError, DistutilsPlatformError,
                  IOError, ValueError)
else:
    ext_errors = (CCompilerError, DistutilsExecError, DistutilsPlatformError)
class BuildFailed(Exception):
    """Raised by ve_build_ext when compiling the optional C extension fails."""
    pass
class ve_build_ext(build_ext):
    """A build_ext that is allowed to fail.

    Compiler and platform errors are translated into BuildFailed so the
    caller can retry the installation without the C extension.
    """

    def run(self):
        try:
            build_ext.run(self)
        except DistutilsPlatformError as err:
            print(err)
            raise BuildFailed()

    def build_extension(self, ext):
        try:
            build_ext.build_extension(self, ext)
        except ext_errors as err:
            print(err)
            raise BuildFailed()
def run_setup(build_ext):
    """Invoke setuptools.setup; build the C hypervolume extension only when build_ext is true."""
    extra_modules = None
    if build_ext:
        extra_modules = list()
        # Optional C implementation of the hypervolume indicator.
        hv_module = Extension("deap.tools._hypervolume.hv", sources=["deap/tools/_hypervolume/_hv.c", "deap/tools/_hypervolume/hv.cpp"])
        extra_modules.append(hv_module)
    setup(name='deap',
          version=deap.__revision__,
          description='Distributed Evolutionary Algorithms in Python',
          long_description=long_description,
          long_description_content_type="text/markdown",
          author='deap Development Team',
          author_email='deap-users@googlegroups.com',
          url='https://www.github.com/deap',
          packages=find_packages(exclude=['examples']),
          # packages=['deap', 'deap.tools', 'deap.tools._hypervolume', 'deap.benchmarks', 'deap.tests'],
          platforms=['any'],
          keywords=['evolutionary algorithms', 'genetic algorithms', 'genetic programming', 'cma-es', 'ga', 'gp', 'es', 'pso'],
          license='LGPL',
          classifiers=[
              'Development Status :: 4 - Beta',
              'Intended Audience :: Developers',
              'Intended Audience :: Education',
              'Intended Audience :: Science/Research',
              'License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)',
              'Programming Language :: Python',
              'Programming Language :: Python :: 3',
              'Topic :: Scientific/Engineering',
              'Topic :: Software Development',
          ],
          # ext_modules is None for the pure-Python fallback install.
          ext_modules=extra_modules,
          cmdclass={"build_ext": ve_build_ext},
          install_requires=['numpy'],
          use_2to3=True
          )
# First try a full build with the C extension; if that fails, fall back to a
# pure-Python installation.
try:
    run_setup(True)
except BuildFailed:
    print("*" * 75)
    print("WARNING: The C extensions could not be compiled, "
          "speedups won't be available.")
    print("Now building without C extensions.")
    print("*" * 75)

    run_setup(False)

    # These messages belong to the fallback path only; at top level they
    # would (wrongly) be printed even after a successful C build.
    print("*" * 75)
    print("WARNING: The C extensions could not be compiled, "
          "speedups won't be available.")
    print("Plain-Python installation succeeded.")
    print("*" * 75)

print("\n".join(warnings))
oodt-cloud/docker | examples/test-workflow/swarm/wmgr_config/rabbitmq_clients/rabbitmq_producer.py | Python | apache-2.0 | 20,108 | 0.003879 | # -*- coding: utf-8 -*-
# Adapted from:
# http://pika.readthedocs.io/en/0.10.0/examples/asynchronous_publisher_example.html
# Features:
# o Requests delivery confirmation from RabbitMQ server
# and keeps tracks of which messages have been acknowledged or not acknowledged
# o Reconnects if connection to RabbitMQ servers goes down for any reason
# o Shuts down if RabbitMQ server closes the channel
#
# Usage: python rabbitmq_producer.py <workflow_event> <number_of_events> [<metadata_key=metadata_value> <metadata_key=metadata_value> ...]
# To be used together with rabbitmq_consumer.py
import logging
import pika
import json
import os
import sys
import datetime
import requests
import time
import uuid
#LOG_FORMAT = ('%(levelname) -10s %(asctime)s %(name) -30s %(funcName) -35s %(lineno) -5d: %(message)s')
LOG_FORMAT = '%(levelname)s: %(message)s'
LOGGER = logging.getLogger(__name__)
LOG_FILE = "rabbitmq_producer.log" # in current directory
class RabbitmqProducer(object):
"""This is an example publisher that will handle unexpected interactions
with RabbitMQ such as channel and connection closures.
If RabbitMQ closes the connection, it will reopen it. You should
look at the output, as there are limited reasons why the connection may
be closed, which usually are tied to permission related issues or
socket timeouts.
It uses delivery confirmations and illustrates one way to keep track of
messages that have been sent and if they've been confirmed by RabbitMQ.
"""
EXCHANGE = 'oodt-exchange'
EXCHANGE_TYPE = 'direct'
PUBLISH_INTERVAL = 0.1
PRODUCER_ID = str(uuid.uuid4()) # unique producer identifer
def __init__(self, amqp_url, workflow_event, num_messages, msg_dict):
"""Setup the example publisher object, passing in the URL we will use
to connect to RabbitMQ.
:param str amqp_url: The URL for connecting to RabbitMQ
"""
self._connection = None
self._channel = None
self._deliveries = []
self._acked = 0
self._nacked = 0
self._message_number = 0
self._stopping = False
self._url = amqp_url
self._closing = False
self._queue = workflow_event
self._routing_key = workflow_event
self._num_messages = num_messages
self._msg_dict = msg_dict
def connect(self):
"""This method connects to RabbitMQ, returning the connection handle.
When the connection is established, the on_connection_open method
will be invoked by pika. If you want the reconnection to work, make
sure you set stop_ioloop_on_close to False, which is not the default
behavior of this adapter.
:rtype: pika.SelectConnection
"""
LOGGER.info('Connecting to %s', self._url)
return pika.SelectConnection(pika.URLParameters(self._url),
self.on_connection_open,
stop_ioloop_on_close=False)
def on_connection_open(self, unused_connection):
"""This method is called by pika once the connection to RabbitMQ has
been established. It passes the handle to the connection object in
case we need it, but in this case, we'll just mark it unused.
:type unused_connection: pika.SelectConnection
"""
LOGGER.info('Connection opened')
self.add_on_connection_close_callback()
self.open_channel()
def add_on_connection_close_callback(self):
"""This method adds an on close callback that will be invoked by pika
when RabbitMQ closes the connection to the publisher unexpectedly.
"""
LOGGER.info('Adding connection close callback')
self._connection.add_on_close_callback(self.on_connection_closed)
def on_connection_closed(self, connection, reply_code, reply_text):
"""This method is invoked by pika when the connection to RabbitMQ is
closed unexpectedly. Since it is unexpected, we will reconnect to
RabbitMQ if it disconnects.
:param pika.connection.Connection connection: The closed connection obj
:param int reply_code: The server provided reply_code if given
:param str reply_text: The server provided reply_text if given
"""
self._channel = None
if self._closing:
self._connection.ioloop.stop()
| else:
LOGGER.warning('Connection closed, reopening in 5 seconds: (%s) %s',
reply_code, reply_text)
self._connection.add_timeout(5, self.reconnect)
def reconnect(self):
"""Will be invoked by the IOLoop timer if the connection is
closed. See the on_connection_closed method.
"""
self._del | iveries = []
self._acked = 0
self._nacked = 0
self._message_number = 0
# This is the old connection IOLoop instance, stop its ioloop
self._connection.ioloop.stop()
# Create a new connection
self._connection = self.connect()
# There is now a new connection, needs a new ioloop to run
self._connection.ioloop.start()
def open_channel(self):
"""This method will open a new channel with RabbitMQ by issuing the
Channel.Open RPC command. When RabbitMQ confirms the channel is open
by sending the Channel.OpenOK RPC reply, the on_channel_open method
will be invoked.
"""
LOGGER.info('Creating a new channel')
self._connection.channel(on_open_callback=self.on_channel_open)
def on_channel_open(self, channel):
"""This method is invoked by pika when the channel has been opened.
The channel object is passed in so we can make use of it.
Since the channel is now open, we'll declare the exchange to use.
:param pika.channel.Channel channel: The channel object
"""
LOGGER.info('Channel opened')
self._channel = channel
self.add_on_channel_close_callback()
self.setup_exchange(self.EXCHANGE)
def add_on_channel_close_callback(self):
"""This method tells pika to call the on_channel_closed method if
RabbitMQ unexpectedly closes the channel.
"""
LOGGER.info('Adding channel close callback')
self._channel.add_on_close_callback(self.on_channel_closed)
def on_channel_closed(self, channel, reply_code, reply_text):
"""Invoked by pika when RabbitMQ unexpectedly closes the channel.
Channels are usually closed if you attempt to do something that
violates the protocol, such as re-declare an exchange or queue with
different parameters. In this case, we'll close the connection
to shutdown the object.
:param pika.channel.Channel: The closed channel
:param int reply_code: The numeric reason the channel was closed
:param str reply_text: The text reason the channel was closed
"""
LOGGER.warning('Channel was closed: (%s) %s', reply_code, reply_text)
if not self._closing:
self._connection.close()
def setup_exchange(self, exchange_name):
"""Setup the exchange on RabbitMQ by invoking the Exchange.Declare RPC
command. When it is complete, the on_exchange_declareok method will
be invoked by pika.
:param str|unicode exchange_name: The name of the exchange to declare
"""
LOGGER.info('Declaring exchange %s', exchange_name)
self._channel.exchange_declare(callback=self.on_exchange_declareok,
exchange=exchange_name,
exchange_type=self.EXCHANGE_TYPE,
durable=True) # survive server reboots
def on_exchange_declareok(self, unused_frame):
"""Invoked by pika when RabbitMQ has finished the Exchange.Declare RPC
command.
:param pika.Frame.Method unused_frame: Exchange.DeclareOk response frame
"""
LOGGER.info('Exchange declared')
self.setup_queue(self._queue)
def setup_queue(self, qu |
calumk/ROS-Blocks | ros_blocks/scripts/ik_solver.py | Python | mit | 1,346 | 0.002972 | import argparse
import sys
import rospy
from geometry_msgs.msg import (
PoseStamped,
Pose,
Point,
Quaternion,
)
from std_msgs.msg import Header
from baxter_core_msgs.srv import (
SolvePositionIK,
SolvePositionIKRequest,
)
def ik_solve(limb, pos, orient):
#~ rospy.init_node("rsdk_ik_service_client")
ns = "ExternalTools/" + limb + "/PositionKinematicsNode/IKService"
iksvc = ro | spy.ServiceProxy(ns, SolvePositionIK)
ikreq = SolvePosi | tionIKRequest()
print "iksvc: ", iksvc
print "ikreq: ", ikreq
hdr = Header(stamp=rospy.Time.now(), frame_id='base')
poses = {
str(limb): PoseStamped(header=hdr,
pose=Pose(position=pos, orientation=orient))}
ikreq.pose_stamp.append(poses[limb])
try:
rospy.wait_for_service(ns, 5.0)
resp = iksvc(ikreq)
except (rospy.ServiceException, rospy.ROSException), e:
rospy.logerr("Service call failed: %s" % (e,))
return 1
if (resp.isValid[0]):
print("SUCCESS - Valid Joint Solution Found:")
# Format solution into Limb API-compatible dictionary
limb_joints = dict(zip(resp.joints[0].name, resp.joints[0].position))
print limb_joints
return limb_joints
else:
print("INVALID POSE - No Valid Joint Solution Found.")
return -1
|
NNPDF/reportengine | src/reportengine/tests/__init__.py | Python | gpl-2.0 | 95 | 0.010526 | # - | *- coding: utf-8 -*-
"""
Created on Wed Mar 2 12:13:32 2016
@author: Zahari Kassabov
"" | "
|
ruhulsbu/WEAT4TwitterGroups | histwords/gender_bias_plot.py | Python | mit | 5,322 | 0.009395 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat May 13 20:55:28 2017
@author: moamin
"""
import os, sys, time, gzip, math
import numpy as np,csv, json, re
#from gensim.models import Word2Vec
import matplotlib.pyplot as plt
#from sets import Set
from representations.sequentialembedding import SequentialEmbedding
import matplotlib, random, itertools
import matplotlib.pyplot as plt
"""
Example showing how to load a series of historical embeddings and compute similarities over time.
Warning that loading all the embeddings into main memory can take a lot of RAM
"""
def histword_similarity(target_word, attribute_word):
fiction_embeddings = SequentialEmbedding.load("embeddings/eng-fiction-all_sgns", range(1900, 2000, 10))
time_sims = fiction_embeddings.get_time_sims(target_word, attribute_word)
#print "Similarity between gay and lesbian drastically increases from 1950s to the 1990s:"
for year, sim in time_sims.iteritems():
print("{year:d}, cosine similarity={sim:0.2f}".format(year=year,sim=sim))
return time_sims
def cross_target_attribute(target, target_words, attri | bute, attribute_words):
for i in range(0, len(target_words)):
for k in range(0, len(attribute_words)):
wt = target_words[i][:-1]
at = attribute_ | words[k][:-1]
'''
try:
lib_cosine = lib_word2vec_model.similarity(wt, at)
con_cosine = con_word2vec_model.similarity(wt, at)
except:
continue
#print(wt, target, at, attribute, lib_cosine, con_cosine)
print_txt = wt + ',' + target + ',' + at + ',' + attribute + ',' + str(lib_cosine) + ',' + str(con_cosine)
print(print_txt)
'''
try:
print_txt = wt + ',' + target + ',' + at + ',' + attribute
print(print_txt)
histword_similarity(wt, at)
except:
print("Exception: Not Present")
continue
fig = plt.figure(figsize=(12, 12))
colorno = 0
number = 20
cmap = plt.get_cmap(sys.argv[1])
colors = [cmap(i) for i in np.linspace(0, 1, number)]
for k in range(2, len(sys.argv)):
input_file = sys.argv[k]
file_read = open(input_file, 'r')
for line in file_read:
word = line.strip()
print('woman vs ' + word)
time_sims_woman = histword_similarity(word, 'woman')
print('man vs ' + word)
time_sims_man = histword_similarity(word, 'man')
print(len(time_sims_woman), len(time_sims_man))
#print(time_sims_woman)
x = []
y = []
for year in time_sims_woman:
sim_w = time_sims_woman[year]
dit_w = 1 - sim_w
sim_m = time_sims_man[year]
dit_m = 1 - sim_m
print(year, sim_w - sim_m, dit_w - dit_m)
x.append(year)
y.append(dit_w - dit_m)
plt.plot(y, 'o-', label=word, color=colors[colorno])
colorno = (colorno + 1) % number
#break
plt.xticks([i for i in range(0, 10)], [str(y) for y in range(1900, 2000, 10)], rotation='vertical')
plt.plot([0 for i in range(0, 10)], 'k--', label='No Bias')
plt.xlabel('Year')
plt.ylabel('(Woman -ve) - Bias - (Man + )')
#plt.legend(loc='best', mode='expand')
#plt.legend(loc = 1, fontsize=8) #bbox_to_anchor=(0.90, 0.6))#, borderaxespad=0.05)
plt.legend(loc=7, fontsize=8) #, bbox_to_anchor=(1.0, 1.0))
plt.xlim(0, 12)
plt.ylim(-0.25, 0.25)
#plt.savefig("gender_bias_" + word + ".png")
plt.show()
exit()
'''
lib_word2vec_model = Word2Vec.load('../../updated_liberals_wordvec/model_visualize/model_liberal')#(sys.argv[0])
con_word2vec_model = Word2Vec.load('../../updated_conservatives_wordvec/model_visualize/model_conservative')#(sys.argv[1])
#cosine_similarity = model.similarity(word1,word2)
print('Wt, Ct, Wa, Ca, Liberals, Conservatives')
'''
input_file = './weat_file.txt'
file_read = open(input_file, 'r')
count = 0
for line in file_read:
if len(line.strip()) == 0:
break
#print(line)
count += 1
if count == 1:
continue
if count == 2:
words = line.split()
target_one = words[0][:-1]
target_one_words = words[1:]
if count == 3:
words = line.split()
target_two = words[0][:-1]
target_two_words = words[1:]
if count == 4:
words = line.split()
attribute_one = words[0][:-1]
attribute_one_words = words[1:]
if count == 5:
words = line.split()
attribute_two = words[0][:-1]
attribute_two_words = words[1:]
count = 0
if count == 0:
#print(target_one, attribute_one, "(Format: Wt, Ct, Wa, Ca, Liberals, Conservatives)")
#print('===============================================================================')
cross_target_attribute(target_one, target_one_words, attribute_one, attribute_one_words)
#print(target_two, attribute_two, "(Format: Wt, Ct, Wa, Ca, Liberals, Conservatives)")
#print('===============================================================================')
cross_target_attribute(target_two, target_two_words, attribute_two, attribute_two_words)
#break
|
vkaracic/Xblocks-Directory | xblock/tests/test_model.py | Python | mit | 550 | 0 | from django.test import TestCase
from xblock.models import Xblock
class XblockTest(TestCase):
def test_model(self):
Xblo | ck.objects.create(
name='test',
packages='test',
description='An xblock for testing',
version='1,0',
author='Tester',
organization='ExtensionEngine',
github_link='https://github.com/tester/xblock'
)
self.assertEqual(Xblock.objects.count(), 1)
self.assertEqual(Xblock.objects.f | ilter(name='test').count(), 1)
|
florian-f/sklearn | examples/cluster/plot_cluster_iris.py | Python | bsd-3-clause | 2,573 | 0.001943 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
K-means Clustering
=========================================================
The plots display firstly what a K-means algorithm would yield
using three clusters. It is then shown what the effect of a bad
initialization is on the classification process:
By setting n_init to only 1 (default is 10), the amount of
times that the algorithm will be run with different centroid
seeds is reduced.
The next plot displays what using eight clusters would deliver
and finally the ground truth.
"""
print(__doc__)
# Code source: Gael Varoqueux
# Modified for Documentation merge by Jaques Grobler
# License: BSD
import numpy as np
import pylab as pl
from mpl_toolkits.mplot3d import Axes3D
from sklearn.cluster import KMeans
from sklearn import datasets
np.random.seed(5)
centers = [[1, 1], [-1, -1], [1, -1]]
iris = datasets.load_iris()
X = iris.data
y = iris.target
estimators = {'k_means_iris_3': KMeans(n_clusters=3),
'k_means_iris_8': KMeans(n_clusters=8),
'k_means_ | iris_bad_init': KMeans(n_clusters=3, n_init=1,
init='random')}
fignum = 1
for name, est in estimators.iteritems():
fig = pl.figure(fignum, figsize=(4, 3))
pl.clf()
ax = Axes3D(fig, | rect=[0, 0, .95, 1], elev=48, azim=134)
pl.cla()
est.fit(X)
labels = est.labels_
ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=labels.astype(np.float))
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
ax.set_xlabel('Petal width')
ax.set_ylabel('Sepal length')
ax.set_zlabel('Petal length')
fignum = fignum + 1
# Plot the ground truth
fig = pl.figure(fignum, figsize=(4, 3))
pl.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
pl.cla()
for name, label in [('Setosa', 0),
('Versicolour', 1),
('Virginica', 2)]:
ax.text3D(X[y == label, 3].mean(),
X[y == label, 0].mean() + 1.5,
X[y == label, 2].mean(), name,
horizontalalignment='center',
bbox=dict(alpha=.5, edgecolor='w', facecolor='w'))
# Reorder the labels to have colors matching the cluster results
y = np.choose(y, [1, 2, 0]).astype(np.float)
ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=y)
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
ax.set_xlabel('Petal width')
ax.set_ylabel('Sepal length')
ax.set_zlabel('Petal length')
pl.show()
|
noironetworks/group-based-policy | gbpservice/neutron/tests/unit/nfp/orchestrator/db/test_nfp_db.py | Python | apache-2.0 | 26,780 | 0.000037 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import uuid
from unittest import mock
import fixtures
from neutron.tests import base
from neutron_lib import context
from sqlalchemy.orm import exc
from gbpservice.neutron.db import api as db_api
from gbpservice.nfp.common import constants as nfp_constants
from gbpservice.nfp.common import exceptions as nfp_exc
from gbpservice.nfp.orchestrator.db import nfp_db
from gbpservice.nfp.orchestrator.db import nfp_db_model
class SqlFixture(fixtures.Fixture):
# flag to indicate that the models have been loaded
_TABLES_ESTABLISHED = False
def _setUp(self):
# Register all data models
engine = db_api.CONTEXT_WRITER.get_engine()
if not SqlFixture._TABLES_ESTABLISHED:
nfp_db_model.BASE.metadata.create_all(engine)
SqlFixture._TABLES_ESTABLISHED = True
def clear_tables():
with engine.begin() as conn:
for table in reversed(
nfp_db_model.BASE.metadata.sorted_tables):
conn.execute(table.delete())
self.addCleanup(clear_tables)
class SqlTestCaseLight(base.DietTestCase):
"""All SQL taste, zero plugin/rpc sugar"""
def setUp(self):
super(SqlTestCaseLight, self).setUp()
self.useFixture(SqlFixture())
class SqlTestCase(base.BaseTestCase):
def setUp(self):
super(SqlTestCase, self).setUp()
self.useFixture(SqlFixture())
class NFPDB(nfp_db.NFPDbBase):
pass
class NFPDBTestCase(SqlTestCase):
def setUp(self):
super(NFPDBTestCase, self).setUp()
self.ctx = context.get_admin_context()
self.nfp_db = NFPDB()
self.session = db_api.get_writer_session()
def create_network_function(self, attributes=None):
if attributes is None:
attributes = {
'name': 'name',
'description': 'description',
'tenant_id': 'tenant_id',
'service_id': 'service_id',
'service_chain_id': 'service_chain_id',
'service_profile_id': 'service_profile_id',
'service_config': 'service_config',
'config_policy_id': 'config_policy_id',
'status': 'status'
}
return self.nfp_db.create_network_function(self.session, attributes)
def test_create_network_function(self):
attrs = {
'name': 'name',
'description': 'description',
'tenant_id': 'tenant_id',
'service_id': 'service_id',
'service_chain_id': 'service_chain_id',
'service_profile_id': 'service_profile_id',
'service_config': 'service_config',
'config_policy_id': 'config_policy_id',
'status': 'status'
}
network_function = self.create_network_function(attrs)
for key in attrs:
self.assertEqual(attrs[key], network_function[key])
self.assertIsNotNone(network_function['id'])
def test_create_network_function_with_mandatory_values(self):
attrs_mandatory = {
'name': 'name',
'tenant_id': 'tenant_id',
'service_id': 'service_id',
'service_profile_id': 'service_profile_id',
'status': 'status'
}
network_function = self.create_network_function(attrs_mandatory)
for key in attrs_mandatory:
self.assertEqual(attrs_mandatory[key], network_function[key])
self.assertIsNotNone(network_function['id'])
non_mandatory_args = ['service_chain_id', 'service_config',
'config_policy_id']
for arg in non_mandatory_args:
self.assertIsNone(network_function[arg])
def test_get_network_function(self):
attrs_all = {
'name': 'name',
'description': 'description',
'tenant_id': 'tenant_id',
'service_id': 'service_id',
'service_chain_id': 'service_chain_id',
'service_profile_id': 'service_profile_id',
'service_config': 'service_config',
'config_policy_id': 'config_policy_id',
'status': 'status'
}
network_function = self.create_network_function(attrs_all)
db_network_function = self.nfp_db.get_network_function(
self.session, network_function['id'])
for key in attrs_all:
self.assertEqual(attrs_all[key], db_network_function[key])
def test_list_network_function(self):
network_function = self.create_network_function()
network_functions = self.nfp_db.get_network_functions(self.session)
self.assertEqual(1, len(network_functions))
self.assertEqual(network_function['id'], network_functions[0]['id'])
def test_list_network_function_with_filters(self):
attrs = {
'name': 'name',
'tenant_id': 'tenant_id',
'service_id': 'service_id',
'service_profile_id': 'service_profile_id',
'status': 'status'
}
network_function = self.create_network_function(attrs)
filters = {'service_id': ['service_id']}
network_functions = self.nfp_db.get_network_functions(
self.session, filters=filters)
self.assertEqual(1, len(network_functions))
self.assertEqual(network_function['id'], network_functions[0]['id'])
filters = {'service_id': ['nonexisting']}
network_functions = self.nfp_db.get_network_functions(
self.session, filters=filters)
self.assertEqual([], network_functions)
def test_update_network_function(self):
self.nfp_db.update_node_instance_network_function_map = mock.MagicMock(
return_value=None)
network_function = self.create_network_function()
self.assertIsNotNone(network_function['id'])
updated_network_function = {'status': 'ERROR'}
network_function = self.nfp_db.update_network_function(
self.session, network_function['id'], updated_network_function)
self.assertEqual('ERROR', network_function['st | atus'])
def test_delete_network_function(self):
network_function = self.create_network_function()
self.assertIsNotNone(network_function['id'])
self.nfp_ | db.delete_network_function(
self.session, network_function['id'])
self.assertRaises(nfp_exc.NetworkFunctionNotFound,
self.nfp_db.get_network_function,
self.session, network_function['id'])
def create_network_function_instance(self, attributes=None,
create_nfd=True):
if attributes is None:
nfd = (self.create_network_function_device()['id']
if create_nfd else None)
attributes = {
'name': 'name',
'description': 'description',
'tenant_id': 'tenant_id',
'network_function_id': self.create_network_function()['id'],
'network_function_device_id': nfd,
'ha_state': "Active",
'port_info': [
{'id': 'myportid1',
'port_model': nfp_constants.NEUTRON_PORT,
'port_classification': nfp_constants.PROVIDER,
'port_role': nfp_constants.ACTIVE_PORT},
{'id': 'myportid2',
'port_model': nfp_constants.GBP_PORT,
'port_classification': nfp_constants.CONSUMER,
'port_role': nfp_constants.MAST |
memmett/PyPFASST | examples/advection/main.py | Python | bsd-2-clause | 3,683 | 0.004887 | """Solve the advection/diffusion equation with PyPFASST."""
# Copyright (c) 2011, Matthew Emmett. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PAR | TICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTE | RRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from mpi4py import MPI
import argparse
import pfasst
import pfasst.imex
from ad import *
######################################################################
# options
parser = argparse.ArgumentParser(
description='solve the advection/diffusion equation')
parser.add_argument('-d',
type=int,
dest='dim',
default=1,
help='number of dimensions, defaults to 1')
parser.add_argument('-n',
type=int,
dest='steps',
default=MPI.COMM_WORLD.size,
help='number of time steps, defaults to number of mpi processes')
parser.add_argument('-l',
type=int,
dest='nlevs',
default=3,
help='number of levels, defaults to 3')
options = parser.parse_args()
###############################################################################
# config
comm = MPI.COMM_WORLD
nproc = comm.size
dt = 0.01
tend = dt*options.steps
N = 1024
D = options.dim
nnodes = [ 9, 5, 3 ]
###############################################################################
# init pfasst
pf = pfasst.PFASST()
pf.simple_communicators(ntime=nproc, comm=comm)
for l in range(options.nlevs):
F = AD(shape=D*(N,), refinement=2**l, dim=D)
SDC = pfasst.imex.IMEXSDC('GL', nnodes[l])
pf.add_level(F, SDC, interpolate, restrict)
if len(pf.levels) > 1:
pf.levels[-1].sweeps = 2
###############################################################################
# add hooks
def echo_error(level, state, **kwargs):
"""Compute and print error based on exact solution."""
if level.feval.burgers:
return
y1 = np.zeros(level.feval.shape)
level.feval.exact(state.t0+state.dt, y1)
err = np.log10(abs(level.qend-y1).max())
print 'step: %03d, iteration: %03d, position: %d, level: %02d, error: %f' % (
state.step, state.iteration, state.cycle, level.level, err)
pf.add_hook(0, 'post-sweep', echo_error)
###############################################################################
# create initial condition and run
F = AD(shape=D*(N,), dim=D)
q0 = np.zeros(F.shape)
F.exact(0.0, q0)
pf.run(q0=q0, dt=dt, tend=tend)
|
iamdork/dork | dork/git.py | Python | mit | 7,799 | 0.002821 | from subprocess import call, check_output, PIPE
from glob2 import glob, Globber
import os
import re
import config
def _git_globber_listdir(path):
if os.path.exists(path + '/.git'):
return []
else:
retur | n os.listdir(path)
def _git_globber_islink(path):
return os.path.islink(path) and not os.path.isdir(path)
class GitGlobber(Globber):
listdir = staticmethod(_git_globber_listdir)
islink = stat | icmethod(_git_globber_islink)
git_globber = GitGlobber()
def _gitless_globber_listdir(path):
return [d for d in os.listdir(path) if not d.endswith('.git')]
class GitlessGlobber(Globber):
listdir = staticmethod(_gitless_globber_listdir)
gitless_globber = GitlessGlobber()
def get_repositories(directory):
"""
Returns a <Repository> object or <None> if no repository was found.
:param: directory: str
:rtype: list[Repository]
"""
if _is_repository(directory):
yield Repository(directory)
else:
repositories = [subdir[:-5] for subdir in git_globber.glob(directory + '/**/.git')]
for d in repositories:
if not any([((r + '/') in d and d is not r) for r in repositories]):
yield Repository(d)
class Commit:
"""
Class for working with git commits.
<, > , <= and >= check if commits are valid ascendants/descendants of
each other.
"""
def __init__(self, commit_hash, repository):
"""
:type commit_hash: str
:type repository: Repository
"""
self.__hash = commit_hash
self.__directory = repository.directory
self.__repo = repository
pass
def __eq__(self, other):
"""
:type other: Commit
:rtype: bool
"""
return self.__hash == other.__hash
def __lt__(self, other):
"""
:type other: Commit
:rtype: bool
"""
return not self.__eq__(other) and _is_ancestor(self.__directory, self.__hash, other.__hash)
def __gt__(self, other):
"""
:type other: Commit
:rtype: bool
"""
return not self.__eq__(other) and _is_ancestor(self.__directory, other.__hash, self.__hash)
def __le__(self, other):
"""
:type other: Commit
:rtype: bool
"""
return self.__eq__(other) or self.__lt__(other)
def __ge__(self, other):
"""
:type other: Commit
:rtype: bool
"""
return self.__eq__(other) or self.__gt__(other)
def __sub__(self, other):
"""
:type other: Commit
:rtype: list
"""
return _commit_diff(self.__directory, self.__hash, other.__hash)
def __mod__(self, other):
"""
:type other: Commit
:rtype: list
"""
return _file_diff(self.__directory, self.__hash, other.__hash)
@property
def hash(self):
""":rtype: str"""
return self.__hash
@property
def message(self):
""":rtype: str"""
return _commit_message(self.__directory, self.__hash)
class Repository:
@property
def __segments(self):
return self.directory \
.replace(config.config.host_source_directory + '/', '') \
.split('/')
@property
def project(self):
return self.__segments[0]
@property
def instance(self):
return self.__segments[-1]
def __init__(self, directory):
"""
:type directory: str
"""
self.__directory = directory
@classmethod
def scan(cls, directory):
return get_repositories(directory)
__current_commit = None
@property
def current_commit(self):
"""
:rtype: Commit
"""
if self.__current_commit is None:
self.__current_commit = Commit(_current_commit(self.directory), self)
return self.__current_commit
def get_commit(self, commit_hash):
return Commit(commit_hash, self)
@property
def branch(self):
"""
:rtype: str
"""
return _current_branch(self.directory)
@property
def directory(self):
"""
:rtype: str
"""
return self.__directory
def contains_file(self, filepattern, contentpattern=None):
"""
Check if the repository contains a file matching a glob pattern.
Optionally provide a regex the files content is matched against
additionally.
:type filepattern: str
:type contentpattern: str
:rtype: bool
"""
sp = re.compile('^source:/')
dp = re.compile('^data:/')
if sp.match(filepattern) or dp.match(filepattern):
data = '%s/%s' % (config.config.host_data_directory, self.project)
f = dp.sub(data, sp.sub(self.directory, filepattern))
else:
f = "%s/%s" % (self.directory, filepattern)
if contentpattern:
matched_files = gitless_globber.glob(f) if '*' in f else [f]
expr = re.compile(contentpattern)
for f in matched_files:
if not os.path.isfile(f):
continue
with open(f) as fp:
if expr.search(fp.read()):
return True
return False
else:
if '*' in filepattern:
return len(gitless_globber.glob(f)) > 0
else:
return os.path.exists(f)
def _is_repository(directory):
"""
Test if a directory actually is a git repository.
:param directory:
:return:
"""
return os.path.exists(directory + '/.git')
def _current_commit(directory):
"""
:rtype: str
"""
return check_output(
['git', '--no-pager', 'log', '-1', '--format=%H']
, cwd=directory).strip()
def _current_branch(directory):
"""
:rtype: str
"""
return check_output(
['git', 'rev-parse', '--abbrev-ref', 'HEAD']
, cwd=directory).strip()
pass
def _commit_message(directory, commit):
"""
:type directory: str
:type commit: str
:rtype: str
"""
return check_output(
['git', 'log', '--format=%B', '-n', '1', commit]
, cwd=directory).strip()
__ancestors = {}
def _is_ancestor(directory, ancestor, descendant):
"""
:type directory: str
:type ancestor: str
:type descendant: str
:rtype: bool
"""
key = '%s:%s:%s' %(directory, ancestor, descendant)
global __ancestors
if key not in __ancestors:
if ancestor == descendant:
return False
elif ancestor == "new":
return True
elif descendant == "new":
return False
else:
__ancestors[key] = call(
['git', 'merge-base', '--is-ancestor', ancestor, descendant],
cwd=directory, stdout=PIPE, stderr=PIPE) is 0
return __ancestors[key]
__commit_diffs = {}
def _commit_diff(directory, a, b):
"""
:type directory: str
:type a: str
:type b: str
:rtype: list
"""
key = '%s:%s:%s' % (directory, a, b)
global __commit_diffs
if key not in __commit_diffs:
__commit_diffs[key] = check_output(
['git', '--no-pager', 'log', '--format=%H',
a + '...' + b], cwd=directory).splitlines()
return __commit_diffs[key]
__file_diffs = {}
def _file_diff(directory, a, b):
"""
:type directory: str
:type a: str
:type b: str
:rtype: list
"""
key = '%s:%s:%s' % (directory, a, b)
global __file_diffs
if key not in __file_diffs:
__file_diffs[key] = check_output(
['git', 'diff', '--name-only', a, b],
cwd=directory).splitlines()
return __file_diffs[key]
|
aliyun/aliyun-oss-python-sdk | examples/bucket_website.py | Python | mit | 6,202 | 0.011307 | # -*- coding: utf-8 -*-
import os
import oss2
from oss2.models import (ConditionInlcudeHeader,
Condition,
Redirect,
RedirectMirrorHeaders,
MirrorHeadersSet,
RoutingRule,
BucketWebsite,
REDIRECT_TYPE_MIRROR,
REDIRECT_TYPE_EXTERNAL,
REDIRECT_TYPE_ALICDN,
REDIRECT_TYPE_INTERNAL)
# 以下代码展示了设置静态网站托管的相关操作
# 首先初始化AccessKeyId、AccessKeySecret、Endpoint等信息。
# 通过环境变量获取,或者把诸如“<你的AccessKeyId>”替换成真实的AccessKeyId等。
access_key_id = os.getenv('OSS_TEST_ACCESS_KEY_ID', '<你的AccessKeyId>')
access_key_secret = os.getenv('OSS_TEST_ACCESS_KEY_SECRET', '<你的AccessKeySecret>')
bucket_name = os.getenv('OSS_TEST_BUCKET', '<你的Bucket>')
endpoint = os.getenv('OSS_TEST_ENDPOINT', '<你的访问域名>')
# 确认上面的参数都填写正确了
for param in (access_key_id, access_key_secret, bucket_name, endpoint):
assert '<' not in param, '请设置参数:' + param
# 创建Bucket对象,所有Object相关的接口都可以通过Bucket对象来进行
bucket = oss2.Bucket(oss2.Auth(access_key_id, access_key_secret), endpoint, bucket_name)
index_file = 'index.html'
error_file = 'error.html'
# 以下代码展示只设置主页与404页面的静态网站托管
bucket.put_bucket_website(BucketWebsite(index_file, error_file))
# 获取website配置
result = bucket.get_bucket_website()
print('get_bucket_website without redirect:')
print('result index_file:', result.index_file)
print('result error_file:', result.error_file)
bucket.delete_bucket_website()
# 以下代码展示镜像回源的网站托管配置,采用主备模式或者多站点模式
# 设置匹配规则
include_header1= ConditionInlcudeHeader('host', 'test.oss-cn-beijing-internal.aliyuncs.com')
include_header2 = ConditionInlcudeHeader('host', 'test.oss-cn-shenzhen-internal.aliyuncs.com')
condition1 = Condition(key_prefix_equals='key1',
http_err_code_return_equals=404, include_header_list=[include_header1, include_header2])
condition2 = Condition(key_prefix_equals='key2',
http_err_code_return_equals=404, include_header_list=[include_header1, include_header2])
# 设置跳转规则,
mirror_headers_set_1 = MirrorHeadersSet("myheader-key5","myheader-value5")
mirror_headers_set_2 = MirrorHeadersSet("myheader-key6","myheader-value6")
set_list = [mirror_headers_set_1, mirror_headers_set_2]
pass_list = ['myheader-key1', 'myheader-key2']
remove_list = ['myheader-key3', 'myheader-key4']
mirror_header = RedirectMirrorHeaders(pass_all=True, pass_list=pass_list, remove_list=remove_list, set_list=set_list)
# 使用主备源站模式, 使用mirror_url_slave,mirror_url_probe参数
redirect1 = Redirect(redirect_type=REDIRECT_TYPE_MIRROR, pass_query_string=False, mirror_url='http://www.test.com/',
mirror_url_slave='http://www.slave.com/', mirror_url_probe='http://www.test.com/index.html', mirror_pass_query_string=False,
| mirror_follow_redirect=True, mirror_check_md5=True, mirror_headers=mirror_header)
# 不指定备站
redirect2 = Redirect(redirect_type=REDIRECT_TYPE_MIRROR, mirror_url='http://www.test.com/',
mirror_pass_query_string=True, mirror_follow_redirect=True, mirror_check_md5=False)
# 可以设置一条或多条,本示例展示设置多条
rule1 = RoutingRule(rule_num=1, condition=condition1, redirect=redirect1)
rule2 = RoutingRule | (rule_num=2, condition=condition2, redirect=redirect2)
website_set = BucketWebsite(index_file, error_file, [rule1, rule2])
bucket.put_bucket_website(website_set)
# 获取website配置
website_get = bucket.get_bucket_website()
print('get_bucket_website mirror type:')
print('indext_file:', website_get.index_file)
print('error_file:', website_get.error_file)
print('rule sum:', len(website_get.rules))
bucket.delete_bucket_website()
# 以下代码展示阿里云CDN跳转以及外部跳转或者内部跳转的设置
include_header1= ConditionInlcudeHeader('host', 'test.oss-cn-beijing-internal.aliyuncs.com')
include_header2 = ConditionInlcudeHeader('host', 'test.oss-cn-shenzhen-internal.aliyuncs.com')
condition1 = Condition(key_prefix_equals='key3',
http_err_code_return_equals=404, include_header_list=[include_header1, include_header2])
condition2 = Condition(key_prefix_equals='key4',
http_err_code_return_equals=404, include_header_list=[include_header1, include_header2])
condition3 = Condition(key_prefix_equals='key5',
http_err_code_return_equals=404, include_header_list=[include_header1, include_header2])
# AliCDN
redirect1 = Redirect(redirect_type=REDIRECT_TYPE_ALICDN, pass_query_string=True,
replace_key_with='${key}.suffix', proto='http', http_redirect_code=302)
# External
redirect2 = Redirect(redirect_type=REDIRECT_TYPE_EXTERNAL, pass_query_string=False, replace_key_prefix_with='abc',
proto='https', host_name='oss.aliyuncs.com', http_redirect_code=302)
# Internal
redirect3 = Redirect(redirect_type=REDIRECT_TYPE_INTERNAL, pass_query_string=False, replace_key_with='${key}.suffix')
# 可以设置一条或多条规则,本示例展示设置多条
rule1 = RoutingRule(rule_num=1, condition=condition1, redirect=redirect1)
rule2 = RoutingRule(rule_num=2, condition=condition2, redirect=redirect2)
rule3 = RoutingRule(rule_num=3, condition=condition3, redirect=redirect3)
website_set = BucketWebsite(index_file, error_file, [rule1, rule2, rule3])
bucket.put_bucket_website(website_set)
# 获取website配置
website_get = bucket.get_bucket_website()
print('get_bucket_website other type:')
print('indext_file:', website_get.index_file)
print('error_file:', website_get.error_file)
print('rule sum:', len(website_get.rules))
for rule in website_get.rules:
print('rule_num:{}, redirect_type:{}'.format(rule.rule_num, rule.redirect.redirect_type))
bucket.delete_bucket_website() |
FancyRice/RitoAPI | ritoapi/tests/tests_endpoints/test_spectator_v3.py | Python | mit | 283 | 0 | from ritoa | pi.endpoints.spectator_v3 import SpectatorV3
def test_featured_games(sample_api_key, sample_rate_limit, sample_region):
spectator_v3 = SpectatorV3(sample_api_key, sample_rate_limit)
dat | a = spectator_v3.featured_games(sample_region)
assert('gameList' in data)
|
fooelisa/netmiko | netmiko/ruckus/ruckus_fastiron.py | Python | mit | 2,254 | 0.001775 | from __future__ import unicode_literals
import re
import time
from netmiko.cisco_base_connection import CiscoSSHConnection
class RuckusFastironBase(CiscoSSHConnection):
"""Ruckus FastIron aka ICX support."""
def session_preparation(self):
"""FastIron requires to be enable mode to disable paging."""
self._test_channel_read()
self.set_base_prompt()
self.enable()
self.disable_paging(command="skip-page-display")
# Clear the read buffer
time.sleep(.3 * self.global_delay_factor)
self.clear_buffer()
def enable(self, cmd='enable', pattern=r'(ssword|User Name)', re_flags=re.IGNORECASE):
"""Enter enable mode.
With RADIUS can prompt for User Name
SSH@Lab-ICX7250>en
User Name:service_netmiko
Password:
SSH@Lab-ICX7250#
"""
output = ""
if not self.check_enable_mode():
count = 4
i = 1
while i < count:
self.write_channel(self.normalize_cmd(cmd))
new_data = self.read_until_prompt_or_pattern(pattern=pattern, re_flags=re_flags)
output += new_data
if 'User Name' in new_data:
self.write_channel(self.normalize_cmd(self.username | ))
new_data = self.read_until_prompt_or_pattern(pattern=pattern, re_flags=re_flags)
output += new_data
if 'ssword' in new_data:
self.write_channel(self.normalize_cmd(self.secret))
output += self.read_until_prompt()
return output
time.sl | eep(1)
i += 1
if not self.check_enable_mode():
msg = "Failed to enter enable mode. Please ensure you pass " \
"the 'secret' argument to ConnectHandler."
raise ValueError(msg)
class RuckusFastironTelnet(RuckusFastironBase):
def __init__(self, *args, **kwargs):
default_enter = kwargs.get('default_enter')
kwargs['default_enter'] = '\r\n' if default_enter is None else default_enter
super(RuckusFastironTelnet, self).__init__(*args, **kwargs)
class RuckusFastironSSH(RuckusFastironBase):
pass
|
Quantify-world/apification | src/apification/utils/tests/test_noninstantiable.py | Python | mit | 1,632 | 0.009191 | import warnings
from apification.utils import Noninstantiable, NoninstantiableMeta
def test_noninstantiable():
e, o = None, None
try:
o = Noninstantiable()
except TypeError as e: pass
assert o is None
assert isinstance(e, TypeError)
def test_noninstantiable_keyword_self_check_invalid():
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
class C(Noninstantiable):
def func(self, a, b=1):
pass
assert len(w) == 1
assert issubclass(w[0].category, SyntaxWarning)
assert issubclass(C, Noninstantiable)
def test_noninstantiable_keyword_self_check_valid():
C = None
with warnings.catch_warnings(record=True) as w:
class C(Noninstantiable):
def func(cls, self, b=1): # self as non first argument is allowed for whatever reasons
pass
assert not w
assert issubclass(C, Noninstantiable)
def test_noninstantiable_classmethods():
class A(Noninstantiable):
def a(cls):
return cls
asse | rt A.a() is A
def test_noninstantiable_keyword_self_check_suppression():
C = None
class C(Noninstantiable):
_allow_self_as_first_arg = True
def func(self, a, b=1):
| pass
assert issubclass(C, Noninstantiable)
def test_noninstantable_inheritance():
class A(object):
_allow_self_as_first_arg = True
e = None
try:
class B(A):
__metaclass__ = NoninstantiableMeta
def a(self):
pass
except TypeError as e:
pass
assert e is None
|
yasoob/PythonRSSReader | venv/lib/python2.7/dist-packages/OpenSSL/test/test_crypto.py | Python | mit | 104,799 | 0.002099 | # Copyright (c) Jean-Paul Calderone
# See LICENSE file for details.
"""
Unit tests for L{OpenSSL.crypto}.
"""
from unittest import main
import os, re
from subprocess import PIPE, Popen
from datetime import datetime, timedelta
from OpenSSL.crypto import TYPE_RSA, TYPE_DSA, Error, PKey, PKeyType
from OpenSSL.crypto import X509, X509Type, X509Name, X509NameType
from OpenSSL.crypto import X509Req, X509ReqType
from OpenSSL.crypto import X509Extension, X509ExtensionType
from OpenSSL.crypto import load_certificate, load_privatekey
from OpenSSL.crypto import FILETYPE_PEM, FILETYPE_ASN1, FILETYPE_TEXT
from OpenSSL.crypto import dump_certificate, load_certificate_request
from OpenSSL.crypto import dump_certificate_request, dump_privatekey
from OpenSSL.crypto import PKCS7Type, load_pkcs7_data
from OpenSSL.crypto import PKCS12, PKCS12Type, load_pkcs12
from OpenSSL.crypto import CRL, Revoked, load_crl
from OpenSSL.crypto import NetscapeSPKI, NetscapeSPKIType
from OpenSSL.crypto import sign, verify
from OpenSSL.test.util import TestCase, bytes, b
def normalize_certificate_pem(pem):
return dump_certificate(FILETYPE_PEM, load_certificate(FILETYPE_PEM, pem))
def normalize_privatekey_pem(pem):
return dump_privatekey(FILETYPE_PEM, load_privatekey(FILETYPE_PEM, pem))
root_cert_pem = b("""-----BEGIN CERTIFICATE-----
MIIC7TCCAlagAwIBAgIIPQzE4MbeufQwDQYJKoZIhvcNAQEFBQAwWDELMAkGA1UE
BhMCVVMxCzAJBgNVBAgTAklMMRAwDgYDVQQHEwdDaGljYWdvMRAwDgYDVQQKEwdU
ZXN0aW5nMRgwFgYDVQQDEw9UZXN0aW5nIFJvb3QgQ0EwIhgPMjAwOTAzMjUxMjM2
NThaGA8yMDE3MDYxMTEyMzY1OFowWDELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAklM
MRAwDgYDVQQHEwdDaGljYWdvMRAwDgYDVQQKEwdUZXN0aW5nMRgwFgYDVQQDEw9U
ZXN0aW5nIFJvb3QgQ0EwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAPmaQumL
urpE527uSEHdL1pqcDRmWzu+98Y6YHzT/J7KWEamyMCNZ6fRW1JCR782UQ8a07fy
2xXsKy4WdKaxyG8CcatwmXvpvRQ44dSANMihHELpANTdyVp6DCysED6wkQFurHlF
1dshEaJw8b/ypDhmbVIo6Ci1xvCJqivbLFnbAgMBAAGjgbswgbgwHQYDVR0OBBYE
FINVdy1eIfFJDAkk51QJEo3IfgSuMIGIBgNVHSMEgYAwfoAUg1V3LV4h8UkMCSTn
VAkSjch+BK6hXKRaMFgxCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJJTDEQMA4GA1UE
BxMHQ2hpY2FnbzEQMA4GA1UEChMHVGVzdGluZzEYMBYGA1UEAxMPVGVzdGluZyBS
b290IENBggg9DMTgxt659DAMBgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBBQUAA4GB
AGGCDazMJGoWNBpc03u6+smc95dEead2KlZXBATOdFT1VesY3+nUOqZhEhTGlDMi
hkgaZnzoIq/Uamidegk4hirsCT/R+6vsKAAxNTcBjUeZjlykCJWy5ojShGftXIKY
w/njVbKMXrvc83qmTdGl3TAM0fxQIpqgcglFLveEBgzn
-----END CERTIFICATE-----
""")
root_key_pem = b("""-----BEGIN RSA PRIVATE KEY-----
MIICXQIBAAKBgQD5mkLpi7q6ROdu7khB3S9aanA0Zls7vvfGOmB80/yeylhGpsjA
jWen0VtSQke/NlEPGtO38tsV7CsuFnSmschvAnGrcJl76b0UOOHUgDTIoRxC6QDU
3claegwsrBA+sJEBbqx5RdXbIRGicPG/8qQ4Zm1SKOgotcbwiaor2yxZ2wIDAQAB
AoGBAPCgMpmLxzwDaUmcFbTJUvlLW1hoxNNYSu2jIZm1k/hRAcE60JYwvBkgz3UB
yMEh0AtLxYe0bFk6EHah11tMUPgscbCq73snJ++8koUw+csk22G65hOs51bVb7Aa
6JBe67oLzdtvgCUFAA2qfrKzWRZzAdhUirQUZgySZk+Xq1pBAkEA/kZG0A6roTSM
BVnx7LnPfsycKUsTumorpXiylZJjTi9XtmzxhrYN6wgZlDOOwOLgSQhszGpxVoMD
u3gByT1b2QJBAPtL3mSKdvwRu/+40zaZLwvSJRxaj0mcE4BJOS6Oqs/hS1xRlrNk
PpQ7WJ4yM6ZOLnXzm2mKyxm50Mv64109FtMCQQDOqS2KkjHaLowTGVxwC0DijMfr
I9Lf8sSQk32J5VWCySWf5gGTfEnpmUa41gKTMJIbqZZLuc | NuDcOtzUaeWZlZAkA8
ttXigLnCqR486JDPTi9ZscoZkZ+w7y6e/hH8t6d5Vjt48JVyfjPIaJY+km58LcN3
6AWSeGAdtRFHVzR7oHjVAkB4hutvxiOeiIVQNBhM6RSI9aBPMI21DoX2JRoxvNW2
cbvAhow217X9V0dVerEOKxnNYspXRrh36h7k4mQA+sDq
-----END RSA PRIVATE KEY-----
""")
server_cert_pem = b("""-----BEGIN CERTIFICATE-----
MIICKDCCAZGgAwIBAgIJAJn/HpR21r/8MA0GCSqGSIb3DQEBBQUAMFgxCzAJBgNV
BAYTAlVTMQswCQYDVQQIEwJJTDEQMA4GA1UEBxMHQ2hpY2FnbzEQMA4GA1UEChMH
VGVzdGluZzEYMBYGA1UEAxMPVGVzdGluZyBSb290IENBMCIYDzIwMDkwMzI1MTIz
NzUzW | hgPMjAxNzA2MTExMjM3NTNaMBgxFjAUBgNVBAMTDWxvdmVseSBzZXJ2ZXIw
gZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAL6m+G653V0tpBC/OKl22VxOi2Cv
lK4TYu9LHSDP9uDVTe7V5D5Tl6qzFoRRx5pfmnkqT5B+W9byp2NU3FC5hLm5zSAr
b45meUhjEJ/ifkZgbNUjHdBIGP9MAQUHZa5WKdkGIJvGAvs8UzUqlr4TBWQIB24+
lJ+Ukk/CRgasrYwdAgMBAAGjNjA0MB0GA1UdDgQWBBS4kC7Ij0W1TZXZqXQFAM2e
gKEG2DATBgNVHSUEDDAKBggrBgEFBQcDATANBgkqhkiG9w0BAQUFAAOBgQBh30Li
dJ+NlxIOx5343WqIBka3UbsOb2kxWrbkVCrvRapCMLCASO4FqiKWM+L0VDBprqIp
2mgpFQ6FHpoIENGvJhdEKpptQ5i7KaGhnDNTfdy3x1+h852G99f1iyj0RmbuFcM8
uzujnS8YXWvM7DM1Ilozk4MzPug8jzFp5uhKCQ==
-----END CERTIFICATE-----
""")
server_key_pem = normalize_privatekey_pem(b("""-----BEGIN RSA PRIVATE KEY-----
MIICWwIBAAKBgQC+pvhuud1dLaQQvzipdtlcTotgr5SuE2LvSx0gz/bg1U3u1eQ+
U5eqsxaEUceaX5p5Kk+QflvW8qdjVNxQuYS5uc0gK2+OZnlIYxCf4n5GYGzVIx3Q
SBj/TAEFB2WuVinZBiCbxgL7PFM1Kpa+EwVkCAduPpSflJJPwkYGrK2MHQIDAQAB
AoGAbwuZ0AR6JveahBaczjfnSpiFHf+mve2UxoQdpyr6ROJ4zg/PLW5K/KXrC48G
j6f3tXMrfKHcpEoZrQWUfYBRCUsGD5DCazEhD8zlxEHahIsqpwA0WWssJA2VOLEN
j6DuV2pCFbw67rfTBkTSo32ahfXxEKev5KswZk0JIzH3ooECQQDgzS9AI89h0gs8
Dt+1m11Rzqo3vZML7ZIyGApUzVan+a7hbc33nbGRkAXjHaUBJO31it/H6dTO+uwX
msWwNG5ZAkEA2RyFKs5xR5USTFaKLWCgpH/ydV96KPOpBND7TKQx62snDenFNNbn
FwwOhpahld+vqhYk+pfuWWUpQciE+Bu7ZQJASjfT4sQv4qbbKK/scePicnDdx9th
4e1EeB9xwb+tXXXUo/6Bor/AcUNwfiQ6Zt9PZOK9sR3lMZSsP7rMi7kzuQJABie6
1sXXjFH7nNJvRG4S39cIxq8YRYTy68II/dlB2QzGpKxV/POCxbJ/zu0CU79tuYK7
NaeNCFfH3aeTrX0LyQJAMBWjWmeKM2G2sCExheeQK0ROnaBC8itCECD4Jsve4nqf
r50+LF74iLXFwqysVCebPKMOpDWp/qQ1BbJQIPs7/A==
-----END RSA PRIVATE KEY-----
"""))
client_cert_pem = b("""-----BEGIN CERTIFICATE-----
MIICJjCCAY+gAwIBAgIJAKxpFI5lODkjMA0GCSqGSIb3DQEBBQUAMFgxCzAJBgNV
BAYTAlVTMQswCQYDVQQIEwJJTDEQMA4GA1UEBxMHQ2hpY2FnbzEQMA4GA1UEChMH
VGVzdGluZzEYMBYGA1UEAxMPVGVzdGluZyBSb290IENBMCIYDzIwMDkwMzI1MTIz
ODA1WhgPMjAxNzA2MTExMjM4MDVaMBYxFDASBgNVBAMTC3VnbHkgY2xpZW50MIGf
MA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDAZh/SRtNm5ntMT4qb6YzEpTroMlq2
rn+GrRHRiZ+xkCw/CGNhbtPir7/QxaUj26BSmQrHw1bGKEbPsWiW7bdXSespl+xK
iku4G/KvnnmWdeJHqsiXeUZtqurMELcPQAw9xPHEuhqqUJvvEoMTsnCEqGM+7Dtb
oCRajYyHfluARQIDAQABozYwNDAdBgNVHQ4EFgQUNQB+qkaOaEVecf1J3TTUtAff
0fAwEwYDVR0lBAwwCgYIKwYBBQUHAwIwDQYJKoZIhvcNAQEFBQADgYEAyv/Jh7gM
Q3OHvmsFEEvRI+hsW8y66zK4K5de239Y44iZrFYkt7Q5nBPMEWDj4F2hLYWL/qtI
9Zdr0U4UDCU9SmmGYh4o7R4TZ5pGFvBYvjhHbkSFYFQXZxKUi+WUxplP6I0wr2KJ
PSTJCjJOn3xo2NTKRgV1gaoTf2EhL+RG8TQ=
-----END CERTIFICATE-----
""")
client_key_pem = normalize_privatekey_pem(b("""-----BEGIN RSA PRIVATE KEY-----
MIICXgIBAAKBgQDAZh/SRtNm5ntMT4qb6YzEpTroMlq2rn+GrRHRiZ+xkCw/CGNh
btPir7/QxaUj26BSmQrHw1bGKEbPsWiW7bdXSespl+xKiku4G/KvnnmWdeJHqsiX
eUZtqurMELcPQAw9xPHEuhqqUJvvEoMTsnCEqGM+7DtboCRajYyHfluARQIDAQAB
AoGATkZ+NceY5Glqyl4mD06SdcKfV65814vg2EL7V9t8+/mi9rYL8KztSXGlQWPX
zuHgtRoMl78yQ4ZJYOBVo+nsx8KZNRCEBlE19bamSbQLCeQMenWnpeYyQUZ908gF
h6L9qsFVJepgA9RDgAjyDoS5CaWCdCCPCH2lDkdcqC54SVUCQQDseuduc4wi8h4t
V8AahUn9fn9gYfhoNuM0gdguTA0nPLVWz4hy1yJiWYQe0H7NLNNTmCKiLQaJpAbb
TC6vE8C7AkEA0Ee8CMJUc20BnGEmxwgWcVuqFWaKCo8jTH1X38FlATUsyR3krjW2
dL3yDD9NwHxsYP7nTKp/U8MV7U9IBn4y/wJBAJl7H0/BcLeRmuJk7IqJ7b635iYB
D/9beFUw3MUXmQXZUfyYz39xf6CDZsu1GEdEC5haykeln3Of4M9d/4Kj+FcCQQCY
si6xwT7GzMDkk/ko684AV3KPc/h6G0yGtFIrMg7J3uExpR/VdH2KgwMkZXisSMvw
JJEQjOMCVsEJlRk54WWjAkEAzoZNH6UhDdBK5F38rVt/y4SEHgbSfJHIAmPS32Kq
f6GGcfNpip0Uk7q7udTKuX7Q/buZi/C4YW7u3VKAquv9NA==
-----END RSA PRIVATE KEY-----
"""))
cleartextCertificatePEM = b("""-----BEGIN CERTIFICATE-----
MIIC7TCCAlagAwIBAgIIPQzE4MbeufQwDQYJKoZIhvcNAQEFBQAwWDELMAkGA1UE
BhMCVVMxCzAJBgNVBAgTAklMMRAwDgYDVQQHEwdDaGljYWdvMRAwDgYDVQQKEwdU
ZXN0aW5nMRgwFgYDVQQDEw9UZXN0aW5nIFJvb3QgQ0EwIhgPMjAwOTAzMjUxMjM2
NThaGA8yMDE3MDYxMTEyMzY1OFowWDELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAklM
MRAwDgYDVQQHEwdDaGljYWdvMRAwDgYDVQQKEwdUZXN0aW5nMRgwFgYDVQQDEw9U
ZXN0aW5nIFJvb3QgQ0EwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAPmaQumL
urpE527uSEHdL1pqcDRmWzu+98Y6YHzT/J7KWEamyMCNZ6fRW1JCR782UQ8a07fy
2xXsKy4WdKaxyG8CcatwmXvpvRQ44dSANMihHELpANTdyVp6DCysED6wkQFurHlF
1dshEaJw8b/ypDhmbVIo6Ci1xvCJqivbLFnbAgMBAAGjgbswgbgwHQYDVR0OBBYE
FINVdy1eIfFJDAkk51QJEo3IfgSuMIGIBgNVHSMEgYAwfoAUg1V3LV4h8UkMCSTn
VAkSjch+BK6hXKRaMFgxCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJJTDEQMA4GA1UE
BxMHQ2hpY2FnbzEQMA4GA1UEChMHVGVzdGluZzEYMBYGA1UEAxMPVGVzdGluZyBS
b290IENBggg9DMTgxt659DAMBgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBBQUAA4GB
AGGCDazMJGoWNBpc03u6+smc95dEead2KlZXBATOdFT1VesY3+nUOqZhEhTGlDMi
hkgaZnzoIq/Uamidegk4hirsCT/R+6vsKAAxNTcBjUeZjlykCJWy5ojShGftXIKY
w/njVbKMXrvc83qmTdGl3TAM0fxQIpqgcglFLveEBgzn
-----END CERTIFICATE-----
""")
cleartextPrivateKeyPEM = normalize_privatekey_pem(b("""\
-----BEGIN RSA PRIVATE KEY-----
MIICXQIBAAKBgQD5mkLpi7q6ROdu7khB3S9aanA0Zls7vvfGOmB80/yeylhGpsjA
jWen0VtSQke/NlEPGtO38tsV7CsuFnSmschvAnGrcJl76b0UOOHUgDTI |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.