import sqlite3
from checktheplug.models.User import User
class UserDao:
def __init__(self, settings):
self.conn = sqlite3.connect(settings.database)
def login(self, username, password):
with self.conn:
cur = self.conn.cursor()
cur.execute("SELECT id, username, gravatar, admin from users where username=? and password=?", (username, password))
row = cur.fetchone()
return User(row[0], row[1], row[2], row[3] == 1) if row else None
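# A minimal, self-contained smoke test (hypothetical schema; the real table
# layout lives in the application's migrations):
#
#   class _Settings:
#       database = ":memory:"
#
#   dao = UserDao(_Settings())
#   with dao.conn:
#       dao.conn.execute("CREATE TABLE users (id INTEGER PRIMARY KEY, "
#                        "username TEXT, password TEXT, gravatar TEXT, admin INTEGER)")
#       dao.conn.execute("INSERT INTO users (username, password, gravatar, admin) "
#                        "VALUES ('alice', 's3cret', '', 1)")
#   dao.login("alice", "s3cret")  # -> User instance; None on bad credentials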
| {
"content_hash": "8f3c03097342bd0053584a3fc1adba40",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 128,
"avg_line_length": 35.06666666666667,
"alnum_prop": 0.6311787072243346,
"repo_name": "maximx1/checktheplug",
"id": "4c411634a6bad4dcda08391926ab4a1f75245db0",
"size": "526",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "checktheplug/data/UserDao.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "1352"
},
{
"name": "Perl",
"bytes": "124"
},
{
"name": "Python",
"bytes": "25878"
},
{
"name": "Smarty",
"bytes": "13314"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import six
from sentry.api.serializers import Serializer, register, serialize
from sentry.models import Activity
@register(Activity)
class ActivitySerializer(Serializer):
def get_attrs(self, item_list, user):
# TODO(dcramer): assert on relations
users = {
d['id']: d
for d in serialize(set(i.user for i in item_list if i.user_id), user)
}
return {
item: {
'user': users[six.text_type(item.user_id)] if item.user_id else None,
} for item in item_list
}
def serialize(self, obj, attrs, user):
return {
'id': six.text_type(obj.id),
'user': attrs['user'],
'type': obj.get_type_display(),
'data': obj.data,
'dateCreated': obj.datetime,
}
class OrganizationActivitySerializer(ActivitySerializer):
def get_attrs(self, item_list, user):
# TODO(dcramer): assert on relations
attrs = super(OrganizationActivitySerializer, self).get_attrs(
item_list, user,
)
groups = {
d['id']: d
for d in serialize(set(i.group for i in item_list if i.group_id), user)
}
projects = {
d['id']: d
for d in serialize(set(i.project for i in item_list), user)
}
for item in item_list:
attrs[item]['issue'] = groups[six.text_type(item.group_id)] if item.group_id else None
attrs[item]['project'] = projects[six.text_type(item.project_id)]
return attrs
def serialize(self, obj, attrs, user):
context = super(OrganizationActivitySerializer, self).serialize(
obj, attrs, user,
)
context['issue'] = attrs['issue']
context['project'] = attrs['project']
return context
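# Usage sketch (hedged): thanks to the @register(Activity) decorator, callers
# go through the generic serialize() entry point rather than instantiating the
# serializer directly, e.g. serialize(list(Activity.objects.all()[:10]), request.user).
# Organization-scoped endpoints pass the subclass explicitly:
# serialize(activities, request.user, OrganizationActivitySerializer()).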
| {
"content_hash": "ad7f55f087e6470396b098bbe64f91c5",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 98,
"avg_line_length": 30.403225806451612,
"alnum_prop": 0.5676392572944297,
"repo_name": "zenefits/sentry",
"id": "2d62b5ddda8e23910a9a43cad530f0108948a744",
"size": "1885",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/sentry/api/serializers/models/activity.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "249557"
},
{
"name": "HTML",
"bytes": "293019"
},
{
"name": "JavaScript",
"bytes": "975797"
},
{
"name": "Lua",
"bytes": "22367"
},
{
"name": "Makefile",
"bytes": "5959"
},
{
"name": "Python",
"bytes": "12550461"
},
{
"name": "Ruby",
"bytes": "4026"
},
{
"name": "Shell",
"bytes": "793"
}
],
"symlink_target": ""
} |
from django import forms
from django.contrib.auth import authenticate
# crispy forms
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Fieldset, Submit, Div, HTML
class LoginForm(forms.Form):
username = forms.CharField(label='Username', widget=forms.TextInput())
password = forms.CharField(label='Password', widget=forms.PasswordInput())
def clean(self):
username = self.cleaned_data.get("username")
password = self.cleaned_data.get("password")
user = authenticate(username=username, password=password)
if not user or not user.is_active:
raise forms.ValidationError("Wrong credentials!")
return self.cleaned_data
def getuser(self):
username = self.cleaned_data["username"]
password = self.cleaned_data["password"]
user = authenticate(username=username, password=password)
return user
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.helper = FormHelper(self)
self.helper.layout = Layout(
Fieldset(
'Log in',
'username',
'password',
HTML(
'''
<small>* required field</small> <br>
<a href="{% url 'info:register' %}">First time? Sign up!</a>
'''
),
),
Submit('submit', 'Login', css_class='btn-success')
)
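# Usage sketch in a view (hypothetical view and URL names):
#
#   from django.contrib.auth import login
#   from django.shortcuts import redirect, render
#
#   def login_view(request):
#       form = LoginForm(request.POST or None)
#       if request.method == 'POST' and form.is_valid():
#           login(request, form.getuser())
#           return redirect('info:index')
#       return render(request, 'info/login.html', {'form': form})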
| {
"content_hash": "9dcedb0682ffe667cbf6707080367045",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 84,
"avg_line_length": 31.5625,
"alnum_prop": 0.5702970297029702,
"repo_name": "pawel-wlk/ecommerce-site",
"id": "3f392ed379ba2556bcc0350e102617008b5a65d9",
"size": "1516",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mysite/info/loginform.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "10734"
},
{
"name": "Python",
"bytes": "29216"
}
],
"symlink_target": ""
} |
import uuid as uuid_lib
from oslo_config import cfg
from nova.cloudpipe import pipelib
from nova.network import api as network_api
from nova.tests.functional.api_sample_tests import api_sample_base
from nova.tests.unit.image import fake
CONF = cfg.CONF
CONF.import_opt('vpn_image_id', 'nova.cloudpipe.pipelib')
CONF.import_opt('osapi_compute_extension',
'nova.api.openstack.compute.legacy_v2.extensions')
class CloudPipeSampleTest(api_sample_base.ApiSampleTestBaseV3):
ADMIN_API = True
extension_name = "os-cloudpipe"
def _get_flags(self):
f = super(CloudPipeSampleTest, self)._get_flags()
f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
f['osapi_compute_extension'].append('nova.api.openstack.compute.'
'contrib.cloudpipe.Cloudpipe')
f['osapi_compute_extension'].append('nova.api.openstack.compute.'
'contrib.cloudpipe_update.Cloudpipe_update')
return f
def setUp(self):
super(CloudPipeSampleTest, self).setUp()
def get_user_data(self, project_id):
"""Stub method to generate user data for cloudpipe tests."""
return "VVNFUiBEQVRB\n"
def network_api_get(self, context, network_uuid):
"""Stub to get a valid network and its information."""
return {'vpn_public_address': '127.0.0.1',
'vpn_public_port': 22}
self.stubs.Set(pipelib.CloudPipe, 'get_encoded_zip', get_user_data)
self.stubs.Set(network_api.API, "get",
network_api_get)
def generalize_subs(self, subs, vanilla_regexes):
subs['project_id'] = '[0-9a-f-]+'
return subs
def test_cloud_pipe_create(self):
# Get api samples of cloud pipe extension creation.
self.flags(vpn_image_id=fake.get_valid_image_id())
project = {'project_id': str(uuid_lib.uuid4().hex)}
response = self._do_post('os-cloudpipe', 'cloud-pipe-create-req',
project)
subs = self._get_regexes()
subs.update(project)
subs['image_id'] = CONF.vpn_image_id
self._verify_response('cloud-pipe-create-resp', subs, response, 200)
return project
def test_cloud_pipe_list(self):
# Get api samples of cloud pipe extension get request.
project = self.test_cloud_pipe_create()
response = self._do_get('os-cloudpipe')
subs = self._get_regexes()
subs.update(project)
subs['image_id'] = CONF.vpn_image_id
self._verify_response('cloud-pipe-get-resp', subs, response, 200)
def test_cloud_pipe_update(self):
subs = {'vpn_ip': '192.168.1.1',
'vpn_port': 2000}
response = self._do_put('os-cloudpipe/configure-project',
'cloud-pipe-update-req',
subs)
self.assertEqual(response.status_code, 202)
self.assertEqual(response.content, "")
| {
"content_hash": "f59f73478f05b58b95fae78cee696eca",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 76,
"avg_line_length": 38.666666666666664,
"alnum_prop": 0.610079575596817,
"repo_name": "isyippee/nova",
"id": "797938e2201d9a36560ab5481259aeacb5f69ecc",
"size": "3618",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "nova/tests/functional/api_sample_tests/test_cloudpipe.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "16597219"
},
{
"name": "Shell",
"bytes": "20716"
},
{
"name": "Smarty",
"bytes": "259485"
}
],
"symlink_target": ""
} |
import getopt
import logging
import signal
import sys
logger = logging.getLogger('aep-parser')
# pylint: disable=attribute-defined-outside-init
class AepParser(object):
prepared = False
@staticmethod
def topology_from_data(array, topo):
# Extract topology information for the data file
# The header of a data file looks like this ('#' included):
# configuration: <file path>
# config_name: <file name>
# trigger: 0.400000V (hyst 0.200000V) 0.000000W (hyst 0.200000W) 400us
# date: Fri, 10 Jun 2016 11:25:07 +0200
# host: <host name>
#
# CHN_0 Pretty_name_0 PARENT_0 Color0 Class0
# CHN_1 Pretty_name_1 PARENT_1 Color1 Class1
# CHN_2 Pretty_name_2 PARENT_2 Color2 Class2
# CHN_3 Pretty_name_3 PARENT_3 Color3 Class3
# ..
# CHN_N Pretty_name_N PARENT_N ColorN ClassN
#
info = {}
if len(array) == 6:
info['name'] = array[1]
info['parent'] = array[3]
info['pretty'] = array[2]
# add an entry for both name and pretty name in order to not parse
# the whole dict when looking for a parent and the parent of parent
topo[array[1]] = info
topo[array[2]] = info
return topo
@staticmethod
def create_virtual(topo, label, hide, duplicate):
# Create a list of virtual power domains that are the sum of others
# A virtual domain is the parent of several channels but is not sampled by a
# channel
# This can be useful if a power domain is supplied by 2 power rails
virtual = {}
# Create an entry for each virtual parent
for supply in topo.keys():
index = topo[supply]['index']
# Ignore hidden columns
if hide[index]:
continue
# Parent is in the topology
parent = topo[supply]['parent']
if parent in topo:
continue
if parent not in virtual:
virtual[parent] = {}
virtual[parent][supply] = index
# Remove parents with a single child as they don't give more information
# than their child
for supply in list(virtual.keys()):
if len(virtual[supply]) == 1:
del virtual[supply]
for supply in list(virtual.keys()):
# Add label, hide and duplicate columns for virtual domains
hide.append(0)
duplicate.append(1)
label.append(supply)
return virtual
@staticmethod
def get_label(array):
# Get the label of each column
# Remove unit '(X)' from the end of the label
label = [""]*len(array)
unit = [""]*len(array)
label[0] = array[0]
unit[0] = "(S)"
for i in range(1, len(array)):
label[i] = array[i][:-3]
unit[i] = array[i][-3:]
return label, unit
@staticmethod
def filter_column(label, unit, topo):
# Filter columns
# We don't parse Volt and Ampere columns: put in hide list
# We don't add in Total a column that is the child of another one: put in duplicate list
# By default we hide all columns
hide = [1] * len(label)
# By default we assume that there is no child
duplicate = [0] * len(label)
for i in range(len(label)): # pylint: disable=consider-using-enumerate
# We only care about time and Watt
if label[i] == 'time':
hide[i] = 0
continue
if '(W)' not in unit[i]:
continue
hide[i] = 0
# label is the pretty name
pretty = label[i]
# We don't add a power domain that is already accounted by its parent
if topo[pretty]['parent'] in topo:
duplicate[i] = 1
# Set index, that will be used by virtual domain
topo[topo[pretty]['name']]['index'] = i
# remove pretty element that is useless now
del topo[pretty]
return hide, duplicate
@staticmethod
def parse_text(array, hide):
data = [0]*len(array)
for i in range(len(array)): # pylint: disable=consider-using-enumerate
if hide[i]:
continue
try:
data[i] = int(float(array[i])*1000000)
except ValueError:
continue
return data
@staticmethod
def add_virtual_data(data, virtual):
# write virtual domain
for parent in virtual.keys():
power = 0
for child in list(virtual[parent].values()):
try:
power += data[child]
except IndexError:
continue
data.append(power)
return data
@staticmethod
def delta_nrj(array, delta, minimum, maximum, hide):
# Compute the energy consumed in this time slice and add it
# delta[0] is used to save the last time stamp
if delta[0] < 0:
delta[0] = array[0]
time = array[0] - delta[0]
if time <= 0:
return delta
for i in range(len(array)): # pylint: disable=consider-using-enumerate
if hide[i]:
continue
try:
data = array[i]
except IndexError:
continue
if data < minimum[i]:
minimum[i] = data
if data > maximum[i]:
maximum[i] = data
delta[i] += time * data
# save last time stamp
delta[0] = array[0]
return delta
def output_label(self, label, hide):
self.fo.write(label[0] + "(uS)")
for i in range(1, len(label)):
if hide[i]:
continue
self.fo.write(" " + label[i] + "(uW)")
self.fo.write("\n")
def output_power(self, array, hide):
# skip partial lines, most probably the last one
if len(array) < len(hide):
return
# write non-hidden columns
self.fo.write(str(array[0]))
for i in range(1, len(array)):
if hide[i]:
continue
self.fo.write(" "+str(array[i]))
self.fo.write("\n")
# pylint: disable=redefined-outer-name
def prepare(self, input_file, outfile, summaryfile):
try:
self.fi = open(input_file, "r")
except IOError:
logger.warning('Unable to open input file {}'.format(input_file))
logger.warning('Usage: parse_aep.py -i <inputfile> [-o <outputfile>]')
sys.exit(2)
self.parse = True
if outfile:
try:
self.fo = open(outfile, "w")
except IOError:
logger.warning('Unable to create {}'.format(outfile))
self.parse = False
else:
self.parse = False
self.summary = True
if summaryfile:
try:
self.fs = open(summaryfile, "w")
except IOError:
logger.warning('Unable to create {}'.format(summaryfile))
self.fs = sys.stdout
else:
self.fs = sys.stdout
self.prepared = True
def unprepare(self):
if not self.prepared:
# nothing has been prepared
return
self.fi.close()
if self.parse:
self.fo.close()
self.prepared = False
# pylint: disable=too-many-branches,too-many-statements,redefined-outer-name,too-many-locals
def parse_aep(self, start=0, length=-1):
# Parse aep data and calculate the energy consumed
begin = 0
label_line = 1
topo = {}
lines = self.fi.readlines()
for myline in lines:
array = myline.split()
if "#" in myline:
# update power topology
topo = self.topology_from_data(array, topo)
continue
if label_line:
label_line = 0
# 1st line not starting with # gives label of each column
label, unit = self.get_label(array)
# hide useless columns and detect channels that are children
# of other channels
hide, duplicate = self.filter_column(label, unit, topo)
# Create virtual power domains
virtual = self.create_virtual(topo, label, hide, duplicate)
if self.parse:
self.output_label(label, hide)
logger.debug('Topology : {}'.format(topo))
logger.debug('Virtual power domain : {}'.format(virtual))
logger.debug('Duplicated power domains : {}'.format(duplicate))
logger.debug('Name of columns : {}'.format(label))
logger.debug('Hidden columns : {}'.format(hide))
logger.debug('Unit of columns : {}'.format(unit))
# Init arrays
nrj = [0]*len(label)
minimum = [100000000]*len(label)
maximum = [0]*len(label)
offset = [0]*len(label)
continue
# convert text to int and unit to micro-unit
data = self.parse_text(array, hide)
# get 1st time stamp
if begin <= 0:
begin = data[0]
# skip data before start
if (data[0]-begin) < start:
continue
# stop after length
if length >= 0 and (data[0]-begin) > (start + length):
continue
# add virtual domains
data = self.add_virtual_data(data, virtual)
# extract power figures
self.delta_nrj(data, nrj, minimum, maximum, hide)
# write data into new file
if self.parse:
self.output_power(data, hide)
# if there is no data just return
if label_line or len(nrj) == 1:
raise ValueError('No data found in the data file. Please check the Arm Energy Probe')
# display energy consumption of each channel and total energy consumption
total = 0
results_table = {}
for i in range(1, len(nrj)):
if hide[i]:
continue
nrj[i] -= offset[i] * nrj[0]
total_nrj = nrj[i]/1000000000000.0
duration = (maximum[0]-minimum[0])/1000000.0
channel_name = label[i]
average_power = total_nrj/duration
min_power = minimum[i]/1000000.0
max_power = maximum[i]/1000000.0
# the format string uses %-style placeholders, so substitute with %
output = "Total nrj: %8.3f J for %s -- duration %8.3f sec -- min %8.3f W -- max %8.3f W\n"
self.fs.write(output % (total_nrj, label[i], duration, min_power, max_power))
# store each AEP channel info except Platform in the results table
results_table[channel_name] = total_nrj, average_power
if minimum[i] < offset[i]:
self.fs.write("!!! Min below offset\n")
if duplicate[i]:
continue
total += nrj[i]
output = "Total nrj: %8.3f J for Platform -- duration %8.3f sec\n"
self.fs.write(output % (total/1000000000000.0, (maximum[0]-minimum[0])/1000000.0))
total_nrj = total/1000000000000.0
duration = (maximum[0]-minimum[0])/1000000.0
average_power = total_nrj/duration
# store AEP Platform channel info in the results table
results_table["Platform"] = total_nrj, average_power
return results_table
# pylint: disable=too-many-branches,no-self-use,too-many-locals
def topology_from_config(self, topofile):
try:
ft = open(topofile, "r")
except IOError:
logger.warning('Unable to open config file {}'.format(topofile))
return
lines = ft.readlines()
topo = {}
virtual = {}
name = ""
offset = 0
index = 0
#parse config file
for myline in lines:
if myline.startswith("#"):
# skip comment
continue
if myline == "\n":
# skip empty line
continue
if name == "":
# 1st valid line is the config's name
name = myline
continue
if not myline.startswith((' ', '\t')):
# new device path
offset = index
continue
# Get parameters of channel configuration
items = myline.split()
info = {}
info['name'] = items[0]
info['parent'] = items[9]
info['pretty'] = items[8]
info['index'] = int(items[2])+offset
# Add channel
topo[items[0]] = info
# Increase index
index += 1
# Create an entry for each virtual parent
# pylint: disable=consider-iterating-dictionary
for supply in topo.keys():
# Parent is in the topology
parent = topo[supply]['parent']
if parent in topo:
continue
if parent not in virtual:
virtual[parent] = {}
virtual[parent][supply] = topo[supply]['index']
# Remove parents with a single child as they don't give more information
# than their child
# pylint: disable=consider-iterating-dictionary
for supply in list(virtual.keys()):
if len(virtual[supply]) == 1:
del virtual[supply]
topo_list = ['']*(1+len(topo)+len(virtual))
topo_list[0] = 'time'
# pylint: disable=consider-iterating-dictionary
for chnl in topo.keys():
topo_list[topo[chnl]['index']] = chnl
for chnl in virtual.keys():
index += 1
topo_list[index] = chnl
ft.close()
return topo_list
def __del__(self):
self.unprepare()
if __name__ == '__main__':
# pylint: disable=unused-argument
def handleSigTERM(signum, frame):
sys.exit(2)
signal.signal(signal.SIGTERM, handleSigTERM)
signal.signal(signal.SIGINT, handleSigTERM)
logger.setLevel(logging.WARN)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
logger.addHandler(ch)
in_file = ""
out_file = ""
figurefile = ""
start = 0
length = -1
try:
opts, args = getopt.getopt(sys.argv[1:], "i:vo:s:l:t:")
except getopt.GetoptError as err:
print(str(err)) # will print something like "option -a not recognized"
sys.exit(2)
for o, a in opts:
if o == "-i":
in_file = a
if o == "-v":
logger.setLevel(logging.DEBUG)
if o == "-o":
parse = True
out_file = a
if o == "-s":
start = int(float(a)*1000000)
if o == "-l":
length = int(float(a)*1000000)
if o == "-t":
topfile = a
parser = AepParser()
print(parser.topology_from_config(topfile))
sys.exit(0)
parser = AepParser()
parser.prepare(in_file, out_file, figurefile)
parser.parse_aep(start, length)
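# A minimal sketch of the accumulation performed by delta_nrj(): times are in
# uS and power in uW, and each slice contributes (t_n - t_{n-1}) * P_n, i.e. a
# rectangle rule in uS*uW. The synthetic samples below are illustrative only:
#
#   rows = [[0, 1000000], [1000000, 2000000], [2000000, 2000000]]
#   delta, minimum, maximum = [-1, 0], [100000000]*2, [0]*2
#   for row in rows:
#       AepParser.delta_nrj(row, delta, minimum, maximum, [0, 0])
#   # delta[1] == 4 * 10**12 uS*uW, i.e. 4 J after the /10**12 scaling above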
| {
"content_hash": "74943d8df8ee6829c9a01579dd68806e",
"timestamp": "",
"source": "github",
"line_count": 513,
"max_line_length": 102,
"avg_line_length": 30.269005847953217,
"alnum_prop": 0.5243431221020093,
"repo_name": "credp/lisa",
"id": "111aa0240e4c51d29bb8a0ea7863eb8c7b49e40b",
"size": "16713",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "external/devlib/devlib/utils/parse_aep.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "18748"
},
{
"name": "Jupyter Notebook",
"bytes": "81363929"
},
{
"name": "Makefile",
"bytes": "4003"
},
{
"name": "Perl",
"bytes": "6106"
},
{
"name": "Python",
"bytes": "2309481"
},
{
"name": "Shell",
"bytes": "108055"
}
],
"symlink_target": ""
} |
"""
Helper methods for operations related to the management of volumes
and storage repositories for Windows 2012.
"""
import time
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.virt.hyperv import basevolumeutils
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class VolumeUtilsV2(basevolumeutils.BaseVolumeUtils):
def __init__(self, conn_storage, conn_wmi):
self._conn_storage = conn_storage
self._conn_wmi = conn_wmi
def login_storage_target(self, target_lun, target_iqn,
target_portal):
"""Add target portal, list targets and logins to the target."""
separator = target_portal.find(':')
target_address = target_portal[:separator]
target_port = target_portal[separator + 1:]
# Add the target portal to the iSCSI initiator and send targets
portal = self._conn_storage.__getattr__("MSFT_iSCSITargetPortal")
portal.New(TargetPortalAddress=target_address,
TargetPortalPortNumber=target_port)
# Connect to the target
target = self._conn_storage.__getattr__("MSFT_iSCSITarget")
target.Connect(NodeAddress=target_iqn,
IsPersistent=True)
# Wait for the disk to be mounted. TODO: replace this sleep with a real check
time.sleep(CONF.hyperv_wait_between_attach_retry)
def logout_storage_target(self, target_iqn):
"""Logs out storage target through its session id."""
target = self._conn_storage.MSFT_iSCSITarget(
NodeAddress=target_iqn)[0]
if target.IsConnected:
session = self._conn_storage.MSFT_iSCSISession(
TargetNodeAddress=target_iqn)[0]
if session.IsPersistent:
session.Unregister()
target.Disconnect()
def execute_log_out(self, session_id):
session = self._conn_wmi.MSiSCSIInitiator_SessionClass(
SessionId=session_id)[0]
self.logout_storage_target(session.TargetName)
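# Sketch of the portal-string parsing done in login_storage_target()
# (illustrative address; no WMI connection required):
#
#   target_portal = "192.168.10.5:3260"
#   separator = target_portal.find(':')
#   assert target_portal[:separator] == "192.168.10.5"
#   assert target_portal[separator + 1:] == "3260"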
| {
"content_hash": "c7df1b4107d8e3bff4150bffa37409c8",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 77,
"avg_line_length": 40.528301886792455,
"alnum_prop": 0.61731843575419,
"repo_name": "maoy/zknova",
"id": "6f5bcdac9d11772dfad160059253ba3e6f616ea3",
"size": "2829",
"binary": false,
"copies": "1",
"ref": "refs/heads/zk-servicegroup",
"path": "nova/virt/hyperv/volumeutilsV2.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "7960822"
},
{
"name": "Shell",
"bytes": "16987"
}
],
"symlink_target": ""
} |
from optparse import OptionParser
import time, socket
from struct import pack, unpack
import signal
def commandline():
parser = OptionParser(usage="%prog [OPTIONS] device src_ip_addr src_hw_addr broadcast_ip_addr netmask")
parser.add_option("-i", "--interval", dest="interval", default="1000",
help="Repeat interval in ms", metavar="INTERVAL")
parser.add_option("-r", "--repeat", dest="repeat", default="1",
help="Repeat count", metavar="REPEAT")
parser.add_option("-p", "--pidfile", dest="pidfile",
default="/tmp/arp.pid",
help="PID file", metavar="PID")
(options, args) = parser.parse_args()
if len(args) != 5:
parser.error("Expects: [-i repeatinterval-ms] [-r repeatcount] [-p pidfile] \\\n"+
" device src_ip_addr src_hw_addr broadcast_ip_addr netmask")
class Args: pass
ret = Args()
ret.interval = int(options.interval)
ret.repeat = int(options.repeat)
ret.pidfile = options.pidfile
ret.device = args[0]
ret.src_ip_addr = args[1]
ret.src_hw_addr = args[2]
ret.broadcast_ip_addr = args[3]
ret.netmask = args[4]
return ret
def mssleep(ms):
time.sleep(ms/1000.0)
def send_arp(ip, device, sender_mac, broadcast, netmask, arptype):
#if_ipaddr = socket.gethostbyname(socket.gethostname())
# protocol 0 is sufficient here: this socket only sends, never receives
sock = socket.socket(socket.AF_PACKET, socket.SOCK_RAW)
sock.bind((device, 0))
bcast_mac = pack('!6B', *(0xFF,)*6)
zero_mac = pack('!6B', *(0x00,)*6)
socket_mac = sock.getsockname()[4]
if sender_mac == 'auto':
sender_mac = socket_mac
else:
raise Exception("Can't ARP this: " + sender_mac)
ARPOP_REQUEST = pack('!H', 0x0001)
ARPOP_REPLY = pack('!H', 0x0002)
arpop = None
target_mac = None
if arptype == 'REQUEST':
target_mac = zero_mac
arpop = ARPOP_REQUEST
else:
target_mac = sender_mac
arpop = ARPOP_REPLY
sender_ip = pack('!4B', *[int(x) for x in ip.split('.')])
target_ip = pack('!4B', *[int(x) for x in ip.split('.')])
arpframe = [
### ETHERNET
# destination MAC addr
bcast_mac,
# source MAC addr
socket_mac,
# protocol type (=ARP)
pack('!H', 0x0806),
### ARP
# logical protocol type (Ethernet/IP)
pack('!HHBB', 0x0001, 0x0800, 0x0006, 0x0004),
# operation type
arpop,
# sender MAC addr
sender_mac,
# sender IP addr
sender_ip,
# target hardware addr
target_mac,
# target IP addr
target_ip
]
# send the ARP
sock.send(b''.join(arpframe))  # frame parts are bytes, so join on bytes
return True
def main():
args = commandline()
for j in range(args.repeat):
if not send_arp(args.src_ip_addr, args.device,
args.src_hw_addr,
args.broadcast_ip_addr,
args.netmask, 'REQUEST'):
break
mssleep(args.interval / 2)
if not send_arp(args.src_ip_addr, args.device,
args.src_hw_addr,
args.broadcast_ip_addr,
args.netmask, 'REPLY'):
break
if j != args.repeat-1:
mssleep(args.interval / 2)
if __name__=="__main__":
main()
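# Example invocation (illustrative values; raw AF_PACKET sockets require root):
#
#   sudo python arping.py -i 500 -r 3 eth0 192.168.1.10 auto 192.168.1.255 255.255.255.0
#
# The frame assembled in send_arp() is a 14-byte Ethernet header followed by a
# 28-byte ARP payload: 42 bytes on the wire before padding.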
| {
"content_hash": "6b4d67ac0f7ac6beee9a40a7fbc2a88d",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 107,
"avg_line_length": 29.205128205128204,
"alnum_prop": 0.5484342990927714,
"repo_name": "chasemp/sup",
"id": "01bf20367304c82dcea0211bf88aa73d30105d65",
"size": "4051",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "arping.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "32079"
}
],
"symlink_target": ""
} |
import unittest
from pywin32_testutil import str2bytes
import win32api, win32con, win32event, winerror
import sys, os, time
import tempfile
import datetime
class CurrentUserTestCase(unittest.TestCase):
def testGetCurrentUser(self):
name = "%s\\%s" % (win32api.GetDomainName(), win32api.GetUserName())
self.failUnless(name == win32api.GetUserNameEx(win32api.NameSamCompatible))
class TestTime(unittest.TestCase):
def testTimezone(self):
# GetTimeZoneInformation
rc, tzinfo = win32api.GetTimeZoneInformation()
if rc == win32con.TIME_ZONE_ID_DAYLIGHT:
tz_str = tzinfo[4]
tz_time = tzinfo[5]
else:
tz_str = tzinfo[1]
tz_time = tzinfo[2]
# for the sake of code exercise but don't output
tz_str.encode()
if not isinstance(tz_time, datetime.datetime):
tz_time.Format()
def TestDateFormat(self):
DATE_LONGDATE = 2
date_flags = DATE_LONGDATE
win32api.GetDateFormat(0, date_flags, None)
win32api.GetDateFormat(0, date_flags, 0)
win32api.GetDateFormat(0, date_flags, datetime.datetime.now())
win32api.GetDateFormat(0, date_flags, time.time())
def TestTimeFormat(self):
win32api.GetTimeFormat(0, 0, None)
win32api.GetTimeFormat(0, 0, 0)
win32api.GetTimeFormat(0, 0, datetime.datetime.now())
win32api.GetTimeFormat(0, 0, time.time())
class Registry(unittest.TestCase):
key_name = r'PythonTestHarness\Whatever'
def test1(self):
# This used to leave a stale exception behind.
def reg_operation():
hkey = win32api.RegCreateKey(win32con.HKEY_CURRENT_USER, self.key_name)
x = 3/0 # or a statement like: raise 'error'
# do the test
try:
try:
try:
reg_operation()
except:
1/0 # Force exception
finally:
win32api.RegDeleteKey(win32con.HKEY_CURRENT_USER, self.key_name)
except ZeroDivisionError:
pass
def testValues(self):
key_name = r'PythonTestHarness\win32api'
## tuples containing value name, value type, data
values=(
(None, win32con.REG_SZ, 'This is default unnamed value'),
('REG_SZ', win32con.REG_SZ,'REG_SZ text data'),
('REG_EXPAND_SZ', win32con.REG_EXPAND_SZ, '%systemdir%'),
## REG_MULTI_SZ value needs to be a list since strings are returned as a list
('REG_MULTI_SZ', win32con.REG_MULTI_SZ, ['string 1','string 2','string 3','string 4']),
('REG_MULTI_SZ_empty', win32con.REG_MULTI_SZ, []),
('REG_DWORD', win32con.REG_DWORD, 666),
('REG_QWORD_INT', win32con.REG_QWORD, 99),
('REG_QWORD', win32con.REG_QWORD, 2**33),
('REG_BINARY', win32con.REG_BINARY, str2bytes('\x00\x01\x02\x03\x04\x05\x06\x07\x08\x01\x00')),
)
hkey = win32api.RegCreateKey(win32con.HKEY_CURRENT_USER, key_name)
for value_name, reg_type, data in values:
win32api.RegSetValueEx(hkey, value_name, None, reg_type, data)
for value_name, orig_type, orig_data in values:
data, typ=win32api.RegQueryValueEx(hkey, value_name)
self.assertEqual(typ, orig_type)
self.assertEqual(data, orig_data)
def testNotifyChange(self):
def change():
hkey = win32api.RegCreateKey(win32con.HKEY_CURRENT_USER, self.key_name)
try:
win32api.RegSetValue(hkey, None, win32con.REG_SZ, "foo")
finally:
win32api.RegDeleteKey(win32con.HKEY_CURRENT_USER, self.key_name)
evt = win32event.CreateEvent(None,0,0,None)
## REG_NOTIFY_CHANGE_LAST_SET - values
## REG_CHANGE_NOTIFY_NAME - keys
## REG_NOTIFY_CHANGE_SECURITY - security descriptor
## REG_NOTIFY_CHANGE_ATTRIBUTES
win32api.RegNotifyChangeKeyValue(win32con.HKEY_CURRENT_USER,1,win32api.REG_NOTIFY_CHANGE_LAST_SET,evt,True)
ret_code=win32event.WaitForSingleObject(evt,0)
# Should be no change.
self.failUnless(ret_code==win32con.WAIT_TIMEOUT)
change()
# Our event should now be in a signalled state.
ret_code=win32event.WaitForSingleObject(evt,0)
self.failUnless(ret_code==win32con.WAIT_OBJECT_0)
class FileNames(unittest.TestCase):
def testShortLongPathNames(self):
try:
me = __file__
except NameError:
me = sys.argv[0]
fname = os.path.abspath(me).lower()
short_name = win32api.GetShortPathName(fname).lower()
long_name = win32api.GetLongPathName(short_name).lower()
self.failUnless(long_name==fname, \
"Expected long name ('%s') to be original name ('%s')" % (long_name, fname))
self.failUnlessEqual(long_name, win32api.GetLongPathNameW(short_name).lower())
long_name = win32api.GetLongPathNameW(short_name).lower()
self.failUnless(type(long_name)==str, "GetLongPathNameW returned type '%s'" % (type(long_name),))
self.failUnless(long_name==fname, \
"Expected long name ('%s') to be original name ('%s')" % (long_name, fname))
def testShortUnicodeNames(self):
try:
me = __file__
except NameError:
me = sys.argv[0]
fname = os.path.abspath(me).lower()
# passing unicode should cause GetShortPathNameW to be called.
short_name = win32api.GetShortPathName(str(fname)).lower()
self.failUnless(isinstance(short_name, str))
long_name = win32api.GetLongPathName(short_name).lower()
self.failUnless(long_name==fname, \
"Expected long name ('%s') to be original name ('%s')" % (long_name, fname))
self.failUnlessEqual(long_name, win32api.GetLongPathNameW(short_name).lower())
long_name = win32api.GetLongPathNameW(short_name).lower()
self.failUnless(type(long_name)==str, "GetLongPathNameW returned type '%s'" % (type(long_name),))
self.failUnless(long_name==fname, \
"Expected long name ('%s') to be original name ('%s')" % (long_name, fname))
def testLongLongPathNames(self):
# We need filename where the FQN is > 256 - simplest way is to create a
# 250 character directory in the cwd (except - cwd may be on a drive
# not supporting \\\\?\\ (eg, network share) - so use temp.
import win32file
basename = "a" * 250
# but we need to ensure we use the 'long' version of the
# temp dir for later comparison.
long_temp_dir = win32api.GetLongPathNameW(tempfile.gettempdir())
fname = "\\\\?\\" + os.path.join(long_temp_dir, basename)
try:
win32file.CreateDirectoryW(fname, None)
except win32api.error as details:
if details.winerror!=winerror.ERROR_ALREADY_EXISTS:
raise
try:
# GetFileAttributes automatically calls GetFileAttributesW when
# passed unicode
try:
attr = win32api.GetFileAttributes(fname)
except win32api.error as details:
if details.winerror != winerror.ERROR_FILENAME_EXCED_RANGE:
raise
attr = win32api.GetFileAttributes(str(fname))
self.failUnless(attr & win32con.FILE_ATTRIBUTE_DIRECTORY, attr)
long_name = win32api.GetLongPathNameW(fname)
self.failUnlessEqual(long_name.lower(), fname.lower())
finally:
win32file.RemoveDirectory(fname)
class FormatMessage(unittest.TestCase):
def test_FromString(self):
msg = "Hello %1, how are you %2?"
inserts = ["Mark", "today"]
result = win32api.FormatMessage(win32con.FORMAT_MESSAGE_FROM_STRING,
msg, # source
0, # ID
0, # LangID
inserts)
self.assertEqual(result, "Hello Mark, how are you today?")
class Misc(unittest.TestCase):
def test_last_error(self):
for x in (0, 1, -1, winerror.TRUST_E_PROVIDER_UNKNOWN):
win32api.SetLastError(x)
self.failUnlessEqual(x, win32api.GetLastError())
def testVkKeyScan(self):
# hopefully ' ' doesn't depend on the locale!
self.failUnlessEqual(win32api.VkKeyScan(' '), 32)
def testVkKeyScanEx(self):
# hopefully ' ' doesn't depend on the locale!
self.failUnlessEqual(win32api.VkKeyScanEx(' ', 0), 32)
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "40a3c3d8ce74ba0c4690d05a25620152",
"timestamp": "",
"source": "github",
"line_count": 201,
"max_line_length": 115,
"avg_line_length": 43.482587064676615,
"alnum_prop": 0.6043478260869565,
"repo_name": "sserrot/champion_relationships",
"id": "26de71b0bf6ca9f7097b7d5e229db6483e01c4b7",
"size": "8797",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "venv/Lib/site-packages/win32/test/test_win32api.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "128"
},
{
"name": "HTML",
"bytes": "18324224"
},
{
"name": "Jupyter Notebook",
"bytes": "9131072"
},
{
"name": "Python",
"bytes": "10702"
}
],
"symlink_target": ""
} |
from django import forms
from shout.models import Shout
from shout.utils import is_private, user2link, url2link
class ShoutForm(forms.ModelForm):
class Meta:
model = Shout
fields = ('message',)
def save(self, user, *args, **kwargs):
instance = self.instance
instance.is_private, instance.message = is_private(instance.message)
instance.message = url2link(instance.message)
instance.message, mentions = user2link(instance.message)
instance.user = user
super(ShoutForm, self).save(*args, **kwargs)
instance.mentions.add(*mentions)
return instance
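# Usage sketch (hypothetical message text; the mention/link markup comes from
# the user2link/url2link helpers imported above):
#
#   form = ShoutForm(data={'message': '@alice check http://example.com'})
#   if form.is_valid():
#       shout = form.save(request.user)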
| {
"content_hash": "5318e35eda449f23315b44f4f3a267f7",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 76,
"avg_line_length": 35.526315789473685,
"alnum_prop": 0.6844444444444444,
"repo_name": "django-stars/dash2011",
"id": "c9ea15ec50361a899622ad014d72f68b0c1e4794",
"size": "675",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "presence/apps/shout/forms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "4760"
},
{
"name": "Python",
"bytes": "62786"
}
],
"symlink_target": ""
} |
"""Contains convenience wrappers for typical Neural Network TensorFlow layers.
Additionally it maintains a collection of update_ops that need to be
run after the ops have been computed, for example to update the moving
means and moving variances of batch_norm.
Ops that have different behavior during training or eval have an is_training
parameter. Additionally, ops that contain variables.variable have a trainable
parameter, which controls whether the op's variables are trainable.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.training import moving_averages
from slim import losses
from slim import scopes
from slim import variables
# Used to keep the update ops done by batch_norm.
UPDATE_OPS_COLLECTION = '_update_ops_'
@scopes.add_arg_scope
def batch_norm(inputs,
decay=0.999,
center=True,
scale=False,
epsilon=0.001,
moving_vars='moving_vars',
activation=None,
is_training=True,
trainable=True,
restore=True,
scope=None,
reuse=None):
"""Adds a Batch Normalization layer.
Args:
inputs: a tensor of size [batch_size, height, width, channels]
or [batch_size, channels].
decay: decay for the moving average.
center: If True, subtract beta. If False, beta is not created and ignored.
scale: If True, multiply by gamma. If False, gamma is
not used. When the next layer is linear (also e.g. ReLU), this can be
disabled since the scaling can be done by the next layer.
epsilon: small float added to variance to avoid dividing by zero.
moving_vars: collection to store the moving_mean and moving_variance.
activation: activation function.
is_training: whether or not the model is in training mode.
trainable: whether or not the variables should be trainable or not.
restore: whether or not the variables should be marked for restore.
scope: Optional scope for variable_op_scope.
reuse: whether or not the layer and its variables should be reused. To be
able to reuse the layer scope must be given.
Returns:
a tensor representing the output of the operation.
"""
inputs_shape = inputs.get_shape()
with tf.variable_op_scope([inputs], scope, 'BatchNorm', reuse=reuse):
axis = list(range(len(inputs_shape) - 1))
params_shape = inputs_shape[-1:]
# Allocate parameters for the beta and gamma of the normalization.
beta, gamma = None, None
if center:
beta = variables.variable('beta',
params_shape,
initializer=tf.zeros_initializer,
trainable=trainable,
restore=restore)
if scale:
gamma = variables.variable('gamma',
params_shape,
initializer=tf.ones_initializer,
trainable=trainable,
restore=restore)
# Create moving_mean and moving_variance add them to
# GraphKeys.MOVING_AVERAGE_VARIABLES collections.
moving_collections = [moving_vars, tf.GraphKeys.MOVING_AVERAGE_VARIABLES]
moving_mean = variables.variable('moving_mean',
params_shape,
initializer=tf.zeros_initializer,
trainable=False,
restore=restore,
collections=moving_collections)
moving_variance = variables.variable('moving_variance',
params_shape,
initializer=tf.ones_initializer,
trainable=False,
restore=restore,
collections=moving_collections)
if is_training:
# Calculate the moments based on the individual batch.
mean, variance = tf.nn.moments(inputs, axis)
update_moving_mean = moving_averages.assign_moving_average(
moving_mean, mean, decay)
tf.add_to_collection(UPDATE_OPS_COLLECTION, update_moving_mean)
update_moving_variance = moving_averages.assign_moving_average(
moving_variance, variance, decay)
tf.add_to_collection(UPDATE_OPS_COLLECTION, update_moving_variance)
else:
# Just use the moving_mean and moving_variance.
mean = moving_mean
variance = moving_variance
# Normalize the activations.
outputs = tf.nn.batch_normalization(
inputs, mean, variance, beta, gamma, epsilon)
outputs.set_shape(inputs.get_shape())
if activation:
outputs = activation(outputs)
return outputs
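# Usage sketch (hypothetical `images`, `loss` and `optimizer` tensors): the
# moving-average assignments above are only collected, so training code must
# run them alongside the train op:
#
#   net = batch_norm(images, is_training=True, scope='bn1')
#   update_ops = tf.get_collection(UPDATE_OPS_COLLECTION)
#   with tf.control_dependencies(update_ops):
#       train_op = optimizer.minimize(loss)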
def _two_element_tuple(int_or_tuple):
"""Converts `int_or_tuple` to height, width.
Several of the functions that follow accept arguments as either
a tuple of 2 integers or a single integer. A single integer
indicates that the 2 values of the tuple are the same.
This functions normalizes the input value by always returning a tuple.
Args:
int_or_tuple: A list of 2 ints, a single int or a tf.TensorShape.
Returns:
A tuple with 2 values.
Raises:
ValueError: If `int_or_tuple` it not well formed.
"""
if isinstance(int_or_tuple, (list, tuple)):
if len(int_or_tuple) != 2:
raise ValueError('Must be a list with 2 elements: %s' % int_or_tuple)
return int(int_or_tuple[0]), int(int_or_tuple[1])
if isinstance(int_or_tuple, int):
return int(int_or_tuple), int(int_or_tuple)
if isinstance(int_or_tuple, tf.TensorShape):
if len(int_or_tuple) == 2:
return int_or_tuple[0], int_or_tuple[1]
raise ValueError('Must be an int, a list with 2 elements or a TensorShape of '
'length 2')
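# For example, _two_element_tuple(3) == (3, 3) and
# _two_element_tuple([3, 5]) == (3, 5).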
@scopes.add_arg_scope
def conv2d(inputs,
num_filters_out,
kernel_size,
stride=1,
padding='SAME',
activation=tf.nn.relu,
stddev=0.01,
bias=0.0,
weight_decay=0,
batch_norm_params=None,
is_training=True,
trainable=True,
restore=True,
scope=None,
reuse=None):
"""Adds a 2D convolution followed by an optional batch_norm layer.
conv2d creates a variable called 'weights', representing the convolutional
kernel, that is convolved with the input. If `batch_norm_params` is None, a
second variable called 'biases' is added to the result of the convolution
operation.
Args:
inputs: a tensor of size [batch_size, height, width, channels].
num_filters_out: the number of output filters.
kernel_size: a list of length 2: [kernel_height, kernel_width] of
the filters. Can be an int if both values are the same.
stride: a list of length 2: [stride_height, stride_width].
Can be an int if both strides are the same. Note that presently
both strides must have the same value.
padding: one of 'VALID' or 'SAME'.
activation: activation function.
stddev: standard deviation of the truncated Gaussian weight distribution.
bias: the initial value of the biases.
weight_decay: the weight decay.
batch_norm_params: parameters for the batch_norm. If is None don't use it.
is_training: whether or not the model is in training mode.
trainable: whether or not the variables should be trainable or not.
restore: whether or not the variables should be marked for restore.
scope: Optional scope for variable_op_scope.
reuse: whether or not the layer and its variables should be reused. To be
able to reuse the layer scope must be given.
Returns:
a tensor representing the output of the operation.
"""
with tf.variable_op_scope([inputs], scope, 'Conv', reuse=reuse):
kernel_h, kernel_w = _two_element_tuple(kernel_size)
stride_h, stride_w = _two_element_tuple(stride)
num_filters_in = inputs.get_shape()[-1]
weights_shape = [kernel_h, kernel_w,
num_filters_in, num_filters_out]
weights_initializer = tf.truncated_normal_initializer(stddev=stddev)
l2_regularizer = None
if weight_decay and weight_decay > 0:
l2_regularizer = losses.l2_regularizer(weight_decay)
weights = variables.variable('weights',
shape=weights_shape,
initializer=weights_initializer,
regularizer=l2_regularizer,
trainable=trainable,
restore=restore)
conv = tf.nn.conv2d(inputs, weights, [1, stride_h, stride_w, 1],
padding=padding)
if batch_norm_params is not None:
with scopes.arg_scope([batch_norm], is_training=is_training,
trainable=trainable, restore=restore):
outputs = batch_norm(conv, **batch_norm_params)
else:
bias_shape = [num_filters_out,]
bias_initializer = tf.constant_initializer(bias)
biases = variables.variable('biases',
shape=bias_shape,
initializer=bias_initializer,
trainable=trainable,
restore=restore)
outputs = tf.nn.bias_add(conv, biases)
if activation:
outputs = activation(outputs)
return outputs
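# Usage sketch (hypothetical `images` tensor of shape [batch, height, width, 3]):
#
#   net = conv2d(images, 64, [3, 3], scope='conv1')
#   net = conv2d(net, 64, [3, 3], batch_norm_params={}, scope='conv2')
#   net = max_pool(net, [2, 2], scope='pool1')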
@scopes.add_arg_scope
def fc(inputs,
num_units_out,
activation=tf.nn.relu,
stddev=0.01,
bias=0.0,
weight_decay=0,
batch_norm_params=None,
is_training=True,
trainable=True,
restore=True,
scope=None,
reuse=None):
"""Adds a fully connected layer followed by an optional batch_norm layer.
FC creates a variable called 'weights', representing the fully connected
weight matrix, that is multiplied by the input. If `batch_norm` is None, a
second variable called 'biases' is added to the result of the initial
vector-matrix multiplication.
Args:
inputs: a [B x N] tensor where B is the batch size and N is the number of
input units in the layer.
num_units_out: the number of output units in the layer.
activation: activation function.
stddev: the standard deviation for the weights.
bias: the initial value of the biases.
weight_decay: the weight decay.
batch_norm_params: parameters for the batch_norm. If is None don't use it.
is_training: whether or not the model is in training mode.
trainable: whether or not the variables should be trainable or not.
restore: whether or not the variables should be marked for restore.
scope: Optional scope for variable_op_scope.
reuse: whether or not the layer and its variables should be reused. To be
able to reuse the layer scope must be given.
Returns:
the tensor variable representing the result of the series of operations.
"""
with tf.variable_op_scope([inputs], scope, 'FC', reuse=reuse):
num_units_in = inputs.get_shape()[1]
weights_shape = [num_units_in, num_units_out]
weights_initializer = tf.truncated_normal_initializer(stddev=stddev)
l2_regularizer = None
if weight_decay and weight_decay > 0:
l2_regularizer = losses.l2_regularizer(weight_decay)
weights = variables.variable('weights',
shape=weights_shape,
initializer=weights_initializer,
regularizer=l2_regularizer,
trainable=trainable,
restore=restore)
if batch_norm_params is not None:
outputs = tf.matmul(inputs, weights)
with scopes.arg_scope([batch_norm], is_training=is_training,
trainable=trainable, restore=restore):
outputs = batch_norm(outputs, **batch_norm_params)
else:
bias_shape = [num_units_out,]
bias_initializer = tf.constant_initializer(bias)
biases = variables.variable('biases',
shape=bias_shape,
initializer=bias_initializer,
trainable=trainable,
restore=restore)
outputs = tf.nn.xw_plus_b(inputs, weights, biases)
if activation:
outputs = activation(outputs)
return outputs
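# Usage sketch (hypothetical pooled feature map `net`): flatten to [batch, k]
# before the fully connected layers and disable the activation on the logits:
#
#   net = flatten(net)
#   net = fc(net, 384, scope='fc1')
#   logits = fc(net, 10, activation=None, scope='logits')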
def one_hot_encoding(labels, num_classes, scope=None):
"""Transform numeric labels into onehot_labels.
Args:
labels: [batch_size] target labels.
num_classes: total number of classes.
scope: Optional scope for op_scope.
Returns:
one hot encoding of the labels.
"""
with tf.op_scope([labels], scope, 'OneHotEncoding'):
batch_size = labels.get_shape()[0]
indices = tf.expand_dims(tf.range(0, batch_size), 1)
labels = tf.cast(tf.expand_dims(labels, 1), indices.dtype)
concated = tf.concat(1, [indices, labels])
onehot_labels = tf.sparse_to_dense(
concated, tf.pack([batch_size, num_classes]), 1.0, 0.0)
onehot_labels.set_shape([batch_size, num_classes])
return onehot_labels
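# For example, with num_classes=4 a label of 2 becomes [0., 0., 1., 0.].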
@scopes.add_arg_scope
def max_pool(inputs, kernel_size, stride=2, padding='VALID', scope=None):
"""Adds a Max Pooling layer.
It is assumed by the wrapper that the pooling is only done per image and not
in depth or batch.
Args:
inputs: a tensor of size [batch_size, height, width, depth].
kernel_size: a list of length 2: [kernel_height, kernel_width] of the
pooling kernel over which the op is computed. Can be an int if both
values are the same.
stride: a list of length 2: [stride_height, stride_width].
Can be an int if both strides are the same. Note that presently
both strides must have the same value.
padding: the padding method, either 'VALID' or 'SAME'.
scope: Optional scope for op_scope.
Returns:
a tensor representing the results of the pooling operation.
Raises:
ValueError: if 'kernel_size' is not a 2-D list
"""
with tf.op_scope([inputs], scope, 'MaxPool'):
kernel_h, kernel_w = _two_element_tuple(kernel_size)
stride_h, stride_w = _two_element_tuple(stride)
return tf.nn.max_pool(inputs,
ksize=[1, kernel_h, kernel_w, 1],
strides=[1, stride_h, stride_w, 1],
padding=padding)
@scopes.add_arg_scope
def avg_pool(inputs, kernel_size, stride=2, padding='VALID', scope=None):
"""Adds a Avg Pooling layer.
It is assumed by the wrapper that the pooling is only done per image and not
in depth or batch.
Args:
inputs: a tensor of size [batch_size, height, width, depth].
kernel_size: a list of length 2: [kernel_height, kernel_width] of the
pooling kernel over which the op is computed. Can be an int if both
values are the same.
stride: a list of length 2: [stride_height, stride_width].
Can be an int if both strides are the same. Note that presently
both strides must have the same value.
padding: the padding method, either 'VALID' or 'SAME'.
scope: Optional scope for op_scope.
Returns:
a tensor representing the results of the pooling operation.
"""
with tf.op_scope([inputs], scope, 'AvgPool'):
kernel_h, kernel_w = _two_element_tuple(kernel_size)
stride_h, stride_w = _two_element_tuple(stride)
return tf.nn.avg_pool(inputs,
ksize=[1, kernel_h, kernel_w, 1],
strides=[1, stride_h, stride_w, 1],
padding=padding)
@scopes.add_arg_scope
def dropout(inputs, keep_prob=0.5, is_training=True, scope=None):
"""Returns a dropout layer applied to the input.
Args:
inputs: the tensor to pass to the Dropout layer.
keep_prob: the probability of keeping each input unit.
is_training: whether or not the model is in training mode. If so, dropout is
applied and values scaled. Otherwise, inputs is returned.
scope: Optional scope for op_scope.
Returns:
a tensor representing the output of the operation.
"""
if is_training and keep_prob > 0:
with tf.op_scope([inputs], scope, 'Dropout'):
return tf.nn.dropout(inputs, keep_prob)
else:
return inputs
def flatten(inputs, scope=None):
"""Flattens the input while maintaining the batch_size.
Assumes that the first dimension represents the batch.
Args:
inputs: a tensor of size [batch_size, ...].
scope: Optional scope for op_scope.
Returns:
a flattened tensor with shape [batch_size, k].
Raises:
ValueError: if inputs.shape is wrong.
"""
if len(inputs.get_shape()) < 2:
raise ValueError('Inputs must have at least 2 dimensions')
dims = inputs.get_shape()[1:]
k = dims.num_elements()
with tf.op_scope([inputs], scope, 'Flatten'):
return tf.reshape(inputs, [-1, k])
def repeat_op(repetitions, inputs, op, *args, **kwargs):
"""Build a sequential Tower starting from inputs by using an op repeatedly.
It creates new scopes for each operation by increasing the counter.
Example: given repeat_op(3, _, ops.conv2d, 64, [3, 3], scope='conv1')
it will repeat the given op under the following variable_scopes:
conv1/Conv
conv1/Conv_1
conv1/Conv_2
Args:
repetitions: number of repetitions.
inputs: a tensor of size [batch_size, height, width, channels].
op: an operation.
*args: args for the op.
**kwargs: kwargs for the op.
Returns:
a tensor resulting from applying the operation op, repetitions times.
Raises:
ValueError: if the op is unknown or wrong.
"""
scope = kwargs.pop('scope', None)
with tf.variable_op_scope([inputs], scope, 'RepeatOp'):
tower = inputs
for _ in range(repetitions):
tower = op(tower, *args, **kwargs)
return tower
| {
"content_hash": "4f0a1c2e232e6a3000cbc6860a33aeeb",
"timestamp": "",
"source": "github",
"line_count": 459,
"max_line_length": 80,
"avg_line_length": 39.34204793028322,
"alnum_prop": 0.633514231919371,
"repo_name": "trigeorgis/mdm",
"id": "d6e8020a0f81cb944ef0925971edcf5f59d99568",
"size": "18731",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "slim/ops.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1030441"
},
{
"name": "Python",
"bytes": "175631"
}
],
"symlink_target": ""
} |
from builtins import range
import json
import random
import time
import itertools
from ethereum.utils import (
parse_as_bin,
big_endian_to_int,
to_string,
)
from ethereum.hybrid_casper import casper_utils
from ethereum.meta import apply_block
from ethereum.common import update_block_env_variables
from ethereum.tools import tester
import rlp
from rlp.utils import encode_hex
from ethereum.exceptions import InvalidTransaction, VerificationFailed
from ethereum.slogging import get_logger
from ethereum.config import Env
from ethereum.state import State, dict_to_prev_header
from ethereum.block import Block, BlockHeader, BLANK_UNCLES_HASH
from ethereum.pow.consensus import initialize
from ethereum.genesis_helpers import mk_basic_state, state_from_genesis_declaration, initialize_genesis_keys
log = get_logger('eth.chain')
config_string = ':info,eth.chain:debug'
# from ethereum.slogging import configure_logging
# config_string = ':info,eth.vm.log:trace,eth.vm.op:trace,eth.vm.stack:trace,eth.vm.exit:trace,eth.pb.msg:trace,eth.pb.tx:debug'
# configure_logging(config_string=config_string)
class Chain(object):
def __init__(self, genesis=None, env=None, coinbase=b'\x00' * 20,
new_head_cb=None, reset_genesis=False, localtime=None, **kwargs):
self.env = env or Env()
# Initialize the state
if b'head_hash' in self.db: # new head tag
self.state = self.mk_poststate_of_blockhash(self.db.get(b'head_hash'))
print('Initializing chain from saved head, #%d (%s)' %
(self.state.prev_headers[0].number, encode_hex(self.state.prev_headers[0].hash)))
elif genesis is None:
raise Exception("Need genesis decl!")
elif isinstance(genesis, State):
assert env is None or env == genesis.env
self.state = genesis
self.env = self.state.env
print('Initializing chain from provided state')
reset_genesis = True
elif "extraData" in genesis:
self.state = state_from_genesis_declaration(
genesis, self.env)
reset_genesis = True
print('Initializing chain from provided genesis declaration')
elif "prev_headers" in genesis:
self.state = State.from_snapshot(genesis, self.env)
reset_genesis = True
print('Initializing chain from provided state snapshot, %d (%s)' %
(self.state.block_number, encode_hex(self.state.prev_headers[0].hash[:8])))
else:
print('Initializing chain from new state based on alloc')
self.state = mk_basic_state(genesis, {
"number": kwargs.get('number', 0),
"gas_limit": kwargs.get('gas_limit', 4712388),
"gas_used": kwargs.get('gas_used', 0),
"timestamp": kwargs.get('timestamp', 1467446877),
"difficulty": kwargs.get('difficulty', 2**25),
"hash": kwargs.get('prevhash', '00' * 32),
"uncles_hash": kwargs.get('uncles_hash', '0x' + encode_hex(BLANK_UNCLES_HASH))
}, self.env)
reset_genesis = True
assert self.env.db == self.state.db
initialize(self.state)
self.new_head_cb = new_head_cb
self.head_hash = self.state.prev_headers[0].hash
self.checkpoint_head_score = 0
self.casper_address = self.config['CASPER_ADDRESS']
log.info('Casper address: 0x'+encode_hex(self.casper_address))
assert self.state.block_number == self.state.prev_headers[0].number
if reset_genesis:
self.genesis = Block(self.state.prev_headers[0], [], [])
initialize_genesis_keys(self.state, self.genesis)
else:
self.genesis = self.get_block_by_number(0)
self.min_gasprice = kwargs.get('min_gasprice', 5 * 10**9)
self.coinbase = coinbase
self.extra_data = 'moo ha ha says the laughing cow.'
self.time_queue = []
self.parent_queue = {}
self.localtime = time.time() if localtime is None else localtime
@property
def head(self):
try:
block_rlp = self.db.get(self.head_hash)
if block_rlp == b'GENESIS':
return self.genesis
else:
return rlp.decode(block_rlp, Block)
except Exception as e:
log.error(e)
return None
# ~~~~~~~~~~~~~~~~~~~~ ADD BLOCK ~~~~~~~~~~~~~~~~~~~~ #
# This function should be called periodically so as to
# process blocks that were received but laid aside because
# they were received too early
def process_time_queue(self, new_time=None):
self.localtime = time.time() if new_time is None else new_time
i = 0
while i < len(self.time_queue) and self.time_queue[i].timestamp <= self.localtime:
log.info('Adding scheduled block')
pre_len = len(self.time_queue)
self.add_block(self.time_queue.pop(i))
if len(self.time_queue) == pre_len:
i += 1
def should_add_block(self, block):
# Check that the block wasn't received too early
now = self.localtime
if block.header.timestamp > now:
i = 0
while i < len(self.time_queue) and block.timestamp > self.time_queue[i].timestamp:
i += 1
self.time_queue.insert(i, block)
log.info('Block received too early (%d vs %d). Delaying for %d seconds' %
(now, block.header.timestamp, block.header.timestamp - now))
return False
# Check that the block's parent has already been added
if block.header.prevhash not in self.env.db:
if block.header.prevhash not in self.parent_queue:
self.parent_queue[block.header.prevhash] = []
self.parent_queue[block.header.prevhash].append(block)
log.info('Got block %d (%s) with prevhash %s, parent not found. Delaying for now' %
(block.number, encode_hex(block.hash), encode_hex(block.prevhash)))
return False
# Check that the block doesn't throw an exception
if block.header.prevhash == self.head_hash:
temp_state = self.state.ephemeral_clone()
else:
temp_state = self.mk_poststate_of_blockhash(block.header.prevhash)
try:
apply_block(temp_state, block)
except (AssertionError, KeyError, ValueError, InvalidTransaction, VerificationFailed) as e: # FIXME add relevant exceptions here
log.info('Block %s with parent %s invalid, reason: %s' % (encode_hex(block.header.hash), encode_hex(block.header.prevhash), e))
return False
return True
def add_block_to_head(self, block):
log.info('Adding to head', head=encode_hex(block.header.prevhash))
apply_block(self.state, block)
self.db.put(b'block:' + to_string(block.header.number), block.header.hash)
self.get_pow_difficulty(block) # side effect: put 'score:' cache in db
self.head_hash = block.header.hash
for i, tx in enumerate(block.transactions):
self.db.put(b'txindex:' + tx.hash, rlp.encode([block.number, i]))
def set_head(self, block):
# ~~~ PoW Fork Choice ~~~~ #
# If block is directly on top of the head, immediately make it our head
if block.header.prevhash == self.head_hash:
self.add_block_to_head(block)
else: # Otherwise, check if we should change our head
# TODO: run `is_fork_heavier_than_head`, modified so it works for both PoW and Casper
log.info('Receiving block not on head, adding to secondary post state',
prevhash=encode_hex(block.header.prevhash))
self.reorganize_head_to(block)
self.db.put(b'head_hash', self.head_hash)
self.db.commit()
log.info('Reorganizing chain to block %d (%s) with %d txs and %d gas' %
(block.header.number, encode_hex(block.header.hash)[:8],
len(block.transactions), block.header.gas_used))
if self.new_head_cb and block.header.number != 0:
self.new_head_cb(block)
return True
# Call upon receiving a block
def add_block(self, block):
# ~~~ Validate ~~~~ #
# Validate that the block should be added
if not self.should_add_block(block):
return False
# ~~~ Store ~~~~ #
# Store the block
self.db.put(block.header.hash, rlp.encode(block))
self.add_child(block)
# Store the state root
if block.header.prevhash == self.head_hash:
temp_state = self.state.ephemeral_clone()
else:
temp_state = self.mk_poststate_of_blockhash(block.header.prevhash)
apply_block(temp_state, block)
self.db.put(b'state:' + block.header.hash, temp_state.trie.root_hash)
# Check the last finalized checkpoint and store
# ~~~ Add block ~~~~ #
if self.get_score(self.state, self.head) < self.get_score(temp_state, block) and not self.switch_reverts_finalized_block(self.head, block):
self.set_head(block)
log.info('Changed head to: {}'.format(block.number))
casper = tester.ABIContract(tester.State(temp_state), casper_utils.casper_abi, self.config['CASPER_ADDRESS'])
if casper.get_last_finalized_epoch() == casper.get_current_epoch() - 1:
h = casper.get_checkpoint_hashes(casper.get_last_finalized_epoch())
if h != b'\x00' * 32:
hist_casper = tester.ABIContract(tester.State(self.mk_poststate_of_blockhash(h)), casper_utils.casper_abi, self.config['CASPER_ADDRESS'])
if hist_casper.get_total_curdyn_deposits() > self.config['NON_REVERT_MIN_DEPOSIT'] and \
hist_casper.get_total_prevdyn_deposits() > self.config['NON_REVERT_MIN_DEPOSIT']:
self.db.put(b'finalized:'+h, b'true')
log.info('Finalized checkpoint {} {}'.format(casper.get_last_finalized_epoch(), encode_hex(h)[:8]))
else:
log.info('Trivially finalized checkpoint {}'.format(casper.get_last_finalized_epoch()))
else:
log.info('Skipping block {} which is not a descendant of current head checkpoint'.format(block.number))
# Are there blocks that we received that were waiting for this block?
# If so, process them.
if block.header.hash in self.parent_queue:
for _blk in self.parent_queue[block.header.hash]:
self.add_block(_blk)
del self.parent_queue[block.header.hash]
return True
def get_score(self, prestate, block):
casper = tester.ABIContract(tester.State(prestate), casper_utils.casper_abi, self.config['CASPER_ADDRESS'])
return casper.get_last_justified_epoch() * 10**40 + self.get_pow_difficulty(block)
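    # The 10**40 multiplier makes the score lexicographic: a later justified
    # epoch outweighs any realistic amount of PoW work. A quick illustration
    # with made-up numbers:
    #
    #     score_a = 3 * 10**40 + 10**18    # justified epoch 3, huge PoW score
    #     score_b = 4 * 10**40 + 10**6     # justified epoch 4, tiny PoW score
    #     assert score_b > score_a         # justification always dominates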
def switch_reverts_finalized_block(self, old_head, new_head):
while old_head.number > new_head.number:
if b'finalized:'+old_head.hash in self.db:
log.info('[WARNING] Attempt to revert failed: checkpoint {} is finalized'.format(encode_hex(old_head.hash)))
return True
old_head = self.get_parent(old_head)
while new_head.number > old_head.number:
new_head = self.get_parent(new_head)
if new_head is None:
log.info('Revert: new_head is None')
return False
while new_head.hash != old_head.hash:
if b'finalized:'+old_head.hash in self.db:
log.info('[WARNING] Attempt to revert failed; checkpoint {} is finalized'.format(encode_hex(old_head.hash)))
return True
old_head = self.get_parent(old_head)
new_head = self.get_parent(new_head)
if new_head is None or old_head is None:
log.info('Revert: new_head or old_head is None')
return False
return False
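    # Sketch of the walk above (hypothetical chain, for illustration only):
    # both heads are first rewound to a common height, then stepped back in
    # lockstep until they meet; passing a finalized checkpoint on the old
    # side aborts the reorg.
    #
    #     A - B - C - D      <- old_head = D
    #          \
    #           E - F        <- new_head = F; common ancestor is B
    #
    # Reverting C or D is allowed only if neither is a finalized checkpoint.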
def reorganize_head_to(self, block):
log.info('Replacing head')
b = block
new_chain = {}
while b and b.header.number >= int(self.db.get(b'GENESIS_NUMBER')):
new_chain[b.header.number] = b
key = b'block:' + to_string(b.header.number)
orig_at_height = self.db.get(key) if key in self.db else None
if orig_at_height == b.header.hash:
break
if b.prevhash not in self.db or self.db.get(b.prevhash) == b'GENESIS':
break
b = self.get_parent(b)
replace_from = b.header.number if b else (int(self.db.get(b'GENESIS_NUMBER')) + 1)
for i in itertools.count(replace_from):
log.info('Rewriting height %d' % i)
key = b'block:' + to_string(i)
orig_at_height = self.db.get(key) if key in self.db else None
if orig_at_height:
self.db.delete(key)
orig_block_at_height = self.get_block(orig_at_height)
for tx in orig_block_at_height.transactions:
if b'txindex:' + tx.hash in self.db:
self.db.delete(b'txindex:' + tx.hash)
if i in new_chain:
new_block_at_height = new_chain[i]
self.db.put(key, new_block_at_height.header.hash)
                # Use a separate index variable so the outer height counter
                # `i` is not clobbered before the break check below
                for j, tx in enumerate(new_block_at_height.transactions):
                    self.db.put(b'txindex:' + tx.hash,
                                rlp.encode([new_block_at_height.number, j]))
if i not in new_chain and not orig_at_height:
break
self.head_hash = block.header.hash
self.state = self.mk_poststate_of_blockhash(block.hash)
def find_heaviest_pow_block(self, root):
children = self.get_children(root)
maxchild, maxscore = root, self.get_pow_difficulty(root)
for c in children:
maxc, s = self.find_heaviest_pow_block(c)
if s > maxscore:
maxchild, maxscore = maxc, s
return maxchild, maxscore
# ~~~~~~~~~~~~~~~~~~~~ CASPER UTILS ~~~~~~~~~~~~~~~~~~~~ #
def is_child_checkpoint(self, child, parent):
if parent == b'\x00' * 32:
            # If the parent checkpoint is the genesis checkpoint, then the child must be a descendant
return True
parent_block = self.get_block(parent)
child_block = self.get_block(child)
while parent_block.number < child_block.number:
child_block = self.get_block(child_block.prevhash)
if parent_block == child_block:
return True
else:
return False
# ~~~~~~~~~~~~~~~~~~~~ BLOCK UTILS ~~~~~~~~~~~~~~~~~~~~ #
def mk_poststate_of_blockhash(self, blockhash, convert=False):
if blockhash not in self.db:
raise Exception("Block hash %s not found" % encode_hex(blockhash))
block_rlp = self.db.get(blockhash)
if block_rlp == b'GENESIS':
return State.from_snapshot(json.loads(self.db.get(b'GENESIS_STATE')), self.env)
block = rlp.decode(block_rlp, Block)
state = State(env=self.env)
state.trie.root_hash = block.header.state_root if convert else self.db.get(b'state:'+blockhash)
update_block_env_variables(state, block)
state.gas_used = block.header.gas_used
state.txindex = len(block.transactions)
state.recent_uncles = {}
state.prev_headers = []
b = block
header_depth = state.config['PREV_HEADER_DEPTH']
for i in range(header_depth + 1):
state.prev_headers.append(b.header)
if i < 6:
state.recent_uncles[state.block_number - i] = []
for u in b.uncles:
state.recent_uncles[state.block_number - i].append(u.hash)
try:
b = rlp.decode(state.db.get(b.header.prevhash), Block)
except:
break
if i < header_depth:
if state.db.get(b.header.prevhash) == b'GENESIS':
jsondata = json.loads(state.db.get(b'GENESIS_STATE'))
for h in jsondata["prev_headers"][:header_depth - i]:
state.prev_headers.append(dict_to_prev_header(h))
for blknum, uncles in jsondata["recent_uncles"].items():
if int(blknum) >= state.block_number - int(state.config['MAX_UNCLE_DEPTH']):
state.recent_uncles[blknum] = [parse_as_bin(u) for u in uncles]
else:
raise Exception("Dangling prevhash")
assert len(state.journal) == 0, state.journal
return state
def get_parent(self, block):
if block.header.number == int(self.db.get(b'GENESIS_NUMBER')):
return None
return self.get_block(block.header.prevhash)
def get_block(self, blockhash):
try:
block_rlp = self.db.get(blockhash)
if block_rlp == b'GENESIS':
if not hasattr(self, 'genesis'):
self.genesis = rlp.decode(self.db.get(b'GENESIS_RLP'), sedes=Block)
return self.genesis
else:
return rlp.decode(block_rlp, Block)
except Exception as e:
log.debug("Failed to get block", hash=blockhash, error=e)
return None
# Add a record allowing you to later look up the provided block's
# parent hash and see that it is one of its children
def add_child(self, child):
try:
existing = self.db.get(b'child:' + child.header.prevhash)
except:
existing = b''
existing_hashes = []
for i in range(0, len(existing), 32):
existing_hashes.append(existing[i: i+32])
if child.header.hash not in existing_hashes:
self.db.put(b'child:' + child.header.prevhash, existing + child.header.hash)
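    # The child index is a flat byte string of concatenated 32-byte hashes,
    # e.g. (with hypothetical values):
    #
    #     db[b'child:' + parent_hash] == child1_hash + child2_hash   # 64 bytes
    #
    # get_child_hashes() below slices it back into [child1_hash, child2_hash].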
def get_blockhash_by_number(self, number):
try:
return self.db.get(b'block:' + to_string(number))
except:
return None
def get_block_by_number(self, number):
return self.get_block(self.get_blockhash_by_number(number))
# Get the hashes of all known children of a given block
def get_child_hashes(self, blockhash):
o = []
try:
data = self.db.get(b'child:' + blockhash)
for i in range(0, len(data), 32):
o.append(data[i:i + 32])
return o
except:
return []
def get_children(self, block):
if isinstance(block, Block):
block = block.header.hash
if isinstance(block, BlockHeader):
block = block.hash
return [self.get_block(h) for h in self.get_child_hashes(block)]
# Get the score (AKA total difficulty in PoW) of a given block
def get_pow_difficulty(self, block):
if not block:
return 0
key = b'score:' + block.header.hash
fills = []
while key not in self.db:
fills.insert(0, (block.header.hash, block.difficulty))
key = b'score:' + block.header.prevhash
block = self.get_parent(block)
if block is None:
return 0
score = int(self.db.get(key))
for h, d in fills:
key = b'score:' + h
score = score + d + random.randrange(d // 10**6 + 1)
self.db.put(key, str(score))
return score
def has_block(self, block):
return block in self
def has_blockhash(self, blockhash):
return blockhash in self.db
def get_chain(self, frm=None, to=2**63 - 1):
if frm is None:
frm = int(self.db.get(b'GENESIS_NUMBER')) + 1
chain = []
for i in itertools.islice(itertools.count(), frm, to):
h = self.get_blockhash_by_number(i)
if not h:
return chain
            chain.append(self.get_block(h))
        return chain
# Recover transaction and the block that contains it
def get_transaction(self, tx):
if not isinstance(tx, (str, bytes)):
tx = tx.hash
if b'txindex:' + tx in self.db:
data = rlp.decode(self.db.get(b'txindex:' + tx))
blk, index = self.get_block_by_number(
big_endian_to_int(data[0])), big_endian_to_int(data[1])
tx = blk.transactions[index]
return tx, blk, index
else:
return None
def get_descendants(self, block):
output = []
blocks = [block]
while len(blocks):
b = blocks.pop()
blocks.extend(self.get_children(b))
output.append(b)
return output
# Get blockhashes starting from a hash and going backwards
def get_blockhashes_from_hash(self, hash, max):
block = self.get_block(hash)
if block is None:
return []
header = block.header
hashes = []
for i in range(max):
hash = header.prevhash
block = self.get_block(hash)
if block is None:
break
header = block.header
hashes.append(header.hash)
if header.number == 0:
break
return hashes
def __contains__(self, blk):
if isinstance(blk, (str, bytes)):
try:
blk = rlp.decode(self.db.get(blk), Block)
except:
return False
try:
o = self.get_block(self.get_blockhash_by_number(blk.number)).hash
assert o == blk.hash
return True
except:
return False
@property
def config(self):
return self.env.config
@property
def db(self):
return self.env.db
| {
"content_hash": "5c7ba2f7bf06e10747f189af89c68123",
"timestamp": "",
"source": "github",
"line_count": 506,
"max_line_length": 157,
"avg_line_length": 43.47430830039526,
"alnum_prop": 0.5793253932175653,
"repo_name": "karlfloersch/pyethereum",
"id": "8ee185d0369ac380662fa1c9e33dc0edfd322625",
"size": "21998",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "ethereum/hybrid_casper/chain.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2764"
},
{
"name": "Python",
"bytes": "717449"
},
{
"name": "Shell",
"bytes": "1297"
}
],
"symlink_target": ""
} |
"""Django Bouncy App"""
| {
"content_hash": "b0a0a9bc771d0482bd211d6a7912df37",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 23,
"avg_line_length": 24,
"alnum_prop": 0.625,
"repo_name": "ofa/django-bouncy",
"id": "9cb20576a86765a79c2547158a4c960a3be5b7cf",
"size": "24",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_bouncy/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "61212"
}
],
"symlink_target": ""
} |
import json, sys, argparse
import numpy as np
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
from lsh_benchmark_plot import get_precision_recall, fscore, average_fscore
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("benchmark_output")
args = parser.parse_args(sys.argv[1:])
with open(args.benchmark_output) as f:
benchmark = json.load(f)
num_perms = benchmark["num_perms"]
lsh_times = benchmark["lsh_times"]
ground_truth_results = [[x[0] for x in r] for r in benchmark["ground_truth_results"]]
lsh_fscores = []
for results in benchmark["lsh_results"]:
query_results = [[x[0] for x in r] for r in results]
lsh_fscores.append(average_fscore(query_results, ground_truth_results))
lsh_times = np.array([np.percentile(ts, 90)
for ts in lsh_times])*1000
fig, axes = plt.subplots(1, 2, figsize=(5*2, 4.5), sharex=True)
# Plot query fscore vs. num perm
axes[0].plot(num_perms, lsh_fscores, marker="+", label="LSH Ensemble")
axes[0].set_ylabel("Average F-Score")
    axes[0].set_xlabel("# of Permutation Functions")
axes[0].grid()
# Plot query time vs. num perm
axes[1].plot(num_perms, lsh_times, marker="+", label="LSH Ensemble")
axes[1].set_xlabel("# of Permutation Functions")
axes[1].set_ylabel("90 Percentile Query Time (ms)")
axes[1].grid()
axes[1].legend(loc="lower right")
plt.tight_layout()
fig.savefig("lshensemble_benchmark.png", pad_inches=0.05, bbox_inches="tight")
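# Usage sketch (assuming a benchmark JSON produced by a separate run script
# with the keys read above: "num_perms", "lsh_times", "ground_truth_results"
# and "lsh_results"):
#
#     python lshensemble_benchmark_plot.py lshensemble_benchmark.json
#
# This writes lshensemble_benchmark.png to the current directory.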
| {
"content_hash": "ab5996a8c6d61c636ebb64ff2ecee1fa",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 89,
"avg_line_length": 39.075,
"alnum_prop": 0.6609085092770314,
"repo_name": "arbazkhan002/datasketch",
"id": "077d635851dd31e01546b2d9087af1cdafb80f3a",
"size": "1563",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "benchmark/lshensemble_benchmark_plot.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "7801"
},
{
"name": "Python",
"bytes": "239691"
}
],
"symlink_target": ""
} |
"""Unittests for the project.py module."""
import contextlib
import os
import shutil
import subprocess
import tempfile
import unittest
import error
import git_command
import git_config
import platform_utils
import project
@contextlib.contextmanager
def TempGitTree():
"""Create a new empty git checkout for testing."""
# TODO(vapier): Convert this to tempfile.TemporaryDirectory once we drop
# Python 2 support entirely.
try:
tempdir = tempfile.mkdtemp(prefix='repo-tests')
    # Tests need to assume that main is the default branch at init,
    # which is not supported in git config until 2.28.
cmd = ['git', 'init']
if git_command.git_require((2, 28, 0)):
cmd += ['--initial-branch=main']
else:
# Use template dir for init.
templatedir = tempfile.mkdtemp(prefix='.test-template')
with open(os.path.join(templatedir, 'HEAD'), 'w') as fp:
fp.write('ref: refs/heads/main\n')
cmd += ['--template=', templatedir]
subprocess.check_call(cmd, cwd=tempdir)
yield tempdir
finally:
platform_utils.rmtree(tempdir)
class FakeProject(object):
"""A fake for Project for basic functionality."""
def __init__(self, worktree):
self.worktree = worktree
self.gitdir = os.path.join(worktree, '.git')
self.name = 'fakeproject'
self.work_git = project.Project._GitGetByExec(
self, bare=False, gitdir=self.gitdir)
self.bare_git = project.Project._GitGetByExec(
self, bare=True, gitdir=self.gitdir)
self.config = git_config.GitConfig.ForRepository(gitdir=self.gitdir)
class ReviewableBranchTests(unittest.TestCase):
"""Check ReviewableBranch behavior."""
def test_smoke(self):
"""A quick run through everything."""
with TempGitTree() as tempdir:
fakeproj = FakeProject(tempdir)
# Generate some commits.
with open(os.path.join(tempdir, 'readme'), 'w') as fp:
fp.write('txt')
fakeproj.work_git.add('readme')
fakeproj.work_git.commit('-mAdd file')
fakeproj.work_git.checkout('-b', 'work')
fakeproj.work_git.rm('-f', 'readme')
fakeproj.work_git.commit('-mDel file')
# Start off with the normal details.
rb = project.ReviewableBranch(
fakeproj, fakeproj.config.GetBranch('work'), 'main')
self.assertEqual('work', rb.name)
self.assertEqual(1, len(rb.commits))
self.assertIn('Del file', rb.commits[0])
d = rb.unabbrev_commits
self.assertEqual(1, len(d))
short, long = next(iter(d.items()))
self.assertTrue(long.startswith(short))
self.assertTrue(rb.base_exists)
# Hard to assert anything useful about this.
self.assertTrue(rb.date)
# Now delete the tracking branch!
fakeproj.work_git.branch('-D', 'main')
rb = project.ReviewableBranch(
fakeproj, fakeproj.config.GetBranch('work'), 'main')
self.assertEqual(0, len(rb.commits))
self.assertFalse(rb.base_exists)
# Hard to assert anything useful about this.
self.assertTrue(rb.date)
class CopyLinkTestCase(unittest.TestCase):
"""TestCase for stub repo client checkouts.
It'll have a layout like:
tempdir/ # self.tempdir
checkout/ # self.topdir
git-project/ # self.worktree
Attributes:
tempdir: A dedicated temporary directory.
worktree: The top of the repo client checkout.
topdir: The top of a project checkout.
"""
def setUp(self):
self.tempdir = tempfile.mkdtemp(prefix='repo_tests')
self.topdir = os.path.join(self.tempdir, 'checkout')
self.worktree = os.path.join(self.topdir, 'git-project')
os.makedirs(self.topdir)
os.makedirs(self.worktree)
def tearDown(self):
shutil.rmtree(self.tempdir, ignore_errors=True)
@staticmethod
def touch(path):
with open(path, 'w'):
pass
def assertExists(self, path, msg=None):
"""Make sure |path| exists."""
if os.path.exists(path):
return
if msg is None:
msg = ['path is missing: %s' % path]
while path != '/':
path = os.path.dirname(path)
if not path:
# If we're given something like "foo", abort once we get to "".
break
result = os.path.exists(path)
msg.append('\tos.path.exists(%s): %s' % (path, result))
if result:
msg.append('\tcontents: %r' % os.listdir(path))
break
msg = '\n'.join(msg)
raise self.failureException(msg)
class CopyFile(CopyLinkTestCase):
"""Check _CopyFile handling."""
def CopyFile(self, src, dest):
return project._CopyFile(self.worktree, src, self.topdir, dest)
def test_basic(self):
"""Basic test of copying a file from a project to the toplevel."""
src = os.path.join(self.worktree, 'foo.txt')
self.touch(src)
cf = self.CopyFile('foo.txt', 'foo')
cf._Copy()
self.assertExists(os.path.join(self.topdir, 'foo'))
def test_src_subdir(self):
"""Copy a file from a subdir of a project."""
src = os.path.join(self.worktree, 'bar', 'foo.txt')
os.makedirs(os.path.dirname(src))
self.touch(src)
cf = self.CopyFile('bar/foo.txt', 'new.txt')
cf._Copy()
self.assertExists(os.path.join(self.topdir, 'new.txt'))
def test_dest_subdir(self):
"""Copy a file to a subdir of a checkout."""
src = os.path.join(self.worktree, 'foo.txt')
self.touch(src)
cf = self.CopyFile('foo.txt', 'sub/dir/new.txt')
self.assertFalse(os.path.exists(os.path.join(self.topdir, 'sub')))
cf._Copy()
self.assertExists(os.path.join(self.topdir, 'sub', 'dir', 'new.txt'))
def test_update(self):
"""Make sure changed files get copied again."""
src = os.path.join(self.worktree, 'foo.txt')
dest = os.path.join(self.topdir, 'bar')
with open(src, 'w') as f:
f.write('1st')
cf = self.CopyFile('foo.txt', 'bar')
cf._Copy()
self.assertExists(dest)
with open(dest) as f:
self.assertEqual(f.read(), '1st')
with open(src, 'w') as f:
f.write('2nd!')
cf._Copy()
with open(dest) as f:
self.assertEqual(f.read(), '2nd!')
def test_src_block_symlink(self):
"""Do not allow reading from a symlinked path."""
src = os.path.join(self.worktree, 'foo.txt')
sym = os.path.join(self.worktree, 'sym')
self.touch(src)
platform_utils.symlink('foo.txt', sym)
self.assertExists(sym)
cf = self.CopyFile('sym', 'foo')
self.assertRaises(error.ManifestInvalidPathError, cf._Copy)
def test_src_block_symlink_traversal(self):
"""Do not allow reading through a symlink dir."""
realfile = os.path.join(self.tempdir, 'file.txt')
self.touch(realfile)
src = os.path.join(self.worktree, 'bar', 'file.txt')
platform_utils.symlink(self.tempdir, os.path.join(self.worktree, 'bar'))
self.assertExists(src)
cf = self.CopyFile('bar/file.txt', 'foo')
self.assertRaises(error.ManifestInvalidPathError, cf._Copy)
def test_src_block_copy_from_dir(self):
"""Do not allow copying from a directory."""
src = os.path.join(self.worktree, 'dir')
os.makedirs(src)
cf = self.CopyFile('dir', 'foo')
self.assertRaises(error.ManifestInvalidPathError, cf._Copy)
def test_dest_block_symlink(self):
"""Do not allow writing to a symlink."""
src = os.path.join(self.worktree, 'foo.txt')
self.touch(src)
platform_utils.symlink('dest', os.path.join(self.topdir, 'sym'))
cf = self.CopyFile('foo.txt', 'sym')
self.assertRaises(error.ManifestInvalidPathError, cf._Copy)
def test_dest_block_symlink_traversal(self):
"""Do not allow writing through a symlink dir."""
src = os.path.join(self.worktree, 'foo.txt')
self.touch(src)
platform_utils.symlink(tempfile.gettempdir(),
os.path.join(self.topdir, 'sym'))
cf = self.CopyFile('foo.txt', 'sym/foo.txt')
self.assertRaises(error.ManifestInvalidPathError, cf._Copy)
def test_src_block_copy_to_dir(self):
"""Do not allow copying to a directory."""
src = os.path.join(self.worktree, 'foo.txt')
self.touch(src)
os.makedirs(os.path.join(self.topdir, 'dir'))
cf = self.CopyFile('foo.txt', 'dir')
self.assertRaises(error.ManifestInvalidPathError, cf._Copy)
class LinkFile(CopyLinkTestCase):
"""Check _LinkFile handling."""
def LinkFile(self, src, dest):
return project._LinkFile(self.worktree, src, self.topdir, dest)
def test_basic(self):
"""Basic test of linking a file from a project into the toplevel."""
src = os.path.join(self.worktree, 'foo.txt')
self.touch(src)
lf = self.LinkFile('foo.txt', 'foo')
lf._Link()
dest = os.path.join(self.topdir, 'foo')
self.assertExists(dest)
self.assertTrue(os.path.islink(dest))
self.assertEqual(os.path.join('git-project', 'foo.txt'), os.readlink(dest))
def test_src_subdir(self):
"""Link to a file in a subdir of a project."""
src = os.path.join(self.worktree, 'bar', 'foo.txt')
os.makedirs(os.path.dirname(src))
self.touch(src)
lf = self.LinkFile('bar/foo.txt', 'foo')
lf._Link()
self.assertExists(os.path.join(self.topdir, 'foo'))
def test_src_self(self):
"""Link to the project itself."""
dest = os.path.join(self.topdir, 'foo', 'bar')
lf = self.LinkFile('.', 'foo/bar')
lf._Link()
self.assertExists(dest)
self.assertEqual(os.path.join('..', 'git-project'), os.readlink(dest))
def test_dest_subdir(self):
"""Link a file to a subdir of a checkout."""
src = os.path.join(self.worktree, 'foo.txt')
self.touch(src)
lf = self.LinkFile('foo.txt', 'sub/dir/foo/bar')
self.assertFalse(os.path.exists(os.path.join(self.topdir, 'sub')))
lf._Link()
self.assertExists(os.path.join(self.topdir, 'sub', 'dir', 'foo', 'bar'))
def test_src_block_relative(self):
"""Do not allow relative symlinks."""
BAD_SOURCES = (
'./',
'..',
'../',
'foo/.',
'foo/./bar',
'foo/..',
'foo/../foo',
)
for src in BAD_SOURCES:
lf = self.LinkFile(src, 'foo')
self.assertRaises(error.ManifestInvalidPathError, lf._Link)
def test_update(self):
"""Make sure changed targets get updated."""
dest = os.path.join(self.topdir, 'sym')
src = os.path.join(self.worktree, 'foo.txt')
self.touch(src)
lf = self.LinkFile('foo.txt', 'sym')
lf._Link()
self.assertEqual(os.path.join('git-project', 'foo.txt'), os.readlink(dest))
# Point the symlink somewhere else.
os.unlink(dest)
platform_utils.symlink(self.tempdir, dest)
lf._Link()
self.assertEqual(os.path.join('git-project', 'foo.txt'), os.readlink(dest))
| {
"content_hash": "cbfcef6e21faec3a23b6cd1104bcf350",
"timestamp": "",
"source": "github",
"line_count": 323,
"max_line_length": 79,
"avg_line_length": 32.88235294117647,
"alnum_prop": 0.6398644195461821,
"repo_name": "lipro-yocto/git-repo",
"id": "7dfbabb309aa507b8496b1979b23746581a80915",
"size": "11222",
"binary": false,
"copies": "1",
"ref": "refs/heads/lpn-launcher",
"path": "tests/test_project.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "389284"
},
{
"name": "Shell",
"bytes": "6043"
}
],
"symlink_target": ""
} |
from django.conf import settings
URL_EXCEPTIONS = getattr(settings, 'LOCKDOWN_URL_EXCEPTIONS', ())
PASSWORDS = getattr(settings, 'LOCKDOWN_PASSWORDS', ())
FORM = getattr(settings, 'LOCKDOWN_FORM', 'lockdown.forms.LockdownForm')
SESSION_KEY = getattr(settings, 'LOCKDOWN_SESSION_KEY', 'lockdown-allow')
LOGOUT_KEY = getattr(settings, 'LOCKDOWN_LOGOUT_KEY', 'preview-logout')
UNTIL_DATE = getattr(settings, 'LOCKDOWN_UNTIL', None)
AFTER_DATE = getattr(settings, 'LOCKDOWN_AFTER', None)
if not isinstance(PASSWORDS, (tuple, list)):
    PASSWORDS = (PASSWORDS,) if PASSWORDS else ()
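# A sketch of the normalization above (hypothetical settings values):
#
#     LOCKDOWN_PASSWORDS = "letmein"     ->  PASSWORDS == ("letmein",)
#     LOCKDOWN_PASSWORDS = ["a", "b"]    ->  PASSWORDS == ["a", "b"]
#     LOCKDOWN_PASSWORDS = ""            ->  PASSWORDS == ()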
| {
"content_hash": "0044c8cfe29d1f1d915ae5b97fe9ce76",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 73,
"avg_line_length": 48.333333333333336,
"alnum_prop": 0.743103448275862,
"repo_name": "RanadeepPolavarapu/django-lockdown",
"id": "f03ae3f93bf5cef245ad32f669d879c5740e0647",
"size": "580",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lockdown/settings.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "825"
},
{
"name": "Python",
"bytes": "27395"
}
],
"symlink_target": ""
} |
class HTTPException(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
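# Usage sketch (hypothetical caller):
#
#     try:
#         raise HTTPException(404)
#     except HTTPException as e:
#         print(e)    # prints: 404 (the repr of the wrapped value)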
| {
"content_hash": "00e03b7c7a2fae7daa6ae48338ab99fe",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 31,
"avg_line_length": 24.333333333333332,
"alnum_prop": 0.589041095890411,
"repo_name": "tuvistavie/python2-http-wrapper",
"id": "3dd611dd68e138bdb8da03703773d5df7bcde823",
"size": "146",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "http/http_exception.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10874"
}
],
"symlink_target": ""
} |
import functools as ft
from inspect import getdoc
import re
import h2o
from h2o.automl._base import H2OAutoMLBaseMixin
from h2o.automl._h2o_automl_output import H2OAutoMLOutput
from h2o.base import Keyed
from h2o.estimators import H2OEstimator
from h2o.exceptions import H2OResponseError, H2OValueError
from h2o.frame import H2OFrame
from h2o.job import H2OJob
from h2o.utils.shared_utils import check_id
from h2o.utils.typechecks import assert_is_type, is_type, numeric
_params_doc_ = dict() # holds the doc per param extracted from H2OAutoML constructor
def _extract_params_doc(docstr):
pat = re.compile(r"^:param (\w+ )?(?P<name>\w+):\s?(?P<doc>.*)") # match param doc-start in Sphinx format ":param type name: description"
lines = docstr.splitlines()
param, doc = None, None
for l in lines:
m = pat.match(l)
if m:
if param:
_params_doc_[param] = "\n".join(doc)
param = m.group('name')
doc = [m.group('doc')]
elif param:
            doc.append(l)
    if param:  # flush the doc collected for the last parameter
        _params_doc_[param] = "\n".join(doc)
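# A sketch of what the parser above extracts (hypothetical docstring input):
#
#     :param int nfolds: Number of folds.
#         Defaults to 5.
#
# yields roughly _params_doc_['nfolds'] == "Number of folds.\n    Defaults to 5."
# (continuation lines keep their original indentation).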
def _aml_property(param_path, name=None, types=None, validate_fn=None, freezable=False, set_input=True):
path = param_path.split('.')
name = name or path[-1]
def attr_name(self, attr):
return ("_"+self.__class__.__name__+attr) if attr.startswith('__') and not attr.endswith('__') else attr
def _fget(self):
_input = getattr(self, attr_name(self, '__input'))
return _input.get(name)
def _fset(self, value):
if freezable and getattr(self, attr_name(self, '__frozen'), False):
raise H2OValueError("Param ``%s`` can not be modified after the first call to ``train``." % name, name)
if types is not None:
assert_is_type(value, *types)
input_val = value
if validate_fn:
value = validate_fn(self, value)
_input = getattr(self, attr_name(self, '__input'))
_input[name] = input_val if set_input else value
group = getattr(self, attr_name(self, path[0]))
if group is None:
group = {}
setattr(self, attr_name(self, path[0]), group)
obj = group
for t in path[1:-1]:
tmp = obj.get(t)
if tmp is None:
tmp = obj[t] = {}
obj = tmp
obj[path[-1]] = value
return property(fget=_fget, fset=_fset, doc=_params_doc_.get(name, None))
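# A sketch of the mapping performed above (hypothetical instance `aml`):
# assigning aml.max_models = 20 stores the raw input in the private __input
# dict and writes the validated value into the nested params dict following
# param_path, i.e. aml.build_control['stopping_criteria']['max_models'] == 20.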
class H2OAutoML(H2OAutoMLBaseMixin, Keyed):
"""
Automatic Machine Learning
The Automatic Machine Learning (AutoML) function automates the supervised machine learning model training process.
The current version of AutoML trains and cross-validates the following algorithms (in the following order):
three pre-specified XGBoost GBM (Gradient Boosting Machine) models,
a fixed grid of GLMs,
a default Random Forest (DRF),
five pre-specified H2O GBMs,
a near-default Deep Neural Net,
an Extremely Randomized Forest (XRT),
a random grid of XGBoost GBMs,
a random grid of H2O GBMs,
and a random grid of Deep Neural Nets.
In some cases, there will not be enough time to complete all the algorithms, so some may be missing from the
leaderboard. AutoML then trains two Stacked Ensemble models, one of all the models, and one of only the best
models of each kind.
:examples:
>>> import h2o
>>> from h2o.automl import H2OAutoML
>>> h2o.init()
>>> # Import a sample binary outcome train/test set into H2O
>>> train = h2o.import_file("https://s3.amazonaws.com/erin-data/higgs/higgs_train_10k.csv")
>>> test = h2o.import_file("https://s3.amazonaws.com/erin-data/higgs/higgs_test_5k.csv")
>>> # Identify the response and set of predictors
>>> y = "response"
>>> x = list(train.columns) #if x is defined as all columns except the response, then x is not required
>>> x.remove(y)
>>> # For binary classification, response should be a factor
>>> train[y] = train[y].asfactor()
>>> test[y] = test[y].asfactor()
>>> # Run AutoML for 30 seconds
>>> aml = H2OAutoML(max_runtime_secs = 30)
>>> aml.train(x = x, y = y, training_frame = train)
>>> # Print Leaderboard (ranked by xval metrics)
>>> aml.leaderboard
>>> # (Optional) Evaluate performance on a test set
>>> perf = aml.leader.model_performance(test)
>>> perf.auc()
"""
def __init__(self,
nfolds=5,
balance_classes=False,
class_sampling_factors=None,
max_after_balance_size=5.0,
max_runtime_secs=None,
max_runtime_secs_per_model=None,
max_models=None,
stopping_metric="AUTO",
stopping_tolerance=None,
stopping_rounds=3,
seed=None,
project_name=None,
exclude_algos=None,
include_algos=None,
exploitation_ratio=0,
modeling_plan=None,
preprocessing=None,
monotone_constraints=None,
keep_cross_validation_predictions=False,
keep_cross_validation_models=False,
keep_cross_validation_fold_assignment=False,
sort_metric="AUTO",
export_checkpoints_dir=None,
verbosity="warn",
**kwargs):
"""
Create a new H2OAutoML instance.
:param int nfolds: Number of folds for k-fold cross-validation.
Use ``0`` to disable cross-validation; this will also disable Stacked Ensemble (thus decreasing the overall model performance).
Defaults to ``5``.
:param bool balance_classes: Balance training data class counts via over/under-sampling (for imbalanced data).
Defaults to ``False``.
:param class_sampling_factors: Desired over/under-sampling ratios per class (in lexicographic order).
If not specified, sampling factors will be automatically computed to obtain class balance during training.
:param float max_after_balance_size: Maximum relative size of the training data after balancing class counts (can be less than 1.0).
Requires ``balance_classes``.
Defaults to ``5.0``.
:param int max_runtime_secs: Specify the maximum time that the AutoML process will run for.
            If neither ``max_runtime_secs`` nor ``max_models`` is specified by the user, then ``max_runtime_secs``
            defaults to 3600 seconds (1 hour).
:param int max_runtime_secs_per_model: Controls the max time the AutoML run will dedicate to each individual model.
Defaults to ``0`` (disabled: no time limit).
:param int max_models: Specify the maximum number of models to build in an AutoML run, excluding the Stacked Ensemble models.
Defaults to ``0`` (disabled: no limitation).
:param str stopping_metric: Specifies the metric to use for early stopping.
The available options are:
``"AUTO"`` (This defaults to ``"logloss"`` for classification, ``"deviance"`` for regression),
``"deviance"``, ``"logloss"``, ``"mse"``, ``"rmse"``, ``"mae"``, ``"rmsle"``, ``"auc"``, ``aucpr``, ``"lift_top_group"``,
``"misclassification"``, ``"mean_per_class_error"``, ``"r2"``.
Defaults to ``"AUTO"``.
:param float stopping_tolerance: Specify the relative tolerance for the metric-based stopping to stop the AutoML run if the improvement is less than this value.
Defaults to ``0.001`` if the dataset is at least 1 million rows;
otherwise it defaults to a value determined by the size of the dataset and the non-NA-rate, in which case the value is computed as 1/sqrt(nrows * non-NA-rate).
:param int stopping_rounds: Stop training new models in the AutoML run when the option selected for
stopping_metric doesn't improve for the specified number of models, based on a simple moving average.
To disable this feature, set it to ``0``.
            Defaults to ``3`` and must be a non-negative integer.
:param int seed: Set a seed for reproducibility.
AutoML can only guarantee reproducibility if ``max_models`` or early stopping is used because ``max_runtime_secs`` is resource limited,
meaning that if the resources are not the same between runs, AutoML may be able to train more models on one run vs another.
Defaults to ``None``.
:param str project_name: Character string to identify an AutoML project.
Defaults to ``None``, which means a project name will be auto-generated based on the training frame ID.
More models can be trained on an existing AutoML project by specifying the same project name in multiple calls to the AutoML function
(as long as the same training frame, or a sample, is used in subsequent runs).
:param exclude_algos: List the algorithms to skip during the model-building phase.
The full list of options is:
``"DRF"`` (Random Forest and Extremely-Randomized Trees),
``"GLM"``,
``"XGBoost"``,
``"GBM"``,
``"DeepLearning"``,
``"StackedEnsemble"``.
Defaults to ``None``, which means that all appropriate H2O algorithms will be used, if the search stopping criteria allow. Optional.
Usage example: ``exclude_algos = ["GLM", "DeepLearning", "DRF"]``.
:param include_algos: List the algorithms to restrict to during the model-building phase.
            This can't be used in combination with the `exclude_algos` param.
Defaults to ``None``, which means that all appropriate H2O algorithms will be used, if the search stopping criteria allow. Optional.
:param exploitation_ratio: The budget ratio (between 0 and 1) dedicated to the exploitation (vs exploration) phase.
By default, the exploitation phase is disabled (exploitation_ratio=0) as this is still experimental;
to activate it, it is recommended to try a ratio around 0.1.
Note that the current exploitation phase only tries to fine-tune the best XGBoost and the best GBM found during exploration.
:param modeling_plan: List of modeling steps to be used by the AutoML engine (they may not all get executed, depending on other constraints).
Defaults to None (Expert usage only).
:param preprocessing: List of preprocessing steps to run. Only 'target_encoding' is currently supported.
:param monotone_constraints: Dict representing monotonic constraints.
Use +1 to enforce an increasing constraint and -1 to specify a decreasing constraint.
:param keep_cross_validation_predictions: Whether to keep the predictions of the cross-validation predictions.
This needs to be set to ``True`` if running the same AutoML object for repeated runs because CV predictions are required to build
additional Stacked Ensemble models in AutoML.
Defaults to ``False``.
:param keep_cross_validation_models: Whether to keep the cross-validated models.
Keeping cross-validation models may consume significantly more memory in the H2O cluster.
Defaults to ``False``.
:param keep_cross_validation_fold_assignment: Whether to keep fold assignments in the models.
Deleting them will save memory in the H2O cluster.
Defaults to ``False``.
:param sort_metric: Metric to sort the leaderboard by.
For binomial classification choose between ``"auc"``, ``"aucpr"``, ``"logloss"``, ``"mean_per_class_error"``, ``"rmse"``, ``"mse"``.
For multinomial classification choose between ``"mean_per_class_error"``, ``"logloss"``, ``"rmse"``, ``"mse"``.
            For regression choose between ``"deviance"``, ``"rmse"``, ``"mse"``, ``"mae"``, ``"rmsle"``.
Defaults to ``"AUTO"`` (This translates to ``"auc"`` for binomial classification, ``"mean_per_class_error"`` for multinomial classification, ``"deviance"`` for regression).
:param export_checkpoints_dir: Path to a directory where every model will be stored in binary form.
:param verbosity: Verbosity of the backend messages printed during training.
Available options are None (live log disabled), ``"debug"``, ``"info"`` or ``"warn"``.
Defaults to ``"warn"``.
"""
# early validate kwargs, extracting hidden parameters:
algo_parameters = {}
for k in kwargs:
if k == 'algo_parameters':
algo_parameters = kwargs[k] or {}
else:
raise TypeError("H2OAutoML got an unexpected keyword argument '%s'" % k)
# Check if H2O jar contains AutoML
try:
h2o.api("GET /3/Metadata/schemas/AutoMLV99")
except h2o.exceptions.H2OResponseError as e:
print(e)
print("*******************************************************************\n" \
"*Please verify that your H2O jar has the proper AutoML extensions.*\n" \
"*******************************************************************\n" \
"\nVerbose Error Message:")
self._job = None
self._leader_id = None
self._leaderboard = None
self._verbosity = verbosity
self._event_log = None
self._training_info = None
self._state_json = None
self._build_resp = None # contains all the actual parameters used on backend
self.__frozen = False
self.__input = dict() # contains all the input params as entered by the user
# Make bare minimum params containers
self.build_control = dict()
self.build_models = dict()
self.input_spec = dict()
self.project_name = project_name
self.nfolds = nfolds
self.balance_classes = balance_classes
self.class_sampling_factors = class_sampling_factors
self.max_after_balance_size = max_after_balance_size
self.keep_cross_validation_models = keep_cross_validation_models
self.keep_cross_validation_fold_assignment = keep_cross_validation_fold_assignment
self.keep_cross_validation_predictions = keep_cross_validation_predictions
self.export_checkpoints_dir = export_checkpoints_dir
self.max_runtime_secs = max_runtime_secs
self.max_runtime_secs_per_model = max_runtime_secs_per_model
self.max_models = max_models
self.stopping_metric = stopping_metric
self.stopping_tolerance = stopping_tolerance
self.stopping_rounds = stopping_rounds
self.seed = seed
self.exclude_algos = exclude_algos
self.include_algos = include_algos
self.exploitation_ratio = exploitation_ratio
self.modeling_plan = modeling_plan
self.preprocessing = preprocessing
if monotone_constraints is not None:
algo_parameters['monotone_constraints'] = monotone_constraints
self._algo_parameters = algo_parameters
self.sort_metric = sort_metric
#---------------------------------------------------------------------------
# AutoML params
#---------------------------------------------------------------------------
def __validate_not_set(self, val, prop=None, message=None):
assert val is None or getattr(self, prop, None) is None, message
return val
def __validate_project_name(self, project_name):
check_id(project_name, "H2OAutoML")
return project_name
def __validate_nfolds(self, nfolds):
assert nfolds == 0 or nfolds > 1, "nfolds set to %s; use nfolds >=2 if you want cross-validated metrics and Stacked Ensembles or use nfolds = 0 to disable." % nfolds
return nfolds
def __validate_modeling_plan(self, modeling_plan):
if modeling_plan is None:
return None
supported_aliases = ['all', 'defaults', 'grids']
def assert_is_step_def(sd):
assert 'name' in sd, "each definition must have a 'name' key"
assert 0 < len(sd) < 3, "each definition must have only 1 or 2 keys: name, name+alias or name+steps"
assert len(sd) == 1 or 'alias' in sd or 'steps' in sd, "steps definitions support only the following keys: name, alias, steps"
assert 'alias' not in sd or sd['alias'] in supported_aliases, "alias must be one of %s" % supported_aliases
assert 'steps' not in sd or (is_type(sd['steps'], list) and all(assert_is_step(s) for s in sd['steps']))
def assert_is_step(s):
assert is_type(s, dict), "each step must be a dict with an 'id' key and an optional 'weight' key"
assert 'id' in s, "each step must have an 'id' key"
assert len(s) == 1 or ('weight' in s and is_type(s['weight'], int)), "weight must be an integer"
return True
plan = []
for step_def in modeling_plan:
assert_is_type(step_def, dict, tuple, str)
if is_type(step_def, dict):
assert_is_step_def(step_def)
plan.append(step_def)
elif is_type(step_def, str):
plan.append(dict(name=step_def))
else:
assert 0 < len(step_def) < 3
assert_is_type(step_def[0], str)
name = step_def[0]
if len(step_def) == 1:
plan.append(dict(name=name))
else:
assert_is_type(step_def[1], str, list)
ids = step_def[1]
if is_type(ids, str):
assert_is_type(ids, *supported_aliases)
plan.append(dict(name=name, alias=ids))
else:
plan.append(dict(name=name, steps=[dict(id=i) for i in ids]))
return plan
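    # The validator above accepts several equivalent spellings for a step
    # definition; a sketch with hypothetical entries:
    #
    #     "GBM"                        -> {'name': 'GBM'}
    #     ("GBM", "grids")             -> {'name': 'GBM', 'alias': 'grids'}
    #     ("GBM", ["def_1", "def_2"])  -> {'name': 'GBM',
    #                                      'steps': [{'id': 'def_1'},
    #                                                {'id': 'def_2'}]}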
def __validate_preprocessing(self, preprocessing):
if preprocessing is None:
return
assert all(p in ['target_encoding'] for p in preprocessing)
return [dict(type=p.replace("_", "")) for p in preprocessing]
def __validate_monotone_constraints(self, monotone_constraints):
if monotone_constraints is None:
self._algo_parameters.pop('monotone_constraints', None)
else:
self._algo_parameters['monotone_constraints'] = monotone_constraints
return self.__validate_algo_parameters(self._algo_parameters)
def __validate_algo_parameters(self, algo_parameters):
if algo_parameters is None:
return None
algo_parameters_json = []
for k, v in algo_parameters.items():
scope, __, name = k.partition('__')
if len(name) == 0:
name, scope = scope, 'any'
value = [dict(key=k, value=v) for k, v in v.items()] if isinstance(v, dict) else v # we can't use stringify_dict here as this will be converted into a JSON string
algo_parameters_json.append(dict(scope=scope, name=name, value=value))
return algo_parameters_json
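    # A sketch of the scope encoding above (hypothetical parameters):
    #
    #     {'monotone_constraints': {'AGE': 1}}   # no '__' prefix: any algo
    #         -> [{'scope': 'any', 'name': 'monotone_constraints',
    #              'value': [{'key': 'AGE', 'value': 1}]}]
    #     {'GBM__ntrees': 100}                   # 'GBM__' prefix: GBM only
    #         -> [{'scope': 'GBM', 'name': 'ntrees', 'value': 100}]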
def __validate_frame(self, fr, name=None, required=False):
return H2OFrame._validate(fr, name, required=required)
_extract_params_doc(getdoc(__init__))
project_name = _aml_property('build_control.project_name', types=(None, str), freezable=True,
validate_fn=__validate_project_name)
nfolds = _aml_property('build_control.nfolds', types=(int,), freezable=True,
validate_fn=__validate_nfolds)
balance_classes = _aml_property('build_control.balance_classes', types=(bool,), freezable=True)
class_sampling_factors = _aml_property('build_control.class_sampling_factors', types=(None, [numeric]), freezable=True)
max_after_balance_size = _aml_property('build_control.max_after_balance_size', types=(None, numeric), freezable=True)
keep_cross_validation_models = _aml_property('build_control.keep_cross_validation_models', types=(bool,), freezable=True)
keep_cross_validation_fold_assignment = _aml_property('build_control.keep_cross_validation_fold_assignment', types=(bool,), freezable=True)
keep_cross_validation_predictions = _aml_property('build_control.keep_cross_validation_predictions', types=(bool,), freezable=True)
export_checkpoints_dir = _aml_property('build_control.export_checkpoints_dir', types=(None, str), freezable=True)
max_runtime_secs = _aml_property('build_control.stopping_criteria.max_runtime_secs', types=(None, int), freezable=True)
max_runtime_secs_per_model = _aml_property('build_control.stopping_criteria.max_runtime_secs_per_model', types=(None, int), freezable=True)
max_models = _aml_property('build_control.stopping_criteria.max_models', types=(None, int), freezable=True)
stopping_metric = _aml_property('build_control.stopping_criteria.stopping_metric', types=(None, str), freezable=True)
stopping_tolerance = _aml_property('build_control.stopping_criteria.stopping_tolerance', types=(None, numeric), freezable=True)
stopping_rounds = _aml_property('build_control.stopping_criteria.stopping_rounds', types=(None, int), freezable=True)
seed = _aml_property('build_control.stopping_criteria.seed', types=(None, int), freezable=True)
exclude_algos = _aml_property('build_models.exclude_algos', types=(None, [str]), freezable=True,
validate_fn=ft.partial(__validate_not_set, prop='include_algos',
message="Use either `exclude_algos` or `include_algos`, not both."))
include_algos = _aml_property('build_models.include_algos', types=(None, [str]), freezable=True,
validate_fn=ft.partial(__validate_not_set, prop='exclude_algos',
message="Use either `exclude_algos` or `include_algos`, not both."))
exploitation_ratio = _aml_property('build_models.exploitation_ratio', types=(None, numeric), freezable=True)
modeling_plan = _aml_property('build_models.modeling_plan', types=(None, list), freezable=True,
validate_fn=__validate_modeling_plan)
preprocessing = _aml_property('build_models.preprocessing', types=(None, [str]), freezable=True,
validate_fn=__validate_preprocessing)
monotone_constraints = _aml_property('build_models.algo_parameters', name='monotone_constraints', types=(None, dict), freezable=True,
validate_fn=__validate_monotone_constraints)
_algo_parameters = _aml_property('build_models.algo_parameters', types=(None, dict), freezable=True,
validate_fn=__validate_algo_parameters)
sort_metric = _aml_property('input_spec.sort_metric', types=(None, str))
fold_column = _aml_property('input_spec.fold_column', types=(None, int, str))
weights_column = _aml_property('input_spec.weights_column', types=(None, int, str))
training_frame = _aml_property('input_spec.training_frame', set_input=False,
validate_fn=ft.partial(__validate_frame, name='training_frame', required=True))
validation_frame = _aml_property('input_spec.validation_frame', set_input=False,
validate_fn=ft.partial(__validate_frame, name='validation_frame'))
leaderboard_frame = _aml_property('input_spec.leaderboard_frame', set_input=False,
validate_fn=ft.partial(__validate_frame, name='leaderboard_frame'))
blending_frame = _aml_property('input_spec.blending_frame', set_input=False,
validate_fn=ft.partial(__validate_frame, name='blending_frame'))
response_column = _aml_property('input_spec.response_column', types=(str,))
#---------------------------------------------------------------------------
# Basic properties
#---------------------------------------------------------------------------
@property
def key(self):
return self._job.dest_key if self._job else self.project_name
@property
def leader(self):
return None if self._leader_id is None else h2o.get_model(self._leader_id)
@property
def leaderboard(self):
return H2OFrame([]) if self._leaderboard is None else self._leaderboard
@property
def event_log(self):
return H2OFrame([]) if self._event_log is None else self._event_log
@property
def training_info(self):
return dict() if self._training_info is None else self._training_info
@property
def modeling_steps(self):
"""
expose the modeling steps effectively used by the AutoML run.
This executed plan can be directly reinjected as the `modeling_plan` property of a new AutoML instance
to improve reproducibility across AutoML versions.
:return: a list of dictionaries representing the effective modeling plan.
"""
# removing alias key to be able to reinject result to a new AutoML instance
return list(map(lambda sdef: dict(name=sdef['name'], steps=sdef['steps']), self._state_json['modeling_steps']))
#---------------------------------------------------------------------------
# Training AutoML
#---------------------------------------------------------------------------
def train(self, x=None, y=None, training_frame=None, fold_column=None,
weights_column=None, validation_frame=None, leaderboard_frame=None, blending_frame=None):
"""
Begins an AutoML task, a background task that automatically builds a number of models
with various algorithms and tracks their performance in a leaderboard. At any point
in the process you may use H2O's performance or prediction functions on the resulting
models.
:param x: A list of column names or indices indicating the predictor columns.
:param y: An index or a column name indicating the response column.
:param fold_column: The name or index of the column in training_frame that holds per-row fold
assignments.
:param weights_column: The name or index of the column in training_frame that holds per-row weights.
:param training_frame: The H2OFrame having the columns indicated by x and y (as well as any
additional columns specified by fold_column or weights_column).
:param validation_frame: H2OFrame with validation data. This argument is ignored unless the user sets
nfolds = 0. If cross-validation is turned off, then a validation frame can be specified and used
for early stopping of individual models and early stopping of the grid searches. By default and
when nfolds > 1, cross-validation metrics will be used for early stopping and thus validation_frame will be ignored.
:param leaderboard_frame: H2OFrame with test data for scoring the leaderboard. This is optional and
if this is set to None (the default), then cross-validation metrics will be used to generate the leaderboard
rankings instead.
        :param blending_frame: H2OFrame used to train the metalearning algorithm in Stacked Ensembles (instead of relying on cross-validated predicted values).
This is optional, but when provided, it is also recommended to disable cross validation
by setting `nfolds=0` and to provide a leaderboard frame for scoring purposes.
:returns: An H2OAutoML object.
:examples:
>>> # Set up an H2OAutoML object
>>> aml = H2OAutoML(max_runtime_secs=30)
>>> # Launch an AutoML run
>>> aml.train(y=y, training_frame=train)
"""
# Minimal required arguments are training_frame and y (response)
self.training_frame = training_frame
ncols = self.training_frame.ncols
names = self.training_frame.names
if y is None and self.response_column is None:
raise H2OValueError('The response column (y) is not set; please set it to the name of the column that you are trying to predict in your data.')
elif y is not None:
assert_is_type(y, int, str)
if is_type(y, int):
if not (-ncols <= y < ncols):
raise H2OValueError("Column %d does not exist in the training frame" % y)
y = names[y]
else:
if y not in names:
raise H2OValueError("Column %s does not exist in the training frame" % y)
self.response_column = y
self.fold_column = fold_column
self.weights_column = weights_column
self.validation_frame = validation_frame
self.leaderboard_frame = leaderboard_frame
self.blending_frame = blending_frame
        if x is not None:
            # Accept a single predictor column (int or str) as well as a list
            assert_is_type(x, list, int, str)
            if is_type(x, int, str): x = [x]
            xset = set()
for xi in x:
if is_type(xi, int):
if not (-ncols <= xi < ncols):
raise H2OValueError("Column %d does not exist in the training frame" % xi)
xset.add(names[xi])
else:
if xi not in names:
raise H2OValueError("Column %s not in the training frame" % xi)
xset.add(xi)
ignored_columns = set(names) - xset
for col in [y, fold_column, weights_column]:
if col is not None and col in ignored_columns:
ignored_columns.remove(col)
if ignored_columns is not None:
self.input_spec['ignored_columns'] = list(ignored_columns)
def clean_params(params):
return ({k: clean_params(v) for k, v in params.items() if v is not None} if isinstance(params, dict)
else H2OEstimator._keyify(params))
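        # clean_params recursively drops None-valued entries and lets
        # H2OEstimator._keyify replace frame/model objects with their backend
        # keys; a sketch with a hypothetical input:
        #
        #     {'a': None, 'b': {'c': None, 'd': 1}}  ->  {'b': {'d': 1}}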
automl_build_params = clean_params(dict(
build_control=self.build_control,
build_models=self.build_models,
input_spec=self.input_spec,
))
resp = self._build_resp = h2o.api('POST /99/AutoMLBuilder', json=automl_build_params)
if 'job' not in resp:
raise H2OResponseError("Backend failed to build the AutoML job: {}".format(resp))
if not self.project_name:
self.project_name = resp['build_control']['project_name']
self.__frozen = True
self._job = H2OJob(resp['job'], "AutoML")
poll_updates = ft.partial(self._poll_training_updates, verbosity=self._verbosity, state={})
try:
self._job.poll(poll_updates=poll_updates)
finally:
poll_updates(self._job, 1)
self._fetch()
return self.leader
#---------------------------------------------------------------------------
# Predict with AutoML
#---------------------------------------------------------------------------
def predict(self, test_data):
leader = self.leader
if leader is None:
self._fetch()
leader = self.leader
if leader is not None:
return leader.predict(test_data)
print("No model built yet...")
#-------------------------------------------------------------------------------------------------------------------
# Overrides
#-------------------------------------------------------------------------------------------------------------------
def detach(self):
self.__frozen = False
self.project_name = None
h2o.remove(self.leaderboard)
h2o.remove(self.event_log)
#-------------------------------------------------------------------------------------------------------------------
# Private
#-------------------------------------------------------------------------------------------------------------------
def _fetch(self):
state = H2OAutoML._fetch_state(self.key)
self._leader_id = state['leader_id']
self._leaderboard = state['leaderboard']
self._event_log = el = state['event_log']
self._training_info = { r[0]: r[1]
for r in el[el['name'] != '', ['name', 'value']]
.as_data_frame(use_pandas=False, header=False)
}
self._state_json = state['json']
return self._leader_id is not None
def _poll_training_updates(self, job, bar_progress=0, verbosity=None, state=None):
"""
        The callback function used to print verbose info when polling the AutoML job.
"""
levels = ['Debug', 'Info', 'Warn']
if verbosity is None or verbosity.capitalize() not in levels:
return
levels = levels[levels.index(verbosity.capitalize()):]
try:
if job.progress > state.get('last_job_progress', 0):
# print("\nbar_progress={}, job_progress={}".format(bar_progress, job.progress))
events = H2OAutoML._fetch_state(job.dest_key, properties=['event_log'])['event_log']
events = events[events['level'].isin(levels), :]
last_nrows = state.get('last_events_nrows', 0)
if events.nrows > last_nrows:
fr = events[last_nrows:, ['timestamp', 'message']].as_data_frame(use_pandas=False, header=False)
print('')
for r in fr:
print("{}: {}".format(r[0], r[1]))
print('')
state['last_events_nrows'] = events.nrows
state['last_job_progress'] = job.progress
except Exception as e:
print("Failed polling AutoML progress log: {}".format(e))
@staticmethod
def _fetch_leaderboard(aml_id, extensions=None):
assert_is_type(extensions, None, str, [str])
extensions = ([] if extensions is None
else [extensions] if is_type(extensions, str)
else extensions)
resp = h2o.api("GET /99/Leaderboards/%s" % aml_id, data=dict(extensions=extensions))
dest_key = resp['project_name'].split('@', 1)[0]+"_custom_leaderboard"
lb = H2OAutoML._fetch_table(resp['table'], key=dest_key, progress_bar=False)
return h2o.assign(lb[1:], dest_key)
@staticmethod
def _fetch_table(table, key=None, progress_bar=True):
try:
# Intentionally mask the progress bar here since showing multiple progress bars is confusing to users.
# If any failure happens, revert back to user's original setting for progress and display the error message.
ori_progress_state = H2OJob.__PROGRESS_BAR__
H2OJob.__PROGRESS_BAR__ = progress_bar
# Parse leaderboard H2OTwoDimTable & return as an H2OFrame
return h2o.H2OFrame(table.cell_values, destination_frame=key, column_names=table.col_header, column_types=table.col_types)
finally:
H2OJob.__PROGRESS_BAR__ = ori_progress_state
@staticmethod
def _fetch_state(aml_id, properties=None):
state_json = h2o.api("GET /99/AutoML/%s" % aml_id)
project_name = state_json["project_name"]
if project_name is None:
raise H2OValueError("No AutoML instance with id {}.".format(aml_id))
leaderboard_list = [key["name"] for key in state_json['leaderboard']['models']]
leader_id = leaderboard_list[0] if (leaderboard_list is not None and len(leaderboard_list) > 0) else None
should_fetch = lambda prop: properties is None or prop in properties
leader = None
if should_fetch('leader'):
leader = h2o.get_model(leader_id) if leader_id is not None else None
leaderboard = None
if should_fetch('leaderboard'):
leaderboard = H2OAutoML._fetch_table(state_json['leaderboard_table'], key=project_name+"_leaderboard", progress_bar=False)
leaderboard = h2o.assign(leaderboard[1:], project_name+"_leaderboard") # removing index and reassign id to ensure persistence on backend
event_log = None
if should_fetch('event_log'):
event_log = H2OAutoML._fetch_table(state_json['event_log_table'], key=project_name+"_eventlog", progress_bar=False)
event_log = h2o.assign(event_log[1:], project_name+"_eventlog") # removing index and reassign id to ensure persistence on backend
return dict(
project_name=project_name,
json=state_json,
leader_id=leader_id,
leader=leader,
leaderboard=leaderboard,
event_log=event_log,
)
def get_automl(project_name):
"""
Retrieve information about an AutoML instance.
:param str project_name: A string indicating the project_name of the automl instance to retrieve.
:returns: A dictionary containing the project_name, leader model, leaderboard, event_log.
"""
state = H2OAutoML._fetch_state(project_name)
return H2OAutoMLOutput(state)
def get_leaderboard(aml, extra_columns=None):
"""
Retrieve the leaderboard from the AutoML instance.
    Unlike the default leaderboard attached to the automl instance, this one can return columns other than the metrics.
:param H2OAutoML aml: the instance for which to return the leaderboard.
:param extra_columns: a string or a list of string specifying which optional columns should be added to the leaderboard. Defaults to None.
Currently supported extensions are:
- 'ALL': adds all columns below.
- 'training_time_ms': column providing the training time of each model in milliseconds (doesn't include the training of cross validation models).
- 'predict_time_per_row_ms`: column providing the average prediction time by the model for a single row.
- 'algo': column providing the algorithm name for each model.
:return: An H2OFrame representing the leaderboard.
:examples:
>>> aml = H2OAutoML(max_runtime_secs=30)
>>> aml.train(y=y, training_frame=train)
>>> lb_all = h2o.automl.get_leaderboard(aml, 'ALL')
>>> lb_custom = h2o.automl.get_leaderboard(aml, ['predict_time_per_row_ms', 'training_time_ms'])
>>> lb_custom_sorted = lb_custom.sort(by='predict_time_per_row_ms')
"""
assert_is_type(aml, H2OAutoML, H2OAutoMLOutput)
return H2OAutoML._fetch_leaderboard(aml.key, extra_columns)
| {
"content_hash": "b2ba9b312a7365e8d6558aeaabf1fb6e",
"timestamp": "",
"source": "github",
"line_count": 725,
"max_line_length": 184,
"avg_line_length": 53.69241379310345,
"alnum_prop": 0.6022298147815142,
"repo_name": "michalkurka/h2o-3",
"id": "74ba5e025bbe07c9e5ccd64f951c36dec9dc38f8",
"size": "38953",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "h2o-py/h2o/automl/autoh2o.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "12629"
},
{
"name": "CSS",
"bytes": "231770"
},
{
"name": "CoffeeScript",
"bytes": "7550"
},
{
"name": "Dockerfile",
"bytes": "10302"
},
{
"name": "Emacs Lisp",
"bytes": "2226"
},
{
"name": "Groovy",
"bytes": "166480"
},
{
"name": "HCL",
"bytes": "15007"
},
{
"name": "HTML",
"bytes": "251906"
},
{
"name": "HiveQL",
"bytes": "3965"
},
{
"name": "Java",
"bytes": "11932863"
},
{
"name": "JavaScript",
"bytes": "89484"
},
{
"name": "Jupyter Notebook",
"bytes": "13867219"
},
{
"name": "Makefile",
"bytes": "50635"
},
{
"name": "Python",
"bytes": "6801044"
},
{
"name": "R",
"bytes": "3223113"
},
{
"name": "Ruby",
"bytes": "3506"
},
{
"name": "Scala",
"bytes": "33647"
},
{
"name": "Shell",
"bytes": "186559"
},
{
"name": "TeX",
"bytes": "634412"
}
],
"symlink_target": ""
} |
"""
Copyright (c) 2011 Barry Schwartz
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import fontforge
import string
from sortsmill import font_db
from sortsmill.glyphbuild import *
from sortsmill.spacing_by_anchors import *
emsize = 1000
spacesize = 210
def build_glyphs(bitbucket, f):
from sortsmill import cap_spacing
figures = ['zero', 'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine']
def base(letter):
if letter == 'i':
base = 'dotlessi'
elif letter == 'j':
base = 'uni0237'
else:
base = letter
return base
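    # e.g. base('i') -> 'dotlessi', base('j') -> 'uni0237', base('a') -> 'a':
    # accents are stacked on the dotless forms so that a combining mark
    # never collides with the dot of i or j.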
db = font_db.db_create(f)
db['spacing_anchor_heights'] = {
'hi' : 675, # caps and ascenders,
'x' : 389, # ex-height
'o' : 215, # like the letter o
'bl' : 10, # baseline
'lo' : -204, # descenders
}
all_glyphs = set(f) - set(['.notdef'])
(smallcaps, capssmall, uppercase, lowercase, fraction_bar, numerators, denominators, remaining) = \
tuple(separate_strings(all_glyphs, [
(lambda s: s[-3:] == '.sc'),
(lambda s: s[-3:] == '.c2'),
(lambda s: is_uppercase(s, last_name)),
(lambda s: is_lowercase(s, last_name)),
(lambda s: s == 'fraction'),
(lambda s: s[-6:] == '.numer'),
(lambda s: s[-6:] == '.denom'),
]))
db["kerning_sets"] = [
(remaining, uppercase | lowercase | smallcaps | capssmall | remaining),
(uppercase, uppercase | lowercase | smallcaps | remaining),
(smallcaps, uppercase | smallcaps | capssmall | remaining),
(capssmall, smallcaps | capssmall | remaining),
(lowercase, uppercase | lowercase | remaining),
(numerators, fraction_bar),
(fraction_bar, denominators),
]
build_several_space_glyphs(f, emsize = emsize, spacesize = spacesize,
thinspacesize = emsize / 6,
hairspacesize = emsize / 10,
tabwidth = f['zero'].width)
propagate_hyphens(f)
propagate_hyphens(f, '.u')
build_spacing_marks(f, width = 2 * 220)
make_glyph_reference('asciitilde', f['uni2053']) # Swung dash.
make_glyph_reference('i.TRK', f['i'])
make_glyph_reference('L.CAT', f['L'])
make_glyph_reference('l.CAT', f['l'])
make_glyph_reference('L.CAT.c2', f['L.c2'])
make_glyph_reference('l.CAT.sc', f['l.sc'])
make_glyph_reference('Dcroat', f['Eth'])
make_glyph_reference('dcroat.sc', f['eth.sc'])
build_multigraph('ellipsis', [f['period'], f['period'], f['period']])
make_glyph_reference('quotesingle', f['minute'])
make_glyph_reference('quotedbl', f['second'])
for extension in [('.numer', 250), ('.sub', -150), ('.sup', 350)]:
for fig in figures + ['comma', 'period',
'parenleft', 'parenright',
'bracketleft', 'bracketright',
# 'hyphen',
# 'dollar', 'cent',
] + [string.ascii_lowercase[i] for i in range(0, 26)]:
make_glyph_reference(fig + extension[0],
f[fig + '.denom'],
transformation = (1, 0, 0, 1, 0, extension[1]),
copy_spacing_anchors = (extension[0] == '.numer'))
make_glyph_reference('uni00B9', f['one.sup'])
make_glyph_reference('uni00B2', f['two.sup'])
make_glyph_reference('uni00B3', f['three.sup'])
make_glyph_reference('ordfeminine', f['a.sup'])
make_glyph_reference('ordmasculine', f['o.sup'])
build_multigraph('onequarter', [f['one.numer'], f['fraction'], f['four.denom']], copy_spacing_anchors = False)
build_multigraph('onehalf', [f['one.numer'], f['fraction'], f['two.denom']], copy_spacing_anchors = False)
build_multigraph('threequarters', [f['three.numer'], f['fraction'], f['four.denom']], copy_spacing_anchors = False)
special_cases = {
'uni0163.sc' : 'uni0162.c2',
'uni0219.sc' : 'uni0218.c2',
'uni021B.sc' : 'uni021A.c2',
'uni1E03.sc' : 'uni1E02.c2',
'uni1E0B.sc' : 'uni1E0A.c2',
'uni1E1F.sc' : 'uni1E1E.c2',
'uni1E23.sc' : 'uni1E22.c2',
'uni1E41.sc' : 'uni1E40.c2',
'uni1E57.sc' : 'uni1E56.c2',
'uni1E61.sc' : 'uni1E60.c2',
'uni1E6B.sc' : 'uni1E6A.c2',
}
    # NOTE: the uni0163/uni0219/uni021B small caps are covered by the
    # special_cases table above, so they need no branches of their own.
    for g in f:
        if g[-3:] == '.sc' and g not in ['i.TRK.sc', 'l.CAT.sc', 'germandbls.sc']:
            if g in special_cases:
                make_glyph_reference(special_cases[g], f[g])
            elif g in ('ampersand.sc', 'periodcentered.sc'):
                make_glyph_reference(g[:-3] + '.c2', f[g])
            elif g in ('ae.sc', 'oe.sc', 'ij.sc'):
                make_glyph_reference(g[:-3].upper() + '.c2', f[g])
            else:
                make_glyph_reference(g[:-3].capitalize() + '.c2', f[g])
#--------------------------------------------------------------------------
for letter in 'GKkLlNnRr':
build_accented_glyph(letter + 'commaaccent', f[base(letter)], f['uni0326'])
build_accented_glyph('uni0218', f['S'], f['uni0326'])
build_accented_glyph('uni0219', f['s'], f['uni0326'])
build_accented_glyph('uni021A', f['T'], f['uni0326'])
build_accented_glyph('uni021B', f['t'], f['uni0326'])
build_accented_glyph('gcommaaccent', f['g'], f['uni0312'])
for letter in 'gklnr':
build_accented_glyph(letter + 'commaaccent.sc', f[letter + '.sc'], f['uni0326'])
build_accented_glyph('uni0219.sc', f['s.sc'], f['uni0326'])
build_accented_glyph('uni021B.sc', f['t.sc'], f['uni0326'])
#--------------------------------------------------------------------------
for letter in 'CcSs':
build_accented_glyph(letter + 'cedilla', f[base(letter)], f['uni0327'])
remove_overlap(f[letter + 'cedilla'])
build_accented_glyph('uni0162', f['T'], f['uni0327'])
remove_overlap(f['uni0162'])
# build_accented_glyph('uni0163', f['t'], f['uni0327']) <-- Manually hinted, so don't rebuild.
# remove_overlap(f['uni0163'])
for letter in 'cs':
build_accented_glyph(letter + 'cedilla.sc', f[letter + '.sc'], f['uni0327'])
remove_overlap(f[letter + 'cedilla.sc'])
build_accented_glyph('uni0163.sc', f['t.sc'], f['uni0327'])
remove_overlap(f['uni0163.sc'])
#--------------------------------------------------------------------------
for letter in 'aeiou':
build_accented_glyph(letter + 'grave', f[base(letter)], f['gravecomb'])
for letter in 'AEIOU':
build_accented_glyph(letter + 'grave', f[base(letter)], f['gravecomb.cap'])
for letter in 'aeiou':
build_accented_glyph(letter + 'grave.sc', f[letter + '.sc'], f['gravecomb'])
#--------------------------------------------------------------------------
for letter in 'aceinorsuyz':
build_accented_glyph(letter + 'acute', f[base(letter)], f['acutecomb'])
for letter in 'ACEILNORSUYZ':
build_accented_glyph(letter + 'acute', f[base(letter)], f['acutecomb.cap'])
build_accented_glyph('lacute', f['l'], f['acutecomb.cap'])
for letter in 'aceilnorsuyz':
build_accented_glyph(letter + 'acute.sc', f[letter + '.sc'], f['acutecomb'])
#--------------------------------------------------------------------------
for letter in 'ainou':
build_accented_glyph(letter + 'tilde', f[base(letter)], f['tildecomb'])
for letter in 'AINOU':
build_accented_glyph(letter + 'tilde', f[base(letter)], f['tildecomb.cap'])
for letter in 'ainou':
build_accented_glyph(letter + 'tilde.sc', f[letter + '.sc'], f['tildecomb'])
#--------------------------------------------------------------------------
for letter in 'aeouy':
build_accented_glyph(letter + 'dieresis', f[base(letter)], f['uni0308'])
for letter in 'AEIOUY':
build_accented_glyph(letter + 'dieresis', f[base(letter)], f['uni0308.cap'])
for letter in 'i':
build_accented_glyph(letter + 'dieresis', f[base(letter)], f['uni0308.narrow'])
for letter in 'aeiouy':
build_accented_glyph(letter + 'dieresis.sc', f[letter + '.sc'], f['uni0308'])
#--------------------------------------------------------------------------
for letter in 'au':
build_accented_glyph(letter + 'ring', f[base(letter)], f['uni030A'])
for letter in 'AU':
build_accented_glyph(letter + 'ring', f[base(letter)], f['uni030A.cap'])
for letter in 'au':
build_accented_glyph(letter + 'ring.sc', f[letter + '.sc'], f['uni030A'])
#--------------------------------------------------------------------------
for letter in 'acegijosuwy':
build_accented_glyph(letter + 'circumflex', f[base(letter)], f['uni0302'])
for letter in 'ACEGHIJOSUWY':
build_accented_glyph(letter + 'circumflex', f[base(letter)], f['uni0302.cap'])
for letter in 'h':
build_accented_glyph(letter + 'circumflex', f[base(letter)], f['uni0302.ascend'])
for letter in ['f_h', 'f_f_h']:
build_accented_glyph(letter + 'circumflex', f[base(letter)], f['uni0302.cap'])
for letter in 'aceghijosuwy':
build_accented_glyph(letter + 'circumflex.sc', f[letter + '.sc'], f['uni0302'])
#--------------------------------------------------------------------------
for letter in 'aegiou':
build_accented_glyph(letter + 'breve', f[base(letter)], f['uni0306'])
for letter in 'AEGIOU':
build_accented_glyph(letter + 'breve', f[base(letter)], f['uni0306.cap'])
for letter in 'aegiou':
build_accented_glyph(letter + 'breve.sc', f[letter + '.sc'], f['uni0306'])
#--------------------------------------------------------------------------
for letter in 'cegz':
build_accented_glyph(letter + 'dotaccent', f[base(letter)], f['uni0307'])
for letter in 'CEGIZ':
build_accented_glyph(letter + 'dotaccent', f[base(letter)], f['uni0307.cap'])
for letter in 'cegz':
build_accented_glyph(letter + 'dotaccent.sc', f[letter + '.sc'], f['uni0307'])
build_accented_glyph('i.TRK.sc', f['i.sc'], f['uni0307'])
# Extra dot accents for Old Irish.
build_accented_glyph('uni1E02', f['B'], f['uni0307.cap'])
build_accented_glyph('uni1E03', f['b'], f['uni0307.cap'])
build_accented_glyph('uni1E0A', f['D'], f['uni0307.cap'])
build_accented_glyph('uni1E0B', f['d'], f['uni0307.cap'])
build_accented_glyph('uni1E1E', f['F'], f['uni0307.cap'])
build_accented_glyph('uni1E1F', f['f'], f['uni0307.cap'])
build_accented_glyph('uni1E22', f['H'], f['uni0307.cap'])
build_accented_glyph('uni1E23', f['h'], f['uni0307.cap'])
build_accented_glyph('uni1E40', f['M'], f['uni0307.cap'])
build_accented_glyph('uni1E41', f['m'], f['uni0307'])
build_accented_glyph('uni1E56', f['P'], f['uni0307.cap'])
build_accented_glyph('uni1E57', f['p'], f['uni0307'])
build_accented_glyph('uni1E60', f['S'], f['uni0307.cap'])
build_accented_glyph('uni1E61', f['s'], f['uni0307'])
build_accented_glyph('uni1E6A', f['T'], f['uni0307.cap'])
build_accented_glyph('uni1E6B', f['t'], f['uni0307'])
# Extra small caps for Old Irish.
build_accented_glyph('uni1E03.sc', f['b.sc'], f['uni0307'])
build_accented_glyph('uni1E0B.sc', f['d.sc'], f['uni0307'])
build_accented_glyph('uni1E1F.sc', f['f.sc'], f['uni0307'])
build_accented_glyph('uni1E23.sc', f['h.sc'], f['uni0307'])
build_accented_glyph('uni1E41.sc', f['m.sc'], f['uni0307'])
build_accented_glyph('uni1E57.sc', f['p.sc'], f['uni0307'])
build_accented_glyph('uni1E61.sc', f['s.sc'], f['uni0307'])
build_accented_glyph('uni1E6B.sc', f['t.sc'], f['uni0307'])
#--------------------------------------------------------------------------
for letter in 'cenrsz':
build_accented_glyph(letter + 'caron', f[base(letter)], f['uni030C'])
for letter in 'CDENRTSZ':
build_accented_glyph(letter + 'caron', f[base(letter)], f['uni030C.cap'])
for letter in 'dLlt':
build_accented_glyph(letter + 'caron', f[base(letter)], f['uni0315'])
for letter in 'cdenrstz':
build_accented_glyph(letter + 'caron.sc', f[letter + '.sc'], f['uni030C'])
build_accented_glyph('lcaron.sc', f['l.sc'], f['uni0315'])
#--------------------------------------------------------------------------
for letter in 'aeiou':
build_accented_glyph(letter + 'macron', f[base(letter)], f['uni0304'])
for letter in 'AEIOU':
build_accented_glyph(letter + 'macron', f[base(letter)], f['uni0304.cap'])
for letter in 'aeiou':
build_accented_glyph(letter + 'macron.sc', f[letter + '.sc'], f['uni0304'])
#--------------------------------------------------------------------------
for letter in 'ou':
build_accented_glyph(letter + 'hungarumlaut', f[base(letter)], f['uni030B'])
for letter in 'OU':
build_accented_glyph(letter + 'hungarumlaut', f[base(letter)], f['uni030B.cap'])
for letter in 'ou':
build_accented_glyph(letter + 'hungarumlaut.sc', f[letter + '.sc'], f['uni030B'])
#--------------------------------------------------------------------------
build_multigraph('Q_U', [f['Q.001'], f['U']])
build_multigraph('Q_u', [f['Q.002'], f['u']])
build_multigraph('napostrophe', [f['quoteright'], f['n']])
build_multigraph('napostrophe.sc', [f['quoteright'], f['n.sc']])
build_multigraph('IJ', [f['I'], f['J']])
build_multigraph('ij', [f['i'], f['j']])
build_multigraph('ij.sc', [f['i.sc'], f['j.sc']])
build_multigraph('germandbls.sc', [f['s.sc'], f['s.sc']])
for g in ['parenleft', 'parenright',
'bracketleft', 'bracketright',
'braceleft', 'braceright',
]:
make_glyph_reference(g + '.u', f[g],
transformation = (1, 0, 0, 1, 0, 70),
copy_spacing_anchors = False)
for g in ['figuredash']:
make_glyph_reference(g + '.u', f[g],
transformation = (1, 0, 0, 1, 0, 70),
copy_spacing_anchors = False)
#--------------------------------------------------------------------------
f.selection.all()
space_selected_by_anchors(f)
f.selection.none()
generate_kerning_and_read_features(None, f)
#--------------------------------------------------------------------------
font_db.db_close(f)
#--------------------------------------------------------------------------
| {
"content_hash": "ce891efba145e7c8c1813d665e9a2ad6",
"timestamp": "",
"source": "github",
"line_count": 363,
"max_line_length": 119,
"avg_line_length": 44.17906336088154,
"alnum_prop": 0.5297125397518239,
"repo_name": "chemoelectric/sortsmill",
"id": "8232bc556b51d325b107df41100d836b49c417a8",
"size": "16062",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jannon/JannonStM_build.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "OCaml",
"bytes": "170116"
},
{
"name": "Python",
"bytes": "215416"
},
{
"name": "Shell",
"bytes": "100328"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from PIL import VERSION, PILLOW_VERSION, _plugins
import logging
import warnings
logger = logging.getLogger(__name__)
class DecompressionBombWarning(RuntimeWarning):
pass
class _imaging_not_installed(object):
# module placeholder
def __getattr__(self, id):
raise ImportError("The _imaging C module is not installed")
# Limit to around a quarter gigabyte for a 24 bit (3 bpp) image
MAX_IMAGE_PIXELS = int(1024 * 1024 * 1024 / 4 / 3)
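# i.e. int(2**30 / 4 / 3) = 89478485 pixels: a 256 MiB pixel-data budget
# at 3 bytes per pixel.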
try:
# give Tk a chance to set up the environment, in case we're
# using an _imaging module linked against libtcl/libtk (use
# __import__ to hide this from naive packagers; we don't really
# depend on Tk unless ImageTk is used, and that module already
# imports Tkinter)
__import__("FixTk")
except ImportError:
pass
try:
# If the _imaging C module is not present, Pillow will not load.
# Note that other modules should not refer to _imaging directly;
# import Image and use the Image.core variable instead.
# Also note that Image.core is not a publicly documented interface,
# and should be considered private and subject to change.
from PIL import _imaging as core
if PILLOW_VERSION != getattr(core, 'PILLOW_VERSION', None):
raise ImportError("The _imaging extension was built for another "
                          "version of Pillow or PIL")
except ImportError as v:
core = _imaging_not_installed()
# Explanations for ways that we know we might have an import error
if str(v).startswith("Module use of python"):
# The _imaging C module is present, but not compiled for
# the right version (windows only). Print a warning, if
# possible.
warnings.warn(
"The _imaging extension was built for another version "
"of Python.",
RuntimeWarning
)
elif str(v).startswith("The _imaging extension"):
warnings.warn(str(v), RuntimeWarning)
elif "Symbol not found: _PyUnicodeUCS2_FromString" in str(v):
warnings.warn(
"The _imaging extension was built for Python with UCS2 support; "
"recompile PIL or build Python --without-wide-unicode. ",
RuntimeWarning
)
elif "Symbol not found: _PyUnicodeUCS4_FromString" in str(v):
warnings.warn(
"The _imaging extension was built for Python with UCS4 support; "
"recompile PIL or build Python --with-wide-unicode. ",
RuntimeWarning
)
# Fail here anyway. Don't let people run with a mostly broken Pillow.
# see docs/porting-pil-to-pillow.rst
raise
try:
import builtins
except ImportError:
import __builtin__
builtins = __builtin__
from PIL import ImageMode
from PIL._binary import i8
from PIL._util import isPath
from PIL._util import isStringType
from PIL._util import deferred_error
import os
import sys
import io
import struct
# type stuff
import collections
import numbers
# works everywhere, win for pypy, not cpython
USE_CFFI_ACCESS = hasattr(sys, 'pypy_version_info')
try:
import cffi
HAS_CFFI = True
except ImportError:
HAS_CFFI = False
def isImageType(t):
"""
Checks if an object is an image object.
.. warning::
This function is for internal use only.
:param t: object to check if it's an image
:returns: True if the object is an image
"""
return hasattr(t, "im")
#
# Constants (also defined in _imagingmodule.c!)
NONE = 0
# transpose
FLIP_LEFT_RIGHT = 0
FLIP_TOP_BOTTOM = 1
ROTATE_90 = 2
ROTATE_180 = 3
ROTATE_270 = 4
TRANSPOSE = 5
# transforms
AFFINE = 0
EXTENT = 1
PERSPECTIVE = 2
QUAD = 3
MESH = 4
# resampling filters
NEAREST = NONE = 0
LANCZOS = ANTIALIAS = 1
BILINEAR = LINEAR = 2
BICUBIC = CUBIC = 3
# dithers
NONE = 0
NEAREST = 0
ORDERED = 1 # Not yet implemented
RASTERIZE = 2 # Not yet implemented
FLOYDSTEINBERG = 3 # default
# palettes/quantizers
WEB = 0
ADAPTIVE = 1
MEDIANCUT = 0
MAXCOVERAGE = 1
FASTOCTREE = 2
# categories
NORMAL = 0
SEQUENCE = 1
CONTAINER = 2
if hasattr(core, 'DEFAULT_STRATEGY'):
DEFAULT_STRATEGY = core.DEFAULT_STRATEGY
FILTERED = core.FILTERED
HUFFMAN_ONLY = core.HUFFMAN_ONLY
RLE = core.RLE
FIXED = core.FIXED
# --------------------------------------------------------------------
# Registries
ID = []
OPEN = {}
MIME = {}
SAVE = {}
SAVE_ALL = {}
EXTENSION = {}
# --------------------------------------------------------------------
# Modes supported by this version
_MODEINFO = {
# NOTE: this table will be removed in future versions. use
# getmode* functions or ImageMode descriptors instead.
# official modes
"1": ("L", "L", ("1",)),
"L": ("L", "L", ("L",)),
"I": ("L", "I", ("I",)),
"F": ("L", "F", ("F",)),
"P": ("RGB", "L", ("P",)),
"RGB": ("RGB", "L", ("R", "G", "B")),
"RGBX": ("RGB", "L", ("R", "G", "B", "X")),
"RGBA": ("RGB", "L", ("R", "G", "B", "A")),
"CMYK": ("RGB", "L", ("C", "M", "Y", "K")),
"YCbCr": ("RGB", "L", ("Y", "Cb", "Cr")),
"LAB": ("RGB", "L", ("L", "A", "B")),
"HSV": ("RGB", "L", ("H", "S", "V")),
# Experimental modes include I;16, I;16L, I;16B, RGBa, BGR;15, and
# BGR;24. Use these modes only if you know exactly what you're
# doing...
}
if sys.byteorder == 'little':
_ENDIAN = '<'
else:
_ENDIAN = '>'
_MODE_CONV = {
# official modes
"1": ('|b1', None), # broken
"L": ('|u1', None),
"I": (_ENDIAN + 'i4', None),
"F": (_ENDIAN + 'f4', None),
"P": ('|u1', None),
"RGB": ('|u1', 3),
"RGBX": ('|u1', 4),
"RGBA": ('|u1', 4),
"CMYK": ('|u1', 4),
"YCbCr": ('|u1', 3),
"LAB": ('|u1', 3), # UNDONE - unsigned |u1i1i1
# I;16 == I;16L, and I;32 == I;32L
"I;16": ('<u2', None),
"I;16B": ('>u2', None),
"I;16L": ('<u2', None),
"I;16S": ('<i2', None),
"I;16BS": ('>i2', None),
"I;16LS": ('<i2', None),
"I;32": ('<u4', None),
"I;32B": ('>u4', None),
"I;32L": ('<u4', None),
"I;32S": ('<i4', None),
"I;32BS": ('>i4', None),
"I;32LS": ('<i4', None),
}
def _conv_type_shape(im):
shape = im.size[1], im.size[0]
typ, extra = _MODE_CONV[im.mode]
if extra is None:
return shape, typ
else:
return shape+(extra,), typ
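# For example, a 640x480 "RGB" image maps to (shape, typestr) of
# ((480, 640, 3), '|u1'), which is the layout exposed through the numpy
# __array_interface__ hook in Image.__getattr__ below.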
MODES = sorted(_MODEINFO.keys())
# raw modes that may be memory mapped. NOTE: if you change this, you
# may have to modify the stride calculation in map.c too!
_MAPMODES = ("L", "P", "RGBX", "RGBA", "CMYK", "I;16", "I;16L", "I;16B")
def getmodebase(mode):
"""
Gets the "base" mode for given mode. This function returns "L" for
images that contain grayscale data, and "RGB" for images that
contain color data.
:param mode: Input mode.
:returns: "L" or "RGB".
:exception KeyError: If the input mode was not a standard mode.
"""
return ImageMode.getmode(mode).basemode
def getmodetype(mode):
"""
Gets the storage type mode. Given a mode, this function returns a
single-layer mode suitable for storing individual bands.
:param mode: Input mode.
:returns: "L", "I", or "F".
:exception KeyError: If the input mode was not a standard mode.
"""
return ImageMode.getmode(mode).basetype
def getmodebandnames(mode):
"""
Gets a list of individual band names. Given a mode, this function returns
a tuple containing the names of individual bands (use
    :py:meth:`~PIL.Image.getmodetype` to get the mode used to store each
    individual band).
:param mode: Input mode.
:returns: A tuple containing band names. The length of the tuple
gives the number of bands in an image of the given mode.
:exception KeyError: If the input mode was not a standard mode.
"""
return ImageMode.getmode(mode).bands
def getmodebands(mode):
"""
Gets the number of individual bands for this mode.
:param mode: Input mode.
:returns: The number of bands in this mode.
:exception KeyError: If the input mode was not a standard mode.
"""
return len(ImageMode.getmode(mode).bands)
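# Examples for the mode helpers above (values follow the _MODEINFO table):
#
#     >>> getmodebase("RGBA")
#     'RGB'
#     >>> getmodetype("I")
#     'I'
#     >>> getmodebandnames("YCbCr")
#     ('Y', 'Cb', 'Cr')
#     >>> getmodebands("CMYK")
#     4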
# --------------------------------------------------------------------
# Helpers
_initialized = 0
def preinit():
"Explicitly load standard file format drivers."
global _initialized
if _initialized >= 1:
return
try:
from PIL import BmpImagePlugin
except ImportError:
pass
try:
from PIL import GifImagePlugin
except ImportError:
pass
try:
from PIL import JpegImagePlugin
except ImportError:
pass
try:
from PIL import PpmImagePlugin
except ImportError:
pass
try:
from PIL import PngImagePlugin
except ImportError:
pass
# try:
# import TiffImagePlugin
# except ImportError:
# pass
_initialized = 1
def init():
"""
Explicitly initializes the Python Imaging Library. This function
loads all available file format drivers.
"""
global _initialized
if _initialized >= 2:
return 0
for plugin in _plugins:
try:
logger.debug("Importing %s", plugin)
__import__("PIL.%s" % plugin, globals(), locals(), [])
except ImportError as e:
logger.debug("Image: failed to import %s: %s", plugin, e)
if OPEN or SAVE:
_initialized = 2
return 1
# --------------------------------------------------------------------
# Codec factories (used by tobytes/frombytes and ImageFile.load)
def _getdecoder(mode, decoder_name, args, extra=()):
# tweak arguments
if args is None:
args = ()
elif not isinstance(args, tuple):
args = (args,)
try:
# get decoder
decoder = getattr(core, decoder_name + "_decoder")
# print(decoder, mode, args + extra)
return decoder(mode, *args + extra)
except AttributeError:
raise IOError("decoder %s not available" % decoder_name)
def _getencoder(mode, encoder_name, args, extra=()):
# tweak arguments
if args is None:
args = ()
elif not isinstance(args, tuple):
args = (args,)
try:
# get encoder
encoder = getattr(core, encoder_name + "_encoder")
# print(encoder, mode, args + extra)
return encoder(mode, *args + extra)
except AttributeError:
raise IOError("encoder %s not available" % encoder_name)
# --------------------------------------------------------------------
# Simple expression analyzer
def coerce_e(value):
return value if isinstance(value, _E) else _E(value)
class _E(object):
def __init__(self, data):
self.data = data
def __add__(self, other):
return _E((self.data, "__add__", coerce_e(other).data))
def __mul__(self, other):
return _E((self.data, "__mul__", coerce_e(other).data))
def _getscaleoffset(expr):
stub = ["stub"]
data = expr(_E(stub)).data
try:
(a, b, c) = data # simplified syntax
if (a is stub and b == "__mul__" and isinstance(c, numbers.Number)):
return c, 0.0
if a is stub and b == "__add__" and isinstance(c, numbers.Number):
return 1.0, c
except TypeError:
pass
try:
((a, b, c), d, e) = data # full syntax
if (a is stub and b == "__mul__" and isinstance(c, numbers.Number) and
d == "__add__" and isinstance(e, numbers.Number)):
return c, e
except TypeError:
pass
raise ValueError("illegal expression")
# --------------------------------------------------------------------
# Implementation wrapper
class Image(object):
"""
This class represents an image object. To create
:py:class:`~PIL.Image.Image` objects, use the appropriate factory
functions. There's hardly ever any reason to call the Image constructor
directly.
* :py:func:`~PIL.Image.open`
* :py:func:`~PIL.Image.new`
* :py:func:`~PIL.Image.frombytes`
"""
format = None
format_description = None
def __init__(self):
# FIXME: take "new" parameters / other image?
# FIXME: turn mode and size into delegating properties?
self.im = None
self.mode = ""
self.size = (0, 0)
self.palette = None
self.info = {}
self.category = NORMAL
self.readonly = 0
self.pyaccess = None
@property
def width(self):
return self.size[0]
@property
def height(self):
return self.size[1]
def _new(self, im):
new = Image()
new.im = im
new.mode = im.mode
new.size = im.size
if self.palette:
new.palette = self.palette.copy()
if im.mode == "P" and not new.palette:
from PIL import ImagePalette
new.palette = ImagePalette.ImagePalette()
try:
new.info = self.info.copy()
except AttributeError:
# fallback (pre-1.5.2)
new.info = {}
for k, v in self.info:
new.info[k] = v
return new
_makeself = _new # compatibility
# Context Manager Support
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def close(self):
"""
Closes the file pointer, if possible.
This operation will destroy the image core and release its memory.
The image data will be unusable afterward.
This function is only required to close images that have not
had their file read and closed by the
:py:meth:`~PIL.Image.Image.load` method.
"""
try:
self.fp.close()
except Exception as msg:
logger.debug("Error closing: %s" % msg)
# Instead of simply setting to None, we're setting up a
# deferred error that will better explain that the core image
# object is gone.
self.im = deferred_error(ValueError("Operation on closed image"))
def _copy(self):
self.load()
self.im = self.im.copy()
self.pyaccess = None
self.readonly = 0
def _dump(self, file=None, format=None):
import tempfile
suffix = ''
if format:
suffix = '.'+format
if not file:
f, file = tempfile.mkstemp(suffix)
os.close(f)
self.load()
if not format or format == "PPM":
self.im.save_ppm(file)
else:
if not file.endswith(format):
file = file + "." + format
self.save(file, format)
return file
def __eq__(self, other):
if self.__class__.__name__ != other.__class__.__name__:
return False
a = (self.mode == other.mode)
b = (self.size == other.size)
c = (self.getpalette() == other.getpalette())
d = (self.info == other.info)
e = (self.category == other.category)
f = (self.readonly == other.readonly)
g = (self.tobytes() == other.tobytes())
return a and b and c and d and e and f and g
def __ne__(self, other):
eq = (self == other)
return not eq
def __repr__(self):
return "<%s.%s image mode=%s size=%dx%d at 0x%X>" % (
self.__class__.__module__, self.__class__.__name__,
self.mode, self.size[0], self.size[1],
id(self)
)
def _repr_png_(self):
""" iPython display hook support
:returns: png version of the image as bytes
"""
from io import BytesIO
b = BytesIO()
self.save(b, 'PNG')
return b.getvalue()
def __getattr__(self, name):
if name == "__array_interface__":
# numpy array interface support
new = {}
shape, typestr = _conv_type_shape(self)
new['shape'] = shape
new['typestr'] = typestr
new['data'] = self.tobytes()
return new
raise AttributeError(name)
def __getstate__(self):
return [
self.info,
self.mode,
self.size,
self.getpalette(),
self.tobytes()]
def __setstate__(self, state):
Image.__init__(self)
self.tile = []
info, mode, size, palette, data = state
self.info = info
self.mode = mode
self.size = size
self.im = core.new(mode, size)
if mode in ("L", "P") and palette:
self.putpalette(palette)
self.frombytes(data)
def tobytes(self, encoder_name="raw", *args):
"""
Return image as a bytes object
:param encoder_name: What encoder to use. The default is to
use the standard "raw" encoder.
:param args: Extra arguments to the encoder.
:rtype: A bytes object.
"""
# may pass tuple instead of argument list
if len(args) == 1 and isinstance(args[0], tuple):
args = args[0]
if encoder_name == "raw" and args == ():
args = self.mode
self.load()
# unpack data
e = _getencoder(self.mode, encoder_name, args)
e.setimage(self.im)
bufsize = max(65536, self.size[0] * 4) # see RawEncode.c
data = []
while True:
l, s, d = e.encode(bufsize)
data.append(d)
if s:
break
if s < 0:
raise RuntimeError("encoder error %d in tobytes" % s)
return b"".join(data)
def tostring(self, *args, **kw):
raise Exception("tostring() has been removed. " +
"Please call tobytes() instead.")
def tobitmap(self, name="image"):
"""
Returns the image converted to an X11 bitmap.
.. note:: This method only works for mode "1" images.
:param name: The name prefix to use for the bitmap variables.
:returns: A string containing an X11 bitmap.
:raises ValueError: If the mode is not "1"
"""
self.load()
if self.mode != "1":
raise ValueError("not a bitmap")
data = self.tobytes("xbm")
return b"".join([
("#define %s_width %d\n" % (name, self.size[0])).encode('ascii'),
("#define %s_height %d\n" % (name, self.size[1])).encode('ascii'),
("static char %s_bits[] = {\n" % name).encode('ascii'), data, b"};"
])
def frombytes(self, data, decoder_name="raw", *args):
"""
Loads this image with pixel data from a bytes object.
This method is similar to the :py:func:`~PIL.Image.frombytes` function,
but loads data into this image instead of creating a new image object.
"""
# may pass tuple instead of argument list
if len(args) == 1 and isinstance(args[0], tuple):
args = args[0]
# default format
if decoder_name == "raw" and args == ():
args = self.mode
# unpack data
d = _getdecoder(self.mode, decoder_name, args)
d.setimage(self.im)
s = d.decode(data)
if s[0] >= 0:
raise ValueError("not enough image data")
if s[1] != 0:
raise ValueError("cannot decode image data")
def fromstring(self, *args, **kw):
raise Exception("fromstring() has been removed. " +
"Please call frombytes() instead.")
def load(self):
"""
Allocates storage for the image and loads the pixel data. In
normal cases, you don't need to call this method, since the
Image class automatically loads an opened image when it is
accessed for the first time. This method will close the file
associated with the image.
:returns: An image access object.
:rtype: :ref:`PixelAccess` or :py:class:`PIL.PyAccess`
"""
if self.im and self.palette and self.palette.dirty:
# realize palette
self.im.putpalette(*self.palette.getdata())
self.palette.dirty = 0
self.palette.mode = "RGB"
self.palette.rawmode = None
if "transparency" in self.info:
if isinstance(self.info["transparency"], int):
self.im.putpalettealpha(self.info["transparency"], 0)
else:
self.im.putpalettealphas(self.info["transparency"])
self.palette.mode = "RGBA"
if self.im:
if HAS_CFFI and USE_CFFI_ACCESS:
if self.pyaccess:
return self.pyaccess
from PIL import PyAccess
self.pyaccess = PyAccess.new(self, self.readonly)
if self.pyaccess:
return self.pyaccess
return self.im.pixel_access(self.readonly)
def verify(self):
"""
Verifies the contents of a file. For data read from a file, this
method attempts to determine if the file is broken, without
actually decoding the image data. If this method finds any
problems, it raises suitable exceptions. If you need to load
the image after using this method, you must reopen the image
file.
"""
pass
def convert(self, mode=None, matrix=None, dither=None,
palette=WEB, colors=256):
"""
Returns a converted copy of this image. For the "P" mode, this
method translates pixels through the palette. If mode is
omitted, a mode is chosen so that all information in the image
and the palette can be represented without a palette.
The current version supports all possible conversions between
"L", "RGB" and "CMYK." The **matrix** argument only supports "L"
and "RGB".
When translating a color image to black and white (mode "L"),
the library uses the ITU-R 601-2 luma transform::
L = R * 299/1000 + G * 587/1000 + B * 114/1000
The default method of converting a greyscale ("L") or "RGB"
image into a bilevel (mode "1") image uses Floyd-Steinberg
dither to approximate the original image luminosity levels. If
dither is NONE, all non-zero values are set to 255 (white). To
use other thresholds, use the :py:meth:`~PIL.Image.Image.point`
method.
:param mode: The requested mode. See: :ref:`concept-modes`.
:param matrix: An optional conversion matrix. If given, this
            should be a 4- or 12-tuple containing floating point values.
:param dither: Dithering method, used when converting from
mode "RGB" to "P" or from "RGB" or "L" to "1".
Available methods are NONE or FLOYDSTEINBERG (default).
:param palette: Palette to use when converting from mode "RGB"
to "P". Available palettes are WEB or ADAPTIVE.
:param colors: Number of colors to use for the ADAPTIVE palette.
Defaults to 256.
:rtype: :py:class:`~PIL.Image.Image`
:returns: An :py:class:`~PIL.Image.Image` object.
"""
if not mode:
# determine default mode
if self.mode == "P":
self.load()
if self.palette:
mode = self.palette.mode
else:
mode = "RGB"
else:
return self.copy()
self.load()
if matrix:
# matrix conversion
if mode not in ("L", "RGB"):
raise ValueError("illegal conversion")
im = self.im.convert_matrix(mode, matrix)
return self._new(im)
if mode == "P" and self.mode == "RGBA":
return self.quantize(colors)
trns = None
delete_trns = False
# transparency handling
if "transparency" in self.info and \
self.info['transparency'] is not None:
if self.mode in ('L', 'RGB') and mode == 'RGBA':
# Use transparent conversion to promote from transparent
# color to an alpha channel.
return self._new(self.im.convert_transparent(
mode, self.info['transparency']))
elif self.mode in ('L', 'RGB', 'P') and mode in ('L', 'RGB', 'P'):
t = self.info['transparency']
if isinstance(t, bytes):
# Dragons. This can't be represented by a single color
                    warnings.warn('Palette images with Transparency ' +
                                  'expressed in bytes should be converted ' +
                                  'to RGBA images')
delete_trns = True
else:
# get the new transparency color.
# use existing conversions
trns_im = Image()._new(core.new(self.mode, (1, 1)))
if self.mode == 'P':
trns_im.putpalette(self.palette)
trns_im.putpixel((0, 0), t)
if mode in ('L', 'RGB'):
trns_im = trns_im.convert(mode)
else:
# can't just retrieve the palette number, got to do it
# after quantization.
trns_im = trns_im.convert('RGB')
trns = trns_im.getpixel((0, 0))
elif self.mode == 'P' and mode == 'RGBA':
t = self.info['transparency']
delete_trns = True
if isinstance(t, bytes):
self.im.putpalettealphas(t)
elif isinstance(t, int):
self.im.putpalettealpha(t, 0)
else:
raise ValueError("Transparency for P mode should" +
" be bytes or int")
if mode == "P" and palette == ADAPTIVE:
im = self.im.quantize(colors)
new = self._new(im)
from PIL import ImagePalette
new.palette = ImagePalette.raw("RGB", new.im.getpalette("RGB"))
if delete_trns:
# This could possibly happen if we requantize to fewer colors.
# The transparency would be totally off in that case.
del(new.info['transparency'])
if trns is not None:
try:
new.info['transparency'] = new.palette.getcolor(trns)
except:
# if we can't make a transparent color, don't leave the old
# transparency hanging around to mess us up.
del(new.info['transparency'])
warnings.warn("Couldn't allocate palette entry " +
"for transparency")
return new
# colorspace conversion
if dither is None:
dither = FLOYDSTEINBERG
try:
im = self.im.convert(mode, dither)
except ValueError:
try:
# normalize source image and try again
im = self.im.convert(getmodebase(self.mode))
im = im.convert(mode, dither)
except KeyError:
raise ValueError("illegal conversion")
new_im = self._new(im)
if delete_trns:
# crash fail if we leave a bytes transparency in an rgb/l mode.
del(new_im.info['transparency'])
if trns is not None:
if new_im.mode == 'P':
try:
new_im.info['transparency'] = new_im.palette.getcolor(trns)
except:
del(new_im.info['transparency'])
warnings.warn("Couldn't allocate palette entry " +
"for transparency")
else:
new_im.info['transparency'] = trns
return new_im
def quantize(self, colors=256, method=None, kmeans=0, palette=None):
"""
Convert the image to 'P' mode with the specified number
of colors.
:param colors: The desired number of colors, <= 256
:param method: 0 = median cut
1 = maximum coverage
2 = fast octree
:param kmeans: Integer
:param palette: Quantize to the :py:class:`PIL.ImagingPalette` palette.
:returns: A new image
"""
self.load()
if method is None:
# defaults:
method = 0
if self.mode == 'RGBA':
method = 2
if self.mode == 'RGBA' and method != 2:
# Caller specified an invalid mode.
            raise ValueError('Fast Octree (method == 2) is the ' +
                             'only valid method for quantizing RGBA images')
if palette:
# use palette from reference image
palette.load()
if palette.mode != "P":
raise ValueError("bad mode for palette image")
if self.mode != "RGB" and self.mode != "L":
raise ValueError(
"only RGB or L mode images can be quantized to a palette"
)
im = self.im.convert("P", 1, palette.im)
return self._makeself(im)
im = self.im.quantize(colors, method, kmeans)
return self._new(im)
def copy(self):
"""
Copies this image. Use this method if you wish to paste things
into an image, but still retain the original.
:rtype: :py:class:`~PIL.Image.Image`
:returns: An :py:class:`~PIL.Image.Image` object.
"""
self.load()
im = self.im.copy()
return self._new(im)
def crop(self, box=None):
"""
Returns a rectangular region from this image. The box is a
4-tuple defining the left, upper, right, and lower pixel
coordinate.
This is a lazy operation. Changes to the source image may or
may not be reflected in the cropped image. To break the
connection, call the :py:meth:`~PIL.Image.Image.load` method on
the cropped copy.
:param box: The crop rectangle, as a (left, upper, right, lower)-tuple.
:rtype: :py:class:`~PIL.Image.Image`
:returns: An :py:class:`~PIL.Image.Image` object.
"""
self.load()
if box is None:
return self.copy()
# lazy operation
return _ImageCrop(self, box)
def draft(self, mode, size):
"""
Configures the image file loader so it returns a version of the
image that as closely as possible matches the given mode and
size. For example, you can use this method to convert a color
JPEG to greyscale while loading it, or to extract a 128x192
version from a PCD file.
Note that this method modifies the :py:class:`~PIL.Image.Image` object
in place. If the image has already been loaded, this method has no
effect.
:param mode: The requested mode.
:param size: The requested size.
"""
pass
def _expand(self, xmargin, ymargin=None):
if ymargin is None:
ymargin = xmargin
self.load()
return self._new(self.im.expand(xmargin, ymargin, 0))
def filter(self, filter):
"""
Filters this image using the given filter. For a list of
available filters, see the :py:mod:`~PIL.ImageFilter` module.
:param filter: Filter kernel.
:returns: An :py:class:`~PIL.Image.Image` object. """
self.load()
if isinstance(filter, collections.Callable):
filter = filter()
if not hasattr(filter, "filter"):
raise TypeError("filter argument should be ImageFilter.Filter " +
"instance or class")
if self.im.bands == 1:
return self._new(filter.filter(self.im))
# fix to handle multiband images since _imaging doesn't
ims = []
for c in range(self.im.bands):
ims.append(self._new(filter.filter(self.im.getband(c))))
return merge(self.mode, ims)
def getbands(self):
"""
Returns a tuple containing the name of each band in this image.
For example, **getbands** on an RGB image returns ("R", "G", "B").
:returns: A tuple containing band names.
:rtype: tuple
"""
return ImageMode.getmode(self.mode).bands
def getbbox(self):
"""
Calculates the bounding box of the non-zero regions in the
image.
:returns: The bounding box is returned as a 4-tuple defining the
left, upper, right, and lower pixel coordinate. If the image
is completely empty, this method returns None.
"""
self.load()
return self.im.getbbox()
def getcolors(self, maxcolors=256):
"""
Returns a list of colors used in this image.
:param maxcolors: Maximum number of colors. If this number is
exceeded, this method returns None. The default limit is
256 colors.
:returns: An unsorted list of (count, pixel) values.
"""
self.load()
if self.mode in ("1", "L", "P"):
h = self.im.histogram()
out = []
for i in range(256):
if h[i]:
out.append((h[i], i))
if len(out) > maxcolors:
return None
return out
return self.im.getcolors(maxcolors)
def getdata(self, band=None):
"""
Returns the contents of this image as a sequence object
containing pixel values. The sequence object is flattened, so
that values for line one follow directly after the values of
line zero, and so on.
Note that the sequence object returned by this method is an
internal PIL data type, which only supports certain sequence
operations. To convert it to an ordinary sequence (e.g. for
printing), use **list(im.getdata())**.
:param band: What band to return. The default is to return
all bands. To return a single band, pass in the index
value (e.g. 0 to get the "R" band from an "RGB" image).
:returns: A sequence-like object.
"""
self.load()
if band is not None:
return self.im.getband(band)
return self.im # could be abused
def getextrema(self):
"""
        Gets the minimum and maximum pixel values for each band in
the image.
:returns: For a single-band image, a 2-tuple containing the
minimum and maximum pixel value. For a multi-band image,
a tuple containing one 2-tuple for each band.
"""
self.load()
if self.im.bands > 1:
extrema = []
for i in range(self.im.bands):
extrema.append(self.im.getband(i).getextrema())
return tuple(extrema)
return self.im.getextrema()
def getim(self):
"""
Returns a capsule that points to the internal image memory.
:returns: A capsule object.
"""
self.load()
return self.im.ptr
def getpalette(self):
"""
Returns the image palette as a list.
:returns: A list of color values [r, g, b, ...], or None if the
image has no palette.
"""
self.load()
try:
if bytes is str:
return [i8(c) for c in self.im.getpalette()]
else:
return list(self.im.getpalette())
except ValueError:
return None # no palette
def getpixel(self, xy):
"""
Returns the pixel value at a given position.
:param xy: The coordinate, given as (x, y).
:returns: The pixel value. If the image is a multi-layer image,
this method returns a tuple.
"""
self.load()
if self.pyaccess:
return self.pyaccess.getpixel(xy)
return self.im.getpixel(xy)
def getprojection(self):
"""
Get projection to x and y axes
:returns: Two sequences, indicating where there are non-zero
pixels along the X-axis and the Y-axis, respectively.
"""
self.load()
x, y = self.im.getprojection()
return [i8(c) for c in x], [i8(c) for c in y]
def histogram(self, mask=None, extrema=None):
"""
Returns a histogram for the image. The histogram is returned as
a list of pixel counts, one for each pixel value in the source
image. If the image has more than one band, the histograms for
all bands are concatenated (for example, the histogram for an
"RGB" image contains 768 values).
A bilevel image (mode "1") is treated as a greyscale ("L") image
by this method.
If a mask is provided, the method returns a histogram for those
parts of the image where the mask image is non-zero. The mask
image must have the same size as the image, and be either a
bi-level image (mode "1") or a greyscale image ("L").
:param mask: An optional mask.
:returns: A list containing pixel counts.
"""
self.load()
if mask:
mask.load()
return self.im.histogram((0, 0), mask.im)
if self.mode in ("I", "F"):
if extrema is None:
extrema = self.getextrema()
return self.im.histogram(extrema)
return self.im.histogram()
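    # Illustrative usage (a sketch; assumes `im` is an "RGB" image):
    #
    #     h = im.histogram()
    #     len(h)   # 256 counts per band, three bands concatenated -> 768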
def offset(self, xoffset, yoffset=None):
raise Exception("offset() has been removed. " +
"Please call ImageChops.offset() instead.")
def paste(self, im, box=None, mask=None):
"""
Pastes another image into this image. The box argument is either
a 2-tuple giving the upper left corner, a 4-tuple defining the
left, upper, right, and lower pixel coordinate, or None (same as
(0, 0)). If a 4-tuple is given, the size of the pasted image
must match the size of the region.
If the modes don't match, the pasted image is converted to the mode of
this image (see the :py:meth:`~PIL.Image.Image.convert` method for
details).
        Instead of an image, the source can be an integer or tuple
containing pixel values. The method then fills the region
with the given color. When creating RGB images, you can
also use color strings as supported by the ImageColor module.
If a mask is given, this method updates only the regions
indicated by the mask. You can use either "1", "L" or "RGBA"
images (in the latter case, the alpha band is used as mask).
Where the mask is 255, the given image is copied as is. Where
the mask is 0, the current value is preserved. Intermediate
values will mix the two images together, including their alpha
channels if they have them.
See :py:meth:`~PIL.Image.Image.alpha_composite` if you want to
combine images with respect to their alpha channels.
:param im: Source image or pixel value (integer or tuple).
:param box: An optional 4-tuple giving the region to paste into.
If a 2-tuple is used instead, it's treated as the upper left
corner. If omitted or None, the source is pasted into the
upper left corner.
If an image is given as the second argument and there is no
third, the box defaults to (0, 0), and the second argument
is interpreted as a mask image.
:param mask: An optional mask image.
"""
if isImageType(box) and mask is None:
# abbreviated paste(im, mask) syntax
mask = box
box = None
if box is None:
# cover all of self
box = (0, 0) + self.size
if len(box) == 2:
            # upper left corner given; get size from image or mask
if isImageType(im):
size = im.size
elif isImageType(mask):
size = mask.size
else:
# FIXME: use self.size here?
raise ValueError(
"cannot determine region size; use 4-item box"
)
box = box + (box[0]+size[0], box[1]+size[1])
if isStringType(im):
from PIL import ImageColor
im = ImageColor.getcolor(im, self.mode)
elif isImageType(im):
im.load()
if self.mode != im.mode:
if self.mode != "RGB" or im.mode not in ("RGBA", "RGBa"):
# should use an adapter for this!
im = im.convert(self.mode)
im = im.im
self.load()
if self.readonly:
self._copy()
if mask:
mask.load()
self.im.paste(im, box, mask.im)
else:
self.im.paste(im, box)
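    # Illustrative usage (a sketch; assumes `im` is an "RGB" image and
    # `overlay` is an "RGBA" image that fits inside it):
    #
    #     im.paste(overlay, (10, 10), overlay)   # alpha band doubles as mask
    #     im.paste("red", (0, 0, 32, 32))        # flood a region with a color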
def point(self, lut, mode=None):
"""
Maps this image through a lookup table or function.
        :param lut: A lookup table, containing 256 (or 65536 if
self.mode=="I" and mode == "L") values per band in the
image. A function can be used instead, it should take a
single argument. The function is called once for each
possible pixel value, and the resulting table is applied to
all bands of the image.
:param mode: Output mode (default is same as input). In the
current version, this can only be used if the source image
has mode "L" or "P", and the output has mode "1" or the
source image mode is "I" and the output mode is "L".
:returns: An :py:class:`~PIL.Image.Image` object.
"""
self.load()
if isinstance(lut, ImagePointHandler):
return lut.point(self)
if callable(lut):
# if it isn't a list, it should be a function
if self.mode in ("I", "I;16", "F"):
# check if the function can be used with point_transform
# UNDONE wiredfool -- I think this prevents us from ever doing
# a gamma function point transform on > 8bit images.
scale, offset = _getscaleoffset(lut)
return self._new(self.im.point_transform(scale, offset))
# for other modes, convert the function to a table
lut = [lut(i) for i in range(256)] * self.im.bands
if self.mode == "F":
# FIXME: _imaging returns a confusing error message for this case
raise ValueError("point operation not supported for this mode")
return self._new(self.im.point(lut, mode))
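    # Illustrative usage (a sketch; assumes `im` is an "L" mode image):
    #
    #     inverted = im.point(lambda v: 255 - v)  # function -> 256-entry table
    #     bilevel = im.point(lambda v: 255 if v > 128 else 0).convert("1")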
def putalpha(self, alpha):
"""
Adds or replaces the alpha layer in this image. If the image
does not have an alpha layer, it's converted to "LA" or "RGBA".
The new layer must be either "L" or "1".
:param alpha: The new alpha layer. This can either be an "L" or "1"
image having the same size as this image, or an integer or
other color value.
"""
self.load()
if self.readonly:
self._copy()
if self.mode not in ("LA", "RGBA"):
# attempt to promote self to a matching alpha mode
try:
mode = getmodebase(self.mode) + "A"
try:
self.im.setmode(mode)
self.pyaccess = None
except (AttributeError, ValueError):
# do things the hard way
im = self.im.convert(mode)
if im.mode not in ("LA", "RGBA"):
raise ValueError # sanity check
self.im = im
self.pyaccess = None
self.mode = self.im.mode
except (KeyError, ValueError):
raise ValueError("illegal image mode")
if self.mode == "LA":
band = 1
else:
band = 3
if isImageType(alpha):
# alpha layer
if alpha.mode not in ("1", "L"):
raise ValueError("illegal image mode")
alpha.load()
if alpha.mode == "1":
alpha = alpha.convert("L")
else:
# constant alpha
try:
self.im.fillband(band, alpha)
except (AttributeError, ValueError):
# do things the hard way
alpha = new("L", self.size, alpha)
else:
return
self.im.putband(alpha.im, band)
def putdata(self, data, scale=1.0, offset=0.0):
"""
Copies pixel data to this image. This method copies data from a
sequence object into the image, starting at the upper left
corner (0, 0), and continuing until either the image or the
sequence ends. The scale and offset values are used to adjust
the sequence values: **pixel = value*scale + offset**.
:param data: A sequence object.
:param scale: An optional scale value. The default is 1.0.
:param offset: An optional offset value. The default is 0.0.
"""
self.load()
if self.readonly:
self._copy()
self.im.putdata(data, scale, offset)
def putpalette(self, data, rawmode="RGB"):
"""
Attaches a palette to this image. The image must be a "P" or
"L" image, and the palette sequence must contain 768 integer
values, where each group of three values represent the red,
green, and blue values for the corresponding pixel
index. Instead of an integer sequence, you can use an 8-bit
string.
:param data: A palette sequence (either a list or a string).
"""
from PIL import ImagePalette
if self.mode not in ("L", "P"):
raise ValueError("illegal image mode")
self.load()
if isinstance(data, ImagePalette.ImagePalette):
palette = ImagePalette.raw(data.rawmode, data.palette)
else:
if not isinstance(data, bytes):
if bytes is str:
data = "".join(chr(x) for x in data)
else:
data = bytes(data)
palette = ImagePalette.raw(rawmode, data)
self.mode = "P"
self.palette = palette
self.palette.mode = "RGB"
self.load() # install new palette
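    # Illustrative usage (a sketch; assumes `im` is a "P" mode image):
    #
    #     im.putpalette([c for v in range(256) for c in (v, v, v)])
    #     # 768 integers -> a linear grayscale palette (r == g == b)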
def putpixel(self, xy, value):
"""
Modifies the pixel at the given position. The color is given as
a single numerical value for single-band images, and a tuple for
multi-band images.
Note that this method is relatively slow. For more extensive changes,
use :py:meth:`~PIL.Image.Image.paste` or the :py:mod:`~PIL.ImageDraw`
module instead.
See:
* :py:meth:`~PIL.Image.Image.paste`
* :py:meth:`~PIL.Image.Image.putdata`
* :py:mod:`~PIL.ImageDraw`
:param xy: The pixel coordinate, given as (x, y).
:param value: The pixel value.
"""
self.load()
if self.readonly:
self._copy()
self.pyaccess = None
self.load()
if self.pyaccess:
return self.pyaccess.putpixel(xy, value)
return self.im.putpixel(xy, value)
def resize(self, size, resample=NEAREST):
"""
Returns a resized copy of this image.
:param size: The requested size in pixels, as a 2-tuple:
(width, height).
:param resample: An optional resampling filter. This can be
one of :py:attr:`PIL.Image.NEAREST` (use nearest neighbour),
:py:attr:`PIL.Image.BILINEAR` (linear interpolation),
:py:attr:`PIL.Image.BICUBIC` (cubic spline interpolation), or
:py:attr:`PIL.Image.LANCZOS` (a high-quality downsampling filter).
If omitted, or if the image has mode "1" or "P", it is
            set to :py:attr:`PIL.Image.NEAREST`.
:returns: An :py:class:`~PIL.Image.Image` object.
"""
if resample not in (NEAREST, BILINEAR, BICUBIC, LANCZOS):
raise ValueError("unknown resampling filter")
self.load()
size = tuple(size)
if self.size == size:
return self._new(self.im)
if self.mode in ("1", "P"):
resample = NEAREST
if self.mode == 'RGBA':
return self.convert('RGBa').resize(size, resample).convert('RGBA')
return self._new(self.im.resize(size, resample))
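    # Illustrative usage (a sketch):
    #
    #     small = im.resize((128, 128), LANCZOS)   # high-quality downscale
    #     pixelated = im.resize((640, 480))        # defaults to NEAREST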
def rotate(self, angle, resample=NEAREST, expand=0):
"""
Returns a rotated copy of this image. This method returns a
copy of this image, rotated the given number of degrees counter
clockwise around its centre.
:param angle: In degrees counter clockwise.
:param resample: An optional resampling filter. This can be
one of :py:attr:`PIL.Image.NEAREST` (use nearest neighbour),
:py:attr:`PIL.Image.BILINEAR` (linear interpolation in a 2x2
environment), or :py:attr:`PIL.Image.BICUBIC`
(cubic spline interpolation in a 4x4 environment).
If omitted, or if the image has mode "1" or "P", it is
            set to :py:attr:`PIL.Image.NEAREST`.
:param expand: Optional expansion flag. If true, expands the output
image to make it large enough to hold the entire rotated image.
If false or omitted, make the output image the same size as the
input image.
:returns: An :py:class:`~PIL.Image.Image` object.
"""
if expand:
import math
angle = -angle * math.pi / 180
matrix = [
math.cos(angle), math.sin(angle), 0.0,
-math.sin(angle), math.cos(angle), 0.0
]
def transform(x, y, matrix=matrix):
(a, b, c, d, e, f) = matrix
return a*x + b*y + c, d*x + e*y + f
# calculate output size
w, h = self.size
xx = []
yy = []
for x, y in ((0, 0), (w, 0), (w, h), (0, h)):
x, y = transform(x, y)
xx.append(x)
yy.append(y)
w = int(math.ceil(max(xx)) - math.floor(min(xx)))
h = int(math.ceil(max(yy)) - math.floor(min(yy)))
# adjust center
x, y = transform(w / 2.0, h / 2.0)
matrix[2] = self.size[0] / 2.0 - x
matrix[5] = self.size[1] / 2.0 - y
return self.transform((w, h), AFFINE, matrix, resample)
if resample not in (NEAREST, BILINEAR, BICUBIC):
raise ValueError("unknown resampling filter")
self.load()
if self.mode in ("1", "P"):
resample = NEAREST
return self._new(self.im.rotate(angle, resample, expand))
def save(self, fp, format=None, **params):
"""
Saves this image under the given filename. If no format is
specified, the format to use is determined from the filename
extension, if possible.
Keyword options can be used to provide additional instructions
to the writer. If a writer doesn't recognise an option, it is
silently ignored. The available options are described in the
:doc:`image format documentation
<../handbook/image-file-formats>` for each writer.
You can use a file object instead of a filename. In this case,
you must always specify the format. The file object must
implement the ``seek``, ``tell``, and ``write``
methods, and be opened in binary mode.
:param fp: A filename (string), pathlib.Path object or file object.
:param format: Optional format override. If omitted, the
format to use is determined from the filename extension.
If a file object was used instead of a filename, this
parameter should always be used.
:param options: Extra parameters to the image writer.
:returns: None
:exception KeyError: If the output format could not be determined
from the file name. Use the format option to solve this.
:exception IOError: If the file could not be written. The file
may have been created, and may contain partial data.
"""
filename = ""
if isPath(fp):
filename = fp
elif sys.version_info >= (3, 4):
from pathlib import Path
if isinstance(fp, Path):
filename = str(fp.resolve())
elif hasattr(fp, "name") and isPath(fp.name):
filename = fp.name
# may mutate self!
self.load()
save_all = False
if 'save_all' in params:
save_all = params['save_all']
del params['save_all']
self.encoderinfo = params
self.encoderconfig = ()
preinit()
ext = os.path.splitext(filename)[1].lower()
if not format:
if ext not in EXTENSION:
init()
format = EXTENSION[ext]
if format.upper() not in SAVE:
init()
if save_all:
save_handler = SAVE_ALL[format.upper()]
else:
save_handler = SAVE[format.upper()]
if filename:
fp = builtins.open(filename, "wb")
close = 1
else:
close = 0
try:
save_handler(self, fp, filename)
finally:
# do what we can to clean up
if close:
fp.close()
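    # Illustrative usage sketch (not part of the original module). When
    # writing to a file object there is no extension to inspect, so the
    # format must be given explicitly.
    #
    #   import io
    #   buf = io.BytesIO()
    #   im.save(buf, format="PNG")             # im is a hypothetical Image
    #   png_bytes = buf.getvalue()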
def seek(self, frame):
"""
Seeks to the given frame in this sequence file. If you seek
beyond the end of the sequence, the method raises an
**EOFError** exception. When a sequence file is opened, the
library automatically seeks to frame 0.
Note that in the current version of the library, most sequence
        formats only allow you to seek to the next frame.
See :py:meth:`~PIL.Image.Image.tell`.
:param frame: Frame number, starting at 0.
:exception EOFError: If the call attempts to seek beyond the end
of the sequence.
"""
# overridden by file handlers
if frame != 0:
raise EOFError
def show(self, title=None, command=None):
"""
Displays this image. This method is mainly intended for
debugging purposes.
On Unix platforms, this method saves the image to a temporary
PPM file, and calls the **xv** utility.
On Windows, it saves the image to a temporary BMP file, and uses
the standard BMP display utility to show it (usually Paint).
:param title: Optional title to use for the image window,
where possible.
:param command: command used to show the image
"""
_show(self, title=title, command=command)
def split(self):
"""
Split this image into individual bands. This method returns a
tuple of individual image bands from an image. For example,
splitting an "RGB" image creates three new images each
containing a copy of one of the original bands (red, green,
blue).
:returns: A tuple containing bands.
"""
self.load()
if self.im.bands == 1:
ims = [self.copy()]
else:
ims = []
for i in range(self.im.bands):
ims.append(self._new(self.im.getband(i)))
return tuple(ims)
def tell(self):
"""
Returns the current frame number. See :py:meth:`~PIL.Image.Image.seek`.
:returns: Frame number, starting with 0.
"""
return 0
def thumbnail(self, size, resample=BICUBIC):
"""
Make this image into a thumbnail. This method modifies the
image to contain a thumbnail version of itself, no larger than
the given size. This method calculates an appropriate thumbnail
size to preserve the aspect of the image, calls the
:py:meth:`~PIL.Image.Image.draft` method to configure the file reader
(where applicable), and finally resizes the image.
Note that this function modifies the :py:class:`~PIL.Image.Image`
object in place. If you need to use the full resolution image as well,
apply this method to a :py:meth:`~PIL.Image.Image.copy` of the original
image.
:param size: Requested size.
:param resample: Optional resampling filter. This can be one
of :py:attr:`PIL.Image.NEAREST`, :py:attr:`PIL.Image.BILINEAR`,
:py:attr:`PIL.Image.BICUBIC`, or :py:attr:`PIL.Image.LANCZOS`.
If omitted, it defaults to :py:attr:`PIL.Image.BICUBIC`.
(was :py:attr:`PIL.Image.NEAREST` prior to version 2.5.0)
:returns: None
"""
# preserve aspect ratio
x, y = self.size
if x > size[0]:
y = int(max(y * size[0] / x, 1))
x = int(size[0])
if y > size[1]:
x = int(max(x * size[1] / y, 1))
y = int(size[1])
size = x, y
if size == self.size:
return
self.draft(None, size)
im = self.resize(size, resample)
self.im = im.im
self.mode = im.mode
self.size = size
self.readonly = 0
self.pyaccess = None
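    # Illustrative usage sketch (not part of the original module). Because
    # thumbnail() modifies the image in place, work on a copy whenever the
    # full-resolution image is still needed.
    #
    #   thumb = im.copy()                      # im is a hypothetical Image
    #   thumb.thumbnail((128, 128))            # aspect ratio is preserved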
# FIXME: the different transform methods need further explanation
# instead of bloating the method docs, add a separate chapter.
def transform(self, size, method, data=None, resample=NEAREST, fill=1):
"""
Transforms this image. This method creates a new image with the
given size, and the same mode as the original, and copies data
to the new image using the given transform.
:param size: The output size.
:param method: The transformation method. This is one of
:py:attr:`PIL.Image.EXTENT` (cut out a rectangular subregion),
:py:attr:`PIL.Image.AFFINE` (affine transform),
:py:attr:`PIL.Image.PERSPECTIVE` (perspective transform),
:py:attr:`PIL.Image.QUAD` (map a quadrilateral to a rectangle), or
:py:attr:`PIL.Image.MESH` (map a number of source quadrilaterals
in one operation).
:param data: Extra data to the transformation method.
:param resample: Optional resampling filter. It can be one of
:py:attr:`PIL.Image.NEAREST` (use nearest neighbour),
:py:attr:`PIL.Image.BILINEAR` (linear interpolation in a 2x2
environment), or :py:attr:`PIL.Image.BICUBIC` (cubic spline
interpolation in a 4x4 environment). If omitted, or if the image
has mode "1" or "P", it is set to :py:attr:`PIL.Image.NEAREST`.
:returns: An :py:class:`~PIL.Image.Image` object.
"""
if self.mode == 'RGBA':
return self.convert('RGBa').transform(
size, method, data, resample, fill).convert('RGBA')
if isinstance(method, ImageTransformHandler):
return method.transform(size, self, resample=resample, fill=fill)
if hasattr(method, "getdata"):
# compatibility w. old-style transform objects
method, data = method.getdata()
if data is None:
raise ValueError("missing method data")
im = new(self.mode, size, None)
if method == MESH:
# list of quads
for box, quad in data:
im.__transformer(box, self, QUAD, quad, resample, fill)
else:
im.__transformer((0, 0)+size, self, method, data, resample, fill)
return im
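    # Illustrative usage sketch (not part of the original module). EXTENT
    # cuts the rectangle (x0, y0, x1, y1) out of the input and rescales it
    # to the output size; here it zooms into the top-left quarter.
    #
    #   w, h = im.size                         # im is a hypothetical Image
    #   zoomed = im.transform((w, h), Image.EXTENT, (0, 0, w // 2, h // 2))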
def __transformer(self, box, image, method, data,
resample=NEAREST, fill=1):
# FIXME: this should be turned into a lazy operation (?)
w = box[2]-box[0]
h = box[3]-box[1]
if method == AFFINE:
# change argument order to match implementation
data = (data[2], data[0], data[1],
data[5], data[3], data[4])
elif method == EXTENT:
# convert extent to an affine transform
x0, y0, x1, y1 = data
xs = float(x1 - x0) / w
ys = float(y1 - y0) / h
method = AFFINE
data = (x0 + xs/2, xs, 0, y0 + ys/2, 0, ys)
elif method == PERSPECTIVE:
# change argument order to match implementation
data = (data[2], data[0], data[1],
data[5], data[3], data[4],
data[6], data[7])
elif method == QUAD:
# quadrilateral warp. data specifies the four corners
# given as NW, SW, SE, and NE.
nw = data[0:2]
sw = data[2:4]
se = data[4:6]
ne = data[6:8]
x0, y0 = nw
As = 1.0 / w
At = 1.0 / h
data = (x0, (ne[0]-x0)*As, (sw[0]-x0)*At,
(se[0]-sw[0]-ne[0]+x0)*As*At,
y0, (ne[1]-y0)*As, (sw[1]-y0)*At,
(se[1]-sw[1]-ne[1]+y0)*As*At)
else:
raise ValueError("unknown transformation method")
if resample not in (NEAREST, BILINEAR, BICUBIC):
raise ValueError("unknown resampling filter")
image.load()
self.load()
if image.mode in ("1", "P"):
resample = NEAREST
self.im.transform2(box, image.im, method, data, resample, fill)
def transpose(self, method):
"""
Transpose image (flip or rotate in 90 degree steps)
:param method: One of :py:attr:`PIL.Image.FLIP_LEFT_RIGHT`,
:py:attr:`PIL.Image.FLIP_TOP_BOTTOM`, :py:attr:`PIL.Image.ROTATE_90`,
:py:attr:`PIL.Image.ROTATE_180`, :py:attr:`PIL.Image.ROTATE_270` or
:py:attr:`PIL.Image.TRANSPOSE`.
:returns: Returns a flipped or rotated copy of this image.
"""
self.load()
return self._new(self.im.transpose(method))
def effect_spread(self, distance):
"""
Randomly spread pixels in an image.
:param distance: Distance to spread pixels.
"""
self.load()
im = self.im.effect_spread(distance)
return self._new(im)
def toqimage(self):
"""Returns a QImage copy of this image"""
from PIL import ImageQt
if not ImageQt.qt_is_installed:
raise ImportError("Qt bindings are not installed")
return ImageQt.toqimage(self)
def toqpixmap(self):
"""Returns a QPixmap copy of this image"""
from PIL import ImageQt
if not ImageQt.qt_is_installed:
raise ImportError("Qt bindings are not installed")
return ImageQt.toqpixmap(self)
# --------------------------------------------------------------------
# Lazy operations
class _ImageCrop(Image):
def __init__(self, im, box):
Image.__init__(self)
x0, y0, x1, y1 = box
if x1 < x0:
x1 = x0
if y1 < y0:
y1 = y0
self.mode = im.mode
self.size = x1-x0, y1-y0
self.__crop = x0, y0, x1, y1
self.im = im.im
def load(self):
# lazy evaluation!
if self.__crop:
self.im = self.im.crop(self.__crop)
self.__crop = None
if self.im:
return self.im.pixel_access(self.readonly)
# FIXME: future versions should optimize crop/paste
# sequences!
# --------------------------------------------------------------------
# Abstract handlers.
class ImagePointHandler(object):
# used as a mixin by point transforms (for use with im.point)
pass
class ImageTransformHandler(object):
# used as a mixin by geometry transforms (for use with im.transform)
pass
# --------------------------------------------------------------------
# Factories
#
# Debugging
def _wedge():
"Create greyscale wedge (for debugging only)"
return Image()._new(core.wedge("L"))
def new(mode, size, color=0):
"""
Creates a new image with the given mode and size.
:param mode: The mode to use for the new image. See:
:ref:`concept-modes`.
:param size: A 2-tuple, containing (width, height) in pixels.
:param color: What color to use for the image. Default is black.
If given, this should be a single integer or floating point value
for single-band modes, and a tuple for multi-band modes (one value
per band). When creating RGB images, you can also use color
strings as supported by the ImageColor module. If the color is
None, the image is not initialised.
:returns: An :py:class:`~PIL.Image.Image` object.
"""
if color is None:
# don't initialize
return Image()._new(core.new(mode, size))
if isStringType(color):
# css3-style specifier
from PIL import ImageColor
color = ImageColor.getcolor(color, mode)
return Image()._new(core.fill(mode, size, color))
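# Illustrative usage sketch (not part of the original module). For RGB
# images the color argument also accepts CSS3-style strings, resolved via
# the ImageColor module.
#
#   canvas = Image.new("RGB", (320, 240), "hotpink")
#   blank = Image.new("L", (64, 64))           # single band, black by default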
def frombytes(mode, size, data, decoder_name="raw", *args):
"""
Creates a copy of an image memory from pixel data in a buffer.
In its simplest form, this function takes three arguments
(mode, size, and unpacked pixel data).
You can also use any pixel decoder supported by PIL. For more
information on available decoders, see the section
:ref:`Writing Your Own File Decoder <file-decoders>`.
Note that this function decodes pixel data only, not entire images.
If you have an entire image in a string, wrap it in a
:py:class:`~io.BytesIO` object, and use :py:func:`~PIL.Image.open` to load
it.
:param mode: The image mode. See: :ref:`concept-modes`.
:param size: The image size.
:param data: A byte buffer containing raw data for the given mode.
:param decoder_name: What decoder to use.
:param args: Additional parameters for the given decoder.
:returns: An :py:class:`~PIL.Image.Image` object.
"""
# may pass tuple instead of argument list
if len(args) == 1 and isinstance(args[0], tuple):
args = args[0]
if decoder_name == "raw" and args == ():
args = mode
im = new(mode, size)
im.frombytes(data, decoder_name, args)
return im
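# Illustrative usage sketch (not part of the original module). With the
# default "raw" decoder, three arguments suffice; the buffer must hold
# exactly width * height * bands bytes.
#
#   data = b"\x00" * (16 * 16 * 3)             # hypothetical RGB pixel data
#   im = Image.frombytes("RGB", (16, 16), data)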
def fromstring(*args, **kw):
raise Exception("fromstring() has been removed. " +
"Please call frombytes() instead.")
def frombuffer(mode, size, data, decoder_name="raw", *args):
"""
Creates an image memory referencing pixel data in a byte buffer.
This function is similar to :py:func:`~PIL.Image.frombytes`, but uses data
in the byte buffer, where possible. This means that changes to the
    original buffer object are reflected in this image. Not all modes can
share memory; supported modes include "L", "RGBX", "RGBA", and "CMYK".
Note that this function decodes pixel data only, not entire images.
If you have an entire image file in a string, wrap it in a
**BytesIO** object, and use :py:func:`~PIL.Image.open` to load it.
In the current version, the default parameters used for the "raw" decoder
    differ from those used for :py:func:`~PIL.Image.fromstring`. This is a
bug, and will probably be fixed in a future release. The current release
issues a warning if you do this; to disable the warning, you should provide
the full set of parameters. See below for details.
:param mode: The image mode. See: :ref:`concept-modes`.
:param size: The image size.
:param data: A bytes or other buffer object containing raw
data for the given mode.
:param decoder_name: What decoder to use.
:param args: Additional parameters for the given decoder. For the
default encoder ("raw"), it's recommended that you provide the
full set of parameters::
frombuffer(mode, size, data, "raw", mode, 0, 1)
:returns: An :py:class:`~PIL.Image.Image` object.
.. versionadded:: 1.1.4
"""
# may pass tuple instead of argument list
if len(args) == 1 and isinstance(args[0], tuple):
args = args[0]
if decoder_name == "raw":
if args == ():
if warnings:
warnings.warn(
"the frombuffer defaults may change in a future release; "
"for portability, change the call to read:\n"
" frombuffer(mode, size, data, 'raw', mode, 0, 1)",
RuntimeWarning, stacklevel=2
)
args = mode, 0, -1 # may change to (mode, 0, 1) post-1.1.6
if args[0] in _MAPMODES:
im = new(mode, (1, 1))
im = im._new(
core.map_buffer(data, size, decoder_name, None, 0, args)
)
im.readonly = 1
return im
return frombytes(mode, size, data, decoder_name, args)
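# Illustrative usage sketch (not part of the original module). Passing the
# full set of "raw" parameters avoids the portability warning above; for
# mappable modes the image then shares memory with the buffer.
#
#   buf = bytearray(16 * 16)                   # hypothetical "L" pixel data
#   im = Image.frombuffer("L", (16, 16), buf, "raw", "L", 0, 1)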
def fromarray(obj, mode=None):
"""
Creates an image memory from an object exporting the array interface
(using the buffer protocol).
If obj is not contiguous, then the tobytes method is called
and :py:func:`~PIL.Image.frombuffer` is used.
:param obj: Object with array interface
:param mode: Mode to use (will be determined from type if None)
See: :ref:`concept-modes`.
:returns: An image object.
.. versionadded:: 1.1.6
"""
arr = obj.__array_interface__
shape = arr['shape']
ndim = len(shape)
try:
strides = arr['strides']
except KeyError:
strides = None
if mode is None:
try:
typekey = (1, 1) + shape[2:], arr['typestr']
mode, rawmode = _fromarray_typemap[typekey]
except KeyError:
# print typekey
raise TypeError("Cannot handle this data type")
else:
rawmode = mode
if mode in ["1", "L", "I", "P", "F"]:
ndmax = 2
elif mode == "RGB":
ndmax = 3
else:
ndmax = 4
if ndim > ndmax:
raise ValueError("Too many dimensions: %d > %d." % (ndim, ndmax))
size = shape[1], shape[0]
if strides is not None:
if hasattr(obj, 'tobytes'):
obj = obj.tobytes()
else:
obj = obj.tostring()
return frombuffer(mode, size, obj, "raw", rawmode, 0, 1)
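# Illustrative usage sketch (not part of the original module). Any object
# exporting __array_interface__ works; numpy arrays are the usual source.
#
#   import numpy as np                         # assumes numpy is installed
#   arr = np.zeros((240, 320, 3), dtype=np.uint8)
#   im = Image.fromarray(arr)                  # a 320x240 "RGB" image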
def fromqimage(im):
"""Creates an image instance from a QImage image"""
from PIL import ImageQt
if not ImageQt.qt_is_installed:
raise ImportError("Qt bindings are not installed")
return ImageQt.fromqimage(im)
def fromqpixmap(im):
"""Creates an image instance from a QPixmap image"""
from PIL import ImageQt
if not ImageQt.qt_is_installed:
raise ImportError("Qt bindings are not installed")
return ImageQt.fromqpixmap(im)
_fromarray_typemap = {
# (shape, typestr) => mode, rawmode
# first two members of shape are set to one
# ((1, 1), "|b1"): ("1", "1"), # broken
((1, 1), "|u1"): ("L", "L"),
((1, 1), "|i1"): ("I", "I;8"),
((1, 1), "<i2"): ("I", "I;16"),
((1, 1), ">i2"): ("I", "I;16B"),
((1, 1), "<i4"): ("I", "I;32"),
((1, 1), ">i4"): ("I", "I;32B"),
((1, 1), "<f4"): ("F", "F;32F"),
((1, 1), ">f4"): ("F", "F;32BF"),
((1, 1), "<f8"): ("F", "F;64F"),
((1, 1), ">f8"): ("F", "F;64BF"),
((1, 1, 3), "|u1"): ("RGB", "RGB"),
((1, 1, 4), "|u1"): ("RGBA", "RGBA"),
}
# shortcuts
_fromarray_typemap[((1, 1), _ENDIAN + "i4")] = ("I", "I")
_fromarray_typemap[((1, 1), _ENDIAN + "f4")] = ("F", "F")
def _decompression_bomb_check(size):
if MAX_IMAGE_PIXELS is None:
return
pixels = size[0] * size[1]
if pixels > MAX_IMAGE_PIXELS:
warnings.warn(
"Image size (%d pixels) exceeds limit of %d pixels, "
"could be decompression bomb DOS attack." %
(pixels, MAX_IMAGE_PIXELS),
DecompressionBombWarning)
def open(fp, mode="r"):
"""
Opens and identifies the given image file.
This is a lazy operation; this function identifies the file, but
the file remains open and the actual image data is not read from
the file until you try to process the data (or call the
:py:meth:`~PIL.Image.Image.load` method). See
:py:func:`~PIL.Image.new`.
:param fp: A filename (string), pathlib.Path object or a file object.
The file object must implement :py:meth:`~file.read`,
:py:meth:`~file.seek`, and :py:meth:`~file.tell` methods,
and be opened in binary mode.
:param mode: The mode. If given, this argument must be "r".
:returns: An :py:class:`~PIL.Image.Image` object.
:exception IOError: If the file cannot be found, or the image cannot be
opened and identified.
"""
if mode != "r":
raise ValueError("bad mode %r" % mode)
filename = ""
if isPath(fp):
filename = fp
elif sys.version_info >= (3, 4):
from pathlib import Path
if isinstance(fp, Path):
filename = str(fp.resolve())
if filename:
fp = builtins.open(filename, "rb")
try:
fp.seek(0)
except (AttributeError, io.UnsupportedOperation):
fp = io.BytesIO(fp.read())
prefix = fp.read(16)
preinit()
def _open_core(fp, filename, prefix):
for i in ID:
try:
factory, accept = OPEN[i]
if not accept or accept(prefix):
fp.seek(0)
im = factory(fp, filename)
_decompression_bomb_check(im.size)
return im
except (SyntaxError, IndexError, TypeError, struct.error):
# Leave disabled by default, spams the logs with image
# opening failures that are entirely expected.
#logger.debug("", exc_info=True)
continue
return None
im = _open_core(fp, filename, prefix)
if im is None:
if init():
im = _open_core(fp, filename, prefix)
if im:
return im
raise IOError("cannot identify image file %r"
% (filename if filename else fp))
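# Illustrative usage sketch (not part of the original module). open() is
# lazy, so wrap in-memory data in BytesIO and call load() if the buffer may
# go away before the pixels are needed.
#
#   import io
#   im = Image.open(io.BytesIO(png_bytes))     # png_bytes is hypothetical
#   im.load()                                  # force a full read now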
#
# Image processing.
def alpha_composite(im1, im2):
"""
Alpha composite im2 over im1.
:param im1: The first image.
:param im2: The second image. Must have the same mode and size as
the first image.
:returns: An :py:class:`~PIL.Image.Image` object.
"""
im1.load()
im2.load()
return im1._new(core.alpha_composite(im1.im, im2.im))
def blend(im1, im2, alpha):
"""
Creates a new image by interpolating between two input images, using
a constant alpha.::
out = image1 * (1.0 - alpha) + image2 * alpha
:param im1: The first image.
:param im2: The second image. Must have the same mode and size as
the first image.
:param alpha: The interpolation alpha factor. If alpha is 0.0, a
copy of the first image is returned. If alpha is 1.0, a copy of
the second image is returned. There are no restrictions on the
alpha value. If necessary, the result is clipped to fit into
the allowed output range.
:returns: An :py:class:`~PIL.Image.Image` object.
"""
im1.load()
im2.load()
return im1._new(core.blend(im1.im, im2.im, alpha))
def composite(image1, image2, mask):
"""
Create composite image by blending images using a transparency mask.
:param image1: The first image.
:param image2: The second image. Must have the same mode and
size as the first image.
:param mask: A mask image. This image can have mode
"1", "L", or "RGBA", and must have the same size as the
other two images.
"""
image = image2.copy()
image.paste(image1, None, mask)
return image
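# Illustrative usage sketch (not part of the original module). im1, im2 and
# mask are hypothetical, equally sized images; where an "L" mask is 255 the
# first image shows through, and where it is 0 the second does.
#
#   out = Image.composite(im1, im2, mask)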
def eval(image, *args):
"""
Applies the function (which should take one argument) to each pixel
in the given image. If the image has more than one band, the same
function is applied to each band. Note that the function is
evaluated once for each possible pixel value, so you cannot use
random components or other generators.
:param image: The input image.
:param function: A function object, taking one integer argument.
:returns: An :py:class:`~PIL.Image.Image` object.
"""
return image.point(args[0])
def merge(mode, bands):
"""
Merge a set of single band images into a new multiband image.
:param mode: The mode to use for the output image. See:
:ref:`concept-modes`.
:param bands: A sequence containing one single-band image for
each band in the output image. All bands must have the
same size.
:returns: An :py:class:`~PIL.Image.Image` object.
"""
if getmodebands(mode) != len(bands) or "*" in mode:
raise ValueError("wrong number of bands")
for im in bands[1:]:
if im.mode != getmodetype(mode):
raise ValueError("mode mismatch")
if im.size != bands[0].size:
raise ValueError("size mismatch")
im = core.new(mode, bands[0].size)
for i in range(getmodebands(mode)):
bands[i].load()
im.putband(bands[i].im, i)
return bands[0]._new(im)
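# Illustrative usage sketch (not part of the original module). split() and
# merge() are inverses, which makes simple per-band shuffles one-liners.
#
#   r, g, b = im.split()                       # im is a hypothetical RGB image
#   swapped = Image.merge("RGB", (b, g, r))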
# --------------------------------------------------------------------
# Plugin registry
def register_open(id, factory, accept=None):
"""
Register an image file plugin. This function should not be used
in application code.
:param id: An image format identifier.
:param factory: An image file factory method.
:param accept: An optional function that can be used to quickly
reject images having another format.
"""
id = id.upper()
ID.append(id)
OPEN[id] = factory, accept
def register_mime(id, mimetype):
"""
Registers an image MIME type. This function should not be used
in application code.
:param id: An image format identifier.
:param mimetype: The image MIME type for this format.
"""
MIME[id.upper()] = mimetype
def register_save(id, driver):
"""
Registers an image save function. This function should not be
used in application code.
:param id: An image format identifier.
:param driver: A function to save images in this format.
"""
SAVE[id.upper()] = driver
def register_save_all(id, driver):
"""
Registers an image function to save all the frames
of a multiframe format. This function should not be
used in application code.
:param id: An image format identifier.
:param driver: A function to save images in this format.
"""
SAVE_ALL[id.upper()] = driver
def register_extension(id, extension):
"""
Registers an image extension. This function should not be
used in application code.
:param id: An image format identifier.
:param extension: An extension used for this format.
"""
EXTENSION[extension.lower()] = id.upper()
# --------------------------------------------------------------------
# Simple display support. User code may override this.
def _show(image, **options):
# override me, as necessary
_showxv(image, **options)
def _showxv(image, title=None, **options):
from PIL import ImageShow
ImageShow.show(image, title, **options)
# --------------------------------------------------------------------
# Effects
def effect_mandelbrot(size, extent, quality):
"""
Generate a Mandelbrot set covering the given extent.
:param size: The requested size in pixels, as a 2-tuple:
(width, height).
:param extent: The extent to cover, as a 4-tuple:
                   (x0, y0, x1, y1).
:param quality: Quality.
"""
return Image()._new(core.effect_mandelbrot(size, extent, quality))
def effect_noise(size, sigma):
"""
Generate Gaussian noise centered around 128.
:param size: The requested size in pixels, as a 2-tuple:
(width, height).
:param sigma: Standard deviation of noise.
"""
return Image()._new(core.effect_noise(size, sigma))
# End of file
| {
"content_hash": "dd8676a3521347fb498e2c32378f1d26",
"timestamp": "",
"source": "github",
"line_count": 2465,
"max_line_length": 79,
"avg_line_length": 32.640973630831645,
"alnum_prop": 0.5632985334327616,
"repo_name": "kronicz/ecommerce-2",
"id": "06bf7ce945978f20ff0359876226d070041c92bf",
"size": "81182",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "lib/python2.7/site-packages/PIL/Image.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "1350"
},
{
"name": "CSS",
"bytes": "86110"
},
{
"name": "HTML",
"bytes": "143921"
},
{
"name": "JavaScript",
"bytes": "183948"
},
{
"name": "Python",
"bytes": "6475376"
},
{
"name": "Shell",
"bytes": "3773"
}
],
"symlink_target": ""
} |
import time
import abc
import logging
import xml.etree.ElementTree as ET
from collections import OrderedDict
import re
import csv
import tempfile
import luigi
from luigi import Task
logger = logging.getLogger('luigi-interface')
try:
import requests
except ImportError:
logger.warning("This module requires the python package 'requests'.")
try:
from urlparse import urlsplit
except ImportError:
from urllib.parse import urlsplit
def get_soql_fields(soql):
"""
Gets queried columns names.
"""
soql_fields = re.search('(?<=select)(?s)(.*)(?=from)', soql, re.IGNORECASE) # get fields
soql_fields = re.sub(' ', '', soql_fields.group()) # remove extra spaces
soql_fields = re.sub('\t', '', soql_fields) # remove tabs
    fields = re.split(',|\n|\r', soql_fields)  # split on commas and newlines
fields = [field for field in fields if field != ''] # remove empty strings
return fields
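# Illustrative sketch (not part of the original module) of what the helper
# extracts from a query:
#
#   get_soql_fields("SELECT Id, Name FROM Account")
#   # -> ['Id', 'Name']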
def ensure_utf(value):
return value.encode("utf-8") if isinstance(value, unicode) else value
def parse_results(fields, data):
"""
    Traverses the ordered dictionary of records, calling _traverse_results() to recursively read nested objects into flat rows.
"""
master = []
for record in data['records']: # for each 'record' in response
row = [None] * len(fields) # create null list the length of number of columns
for obj, value in record.iteritems(): # for each obj in record
if not isinstance(value, (dict, list, tuple)): # if not data structure
if obj in fields:
row[fields.index(obj)] = ensure_utf(value)
elif isinstance(value, dict) and obj != 'attributes': # traverse down into object
path = obj
_traverse_results(value, fields, row, path)
master.append(row)
return master
def _traverse_results(value, fields, row, path):
"""
Helper method for parse_results().
Traverses through ordered dict and recursively calls itself when encountering a dictionary
"""
for f, v in value.iteritems(): # for each item in obj
field_name = '{path}.{name}'.format(path=path, name=f) if path else f
if not isinstance(v, (dict, list, tuple)): # if not data structure
if field_name in fields:
row[fields.index(field_name)] = ensure_utf(v)
elif isinstance(v, dict) and f != 'attributes': # it is a dict
_traverse_results(v, fields, row, field_name)
class salesforce(luigi.Config):
"""
Config system to get config vars from 'salesforce' section in configuration file.
Did not include sandbox_name here, as the user may have multiple sandboxes.
"""
username = luigi.Parameter(default='')
password = luigi.Parameter(default='')
security_token = luigi.Parameter(default='')
# sandbox token
sb_security_token = luigi.Parameter(default='')
class QuerySalesforce(Task):
@abc.abstractproperty
def object_name(self):
"""
Override to return the SF object we are querying.
        Must have the SF "__c" suffix if it is a custom object.
"""
return None
@property
def use_sandbox(self):
"""
Override to specify use of SF sandbox.
True iff we should be uploading to a sandbox environment instead of the production organization.
"""
return False
@property
def sandbox_name(self):
"""Override to specify the sandbox name if it is intended to be used."""
return None
@abc.abstractproperty
def soql(self):
"""Override to return the raw string SOQL or the path to it."""
return None
@property
def is_soql_file(self):
"""Override to True if soql property is a file path."""
return False
def run(self):
if self.use_sandbox and not self.sandbox_name:
            raise Exception("Parameter sandbox_name must be provided when uploading to a Salesforce Sandbox")
sf = SalesforceAPI(salesforce().username,
salesforce().password,
salesforce().security_token,
salesforce().sb_security_token,
self.sandbox_name)
job_id = sf.create_operation_job('query', self.object_name)
logger.info("Started query job %s in salesforce for object %s" % (job_id, self.object_name))
batch_id = ''
msg = ''
try:
if self.is_soql_file:
with open(self.soql, 'r') as infile:
self.soql = infile.read()
batch_id = sf.create_batch(job_id, self.soql)
logger.info("Creating new batch %s to query: %s for job: %s." % (batch_id, self.object_name, job_id))
status = sf.block_on_batch(job_id, batch_id)
if status['state'].lower() == 'failed':
msg = "Batch failed with message: %s" % status['state_message']
logger.error(msg)
# don't raise exception if it's b/c of an included relationship
# normal query will execute (with relationship) after bulk job is closed
if 'foreign key relationships not supported' not in status['state_message'].lower():
raise Exception(msg)
else:
result_id = sf.get_batch_results(job_id, batch_id)
data = sf.get_batch_result(job_id, batch_id, result_id)
with open(self.output().fn, 'w') as outfile:
outfile.write(data)
finally:
logger.info("Closing job %s" % job_id)
sf.close_job(job_id)
if 'state_message' in status and 'foreign key relationships not supported' in status['state_message'].lower():
logger.info("Retrying with REST API query")
data_file = sf.query_all(self.soql)
reader = csv.reader(data_file)
with open(self.output().fn, 'w') as outfile:
writer = csv.writer(outfile, dialect='excel')
for row in reader:
writer.writerow(row)
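# Illustrative usage sketch (not part of the original module). A minimal
# concrete task wiring the pieces together; the object name, SOQL and
# output path are hypothetical.
#
#   class DumpContacts(QuerySalesforce):
#       object_name = 'Contact'
#       soql = "SELECT Id, Email FROM Contact"
#
#       def output(self):
#           return luigi.LocalTarget('contacts.csv')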
class SalesforceAPI(object):
"""
Class used to interact with the SalesforceAPI. Currently provides only the
methods necessary for performing a bulk upload operation.
"""
API_VERSION = 34.0
SOAP_NS = "{urn:partner.soap.sforce.com}"
API_NS = "{http://www.force.com/2009/06/asyncapi/dataload}"
def __init__(self, username, password, security_token, sb_token=None, sandbox_name=None):
self.username = username
self.password = password
self.security_token = security_token
self.sb_security_token = sb_token
self.sandbox_name = sandbox_name
if self.sandbox_name:
self.username += ".%s" % self.sandbox_name
self.session_id = None
self.server_url = None
self.hostname = None
def start_session(self):
"""
Starts a Salesforce session and determines which SF instance to use for future requests.
"""
if self.has_active_session():
raise Exception("Session already in progress.")
response = requests.post(self._get_login_url(),
headers=self._get_login_headers(),
data=self._get_login_xml())
response.raise_for_status()
root = ET.fromstring(response.text)
for e in root.iter("%ssessionId" % self.SOAP_NS):
if self.session_id:
raise Exception("Invalid login attempt. Multiple session ids found.")
self.session_id = e.text
for e in root.iter("%sserverUrl" % self.SOAP_NS):
if self.server_url:
raise Exception("Invalid login attempt. Multiple server urls found.")
self.server_url = e.text
if not self.has_active_session():
raise Exception("Invalid login attempt resulted in null sessionId [%s] and/or serverUrl [%s]." %
(self.session_id, self.server_url))
self.hostname = urlsplit(self.server_url).hostname
def has_active_session(self):
return self.session_id and self.server_url
def query(self, query, **kwargs):
"""
Return the result of a Salesforce SOQL query as a dict decoded from the Salesforce response JSON payload.
:param query: the SOQL query to send to Salesforce, e.g. "SELECT id from Lead WHERE email = 'a@b.com'"
"""
params = {'q': query}
response = requests.get(self._get_norm_query_url(),
headers=self._get_rest_headers(),
params=params,
**kwargs)
if response.status_code != requests.codes.ok:
raise Exception(response.content)
return response.json()
def query_more(self, next_records_identifier, identifier_is_url=False, **kwargs):
"""
Retrieves more results from a query that returned more results
than the batch maximum. Returns a dict decoded from the Salesforce
response JSON payload.
:param next_records_identifier: either the Id of the next Salesforce
object in the result, or a URL to the
next record in the result.
:param identifier_is_url: True if `next_records_identifier` should be
treated as a URL, False if
                                  `next_records_identifier` should be treated as
an Id.
"""
if identifier_is_url:
# Don't use `self.base_url` here because the full URI is provided
url = (u'https://{instance}{next_record_url}'
.format(instance=self.hostname,
next_record_url=next_records_identifier))
else:
            url = self._get_norm_query_url() + '/{next_record_id}'
url = url.format(next_record_id=next_records_identifier)
response = requests.get(url, headers=self._get_rest_headers(), **kwargs)
response.raise_for_status()
return response.json()
def query_all(self, query, **kwargs):
"""
Returns the full set of results for the `query`. This is a
convenience wrapper around `query(...)` and `query_more(...)`.
The returned dict is the decoded JSON payload from the final call to
Salesforce, but with the `totalSize` field representing the full
number of results retrieved and the `records` list representing the
full list of records retrieved.
:param query: the SOQL query to send to Salesforce, e.g.
`SELECT Id FROM Lead WHERE Email = "waldo@somewhere.com"`
"""
# Make the initial query to Salesforce
response = self.query(query, **kwargs)
# get fields
fields = get_soql_fields(query)
# put fields and first page of results into a temp list to be written to TempFile
tmp_list = [fields]
tmp_list.extend(parse_results(fields, response))
tmp_dir = luigi.configuration.get_config().get('salesforce', 'local-tmp-dir', None)
tmp_file = tempfile.TemporaryFile(mode='a+b', dir=tmp_dir)
writer = csv.writer(tmp_file)
writer.writerows(tmp_list)
# The number of results might have exceeded the Salesforce batch limit
# so check whether there are more results and retrieve them if so.
length = len(response['records'])
while not response['done']:
response = self.query_more(response['nextRecordsUrl'], identifier_is_url=True, **kwargs)
writer.writerows(parse_results(fields, response))
length += len(response['records'])
if not length % 10000:
logger.info('Requested {0} lines...'.format(length))
logger.info('Requested a total of {0} lines.'.format(length))
tmp_file.seek(0)
return tmp_file
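    # Illustrative usage sketch (not part of the original module; the
    # credentials below are hypothetical):
    #
    #   sf = SalesforceAPI('user@example.com', 'secret', 'token')
    #   results = sf.query_all("SELECT Id, Email FROM Lead")
    #   print(results.read())                  # CSV bytes, header row first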
# Generic Rest Function
def restful(self, path, params):
"""
Allows you to make a direct REST call if you know the path
Arguments:
        :param path: The path of the request. Example: 'sobjects/User/ABC123/password'
:param params: dict of parameters to pass to the path
"""
url = self._get_norm_base_url() + path
response = requests.get(url, headers=self._get_rest_headers(), params=params)
if response.status_code != 200:
raise Exception(response)
json_result = response.json(object_pairs_hook=OrderedDict)
if len(json_result) == 0:
return None
else:
return json_result
def create_operation_job(self, operation, obj, external_id_field_name=None, content_type='CSV'):
"""
Creates a new SF job that for doing any operation (insert, upsert, update, delete, query)
:param operation: delete, insert, query, upsert, update, hardDelete. Must be lowercase.
:param obj: Parent SF object
:param external_id_field_name: Optional.
:param content_type: XML, CSV, ZIP_CSV, or ZIP_XML. Defaults to CSV
"""
if not self.has_active_session():
self.start_session()
response = requests.post(self._get_create_job_url(),
headers=self._get_create_job_headers(),
data=self._get_create_job_xml(operation, obj, external_id_field_name, content_type))
response.raise_for_status()
root = ET.fromstring(response.text)
job_id = root.find('%sid' % self.API_NS).text
return job_id
def get_job_details(self, job_id):
"""
Gets all details for existing job
:param job_id: job_id as returned by 'create_operation_job(...)'
:return: job info as xml
"""
response = requests.get(self._get_job_details_url(job_id))
response.raise_for_status()
return response
def abort_job(self, job_id):
"""
Abort an existing job. When a job is aborted, no more records are processed.
Changes to data may already have been committed and aren't rolled back.
:param job_id: job_id as returned by 'create_operation_job(...)'
:return: abort response as xml
"""
response = requests.post(self._get_abort_job_url(job_id),
headers=self._get_abort_job_headers(),
data=self._get_abort_job_xml())
response.raise_for_status()
return response
def close_job(self, job_id):
"""
Closes job
:param job_id: job_id as returned by 'create_operation_job(...)'
:return: close response as xml
"""
if not job_id or not self.has_active_session():
raise Exception("Can not close job without valid job_id and an active session.")
response = requests.post(self._get_close_job_url(job_id),
headers=self._get_close_job_headers(),
data=self._get_close_job_xml())
response.raise_for_status()
return response
def create_batch(self, job_id, data, file_type='csv'):
"""
Creates a batch with either a string of data or a file containing data.
If a file is provided, this will pull the contents of the file_target into memory when running.
That shouldn't be a problem for any files that meet the Salesforce single batch upload
size limit (10MB) and is done to ensure compressed files can be uploaded properly.
:param job_id: job_id as returned by 'create_operation_job(...)'
:param data:
:param file_type:
:return: Returns batch_id
"""
if not job_id or not self.has_active_session():
raise Exception("Can not create a batch without a valid job_id and an active session.")
headers = self._get_create_batch_content_headers(file_type)
headers['Content-Length'] = len(data)
response = requests.post(self._get_create_batch_url(job_id),
headers=headers,
data=data)
response.raise_for_status()
root = ET.fromstring(response.text)
batch_id = root.find('%sid' % self.API_NS).text
return batch_id
def block_on_batch(self, job_id, batch_id, sleep_time_seconds=5, max_wait_time_seconds=-1):
"""
Blocks until @batch_id is completed or failed.
:param job_id:
:param batch_id:
:param sleep_time_seconds:
:param max_wait_time_seconds:
"""
if not job_id or not batch_id or not self.has_active_session():
raise Exception("Can not block on a batch without a valid batch_id, job_id and an active session.")
start_time = time.time()
status = {}
while max_wait_time_seconds < 0 or time.time() - start_time < max_wait_time_seconds:
status = self._get_batch_info(job_id, batch_id)
logger.info("Batch %s Job %s in state %s. %s records processed. %s records failed." %
(batch_id, job_id, status['state'], status['num_processed'], status['num_failed']))
if status['state'].lower() in ["completed", "failed"]:
return status
time.sleep(sleep_time_seconds)
        raise Exception("Batch did not complete in %s seconds. Final status was: %s" % (max_wait_time_seconds, status))
def get_batch_results(self, job_id, batch_id):
"""
Get results of a batch that has completed processing.
If the batch is a CSV file, the response is in CSV format.
If the batch is an XML file, the response is in XML format.
:param job_id: job_id as returned by 'create_operation_job(...)'
:param batch_id: batch_id as returned by 'create_batch(...)'
:return: batch result response as either CSV or XML, dependent on the batch
"""
response = requests.get(self._get_batch_results_url(job_id, batch_id),
headers=self._get_batch_info_headers())
response.raise_for_status()
root = ET.fromstring(response.text)
result = root.find('%sresult' % self.API_NS).text
return result
def get_batch_result(self, job_id, batch_id, result_id):
"""
Gets result back from Salesforce as whatever type was originally sent in create_batch (xml, or csv).
:param job_id:
:param batch_id:
:param result_id:
"""
response = requests.get(self._get_batch_result_url(job_id, batch_id, result_id),
headers=self._get_session_headers())
response.raise_for_status()
return response.content
def _get_batch_info(self, job_id, batch_id):
response = requests.get(self._get_batch_info_url(job_id, batch_id),
headers=self._get_batch_info_headers())
response.raise_for_status()
root = ET.fromstring(response.text)
result = {
"state": root.find('%sstate' % self.API_NS).text,
"num_processed": root.find('%snumberRecordsProcessed' % self.API_NS).text,
"num_failed": root.find('%snumberRecordsFailed' % self.API_NS).text,
}
if root.find('%sstateMessage' % self.API_NS) is not None:
result['state_message'] = root.find('%sstateMessage' % self.API_NS).text
return result
def _get_login_url(self):
server = "login" if not self.sandbox_name else "test"
return "https://%s.salesforce.com/services/Soap/u/%s" % (server, self.API_VERSION)
def _get_base_url(self):
return "https://%s/services" % self.hostname
def _get_bulk_base_url(self):
# Expands on Base Url for Bulk
return "%s/async/%s" % (self._get_base_url(), self.API_VERSION)
def _get_norm_base_url(self):
# Expands on Base Url for Norm
return "%s/data/v%s" % (self._get_base_url(), self.API_VERSION)
def _get_norm_query_url(self):
# Expands on Norm Base Url
return "%s/query" % self._get_norm_base_url()
def _get_create_job_url(self):
# Expands on Bulk url
return "%s/job" % (self._get_bulk_base_url())
def _get_job_id_url(self, job_id):
# Expands on Job Creation url
return "%s/%s" % (self._get_create_job_url(), job_id)
def _get_job_details_url(self, job_id):
# Expands on basic Job Id url
return self._get_job_id_url(job_id)
def _get_abort_job_url(self, job_id):
# Expands on basic Job Id url
return self._get_job_id_url(job_id)
def _get_close_job_url(self, job_id):
# Expands on basic Job Id url
return self._get_job_id_url(job_id)
def _get_create_batch_url(self, job_id):
# Expands on basic Job Id url
return "%s/batch" % (self._get_job_id_url(job_id))
def _get_batch_info_url(self, job_id, batch_id):
# Expands on Batch Creation url
return "%s/%s" % (self._get_create_batch_url(job_id), batch_id)
def _get_batch_results_url(self, job_id, batch_id):
# Expands on Batch Info url
return "%s/result" % (self._get_batch_info_url(job_id, batch_id))
def _get_batch_result_url(self, job_id, batch_id, result_id):
# Expands on Batch Results url
return "%s/%s" % (self._get_batch_results_url(job_id, batch_id), result_id)
def _get_login_headers(self):
headers = {
'Content-Type': "text/xml; charset=UTF-8",
'SOAPAction': 'login'
}
return headers
def _get_session_headers(self):
headers = {
'X-SFDC-Session': self.session_id
}
return headers
def _get_norm_session_headers(self):
headers = {
'Authorization': 'Bearer %s' % self.session_id
}
return headers
def _get_rest_headers(self):
headers = self._get_norm_session_headers()
headers['Content-Type'] = 'application/json'
return headers
def _get_job_headers(self):
headers = self._get_session_headers()
headers['Content-Type'] = "application/xml; charset=UTF-8"
return headers
def _get_create_job_headers(self):
return self._get_job_headers()
def _get_abort_job_headers(self):
return self._get_job_headers()
def _get_close_job_headers(self):
return self._get_job_headers()
def _get_create_batch_content_headers(self, content_type):
headers = self._get_session_headers()
content_type = 'text/csv' if content_type.lower() == 'csv' else 'application/xml'
headers['Content-Type'] = "%s; charset=UTF-8" % content_type
return headers
def _get_batch_info_headers(self):
return self._get_session_headers()
def _get_login_xml(self):
return """<?xml version="1.0" encoding="utf-8" ?>
<env:Envelope xmlns:xsd="http://www.w3.org/2001/XMLSchema"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:env="http://schemas.xmlsoap.org/soap/envelope/">
<env:Body>
<n1:login xmlns:n1="urn:partner.soap.sforce.com">
<n1:username>%s</n1:username>
<n1:password>%s%s</n1:password>
</n1:login>
</env:Body>
</env:Envelope>
""" % (self.username, self.password, self.security_token if self.sandbox_name is None else self.sb_security_token)
def _get_create_job_xml(self, operation, obj, external_id_field_name, content_type):
external_id_field_name_element = "" if not external_id_field_name else \
"\n<externalIdFieldName>%s</externalIdFieldName>" % external_id_field_name
# Note: "Unable to parse job" error may be caused by reordering fields.
# ExternalIdFieldName element must be before contentType element.
return """<?xml version="1.0" encoding="UTF-8"?>
<jobInfo xmlns="http://www.force.com/2009/06/asyncapi/dataload">
<operation>%s</operation>
<object>%s</object>
%s
<contentType>%s</contentType>
</jobInfo>
""" % (operation, obj, external_id_field_name_element, content_type)
def _get_abort_job_xml(self):
return """<?xml version="1.0" encoding="UTF-8"?>
<jobInfo xmlns="http://www.force.com/2009/06/asyncapi/dataload">
<state>Aborted</state>
</jobInfo>
"""
def _get_close_job_xml(self):
return """<?xml version="1.0" encoding="UTF-8"?>
<jobInfo xmlns="http://www.force.com/2009/06/asyncapi/dataload">
<state>Closed</state>
</jobInfo>
"""
| {
"content_hash": "5487cbbd36fb1f7a48302d714b21785c",
"timestamp": "",
"source": "github",
"line_count": 650,
"max_line_length": 122,
"avg_line_length": 38.793846153846154,
"alnum_prop": 0.5866116751269036,
"repo_name": "Wattpad/luigi",
"id": "dadbd86a65b7a5462fc0a2a906abdf574980e12e",
"size": "25819",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "luigi/contrib/salesforce.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "8274"
},
{
"name": "JavaScript",
"bytes": "35864"
},
{
"name": "Python",
"bytes": "602046"
}
],
"symlink_target": ""
} |
import urllib.request
import os
import tarfile
import tempfile
import pypandoc
import pymongo
# setup database
mongodb_url = os.environ.get('MONGODB_URL', 'mongodb://db:27017/db')
mongo_client = pymongo.MongoClient(mongodb_url)
db = mongo_client.get_default_database()
def fetch_and_convert_tex(id):
try:
with tempfile.TemporaryDirectory() as workdir:
print('PATH', os.environ['PATH'])
# download an archive from arXiv
archive_path = os.path.join(workdir, 'archive.tar.gz')
urllib.request.urlretrieve(
"https://arxiv.org/e-print/{}".format(id), archive_path)
# extract the archive
tar = tarfile.open(archive_path)
tar.extractall(workdir)
tar.close()
# DEBUG
print('List of contents', os.listdir(workdir))
# search for a TeX source
tex_files = [x for x in os.listdir(workdir) if x.endswith('.tex')]
print('TeX', tex_files)
            def hasDC(texpath):
                # keep only sources that declare a document class
                with open(os.path.join(workdir, texpath), 'r') as f:
                    return "documentclass" in f.read()
tex_files = list(filter(hasDC, tex_files))
if len(tex_files) == 0:
return False
tex_filepath = os.path.join(workdir, tex_files[0])
print(tex_filepath)
# convert a TeX source to HTML
pandoc_dir = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'pandoc')
os.chdir(workdir)
extra_args = ['--self-contained', '--data-dir', pandoc_dir]
print(pandoc_dir)
print(os.listdir(pandoc_dir))
output = pypandoc.convert_file(
tex_filepath, 'html5', extra_args=extra_args)
paper_id = db.papers.insert_one({
"arxiv_id": id,
"content": output
}).inserted_id
return paper_id
    except Exception as err:
        # Don't crash the worker on bad archives, missing TeX sources or
        # pandoc failures, but leave a trace instead of failing silently.
        print('Failed to process {}: {}'.format(id, err))
        return False
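# Illustrative usage sketch (not part of the original module; the arXiv id
# below is hypothetical):
#
#   paper_id = fetch_and_convert_tex('1706.03762')
#   if paper_id:
#       print('stored paper as', paper_id)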
| {
"content_hash": "8ec51bf38838628aeacb85f5804fcb64",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 78,
"avg_line_length": 30.34285714285714,
"alnum_prop": 0.5362523540489642,
"repo_name": "uetchy/readable-paper",
"id": "fcd1ada8ac4d39f1b7461d447549af1fdb058b14",
"size": "2124",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "worker/arxiv_worker.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "Dockerfile",
"bytes": "1405"
},
{
"name": "Python",
"bytes": "4939"
},
{
"name": "Smarty",
"bytes": "1185"
}
],
"symlink_target": ""
} |
"""Handle file URL"""
import mimetypes
import os
try:
    # Python 3
    from urllib.request import url2pathname
except ImportError:
    # Python 2
    from urllib import url2pathname
import multipla
content_encodings = multipla.power_up('scriba.content_encodings')
content_types = multipla.power_up('scriba.content_types')
class FileResource(object):
def __init__(self, url, **params):
self.url = url
self.params = params
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
pass
def read(self):
with open(url2pathname(self.url.path), 'rb') as file_:
return self._read(file_.read())
def _read(self, content):
params = self.params.copy()
content_type, encoding = mimetypes.guess_type(self.url.path)
content_type = params.pop('content_type', content_type)
encoding = params.pop('content_encoding', encoding)
content = content_encodings.get(encoding).decode(content)
return content_types.get(content_type).parse(content, **params)
def write(self, content):
path = url2pathname(self.url.path)
with open(path, 'wb') as file_:
            file_.write(self._write(content))
def _write(self, content):
params = self.params.copy()
content_type, encoding = mimetypes.guess_type(self.url.path)
content_type = params.pop('content_type', content_type)
encoding = params.pop('content_encoding', encoding)
content = content_types.get(content_type).parse(content, **params)
content = content_encodings.get(encoding).decode(content)
return content
def erase(self):
os.remove(url2pathname(self.url.path))
def read(url, **args):
    """Get the file/directory from a file URL."""
with FileResource(url, **args) as resource:
return resource.read()
def write(url, content, **args):
"""Put the object/collection into a file URL."""
with FileResource(url, **args) as resource:
resource.write(content)
def erase(url, **args):
"""Remove the sample from a file URL."""
FileResource(url, **args).erase()
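# Illustrative usage sketch (not part of the original module; the paths are
# hypothetical and assume a content-type plugin for JSON is registered).
#
#   from urllib.parse import urlsplit
#   data = read(urlsplit('file:///tmp/sample.json'))
#   write(urlsplit('file:///tmp/copy.json'), data)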
| {
"content_hash": "c6fbd3535789138feb51d47b2ba86568",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 74,
"avg_line_length": 30.291666666666668,
"alnum_prop": 0.6405318661164603,
"repo_name": "monkeython/scriba",
"id": "14174037ec74dbe2e050250f799bbe177b380c0c",
"size": "2181",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scriba/schemes/file.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "38661"
}
],
"symlink_target": ""
} |
"""Provide common Z-Wave JS fixtures."""
import asyncio
import copy
import io
import json
from unittest.mock import AsyncMock, patch
import pytest
from zwave_js_server.event import Event
from zwave_js_server.model.driver import Driver
from zwave_js_server.model.node import Node
from zwave_js_server.version import VersionInfo
from tests.common import MockConfigEntry, load_fixture
# Add-on fixtures
@pytest.fixture(name="addon_info_side_effect")
def addon_info_side_effect_fixture():
"""Return the add-on info side effect."""
return None
@pytest.fixture(name="addon_info")
def mock_addon_info(addon_info_side_effect):
"""Mock Supervisor add-on info."""
with patch(
"homeassistant.components.zwave_js.addon.async_get_addon_info",
side_effect=addon_info_side_effect,
) as addon_info:
addon_info.return_value = {
"options": {},
"state": None,
"update_available": False,
"version": None,
}
yield addon_info
@pytest.fixture(name="addon_running")
def mock_addon_running(addon_info):
"""Mock add-on already running."""
addon_info.return_value["state"] = "started"
return addon_info
@pytest.fixture(name="addon_installed")
def mock_addon_installed(addon_info):
"""Mock add-on already installed but not running."""
addon_info.return_value["state"] = "stopped"
addon_info.return_value["version"] = "1.0"
return addon_info
@pytest.fixture(name="addon_options")
def mock_addon_options(addon_info):
"""Mock add-on options."""
return addon_info.return_value["options"]
@pytest.fixture(name="set_addon_options_side_effect")
def set_addon_options_side_effect_fixture():
"""Return the set add-on options side effect."""
return None
@pytest.fixture(name="set_addon_options")
def mock_set_addon_options(set_addon_options_side_effect):
"""Mock set add-on options."""
with patch(
"homeassistant.components.zwave_js.addon.async_set_addon_options",
side_effect=set_addon_options_side_effect,
) as set_options:
yield set_options
@pytest.fixture(name="install_addon")
def mock_install_addon():
"""Mock install add-on."""
with patch(
"homeassistant.components.zwave_js.addon.async_install_addon"
) as install_addon:
yield install_addon
@pytest.fixture(name="update_addon")
def mock_update_addon():
"""Mock update add-on."""
with patch(
"homeassistant.components.zwave_js.addon.async_update_addon"
) as update_addon:
yield update_addon
@pytest.fixture(name="start_addon_side_effect")
def start_addon_side_effect_fixture():
    """Return the start add-on side effect."""
return None
@pytest.fixture(name="start_addon")
def mock_start_addon(start_addon_side_effect):
"""Mock start add-on."""
with patch(
"homeassistant.components.zwave_js.addon.async_start_addon",
side_effect=start_addon_side_effect,
) as start_addon:
yield start_addon
@pytest.fixture(name="stop_addon")
def stop_addon_fixture():
"""Mock stop add-on."""
with patch(
"homeassistant.components.zwave_js.addon.async_stop_addon"
) as stop_addon:
yield stop_addon
@pytest.fixture(name="uninstall_addon")
def uninstall_addon_fixture():
"""Mock uninstall add-on."""
with patch(
"homeassistant.components.zwave_js.addon.async_uninstall_addon"
) as uninstall_addon:
yield uninstall_addon
@pytest.fixture(name="create_snapshot")
def create_snapshot_fixture():
    """Mock create snapshot."""
    with patch(
        "homeassistant.components.zwave_js.addon.async_create_snapshot"
    ) as create_snapshot:
        yield create_snapshot
@pytest.fixture(name="controller_state", scope="session")
def controller_state_fixture():
"""Load the controller state fixture data."""
return json.loads(load_fixture("zwave_js/controller_state.json"))
@pytest.fixture(name="version_state", scope="session")
def version_state_fixture():
"""Load the version state fixture data."""
return {
"type": "version",
"driverVersion": "6.0.0-beta.0",
"serverVersion": "1.0.0",
"homeId": 1234567890,
}
@pytest.fixture(name="log_config_state")
def log_config_state_fixture():
"""Return log config state fixture data."""
return {
"enabled": True,
"level": "info",
"logToFile": False,
"filename": "",
"forceConsole": False,
}
@pytest.fixture(name="multisensor_6_state", scope="session")
def multisensor_6_state_fixture():
"""Load the multisensor 6 node state fixture data."""
return json.loads(load_fixture("zwave_js/multisensor_6_state.json"))
@pytest.fixture(name="ecolink_door_sensor_state", scope="session")
def ecolink_door_sensor_state_fixture():
"""Load the Ecolink Door/Window Sensor node state fixture data."""
return json.loads(load_fixture("zwave_js/ecolink_door_sensor_state.json"))
@pytest.fixture(name="hank_binary_switch_state", scope="session")
def binary_switch_state_fixture():
"""Load the hank binary switch node state fixture data."""
return json.loads(load_fixture("zwave_js/hank_binary_switch_state.json"))
@pytest.fixture(name="bulb_6_multi_color_state", scope="session")
def bulb_6_multi_color_state_fixture():
"""Load the bulb 6 multi-color node state fixture data."""
return json.loads(load_fixture("zwave_js/bulb_6_multi_color_state.json"))
@pytest.fixture(name="light_color_null_values_state", scope="session")
def light_color_null_values_state_fixture():
"""Load the light color null values node state fixture data."""
return json.loads(load_fixture("zwave_js/light_color_null_values_state.json"))
@pytest.fixture(name="eaton_rf9640_dimmer_state", scope="session")
def eaton_rf9640_dimmer_state_fixture():
"""Load the eaton rf9640 dimmer node state fixture data."""
return json.loads(load_fixture("zwave_js/eaton_rf9640_dimmer_state.json"))
@pytest.fixture(name="lock_schlage_be469_state", scope="session")
def lock_schlage_be469_state_fixture():
"""Load the schlage lock node state fixture data."""
return json.loads(load_fixture("zwave_js/lock_schlage_be469_state.json"))
@pytest.fixture(name="lock_august_asl03_state", scope="session")
def lock_august_asl03_state_fixture():
"""Load the August Pro lock node state fixture data."""
return json.loads(load_fixture("zwave_js/lock_august_asl03_state.json"))
@pytest.fixture(name="climate_radio_thermostat_ct100_plus_state", scope="session")
def climate_radio_thermostat_ct100_plus_state_fixture():
"""Load the climate radio thermostat ct100 plus node state fixture data."""
return json.loads(
load_fixture("zwave_js/climate_radio_thermostat_ct100_plus_state.json")
)
@pytest.fixture(
name="climate_radio_thermostat_ct100_plus_different_endpoints_state",
scope="session",
)
def climate_radio_thermostat_ct100_plus_different_endpoints_state_fixture():
"""Load the thermostat fixture state with values on different endpoints.
This device is a radio thermostat ct100.
"""
return json.loads(
load_fixture(
"zwave_js/climate_radio_thermostat_ct100_plus_different_endpoints_state.json"
)
)
@pytest.fixture(name="climate_danfoss_lc_13_state", scope="session")
def climate_danfoss_lc_13_state_fixture():
"""Load the climate Danfoss (LC-13) electronic radiator thermostat node state fixture data."""
return json.loads(load_fixture("zwave_js/climate_danfoss_lc_13_state.json"))
@pytest.fixture(name="climate_eurotronic_spirit_z_state", scope="session")
def climate_eurotronic_spirit_z_state_fixture():
"""Load the climate Eurotronic Spirit Z thermostat node state fixture data."""
return json.loads(load_fixture("zwave_js/climate_eurotronic_spirit_z_state.json"))
@pytest.fixture(name="climate_heatit_z_trm3_state", scope="session")
def climate_heatit_z_trm3_state_fixture():
"""Load the climate HEATIT Z-TRM3 thermostat node state fixture data."""
return json.loads(load_fixture("zwave_js/climate_heatit_z_trm3_state.json"))
@pytest.fixture(name="climate_heatit_z_trm2fx_state", scope="session")
def climate_heatit_z_trm2fx_state_fixture():
"""Load the climate HEATIT Z-TRM2fx thermostat node state fixture data."""
return json.loads(load_fixture("zwave_js/climate_heatit_z_trm2fx_state.json"))
@pytest.fixture(name="nortek_thermostat_state", scope="session")
def nortek_thermostat_state_fixture():
"""Load the nortek thermostat node state fixture data."""
return json.loads(load_fixture("zwave_js/nortek_thermostat_state.json"))
@pytest.fixture(name="srt321_hrt4_zw_state", scope="session")
def srt321_hrt4_zw_state_fixture():
"""Load the climate HRT4-ZW / SRT321 / SRT322 thermostat node state fixture data."""
return json.loads(load_fixture("zwave_js/srt321_hrt4_zw_state.json"))
@pytest.fixture(name="chain_actuator_zws12_state", scope="session")
def window_cover_state_fixture():
"""Load the window cover node state fixture data."""
return json.loads(load_fixture("zwave_js/chain_actuator_zws12_state.json"))
@pytest.fixture(name="in_wall_smart_fan_control_state", scope="session")
def in_wall_smart_fan_control_state_fixture():
"""Load the fan node state fixture data."""
return json.loads(load_fixture("zwave_js/in_wall_smart_fan_control_state.json"))
@pytest.fixture(name="gdc_zw062_state", scope="session")
def motorized_barrier_cover_state_fixture():
"""Load the motorized barrier cover node state fixture data."""
return json.loads(load_fixture("zwave_js/cover_zw062_state.json"))
@pytest.fixture(name="iblinds_v2_state", scope="session")
def iblinds_v2_state_fixture():
"""Load the iBlinds v2 node state fixture data."""
return json.loads(load_fixture("zwave_js/cover_iblinds_v2_state.json"))
@pytest.fixture(name="qubino_shutter_state", scope="session")
def qubino_shutter_state_fixture():
"""Load the Qubino Shutter node state fixture data."""
return json.loads(load_fixture("zwave_js/cover_qubino_shutter_state.json"))
@pytest.fixture(name="aeon_smart_switch_6_state", scope="session")
def aeon_smart_switch_6_state_fixture():
"""Load the AEON Labs (ZW096) Smart Switch 6 node state fixture data."""
return json.loads(load_fixture("zwave_js/aeon_smart_switch_6_state.json"))
@pytest.fixture(name="ge_12730_state", scope="session")
def ge_12730_state_fixture():
"""Load the GE 12730 node state fixture data."""
return json.loads(load_fixture("zwave_js/fan_ge_12730_state.json"))
@pytest.fixture(name="aeotec_radiator_thermostat_state", scope="session")
def aeotec_radiator_thermostat_state_fixture():
"""Load the Aeotec Radiator Thermostat node state fixture data."""
return json.loads(load_fixture("zwave_js/aeotec_radiator_thermostat_state.json"))
@pytest.fixture(name="inovelli_lzw36_state", scope="session")
def inovelli_lzw36_state_fixture():
"""Load the Inovelli LZW36 node state fixture data."""
return json.loads(load_fixture("zwave_js/inovelli_lzw36_state.json"))
@pytest.fixture(name="null_name_check_state", scope="session")
def null_name_check_state_fixture():
"""Load the null name check node state fixture data."""
return json.loads(load_fixture("zwave_js/null_name_check_state.json"))
@pytest.fixture(name="lock_id_lock_as_id150_state", scope="session")
def lock_id_lock_as_id150_state_fixture():
"""Load the id lock id-150 lock node state fixture data."""
return json.loads(load_fixture("zwave_js/lock_id_lock_as_id150_state.json"))
@pytest.fixture(
name="climate_radio_thermostat_ct101_multiple_temp_units_state", scope="session"
)
def climate_radio_thermostat_ct101_multiple_temp_units_state_fixture():
"""Load the climate multiple temp units node state fixture data."""
return json.loads(
load_fixture(
"zwave_js/climate_radio_thermostat_ct101_multiple_temp_units_state.json"
)
)
@pytest.fixture(
name="climate_radio_thermostat_ct100_mode_and_setpoint_on_different_endpoints_state",
scope="session",
)
def climate_radio_thermostat_ct100_mode_and_setpoint_on_different_endpoints_state_fixture():
"""Load the climate device with mode and setpoint on different endpoints node state fixture data."""
return json.loads(
load_fixture(
"zwave_js/climate_radio_thermostat_ct100_mode_and_setpoint_on_different_endpoints_state.json"
)
)
@pytest.fixture(name="vision_security_zl7432_state", scope="session")
def vision_security_zl7432_state_fixture():
"""Load the vision security zl7432 switch node state fixture data."""
return json.loads(load_fixture("zwave_js/vision_security_zl7432_state.json"))
@pytest.fixture(name="zen_31_state", scope="session")
def zen_31_state_fixture():
"""Load the zen_31 node state fixture data."""
return json.loads(load_fixture("zwave_js/zen_31_state.json"))
@pytest.fixture(name="wallmote_central_scene_state", scope="session")
def wallmote_central_scene_state_fixture():
"""Load the wallmote central scene node state fixture data."""
return json.loads(load_fixture("zwave_js/wallmote_central_scene_state.json"))
@pytest.fixture(name="client")
def mock_client_fixture(controller_state, version_state, log_config_state):
"""Mock a client."""
with patch(
"homeassistant.components.zwave_js.ZwaveClient", autospec=True
) as client_class:
client = client_class.return_value
async def connect():
await asyncio.sleep(0)
client.connected = True
async def listen(driver_ready: asyncio.Event) -> None:
driver_ready.set()
await asyncio.sleep(30)
assert False, "Listen wasn't canceled!"
async def disconnect():
client.connected = False
client.connect = AsyncMock(side_effect=connect)
client.listen = AsyncMock(side_effect=listen)
client.disconnect = AsyncMock(side_effect=disconnect)
client.driver = Driver(client, controller_state, log_config_state)
client.version = VersionInfo.from_message(version_state)
client.ws_server_url = "ws://test:3000/zjs"
yield client
@pytest.fixture(name="multisensor_6")
def multisensor_6_fixture(client, multisensor_6_state):
"""Mock a multisensor 6 node."""
node = Node(client, copy.deepcopy(multisensor_6_state))
client.driver.controller.nodes[node.node_id] = node
return node
@pytest.fixture(name="ecolink_door_sensor")
def legacy_binary_sensor_fixture(client, ecolink_door_sensor_state):
"""Mock a legacy_binary_sensor node."""
node = Node(client, copy.deepcopy(ecolink_door_sensor_state))
client.driver.controller.nodes[node.node_id] = node
return node
@pytest.fixture(name="hank_binary_switch")
def hank_binary_switch_fixture(client, hank_binary_switch_state):
"""Mock a binary switch node."""
node = Node(client, copy.deepcopy(hank_binary_switch_state))
client.driver.controller.nodes[node.node_id] = node
return node
@pytest.fixture(name="bulb_6_multi_color")
def bulb_6_multi_color_fixture(client, bulb_6_multi_color_state):
"""Mock a bulb 6 multi-color node."""
node = Node(client, copy.deepcopy(bulb_6_multi_color_state))
client.driver.controller.nodes[node.node_id] = node
return node
@pytest.fixture(name="light_color_null_values")
def light_color_null_values_fixture(client, light_color_null_values_state):
"""Mock a node with current color value item being null."""
node = Node(client, copy.deepcopy(light_color_null_values_state))
client.driver.controller.nodes[node.node_id] = node
return node
@pytest.fixture(name="eaton_rf9640_dimmer")
def eaton_rf9640_dimmer_fixture(client, eaton_rf9640_dimmer_state):
"""Mock a Eaton RF9640 (V4 compatible) dimmer node."""
node = Node(client, copy.deepcopy(eaton_rf9640_dimmer_state))
client.driver.controller.nodes[node.node_id] = node
return node
@pytest.fixture(name="lock_schlage_be469")
def lock_schlage_be469_fixture(client, lock_schlage_be469_state):
"""Mock a schlage lock node."""
node = Node(client, copy.deepcopy(lock_schlage_be469_state))
client.driver.controller.nodes[node.node_id] = node
return node
@pytest.fixture(name="lock_august_pro")
def lock_august_asl03_fixture(client, lock_august_asl03_state):
"""Mock a August Pro lock node."""
node = Node(client, copy.deepcopy(lock_august_asl03_state))
client.driver.controller.nodes[node.node_id] = node
return node
@pytest.fixture(name="climate_radio_thermostat_ct100_plus")
def climate_radio_thermostat_ct100_plus_fixture(
client, climate_radio_thermostat_ct100_plus_state
):
"""Mock a climate radio thermostat ct100 plus node."""
node = Node(client, copy.deepcopy(climate_radio_thermostat_ct100_plus_state))
client.driver.controller.nodes[node.node_id] = node
return node
@pytest.fixture(name="climate_radio_thermostat_ct100_plus_different_endpoints")
def climate_radio_thermostat_ct100_plus_different_endpoints_fixture(
client, climate_radio_thermostat_ct100_plus_different_endpoints_state
):
"""Mock a climate radio thermostat ct100 plus node with values on different endpoints."""
node = Node(
client,
copy.deepcopy(climate_radio_thermostat_ct100_plus_different_endpoints_state),
)
client.driver.controller.nodes[node.node_id] = node
return node
@pytest.fixture(name="climate_danfoss_lc_13")
def climate_danfoss_lc_13_fixture(client, climate_danfoss_lc_13_state):
"""Mock a climate radio danfoss LC-13 node."""
node = Node(client, copy.deepcopy(climate_danfoss_lc_13_state))
client.driver.controller.nodes[node.node_id] = node
return node
@pytest.fixture(name="climate_eurotronic_spirit_z")
def climate_eurotronic_spirit_z_fixture(client, climate_eurotronic_spirit_z_state):
"""Mock a climate radio danfoss LC-13 node."""
node = Node(client, climate_eurotronic_spirit_z_state)
client.driver.controller.nodes[node.node_id] = node
return node
@pytest.fixture(name="climate_heatit_z_trm3")
def climate_heatit_z_trm3_fixture(client, climate_heatit_z_trm3_state):
"""Mock a climate radio HEATIT Z-TRM3 node."""
node = Node(client, copy.deepcopy(climate_heatit_z_trm3_state))
client.driver.controller.nodes[node.node_id] = node
return node
@pytest.fixture(name="climate_heatit_z_trm2fx")
def climate_heatit_z_trm2fx_fixture(client, climate_heatit_z_trm2fx_state):
"""Mock a climate radio HEATIT Z-TRM2fx node."""
node = Node(client, copy.deepcopy(climate_heatit_z_trm2fx_state))
client.driver.controller.nodes[node.node_id] = node
return node
@pytest.fixture(name="nortek_thermostat")
def nortek_thermostat_fixture(client, nortek_thermostat_state):
"""Mock a nortek thermostat node."""
node = Node(client, copy.deepcopy(nortek_thermostat_state))
client.driver.controller.nodes[node.node_id] = node
return node
@pytest.fixture(name="srt321_hrt4_zw")
def srt321_hrt4_zw_fixture(client, srt321_hrt4_zw_state):
"""Mock a HRT4-ZW / SRT321 / SRT322 thermostat node."""
node = Node(client, copy.deepcopy(srt321_hrt4_zw_state))
client.driver.controller.nodes[node.node_id] = node
return node
@pytest.fixture(name="aeotec_radiator_thermostat")
def aeotec_radiator_thermostat_fixture(client, aeotec_radiator_thermostat_state):
"""Mock a Aeotec thermostat node."""
node = Node(client, aeotec_radiator_thermostat_state)
client.driver.controller.nodes[node.node_id] = node
return node
@pytest.fixture(name="nortek_thermostat_added_event")
def nortek_thermostat_added_event_fixture(client):
"""Mock a Nortek thermostat node added event."""
event_data = json.loads(load_fixture("zwave_js/nortek_thermostat_added_event.json"))
event = Event("node added", event_data)
return event
@pytest.fixture(name="nortek_thermostat_removed_event")
def nortek_thermostat_removed_event_fixture(client):
"""Mock a Nortek thermostat node removed event."""
event_data = json.loads(
load_fixture("zwave_js/nortek_thermostat_removed_event.json")
)
event = Event("node removed", event_data)
return event
@pytest.fixture(name="integration")
async def integration_fixture(hass, client):
"""Set up the zwave_js integration."""
entry = MockConfigEntry(domain="zwave_js", data={"url": "ws://test.org"})
entry.add_to_hass(hass)
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
return entry
@pytest.fixture(name="chain_actuator_zws12")
def window_cover_fixture(client, chain_actuator_zws12_state):
"""Mock a window cover node."""
node = Node(client, copy.deepcopy(chain_actuator_zws12_state))
client.driver.controller.nodes[node.node_id] = node
return node
@pytest.fixture(name="in_wall_smart_fan_control")
def in_wall_smart_fan_control_fixture(client, in_wall_smart_fan_control_state):
"""Mock a fan node."""
node = Node(client, copy.deepcopy(in_wall_smart_fan_control_state))
client.driver.controller.nodes[node.node_id] = node
return node
@pytest.fixture(name="null_name_check")
def null_name_check_fixture(client, null_name_check_state):
"""Mock a node with no name."""
node = Node(client, copy.deepcopy(null_name_check_state))
client.driver.controller.nodes[node.node_id] = node
return node
@pytest.fixture(name="multiple_devices")
def multiple_devices_fixture(
client, climate_radio_thermostat_ct100_plus_state, lock_schlage_be469_state
):
"""Mock a client with multiple devices."""
node = Node(client, copy.deepcopy(climate_radio_thermostat_ct100_plus_state))
client.driver.controller.nodes[node.node_id] = node
node = Node(client, copy.deepcopy(lock_schlage_be469_state))
client.driver.controller.nodes[node.node_id] = node
return client.driver.controller.nodes
@pytest.fixture(name="gdc_zw062")
def motorized_barrier_cover_fixture(client, gdc_zw062_state):
"""Mock a motorized barrier node."""
node = Node(client, copy.deepcopy(gdc_zw062_state))
client.driver.controller.nodes[node.node_id] = node
return node
@pytest.fixture(name="iblinds_v2")
def iblinds_cover_fixture(client, iblinds_v2_state):
"""Mock an iBlinds v2.0 window cover node."""
node = Node(client, copy.deepcopy(iblinds_v2_state))
client.driver.controller.nodes[node.node_id] = node
return node
@pytest.fixture(name="qubino_shutter")
def qubino_shutter_cover_fixture(client, qubino_shutter_state):
"""Mock a Qubino flush shutter node."""
node = Node(client, copy.deepcopy(qubino_shutter_state))
client.driver.controller.nodes[node.node_id] = node
return node
@pytest.fixture(name="aeon_smart_switch_6")
def aeon_smart_switch_6_fixture(client, aeon_smart_switch_6_state):
"""Mock an AEON Labs (ZW096) Smart Switch 6 node."""
    node = Node(client, copy.deepcopy(aeon_smart_switch_6_state))
client.driver.controller.nodes[node.node_id] = node
return node
@pytest.fixture(name="ge_12730")
def ge_12730_fixture(client, ge_12730_state):
"""Mock a GE 12730 fan controller node."""
node = Node(client, copy.deepcopy(ge_12730_state))
client.driver.controller.nodes[node.node_id] = node
return node
@pytest.fixture(name="inovelli_lzw36")
def inovelli_lzw36_fixture(client, inovelli_lzw36_state):
"""Mock a Inovelli LZW36 fan controller node."""
node = Node(client, copy.deepcopy(inovelli_lzw36_state))
client.driver.controller.nodes[node.node_id] = node
return node
@pytest.fixture(name="lock_id_lock_as_id150")
def lock_id_lock_as_id150(client, lock_id_lock_as_id150_state):
"""Mock an id lock id-150 lock node."""
node = Node(client, copy.deepcopy(lock_id_lock_as_id150_state))
client.driver.controller.nodes[node.node_id] = node
return node
@pytest.fixture(name="climate_radio_thermostat_ct101_multiple_temp_units")
def climate_radio_thermostat_ct101_multiple_temp_units_fixture(
client, climate_radio_thermostat_ct101_multiple_temp_units_state
):
"""Mock a climate device with multiple temp units node."""
node = Node(
client, copy.deepcopy(climate_radio_thermostat_ct101_multiple_temp_units_state)
)
client.driver.controller.nodes[node.node_id] = node
return node
@pytest.fixture(
name="climate_radio_thermostat_ct100_mode_and_setpoint_on_different_endpoints"
)
def climate_radio_thermostat_ct100_mode_and_setpoint_on_different_endpoints_fixture(
client,
climate_radio_thermostat_ct100_mode_and_setpoint_on_different_endpoints_state,
):
"""Mock a climate device with mode and setpoint on differenet endpoints node."""
node = Node(
client,
copy.deepcopy(
climate_radio_thermostat_ct100_mode_and_setpoint_on_different_endpoints_state
),
)
client.driver.controller.nodes[node.node_id] = node
return node
@pytest.fixture(name="vision_security_zl7432")
def vision_security_zl7432_fixture(client, vision_security_zl7432_state):
"""Mock a vision security zl7432 node."""
node = Node(client, copy.deepcopy(vision_security_zl7432_state))
client.driver.controller.nodes[node.node_id] = node
return node
@pytest.fixture(name="zen_31")
def zen_31_fixture(client, zen_31_state):
"""Mock a bulb 6 multi-color node."""
node = Node(client, copy.deepcopy(zen_31_state))
client.driver.controller.nodes[node.node_id] = node
return node
@pytest.fixture(name="wallmote_central_scene")
def wallmote_central_scene_fixture(client, wallmote_central_scene_state):
"""Mock a wallmote central scene node."""
node = Node(client, copy.deepcopy(wallmote_central_scene_state))
client.driver.controller.nodes[node.node_id] = node
return node
@pytest.fixture(name="firmware_file")
def firmware_file_fixture():
"""Return mock firmware file stream."""
return io.BytesIO(bytes(10))
| {
"content_hash": "9d618faafad8cde273da78550a880f49",
"timestamp": "",
"source": "github",
"line_count": 738,
"max_line_length": 105,
"avg_line_length": 35.33739837398374,
"alnum_prop": 0.7095747536331914,
"repo_name": "kennedyshead/home-assistant",
"id": "12db8bafb77c99ea42c7b83d4061c69150854733",
"size": "26079",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "tests/components/zwave_js/conftest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1795"
},
{
"name": "Python",
"bytes": "33970989"
},
{
"name": "Shell",
"bytes": "4900"
}
],
"symlink_target": ""
} |
"""``PTransforms`` for manipulating files in Apache Beam.
Provides reading ``PTransform``\\s, ``MatchFiles`` and
``MatchAll``, that produce a ``PCollection`` of records representing a file
and its metadata; and ``ReadMatches``, which takes in a ``PCollection`` of file
metadata records, and produces a ``PCollection`` of ``ReadableFile`` objects.
These transforms currently do not support splitting by themselves.
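A minimal match-and-read pipeline can be built as follows (a sketch, assuming
an existing pipeline ``p``; the file pattern is illustrative)::
  contents = (p
              | beam.io.fileio.MatchFiles('/my/path/*.txt')
              | beam.io.fileio.ReadMatches()
              | beam.Map(lambda readable_file: readable_file.read_utf8()))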
Writing to Files
================
The transforms in this file include ``WriteToFiles``, which allows you to write
a ``beam.PCollection`` to files, and gives you many options to customize how to
do this.
The ``WriteToFiles`` transform supports bounded and unbounded PCollections
(i.e. it can be used in both batch and streaming pipelines). For streaming
pipelines, it currently does not have support for multiple trigger firings
on the same window.
File Naming
-----------
One of the parameters received by ``WriteToFiles`` is a function specifying how
to name the files that are written. This is a function that takes in the
following parameters:
- window
- pane
- shard_index
- total_shards
- compression
- destination
It should return a file name that is unique for a combination of these
parameters.
The default naming strategy is to name files
in the format
`$prefix-$start-$end-$pane-$shard-of-$numShards$suffix$compressionSuffix`,
where:
- `$prefix` is, by default, `"output"`.
- `$start` and `$end` are the boundaries of the window for the data being
written. These are omitted if we're using the Global window.
- `$pane` is the index of the pane firing within the window.
- `$shard` and `$numShards` are the current shard number, and the total number
of shards for this window firing.
- `$suffix` is, by default, an empty string, but it can be set by the user via
``default_file_naming``.
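For example, a naming function matching this contract could look like the
following (a sketch suitable for a batch pipeline on the global window; the
name format is illustrative)::
  def my_file_naming(window, pane, shard_index, total_shards,
                     compression, destination):
    return '%s-%05d-of-%05d%s' % (
        destination, shard_index, total_shards, compression)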
Dynamic Destinations
--------------------
If the elements in the input ``beam.PCollection`` can be partitioned into groups
that should be treated differently (e.g. some events are to be stored as CSV,
while some others are to be stored as Avro files), it is possible to do this
by passing a `destination` parameter to ``WriteToFiles``. Something like the
following::
my_pcollection | beam.io.fileio.WriteToFiles(
path='/my/file/path',
destination=lambda record: 'avro' if record['type'] == 'A' else 'csv',
sink=lambda dest: AvroSink() if dest == 'avro' else CsvSink(),
file_naming=beam.io.fileio.destination_prefix_naming())
In this transform, depending on the type of a record, it will be written to
a destination named `'avro'`, or `'csv'`. The value returned by the
`destination` call is then passed to the `sink` call, to determine what sort of
sink will be used for each destination. The return type of the `destination`
parameter can be anything, as long as elements can be grouped by it.
No backward compatibility guarantees. Everything in this module is experimental.
"""
from __future__ import absolute_import
import collections
import logging
import random
import uuid
from typing import TYPE_CHECKING
from typing import Any
from typing import BinaryIO # pylint: disable=unused-import
from typing import Callable
from typing import DefaultDict
from typing import Dict
from typing import Tuple
from past.builtins import unicode
import apache_beam as beam
from apache_beam.io import filesystem
from apache_beam.io import filesystems
from apache_beam.io.filesystem import BeamIOError
from apache_beam.options.pipeline_options import GoogleCloudOptions
from apache_beam.options.value_provider import StaticValueProvider
from apache_beam.options.value_provider import ValueProvider
from apache_beam.transforms.window import GlobalWindow
from apache_beam.utils.annotations import experimental
if TYPE_CHECKING:
from apache_beam.transforms.window import BoundedWindow
__all__ = ['EmptyMatchTreatment',
'MatchFiles',
'MatchAll',
'ReadableFile',
'ReadMatches']
_LOGGER = logging.getLogger(__name__)
class EmptyMatchTreatment(object):
"""How to treat empty matches in ``MatchAll`` and ``MatchFiles`` transforms.
If empty matches are disallowed, an error will be thrown if a pattern does not
match any files."""
ALLOW = 'ALLOW'
DISALLOW = 'DISALLOW'
ALLOW_IF_WILDCARD = 'ALLOW_IF_WILDCARD'
@staticmethod
def allow_empty_match(pattern, setting):
if setting == EmptyMatchTreatment.ALLOW:
return True
    elif setting == EmptyMatchTreatment.ALLOW_IF_WILDCARD:
      return '*' in pattern
elif setting == EmptyMatchTreatment.DISALLOW:
return False
else:
raise ValueError(setting)
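# For example (a sketch of the semantics above):
#   allow_empty_match('gs://bucket/*', 'ALLOW_IF_WILDCARD')    -> True
#   allow_empty_match('gs://bucket/file', 'ALLOW_IF_WILDCARD') -> False
# A False result makes the matching transforms raise a BeamIOError when a
# pattern matches no files.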
class _MatchAllFn(beam.DoFn):
def __init__(self, empty_match_treatment):
self._empty_match_treatment = empty_match_treatment
def process(self, file_pattern):
# TODO: Should we batch the lookups?
match_results = filesystems.FileSystems.match([file_pattern])
match_result = match_results[0]
if (not match_result.metadata_list
and not EmptyMatchTreatment.allow_empty_match(
file_pattern, self._empty_match_treatment)):
raise BeamIOError(
'Empty match for pattern %s. Disallowed.' % file_pattern)
return match_result.metadata_list
@experimental()
class MatchFiles(beam.PTransform):
"""Matches a file pattern using ``FileSystems.match``.
This ``PTransform`` returns a ``PCollection`` of matching files in the form
of ``FileMetadata`` objects."""
def __init__(self,
file_pattern,
empty_match_treatment=EmptyMatchTreatment.ALLOW_IF_WILDCARD):
self._file_pattern = file_pattern
self._empty_match_treatment = empty_match_treatment
def expand(self, pcoll):
return (pcoll.pipeline
| beam.Create([self._file_pattern])
| MatchAll())
@experimental()
class MatchAll(beam.PTransform):
"""Matches file patterns from the input PCollection via ``FileSystems.match``.
This ``PTransform`` returns a ``PCollection`` of matching files in the form
of ``FileMetadata`` objects."""
def __init__(self, empty_match_treatment=EmptyMatchTreatment.ALLOW):
self._empty_match_treatment = empty_match_treatment
def expand(self, pcoll):
return (pcoll
| beam.ParDo(_MatchAllFn(self._empty_match_treatment)))
class _ReadMatchesFn(beam.DoFn):
def __init__(self, compression, skip_directories):
self._compression = compression
self._skip_directories = skip_directories
def process(self, file_metadata):
metadata = (filesystem.FileMetadata(file_metadata, 0)
if isinstance(file_metadata, (str, unicode))
else file_metadata)
if ((metadata.path.endswith('/') or metadata.path.endswith('\\'))
and self._skip_directories):
return
elif metadata.path.endswith('/') or metadata.path.endswith('\\'):
raise BeamIOError(
'Directories are not allowed in ReadMatches transform.'
'Found %s.' % metadata.path)
# TODO: Mime type? Other arguments? Maybe arguments passed in to transform?
yield ReadableFile(metadata, self._compression)
class ReadableFile(object):
"""A utility class for accessing files."""
def __init__(self, metadata, compression=None):
self.metadata = metadata
self._compression = compression
def open(self,
mime_type='text/plain',
compression_type=None):
compression = (
compression_type or
self._compression or
filesystems.CompressionTypes.AUTO)
return filesystems.FileSystems.open(self.metadata.path,
mime_type=mime_type,
compression_type=compression)
def read(self, mime_type='application/octet-stream'):
return self.open(mime_type).read()
def read_utf8(self):
return self.open().read().decode('utf-8')
@experimental()
class ReadMatches(beam.PTransform):
"""Converts each result of MatchFiles() or MatchAll() to a ReadableFile.
This helps read in a file's contents or obtain a file descriptor."""
def __init__(self, compression=None, skip_directories=True):
self._compression = compression
self._skip_directories = skip_directories
def expand(self, pcoll):
return pcoll | beam.ParDo(_ReadMatchesFn(self._compression,
self._skip_directories))
class FileSink(object):
"""Specifies how to write elements to individual files in ``WriteToFiles``.
**NOTE: THIS CLASS IS EXPERIMENTAL.**
A Sink class must implement the following:
- The ``open`` method, which initializes writing to a file handler (it is not
responsible for opening the file handler itself).
- The ``write`` method, which writes an element to the file that was passed
in ``open``.
- The ``flush`` method, which flushes any buffered state. This is most often
called before closing a file (but not exclusively called in that
situation). The sink is not responsible for closing the file handler.
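  A minimal JSON-lines sink, as a sketch of this contract (illustrative, not
  part of this module; assumes ``json`` is imported)::
    class JsonSink(FileSink):
      def open(self, fh):
        self._fh = fh
      def write(self, record):
        self._fh.write(json.dumps(record).encode('utf8'))
        self._fh.write(b'\n')
      def flush(self):
        self._fh.flush()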
"""
def open(self, fh):
# type: (BinaryIO) -> None
raise NotImplementedError
def write(self, record):
raise NotImplementedError
def flush(self):
raise NotImplementedError
@beam.typehints.with_input_types(str)
class TextSink(FileSink):
"""A sink that encodes utf8 elements, and writes to file handlers.
**NOTE: THIS CLASS IS EXPERIMENTAL.**
  This sink simply calls file_handler.write(record.encode('utf8') + b'\n') on all
records that come into it.
"""
def open(self, fh):
self._fh = fh
def write(self, record):
self._fh.write(record.encode('utf8'))
self._fh.write(b'\n')
def flush(self):
self._fh.flush()
def prefix_naming(prefix):
return default_file_naming(prefix)
_DEFAULT_FILE_NAME_TEMPLATE = (
'{prefix}-{start}-{end}-{pane}-'
'{shard:05d}-{total_shards:05d}'
'{suffix}{compression}')
def destination_prefix_naming():
def _inner(window, pane, shard_index, total_shards, compression, destination):
kwargs = {'prefix': str(destination),
'start': '',
'end': '',
'pane': '',
'shard': 0,
'total_shards': 0,
'suffix': '',
'compression': ''}
if total_shards is not None and shard_index is not None:
kwargs['shard'] = int(shard_index)
kwargs['total_shards'] = int(total_shards)
if window != GlobalWindow():
kwargs['start'] = window.start.to_utc_datetime().isoformat()
kwargs['end'] = window.end.to_utc_datetime().isoformat()
# TODO(BEAM-3759): Add support for PaneInfo
# If the PANE is the ONLY firing in the window, we don't add it.
#if pane and not (pane.is_first and pane.is_last):
# kwargs['pane'] = pane.index
if compression:
kwargs['compression'] = '.%s' % compression
return _DEFAULT_FILE_NAME_TEMPLATE.format(**kwargs)
return _inner
def default_file_naming(prefix, suffix=None):
def _inner(window, pane, shard_index, total_shards, compression, destination):
kwargs = {'prefix': prefix,
'start': '',
'end': '',
'pane': '',
'shard': 0,
'total_shards': 0,
'suffix': '',
'compression': ''}
if total_shards is not None and shard_index is not None:
kwargs['shard'] = int(shard_index)
kwargs['total_shards'] = int(total_shards)
if window != GlobalWindow():
kwargs['start'] = window.start.to_utc_datetime().isoformat()
kwargs['end'] = window.end.to_utc_datetime().isoformat()
# TODO(pabloem): Add support for PaneInfo
# If the PANE is the ONLY firing in the window, we don't add it.
#if pane and not (pane.is_first and pane.is_last):
# kwargs['pane'] = pane.index
if compression:
kwargs['compression'] = '.%s' % compression
if suffix:
kwargs['suffix'] = suffix
return _DEFAULT_FILE_NAME_TEMPLATE.format(**kwargs)
return _inner
_FileResult = collections.namedtuple('FileResult',
['file_name',
'shard_index',
'total_shards',
'window',
'pane',
'destination'])
# Adding a class to contain PyDoc.
class FileResult(_FileResult):
"""A descriptor of a file that has been written."""
pass
@experimental()
class WriteToFiles(beam.PTransform):
"""Write the incoming PCollection to a set of output files.
The incoming ``PCollection`` may be bounded or unbounded.
**Note:** For unbounded ``PCollection``s, this transform does not support
multiple firings per Window (due to the fact that files are named only by
their destination, and window, at the moment).
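  A simple use, relying on the default ``TextSink`` and default file naming,
  might look like this (a sketch, assuming an existing pipeline ``p`` and an
  illustrative output directory)::
    _ = (p
         | beam.Create(['record one', 'record two'])
         | beam.io.fileio.WriteToFiles(path='/my/output/dir'))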
"""
# We allow up to 20 different destinations to be written in a single bundle.
  # Too many files will add memory pressure to the worker, so we cap it at 20.
MAX_NUM_WRITERS_PER_BUNDLE = 20
DEFAULT_SHARDING = 5
def __init__(self,
path,
file_naming=None,
destination=None,
temp_directory=None,
sink=None,
shards=None,
output_fn=None,
max_writers_per_bundle=MAX_NUM_WRITERS_PER_BUNDLE):
"""Initializes a WriteToFiles transform.
Args:
path (str, ValueProvider): The directory to write files into.
      file_naming (callable): A callable that takes in a window, pane,
        shard_index, total_shards, compression and destination; and returns
        a file name.
destination (callable): If this argument is provided, the sink parameter
must also be a callable.
temp_directory (str, ValueProvider): To ensure atomicity in the transform,
the output is written into temporary files, which are written to a
directory that is meant to be temporary as well. Once the whole output
has been written, the files are moved into their final destination, and
given their final names. By default, the temporary directory will be
within the temp_location of your pipeline.
sink (callable, FileSink): The sink to use to write into a file. It should
implement the methods of a ``FileSink``. If none is provided, a
``TextSink`` is used.
shards (int): The number of shards per destination and trigger firing.
max_writers_per_bundle (int): The number of writers that can be open
concurrently in a single worker that's processing one bundle.
"""
self.path = (
path if isinstance(path, ValueProvider) else StaticValueProvider(str,
path))
self.file_naming_fn = file_naming or default_file_naming('output')
self.destination_fn = self._get_destination_fn(destination)
self._temp_directory = temp_directory
self.sink_fn = self._get_sink_fn(sink)
self.shards = shards or WriteToFiles.DEFAULT_SHARDING
self.output_fn = output_fn or (lambda x: x)
self._max_num_writers_per_bundle = max_writers_per_bundle
@staticmethod
def _get_sink_fn(input_sink):
# type: (...) -> Callable[[Any], FileSink]
if isinstance(input_sink, FileSink):
return lambda x: input_sink
elif callable(input_sink):
return input_sink
else:
return lambda x: TextSink()
@staticmethod
def _get_destination_fn(destination):
# type: (...) -> Callable[[Any], str]
if isinstance(destination, ValueProvider):
return lambda elm: destination.get()
elif callable(destination):
return destination
else:
return lambda elm: destination
def expand(self, pcoll):
p = pcoll.pipeline
if not self._temp_directory:
temp_location = (
p.options.view_as(GoogleCloudOptions).temp_location
or self.path.get())
dir_uid = str(uuid.uuid4())
self._temp_directory = StaticValueProvider(
str,
filesystems.FileSystems.join(temp_location,
'.temp%s' % dir_uid))
_LOGGER.info('Added temporary directory %s', self._temp_directory.get())
output = (pcoll
| beam.ParDo(_WriteUnshardedRecordsFn(
base_path=self._temp_directory,
destination_fn=self.destination_fn,
sink_fn=self.sink_fn,
max_writers_per_bundle=self._max_num_writers_per_bundle))
.with_outputs(_WriteUnshardedRecordsFn.SPILLED_RECORDS,
_WriteUnshardedRecordsFn.WRITTEN_FILES))
written_files_pc = output[_WriteUnshardedRecordsFn.WRITTEN_FILES]
spilled_records_pc = output[_WriteUnshardedRecordsFn.SPILLED_RECORDS]
more_written_files_pc = (
spilled_records_pc
| beam.ParDo(_AppendShardedDestination(self.destination_fn,
self.shards))
| "GroupRecordsByDestinationAndShard" >> beam.GroupByKey()
| beam.ParDo(_WriteShardedRecordsFn(self._temp_directory,
self.sink_fn,
self.shards))
)
files_by_destination_pc = (
(written_files_pc, more_written_files_pc)
| beam.Flatten()
| beam.Map(lambda file_result: (file_result.destination, file_result))
| "GroupTempFilesByDestination" >> beam.GroupByKey())
# Now we should take the temporary files, and write them to the final
# destination, with their proper names.
file_results = (files_by_destination_pc
| beam.ParDo(
_MoveTempFilesIntoFinalDestinationFn(
self.path, self.file_naming_fn,
self._temp_directory)))
return file_results
def _create_writer(base_path, writer_key):
try:
filesystems.FileSystems.mkdirs(base_path)
except IOError:
# Directory already exists.
pass
# The file name has a prefix determined by destination+window, along with
# a random string. This allows us to retrieve orphaned files later on.
file_name = '%s_%s' % (abs(hash(writer_key)), uuid.uuid4())
full_file_name = filesystems.FileSystems.join(base_path, file_name)
return full_file_name, filesystems.FileSystems.create(full_file_name)
class _MoveTempFilesIntoFinalDestinationFn(beam.DoFn):
def __init__(self, path, file_naming_fn, temp_dir):
self.path = path
self.file_naming_fn = file_naming_fn
self.temporary_directory = temp_dir
def process(self,
element,
w=beam.DoFn.WindowParam):
destination = element[0]
file_results = list(element[1])
for i, r in enumerate(file_results):
# TODO(pabloem): Handle compression for files.
final_file_name = self.file_naming_fn(r.window,
r.pane,
i,
len(file_results),
'',
destination)
_LOGGER.info('Moving temporary file %s to dir: %s as %s. Res: %s',
r.file_name, self.path.get(), final_file_name, r)
final_full_path = filesystems.FileSystems.join(self.path.get(),
final_file_name)
# TODO(pabloem): Batch rename requests?
try:
filesystems.FileSystems.rename([r.file_name],
[final_full_path])
except BeamIOError:
# This error is not serious, because it may happen on a retry of the
# bundle. We simply log it.
_LOGGER.debug('File %s failed to be copied. This may be due to a bundle'
' being retried.', r.file_name)
yield FileResult(final_file_name,
i,
len(file_results),
r.window,
r.pane,
destination)
_LOGGER.info('Cautiously removing temporary files for'
' destination %s and window %s', destination, w)
writer_key = (destination, w)
self._remove_temporary_files(writer_key)
def _remove_temporary_files(self, writer_key):
try:
prefix = filesystems.FileSystems.join(
self.temporary_directory.get(), str(abs(hash(writer_key))))
match_result = filesystems.FileSystems.match(['%s*' % prefix])
orphaned_files = [m.path for m in match_result[0].metadata_list]
_LOGGER.debug('Deleting orphaned files: %s', orphaned_files)
filesystems.FileSystems.delete(orphaned_files)
except BeamIOError as e:
_LOGGER.debug('Exceptions when deleting files: %s', e)
class _WriteShardedRecordsFn(beam.DoFn):
def __init__(self,
base_path,
sink_fn, # type: Callable[[Any], FileSink]
shards # type: int
):
self.base_path = base_path
self.sink_fn = sink_fn
self.shards = shards
def process(self,
element,
w=beam.DoFn.WindowParam,
pane=beam.DoFn.PaneInfoParam):
destination_and_shard = element[0]
destination = destination_and_shard[0]
shard = destination_and_shard[1]
records = element[1]
full_file_name, writer = _create_writer(base_path=self.base_path.get(),
writer_key=(destination, w))
sink = self.sink_fn(destination)
sink.open(writer)
for r in records:
sink.write(r)
sink.flush()
writer.close()
_LOGGER.info('Writing file %s for destination %s and shard %s',
full_file_name, destination, repr(shard))
yield FileResult(full_file_name,
shard_index=shard,
total_shards=self.shards,
window=w,
pane=pane,
destination=destination)
class _AppendShardedDestination(beam.DoFn):
def __init__(self,
destination, # type: Callable[[Any], str]
shards # type: int
):
self.destination_fn = destination
self.shards = shards
# We start the shards for a single destination at an arbitrary point.
self._shard_counter = collections.defaultdict(
lambda: random.randrange(self.shards)) # type: DefaultDict[str, int]
def _next_shard_for_destination(self, destination):
self._shard_counter[destination] = (
(self._shard_counter[destination] + 1) % self.shards)
return self._shard_counter[destination]
def process(self, record):
destination = self.destination_fn(record)
shard = self._next_shard_for_destination(destination)
yield ((destination, shard), record)
class _WriteUnshardedRecordsFn(beam.DoFn):
SPILLED_RECORDS = 'spilled_records'
WRITTEN_FILES = 'written_files'
_writers_and_sinks = None # type: Dict[Tuple[str, BoundedWindow], Tuple[BinaryIO, FileSink]]
_file_names = None # type: Dict[Tuple[str, BoundedWindow], str]
def __init__(self,
base_path,
destination_fn,
sink_fn,
max_writers_per_bundle=WriteToFiles.MAX_NUM_WRITERS_PER_BUNDLE):
self.base_path = base_path
self.destination_fn = destination_fn
self.sink_fn = sink_fn
self.max_num_writers_per_bundle = max_writers_per_bundle
def start_bundle(self):
self._writers_and_sinks = {}
self._file_names = {}
def process(self,
record,
w=beam.DoFn.WindowParam,
pane=beam.DoFn.PaneInfoParam):
destination = self.destination_fn(record)
writer, sink = self._get_or_create_writer_and_sink(destination, w)
if not writer:
return [beam.pvalue.TaggedOutput(self.SPILLED_RECORDS, record)]
else:
sink.write(record)
def _get_or_create_writer_and_sink(self, destination, window):
"""Returns a tuple of writer, sink."""
writer_key = (destination, window)
if writer_key in self._writers_and_sinks:
return self._writers_and_sinks.get(writer_key)
elif len(self._writers_and_sinks) >= self.max_num_writers_per_bundle:
# The writer does not exist, and we have too many writers already.
return None, None
else:
# The writer does not exist, but we can still create a new one.
full_file_name, writer = _create_writer(base_path=self.base_path.get(),
writer_key=writer_key)
sink = self.sink_fn(destination)
sink.open(writer)
self._writers_and_sinks[writer_key] = (writer, sink)
self._file_names[writer_key] = full_file_name
return self._writers_and_sinks[writer_key]
def finish_bundle(self):
for key, (writer, sink) in self._writers_and_sinks.items():
sink.flush()
writer.close()
file_result = FileResult(self._file_names[key],
shard_index=-1,
total_shards=0,
window=key[1],
pane=None, # TODO(pabloem): get the pane info
destination=key[0])
yield beam.pvalue.TaggedOutput(
self.WRITTEN_FILES,
beam.transforms.window.WindowedValue(
file_result,
timestamp=key[1].start,
windows=[key[1]] # TODO(pabloem) HOW DO WE GET THE PANE
))
| {
"content_hash": "6c5c5a373685a7049d7d44e696ed8696",
"timestamp": "",
"source": "github",
"line_count": 743,
"max_line_length": 95,
"avg_line_length": 34.46433378196501,
"alnum_prop": 0.6309993361190299,
"repo_name": "RyanSkraba/beam",
"id": "76b125fb60b50876665495394f4981c3495b32c8",
"size": "26392",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sdks/python/apache_beam/io/fileio.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "1597"
},
{
"name": "CSS",
"bytes": "40963"
},
{
"name": "Dockerfile",
"bytes": "16638"
},
{
"name": "FreeMarker",
"bytes": "7428"
},
{
"name": "Go",
"bytes": "2683402"
},
{
"name": "Groovy",
"bytes": "517560"
},
{
"name": "HTML",
"bytes": "183330"
},
{
"name": "Java",
"bytes": "28609011"
},
{
"name": "JavaScript",
"bytes": "16595"
},
{
"name": "Jupyter Notebook",
"bytes": "56365"
},
{
"name": "Python",
"bytes": "6191025"
},
{
"name": "Ruby",
"bytes": "4159"
},
{
"name": "Shell",
"bytes": "235061"
},
{
"name": "TSQL",
"bytes": "841"
}
],
"symlink_target": ""
} |
import pytest
from pages.desktop.categories import Categories
from pages.desktop.home import Home
@pytest.mark.nondestructive
def test_there_are_6_extension_categories(base_url, selenium):
page = Home(selenium, base_url).open()
assert len(page.extension_category.list) == 6
@pytest.mark.nondestructive
def test_there_are_6_theme_categories(base_url, selenium):
page = Home(selenium, base_url).open()
assert len(page.theme_category.list) == 6
@pytest.mark.nondestructive
def test_extensions_section_load_correctly(base_url, selenium):
page = Home(selenium, base_url).open()
ext_page = page.header.click_extensions()
assert 'Extensions' in ext_page.title
@pytest.mark.nondestructive
def test_explore_section_loads(base_url, selenium):
page = Home(selenium, base_url).open()
page.header.click_explore()
assert 'firefox/' in selenium.current_url
@pytest.mark.nondestructive
def test_themes_section_loads(base_url, selenium):
page = Home(selenium, base_url).open()
themes_page = page.header.click_themes()
assert 'Themes' in themes_page.text
@pytest.mark.nondestructive
def test_browse_all_button_loads_correct_page(base_url, selenium):
page = Home(selenium, base_url).open()
page.featured_extensions.browse_all
assert 'type=extension' in selenium.current_url
@pytest.mark.nondestructive
@pytest.mark.xfail(reason=(
'No static themes since '
'https://github.com/mozilla/addons-frontend/pull/5501/'))
def test_browse_all_themes_button_loads_correct_page(
base_url, selenium):
page = Home(selenium, base_url).open()
page.popular_themes.browse_all
assert 'type=statictheme' in selenium.current_url
@pytest.mark.nondestructive
def test_category_loads_extensions(base_url, selenium):
page = Home(selenium, base_url).open()
category = page.extension_category.list[0]
category_name = category.name
category.click()
assert category_name in selenium.current_url
@pytest.mark.nondestructive
def test_category_section_loads_correct_category(base_url, selenium):
page = Categories(selenium, base_url).open()
item = page.category_list[0]
name = item.name
category = item.click()
assert name in category.header.name
@pytest.mark.nondestructive
def test_title_routes_to_home(base_url, selenium):
page = Home(selenium, base_url).open()
home = page.header.click_title()
assert home.hero_banner.is_displayed()
@pytest.mark.parametrize(
'i, page_url',
enumerate(['language-tools', 'search-tools', 'android']))
@pytest.mark.nondestructive
def test_more_dropdown_navigates_correctly(base_url, selenium, i, page_url):
page = Home(selenium, base_url).open()
page.header.more_menu(item=i)
assert page_url in selenium.current_url
@pytest.mark.desktop_only
@pytest.mark.parametrize(
'i, links',
enumerate([
'about',
'blog.mozilla.org',
'extensionworkshop',
'developers',
'AMO/Policy',
'discourse',
'#Contact_us',
'review_guide',
'status',
])
)
@pytest.mark.nondestructive
def test_add_ons_footer_links(base_url, selenium, i, links):
page = Home(selenium, base_url).open()
page.footer.addon_links[i].click()
assert links in selenium.current_url
@pytest.mark.desktop_only
@pytest.mark.parametrize(
'i, links',
enumerate([
'firefox/new',
'firefox/mobile',
'firefox/mobile',
'firefox/mobile',
'firefox',
'firefox/channel/desktop',
])
)
@pytest.mark.nondestructive
def test_firefox_footer_links(base_url, selenium, i, links):
page = Home(selenium, base_url).open()
page.footer.firefox_links[i].click()
assert links in selenium.current_url
| {
"content_hash": "11e58572d72ea308e0db00edadb138ac",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 76,
"avg_line_length": 28.717557251908396,
"alnum_prop": 0.694843168527379,
"repo_name": "kumar303/addons-server",
"id": "195c6c8ec9def8d1677787681294f29b60ff7c7a",
"size": "3762",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/ui/test_home.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "803737"
},
{
"name": "Dockerfile",
"bytes": "3059"
},
{
"name": "HTML",
"bytes": "422013"
},
{
"name": "JavaScript",
"bytes": "1048955"
},
{
"name": "Makefile",
"bytes": "384"
},
{
"name": "PLSQL",
"bytes": "316"
},
{
"name": "PLpgSQL",
"bytes": "9352"
},
{
"name": "Python",
"bytes": "5160043"
},
{
"name": "SQLPL",
"bytes": "645"
},
{
"name": "Shell",
"bytes": "7787"
},
{
"name": "Smarty",
"bytes": "1356"
}
],
"symlink_target": ""
} |
from ... import settings
from ...compat import re_path
from ...views.contacts import ContactListView, ContactDetailView
urlpatterns = [
    # These URLs must be protected.
re_path(r'^(?P<user>%s)/' % settings.USERNAME_PAT,
ContactDetailView.as_view(), name='contact'),
re_path(r'^', ContactListView.as_view(), name='contacts'),
]
| {
"content_hash": "6696d08eb251d77ffd2f812f009a9c4c",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 64,
"avg_line_length": 35.3,
"alnum_prop": 0.6827195467422096,
"repo_name": "djaodjin/djaodjin-signup",
"id": "26adacae36291ba0ab11cdf33d8515198716c3fa",
"size": "1697",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "signup/urls/views/contacts.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "17514"
},
{
"name": "JavaScript",
"bytes": "89302"
},
{
"name": "Makefile",
"bytes": "3690"
},
{
"name": "Python",
"bytes": "345507"
}
],
"symlink_target": ""
} |
__author__ = 'Yves Bonjour'
from Mock import Mock
class ArticleProxyMock(Mock):
def set_id(self, identifier):
self.identifier = identifier
def add_article(self, title, text, updated_on, feed):
self._handle_method_call("add_article", (title, text, updated_on, feed))
        return self.identifier
| {
"content_hash": "47e658d18bd4e66b9e0301cd863ec04e",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 80,
"avg_line_length": 29.363636363636363,
"alnum_prop": 0.6718266253869969,
"repo_name": "ybonjour/nuus",
"id": "515324ca5da7a34554df45dcadeb3b0b5fb055fe",
"size": "323",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "feedcollector/tests/ArticleProxyMock.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CoffeeScript",
"bytes": "229"
},
{
"name": "JavaScript",
"bytes": "210240"
},
{
"name": "Python",
"bytes": "133059"
},
{
"name": "Ruby",
"bytes": "21666"
},
{
"name": "Shell",
"bytes": "2943"
}
],
"symlink_target": ""
} |
from __future__ import annotations
from typing import Sequence, TYPE_CHECKING, Iterable
from ezdxf.lldxf.tagger import internal_tag_compiler
if TYPE_CHECKING:
from ezdxf.lldxf.types import DXFTag
from ezdxf.lldxf.extendedtags import ExtendedTags
def compile_tags_without_handles(text: str) -> Iterable[DXFTag]:
return (
tag for tag in internal_tag_compiler(text) if tag.code not in (5, 105)
)
def normlines(text: str) -> Sequence[str]:
lines = text.split("\n")
return [line.strip() for line in lines]
def load_section(text: str, name: str) -> list[ExtendedTags]:
from ezdxf.lldxf.loader import load_dxf_structure
dxf = load_dxf_structure(
internal_tag_compiler(text), ignore_missing_eof=True
)
return dxf[name] # type: ignore
def load_entities(text: str, name: str):
from ezdxf.lldxf.loader import load_dxf_structure, load_dxf_entities
dxf = load_dxf_structure(
internal_tag_compiler(text), ignore_missing_eof=True
)
return load_dxf_entities(dxf[name]) # type: ignore
def parse_hex_dump(txt: str) -> bytes:
b = bytearray()
lines = txt.split("\n")
for line in lines:
if line == "":
continue
data = [int(v, 16) for v in line.strip().split(" ")]
assert data[0] == len(b)
b.extend(data[1:])
return b
| {
"content_hash": "d8803d2f860d825d78e8c7a45445b3da",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 78,
"avg_line_length": 28.145833333333332,
"alnum_prop": 0.6550703182827535,
"repo_name": "mozman/ezdxf",
"id": "9e7aebfe9ed9b12069c2be6e4a46aeecee9b9b96",
"size": "1416",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/ezdxf/tools/test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "5745"
},
{
"name": "CSS",
"bytes": "3565"
},
{
"name": "Common Lisp",
"bytes": "727"
},
{
"name": "Cython",
"bytes": "111923"
},
{
"name": "HTML",
"bytes": "1417"
},
{
"name": "JavaScript",
"bytes": "11132"
},
{
"name": "Python",
"bytes": "6336553"
}
],
"symlink_target": ""
} |
import logging
import os
import sys
logging.basicConfig(level=logging.ERROR)
top_dir = os.path.abspath(os.path.join(os.path.dirname(__file__),
os.pardir,
os.pardir))
sys.path.insert(0, top_dir)
import taskflow.engines
from taskflow.patterns import graph_flow as gf
from taskflow.patterns import linear_flow as lf
from taskflow import task
# INTRO: This examples shows how a graph_flow and linear_flow can be used
# together to execute non-dependent tasks by going through the steps required
# to build a simplistic car (an assembly line if you will). It also shows
# how raw functions can be wrapped into a task object instead of being forced
# to use the more heavy task base class. This is useful in scenarios where
# pre-existing code has functions that you easily want to plug-in to taskflow,
# without requiring a large amount of code changes.
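# For example, wrapping a plain function into a task is a one-liner (this is
# exactly what the flow below does):
#
#   task.FunctorTask(build_frame, provides='frame')
#
# The value returned by build_frame() is then made available under the name
# 'frame' to any task whose function arguments (or requires list) name it.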
def build_frame():
return 'steel'
def build_engine():
return 'honda'
def build_doors():
return '2'
def build_wheels():
return '4'
def install_engine(frame, engine):
return True
def install_doors(frame, windows_installed, doors):
return True
def install_windows(frame, doors):
return True
def install_wheels(frame, engine, engine_installed, wheels):
return True
def trash(**kwargs):
print_wrapped("Throwing away pieces of car!")
def print_wrapped(text):
print("-" * (len(text)))
print(text)
print("-" * (len(text)))
def startup(**kwargs):
# If you want to see the rollback function being activated try uncommenting
# the following line.
#
# raise ValueError("Car not verified")
return True
def verify(spec, **kwargs):
# If the car is not what we ordered throw away the car (trigger reversion).
for key, value in kwargs.items():
if spec[key] != value:
raise Exception("Car doesn't match spec!")
return True
# These two functions connect into the state transition notification emission
# points that the engine outputs, they can be used to log state transitions
# that are occurring, or they can be used to suspend the engine (or perform
# other useful activities).
def flow_watch(state, details):
print('Flow => %s' % state)
def task_watch(state, details):
print('Task %s => %s' % (details.get('task_name'), state))
flow = lf.Flow("make-auto").add(
task.FunctorTask(startup, revert=trash, provides='ran'),
gf.Flow("install-parts").add(
task.FunctorTask(build_frame, provides='frame'),
task.FunctorTask(build_engine, provides='engine'),
task.FunctorTask(build_doors, provides='doors'),
task.FunctorTask(build_wheels, provides='wheels'),
# These *_installed outputs allow for other tasks to depend on certain
# actions being performed (aka the components were installed), another
# way to do this is to link() the tasks manually instead of creating
# an 'artificial' data dependency that accomplishes the same goal the
# manual linking would result in.
task.FunctorTask(install_engine, provides='engine_installed'),
task.FunctorTask(install_doors, provides='doors_installed'),
task.FunctorTask(install_windows, provides='windows_installed'),
task.FunctorTask(install_wheels, provides='wheels_installed')),
task.FunctorTask(verify, requires=['frame',
'engine',
'doors',
'wheels',
'engine_installed',
'doors_installed',
'windows_installed',
'wheels_installed']))
# This dictionary will be provided to the tasks as a specification for what
# the tasks should produce, in this example this specification will influence
# what those tasks do and what output they create. Different tasks depend on
# different information from this specification, all of which will be provided
# automatically by the engine.
spec = {
"frame": 'steel',
"engine": 'honda',
"doors": '2',
"wheels": '4',
# These are used to compare the result product, a car without the pieces
# installed is not a car after all.
"engine_installed": True,
"doors_installed": True,
"windows_installed": True,
"wheels_installed": True,
}
engine = taskflow.engines.load(flow, store={'spec': spec.copy()})
# This registers all (*) state transitions to trigger a call to the flow_watch
# function for flow state transitions, and registers the same all (*) state
# transitions for task state transitions.
engine.notifier.register('*', flow_watch)
engine.task_notifier.register('*', task_watch)
print_wrapped("Building a car")
engine.run()
# Alter the specification and ensure that the reverting logic gets triggered
# since the resultant car that will be built by the build_doors function will
# build a car with 2 doors only (not 5); this will cause the verification
# task to mark the car that is produced as not matching the desired spec.
spec['doors'] = 5
engine = taskflow.engines.load(flow, store={'spec': spec.copy()})
engine.notifier.register('*', flow_watch)
engine.task_notifier.register('*', task_watch)
print_wrapped("Building a wrong car that doesn't match specification")
try:
engine.run()
except Exception as e:
print_wrapped("Flow failed: %s" % e)
| {
"content_hash": "01f0de6d6f61d799ac6dc04f8ad0bde1",
"timestamp": "",
"source": "github",
"line_count": 168,
"max_line_length": 79,
"avg_line_length": 32.904761904761905,
"alnum_prop": 0.6653400868306801,
"repo_name": "citrix-openstack-build/taskflow",
"id": "7367c348c44590902e6c91decd37249d5bfda60e",
"size": "6190",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "taskflow/examples/build_a_car.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "702993"
},
{
"name": "Shell",
"bytes": "1988"
}
],
"symlink_target": ""
} |
import datetime
import json
import random
import re
from collections import Counter
from collections import defaultdict
from dateutil.relativedelta import relativedelta
from threading import Thread
from time import time
import requests
from enum import Enum
from lxml import etree
from sqlalchemy import orm, sql
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.orm.attributes import flag_modified
import endpoint
import pmh_record
import oa_evidence
import oa_local
import oa_manual
import oa_page
import page
from abstract import Abstract
from app import db
from app import logger
from http_cache import get_session_id
from journal import Journal
from oa_pmc import query_pmc
from open_location import OpenLocation, validate_pdf_urls, OAStatus, oa_status_sort_key
from pdf_url import PdfUrl
from pmh_record import PmhRecord
from pmh_record import title_is_too_common
from pmh_record import title_is_too_short
from reported_noncompliant_copies import reported_noncompliant_url_fragments
from util import NoDoiException
from util import clean_doi, is_pmc, clamp
from util import elapsed
from util import normalize
from util import normalize_title
from util import safe_commit
from webpage import PublisherWebpage
def build_new_pub(doi, crossref_api):
my_pub = Pub(id=doi, crossref_api_raw_new=crossref_api)
my_pub.title = my_pub.crossref_title
my_pub.normalized_title = normalize_title(my_pub.title)
return my_pub
def add_new_pubs(pubs_to_commit):
if not pubs_to_commit:
return []
pubs_indexed_by_id = dict((my_pub.id, my_pub) for my_pub in pubs_to_commit)
ids_already_in_db = [
id_tuple[0] for id_tuple in db.session.query(Pub.id).filter(Pub.id.in_(pubs_indexed_by_id.keys())).all()
]
pubs_to_add_to_db = []
for (pub_id, my_pub) in pubs_indexed_by_id.iteritems():
if pub_id in ids_already_in_db:
# merge if we need to
pass
else:
pubs_to_add_to_db.append(my_pub)
# logger.info(u"adding new pub {}".format(my_pub.id))
if pubs_to_add_to_db:
logger.info(u"adding {} pubs".format(len(pubs_to_add_to_db)))
db.session.add_all(pubs_to_add_to_db)
safe_commit(db)
return pubs_to_add_to_db
def call_targets_in_parallel(targets):
if not targets:
return
# logger.info(u"calling", targets)
threads = []
for target in targets:
process = Thread(target=target, args=[])
process.start()
threads.append(process)
for process in threads:
try:
process.join(timeout=60*10)
except (KeyboardInterrupt, SystemExit):
pass
except Exception as e:
logger.exception(u"thread Exception {} in call_targets_in_parallel. continuing.".format(e))
# logger.info(u"finished the calls to {}".format(targets))
def call_args_in_parallel(target, args_list):
# logger.info(u"calling", targets)
threads = []
for args in args_list:
process = Thread(target=target, args=args)
process.start()
threads.append(process)
for process in threads:
try:
process.join(timeout=60*10)
except (KeyboardInterrupt, SystemExit):
pass
except Exception as e:
logger.exception(u"thread Exception {} in call_args_in_parallel. continuing.".format(e))
# logger.info(u"finished the calls to {}".format(targets))
def lookup_product_by_doi(doi):
biblio = {"doi": doi}
return lookup_product(**biblio)
def lookup_product(**biblio):
my_pub = None
if "doi" in biblio and biblio["doi"]:
doi = clean_doi(biblio["doi"])
my_pub = Pub.query.get(doi)
if my_pub:
# logger.info(u"found {} in pub db table!".format(my_pub.id))
my_pub.reset_vars()
else:
raise NoDoiException
# my_pub = Crossref(**biblio)
# logger.info(u"didn't find {} in crossref db table".format(my_pub))
return my_pub
def refresh_pub(my_pub, do_commit=False):
my_pub.run_with_hybrid()
db.session.merge(my_pub)
if do_commit:
safe_commit(db)
return my_pub
def thread_result_wrapper(func, args, res):
res.append(func(*args))
# get rid of this when we get rid of POST endpoint
# for now, simplify it so it just calls the single endpoint
def get_pubs_from_biblio(biblios, run_with_hybrid=False):
returned_pubs = []
for biblio in biblios:
returned_pubs.append(get_pub_from_biblio(biblio, run_with_hybrid=run_with_hybrid))
return returned_pubs
def get_pub_from_biblio(biblio, run_with_hybrid=False, skip_all_hybrid=False):
my_pub = lookup_product(**biblio)
if run_with_hybrid:
my_pub.run_with_hybrid()
safe_commit(db)
else:
my_pub.recalculate()
return my_pub
def max_pages_from_one_repo(endpoint_ids):
endpoint_id_counter = Counter(endpoint_ids)
most_common = endpoint_id_counter.most_common(1)
if most_common:
return most_common[0][1]
return None
def get_citeproc_date(year=0, month=1, day=1):
try:
return datetime.date(year, month, day)
except ValueError:
return None
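# For example, get_citeproc_date(2018, 3) returns datetime.date(2018, 3, 1),
# while an impossible date like get_citeproc_date(2018, 2, 30) returns None.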
def csv_dict_from_response_dict(data):
if not data:
return None
response = defaultdict(str)
response["doi"] = data.get("doi", None)
response["doi_url"] = data.get("doi_url", None)
response["is_oa"] = data.get("is_oa", None)
response["oa_status"] = data.get("oa_status", None)
response["genre"] = data.get("genre", None)
response["is_paratext"] = data.get("is_paratext", None)
response["journal_name"] = data.get("journal_name", None)
response["journal_issns"] = data.get("journal_issns", None)
response["journal_issn_l"] = data.get("journal_issn_l", None)
response["journal_is_oa"] = data.get("journal_is_oa", None)
response["publisher"] = data.get("publisher", None)
response["published_date"] = data.get("published_date", None)
response["data_standard"] = data.get("data_standard", None)
best_location_data = data.get("best_oa_location", None)
if not best_location_data:
best_location_data = defaultdict(str)
response["best_oa_url"] = best_location_data.get("url", "")
response["best_oa_url_is_pdf"] = best_location_data.get("url_for_pdf", "") != ""
response["best_oa_evidence"] = best_location_data.get("evidence", None)
response["best_oa_host"] = best_location_data.get("host_type", None)
response["best_oa_version"] = best_location_data.get("version", None)
response["best_oa_license"] = best_location_data.get("license", None)
return response
def build_crossref_record(data):
if not data:
return None
record = {}
simple_fields = [
"publisher",
"subject",
"link",
"license",
"funder",
"type",
"update-to",
"clinical-trial-number",
"ISSN", # needs to be uppercase
"ISBN", # needs to be uppercase
"alternative-id"
]
for field in simple_fields:
if field in data:
record[field.lower()] = data[field]
if "title" in data:
if isinstance(data["title"], basestring):
record["title"] = data["title"]
else:
if data["title"]:
record["title"] = data["title"][0] # first one
if "title" in record and record["title"]:
record["title"] = re.sub(u"\s+", u" ", record["title"])
if "container-title" in data:
record["all_journals"] = data["container-title"]
if isinstance(data["container-title"], basestring):
record["journal"] = data["container-title"]
else:
if data["container-title"]:
record["journal"] = data["container-title"][-1] # last one
# get rid of leading and trailing newlines
if record.get("journal", None):
record["journal"] = record["journal"].strip()
if "author" in data:
# record["authors_json"] = json.dumps(data["author"])
record["all_authors"] = data["author"]
if data["author"]:
first_author = data["author"][0]
if first_author and u"family" in first_author:
record["first_author_lastname"] = first_author["family"]
for author in record["all_authors"]:
if author and "affiliation" in author and not author.get("affiliation", None):
del author["affiliation"]
if "issued" in data:
# record["issued_raw"] = data["issued"]
try:
if "raw" in data["issued"]:
record["year"] = int(data["issued"]["raw"])
elif "date-parts" in data["issued"]:
record["year"] = int(data["issued"]["date-parts"][0][0])
date_parts = data["issued"]["date-parts"][0]
pubdate = get_citeproc_date(*date_parts)
if pubdate:
record["pubdate"] = pubdate.isoformat()
except (IndexError, TypeError):
pass
if "deposited" in data:
try:
record["deposited"] = data["deposited"]["date-time"]
except (IndexError, TypeError):
pass
record["added_timestamp"] = datetime.datetime.utcnow().isoformat()
return record
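# Illustrative note (added; values are hypothetical, not from the original
# source): build_crossref_record flattens a raw Crossref API message into the
# compact dict used throughout this module. An input like
#     {"title": ["A Study"], "container-title": ["J. Examples"],
#      "issued": {"date-parts": [[2018, 3, 1]]}}
# would produce {"title": "A Study", "all_journals": ["J. Examples"],
# "journal": "J. Examples", "year": 2018, "pubdate": "2018-03-01"} plus an
# "added_timestamp" field.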
class PmcidPublishedVersionLookup(db.Model):
pmcid = db.Column(db.Text, db.ForeignKey('pmcid_lookup.pmcid'), primary_key=True)
class PmcidLookup(db.Model):
doi = db.Column(db.Text, db.ForeignKey('pub.id'), primary_key=True)
pmcid = db.Column(db.Text)
release_date = db.Column(db.Text)
pmcid_pubished_version_link = db.relationship(
'PmcidPublishedVersionLookup',
lazy='subquery',
viewonly=True,
cascade="all, delete-orphan",
backref=db.backref("pmcid_lookup", lazy="subquery"),
foreign_keys="PmcidPublishedVersionLookup.pmcid"
)
@property
def version(self):
if self.pmcid_pubished_version_link:
return "publishedVersion"
return "acceptedVersion"
class IssnlLookup(db.Model):
__tablename__ = 'issn_to_issnl'
issn = db.Column(db.Text, primary_key=True)
issn_l = db.Column(db.Text)
class JournalOaStartYear(db.Model):
__tablename__ = 'journal_oa_start_year_patched'
issn_l = db.Column(db.Text, primary_key=True)
title = db.Column(db.Text)
oa_year = db.Column(db.Integer)
class S2Lookup(db.Model):
__tablename__ = 'semantic_scholar'
doi = db.Column(db.Text, primary_key=True)
s2_url = db.Column(db.Text)
s2_pdf_url = db.Column(db.Text)
class GreenScrapeAction(Enum):
scrape_now = 1
queue = 2
none = 3
class Pub(db.Model):
id = db.Column(db.Text, primary_key=True)
updated = db.Column(db.DateTime)
crossref_api_raw_new = db.Column(JSONB)
published_date = db.Column(db.DateTime)
title = db.Column(db.Text)
normalized_title = db.Column(db.Text)
issns_jsonb = db.Column(JSONB)
last_changed_date = db.Column(db.DateTime)
response_jsonb = db.Column(JSONB)
response_is_oa = db.Column(db.Boolean)
response_best_evidence = db.Column(db.Text)
response_best_url = db.Column(db.Text)
response_best_host = db.Column(db.Text)
response_best_repo_id = db.Column(db.Text)
response_best_version = db.Column(db.Text)
scrape_updated = db.Column(db.DateTime)
scrape_evidence = db.Column(db.Text)
scrape_pdf_url = db.Column(db.Text)
scrape_metadata_url = db.Column(db.Text)
scrape_license = db.Column(db.Text)
error = db.Column(db.Text)
rand = db.Column(db.Numeric)
# abstracts = db.relationship(
# 'Abstract',
# lazy='subquery',
# viewonly=True,
# cascade="all, delete-orphan",
# backref=db.backref("pub", lazy="subquery"),
# foreign_keys="Abstract.doi"
# )
pmcid_links = db.relationship(
'PmcidLookup',
lazy='subquery',
viewonly=True,
cascade="all, delete-orphan",
backref=db.backref("pub", lazy="subquery"),
foreign_keys="PmcidLookup.doi"
)
page_matches_by_doi = db.relationship(
'Page',
lazy='subquery',
cascade="all, delete-orphan",
viewonly=True,
enable_typechecks=False,
backref=db.backref("pub_by_doi", lazy="subquery"),
foreign_keys="Page.doi"
)
page_new_matches_by_doi = db.relationship(
'PageDoiMatch',
lazy='subquery',
cascade="",
viewonly=True,
enable_typechecks=False,
backref=db.backref("pub", lazy="subquery"),
foreign_keys="PageDoiMatch.doi"
)
page_new_matches_by_title = db.relationship(
'PageTitleMatch',
lazy='subquery',
cascade="",
viewonly=True,
enable_typechecks=False,
backref=db.backref("pub", lazy="subquery"),
foreign_keys="PageTitleMatch.normalized_title"
)
def __init__(self, **biblio):
self.reset_vars()
self.rand = random.random()
self.license = None
self.free_metadata_url = None
self.free_pdf_url = None
self.oa_status = None
self.evidence = None
self.open_locations = []
self.closed_urls = []
self.session_id = None
self.version = None
self.issn_l = None
# self.updated = datetime.datetime.utcnow()
for (k, v) in biblio.iteritems():
self.__setattr__(k, v)
@orm.reconstructor
def init_on_load(self):
self.reset_vars()
def reset_vars(self):
if self.id and self.id.startswith("10."):
self.id = clean_doi(self.id)
self.license = None
self.free_metadata_url = None
self.free_pdf_url = None
self.oa_status = None
self.evidence = None
self.open_locations = []
self.closed_urls = []
self.session_id = None
self.version = None
self.issn_l = self.lookup_issn_l()
@property
def doi(self):
return self.id
@property
def unpaywall_api_url(self):
return u"https://api.unpaywall.org/v2/{}?email=internal@impactstory.org".format(self.id)
@property
def tdm_api(self):
return None
@property
def crossref_api_raw(self):
record = None
try:
if self.crossref_api_raw_new:
return self.crossref_api_raw_new
except IndexError:
pass
return record
@property
def crossref_api_modified(self):
record = None
if self.crossref_api_raw_new:
try:
return build_crossref_record(self.crossref_api_raw_new)
except IndexError:
pass
if self.crossref_api_raw:
try:
record = build_crossref_record(self.crossref_api_raw)
print "got record"
return record
except IndexError:
pass
return record
@property
def open_urls(self):
# return sorted urls, without dups
urls = []
for location in self.sorted_locations:
if location.best_url not in urls:
urls.append(location.best_url)
return urls
@property
def url(self):
return u"https://doi.org/{}".format(self.id)
@property
def is_oa(self):
return bool(self.fulltext_url)
@property
def is_paratext(self):
paratext_exprs = [
            ur'^Author Index$',
ur'^Back Cover',
ur'^Contents$',
ur'^Contents:',
ur'^Cover Image',
ur'^Cover Picture',
ur'^Editorial Board',
ur'^Front Cover',
ur'^Frontispiece',
ur'^Inside Back Cover',
ur'^Inside Cover',
ur'^Inside Front Cover',
ur'^Issue Information',
ur'^List of contents',
ur'^Masthead',
ur'^Title page',
]
for expr in paratext_exprs:
if self.title and re.search(expr, self.title, re.IGNORECASE):
return True
return False
def recalculate(self, quiet=False):
self.clear_locations()
if self.publisher == "CrossRef Test Account":
self.error += "CrossRef Test Account"
raise NoDoiException
self.find_open_locations()
self.decide_if_open()
self.set_license_hacks()
if self.is_oa and not quiet:
logger.info(u"**REFRESH found a fulltext_url for {}! {}: {} **".format(
self.id, self.oa_status.value, self.fulltext_url))
def refresh_crossref(self):
from put_crossref_in_db import get_api_for_one_doi
self.crossref_api_raw_new = get_api_for_one_doi(self.doi)
def refresh_including_crossref(self):
self.refresh_crossref()
return self.refresh()
def refresh(self, session_id=None):
self.session_id = session_id or get_session_id()
# self.refresh_green_locations()
self.refresh_hybrid_scrape()
        # and then recalculate everything, so can do to_dict() after this and it all works
self.update()
# then do this so the recalculated stuff saves
# it's ok if this takes a long time... is a short time compared to refresh_hybrid_scrape
db.session.merge(self)
def set_results(self):
self.issns_jsonb = self.issns
self.response_jsonb = self.to_dict_v2()
self.response_is_oa = self.is_oa
self.response_best_url = self.best_url
self.response_best_evidence = self.best_evidence
self.response_best_version = self.best_version
self.response_best_host = self.best_host
self.response_best_repo_id = self.best_repo_id
def clear_results(self):
self.response_jsonb = None
self.response_is_oa = None
self.response_best_url = None
self.response_best_evidence = None
self.response_best_version = None
self.response_best_host = None
self.response_best_repo_id = None
self.error = ""
self.issns_jsonb = None
@staticmethod
def ignored_keys_for_internal_diff():
# remove these keys from comparison because their contents are volatile or we don't care about them
return ["updated", "last_changed_date", "x_reported_noncompliant_copies", "x_error", "data_standard"]
@staticmethod
def ignored_keys_for_external_diff():
# remove these keys because they have been added to the api response but we don't want to trigger a diff
return Pub.ignored_keys_for_internal_diff() + ["issn_l", "journal_issn_l", "has_repository_copy", "is_paratext"]
@staticmethod
def ignored_top_level_keys_for_external_diff():
        # the existing ignored-key regex method doesn't work for multiline keys,
        # but we don't want to replace it yet because it works on nested keys
return ["z_authors"]
@staticmethod
def remove_response_keys(jsonb_response, keys):
response_copy = json.loads(json.dumps(jsonb_response))
for key in keys:
try:
del response_copy[key]
except KeyError:
pass
return response_copy
def has_changed(self, old_response_jsonb, ignored_keys, ignored_top_level_keys):
if not old_response_jsonb:
logger.info(u"response for {} has changed: no old response".format(self.id))
return True
copy_of_new_response = Pub.remove_response_keys(self.response_jsonb, ignored_top_level_keys)
copy_of_old_response = Pub.remove_response_keys(old_response_jsonb, ignored_top_level_keys)
# have to sort to compare
copy_of_new_response_in_json = json.dumps(copy_of_new_response, sort_keys=True, indent=2)
# have to sort to compare
copy_of_old_response_in_json = json.dumps(copy_of_old_response, sort_keys=True, indent=2)
for key in ignored_keys:
# remove it
copy_of_new_response_in_json = re.sub(ur'"{}":\s*".+?",?\s*'.format(key), '', copy_of_new_response_in_json)
copy_of_old_response_in_json = re.sub(ur'"{}":\s*".+?",?\s*'.format(key), '', copy_of_old_response_in_json)
# also remove it if it is an empty list
copy_of_new_response_in_json = re.sub(ur'"{}":\s*\[\],?\s*'.format(key), '', copy_of_new_response_in_json)
copy_of_old_response_in_json = re.sub(ur'"{}":\s*\[\],?\s*'.format(key), '', copy_of_old_response_in_json)
# also anything till a comma (gets data_standard)
copy_of_new_response_in_json = re.sub(ur'"{}":\s*.+?,\s*'.format(key), '', copy_of_new_response_in_json)
copy_of_old_response_in_json = re.sub(ur'"{}":\s*.+?,\s*'.format(key), '', copy_of_old_response_in_json)
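        # comparing serialized JSON rather than dicts lets the same regexes
        # strip ignored keys at any nesting depth before the final comparison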
return copy_of_new_response_in_json != copy_of_old_response_in_json
def update(self):
return self.recalculate_and_store()
def recalculate_and_store(self):
if not self.crossref_api_raw_new:
self.crossref_api_raw_new = self.crossref_api_raw
if not self.title:
self.title = self.crossref_title
self.normalized_title = normalize_title(self.title)
if not self.published_date:
self.published_date = self.issued
if not self.rand:
self.rand = random.random()
old_response_jsonb = self.response_jsonb
self.clear_results()
try:
self.recalculate()
except NoDoiException:
logger.info(u"invalid doi {}".format(self))
self.error += "Invalid DOI"
pass
self.set_results()
self.mint_pages()
self.scrape_green_locations(GreenScrapeAction.queue)
self.store_pdf_urls_for_validation()
self.store_refresh_priority()
if self.has_changed(old_response_jsonb, Pub.ignored_keys_for_external_diff(), Pub.ignored_top_level_keys_for_external_diff()):
logger.info(u"changed! updating last_changed_date for this record! {}".format(self.id))
self.last_changed_date = datetime.datetime.utcnow().isoformat()
if self.has_changed(old_response_jsonb, Pub.ignored_keys_for_internal_diff(), []):
logger.info(u"changed! updating updated timestamp for this record! {}".format(self.id))
self.updated = datetime.datetime.utcnow()
flag_modified(self, "response_jsonb") # force it to be saved
# after recalculate, so can know if is open
# self.set_abstracts()
def run(self):
try:
self.recalculate_and_store()
except NoDoiException:
logger.info(u"invalid doi {}".format(self))
self.error += "Invalid DOI"
pass
# logger.info(json.dumps(self.response_jsonb, indent=4))
def run_with_hybrid(self, quiet=False, shortcut_data=None):
logger.info(u"in run_with_hybrid")
self.clear_results()
try:
self.refresh()
except NoDoiException:
logger.info(u"invalid doi {}".format(self))
self.error += "Invalid DOI"
pass
# set whether changed or not
self.set_results()
@property
def has_been_run(self):
if self.evidence:
return True
return False
@property
def best_redirect_url(self):
return self.fulltext_url or self.url
@property
def has_fulltext_url(self):
return self.fulltext_url is not None
@property
def has_license(self):
if not self.license:
return False
if self.license == "unknown":
return False
return True
@property
def clean_doi(self):
if not self.id:
return None
return clean_doi(self.id)
def ask_manual_overrides(self):
if not self.doi:
return
override_dict = oa_manual.get_override_dict(self)
if override_dict is not None:
logger.info(u"manual override for {}".format(self.doi))
self.open_locations = []
if override_dict:
my_location = OpenLocation()
my_location.pdf_url = None
my_location.metadata_url = None
my_location.license = None
my_location.version = None
my_location.evidence = "manual"
my_location.doi = self.doi
# set just what the override dict specifies
for (k, v) in override_dict.iteritems():
setattr(my_location, k, v)
                # open_locations was cleared above, so this becomes the only location
self.open_locations.append(my_location)
@property
def fulltext_url(self):
return self.free_pdf_url or self.free_metadata_url or None
def decide_if_open(self):
# look through the locations here
# overwrites, hence the sorting
self.license = None
self.free_metadata_url = None
self.free_pdf_url = None
self.oa_status = OAStatus.closed
self.version = None
self.evidence = None
reversed_sorted_locations = self.sorted_locations
reversed_sorted_locations.reverse()
# go through all the locations, using valid ones to update the best open url data
for location in reversed_sorted_locations:
self.free_pdf_url = location.pdf_url
self.free_metadata_url = location.metadata_url
self.evidence = location.evidence
self.version = location.version
self.license = location.license
if reversed_sorted_locations:
self.oa_status = sorted(reversed_sorted_locations, key=oa_status_sort_key)[-1].oa_status
# don't return an open license on a closed thing, that's confusing
if not self.fulltext_url:
self.license = None
self.evidence = None
self.oa_status = OAStatus.closed
self.version = None
def clear_locations(self):
self.reset_vars()
@property
def has_hybrid(self):
return any([location.oa_status is OAStatus.hybrid for location in self.all_oa_locations])
@property
def has_gold(self):
return any([location.oa_status is OAStatus.gold for location in self.all_oa_locations])
@property
def has_green(self):
return any([location.oa_status is OAStatus.green for location in self.all_oa_locations])
def refresh_green_locations(self):
for my_page in self.pages:
my_page.scrape()
def refresh_hybrid_scrape(self):
logger.info(u"***** {}: {}".format(self.publisher, self.journal))
# look for hybrid
self.scrape_updated = datetime.datetime.utcnow()
# reset
self.scrape_evidence = None
self.scrape_pdf_url = None
self.scrape_metadata_url = None
self.scrape_license = None
if self.url:
with PublisherWebpage(url=self.url,
related_pub_doi=self.doi,
related_pub_publisher=self.publisher,
session_id=self.session_id) as publisher_landing_page:
# end the session before the scrape
# logger.info(u"closing session for {}".format(self.doi))
db.session.close()
self.scrape_page_for_open_location(publisher_landing_page)
# now merge our object back in
# logger.info(u"after scrape, merging {}".format(self.doi))
db.session.merge(self)
if publisher_landing_page.is_open:
self.scrape_evidence = publisher_landing_page.open_version_source_string
self.scrape_pdf_url = publisher_landing_page.scraped_pdf_url
self.scrape_metadata_url = publisher_landing_page.scraped_open_metadata_url
self.scrape_license = publisher_landing_page.scraped_license
if (publisher_landing_page.is_open
and not publisher_landing_page.scraped_pdf_url
and not publisher_landing_page.use_resolved_landing_url(publisher_landing_page.scraped_open_metadata_url)
):
self.scrape_metadata_url = self.url
# Academic Medicine, delayed OA
if self.issn_l == '1040-2446' and self.issued < datetime.datetime.utcnow().date() - relativedelta(months=14):
if not self.scrape_metadata_url:
self.scrape_evidence = 'open (via free article)'
self.scrape_metadata_url = publisher_landing_page.resolved_url
logger.info('making {} bronze due to delayed OA policy'.format(self.doi))
# Genome Research, delayed OA
if self.issn_l == '1088-9051' and (self.issued < datetime.datetime.utcnow().date() - relativedelta(months=7) or self.scrape_pdf_url):
logger.info('making {} hybrid due to delayed OA policy'.format(self.doi))
self.scrape_evidence = 'open (via page says license)'
self.scrape_metadata_url = self.url
self.scrape_license = 'cc-by-nc'
return
def find_open_locations(self):
# just based on doi
self.ask_local_lookup()
self.ask_pmc()
# based on titles
self.set_title_hacks() # has to be before ask_green_locations, because changes titles
self.ask_green_locations()
self.ask_publisher_equivalent_pages()
self.ask_hybrid_scrape()
self.ask_s2()
self.ask_manual_overrides()
def ask_local_lookup(self):
evidence = None
fulltext_url = self.url
license = None
pdf_url = None
version = "publishedVersion" # default
if oa_local.is_open_via_doaj(self.issns, self.all_journals, self.year):
license = oa_local.is_open_via_doaj(self.issns, self.all_journals, self.year)
evidence = oa_evidence.oa_journal_doaj
elif oa_local.is_open_via_publisher(self.publisher):
evidence = oa_evidence.oa_journal_publisher
elif self.is_open_journal_via_observed_oa_rate():
evidence = oa_evidence.oa_journal_observed
elif oa_local.is_open_via_manual_journal_setting(self.issns, self.year):
evidence = oa_evidence.oa_journal_manual
elif oa_local.is_open_via_doi_fragment(self.doi):
evidence = "oa repository (via doi prefix)"
elif oa_local.is_open_via_url_fragment(self.url):
evidence = "oa repository (via url prefix)"
elif oa_local.is_open_via_license_urls(self.crossref_license_urls, self.issns):
freetext_license = oa_local.is_open_via_license_urls(self.crossref_license_urls, self.issns)
license = oa_local.find_normalized_license(freetext_license)
evidence = "open (via crossref license)"
elif self.open_manuscript_license_urls:
has_open_manuscript = True
freetext_license = self.open_manuscript_license_urls[0]
license = oa_local.find_normalized_license(freetext_license)
if freetext_license and not license:
license = "publisher-specific, author manuscript: {}".format(freetext_license)
version = "acceptedVersion"
if self.is_same_publisher("Elsevier BV"):
elsevier_id = self.crossref_alternative_id
pdf_url = u"https://manuscript.elsevier.com/{}/pdf/{}.pdf".format(elsevier_id, elsevier_id)
elif self.is_same_publisher("American Physical Society (APS)"):
proper_case_id = self.id
proper_case_id = proper_case_id.replace("revmodphys", "RevModPhys")
proper_case_id = proper_case_id.replace("physrevlett", "PhysRevLett")
proper_case_id = proper_case_id.replace("physreva", "PhysRevA")
proper_case_id = proper_case_id.replace("physrevb", "PhysRevB")
proper_case_id = proper_case_id.replace("physrevc", "PhysRevC")
proper_case_id = proper_case_id.replace("physrevd", "PhysRevD")
proper_case_id = proper_case_id.replace("physreve", "PhysRevE")
proper_case_id = proper_case_id.replace("physrevx", "PhysRevX")
proper_case_id = proper_case_id.replace("physrevaccelbeams", "PhysRevAccelBeams")
proper_case_id = proper_case_id.replace("physrevapplied", "PhysRevApplied")
proper_case_id = proper_case_id.replace("physrevphyseducres", "PhysRevPhysEducRes")
proper_case_id = proper_case_id.replace("physrevstper", "PhysRevSTPER")
if proper_case_id != self.id:
pdf_url = u"https://link.aps.org/accepted/{}".format(proper_case_id)
elif self.is_same_publisher("AIP Publishing"):
pdf_url = "https://aip.scitation.org/doi/{}".format(self.id)
elif self.is_same_publisher("IOP Publishing"):
has_open_manuscript = False
elif self.is_same_publisher("Wiley-Blackwell"):
has_open_manuscript = False
elif self.is_same_publisher("Wiley"):
pdf_url = u'https://rss.onlinelibrary.wiley.com/doi/am-pdf/{}'.format(self.doi)
elif self.is_same_publisher("Royal Society of Chemistry (RSC)"):
has_open_manuscript = False
elif self.is_same_publisher("Oxford University Press (OUP)"):
has_open_manuscript = False
# just bail for now. is too hard to figure out which ones are real.
# # IOP isn't trustworthy, and made a fuss, so check them.
# # this includes /ampdf: http://iopscience.iop.org/article/10.1088/0029-5515/55/8/083011
# # this does not: http://iopscience.iop.org/article/10.1088/1741-2552/aad46e
#
# logger.info(u"doing live check on IOP author manuscript")
# r = requests.get("http://iopscience.iop.org/article/{}".format(self.id))
# if "/ampdf" in r.content:
# logger.info(u"is iop open manuscript!")
# pdf_url = "http://iopscience.iop.org/article/{}/ampdf".format(self.id)
# else:
# logger.info(u"is NOT iop open manuscript")
# has_open_manuscript = False
elif freetext_license == u'https://academic.oup.com/journals/pages/open_access/funder_policies/chorus/standard_publication_model':
# license says available after 12 months
if not (self.issued and self.issued < datetime.datetime.utcnow().date() - relativedelta(months=13)):
has_open_manuscript = False
if has_open_manuscript:
evidence = "open (via crossref license, author manuscript)"
if evidence:
my_location = OpenLocation()
my_location.metadata_url = fulltext_url
my_location.license = license
my_location.evidence = evidence
my_location.updated = datetime.datetime.utcnow()
my_location.doi = self.doi
my_location.version = version
if pdf_url:
my_location.pdf_url = pdf_url
self.open_locations.append(my_location)
def ask_pmc(self):
for pmc_obj in self.pmcid_links:
if pmc_obj.release_date == "live":
my_location = OpenLocation()
my_location.metadata_url = "https://www.ncbi.nlm.nih.gov/pmc/articles/{}".format(pmc_obj.pmcid.upper())
# we don't know this has a pdf version
# my_location.pdf_url = "https://www.ncbi.nlm.nih.gov/pmc/articles/{}/pdf".format(pmc_obj.pmcid.upper())
my_location.evidence = "oa repository (via pmcid lookup)"
my_location.updated = datetime.datetime.utcnow()
my_location.doi = self.doi
my_location.version = pmc_obj.version
# set version in one central place for pmc right now, till refactor done
self.open_locations.append(my_location)
@property
def has_stored_hybrid_scrape(self):
return self.scrape_evidence and self.scrape_evidence != "closed"
def ask_hybrid_scrape(self):
if self.has_stored_hybrid_scrape:
my_location = OpenLocation()
my_location.pdf_url = self.scrape_pdf_url
my_location.metadata_url = self.scrape_metadata_url
my_location.license = self.scrape_license
my_location.evidence = self.scrape_evidence
my_location.updated = self.scrape_updated and self.scrape_updated.isoformat()
my_location.doi = self.doi
my_location.version = "publishedVersion"
if not self.issns and self.genre == 'posted-content':
# this is from a preprint server or similar
# treat the publisher site like a repository
my_location.evidence = re.sub(r'.*?(?= \(|$)', 'oa repository', my_location.evidence, 1)
self.open_locations.append(my_location)
@property
def page_matches_by_doi_filtered(self):
return self.page_matches_by_doi + self.page_new_matches_by_doi
@property
def page_matches_by_title_filtered(self):
my_pages = []
if not self.normalized_title:
return my_pages
for my_page in self.page_new_matches_by_title:
# don't do this right now. not sure if it helps or hurts.
# don't check title match if we already know it belongs to a different doi
# if my_page.doi and my_page.doi != self.doi:
# continue
# double check author match
match_type = "title"
if self.first_author_lastname or self.last_author_lastname:
if my_page.authors:
try:
pmh_author_string = normalize(u", ".join(my_page.authors))
if self.first_author_lastname and normalize(self.first_author_lastname) in pmh_author_string:
match_type = "title and first author"
elif self.last_author_lastname and normalize(self.last_author_lastname) in pmh_author_string:
match_type = "title and last author"
else:
# logger.info(
# u"author check fails, so skipping this record. Looked for {} and {} in {}".format(
# self.first_author_lastname, self.last_author_lastname, pmh_author_string))
# logger.info(self.authors)
# don't match if bad author match
continue
except TypeError:
pass # couldn't make author string
my_page.match_evidence = u"oa repository (via OAI-PMH {} match)".format(match_type)
my_pages.append(my_page)
return my_pages
@property
def pages(self):
my_pages = []
# @todo remove these checks once we are just using the new page
if self.normalized_title:
if title_is_too_short(self.normalized_title):
# logger.info(u"title too short! don't match by title")
pass
elif title_is_too_common(self.normalized_title):
# logger.info(u"title too common! don't match by title.")
pass
elif self.id and u'/(issn)' in self.id.lower():
pass
else:
my_pages = self.page_matches_by_title_filtered
        # do DOI matches last: the page objects are the same instances (not copies),
        # so they end up keeping the doi match evidence
for my_page in self.page_matches_by_doi_filtered:
my_page.match_evidence = u"oa repository (via OAI-PMH doi match)"
if not my_page.scrape_version and u"/pmc/" in my_page.url:
my_page.set_info_for_pmc_page()
my_pages.append(my_page)
# eventually only apply this filter to matches by title, once pages only includes
# the doi when it comes straight from the pmh record
if max_pages_from_one_repo([p.endpoint_id for p in self.page_matches_by_title_filtered]) >= 10:
my_pages = []
logger.info(u"matched too many pages in one repo, not allowing matches")
return [
p for p in my_pages
# don't match bioRxiv preprints to themselves
if not (p.doi == self.doi and p.endpoint_id == oa_page.biorxiv_endpoint_id)
]
def ask_green_locations(self):
has_new_green_locations = False
for my_page in [p for p in self.pages if p.pmh_id != oa_page.publisher_equivalent_pmh_id]:
            # this step isn't scraping; it's just looking in the db
# recalculate the version and license based on local PMH metadata in case code changes find more things
if hasattr(my_page, "scrape_version") and my_page.scrape_version is not None:
my_page.update_with_local_info()
if my_page.is_open:
new_open_location = OpenLocation()
new_open_location.pdf_url = my_page.scrape_pdf_url
new_open_location.metadata_url = my_page.scrape_metadata_url
new_open_location.license = my_page.scrape_license
new_open_location.evidence = my_page.match_evidence
new_open_location.version = my_page.scrape_version
new_open_location.updated = my_page.scrape_updated
new_open_location.doi = self.doi
new_open_location.pmh_id = my_page.bare_pmh_id
new_open_location.endpoint_id = my_page.endpoint_id
new_open_location.institution = my_page.repository_display_name
self.open_locations.append(new_open_location)
has_new_green_locations = True
return has_new_green_locations
def ask_publisher_equivalent_pages(self):
has_new_green_locations = False
for my_page in [p for p in self.pages if p.pmh_id == oa_page.publisher_equivalent_pmh_id]:
if my_page.is_open:
new_open_location = OpenLocation()
new_open_location.pdf_url = my_page.scrape_pdf_url
new_open_location.metadata_url = my_page.scrape_metadata_url
new_open_location.license = my_page.scrape_license
new_open_location.evidence = my_page.scrape_version
new_open_location.version = 'publishedVersion'
new_open_location.updated = my_page.scrape_updated
new_open_location.doi = my_page.doi
new_open_location.pmh_id = None
new_open_location.endpoint_id = None
self.open_locations.append(new_open_location)
has_new_green_locations = True
return has_new_green_locations
def ask_s2(self):
lookup = db.session.query(S2Lookup).get(self.doi)
if lookup:
location = OpenLocation()
location.pdf_url = lookup.s2_pdf_url
location.metadata_url = lookup.s2_url
location.evidence = 'oa repository (semantic scholar lookup)'
location.updated = datetime.datetime(2019, 10, 1)
location.doi = self.doi
location.version = 'submittedVersion'
self.open_locations.append(location)
def scrape_green_locations(self, green_scrape=GreenScrapeAction.queue):
for my_page in self.pages:
if isinstance(my_page, page.PageNew):
if green_scrape is GreenScrapeAction.scrape_now:
my_page.scrape_if_matches_pub()
elif green_scrape is GreenScrapeAction.queue:
my_page.enqueue_scrape_if_matches_pub()
# comment out for now so that not scraping by accident
# def scrape_these_pages(self, webpages):
# webpage_arg_list = [[page] for page in webpages]
# call_args_in_parallel(self.scrape_page_for_open_location, webpage_arg_list)
def scrape_page_for_open_location(self, my_webpage):
# logger.info(u"scraping", url)
try:
find_pdf_link = self.should_look_for_publisher_pdf()
if not find_pdf_link:
logger.info('skipping pdf search')
my_webpage.scrape_for_fulltext_link(find_pdf_link=find_pdf_link)
if my_webpage.error:
self.error += my_webpage.error
if my_webpage.is_open:
my_open_location = my_webpage.mint_open_location()
self.open_locations.append(my_open_location)
# logger.info(u"found open version at", webpage.url)
else:
# logger.info(u"didn't find open version at", webpage.url)
pass
except requests.Timeout, e:
self.error += "Timeout in scrape_page_for_open_location on {}: {}".format(
my_webpage, unicode(e.message).encode("utf-8"))
logger.info(self.error)
except requests.exceptions.ConnectionError, e:
self.error += "ConnectionError in scrape_page_for_open_location on {}: {}".format(
my_webpage, unicode(e.message).encode("utf-8"))
logger.info(self.error)
except requests.exceptions.ChunkedEncodingError, e:
self.error += "ChunkedEncodingError in scrape_page_for_open_location on {}: {}".format(
my_webpage, unicode(e.message).encode("utf-8"))
logger.info(self.error)
except requests.exceptions.RequestException, e:
self.error += "RequestException in scrape_page_for_open_location on {}: {}".format(
my_webpage, unicode(e.message).encode("utf-8"))
logger.info(self.error)
except etree.XMLSyntaxError, e:
self.error += "XMLSyntaxError in scrape_page_for_open_location on {}: {}".format(
my_webpage, unicode(e.message).encode("utf-8"))
logger.info(self.error)
except Exception:
logger.exception(u"Exception in scrape_page_for_open_location")
self.error += "Exception in scrape_page_for_open_location"
logger.info(self.error)
def should_look_for_publisher_pdf(self):
if self.genre == 'book':
return False
# landing page has pdfs for every article in issue
if self.issn_l in [
'1818-5487', # Aquatic Invasions
'2072-5981', # Magnetic resonance in solids
]:
return False
if self.issn_l == '0007-0610' and self.year <= 1999:
# British Dental Journal, https://www.nature.com/articles/4806453.pdf
return False
return True
def set_title_hacks(self):
workaround_titles = {
            # these preprints don't have the same title as the doi
# eventually solve these by querying arxiv like this:
# http://export.arxiv.org/api/query?search_query=doi:10.1103/PhysRevD.89.085017
"10.1016/j.astropartphys.2007.12.004": "In situ radioglaciological measurements near Taylor Dome, Antarctica and implications for UHE neutrino astronomy",
"10.1016/s0375-9601(02)01803-0": "Universal quantum computation using only projective measurement, quantum memory, and preparation of the 0 state",
"10.1103/physreva.65.062312": "An entanglement monotone derived from Grover's algorithm",
# crossref has title "aol" for this
# set it to real title
"10.1038/493159a": "Altmetrics: Value all research products",
# crossref has no title for this
"10.1038/23891": "Complete quantum teleportation using nuclear magnetic resonance",
# is a closed-access datacite one, with the open-access version in BASE
            # need to set title here because we're not looking up datacite titles yet (they're usually open access directly)
"10.1515/fabl.1988.29.1.21": u"Thesen zur Verabschiedung des Begriffs der 'historischen Sage'",
# preprint has a different title
"10.1123/iscj.2016-0037": u"METACOGNITION AND PROFESSIONAL JUDGMENT AND DECISION MAKING: IMPORTANCE, APPLICATION AND EVALUATION",
# preprint has a different title
"10.1038/s41477-017-0066-9": u"Low Rate of Somatic Mutations in a Long-Lived Oak Tree",
}
if self.doi in workaround_titles:
self.title = workaround_titles[self.doi]
self.normalized_title = normalize_title(self.title)
def set_license_hacks(self):
if self.fulltext_url and u"harvard.edu/" in self.fulltext_url:
if not self.license or self.license == "unknown":
self.license = "cc-by-nc"
@property
def crossref_alternative_id(self):
try:
return re.sub(ur"\s+", " ", self.crossref_api_raw_new["alternative-id"][0])
except (KeyError, TypeError, AttributeError):
return None
@property
def publisher(self):
try:
return re.sub(u"\s+", " ", self.crossref_api_modified["publisher"])
except (KeyError, TypeError, AttributeError):
return None
@property
def issued(self):
try:
if self.crossref_api_raw_new and "date-parts" in self.crossref_api_raw_new["issued"]:
date_parts = self.crossref_api_raw_new["issued"]["date-parts"][0]
return get_citeproc_date(*date_parts)
except (KeyError, TypeError, AttributeError):
return None
@property
def deposited(self):
try:
if self.crossref_api_raw_new and "date-parts" in self.crossref_api_raw_new["deposited"]:
date_parts = self.crossref_api_raw_new["deposited"]["date-parts"][0]
return get_citeproc_date(*date_parts)
except (KeyError, TypeError, AttributeError):
return None
@property
def open_manuscript_license_urls(self):
try:
license_dicts = self.crossref_api_modified["license"]
author_manuscript_urls = []
# only include licenses that are past the start date
for license_dict in license_dicts:
if license_dict.get("content-version", None):
if license_dict["content-version"] == u"am":
valid_now = True
if license_dict.get("start", None):
if license_dict["start"].get("date-time", None):
license_date = license_dict["start"]["date-time"]
if license_date > (datetime.datetime.utcnow() - self._author_manuscript_delay()).isoformat():
valid_now = False
if valid_now:
author_manuscript_urls.append(license_dict["URL"])
return author_manuscript_urls
except (KeyError, TypeError):
return []
def _author_manuscript_delay(self):
if self.is_same_publisher('Institute of Electrical and Electronics Engineers (IEEE)'):
# policy says 2 years after publication but license date is date of publication
return datetime.timedelta(days=365*2)
else:
return datetime.timedelta()
@property
def crossref_license_urls(self):
try:
license_dicts = self.crossref_api_modified["license"]
license_urls = []
# only include licenses that are past the start date
for license_dict in license_dicts:
if license_dict.get("content-version", None):
if license_dict["content-version"] == u"vor":
valid_now = True
if license_dict.get("start", None):
if license_dict["start"].get("date-time", None):
if license_dict["start"]["date-time"] > datetime.datetime.utcnow().isoformat():
valid_now = False
if valid_now:
license_urls.append(license_dict["URL"])
return license_urls
except (KeyError, TypeError):
return []
@property
def is_subscription_journal(self):
if (
oa_local.is_open_via_doaj(self.issns, self.all_journals, self.year)
or oa_local.is_open_via_doi_fragment(self.doi)
or oa_local.is_open_via_publisher(self.publisher)
or self.is_open_journal_via_observed_oa_rate()
or oa_local.is_open_via_manual_journal_setting(self.issns, self.year)
or oa_local.is_open_via_url_fragment(self.url)
):
return False
return True
@property
def doi_resolver(self):
if not self.doi:
return None
if oa_local.is_open_via_datacite_prefix(self.doi):
return "datacite"
if self.crossref_api_modified and "error" not in self.crossref_api_modified:
return "crossref"
return None
@property
def is_free_to_read(self):
return bool(self.fulltext_url)
@property
def is_boai_license(self):
boai_licenses = ["cc-by", "cc0", "pd"]
if self.license and (self.license in boai_licenses):
return True
return False
@property
def authors(self):
try:
return self.crossref_api_modified["all_authors"]
except (AttributeError, TypeError, KeyError):
return None
@property
def first_author_lastname(self):
try:
return self.crossref_api_modified["first_author_lastname"]
except (AttributeError, TypeError, KeyError):
return None
@property
def last_author_lastname(self):
try:
last_author = self.authors[-1]
return last_author["family"]
except (AttributeError, TypeError, KeyError):
return None
@property
def display_issns(self):
if self.issns:
return ",".join(self.issns)
return None
@property
def issns(self):
issns = []
        try:
            issns = self.crossref_api_modified["issn"]
        except (AttributeError, TypeError, KeyError):
            if self.tdm_api:
                issns = re.findall(u"<issn media_type=.*>(.*)</issn>", self.tdm_api)
if not issns:
return None
else:
return issns
@property
def best_title(self):
if hasattr(self, "title") and self.title:
return re.sub(u"\s+", " ", self.title)
return self.crossref_title
@property
def crossref_title(self):
try:
return re.sub(u"\s+", " ", self.crossref_api_modified["title"])
except (AttributeError, TypeError, KeyError, IndexError):
return None
@property
def year(self):
try:
return self.crossref_api_modified["year"]
except (AttributeError, TypeError, KeyError, IndexError):
return None
@property
def journal(self):
try:
return re.sub(u"\s+", " ", self.crossref_api_modified["journal"])
except (AttributeError, TypeError, KeyError, IndexError):
return None
@property
def all_journals(self):
try:
return self.crossref_api_modified["all_journals"]
except (AttributeError, TypeError, KeyError, IndexError):
return None
@property
def genre(self):
try:
return re.sub(u"\s+", " ", self.crossref_api_modified["type"])
except (AttributeError, TypeError, KeyError):
return None
@property
def abstract_from_crossref(self):
try:
return self.crossref_api_raw_new["abstract"]
except (AttributeError, TypeError, KeyError):
return None
@property
def deduped_sorted_locations(self):
locations = []
for next_location in self.sorted_locations:
urls_so_far = [location.best_url for location in locations]
if next_location.best_url not in urls_so_far:
locations.append(next_location)
return locations
@property
def filtered_locations(self):
locations = self.open_locations
# now remove noncompliant ones
compliant_locations = [location for location in locations if not location.is_reported_noncompliant]
validate_pdf_urls(compliant_locations)
valid_locations = [
x for x in compliant_locations
if x.pdf_url_valid
and not (self.has_bad_doi_url and x.best_url == self.url)
and x.endpoint_id != '01b84da34b861aa938d' # lots of abstracts presented as full text. find a better way to do this.
and x.endpoint_id != '58e562cef9eb07c3c1d' # garbage PDFs in identifier tags
]
return valid_locations
@property
def sorted_locations(self):
locations = self.filtered_locations
# first sort by best_url so ties are handled consistently
locations = sorted(locations, key=lambda x: x.best_url, reverse=False)
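        # (Python's sort is stable, so this pre-sort makes the final order
        # deterministic whenever sort scores tie)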
# now sort by what's actually better
locations = sorted(locations, key=lambda x: x.sort_score, reverse=False)
return locations
@property
def data_standard(self):
if self.scrape_updated and not self.error:
return 2
else:
return 1
def lookup_issn_l(self):
for issn in self.issns or []:
# use the first issn that matches an issn_l
# can't really do anything if they would match different issn_ls
lookup = db.session.query(IssnlLookup).get(issn)
if lookup:
return lookup.issn_l
return None
def lookup_journal(self):
return self.issn_l and db.session.query(Journal).options(
orm.defer('api_raw_crossref'), orm.defer('api_raw_issn')
).get({'issn_l': self.issn_l})
def get_resolved_url(self):
if hasattr(self, "my_resolved_url_cached"):
return self.my_resolved_url_cached
try:
r = requests.get("http://doi.org/{}".format(self.id),
stream=True,
allow_redirects=True,
timeout=(3,3),
verify=False
)
self.my_resolved_url_cached = r.url
except Exception: #hardly ever do this, but man it seems worth it right here
logger.exception(u"get_resolved_url failed")
self.my_resolved_url_cached = None
return self.my_resolved_url_cached
def __repr__(self):
if self.id:
my_string = self.id
else:
my_string = self.best_title
return u"<Pub ( {} )>".format(my_string)
@property
def reported_noncompliant_copies(self):
return reported_noncompliant_url_fragments(self.doi)
def is_same_publisher(self, publisher):
if self.publisher:
return normalize(self.publisher) == normalize(publisher)
return False
@property
def best_url(self):
if not self.best_oa_location:
return None
return self.best_oa_location.best_url
@property
def best_url_is_pdf(self):
if not self.best_oa_location:
return None
return self.best_oa_location.best_url_is_pdf
@property
def best_evidence(self):
if not self.best_oa_location:
return None
return self.best_oa_location.display_evidence
@property
def best_host(self):
if not self.best_oa_location:
return None
return self.best_oa_location.host_type
@property
def best_repo_id(self):
if self.best_host != 'repository':
return None
return self.best_oa_location.endpoint_id
@property
def best_license(self):
if not self.best_oa_location:
return None
return self.best_oa_location.license
@property
def best_version(self):
if not self.best_oa_location:
return None
return self.best_oa_location.version
@property
def best_oa_location_dict(self):
best_location = self.best_oa_location
if best_location:
return best_location.to_dict_v2()
return None
@property
def best_oa_location(self):
all_locations = [location for location in self.all_oa_locations]
if all_locations:
return all_locations[0]
return None
@property
def all_oa_locations(self):
all_locations = [location for location in self.deduped_sorted_locations]
if all_locations:
for location in all_locations:
location.is_best = False
all_locations[0].is_best = True
return all_locations
def all_oa_location_dicts(self):
return [location.to_dict_v2() for location in self.all_oa_locations]
def to_dict_v1(self):
response = {
"algorithm_version": self.data_standard,
"doi_resolver": self.doi_resolver,
"evidence": self.evidence,
"free_fulltext_url": self.fulltext_url,
"is_boai_license": self.is_boai_license,
"is_free_to_read": self.is_free_to_read,
"is_subscription_journal": self.is_subscription_journal,
"license": self.license,
"oa_color": self.oa_status and self.oa_status.value,
"reported_noncompliant_copies": self.reported_noncompliant_copies
}
for k in ["doi", "title", "url"]:
value = getattr(self, k, None)
if value:
response[k] = value
if self.error:
response["error"] = self.error
return response
@property
def best_location(self):
if not self.deduped_sorted_locations:
return None
return self.deduped_sorted_locations[0]
@property
def is_archived_somewhere(self):
if self.is_oa:
return any([location.oa_status is OAStatus.green for location in self.deduped_sorted_locations])
return None
@property
def oa_is_doaj_journal(self):
if self.is_oa:
if oa_local.is_open_via_doaj(self.issns, self.all_journals, self.year):
return True
else:
return False
return False
@property
def oa_is_open_journal(self):
if self.is_oa:
if self.oa_is_doaj_journal:
return True
if oa_local.is_open_via_publisher(self.publisher):
return True
if oa_local.is_open_via_manual_journal_setting(self.issns, self.year):
return True
if self.is_open_journal_via_observed_oa_rate():
return True
return False
@property
def display_updated(self):
if self.updated:
return self.updated.isoformat()
return None
@property
def has_abstract(self):
if self.abstracts:
return True
return False
@property
def display_abstracts(self):
# self.set_abstracts()
# return [a.to_dict() for a in self.abstracts]
return []
@property
def refresh_priority(self):
published = self.issued or self.deposited or datetime.date(1970, 1, 1)
if published > datetime.date.today():
# refresh things that aren't published yet infrequently
refresh_interval = datetime.timedelta(days=365)
else:
today = datetime.date.today()
journal = self.lookup_journal()
if journal and journal.delayed_oa:
# treat every 6th mensiversary for the first 4 years like the publication date
six_months = relativedelta(months=6)
shifts = 0
while shifts < 8 and published < today - six_months:
published += six_months
shifts += 1
age = today - published
# arbitrary scale factor, refresh newer things more often
refresh_interval = age / 6
if self.genre == 'component':
refresh_interval *= 2
refresh_interval = clamp(refresh_interval, datetime.timedelta(days=2), datetime.timedelta(days=365))
last_refresh = self.scrape_updated or datetime.datetime(1970, 1, 1)
since_last_refresh = datetime.datetime.utcnow() - last_refresh
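        # a positive priority means the record is overdue: e.g. (illustrative
        # numbers) 45 days since the last refresh with a 30-day interval gives
        # (45 - 30) / 30 = 0.5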
priority = (since_last_refresh - refresh_interval).total_seconds() / refresh_interval.total_seconds()
return priority
@property
def has_bad_doi_url(self):
return (
(self.issns and (
# links don't resolve
'1507-1367' in self.issns or
# links don't resolve
'0718-1876' in self.issns or
# links don't resolve
'2237-0722' in self.issns
)) or
# pdf abstracts
self.id.startswith('10.5004/dwt.')
)
def is_open_journal_via_observed_oa_rate(self):
lookup = self.issn_l and db.session.query(JournalOaStartYear).get({'issn_l': self.issn_l})
return lookup and self.issued and self.issued.year >= lookup.oa_year
def store_refresh_priority(self):
stmt = sql.text(
u'update pub_refresh_queue set priority = :priority where id = :id'
).bindparams(priority=self.refresh_priority, id=self.id)
db.session.execute(stmt)
def store_pdf_urls_for_validation(self):
urls = {loc.pdf_url for loc in self.open_locations if loc.pdf_url and not is_pmc(loc.pdf_url)}
for url in urls:
db.session.merge(
PdfUrl(url=url, publisher=self.publisher)
)
def mint_pages(self):
for p in oa_page.make_oa_pages(self):
db.session.merge(p)
def set_abstracts(self):
start_time = time()
abstract_objects = []
# already have abstracts, don't keep trying
if self.abstracts:
logger.info(u"already had abstract stored!")
return
# try locally first
if self.abstract_from_crossref:
abstract_objects.append(Abstract(source="crossref", source_id=self.doi, abstract=self.abstract_from_crossref, doi=self.id))
pmh_ids = [p.pmh_id for p in self.pages if p.pmh_id]
if pmh_ids:
pmh_records = db.session.query(PmhRecord).filter(PmhRecord.id.in_(pmh_ids)).all()
for pmh_record in pmh_records:
api_contents = pmh_record.api_raw.replace("\n", " ")
matches = re.findall(u"<dc:description>(.*?)</dc:description>", api_contents, re.IGNORECASE | re.MULTILINE)
if matches:
concat_description = u"\n".join(matches).strip()
abstract_objects.append(Abstract(source="pmh", source_id=pmh_record.id, abstract=concat_description, doi=self.id))
# the more time consuming checks, only do them if the paper is open and recent for now
# if self.is_oa and self.year and self.year == 2018:
if self.is_oa and self.year and self.year >= 2017:
# if nothing yet, query pmc with doi
if not abstract_objects:
result_list = query_pmc(self.id)
for result in result_list:
if result.get("doi", None) == self.id:
pmid = result.get("pmid", None)
if u"abstractText" in result:
abstract_text = result["abstractText"]
abstract_obj = Abstract(source="pubmed", source_id=pmid, abstract=abstract_text, doi=self.id)
try:
abstract_obj.mesh = result["meshHeadingList"]["meshHeading"]
except KeyError:
pass
try:
abstract_obj.keywords = result["keywordList"]["keyword"]
except KeyError:
pass
abstract_objects.append(abstract_obj)
logger.info(u"got abstract from pubmed")
# removed mendeley from requirements for now due to library conflicts
# if not abstract_objects:
# from oa_mendeley import query_mendeley
# result = query_mendeley(self.id)
# if result and result["abstract"]:
# mendeley_url = result["mendeley_url"]
# abstract_obj = Abstract(source="mendeley", source_id=mendeley_url, abstract=result["abstract"], doi=self.id)
# abstract_objects.append(abstract_obj)
# logger.info(u"GOT abstract from mendeley for {}".format(self.id))
# else:
# logger.info(u"no abstract in mendeley for {}".format(self.id))
logger.info(u"spent {} seconds getting abstracts for {}, success: {}".format(elapsed(start_time), self.id, len(abstract_objects)>0))
# make sure to save what we got
for abstract in abstract_objects:
if abstract.source_id not in [a.source_id for a in self.abstracts]:
self.abstracts.append(abstract)
def to_dict_v2(self):
response = {
"doi": self.doi,
"doi_url": self.url,
"is_oa": self.is_oa,
"oa_status": self.oa_status and self.oa_status.value,
"best_oa_location": self.best_oa_location_dict,
"oa_locations": self.all_oa_location_dicts(),
"has_repository_copy": self.has_green,
"data_standard": self.data_standard,
"title": self.best_title,
"year": self.year,
"journal_is_oa": self.oa_is_open_journal,
"journal_is_in_doaj": self.oa_is_doaj_journal,
"journal_issns": self.display_issns,
"journal_issn_l": self.issn_l,
"journal_name": self.journal,
"publisher": self.publisher,
"published_date": self.issued and self.issued.isoformat(),
"updated": self.display_updated,
"genre": self.genre,
"is_paratext": self.is_paratext,
"z_authors": self.authors,
# "abstracts": self.display_abstracts,
}
# if self.error:
# response["x_error"] = True
return response
def to_dict_search(self):
response = self.to_dict_v2()
response["abstracts"] = self.display_abstracts
del response["z_authors"]
if self.authors:
response["author_lastnames"] = [author.get("family", None) for author in self.authors]
else:
response["author_lastnames"] = []
if not hasattr(self, "score"):
self.score = None
response["score"] = self.score
if not hasattr(self, "snippet"):
self.snippet = None
response["snippet"] = self.snippet
return response
# db.create_all()
# commit_success = safe_commit(db)
# if not commit_success:
# logger.info(u"COMMIT fail making objects")
| {
"content_hash": "ce98f6d88ce51fa8e964cc33275f7230",
"timestamp": "",
"source": "github",
"line_count": 1928,
"max_line_length": 166,
"avg_line_length": 37.51348547717842,
"alnum_prop": 0.5870364737438819,
"repo_name": "Impactstory/oadoi",
"id": "dbd4540efb8b388d66512492ac49a566e622887a",
"size": "72326",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pub.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2022"
},
{
"name": "PLpgSQL",
"bytes": "11112"
},
{
"name": "Python",
"bytes": "509115"
},
{
"name": "Shell",
"bytes": "3599"
},
{
"name": "TSQL",
"bytes": "596"
}
],
"symlink_target": ""
} |
def merge_sort(array):
"""
An implementation of the merge sort algorithm. Recursively sorts arrays
by calling merge sort on halves of the array, then merging by comparing
the first element of each array, then adding the smaller of the two to a
the sorted array.
"""
# Base Cases
if len(array) == 1 or len(array) == 0:
return array
elif len(array) == 2:
if array[0] > array[1]:
array[0], array[1] = array[1], array[0]
return array
else:
i, j, sort = 0, 0, []
# Split the array into 2 equal parts and sort them
middle = int(len(array)/2)
left = merge_sort(array[:middle].copy())
right = merge_sort(array[middle:].copy())
# Merge the sorted halves
ll, lr = len(left), len(right)
        while i < ll and j < lr:
            if left[i] <= right[j]:
                sort.append(left[i])
                i += 1
            else:
                sort.append(right[j])
                j += 1
# Add anything left over
sort += left[i:]
sort += right[j:]
return sort
def merge_sort_from_file(file_path):
    """
    Performs merge sort on the contents of a text file that contains
    one integer per line.
    """
    with open(file_path) as f:
        return merge_sort([int(line) for line in f])
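# A minimal usage sketch (added for illustration; the values and file name are
# hypothetical, not part of the original module):
if __name__ == "__main__":
    print(merge_sort([5, 2, 9, 1, 5, 6]))  # -> [1, 2, 5, 5, 6, 9]
    # merge_sort_from_file("numbers.txt") would sort a file of one integer per line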
| {
"content_hash": "c4b05446c779191be12ba9d7e00e11a0",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 76,
"avg_line_length": 33,
"alnum_prop": 0.5461936437546193,
"repo_name": "doug-wade/AlgorithmsGreatestHits",
"id": "7a9af552ba8b8b19dc208d6fc838d37bdd062715",
"size": "1353",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sort_algorithms/merge_sort.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "86606"
}
],
"symlink_target": ""
} |
"""API for the glance service."""
from django import forms
from django.views.decorators.csrf import csrf_exempt
from django.views import generic
from six.moves import zip as izip
from openstack_dashboard import api
from openstack_dashboard.api.rest import urls
from openstack_dashboard.api.rest import utils as rest_utils
CLIENT_KEYWORDS = {'resource_type', 'marker',
'sort_dir', 'sort_key', 'paginate'}
@urls.register
class Version(generic.View):
"""API for active glance version."""
url_regex = r'glance/version/$'
@rest_utils.ajax()
def get(self, request):
"""Get active glance version."""
return {'version': str(api.glance.get_version())}
@urls.register
class Image(generic.View):
"""API for retrieving a single image"""
url_regex = r'glance/images/(?P<image_id>[^/]+|default)/$'
@rest_utils.ajax()
def get(self, request, image_id):
"""Get a specific image
http://localhost/api/glance/images/cc758c90-3d98-4ea1-af44-aab405c9c915
"""
image = api.glance.image_get(request, image_id)
return image.to_dict(show_ext_attrs=True)
@rest_utils.ajax(data_required=True)
def patch(self, request, image_id):
"""Update a specific image
        Update an Image using the parameters supplied in the PATCH
        application/json body. The parameters are:
:param name: (required) the name to give the image
:param description: (optional) description of the image
:param disk_format: (required) format of the image
:param kernel: (optional) kernel to use for the image
:param ramdisk: (optional) Ramdisk to use for the image
:param architecture: (optional) the Architecture of the image
:param min_disk: (optional) the minimum disk size
for the image to boot with
:param min_ram: (optional) the minimum ram for the image to boot with
:param visibility: (required) takes 'public', 'shared', and 'private'
:param protected: (required) true if the image is protected
Any parameters not listed above will be assigned as custom properties
for the image.
http://localhost/api/glance/images/cc758c90-3d98-4ea1-af44-aab405c9c915
"""
meta = create_image_metadata(request.DATA)
api.glance.image_update(request, image_id, **meta)
@rest_utils.ajax()
def delete(self, request, image_id):
"""Delete a specific image
DELETE http://localhost/api/glance/images/<image_id>
"""
api.glance.image_delete(request, image_id)
@urls.register
class ImageProperties(generic.View):
"""API for retrieving only a custom properties of single image."""
url_regex = r'glance/images/(?P<image_id>[^/]+)/properties/'
@rest_utils.ajax()
def get(self, request, image_id):
"""Get custom properties of specific image."""
return api.glance.image_get(request, image_id).properties
@rest_utils.ajax(data_required=True)
def patch(self, request, image_id):
"""Update custom properties of specific image.
This method returns HTTP 204 (no content) on success.
"""
api.glance.image_update_properties(
request, image_id, request.DATA.get('removed'),
**request.DATA['updated']
)
class UploadObjectForm(forms.Form):
data = forms.FileField(required=False)
@urls.register
class Images(generic.View):
"""API for Glance images."""
url_regex = r'glance/images/$'
@rest_utils.ajax()
def get(self, request):
"""Get a list of images.
The listing result is an object with property "items". Each item is
an image.
Example GET:
http://localhost/api/glance/images?sort_dir=desc&sort_key=name&name=cirros-0.3.2-x86_64-uec
The following get parameters may be passed in the GET
request:
:param paginate: If true will perform pagination based on settings.
        :param marker: Specifies the identifier of the last-seen image.
             The typical pattern of limit and marker is to make an
             initial limited request and then to use the last
             image identifier from the response as the marker parameter
in a subsequent limited request. With paginate, limit
is automatically set.
:param sort_dir: The sort direction ('asc' or 'desc').
:param sort_key: The field to sort on (for example, 'created_at').
Default is created_at.
Any additional request parameters will be passed through the API as
filters. There are v1/v2 complications which are being addressed as a
separate work stream: https://review.openstack.org/#/c/150084/
"""
filters, kwargs = rest_utils.parse_filters_kwargs(request,
CLIENT_KEYWORDS)
images, has_more_data, has_prev_data = api.glance.image_list_detailed(
request, filters=filters, **kwargs)
return {
'items': [i.to_dict() for i in images],
'has_more_data': has_more_data,
'has_prev_data': has_prev_data,
}
# note: not an AJAX request - the body will be raw file content mixed with
# metadata
@rest_utils.post2data
@csrf_exempt
def post(self, request):
form = UploadObjectForm(request.DATA, request.FILES)
if not form.is_valid():
raise rest_utils.AjaxError(500, 'Invalid request')
data = form.clean()
meta = create_image_metadata(request.DATA)
meta['data'] = data['data']
image = api.glance.image_create(request, **meta)
return rest_utils.CreatedResponse(
'/api/glance/images/%s' % image.name,
image.to_dict()
)
@rest_utils.ajax(data_required=True)
def put(self, request):
"""Create an Image.
        Create an Image using the parameters supplied in the PUT
application/json object. The parameters are:
:param name: the name to give the image
:param description: (optional) description of the image
:param source_type: (required) source type.
current only 'url' is supported
:param image_url: (required) URL to get the image
:param disk_format: (required) format of the image
:param kernel: (optional) kernel to use for the image
:param ramdisk: (optional) Ramdisk to use for the image
:param architecture: (optional) the Architecture of the image
:param min_disk: (optional) the minimum disk size
for the image to boot with
:param min_ram: (optional) the minimum ram for the image to boot with
:param visibility: (required) takes 'public', 'private', and 'shared'
:param protected: (required) true if the image is protected
:param import_data: (optional) true to copy the image data
to the image service or use it from the current location
Any parameters not listed above will be assigned as custom properties
for the image.
This returns the new image object on success.
"""
meta = create_image_metadata(request.DATA)
if request.DATA.get('image_url'):
if request.DATA.get('import_data'):
meta['copy_from'] = request.DATA.get('image_url')
else:
meta['location'] = request.DATA.get('image_url')
else:
meta['data'] = request.DATA.get('data')
image = api.glance.image_create(request, **meta)
return rest_utils.CreatedResponse(
'/api/glance/images/%s' % image.name,
image.to_dict()
)
@urls.register
class MetadefsNamespaces(generic.View):
"""API for Single Glance Metadata Definitions.
https://docs.openstack.org/glance/latest/user/metadefs-concepts.html
"""
url_regex = r'glance/metadefs/namespaces/$'
@rest_utils.ajax()
def get(self, request):
"""Get a list of metadata definition namespaces.
The listing result is an object with property "items". Each item is
a namespace.
Example GET:
http://localhost/api/glance/metadefs/namespaces?resource_types=OS::Nova::Flavor&sort_dir=desc&marker=OS::Compute::Watchdog&paginate=False&sort_key=namespace
The following get parameters may be passed in the GET
request:
        :param resource_type: Namespace resource type.
            If specified, the returned namespace properties will have the
            prefixes appropriate for the selected resource type.
:param paginate: If true will perform pagination based on settings.
:param marker: Specifies the namespace of the last-seen namespace.
The typical pattern of limit and marker is to make an
initial limited request and then to use the last
namespace from the response as the marker parameter
in a subsequent limited request. With paginate, limit
is automatically set.
:param sort_dir: The sort direction ('asc' or 'desc').
:param sort_key: The field to sort on (for example, 'created_at').
            Default is namespace. Base namespaces are typically loaded into
            glance in a single transaction at first deployment, so sorting
            them by created_at can give an unpredictable order.
Any additional request parameters will be passed through the API as
filters.
"""
filters, kwargs = rest_utils.parse_filters_kwargs(
request, CLIENT_KEYWORDS
)
names = ('items', 'has_more_data', 'has_prev_data')
return dict(izip(names, api.glance.metadefs_namespace_full_list(
request, filters=filters, **kwargs
)))
@urls.register
class MetadefsResourceTypesList(generic.View):
"""API for getting Metadata Definitions Resource Types List.
https://docs.openstack.org/glance/latest/user/metadefs-concepts.html
"""
url_regex = r'glance/metadefs/resourcetypes/$'
@rest_utils.ajax()
def get(self, request):
"""Get Metadata definitions resource types list.
The listing result is an object with property "items". Each item is
a resource type.
Example GET:
http://localhost/api/glance/resourcetypes/
Any request parameters will be passed through the API as filters.
"""
return {
'items': [resource_type for resource_type in
api.glance.metadefs_resource_types_list(request)]
}
def create_image_metadata(data):
    """Use the given dict of image form data to generate the metadata used for
    creating the image in glance.
    """
    try:
meta = {'protected': data.get('protected'),
'min_disk': data.get('min_disk', 0),
'min_ram': data.get('min_ram', 0),
'name': data.get('name'),
'disk_format': data.get('disk_format'),
'container_format': data.get('container_format')}
properties = {}
# 'architecture' will be directly mapped
# into the .properties by the handle_unknown_properties function.
# 'kernel' and 'ramdisk' need to get specifically mapped for backwards
# compatibility.
props = data.get('properties')
if props and props.get('description'):
properties['description'] = props.get('description')
if data.get('kernel'):
properties['kernel_id'] = data.get('kernel')
if data.get('ramdisk'):
properties['ramdisk_id'] = data.get('ramdisk')
handle_unknown_properties(data, properties)
if api.glance.VERSIONS.active >= 2:
meta.update(properties)
else:
meta['properties'] = properties
handle_visibility(data.get('visibility'), meta)
except KeyError as e:
raise rest_utils.AjaxError(400,
'missing required parameter %s' % e.args[0])
return meta
def handle_unknown_properties(data, properties):
# The Glance API takes in both known and unknown fields. Unknown fields
# are assumed as metadata. To achieve this and continue to use the
# existing horizon api wrapper, we need this function. This way, the
# client REST mirrors the Glance API.
known_props = ['visibility', 'protected', 'disk_format',
'container_format', 'min_disk', 'min_ram', 'name',
'properties', 'kernel', 'ramdisk',
'tags', 'import_data', 'source',
'image_url', 'source_type', 'data',
'checksum', 'created_at', 'deleted', 'is_copying',
'deleted_at', 'is_public', 'virtual_size',
'status', 'size', 'owner', 'id', 'updated_at']
other_props = {k: v for (k, v) in data.items() if k not in known_props}
properties.update(other_props)
def handle_visibility(visibility, meta):
mapping_to_v1 = {'public': True, 'private': False, 'shared': False}
# note: presence of 'visibility' previously checked for in general call
try:
is_public = mapping_to_v1[visibility]
if api.glance.VERSIONS.active >= 2:
meta['visibility'] = visibility
else:
meta['is_public'] = is_public
except KeyError as e:
raise rest_utils.AjaxError(400,
'invalid visibility option: %s' % e.args[0])
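# Illustrative sketch (hypothetical form data, not part of the Horizon API):
# the shape of the dict create_image_metadata() consumes and, assuming the
# active glance version is >= 2, roughly what it produces. 'architecture' is
# not a known field, so handle_unknown_properties() folds it into the result.
#
#   data = {'name': 'cirros', 'disk_format': 'qcow2',
#           'container_format': 'bare', 'visibility': 'public',
#           'protected': False, 'min_disk': 0, 'min_ram': 0,
#           'architecture': 'x86_64'}
#   meta = create_image_metadata(data)
#   # meta -> {'protected': False, 'min_disk': 0, 'min_ram': 0,
#   #          'name': 'cirros', 'disk_format': 'qcow2',
#   #          'container_format': 'bare', 'architecture': 'x86_64',
#   #          'visibility': 'public'}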
| {
"content_hash": "817eefd61fdd64d1f38c9ca8b56721b4",
"timestamp": "",
"source": "github",
"line_count": 361,
"max_line_length": 164,
"avg_line_length": 37.77562326869806,
"alnum_prop": 0.6227909364229669,
"repo_name": "yeming233/horizon",
"id": "cca4fd8d3865085cd5770d3181188051a64f8190",
"size": "14242",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "openstack_dashboard/api/rest/glance.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "105527"
},
{
"name": "HTML",
"bytes": "517093"
},
{
"name": "JavaScript",
"bytes": "953373"
},
{
"name": "Makefile",
"bytes": "588"
},
{
"name": "Python",
"bytes": "4845896"
},
{
"name": "Shell",
"bytes": "18658"
}
],
"symlink_target": ""
} |
import logging
import traceback
from pyscreenshot.childproc import childprocess_grab
from pyscreenshot.err import FailedBackendError
from pyscreenshot.plugins.gdk3pixbuf import Gdk3PixbufWrapper
from pyscreenshot.plugins.freedesktop_dbus import FreedesktopDBusWrapper
from pyscreenshot.plugins.gnome_dbus import GnomeDBusWrapper
from pyscreenshot.plugins.gnome_screenshot import GnomeScreenshotWrapper
from pyscreenshot.plugins.grim import GrimWrapper
from pyscreenshot.plugins.imagemagick import ImagemagickWrapper
from pyscreenshot.plugins.kwin_dbus import KwinDBusWrapper
from pyscreenshot.plugins.mac_quartz import MacQuartzWrapper
from pyscreenshot.plugins.mac_screencapture import ScreencaptureWrapper
from pyscreenshot.plugins.maim import MaimWrapper
from pyscreenshot.plugins.msswrap import MssWrapper
from pyscreenshot.plugins.pilwrap import PilWrapper
from pyscreenshot.plugins.pyside2_grabwindow import PySide2GrabWindow
from pyscreenshot.plugins.pyside_grabwindow import PySideGrabWindow
from pyscreenshot.plugins.qt4grabwindow import Qt4GrabWindow
from pyscreenshot.plugins.qt5grabwindow import Qt5GrabWindow
from pyscreenshot.plugins.scrot import ScrotWrapper
from pyscreenshot.plugins.wxscreen import WxScreen
from pyscreenshot.plugins.ksnip import KsnipWrapper
from pyscreenshot.util import (
platform_is_linux,
platform_is_osx,
platform_is_win,
use_x_display,
)
log = logging.getLogger(__name__)
backend_dict = {
PilWrapper.name: PilWrapper,
MssWrapper.name: MssWrapper,
ScrotWrapper.name: ScrotWrapper,
GrimWrapper.name: GrimWrapper,
MaimWrapper.name: MaimWrapper,
ImagemagickWrapper.name: ImagemagickWrapper,
Qt5GrabWindow.name: Qt5GrabWindow,
Qt4GrabWindow.name: Qt4GrabWindow,
PySide2GrabWindow.name: PySide2GrabWindow,
PySideGrabWindow.name: PySideGrabWindow,
WxScreen.name: WxScreen,
Gdk3PixbufWrapper.name: Gdk3PixbufWrapper,
ScreencaptureWrapper.name: ScreencaptureWrapper,
MacQuartzWrapper.name: MacQuartzWrapper,
FreedesktopDBusWrapper.name: FreedesktopDBusWrapper,
GnomeDBusWrapper.name: GnomeDBusWrapper,
GnomeScreenshotWrapper.name: GnomeScreenshotWrapper,
KwinDBusWrapper.name: KwinDBusWrapper,
# XwdWrapper.name: XwdWrapper,
KsnipWrapper.name: KsnipWrapper,
}
def qt():
yield Qt5GrabWindow
yield Qt4GrabWindow
yield PySide2GrabWindow
yield PySideGrabWindow
def backends(childprocess):
# the order is based on performance
if platform_is_linux():
if use_x_display():
if childprocess:
yield ScrotWrapper
yield PilWrapper
yield MssWrapper
else:
yield PilWrapper
yield MssWrapper
yield ScrotWrapper
yield MaimWrapper
yield ImagemagickWrapper
yield Gdk3PixbufWrapper
yield WxScreen
for x in qt():
yield x
yield FreedesktopDBusWrapper
yield GnomeDBusWrapper
# on screen notification
yield KwinDBusWrapper
# flash effect
yield GnomeScreenshotWrapper
yield GrimWrapper
yield KsnipWrapper
elif platform_is_osx():
yield PilWrapper
yield MssWrapper
# alternatives for older pillow versions
yield ScreencaptureWrapper
yield MacQuartzWrapper
# qt has some color difference
# does not work: Gdk3, wx, Imagemagick
elif platform_is_win():
yield PilWrapper
yield MssWrapper
else:
yield PilWrapper
yield MssWrapper
for x in backend_dict.values():
yield x
def select_childprocess(childprocess, backend_class):
if backend_class.is_subprocess:
# backend is always a subprocess -> nothing to do
return False
return childprocess
def auto(bbox, childprocess):
im = None
for backend_class in backends(childprocess):
backend_name = backend_class.name
try:
if select_childprocess(childprocess, backend_class):
log.debug('running "%s" in child process', backend_name)
im = childprocess_grab(backend_name, bbox)
else:
obj = backend_class()
im = obj.grab(bbox)
break
except Exception:
msg = traceback.format_exc()
log.debug(msg)
if not im:
msg = "All backends failed!"
raise FailedBackendError(msg)
return im
def force(backend_name, bbox, childprocess):
backend_class = backend_dict[backend_name]
if select_childprocess(childprocess, backend_class):
log.debug('running "%s" in child process', backend_name)
return childprocess_grab(backend_name, bbox)
else:
obj = backend_class()
im = obj.grab(bbox)
return im
def backend_grab(backend, bbox, childprocess):
if backend:
return force(backend, bbox, childprocess)
else:
return auto(bbox, childprocess)
def backend_version2(backend_name):
backend_class = backend_dict[backend_name]
obj = backend_class()
v = obj.backend_version()
return v
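# Minimal usage sketch (assumes at least one supported backend is installed
# on this system; the return value is a PIL Image):
#
#   im = backend_grab(backend=None, bbox=None, childprocess=True)  # auto
#   im = backend_grab(backend='scrot', bbox=(0, 0, 100, 100),
#                     childprocess=True)                           # forced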
| {
"content_hash": "6ecec4fce667fb5c86d378e021ce5bb7",
"timestamp": "",
"source": "github",
"line_count": 172,
"max_line_length": 72,
"avg_line_length": 30.348837209302324,
"alnum_prop": 0.7017241379310345,
"repo_name": "ponty/pyscreenshot",
"id": "7e216def76d40e49b39852eb3eac1237fa3396af",
"size": "5220",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyscreenshot/loader.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "77446"
},
{
"name": "Ruby",
"bytes": "13811"
},
{
"name": "Shell",
"bytes": "16329"
}
],
"symlink_target": ""
} |
"""Training the network."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import datetime
import functools
import glob
import itertools
import json
import os
import random
import sys
import time
import numpy as np
import tensorflow as tf
from tensorflow.core.framework import summary_pb2
import model.network as network
import cognitive.task_bank as task_bank
from cognitive import constants
from cognitive.constants import config
import cognitive.train_utils as tu
tf.app.flags.DEFINE_string('hparams', '',
'Comma separated list of name=value hyperparameter '
'pairs. These values will override the defaults')
# task_family flag inherited from task_bank.py
tf.app.flags.DEFINE_integer('batch_size', 128, 'batch size')
# Logistics
tf.app.flags.DEFINE_string('data_dir', None,
'Directory with training and validation data. '
'The directory should contain subdirectories '
'starting with train_, val_, and test_ . If no directory '
'is given, data will be generated on the fly.')
tf.app.flags.DEFINE_string('model_dir', '/tmp/cog',
'Directory containing saved model files.')
FLAGS = tf.app.flags.FLAGS
def get_default_hparams_dict():
return dict(
# exclude one task during training
exclude_task_train='none',
# learning rate decay: lr multiplier per 1M examples
        # value of 0.85 translates to 0.1 per 14M examples
        # value of 0.90 translates to 0.23 per 14M examples
        # value of 0.95 translates to 0.5 per 14M examples
lr_decay=0.95,
# learning rate
learning_rate=0.0005,
# gradient clipping
grad_clip=10.,
# clipping value for rnn state norm
rnn_state_norm_clip=10000.,
# number of core recurrent units
n_rnn=768,
# type of core rnn
rnn_type='gru',
# whether to use 128 as input image size
use_img_size_128=False,
# number of vision network output
n_out_vis=128,
        # whether to use a pretrained VGG visual network
use_vgg_pretrain=False,
# type of visual network
vis_type='myconv',
# number of units for question network
n_rnn_rule=128,
# type of rule rnn
rnn_rule_type='lstm',
# embedding size for question words
embedding_size=64,
# train initial state or not
train_init=True,
# beta1 for AdamOptimizer
adam_beta1=0.1,
# beta2 for AdamOptimizer
adam_beta2=0.0001,
# epsilon for AdamOptimizer
adam_epsilon=1e-8,
# rule network bidirectional or not
rnn_rule_bidir=True,
# number of time point to repeat for each epoch
n_time_repeat=5,
# build network with visual attention or not
feature_attention=True,
# state-dependent attention?
state_dep_feature_attention=False,
# whether use a MLP to generation feature attention
feature_attention_use_mlp=False,
# whether apply feature attention to the second-to-last conv layer
feature_attend_to_2conv=False,
# whether to feed a spatially-summed visual input to core
feed_space_sum_to_core=True,
# build network with visual spatial attention or not
spatial_attention=True,
# whether spatial attention depends on retrieved memory
memory_dep_spatial_attention=False,
# whether spatial attention is fed back to controller
feed_spatial_attn_back=True,
# how are rule outputs used as memory
verbal_attention=True,
# size of the query for rule memory
memory_query_size=128,
# number of maps in visual spatial memory
vis_memory_maps=4,
# only use visual memory to point short-cut
only_vis_to_pnt=True,
# optimizer to use
optimizer='adam',
# momentum value to use with "momentum" optimizer
momentum=0.9,
# Whether to use Nesterov Accelerated Gradient with "momentum" optimizer
nesterov=True,
# signal new epoch
signal_new_epoch=False,
# final readout using a MLP
final_mlp=False,
# L2 regularization, consider a value between 1e-4 and 1e-5
l2_weight=2*1e-5,
# number of epochs each trial
n_epoch=4,
# average number of epochs an object needs to be held in memory
average_memory_span=2,
# maximum number of distractors
max_distractors=1,
# value 'factor' param to variance_scaling_initializer used as
# controller GRU kernel initializer
controller_gru_init_factor=0.3,
# normalize images mean 0/std 1
normalize_images=False,
)
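# Illustrative override (hypothetical values): any default above can be
# replaced on the command line through the comma-separated --hparams flag,
# which is parsed by tf.contrib.training.HParams in main(), e.g.
#
#   python cognitive/test.py --hparams=learning_rate=0.001,n_rnn=512,rnn_type=lstm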
def get_dataparams(hparams):
# Pick a random number of distractors between 1 and 10
return dict(
n_distractor=random.randint(1, hparams.max_distractors),
average_memory_span=hparams.average_memory_span,
)
def test_input_generator(task_families, hparams):
def getter():
next_family = 0
n_task_family = len(task_families)
while True:
tasks = []
family = task_families[next_family]
next_family = (next_family + 1) % n_task_family
for i in range(FLAGS.batch_size):
tasks.append(task_bank.random_task(family))
#print("Yielding batch of " + tasks[0].__class__.__name__)
feeds = tu.generate_feeds(tasks, hparams, get_dataparams(hparams))
yield feeds + (np.array([family]), )
return getter
def evaluate(sess, task_families, model, task_family_tensor):
print('Evaluating over the test dataset. You can interrupt the script at any '
'time to see accuracy up to that point.')
task_family_dict = dict([(task_family, i) for i, task_family in
enumerate(task_families)])
acc_list = [0] * len(task_families)
loss_list = [0] * len(task_families)
family_list = [0] * len(task_families)
start = time.time()
for i in itertools.count():
# Log progress
if i and i % 100 == 0:
rate = (i * FLAGS.batch_size) / (time.time() - start)
done_examples = (i * FLAGS.batch_size)
remaining_secs = int((500000 - done_examples) / rate)
print("Running batch {}. Rate: {} examples/sec. Reaching 0.5M examples "
"will take {} more".format(
i, rate, str(datetime.timedelta(seconds=remaining_secs))))
try:
tf_val, acc_tmp, loss_tmp = sess.run([task_family_tensor, model.acc, model.loss])
assert (tf_val == tf_val[0]).all(), ('Not all task families are the '
'same in an evaluation batch. Does your batch_size evenly divide '
'total number of evaluation examples? Family values %s' % tf_val)
family = tf_val[0].decode('utf-8')
# TF pads string tensors with zero bytes for some reason. Remove them.
family = family.strip('\x00')
family_ind = task_family_dict[family]
acc_list[family_ind] += acc_tmp
loss_list[family_ind] += loss_tmp
family_list[family_ind] += 1
except (tf.errors.OutOfRangeError, KeyboardInterrupt) as e:
print("Run the testing over {} examples".format(FLAGS.batch_size * i))
break
acc_list = [x/float(count) if count else 0.0 for x, count in zip(acc_list, family_list)]
loss_list = [x/float(count) if count else 0.0 for x, count in zip(loss_list, family_list)]
for i, task_family_test in enumerate(task_families):
print('{:25s}: Acc {:0.3f} Loss {:0.3f}'.format(
task_family_test, acc_list[i], loss_list[i]))
acc_avg = np.mean([x for x in acc_list if x])
loss_avg = np.mean([x for x in loss_list if x])
print('Overall accuracy {:0.3f}'.format(acc_avg))
print('------------------------------------')
print('Validation took : {:0.2f}s'.format(time.time() - start))
sys.stdout.flush()
return acc_list
def _fname_to_ds(input_files, batch_size):
feed_types = (tf.float32, tf.int64, tf.int64, tf.float32, tf.float32,
tf.int64, tf.float32, tf.float32, tf.string)
ds = tf.data.TextLineDataset(filenames=input_files, compression_type='GZIP')
ds = ds.apply(tf.contrib.data.batch_and_drop_remainder(batch_size))
ds = ds.map(
lambda examples: tuple(tf.py_func(
tu.json_to_feeds, [examples], feed_types, stateful=False)))
return ds
def get_ds_from_files(data_dir, batch_size):
"""Returns inputs tensors for data in data_dir."""
data_type = 'test'
input_dir_glob = os.path.join(data_dir, data_type + '_*')
input_dir = glob.glob(input_dir_glob)
    assert len(input_dir) == 1, ('Expected to find one ' + data_type +
        ' directory in ' + data_dir + ' but got ' + str(input_dir))
input_dir = input_dir[0]
input_files = glob.glob(os.path.join(input_dir, '*'))
feed_shapes = (tf.TensorShape([None, None, 112, 112, 3]),
tf.TensorShape([config['maxseqlength'], None]),
tf.TensorShape([None]),
tf.TensorShape([None, None]),
tf.TensorShape([None, None]),
tf.TensorShape([None]),
tf.TensorShape([None]),
tf.TensorShape([None]),
tf.TensorShape([None]))
input_ds = (tf.data.Dataset
.from_tensor_slices(input_files)
.interleave(functools.partial(_fname_to_ds, batch_size=batch_size),
cycle_length=len(input_files),
block_length=1))
input_ds = input_ds.prefetch(buffer_size=2)
input_feeds = input_ds.make_one_shot_iterator().get_next()
for inp, shape in zip(input_feeds, feed_shapes):
inp.set_shape(shape)
input_feeds_dict = {'image': input_feeds[0],
'question': input_feeds[1],
'seq_len': input_feeds[2],
'point': input_feeds[3],
'point_xy': input_feeds[4],
'answer': input_feeds[5],
'mask_point': input_feeds[6],
'mask_answer': input_feeds[7],
}
task_family_tensor = input_feeds[8]
return input_feeds_dict, task_family_tensor
def get_inputs(train_task_families, task_families, hparams):
if FLAGS.data_dir:
print("Reading test examples from ", FLAGS.data_dir)
return get_ds_from_files(FLAGS.data_dir, FLAGS.batch_size)
print("Generating test examples on the fly")
# Set up input pipelines
feed_types = (tf.float32, tf.int64, tf.int64, tf.float32, tf.float32,
tf.int64, tf.float32, tf.float32)
feed_shapes = (tf.TensorShape([None, None, 112, 112, 3]),
tf.TensorShape([config['maxseqlength'], None]),
tf.TensorShape([None]),
tf.TensorShape([None, None]),
tf.TensorShape([None, None]),
tf.TensorShape([None]),
tf.TensorShape([None]),
tf.TensorShape([None]))
test_ds = tf.data.Dataset.from_generator(
test_input_generator(task_families[:], hparams),
feed_types + (tf.string,),
feed_shapes + (tf.TensorShape([None]),))
test_ds = test_ds.prefetch(buffer_size=2)
test_feeds = test_ds.make_one_shot_iterator().get_next()
test_feeds_dict = {'image': test_feeds[0],
'question': test_feeds[1],
'seq_len': test_feeds[2],
'point': test_feeds[3],
'point_xy': test_feeds[4],
'answer': test_feeds[5],
'mask_point': test_feeds[6],
'mask_answer': test_feeds[7],
}
task_family_tensor = test_feeds[8]
return test_feeds_dict, task_family_tensor
def run_test(hparams, model_dir):
task_families = list(task_bank.task_family_dict.keys())
train_task_families = task_families
if not tf.gfile.Exists(model_dir):
print("model directory", model_dir, "does not exist")
sys.exit(1)
######################### Build the model ##################################
feeds_dict, task_family_tensor = get_inputs(
train_task_families, task_families, hparams)
tf.train.get_or_create_global_step()
model = network.Model(hparams, config)
model.build(feeds_dict, FLAGS.batch_size, is_training=True)
saver = tf.train.Saver()
########################## Restore and Test ##########################
checkpoint_path = model_dir
with tf.Session() as sess:
cpkt_path = tf.train.latest_checkpoint(checkpoint_path)
if cpkt_path is not None:
print("Restoring model from: " + cpkt_path)
saver.restore(sess, cpkt_path)
else:
print("Did not find checkpoint at: " + checkpoint_path)
sys.exit(1)
evaluate(sess, task_families, model, task_family_tensor)
def main(_):
hparams_dict = get_default_hparams_dict()
hparams = tf.contrib.training.HParams(**hparams_dict)
hparams = hparams.parse(FLAGS.hparams) # Overwritten by FLAGS.hparams
run_test(hparams, FLAGS.model_dir)
if __name__ == '__main__':
tf.app.run(main)
| {
"content_hash": "abef0af39465a192f8709ca31d6afc4b",
"timestamp": "",
"source": "github",
"line_count": 364,
"max_line_length": 92,
"avg_line_length": 35.65934065934066,
"alnum_prop": 0.6178736517719569,
"repo_name": "google/cog",
"id": "8642fbcc8699e6e95cb1741636719f3c00f34009",
"size": "13637",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cognitive/test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "256439"
}
],
"symlink_target": ""
} |
from django.template.defaultfilters import capfirst # noqa
from django.template.defaultfilters import floatformat # noqa
from django.utils.translation import ugettext as _ # noqa
from django.views.generic import TemplateView # noqa
from openstack_dashboard import usage
from openstack_dashboard.usage import base
class ProjectUsageCsvRenderer(base.BaseCsvResponse):
columns = [_("Instance Name"), _("VCPUs"), _("Ram (MB)"),
_("Disk (GB)"), _("Usage (Hours)"),
_("Uptime(Seconds)"), _("State")]
def get_row_data(self):
for inst in self.context['usage'].get_instances():
yield (inst['name'],
inst['vcpus'],
inst['memory_mb'],
inst['local_gb'],
floatformat(inst['hours'], 2),
inst['uptime'],
capfirst(inst['state']))
class ProjectOverview(usage.UsageView):
table_class = usage.ProjectUsageTable
usage_class = usage.ProjectUsage
template_name = 'project/overview/usage.html'
csv_response_class = ProjectUsageCsvRenderer
def get_data(self):
super(ProjectOverview, self).get_data()
return self.usage.get_instances()
class WarningView(TemplateView):
template_name = "project/_warning.html"
| {
"content_hash": "e0c3788f284e4693848d32f2e6a050c8",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 62,
"avg_line_length": 32.75,
"alnum_prop": 0.6244274809160305,
"repo_name": "deepakselvaraj/federated-horizon",
"id": "28cd535f6e70ac2f0370522266684d600ae2b1e6",
"size": "2120",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "openstack_dashboard/dashboards/project/overview/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
from .view import *
from .register import *
from .recover import *
from .login import *
from .logout import *
from .edit_password import *
from .edit_login import *
from .password_form import *
from .profile import *
from .nabar_autocomplete import * | {
"content_hash": "de5f44fcdf417303be3e430723ad0f2a",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 33,
"avg_line_length": 16,
"alnum_prop": 0.73046875,
"repo_name": "vinoth3v/In",
"id": "073d90971fbc77d90ad06f40a93895af38351072",
"size": "257",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "In/nabar/page/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "33032"
},
{
"name": "Python",
"bytes": "779047"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from datetime import timedelta
from django.conf import settings
from django.db import models
from django.utils import timezone
from django.utils.crypto import get_random_string
from django.utils.translation import ugettext_lazy as _
from sentry.db.models import FlexibleForeignKey, Model, sane_repr
CHARACTERS = u'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
class UserEmail(Model):
__core__ = True
user = FlexibleForeignKey(settings.AUTH_USER_MODEL,
related_name='emails')
email = models.EmailField(_('email address'))
validation_hash = models.CharField(
max_length=32, default=lambda: get_random_string(32, CHARACTERS))
date_hash_added = models.DateTimeField(default=timezone.now)
is_verified = models.BooleanField(
_('verified'), default=False,
help_text=_('Designates whether this user has confirmed their email.'))
class Meta:
app_label = 'sentry'
db_table = 'sentry_useremail'
unique_together = (('user', 'email'),)
__repr__ = sane_repr('user_id', 'email')
def set_hash(self):
self.date_hash_added = timezone.now()
self.validation_hash = get_random_string(32, CHARACTERS)
def hash_is_valid(self):
return self.validation_hash and self.date_hash_added > timezone.now() - timedelta(hours=48)
def is_primary(self):
return self.user.email == self.email
@classmethod
    def get_primary_email(cls, user):
return UserEmail.objects.get_or_create(
user=user,
email=user.email,
)[0]
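# Illustrative verification flow (a sketch, assuming an existing `user`):
#
#   email = UserEmail.get_primary_email(user)
#   email.set_hash()        # issue a fresh 32-character validation hash
#   email.save()
#   ...                     # the hash is sent to the user out of band
#   email.hash_is_valid()   # True within 48 hours of set_hash()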
| {
"content_hash": "71ea10b841468e94c9aadff15fa5a3ab",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 99,
"avg_line_length": 31.73076923076923,
"alnum_prop": 0.6733333333333333,
"repo_name": "JackDanger/sentry",
"id": "be43f7c9f642613cf86ac08a57e5e5e10ee27b69",
"size": "1650",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/sentry/models/useremail.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "583430"
},
{
"name": "HTML",
"bytes": "319622"
},
{
"name": "JavaScript",
"bytes": "624672"
},
{
"name": "Makefile",
"bytes": "2660"
},
{
"name": "Python",
"bytes": "6279717"
}
],
"symlink_target": ""
} |
from django.contrib import messages
from django.contrib.auth import views
from django.contrib.auth.decorators import login_required
from django.shortcuts import render, redirect
from django.utils.http import urlsafe_base64_decode
from admin.common_auth.models import MyUser
@login_required
def home(request):
context = {}
return render(request, 'home.html', context)
def password_reset_done(request, **kwargs):
    messages.success(request, 'You have successfully reset your password and activated your admin account. Thank you.')
return redirect('auth:login')
def password_reset_confirm_custom(request, **kwargs):
response = views.password_reset_confirm(request, **kwargs)
# i.e. if the user successfully resets their password
if response.status_code == 302:
try:
uid = urlsafe_base64_decode(kwargs['uidb64'])
user = MyUser.objects.get(pk=uid)
except (TypeError, ValueError, OverflowError, MyUser.DoesNotExist):
pass
else:
user.confirmed = True
user.save()
return response
| {
"content_hash": "31c55ef6469cfb1ae2baadf7afe56452",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 118,
"avg_line_length": 36.3,
"alnum_prop": 0.7061524334251607,
"repo_name": "GageGaskins/osf.io",
"id": "736dfee18487fc54f544008d41399c6f7d9ccc9d",
"size": "1089",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "admin/base/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "133911"
},
{
"name": "HTML",
"bytes": "58475"
},
{
"name": "JavaScript",
"bytes": "1393750"
},
{
"name": "Mako",
"bytes": "635929"
},
{
"name": "Perl",
"bytes": "13885"
},
{
"name": "Python",
"bytes": "4889695"
},
{
"name": "Shell",
"bytes": "2118"
}
],
"symlink_target": ""
} |
"""
Test LinearTimeInterpolator object
"""
from thetis.interpolation import *
import numpy as np
from scipy.interpolate import interp1d
def do_interpolation(plot=False):
np.random.seed(2)
# construct data set
x_scale = 100.
ndata = 35
xx = np.linspace(0, x_scale, ndata)
yy = np.random.rand(*xx.shape)
# construct interpolation points
ninterp = 100
x_interp = np.random.rand(ninterp)*x_scale
# get correct solution with scipy
y_interp = interp1d(xx, yy)(x_interp)
class TimeSeriesReader(FileTreeReader):
def __init__(self, y):
self.y = y
def __call__(self, descriptor, time_index):
return [self.y[time_index]]
class SimpleTimeSearch(TimeSearch):
def __init__(self, t):
self.t = t
def find(self, time, previous=False):
ix = np.searchsorted(self.t, time)
if previous:
ix -= 1
if ix < 0:
raise Exception('Index out of bounds')
tstamp = self.t[ix]
return ('cat', ix, tstamp)
timesearch_obj = SimpleTimeSearch(xx)
reader = TimeSeriesReader(yy)
lintimeinterp = LinearTimeInterpolator(timesearch_obj, reader)
y_interp2 = np.zeros_like(y_interp)
for i in range(len(y_interp2)):
y_interp2[i] = lintimeinterp(x_interp[i])[0]
if plot:
import matplotlib.pyplot as plt
plt.plot(xx, yy, 'k')
plt.plot(x_interp, y_interp, 'bo')
plt.plot(x_interp, y_interp2, 'rx')
plt.show()
assert np.allclose(y_interp, y_interp2)
def test_linearinterpolator():
do_interpolation()
if __name__ == '__main__':
do_interpolation(plot=True)
| {
"content_hash": "2587de3bbb84287f1f11cc1fcf5b4f8b",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 66,
"avg_line_length": 25.264705882352942,
"alnum_prop": 0.5960419091967404,
"repo_name": "tkarna/cofs",
"id": "b8ef21e9897dc93497ab3ea199e89750463eb74c",
"size": "1718",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/interpolation/test_lintimeinterp.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "GLSL",
"bytes": "418"
},
{
"name": "Makefile",
"bytes": "221"
},
{
"name": "Python",
"bytes": "385082"
}
],
"symlink_target": ""
} |
from PySide import QtCore, QtGui
from androguard.core import androconf
from androguard.gui.xrefwindow import XrefDialog
from androguard.gui.sourcewindow import SourceWindow
from androguard.gui.helpers import class2dotclass, classdot2class
class TreeWindow(QtGui.QTreeWidget):
'''TODO
'''
def __init__(self, parent=None, win=None):
super(TreeWindow, self).__init__(parent)
self.itemDoubleClicked.connect(self.itemDoubleClickedHandler)
self.mainwin = win
self.createActions()
self.header().close()
def insertTree(self, paths):
'''Parse all the paths (['Lcom/sogeti/example/myclass/MyActivity$1;', ...])
and build a tree using the QTreeWidgetItem insertion method.
NB: This algo works because in alphabetical order thanks to sort()
'''
paths.sort()
        d = {}  # dict of QTreeWidgetItem indexed by paths (already browsed)
for p in paths:
p = class2dotclass(p)
elements = p.split(".")
            # add folders if they do not already exist
for i in range(len(elements)):
parent_str = ".".join(elements[:i])
current_str = elements[i]
path = (parent_str, current_str)
if path not in d.keys():
if not parent_str:
e = QtGui.QTreeWidgetItem(self, [elements[i]])
else:
elements2 = parent_str.split(".")
# we are sure it exists due to algorithm
e = QtGui.QTreeWidgetItem(d[(".".join(elements2[:-1]), elements2[-1])], [elements[i]])
d[path] = e
#self.expandItem(e) #HAX to expand all items during loading
def item2path(self, item, column=0):
'''Browse all parents from QTreeWidgetItem item
in order to rebuild the complete path
Return both complete path (ex: "Landroid/support/AccessibilityServiceInfoCompat$1;")
and path_elts (ex: [u'Landroid', u'support', u'AccessibilityServiceInfoCompat$1;'])
'''
path_elts = []
while item is not None:
# print item.text(column)
path_elts.append(item.text(column))
item = item.parent()
path_elts.reverse()
path = ".".join(path_elts)
path = classdot2class(path)
return path, path_elts
def itemDoubleClickedHandler(self, item, column):
'''Signal sent by PySide when a tree element is clicked'''
# print "item %s has been double clicked at column %s" % (str(item), str(column))
path, path_elts = self.item2path(item)
if item.childCount() != 0:
self.mainwin.showStatus("Sources not available. %s is not a class" % path)
return
self.mainwin.openSourceWindow(path)
def createActions(self):
self.xrefAct = QtGui.QAction("Xref from...", self,
# shortcut=QtGui.QKeySequence("CTRL+B"),
statusTip="List the references where this element is used",
triggered=self.actionXref)
self.expandAct = QtGui.QAction("Expand...", self,
statusTip="Expand all the subtrees",
triggered=self.actionExpand)
self.collapseAct = QtGui.QAction("Collapse...", self,
statusTip="Collapse all the subtrees",
triggered=self.actionCollapse)
def actionXref(self):
item = self.currentItem()
path, path_elts = self.item2path(item)
if item.childCount() != 0:
self.mainwin.showStatus("Xref not available. %s is not a class" % path)
return
xrefs_list = XrefDialog.get_xrefs_list(self.mainwin.d, path=path)
if not xrefs_list:
self.mainwin.showStatus("No xref returned.")
return
xwin = XrefDialog(parent=self.mainwin, win=self.mainwin, xrefs_list=xrefs_list, path=path)
xwin.show()
def expand_children(self, item):
self.expandItem(item)
for i in range(item.childCount()):
self.expand_children(item.child(i))
def actionExpand(self):
self.expand_children(self.currentItem())
def collapse_children(self, item):
for i in range(item.childCount()):
self.collapse_children(item.child(i))
self.collapseItem(item)
def actionCollapse(self):
self.collapse_children(self.currentItem())
def contextMenuEvent(self, event):
menu = QtGui.QMenu(self)
menu.addAction(self.xrefAct)
menu.addAction(self.expandAct)
menu.addAction(self.collapseAct)
menu.exec_(event.globalPos())
| {
"content_hash": "f3c3f1da84c5b2ecc99d455403c1289d",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 110,
"avg_line_length": 38.34146341463415,
"alnum_prop": 0.5945716709075488,
"repo_name": "0x0mar/androguard",
"id": "5bfaf25687433cc954a1b2bbf8fb45dcebe505f9",
"size": "4716",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "androguard/gui/treewindow.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
from .standard import *
from .periodic import * | {
"content_hash": "86554d3cc580b0f9384cbaf7aa9de597",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 23,
"avg_line_length": 23.5,
"alnum_prop": 0.7659574468085106,
"repo_name": "pombredanne/unuk",
"id": "60ca5bfb81cad9e540e1c964102138465caf4c5d",
"size": "47",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/unuk/contrib/tasks/tests/__init__.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
'''TODO:
- Finish battle system
- Finish stat system
- Finish writing tutorial
- Add romance system
- Finish save system'''
#imports
import stats
import settings
import save
import items
import battle
import story
import lists
#default name and settings values
name = "Debug"
difficulty = "Normal"
debug = False
silly = False
#main
def main():
titleScreen = True
resetstats = open("stats.py", "w")
statsbackup = open("statsbak.py")
resetstats.write(statsbackup.read())
resetstats.close()
statsbackup.close()
while titleScreen:
print("The adventures of a drunk knight\nCopyright 2017 by Pleiadesu\n")
print("New Game\nLoad\nSettings\nCredits")
titleChoice = input(": ")
if titleChoice.lower() == "new game":
if input("Are you sure you want to start a new game? (y/n): ") in lists.yes:
titleScreen = False
story.main()
elif titleChoice.lower() == "load":
#make save detection script so that if there are no saves it gives an error and returns to title
saveexists = save.listSaves()
if saveexists != False:
loadSave = input("Choose a save to load (number): ")
save.load(loadSave)
titleScreen = False
story.main()
#whatever the fuck happens when you load a save
elif titleChoice.lower() == "settings":
settings.settings()
print(stats.difficulty)
print(stats.silly)
print(stats.debug)
elif titleChoice.lower() == "credits":
print("Game written and developed by Pleiadesu (@PleiadesuPix).\nInspired by other text-based games such as Zork and Corruption of Champions.")
else:
print("Invalid choice!")
if __name__ == '__main__':
main() | {
"content_hash": "e00450115df8d3c9089d7602b7b5f16b",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 155,
"avg_line_length": 34.654545454545456,
"alnum_prop": 0.5944386149003148,
"repo_name": "Pleiadesu/drunkenknight",
"id": "c0d85a135c50c282340a29e8e85f219cd33984b5",
"size": "1906",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "77855"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import six
class AssistantManager(object):
def __init__(self):
self._guides = {}
def add(self, guides):
for k, v in six.iteritems(guides):
self._guides[k] = v
def get_valid_ids(self):
return list(v["id"] for k, v in six.iteritems(self._guides))
def get_guide_id(self, guide):
guide = self._guides.get(guide)
if guide:
return guide.get("id")
def all(self):
return self._guides
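# Illustrative usage (hypothetical guide payloads):
#
#   manager = AssistantManager()
#   manager.add({'issue_stream': {'id': 1}, 'releases': {'id': 2}})
#   manager.get_guide_id('issue_stream')  # -> 1
#   sorted(manager.get_valid_ids())       # -> [1, 2]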
| {
"content_hash": "9095cf41700701ec7880b95d3e15b38c",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 68,
"avg_line_length": 23.318181818181817,
"alnum_prop": 0.5730994152046783,
"repo_name": "beeftornado/sentry",
"id": "3e04fe94e3bd1d663c4419467e5bf2f05f33d3f8",
"size": "513",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/sentry/assistant/manager.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "157195"
},
{
"name": "HTML",
"bytes": "197026"
},
{
"name": "JavaScript",
"bytes": "380379"
},
{
"name": "Makefile",
"bytes": "2832"
},
{
"name": "Python",
"bytes": "6473603"
}
],
"symlink_target": ""
} |
import numpy as np
from .... import survey
from ....utils import Zero, validate_ndarray_with_shape
class BaseSrc(survey.BaseSrc):
"""Base DC/IP source
Parameters
----------
receiver_list : list of SimPEG.electromagnetics.static.resistivity.receivers.BaseRx
A list of DC/IP receivers
location : (n_source, dim) numpy.ndarray
Source locations
current : float or numpy.ndarray, default: 1.0
Current amplitude [A]
"""
_q = None
def __init__(self, receiver_list, location, current=1.0, **kwargs):
super().__init__(receiver_list=receiver_list, **kwargs)
self.location = location
self.current = current
@property
def location(self):
"""locations of the source electrodes
Returns
-------
(n, dim) numpy.ndarray
Locations of the source electrodes
"""
return self._location
@location.setter
def location(self, other):
self._location = validate_ndarray_with_shape(
"location", other, shape=("*", "*"), dtype=float
)
@property
def current(self):
"""Amplitudes of the source currents
Returns
-------
(n_source) numpy.ndarray
Amplitudes of the source currents
"""
return self._current
@current.setter
def current(self, other):
other = validate_ndarray_with_shape("current", other, shape=("*",), dtype=float)
if len(other) > 1 and len(other) != self.location.shape[0]:
raise ValueError(
"Current must be constant or equal to the number of specified source locations."
f" saw {len(other)} current sources and {self.location.shape[0]} locations."
)
self._current = other
def eval(self, sim):
"""Discretize sources to mesh
Parameters
----------
sim : SimPEG.base.BaseElectricalPDESimulation
The static electromagnetic simulation
Returns
-------
numpy.ndarray
The right-hand sides corresponding to the sources
"""
if sim._formulation == "HJ":
inds = sim.mesh.closest_points_index(self.location, grid_loc="CC")
q = np.zeros(sim.mesh.nC)
q[inds] = self.current
elif sim._formulation == "EB":
loc = self.location
cur = self.current
interpolation_matrix = sim.mesh.get_interpolation_matrix(
loc, location_type="N"
).toarray()
q = np.sum(cur[:, np.newaxis] * interpolation_matrix, axis=0)
return q
def evalDeriv(self, sim):
"""Returns the derivative of the source term with respect to the model.
This is zero.
Parameters
----------
sim : SimPEG.base.BaseElectricalPDESimulation
The static electromagnetic simulation
Returns
-------
discretize.utils.Zero
"""
return Zero()
class Multipole(BaseSrc):
"""
Generic Multipole Source
"""
@property
def location_a(self):
"""Locations of the A electrodes
Returns
-------
(n, dim) numpy.ndarray
Locations of the A electrodes
"""
return self.location
@property
def location_b(self):
"""Location of the B electrodes
Returns
-------
(n, dim) numpy.ndarray
Locations of the B electrodes
"""
return np.full_like(self.location, np.nan)
class Dipole(BaseSrc):
"""Dipole source
Parameters
----------
receiver_list : list of SimPEG.electromagnetics.static.resistivity.receivers.BaseRx
A list of DC/IP receivers
location_a : (n_source, dim) numpy.array_like
        A electrode locations; remember to set 'location_b' keyword argument to define B electrode locations.
location_b : (n_source, dim) numpy.array_like
        B electrode locations; remember to set 'location_a' keyword argument to define A electrode locations.
location : list or tuple of length 2 of numpy.array_like
A and B electrode locations. In this case, do not set the 'location_a' and 'location_b'
keyword arguments. And we supply a list or tuple of the form [location_a, location_b].
"""
def __init__(
self,
receiver_list,
location_a=None,
location_b=None,
location=None,
**kwargs,
):
if "current" in kwargs.keys():
value = kwargs.pop("current")
current = [value, -value]
else:
current = [1.0, -1.0]
# if location_a set, then use location_a, location_b
if location_a is not None:
if location_b is None:
raise ValueError(
"For a dipole source both location_a and location_b " "must be set"
)
if location is not None:
raise ValueError(
"Cannot set both location and location_a, location_b. "
"Please provide either location=(location_a, location_b) "
"or both location_a=location_a, location_b=location_b"
)
location = [location_a, location_b]
elif location is not None:
if len(location) != 2:
raise ValueError(
"location must be a list or tuple of length 2: "
"[location_a, location_b]. The input location has "
f"length {len(location)}"
)
# instantiate
super().__init__(
receiver_list=receiver_list, location=location, current=current, **kwargs
)
def __repr__(self):
return (
f"{self.__class__.__name__}(" f"a: {self.location_a}; b: {self.location_b})"
)
@property
def location_a(self):
"""Locations of the A-electrodes
Returns
-------
(n_source, dim) numpy.ndarray
Locations of the A-electrodes
"""
return self.location[0]
@property
def location_b(self):
"""Locations of the B-electrodes
Returns
-------
(n_source, dim) numpy.ndarray
Locations of the B-electrodes
"""
return self.location[1]
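# Illustrative construction (hypothetical receiver list ``rx_list`` and
# electrode coordinates):
#
#   src = Dipole(rx_list,
#                location_a=np.r_[-5.0, 0.0, 0.0],
#                location_b=np.r_[5.0, 0.0, 0.0])
#   # equivalent: Dipole(rx_list, location=[np.r_[-5.0, 0.0, 0.0],
#   #                                       np.r_[5.0, 0.0, 0.0]])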
class Pole(BaseSrc):
"""Pole source
Parameters
----------
receiver_list : list of SimPEG.electromagnetics.static.resistivity.receivers.BaseRx
A list of DC/IP receivers
location : (n_source, dim) numpy.ndarray
Electrode locations
"""
def __init__(self, receiver_list, location=None, **kwargs):
super().__init__(receiver_list=receiver_list, location=location, **kwargs)
if len(self.location) != 1:
raise ValueError(
f"Pole sources only have a single location, not {len(self.location)}"
)
@property
def location_a(self):
"""Locations of the A-electrodes
Returns
-------
(n_source, dim) numpy.ndarray
Locations of the A-electrodes
"""
return self.location[0]
@property
def location_b(self):
"""Locations of the B-electrodes
Returns
-------
(n_source, dim) numpy.ndarray of ``numpy.nan``
Locations of the B-electrodes
"""
return np.full_like(self.location[0], np.nan)
| {
"content_hash": "9ab93d1de957213a4374dc79b38ef2b2",
"timestamp": "",
"source": "github",
"line_count": 260,
"max_line_length": 109,
"avg_line_length": 28.876923076923077,
"alnum_prop": 0.5554075652637187,
"repo_name": "simpeg/simpeg",
"id": "e0c9368a1c8f7e4a3d6a737c8923755d76422b45",
"size": "7508",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "SimPEG/electromagnetics/static/resistivity/sources.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "685"
},
{
"name": "Python",
"bytes": "3476002"
}
],
"symlink_target": ""
} |
import os
import sys
# Without this environment variable set to 'no' importing 'gst'
# causes 100% CPU load. (Tested on OSX.)
os.environ['GST_REGISTRY_FORK'] = 'no'
# Tested on OSX only.
os.environ['GST_PLUGIN_PATH'] = os.path.join(sys._MEIPASS, 'gst_plugins')
| {
"content_hash": "cf10135797c50789ebe0b2c94572bdd5",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 73,
"avg_line_length": 24.09090909090909,
"alnum_prop": 0.7018867924528301,
"repo_name": "bl4ckdu5t/registron",
"id": "6b3bc01f1271bdbe7498919de84feaf79a6e58ff",
"size": "670",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "PyInstaller/loader/rthooks/pyi_rth_gstreamer.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "884174"
},
{
"name": "C++",
"bytes": "1272"
},
{
"name": "CSS",
"bytes": "3410"
},
{
"name": "Objective-C",
"bytes": "30562"
},
{
"name": "Perl",
"bytes": "4306"
},
{
"name": "Prolog",
"bytes": "640"
},
{
"name": "Python",
"bytes": "1753967"
},
{
"name": "Shell",
"bytes": "4016"
},
{
"name": "TeX",
"bytes": "186440"
},
{
"name": "Visual Basic",
"bytes": "166"
}
],
"symlink_target": ""
} |
import pylab as pl
import pandas as pd
import math
cycle_start = 0.58
cycle_end = 1.81
duration_of_gait_cycle = cycle_end - cycle_start
half_gait_cycle = 0.5 * duration_of_gait_cycle
muscles = ['glut_max', 'iliopsoas', 'rect_fem',
'hamstrings', 'bifemsh', 'vasti',
'gastroc', 'soleus', 'tib_ant']
sol = pd.read_csv('GlobalStaticOptimization_OCP_solution.csv', index_col=0,
skiprows=2)
#sol = pd.read_csv('INDYGO_OCP_solution.csv', index_col=0,
# skiprows=2)
# sol.plot()
#num_muscles = 0
#plot_names = list()
#for col in sol.columns:
# if col.endswith('activation'):
# num_muscles += 1
# plot_names.append(col)
col_indices_r = list()
col_indices_l = list()
for muscle in muscles:
for i, col in enumerate(sol.columns):
if muscle + '_r' in col and 'activation' in col:
col_indices_r.append(i)
if muscle + '_l' in col and 'activation' in col:
col_indices_l.append(i)
num_cols = 3
num_rows = 3 #math.ceil(float(num_muscles) / num_cols)
pl.figure(figsize=(4 * num_cols, 3 * num_rows))
pgc_r = 100.0 * (sol.index - cycle_start) / duration_of_gait_cycle
for i in range(len(muscles)):
ax = pl.subplot(num_rows, num_cols, i + 1)
col_label_r = sol.columns[col_indices_r[i]]
ax.plot(pgc_r, sol[col_label_r])
#col_label_l = sol.columns[col_indices_l[i]]
#ax.plot(sol.index + half_gait_cycle, sol[col_label_l])
ax.set_title(col_label_r.split('/')[-1].replace('_r_activation', ''))
ax.set_ylim(0, 1)
if i == 3:
ax.set_ylabel('activation')
if i < 6:
ax.set_xticklabels([])
i_col = i % num_cols
if i_col > 0:
ax.set_yticklabels([])
if i == 7:
ax.set_xlabel('time (% gait cycle)')
pl.savefig('gait10dof18musc_activation.png')
| {
"content_hash": "222c7eb39440bc769f968f4cbd628b81",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 75,
"avg_line_length": 30.948275862068964,
"alnum_prop": 0.6094707520891365,
"repo_name": "opensim-org/opensim-core",
"id": "daf179e24070bf55a926bf82a513e84d35cb3a78",
"size": "3138",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "OpenSim/Moco/Archive/Tests/plot_gait10dof18musc_activation.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "2463647"
},
{
"name": "C++",
"bytes": "14727896"
},
{
"name": "CMake",
"bytes": "284589"
},
{
"name": "HTML",
"bytes": "230"
},
{
"name": "Java",
"bytes": "81560"
},
{
"name": "MATLAB",
"bytes": "576488"
},
{
"name": "Python",
"bytes": "320084"
},
{
"name": "SWIG",
"bytes": "155144"
},
{
"name": "Shell",
"bytes": "862"
},
{
"name": "Yacc",
"bytes": "19078"
}
],
"symlink_target": ""
} |
"""
=====================================================
Distance computations (:mod:`scipy.spatial.distance`)
=====================================================
.. sectionauthor:: Damian Eads
Function Reference
------------------
Distance matrix computation from a collection of raw observation vectors
stored in a rectangular array.
.. autosummary::
:toctree: generated/
pdist -- pairwise distances between observation vectors.
cdist -- distances between two collections of observation vectors
squareform -- convert distance matrix to a condensed one and vice versa
Predicates for checking the validity of distance matrices, both
condensed and redundant. Also contained in this module are functions
for computing the number of observations in a distance matrix.
.. autosummary::
:toctree: generated/
is_valid_dm -- checks for a valid distance matrix
is_valid_y -- checks for a valid condensed distance matrix
num_obs_dm -- # of observations in a distance matrix
num_obs_y -- # of observations in a condensed distance matrix
Distance functions between two vectors ``u`` and ``v``. Computing
distances over a large collection of vectors is inefficient for these
functions. Use ``pdist`` for this purpose.
.. autosummary::
:toctree: generated/
braycurtis -- the Bray-Curtis distance.
canberra -- the Canberra distance.
chebyshev -- the Chebyshev distance.
cityblock -- the Manhattan distance.
correlation -- the Correlation distance.
cosine -- the Cosine distance.
dice -- the Dice dissimilarity (boolean).
euclidean -- the Euclidean distance.
hamming -- the Hamming distance (boolean).
jaccard -- the Jaccard distance (boolean).
kulsinski -- the Kulsinski distance (boolean).
mahalanobis -- the Mahalanobis distance.
matching -- the matching dissimilarity (boolean).
minkowski -- the Minkowski distance.
rogerstanimoto -- the Rogers-Tanimoto dissimilarity (boolean).
russellrao -- the Russell-Rao dissimilarity (boolean).
seuclidean -- the normalized Euclidean distance.
sokalmichener -- the Sokal-Michener dissimilarity (boolean).
sokalsneath -- the Sokal-Sneath dissimilarity (boolean).
sqeuclidean -- the squared Euclidean distance.
wminkowski -- the weighted Minkowski distance.
yule -- the Yule dissimilarity (boolean).
"""
# Copyright (C) Damian Eads, 2007-2008. New BSD License.
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from scipy.lib.six import callable, string_types
from scipy.lib.six import xrange
from . import _distance_wrap
from ..linalg import norm
import collections
def _copy_array_if_base_present(a):
"""
Copies the array if its base points to a parent array.
"""
if a.base is not None:
return a.copy()
elif np.issubsctype(a, np.float32):
return np.array(a, dtype=np.double)
else:
return a
def _copy_arrays_if_base_present(T):
"""
Accepts a tuple of arrays T. Copies the array T[i] if its base array
points to an actual array. Otherwise, the reference is just copied.
This is useful if the arrays are being passed to a C function that
does not do proper striding.
"""
l = [_copy_array_if_base_present(a) for a in T]
return l
def _convert_to_bool(X):
if X.dtype != np.bool:
X = X.astype(np.bool)
if not X.flags.contiguous:
X = X.copy()
return X
def _convert_to_double(X):
if X.dtype != np.double:
X = X.astype(np.double)
if not X.flags.contiguous:
X = X.copy()
return X
def _validate_vector(u, dtype=None):
# XXX Is order='c' really necessary?
u = np.asarray(u, dtype=dtype, order='c').squeeze()
# Ensure values such as u=1 and u=[1] still return 1-D arrays.
u = np.atleast_1d(u)
if u.ndim > 1:
raise ValueError("Input vector should be 1-D.")
return u
def minkowski(u, v, p):
"""
Computes the Minkowski distance between two 1-D arrays.
The Minkowski distance between 1-D arrays `u` and `v`,
is defined as
.. math::
{||u-v||}_p = (\\sum{|u_i - v_i|^p})^{1/p}.
Parameters
----------
u : (N,) array_like
Input array.
v : (N,) array_like
Input array.
p : int
The order of the norm of the difference :math:`{||u-v||}_p`.
Returns
-------
d : double
The Minkowski distance between vectors `u` and `v`.
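    Examples
    --------
    For instance, the distance between two unit basis vectors is 2 under
    the 1-norm and :math:`\\sqrt{2}` under the 2-norm:
    >>> from scipy.spatial import distance
    >>> distance.minkowski([1.0, 0.0, 0.0], [0.0, 1.0, 0.0], 1)
    2.0
    >>> distance.minkowski([1.0, 0.0, 0.0], [0.0, 1.0, 0.0], 2)
    1.4142135623730951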
"""
u = _validate_vector(u)
v = _validate_vector(v)
if p < 1:
raise ValueError("p must be at least 1")
dist = norm(u - v, ord=p)
return dist
def wminkowski(u, v, p, w):
"""
Computes the weighted Minkowski distance between two 1-D arrays.
The weighted Minkowski distance between `u` and `v`, defined as
.. math::
\\left(\\sum{(w_i |u_i - v_i|^p)}\\right)^{1/p}.
Parameters
----------
u : (N,) array_like
Input array.
v : (N,) array_like
Input array.
p : int
The order of the norm of the difference :math:`{||u-v||}_p`.
w : (N,) array_like
The weight vector.
Returns
-------
wminkowski : double
The weighted Minkowski distance between vectors `u` and `v`.
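    Examples
    --------
    With unit weights this reduces to the plain Minkowski distance, and
    scaling the weights scales the result:
    >>> from scipy.spatial import distance
    >>> distance.wminkowski([1.0, 0.0, 0.0], [0.0, 1.0, 0.0], 2, [1.0, 1.0, 1.0])
    1.4142135623730951
    >>> distance.wminkowski([1.0, 0.0, 0.0], [0.0, 1.0, 0.0], 2, [2.0, 2.0, 2.0])
    2.8284271247461903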
"""
u = _validate_vector(u)
v = _validate_vector(v)
w = _validate_vector(w)
if p < 1:
raise ValueError("p must be at least 1")
dist = norm(w * (u - v), ord=p)
return dist
def euclidean(u, v):
"""
Computes the Euclidean distance between two 1-D arrays.
The Euclidean distance between 1-D arrays `u` and `v`, is defined as
.. math::
{||u-v||}_2
Parameters
----------
u : (N,) array_like
Input array.
v : (N,) array_like
Input array.
Returns
-------
euclidean : double
The Euclidean distance between vectors `u` and `v`.
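    Examples
    --------
    The distance between two unit basis vectors, for example, is
    :math:`\\sqrt{2}`:
    >>> from scipy.spatial import distance
    >>> distance.euclidean([1, 0, 0], [0, 1, 0])
    1.4142135623730951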
"""
u = _validate_vector(u)
v = _validate_vector(v)
dist = norm(u - v)
return dist
def sqeuclidean(u, v):
"""
Computes the squared Euclidean distance between two 1-D arrays.
The squared Euclidean distance between `u` and `v` is defined as
.. math::
{||u-v||}_2^2.
Parameters
----------
u : (N,) array_like
Input array.
v : (N,) array_like
Input array.
Returns
-------
sqeuclidean : double
The squared Euclidean distance between vectors `u` and `v`.
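    Examples
    --------
    For two unit basis vectors the squared distance is 2, the square of
    the Euclidean distance :math:`\\sqrt{2}`:
    >>> from scipy.spatial import distance
    >>> distance.sqeuclidean([1, 0, 0], [0, 1, 0])
    2.0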
"""
# Preserve float dtypes, but convert everything else to np.float64
# for stability.
utype, vtype = None, None
if not (hasattr(u, "dtype") and np.issubdtype(u.dtype, np.inexact)):
utype = np.float64
if not (hasattr(v, "dtype") and np.issubdtype(v.dtype, np.inexact)):
vtype = np.float64
u = _validate_vector(u, dtype=utype)
v = _validate_vector(v, dtype=vtype)
u_v = u - v
return np.dot(u_v, u_v)
def cosine(u, v):
"""
Computes the Cosine distance between 1-D arrays.
The Cosine distance between `u` and `v`, is defined as
.. math::
1 - \\frac{u \\cdot v}
{||u||_2 ||v||_2}.
where :math:`u \\cdot v` is the dot product of :math:`u` and
:math:`v`.
Parameters
----------
u : (N,) array_like
Input array.
v : (N,) array_like
Input array.
Returns
-------
cosine : double
The Cosine distance between vectors `u` and `v`.
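    Examples
    --------
    Orthogonal vectors are at cosine distance 1, while a vector is at
    distance 0 from itself:
    >>> from scipy.spatial import distance
    >>> distance.cosine([1, 0, 0], [0, 1, 0])
    1.0
    >>> distance.cosine([1, 0, 0], [1, 0, 0])
    0.0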
"""
u = _validate_vector(u)
v = _validate_vector(v)
dist = 1.0 - np.dot(u, v) / (norm(u) * norm(v))
return dist
def correlation(u, v):
"""
Computes the correlation distance between two 1-D arrays.
The correlation distance between `u` and `v`, is
defined as
.. math::
1 - \\frac{(u - \\bar{u}) \\cdot (v - \\bar{v})}
{{||(u - \\bar{u})||}_2 {||(v - \\bar{v})||}_2}
where :math:`\\bar{u}` is the mean of the elements of `u`
and :math:`x \\cdot y` is the dot product of :math:`x` and :math:`y`.
Parameters
----------
u : (N,) array_like
Input array.
v : (N,) array_like
Input array.
Returns
-------
correlation : double
The correlation distance between 1-D array `u` and `v`.
"""
u = _validate_vector(u)
v = _validate_vector(v)
umu = u.mean()
vmu = v.mean()
um = u - umu
vm = v - vmu
dist = 1.0 - np.dot(um, vm) / (norm(um) * norm(vm))
return dist
def hamming(u, v):
"""
Computes the Hamming distance between two 1-D arrays.
The Hamming distance between 1-D arrays `u` and `v`, is simply the
proportion of disagreeing components in `u` and `v`. If `u` and `v` are
boolean vectors, the Hamming distance is
.. math::
\\frac{c_{01} + c_{10}}{n}
where :math:`c_{ij}` is the number of occurrences of
:math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
:math:`k < n`.
Parameters
----------
u : (N,) array_like
Input array.
v : (N,) array_like
Input array.
Returns
-------
hamming : double
The Hamming distance between vectors `u` and `v`.
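    Examples
    --------
    Two vectors that disagree in two of four components are at Hamming
    distance 0.5:
    >>> from scipy.spatial import distance
    >>> distance.hamming([1, 0, 0, 1], [0, 1, 0, 1])
    0.5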
"""
u = _validate_vector(u)
v = _validate_vector(v)
if u.shape != v.shape:
raise ValueError('The 1d arrays must have equal lengths.')
return (u != v).mean()
def jaccard(u, v):
"""
Computes the Jaccard-Needham dissimilarity between two boolean 1-D arrays.
The Jaccard-Needham dissimilarity between 1-D boolean arrays `u` and `v`,
is defined as
.. math::
\\frac{c_{TF} + c_{FT}}
{c_{TT} + c_{FT} + c_{TF}}
where :math:`c_{ij}` is the number of occurrences of
:math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
:math:`k < n`.
Parameters
----------
u : (N,) array_like, bool
Input array.
v : (N,) array_like, bool
Input array.
Returns
-------
jaccard : double
The Jaccard distance between vectors `u` and `v`.
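    Examples
    --------
    Only components where at least one of the vectors is non-zero enter
    the denominator:
    >>> from scipy.spatial import distance
    >>> distance.jaccard([1, 0, 0], [0, 1, 0])
    1.0
    >>> distance.jaccard([1, 1, 0], [1, 0, 0])
    0.5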
"""
u = _validate_vector(u)
v = _validate_vector(v)
dist = (np.double(np.bitwise_and((u != v),
np.bitwise_or(u != 0, v != 0)).sum())
/ np.double(np.bitwise_or(u != 0, v != 0).sum()))
return dist
def kulsinski(u, v):
"""
Computes the Kulsinski dissimilarity between two boolean 1-D arrays.
The Kulsinski dissimilarity between two boolean 1-D arrays `u` and `v`,
is defined as
.. math::
\\frac{c_{TF} + c_{FT} - c_{TT} + n}
{c_{FT} + c_{TF} + n}
where :math:`c_{ij}` is the number of occurrences of
:math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
:math:`k < n`.
Parameters
----------
u : (N,) array_like, bool
Input array.
v : (N,) array_like, bool
Input array.
Returns
-------
kulsinski : double
The Kulsinski distance between vectors `u` and `v`.
"""
u = _validate_vector(u)
v = _validate_vector(v)
n = float(len(u))
(nff, nft, ntf, ntt) = _nbool_correspond_all(u, v)
return (ntf + nft - ntt + n) / (ntf + nft + n)
def seuclidean(u, v, V):
"""
    Returns the standardized Euclidean distance between two 1-D arrays.
    The standardized Euclidean distance between `u` and `v` is defined as
    .. math::
       \\sqrt{\\sum{(u_i-v_i)^2 / V_i}}
    where ``V`` is the vector of component variances.
Parameters
----------
u : (N,) array_like
Input array.
v : (N,) array_like
Input array.
V : (N,) array_like
        `V` is a 1-D array of component variances. It is usually computed
        among a larger collection of vectors.
Returns
-------
seuclidean : double
The standardized Euclidean distance between vectors `u` and `v`.
"""
u = _validate_vector(u)
v = _validate_vector(v)
V = _validate_vector(V, dtype=np.float64)
if V.shape[0] != u.shape[0] or u.shape[0] != v.shape[0]:
raise TypeError('V must be a 1-D array of the same dimension '
'as u and v.')
return np.sqrt(((u - v) ** 2 / V).sum())
def cityblock(u, v):
"""
Computes the City Block (Manhattan) distance.
Computes the Manhattan distance between two 1-D arrays `u` and `v`,
which is defined as
.. math::
\\sum_i {\\left| u_i - v_i \\right|}.
Parameters
----------
u : (N,) array_like
Input array.
v : (N,) array_like
Input array.
Returns
-------
cityblock : double
The City Block (Manhattan) distance between vectors `u` and `v`.
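    Examples
    --------
    The Manhattan distance between two unit basis vectors is 2, one step
    along each axis:
    >>> from scipy.spatial import distance
    >>> distance.cityblock([1, 0, 0], [0, 1, 0])
    2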
"""
u = _validate_vector(u)
v = _validate_vector(v)
return abs(u - v).sum()
def mahalanobis(u, v, VI):
"""
Computes the Mahalanobis distance between two 1-D arrays.
The Mahalanobis distance between 1-D arrays `u` and `v`, is defined as
.. math::
\\sqrt{ (u-v) V^{-1} (u-v)^T }
where ``V`` is the covariance matrix. Note that the argument `VI`
is the inverse of ``V``.
Parameters
----------
u : (N,) array_like
Input array.
v : (N,) array_like
Input array.
VI : ndarray
The inverse of the covariance matrix.
Returns
-------
mahalanobis : double
The Mahalanobis distance between vectors `u` and `v`.
"""
u = _validate_vector(u)
v = _validate_vector(v)
VI = np.atleast_2d(VI)
delta = u - v
m = np.dot(np.dot(delta, VI), delta)
return np.sqrt(m)
def chebyshev(u, v):
"""
Computes the Chebyshev distance.
Computes the Chebyshev distance between two 1-D arrays `u` and `v`,
which is defined as
.. math::
\\max_i {|u_i-v_i|}.
Parameters
----------
u : (N,) array_like
Input vector.
v : (N,) array_like
Input vector.
Returns
-------
chebyshev : double
The Chebyshev distance between vectors `u` and `v`.
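    Examples
    --------
    Only the largest component-wise difference matters:
    >>> from scipy.spatial import distance
    >>> distance.chebyshev([2, 3, 4], [1, 1, 1])
    3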
"""
u = _validate_vector(u)
v = _validate_vector(v)
return max(abs(u - v))
def braycurtis(u, v):
"""
Computes the Bray-Curtis distance between two 1-D arrays.
Bray-Curtis distance is defined as
.. math::
\\sum{|u_i-v_i|} / \\sum{|u_i+v_i|}
The Bray-Curtis distance is in the range [0, 1] if all coordinates are
positive, and is undefined if the inputs are of length zero.
Parameters
----------
u : (N,) array_like
Input array.
v : (N,) array_like
Input array.
Returns
-------
braycurtis : double
The Bray-Curtis distance between 1-D arrays `u` and `v`.
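    Examples
    --------
    Two non-negative vectors with no overlapping support attain the
    maximum distance of 1:
    >>> from scipy.spatial import distance
    >>> distance.braycurtis([1, 0, 0], [0, 1, 0])
    1.0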
"""
u = _validate_vector(u)
v = _validate_vector(v, dtype=np.float64)
return abs(u - v).sum() / abs(u + v).sum()
def canberra(u, v):
"""
Computes the Canberra distance between two 1-D arrays.
The Canberra distance is defined as
.. math::
d(u,v) = \\sum_i \\frac{|u_i-v_i|}
{|u_i|+|v_i|}.
Parameters
----------
u : (N,) array_like
Input array.
v : (N,) array_like
Input array.
Returns
-------
canberra : double
The Canberra distance between vectors `u` and `v`.
Notes
-----
    When `u[i]` and `v[i]` are 0 for a given i, the fraction 0/0 = 0 is
    used in the calculation.
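    Examples
    --------
    Each non-zero component contributes at most 1, and the 0/0 component
    contributes nothing:
    >>> from scipy.spatial import distance
    >>> distance.canberra([1, 0, 0], [0, 1, 0])
    2.0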
"""
u = _validate_vector(u)
v = _validate_vector(v, dtype=np.float64)
olderr = np.seterr(invalid='ignore')
try:
d = np.nansum(abs(u - v) / (abs(u) + abs(v)))
finally:
np.seterr(**olderr)
return d
def _nbool_correspond_all(u, v):
if u.dtype != v.dtype:
raise TypeError("Arrays being compared must be of the same data type.")
if u.dtype == np.int or u.dtype == np.float_ or u.dtype == np.double:
not_u = 1.0 - u
not_v = 1.0 - v
nff = (not_u * not_v).sum()
nft = (not_u * v).sum()
ntf = (u * not_v).sum()
ntt = (u * v).sum()
elif u.dtype == np.bool:
not_u = ~u
not_v = ~v
nff = (not_u & not_v).sum()
nft = (not_u & v).sum()
ntf = (u & not_v).sum()
ntt = (u & v).sum()
else:
raise TypeError("Arrays being compared have unknown type.")
return (nff, nft, ntf, ntt)
def _nbool_correspond_ft_tf(u, v):
if u.dtype == np.int or u.dtype == np.float_ or u.dtype == np.double:
not_u = 1.0 - u
not_v = 1.0 - v
nft = (not_u * v).sum()
ntf = (u * not_v).sum()
else:
not_u = ~u
not_v = ~v
nft = (not_u & v).sum()
ntf = (u & not_v).sum()
return (nft, ntf)
def yule(u, v):
"""
Computes the Yule dissimilarity between two boolean 1-D arrays.
The Yule dissimilarity is defined as
.. math::
       \\frac{R}{c_{TT} c_{FF} + \\frac{R}{2}}
    where :math:`c_{ij}` is the number of occurrences of
    :math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
    :math:`k < n` and :math:`R = 2 c_{TF} c_{FT}`.
Parameters
----------
u : (N,) array_like, bool
Input array.
v : (N,) array_like, bool
Input array.
Returns
-------
yule : double
The Yule dissimilarity between vectors `u` and `v`.
"""
u = _validate_vector(u)
v = _validate_vector(v)
(nff, nft, ntf, ntt) = _nbool_correspond_all(u, v)
return float(2.0 * ntf * nft) / float(ntt * nff + ntf * nft)
def matching(u, v):
"""
Computes the Matching dissimilarity between two boolean 1-D arrays.
The Matching dissimilarity between two boolean 1-D arrays
`u` and `v`, is defined as
.. math::
\\frac{c_{TF} + c_{FT}}{n}
where :math:`c_{ij}` is the number of occurrences of
:math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
:math:`k < n`.
Parameters
----------
u : (N,) array_like, bool
Input array.
v : (N,) array_like, bool
Input array.
Returns
-------
matching : double
The Matching dissimilarity between vectors `u` and `v`.
"""
u = _validate_vector(u)
v = _validate_vector(v)
(nft, ntf) = _nbool_correspond_ft_tf(u, v)
return float(nft + ntf) / float(len(u))
def dice(u, v):
"""
Computes the Dice dissimilarity between two boolean 1-D arrays.
The Dice dissimilarity between `u` and `v`, is
.. math::
\\frac{c_{TF} + c_{FT}}
{2c_{TT} + c_{FT} + c_{TF}}
where :math:`c_{ij}` is the number of occurrences of
:math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
:math:`k < n`.
Parameters
----------
u : (N,) ndarray, bool
Input 1-D array.
v : (N,) ndarray, bool
Input 1-D array.
Returns
-------
dice : double
The Dice dissimilarity between 1-D arrays `u` and `v`.
"""
u = _validate_vector(u)
v = _validate_vector(v)
if u.dtype == np.bool:
ntt = (u & v).sum()
else:
ntt = (u * v).sum()
(nft, ntf) = _nbool_correspond_ft_tf(u, v)
return float(ntf + nft) / float(2.0 * ntt + ntf + nft)
def rogerstanimoto(u, v):
"""
Computes the Rogers-Tanimoto dissimilarity between two boolean 1-D arrays.
The Rogers-Tanimoto dissimilarity between two boolean 1-D arrays
`u` and `v`, is defined as
.. math::
\\frac{R}
{c_{TT} + c_{FF} + R}
where :math:`c_{ij}` is the number of occurrences of
:math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
:math:`k < n` and :math:`R = 2(c_{TF} + c_{FT})`.
Parameters
----------
u : (N,) array_like, bool
Input array.
v : (N,) array_like, bool
Input array.
Returns
-------
rogerstanimoto : double
The Rogers-Tanimoto dissimilarity between vectors
`u` and `v`.
"""
u = _validate_vector(u)
v = _validate_vector(v)
(nff, nft, ntf, ntt) = _nbool_correspond_all(u, v)
return float(2.0 * (ntf + nft)) / float(ntt + nff + (2.0 * (ntf + nft)))
def russellrao(u, v):
"""
Computes the Russell-Rao dissimilarity between two boolean 1-D arrays.
The Russell-Rao dissimilarity between two boolean 1-D arrays, `u` and
`v`, is defined as
.. math::
\\frac{n - c_{TT}}
{n}
where :math:`c_{ij}` is the number of occurrences of
:math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
:math:`k < n`.
Parameters
----------
u : (N,) array_like, bool
Input array.
v : (N,) array_like, bool
Input array.
Returns
-------
russellrao : double
The Russell-Rao dissimilarity between vectors `u` and `v`.
"""
u = _validate_vector(u)
v = _validate_vector(v)
if u.dtype == np.bool:
ntt = (u & v).sum()
else:
ntt = (u * v).sum()
return float(len(u) - ntt) / float(len(u))
def sokalmichener(u, v):
"""
Computes the Sokal-Michener dissimilarity between two boolean 1-D arrays.
The Sokal-Michener dissimilarity between boolean 1-D arrays `u` and `v`,
is defined as
.. math::
\\frac{R}
{S + R}
where :math:`c_{ij}` is the number of occurrences of
:math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
:math:`k < n`, :math:`R = 2 * (c_{TF} + c_{FT})` and
:math:`S = c_{FF} + c_{TT}`.
Parameters
----------
u : (N,) array_like, bool
Input array.
v : (N,) array_like, bool
Input array.
Returns
-------
sokalmichener : double
The Sokal-Michener dissimilarity between vectors `u` and `v`.
"""
u = _validate_vector(u)
v = _validate_vector(v)
if u.dtype == np.bool:
ntt = (u & v).sum()
nff = (~u & ~v).sum()
else:
ntt = (u * v).sum()
nff = ((1.0 - u) * (1.0 - v)).sum()
(nft, ntf) = _nbool_correspond_ft_tf(u, v)
return float(2.0 * (ntf + nft)) / float(ntt + nff + 2.0 * (ntf + nft))
def sokalsneath(u, v):
"""
Computes the Sokal-Sneath dissimilarity between two boolean 1-D arrays.
The Sokal-Sneath dissimilarity between `u` and `v`,
.. math::
\\frac{R}
{c_{TT} + R}
where :math:`c_{ij}` is the number of occurrences of
:math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
:math:`k < n` and :math:`R = 2(c_{TF} + c_{FT})`.
Parameters
----------
u : (N,) array_like, bool
Input array.
v : (N,) array_like, bool
Input array.
Returns
-------
sokalsneath : double
The Sokal-Sneath dissimilarity between vectors `u` and `v`.
"""
u = _validate_vector(u)
v = _validate_vector(v)
if u.dtype == np.bool:
ntt = (u & v).sum()
else:
ntt = (u * v).sum()
(nft, ntf) = _nbool_correspond_ft_tf(u, v)
denom = ntt + 2.0 * (ntf + nft)
if denom == 0:
raise ValueError('Sokal-Sneath dissimilarity is not defined for '
'vectors that are entirely false.')
return float(2.0 * (ntf + nft)) / denom
def pdist(X, metric='euclidean', p=2, w=None, V=None, VI=None):
"""
Pairwise distances between observations in n-dimensional space.
The following are common calling conventions.
1. ``Y = pdist(X, 'euclidean')``
Computes the distance between m points using Euclidean distance
(2-norm) as the distance metric between the points. The points
are arranged as m n-dimensional row vectors in the matrix X.
2. ``Y = pdist(X, 'minkowski', p)``
Computes the distances using the Minkowski distance
:math:`||u-v||_p` (p-norm) where :math:`p \\geq 1`.
3. ``Y = pdist(X, 'cityblock')``
Computes the city block or Manhattan distance between the
points.
4. ``Y = pdist(X, 'seuclidean', V=None)``
Computes the standardized Euclidean distance. The standardized
Euclidean distance between two n-vectors ``u`` and ``v`` is
.. math::
\\sqrt{\\sum {(u_i-v_i)^2 / V[x_i]}}
V is the variance vector; V[i] is the variance computed over all
the i'th components of the points. If not passed, it is
automatically computed.
5. ``Y = pdist(X, 'sqeuclidean')``
Computes the squared Euclidean distance :math:`||u-v||_2^2` between
the vectors.
6. ``Y = pdist(X, 'cosine')``
Computes the cosine distance between vectors u and v,
.. math::
1 - \\frac{u \\cdot v}
{{||u||}_2 {||v||}_2}
where :math:`||*||_2` is the 2-norm of its argument ``*``, and
:math:`u \\cdot v` is the dot product of ``u`` and ``v``.
7. ``Y = pdist(X, 'correlation')``
Computes the correlation distance between vectors u and v. This is
.. math::
1 - \\frac{(u - \\bar{u}) \\cdot (v - \\bar{v})}
{{||(u - \\bar{u})||}_2 {||(v - \\bar{v})||}_2}
where :math:`\\bar{v}` is the mean of the elements of vector v,
and :math:`x \\cdot y` is the dot product of :math:`x` and :math:`y`.
8. ``Y = pdist(X, 'hamming')``
Computes the normalized Hamming distance, or the proportion of
those vector elements between two n-vectors ``u`` and ``v``
which disagree. To save memory, the matrix ``X`` can be of type
boolean.
9. ``Y = pdist(X, 'jaccard')``
Computes the Jaccard distance between the points. Given two
vectors, ``u`` and ``v``, the Jaccard distance is the
proportion of those elements ``u[i]`` and ``v[i]`` that
disagree where at least one of them is non-zero.
10. ``Y = pdist(X, 'chebyshev')``
Computes the Chebyshev distance between the points. The
Chebyshev distance between two n-vectors ``u`` and ``v`` is the
maximum norm-1 distance between their respective elements. More
precisely, the distance is given by
.. math::
d(u,v) = \\max_i {|u_i-v_i|}
11. ``Y = pdist(X, 'canberra')``
Computes the Canberra distance between the points. The
Canberra distance between two points ``u`` and ``v`` is
.. math::
d(u,v) = \\sum_i \\frac{|u_i-v_i|}
{|u_i|+|v_i|}
12. ``Y = pdist(X, 'braycurtis')``
Computes the Bray-Curtis distance between the points. The
Bray-Curtis distance between two points ``u`` and ``v`` is
.. math::
           d(u,v) = \\frac{\\sum_i {|u_i-v_i|}}
                          {\\sum_i {|u_i+v_i|}}
13. ``Y = pdist(X, 'mahalanobis', VI=None)``
Computes the Mahalanobis distance between the points. The
Mahalanobis distance between two points ``u`` and ``v`` is
:math:`(u-v)(1/V)(u-v)^T` where :math:`(1/V)` (the ``VI``
variable) is the inverse covariance. If ``VI`` is not None,
``VI`` will be used as the inverse covariance matrix.
14. ``Y = pdist(X, 'yule')``
Computes the Yule distance between each pair of boolean
vectors. (see yule function documentation)
15. ``Y = pdist(X, 'matching')``
Computes the matching distance between each pair of boolean
vectors. (see matching function documentation)
16. ``Y = pdist(X, 'dice')``
Computes the Dice distance between each pair of boolean
vectors. (see dice function documentation)
17. ``Y = pdist(X, 'kulsinski')``
Computes the Kulsinski distance between each pair of
boolean vectors. (see kulsinski function documentation)
18. ``Y = pdist(X, 'rogerstanimoto')``
Computes the Rogers-Tanimoto distance between each pair of
boolean vectors. (see rogerstanimoto function documentation)
19. ``Y = pdist(X, 'russellrao')``
Computes the Russell-Rao distance between each pair of
boolean vectors. (see russellrao function documentation)
20. ``Y = pdist(X, 'sokalmichener')``
Computes the Sokal-Michener distance between each pair of
boolean vectors. (see sokalmichener function documentation)
21. ``Y = pdist(X, 'sokalsneath')``
Computes the Sokal-Sneath distance between each pair of
boolean vectors. (see sokalsneath function documentation)
22. ``Y = pdist(X, 'wminkowski')``
Computes the weighted Minkowski distance between each pair of
vectors. (see wminkowski function documentation)
23. ``Y = pdist(X, f)``
Computes the distance between all pairs of vectors in X
using the user supplied 2-arity function f. For example,
Euclidean distance between the vectors could be computed
as follows::
dm = pdist(X, lambda u, v: np.sqrt(((u-v)**2).sum()))
Note that you should avoid passing a reference to one of
the distance functions defined in this library. For example,::
dm = pdist(X, sokalsneath)
would calculate the pair-wise distances between the vectors in
X using the Python function sokalsneath. This would result in
sokalsneath being called :math:`{n \\choose 2}` times, which
is inefficient. Instead, the optimized C version is more
efficient, and we call it using the following syntax.::
dm = pdist(X, 'sokalsneath')
Parameters
----------
X : ndarray
An m by n array of m original observations in an
n-dimensional space.
metric : string or function
The distance metric to use. The distance function can
be 'braycurtis', 'canberra', 'chebyshev', 'cityblock',
'correlation', 'cosine', 'dice', 'euclidean', 'hamming',
'jaccard', 'kulsinski', 'mahalanobis', 'matching',
'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule'.
w : ndarray
The weight vector (for weighted Minkowski).
p : double
The p-norm to apply (for Minkowski, weighted and unweighted)
V : ndarray
The variance vector (for standardized Euclidean).
VI : ndarray
The inverse of the covariance matrix (for Mahalanobis).
Returns
-------
Y : ndarray
        Returns a condensed distance matrix Y. For
        each :math:`i` and :math:`j` (where :math:`i<j<m`), the
        metric ``dist(u=X[i], v=X[j])`` is computed and stored in entry ``ij``.
See Also
--------
squareform : converts between condensed distance matrices and
square distance matrices.
Notes
-----
See ``squareform`` for information on how to calculate the index of
this entry or to convert the condensed distance matrix to a
redundant square matrix.
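    Examples
    --------
    A small illustration on three points in the plane, using the city
    block metric; the condensed result holds the distances for the pairs
    (0, 1), (0, 2) and (1, 2) in that order:
    >>> from scipy.spatial import distance
    >>> X = [[0, 0], [0, 1], [1, 0]]
    >>> Y = distance.pdist(X, 'cityblock')
    >>> Y.tolist()
    [1.0, 1.0, 2.0]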
"""
# 21. Y = pdist(X, 'test_Y')
#
# Computes the distance between all pairs of vectors in X
# using the distance metric Y but with a more succinct,
# verifiable, but less efficient implementation.
X = np.asarray(X, order='c')
# The C code doesn't do striding.
[X] = _copy_arrays_if_base_present([_convert_to_double(X)])
s = X.shape
if len(s) != 2:
raise ValueError('A 2-dimensional array must be passed.')
m, n = s
dm = np.zeros((m * (m - 1)) // 2, dtype=np.double)
wmink_names = ['wminkowski', 'wmi', 'wm', 'wpnorm']
if w is None and (metric == wminkowski or metric in wmink_names):
raise ValueError('weighted minkowski requires a weight '
'vector `w` to be given.')
if callable(metric):
if metric == minkowski:
def dfun(u, v):
return minkowski(u, v, p)
elif metric == wminkowski:
def dfun(u, v):
return wminkowski(u, v, p, w)
elif metric == seuclidean:
def dfun(u, v):
return seuclidean(u, v, V)
elif metric == mahalanobis:
def dfun(u, v):
                return mahalanobis(u, v, VI)
else:
dfun = metric
k = 0
for i in xrange(0, m - 1):
for j in xrange(i + 1, m):
dm[k] = dfun(X[i], X[j])
k = k + 1
elif isinstance(metric, string_types):
mstr = metric.lower()
#if X.dtype != np.double and \
# (mstr != 'hamming' and mstr != 'jaccard'):
# TypeError('A double array must be passed.')
if mstr in set(['euclidean', 'euclid', 'eu', 'e']):
_distance_wrap.pdist_euclidean_wrap(_convert_to_double(X), dm)
elif mstr in set(['sqeuclidean', 'sqe', 'sqeuclid']):
_distance_wrap.pdist_sqeuclidean_wrap(_convert_to_double(X), dm)
elif mstr in set(['cityblock', 'cblock', 'cb', 'c']):
_distance_wrap.pdist_city_block_wrap(X, dm)
elif mstr in set(['hamming', 'hamm', 'ha', 'h']):
if X.dtype == np.bool:
_distance_wrap.pdist_hamming_bool_wrap(_convert_to_bool(X), dm)
else:
_distance_wrap.pdist_hamming_wrap(_convert_to_double(X), dm)
elif mstr in set(['jaccard', 'jacc', 'ja', 'j']):
if X.dtype == np.bool:
_distance_wrap.pdist_jaccard_bool_wrap(_convert_to_bool(X), dm)
else:
_distance_wrap.pdist_jaccard_wrap(_convert_to_double(X), dm)
elif mstr in set(['chebychev', 'chebyshev', 'cheby', 'cheb', 'ch']):
_distance_wrap.pdist_chebyshev_wrap(_convert_to_double(X), dm)
elif mstr in set(['minkowski', 'mi', 'm']):
_distance_wrap.pdist_minkowski_wrap(_convert_to_double(X), dm, p)
elif mstr in wmink_names:
_distance_wrap.pdist_weighted_minkowski_wrap(_convert_to_double(X),
dm, p, np.asarray(w))
elif mstr in set(['seuclidean', 'se', 's']):
if V is not None:
V = np.asarray(V, order='c')
if type(V) != np.ndarray:
raise TypeError('Variance vector V must be a numpy array')
if V.dtype != np.double:
raise TypeError('Variance vector V must contain doubles.')
if len(V.shape) != 1:
raise ValueError('Variance vector V must '
'be one-dimensional.')
if V.shape[0] != n:
raise ValueError('Variance vector V must be of the same '
'dimension as the vectors on which the distances '
'are computed.')
# The C code doesn't do striding.
[VV] = _copy_arrays_if_base_present([_convert_to_double(V)])
else:
VV = np.var(X, axis=0, ddof=1)
_distance_wrap.pdist_seuclidean_wrap(_convert_to_double(X), VV, dm)
# Need to test whether vectorized cosine works better.
# Find out: Is there a dot subtraction operator so I can
# subtract matrices in a similar way to multiplying them?
# Need to get rid of as much unnecessary C code as possible.
elif mstr in set(['cosine', 'cos']):
norms = np.sqrt(np.sum(X * X, axis=1))
_distance_wrap.pdist_cosine_wrap(_convert_to_double(X), dm, norms)
elif mstr in set(['old_cosine', 'old_cos']):
norms = np.sqrt(np.sum(X * X, axis=1))
nV = norms.reshape(m, 1)
# The numerator u * v
nm = np.dot(X, X.T)
# The denom. ||u||*||v||
de = np.dot(nV, nV.T)
dm = 1.0 - (nm / de)
dm[xrange(0, m), xrange(0, m)] = 0.0
dm = squareform(dm)
elif mstr in set(['correlation', 'co']):
X2 = X - X.mean(1)[:, np.newaxis]
#X2 = X - np.matlib.repmat(np.mean(X, axis=1).reshape(m, 1), 1, n)
norms = np.sqrt(np.sum(X2 * X2, axis=1))
_distance_wrap.pdist_cosine_wrap(_convert_to_double(X2),
_convert_to_double(dm),
_convert_to_double(norms))
elif mstr in set(['mahalanobis', 'mahal', 'mah']):
if VI is not None:
VI = _convert_to_double(np.asarray(VI, order='c'))
if type(VI) != np.ndarray:
raise TypeError('VI must be a numpy array.')
if VI.dtype != np.double:
raise TypeError('The array must contain 64-bit floats.')
[VI] = _copy_arrays_if_base_present([VI])
else:
V = np.cov(X.T)
VI = _convert_to_double(np.linalg.inv(V).T.copy())
# (u-v)V^(-1)(u-v)^T
_distance_wrap.pdist_mahalanobis_wrap(_convert_to_double(X),
VI, dm)
elif mstr == 'canberra':
_distance_wrap.pdist_canberra_wrap(_convert_to_double(X), dm)
elif mstr == 'braycurtis':
_distance_wrap.pdist_bray_curtis_wrap(_convert_to_double(X), dm)
elif mstr == 'yule':
_distance_wrap.pdist_yule_bool_wrap(_convert_to_bool(X), dm)
elif mstr == 'matching':
_distance_wrap.pdist_matching_bool_wrap(_convert_to_bool(X), dm)
elif mstr == 'kulsinski':
_distance_wrap.pdist_kulsinski_bool_wrap(_convert_to_bool(X), dm)
elif mstr == 'dice':
_distance_wrap.pdist_dice_bool_wrap(_convert_to_bool(X), dm)
elif mstr == 'rogerstanimoto':
_distance_wrap.pdist_rogerstanimoto_bool_wrap(_convert_to_bool(X),
dm)
elif mstr == 'russellrao':
_distance_wrap.pdist_russellrao_bool_wrap(_convert_to_bool(X), dm)
elif mstr == 'sokalmichener':
_distance_wrap.pdist_sokalmichener_bool_wrap(_convert_to_bool(X),
dm)
elif mstr == 'sokalsneath':
_distance_wrap.pdist_sokalsneath_bool_wrap(_convert_to_bool(X), dm)
elif metric == 'test_euclidean':
dm = pdist(X, euclidean)
    elif metric == 'test_seuclidean':
if V is None:
V = np.var(X, axis=0, ddof=1)
else:
V = np.asarray(V, order='c')
dm = pdist(X, lambda u, v: seuclidean(u, v, V))
elif metric == 'test_braycurtis':
dm = pdist(X, braycurtis)
elif metric == 'test_mahalanobis':
if VI is None:
V = np.cov(X.T)
VI = np.linalg.inv(V)
else:
VI = np.asarray(VI, order='c')
[VI] = _copy_arrays_if_base_present([VI])
# (u-v)V^(-1)(u-v)^T
dm = pdist(X, (lambda u, v: mahalanobis(u, v, VI)))
elif metric == 'test_canberra':
dm = pdist(X, canberra)
elif metric == 'test_cityblock':
dm = pdist(X, cityblock)
elif metric == 'test_minkowski':
dm = pdist(X, minkowski, p=p)
elif metric == 'test_wminkowski':
dm = pdist(X, wminkowski, p=p, w=w)
elif metric == 'test_cosine':
dm = pdist(X, cosine)
elif metric == 'test_correlation':
dm = pdist(X, correlation)
elif metric == 'test_hamming':
dm = pdist(X, hamming)
elif metric == 'test_jaccard':
dm = pdist(X, jaccard)
elif metric == 'test_chebyshev' or metric == 'test_chebychev':
dm = pdist(X, chebyshev)
elif metric == 'test_yule':
dm = pdist(X, yule)
elif metric == 'test_matching':
dm = pdist(X, matching)
elif metric == 'test_dice':
dm = pdist(X, dice)
elif metric == 'test_kulsinski':
dm = pdist(X, kulsinski)
elif metric == 'test_rogerstanimoto':
dm = pdist(X, rogerstanimoto)
elif metric == 'test_russellrao':
dm = pdist(X, russellrao)
elif metric == 'test_sokalsneath':
dm = pdist(X, sokalsneath)
elif metric == 'test_sokalmichener':
dm = pdist(X, sokalmichener)
else:
raise ValueError('Unknown Distance Metric: %s' % mstr)
else:
raise TypeError('2nd argument metric must be a string identifier '
'or a function.')
return dm
def squareform(X, force="no", checks=True):
"""
Converts a vector-form distance vector to a square-form distance
matrix, and vice-versa.
Parameters
----------
X : ndarray
Either a condensed or redundant distance matrix.
force : str, optional
As with MATLAB(TM), if force is equal to 'tovector' or 'tomatrix',
the input will be treated as a distance matrix or distance vector
respectively.
checks : bool, optional
If `checks` is set to False, no checks will be made for matrix
symmetry nor zero diagonals. This is useful if it is known that
        ``X - X.T`` is small and ``diag(X)`` is close to zero.
These values are ignored any way so they do not disrupt the
squareform transformation.
Returns
-------
Y : ndarray
If a condensed distance matrix is passed, a redundant one is
returned, or if a redundant one is passed, a condensed distance
matrix is returned.
Notes
-----
    1. v = squareform(X)
       Given a square d-by-d symmetric distance matrix X,
       ``v = squareform(X)`` returns a ``d * (d-1) / 2`` (i.e. binomial
       coefficient :math:`{n \\choose 2}`) sized vector v, where
       v[{n \\choose 2} - {n-i \\choose 2} + (j-i-1)] is the distance
       between points i and j. If X is non-square or asymmetric, an error
       is raised.
    2. X = squareform(v)
       Given a ``d * (d-1) / 2`` sized vector v for some integer d >= 2
       encoding distances as described, ``X = squareform(v)`` returns a
       d by d distance matrix X. The X[i, j] and X[j, i] values are set to
       v[{n \\choose 2} - {n-i \\choose 2} + (j-i-1)] and all
       diagonal elements are zero.
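    Examples
    --------
    A condensed vector of three distances expands to a symmetric 3-by-3
    matrix with a zero diagonal, and converting back recovers the vector:
    >>> from scipy.spatial import distance
    >>> M = distance.squareform([1.0, 2.0, 3.0])
    >>> M.shape
    (3, 3)
    >>> distance.squareform(M).tolist()
    [1.0, 2.0, 3.0]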
"""
X = _convert_to_double(np.asarray(X, order='c'))
if not np.issubsctype(X, np.double):
raise TypeError('A double array must be passed.')
s = X.shape
if force.lower() == 'tomatrix':
if len(s) != 1:
raise ValueError("Forcing 'tomatrix' but input X is not a "
"distance vector.")
elif force.lower() == 'tovector':
if len(s) != 2:
raise ValueError("Forcing 'tovector' but input X is not a "
"distance matrix.")
# X = squareform(v)
if len(s) == 1:
if X.shape[0] == 0:
return np.zeros((1, 1), dtype=np.double)
# Grab the closest value to the square root of the number
# of elements times 2 to see if the number of elements
# is indeed a binomial coefficient.
d = int(np.ceil(np.sqrt(X.shape[0] * 2)))
# Check that v is of valid dimensions.
if d * (d - 1) / 2 != int(s[0]):
raise ValueError('Incompatible vector size. It must be a binomial '
'coefficient n choose 2 for some integer n >= 2.')
# Allocate memory for the distance matrix.
M = np.zeros((d, d), dtype=np.double)
        # Since the C code does not support striding, the array is copied
        # if necessary so that plain dimensions can be used.
[X] = _copy_arrays_if_base_present([X])
# Fill in the values of the distance matrix.
_distance_wrap.to_squareform_from_vector_wrap(M, X)
# Return the distance matrix.
M = M + M.transpose()
return M
elif len(s) == 2:
if s[0] != s[1]:
raise ValueError('The matrix argument must be square.')
if checks:
is_valid_dm(X, throw=True, name='X')
        # One side of the dimensions is set here.
d = s[0]
if d <= 1:
return np.array([], dtype=np.double)
# Create a vector.
v = np.zeros((d * (d - 1)) // 2, dtype=np.double)
        # Since the C code does not support striding, the array is copied
        # if necessary so that plain dimensions can be used.
[X] = _copy_arrays_if_base_present([X])
        # Convert the square matrix to a condensed vector.
_distance_wrap.to_vector_from_squareform_wrap(X, v)
return v
else:
raise ValueError(('The first argument must be one or two dimensional '
'array. A %d-dimensional array is not '
'permitted') % len(s))
def is_valid_dm(D, tol=0.0, throw=False, name="D", warning=False):
"""
Returns True if input array is a valid distance matrix.
Distance matrices must be 2-dimensional numpy arrays containing
doubles. They must have a zero-diagonal, and they must be symmetric.
Parameters
----------
D : ndarray
The candidate object to test for validity.
tol : float, optional
The distance matrix should be symmetric. `tol` is the maximum
difference between entries ``ij`` and ``ji`` for the distance
metric to be considered symmetric.
throw : bool, optional
An exception is thrown if the distance matrix passed is not valid.
name : str, optional
The name of the variable to checked. This is useful if
throw is set to True so the offending variable can be identified
in the exception message when an exception is thrown.
warning : bool, optional
Instead of throwing an exception, a warning message is
raised.
Returns
-------
valid : bool
True if the variable `D` passed is a valid distance matrix.
Notes
-----
Small numerical differences in `D` and `D.T` and non-zeroness of
the diagonal are ignored if they are within the tolerance specified
by `tol`.
"""
D = np.asarray(D, order='c')
valid = True
try:
s = D.shape
if D.dtype != np.double:
if name:
raise TypeError(('Distance matrix \'%s\' must contain doubles '
'(double).') % name)
else:
raise TypeError('Distance matrix must contain doubles '
'(double).')
if len(D.shape) != 2:
if name:
raise ValueError(('Distance matrix \'%s\' must have shape=2 '
'(i.e. be two-dimensional).') % name)
else:
raise ValueError('Distance matrix must have shape=2 (i.e. '
'be two-dimensional).')
if tol == 0.0:
if not (D == D.T).all():
if name:
raise ValueError(('Distance matrix \'%s\' must be '
'symmetric.') % name)
else:
raise ValueError('Distance matrix must be symmetric.')
if not (D[xrange(0, s[0]), xrange(0, s[0])] == 0).all():
if name:
raise ValueError(('Distance matrix \'%s\' diagonal must '
'be zero.') % name)
else:
raise ValueError('Distance matrix diagonal must be zero.')
else:
if not (D - D.T <= tol).all():
if name:
                    raise ValueError(('Distance matrix \'%s\' must be '
                                      'symmetric within tolerance %5.5f.')
                                     % (name, tol))
else:
raise ValueError('Distance matrix must be symmetric within'
' tolerance %5.5f.' % tol)
if not (D[xrange(0, s[0]), xrange(0, s[0])] <= tol).all():
if name:
raise ValueError(('Distance matrix \'%s\' diagonal must be'
' close to zero within tolerance %5.5f.')
% (name, tol))
else:
                    raise ValueError(('Distance matrix diagonal must be'
                                      ' close to zero within tolerance '
                                      '%5.5f.') % tol)
except Exception as e:
if throw:
raise
if warning:
warnings.warn(str(e))
valid = False
return valid
def is_valid_y(y, warning=False, throw=False, name=None):
"""
Returns True if the input array is a valid condensed distance matrix.
Condensed distance matrices must be 1-dimensional
numpy arrays containing doubles. Their length must be a binomial
coefficient :math:`{n \\choose 2}` for some positive integer n.
Parameters
----------
y : ndarray
The condensed distance matrix.
warning : bool, optional
Invokes a warning if the variable passed is not a valid
condensed distance matrix. The warning message explains why
the distance matrix is not valid. `name` is used when
referencing the offending variable.
    throw : bool, optional
Throws an exception if the variable passed is not a valid
condensed distance matrix.
    name : str, optional
        Used when referencing the offending variable in the
        warning or exception message.
    Returns
    -------
    valid : bool
        True if the variable `y` passed is a valid condensed distance
        matrix.
"""
y = np.asarray(y, order='c')
valid = True
try:
if type(y) != np.ndarray:
if name:
raise TypeError(('\'%s\' passed as a condensed distance '
'matrix is not a numpy array.') % name)
else:
raise TypeError('Variable is not a numpy array.')
if y.dtype != np.double:
if name:
raise TypeError(('Condensed distance matrix \'%s\' must '
'contain doubles (double).') % name)
else:
raise TypeError('Condensed distance matrix must contain '
'doubles (double).')
if len(y.shape) != 1:
if name:
raise ValueError(('Condensed distance matrix \'%s\' must '
'have shape=1 (i.e. be one-dimensional).')
% name)
else:
raise ValueError('Condensed distance matrix must have shape=1 '
'(i.e. be one-dimensional).')
n = y.shape[0]
d = int(np.ceil(np.sqrt(n * 2)))
if (d * (d - 1) / 2) != n:
if name:
                raise ValueError(('Length n of condensed distance matrix '
                                  '\'%s\' must be a binomial coefficient, '
                                  'i.e. there must be a k such that '
                                  '(k choose 2) = n.') % name)
            else:
                raise ValueError('Length n of condensed distance matrix must '
                                 'be a binomial coefficient, i.e. there must '
                                 'be a k such that (k choose 2) = n.')
except Exception as e:
if throw:
raise
if warning:
warnings.warn(str(e))
valid = False
return valid
def num_obs_dm(d):
"""
Returns the number of original observations that correspond to a
square, redundant distance matrix.
Parameters
----------
d : ndarray
The target distance matrix.
Returns
-------
num_obs_dm : int
The number of observations in the redundant distance matrix.
"""
d = np.asarray(d, order='c')
is_valid_dm(d, tol=np.inf, throw=True, name='d')
return d.shape[0]
def num_obs_y(Y):
"""
Returns the number of original observations that correspond to a
condensed distance matrix.
Parameters
----------
Y : ndarray
Condensed distance matrix.
Returns
-------
n : int
The number of observations in the condensed distance matrix `Y`.
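    Examples
    --------
    For example, a condensed distance matrix of length 6 corresponds to
    :math:`{4 \\choose 2} = 6` pairs, hence 4 observations:
    >>> import numpy as np
    >>> from scipy.spatial import distance
    >>> distance.num_obs_y(np.zeros(6))
    4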
"""
Y = np.asarray(Y, order='c')
is_valid_y(Y, throw=True, name='Y')
k = Y.shape[0]
if k == 0:
raise ValueError("The number of observations cannot be determined on "
"an empty distance matrix.")
d = int(np.ceil(np.sqrt(k * 2)))
if (d * (d - 1) / 2) != k:
raise ValueError("Invalid condensed distance matrix passed. Must be "
"some k where k=(n choose 2) for some n >= 2.")
return d
def cdist(XA, XB, metric='euclidean', p=2, V=None, VI=None, w=None):
"""
Computes distance between each pair of the two collections of inputs.
The following are common calling conventions:
1. ``Y = cdist(XA, XB, 'euclidean')``
Computes the distance between :math:`m` points using
Euclidean distance (2-norm) as the distance metric between the
points. The points are arranged as :math:`m`
:math:`n`-dimensional row vectors in the matrix X.
2. ``Y = cdist(XA, XB, 'minkowski', p)``
Computes the distances using the Minkowski distance
:math:`||u-v||_p` (:math:`p`-norm) where :math:`p \\geq 1`.
3. ``Y = cdist(XA, XB, 'cityblock')``
Computes the city block or Manhattan distance between the
points.
4. ``Y = cdist(XA, XB, 'seuclidean', V=None)``
Computes the standardized Euclidean distance. The standardized
Euclidean distance between two n-vectors ``u`` and ``v`` is
.. math::
\\sqrt{\\sum {(u_i-v_i)^2 / V[x_i]}}.
V is the variance vector; V[i] is the variance computed over all
the i'th components of the points. If not passed, it is
automatically computed.
5. ``Y = cdist(XA, XB, 'sqeuclidean')``
Computes the squared Euclidean distance :math:`||u-v||_2^2` between
the vectors.
6. ``Y = cdist(XA, XB, 'cosine')``
Computes the cosine distance between vectors u and v,
.. math::
1 - \\frac{u \\cdot v}
{{||u||}_2 {||v||}_2}
where :math:`||*||_2` is the 2-norm of its argument ``*``, and
:math:`u \\cdot v` is the dot product of :math:`u` and :math:`v`.
7. ``Y = cdist(XA, XB, 'correlation')``
Computes the correlation distance between vectors u and v. This is
.. math::
1 - \\frac{(u - \\bar{u}) \\cdot (v - \\bar{v})}
{{||(u - \\bar{u})||}_2 {||(v - \\bar{v})||}_2}
where :math:`\\bar{v}` is the mean of the elements of vector v,
and :math:`x \\cdot y` is the dot product of :math:`x` and :math:`y`.
8. ``Y = cdist(XA, XB, 'hamming')``
Computes the normalized Hamming distance, or the proportion of
those vector elements between two n-vectors ``u`` and ``v``
which disagree. To save memory, the matrix ``X`` can be of type
boolean.
9. ``Y = cdist(XA, XB, 'jaccard')``
Computes the Jaccard distance between the points. Given two
vectors, ``u`` and ``v``, the Jaccard distance is the
proportion of those elements ``u[i]`` and ``v[i]`` that
disagree where at least one of them is non-zero.
10. ``Y = cdist(XA, XB, 'chebyshev')``
Computes the Chebyshev distance between the points. The
Chebyshev distance between two n-vectors ``u`` and ``v`` is the
maximum norm-1 distance between their respective elements. More
precisely, the distance is given by
.. math::
d(u,v) = \\max_i {|u_i-v_i|}.
11. ``Y = cdist(XA, XB, 'canberra')``
Computes the Canberra distance between the points. The
Canberra distance between two points ``u`` and ``v`` is
.. math::
d(u,v) = \\sum_i \\frac{|u_i-v_i|}
{|u_i|+|v_i|}.
12. ``Y = cdist(XA, XB, 'braycurtis')``
Computes the Bray-Curtis distance between the points. The
Bray-Curtis distance between two points ``u`` and ``v`` is
.. math::
           d(u,v) = \\frac{\\sum_i {|u_i-v_i|}}
                          {\\sum_i {|u_i+v_i|}}
13. ``Y = cdist(XA, XB, 'mahalanobis', VI=None)``
Computes the Mahalanobis distance between the points. The
Mahalanobis distance between two points ``u`` and ``v`` is
:math:`(u-v)(1/V)(u-v)^T` where :math:`(1/V)` (the ``VI``
variable) is the inverse covariance. If ``VI`` is not None,
``VI`` will be used as the inverse covariance matrix.
14. ``Y = cdist(XA, XB, 'yule')``
Computes the Yule distance between the boolean
vectors. (see `yule` function documentation)
15. ``Y = cdist(XA, XB, 'matching')``
Computes the matching distance between the boolean
vectors. (see `matching` function documentation)
16. ``Y = cdist(XA, XB, 'dice')``
Computes the Dice distance between the boolean vectors. (see
`dice` function documentation)
17. ``Y = cdist(XA, XB, 'kulsinski')``
Computes the Kulsinski distance between the boolean
vectors. (see `kulsinski` function documentation)
18. ``Y = cdist(XA, XB, 'rogerstanimoto')``
Computes the Rogers-Tanimoto distance between the boolean
vectors. (see `rogerstanimoto` function documentation)
19. ``Y = cdist(XA, XB, 'russellrao')``
Computes the Russell-Rao distance between the boolean
vectors. (see `russellrao` function documentation)
20. ``Y = cdist(XA, XB, 'sokalmichener')``
Computes the Sokal-Michener distance between the boolean
vectors. (see `sokalmichener` function documentation)
21. ``Y = cdist(XA, XB, 'sokalsneath')``
Computes the Sokal-Sneath distance between the vectors. (see
`sokalsneath` function documentation)
22. ``Y = cdist(XA, XB, 'wminkowski')``
Computes the weighted Minkowski distance between the
vectors. (see `wminkowski` function documentation)
23. ``Y = cdist(XA, XB, f)``
Computes the distance between all pairs of vectors in X
using the user supplied 2-arity function f. For example,
Euclidean distance between the vectors could be computed
as follows::
dm = cdist(XA, XB, lambda u, v: np.sqrt(((u-v)**2).sum()))
Note that you should avoid passing a reference to one of
the distance functions defined in this library. For example,::
dm = cdist(XA, XB, sokalsneath)
would calculate the pair-wise distances between the vectors in
X using the Python function `sokalsneath`. This would result in
sokalsneath being called :math:`{n \\choose 2}` times, which
is inefficient. Instead, the optimized C version is more
efficient, and we call it using the following syntax::
dm = cdist(XA, XB, 'sokalsneath')
Parameters
----------
XA : ndarray
An :math:`m_A` by :math:`n` array of :math:`m_A`
original observations in an :math:`n`-dimensional space.
Inputs are converted to float type.
XB : ndarray
An :math:`m_B` by :math:`n` array of :math:`m_B`
original observations in an :math:`n`-dimensional space.
Inputs are converted to float type.
metric : str or callable, optional
The distance metric to use. If a string, the distance function can be
'braycurtis', 'canberra', 'chebyshev', 'cityblock', 'correlation',
'cosine', 'dice', 'euclidean', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto', 'russellrao',
'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean',
'wminkowski', 'yule'.
w : ndarray, optional
The weight vector (for weighted Minkowski).
p : scalar, optional
The p-norm to apply (for Minkowski, weighted and unweighted)
V : ndarray, optional
The variance vector (for standardized Euclidean).
VI : ndarray, optional
The inverse of the covariance matrix (for Mahalanobis).
Returns
-------
Y : ndarray
A :math:`m_A` by :math:`m_B` distance matrix is returned.
For each :math:`i` and :math:`j`, the metric
``dist(u=XA[i], v=XB[j])`` is computed and stored in the
:math:`ij` th entry.
Raises
------
ValueError
An exception is thrown if `XA` and `XB` do not have
the same number of columns.
Examples
--------
Find the Euclidean distances between four 2-D coordinates:
>>> from scipy.spatial import distance
>>> coords = [(35.0456, -85.2672),
... (35.1174, -89.9711),
... (35.9728, -83.9422),
... (36.1667, -86.7833)]
>>> distance.cdist(coords, coords, 'euclidean')
array([[ 0. , 4.7044, 1.6172, 1.8856],
[ 4.7044, 0. , 6.0893, 3.3561],
[ 1.6172, 6.0893, 0. , 2.8477],
[ 1.8856, 3.3561, 2.8477, 0. ]])
Find the Manhattan distance from a 3-D point to the corners of the unit
cube:
    >>> a = np.array([[0, 0, 0],
    ...               [0, 0, 1],
    ...               [0, 1, 0],
    ...               [0, 1, 1],
    ...               [1, 0, 0],
    ...               [1, 0, 1],
    ...               [1, 1, 0],
    ...               [1, 1, 1]])
>>> b = np.array([[ 0.1, 0.2, 0.4]])
>>> distance.cdist(a, b, 'cityblock')
array([[ 0.7],
[ 0.9],
[ 1.3],
[ 1.5],
[ 1.5],
[ 1.7],
[ 2.1],
[ 2.3]])
"""
# 21. Y = cdist(XA, XB, 'test_Y')
#
# Computes the distance between all pairs of vectors in X
    # using the distance metric Y but with a more succinct,
# verifiable, but less efficient implementation.
XA = np.asarray(XA, order='c')
XB = np.asarray(XB, order='c')
#if np.issubsctype(X, np.floating) and not np.issubsctype(X, np.double):
# raise TypeError('Floating point arrays must be 64-bit (got %r).' %
# (X.dtype.type,))
# The C code doesn't do striding.
[XA] = _copy_arrays_if_base_present([_convert_to_double(XA)])
[XB] = _copy_arrays_if_base_present([_convert_to_double(XB)])
s = XA.shape
sB = XB.shape
if len(s) != 2:
raise ValueError('XA must be a 2-dimensional array.')
if len(sB) != 2:
raise ValueError('XB must be a 2-dimensional array.')
if s[1] != sB[1]:
raise ValueError('XA and XB must have the same number of columns '
'(i.e. feature dimension.)')
mA = s[0]
mB = sB[0]
n = s[1]
dm = np.zeros((mA, mB), dtype=np.double)
if callable(metric):
if metric == minkowski:
for i in xrange(0, mA):
for j in xrange(0, mB):
dm[i, j] = minkowski(XA[i, :], XB[j, :], p)
elif metric == wminkowski:
for i in xrange(0, mA):
for j in xrange(0, mB):
dm[i, j] = wminkowski(XA[i, :], XB[j, :], p, w)
elif metric == seuclidean:
for i in xrange(0, mA):
for j in xrange(0, mB):
dm[i, j] = seuclidean(XA[i, :], XB[j, :], V)
elif metric == mahalanobis:
for i in xrange(0, mA):
for j in xrange(0, mB):
                    dm[i, j] = mahalanobis(XA[i, :], XB[j, :], VI)
else:
for i in xrange(0, mA):
for j in xrange(0, mB):
dm[i, j] = metric(XA[i, :], XB[j, :])
elif isinstance(metric, string_types):
mstr = metric.lower()
#if XA.dtype != np.double and \
# (mstr != 'hamming' and mstr != 'jaccard'):
# TypeError('A double array must be passed.')
if mstr in set(['euclidean', 'euclid', 'eu', 'e']):
_distance_wrap.cdist_euclidean_wrap(_convert_to_double(XA),
_convert_to_double(XB), dm)
elif mstr in set(['sqeuclidean', 'sqe', 'sqeuclid']):
_distance_wrap.cdist_sqeuclidean_wrap(_convert_to_double(XA),
_convert_to_double(XB), dm)
elif mstr in set(['cityblock', 'cblock', 'cb', 'c']):
_distance_wrap.cdist_city_block_wrap(_convert_to_double(XA),
_convert_to_double(XB), dm)
elif mstr in set(['hamming', 'hamm', 'ha', 'h']):
if XA.dtype == np.bool:
_distance_wrap.cdist_hamming_bool_wrap(_convert_to_bool(XA),
_convert_to_bool(XB),
dm)
else:
_distance_wrap.cdist_hamming_wrap(_convert_to_double(XA),
_convert_to_double(XB), dm)
elif mstr in set(['jaccard', 'jacc', 'ja', 'j']):
if XA.dtype == np.bool:
_distance_wrap.cdist_jaccard_bool_wrap(_convert_to_bool(XA),
_convert_to_bool(XB),
dm)
else:
_distance_wrap.cdist_jaccard_wrap(_convert_to_double(XA),
_convert_to_double(XB), dm)
elif mstr in set(['chebychev', 'chebyshev', 'cheby', 'cheb', 'ch']):
_distance_wrap.cdist_chebyshev_wrap(_convert_to_double(XA),
_convert_to_double(XB), dm)
elif mstr in set(['minkowski', 'mi', 'm', 'pnorm']):
_distance_wrap.cdist_minkowski_wrap(_convert_to_double(XA),
_convert_to_double(XB), dm, p)
elif mstr in set(['wminkowski', 'wmi', 'wm', 'wpnorm']):
_distance_wrap.cdist_weighted_minkowski_wrap(_convert_to_double(XA),
_convert_to_double(XB),
dm, p,
_convert_to_double(w))
elif mstr in set(['seuclidean', 'se', 's']):
if V is not None:
V = np.asarray(V, order='c')
if type(V) != np.ndarray:
raise TypeError('Variance vector V must be a numpy array')
if V.dtype != np.double:
raise TypeError('Variance vector V must contain doubles.')
if len(V.shape) != 1:
raise ValueError('Variance vector V must be '
'one-dimensional.')
if V.shape[0] != n:
raise ValueError('Variance vector V must be of the same '
'dimension as the vectors on which the '
'distances are computed.')
# The C code doesn't do striding.
[VV] = _copy_arrays_if_base_present([_convert_to_double(V)])
else:
X = np.vstack([XA, XB])
VV = np.var(X, axis=0, ddof=1)
X = None
del X
_distance_wrap.cdist_seuclidean_wrap(_convert_to_double(XA),
_convert_to_double(XB), VV, dm)
# Need to test whether vectorized cosine works better.
# Find out: Is there a dot subtraction operator so I can
# subtract matrices in a similar way to multiplying them?
# Need to get rid of as much unnecessary C code as possible.
elif mstr in set(['cosine', 'cos']):
normsA = np.sqrt(np.sum(XA * XA, axis=1))
normsB = np.sqrt(np.sum(XB * XB, axis=1))
_distance_wrap.cdist_cosine_wrap(_convert_to_double(XA),
_convert_to_double(XB), dm,
normsA,
normsB)
elif mstr in set(['correlation', 'co']):
XA2 = XA - XA.mean(1)[:, np.newaxis]
XB2 = XB - XB.mean(1)[:, np.newaxis]
#X2 = X - np.matlib.repmat(np.mean(X, axis=1).reshape(m, 1), 1, n)
normsA = np.sqrt(np.sum(XA2 * XA2, axis=1))
normsB = np.sqrt(np.sum(XB2 * XB2, axis=1))
_distance_wrap.cdist_cosine_wrap(_convert_to_double(XA2),
_convert_to_double(XB2),
_convert_to_double(dm),
_convert_to_double(normsA),
_convert_to_double(normsB))
elif mstr in set(['mahalanobis', 'mahal', 'mah']):
if VI is not None:
VI = _convert_to_double(np.asarray(VI, order='c'))
if type(VI) != np.ndarray:
raise TypeError('VI must be a numpy array.')
if VI.dtype != np.double:
raise TypeError('The array must contain 64-bit floats.')
[VI] = _copy_arrays_if_base_present([VI])
else:
X = np.vstack([XA, XB])
V = np.cov(X.T)
X = None
del X
VI = _convert_to_double(np.linalg.inv(V).T.copy())
# (u-v)V^(-1)(u-v)^T
_distance_wrap.cdist_mahalanobis_wrap(_convert_to_double(XA),
_convert_to_double(XB),
VI, dm)
elif mstr == 'canberra':
_distance_wrap.cdist_canberra_wrap(_convert_to_double(XA),
_convert_to_double(XB), dm)
elif mstr == 'braycurtis':
_distance_wrap.cdist_bray_curtis_wrap(_convert_to_double(XA),
_convert_to_double(XB), dm)
elif mstr == 'yule':
_distance_wrap.cdist_yule_bool_wrap(_convert_to_bool(XA),
_convert_to_bool(XB), dm)
elif mstr == 'matching':
_distance_wrap.cdist_matching_bool_wrap(_convert_to_bool(XA),
_convert_to_bool(XB), dm)
elif mstr == 'kulsinski':
_distance_wrap.cdist_kulsinski_bool_wrap(_convert_to_bool(XA),
_convert_to_bool(XB), dm)
elif mstr == 'dice':
_distance_wrap.cdist_dice_bool_wrap(_convert_to_bool(XA),
_convert_to_bool(XB), dm)
elif mstr == 'rogerstanimoto':
_distance_wrap.cdist_rogerstanimoto_bool_wrap(_convert_to_bool(XA),
_convert_to_bool(XB),
dm)
elif mstr == 'russellrao':
_distance_wrap.cdist_russellrao_bool_wrap(_convert_to_bool(XA),
_convert_to_bool(XB), dm)
elif mstr == 'sokalmichener':
_distance_wrap.cdist_sokalmichener_bool_wrap(_convert_to_bool(XA),
_convert_to_bool(XB),
dm)
elif mstr == 'sokalsneath':
_distance_wrap.cdist_sokalsneath_bool_wrap(_convert_to_bool(XA),
_convert_to_bool(XB),
dm)
elif metric == 'test_euclidean':
dm = cdist(XA, XB, euclidean)
elif metric == 'test_seuclidean':
if V is None:
V = np.var(np.vstack([XA, XB]), axis=0, ddof=1)
else:
V = np.asarray(V, order='c')
dm = cdist(XA, XB, lambda u, v: seuclidean(u, v, V))
elif metric == 'test_sqeuclidean':
dm = cdist(XA, XB, lambda u, v: sqeuclidean(u, v))
elif metric == 'test_braycurtis':
dm = cdist(XA, XB, braycurtis)
elif metric == 'test_mahalanobis':
if VI is None:
X = np.vstack([XA, XB])
V = np.cov(X.T)
VI = np.linalg.inv(V)
X = None
del X
else:
VI = np.asarray(VI, order='c')
[VI] = _copy_arrays_if_base_present([VI])
# (u-v)V^(-1)(u-v)^T
dm = cdist(XA, XB, (lambda u, v: mahalanobis(u, v, VI)))
elif metric == 'test_canberra':
dm = cdist(XA, XB, canberra)
elif metric == 'test_cityblock':
dm = cdist(XA, XB, cityblock)
elif metric == 'test_minkowski':
dm = cdist(XA, XB, minkowski, p=p)
elif metric == 'test_wminkowski':
dm = cdist(XA, XB, wminkowski, p=p, w=w)
elif metric == 'test_cosine':
dm = cdist(XA, XB, cosine)
elif metric == 'test_correlation':
dm = cdist(XA, XB, correlation)
elif metric == 'test_hamming':
dm = cdist(XA, XB, hamming)
elif metric == 'test_jaccard':
dm = cdist(XA, XB, jaccard)
elif metric == 'test_chebyshev' or metric == 'test_chebychev':
dm = cdist(XA, XB, chebyshev)
elif metric == 'test_yule':
dm = cdist(XA, XB, yule)
elif metric == 'test_matching':
dm = cdist(XA, XB, matching)
elif metric == 'test_dice':
dm = cdist(XA, XB, dice)
elif metric == 'test_kulsinski':
dm = cdist(XA, XB, kulsinski)
elif metric == 'test_rogerstanimoto':
dm = cdist(XA, XB, rogerstanimoto)
elif metric == 'test_russellrao':
dm = cdist(XA, XB, russellrao)
elif metric == 'test_sokalsneath':
dm = cdist(XA, XB, sokalsneath)
elif metric == 'test_sokalmichener':
dm = cdist(XA, XB, sokalmichener)
else:
raise ValueError('Unknown Distance Metric: %s' % mstr)
else:
raise TypeError('2nd argument metric must be a string identifier '
'or a function.')
return dm
| {
"content_hash": "f65002545ce0aae5b5d4436da9c6ae84",
"timestamp": "",
"source": "github",
"line_count": 2245,
"max_line_length": 80,
"avg_line_length": 33.069042316258354,
"alnum_prop": 0.5299973060344828,
"repo_name": "maciejkula/scipy",
"id": "bf9e4fa304396613f31e190787ed3e020eb4c5af",
"size": "74240",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scipy/spatial/distance.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "3954232"
},
{
"name": "C++",
"bytes": "3625507"
},
{
"name": "CSS",
"bytes": "2624"
},
{
"name": "FORTRAN",
"bytes": "5481486"
},
{
"name": "M",
"bytes": "66"
},
{
"name": "Makefile",
"bytes": "10154"
},
{
"name": "Matlab",
"bytes": "4280"
},
{
"name": "Python",
"bytes": "8163768"
},
{
"name": "Shell",
"bytes": "1580"
},
{
"name": "TeX",
"bytes": "52106"
}
],
"symlink_target": ""
} |
import web
import site
import urllib
import re
import simplejson
import web_user_auth
from web import session
from feed import Feed
from tapechat_tag import TapeChatTag
VALID_TAGS = ['a']
render = web.template.render('templates/', base='layout')
render_plain = web.template.render('templates/', base='plain')
valid_email = re.compile(r"(?:^|\s)[-a-z0-9_.]+@(?:[-a-z0-9]+\.)+[a-z]{2,6}(?:\s|$)",re.IGNORECASE)
urls = (
'/','index',
'/tags/(.*)','tags',
'/user/new','user_add',
'/login','login',
'/logout','logout',
'/feed/new','feed_add',
'/stream', 'index_stream',
'/tag_stream/([0-9]+)', 'tag_stream'
)
app = web.application(urls, globals())
def layout_processor(handle):
result = handle()
return result
app.add_processor(layout_processor)
# tags
class index:
def GET(self):
try:
user_id = session.user_id
except: user_id = 0
try:
chat_tag = TapeChatTag()
if web.input(page = 'page')['page']:
chat_tag.page_value = web.input(page = 'page')['page']
chat_tag.generate(user_id=user_id)
return render.index(chat_tag.all_entries,chat_tag.next_page,chat_tag.prev_page,user_id)
except: return render.index('',-1,-1,user_id)
class index_stream:
def GET(self):
try:
user_id = session.user_id
except: user_id = 0
return render_plain.index_stream(user_id)
class tag_stream:
def GET(self,tag_counter):
try:
user_id = session.user_id
except: user_id = 0
chat_tag = TapeChatTag()
chat_tag.generate_stream(tag_counter)
return simplejson.dumps(chat_tag.all_entries)
class tags:
def GET(self,tag_word):
chat_tag = TapeChatTag()
try:
user_id = session.user_id
except: user_id = 0
return render.tags(chat_tag.tag_text(tag_word,user_id),urllib.unquote(tag_word),user_id)
# feeds
class feed_add:
def GET(self):
user_id = session.user_id
error_message = None
return render.feed_add(error_message,user_id)
def POST(self):
feed = Feed()
if feed.add(web.input(url = 'url')['url']):
raise web.seeother('/')
else:
user_id = session.user_id
error_message = "Invalid feed url"
            return render.feed_add(error_message,user_id)
# users
class login:
def GET(self):
error_message = None
return render.login(error_message)
def POST(self):
email = web.input(email = 'email')['email']
password = web.input(password = 'password')['password']
if web_user_auth.login(email,password):
raise web.seeother('/')
else:
error_message = "Invalid email/password"
return render.login(error_message)
class logout:
def GET(self):
web_user_auth.logout()
raise web.seeother('/login')
class user_add:
def GET(self):
error_message = None
return render.user_add(error_message)
def POST(self):
email = web.input(email = 'email')['email']
password = web.input(password = 'password')['password']
if valid_email.match(email.strip()) and len(password) > 4 and web_user_auth.create(email,password):
raise web.seeother('/')
else:
error_message = "Invalid email/password"
return render.user_add(error_message)
if __name__ == '__main__': app.run()
| {
"content_hash": "518872cfc71fffade2a18c29d17c2c46",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 103,
"avg_line_length": 26.024193548387096,
"alnum_prop": 0.6355748373101953,
"repo_name": "ednapiranha/tapechat",
"id": "d8ba5f7b71b27575a47e0a6b94940ec92076c242",
"size": "3227",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tapechat.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "1828"
},
{
"name": "Python",
"bytes": "10399"
}
],
"symlink_target": ""
} |
from __future__ import nested_scopes
"""
################################################################################
#
# SOAPpy - Cayce Ullman (cayce@actzero.com)
# Brian Matthews (blm@actzero.com)
# Gregory Warnes (Gregory.R.Warnes@Pfizer.com)
# Christopher Blunck (blunck@gst.com)
#
################################################################################
# Copyright (c) 2003, Pfizer
# Copyright (c) 2001, Cayce Ullman.
# Copyright (c) 2001, Brian Matthews.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of actzero, inc. nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
"""
ident = '$Id: Client.py 1496 2010-03-04 23:46:17Z pooryorick $'
from version import __version__
#import xml.sax
import urllib
from types import *
import re
import base64
import socket, httplib
from httplib import HTTPConnection, HTTP
import Cookie
# SOAPpy modules
from Errors import *
from Config import Config
from Parser import parseSOAPRPC
from SOAPBuilder import buildSOAP
from Utilities import *
from Types import faultType, simplify
################################################################################
# Client
################################################################################
def SOAPUserAgent():
return "SOAPpy " + __version__ + " (pywebsvcs.sf.net)"
class SOAPAddress:
def __init__(self, url, config = Config):
proto, uri = urllib.splittype(url)
# apply some defaults
if uri[0:2] != '//':
if proto != None:
uri = proto + ':' + uri
uri = '//' + uri
proto = 'http'
host, path = urllib.splithost(uri)
try:
int(host)
host = 'localhost:' + host
except:
pass
if not path:
path = '/'
if proto not in ('http', 'https', 'httpg'):
raise IOError, "unsupported SOAP protocol"
if proto == 'httpg' and not config.GSIclient:
raise AttributeError, \
"GSI client not supported by this Python installation"
if proto == 'https' and not config.SSLclient:
raise AttributeError, \
"SSL client not supported by this Python installation"
self.user,host = urllib.splituser(host)
self.proto = proto
self.host = host
self.path = path
def __str__(self):
return "%(proto)s://%(host)s%(path)s" % self.__dict__
__repr__ = __str__
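# A minimal parsing sketch (hypothetical endpoint):
#
#     >>> addr = SOAPAddress('http://example.com/soap')
#     >>> addr.proto, addr.host, addr.path
#     ('http', 'example.com', '/soap')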
class SOAPTimeoutError(socket.timeout):
'''This exception is raised when a timeout occurs in SOAP operations'''
pass
class HTTPConnectionWithTimeout(HTTPConnection):
'''Extend HTTPConnection for timeout support'''
def __init__(self, host, port=None, strict=None, timeout=None):
HTTPConnection.__init__(self, host, port, strict)
self._timeout = timeout
def connect(self):
HTTPConnection.connect(self)
if self.sock and self._timeout:
self.sock.settimeout(self._timeout)
class HTTPWithTimeout(HTTP):
_connection_class = HTTPConnectionWithTimeout
    ## this __init__ copied from the httplib.HTTP class
def __init__(self, host='', port=None, strict=None, timeout=None):
"Provide a default host, since the superclass requires one."
# some joker passed 0 explicitly, meaning default port
if port == 0:
port = None
# Note that we may pass an empty string as the host; this will throw
# an error when we attempt to connect. Presumably, the client code
# will call connect before then, with a proper host.
self._setup(self._connection_class(host, port, strict, timeout))
class HTTPTransport:
def __init__(self):
        self.cookies = Cookie.SimpleCookie()
def getNS(self, original_namespace, data):
"""Extract the (possibly extended) namespace from the returned
SOAP message."""
if type(original_namespace) == StringType:
pattern="xmlns:\w+=['\"](" + original_namespace + "[^'\"]*)['\"]"
match = re.search(pattern, data)
if match:
return match.group(1)
else:
return original_namespace
else:
return original_namespace
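    # For example, given a response containing xmlns:ns1="urn:example:Service/v2"
    # (hypothetical), getNS('urn:example:Service', data) returns
    # 'urn:example:Service/v2'.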
def __addcookies(self, r):
'''Add cookies from self.cookies to request r
'''
for cname, morsel in self.cookies.items():
attrs = []
value = morsel.get('version', '')
if value != '' and value != '0':
attrs.append('$Version=%s' % value)
attrs.append('%s=%s' % (cname, morsel.coded_value))
value = morsel.get('path')
if value:
attrs.append('$Path=%s' % value)
value = morsel.get('domain')
if value:
attrs.append('$Domain=%s' % value)
r.putheader('Cookie', "; ".join(attrs))
def call(self, addr, data, namespace, soapaction = None, encoding = None,
http_proxy = None, config = Config, timeout=None):
if not isinstance(addr, SOAPAddress):
addr = SOAPAddress(addr, config)
# Build a request
if http_proxy:
real_addr = http_proxy
real_path = addr.proto + "://" + addr.host + addr.path
else:
real_addr = addr.host
real_path = addr.path
if addr.proto == 'httpg':
from pyGlobus.io import GSIHTTP
r = GSIHTTP(real_addr, tcpAttr = config.tcpAttr)
elif addr.proto == 'https':
r = httplib.HTTPS(real_addr, key_file=config.SSL.key_file, cert_file=config.SSL.cert_file)
else:
r = HTTPWithTimeout(real_addr, timeout=timeout)
r.putrequest("POST", real_path)
r.putheader("Host", addr.host)
r.putheader("User-agent", SOAPUserAgent())
        t = 'text/xml'
if encoding != None:
t += '; charset=%s' % encoding
r.putheader("Content-type", t)
r.putheader("Content-length", str(len(data)))
        self.__addcookies(r)
        # if addr.user is not in user:passwd format, the server will
        # presumably reject the credentials
if addr.user != None:
val = base64.encodestring(addr.user)
r.putheader('Authorization','Basic ' + val.replace('\012',''))
# This fixes sending either "" or "None"
if soapaction == None or len(soapaction) == 0:
r.putheader("SOAPAction", "")
else:
r.putheader("SOAPAction", '"%s"' % soapaction)
if config.dumpHeadersOut:
s = 'Outgoing HTTP headers'
debugHeader(s)
print "POST %s %s" % (real_path, r._http_vsn_str)
print "Host:", addr.host
print "User-agent: SOAPpy " + __version__ + " (http://pywebsvcs.sf.net)"
print "Content-type:", t
print "Content-length:", len(data)
print 'SOAPAction: "%s"' % soapaction
debugFooter(s)
r.endheaders()
if config.dumpSOAPOut:
s = 'Outgoing SOAP'
debugHeader(s)
print data,
if data[-1] != '\n':
print
debugFooter(s)
# send the payload
r.send(data)
# read response line
code, msg, headers = r.getreply()
        self.cookies = Cookie.SimpleCookie()
if headers:
content_type = headers.get("content-type","text/xml")
content_length = headers.get("Content-length")
for cookie in headers.getallmatchingheaders("Set-Cookie"):
                self.cookies.load(cookie)
else:
content_type=None
content_length=None
        # work around OC4J bug which does '<len>, <len>' for some reason
if content_length:
comma=content_length.find(',')
if comma>0:
content_length = content_length[:comma]
# attempt to extract integer message size
try:
message_len = int(content_length)
except:
message_len = -1
if message_len < 0:
# Content-Length missing or invalid; just read the whole socket
# This won't work with HTTP/1.1 chunked encoding
data = r.getfile().read()
message_len = len(data)
else:
data = r.getfile().read(message_len)
if(config.debug):
print "code=",code
print "msg=", msg
print "headers=", headers
print "content-type=", content_type
print "data=", data
if config.dumpHeadersIn:
s = 'Incoming HTTP headers'
debugHeader(s)
if headers.headers:
print "HTTP/1.? %d %s" % (code, msg)
print "\n".join(map (lambda x: x.strip(), headers.headers))
else:
print "HTTP/0.9 %d %s" % (code, msg)
debugFooter(s)
def startswith(string, val):
return string[0:len(val)] == val
if code == 500 and not \
( startswith(content_type, "text/xml") and message_len > 0 ):
raise HTTPError(code, msg)
if config.dumpSOAPIn:
s = 'Incoming SOAP'
debugHeader(s)
print data,
if (len(data)>0) and (data[-1] != '\n'):
print
debugFooter(s)
if code not in (200, 500):
raise HTTPError(code, msg)
# get the new namespace
if namespace is None:
new_ns = None
else:
new_ns = self.getNS(namespace, data)
# return response payload
return data, new_ns
################################################################################
# SOAP Proxy
################################################################################
class SOAPProxy:
def __init__(self, proxy, namespace = None, soapaction = None,
header = None, methodattrs = None, transport = HTTPTransport,
encoding = 'UTF-8', throw_faults = 1, unwrap_results = None,
http_proxy=None, config = Config, noroot = 0,
simplify_objects=None, timeout=None):
# Test the encoding, raising an exception if it's not known
if encoding != None:
''.encode(encoding)
# get default values for unwrap_results and simplify_objects
# from config
if unwrap_results is None:
self.unwrap_results=config.unwrap_results
else:
self.unwrap_results=unwrap_results
if simplify_objects is None:
self.simplify_objects=config.simplify_objects
else:
self.simplify_objects=simplify_objects
self.proxy = SOAPAddress(proxy, config)
self.namespace = namespace
self.soapaction = soapaction
self.header = header
self.methodattrs = methodattrs
self.transport = transport()
self.encoding = encoding
self.throw_faults = throw_faults
self.http_proxy = http_proxy
self.config = config
self.noroot = noroot
self.timeout = timeout
# GSI Additions
if hasattr(config, "channel_mode") and \
hasattr(config, "delegation_mode"):
self.channel_mode = config.channel_mode
self.delegation_mode = config.delegation_mode
#end GSI Additions
def invoke(self, method, args):
return self.__call(method, args, {})
def __call(self, name, args, kw, ns = None, sa = None, hd = None,
ma = None):
ns = ns or self.namespace
ma = ma or self.methodattrs
if sa: # Get soapaction
if type(sa) == TupleType:
sa = sa[0]
else:
if self.soapaction:
sa = self.soapaction
else:
sa = name
if hd: # Get header
if type(hd) == TupleType:
hd = hd[0]
else:
hd = self.header
hd = hd or self.header
if ma: # Get methodattrs
if type(ma) == TupleType: ma = ma[0]
else:
ma = self.methodattrs
ma = ma or self.methodattrs
m = buildSOAP(args = args, kw = kw, method = name, namespace = ns,
header = hd, methodattrs = ma, encoding = self.encoding,
config = self.config, noroot = self.noroot)
call_retry = 0
try:
r, self.namespace = self.transport.call(self.proxy, m, ns, sa,
encoding = self.encoding,
http_proxy = self.http_proxy,
config = self.config,
timeout = self.timeout)
except socket.timeout:
raise SOAPTimeoutError
except Exception, ex:
#
# Call failed.
#
# See if we have a fault handling vector installed in our
# config. If we do, invoke it. If it returns a true value,
# retry the call.
#
# In any circumstance other than the fault handler returning
# true, reraise the exception. This keeps the semantics of this
# code the same as without the faultHandler code.
#
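            # A hypothetical handler that retries once on socket errors
            # (illustrative only; SOAPpy does not ship one):
            #
            #     Config.faultHandler = lambda proxy, ex: isinstance(ex, socket.error)
            #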
if hasattr(self.config, "faultHandler"):
if callable(self.config.faultHandler):
call_retry = self.config.faultHandler(self.proxy, ex)
if not call_retry:
raise
else:
raise
else:
raise
if call_retry:
try:
r, self.namespace = self.transport.call(self.proxy, m, ns, sa,
encoding = self.encoding,
http_proxy = self.http_proxy,
config = self.config,
timeout = self.timeout)
except socket.timeout:
raise SOAPTimeoutError
p, attrs = parseSOAPRPC(r, attrs = 1)
try:
throw_struct = self.throw_faults and \
isinstance (p, faultType)
except:
throw_struct = 0
if throw_struct:
if Config.debug:
print p
raise p
        # If unwrap_results=1 and there is only one element in the struct,
        # SOAPProxy will assume that this element is the result
        # and return it rather than the struct containing it.
        # Otherwise SOAPProxy will return the struct with all the
        # elements as attributes.
if self.unwrap_results:
try:
count = 0
for i in p.__dict__.keys():
if i[0] != "_": # don't count the private stuff
count += 1
t = getattr(p, i)
if count == 1: # Only one piece of data, bubble it up
p = t
except:
pass
        # Automatically simplify SOAP complex types into the
# corresponding python types. (structType --> dict,
# arrayType --> array, etc.)
if self.simplify_objects:
p = simplify(p)
if self.config.returnAllAttrs:
return p, attrs
return p
def _callWithBody(self, body):
return self.__call(None, body, {})
def __getattr__(self, name): # hook to catch method calls
if name in ( '__del__', '__getinitargs__', '__getnewargs__',
'__getstate__', '__setstate__', '__reduce__', '__reduce_ex__'):
raise AttributeError, name
return self.__Method(self.__call, name, config = self.config)
# To handle attribute weirdness
class __Method:
# Some magic to bind a SOAP method to an RPC server.
# Supports "nested" methods (e.g. examples.getStateName) -- concept
# borrowed from xmlrpc/soaplib -- www.pythonware.com
# Altered (improved?) to let you inline namespaces on a per call
# basis ala SOAP::LITE -- www.soaplite.com
def __init__(self, call, name, ns = None, sa = None, hd = None,
ma = None, config = Config):
self.__call = call
self.__name = name
self.__ns = ns
self.__sa = sa
self.__hd = hd
self.__ma = ma
self.__config = config
return
def __call__(self, *args, **kw):
if self.__name[0] == "_":
if self.__name in ["__repr__","__str__"]:
return self.__repr__()
else:
return self.__f_call(*args, **kw)
else:
return self.__r_call(*args, **kw)
def __getattr__(self, name):
if name == '__del__':
raise AttributeError, name
if self.__name[0] == "_":
# Don't nest method if it is a directive
return self.__class__(self.__call, name, self.__ns,
self.__sa, self.__hd, self.__ma)
return self.__class__(self.__call, "%s.%s" % (self.__name, name),
self.__ns, self.__sa, self.__hd, self.__ma)
def __f_call(self, *args, **kw):
if self.__name == "_ns": self.__ns = args
elif self.__name == "_sa": self.__sa = args
elif self.__name == "_hd": self.__hd = args
elif self.__name == "_ma": self.__ma = args
return self
def __r_call(self, *args, **kw):
return self.__call(self.__name, args, kw, self.__ns, self.__sa,
self.__hd, self.__ma)
def __repr__(self):
return "<%s at %d>" % (self.__class__, id(self))
| {
"content_hash": "be56f932de6566d2ac2e2c65079f456a",
"timestamp": "",
"source": "github",
"line_count": 565,
"max_line_length": 102,
"avg_line_length": 35.08141592920354,
"alnum_prop": 0.5180364260128147,
"repo_name": "m42e/jirash",
"id": "374987955d08eddfba1ab5de0f60b1b6072fd190",
"size": "19821",
"binary": false,
"copies": "289",
"ref": "refs/heads/master",
"path": "deps/SOAPpy/Client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "109591"
},
{
"name": "Shell",
"bytes": "87"
}
],
"symlink_target": ""
} |
import os.path
from baka.core import Job
from baka.core import Container
class SnapcraftJob(Job):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._snap_filename = 'snapcraft_{}.snap'.format(self.name)
def setup(self, c: Container, run_script: bool=True):
super().setup(c, run_script=False)
c.log('Installing snapcraft ...')
c.exec('apt-get', 'install', '-y', 'snapcraft')
# Run script
if run_script:
self._run_script('setup', c)
def perform(self, c: Container, run_script: bool=True):
super().perform(c, run_script=False)
c.exec('snapcraft', 'snap', '-o', self._snap_filename, path=self.path, envvars=self._envvars)
# Run script
if run_script:
self._run_script('perform', c)
def finish(self, c: Container, run_script: bool=True):
super().finish(c, run_script=False)
artifact_filename = os.path.join(self._artifacts_path, self._snap_filename)
c.pull(os.path.join(self.path, self._snap_filename), self._artifacts_path)
self._add_artifact(artifact_filename)
# Run script
if run_script:
self._run_script('finish', c)
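# A minimal driving sketch (the Container construction and Job arguments are
# hypothetical; see baka.core for the real signatures):
#
#     job = SnapcraftJob(...)      # name, path, scripts, etc.
#     job.setup(container)         # installs snapcraft in the container
#     job.perform(container)       # builds snapcraft_<name>.snap
#     job.finish(container)        # pulls the snap into the artifacts dir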
| {
"content_hash": "5f826de18dd2124ce20f6d40135f26fb",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 101,
"avg_line_length": 36.3235294117647,
"alnum_prop": 0.6016194331983805,
"repo_name": "tim-sueberkrueb/grout",
"id": "4de20c9c0ac9eb45651b08bddacd62f104d5eb6b",
"size": "1260",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "baka/jobs/snapcraft.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "36976"
}
],
"symlink_target": ""
} |
from copy import deepcopy
import numpy as np
from ..fixes import _import_fft
from ..io.pick import _pick_data_channels, pick_info
from ..utils import verbose, warn, fill_doc, _validate_type
from ..parallel import parallel_func, check_n_jobs
from .tfr import AverageTFR, _get_data
def _check_input_st(x_in, n_fft):
"""Aux function."""
# flatten to 2 D and memorize original shape
n_times = x_in.shape[-1]
def _is_power_of_two(n):
return not (n > 0 and ((n & (n - 1))))
if n_fft is None or (not _is_power_of_two(n_fft) and n_times > n_fft):
# Compute next power of 2
n_fft = 2 ** int(np.ceil(np.log2(n_times)))
elif n_fft < n_times:
raise ValueError("n_fft cannot be smaller than signal size. "
"Got %s < %s." % (n_fft, n_times))
if n_times < n_fft:
warn('The input signal is shorter ({}) than "n_fft" ({}). '
'Applying zero padding.'.format(x_in.shape[-1], n_fft))
zero_pad = n_fft - n_times
pad_array = np.zeros(x_in.shape[:-1] + (zero_pad,), x_in.dtype)
x_in = np.concatenate((x_in, pad_array), axis=-1)
else:
zero_pad = 0
return x_in, n_fft, zero_pad
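# For example, with 1000 samples and n_fft=None the signal is zero-padded up
# to the next power of two: n_fft = 2 ** ceil(log2(1000)) = 1024, zero_pad = 24.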
def _precompute_st_windows(n_samp, start_f, stop_f, sfreq, width):
"""Precompute stockwell Gaussian windows (in the freq domain)."""
fft, fftfreq = _import_fft(('fft', 'fftfreq'))
tw = fftfreq(n_samp, 1. / sfreq) / n_samp
tw = np.r_[tw[:1], tw[1:][::-1]]
    k = width  # 1 for the classical Stockwell transform
f_range = np.arange(start_f, stop_f, 1)
windows = np.empty((len(f_range), len(tw)), dtype=np.complex128)
for i_f, f in enumerate(f_range):
if f == 0.:
window = np.ones(len(tw))
else:
window = ((f / (np.sqrt(2. * np.pi) * k)) *
np.exp(-0.5 * (1. / k ** 2.) * (f ** 2.) * tw ** 2.))
window /= window.sum() # normalisation
windows[i_f] = fft(window)
return windows
def _st(x, start_f, windows):
"""Compute ST based on Ali Moukadem MATLAB code (used in tests)."""
fft, ifft = _import_fft(('fft', 'ifft'))
n_samp = x.shape[-1]
ST = np.empty(x.shape[:-1] + (len(windows), n_samp), dtype=np.complex128)
# do the work
Fx = fft(x)
XF = np.concatenate([Fx, Fx], axis=-1)
for i_f, window in enumerate(windows):
f = start_f + i_f
ST[..., i_f, :] = ifft(XF[..., f:f + n_samp] * window)
return ST
def _st_power_itc(x, start_f, compute_itc, zero_pad, decim, W):
"""Aux function."""
fft, ifft = _import_fft(('fft', 'ifft'))
n_samp = x.shape[-1]
n_out = (n_samp - zero_pad)
n_out = n_out // decim + bool(n_out % decim)
psd = np.empty((len(W), n_out))
itc = np.empty_like(psd) if compute_itc else None
X = fft(x)
XX = np.concatenate([X, X], axis=-1)
for i_f, window in enumerate(W):
f = start_f + i_f
ST = ifft(XX[:, f:f + n_samp] * window)
if zero_pad > 0:
TFR = ST[:, :-zero_pad:decim]
else:
TFR = ST[:, ::decim]
TFR_abs = np.abs(TFR)
TFR_abs[TFR_abs == 0] = 1.
if compute_itc:
TFR /= TFR_abs
itc[i_f] = np.abs(np.mean(TFR, axis=0))
TFR_abs *= TFR_abs
psd[i_f] = np.mean(TFR_abs, axis=0)
return psd, itc
@fill_doc
def tfr_array_stockwell(data, sfreq, fmin=None, fmax=None, n_fft=None,
width=1.0, decim=1, return_itc=False, n_jobs=1):
"""Compute power and intertrial coherence using Stockwell (S) transform.
Same computation as `~mne.time_frequency.tfr_stockwell`, but operates on
:class:`NumPy arrays <numpy.ndarray>` instead of `~mne.Epochs` objects.
See :footcite:`Stockwell2007,MoukademEtAl2014,WheatEtAl2010,JonesEtAl2006`
for more information.
Parameters
----------
data : ndarray, shape (n_epochs, n_channels, n_times)
The signal to transform.
sfreq : float
The sampling frequency.
fmin : None, float
The minimum frequency to include. If None defaults to the minimum fft
frequency greater than zero.
fmax : None, float
The maximum frequency to include. If None defaults to the maximum fft.
n_fft : int | None
The length of the windows used for FFT. If None, it defaults to the
next power of 2 larger than the signal length.
width : float
        The width of the Gaussian window. Values < 1 increase temporal
        resolution; values > 1 increase frequency resolution. Defaults to
        1.0 (the classical S-transform).
decim : int
The decimation factor on the time axis. To reduce memory usage.
return_itc : bool
Return intertrial coherence (ITC) as well as averaged power.
%(n_jobs)s
Returns
-------
st_power : ndarray
        The power of the Stockwell-transformed data.
The last two dimensions are frequency and time.
itc : ndarray
The intertrial coherence. Only returned if return_itc is True.
freqs : ndarray
The frequencies.
See Also
--------
mne.time_frequency.tfr_stockwell
mne.time_frequency.tfr_multitaper
mne.time_frequency.tfr_array_multitaper
mne.time_frequency.tfr_morlet
mne.time_frequency.tfr_array_morlet
References
----------
.. footbibliography::
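
    Examples
    --------
    A minimal sketch on synthetic data (values are illustrative)::

        import numpy as np
        rng = np.random.RandomState(0)
        data = rng.randn(5, 2, 1000)  # (n_epochs, n_channels, n_times)
        st_power, itc, freqs = tfr_array_stockwell(
            data, sfreq=250., fmin=5., fmax=30., return_itc=True)
        # st_power.shape == (2, len(freqs), 1000)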
"""
fftfreq = _import_fft('fftfreq')
_validate_type(data, np.ndarray, 'data')
if data.ndim != 3:
raise ValueError(
'data must be 3D with shape (n_epochs, n_channels, n_times), '
f'got {data.shape}')
n_epochs, n_channels = data.shape[:2]
n_out = data.shape[2] // decim + bool(data.shape[-1] % decim)
data, n_fft_, zero_pad = _check_input_st(data, n_fft)
freqs = fftfreq(n_fft_, 1. / sfreq)
if fmin is None:
fmin = freqs[freqs > 0][0]
if fmax is None:
fmax = freqs.max()
start_f = np.abs(freqs - fmin).argmin()
stop_f = np.abs(freqs - fmax).argmin()
freqs = freqs[start_f:stop_f]
W = _precompute_st_windows(data.shape[-1], start_f, stop_f, sfreq, width)
n_freq = stop_f - start_f
psd = np.empty((n_channels, n_freq, n_out))
itc = np.empty((n_channels, n_freq, n_out)) if return_itc else None
parallel, my_st, _ = parallel_func(_st_power_itc, n_jobs)
tfrs = parallel(my_st(data[:, c, :], start_f, return_itc, zero_pad,
decim, W)
for c in range(n_channels))
for c, (this_psd, this_itc) in enumerate(iter(tfrs)):
psd[c] = this_psd
if this_itc is not None:
itc[c] = this_itc
return psd, itc, freqs
@verbose
def tfr_stockwell(inst, fmin=None, fmax=None, n_fft=None,
width=1.0, decim=1, return_itc=False, n_jobs=1,
verbose=None):
"""Compute Time-Frequency Representation (TFR) using Stockwell Transform.
Same computation as `~mne.time_frequency.tfr_array_stockwell`, but operates
on `~mne.Epochs` objects instead of :class:`NumPy arrays <numpy.ndarray>`.
See :footcite:`Stockwell2007,MoukademEtAl2014,WheatEtAl2010,JonesEtAl2006`
for more information.
Parameters
----------
inst : Epochs | Evoked
The epochs or evoked object.
fmin : None, float
The minimum frequency to include. If None defaults to the minimum fft
frequency greater than zero.
fmax : None, float
The maximum frequency to include. If None defaults to the maximum fft.
n_fft : int | None
The length of the windows used for FFT. If None, it defaults to the
next power of 2 larger than the signal length.
width : float
        The width of the Gaussian window. Values < 1 increase temporal
        resolution; values > 1 increase frequency resolution. Defaults to
        1.0 (the classical S-transform).
decim : int
The decimation factor on the time axis. To reduce memory usage.
return_itc : bool
Return intertrial coherence (ITC) as well as averaged power.
n_jobs : int
The number of jobs to run in parallel (over channels).
%(verbose)s
Returns
-------
power : AverageTFR
The averaged power.
itc : AverageTFR
The intertrial coherence. Only returned if return_itc is True.
See Also
--------
mne.time_frequency.tfr_array_stockwell
mne.time_frequency.tfr_multitaper
mne.time_frequency.tfr_array_multitaper
mne.time_frequency.tfr_morlet
mne.time_frequency.tfr_array_morlet
Notes
-----
.. versionadded:: 0.9.0
References
----------
.. footbibliography::
"""
# verbose dec is used b/c subfunctions are verbose
data = _get_data(inst, return_itc)
picks = _pick_data_channels(inst.info)
info = pick_info(inst.info, picks)
data = data[:, picks, :]
n_jobs = check_n_jobs(n_jobs)
power, itc, freqs = tfr_array_stockwell(data, sfreq=info['sfreq'],
fmin=fmin, fmax=fmax, n_fft=n_fft,
width=width, decim=decim,
return_itc=return_itc,
n_jobs=n_jobs)
times = inst.times[::decim].copy()
nave = len(data)
out = AverageTFR(info, power, times, freqs, nave, method='stockwell-power')
if return_itc:
out = (out, AverageTFR(deepcopy(info), itc, times.copy(),
freqs.copy(), nave, method='stockwell-itc'))
return out
| {
"content_hash": "c21e8cebf69db03067ecc165567c7ab8",
"timestamp": "",
"source": "github",
"line_count": 268,
"max_line_length": 79,
"avg_line_length": 35.61940298507463,
"alnum_prop": 0.5858998533417138,
"repo_name": "bloyl/mne-python",
"id": "a8e99acd4bd6cfc100e8ca8b0901e774eaec610a",
"size": "9692",
"binary": false,
"copies": "1",
"ref": "refs/heads/placeholder",
"path": "mne/time_frequency/_stockwell.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Csound Document",
"bytes": "24999"
},
{
"name": "Makefile",
"bytes": "4450"
},
{
"name": "Python",
"bytes": "8190297"
},
{
"name": "Shell",
"bytes": "936"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('volunteers', '0025_auto_20150125_1922'),
]
operations = [
migrations.AddField(
model_name='voucher',
name='creator',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL, null=True),
preserve_default=True,
),
migrations.AlterField(
model_name='voucher',
name='generateDate',
field=models.DateTimeField(auto_now_add=True),
preserve_default=True,
),
]
| {
"content_hash": "a2f60247c57926ae9ea71580aaff5c1b",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 76,
"avg_line_length": 27.59259259259259,
"alnum_prop": 0.6080536912751678,
"repo_name": "codeforgood2015/bnb",
"id": "a1a59b79c9fc30129a45f03f0b49be3b5bfb293c",
"size": "769",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bnbvolunteer/volunteers/migrations/0026_auto_20150128_1317.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
from __future__ import print_function
import os
import re
import sys
import json
import types
import inspect
import requests
import tempfile
import datetime
import itertools
try:
import mock
except ImportError:
from unittest import mock
if sys.version_info[:2] <= (2, 6):
import unittest2 as unittest
else:
import unittest
import spells
import lib
import lib.spell
import lib.registry
class WebMock(object):
"""
    Intercepts ``lib.spell.BaseSpell.fetch`` and serves canned responses
    from local data files, so tests can run offline.

    :type root: str
:param root: The root directory that contains the data files that
will be used in the mock
"""
def __init__(self, root):
self.root = root
self.routes = []
self.mock = mock.patch(
'lib.spell.BaseSpell.fetch',
spec=True,
side_effect=self.mock_fetch
)
def route(self, url, file, post=None, get=None, format='raw'):
"""
:type url: str
:param url: The base URL to intercept. Do not
include the query string
:type file: str
:param file: The file name (in ``self.root``) that contains
the response data corresponding to the request
:type post: dict or None
:param post: If set, will only intercept URLs which have post
data matching ``post``
        :type get: dict or None
        :param get: If set, will only intercept URLs which have get
                    data matching ``get``
:type format: str
:param format: Only requests asking for the same format will be
handled
"""
with open(os.path.join(self.root, file), 'r') as f:
request = mock.Mock()
request.text = f.read()
request.json = lambda: json.loads(request.text)
args = (url, post, get, format)
self.routes = list(filter(
lambda item: item[0] != args,
self.routes
))
self.routes.append((
args,
lib.spell.BaseSpell.fetchFormats[format](request)
))
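    # Example (hypothetical URL and data file):
    #
    #     web = WebMock('spells/weather/test_data')
    #     web.route('http://api.example.com/v1/forecast', 'forecast.txt',
    #               get={'q': 'montreal'})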
def mock_fetch(self, url, post=None, get=None, format='raw'):
"""
This function replaces ``lib.spell.BaseSpell.fetch`` when the mock
is active.
:raises:Exception: Any request that does not match one
predefined with ``route()`` will result in an
``Exception`` being thrown.
"""
for args, content in self.routes:
if args == (url, post, get, format):
return content
raise Exception(
'Unknown request: fetch(url=%s, post=%s, get=%s, format=%s)'
% (url, post, get, format)
)
class WebCapture(object):
"""
This is a proxy class that intercepts web requests and then
prints information to the screen and also saves the result
to a temporary file.
This is used to quickly build up test cases in a
pseudo record/playback manner.
"""
def __init__(self):
self._get = requests.get
self._post = requests.post
self.patches = (
mock.patch('requests.get', spec=True, side_effect=self.mock_get),
mock.patch('requests.post', spec=True, side_effect=self.mock_post)
)
def mock_get(self, url, **kwargs):
"""
This replaces ``requests.get``. It prints out the ``kwargs`` that
is used to build the query string, passes the request along to
the original ``get(...)`` function and then saves the result to a file
:type url: str
:param url: The URL of the resource being requested
:type kwargs: dict
:param kwargs: The keyword arguments used to build the query string
:rtype: str
:return: The result from the original ``get(...)`` function
"""
print('url: %s' % url, end='')
if 'params' in kwargs:
print('get: %s' % kwargs['params'], end='')
result = self._get(url, **kwargs)
with tempfile.NamedTemporaryFile(delete=False) as f:
f.write(result.text.encode('UTF-8'))
print('Output saved to %s' % f.name)
return result
def mock_post(self, url, data, **kwargs):
"""
        This replaces ``requests.post``. It prints out the post payload
        (``data``), passes the request along to the original
        ``post(...)`` function and then saves the result to a file
:type url: str
:param url: The URL of the resource being requested
:type data: dict
:param data: The data payload that will be sent via post
to the server.
:rtype: str
        :return: The result from the original ``post(...)`` function
"""
print('url: %s' % url, end='')
print('data: %s' % data, end='')
result = self._post(url, data, **kwargs)
with tempfile.NamedTemporaryFile(delete=False) as f:
f.write(result.text)
print('Output saved to %s' % f.name)
return result
def __enter__(self):
for patch in self.patches:
patch.start()
return self
def __exit__(self, exc_type, exc_value, traceback):
for patch in self.patches:
patch.stop()
class _ShamanMeta(type):
"""
The ``lib.test.Shaman.generate`` function sets metadata on
what functions the user wants to create. Then,
``__metaclass__`` reads that metadata and is the one
that actually creates the new functions
"""
def __new__(cls, name, bases, class_dict):
def callback(function, args):
try:
string_types = types.StringTypes # Python2.x
except AttributeError:
string_types = str # Python3
if isinstance(args, string_types):
return lambda self: function(self, args)
else:
return lambda self: function(self, *args)
if name != 'Shaman':
for objName, obj in list(class_dict.items()):
if hasattr(obj, 'shaman_generate_inputs'):
for id, input in obj.shaman_generate_inputs:
key = '%s_%s' % (obj.__name__, id)
class_dict[key] = callback(obj, input)
del class_dict[objName]
return type.__new__(cls, name, bases, class_dict)
def __init__(cls, name, bases, class_dict):
if name != 'Shaman':
lib.registry.register(test=cls)
# Apply ShamanMeta class to Shaman
ShamanMeta = _ShamanMeta('Shaman', (object,), {})
class Shaman(unittest.TestCase, ShamanMeta):
"""
The Shaman will help illuminate flaws in your code and
help you to heal them.
"""
#: The spell that is being tested
#:
#: .. note::
#: You usually never have to set this value, it should get
#: detected automatically. However, if you **do** set this value
#: ``Shaman`` won't overwrite it
spell = None
#: An instantiated version of ``spell``.
spell_obj = None
@classmethod
def setUpClass(cls):
"""
A class-level fixture that gets called automatically by ``unittest``;
there should be no need to call this function directly.
This fixture takes care of the following:
* The module being tested is automatically detected
* The following mocks are configured:
* **state** -- creates a temporary storage for
the spell's state
* **config** -- a mock config that can be configured
in the test and "loaded" by the config
* **web** -- overwrites ``lib.spell.BaseSpell.fetch`` with
a ``lib.test.WebMock`` function which intercepts
requests which are then served by predefined
datafiles so that the test can be run offline
"""
spellRegistry = lib.registry.lookup_by_name(test=cls.__name__)
root = spellRegistry['root']
cls.state = dict()
cls.config = dict()
cls.queries = dict()
cls.web = WebMock(os.path.join(root, 'test_data'))
cls.patches = [cls.web.mock]
lib.registry.collect()
# Search for spell if one is not already provided
# (a spell that is in the same directory)
if cls.spell is None:
try:
cls.spell = spellRegistry['spell']
except KeyError:
raise ValueError(
'Unable to detect associated spell for: %s.%s'
% (inspect.getfile(cls), cls)
)
cls.spell_obj = cls.spell()
@classmethod
def generate(cls, *args, **kwargs):
"""
A utility method that uses the arguments to duplicate the decorated
test function
:type args: list
:param args: The positional arguments
:type kwargs: dict
:param kwargs: The keyword arguments
The generated functions are named by taking the name of
decorated function and then:
* using numerically incrementing ids for input elements in ``*args``
* using the keyword provided for the input elements in ``**kwargs``
For example:
.. code-block:: python
@Shaman.generate('happy', 'sad', sleepy='Zzzz')
def test_example(input):
pass
Would generate the following:
.. code-block:: python
def test_example_1(input='happy'):
pass
def test_example_2(input='sad'):
pass
def test_example_sleepy(input='Zzzz'):
pass
.. warning::
The decorated/original function gets destroyed when
the generated functions get created
"""
def wrapper(func):
inputs = list(kwargs.items())
ids = itertools.count(1)
for arg in args:
if isinstance(arg, types.GeneratorType):
inputs.extend(zip(ids, arg))
else:
inputs.append((next(ids), arg))
func.shaman_generate_inputs = inputs
return func
return wrapper
@classmethod
def collectQueries(cls):
"""
Run the test (silently) and return the queries issued,
along with the associated results
:rtype: dict
:return: A dictionary mapping input queries to expected output
"""
clsObj = cls()
clsObj.setUpClass()
for attr in dir(clsObj):
obj = getattr(clsObj, attr)
            if attr.startswith('test_') and hasattr(obj, '__call__'):
clsObj.setUp()
obj()
return clsObj.queries
def setUp(self):
pass
def assertLooksLike(self, first, second, msg=None):
"""
Compare ``first`` and ``second``, ignoring differences in
whitespace and case
:type first: str
:param first: The first value to use in the comparison
:type second: str
:param second: The second value to use in the comparison
:type msg: str or None
:param msg: Use a custom error message if the values
don't look alike
:raises: ``AssertionError``
"""
normalize = lambda string: re.sub(r'\s+', ' ', string).lower().strip()
if normalize(first) != normalize(second):
default_msg = (
'%s\n<[---------- does not look like ----------]>\n%s'
% (first, second)
)
msg = self._formatMessage(msg, default_msg)
raise self.failureException(msg)
def runTest(self):
pass
def query(self, query):
"""
Pass query to spell with mocked functions enabled,
returning the result
:type query: str
:param query: The query to give to the spell
:returns: returns the result of the spell
:rtype: str
"""
for patch in self.patches:
patch.start()
score, cls, _query = self.spell_obj.parse(query)
result, self.state = self.spell_obj.incantation(
_query, self.config, self.state
)
for patch in self.patches:
patch.stop()
self.queries[query] = result
return result
def today(self, year, month, day):
"""
Replace ``lib.spell.BaseSpell.today`` with a mock function
which returns a constant date
:type year: int
:param year: ex: 2010
:type month: int
:param month: valid values are 1 (January) to 12 (December)
:type day: int
:param day: valid values are 1 - 31, depending on the month
"""
patch = mock.patch(
'lib.spell.BaseSpell.today',
lambda self: datetime.date(year, month, day)
)
self.patches.append(patch)
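# A minimal test sketch (spell, query strings, URLs and data files are all
# hypothetical):
#
#     class TestWeather(Shaman):
#         @Shaman.generate('weather in montreal', 'montreal forecast')
#         def test_forecast(self, query):
#             self.web.route('http://api.example.com/v1/forecast',
#                            'forecast.txt')
#             self.today(2016, 3, 14)
#             self.assertLooksLike(self.query(query), 'Sunny, -2 C')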
| {
"content_hash": "dd28240e0cafe1c994a7dd8a90b8e8c1",
"timestamp": "",
"source": "github",
"line_count": 434,
"max_line_length": 78,
"avg_line_length": 30.320276497695854,
"alnum_prop": 0.5502697773387035,
"repo_name": "dvrasp/TheTroz",
"id": "33d0c14ee5d81f412cd8140481786df395ff1034",
"size": "13159",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "65975"
}
],
"symlink_target": ""
} |
"""
Copyright (c) 2016, Jose Dolz. All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
Jose Dolz. Dec, 2016.
email: jose.dolz.upv@gmail.com
LIVIA Department, ETS, Montreal.
"""
import theano
import theano.tensor as T
from theano.tensor.nnet import conv2d
import theano.tensor.nnet.conv3d2d
import pdb
import sys
import os
import numpy as np
import random
from Modules.General.Utils import initializeWeights
from Modules.NeuralNetwork.ActivationFunctions import *
from Modules.NeuralNetwork.layerOperations import *
#################################################################
# Layer Types #
#################################################################
class LiviaNet3DConvLayer(object):
"""Convolutional Layer of the Livia network """
def __init__(self,
rng,
layerID,
inputSample_Train,
inputSample_Test,
inputToLayerShapeTrain,
inputToLayerShapeTest,
filterShape,
useBatchNorm,
numberEpochApplyRolling,
maxPoolingParameters,
weights_initMethodType,
weights,
activationType,
dropoutRate=0.0) :
self.inputTrain = None
self.inputTest = None
self.inputShapeTrain = None
self.inputShapeTest = None
self._numberOfFeatureMaps = 0
self._maxPoolingParameters = None
self._appliedBnInLayer = None
self.params = []
self.W = None
self._gBn = None
self._b = None
self._aPrelu = None
self.numberOfTrainableParams = 0
self.muBatchNorm = None
self._varBnsArrayForRollingAverage = None
self.numberEpochApplyRolling = numberEpochApplyRolling
self.rollingIndex = 0
self._sharedNewMu_B = None
self._sharedNewVar_B = None
self._newMu_B = None
self._newVar_B = None
self.outputTrain = None
self.outputTest = None
self.outputShapeTrain = None
self.outputShapeTest = None
        # === After all the parameters have been initialized, create the layer
# Set all the inputs and parameters
self.inputTrain = inputSample_Train
self.inputTest = inputSample_Test
self.inputShapeTrain = inputToLayerShapeTrain
self.inputShapeTest = inputToLayerShapeTest
self._numberOfFeatureMaps = filterShape[0]
assert self.inputShapeTrain[1] == filterShape[1]
self._maxPoolingParameters = maxPoolingParameters
print(" --- [STATUS] --------- Creating layer {} --------- ".format(layerID))
## Process the input layer through all the steps over the block
(inputToConvTrain,
inputToConvTest) = self.passInputThroughLayerElements(inputSample_Train,
inputToLayerShapeTrain,
inputSample_Test,
inputToLayerShapeTest,
useBatchNorm,
numberEpochApplyRolling,
activationType,
weights,
dropoutRate,
rng
)
# input shapes for the convolutions
inputToConvShapeTrain = inputToLayerShapeTrain
inputToConvShapeTest = inputToLayerShapeTest
# -------------- Weights initialization -------------
# Initialize weights with random weights if W is empty
# Otherwise, use loaded weights
self.W = initializeWeights(filterShape,
weights_initMethodType,
weights)
self.params = [self.W] + self.params
self.numberOfTrainableParams += 1
##---------- Convolve --------------
(convolvedOutput_Train, convolvedOutputShape_Train) = convolveWithKernel(self.W, filterShape, inputToConvTrain, inputToConvShapeTrain)
(convolvedOutput_Test, convolvedOutputShape_Test) = convolveWithKernel(self.W , filterShape, inputToConvTest, inputToConvShapeTest)
self.outputTrain = convolvedOutput_Train
self.outputTest = convolvedOutput_Test
self.outputShapeTrain = convolvedOutputShape_Train
self.outputShapeTest = convolvedOutputShape_Test
def updateLayerMatricesBatchNorm(self):
if self._appliedBnInLayer :
muArrayValue = self.muBatchNorm.get_value()
muArrayValue[self.rollingIndex] = self._sharedNewMu_B.get_value()
self.muBatchNorm.set_value(muArrayValue, borrow=True)
varArrayValue = self._varBnsArrayForRollingAverage.get_value()
varArrayValue[self.rollingIndex] = self._sharedNewVar_B.get_value()
self._varBnsArrayForRollingAverage.set_value(varArrayValue, borrow=True)
self.rollingIndex = (self.rollingIndex + 1) % self.numberEpochApplyRolling
def getUpdatesForBnRollingAverage(self) :
if self._appliedBnInLayer :
return [(self._sharedNewMu_B, self._newMu_B),
(self._sharedNewVar_B, self._newVar_B) ]
else :
return []
def passInputThroughLayerElements(self,
inputSample_Train,
inputSampleShape_Train,
inputSample_Test,
inputSampleShape_Test,
useBatchNorm,
numberEpochApplyRolling,
activationType,
weights,
dropoutRate,
rndState):
""" Through each block the following steps are applied, according to Kamnitsas:
1 - Batch Normalization or biases
2 - Activation function
3 - Dropout
4 - (Optional) Max pooling
Ref: He et al "Identity Mappings in Deep Residual Networks" 2016
https://github.com/KaimingHe/resnet-1k-layers/blob/master/resnet-pre-act.lua """
# ________________________________________________________
# 1 : Batch Normalization
# ________________________________________________________
""" Implemenation taken from Kamnitsas work.
A batch normalization implementation in TensorFlow:
http://r2rt.com/implementing-batch-normalization-in-tensorflow.html
"Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift",
Proceedings of the 32nd International Conference on Machine Learning, Lille, France, 2015.
Journal of Machine Learning Research: W&CP volume 37
"""
if useBatchNorm > 0 :
self._appliedBnInLayer = True
(inputToNonLinearityTrain,
inputToNonLinearityTest,
self._gBn,
self._b,
self.muBatchNorm,
self._varBnsArrayForRollingAverage,
self._sharedNewMu_B,
self._sharedNewVar_B,
self._newMu_B,
self._newVar_B) = applyBn( numberEpochApplyRolling,
inputSample_Train,
inputSample_Test,
inputSampleShape_Train)
self.params = self.params + [self._gBn, self._b]
else :
self._appliedBnInLayer = False
numberOfInputFeatMaps = inputSampleShape_Train[1]
b_values = np.zeros( (self._numberOfFeatureMaps), dtype = 'float32')
self._b = theano.shared(value=b_values, borrow=True)
inputToNonLinearityTrain = applyBiasToFeatureMaps( self._b, inputSample_Train )
inputToNonLinearityTest = applyBiasToFeatureMaps( self._b, inputSample_Test )
self.params = self.params + [self._b]
# ________________________________________________________
# 2 : Apply the corresponding activation function
# ________________________________________________________
def Linear():
print " --- Activation function: Linear"
self.activationFunctionType = "Linear"
output_Train = inputToNonLinearityTrain
output_Test = inputToNonLinearityTest
return (output_Train, output_Test)
def ReLU():
print " --- Activation function: ReLU"
self.activationFunctionType = "ReLU"
output_Train = applyActivationFunction_ReLU_v1(inputToNonLinearityTrain)
output_Test = applyActivationFunction_ReLU_v1(inputToNonLinearityTest)
return (output_Train, output_Test)
def PReLU():
print " --- Activation function: PReLU"
self.activationFunctionType = "PReLU"
numberOfInputFeatMaps = inputSampleShape_Train[1]
PReLU_Values = np.ones( (numberOfInputFeatMaps), dtype = 'float32' )*0.01
self._aPrelu = theano.shared(value=PReLU_Values, borrow=True)
output_Train = applyActivationFunction_PReLU(inputToNonLinearityTrain, self._aPrelu)
output_Test = applyActivationFunction_PReLU(inputToNonLinearityTest, self._aPrelu)
self.params = self.params + [self._aPrelu]
self.numberOfTrainableParams += 1
return (output_Train,output_Test)
def LeakyReLU():
print " --- Activation function: Leaky ReLU "
self.activationFunctionType = "Leky ReLU"
leakiness = 0.2 # TODO. Introduce this value in the config.ini
output_Train = applyActivationFunction_LeakyReLU(inputToNonLinearityTrain,leakiness)
output_Test = applyActivationFunction_LeakyReLU(inputToNonLinearityTest,leakiness)
return (output_Train, output_Test)
optionsActFunction = {0 : Linear,
1 : ReLU,
2 : PReLU,
3 : LeakyReLU}
(inputToDropout_Train, inputToDropout_Test) = optionsActFunction[activationType]()
# ________________________________________________________
# 3 : Apply Dropout
# ________________________________________________________
output_Train = apply_Dropout(rndState,dropoutRate,inputSampleShape_Train,inputToDropout_Train, 0)
output_Test = apply_Dropout(rndState,dropoutRate,inputSampleShape_Train,inputToDropout_Test, 1)
# ________________________________________________________
# This will go as input to the convolutions
# ________________________________________________________
return (output_Train, output_Test)
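# A minimal instantiation sketch (all shapes and hyper-parameters are
# illustrative; consult the calling code for the real values):
#
#     rng = np.random.RandomState(1234)
#     x_train = T.tensor5('x_train')          # (batch, fmaps, z, y, x)
#     x_test = T.tensor5('x_test')
#     layer = LiviaNet3DConvLayer(
#         rng, 0, x_train, x_test,
#         [10, 1, 25, 25, 25],                # inputToLayerShapeTrain
#         [1, 1, 45, 45, 45],                 # inputToLayerShapeTest
#         [30, 1, 3, 3, 3],                   # filterShape (out, in, kz, ky, kx)
#         useBatchNorm=1, numberEpochApplyRolling=60,
#         maxPoolingParameters=[], weights_initMethodType=2,
#         weights=[], activationType=2, dropoutRate=0.0)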
| {
"content_hash": "37f45bfa6d7fc539f726bc5a357655f7",
"timestamp": "",
"source": "github",
"line_count": 288,
"max_line_length": 143,
"avg_line_length": 43.78472222222222,
"alnum_prop": 0.5390959555908009,
"repo_name": "josedolz/LiviaNET",
"id": "5bad562b4188259356f937db54eecfb20dc34aac",
"size": "12610",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/LiviaNet/LiviaNet3DConvLayer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "159013"
}
],
"symlink_target": ""
} |
from .text import slugify
from .validators import slug_re
def patch_django_urls():
import django.utils.text as django_text
import django.core.validators as django_validators
django_text.slugify = slugify
django_validators.slug_re = slug_re
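# Call once at start-up, before any slug validation runs, e.g. (placement is
# illustrative):
#
#     from unicode_urls.django import patch_django_urls
#     patch_django_urls()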
| {
"content_hash": "a7f865d31c1c69224fea12bd7916735b",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 54,
"avg_line_length": 25.9,
"alnum_prop": 0.749034749034749,
"repo_name": "Alexx-G/django-unicode-urls",
"id": "7e551144a2176f2b345fb1e10d7bd0bee3e82465",
"size": "259",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "unicode_urls/django/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2842"
}
],
"symlink_target": ""
} |
from setuptools import find_packages, setup
if __name__ == "__main__":
setup(
name="benchmarks",
packages=find_packages(),
)
| {
"content_hash": "60f01bc49364629f0e22a333ecec302f",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 43,
"avg_line_length": 21.428571428571427,
"alnum_prop": 0.5733333333333334,
"repo_name": "dedupeio/dedupe",
"id": "73f83b12a26f64506c18c6d1e9517a729a09c17d",
"size": "190",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "benchmarks/setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Cython",
"bytes": "1477"
},
{
"name": "Python",
"bytes": "228051"
},
{
"name": "Shell",
"bytes": "194"
}
],
"symlink_target": ""
} |
"""
Checks for the Attribute Conventions for Dataset Discovery (ACDD)
This module contains classes defined as checks, part of the compliance checker
project, for the verification and scoring of dataset attributes.
"""
from datetime import timedelta
from functools import partial
import numpy as np
import pendulum
from cftime import num2pydate
from pygeoif import from_wkt
from compliance_checker import cfutil
from compliance_checker.base import (
BaseCheck,
BaseNCCheck,
Result,
check_has,
ratable_result,
)
from compliance_checker.cf.util import _possiblexunits, _possibleyunits
from compliance_checker.util import dateparse, datetime_is_iso, kvp_convert
class ACDDBaseCheck(BaseCheck):
_cc_spec = "acdd"
_cc_description = "Attribute Conventions for Dataset Discovery (ACDD)"
_cc_url = "http://wiki.esipfed.org/index.php?title=Category:Attribute_Conventions_Dataset_Discovery"
_cc_display_headers = {3: "Highly Recommended", 2: "Recommended", 1: "Suggested"}
def __init__(self):
self.high_rec_atts = ["title", "keywords", "summary"]
self.rec_atts = [
"id",
"naming_authority",
"history",
"comment",
"date_created",
"creator_name",
"creator_url",
"creator_email",
"institution",
"project",
"processing_level",
("geospatial_bounds", self.verify_geospatial_bounds),
"geospatial_lat_min",
"geospatial_lat_max",
"geospatial_lon_min",
"geospatial_lon_max",
"geospatial_vertical_min",
"geospatial_vertical_max",
"time_coverage_start",
"time_coverage_end",
"time_coverage_duration",
"time_coverage_resolution",
"standard_name_vocabulary",
"license",
]
self.sug_atts = [
"contributor_name",
"contributor_role",
"date_modified",
"date_issued",
"geospatial_lat_units",
"geospatial_lat_resolution",
"geospatial_lon_units",
"geospatial_lon_resolution",
"geospatial_vertical_units",
"geospatial_vertical_resolution",
]
# This variable is used to cache the results of applicable variables so
# the method isn't executed repeatedly.
self._applicable_variables = None
# to be used to format variable Result groups headers
self._var_header = 'variable "{}" missing the following attributes:'
# set up attributes according to version
@check_has(BaseCheck.HIGH, gname="Global Attributes")
def check_high(self, ds):
"""
        Checks for the existence of each highly recommended attribute in the dataset
:param netCDF4.Dataset ds: An open netCDF dataset
"""
return self.high_rec_atts
@check_has(BaseCheck.MEDIUM, gname="Global Attributes")
def check_recommended(self, ds):
"""
        Checks for the existence of each recommended attribute in the dataset
:param netCDF4.Dataset ds: An open netCDF dataset
"""
return self.rec_atts
@check_has(BaseCheck.LOW, gname="Global Attributes")
def check_suggested(self, ds):
"""
        Checks for the existence of each suggested attribute in the dataset
:param netCDF4.Dataset ds: An open netCDF dataset
"""
return self.sug_atts
def get_applicable_variables(self, ds):
"""
Returns a list of variable names that are applicable to ACDD Metadata
Checks for variables. This includes geophysical and coordinate
variables only.
:param netCDF4.Dataset ds: An open netCDF dataset
"""
        if self._applicable_variables is None:
            self._applicable_variables = cfutil.get_geophysical_variables(ds)
            varname = cfutil.get_time_variable(ds)
            # avoid duplicates by checking if already present
            if varname and (varname not in self._applicable_variables):
                self._applicable_variables.append(varname)
            varname = cfutil.get_lon_variable(ds)
            if varname and (varname not in self._applicable_variables):
                self._applicable_variables.append(varname)
            varname = cfutil.get_lat_variable(ds)
            if varname and (varname not in self._applicable_variables):
                self._applicable_variables.append(varname)
            varname = cfutil.get_z_variable(ds)
            if varname and (varname not in self._applicable_variables):
                self._applicable_variables.append(varname)
        return self._applicable_variables
def check_var_long_name(self, ds):
"""
Checks each applicable variable for the long_name attribute
:param netCDF4.Dataset ds: An open netCDF dataset
"""
results = []
# ACDD Variable Metadata applies to all coordinate variables and
# geophysical variables only.
for variable in self.get_applicable_variables(ds):
msgs = []
long_name = getattr(ds.variables[variable], "long_name", None)
check = long_name is not None
if not check:
msgs.append("long_name")
results.append(
Result(BaseCheck.HIGH, check, self._var_header.format(variable), msgs)
)
return results
def check_var_standard_name(self, ds):
"""
Checks each applicable variable for the standard_name attribute
:param netCDF4.Dataset ds: An open netCDF dataset
"""
results = []
for variable in self.get_applicable_variables(ds):
msgs = []
std_name = getattr(ds.variables[variable], "standard_name", None)
check = std_name is not None
if not check:
msgs.append("standard_name")
results.append(
Result(BaseCheck.HIGH, check, self._var_header.format(variable), msgs)
)
return results
def check_var_units(self, ds):
"""
Checks each applicable variable for the units attribute
:param netCDF4.Dataset ds: An open netCDF dataset
"""
results = []
for variable in self.get_applicable_variables(ds):
msgs = []
# Check units and dims for variable
unit_check = hasattr(ds.variables[variable], "units")
no_dim_check = getattr(ds.variables[variable], "dimensions") == tuple()
# Check if we have no dimensions. If no dims, skip test
if no_dim_check:
continue
# Check if we have no units
if not unit_check:
msgs.append("units")
results.append(
Result(
BaseCheck.HIGH, unit_check, self._var_header.format(variable), msgs
)
)
return results
def check_acknowledgment(self, ds):
"""
        Check if an acknowledgment/acknowledgement attribute is present. Because
acknowledgement has its own check, we are keeping it out of the Global
Attributes (even though it is a Global Attr).
:param netCDF4.Dataset ds: An open netCDF dataset
"""
check = False
messages = []
if hasattr(ds, "acknowledgment") or hasattr(ds, "acknowledgement"):
check = True
else:
messages.append("acknowledgment/acknowledgement not present")
# name="Global Attributes" so gets grouped with Global Attributes
return Result(BaseCheck.MEDIUM, check, "Global Attributes", msgs=messages)
def check_lat_extents(self, ds):
"""
Check that the values of geospatial_lat_min/geospatial_lat_max
approximately match the data.
:param netCDF4.Dataset ds: An open netCDF dataset
"""
if not (hasattr(ds, "geospatial_lat_min") or hasattr(ds, "geospatial_lat_max")):
return Result(
BaseCheck.MEDIUM,
False,
"geospatial_lat_extents_match",
["geospatial_lat_min/max attribute not found, CF-1.6 spec chapter 4.1"],
)
try: # type cast
lat_min = float(ds.geospatial_lat_min)
lat_max = float(ds.geospatial_lat_max)
except ValueError:
return Result(
BaseCheck.MEDIUM,
False,
"geospatial_lat_extents_match",
[
"Could not convert one of geospatial_lat_min ({}) or max ({}) to float see CF-1.6 spec chapter 4.1"
"".format(ds.geospatial_lat_min, ds.geospatial_lat_max)
],
)
# identify lat var(s) as per CF 4.1
lat_vars = {} # var -> number of criteria passed
for name, var in ds.variables.items():
# must have units
if not hasattr(var, "units"):
continue
lat_vars[var] = 0
# units in this set
if var.units in _possibleyunits:
lat_vars[var] += 1
# standard name of "latitude"
if hasattr(var, "standard_name") and var.standard_name == "latitude":
lat_vars[var] += 1
# axis of "Y"
if hasattr(var, "axis") and var.axis == "Y":
lat_vars[var] += 1
# trim out any zeros
lat_vars = {k: v for k, v in lat_vars.items() if v > 0}
if len(lat_vars) == 0:
return Result(
BaseCheck.MEDIUM,
False,
"geospatial_lat_extents_match",
[
"Could not find lat variable to test extent of geospatial_lat_min/max, see CF-1.6 spec chapter 4.1"
],
)
# sort by criteria passed
final_lats = sorted(lat_vars, key=lambda x: lat_vars[x], reverse=True)
obs_mins = {
var._name: np.nanmin(var) for var in final_lats if not np.isnan(var).all()
}
obs_maxs = {
var._name: np.nanmax(var) for var in final_lats if not np.isnan(var).all()
}
min_pass = any((np.isclose(lat_min, min_val) for min_val in obs_mins.values()))
max_pass = any((np.isclose(lat_max, max_val) for max_val in obs_maxs.values()))
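        # one point each for matching the observed min and max (score out of 2)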
allpass = sum((min_pass, max_pass))
msgs = []
if not min_pass:
msgs.append(
"Data for possible latitude variables (%s) did not match geospatial_lat_min value (%s)"
% (obs_mins, lat_min)
)
if not max_pass:
msgs.append(
"Data for possible latitude variables (%s) did not match geospatial_lat_max value (%s)"
% (obs_maxs, lat_max)
)
return Result(
BaseCheck.MEDIUM, (allpass, 2), "geospatial_lat_extents_match", msgs
)
def check_lon_extents(self, ds):
"""
Check that the values of geospatial_lon_min/geospatial_lon_max
approximately match the data.
:param netCDF4.Dataset ds: An open netCDF dataset
"""
if not (
hasattr(ds, "geospatial_lon_min") and hasattr(ds, "geospatial_lon_max")
):
return Result(
BaseCheck.MEDIUM,
False,
"geospatial_lon_extents_match",
["geospatial_lon_min/max attribute not found, CF-1.6 spec chapter 4.1"],
)
try: # type cast
lon_min = float(ds.geospatial_lon_min)
lon_max = float(ds.geospatial_lon_max)
except ValueError:
return Result(
BaseCheck.MEDIUM,
False,
"geospatial_lon_extents_match",
[
"Could not convert one of geospatial_lon_min ({}) or max ({}) to float see CF-1.6 spec chapter 4.1"
"".format(ds.geospatial_lon_min, ds.geospatial_lon_max)
],
)
# identify lon var(s) as per CF 4.2
lon_vars = {} # var -> number of criteria passed
for name, var in ds.variables.items():
# must have units
if not hasattr(var, "units"):
continue
lon_vars[var] = 0
# units in this set
if var.units in _possiblexunits:
lon_vars[var] += 1
# standard name of "longitude"
if hasattr(var, "standard_name") and var.standard_name == "longitude":
lon_vars[var] += 1
# axis of "Y"
if hasattr(var, "axis") and var.axis == "X":
lon_vars[var] += 1
# trim out any zeros
lon_vars = {k: v for k, v in lon_vars.items() if v > 0}
if len(lon_vars) == 0:
return Result(
BaseCheck.MEDIUM,
False,
"geospatial_lon_extents_match",
[
"Could not find lon variable to test extent of geospatial_lon_min/max, see CF-1.6 spec chapter 4.2"
],
)
# sort by criteria passed
final_lons = sorted(lon_vars, key=lambda x: lon_vars[x], reverse=True)
obs_mins = {
var._name: np.nanmin(var) for var in final_lons if not np.isnan(var).all()
}
obs_maxs = {
var._name: np.nanmax(var) for var in final_lons if not np.isnan(var).all()
}
min_pass = any((np.isclose(lon_min, min_val) for min_val in obs_mins.values()))
max_pass = any((np.isclose(lon_max, max_val) for max_val in obs_maxs.values()))
allpass = sum((min_pass, max_pass))
msgs = []
if not min_pass:
msgs.append(
"Data for possible longitude variables (%s) did not match geospatial_lon_min value (%s)"
% (obs_mins, lon_min)
)
if not max_pass:
msgs.append(
"Data for possible longitude variables (%s) did not match geospatial_lon_max value (%s)"
% (obs_maxs, lon_max)
)
return Result(
BaseCheck.MEDIUM, (allpass, 2), "geospatial_lon_extents_match", msgs
)
def verify_geospatial_bounds(self, ds):
"""Checks that the geospatial bounds is well formed OGC WKT"""
var = getattr(ds, "geospatial_bounds", None)
check = var is not None
if not check:
return ratable_result(
False,
"Global Attributes", # grouped with Globals
["geospatial_bounds not present"],
)
try:
# TODO: verify that WKT is valid given CRS (defaults to EPSG:4326
            # in ACDD).
from_wkt(ds.geospatial_bounds)
except AttributeError:
return ratable_result(
False,
"Global Attributes", # grouped with Globals
[
(
"Could not parse WKT from geospatial_bounds,"
' possible bad value: "{}"'.format(ds.geospatial_bounds)
)
],
variable_name="geospatial_bounds",
)
# parsed OK
else:
return ratable_result(True, "Global Attributes", tuple())
def _check_total_z_extents(self, ds, z_variable):
"""
Check the entire array of Z for minimum and maximum and compare that to
the vertical extents defined in the global attributes
:param netCDF4.Dataset ds: An open netCDF dataset
:param str z_variable: Name of the variable representing the Z-Axis
"""
msgs = []
total = 2
try:
vert_min = float(ds.geospatial_vertical_min)
except ValueError:
msgs.append("geospatial_vertical_min cannot be cast to float")
try:
vert_max = float(ds.geospatial_vertical_max)
except ValueError:
msgs.append("geospatial_vertical_max cannot be cast to float")
if len(msgs) > 0:
return Result(
BaseCheck.MEDIUM, (0, total), "geospatial_vertical_extents_match", msgs
)
zvalue = ds.variables[z_variable][:]
# If the array has fill values, which is allowed in the case of point
# features
if hasattr(zvalue, "mask"):
zvalue = zvalue[~zvalue.mask]
if zvalue.size == 0:
msgs.append(
"Cannot compare geospatial vertical extents "
"against min/max of data, as non-masked data "
"length is zero"
)
return Result(
BaseCheck.MEDIUM, (0, total), "geospatial_vertical_extents_match", msgs
)
else:
zmin = zvalue.min()
zmax = zvalue.max()
if not np.isclose(vert_min, zmin):
msgs.append(
"geospatial_vertical_min != min(%s) values, %s != %s"
% (z_variable, vert_min, zmin)
)
if not np.isclose(vert_max, zmax):
msgs.append(
"geospatial_vertical_max != max(%s) values, %s != %s"
                % (z_variable, vert_max, zmax)
)
return Result(
BaseCheck.MEDIUM,
(total - len(msgs), total),
"geospatial_vertical_extents_match",
msgs,
)
def _check_scalar_vertical_extents(self, ds, z_variable):
"""
Check the scalar value of Z compared to the vertical extents which
should also be equivalent
:param netCDF4.Dataset ds: An open netCDF dataset
:param str z_variable: Name of the variable representing the Z-Axis
"""
vert_min = ds.geospatial_vertical_min
vert_max = ds.geospatial_vertical_max
msgs = []
total = 2
zvalue = ds.variables[z_variable][:].item()
if not np.isclose(vert_min, vert_max):
msgs.append(
"geospatial_vertical_min != geospatial_vertical_max for scalar depth values, %s != %s"
% (vert_min, vert_max)
)
if not np.isclose(vert_max, zvalue):
msgs.append(
"geospatial_vertical_max != %s values, %s != %s"
% (z_variable, vert_max, zvalue)
)
return Result(
BaseCheck.MEDIUM,
(total - len(msgs), total),
"geospatial_vertical_extents_match",
msgs,
)
def check_vertical_extents(self, ds):
"""
Check that the values of geospatial_vertical_min/geospatial_vertical_max approximately match the data.
:param netCDF4.Dataset ds: An open netCDF dataset
"""
if not (
hasattr(ds, "geospatial_vertical_min")
and hasattr(ds, "geospatial_vertical_max")
):
return
z_variable = cfutil.get_z_variable(ds)
if not z_variable:
return Result(
BaseCheck.MEDIUM,
False,
"geospatial_vertical_extents_match",
[
"Could not find vertical variable to test extent of geospatial_vertical_min/geospatial_vertical_max, see CF-1.6 spec chapter 4.3"
],
)
if ds.variables[z_variable].dimensions == tuple():
return self._check_scalar_vertical_extents(ds, z_variable)
return self._check_total_z_extents(ds, z_variable)
def check_time_extents(self, ds):
"""
Check that the values of time_coverage_start/time_coverage_end approximately match the data.
"""
if not (
hasattr(ds, "time_coverage_start") and hasattr(ds, "time_coverage_end")
):
return
# Parse the ISO 8601 formatted dates
try:
t_min = dateparse(ds.time_coverage_start)
t_max = dateparse(ds.time_coverage_end)
        except Exception:
return Result(
BaseCheck.MEDIUM,
False,
"time_coverage_extents_match",
[
"time_coverage attributes are not formatted properly. Use the ISO 8601:2004 date format, preferably the extended format."
],
)
timevar = cfutil.get_time_variable(ds)
if not timevar:
return Result(
BaseCheck.MEDIUM,
False,
"time_coverage_extents_match",
[
"Could not find time variable to test extent of time_coverage_start/time_coverage_end, see CF-1.6 spec chapter 4.4"
],
)
# Time should be monotonically increasing, so we make that assumption here so we don't have to download THE ENTIRE ARRAY
try:
# num2date returns as naive date, but with time adjusted to UTC
# we need to attach timezone information here, or the date
# subtraction from t_min/t_max will assume that a naive timestamp is
# in the same time zone and cause erroneous results.
# Pendulum uses UTC by default, but we are being explicit here
time0 = pendulum.instance(
num2pydate(ds.variables[timevar][0], ds.variables[timevar].units), "UTC"
)
time1 = pendulum.instance(
num2pydate(ds.variables[timevar][-1], ds.variables[timevar].units),
"UTC",
)
        except Exception:
return Result(
BaseCheck.MEDIUM,
False,
"time_coverage_extents_match",
["Failed to retrieve and convert times for variables %s." % timevar],
)
start_dt = abs(time0 - t_min)
end_dt = abs(time1 - t_max)
score = 2
msgs = []
if start_dt > timedelta(hours=1):
msgs.append(
"Date time mismatch between time_coverage_start and actual "
"time values %s (time_coverage_start) != %s (time[0])"
% (t_min.isoformat(), time0.isoformat())
)
score -= 1
if end_dt > timedelta(hours=1):
msgs.append(
"Date time mismatch between time_coverage_end and actual "
"time values %s (time_coverage_end) != %s (time[N])"
% (t_max.isoformat(), time1.isoformat())
)
score -= 1
return Result(BaseCheck.MEDIUM, (score, 2), "time_coverage_extents_match", msgs)
def verify_convention_version(self, ds):
"""
Verify that the version in the Conventions field is correct
"""
try:
for convention in (
getattr(ds, "Conventions", "").replace(" ", "").split(",")
):
if convention == "ACDD-" + self._cc_spec_version:
return ratable_result(
(2, 2), None, []
) # name=None so grouped with Globals
# if no/wrong ACDD convention, return appropriate result
# Result will have name "Global Attributes" to group with globals
m = ["Conventions does not contain 'ACDD-{}'".format(self._cc_spec_version)]
return ratable_result((1, 2), "Global Attributes", m)
except AttributeError: # NetCDF attribute not found
m = [
"No Conventions attribute present; must contain ACDD-{}".format(
self._cc_spec_version
)
]
# Result will have name "Global Attributes" to group with globals
return ratable_result((0, 2), "Global Attributes", m)
class ACDDNCCheck(BaseNCCheck, ACDDBaseCheck):
pass
class ACDD1_1Check(ACDDNCCheck):
_cc_spec_version = "1.1"
_cc_description = "Attribute Conventions for Dataset Discovery (ACDD) 1.1"
register_checker = True
def __init__(self):
super(ACDD1_1Check, self).__init__()
self.rec_atts.extend(["keywords_vocabulary"])
self.sug_atts.extend(
[
"publisher_name", # publisher,dataCenter
"publisher_url", # publisher
"publisher_email", # publisher
"geospatial_vertical_positive",
]
)
class ACDD1_3Check(ACDDNCCheck):
_cc_spec_version = "1.3"
_cc_description = "Attribute Conventions for Dataset Discovery (ACDD) 1.3"
register_checker = True
def __init__(self):
super(ACDD1_3Check, self).__init__()
self.high_rec_atts.extend([("Conventions", self.verify_convention_version)])
self.rec_atts.extend(
[
"geospatial_vertical_positive",
"geospatial_bounds_crs",
"geospatial_bounds_vertical_crs",
"publisher_name", # publisher,dataCenter
"publisher_url", # publisher
"publisher_email", # publisher
"source",
]
)
self.sug_atts.extend(
[
("creator_type", ["person", "group", "institution", "position"]),
"creator_institution",
"platform",
"platform_vocabulary",
"keywords_vocabulary",
"instrument",
"metadata_link",
"product_version",
"references",
("publisher_type", ["person", "group", "institution", "position"]),
"instrument_vocabulary",
"date_metadata_modified",
"program",
"publisher_institution",
]
)
# override the ISO date checks in
def _check_attr_is_iso_date(attr, ds):
result_name = "{}_is_iso".format(attr)
if not hasattr(ds, attr):
return ratable_result(
(0, 2), result_name, ["Attr {} is not present".format(attr)]
)
else:
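                # presence of the attribute earns 1 point; a valid ISO 8601 value earns the second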
iso_check, msgs = datetime_is_iso(getattr(ds, attr))
return ratable_result((1 + iso_check, 2), result_name, msgs)
# run ISO 8601 date checks against the date_created, date_issued,
# date_modified, and date_metadata_modified global attributes
self.rec_atts = kvp_convert(self.rec_atts)
self.rec_atts["date_created"] = partial(_check_attr_is_iso_date, "date_created")
self.sug_atts = kvp_convert(self.sug_atts)
for k in (
"date_{}".format(suffix)
for suffix in ("issued", "modified", "metadata_modified")
):
self.sug_atts[k] = partial(_check_attr_is_iso_date, k)
def check_metadata_link(self, ds):
"""
Checks if metadata link is formed in a rational manner
:param netCDF4.Dataset ds: An open netCDF dataset
"""
if not hasattr(ds, u"metadata_link"):
return
msgs = []
meta_link = getattr(ds, "metadata_link")
if "http" not in meta_link:
msgs.append("Metadata URL should include http:// or https://")
valid_link = len(msgs) == 0
return Result(BaseCheck.LOW, valid_link, "metadata_link_valid", msgs)
def check_id_has_no_blanks(self, ds):
"""
Check if there are blanks in the id field
:param netCDF4.Dataset ds: An open netCDF dataset
"""
if not hasattr(ds, u"id"):
return
if " " in getattr(ds, u"id"):
return Result(
BaseCheck.MEDIUM,
False,
"no_blanks_in_id",
msgs=[u"There should be no blanks in the id field"],
)
else:
return Result(BaseCheck.MEDIUM, True, "no_blanks_in_id", msgs=[])
def check_var_coverage_content_type(self, ds):
"""
Check coverage content type against valid ISO-19115-1 codes
:param netCDF4.Dataset ds: An open netCDF dataset
"""
results = []
for variable in cfutil.get_geophysical_variables(ds):
msgs = []
ctype = getattr(ds.variables[variable], "coverage_content_type", None)
check = ctype is not None
if not check:
msgs.append("coverage_content_type")
results.append(
Result(
BaseCheck.HIGH, check, self._var_header.format(variable), msgs
)
)
continue
# ISO 19115-1 codes
valid_ctypes = {
"image",
"thematicClassification",
"physicalMeasurement",
"auxiliaryInformation",
"qualityInformation",
"referenceInformation",
"modelResult",
"coordinate",
}
if ctype not in valid_ctypes:
msgs.append(
                    'coverage_content_type "%s" not in valid types %s' % (ctype, sorted(valid_ctypes))
)
results.append(
Result(
BaseCheck.HIGH,
                    ctype in valid_ctypes,
self._var_header.format(variable),
msgs,
)
)
return results
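# Minimal usage sketch (illustrative; assumes the standard compliance-checker
# runner API and a hypothetical dataset path):
#     from compliance_checker.runner import CheckSuite, ComplianceChecker
#     CheckSuite.load_all_available_checkers()
#     ComplianceChecker.run_checker("/path/to/dataset.nc", ["acdd"], 1, "normal")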
| {
"content_hash": "38e513ec1ee0c95f3a5fb7430286dc33",
"timestamp": "",
"source": "github",
"line_count": 843,
"max_line_length": 149,
"avg_line_length": 35.27876631079478,
"alnum_prop": 0.5357767316745125,
"repo_name": "ocefpaf/compliance-checker",
"id": "f19903d5a94a782f1dec51fee600cc16526e4875",
"size": "29740",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "compliance_checker/acdd.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jinja",
"bytes": "12681"
},
{
"name": "Python",
"bytes": "796514"
}
],
"symlink_target": ""
} |
from cpgReport import *
from CGATReport.Tracker import *
from CGATReport.odict import OrderedDict as odict
##########################################################################
##########################################################################
class genesCoveredByNMI(cpgTracker):
    '''Genes with >90% of their length overlapped by NMIs.'''
ANNOTATIONS_NAME = P['annotations_name']
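    # mPattern selects the database tables this tracker applies to (CGATReport convention)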
mPattern = "_replicated_" + ANNOTATIONS_NAME + "_genes_capseq_overlap$"
def __call__(self, track, slice=None):
query = '''select distinct o.gene_id, i.gene_name, capseq_nover, length, capseq_pover1, capseq_pover2
from %(track)s_replicated_%(ANNOTATIONS_NAME)s_genes_capseq_overlap o, annotations.transcript_info i
where capseq_pover1>90
and o.gene_id=i.gene_id
and o.length > 1000
order by length desc '''
data = self.getAll(query)
return data
##########################################################################
class genesWithNMI(cpgTracker):
    '''Genes with <10% (but >0%) of their length overlapped by NMIs.'''
ANNOTATIONS_NAME = P['annotations_name']
mPattern = "_replicated_" + ANNOTATIONS_NAME + "_genes_capseq_overlap$"
def __call__(self, track, slice=None):
query = '''select distinct o.gene_id, i.gene_name, capseq_nover, length, capseq_pover1, capseq_pover2
from %(track)s_replicated_%(ANNOTATIONS_NAME)s_genes_capseq_overlap o, annotations.transcript_info i
where capseq_pover1 <10
and capseq_pover1 >0
and o.gene_id=i.gene_id
and o.length > 1000
and o.length < 15000
order by length desc '''
data = self.getAll(query)
return data
##########################################################################
class overlappedGenesGOAnalysisBP(cpgTracker):
'''GO analysis biological process'''
mPattern = "_overlapped_genes_go_biol_process$"
def __call__(self, track, slice=None):
query = '''select distinct goid, description, scount as genes, bcount as genes_with_term, spercent as percent_of_list, ratio as Enrichment, fdr
from %(track)s_overlapped_genes_go_biol_process
where fdr < 0.05
order by fdr asc, scount desc '''
data = self.getAll(query)
return data
##########################################################################
class overlappedGenesGOAnalysisCL(cpgTracker):
    '''GO analysis cellular location'''
mPattern = "_overlapped_genes_go_cell_location$"
def __call__(self, track, slice=None):
query = '''select distinct goid, description, scount as genes, bcount as genes_with_term, spercent as percent_of_list, ratio as Enrichment, fdr
from %(track)s_overlapped_genes_go_cell_location
where fdr < 0.05
order by fdr asc, scount desc '''
data = self.getAll(query)
return data
##########################################################################
class overlappedGenesGOAnalysisMF(cpgTracker):
    '''GO analysis molecular function'''
mPattern = "_overlapped_genes_go_mol_function$"
def __call__(self, track, slice=None):
query = '''select distinct goid, description, scount as genes, bcount as genes_with_term, spercent as percent_of_list, ratio as Enrichment, fdr
from %(track)s_overlapped_genes_go_mol_function
where fdr < 0.05
order by fdr asc, scount desc '''
data = self.getAll(query)
return data
##########################################################################
class overlappedGenesGOSlimAnalysisBP(cpgTracker):
    '''GO slim analysis biological process'''
mPattern = "_overlapped_genes_goslim_biol_process$"
def __call__(self, track, slice=None):
query = '''select distinct goid, description, scount as genes, bcount as genes_with_term, spercent as percent_of_list, ratio as Enrichment, fdr
from %(track)s_overlapped_genes_goslim_biol_process
where fdr < 0.05
order by fdr asc, scount desc '''
data = self.getAll(query)
return data
##########################################################################
class overlappedGenesCapseqProfile(TrackerImages):
"""CAPseq profile per gene """
##########################################################################
class overlappedGenesH3K27Profile(TrackerImages):
"""Chromatin profile per gene"""
##########################################################################
class overlappedGenesH3K4Profile(TrackerImages):
"""Chromatin profile per gene"""
##########################################################################
class overlappedGenesH3K27Venn(TrackerImages):
"""intersection of overlapped genes and H3K27Me3 intervals"""
##########################################################################
class overlappedGenesTissueVenn(TrackerImages):
"""Conservation of overlapped genes across tissues"""
##########################################################################
class polycombGAT(cpgTracker):
"""genomic assocation of H3K27Me3 intervals and genes overlapped >90% by NMIs"""
mPattern = "overlapped_genes_gat_results$"
def __call__(self, track, slice=None):
data = self.get(
"SELECT track, annotation, round(expected,0) as expected, observed, round(fold,1) as fold, pvalue FROM overlapped_genes_gat_results ")
return odict(list(zip(("Dataset1", "Dataset2", "Expected overlap", "Observed overlap", "Fold Enrichment", "P-value"), list(zip(*data)))))
##########################################################################
class polycombIntersection(cpgTracker):
"""Intersection of H3K27Me3 intervals and genes overlapped >90% by NMIs"""
mPattern = "overlapped_genes_h3k27me3_venn$"
def __call__(self, track, slice=None):
query = '''select track, chromatin_track, total_merged_intervals, track_and_chromatin_track, track_only, chromatin_track_only,
round((0.0+track_and_chromatin_track)/(track_and_chromatin_track+track_only+0.0)*100,2) as percent_track,
round((0.0+track_and_chromatin_track)/(track_and_chromatin_track+chromatin_track_only+0.0)*100,2) as percent_chromatin_track
from overlapped_genes_h3k27me3_venn'''
data = self.getAll(query)
return data
| {
"content_hash": "c4f3128469b9bcadd3e6587566c101e2",
"timestamp": "",
"source": "github",
"line_count": 173,
"max_line_length": 152,
"avg_line_length": 37.6878612716763,
"alnum_prop": 0.5315950920245399,
"repo_name": "CGATOxford/CGATPipelines",
"id": "cdc43caed54d83f75da2a79910d39453c611ff5d",
"size": "6520",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "CGATPipelines/pipeline_docs/pipeline_proj007/trackers/macs_replicated_overlapped_genes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4360"
},
{
"name": "HTML",
"bytes": "40732"
},
{
"name": "JavaScript",
"bytes": "302029"
},
{
"name": "Jupyter Notebook",
"bytes": "4393775"
},
{
"name": "Makefile",
"bytes": "45084"
},
{
"name": "Python",
"bytes": "5357820"
},
{
"name": "R",
"bytes": "62312"
},
{
"name": "Shell",
"bytes": "67312"
}
],
"symlink_target": ""
} |
from typing import Optional, Callable
from datetime import datetime, timedelta
import inspect
import logging
import os.path
import re
import subprocess
import time
from .. import BaseSystem
class Windows(BaseSystem):
def __init__(self):
super(Windows, self).__init__()
self.__ntp = None
self.__uptime = None
@property
def system(self) -> str:
return 'Windows'
@property
def temperature(self) -> Optional[float]:
# @ the Windows users: Sorry folks! Windows needs admin rights to access the temp sensor.
        # As long as this is the case, temperature display will not be supported on Windows!!
return None
@property
def uptime(self) -> Optional[str]:
if self.__uptime is None:
            # Because there is no 'reliable' way to retrieve the uptime
            # on Windows, we rely on a third-party tool:
# uptime version 1.1.0: http://uptimeexe.codeplex.com/
# Using v1.1.0 is critical/mandatory as the output changed from prior versions!
# We expect to find this uptime.exe in ./uptime!
path = os.path.dirname(inspect.getfile(Windows))
path = os.path.join(path, 'uptime/uptime.exe')
try:
upt_v = subprocess.check_output('{} -v'.format(path)).decode('utf-8').strip()
            except Exception:
log = logging.getLogger('theonionbox')
log.debug("Failed to run 'uptime' tool (http://uptimeexe.codeplex.com). "
"Check documentation for further instructions!")
else:
# expected output format is exactly 'version 1.1.0'
if upt_v == 'version 1.1.0':
try:
uptimes = subprocess.check_output(path).decode('utf-8').split()
# expected output format is now e.g. '22:23:43 uptime 02:16:21'
if len(uptimes) == 3 and uptimes[1] == 'uptime':
upt = uptimes[2].split(':')
if len(upt) == 3:
its_now = datetime.fromtimestamp(time.time())
upt_diff = timedelta(hours=int(upt[0]),
minutes=int(upt[1]),
seconds=int(upt[2]),
microseconds=its_now.microsecond)
self.__uptime = (its_now - upt_diff).strftime('%Y-%m-%d %H:%M')
except Exception:
pass
else:
log = logging.getLogger('theonionbox')
log.debug("Found 'uptime' tool yet version is not v1.1.0. "
"Check documentation for further instructions!")
return self.__uptime
@property
def ntp(self) -> Optional[str]:
if self.__ntp is None:
try:
# Get the known timeservers from the registry
reg = subprocess.check_output(['reg',
'query',
                                               r'HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows\CurrentVersion\DateTime\Servers'])
reg = reg.decode("utf-8")
# HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows\CurrentVersion\DateTime\Servers
# (Standard) REG_SZ 1
# 1 REG_SZ time.windows.com <===
# 2 REG_SZ time.nist.gov <===
regex = r"^\s+\d+\s+REG_SZ\s+(.+)"
ntps = re.findall(regex, reg)
if len(ntps) > 0:
self.__ntp = ntps[0]
except Exception:
pass
            return self.__ntp
| {
"content_hash": "48e369cbb1597fecf2a6e40edaf75b21",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 128,
"avg_line_length": 37.883495145631066,
"alnum_prop": 0.4917990773962071,
"repo_name": "ralphwetzel/theonionbox",
"id": "f18bff3774db91acbbba8ef63e38250c48c33cce",
"size": "3902",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "theonionbox/tob/system/windows/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "25508"
},
{
"name": "CSS",
"bytes": "151046"
},
{
"name": "Dockerfile",
"bytes": "279"
},
{
"name": "HTML",
"bytes": "330425"
},
{
"name": "JavaScript",
"bytes": "935134"
},
{
"name": "Makefile",
"bytes": "385"
},
{
"name": "Python",
"bytes": "327442"
},
{
"name": "Shell",
"bytes": "4568"
}
],
"symlink_target": ""
} |
from django.conf.urls import url
from . import views
urlpatterns = [
url(r"^$", views.index),
url(r"^data$", views.data),
]
| {
"content_hash": "4fb3fa218668920b51d71da7e0d4a232",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 32,
"avg_line_length": 15,
"alnum_prop": 0.6222222222222222,
"repo_name": "noahbkim/finances",
"id": "468e3bf442da2ac0f36fe03377b37afb47197f14",
"size": "135",
"binary": false,
"copies": "2",
"ref": "refs/heads/django",
"path": "heatmap/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "50025"
},
{
"name": "HTML",
"bytes": "14745"
},
{
"name": "JavaScript",
"bytes": "119927"
},
{
"name": "Python",
"bytes": "20861"
}
],
"symlink_target": ""
} |
from ....testing import assert_equal
from ..minc import Dump
def test_Dump_inputs():
input_map = dict(annotations_brief=dict(argstr='-b %s',
xor=(u'annotations_brief', u'annotations_full'),
),
annotations_full=dict(argstr='-f %s',
xor=(u'annotations_brief', u'annotations_full'),
),
args=dict(argstr='%s',
),
coordinate_data=dict(argstr='-c',
xor=(u'coordinate_data', u'header_data'),
),
environ=dict(nohash=True,
usedefault=True,
),
header_data=dict(argstr='-h',
xor=(u'coordinate_data', u'header_data'),
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
input_file=dict(argstr='%s',
mandatory=True,
position=-2,
),
line_length=dict(argstr='-l %d',
usedefault=False,
),
netcdf_name=dict(argstr='-n %s',
),
out_file=dict(argstr='> %s',
genfile=True,
position=-1,
),
output_file=dict(hash_files=False,
keep_extension=False,
name_source=[u'input_file'],
name_template='%s_dump.txt',
position=-1,
),
precision=dict(argstr='%s',
),
terminal_output=dict(nohash=True,
),
variables=dict(argstr='-v %s',
sep=',',
),
)
inputs = Dump.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_Dump_outputs():
output_map = dict(output_file=dict(),
)
outputs = Dump.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
yield assert_equal, getattr(outputs.traits()[key], metakey), value
| {
"content_hash": "f058a6dfc584d0cf29b1ce7ff6c8581b",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 78,
"avg_line_length": 25.597014925373134,
"alnum_prop": 0.5982507288629738,
"repo_name": "carolFrohlich/nipype",
"id": "c1de6510cfe3dfbcf13e463d1a23cc8ce1751b0a",
"size": "1769",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nipype/interfaces/minc/tests/test_auto_Dump.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "9823"
},
{
"name": "KiCad",
"bytes": "3797"
},
{
"name": "Makefile",
"bytes": "2320"
},
{
"name": "Matlab",
"bytes": "1717"
},
{
"name": "Python",
"bytes": "5451077"
},
{
"name": "Shell",
"bytes": "3302"
},
{
"name": "Tcl",
"bytes": "43408"
}
],
"symlink_target": ""
} |
"""Streamable BEL as JSON."""
import gzip
import json
from typing import Any, Iterable, List, TextIO, Union
from networkx.utils import open_file
from .nodelink import _augment_node, _prepare_graph_dict, _recover_graph_dict
from ..constants import CITATION, SOURCE_MODIFIER, TARGET_MODIFIER
from ..language import CitationDict
from ..struct.graph import BELGraph, _handle_modifier
from ..tokens import parse_result_to_dsl
from ..utils import hash_edge
__all__ = [
"to_sbel_file",
"to_sbel",
"to_sbel_gz",
"from_sbel",
"from_sbel_gz",
"from_sbel_file",
]
SBEL = Any
@open_file(1, mode="w")
def to_sbel_file(graph: BELGraph, path: Union[str, TextIO], separators=(",", ":"), **kwargs) -> None:
"""Write this graph as BEL JSONL to a file.
:param graph: A BEL graph
:param separators: The separators used in :func:`json.dumps`
:param path: A path or file-like
"""
for i in iterate_sbel(graph):
print(
json.dumps(i, ensure_ascii=False, separators=separators, **kwargs),
file=path,
)
def to_sbel_gz(graph: BELGraph, path: str, separators=(",", ":"), **kwargs) -> None:
"""Write a graph as BEL JSONL to a gzip file.
:param graph: A BEL graph
:param separators: The separators used in :func:`json.dumps`
:param path: A path for a gzip file
"""
with gzip.open(path, "wt") as file:
to_sbel_file(graph, file, separators=separators, **kwargs)
def to_sbel(graph: BELGraph) -> List[SBEL]:
"""Create a list of JSON dictionaries corresponding to lines in BEL JSONL."""
return list(iterate_sbel(graph))
def iterate_sbel(graph: BELGraph) -> Iterable[SBEL]:
"""Iterate over JSON dictionaries corresponding to lines in BEL JSONL."""
g = graph.graph.copy()
_prepare_graph_dict(g)
yield g
for u, v, k, d in graph.edges(data=True, keys=True):
yield {
"source": _augment_node(u),
"target": _augment_node(v),
"key": k,
**d,
}
def from_sbel(it: Iterable[SBEL], includes_metadata: bool = True) -> BELGraph:
"""Load a BEL graph from an iterable of dictionaries corresponding to lines in BEL JSONL.
:param it: An iterable of dictionaries.
:param includes_metadata: By default, interprets the first element of the iterable as the graph's metadata.
Switch to ``False`` to disable.
:return: A BEL graph
"""
it = iter(it)
rv = BELGraph()
if includes_metadata:
rv.graph.update(next(it))
_recover_graph_dict(rv)
add_sbel(rv, it)
return rv
def add_sbel(graph: BELGraph, it: Iterable[SBEL]) -> None:
"""Add dictionaries to a BEL graph.
:param graph: A BEL graph
:param it: An iterable of dictionaries.
"""
for data in it:
add_sbel_row(graph, data)
def add_sbel_row(graph: BELGraph, data: SBEL) -> str:
"""Add a single SBEL data dictionary to a graph."""
u = parse_result_to_dsl(data["source"])
v = parse_result_to_dsl(data["target"])
edge_data = {k: v for k, v in data.items() if k not in {"source", "target", "key"}}
for side in (SOURCE_MODIFIER, TARGET_MODIFIER):
side_data = edge_data.get(side)
if side_data:
_handle_modifier(side_data)
if CITATION in edge_data:
edge_data[CITATION] = CitationDict(**edge_data[CITATION])
return graph.add_edge(u, v, key=hash_edge(u, v, edge_data), **edge_data)
@open_file(0, mode="r")
def from_sbel_file(path: Union[str, TextIO]) -> BELGraph:
"""Build a graph from the BEL JSONL contained in the given file.
:param path: A path or file-like
"""
return from_sbel((json.loads(line) for line in path))
def from_sbel_gz(path: str) -> BELGraph:
"""Read a graph as BEL JSONL from a gzip file."""
with gzip.open(path, "rt") as file:
return from_sbel_file(file)
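# Round-trip sketch (illustrative; assumes an existing BELGraph instance `graph`):
#     lines = to_sbel(graph)
#     graph_2 = from_sbel(lines)
#     assert graph.number_of_edges() == graph_2.number_of_edges()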
| {
"content_hash": "cef834dfc2f8148079fcad75fc118b34",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 111,
"avg_line_length": 30.761904761904763,
"alnum_prop": 0.6341589267285862,
"repo_name": "pybel/pybel",
"id": "c2168d7723a47a9187be564c4515c93bc283efd6",
"size": "3901",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/pybel/io/sbel.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "880"
},
{
"name": "JavaScript",
"bytes": "9473"
},
{
"name": "Jupyter Notebook",
"bytes": "52170"
},
{
"name": "Python",
"bytes": "1475429"
}
],
"symlink_target": ""
} |
from ctypes import (
byref,
c_long,
c_longlong,
CDLL,
Structure,
)
from platform import uname
from . import (
Clock,
ClockTime,
)
from ._arithmetic import nano_divmod
__all__ = [
"SafeClock",
"PEP564Clock",
"LibCClock",
]
class SafeClock(Clock):
""" Clock implementation that should work for any variant of Python.
This clock is guaranteed microsecond precision.
"""
@classmethod
def precision(cls):
return 6
@classmethod
def available(cls):
return True
def utc_time(self):
from time import time
seconds, nanoseconds = nano_divmod(int(time() * 1000000), 1000000)
return ClockTime(seconds, nanoseconds * 1000)
class PEP564Clock(Clock):
""" Clock implementation based on the PEP564 additions to Python 3.7.
This clock is guaranteed nanosecond precision.
"""
@classmethod
def precision(cls):
return 9
@classmethod
def available(cls):
try:
from time import time_ns
except ImportError:
return False
else:
return True
def utc_time(self):
from time import time_ns
t = time_ns()
seconds, nanoseconds = divmod(t, 1000000000)
return ClockTime(seconds, nanoseconds)
class LibCClock(Clock):
""" Clock implementation that works only on platforms that provide
libc. This clock is guaranteed nanosecond precision.
"""
__libc = "libc.dylib" if uname()[0] == "Darwin" else "libc.so.6"
class _TimeSpec(Structure):
_fields_ = [
("seconds", c_longlong),
("nanoseconds", c_long),
]
@classmethod
def precision(cls):
return 9
@classmethod
def available(cls):
try:
_ = CDLL(cls.__libc)
except OSError:
return False
else:
return True
def utc_time(self):
libc = CDLL(self.__libc)
ts = self._TimeSpec()
status = libc.clock_gettime(0, byref(ts))
if status == 0:
return ClockTime(ts.seconds, ts.nanoseconds)
else:
raise RuntimeError("clock_gettime failed with status %d" % status)
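# Selection sketch (illustrative): prefer the most precise clock that reports
# itself as available on this platform.
#     for clock_cls in (PEP564Clock, LibCClock, SafeClock):
#         if clock_cls.available():
#             print(clock_cls.__name__, clock_cls().utc_time())
#             break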
| {
"content_hash": "af728e2e7510928b5bc30157f5b3efe3",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 78,
"avg_line_length": 22.059405940594058,
"alnum_prop": 0.5870736086175943,
"repo_name": "neo4j/neo4j-python-driver",
"id": "60f82cc8bf10f647e3b1532d982465ba38554d5e",
"size": "2871",
"binary": false,
"copies": "1",
"ref": "refs/heads/5.0",
"path": "neo4j/time/_clock_implementations.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2068"
},
{
"name": "Python",
"bytes": "1654566"
},
{
"name": "Shell",
"bytes": "4165"
}
],
"symlink_target": ""
} |
from setuptools import setup
setup(
name='FreeCAD-SEA',
version='0.0',
description="Statistical Energy Analysis module for FreeCAD.",
long_description=open('README.txt').read(),
author='Frederik Rietdijk',
author_email='fridh@fridh.nl',
license='LICENSE.txt',
packages=['Sea', 'Sea.model', 'Sea.adapter', 'Sea.actions', 'gui', 'gui.analysis', 'gui.addItem'],
zip_safe=False,
install_requires=[
'numpy',
],
)
| {
"content_hash": "0f36b047fff6ec88617af9a105bc443c",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 104,
"avg_line_length": 30.875,
"alnum_prop": 0.5951417004048583,
"repo_name": "FRidh/Sea",
"id": "52090d57ef636257f542683ccea71818a2735abf",
"size": "494",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "220852"
},
{
"name": "Shell",
"bytes": "5106"
}
],
"symlink_target": ""
} |
"""
=====================================
Blind source separation using FastICA
=====================================
An example of estimating sources from noisy data.
:ref:`ICA` is used to estimate sources given noisy measurements.
Imagine 3 instruments playing simultaneously and 3 microphones
recording the mixed signals. ICA is used to recover the sources
i.e. what is played by each instrument. Importantly, PCA fails
at recovering our `instruments` since the related signals reflect
non-Gaussian processes.
"""
print(__doc__)
import numpy as np
import pylab as pl
from scipy import signal
from sklearn.decomposition import FastICA, PCA
###############################################################################
# Generate sample data
np.random.seed(0)
n_samples = 2000
time = np.linspace(0, 8, n_samples)
s1 = np.sin(2 * time) # Signal 1 : sinusoidal signal
s2 = np.sign(np.sin(3 * time)) # Signal 2 : square signal
s3 = signal.sawtooth(2 * np.pi * time) # Signal 3: saw tooth signal
S = np.c_[s1, s2, s3]
S += 0.2 * np.random.normal(size=S.shape) # Add noise
S /= S.std(axis=0) # Standardize data
# Mix data
A = np.array([[1, 1, 1], [0.5, 2, 1.0], [1.5, 1.0, 2.0]]) # Mixing matrix
X = np.dot(S, A.T) # Generate observations
# Compute ICA
ica = FastICA(n_components=3)
S_ = ica.fit_transform(X) # Reconstruct signals
A_ = ica.mixing_ # Get estimated mixing matrix
# We can `prove` that the ICA model applies by reverting the unmixing.
assert np.allclose(X, np.dot(S_, A_.T) + ica.mean_)
# For comparison, compute PCA
pca = PCA(n_components=3)
H = pca.fit_transform(X) # Reconstruct signals based on orthogonal components
###############################################################################
# Plot results
pl.figure()
models = [X, S, S_, H]
names = ['Observations (mixed signal)',
'True Sources',
'ICA recovered signals',
'PCA recovered signals']
colors = ['red', 'steelblue', 'orange']
for ii, (model, name) in enumerate(zip(models, names), 1):
pl.subplot(4, 1, ii)
pl.title(name)
for sig, color in zip(model.T, colors):
pl.plot(sig, color=color)
pl.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.46)
pl.show()
| {
"content_hash": "bead22a9cec3e9d0663d891de4814b1e",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 79,
"avg_line_length": 30.26027397260274,
"alnum_prop": 0.6115889542779538,
"repo_name": "treycausey/scikit-learn",
"id": "4ac3b60b9e43db9b05bf90c512209aad095038ca",
"size": "2209",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "examples/decomposition/plot_ica_blind_source_separation.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "18150950"
},
{
"name": "C++",
"bytes": "1807769"
},
{
"name": "JavaScript",
"bytes": "20564"
},
{
"name": "Python",
"bytes": "5083789"
},
{
"name": "Shell",
"bytes": "3768"
}
],
"symlink_target": ""
} |
import itertools
import sys
import mock
from neutronclient.neutron.v2_0 import port
from neutronclient import shell
from neutronclient.tests.unit import test_cli20
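# These tests drive the CLI commands against a mocked HTTP layer provided by
# test_cli20; position_names/position_values describe the request-body fields
# the parsed arguments are expected to produce.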
class CLITestV20PortJSON(test_cli20.CLITestV20Base):
def setUp(self):
super(CLITestV20PortJSON, self).setUp(plurals={'tags': 'tag'})
def test_create_port(self):
# Create port: netid.
resource = 'port'
cmd = port.CreatePort(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
args = [netid, '--description', 'DESC']
position_names = ['network_id']
position_values = []
position_values.extend([netid])
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values,
description='DESC')
def test_create_port_extra_dhcp_opts_args(self):
# Create port: netid --extra_dhcp_opt.
resource = 'port'
cmd = port.CreatePort(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
extra_dhcp_opts = [{'opt_name': 'bootfile-name',
'opt_value': 'pxelinux.0'},
{'opt_name': 'tftp-server',
'opt_value': '123.123.123.123'},
{'opt_name': 'server-ip-address',
'opt_value': '123.123.123.45'}]
args = [netid]
for dhcp_opt in extra_dhcp_opts:
args += ['--extra-dhcp-opt',
('opt_name=%(opt_name)s,opt_value=%(opt_value)s' %
dhcp_opt)]
position_names = ['network_id', 'extra_dhcp_opts']
position_values = [netid, extra_dhcp_opts]
position_values.extend([netid])
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_create_port_extra_dhcp_opts_args_ip_version(self):
# Create port: netid --extra_dhcp_opt.
resource = 'port'
cmd = port.CreatePort(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
extra_dhcp_opts = [{'opt_name': 'bootfile-name',
'opt_value': 'pxelinux.0',
'ip_version': "4"},
{'opt_name': 'tftp-server',
'opt_value': '2001:192:168::1',
'ip_version': "6"},
{'opt_name': 'server-ip-address',
'opt_value': '123.123.123.45',
'ip_version': "4"}]
args = [netid]
for dhcp_opt in extra_dhcp_opts:
args += ['--extra-dhcp-opt',
('opt_name=%(opt_name)s,opt_value=%(opt_value)s,'
'ip_version=%(ip_version)s' %
dhcp_opt)]
position_names = ['network_id', 'extra_dhcp_opts']
position_values = [netid, extra_dhcp_opts]
position_values.extend([netid])
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_create_port_full(self):
# Create port: --mac_address mac --device_id deviceid netid.
resource = 'port'
cmd = port.CreatePort(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
args = ['--mac_address', 'mac', '--device_id', 'deviceid', netid]
position_names = ['network_id', 'mac_address', 'device_id']
position_values = [netid, 'mac', 'deviceid']
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
# Test dashed options
args = ['--mac-address', 'mac', '--device-id', 'deviceid', netid]
position_names = ['network_id', 'mac_address', 'device_id']
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_create_port_vnic_type_normal(self):
# Create port: --vnic_type normal netid.
resource = 'port'
cmd = port.CreatePort(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
args = ['--vnic_type', 'normal', netid]
position_names = ['binding:vnic_type', 'network_id']
position_values = ['normal', netid]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
# Test dashed options
args = ['--vnic-type', 'normal', netid]
position_names = ['binding:vnic_type', 'network_id']
position_values = ['normal', netid]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_create_port_vnic_type_direct(self):
# Create port: --vnic_type direct netid.
resource = 'port'
cmd = port.CreatePort(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
args = ['--vnic_type', 'direct', netid]
position_names = ['binding:vnic_type', 'network_id']
position_values = ['direct', netid]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
# Test dashed options
args = ['--vnic-type', 'direct', netid]
position_names = ['binding:vnic_type', 'network_id']
position_values = ['direct', netid]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_create_port_vnic_type_direct_physical(self):
# Create port: --vnic_type direct-physical netid.
resource = 'port'
cmd = port.CreatePort(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
args = ['--vnic_type', 'direct-physical', netid]
position_names = ['binding:vnic_type', 'network_id']
position_values = ['direct-physical', netid]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
# Test dashed options
args = ['--vnic-type', 'direct-physical', netid]
position_names = ['binding:vnic_type', 'network_id']
position_values = ['direct-physical', netid]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_create_port_vnic_type_macvtap(self):
# Create port: --vnic_type macvtap netid.
resource = 'port'
cmd = port.CreatePort(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
args = ['--vnic_type', 'macvtap', netid]
position_names = ['binding:vnic_type', 'network_id']
position_values = ['macvtap', netid]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
# Test dashed options
args = ['--vnic-type', 'macvtap', netid]
position_names = ['binding:vnic_type', 'network_id']
position_values = ['macvtap', netid]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_create_port_vnic_type_baremetal(self):
# Create port: --vnic_type baremetal netid.
resource = 'port'
cmd = port.CreatePort(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
args = ['--vnic_type', 'baremetal', netid]
position_names = ['binding:vnic_type', 'network_id']
position_values = ['baremetal', netid]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
# Test dashed options
args = ['--vnic-type', 'baremetal', netid]
position_names = ['binding:vnic_type', 'network_id']
position_values = ['baremetal', netid]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_create_port_with_binding_profile(self):
resource = 'port'
cmd = port.CreatePort(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
args = ['--binding_profile', '{"foo":"bar"}', netid]
position_names = ['binding:profile', 'network_id']
position_values = [{'foo': 'bar'}, netid]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
# Test dashed options
args = ['--binding-profile', '{"foo":"bar"}', netid]
position_names = ['binding:profile', 'network_id']
position_values = [{'foo': 'bar'}, netid]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_create_port_tenant(self):
# Create port: --tenant_id tenantid netid.
resource = 'port'
cmd = port.CreatePort(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
args = ['--tenant_id', 'tenantid', netid, ]
position_names = ['network_id']
position_values = []
position_values.extend([netid])
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values,
tenant_id='tenantid')
# Test dashed options
args = ['--tenant-id', 'tenantid', netid, ]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values,
tenant_id='tenantid')
def test_create_port_tags(self):
# Create port: netid mac_address device_id --tags a b.
resource = 'port'
cmd = port.CreatePort(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
args = [netid, '--tags', 'a', 'b']
position_names = ['network_id']
position_values = []
position_values.extend([netid])
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values,
tags=['a', 'b'])
def test_create_port_secgroup(self):
# Create port: --security-group sg1_id netid.
resource = 'port'
cmd = port.CreatePort(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
args = ['--security-group', 'sg1_id', netid]
position_names = ['network_id', 'security_groups']
position_values = [netid, ['sg1_id']]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_create_port_secgroups(self):
# Create port: <security_groups> netid
# The <security_groups> are --security-group sg1_id
# --security-group sg2_id
resource = 'port'
cmd = port.CreatePort(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
args = ['--security-group', 'sg1_id',
'--security-group', 'sg2_id',
netid]
position_names = ['network_id', 'security_groups']
position_values = [netid, ['sg1_id', 'sg2_id']]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_create_port_secgroup_off(self):
resource = 'port'
cmd = port.CreatePort(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
args = ['--no-security-groups', netid]
position_names = ['network_id', 'security_groups']
position_values = [netid, []]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_create_port_secgroups_list(self):
# Create port: netid <security_groups>
# The <security_groups> are --security-groups list=true sg_id1 sg_id2
resource = 'port'
cmd = port.CreatePort(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
args = [netid, '--security-groups', 'list=true', 'sg_id1', 'sg_id2']
position_names = ['network_id', 'security_groups']
position_values = [netid, ['sg_id1', 'sg_id2']]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_create_port_with_qos_policy(self):
# Create port: --qos-policy mypolicy.
resource = 'port'
cmd = port.CreatePort(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
qos_policy_name = 'mypolicy'
args = [netid, '--qos-policy', qos_policy_name]
position_names = ['network_id', 'qos_policy_id']
position_values = [netid, qos_policy_name]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_create_port_with_dns_name(self):
# Create port: --dns-name my-port.
resource = 'port'
cmd = port.CreatePort(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
dns_name_name = 'my-port'
args = [netid, '--dns-name', dns_name_name]
position_names = ['network_id', 'dns_name']
position_values = [netid, dns_name_name]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_create_port_with_allowed_address_pair_ipaddr(self):
# Create port:
# --allowed-address-pair ip_address=addr0
# --allowed-address-pair ip_address=addr1
resource = 'port'
cmd = port.CreatePort(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
pairs = [{'ip_address': '123.123.123.123'},
{'ip_address': '123.123.123.45'}]
args = [netid,
'--allowed-address-pair',
'ip_address=123.123.123.123',
'--allowed-address-pair',
'ip_address=123.123.123.45']
position_names = ['network_id', 'allowed_address_pairs']
position_values = [netid, pairs]
position_values.extend([netid])
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_create_port_with_allowed_address_pair(self):
# Create port:
# --allowed-address-pair ip_address=addr0,mac_address=mac0
# --allowed-address-pair ip_address=addr1,mac_address=mac1
resource = 'port'
cmd = port.CreatePort(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
pairs = [{'ip_address': '123.123.123.123',
'mac_address': '10:00:00:00:00:00'},
{'ip_address': '123.123.123.45',
'mac_address': '10:00:00:00:00:01'}]
args = [netid,
'--allowed-address-pair',
'ip_address=123.123.123.123,mac_address=10:00:00:00:00:00',
'--allowed-address-pair',
'ip_address=123.123.123.45,mac_address=10:00:00:00:00:01']
position_names = ['network_id', 'allowed_address_pairs']
position_values = [netid, pairs]
position_values.extend([netid])
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_list_ports(self):
# List ports: -D.
resources = "ports"
cmd = port.ListPort(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd, True)
def test_list_ports_pagination(self):
resources = "ports"
cmd = port.ListPort(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources_with_pagination(resources, cmd)
def test_list_ports_sort(self):
# list ports:
# --sort-key name --sort-key id --sort-key asc --sort-key desc
resources = "ports"
cmd = port.ListPort(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd,
sort_key=["name", "id"],
sort_dir=["asc", "desc"])
def test_list_ports_limit(self):
# list ports: -P.
resources = "ports"
cmd = port.ListPort(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd, page_size=1000)
def test_list_ports_tags(self):
# List ports: -- --tags a b.
resources = "ports"
cmd = port.ListPort(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd, tags=['a', 'b'])
def test_list_ports_detail_tags(self):
# List ports: -D -- --tags a b.
resources = "ports"
cmd = port.ListPort(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd, detail=True, tags=['a', 'b'])
def test_list_ports_fields(self):
# List ports: --fields a --fields b -- --fields c d.
resources = "ports"
cmd = port.ListPort(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd,
fields_1=['a', 'b'], fields_2=['c', 'd'])
def test_list_ports_with_fixed_ips_in_csv(self):
# List ports: -f csv.
resources = "ports"
cmd = port.ListPort(test_cli20.MyApp(sys.stdout), None)
fixed_ips = [{"subnet_id": "30422057-d6df-4c90-8314-aefb5e326666",
"ip_address": "10.0.0.12"},
{"subnet_id": "30422057-d6df-4c90-8314-aefb5e326666",
"ip_address": "10.0.0.4"}]
contents = [{'name': 'name1', 'fixed_ips': fixed_ips}]
self._test_list_resources(resources, cmd, True,
response_contents=contents,
output_format='csv')
def _test_list_router_port(self, resources, cmd,
myid, detail=False, tags=(),
fields_1=(), fields_2=()):
reses = {resources: [{'id': 'myid1', },
{'id': 'myid2', }, ], }
resstr = self.client.serialize(reses)
# url method body
query = ""
args = detail and ['-D', ] or []
if fields_1:
for field in fields_1:
args.append('--fields')
args.append(field)
args.append(myid)
if tags:
args.append('--')
args.append("--tag")
for tag in tags:
args.append(tag)
if (not tags) and fields_2:
args.append('--')
if fields_2:
args.append("--fields")
for field in fields_2:
args.append(field)
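        # assemble the expected query string: fields first, then tags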
for field in itertools.chain(fields_1, fields_2):
if query:
query += "&fields=" + field
else:
query = "fields=" + field
for tag in tags:
if query:
query += "&tag=" + tag
else:
query = "tag=" + tag
if detail:
query = query and query + '&verbose=True' or 'verbose=True'
query = query and query + '&device_id=%s' or 'device_id=%s'
path = getattr(self.client, resources + "_path")
with mock.patch.object(cmd, "get_client",
return_value=self.client) as mock_get_client, \
mock.patch.object(self.client.httpclient, "request",
return_value=(test_cli20.MyResp(200),
resstr)) as mock_request:
cmd_parser = cmd.get_parser("list_" + resources)
shell.run_command(cmd, cmd_parser, args)
self.assert_mock_multiple_calls_with_same_arguments(
mock_get_client, mock.call(), 2)
mock_request.assert_called_once_with(
test_cli20.MyUrlComparator(
test_cli20.end_url(path, query % myid), self.client),
'GET',
body=None,
headers=test_cli20.ContainsKeyValue(
{'X-Auth-Token': test_cli20.TOKEN}))
_str = self.fake_stdout.make_string()
self.assertIn('myid1', _str)
def test_list_router_ports(self):
# List router ports: -D.
resources = "ports"
cmd = port.ListRouterPort(test_cli20.MyApp(sys.stdout), None)
self._test_list_router_port(resources, cmd,
self.test_id, True)
def test_list_router_ports_tags(self):
# List router ports: -- --tags a b.
resources = "ports"
cmd = port.ListRouterPort(test_cli20.MyApp(sys.stdout), None)
self._test_list_router_port(resources, cmd,
self.test_id, tags=['a', 'b'])
def test_list_router_ports_detail_tags(self):
# List router ports: -D -- --tags a b.
resources = "ports"
cmd = port.ListRouterPort(test_cli20.MyApp(sys.stdout), None)
self._test_list_router_port(resources, cmd, self.test_id,
detail=True, tags=['a', 'b'])
def test_list_router_ports_fields(self):
# List ports: --fields a --fields b -- --fields c d.
resources = "ports"
cmd = port.ListRouterPort(test_cli20.MyApp(sys.stdout), None)
self._test_list_router_port(resources, cmd, self.test_id,
fields_1=['a', 'b'],
fields_2=['c', 'd'])
def test_update_port(self):
# Update port: myid --name myname --admin-state-up False --tags a b.
resource = 'port'
cmd = port.UpdatePort(test_cli20.MyApp(sys.stdout), None)
self._test_update_resource(resource, cmd, 'myid',
['myid', '--name', 'myname',
'--admin-state-up', 'False',
'--description', 'garbage',
'--tags', 'a', 'b'],
{'name': 'myname',
'admin_state_up': 'False',
'description': 'garbage',
'tags': ['a', 'b'], })
def test_update_port_secgroup(self):
resource = 'port'
cmd = port.UpdatePort(test_cli20.MyApp(sys.stdout), None)
myid = 'myid'
args = ['--security-group', 'sg1_id', myid]
updatefields = {'security_groups': ['sg1_id']}
self._test_update_resource(resource, cmd, myid, args, updatefields)
def test_update_port_secgroups(self):
resource = 'port'
cmd = port.UpdatePort(test_cli20.MyApp(sys.stdout), None)
myid = 'myid'
args = ['--security-group', 'sg1_id',
'--security-group', 'sg2_id',
myid]
updatefields = {'security_groups': ['sg1_id', 'sg2_id']}
self._test_update_resource(resource, cmd, myid, args, updatefields)
def test_update_port_extra_dhcp_opts(self):
# Update port: myid --extra_dhcp_opt.
resource = 'port'
myid = 'myid'
args = [myid,
'--extra-dhcp-opt',
"opt_name=bootfile-name,opt_value=pxelinux.0",
'--extra-dhcp-opt',
"opt_name=tftp-server,opt_value=123.123.123.123",
'--extra-dhcp-opt',
"opt_name=server-ip-address,opt_value=123.123.123.45"
]
updatedfields = {'extra_dhcp_opts': [{'opt_name': 'bootfile-name',
'opt_value': 'pxelinux.0'},
{'opt_name': 'tftp-server',
'opt_value': '123.123.123.123'},
{'opt_name': 'server-ip-address',
'opt_value': '123.123.123.45'}]}
cmd = port.UpdatePort(test_cli20.MyApp(sys.stdout), None)
self._test_update_resource(resource, cmd, myid, args, updatedfields)
def test_update_port_fixed_ip(self):
resource = 'port'
cmd = port.UpdatePort(test_cli20.MyApp(sys.stdout), None)
myid = 'myid'
subnet_id = 'subnet_id'
ip_addr = '123.123.123.123'
args = [myid,
'--fixed-ip',
"subnet_id=%(subnet_id)s,ip_address=%(ip_addr)s" %
{'subnet_id': subnet_id,
'ip_addr': ip_addr}]
updated_fields = {"fixed_ips": [{'subnet_id': subnet_id,
'ip_address': ip_addr}]}
self._test_update_resource(resource, cmd, myid, args, updated_fields)
def test_update_port_device_id_device_owner(self):
resource = 'port'
cmd = port.UpdatePort(test_cli20.MyApp(sys.stdout), None)
myid = 'myid'
args = ['--device-id', 'dev_id', '--device-owner', 'fake', myid]
updatefields = {'device_id': 'dev_id',
'device_owner': 'fake'}
self._test_update_resource(resource, cmd, myid, args, updatefields)
def test_update_port_extra_dhcp_opts_ip_version(self):
# Update port: myid --extra_dhcp_opt.
resource = 'port'
myid = 'myid'
args = [myid,
'--extra-dhcp-opt',
"opt_name=bootfile-name,opt_value=pxelinux.0,ip_version=4",
'--extra-dhcp-opt',
"opt_name=tftp-server,opt_value=2001:192:168::1,ip_version=6",
'--extra-dhcp-opt',
"opt_name=server-ip-address,opt_value=null,ip_version=4"
]
updatedfields = {'extra_dhcp_opts': [{'opt_name': 'bootfile-name',
'opt_value': 'pxelinux.0',
'ip_version': '4'},
{'opt_name': 'tftp-server',
'opt_value': '2001:192:168::1',
'ip_version': '6'},
{'opt_name': 'server-ip-address',
'opt_value': None,
'ip_version': '4'}]}
cmd = port.UpdatePort(test_cli20.MyApp(sys.stdout), None)
self._test_update_resource(resource, cmd, myid, args, updatedfields)
def test_update_port_with_qos_policy(self):
# Update port: myid --qos-policy mypolicy.
resource = 'port'
cmd = port.UpdatePort(test_cli20.MyApp(sys.stdout), None)
self._test_update_resource(resource, cmd, 'myid',
['myid', '--qos-policy', 'mypolicy'],
{'qos_policy_id': 'mypolicy', })
def test_update_port_with_no_qos_policy(self):
# Update port: myid --no-qos-policy.
resource = 'port'
cmd = port.UpdatePort(test_cli20.MyApp(sys.stdout), None)
self._test_update_resource(resource, cmd, 'myid',
['myid', '--no-qos-policy'],
{'qos_policy_id': None, })
def test_update_port_with_dns_name(self):
# Update port: myid --dns-name my-port.
resource = 'port'
cmd = port.UpdatePort(test_cli20.MyApp(sys.stdout), None)
self._test_update_resource(resource, cmd, 'myid',
['myid', '--dns-name', 'my-port'],
{'dns_name': 'my-port', })
def test_update_port_with_no_dns_name(self):
# Update port: myid --no-dns-name
resource = 'port'
cmd = port.UpdatePort(test_cli20.MyApp(sys.stdout), None)
self._test_update_resource(resource, cmd, 'myid',
['myid', '--no-dns-name'],
{'dns_name': "", })
def test_delete_extra_dhcp_opts_from_port(self):
resource = 'port'
myid = 'myid'
args = [myid,
'--extra-dhcp-opt',
"opt_name=bootfile-name,opt_value=null",
'--extra-dhcp-opt',
"opt_name=tftp-server,opt_value=123.123.123.123",
'--extra-dhcp-opt',
"opt_name=server-ip-address,opt_value=123.123.123.45"
]
# the client code will change the null to None and send it to the server,
# where it's interpreted as deleting the DHCP option on the port.
updatedfields = {'extra_dhcp_opts': [{'opt_name': 'bootfile-name',
'opt_value': None},
{'opt_name': 'tftp-server',
'opt_value': '123.123.123.123'},
{'opt_name': 'server-ip-address',
'opt_value': '123.123.123.45'}]}
cmd = port.UpdatePort(test_cli20.MyApp(sys.stdout), None)
self._test_update_resource(resource, cmd, myid, args, updatedfields)
def test_update_port_security_group_off(self):
# Update port: --no-security-groups myid.
resource = 'port'
cmd = port.UpdatePort(test_cli20.MyApp(sys.stdout), None)
self._test_update_resource(resource, cmd, 'myid',
['--no-security-groups', 'myid'],
{'security_groups': []})
def test_update_port_allowed_address_pair_ipaddr(self):
# Update port(ip_address only):
# --allowed-address-pairs ip_address=addr0
# --allowed-address-pairs ip_address=addr1
resource = 'port'
cmd = port.UpdatePort(test_cli20.MyApp(sys.stdout), None)
myid = 'myid'
pairs = [{'ip_address': '123.123.123.123'},
{'ip_address': '123.123.123.45'}]
args = [myid,
'--allowed-address-pair',
'ip_address=123.123.123.123',
'--allowed-address-pair',
'ip_address=123.123.123.45']
updatefields = {'allowed_address_pairs': pairs}
self._test_update_resource(resource, cmd, myid, args, updatefields)
def test_update_port_allowed_address_pair(self):
# Update port:
# --allowed-address-pair ip_address=addr0,mac_address=mac0
# --allowed-address-pair ip_address=addr1,mac_address=mac1
resource = 'port'
cmd = port.UpdatePort(test_cli20.MyApp(sys.stdout), None)
myid = 'myid'
pairs = [{'ip_address': '123.123.123.123',
'mac_address': '10:00:00:00:00:00'},
{'ip_address': '123.123.123.45',
'mac_address': '10:00:00:00:00:01'}]
args = [myid,
'--allowed-address-pair',
'ip_address=123.123.123.123,mac_address=10:00:00:00:00:00',
'--allowed-address-pair',
'ip_address=123.123.123.45,mac_address=10:00:00:00:00:01']
updatefields = {'allowed_address_pairs': pairs}
self._test_update_resource(resource, cmd, myid, args, updatefields)
def test_update_port_allowed_address_pairs_off(self):
# Update port: --no-allowed-address-pairs.
resource = 'port'
cmd = port.UpdatePort(test_cli20.MyApp(sys.stdout), None)
self._test_update_resource(resource, cmd, 'myid',
['--no-allowed-address-pairs', 'myid'],
{'allowed_address_pairs': []})
def test_show_port(self):
# Show port: --fields id --fields name myid.
resource = 'port'
cmd = port.ShowPort(test_cli20.MyApp(sys.stdout), None)
args = ['--fields', 'id', '--fields', 'name', self.test_id]
self._test_show_resource(resource, cmd, self.test_id,
args, ['id', 'name'])
def test_delete_port(self):
# Delete port: myid.
resource = 'port'
cmd = port.DeletePort(test_cli20.MyApp(sys.stdout), None)
myid = 'myid'
args = [myid]
self._test_delete_resource(resource, cmd, myid, args)
| {
"content_hash": "bc4954724a902dce7f19c1e59821b2e9",
"timestamp": "",
"source": "github",
"line_count": 758,
"max_line_length": 79,
"avg_line_length": 44.233509234828496,
"alnum_prop": 0.5154344000715798,
"repo_name": "noironetworks/python-neutronclient",
"id": "f6fc6178312afda6bc1da0c3918b39e6bbfd8b3c",
"size": "34167",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neutronclient/tests/unit/test_cli20_port.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1738805"
},
{
"name": "Shell",
"bytes": "10126"
}
],
"symlink_target": ""
} |
from flask import Flask
import twilio.twiml
app = Flask(__name__)
@app.route("/enqueue_call", methods=['GET', 'POST'])
def enqueue_call():
resp = twilio.twiml.Response()
resp.enqueue(None, workflowSid="WW0123456789abcdef0123456789abcdef")
return str(resp)
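# For illustration (assumed rendering, not from the source), the handler
# returns TwiML along the lines of:
# <Response><Enqueue workflowSid="WW0123456789abcdef0123456789abcdef"/></Response>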
| {
"content_hash": "601095a01f4af00eaf99c33f68d75783",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 78,
"avg_line_length": 25.545454545454547,
"alnum_prop": 0.697508896797153,
"repo_name": "teoreteetik/api-snippets",
"id": "acea915456b8d809aecb5532d99d3f4f27734ee9",
"size": "354",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rest/taskrouter/twiml/example1/example/example.5.x.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "643369"
},
{
"name": "HTML",
"bytes": "335"
},
{
"name": "Java",
"bytes": "943336"
},
{
"name": "JavaScript",
"bytes": "539577"
},
{
"name": "M",
"bytes": "117"
},
{
"name": "Mathematica",
"bytes": "93"
},
{
"name": "Objective-C",
"bytes": "46198"
},
{
"name": "PHP",
"bytes": "538312"
},
{
"name": "Python",
"bytes": "467248"
},
{
"name": "Ruby",
"bytes": "470316"
},
{
"name": "Shell",
"bytes": "1564"
},
{
"name": "Swift",
"bytes": "36563"
}
],
"symlink_target": ""
} |
import contextlib
import datetime
import functools
import logging
import os
import shutil
import tempfile
import threading
from devil import base_error
from devil.android import device_blacklist
from devil.android import device_errors
from devil.android import device_list
from devil.android import device_utils
from devil.android import logcat_monitor
from devil.utils import file_utils
from devil.utils import parallelizer
from pylib import constants
from pylib.base import environment
from py_trace_event import trace_event
from tracing_build import trace2html
def _DeviceCachePath(device):
file_name = 'device_cache_%s.json' % device.adb.GetDeviceSerial()
return os.path.join(constants.GetOutDirectory(), file_name)
def handle_shard_failures(f):
"""A decorator that handles device failures for per-device functions.
Args:
f: the function being decorated. The function must take at least one
argument, and that argument must be the device.
"""
return handle_shard_failures_with(None)(f)
# TODO(jbudorick): Refactor this to work as a decorator or context manager.
def handle_shard_failures_with(on_failure):
"""A decorator that handles device failures for per-device functions.
This calls on_failure in the event of a failure.
Args:
f: the function being decorated. The function must take at least one
argument, and that argument must be the device.
on_failure: A binary function to call on failure.
"""
def decorator(f):
@functools.wraps(f)
def wrapper(dev, *args, **kwargs):
try:
return f(dev, *args, **kwargs)
except device_errors.CommandTimeoutError:
logging.exception('Shard timed out: %s(%s)', f.__name__, str(dev))
except device_errors.DeviceUnreachableError:
logging.exception('Shard died: %s(%s)', f.__name__, str(dev))
except base_error.BaseError:
logging.exception('Shard failed: %s(%s)', f.__name__, str(dev))
except SystemExit:
logging.exception('Shard killed: %s(%s)', f.__name__, str(dev))
raise
if on_failure:
on_failure(dev, f.__name__)
return None
return wrapper
return decorator
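# A minimal usage sketch (hypothetical function names, not part of this
# module): decorate a per-device function so a failing device is logged and
# skipped instead of aborting the whole run.
#
# def _on_failure(dev, fname):
#     logging.error('%s failed on %s', fname, dev)
#
# @handle_shard_failures_with(on_failure=_on_failure)
# def reboot_device(dev):
#     dev.Reboot()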
class LocalDeviceEnvironment(environment.Environment):
def __init__(self, args, _error_func):
super(LocalDeviceEnvironment, self).__init__()
self._blacklist = (device_blacklist.Blacklist(args.blacklist_file)
if args.blacklist_file
else None)
self._device_serial = args.test_device
self._devices_lock = threading.Lock()
self._devices = None
self._concurrent_adb = args.enable_concurrent_adb
self._enable_device_cache = args.enable_device_cache
self._logcat_monitors = []
self._logcat_output_dir = args.logcat_output_dir
self._logcat_output_file = args.logcat_output_file
self._max_tries = 1 + args.num_retries
self._skip_clear_data = args.skip_clear_data
self._target_devices_file = args.target_devices_file
self._tool_name = args.tool
self._trace_output = args.trace_output
#override
def SetUp(self):
pass
def _InitDevices(self):
device_arg = 'default'
if self._target_devices_file:
device_arg = device_list.GetPersistentDeviceList(
self._target_devices_file)
if not device_arg:
logging.warning('No target devices specified. Falling back to '
'running on all available devices.')
device_arg = 'default'
else:
logging.info(
'Read device list %s from target devices file.', str(device_arg))
elif self._device_serial:
device_arg = self._device_serial
self._devices = device_utils.DeviceUtils.HealthyDevices(
self._blacklist, enable_device_files_cache=self._enable_device_cache,
default_retries=self._max_tries - 1, device_arg=device_arg)
if not self._devices:
raise device_errors.NoDevicesError
if self._logcat_output_file:
self._logcat_output_dir = tempfile.mkdtemp()
@handle_shard_failures_with(on_failure=self.BlacklistDevice)
def prepare_device(d):
d.WaitUntilFullyBooted(timeout=10)
if self._enable_device_cache:
cache_path = _DeviceCachePath(d)
if os.path.exists(cache_path):
logging.info('Using device cache: %s', cache_path)
with open(cache_path) as f:
d.LoadCacheData(f.read())
# Delete cached file so that any exceptions cause it to be cleared.
os.unlink(cache_path)
if self._logcat_output_dir:
logcat_file = os.path.join(
self._logcat_output_dir,
'%s_%s' % (d.adb.GetDeviceSerial(),
datetime.datetime.utcnow().strftime('%Y%m%dT%H%M%S')))
monitor = logcat_monitor.LogcatMonitor(
d.adb, clear=True, output_file=logcat_file)
self._logcat_monitors.append(monitor)
monitor.Start()
self.parallel_devices.pMap(prepare_device)
@staticmethod
def _JsonToTrace(json_path, html_path, delete_json=True):
# First argument is call site.
cmd = [__file__, json_path, '--title', 'Android Test Runner Trace',
'--output', html_path]
trace2html.Main(cmd)
if delete_json:
os.remove(json_path)
@property
def blacklist(self):
return self._blacklist
@property
def concurrent_adb(self):
return self._concurrent_adb
@property
def devices(self):
# Initialize lazily so that host-only tests do not fail when no devices are
# attached.
if self._devices is None:
self._InitDevices()
if not self._devices:
raise device_errors.NoDevicesError()
return self._devices
@property
def max_tries(self):
return self._max_tries
@property
def parallel_devices(self):
return parallelizer.SyncParallelizer(self.devices)
@property
def skip_clear_data(self):
return self._skip_clear_data
@property
def tool(self):
return self._tool_name
@property
def trace_output(self):
return self._trace_output
#override
def TearDown(self):
if self._devices is None:
return
@handle_shard_failures_with(on_failure=self.BlacklistDevice)
def tear_down_device(d):
# Write the cache even when not using it so that it will be ready the
# first time that it is enabled. Writing it every time is also necessary
# so that an invalid cache can be flushed just by disabling it for one
# run.
cache_path = _DeviceCachePath(d)
if os.path.exists(os.path.dirname(cache_path)):
with open(cache_path, 'w') as f:
f.write(d.DumpCacheData())
logging.info('Wrote device cache: %s', cache_path)
else:
logging.warning(
'Unable to write device cache as %s directory does not exist',
os.path.dirname(cache_path))
self.parallel_devices.pMap(tear_down_device)
for m in self._logcat_monitors:
try:
m.Stop()
m.Close()
_, temp_path = tempfile.mkstemp()
with open(m.output_file, 'r') as infile:
with open(temp_path, 'w') as outfile:
for line in infile:
outfile.write('Device(%s) %s' % (m.adb.GetDeviceSerial(), line))
shutil.move(temp_path, m.output_file)
except base_error.BaseError:
logging.exception('Failed to stop logcat monitor for %s',
m.adb.GetDeviceSerial())
except IOError:
logging.exception('Failed to locate logcat for device %s',
m.adb.GetDeviceSerial())
if self._logcat_output_file:
file_utils.MergeFiles(
self._logcat_output_file,
[m.output_file for m in self._logcat_monitors
if os.path.exists(m.output_file)])
shutil.rmtree(self._logcat_output_dir)
def BlacklistDevice(self, device, reason='local_device_failure'):
device_serial = device.adb.GetDeviceSerial()
if self._blacklist:
self._blacklist.Extend([device_serial], reason=reason)
with self._devices_lock:
self._devices = [d for d in self._devices if str(d) != device_serial]
def DisableTracing(self):
if not trace_event.trace_is_enabled():
logging.warning('Tracing is not running.')
else:
trace_event.trace_disable()
self._JsonToTrace(self._trace_output + '.json',
self._trace_output)
def EnableTracing(self):
if trace_event.trace_is_enabled():
logging.warning('Tracing is already running.')
else:
trace_event.trace_enable(self._trace_output + '.json')
@contextlib.contextmanager
def Tracing(self):
try:
self.EnableTracing()
yield
finally:
self.DisableTracing()
| {
"content_hash": "4b825994a2b41dca1c348d1094079afa",
"timestamp": "",
"source": "github",
"line_count": 267,
"max_line_length": 79,
"avg_line_length": 32.57303370786517,
"alnum_prop": 0.656203288490284,
"repo_name": "google-ar/WebARonARCore",
"id": "2aaf88b20a897228fa67d0086d7ce4d8135a27e6",
"size": "8860",
"binary": false,
"copies": "1",
"ref": "refs/heads/webarcore_57.0.2987.5",
"path": "build/android/pylib/local/device/local_device_environment.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
"""
Support for Clementine Music Player as media player.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.clementine/
"""
from datetime import timedelta
import logging
import time
import voluptuous as vol
from homeassistant.components.media_player import (
MEDIA_TYPE_MUSIC, PLATFORM_SCHEMA, SUPPORT_NEXT_TRACK, SUPPORT_PAUSE,
SUPPORT_PLAY, SUPPORT_PREVIOUS_TRACK, SUPPORT_SELECT_SOURCE,
SUPPORT_VOLUME_SET, SUPPORT_VOLUME_STEP, MediaPlayerDevice)
from homeassistant.const import (
CONF_ACCESS_TOKEN, CONF_HOST, CONF_NAME, CONF_PORT, STATE_OFF,
STATE_PAUSED, STATE_PLAYING)
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['python-clementine-remote==1.0.1']
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = 'Clementine Remote'
DEFAULT_PORT = 5500
SCAN_INTERVAL = timedelta(seconds=5)
SUPPORT_CLEMENTINE = SUPPORT_PAUSE | SUPPORT_VOLUME_STEP | \
SUPPORT_PREVIOUS_TRACK | SUPPORT_VOLUME_SET | \
SUPPORT_NEXT_TRACK | \
SUPPORT_SELECT_SOURCE | SUPPORT_PLAY
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_ACCESS_TOKEN): cv.positive_int,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
})
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Clementine platform."""
from clementineremote import ClementineRemote
host = config.get(CONF_HOST)
port = config.get(CONF_PORT)
token = config.get(CONF_ACCESS_TOKEN)
client = ClementineRemote(host, port, token, reconnect=True)
add_entities([ClementineDevice(client, config[CONF_NAME])])
class ClementineDevice(MediaPlayerDevice):
"""Representation of Clementine Player."""
def __init__(self, client, name):
"""Initialize the Clementine device."""
self._client = client
self._name = name
self._muted = False
self._volume = 0.0
self._track_id = 0
self._last_track_id = 0
self._track_name = ''
self._track_artist = ''
self._track_album_name = ''
self._state = None
def update(self):
"""Retrieve the latest data from the Clementine Player."""
try:
client = self._client
if client.state == 'Playing':
self._state = STATE_PLAYING
elif client.state == 'Paused':
self._state = STATE_PAUSED
elif client.state == 'Disconnected':
self._state = STATE_OFF
else:
self._state = STATE_PAUSED
if client.last_update and (time.time() - client.last_update > 40):
self._state = STATE_OFF
self._volume = float(client.volume) if client.volume else 0.0
if client.current_track:
self._track_id = client.current_track['track_id']
self._track_name = client.current_track['title']
self._track_artist = client.current_track['track_artist']
self._track_album_name = client.current_track['track_album']
except Exception:
self._state = STATE_OFF
raise
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
return self._volume / 100.0
@property
def source(self):
"""Return current source name."""
source_name = "Unknown"
client = self._client
if client.active_playlist_id in client.playlists:
source_name = client.playlists[client.active_playlist_id]['name']
return source_name
@property
def source_list(self):
"""List of available input sources."""
source_names = [s["name"] for s in self._client.playlists.values()]
return source_names
def select_source(self, source):
"""Select input source."""
client = self._client
sources = [s for s in client.playlists.values() if s['name'] == source]
if len(sources) == 1:
client.change_song(sources[0]['id'], 0)
@property
def media_content_type(self):
"""Content type of current playing media."""
return MEDIA_TYPE_MUSIC
@property
def media_title(self):
"""Title of current playing media."""
return self._track_name
@property
def media_artist(self):
"""Artist of current playing media, music track only."""
return self._track_artist
@property
def media_album_name(self):
"""Album name of current playing media, music track only."""
return self._track_album_name
@property
def supported_features(self):
"""Flag media player features that are supported."""
return SUPPORT_CLEMENTINE
@property
def media_image_hash(self):
"""Hash value for media image."""
if self._client.current_track:
return self._client.current_track['track_id']
return None
async def async_get_media_image(self):
"""Fetch media image of current playing image."""
if self._client.current_track:
image = bytes(self._client.current_track['art'])
return (image, 'image/png')
return None, None
def volume_up(self):
"""Volume up the media player."""
newvolume = min(self._client.volume + 4, 100)
self._client.set_volume(newvolume)
def volume_down(self):
"""Volume down media player."""
newvolume = max(self._client.volume - 4, 0)
self._client.set_volume(newvolume)
def mute_volume(self, mute):
"""Send mute command."""
self._client.set_volume(0)
def set_volume_level(self, volume):
"""Set volume level."""
self._client.set_volume(int(100 * volume))
def media_play_pause(self):
"""Simulate play pause media player."""
if self._state == STATE_PLAYING:
self.media_pause()
else:
self.media_play()
def media_play(self):
"""Send play command."""
self._state = STATE_PLAYING
self._client.play()
def media_pause(self):
"""Send media pause command to media player."""
self._state = STATE_PAUSED
self._client.pause()
def media_next_track(self):
"""Send next track command."""
self._client.next()
def media_previous_track(self):
"""Send the previous track command."""
self._client.previous()
| {
"content_hash": "bf8155274f7abfc1332f7e7800220cef",
"timestamp": "",
"source": "github",
"line_count": 220,
"max_line_length": 79,
"avg_line_length": 31.181818181818183,
"alnum_prop": 0.6088921282798834,
"repo_name": "tinloaf/home-assistant",
"id": "2add2bd682a6044ff7dd086567377144e2c14837",
"size": "6860",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/media_player/clementine.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1099"
},
{
"name": "Python",
"bytes": "13135313"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17137"
}
],
"symlink_target": ""
} |
import thread # If this fails, we can't test this module
import asyncore, asynchat, socket, threading, time
HOST = "127.0.0.1"
PORT = 54321
class echo_server(threading.Thread):
def run(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind((HOST, PORT))
sock.listen(1)
conn, client = sock.accept()
buffer = ""
while "\n" not in buffer:
data = conn.recv(10)
if not data:
break
buffer = buffer + data
while buffer:
n = conn.send(buffer)
buffer = buffer[n:]
conn.close()
sock.close()
class echo_client(asynchat.async_chat):
def __init__(self):
asynchat.async_chat.__init__(self)
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.connect((HOST, PORT))
self.set_terminator("\n")
self.buffer = ""
def handle_connect(self):
print "Connected"
def collect_incoming_data(self, data):
self.buffer = self.buffer + data
def found_terminator(self):
print "Received:", repr(self.buffer)
self.buffer = ""
self.close()
def main():
s = echo_server()
s.start()
time.sleep(1) # Give server time to initialize
c = echo_client()
c.push("hello ")
c.push("world\n")
asyncore.loop()
main()
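# Expected console output for a successful round trip:
#   Connected
#   Received: 'hello world'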
| {
"content_hash": "54a1e9b4228a37da74a0ab69ea19f98b",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 66,
"avg_line_length": 25.839285714285715,
"alnum_prop": 0.572218382861092,
"repo_name": "loongson-community/EFI-MIPS",
"id": "e91c572162cb606b262d07c432543d3469789cab",
"size": "1486",
"binary": false,
"copies": "15",
"ref": "refs/heads/master",
"path": "ToolKit/cmds/python/Lib/test/skipped/test_asynchat.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "271282"
},
{
"name": "Batchfile",
"bytes": "318"
},
{
"name": "C",
"bytes": "32642014"
},
{
"name": "C++",
"bytes": "1058125"
},
{
"name": "CSS",
"bytes": "2547"
},
{
"name": "GAP",
"bytes": "111381"
},
{
"name": "Groff",
"bytes": "1245691"
},
{
"name": "HTML",
"bytes": "1328432"
},
{
"name": "Lex",
"bytes": "14559"
},
{
"name": "M",
"bytes": "748"
},
{
"name": "Makefile",
"bytes": "468567"
},
{
"name": "Mask",
"bytes": "3420"
},
{
"name": "NSIS",
"bytes": "8743"
},
{
"name": "Objective-C",
"bytes": "3415447"
},
{
"name": "Pascal",
"bytes": "3368"
},
{
"name": "Python",
"bytes": "7763565"
},
{
"name": "R",
"bytes": "546"
},
{
"name": "Shell",
"bytes": "10084"
},
{
"name": "Yacc",
"bytes": "30661"
}
],
"symlink_target": ""
} |
"""
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
from .mixins import Hashable
from .utils import snowflake_time, MISSING
from typing import (
SupportsInt,
TYPE_CHECKING,
Type,
Union,
)
if TYPE_CHECKING:
import datetime
from . import abc
SupportsIntCast = Union[SupportsInt, str, bytes, bytearray]
# fmt: off
__all__ = (
'Object',
)
# fmt: on
class Object(Hashable):
"""Represents a generic Discord object.
The purpose of this class is to allow you to create 'miniature'
versions of data classes if you want to pass in just an ID. Most functions
that take in a specific data class with an ID can also take in this class
as a substitute instead. Note that even though this is the case, not all
objects (if any) actually inherit from this class.
There are also some cases where some websocket events are received
in :issue:`strange order <21>` and when such events happened you would
receive this class rather than the actual data class. These cases are
extremely rare.
.. container:: operations
.. describe:: x == y
Checks if two objects are equal.
.. describe:: x != y
Checks if two objects are not equal.
.. describe:: hash(x)
Returns the object's hash.
Attributes
-----------
id: :class:`int`
The ID of the object.
type: Type[:class:`abc.Snowflake`]
The discord.py model type of the object, if not specified, defaults to this class.
.. note::
In instances where there are multiple applicable types, use a shared base class.
For example, both :class:`Member` and :class:`User` are subclasses of :class:`abc.User`.
.. versionadded:: 2.0
"""
def __init__(self, id: SupportsIntCast, *, type: Type[abc.Snowflake] = MISSING):
try:
id = int(id)
except ValueError:
raise TypeError(f'id parameter must be convertible to int not {id.__class__.__name__}') from None
self.id: int = id
self.type: Type[abc.Snowflake] = type or self.__class__
def __repr__(self) -> str:
return f'<Object id={self.id!r} type={self.type!r}>'
def __eq__(self, other: object) -> bool:
if isinstance(other, self.type):
return self.id == other.id
return NotImplemented
__hash__ = Hashable.__hash__
@property
def created_at(self) -> datetime.datetime:
""":class:`datetime.datetime`: Returns the snowflake's creation time in UTC."""
return snowflake_time(self.id)
OLDEST_OBJECT = Object(id=0)
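# A minimal usage sketch (hypothetical ID, not from the source): a bare
# snowflake can stand in where an API call expects a full model,
#
#     stub = Object(id=80528701850124288)
#
# and the ``type`` keyword tags the stand-in with the model it represents,
# which is what __eq__ compares against:
#
#     member_stub = Object(id=80528701850124288, type=Member)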
| {
"content_hash": "8a5599f96c97b6fb887c09e6e6253341",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 109,
"avg_line_length": 31.641025641025642,
"alnum_prop": 0.6769313884386818,
"repo_name": "Rapptz/discord.py",
"id": "2243a040836d3f9e910452215b9830e005cc7c75",
"size": "3702",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "discord/object.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2493009"
}
],
"symlink_target": ""
} |
import os
import sys
sys.path.append('/home3/redwards/bioinformatics/Modules')
import re
import rob
import gzip
'''Combine .gbff and .fna files to get just the coding sequences. We need to get the data from RefSeq and they have
split DNA sequences out of GenBank files so it is not clear that biopython etc will work.
This is just a quick parser and then we get the strings.'''
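# Usage (illustrative file names; the gbff may be gzipped):
#   python combine_gbff_fna.py genome.gbff.gz genome.fna > coding_seqs.fna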
try:
gbff = sys.argv[1]
fnaf = sys.argv[2]
except:
sys.stderr.write(sys.argv[0] + " <gbff file> <fna file>\n")
sys.exit(-1)
locusre = re.compile('LOCUS\s+(\S+)')
locustagre = re.compile('\s+\/locus_tag=\"(.*)\"')
locationre = re.compile('\s+gene\s+(\d+)\.\.(\d+)$')
locationrerc = re.compile('\s+gene\s+complement\((\d+)\.\.(\d+)\)$')
locus = ""
locustag = ""
[start, end]=['0','0']
complement = False
locations={}
try:
if gbff.endswith('.gz'):
gbfin=gzip.open(gbff, 'rb')
else:
gbfin=open(gbff, 'r')
except:
sys.exit("Unable to open file " + gbff)
for line in gbfin:
line = line.rstrip()
if line == "//":
if start != '0' or end != '0':
# print "\t".join([locus, locustag, start, end, str(complement)])
locations[locus][locustag]=[start, end, complement]
locus = ""
locustag = ""
[start, end]=['0','0']
complement = False
continue
if line.startswith('LOCUS'):
m = locusre.match(line)
locus = m.group(1)
locations[locus]={}
continue
if '/locus_tag' in line:
m = locustagre.match(line)
if m:
locustag = m.group(1)
else:
sys.stderr.write("Couldn't parse |" + line + "|\n")
if '..' in line and 'gene' in line:
if start != '0' or end != '0':
# print "\t".join([locus, locustag, start, end, str(complement)])
locations[locus][locustag]=[start, end, complement]
locustag = ""
[start, end]=['0','0']
complement = False
m = locationre.match(line)
if m:
start = m.group(1)
end = m.group(2)
else:
m = locationrerc.match(line)
if m:
complement = True
start = m.group(1)
end = m.group(2)
else:
sys.stderr.write("Can't parse an apparent location at : " + line + "\n")
fa = rob.readFasta(fnaf)
#ncre = re.compile('.*ref\|(\w+)')
ncre = re.compile('(NC_\d+)')
for id in fa:
m = ncre.match(id)
if not m:
sys.stderr.write("No apparent NC_ idenitifer in this sequence id: " + id + "\n")
continue
locus = m.group(1)
for l in locations[locus]:
[start, end, complement] = locations[locus][l]
if complement:
print ">" + l + " " + locus + " " + end + "_" + start + " COMPLEMENT"
print rob.rc(fa[id][int(start)-1:int(end)])
else:
print ">" + l + " " + locus + " " + start + "_" + end
print fa[id][int(start)-1:int(end)]
| {
"content_hash": "b7b4723800f47ae4262248492a90faa8",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 116,
"avg_line_length": 27.463636363636365,
"alnum_prop": 0.5306190003310162,
"repo_name": "linsalrob/PhageHosts",
"id": "e269aa6d274f9dfd7ae2f987dae444d0e8eb475b",
"size": "3021",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "code/combine_gbff_fna.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "8127"
},
{
"name": "Perl",
"bytes": "36107"
},
{
"name": "Perl 6",
"bytes": "453"
},
{
"name": "Python",
"bytes": "184154"
}
],
"symlink_target": ""
} |
from basil.HL.arduino_base import ArduinoBase
class I2CTransmissionError(ValueError):
pass
class SerialToI2C(ArduinoBase):
CMDS = {
'write': 'W',
'read': 'R',
'address': 'A',
'check': 'T'
}
ERRORS = {
'error': "Serial transmission error" # Custom return code for unsuccesful serial communciation
}
# Check https://www.arduino.cc/en/Reference/WireEndTransmission
I2C_RETURN_CODES = {
'0': "Success",
'1': "Data too long to fit in transmit buffer",
'2': "Received NACK on transmit of address",
'3': "Received NACK on transmit of data",
'4': "Other error"
}
@property
def i2c_address(self):
"""
Read back the I2C address property from the firmware.
Returns
-------
int
I2C address
"""
return int(self.query(self.create_command(self.CMDS['address'])))
@i2c_address.setter
def i2c_address(self, addr):
"""
Set the I2C address of the device on the bus to talk to.
Parameters
----------
addr : int
I2C address
Raises
------
I2CTransmissionError
If the set address on the Arduino does not match with what has been sent
"""
self._set_and_retrieve(cmd='address', val=int(addr), exception_=I2CTransmissionError)
def __init__(self, intf, conf):
super(SerialToI2C, self).__init__(intf, conf)
def query_i2c(self, msg):
"""
Queries a message *msg* and reads the i2c return code.
Checks the return code of the Arduino Wire.endTransmission.
Additional data after the query can be retrieved using self.read()
Parameters
----------
msg : str, bytes
Message to be queried
Returns
-------
str
I2C return code as in self.I2C_RETURN_CODES
Raises
------
NotImplementedError
return_code is unknown
I2CTransmissionError
dedicated error code from Wire library
"""
try:
i2c_return_code = self.query(msg)
if i2c_return_code != '0':
if i2c_return_code not in self.I2C_RETURN_CODES:
raise NotImplementedError(f"Unknown return code {i2c_return_code}")
raise I2CTransmissionError(self.I2C_RETURN_CODES[i2c_return_code])
return i2c_return_code
except RuntimeError:
self.reset_buffers() # Serial error, just reset buffers
def read_register(self, reg):
"""
Read data from register *reg*
Parameters
----------
reg : int
Register to read from
Returns
-------
int
Data read from *reg*
"""
self.query_i2c(self.create_command(self.CMDS['read'], reg))
return int(self.read())
def write_register(self, reg, data):
"""
Write *data* to register *reg*
Parameters
----------
reg : int
Register to write to
data : int
Data to write to register *reg*
"""
self.query_i2c(self.create_command(self.CMDS['write'], reg, data))
def check_i2c_connection(self):
"""
Checks the I2C connection from the Arduino to the bus device
"""
self.query_i2c(self.create_command(self.CMDS['check']))
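# A minimal usage sketch (assumed 7-bit address and register map, not from
# the source):
#
#     dut = SerialToI2C(intf, conf)     # normally constructed by basil from a conf
#     dut.i2c_address = 0x48            # select the device on the bus
#     dut.check_i2c_connection()        # raises I2CTransmissionError on failure
#     dut.write_register(0x01, 0x7F)    # write 0x7F to register 0x01
#     value = dut.read_register(0x00)   # read register 0x00 back as an int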
| {
"content_hash": "724d78d1bf62631c8d2d2e411150f85e",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 103,
"avg_line_length": 26.454545454545453,
"alnum_prop": 0.5438144329896907,
"repo_name": "SiLab-Bonn/basil",
"id": "d8c39c87034ede35740d736203a7ddd123799d17",
"size": "3492",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "basil/HL/arduino_serial_to_i2c.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "17821"
},
{
"name": "Python",
"bytes": "502781"
},
{
"name": "SystemVerilog",
"bytes": "2358"
},
{
"name": "Verilog",
"bytes": "428771"
}
],
"symlink_target": ""
} |
import datetime
from decimal import Decimal
DEFAULT_CHARSET = 'utf-8'
class DjangoUnicodeDecodeError(UnicodeDecodeError):
def __init__(self, obj, *args):
self.obj = obj
super().__init__(*args)
def __str__(self):
return '%s. You passed in %r (%s)' % (
super().__str__(),
self.obj,
type(self.obj),
)
_PROTECTED_TYPES = (
type(None),
int,
float,
Decimal,
datetime.datetime,
datetime.date,
datetime.time,
)
def is_protected_type(obj):
"""Determine if the object instance is of a protected type.
Objects of protected types are preserved as-is when passed to
force_text(strings_only=True).
"""
return isinstance(obj, _PROTECTED_TYPES)
def force_str(s, encoding='utf-8', strings_only=False, errors='strict'):
"""
Similar to smart_str(), except that lazy instances are resolved to
strings, rather than kept as lazy objects.
If strings_only is True, don't convert (some) non-string-like objects.
"""
# Handle the common case first for performance reasons.
if issubclass(type(s), str):
return s
if strings_only and is_protected_type(s):
return s
try:
if isinstance(s, bytes):
s = str(s, encoding, errors)
else:
s = str(s)
except UnicodeDecodeError as e:
raise DjangoUnicodeDecodeError(s, *e.args)
return s
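# For illustration (values assumed, not from the source):
#
#     force_str(b'caf\xc3\xa9')             # -> 'café'
#     force_str(42)                         # -> '42'
#     force_str(42, strings_only=True)      # -> 42 (int is a protected type)
#     force_str(b'\xff', errors='replace')  # -> '\ufffd'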
def punycode(domain):
"""Return the Punycode of the given domain if it's non-ASCII."""
return domain.encode('idna').decode('ascii')
| {
"content_hash": "d754f2de4d0fb22cfb17d93a374b4c40",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 74,
"avg_line_length": 24.71875,
"alnum_prop": 0.6131479140328698,
"repo_name": "ThiefMaster/indico",
"id": "588b2fc2002c995750187b01d5606507bd0e1d85",
"size": "2151",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "indico/vendor/django_mail/encoding_utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "34704"
},
{
"name": "HTML",
"bytes": "1411006"
},
{
"name": "JavaScript",
"bytes": "2083786"
},
{
"name": "Mako",
"bytes": "1527"
},
{
"name": "Python",
"bytes": "5133951"
},
{
"name": "SCSS",
"bytes": "476568"
},
{
"name": "Shell",
"bytes": "3877"
},
{
"name": "TeX",
"bytes": "23327"
},
{
"name": "XSLT",
"bytes": "1504"
}
],
"symlink_target": ""
} |
"""
for debugging, profiling. May not work right now.
"""
import cProfile
from pstats import Stats
profFN = "RinexObsReader.pstats"
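# NOTE (assumption, not stated in the source): rinexobs() and rinexfn must
# already exist in the namespace this snippet runs in, e.g. something like
#   from georinex import rinexobs; rinexfn = "example.15o"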
cProfile.run("rinexobs(rinexfn)", profFN)
Stats(profFN).sort_stats("time", "cumulative").print_stats(20)
| {
"content_hash": "062eeee05d10bef8a82b0014d18d4811",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 62,
"avg_line_length": 23.9,
"alnum_prop": 0.7447698744769874,
"repo_name": "geospace-code/georinex",
"id": "77b7eb42db9efabb94fa396bececcd22eb89eaa7",
"size": "261",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": ".archive/profileme.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "MATLAB",
"bytes": "663"
},
{
"name": "Python",
"bytes": "146951"
}
],
"symlink_target": ""
} |
'''Cable Rigging Tool. Drag this script to shelf, or execute from editor to run'''
import maya.cmds as cmds
class RigCurveTool(object):
'''Creates a rig from the given curve.
Rig is a NURBS strip that is skinned to the controls.
The strip in turn has a series of locators attached to it using
a setup meant to slide and stretch nicely.
A chain of "skin joints" is created and attached to the locators.
Use the skin joints to drive your mesh.
The stretch attr is put on the first control, which is larger.
Rigging the same curve twice may cause errors, since everything is named
based on the curve name.
'''
def __init__(self):
object.__init__(self)
self.widgets = dict()
self.defaults = dict()
#default UI Values
self.defaults['joints'] = 10
self.defaults['ctrls'] = 4
self.defaults['size']=0.1
self.defaults['spans']=10
self.defaults['width']=0.1
self.defaults['uMin']=0.0
self.defaults['uMax']=1.0
self.showWindow()
def showWindow(self):
window = cmds.window(title='Rig Curve')
cmds.columnLayout()
#Read stored options
if cmds.optionVar(exists='CableRigger_joints'):
defaultJoints = cmds.optionVar(q='CableRigger_joints')
else:
defaultJoints = self.defaults['joints']
if cmds.optionVar(exists='CableRigger_ctrls'):
defaultCtrls = cmds.optionVar(q='CableRigger_ctrls')
else:
defaultCtrls = self.defaults['ctrls']
if cmds.optionVar(exists='CableRigger_size'):
defaultSize = cmds.optionVar(q='CableRigger_size')
else:
defaultSize = self.defaults['size']
if cmds.optionVar(exists='CableRigger_spans'):
defaultSpans = cmds.optionVar(q='CableRigger_spans')
else:
defaultSpans = self.defaults['spans']
if cmds.optionVar(exists='CableRigger_width'):
defaultWidth = cmds.optionVar(q='CableRigger_width')
else:
defaultWidth = self.defaults['width']
if cmds.optionVar(exists='CableRigger_uMin'):
defaultuMin = cmds.optionVar(q='CableRigger_uMin')
else:
defaultuMin = self.defaults['uMin']
if cmds.optionVar(exists='CableRigger_uMax'):
defaultuMax = cmds.optionVar(q='CableRigger_uMax')
else:
defaultuMax = self.defaults['uMax']
#Curve Selector
sel = cmds.ls(sl=True)
initialText = ''
if sel:
initialText = sel[0]
self.widgets['curveNameGrp'] = cmds.textFieldButtonGrp(
label='Curve:',
text=initialText,
buttonLabel='<<<<',
bc=self.curveNameButtonPush
)
#Geo
sel = cmds.ls(sl=True)
initialText = ''
self.widgets['geoNameGrp'] = cmds.textFieldButtonGrp(
label='Geo:',
text=initialText,
buttonLabel='<<<<',
bc=self.geoNameButtonPush
)
#Other Widgets
cmds.button(label='Reset below to default',width=500,command=self.setDefaults)
cmds.text(label="Adjust Rig:")
self.widgets['jointGrp'] = cmds.intSliderGrp(
label='Joints',
field=True,
fieldMinValue=2,
minValue=2,
maxValue=150,
value=defaultJoints
)
self.widgets['controlsGrp'] = cmds.intSliderGrp(
label='Controls',
field=True,
fieldMinValue=2,
minValue=2,
maxValue=50,
value=defaultCtrls
)
self.widgets['sizeGrp'] = cmds.floatSliderGrp(
label='Control Size',
field=True,
fieldMinValue=0.01,
minValue=.1,
maxValue=10.0,
value=defaultSize
)
self.widgets['uMinGrp'] = cmds.floatSliderGrp(
label='U min',
field=True,
fieldMinValue=0.0,
minValue=0,
maxValue=1.0,
fieldMaxValue=1.0,
value=defaultuMin
)
self.widgets['uMaxGrp'] = cmds.floatSliderGrp(
label='U max',
field=True,
fieldMinValue=0,
minValue=0,
maxValue=1,
fieldMaxValue=1,
value=defaultuMax
)
cmds.text(label='')
cmds.text(label="Adjust NURBS Strip:")
self.widgets['spansGrp'] = cmds.intSliderGrp(
label='Strip Spans',
field=True,
fieldMinValue=2,
minValue=2,
maxValue=150,
value=defaultSpans
)
self.widgets['widthGrp'] = cmds.floatSliderGrp(
label='Strip Width',
field=True,
fieldMinValue=0.005,
minValue=0.01,
maxValue=5.0,
value=defaultWidth
)
cmds.text(label='')
cmds.button(label="\nRig Curve!",h=60,w=500,command=self.doIt)
cmds.button(label="Wire Geo Only (Rig already built)",h=30,w=500,command=self.wireOnly)
cmds.showWindow(window)
def curveNameButtonPush(self,*args,**kwargs):
'''pops the selection into the text field'''
sel = cmds.ls(sl=True)
if not sel:
raise RuntimeError("select a curve")
cmds.textFieldButtonGrp(self.widgets['curveNameGrp'],e=True,text=sel[0])
def geoNameButtonPush(self,*args,**kwargs):
'''pops the selection into the text field'''
sel = cmds.ls(sl=True)
if not sel:
raise RuntimeError("select the cable geo")
cmds.textFieldButtonGrp(self.widgets['geoNameGrp'],e=True,text=sel[0])
def setDefaults(self,*args,**kwargs):
'''sets the sliders to defaults'''
cmds.intSliderGrp(self.widgets['jointGrp'],e=True,v=self.defaults['joints'])
cmds.intSliderGrp( self.widgets['controlsGrp'], e=True,v=self.defaults['ctrls'])
cmds.floatSliderGrp(self.widgets['sizeGrp'], e=True,v=self.defaults['size'])
cmds.intSliderGrp(self.widgets['spansGrp'], e=True,v=self.defaults['spans'])
cmds.floatSliderGrp(self.widgets['widthGrp'] , e=True,v=self.defaults['width'])
cmds.floatSliderGrp(self.widgets['uMinGrp'] , e=True,v=self.defaults['uMin'])
cmds.floatSliderGrp(self.widgets['uMaxGrp'] , e=True,v=self.defaults['uMax'])
def wireOnly(self,*args,**kwargs):
'''if the rig already exists, just wire geo'''
crv = cmds.textFieldButtonGrp(self.widgets["curveNameGrp"],q=True,text=True)
geo = cmds.textFieldButtonGrp(self.widgets["geoNameGrp"],q=True,text=True)
if not crv or not geo or not cmds.objExists(geo):
raise RuntimeError("Specify a curve and a geo to wire to an already existing rig")
#Find nodes based on name, do some error checking
rigNode = crv + "_Rig"
hiddenStuff = crv + "_NOTOUCH"
wireCrv = crv + "_skinned"
if not cmds.objExists(rigNode):
raise RuntimeError("%s not found in scene, rig not built yet?"%rigNode)
allKids = cmds.listRelatives(rigNode,ad=True)
if not cmds.objExists(wireCrv) and not wireCrv in allKids:
raise RuntimeError("wire curve %s not found under %s, wire curve deleted or not rigged?" %(wireCrv,rigNode))
if not cmds.objExists(hiddenStuff) and not hiddenStuff in allKids:
raise RuntimeError("Couldn't find the NOTOUCH node for this rig, curve not rigged?")
#Make wire
cmds.wire(geo,w=wireCrv,n=crv + "_wire",dds=(0,10),en=1.0,ce=0,li=0)
print "wire done"
def doIt(self,*args,**kwargs):
'''reads widget values and calls rigFromCurve'''
joints = cmds.intSliderGrp(self.widgets["jointGrp"],q=True,v=True)
ctrls = cmds.intSliderGrp(self.widgets["controlsGrp"],q=True,v=True)
size = cmds.floatSliderGrp(self.widgets["sizeGrp"],q=True,v=True)
spans = cmds.intSliderGrp(self.widgets["spansGrp"],q=True,v=True)
width = cmds.floatSliderGrp(self.widgets["widthGrp"],q=True,v=True)
crv = cmds.textFieldButtonGrp(self.widgets["curveNameGrp"],q=True,text=True)
geo = cmds.textFieldButtonGrp(self.widgets["geoNameGrp"],q=True,text=True)
uMin = cmds.floatSliderGrp(self.widgets["uMinGrp"],q=True,v=True)
uMax = cmds.floatSliderGrp(self.widgets["uMaxGrp"],q=True,v=True)
#save options
cmds.optionVar( iv=('CableRigger_joints', joints))
cmds.optionVar( iv=('CableRigger_ctrls', ctrls))
cmds.optionVar( fv=('CableRigger_size', size))
cmds.optionVar( iv=('CableRigger_spans', spans))
cmds.optionVar( fv=('CableRigger_width', width))
cmds.optionVar( fv=('CableRigger_uMin', uMin))
cmds.optionVar( fv=('CableRigger_uMax', uMax))
if not crv or not cmds.objExists(crv):
raise RuntimeError("%s not found in scene" % crv)
shapes = cmds.listRelatives(crv,s=1)
if not shapes or cmds.nodeType(shapes[0]) != 'nurbsCurve':
raise RuntimeError("Selection is not a curve")
self.rigFromCurve(crv,numSpans=spans,
numJoints=joints,
numCtrls=ctrls,
stripWidth=width,
ctrlWidth=size,
geo=geo,
uMin=uMin,
uMax=uMax
)
print "cable rig complete"
def rigFromCurve(self,crv,numSpans=8,numJoints=10,numCtrls=5,stripWidth = 1.0,ctrlWidth=2.0,geo=None,uMin=0.0,uMax=1.0):
'''make a cable rig from the given curve
numSpans = number of spans in Nurbs strip
numJoints = number of joints riding on nurbs strip
numCtrls = number of controls to make
stripWidth = width of nurbs strip (can make it easier to paint weights if wider)
ctrlWidth = size of ctrls
'''
shapes = cmds.listRelatives(crv,s=1)
crvShape = shapes[0]
#Make rig top nulls to parent stuff under
topNull = cmds.createNode('transform',n=crv + "_Rig")
hiddenStuff = cmds.createNode('transform',n=crv + "_NOTOUCH",p=topNull)
cmds.setAttr(hiddenStuff + ".inheritsTransform", 0)
cmds.setAttr(hiddenStuff + ".visibility", 0)
cmds.addAttr(topNull, ln="stretchAmount",dv=1.0,min=0,max=1)
cmds.addAttr(topNull, ln='slideAmount',dv=0.0)
#make nurbs strip using extrude
crossCurve = cmds.curve(d=1,p=[(0,0,-0.5 * stripWidth),(0,0,0.5 * stripWidth)],k=(0,1))
cmds.select([crossCurve,crv],r=1)
surf = cmds.extrude(ch=False,po=0,et=2,ucp=1,fpt=1,upn=1,rotation=0,scale=1,rsp=1)[0]
cmds.delete(crossCurve)
surf = cmds.rename(surf, crv + "_driverSurf")
cmds.parent(surf,hiddenStuff)
#Rebuild strip to proper number of spans
cmds.rebuildSurface(surf,ch=0,rpo=1,rt=0,end=1,kr=0,kcp=0,kc=1,sv=numSpans,su=0,du=1,tol=0.01,fr=0,dir=2)
#make live curve on surface down the middle
#this is used later for noStretch
curvMaker = cmds.createNode('curveFromSurfaceIso', n = surf+"CurveIso")
cmds.setAttr(curvMaker + ".isoparmValue", 0.5)
cmds.setAttr(curvMaker + ".isoparmDirection", 1)
cmds.connectAttr(surf + ".worldSpace[0]", curvMaker + ".inputSurface")
offsetCrvShp = cmds.createNode("nurbsCurve", n=crv + "_driverSurfCrvShape")
offsetCrv = cmds.listRelatives(p=1)[0]
offsetCrv = cmds.rename(offsetCrv,crv + "_driverSurfCrv")
cmds.connectAttr(curvMaker + ".outputCurve", offsetCrvShp + ".create")
cmds.parent(offsetCrv, hiddenStuff)
#Measure curve length and divide by start length.
#This turns curve length into a normalized value that is
#useful for multiplying by UV values later to control stretch
crvInfo = cmds.createNode('curveInfo', n=offsetCrv + "Info")
cmds.connectAttr(offsetCrv + ".worldSpace[0]", crvInfo + ".ic")
arcLength = cmds.getAttr(crvInfo + ".al")
stretchAmountNode = cmds.createNode('multiplyDivide', n=offsetCrv + "Stretch")
cmds.setAttr(stretchAmountNode + ".op" , 2) #divide
cmds.setAttr(stretchAmountNode + ".input1X", arcLength)
cmds.connectAttr( crvInfo + ".al",stretchAmountNode + ".input2X")
#Stretch Blender blends start length with current length
#and pipes it back into stretchAmountNode's startLength, to "trick" it into
#thinking there is no stretch..
#That way, when user turns on this "noStretch" attr, the startLength will
#be made to equal current length, and stretchAmountNode will always be 1.
#so the chain will not stretch.
stretchBlender = cmds.createNode('blendColors', n =offsetCrv + "StretchBlender")
cmds.setAttr(stretchBlender + ".c1r", arcLength)
cmds.connectAttr(crvInfo + ".al", stretchBlender + ".c2r")
cmds.connectAttr(stretchBlender + ".opr", stretchAmountNode + ".input1X")
cmds.connectAttr(topNull + ".stretchAmount",stretchBlender + ".blender")
#make skin joints and attach to surface
skinJoints = []
skinJointParent = cmds.createNode('transform',n=crv + "_skinJoints",p=topNull)
for i in range(numJoints):
cmds.select(clear=True)
jnt = cmds.joint(p=(0,0,0),n=crv + "_driverJoint%02d"%i)
locator = cmds.spaceLocator(n=crv + "driverLoc%02d"%i)[0]
cmds.setAttr(locator + ".localScale",stripWidth,stripWidth,stripWidth)
cmds.parent(locator,hiddenStuff)
percentage = float(i)/(numJoints-1.0)
print "percentage:", percentage
print i
if i > 1 and i < numJoints-2:
percentage = uMin + (percentage * (uMax-uMin))
print "\tinterp percent", percentage
posNode,aimCnss,moPath,slider = self.attachObjToSurf(locator,surf,offsetCrv,stretchAmountNode,percentage)
cmds.connectAttr(topNull + ".slideAmount", slider + ".i2")
cmds.parentConstraint(locator,jnt,mo=False)
if len(skinJoints):
cmds.parent(jnt,skinJoints[-1])
else:
cmds.parent(jnt,skinJointParent)
skinJoints.append(jnt)
cmds.setAttr(jnt + ".radius",stripWidth) #just cosmetic
#add controls
ctrls = []
stripJoints = []
stripJointParent = cmds.createNode('transform',n=crv + "_stripJoints",p=hiddenStuff)
ctrlParent = cmds.createNode('transform',n=crv+"_Ctrls",p=topNull)
for i in range(numCtrls):
#The first control is larger, and has the stretch attr
if i == 0:
zero,ctrl = self.makeCubeCtrl(crv + "_Ctrl%02d"%i,size=ctrlWidth*1.8)
cmds.addAttr(ctrl,ln="noStretch",dv=0.0,min=0,max=1,k=1,s=1)
cmds.addAttr(ctrl,ln='slideAmount',dv=0.0,min=-1.0,max=1.0,k=1,s=1)
cmds.connectAttr(ctrl + ".noStretch",topNull + ".stretchAmount")
cmds.connectAttr(ctrl + ".slideAmount",topNull + ".slideAmount")
else:
zero,ctrl = self.makeCubeCtrl(crv + "_Ctrl%02d"%i,size=ctrlWidth)
#Make the joint the control. These drive the nurbs strip.
cmds.select(clear=True)
jnt = cmds.joint(p=(0,0,0),n=ctrl + "StripJnt")
cmds.parentConstraint(ctrl,jnt,mo=False)
cmds.setAttr(jnt + ".radius", stripWidth * 1.3) #just cosmetic
#briefly attach ctrls to strip to align them
percentage = float(i)/(numCtrls-1.0)
print "ctrl percentage:",percentage
if i > 0 and i < numCtrls-1:
percentage = uMin + (percentage * (uMax-uMin))
print '\tinterp percentage:', percentage
cmds.delete(self.attachObjToSurf(zero,surf,offsetCrv,stretchAmountNode,percentage))
ctrls.append(ctrl)
cmds.parent(jnt,stripJointParent)
stripJoints.append(jnt)
cmds.parent(zero,ctrlParent)
#skin strip to controls
#Can get some different behavior by changing the strip's weights
#or perhaps using dual quat. mode on the skinCluster
skinObjs = stripJoints + [surf]
cmds.skinCluster(skinObjs,
bindMethod=0, #closest Point
sm=0, #standard bind method
ih=True, #ignore hierarchy
)
#rebuild curve and skin to joints
newCurve = cmds.duplicate(crv)[0]
newCurve = cmds.rename(newCurve, crv + "_skinned")
cmds.parent(newCurve, topNull)
cmds.rebuildCurve(newCurve,ch=0,rpo=1,rt=0,end=1,kr=0,kcp=0,kep=1,kt=0,s=numJoints-2,d=3,tol=0.01)
skinObjs = skinJoints + [newCurve]
cmds.skinCluster(skinObjs,
bindMethod = 0,
sm = 0,
ih=True,
mi=1
)
if geo:
wireDef,wireCrv = cmds.wire(geo,w=newCurve,n=crv + "_wire",dds=(0,10),en=1.0,ce=0,li=0)
print wireDef
cmds.parent(wireCrv,hiddenStuff)
if cmds.objExists(wireCrv+"BaseWire"):
cmds.parent(wireCrv+"BaseWire",hiddenStuff)
def attachObjToSurf(self,obj,surf,path,stretchAmountNode,percentage):
'''Given an object and a surface, attach object.
Returns created nodes like (poinOnSurface,aimCns)
'''
#Make nodes
aimCns = cmds.createNode('aimConstraint',n=obj + "Cns")
moPath = cmds.createNode('motionPath', n=obj + "MoPath")
slider = cmds.createNode('addDoubleLinear',n=obj + "Slider")
cmds.setAttr(moPath + ".uValue", percentage)
closePnt = cmds.createNode('closestPointOnSurface', n=obj + "ClsPnt")
posNode1 = cmds.pointOnSurface(surf,
constructionHistory=True,
normal=True,
normalizedNormal=True,
normalizedTangentU=True,
normalizedTangentV=True,
parameterV=0.5,
parameterU=0.5,
turnOnPercentage=True
)
#Connect motion Path to closest point, then closest point to surface info node
cmds.setAttr(moPath + ".fractionMode", 1) #distance instead of param
cmds.connectAttr(path + ".worldSpace[0]", moPath + ".geometryPath")
cmds.connectAttr(surf + ".worldSpace[0]", closePnt + ".inputSurface")
cmds.connectAttr(moPath + ".xCoordinate", closePnt + ".ipx")
cmds.connectAttr(moPath + ".yCoordinate", closePnt + ".ipy")
cmds.connectAttr(moPath + ".zCoordinate", closePnt + ".ipz")
cmds.connectAttr(closePnt + ".result.u", posNode1 + ".u")
cmds.connectAttr(closePnt + ".result.v", posNode1 + ".v")
#Create Stretch Setup using stretchAmountNode node
stretchCtrl = cmds.createNode("multDoubleLinear", n=obj + "StretchCtrl")
cmds.setAttr(stretchCtrl + ".i1", percentage)
cmds.connectAttr(stretchAmountNode + ".outputX",stretchCtrl + ".i2")
cmds.connectAttr(stretchCtrl + ".o", slider + ".i1")
cmds.connectAttr(slider + ".o", moPath + ".uValue")
#Hook up surface info attrs to aimCns to calculate rotation values
#Then hook pointOnSurface and aimCns to locator
posNode1 = cmds.rename(posNode1,obj + 'SurfInfo')
cmds.setAttr(aimCns + ".worldUpType", 3)
cmds.connectAttr(posNode1 + ".position", obj + ".translate")
cmds.connectAttr(posNode1 + '.tv',aimCns + '.target[0].targetTranslate')
cmds.connectAttr(posNode1 + '.tu',aimCns + '.worldUpVector')
for axis in ('X','Y','Z'):
cmds.connectAttr(aimCns + ".constraintRotate" + axis, obj + ".rotate" + axis)
cmds.parent(aimCns,obj) #just for tidiness, doesn't matter
return (posNode1,aimCns,moPath,slider)
def makeCubeCtrl(self,name,size=1.0):
'''
Make a nurbs curve cube with given name. Also can take size.
Returns the cube parented under a zero null
'''
wd = 0.5*size
crn = [
(-wd,wd,-wd),
(wd,wd,-wd),
(wd,-wd,-wd),
(-wd,-wd,-wd),
(-wd,wd,wd),
(wd,wd,wd),
(wd,-wd,wd),
(-wd,-wd,wd),
]
verts = (crn[0],crn[1],crn[2],crn[3],crn[0],crn[4],crn[5],crn[6],
crn[7],crn[4],crn[5],crn[1],crn[0],crn[4],crn[7],crn[3],crn[0],
crn[1],crn[2],crn[6])
crv = cmds.curve(d=1,
p=verts,
k=range(len(verts))
)
crv = cmds.rename(crv,name)
zero = cmds.createNode('transform',n=crv + "_Zero")
self.hideChannels(zero)
for attr in ('sx','sy','sz'):
cmds.setAttr(crv + "." + attr,l=True,k=False,cb=False)
cmds.parent(crv,zero)
return (zero,crv)
def hideChannels(self,obj,lock=False):
'''hide anim channels on given obj'''
for attr in ('s','r','t'):
for axis in ('x','y','z'):
cmds.setAttr(obj + ".%s%s"%(attr,axis), keyable=False,channelBox=False,lock=lock)
cmds.setAttr(obj + ".v", keyable=False,channelBox=False)
#Run the tool
RigCurveTool()
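#The rig can also be built without touching the UI (a sketch, assuming a
#curve named 'curve1' exists in the scene; instantiation still opens the
#options window):
# tool = RigCurveTool()
# tool.rigFromCurve('curve1', numSpans=10, numJoints=10, numCtrls=4,
#     stripWidth=0.1, ctrlWidth=0.1, geo=None, uMin=0.0, uMax=1.0)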
| {
"content_hash": "4d94a1aebb8c374f318ffe4797b70d27",
"timestamp": "",
"source": "github",
"line_count": 489,
"max_line_length": 124,
"avg_line_length": 44.061349693251536,
"alnum_prop": 0.5900863269284322,
"repo_name": "BenBarker/CurveRigger",
"id": "53b9688ab38c75c5e106ae9f63487a5bd839aa53",
"size": "21547",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "curveRigger.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "21547"
}
],
"symlink_target": ""
} |
from django_blog.profiles.models import Profile
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, HttpResponse
def create_user(request):
if request.method == 'POST':
print "User submited the form"
create_form = UserCreationForm(request.POST)
if create_form.is_valid():
print "User is created"
user = create_form.save()
# Create user profile
profile = Profile()
profile.user = user
profile.save()
return HttpResponseRedirect(profile.get_absolute_url())
else:
create_form = UserCreationForm()
return render_to_response("profiles/create_user.html", { 'form': create_form })
def display_profile(request, username):
user = get_object_or_404(User, username=username)
return render_to_response("profiles/profile_show.html", { 'profile': user.get_profile() })
| {
"content_hash": "b3e9bc45c753ac2c92f1088eda106fd8",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 91,
"avg_line_length": 32.65625,
"alnum_prop": 0.7588516746411483,
"repo_name": "jhjguxin/blogserver",
"id": "77c992d1c20d87fac7cbba40d9234f366d847282",
"size": "1045",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "profiles/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "28053"
},
{
"name": "Python",
"bytes": "268607"
}
],
"symlink_target": ""
} |
"""
WSGI config for mServer project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
'''
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mServer.settings")
application = get_wsgi_application()
'''
import os
from os.path import join,dirname,abspath
PROJECT_DIR = dirname(dirname(abspath(__file__)))
import sys
sys.path.insert(0,PROJECT_DIR)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "kekangpai.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| {
"content_hash": "d48e1d9bb7a75c8b6d0815c6ec5fb627",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 78,
"avg_line_length": 24.17241379310345,
"alnum_prop": 0.7674750356633381,
"repo_name": "Jameeeees/Mag1C_baNd",
"id": "85cc13cd1154a97f22f6fba942da76fe4e4cb443",
"size": "701",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kekangpai/kekangpai/wsgi.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "960"
},
{
"name": "Java",
"bytes": "522199"
},
{
"name": "Python",
"bytes": "70708"
}
],
"symlink_target": ""
} |
"""A module for creating a saturated vapour pressure table"""
import warnings
import iris
import numpy as np
from iris.cube import Cube
from numpy import ndarray
from improver import BasePlugin
from improver.constants import TRIPLE_PT_WATER
class SaturatedVapourPressureTable(BasePlugin):
"""
Plugin to create a saturated vapour pressure lookup table.
"""
MAX_VALID_TEMPERATURE = 373.0
MIN_VALID_TEMPERATURE = 173.0
def __init__(
self, t_min: float = 183.15, t_max: float = 338.25, t_increment: float = 0.1
) -> None:
"""
Create a table of saturated vapour pressures that can be interpolated
through to obtain an SVP value for any temperature within the range
t_min --> (t_max - t_increment).
The default min/max values create a table that provides SVP values
covering the temperature range -90C to +65.1C. Note that the last
bin is not used, so the SVP value corresponding to +65C is the highest
that will be used.
Args:
t_min:
The minimum temperature for the range, in Kelvin.
t_max:
The maximum temperature for the range, in Kelvin.
t_increment:
The temperature increment at which to create values for the
saturated vapour pressure between t_min and t_max.
"""
self.t_min = t_min
self.t_max = t_max
self.t_increment = t_increment
def __repr__(self) -> str:
"""Represent the configured plugin instance as a string."""
result = (
"<SaturatedVapourPressureTable: t_min: {}; t_max: {}; "
"t_increment: {}>".format(self.t_min, self.t_max, self.t_increment)
)
return result
def saturation_vapour_pressure_goff_gratch(self, temperature: ndarray) -> ndarray:
"""
Saturation Vapour pressure in a water vapour system calculated using
the Goff-Gratch Equation (WMO standard method).
Args:
temperature:
Temperature values in Kelvin. Valid from 173K to 373K
Returns:
Corresponding values of saturation vapour pressure for a pure
water vapour system, in hPa.
References:
Numerical data and functional relationships in science and
technology. New series. Group V. Volume 4. Meteorology.
Subvolume b. Physical and chemical properties of the air, P35.
"""
constants = {
1: 10.79574,
2: 5.028,
3: 1.50475e-4,
4: -8.2969,
5: 0.42873e-3,
6: 4.76955,
7: 0.78614,
8: -9.09685,
9: 3.56654,
10: 0.87682,
11: 0.78614,
}
triple_pt = TRIPLE_PT_WATER
# Values for which method is considered valid (see reference).
# WetBulbTemperature.check_range(temperature.data, 173., 373.)
if (
temperature.max() > self.MAX_VALID_TEMPERATURE
or temperature.min() < self.MIN_VALID_TEMPERATURE
):
msg = "Temperatures out of SVP table range: min {}, max {}"
warnings.warn(msg.format(temperature.min(), temperature.max()))
svp = temperature.copy()
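        # Relation implemented below (T in K, Tt the triple point, cN the
        # constants above), for T > Tt:
        #   log10(es) = c1*(1 - Tt/T) - c2*log10(T/Tt)
        #               + c3*(1 - 10**(c4*(T/Tt - 1)))
        #               + c5*(10**(c6*(1 - Tt/T)) - 1) + c7
        # The else branch applies the analogous form with constants c8-c11.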
for cell in np.nditer(svp, op_flags=["readwrite"]):
if cell > triple_pt:
n0 = constants[1] * (1.0 - triple_pt / cell)
n1 = constants[2] * np.log10(cell / triple_pt)
n2 = constants[3] * (
1.0 - np.power(10.0, (constants[4] * (cell / triple_pt - 1.0)))
)
n3 = constants[5] * (
np.power(10.0, (constants[6] * (1.0 - triple_pt / cell))) - 1.0
)
log_es = n0 - n1 + n2 + n3 + constants[7]
cell[...] = np.power(10.0, log_es)
else:
n0 = constants[8] * ((triple_pt / cell) - 1.0)
n1 = constants[9] * np.log10(triple_pt / cell)
n2 = constants[10] * (1.0 - (cell / triple_pt))
log_es = n0 - n1 + n2 + constants[11]
cell[...] = np.power(10.0, log_es)
return svp
def process(self) -> Cube:
"""
Create a lookup table of saturation vapour pressure in a pure water
vapour system for the range of required temperatures.
Returns:
A cube of saturated vapour pressure values at temperature
points defined by t_min, t_max, and t_increment (defined above).
"""
temperatures = np.arange(
self.t_min, self.t_max + 0.5 * self.t_increment, self.t_increment
)
svp_data = self.saturation_vapour_pressure_goff_gratch(temperatures)
temperature_coord = iris.coords.DimCoord(
temperatures, "air_temperature", units="K"
)
# Output of the Goff-Gratch is in hPa, but we want to return in Pa.
svp = iris.cube.Cube(
svp_data,
long_name="saturated_vapour_pressure",
units="hPa",
dim_coords_and_dims=[(temperature_coord, 0)],
)
svp.convert_units("Pa")
svp.attributes["minimum_temperature"] = self.t_min
svp.attributes["maximum_temperature"] = self.t_max
svp.attributes["temperature_increment"] = self.t_increment
return svp
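# A minimal usage sketch (default construction arguments shown; interpolating
# through the returned cube to get SVP at arbitrary temperatures is left to
# the caller):
#
# svp_cube = SaturatedVapourPressureTable(
#     t_min=183.15, t_max=338.25, t_increment=0.1
# ).process()
# print(svp_cube.attributes["temperature_increment"])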
| {
"content_hash": "0abfea480beb91c56ec8478d00b25a2e",
"timestamp": "",
"source": "github",
"line_count": 151,
"max_line_length": 86,
"avg_line_length": 36,
"alnum_prop": 0.5599705665930832,
"repo_name": "metoppv/improver",
"id": "20339385426f7fbc7d752f2f05e7a5b2dce518a5",
"size": "7093",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "improver/generate_ancillaries/generate_svp_table.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "5073745"
},
{
"name": "Shell",
"bytes": "9493"
}
],
"symlink_target": ""
} |
import os
import pkg_resources
import sqlite3
import sys
import traceback
import uuid
from django.core.cache import cache
from django.core.cache.backends.locmem import LocMemCache
from django.db.backends.postgresql.base import DatabaseWrapper as BaseDatabaseWrapper
from awx.main.utils import memoize
__loc__ = LocMemCache(str(uuid.uuid4()), {})
__all__ = ['DatabaseWrapper']
class RecordedQueryLog(object):
def __init__(self, log, db, dest='/var/log/tower/profile'):
self.log = log
self.db = db
self.dest = dest
try:
self.threshold = cache.get('awx-profile-sql-threshold')
except Exception:
# if we can't reach the cache, just assume profiling's off
self.threshold = None
def append(self, query):
ret = self.log.append(query)
try:
self.write(query)
except Exception:
            # not sure what else to do here - we can't really safely
# *use* our loggers because it'll just generate more DB queries
# and potentially recurse into this state again
_, _, tb = sys.exc_info()
traceback.print_tb(tb)
return ret
def write(self, query):
if self.threshold is None:
return
seconds = float(query['time'])
# if the query is slow enough...
if seconds >= self.threshold:
sql = query['sql']
if sql.startswith('EXPLAIN'):
return
# build a printable Python stack
bt = ' '.join(traceback.format_stack())
# and re-run the same query w/ EXPLAIN
explain = ''
cursor = self.db.cursor()
cursor.execute('EXPLAIN VERBOSE {}'.format(sql))
for line in cursor.fetchall():
explain += line[0] + '\n'
# write a row of data into a per-PID sqlite database
if not os.path.isdir(self.dest):
os.makedirs(self.dest)
progname = ' '.join(sys.argv)
for match in ('uwsgi', 'dispatcher', 'callback_receiver', 'wsbroadcast'):
if match in progname:
progname = match
break
else:
progname = os.path.basename(sys.argv[0])
filepath = os.path.join(
self.dest,
'{}.sqlite'.format(progname)
)
version = pkg_resources.get_distribution('awx').version
log = sqlite3.connect(filepath, timeout=3)
log.execute(
'CREATE TABLE IF NOT EXISTS queries ('
' id INTEGER PRIMARY KEY,'
' version TEXT,'
' pid INTEGER,'
' stamp DATETIME DEFAULT CURRENT_TIMESTAMP,'
' argv REAL,'
' time REAL,'
' sql TEXT,'
' explain TEXT,'
' bt TEXT'
');'
)
log.commit()
log.execute(
'INSERT INTO queries (pid, version, argv, time, sql, explain, bt) '
'VALUES (?, ?, ?, ?, ?, ?, ?);',
                (os.getpid(), version, ' '.join(sys.argv), seconds, sql, explain, bt)
)
log.commit()
def __len__(self):
return len(self.log)
def __iter__(self):
return iter(self.log)
def __getattr__(self, attr):
return getattr(self.log, attr)
class DatabaseWrapper(BaseDatabaseWrapper):
"""
This is a special subclass of Django's postgres DB backend which - based on
the value of a special flag in cache - captures slow queries and
writes profile and Python stack metadata to the disk.
"""
def __init__(self, *args, **kwargs):
super(DatabaseWrapper, self).__init__(*args, **kwargs)
# Django's default base wrapper implementation has `queries_log`
# which is a `collections.deque` that every query is appended to
#
# this line wraps the deque with a proxy that can capture each query
# and - if it's slow enough - record profiling metadata to the file
# system for debugging purposes
self.queries_log = RecordedQueryLog(self.queries_log, self)
@property
@memoize(ttl=1, cache=__loc__)
def force_debug_cursor(self):
# in Django's base DB implementation, `self.force_debug_cursor` is just
# a simple boolean, and this value is used to signal to Django that it
# should record queries into `self.queries_log` as they're executed (this
# is the same mechanism used by libraries like the django-debug-toolbar)
#
# in _this_ implementation, we represent it as a property which will
# check the cache for a special flag to be set (when the flag is set, it
# means we should start recording queries because somebody called
# `awx-manage profile_sql`)
#
# it's worth noting that this property is wrapped w/ @memoize because
# Django references this attribute _constantly_ (in particular, once
# per executed query); doing a cache.get() _at most_ once per
# second is a good enough window to detect when profiling is turned
# on/off by a system administrator
try:
threshold = cache.get('awx-profile-sql-threshold')
except Exception:
# if we can't reach the cache, just assume profiling's off
threshold = None
self.queries_log.threshold = threshold
return threshold is not None
@force_debug_cursor.setter
def force_debug_cursor(self, v):
return
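# Profiling is switched on and off via the cache key polled above; a
# hypothetical way to start capturing queries slower than two seconds:
#
# from django.core.cache import cache
# cache.set('awx-profile-sql-threshold', 2.0)
#
# (`awx-manage profile_sql`, mentioned in the comments above, is the intended
# front end for setting this flag.)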
| {
"content_hash": "ffc236cb3a12c11f98b5c3b19c6e70c6",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 86,
"avg_line_length": 36.70967741935484,
"alnum_prop": 0.5706502636203866,
"repo_name": "GoogleCloudPlatform/sap-deployment-automation",
"id": "2a449437ce15da542ae323b1c666ee9a6fad5785",
"size": "5690",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "third_party/github.com/ansible/awx/awx/main/db/profiled_pg/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
from functools import partial
import zlib
import json
from io import BytesIO
import sys
import platform
from electrum.plugins import BasePlugin, hook
from electrum_gui.qt.util import WaitingDialog, EnterButton, WindowModalDialog
from electrum.util import print_msg, print_error
from electrum.i18n import _
from PyQt4.QtGui import *
from PyQt4.QtCore import *
try:
import amodem.audio
import amodem.main
import amodem.config
print_error('Audio MODEM is available.')
amodem.log.addHandler(amodem.logging.StreamHandler(sys.stderr))
amodem.log.setLevel(amodem.logging.INFO)
except ImportError:
amodem = None
print_error('Audio MODEM is not found.')
class Plugin(BasePlugin):
def __init__(self, parent, config, name):
BasePlugin.__init__(self, parent, config, name)
if self.is_available():
self.modem_config = amodem.config.slowest()
self.library_name = {
'Linux': 'libportaudio.so'
}[platform.system()]
def is_available(self):
return amodem is not None
def requires_settings(self):
return True
def settings_widget(self, window):
return EnterButton(_('Settings'), partial(self.settings_dialog, window))
def settings_dialog(self, window):
d = WindowModalDialog(window, _("Audio Modem Settings"))
layout = QGridLayout(d)
layout.addWidget(QLabel(_('Bit rate [kbps]: ')), 0, 0)
bitrates = list(sorted(amodem.config.bitrates.keys()))
def _index_changed(index):
bitrate = bitrates[index]
self.modem_config = amodem.config.bitrates[bitrate]
combo = QComboBox()
combo.addItems(map(str, bitrates))
combo.currentIndexChanged.connect(_index_changed)
layout.addWidget(combo, 0, 1)
ok_button = QPushButton(_("OK"))
ok_button.clicked.connect(d.accept)
layout.addWidget(ok_button, 1, 1)
return bool(d.exec_())
@hook
def transaction_dialog(self, dialog):
b = QPushButton()
b.setIcon(QIcon(":icons/speaker.png"))
def handler():
blob = json.dumps(dialog.tx.as_dict())
self._send(parent=dialog, blob=blob)
b.clicked.connect(handler)
dialog.sharing_buttons.insert(-1, b)
@hook
def scan_text_edit(self, parent):
parent.addButton(':icons/microphone.png', partial(self._recv, parent),
_("Read from microphone"))
@hook
def show_text_edit(self, parent):
def handler():
blob = str(parent.toPlainText())
self._send(parent=parent, blob=blob)
parent.addButton(':icons/speaker.png', handler, _("Send to speaker"))
def _audio_interface(self):
interface = amodem.audio.Interface(config=self.modem_config)
return interface.load(self.library_name)
def _send(self, parent, blob):
def sender_thread():
with self._audio_interface() as interface:
src = BytesIO(blob)
dst = interface.player()
amodem.main.send(config=self.modem_config, src=src, dst=dst)
print_msg('Sending:', repr(blob))
blob = zlib.compress(blob)
kbps = self.modem_config.modem_bps / 1e3
msg = 'Sending to Audio MODEM ({0:.1f} kbps)...'.format(kbps)
WaitingDialog(parent, msg, sender_thread)
def _recv(self, parent):
def receiver_thread():
with self._audio_interface() as interface:
src = interface.recorder()
dst = BytesIO()
amodem.main.recv(config=self.modem_config, src=src, dst=dst)
return dst.getvalue()
def on_finished(blob):
if blob:
blob = zlib.decompress(blob)
print_msg('Received:', repr(blob))
parent.setText(blob)
kbps = self.modem_config.modem_bps / 1e3
msg = 'Receiving from Audio MODEM ({0:.1f} kbps)...'.format(kbps)
WaitingDialog(parent, msg, receiver_thread, on_finished)
| {
"content_hash": "4c98903ceb9e37c3ffdb7380651cb4ba",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 80,
"avg_line_length": 32.173228346456696,
"alnum_prop": 0.6120900636319139,
"repo_name": "aasiutin/electrum",
"id": "6e59f007ea04b497fc136672bd63e36dc6abddd5",
"size": "4086",
"binary": false,
"copies": "15",
"ref": "refs/heads/master",
"path": "plugins/audio_modem/qt.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "GLSL",
"bytes": "289"
},
{
"name": "HTML",
"bytes": "3867"
},
{
"name": "Makefile",
"bytes": "844"
},
{
"name": "NSIS",
"bytes": "6901"
},
{
"name": "PHP",
"bytes": "404"
},
{
"name": "Protocol Buffer",
"bytes": "2354"
},
{
"name": "Python",
"bytes": "1153867"
},
{
"name": "Shell",
"bytes": "6932"
}
],
"symlink_target": ""
} |
from setuptools import setup
setup(
name='sfdc-bulk',
packages=['sfdc_bulk'],
version='0.2',
description='Python client library for SFDC bulk API',
url='https://github.com/donaldrauscher/sfdc-bulk',
author='Donald Rauscher',
author_email='donald.rauscher@gmail.com',
license='MIT',
install_requires=[
'requests',
'simple_salesforce',
'pandas',
'pyyaml'
]
)
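# After installation (e.g. `pip install .` from the repository root) the
# package is importable as `sfdc_bulk`, per the `packages` entry above.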
| {
"content_hash": "3caf9c5718730e1661a89f90be0bd403",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 58,
"avg_line_length": 23.88888888888889,
"alnum_prop": 0.6139534883720931,
"repo_name": "donaldrauscher/sfdc-bulk",
"id": "ed5c1f0097b76ad6988c4b6100ac48272123d53f",
"size": "430",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13941"
}
],
"symlink_target": ""
} |
"""This example demonstrates how to handle partial failures.
To get ad groups, run get_ad_groups.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
Tags: AdGroupCriterionService.mutate
"""
__author__ = ('api.kwinter@gmail.com (Kevin Winter)',
              'Joseph DiLallo')
import re
from googleads import adwords
AD_GROUP_ID = 'INSERT_AD_GROUP_ID_HERE'
def main(client, ad_group_id):
# Enable partial failure.
client.partial_failure = True
# Initialize appropriate service.
ad_group_criterion_service = client.GetService(
'AdGroupCriterionService', version='v201406')
# Construct keyword ad group criteria objects.
keywords = [
{
'xsi_type': 'BiddableAdGroupCriterion',
'adGroupId': ad_group_id,
'criterion': {
'xsi_type': 'Keyword',
'matchType': 'BROAD',
'text': 'mars cruise'
}
},
{
'xsi_type': 'BiddableAdGroupCriterion',
'adGroupId': ad_group_id,
'criterion': {
'xsi_type': 'Keyword',
'matchType': 'BROAD',
'text': r'inv\@lid cruise'
}
},
{
'xsi_type': 'BiddableAdGroupCriterion',
'adGroupId': ad_group_id,
'criterion': {
'xsi_type': 'Keyword',
'matchType': 'BROAD',
'text': 'venus cruise'
}
},
{
'xsi_type': 'BiddableAdGroupCriterion',
'adGroupId': ad_group_id,
'criterion': {
'xsi_type': 'Keyword',
'matchType': 'BROAD',
'text': r'b\(a\)d keyword cruise'
}
}
]
# Construct operations and add ad group criteria.
operations = []
for keyword in keywords:
operations.append(
{
'operator': 'ADD',
'operand': keyword
})
result = ad_group_criterion_service.mutate(operations)
# Display results.
for criterion in result['value']:
if criterion['AdGroupCriterion.Type'] == 'BiddableAdGroupCriterion':
      print ('Keyword ad group criterion with ad group id \'%s\', '
             'criterion id \'%s\', text \'%s\', and match type \'%s\' was '
             'added.'
% (criterion['adGroupId'], criterion['criterion']['id'],
criterion['criterion']['text'],
criterion['criterion']['matchType']))
for error in result['partialFailureErrors']:
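    # fieldPath looks like 'operations[1].operand...'; the regex below extracts
    # the failing operation's index so the error can be matched to `keywords`.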
index = re.findall(r'operations\[(.*)\]\.', error['fieldPath'])
if index:
print ('Keyword ad group criterion with ad group id \'%s\' and text '
'\'%s\' triggered a failure for the following reason: \'%s\'.'
% (keywords[int(index[0])]['adGroupId'],
keywords[int(index[0])]['criterion']['text'],
error['errorString']))
else:
print 'The following failure has occurred: \'%s\'.' % error['errorString']
if __name__ == '__main__':
# Initialize client object.
adwords_client = adwords.AdWordsClient.LoadFromStorage()
main(adwords_client, AD_GROUP_ID)
| {
"content_hash": "048a87f8321482e0bba7d73d497f629c",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 80,
"avg_line_length": 30.48148148148148,
"alnum_prop": 0.5692588092345079,
"repo_name": "dietrichc/streamline-ppc-reports",
"id": "c801d34ef2c87c7964c44b9054d8cac7030111e9",
"size": "3910",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/adwords/v201406/error_handling/handle_partial_failures.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "492"
},
{
"name": "JavaScript",
"bytes": "504"
},
{
"name": "Python",
"bytes": "2235969"
}
],
"symlink_target": ""
} |
# author: @shivkrthakur
# Enter your code here. Read input from STDIN. Print output to STDOUT
input1 = int(raw_input().strip())
input2 = int(raw_input().strip())
print input1 + input2
print input1 - input2
print input1 * input2
| {
"content_hash": "c078450ed4decbb209e0b93f85e1933f",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 69,
"avg_line_length": 26,
"alnum_prop": 0.7136752136752137,
"repo_name": "shivkrthakur/HackerRankSolutions",
"id": "41231fd5c65220a9113c3d0f6c9856b061b9d647",
"size": "234",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Practice/AllDomains/Languages/Python/Introduction/ArithmeticOperators.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "175716"
},
{
"name": "Java",
"bytes": "15673"
},
{
"name": "JavaScript",
"bytes": "113116"
},
{
"name": "Python",
"bytes": "11480"
},
{
"name": "SQLPL",
"bytes": "1603"
}
],
"symlink_target": ""
} |
import sys
import os
# -- Aplus configuration --------------------------------------------------
course_open_date = '2021-01-01'
course_close_date = '2021-12-31'
aplusmeta_substitutions = {
'open01': '2016-01-02 12:00',
'close01': '2016-03-01 12:00'
}
questionnaire_default_submissions = 5
program_default_submissions = 10
ae_default_submissions = 0
use_wide_column = False
static_host = os.environ.get('STATIC_CONTENT_HOST', None)
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.append(os.path.abspath('a-plus-rst-tools'))
# -- General configuration ------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.todo',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'aplus_setup',
]
# Add any paths that contain templates here, relative to this directory.
#templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Sample'
copyright = '2021, NN'
author = 'NN'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build',]
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# A+ requires the aplus theme, which is defined in a-plus-rst-tools.
html_theme = 'aplus'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'use_wide_column': use_wide_column,
}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['a-plus-rst-tools/theme']
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
| {
"content_hash": "5e3bdf07185a47f3c671098bef3b1b30",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 79,
"avg_line_length": 32.93877551020408,
"alnum_prop": 0.6970260223048327,
"repo_name": "Aalto-LeTech/a-plus-rst-tools",
"id": "7d256c6afb8ac6338d0f33f4dca8e80fd9cd1b76",
"size": "3630",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "21135"
},
{
"name": "HTML",
"bytes": "10587"
},
{
"name": "JavaScript",
"bytes": "23024"
},
{
"name": "Python",
"bytes": "200090"
}
],
"symlink_target": ""
} |
"""
1D Forward Simulation for a Single Sounding
===========================================
Here we use the module *SimPEG.electromagnetics.frequency_domain_1d* to predict
frequency domain data for a single sounding over a 1D layered Earth.
In this tutorial, we focus on the following:
- Defining receivers, sources and the survey
- How to predict total field, secondary field or ppm data
- The units of the model and resulting data
- Defining and running the 1D simulation for a single sounding
Our survey geometry consists of a vertical magnetic dipole source
located 30 m above the Earth's surface. The receiver is offset
10 m horizontally from the source.
"""
#####################################################
# Import Modules
# --------------
#
import numpy as np
import os
from matplotlib import pyplot as plt
from discretize import TensorMesh
from SimPEG import maps
from SimPEG.electromagnetics import frequency_domain as fdem
from SimPEG.utils import plot_1d_layer_model
plt.rcParams.update({"font.size": 16})
write_output = False
# sphinx_gallery_thumbnail_number = 2
#####################################################################
# Create Survey
# -------------
#
# Here we demonstrate a general way to define the receivers, sources and survey.
# For this tutorial, the source is a vertical magnetic dipole that will be used
# to simulate data at a number of frequencies. The receivers measure real and
# imaginary ppm data.
#
# Frequencies being observed in Hz
frequencies = np.array([382, 1822, 7970, 35920, 130100], dtype=float)
# Define a list of receivers. The real and imaginary components are defined
# as separate receivers.
receiver_location = np.array([10.0, 0.0, 30.0])
receiver_orientation = "z" # "x", "y" or "z"
data_type = "ppm" # "secondary", "total" or "ppm"
receiver_list = []
receiver_list.append(
fdem.receivers.PointMagneticFieldSecondary(
receiver_location,
orientation=receiver_orientation,
data_type=data_type,
component="real",
)
)
receiver_list.append(
fdem.receivers.PointMagneticFieldSecondary(
receiver_location,
orientation=receiver_orientation,
data_type=data_type,
component="imag",
)
)
# Define the source list. A source must be defined for each frequency.
source_location = np.array([0.0, 0.0, 30.0])
source_orientation = "z" # "x", "y" or "z"
moment = 1.0 # dipole moment
source_list = []
for freq in frequencies:
source_list.append(
fdem.sources.MagDipole(
receiver_list=receiver_list,
frequency=freq,
location=source_location,
orientation=source_orientation,
moment=moment,
)
)
# Define a 1D FDEM survey
survey = fdem.survey.Survey(source_list)
###############################################
# Defining a 1D Layered Earth Model
# ---------------------------------
#
# Here, we define the layer thicknesses and electrical conductivities for our
# 1D simulation. If we have N layers, we define N electrical conductivity
# values and N-1 layer thicknesses. The lowest layer is assumed to extend to
# infinity. If the Earth is a halfspace, the thicknesses can be defined by
# an empty array, and the physical property values by an array of length 1.
#
# In this case, we have a more conductive layer within a background halfspace.
# This can be defined as a 3 layered Earth model.
#
# Physical properties
background_conductivity = 1e-1
layer_conductivity = 1e0
# Layer thicknesses
thicknesses = np.array([20.0, 40.0])
n_layer = len(thicknesses) + 1
# physical property model (conductivity model)
model = background_conductivity * np.ones(n_layer)
model[1] = layer_conductivity
# Define a mapping from model parameters to conductivities
model_mapping = maps.IdentityMap(nP=n_layer)
# Plot conductivity model
thicknesses_for_plotting = np.r_[thicknesses, 40.0]
mesh_for_plotting = TensorMesh([thicknesses_for_plotting])
fig = plt.figure(figsize=(6, 5))
ax = fig.add_axes([0.15, 0.15, 0.8, 0.75])
plot_1d_layer_model(thicknesses_for_plotting, model, ax=ax, show_layers=False)
plt.gca().invert_yaxis()
#######################################################################
# Define the Forward Simulation, Predict Data and Plot
# ----------------------------------------------------
#
# Here we define the simulation and predict the 1D FDEM sounding data.
# The simulation requires the user to define the survey, the layer thicknesses
# and a mapping from the model to the conductivities of the layers.
#
# When using the *SimPEG.electromagnetics.frequency_domain_1d* module,
# predicted data are organized by source, then by receiver, then by frequency.
#
# Define the simulation
simulation = fdem.Simulation1DLayered(
survey=survey,
thicknesses=thicknesses,
sigmaMap=model_mapping,
)
# Predict sounding data
dpred = simulation.dpred(model)
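# With one source per frequency and two receivers (real, imag) per source,
# dpred is ordered [re(f1), im(f1), re(f2), im(f2), ...]; hence the [0::2] and
# [1::2] strides used for plotting below.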
# Plot sounding data
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes([0.15, 0.15, 0.8, 0.75])
ax.semilogx(frequencies, np.abs(dpred[0::2]), "k-o", lw=3, ms=10)
ax.semilogx(frequencies, np.abs(dpred[1::2]), "k:o", lw=3, ms=10)
ax.set_xlabel("Frequency (Hz)")
ax.set_ylabel("|Hs/Hp| (ppm)")
ax.set_title("Secondary Magnetic Field as ppm")
ax.legend(["Real", "Imaginary"])
#######################################################################
# Optional: Export Data
# ---------------------
#
# Write the predicted data. Note that noise has been added.
#
if write_output:
dir_path = os.path.dirname(__file__).split(os.path.sep)
dir_path.extend(["outputs"])
dir_path = os.path.sep.join(dir_path) + os.path.sep
if not os.path.exists(dir_path):
os.mkdir(dir_path)
np.random.seed(222)
noise = 0.05 * np.abs(dpred) * np.random.rand(len(dpred))
dpred += noise
fname = dir_path + "em1dfm_data.txt"
np.savetxt(
fname,
        np.c_[frequencies, dpred[0::2], dpred[1::2]],
fmt="%.4e",
header="FREQUENCY HZ_REAL HZ_IMAG",
)
| {
"content_hash": "313de741afdad4dea879ab85ee8f7937",
"timestamp": "",
"source": "github",
"line_count": 195,
"max_line_length": 83,
"avg_line_length": 30.984615384615385,
"alnum_prop": 0.6519364448857994,
"repo_name": "simpeg/simpeg",
"id": "188c9d44681d350b7e77cbcc3295af1bab26910b",
"size": "6042",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tutorials/07-fdem/plot_fwd_1_em1dfm.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "685"
},
{
"name": "Python",
"bytes": "3476002"
}
],
"symlink_target": ""
} |
from unittest import main, TestCase
from simhash import Simhash, SimhashIndex
class TestSimhash(TestCase):
def test_value(self):
self.assertEqual(Simhash(['aaa', 'bbb']).value, 8637903533912358349)
def test_distance(self):
sh = Simhash('How are you? I AM fine. Thanks. And you?')
sh2 = Simhash('How old are you ? :-) i am fine. Thanks. And you?')
self.assertTrue(sh.distance(sh2) > 0)
sh3 = Simhash(sh2)
self.assertEqual(sh2.distance(sh3), 0)
self.assertNotEqual(Simhash('1').distance(Simhash('2')), 0)
def test_chinese(self):
self.maxDiff = None
sh1 = Simhash(u'你好 世界! 呼噜。')
sh2 = Simhash(u'你好,世界 呼噜')
sh4 = Simhash(u'How are you? I Am fine. ablar ablar xyz blar blar blar blar blar blar blar Thanks.')
sh5 = Simhash(u'How are you i am fine.ablar ablar xyz blar blar blar blar blar blar blar than')
sh6 = Simhash(u'How are you i am fine.ablar ablar xyz blar blar blar blar blar blar blar thank')
self.assertEqual(sh1.distance(sh2), 0)
self.assertTrue(sh4.distance(sh6) < 3)
self.assertTrue(sh5.distance(sh6) < 3)
def test_short(self):
shs = [Simhash(s).value for s in ('aa', 'aaa', 'aaaa', 'aaaab', 'aaaaabb', 'aaaaabbb')]
for i, sh1 in enumerate(shs):
for j, sh2 in enumerate(shs):
if i != j:
self.assertNotEqual(sh1, sh2)
def test_sparse_features(self):
# only test if sklearn is present
try:
from sklearn.feature_extraction.text import TfidfVectorizer
except ImportError:
return
data = [
'How are you? I Am fine. blar blar blar blar blar Thanks.',
'How are you i am fine. blar blar blar blar blar than',
'This is simhash test.',
'How are you i am fine. blar blar blar blar blar thank1'
]
vec = TfidfVectorizer()
D = vec.fit_transform(data)
voc = dict((i, w) for w, i in vec.vocabulary_.items())
        # Verify that the distance between data[0] and data[1] is smaller
        # than the distance between data[2] and data[3]
shs = []
for i in range(D.shape[0]):
Di = D.getrow(i)
            # features as a list of (token, weight) tuples
features = zip([voc[j] for j in Di.indices], Di.data)
shs.append(Simhash(features))
self.assertNotEqual(shs[0].distance(shs[1]), 0)
self.assertNotEqual(shs[2].distance(shs[3]), 0)
self.assertLess(shs[0].distance(shs[1]), shs[2].distance(shs[3]))
# features as token -> weight dicts
D0 = D.getrow(0)
dict_features = dict(zip([voc[j] for j in D0.indices], D0.data))
self.assertEqual(Simhash(dict_features).value, 17583409636488780916)
# the sparse and non-sparse features should obviously yield
# different results
self.assertNotEqual(Simhash(dict_features).value,
Simhash(data[0]).value)
class TestSimhashIndex(TestCase):
data = {
1: 'How are you? I Am fine. blar blar blar blar blar Thanks.',
2: 'How are you i am fine. blar blar blar blar blar than',
3: 'This is simhash test.',
4: 'How are you i am fine. blar blar blar blar blar thank1',
}
def setUp(self):
objs = [(str(k), Simhash(v)) for k, v in self.data.items()]
self.index = SimhashIndex(objs, k=10)
def test_get_near_dup(self):
s1 = Simhash(u'How are you i am fine.ablar ablar xyz blar blar blar blar blar blar blar thank')
dups = self.index.get_near_dups(s1)
self.assertEqual(len(dups), 3)
self.index.delete('1', Simhash(self.data[1]))
dups = self.index.get_near_dups(s1)
self.assertEqual(len(dups), 2)
self.index.delete('1', Simhash(self.data[1]))
dups = self.index.get_near_dups(s1)
self.assertEqual(len(dups), 2)
self.index.add('1', Simhash(self.data[1]))
dups = self.index.get_near_dups(s1)
self.assertEqual(len(dups), 3)
self.index.add('1', Simhash(self.data[1]))
dups = self.index.get_near_dups(s1)
self.assertEqual(len(dups), 3)
if __name__ == '__main__':
main()
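# A minimal usage sketch mirroring the cases above (text and k are illustrative):
#
# index = SimhashIndex([('1', Simhash('How are you? I Am fine.'))], k=10)
# print(index.get_near_dups(Simhash('How are you i am fine.')))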
| {
"content_hash": "8ca55284fbbf1f975d058e930bb51e41",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 108,
"avg_line_length": 36.144067796610166,
"alnum_prop": 0.5896834701055099,
"repo_name": "cjauvin/simhash",
"id": "2bd8839f2aa7d4b6583524aa4731b1bfd99c4f7d",
"size": "4327",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_simhash.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "795918"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division, print_function, unicode_literals
from collections import defaultdict
from ..nlp.stemmers import null_stemmer
from ._summarizer import AbstractSummarizer
from .edmundson_cue import EdmundsonCueMethod
from .edmundson_key import EdmundsonKeyMethod
from .edmundson_title import EdmundsonTitleMethod
from .edmundson_location import EdmundsonLocationMethod
_EMPTY_SET = frozenset()
class EdmundsonSummarizer(AbstractSummarizer):
_bonus_words = _EMPTY_SET
_stigma_words = _EMPTY_SET
_null_words = _EMPTY_SET
def __init__(self, stemmer=null_stemmer, cue_weight=1.0, key_weight=0.0,
title_weight=1.0, location_weight=1.0):
super(EdmundsonSummarizer, self).__init__(stemmer)
self._ensure_correct_weights(cue_weight, key_weight, title_weight,
location_weight)
self._cue_weight = float(cue_weight)
self._key_weight = float(key_weight)
self._title_weight = float(title_weight)
self._location_weight = float(location_weight)
def _ensure_correct_weights(self, *weights):
for w in weights:
if w < 0.0:
raise ValueError("Negative wights are not allowed.")
@property
def bonus_words(self):
return self._bonus_words
@bonus_words.setter
def bonus_words(self, collection):
self._bonus_words = frozenset(map(self.stem_word, collection))
@property
def stigma_words(self):
return self._stigma_words
@stigma_words.setter
def stigma_words(self, collection):
self._stigma_words = frozenset(map(self.stem_word, collection))
@property
def null_words(self):
return self._null_words
@null_words.setter
def null_words(self, collection):
self._null_words = frozenset(map(self.stem_word, collection))
def __call__(self, document, sentences_count):
ratings = defaultdict(int)
if self._cue_weight > 0.0:
method = self._build_cue_method_instance()
ratings = self._update_ratings(ratings, method.rate_sentences(document))
if self._key_weight > 0.0:
method = self._build_key_method_instance()
ratings = self._update_ratings(ratings, method.rate_sentences(document))
if self._title_weight > 0.0:
method = self._build_title_method_instance()
ratings = self._update_ratings(ratings, method.rate_sentences(document))
if self._location_weight > 0.0:
method = self._build_location_method_instance()
ratings = self._update_ratings(ratings, method.rate_sentences(document))
return self._get_best_sentences(document.sentences, sentences_count, ratings)
def _update_ratings(self, ratings, new_ratings):
assert len(ratings) == 0 or len(ratings) == len(new_ratings)
for sentence, rating in new_ratings.items():
ratings[sentence] += rating
return ratings
    def cue_method(self, document, sentences_count, bonus_word_value=1, stigma_word_value=1):
        summarization_method = self._build_cue_method_instance()
        return summarization_method(document, sentences_count, bonus_word_value,
                                    stigma_word_value)
def _build_cue_method_instance(self):
self.__check_bonus_words()
self.__check_stigma_words()
return EdmundsonCueMethod(self._stemmer, self._bonus_words, self._stigma_words)
def key_method(self, document, sentences_count, weight=0.5):
summarization_method = self._build_key_method_instance()
return summarization_method(document, sentences_count, weight)
def _build_key_method_instance(self):
self.__check_bonus_words()
return EdmundsonKeyMethod(self._stemmer, self._bonus_words)
def title_method(self, document, sentences_count):
summarization_method = self._build_title_method_instance()
return summarization_method(document, sentences_count)
def _build_title_method_instance(self):
self.__check_null_words()
return EdmundsonTitleMethod(self._stemmer, self._null_words)
def location_method(self, document, sentences_count, w_h=1, w_p1=1, w_p2=1, w_s1=1, w_s2=1):
summarization_method = self._build_location_method_instance()
return summarization_method(document, sentences_count, w_h, w_p1, w_p2, w_s1, w_s2)
def _build_location_method_instance(self):
self.__check_null_words()
return EdmundsonLocationMethod(self._stemmer, self._null_words)
def __check_bonus_words(self):
if not self._bonus_words:
raise ValueError("Set of bonus words is empty. Please set attribute 'bonus_words' with collection of words.")
def __check_stigma_words(self):
if not self._stigma_words:
raise ValueError("Set of stigma words is empty. Please set attribute 'stigma_words' with collection of words.")
def __check_null_words(self):
if not self._null_words:
raise ValueError("Set of null words is empty. Please set attribute 'null_words' with collection of words.")
| {
"content_hash": "1782d2bb9fcfaf8a07fcf6cd28436b9a",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 123,
"avg_line_length": 37.93382352941177,
"alnum_prop": 0.6673773987206824,
"repo_name": "WangWenjun559/Weiss",
"id": "dad164acf71663c1c485ca15a6ab745c369aa6e3",
"size": "5183",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "summary/sumy/summarizers/edmundson.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "385829"
},
{
"name": "C++",
"bytes": "139482"
},
{
"name": "HTML",
"bytes": "1778"
},
{
"name": "Java",
"bytes": "222411"
},
{
"name": "JavaScript",
"bytes": "1293"
},
{
"name": "Python",
"bytes": "5454801"
},
{
"name": "Shell",
"bytes": "5102"
}
],
"symlink_target": ""
} |
import os
import logging
import yaml
from schema import Use, Schema, SchemaError, Optional
class InvalidConfig(Exception):
pass
class MissingConfig(Exception):
pass
default_config = {
'debug': False
}
schema = Schema({
'tracked_repositories': [Use(str)],
'unstable_tests': [Use(str)],
'api_key': Use(str),
'max_attempts': Use(int),
'debug': bool
})
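# An illustrative vakautin.yaml satisfying the schema above (all values are
# placeholders):
#
# tracked_repositories:
#   - owner/repo
# unstable_tests:
#   - tests/test_flaky.py::test_sometimes_fails
# api_key: "..."
# max_attempts: 3
# debug: false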
def get_config_path():
current_directory = os.getcwd()
while True:
try:
with open(
os.path.join(current_directory, 'vakautin.yaml'),
'rb'
) as fp:
return os.path.join(current_directory, 'vakautin.yaml')
except IOError:
pass
current_directory = os.path.abspath(
os.path.join(current_directory, '..')
)
if current_directory == '/':
return None
def load_config():
config = {}
current_directory = os.getcwd()
while True:
try:
with open(
os.path.join(current_directory, 'vakautin.yaml'),
'rb'
) as fp:
config = yaml.safe_load(fp)
break
except IOError:
pass
current_directory = os.path.abspath(
os.path.join(current_directory, '..')
)
if current_directory == '/':
break
if not config:
raise MissingConfig()
for k, v in default_config.items():
if k not in config:
config[k] = v
try:
return schema.validate(config)
except SchemaError as e:
raise InvalidConfig(e)
def save_config(config):
logging.getLogger(__name__).debug('save_config()')
with open(get_config_path(), "w") as fp:
yaml.dump(config, fp)
| {
"content_hash": "8319caf7fe722834be997626c3e9079c",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 71,
"avg_line_length": 21.951219512195124,
"alnum_prop": 0.5383333333333333,
"repo_name": "fastmonkeys/vakautin",
"id": "5aebb12e8ffab4d08321d16ddcba28d765d6e21c",
"size": "1800",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vakautin/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7648"
}
],
"symlink_target": ""
} |
from pyramid.renderers import get_renderer
from pyramid.view import view_config
from . import get_employee_info
from . import get_report_info
from . import list_employees
def get_main_template(request):
main_template = get_renderer('templates/main.pt')
return main_template.implementation()
@view_config(route_name='home', renderer='templates/home.pt')
def home_page(request):
return {}
@view_config(route_name='employees', renderer='templates/employees.pt')
def show_employees(request):
return {'employees': list_employees()}
def fixup_report(report):
if report['status'] == 'paid':
report['status'] = 'paid, check #%s' % report.pop('memo')
elif report['status'] == 'rejected':
report['status'] = 'rejected, #%s' % report.pop('memo')
return report
@view_config(route_name='employee', renderer='templates/employee.pt')
def show_employee(request):
employee_id = request.matchdict['employee_id']
info = get_employee_info(employee_id)
info['reports'] = [fixup_report(report) for report in info['reports']]
return info
@view_config(route_name='report', renderer='templates/report.pt')
def show_report(request):
employee_id = request.matchdict['employee_id']
report_id = request.matchdict['report_id']
return {'report':
fixup_report(get_report_info(employee_id, report_id))}
def includeme(config):
config.add_request_method(callable=get_main_template,
name='main_template',
property=True,
reify=True,
)
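# Because `main_template` is registered above as a reified request property,
# templates can reference it directly; a hypothetical Chameleon usage
# (the macro name is illustrative):
#
#   <html metal:use-macro="request.main_template.macros['layout']">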
| {
"content_hash": "648fecf64599c6cb594832e1a2831df2",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 74,
"avg_line_length": 32.34,
"alnum_prop": 0.647495361781076,
"repo_name": "GoogleCloudPlatform/google-cloud-python-expenses-demo",
"id": "71ad5239b29987b56722446bdf24f47ddf3ab237",
"size": "1617",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gcloud_expenses/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2800"
},
{
"name": "Python",
"bytes": "51139"
},
{
"name": "Shell",
"bytes": "1088"
}
],
"symlink_target": ""
} |
import sys
import requests
from xml.etree import cElementTree as ElementTree # for zillow API
from .pyzillowerrors import ZillowError, ZillowFail, ZillowNoResults
from . import __version__
class ZillowWrapper(object):
"""
"""
def __init__(self, api_key=None):
"""
"""
self.api_key = api_key
def get_deep_search_results(self, address, zipcode):
"""
GetDeepSearchResults API
"""
url = 'http://www.zillow.com/webservice/GetDeepSearchResults.htm'
params = {
'address': address,
'citystatezip': zipcode,
'zws-id': self.api_key
}
return self.get_data(url, params)
def get_updated_property_details(self, zpid):
"""
GetUpdatedPropertyDetails API
"""
url = 'http://www.zillow.com/webservice/GetUpdatedPropertyDetails.htm'
params = {
'zpid': zpid,
'zws-id': self.api_key
}
return self.get_data(url, params)
def get_data(self, url, params):
"""
"""
try:
request = requests.get(
url=url,
params=params,
headers={
'User-Agent': ''.join([
'pyzillow/',
__version__,
' (Python)'
])
})
except (
requests.exceptions.ConnectionError,
requests.exceptions.TooManyRedirects,
requests.exceptions.Timeout):
raise ZillowFail
try:
request.raise_for_status()
except requests.exceptions.HTTPError:
raise ZillowFail
try:
response = ElementTree.fromstring(request.text)
except ElementTree.ParseError:
print (
"Zillow response is not a valid XML (%s)" % (
params['address']
)
)
raise ZillowFail
        if response.findall('message/code')[0].text != '0':
raise ZillowError(int(response.findall('message/code')[0].text))
else:
if not response.findall('response'):
print (
"Zillow returned no results for (%s)" % (
params['address']
)
)
raise ZillowNoResults
return response
class ZillowResults(object):
"""
"""
attribute_mapping = {}
def get_attr(self, attr):
"""
"""
try:
return self.data.find(self.attribute_mapping[attr]).text
except AttributeError:
return None
def __unicode__(self):
return self.zillow_id
if sys.version_info[0] >= 3: # Python 3
def __str__(self):
return self.__unicode__()
else: # Python 2
def __str__(self):
return self.__unicode__().encode('utf8')
@property
def area_unit(self):
"""
lotSizeSqFt
"""
return u'SqFt'
@property
def last_sold_price_currency(self):
"""
lastSoldPrice currency
"""
return self.data.find(
self.attribute_mapping['last_sold_price']).attrib["currency"]
class GetDeepSearchResults(ZillowResults):
"""
"""
attribute_mapping = {
'zillow_id': 'result/zpid',
'home_type': 'result/useCode',
'home_detail_link': 'result/links/homedetails',
'graph_data_link': 'result/links/graphsanddata',
'map_this_home_link': 'result/links/mapthishome',
'latitude': 'result/address/latitude',
'longitude': 'result/address/longitude',
'tax_year': 'result/taxAssessmentYear',
'tax_value': 'result/taxAssessment',
'year_built': 'result/yearBuilt',
'property_size': 'result/lotSizeSqFt',
'home_size': 'result/finishedSqFt',
'bathrooms': 'result/bathrooms',
'bedrooms': 'result/bedrooms',
'last_sold_date': 'result/lastSoldDate',
'last_sold_price': 'result/lastSoldPrice',
'zestimate_amount': 'result/zestimate/amount',
'zestimate_last_updated': 'result/zestimate/last-updated',
'zestimate_value_change': 'result/zestimate/valueChange',
'zestimate_valuation_range_high':
'result/zestimate/valuationRange/high',
'zestimate_valuationRange_low': 'result/zestimate/valuationRange/low',
'zestimate_percentile': 'result/zestimate/percentile',
}
def __init__(self, data, *args, **kwargs):
"""
        Creates an instance of GetDeepSearchResults from the provided XML data
"""
self.data = data.findall('response/results')[0]
for attr in self.attribute_mapping.__iter__():
try:
self.__setattr__(attr, self.get_attr(attr))
except AttributeError:
print ('AttributeError with %s' % attr)
class GetUpdatedPropertyDetails(ZillowResults):
"""
"""
attribute_mapping = {
# attributes in common with GetDeepSearchResults
'zillow_id': 'zpid',
'home_type': 'editedFacts/useCode',
'home_detail_link': 'links/homeDetails',
'graph_data_link': '',
'map_this_home_link': '',
'latitude': 'address/latitude',
'longitude': 'address/longitude',
'tax_year': '',
'tax_value': '',
'year_built': 'editedFacts/yearBuilt',
'property_size': 'editedFacts/lotSizeSqFt',
'home_size': 'editedFacts/finishedSqFt',
'bathrooms': 'editedFacts/bathrooms',
'bedrooms': 'editedFacts/bedrooms',
'last_sold_date': '',
'last_sold_price': '',
# new attributes in GetUpdatedPropertyDetails
'photo_gallery': 'links/photoGallery',
'home_info': 'links/homeInfo',
'year_updated': 'editedFacts/yearUpdated',
'floor_material': 'editedFacts/floorCovering',
'num_floors': 'editedFacts/numFloors',
'basement': 'editedFacts/basement',
'roof': 'editedFacts/roof',
'view': 'editedFacts/view',
'parking_type': 'editedFacts/parkingType',
'heating_sources': 'editedFacts/heatingSources',
'heating_system': 'editedFacts/heatingSystem',
'rooms': 'editedFacts/rooms',
'num_rooms': 'editedFacts/numRooms',
'appliances': 'editedFacts/appliances',
'neighborhood': 'neighborhood',
'school_district': 'schoolDistrict',
'home_description': 'homeDescription',
}
def __init__(self, data, *args, **kwargs):
"""
        Creates an instance of GetUpdatedPropertyDetails from the provided XML data
"""
self.data = data.findall('response')[0]
for attr in self.attribute_mapping.__iter__():
try:
self.__setattr__(attr, self.get_attr(attr))
except AttributeError:
print ('AttributeError with %s' % attr)
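# A minimal usage sketch (the ZWS-ID, address and zipcode are placeholders;
# attribute names follow the mappings above):
#
# zillow = ZillowWrapper('YOUR_ZWS_ID')
# data = zillow.get_deep_search_results('2114 Bigelow Ave', '98109')
# result = GetDeepSearchResults(data)
# print(result.zestimate_amount)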
| {
"content_hash": "2f2d5eed3f5471733ef1eb968199d135",
"timestamp": "",
"source": "github",
"line_count": 226,
"max_line_length": 78,
"avg_line_length": 31.154867256637168,
"alnum_prop": 0.5477915068882261,
"repo_name": "kbussell/pyzillow",
"id": "599b15f50c8c494a4ec7cf4f3075197ad76a4fa7",
"size": "7041",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyzillow/pyzillow.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1298"
},
{
"name": "Python",
"bytes": "25286"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import numpy as np
import codecs
# Set up stdout to emit UTF-8 (Python 2 default-encoding workaround)
import sys
reload(sys)
sys.setdefaultencoding('utf8')
sys.stdout = codecs.getwriter("utf-8")(sys.stdout)
line_seps = [u'0x000A', u'0x000B', u'0x000C', u'0x000D', u'0x001C', u'0x001D', u'0x001E', u'0x0085', u'0x2028', u'0x2029']
# Returns the number of lines of a given file
def file_len(fname):
with open(fname) as f:
for i, l in enumerate(f):
pass
return i + 1
# Returns set of n line #s randomly selected from the file
def getRandLineNums(fname, n):
    totalLines = np.arange(file_len(fname))
linesToPull = np.random.choice(totalLines, n, False)
return set(linesToPull)
# Grab command line args (input, output, size)
input = sys.argv[1]
output = sys.argv[2]
n = int(sys.argv[3]) if len(sys.argv) > 3 else file_len(input)
# Pull out n random lines from the input file and write them to the output file
lineNums = getRandLineNums(input, n)
with open(output, 'w') as output:
with open(input) as input:
for i, l in enumerate(input):
if i in lineNums:
# l = l.decode('unicode-escape')
# Handles weird occasional unicode newlines that are created in parsing
new = ''.join(l.splitlines()) + '\n'
output.write(new)
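# Hypothetical invocation -- sample 100 random lines from corpus.txt:
#   python sample-lines.py corpus.txt sample.txt 100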
| {
"content_hash": "631b504d18e7d3d1fbfc17d67129ec22",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 122,
"avg_line_length": 33.525,
"alnum_prop": 0.6562266964951529,
"repo_name": "kahliloppenheimer/Web-page-classification",
"id": "53f9afda02552ea575c8a04c6bc0541e63ec2a42",
"size": "1451",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sample-lines.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9924"
},
{
"name": "Shell",
"bytes": "10156"
}
],
"symlink_target": ""
} |
import warnings
from wtforms import fields, validators
from sqlalchemy import Boolean, Column
from flask_admin import form
from flask_admin.model.form import (converts, ModelConverterBase,
InlineModelConverterBase, FieldPlaceholder)
from flask_admin.model.fields import AjaxSelectField, AjaxSelectMultipleField
from flask_admin.model.helpers import prettify_name
from flask_admin._backwards import get_property
from flask_admin._compat import iteritems
from .validators import Unique
from .fields import (QuerySelectField, QuerySelectMultipleField,
InlineModelFormList, InlineHstoreList, HstoreForm)
from flask_admin.model.fields import InlineFormField
from .tools import (has_multiple_pks, filter_foreign_columns,
get_field_with_path)
from .ajax import create_ajax_loader
class AdminModelConverter(ModelConverterBase):
"""
SQLAlchemy model to form converter
"""
def __init__(self, session, view):
super(AdminModelConverter, self).__init__()
self.session = session
self.view = view
def _get_label(self, name, field_args):
"""
Label for field name. If it is not specified explicitly,
then the views prettify_name method is used to find it.
:param field_args:
Dictionary with additional field arguments
"""
if 'label' in field_args:
return field_args['label']
column_labels = get_property(self.view, 'column_labels', 'rename_columns')
if column_labels:
return column_labels.get(name)
prettify_override = getattr(self.view, 'prettify_name', None)
if prettify_override:
return prettify_override(name)
return prettify_name(name)
def _get_description(self, name, field_args):
if 'description' in field_args:
return field_args['description']
column_descriptions = getattr(self.view, 'column_descriptions', None)
if column_descriptions:
return column_descriptions.get(name)
def _get_field_override(self, name):
form_overrides = getattr(self.view, 'form_overrides', None)
if form_overrides:
return form_overrides.get(name)
return None
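    # `form_overrides` is a dict set on the view, mapping a field name to the
    # WTForms field class to use instead of the converted default; an
    # illustrative setting: form_overrides = {'notes': fields.TextAreaField}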
def _model_select_field(self, prop, multiple, remote_model, **kwargs):
loader = getattr(self.view, '_form_ajax_refs', {}).get(prop.key)
if loader:
if multiple:
return AjaxSelectMultipleField(loader, **kwargs)
else:
return AjaxSelectField(loader, **kwargs)
if 'query_factory' not in kwargs:
kwargs['query_factory'] = lambda: self.session.query(remote_model)
if multiple:
return QuerySelectMultipleField(**kwargs)
else:
return QuerySelectField(**kwargs)
def _convert_relation(self, prop, kwargs):
# Check if relation is specified
form_columns = getattr(self.view, 'form_columns', None)
if form_columns and prop.key not in form_columns:
return None
remote_model = prop.mapper.class_
column = prop.local_remote_pairs[0][0]
        # If this relation points to a local column that's not a foreign key,
        # assume it is a backref and use the remote column's data
if not column.foreign_keys:
column = prop.local_remote_pairs[0][1]
kwargs['label'] = self._get_label(prop.key, kwargs)
kwargs['description'] = self._get_description(prop.key, kwargs)
# determine optional/required, or respect existing
requirement_options = (validators.Optional, validators.InputRequired)
if not any(isinstance(v, requirement_options) for v in kwargs['validators']):
if column.nullable or prop.direction.name != 'MANYTOONE':
kwargs['validators'].append(validators.Optional())
else:
kwargs['validators'].append(validators.InputRequired())
# Contribute model-related parameters
if 'allow_blank' not in kwargs:
kwargs['allow_blank'] = column.nullable
# Override field type if necessary
override = self._get_field_override(prop.key)
if override:
return override(**kwargs)
if prop.direction.name == 'MANYTOONE' or not prop.uselist:
return self._model_select_field(prop, False, remote_model, **kwargs)
elif prop.direction.name == 'ONETOMANY':
return self._model_select_field(prop, True, remote_model, **kwargs)
elif prop.direction.name == 'MANYTOMANY':
return self._model_select_field(prop, True, remote_model, **kwargs)
def convert(self, model, mapper, prop, field_args, hidden_pk):
# Properly handle forced fields
if isinstance(prop, FieldPlaceholder):
return form.recreate_field(prop.field)
kwargs = {
'validators': [],
'filters': []
}
if field_args:
kwargs.update(field_args)
if kwargs['validators']:
# Create a copy of the list since we will be modifying it.
kwargs['validators'] = list(kwargs['validators'])
# Check if it is relation or property
if hasattr(prop, 'direction'):
return self._convert_relation(prop, kwargs)
elif hasattr(prop, 'columns'): # Ignore pk/fk
# Check if more than one column mapped to the property
if len(prop.columns) > 1:
columns = filter_foreign_columns(model.__table__, prop.columns)
if len(columns) > 1:
warnings.warn('Can not convert multiple-column properties (%s.%s)' % (model, prop.key))
return None
column = columns[0]
else:
# Grab column
column = prop.columns[0]
form_columns = getattr(self.view, 'form_columns', None) or ()
# Do not display foreign keys - use relations, except when explicitly instructed
if column.foreign_keys and prop.key not in form_columns:
return None
# Only display "real" columns
if not isinstance(column, Column):
return None
unique = False
if column.primary_key:
if hidden_pk:
# If requested to add hidden field, show it
return fields.HiddenField()
else:
# By default, don't show primary keys either
# If PK is not explicitly allowed, ignore it
if prop.key not in form_columns:
return None
                    # The current Unique validator does not work with multi-column PKs
if not has_multiple_pks(model):
kwargs['validators'].append(Unique(self.session,
model,
column))
unique = True
# If field is unique, validate it
if column.unique and not unique:
kwargs['validators'].append(Unique(self.session,
model,
column))
optional_types = getattr(self.view, 'form_optional_types', (Boolean,))
if (
not column.nullable
and not isinstance(column.type, optional_types)
and not column.default
and not column.server_default
):
kwargs['validators'].append(validators.InputRequired())
# Apply label and description if it isn't inline form field
if self.view.model == mapper.class_:
kwargs['label'] = self._get_label(prop.key, kwargs)
kwargs['description'] = self._get_description(prop.key, kwargs)
# Figure out default value
default = getattr(column, 'default', None)
value = None
if default is not None:
value = getattr(default, 'arg', None)
if value is not None:
if getattr(default, 'is_callable', False):
value = lambda: default.arg(None)
else:
if not getattr(default, 'is_scalar', True):
value = None
if value is not None:
kwargs['default'] = value
# Check nullable
if column.nullable:
kwargs['validators'].append(validators.Optional())
# Override field type if necessary
override = self._get_field_override(prop.key)
if override:
return override(**kwargs)
# Check choices
form_choices = getattr(self.view, 'form_choices', None)
if mapper.class_ == self.view.model and form_choices:
choices = form_choices.get(column.key)
if choices:
return form.Select2Field(
choices=choices,
allow_blank=column.nullable,
**kwargs
)
# Run converter
converter = self.get_converter(column)
if converter is None:
return None
return converter(model=model, mapper=mapper, prop=prop,
column=column, field_args=kwargs)
return None
@classmethod
def _string_common(cls, column, field_args, **extra):
if isinstance(column.type.length, int) and column.type.length:
field_args['validators'].append(validators.Length(max=column.type.length))
@converts('String') # includes VARCHAR, CHAR, and Unicode
def conv_String(self, column, field_args, **extra):
if hasattr(column.type, 'enums'):
accepted_values = list(column.type.enums)
field_args['choices'] = [(f, f) for f in column.type.enums]
if column.nullable:
field_args['allow_blank'] = column.nullable
accepted_values.append(None)
field_args['validators'].append(validators.AnyOf(accepted_values))
return form.Select2Field(**field_args)
if column.nullable:
filters = field_args.get('filters', [])
filters.append(lambda x: x or None)
field_args['filters'] = filters
self._string_common(column=column, field_args=field_args, **extra)
return fields.StringField(**field_args)
@converts('Text', 'LargeBinary', 'Binary') # includes UnicodeText
def conv_Text(self, field_args, **extra):
self._string_common(field_args=field_args, **extra)
return fields.TextAreaField(**field_args)
@converts('Boolean', 'sqlalchemy.dialects.mssql.base.BIT')
def conv_Boolean(self, field_args, **extra):
return fields.BooleanField(**field_args)
@converts('Date')
def convert_date(self, field_args, **extra):
field_args['widget'] = form.DatePickerWidget()
return fields.DateField(**field_args)
@converts('DateTime') # includes TIMESTAMP
def convert_datetime(self, field_args, **extra):
return form.DateTimeField(**field_args)
@converts('Time')
def convert_time(self, field_args, **extra):
return form.TimeField(**field_args)
@converts('Integer') # includes BigInteger and SmallInteger
def handle_integer_types(self, column, field_args, **extra):
unsigned = getattr(column.type, 'unsigned', False)
if unsigned:
field_args['validators'].append(validators.NumberRange(min=0))
return fields.IntegerField(**field_args)
@converts('Numeric') # includes DECIMAL, Float/FLOAT, REAL, and DOUBLE
def handle_decimal_types(self, column, field_args, **extra):
# override default decimal places limit, use database defaults instead
field_args.setdefault('places', None)
return fields.DecimalField(**field_args)
@converts('sqlalchemy.dialects.postgresql.base.INET')
def conv_PGInet(self, field_args, **extra):
field_args.setdefault('label', u'IP Address')
field_args['validators'].append(validators.IPAddress())
return fields.StringField(**field_args)
@converts('sqlalchemy.dialects.postgresql.base.MACADDR')
def conv_PGMacaddr(self, field_args, **extra):
field_args.setdefault('label', u'MAC Address')
field_args['validators'].append(validators.MacAddress())
return fields.StringField(**field_args)
@converts('sqlalchemy.dialects.postgresql.base.UUID')
def conv_PGUuid(self, field_args, **extra):
field_args.setdefault('label', u'UUID')
field_args['validators'].append(validators.UUID())
return fields.StringField(**field_args)
@converts('sqlalchemy.dialects.postgresql.base.ARRAY')
def conv_ARRAY(self, field_args, **extra):
return form.Select2TagsField(save_as_list=True, **field_args)
@converts('HSTORE')
def conv_HSTORE(self, field_args, **extra):
inner_form = field_args.pop('form', HstoreForm)
return InlineHstoreList(InlineFormField(inner_form), **field_args)
@converts('JSON')
def convert_JSON(self, field_args, **extra):
return form.JSONField(**field_args)
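# A minimal sketch of extending the converter above for a custom column type
# (CustomType and the subclass name are hypothetical; this assumes the
# enclosing converter class is flask-admin's AdminModelConverter). The
# @converts decorator matches on the type name, as in the built-in
# conversions above:
#
#     class MyModelConverter(AdminModelConverter):
#         @converts('myapp.types.CustomType')
#         def conv_Custom(self, field_args, **extra):
#             return fields.StringField(**field_args)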
def _resolve_prop(prop):
"""
Resolve proxied property
:param prop:
Property to resolve
"""
# Try to see if it is proxied property
if hasattr(prop, '_proxied_property'):
return prop._proxied_property
return prop
# Get list of fields and generate form
def get_form(model, converter,
base_class=form.BaseForm,
only=None,
exclude=None,
field_args=None,
hidden_pk=False,
ignore_hidden=True,
extra_fields=None):
"""
Generate form from the model.
:param model:
Model to generate form from
:param converter:
Converter class to use
:param base_class:
Base form class
:param only:
Include fields
:param exclude:
Exclude fields
:param field_args:
Dictionary with additional field arguments
:param hidden_pk:
Generate hidden field with model primary key or not
    :param ignore_hidden:
        If set to True (default), will ignore properties that start with underscore
    :param extra_fields:
        Dictionary of additional fields to contribute to the generated form
    """
# TODO: Support new 0.8 API
if not hasattr(model, '_sa_class_manager'):
raise TypeError('model must be a sqlalchemy mapped model')
mapper = model._sa_class_manager.mapper
field_args = field_args or {}
properties = ((p.key, p) for p in mapper.iterate_properties)
if only:
def find(name):
# If field is in extra_fields, it has higher priority
if extra_fields and name in extra_fields:
return name, FieldPlaceholder(extra_fields[name])
column, path = get_field_with_path(model, name)
if path and not hasattr(column.prop, 'direction'):
raise Exception("form column is located in another table and "
"requires inline_models: {0}".format(name))
            if column is not None and hasattr(column, 'property'):
                return column.key, column.property
raise ValueError('Invalid model property name %s.%s' % (model, name))
# Filter properties while maintaining property order in 'only' list
properties = (find(x) for x in only)
elif exclude:
properties = (x for x in properties if x[0] not in exclude)
field_dict = {}
for name, p in properties:
# Ignore protected properties
if ignore_hidden and name.startswith('_'):
continue
prop = _resolve_prop(p)
field = converter.convert(model, mapper, prop, field_args.get(name), hidden_pk)
if field is not None:
field_dict[name] = field
# Contribute extra fields
if not only and extra_fields:
for name, field in iteritems(extra_fields):
field_dict[name] = form.recreate_field(field)
return type(model.__name__ + 'Form', (base_class, ), field_dict)
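# Usage sketch for get_form (hypothetical User model; get_form is normally
# invoked for you by the SQLAlchemy ModelView via its model_form_converter):
#
#     converter = view.model_form_converter(session, view)
#     UserForm = get_form(User, converter,
#                         only=['name', 'email'],
#                         field_args={'name': {'label': 'Full name'}})
#     form = UserForm(request.form)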
class InlineModelConverter(InlineModelConverterBase):
"""
Inline model form helper.
"""
inline_field_list_type = InlineModelFormList
"""
Used field list type.
If you want to do some custom rendering of inline field lists,
you can create your own wtforms field and use it instead
"""
def __init__(self, session, view, model_converter):
"""
Constructor.
:param session:
SQLAlchemy session
:param view:
Flask-Admin view object
:param model_converter:
Model converter class. Will be automatically instantiated with
appropriate `InlineFormAdmin` instance.
"""
super(InlineModelConverter, self).__init__(view)
self.session = session
self.model_converter = model_converter
def get_info(self, p):
info = super(InlineModelConverter, self).get_info(p)
# Special case for model instances
if info is None:
if hasattr(p, '_sa_class_manager'):
return self.form_admin_class(p)
else:
model = getattr(p, 'model', None)
if model is None:
raise Exception('Unknown inline model admin: %s' % repr(p))
attrs = dict()
for attr in dir(p):
if not attr.startswith('_') and attr != 'model':
attrs[attr] = getattr(p, attr)
                info = self.form_admin_class(model, **attrs)
# Resolve AJAX FKs
info._form_ajax_refs = self.process_ajax_refs(info)
return info
def process_ajax_refs(self, info):
refs = getattr(info, 'form_ajax_refs', None)
result = {}
if refs:
for name, opts in iteritems(refs):
new_name = '%s-%s' % (info.model.__name__.lower(), name)
loader = None
if isinstance(opts, dict):
loader = create_ajax_loader(info.model, self.session, new_name, name, opts)
else:
loader = opts
result[name] = loader
self.view._form_ajax_refs[new_name] = loader
return result
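    # Sketch of the form_ajax_refs option resolved above (hypothetical inline
    # admin): dict options become AJAX loaders via create_ajax_loader, while
    # anything else is assumed to already be a loader instance.
    #
    #     class PostInline(InlineFormAdmin):
    #         form_ajax_refs = {
    #             'author': {'fields': ('name', 'email')}
    #         }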
def contribute(self, model, form_class, inline_model):
"""
Generate form fields for inline forms and contribute them to
the `form_class`
:param converter:
ModelConverterBase instance
:param session:
SQLAlchemy session
:param model:
Model class
:param form_class:
Form to add properties to
:param inline_model:
Inline model. Can be one of:
- ``tuple``, first value is related model instance,
second is dictionary with options
- ``InlineFormAdmin`` instance
- Model class
:return:
Form class
"""
mapper = model._sa_class_manager.mapper
info = self.get_info(inline_model)
# Find property from target model to current model
# Use the base mapper to support inheritance
target_mapper = info.model._sa_class_manager.mapper.base_mapper
reverse_prop = None
for prop in target_mapper.iterate_properties:
if hasattr(prop, 'direction') and prop.direction.name in ('MANYTOONE', 'MANYTOMANY'):
if issubclass(model, prop.mapper.class_):
reverse_prop = prop
break
else:
raise Exception('Cannot find reverse relation for model %s' % info.model)
# Find forward property
forward_prop = None
if prop.direction.name == 'MANYTOONE':
candidate = 'ONETOMANY'
else:
candidate = 'MANYTOMANY'
for prop in mapper.iterate_properties:
if hasattr(prop, 'direction') and prop.direction.name == candidate:
if prop.mapper.class_ == target_mapper.class_:
forward_prop = prop
break
else:
raise Exception('Cannot find forward relation for model %s' % info.model)
# Remove reverse property from the list
ignore = [reverse_prop.key]
if info.form_excluded_columns:
exclude = ignore + list(info.form_excluded_columns)
else:
exclude = ignore
# Create converter
converter = self.model_converter(self.session, info)
# Create form
child_form = info.get_form()
if child_form is None:
child_form = get_form(info.model,
converter,
base_class=info.form_base_class or form.BaseForm,
only=info.form_columns,
exclude=exclude,
field_args=info.form_args,
hidden_pk=True,
extra_fields=info.form_extra_fields)
# Post-process form
child_form = info.postprocess_form(child_form)
kwargs = dict()
label = self.get_label(info, forward_prop.key)
if label:
kwargs['label'] = label
if self.view.form_args:
field_args = self.view.form_args.get(forward_prop.key, {})
kwargs.update(**field_args)
# Contribute field
setattr(form_class,
forward_prop.key,
self.inline_field_list_type(child_form,
self.session,
info.model,
reverse_prop.key,
info,
**kwargs))
return form_class
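# Usage sketch (hypothetical models): a view declares inline models, and
# contribute() above attaches an inline field list for each to the parent form:
#
#     class UserView(ModelView):
#         inline_models = (
#             UserEmailAddress,                           # plain model class
#             (UserPhone, dict(form_columns=['number'])),  # model + options
#         )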
| {
"content_hash": "37a8b19463484fce8a3f6437e635e76a",
"timestamp": "",
"source": "github",
"line_count": 636,
"max_line_length": 107,
"avg_line_length": 35.605345911949684,
"alnum_prop": 0.5672775447118569,
"repo_name": "closeio/flask-admin",
"id": "e3434ca324c88d97908ad564376764684e0e4e93",
"size": "22645",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flask_admin/contrib/sqla/form.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "120"
},
{
"name": "HTML",
"bytes": "94070"
},
{
"name": "JavaScript",
"bytes": "31882"
},
{
"name": "Makefile",
"bytes": "5587"
},
{
"name": "Python",
"bytes": "672565"
},
{
"name": "Shell",
"bytes": "1316"
}
],
"symlink_target": ""
} |
"""
Wyrd In: Time tracker and task manager
CC-Share Alike 2012 © The Wyrd In team
https://github.com/WyrdIn
This module collects rather universal utility functions.
"""
from contextlib import contextmanager
import os.path
from shutil import copy2, move
@contextmanager
def open_backed_up(fname, mode='r', suffix='~'):
"""A context manager for opening a file with a backup. If an exception is
raised during manipulating the file, the file is restored from the backup
before the exception is reraised.
Keyword arguments:
- fname: path towards the file to be opened
- mode: mode of opening the file (passed on to open()) (default: "r")
- suffix: the suffix to use for the backup file (default: "~")
"""
# If the file does not exist, create it.
if not os.path.exists(fname):
open(fname, 'w').close()
bak_fname = None
# If it does exist, create a backup.
else:
bak_fname = fname + suffix
copy2(fname, bak_fname)
    f = open(fname, mode)
    try:
        yield f
    except Exception:
        # Close the handle and restore the backup before re-raising.
        f.close()
        if bak_fname is not None:
            move(bak_fname, fname)
        raise
    # Closing.
    f.close()
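# Usage sketch (hypothetical file name): if the block raises, the original
# contents are restored from the 'settings.cfg~' backup before the exception
# propagates.
#
#     with open_backed_up('settings.cfg', 'w') as f:
#         f.write('new contents')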
def group_by(objects, attrs, single_attr=False):
"""Groups `objects' by the values of their attributes `attrs'.
Returns a dictionary mapping from a tuple of attribute values to a list of
objects with those attribute values.
    Keyword arguments:
- single_attr: specifies that there is just one attribute to use as the
key, and that the keys should be directly values of the
attribute, rather than one-tuples
"""
# Specifying one string shall be interpreted as the single attribute name,
# rather than a sequence of one-letter attribute names.
if isinstance(attrs, str):
attrs = (attrs, )
    if single_attr and len(attrs) != 1:
        raise ValueError("single_attr specified, but %d attributes used "
                         "for indexing (exactly one is required)." % len(attrs))
if single_attr:
attr = attrs[0]
groups = dict()
for obj in objects:
if single_attr:
key = obj.__dict__[attr]
else:
key = tuple(obj.__dict__[attr] for attr in attrs)
groups.setdefault(key, []).append(obj)
return groups
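# Usage sketch (hypothetical Task objects carrying a `project` attribute):
#
#     group_by(tasks, 'project', single_attr=True)
#     # => {'wyrdin': [task1, task3], 'docs': [task2]}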
def format_timedelta(timedelta):
"""Formats a timedelta object to a string by throwing off the microsecond
part from the standard timedelta string representation.
"""
    # Append a '.' so find() succeeds even when the timedelta has no
    # microsecond part (str() omits the '.ffffff' suffix in that case).
    whole_repr = str(timedelta) + '.'
return whole_repr[:whole_repr.find('.')]
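if __name__ == "__main__":
    # Minimal demo of format_timedelta (not part of the module API): the
    # microsecond part is stripped from the standard string representation.
    from datetime import timedelta
    print(format_timedelta(timedelta(hours=1, minutes=2, seconds=3,
                                     microseconds=456789)))  # prints '1:02:03'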
| {
"content_hash": "593562045b8a9fbed30880c87b02785e",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 79,
"avg_line_length": 30.988095238095237,
"alnum_prop": 0.6319631194775259,
"repo_name": "RichardLitt/wyrd-django-dev",
"id": "4031c351832ffa8b95bb43646fa5f8c9fe880fe1",
"size": "2723",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wyrdin/core/util.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "94313"
},
{
"name": "Python",
"bytes": "8430112"
},
{
"name": "Shell",
"bytes": "6521"
}
],
"symlink_target": ""
} |
from django.shortcuts import render
from django.contrib.auth.decorators import permission_required
from django.conf import settings
from django.template import RequestContext
from django.template.loader import render_to_string
from wagtail.wagtailadmin.forms import SearchForm
from wagtail.wagtailcore import hooks
from wagtail.wagtailcore.models import Page, PageRevision, UserPagePermissionsProxy
from wagtail.wagtaildocs.models import Document
from wagtail.wagtailimages.models import get_image_model
# Panels for the homepage
class SiteSummaryPanel(object):
name = 'site_summary'
order = 100
def __init__(self, request):
self.request = request
def render(self):
return render_to_string('wagtailadmin/home/site_summary.html', {
'total_pages': Page.objects.count() - 1, # subtract 1 because the root node is not a real page
'total_images': get_image_model().objects.count(),
'total_docs': Document.objects.count(),
'search_form': SearchForm(),
}, RequestContext(self.request))
class PagesForModerationPanel(object):
name = 'pages_for_moderation'
order = 200
def __init__(self, request):
self.request = request
user_perms = UserPagePermissionsProxy(request.user)
self.page_revisions_for_moderation = user_perms.revisions_for_moderation().select_related('page', 'user').order_by('-created_at')
def render(self):
return render_to_string('wagtailadmin/home/pages_for_moderation.html', {
'page_revisions_for_moderation': self.page_revisions_for_moderation,
}, RequestContext(self.request))
class RecentEditsPanel(object):
name = 'recent_edits'
order = 300
def __init__(self, request):
self.request = request
# Last n edited pages
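        # For each page take its newest revision, then keep only those newest
        # revisions authored by the current user; a page whose latest revision
        # belongs to someone else is therefore not listed.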
self.last_edits = PageRevision.objects.raw(
"""
select wp.* FROM
wagtailcore_pagerevision wp JOIN (
SELECT max(created_at) as max_created_at, page_id FROM wagtailcore_pagerevision group by page_id
) as max_rev on max_rev.max_created_at = wp.created_at and wp.user_id = %s order by wp.created_at desc
""", [request.user.id])[:5]
def render(self):
return render_to_string('wagtailadmin/home/recent_edits.html', {
'last_edits': self.last_edits,
}, RequestContext(self.request))
@permission_required('wagtailadmin.access_admin')
def home(request):
panels = [
SiteSummaryPanel(request),
PagesForModerationPanel(request),
RecentEditsPanel(request),
]
for fn in hooks.get_hooks('construct_homepage_panels'):
fn(request, panels)
return render(request, "wagtailadmin/home.html", {
'site_name': settings.WAGTAIL_SITE_NAME,
'panels': sorted(panels, key=lambda p: p.order),
'user': request.user
})
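# Hook sketch (hypothetical panel class): other apps can extend the dashboard
# via the 'construct_homepage_panels' hook consumed in home() above. The exact
# registration style may vary by Wagtail version:
#
#     @hooks.register('construct_homepage_panels')
#     def add_broken_links_panel(request, panels):
#         panels.append(BrokenLinksPanel(request))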
def error_test(request):
raise Exception("This is a test of the emergency broadcast system.")
| {
"content_hash": "5e7411f963885977fab5d155bffdef0d",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 137,
"avg_line_length": 33.96629213483146,
"alnum_prop": 0.6685411842540523,
"repo_name": "benemery/wagtail",
"id": "e68ed0cbf17ae2c4596c70ec3c04e4b45eafa792",
"size": "3023",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "wagtail/wagtailadmin/views/home.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "134640"
},
{
"name": "D",
"bytes": "2012"
},
{
"name": "JavaScript",
"bytes": "52648"
},
{
"name": "Python",
"bytes": "1033196"
},
{
"name": "Ruby",
"bytes": "1275"
},
{
"name": "Shell",
"bytes": "9525"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import sys
import cPickle as pickle
import pandas as pd
import numpy as np
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.embeddings import Embedding
from keras.layers.convolutional import Convolution1D, MaxPooling1D
from keras.preprocessing.text import Tokenizer
from keras.callbacks import EarlyStopping
from keras.utils import np_utils
from keras.models import Graph
from keras.regularizers import l1,l2
from data_util import load_csvs, load_other
import ml_metrics as metrics
"""
Follows https://gist.github.com/xccds/8f0e5b0fe4eb6193261d to do 1D-CNN sentiment detection on the MR data.
The Graph syntax from https://github.com/fchollet/keras/issues/233 has several issues,
so we follow Kai Xiao's more verbose method instead.
- test syntax of using multiple inputs (done on Feb 09th)
- switch from MR to ASAP2 (one set).
"""
np.random.seed(75513) # for reproducibility
print("loading data...")
train_df = pd.read_csv('data/tpov4/train_1.csv')
test_df = pd.read_csv('data/tpov4/test_1.csv')
nb_words = 6500
maxlen = 175
embd_dim = 100
other_col_dim = 4
X_train, Y_train, X_test, Y_test, nb_classes = load_csvs('data/tpov4/train_1.csv',
'data/tpov4/test_1.csv',
nb_words, maxlen, 'self', w2v=None)
# read _other.csv
pos_train = load_other('data/tpov4/train_1_other.csv', maxlen, other_col_dim)
pos_test = load_other('data/tpov4/test_1_other.csv', maxlen, other_col_dim)
print('other tensor:', pos_train.shape)
# get #char to be a feature.
len_char_train = np.array([len(x.split()) for x in train_df.text.values.tolist()], dtype='float32')
len_char_test = np.array([len(x.split()) for x in test_df.text.values.tolist()], dtype='float32')
# normalize
len_max = np.max(len_char_train)
len_char_train /= len_max
len_char_test /= len_max
# IMPORTANT! reshape to make sure dim=2 (#sample, 1)
len_char_train = len_char_train.reshape(len_char_train.shape[0], 1)
len_char_test = len_char_test.reshape(len_char_test.shape[0], 1)
print('len_char_train shape:', len_char_train.shape)
nb_filter = 100
nb_epoch = 30
batch_size = 32
print('Build model...')
ngram_filters = [2, 5, 8]
nd_convs = ['conv_'+str(n) for n in ngram_filters]
nd_pools = ['pool_'+str(n) for n in ngram_filters]
nd_flats = ['flat_'+str(n) for n in ngram_filters]
model = Graph()
model.add_input(name='input', input_shape=(maxlen,), dtype=int)
# 2nd input from len_char
# model.add_input(name='lenchar', input_shape=(1,), dtype=float)
model.add_node(Embedding(nb_words, embd_dim, input_length=maxlen),
name='embedding', input='input')
# three word-based CNNs
for i, n_gram in enumerate(ngram_filters):
pool_length = maxlen - n_gram + 1
model.add_node(Convolution1D(nb_filter=nb_filter,
filter_length=n_gram,
border_mode="valid",
activation="relu"),
name=nd_convs[i], input='embedding')
model.add_node(MaxPooling1D(pool_length=pool_length),
name=nd_pools[i], input=nd_convs[i])
model.add_node(Flatten(), name=nd_flats[i], input=nd_pools[i])
model.add_node(Dropout(0.5), name='dropout', inputs=nd_flats, merge_mode='concat')
# other CNN
pos_f_len = 10
pos_pool_len = maxlen - pos_f_len + 1
model.add_input(name='posinput', input_shape=(maxlen, other_col_dim), dtype='float')
model.add_node(Convolution1D(nb_filter=nb_filter,
filter_length=pos_f_len,
border_mode='valid',
activation='relu',
input_shape=(maxlen, other_col_dim)),
name='poscnn', input='posinput')
model.add_node(MaxPooling1D(pool_length=pos_pool_len),
name='pospool', input='poscnn')
model.add_node(Flatten(), name='posflat', input='pospool')
model.add_node(Dropout(0.5), name='posdropout', input='posflat')
# using three CNNs to predict with L1
model.add_node(Dense(nb_classes, activation='softmax'), name='softmax',
inputs=['dropout', 'posdropout'],
merge_mode='concat')
model.add_output(name='output', input='softmax')
model.compile('rmsprop', loss={'output': 'categorical_crossentropy'})
# model.compile('rmsprop', loss={'output': 'mean_squared_error'})
# early stopping
earlystop = EarlyStopping(monitor='val_loss', patience=1, verbose=1)
model.fit({'input': X_train, 'posinput': pos_train, 'output': Y_train},
nb_epoch=nb_epoch, batch_size=batch_size,
validation_split=0.1, callbacks=[earlystop])
# Graph lacks several arguments/functions that Sequential() provides:
# - fit no show-accuracy
# - no predict_classes
classes = model.predict({'input': X_test, 'posinput': pos_test},
batch_size=batch_size)['output'].argmax(axis=1)
acc = np_utils.accuracy(classes, np_utils.categorical_probas_to_classes(Y_test)) # accuracy only supports classes
print('Test accuracy:', acc)
kappa = metrics.quadratic_weighted_kappa(classes, np_utils.categorical_probas_to_classes(Y_test))
print('Test Kappa:', kappa)
| {
"content_hash": "7bbb55fb4d8657b374b197d59ccca036",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 114,
"avg_line_length": 37.75714285714286,
"alnum_prop": 0.6632614453272796,
"repo_name": "leocnj/dl_response_rater",
"id": "2cc7f7db73805a0b401a9aaf4c8edf5148e39a92",
"size": "5286",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "archv/cnn1d_multi_tpo.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "115561"
}
],
"symlink_target": ""
} |
import dbhelper
Database = dbhelper.Database
| {
"content_hash": "63332307efab71749f9aef7f8981515a",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 28,
"avg_line_length": 15.333333333333334,
"alnum_prop": 0.8260869565217391,
"repo_name": "wangjun/BT-Share",
"id": "1d280de54a135412b31535ad9d0327b1810ff082",
"size": "87",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "web/database/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2500"
},
{
"name": "CoffeeScript",
"bytes": "1262"
},
{
"name": "HTML",
"bytes": "14733"
},
{
"name": "JavaScript",
"bytes": "640500"
},
{
"name": "Python",
"bytes": "74415"
},
{
"name": "Shell",
"bytes": "242"
}
],
"symlink_target": ""
} |
import shutil
import tempfile
from nose.tools import *
from holland.lib.lvm import LogicalVolume
from holland.lib.lvm.snapshot import *
from tests.constants import *
class TestSnapshot(object):
def setup(self):
self.tmpdir = tempfile.mkdtemp()
def teardown(self):
shutil.rmtree(self.tmpdir)
def test_snapshot_fsm(self):
lv = LogicalVolume.lookup('%s/%s' % (TEST_VG, TEST_LV))
name = lv.lv_name + '_snapshot'
size = 1 # extent
snapshot = Snapshot(name, size, self.tmpdir)
snapshot.start(lv)
def test_snapshot_fsm_with_callbacks(self):
lv = LogicalVolume.lookup('%s/%s' % (TEST_VG, TEST_LV))
name = lv.lv_name + '_snapshot'
size = 1 # extent
snapshot = Snapshot(name, size, self.tmpdir)
def handle_event(event, *args, **kwargs):
pass
snapshot.register('pre-mount', handle_event)
snapshot.register('post-mount', handle_event)
snapshot.start(lv)
def test_snapshot_fsm_with_failures(self):
lv = LogicalVolume.lookup('%s/%s' % (TEST_VG, TEST_LV))
name = lv.lv_name + '_snapshot'
size = 1 # extent
snapshot = Snapshot(name, size, self.tmpdir)
def bad_callback(event, *args, **kwargs):
raise Exception("Oooh nooo!")
for evt in ('initialize', 'pre-snapshot', 'post-snapshot',
'pre-mount', 'post-mount', 'pre-unmount', 'post-unmount',
'pre-remove', 'post-remove', 'finish'):
snapshot.register(evt, bad_callback)
assert_raises(CallbackFailuresError, snapshot.start, lv)
snapshot.unregister(evt, bad_callback)
if snapshot.sigmgr._handlers:
raise Exception("WTF. sigmgr handlers still exist when checking event => %r", evt)
| {
"content_hash": "67366718d0cab5624b39f33c2fcd1431",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 98,
"avg_line_length": 35.07692307692308,
"alnum_prop": 0.600328947368421,
"repo_name": "m00dawg/holland",
"id": "d5846b72013188d68b2e5949bd8de4747ea8754d",
"size": "1824",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "plugins/holland.lib.lvm/tests/ext3/test_snapshot.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "7884"
},
{
"name": "Python",
"bytes": "1720427"
},
{
"name": "Roff",
"bytes": "3761"
},
{
"name": "Shell",
"bytes": "5001"
}
],
"symlink_target": ""
} |
import BoostBuild
t = BoostBuild.Tester()
t.write("Jamroot.jam", """\
import type : type ;
ECHO [ type source.c ] ;
ECHO [ type source.cc ] ;
ECHO [ type source.cxx ] ;
ECHO [ type source.cpp ] ;
ECHO [ type source.o ] ;
ECHO [ type source.obj ] ;
ECHO [ type boost_system.lib ] ;
ECHO [ type boost_system.so ] ;
ECHO [ type boost_system.dll ] ;
EXIT [ type boost_system.so.1.66.0 ] : 0 ;
""")
t.run_build_system(stdout="""\
C
CPP
CPP
CPP
OBJ
OBJ
STATIC_LIB
SHARED_LIB
SHARED_LIB
SHARED_LIB
""")
t.cleanup()
| {
"content_hash": "2093d1a7bbb9d5ac569dd79a8ddaa6a6",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 42,
"avg_line_length": 16,
"alnum_prop": 0.6484375,
"repo_name": "davehorton/drachtio-server",
"id": "9924e336a5d009031178cbb1418c94fbac08e529",
"size": "829",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "deps/boost_1_77_0/tools/build/test/file_types.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "662596"
},
{
"name": "Dockerfile",
"bytes": "1330"
},
{
"name": "JavaScript",
"bytes": "60639"
},
{
"name": "M4",
"bytes": "35273"
},
{
"name": "Makefile",
"bytes": "5960"
},
{
"name": "Shell",
"bytes": "47298"
}
],
"symlink_target": ""
} |
"""convert Comma-Separated Value (.csv) files to a TermBase eXchange (.tbx) glossary file"""
from translate.misc import sparse
from translate.storage import tbx
from translate.storage import csvl10n
class csv2tbx:
"""a class that takes translations from a .csv file and puts them in a .tbx file"""
def __init__(self, charset=None):
"""construct the converter..."""
self.charset = charset
def convertfile(self, thecsvfile):
"""converts a csvfile to a tbxfile, and returns it. uses templatepo if given at construction"""
mightbeheader = True
self.tbxfile = tbx.tbxfile()
for thecsv in thecsvfile.units:
if mightbeheader:
# ignore typical header strings...
mightbeheader = False
                if [item.strip().lower() for item in (thecsv.comment, thecsv.source, thecsv.target)] == \
["comment", "original", "translation"]:
continue
if len(thecsv.comment.strip()) == 0 and thecsv.source.find("Content-Type:") != -1:
continue
term = tbx.tbxunit.buildfromunit(thecsv)
# TODO: we might want to get the location or other information from CSV
self.tbxfile.addunit(term)
return self.tbxfile
def convertcsv(inputfile, outputfile, templatefile, charset=None, columnorder=None):
"""reads in inputfile using csvl10n, converts using csv2tbx, writes to outputfile"""
inputstore = csvl10n.csvfile(inputfile, fieldnames=columnorder)
convertor = csv2tbx(charset=charset)
outputstore = convertor.convertfile(inputstore)
if len(outputstore.units) == 0:
return 0
outputfile.write(str(outputstore))
return 1
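# Usage sketch (hypothetical paths; convertcsv is normally driven by the
# option parser in main() below):
#
#     with open('glossary.csv') as infile, open('glossary.tbx', 'w') as outfile:
#         convertcsv(infile, outfile, None,
#                    columnorder=['comment', 'source', 'target'])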
def main():
from translate.convert import convert
formats = {("csv", "tbx"): ("tbx", convertcsv), ("csv", None): ("tbx", convertcsv)}
parser = convert.ConvertOptionParser(formats, usetemplates=False, description=__doc__)
parser.add_option("", "--charset", dest="charset", default=None,
help="set charset to decode from csv files", metavar="CHARSET")
parser.add_option("", "--columnorder", dest="columnorder", default=None,
help="specify the order and position of columns (comment,source,target)")
parser.passthrough.append("charset")
parser.passthrough.append("columnorder")
parser.run()
if __name__ == '__main__':
main()
| {
"content_hash": "da2ef56e6b74b36a71e7e41e728dd751",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 103,
"avg_line_length": 43.357142857142854,
"alnum_prop": 0.6482701812191104,
"repo_name": "dbbhattacharya/kitsune",
"id": "a1333de0a2e9f6bdc6b77cc11d535a84730932a0",
"size": "3256",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "vendor/packages/translate-toolkit/translate/convert/csv2tbx.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "2694"
},
{
"name": "CSS",
"bytes": "276585"
},
{
"name": "HTML",
"bytes": "600145"
},
{
"name": "JavaScript",
"bytes": "800276"
},
{
"name": "Python",
"bytes": "2762831"
},
{
"name": "Shell",
"bytes": "6720"
},
{
"name": "Smarty",
"bytes": "1752"
}
],
"symlink_target": ""
} |