text stringlengths 4 1.02M | meta dict |
|---|---|
"""'bad' is our exceptions module, here are our package specific exceptions defined."""
class City(Exception):
    """Cannot fetch weather forecast for city."""
class PluginExistsNot(Exception):
    """Raised when a requested plugin does not exist."""
| {
"content_hash": "1639145af3358fc006204390a9810010",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 87,
"avg_line_length": 29.75,
"alnum_prop": 0.7016806722689075,
"repo_name": "BCCN-Prog/webscraping",
"id": "922cc927e6f1a88c65c902bc94c6b620c53602de",
"size": "238",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ws/bad.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "85085"
}
],
"symlink_target": ""
} |
import tensorflow as tf
import numpy as np
class BB_CNN:
    """
    Convolutional neural network to predict a bounding box around an object.

    Network architecture:
        (Conv - ReLu - Max Pool) * M - (Fc - ReLu - Dropout) * N - Fc

    Output (columns of the final 5-wide layer):
        out[, 0]: score for probability, i.e. probability = sigmoid(score)
        out[, 1]: rel. x coord of bounding box
        out[, 2]: rel. y coord of bounding box
        out[, 3]: logarithm of rel. width of bounding box
        out[, 4]: logarithm of rel. height of bounding box
    """

    def __init__(self, kernel_size=[3], kernel_stride=[1], num_filters=[4],
                 pool_size=[2], pool_stride=[2], hidden_dim=[100], dropout=0.5,
                 weight_decay=1e-4, weight_decay_bb=0., weight_scale=0.001,
                 loss_bb_weight=1., file_name=None):
        """
        Initialize the bounding boxes CNN by storing its characteristics.

        :param kernel_size: list of kernel sizes; all kernels are quadratic
        :param kernel_stride: list of strides of convolutional layers
        :param num_filters: list of number of filters of convolutional layers
        :param pool_size: list of pool sizes; all pool layers use quadratic kernels;
            set pool_size = 1 to skip the pool after a convolutional layer
        :param pool_stride: list of pool strides; set pool_stride = 1 to skip the
            pool after a convolutional layer
        :param hidden_dim: list of number of hidden units of fully connected layers
        :param dropout: dropout keep probability; set to 0 to not use dropout
        :param weight_decay: L2 regularization strength for all non-output weights
        :param weight_decay_bb: L2 regularization strength for the bounding-box
            part of the output layer
        :param weight_scale: std dev of the normal distribution weights are drawn from
        :param loss_bb_weight: weight for bounding boxes loss in the total loss
            (bb loss + cross entropy loss of classification)
        :param file_name: file name of numpy file where weights are stored in
        """
        # NOTE: the list defaults are shared between instances; they are treated
        # strictly as read-only below (build() copies before modifying).
        self.kernel_size = kernel_size
        self.kernel_stride = kernel_stride
        self.num_filters = num_filters
        self.pool_size = pool_size
        self.pool_stride = pool_stride
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.weight_decay = weight_decay
        self.weight_decay_bb = weight_decay_bb
        self.weight_scale = weight_scale
        self.loss_bb_weight = loss_bb_weight
        # Maps (layer_name, 0/1) -> tf.Variable (0 = weights/filters, 1 = biases)
        self.var_dict = {}
        # Load coefficients if a file name is provided
        if file_name is not None:
            # allow_pickle=True is required to load dict-valued .npy files
            # on numpy >= 1.16.3 (the file is written by save() below)
            self.data_dict = np.load(file_name, encoding='latin1',
                                     allow_pickle=True).item()
        else:
            self.data_dict = None

    def build(self, x, train_mode=None):
        """
        Build the computational graph; the final layer is left in self.out.

        :param x: batch of images of size [batch_size, height, width, in_channels]
        :param train_mode: boolean tf placeholder/tensor; True for training,
            False or None otherwise. Dropout is only applied during training.
        """
        _, height, width, in_channels = x.get_shape().as_list()
        self.out = x
        # Convolutional layers.
        # Copy before prepending so that repeated build() calls do not corrupt
        # the configuration stored on the instance (previously the lists were
        # mutated in place via insert()).
        num_filters = [in_channels] + list(self.num_filters)
        pool_count = 1
        conv_count = 1
        for i in range(len(self.kernel_size)):
            self.out = self.conv_layer(self.out, self.kernel_size[i], self.kernel_stride[i],
                                       num_filters[i], num_filters[i + 1],
                                       'conv' + str(pool_count) + '_' + str(conv_count))
            conv_count += 1
            # 'SAME' padding: the spatial size shrinks only by the stride (ceil div)
            height = np.ceil(1.0 * height / self.kernel_stride[i]).astype('int')
            width = np.ceil(1.0 * width / self.kernel_stride[i]).astype('int')
            if (self.pool_size[i] > 1) & (self.pool_stride[i] > 1):
                self.out = self.max_pool(self.out, self.pool_size[i], self.pool_stride[i],
                                         'pool' + str(pool_count))
                height = np.ceil(1.0 * height / self.pool_stride[i]).astype('int')
                width = np.ceil(1.0 * width / self.pool_stride[i]).astype('int')
                pool_count += 1
                conv_count = 1
        # Fully connected layers (first input size is the flattened conv output)
        hidden_dim = [num_filters[-1] * height * width] + list(self.hidden_dim)
        for i in range(len(hidden_dim) - 1):
            self.out = self.fc_layer(self.out, hidden_dim[i], hidden_dim[i + 1], 'fc' + str(i + 1))
            self.out = tf.nn.relu(self.out)
            if train_mode is not None:
                # dropout only when train_mode evaluates to True at run time
                self.out = tf.cond(train_mode,
                                   lambda: tf.nn.dropout(self.out, self.dropout),
                                   lambda: self.out)
        # Output layer: 1 probability score + 4 bounding-box values
        self.out = self.fc_layer(self.out, hidden_dim[-1], 5, 'out')

    def predict(self):
        """
        Build the prediction ops for probability and bounding box.

        Can only be used after the network was built!
        """
        score_prob, self.score_bb = tf.split(self.out, [1, 4], 1)
        score_pos, score_size = tf.split(self.score_bb, [2, 2], 1)
        self.score_prob = tf.reshape(score_prob, [-1])
        self.pred_prob = tf.sigmoid(self.score_prob)
        # Clip the position to [0, 1]; elementwise-identical to the previous
        # tf.map_fn(lambda x: tf.minimum(tf.nn.relu(x), 1.), ...) but cheaper.
        pos = tf.minimum(tf.nn.relu(score_pos), 1.)
        # Size is clipped so that pos + size stays inside the unit square
        size = tf.minimum(tf.nn.relu(score_size), 1. - pos)
        self.pred_bb = tf.concat([pos, size], 1)

    def loss(self, target_prob, target_bb):
        """
        Build the loss op: smooth-L1 bb loss + sigmoid cross entropy + L2 reg.

        Can only be used after the network was built and prediction done!
        :param target_prob: indicator whether object is there or not
        :param target_bb: ground truth coordinates of the bounding box
        """
        # BB loss: smooth L1 (Huber) averaged over positive examples only
        weight_sum = tf.reduce_sum(target_prob)
        abs_diff = tf.abs(self.score_bb - target_bb)
        abs_diff_lt_1 = tf.less(abs_diff, 1)
        loss_bb = tf.cond(tf.greater(weight_sum, 0),
                          lambda: tf.reduce_sum(target_prob * tf.reduce_sum(
                              tf.where(abs_diff_lt_1, 0.5 * tf.square(abs_diff),
                                       abs_diff - 0.5), 1)) / weight_sum,
                          lambda: 0.)
        # Classification loss
        loss_prob = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(labels=target_prob,
                                                    logits=self.score_prob))
        # Regularization loss on all hidden-layer weights
        regularization_variables = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
        regularizer = tf.contrib.layers.l2_regularizer(self.weight_decay)
        loss_reg = tf.contrib.layers.apply_regularization(regularizer, regularization_variables)
        # Separate regularization for the bb part (columns 1..4) of the output layer
        weight_out = self.var_dict[('out', 0)]
        bias_out = self.var_dict[('out', 1)]
        loss_reg_bb = self.weight_decay_bb * (
            tf.nn.l2_loss(tf.slice(weight_out, [0, 1], [-1, -1])) +
            tf.nn.l2_loss(tf.slice(bias_out, [1], [-1])))
        # NOTE: this assignment replaces the bound method `loss` on the instance
        # with the loss tensor, so loss() can only be called once per instance.
        # Kept as-is for backward compatibility with existing callers.
        self.loss = self.loss_bb_weight * loss_bb + loss_prob + loss_reg + loss_reg_bb

    def max_pool(self, x, size, stride, name):
        """
        Pass x through a max pooling layer.
        """
        return tf.nn.max_pool(x, ksize=[1, size, size, 1],
                              strides=[1, stride, stride, 1],
                              padding='SAME', name=name)

    def conv_layer(self, x, size, stride, in_channels, out_channels, name):
        """
        Pass x through a convolutional layer followed by a relu.
        """
        with tf.variable_scope(name):
            filters, biases = self.get_conv_var(size, in_channels, out_channels, name)
            out_conv = tf.nn.conv2d(x, filters, [1, stride, stride, 1], padding='SAME')
            out_bias = tf.nn.bias_add(out_conv, biases)
            out_relu = tf.nn.relu(out_bias)
            return out_relu

    def fc_layer(self, x, in_size, out_size, name):
        """
        Pass x (flattened if needed) through a fully connected layer.
        """
        with tf.variable_scope(name):
            weights, biases = self.get_fc_var(in_size, out_size, name)
            reshaped_x = tf.reshape(x, [-1, in_size])
            out_fc = tf.nn.bias_add(tf.matmul(reshaped_x, weights), biases)
            return out_fc

    def get_conv_var(self, filter_size, in_channels, out_channels, name):
        """
        Create parameters of a convolutional layer as tf.Variable.
        """
        initial_value = tf.random_normal(
            [filter_size, filter_size, in_channels, out_channels], 0.0, self.weight_scale)
        filters = self.get_var(initial_value, name, 0, name + "_filters")
        # The output layer is regularized separately (see loss())
        if name != 'out':
            tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, filters)
        initial_value = tf.random_normal([out_channels], 0.0, self.weight_scale)
        biases = self.get_var(initial_value, name, 1, name + "_biases")
        return filters, biases

    def get_fc_var(self, in_size, out_size, name):
        """
        Create parameters of a fully connected layer as tf.Variable.
        """
        initial_value = tf.random_normal([in_size, out_size], 0.0, self.weight_scale)
        weights = self.get_var(initial_value, name, 0, name + "_weights")
        if name != 'out':
            tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, weights)
        initial_value = tf.random_normal([out_size], 0.0, self.weight_scale)
        biases = self.get_var(initial_value, name, 1, name + "_biases")
        return weights, biases

    def get_var(self, initial_value, name, idx, var_name):
        """Return a tf.Variable, restored from self.data_dict when available."""
        if self.data_dict is not None and name in self.data_dict:
            value = self.data_dict[name][idx]
        else:
            value = initial_value
        var = tf.Variable(value, name=var_name)
        self.var_dict[(name, idx)] = var
        return var

    def save(self, sess, file_name='./bb_cnn.npy'):
        """
        Save all variables to a numpy file loadable via the constructor.
        """
        data_dict = {}
        for (name, idx), var in list(self.var_dict.items()):
            var_out = sess.run(var)
            if name not in data_dict:
                data_dict[name] = {}
            data_dict[name][idx] = var_out
        np.save(file_name, data_dict)
        return file_name
| {
"content_hash": "bcc43021191ec9027580e487db1fd5af",
"timestamp": "",
"source": "github",
"line_count": 226,
"max_line_length": 194,
"avg_line_length": 38.04867256637168,
"alnum_prop": 0.6742644493545761,
"repo_name": "TUM-AERIUS/Aerius",
"id": "54ad42878ee74455646b018e6fde5bb746825bf5",
"size": "8599",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "BoundingBoxesNN/BB_CNN.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "41972"
},
{
"name": "Java",
"bytes": "8298"
},
{
"name": "Python",
"bytes": "143954"
},
{
"name": "Shell",
"bytes": "2630"
}
],
"symlink_target": ""
} |
import logging
import urllib
from pprint import pprint as pp
from flask import url_for
from flask_login import current_user
from flask_mail import Message
from scout.server.extensions import mail as ex_mail
from scout.server.links import external_primer_order_link
from .controllers import variant as variant_controller
LOG = logging.getLogger(__name__)
class MissingVerificationRecipientError(Exception):
    """Raised when an institute has no Sanger recipients to mail a verification to."""
    pass
def variant_verification(
    store,
    institute_id,
    case_name,
    variant_id,
    sender,
    variant_url,
    order,
    comment,
    url_builder=None,
    mail=None,
    user_obj=None,
):
    """Send a verification email and register the verification in the database

    Args:
        store(scout.adapter.MongoAdapter)
        institute_id(str): id of the institute
        case_name(str): display name of the case
        variant_id(str): id of the variant (snv or sv) to verify
        sender(str): current_app.config['MAIL_USERNAME']
        variant_url(str): the complete url to the variant (snv or sv), a link that works from outside scout domain.
        order(str): "False" == cancel order, "True" == order verification
        comment(str): sender's entered comment from form
        url_builder(flask.url_for): for testing purposes, otherwise test verification email fails because out of context
        mail(flask_mail.Mail): optional mail extension override, defaults to the app-wide one
        user_obj(dict): optional user object, defaults to the currently logged-in user

    Raises:
        MissingVerificationRecipientError: if the institute has no sanger_recipients
    """
    url_builder = url_builder or url_for
    mail = mail or ex_mail
    user_obj = user_obj or store.user(current_user.email)
    data = variant_controller(
        store,
        institute_id,
        case_name,
        variant_id=variant_id,
        add_case=True,
        add_other=False,
        get_overlapping=False,
    )
    variant_obj = data["variant"]
    case_obj = data["case"]
    institute_obj = data["institute"]
    # Log instead of the previous pp() debug print to stdout
    LOG.debug("Verification requested for variant: %s", variant_obj)
    recipients = institute_obj["sanger_recipients"]
    if not recipients:
        raise MissingVerificationRecipientError()
    category = variant_obj.get("category", "snv")
    display_name = variant_obj.get("display_name")
    chromosome = variant_obj["chromosome"]
    end_chrom = variant_obj.get("end_chrom", chromosome)
    # SNVs get a single chr:pos; SVs get two breakpoints instead
    chr_position = (
        ":".join([chromosome, str(variant_obj["position"])]) if category in ["snv"] else "-"
    )
    breakpoint_1 = (
        ":".join([chromosome, str(variant_obj["position"])])
        if category in ["sv", "cancer_sv"]
        else "-"
    )
    breakpoint_2 = (
        ":".join([end_chrom, str(variant_obj.get("end"))])
        if category in ["sv", "cancer_sv"]
        else "-"
    )
    panels = ", ".join(variant_obj.get("panels", []))
    gene_identifiers = [
        str(ident) for ident in variant_obj.get("hgnc_symbols", variant_obj.get("hgnc_ids", []))
    ]
    hgnc_symbol = ", ".join(gene_identifiers)
    if len(gene_identifiers) > 3:
        # Summarize long gene lists; note the space ("12 genes", not "12genes")
        email_subj_gene_symbol = "{} genes".format(len(gene_identifiers))
    else:
        email_subj_gene_symbol = hgnc_symbol
    gtcalls = [
        "<li>{}: {}</li>".format(sample_obj["display_name"], sample_obj["genotype_call"])
        for sample_obj in variant_obj["samples"]
    ]
    tx_changes = []
    external_primer_link = ""
    if category == "snv":  # SNV
        view_type = "variant.variant"
        tx_changes = []
        external_primer_link = external_primer_order_link(variant_obj, case_obj["genome_build"])
        for gene_obj in variant_obj.get("genes", []):
            for tx_obj in gene_obj["transcripts"]:
                # select refseq transcripts as "primary"
                if not tx_obj.get("refseq_id"):
                    continue
                for refseq_id in tx_obj.get("refseq_identifiers"):
                    transcript_line = []
                    transcript_line.append(gene_obj.get("hgnc_symbol", gene_obj["hgnc_id"]))
                    transcript_line.append("-".join([refseq_id, tx_obj["transcript_id"]]))
                    if "exon" in tx_obj:
                        transcript_line.append("".join(["exon", tx_obj["exon"]]))
                    elif "intron" in tx_obj:
                        transcript_line.append("".join(["intron", tx_obj["intron"]]))
                    else:
                        transcript_line.append("intergenic")
                    if "coding_sequence_name" in tx_obj:
                        transcript_line.append(urllib.parse.unquote(tx_obj["coding_sequence_name"]))
                    else:
                        transcript_line.append("")
                    if "protein_sequence_name" in tx_obj:
                        transcript_line.append(
                            urllib.parse.unquote(tx_obj["protein_sequence_name"])
                        )
                    else:
                        transcript_line.append("")
                    if "strand" in tx_obj:
                        transcript_line.append(tx_obj["strand"])
                    else:
                        transcript_line.append("")
                    if refseq_id in gene_obj["common"]["primary_transcripts"]:
                        transcript_line.append("<b>primary</b>")
                    else:
                        transcript_line.append("")
                    tx_changes.append("<li>{}</li>".format(":".join(transcript_line)))
    else:  # SV
        view_type = "variant.sv_variant"
        display_name = "_".join([breakpoint_1, variant_obj.get("sub_category").upper()])
    # body of the email
    html = verification_email_body(
        case_name=case_obj["display_name"],
        url=variant_url,  # this is the complete url to the variant, accessible when clicking on the email link
        display_name=display_name,
        category=category.upper(),
        subcategory=variant_obj.get("sub_category").upper(),
        breakpoint_1=breakpoint_1,
        breakpoint_2=breakpoint_2,
        chr_position=chr_position,
        hgnc_symbol=hgnc_symbol,
        panels=panels,
        gtcalls="".join(gtcalls),
        tx_changes="".join(tx_changes) or "Not available",
        # previously this was .encode("utf-8"), which rendered as b'...' in
        # the Python 3 email body
        name=user_obj["name"],
        comment=comment,
        external_primer_link=external_primer_link,
    )
    # build a local link to the variant to be included in the events objects
    # (variant and case) created in the event collection.
    local_link = url_builder(
        view_type,
        institute_id=institute_obj["_id"],
        case_name=case_obj["display_name"],
        variant_id=variant_obj["_id"],
    )
    if order == "True":  # variant verification should be ordered
        # pin variant if it's not already pinned
        if case_obj.get("suspects") is None or variant_obj["_id"] not in case_obj["suspects"]:
            store.pin_variant(institute_obj, case_obj, user_obj, local_link, variant_obj)
        email_subject = "SCOUT: validation of {} variant {}, ({})".format(
            category.upper(), display_name, email_subj_gene_symbol
        )
        store.order_verification(
            institute=institute_obj,
            case=case_obj,
            user=user_obj,
            link=local_link,
            variant=variant_obj,
        )
    else:  # variant verification should be cancelled
        email_subject = "SCOUT: validation of {} variant {}, ({}), was CANCELLED!".format(
            category.upper(), display_name, email_subj_gene_symbol
        )
        store.cancel_verification(
            institute=institute_obj,
            case=case_obj,
            user=user_obj,
            link=local_link,
            variant=variant_obj,
        )
    kwargs = dict(
        subject=email_subject,
        html=html,
        sender=sender,
        recipients=recipients,
        # cc the sender of the email for confirmation
        cc=[user_obj["email"]],
    )
    message = Message(**kwargs)
    # send email using flask_mail
    mail.send(message)
def verification_email_body(
    case_name,
    url,
    display_name,
    category,
    subcategory,
    chr_position,
    breakpoint_1,
    breakpoint_2,
    hgnc_symbol,
    panels,
    gtcalls,
    tx_changes,
    name,
    comment,
    external_primer_link,
):
    """
    Compose the html body for variant verification emails (order verification
    and cancel verification).

    Args:
        case_name(str): case display name
        url(str): the complete url to the variant, accessible when clicking on the email link
        display_name(str): a display name for the variant
        category(str): category of the variant
        subcategory(str): sub-category of the variant
        chr_position(str): chromosomal position for SNVs (format is 'chr:start')
        breakpoint_1(str): breakpoint 1 (format is 'chr:start')
        breakpoint_2(str): breakpoint 2 (format is 'chr:stop')
        hgnc_symbol(str): a gene or a list of genes separated by comma
        panels(str): a gene panel or a list of panels separated by comma
        gtcalls(str): genotyping calls of any sample in the family
        tx_changes(str): amino acid changes caused by the variant, only for snvs otherwise 'Not available'
        name(str): name of the ordering user
        comment(str): sender's comment from form
        external_primer_link(str): optional URL to an external primer ordering page

    Returns:
        html(str): the html body of the variant verification email
    """
    # Only render the primer item when a link was supplied
    primer_item = (
        f'<li><a href="{external_primer_link}">Order primers</a>'
        if external_primer_link
        else ""
    )
    return f"""
    <ul>
      <li>
        <strong>Case {case_name}</strong>: <a href="{url}">{display_name}</a>
      </li>
      <li><strong>Variant type</strong>: {category} ({subcategory})
      <li><strong>Chromosomal position</strong>: {chr_position}</li>
      <li><strong>Breakpoint 1</strong>: {breakpoint_1}</li>
      <li><strong>Breakpoint 2</strong>: {breakpoint_2}</li>
      <li><strong>HGNC symbols</strong>: {hgnc_symbol}</li>
      <li><strong>Gene panels</strong>: {panels}</li>
      <li><strong>GT call</strong></li>
      {gtcalls}
      <li><strong>Amino acid changes</strong></li>
      {tx_changes}
      {primer_item}
      <li><strong>Comment</strong>: {comment}</li>
      <li><strong>Ordered by</strong>: {name}</li>
    </ul>
    """
| {
"content_hash": "7ff0e84db660dff9084ad07837e973b7",
"timestamp": "",
"source": "github",
"line_count": 307,
"max_line_length": 132,
"avg_line_length": 35.71986970684039,
"alnum_prop": 0.586996169979938,
"repo_name": "Clinical-Genomics/scout",
"id": "1596e56ba4c8595525ad4ae3cc344c83a401e28b",
"size": "10966",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "scout/server/blueprints/variant/verification_controllers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "12516"
},
{
"name": "Dockerfile",
"bytes": "1451"
},
{
"name": "HTML",
"bytes": "911931"
},
{
"name": "JavaScript",
"bytes": "32692"
},
{
"name": "Makefile",
"bytes": "1046"
},
{
"name": "Python",
"bytes": "2419990"
}
],
"symlink_target": ""
} |
import datetime
import logging
from kombu import BrokerConnection
from kombu.common import maybe_declare
from kombu.pools import producers
import base
from entropy.queues import entropy_exchange
import libvirt
LOG = logging.getLogger(__name__)
class Audit(base.AuditBase):
    """Audit that reports the number of VMs running on a single hypervisor."""

    def get_vm_count(self, **kwargs):
        """Return {compute_host: domain_count}, or {compute_host: -1} on failure.

        See http://libvirt.org/guide/html/
        Application_Development_Guide-Architecture-Remote_URIs.html
        """
        # only one hv for this audit script.
        # BUGFIX: the original used a backslash line-continuation *inside* the
        # string literal, which embedded the next line's indentation into the URI.
        uri = ('%(driver)s+%(transport)s://%(username)s@%(compute_hosts)s'
               ':%(port)s/%(path)s' % kwargs)
        try:
            conn = libvirt.openReadOnly(uri)
            # 0x3fff selects all domain-state filter flags for listAllDomains
            return {kwargs['compute_hosts']: len(conn.listAllDomains(0x3fff))}
        except libvirt.libvirtError as err:
            LOG.error('Failed to open connection to the hypervisor: %s', err)
            return {kwargs['compute_hosts']: -1}

    def send_message(self, **kwargs):
        """Publish the current VM count to the entropy exchange over AMQP."""
        connection = BrokerConnection('amqp://%(mq_user)s:%(mq_password)s@'
                                      '%(mq_host)s:%(mq_port)s//'
                                      % kwargs['mq_args'])
        message = {'From': __name__,
                   'Date': str(datetime.datetime.now())}
        with producers[connection].acquire(block=True) as producer:
            maybe_declare(entropy_exchange, producer.channel)
            msg_args = {'vm_count': self.get_vm_count(**kwargs)}
            message['payload'] = msg_args
            producer.publish(message,
                             exchange=entropy_exchange,
                             routing_key='vmcount',
                             serializer='json')
| {
"content_hash": "32a8e34f8a0c15bcaa8ada827e5bf8b1",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 77,
"avg_line_length": 37.130434782608695,
"alnum_prop": 0.569672131147541,
"repo_name": "ddutta/entropy",
"id": "9281f2f617eb9d50756d270349dd70a7f4d30505",
"size": "2305",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "entropy/audit/vm_count.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Perl",
"bytes": "6766"
},
{
"name": "Python",
"bytes": "36317"
},
{
"name": "Shell",
"bytes": "6703"
}
],
"symlink_target": ""
} |
# ----------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------
import numpy as np

from Evaluator import Evaluator
from Constraint import Constraint

from VyPy.data import IndexableDict
from VyPy.data.input_output import flatten_list
from VyPy.tools.arrays import atleast_2d, atleast_2d_row
# ----------------------------------------------------------------------
# Inequality Function
# ----------------------------------------------------------------------
class Inequality(Constraint):
    """Inequality constraint; function() is normalized so that a feasible
    point yields a value <= 0 regardless of the stored sense ('<' or '>')."""

    Container = None  # bound to Inequalities at the bottom of this module

    def __init__(self, evaluator=None,
                 tag='cieq', sense='<', edge=0.0,
                 scale=1.0,
                 variables=None):
        Constraint.__init__(self, evaluator,
                            tag, sense, edge,
                            scale, variables)

    def function(self, x):
        """Evaluate the constraint at x, signed so feasible means <= 0."""
        snz = self.sense
        edg = self.edge
        scl = self.scale
        result = Constraint.function(self, x)
        if snz == '>':
            result = edg / scl - result
        elif snz == '<':
            result = result - edg / scl
        else:
            # BUGFIX: the old `raise Exception, '...'` comma syntax is a
            # SyntaxError on Python 3; the call form works on both 2 and 3
            raise Exception('unrecognized sense %s' % snz)
        return result

    def gradient(self, x):
        """Gradient of the constraint, with the sign matching function()."""
        snz = self.sense
        result = Constraint.gradient(self, x)
        if snz == '>':
            result = -1 * result
        elif snz == '<':
            result = +1 * result
        else:
            raise Exception('unrecognized sense %s' % snz)
        return result

    def hessian(self, x):
        raise NotImplementedError

    def __repr__(self):
        return "<Inequality '%s'>" % self.tag
# ----------------------------------------------------------------------
# Inequality Container
# ----------------------------------------------------------------------
class Inequalities(IndexableDict):
    """Ordered container mapping tag -> Inequality for one problem."""

    def __init__(self, variables):
        self.variables = variables

    def __set__(self, problem, arg_list):
        # descriptor assignment: replace current contents with arg_list
        self.clear()
        self.extend(arg_list)

    def append(self, evaluator,
               tag=None, sense='=', edge=0.0,
               scale=1.0):
        """Append an Inequality, either pre-built or constructed from args.

        NOTE(review): the default sense '=' is rejected by
        Inequality.function(); callers should pass '<' or '>'. Kept for
        interface compatibility — confirm before changing.
        """
        if tag is None and isinstance(evaluator, Inequality):
            inequality = evaluator
            inequality.variables = self.variables
        else:
            # BUGFIX: removed a leftover `args = flatten_list(args) + ...`
            # line that referenced the undefined name `args` (NameError)
            inequality = Inequality(evaluator, tag, sense, edge, scale, self.variables)
        inequality.__check__()
        tag = inequality.tag
        self[tag] = inequality

    def extend(self, arg_list):
        for args in arg_list:
            self.append(*args)

    def tags(self):
        return self.keys()

    def senses(self):
        return [con.sense for con in self.values()]

    def edges(self):
        return [con.edge for con in self.values()]

    def scales(self):
        return [con.scale for con in self.values()]

    def evaluators(self):
        return [con.evaluator for con in self.values()]

    def edges_array(self):
        # requires numpy (now imported at module level as np)
        return np.vstack([atleast_2d(x, 'col') for x in self.edges()])

    def set(self, senses=None, edges=None, scales=None):
        """Bulk-update senses/edges/scales by positional index.

        BUGFIX: `self` was missing from the signature, so every call raised
        a NameError on the `self[i]` lookups.
        """
        if senses:
            for i, s in enumerate(senses):
                self[i].sense = s
        if edges:
            for i, e in enumerate(edges):
                self[i].edge = e
        if scales:
            for i, s in enumerate(scales):
                self[i].scale = s
# ----------------------------------------------------------------------
# Inequality Container
# ----------------------------------------------------------------------
Inequality.Container = Inequalities
| {
"content_hash": "4ecde22f210c443fa08c392c0c2d3db1",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 82,
"avg_line_length": 31.248062015503876,
"alnum_prop": 0.42222773505333666,
"repo_name": "aerialhedgehog/VyPy",
"id": "da309d48c48d8ba371803cf5a00c8bf25d18246b",
"size": "4031",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "trunk/VyPy/optimize/Inequality.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "737107"
}
],
"symlink_target": ""
} |
from Player import Player
from Gamemaster import Gamemaster
import random
def evolution(iterations=None, players=None):
    """Run a round-robin tournament between the given players.

    :param iterations: iterations per match; random in [100, 150] when None
    :param players: list of [player_id, strategy_source] pairs
    :return: dict with keys 'points', 'winner' and 'results'; empty/None
        values when fewer than 2 players are supplied
    """
    # BUGFIX: the previous mutable default (players=[]) was shared across
    # calls; use None and create a fresh list per call
    if players is None:
        players = []
    # Must have 2 or more players to play round robin!
    if len(players) >= 2:
        if iterations is None:
            iterations = random.randint(100, 150)
        gm = Gamemaster(iterations=iterations)
        for player in players:
            gm.add_player(*player)
        gm.generate_matches()
        gm.start_tournament()
        output = {
            'points': gm.get_overall_points(),
            'winner': gm.get_winner(),
            'results': gm.get_match_results()
        }
    else:
        output = {
            'points': {},
            'winner': None,
            'results': []
        }
    return output
if __name__ == '__main__':
    players = [
        [1, 'def decide(context): return context.iterationstest'],
        [2, 'def decide(context): return "C"']
    ]
    # BUGFIX: this module defines evolution(), not round_robin(); also use
    # the print function so the script runs on Python 3
    print(evolution(iterations=random.randint(100, 150), players=players))
"content_hash": "daf12b1f00b65c34062b6dc84ee65a1f",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 75,
"avg_line_length": 24.41304347826087,
"alnum_prop": 0.5467497773820125,
"repo_name": "jianyuan/prisoners-dilemma-compiler",
"id": "202daf40fd8b2ff762da6ae658a911980cf1153e",
"size": "1161",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tourney/evolution.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "29553"
}
],
"symlink_target": ""
} |
from flask import Flask

# Minimal Flask application exposing a single route at "/".
app = Flask(__name__)


@app.route("/")
def hello():
    """Serve the root URL with a static greeting."""
    return "Hello World!"


if __name__ == "__main__":
    # Start the built-in development server (not intended for production).
    app.run()
| {
"content_hash": "a4f648208de8b1128a6b6bb52ffbebb9",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 26,
"avg_line_length": 12.25,
"alnum_prop": 0.54421768707483,
"repo_name": "PyNSK/ArticlesCode",
"id": "d3b36628699f107cbb585594558687665ef09697",
"size": "174",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flask-base/app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4587"
},
{
"name": "HTML",
"bytes": "8477"
},
{
"name": "Python",
"bytes": "24625"
}
],
"symlink_target": ""
} |
"""
Fast Lomb-Scargle Algorithm, following Press & Rybicki 1989
"""
from __future__ import print_function, division
__all__ = ['LombScargleFast']
import warnings
import numpy as np
from .lomb_scargle import LombScargle
# Precomputed factorials
FACTORIALS = [1, 1, 2, 6, 24, 120, 720, 5040, 40320, 362880, 3628800]


def factorial(N):
    """Compute the factorial of N.

    Values with N <= 10 come from a precomputed lookup table; anything
    larger falls back to scipy.special.factorial.
    """
    if N >= len(FACTORIALS):
        from scipy import special
        return int(special.factorial(N))
    return FACTORIALS[N]
def bitceil(N):
    """
    Return the power of 2 immediately greater than or equal to N.

    Note: this works for numbers up to 2 ** 64.
    Roughly equivalent to int(2 ** np.ceil(np.log2(N)))
    """
    # Classic bit-smearing trick: OR-ing progressively larger shifts fills
    # every bit below the highest set bit of (N - 1), then +1 rounds up.
    n = int(N) - 1
    shift = 1
    while shift <= 32:
        n |= n >> shift
        shift <<= 1
    return n + 1
def extirpolate(x, y, N=None, M=4):
    """
    Extirpolate the values (x, y) onto an integer grid range(N),
    using Lagrange polynomial weights on the M nearest points.

    Parameters
    ----------
    x : array_like
        array of abscissas
    y : array_like
        array of ordinates
    N : int
        number of integer bins to use. For best performance, N should be larger
        than the maximum of x
    M : int
        number of adjoining points on which to extirpolate.

    Returns
    -------
    yN : ndarray
        N extirpolated values associated with range(N)

    Example
    -------
    >>> rng = np.random.RandomState(0)
    >>> x = 100 * rng.rand(20)
    >>> y = np.sin(x)
    >>> y_hat = extirpolate(x, y)
    >>> x_hat = np.arange(len(y_hat))
    >>> f = lambda x: np.sin(x / 10)
    >>> np.allclose(np.sum(y * f(x)), np.sum(y_hat * f(x_hat)))
    True

    Notes
    -----
    This code is based on the C implementation of spread() presented in
    Numerical Recipes in C, Second Edition (Press et al. 1989; p.583).
    """
    x, y = map(np.ravel, np.broadcast_arrays(x, y))
    if N is None:
        N = int(np.max(x) + 0.5 * M + 1)
    # Now use Lagrange polynomial weights to populate the results array;
    # this is an efficient recursive implementation (see Press et al. 1989).
    # (The previous comment said "legendre", but the weights are Lagrange
    # interpolation weights, matching the docstring and the NR reference.)
    result = np.zeros(N, dtype=y.dtype)
    # first take care of the easy cases where x is an integer
    integers = (x % 1 == 0)
    np.add.at(result, x[integers].astype(int), y[integers])
    x, y = x[~integers], y[~integers]
    # For each remaining x, find the index describing the extirpolation range.
    # i.e. ilo[i] < x[i] < ilo[i] + M with x[i] in the center,
    # adjusted so that the limits are within the range 0...N
    ilo = np.clip((x - M // 2).astype(int), 0, N - M)
    numerator = y * np.prod(x - ilo - np.arange(M)[:, np.newaxis], 0)
    denominator = factorial(M - 1)
    for j in range(M):
        if j > 0:
            denominator *= j / (j - M)
        ind = ilo + (M - 1 - j)
        np.add.at(result, ind, numerator / (denominator * (x - ind)))
    return result
def trig_sum(t, h, df, N, f0=0, freq_factor=1,
             oversampling=5, use_fft=True, Mfft=4):
    """Compute (approximate) trigonometric sums for a number of frequencies

    This routine computes weighted sine and cosine sums:
        S_j = sum_i { h_i * sin(2 pi * f_j * t_i) }
        C_j = sum_i { h_i * cos(2 pi * f_j * t_i) }
    Where f_j = freq_factor * (f0 + j * df) for the values j in 1 ... N.
    The sums can be computed either by a brute force O[N^2] method, or
    by an FFT-based O[Nlog(N)] method.

    Parameters
    ----------
    t : array_like
        array of input times
    h : array_like
        array weights for the sum
    df : float
        frequency spacing
    N : int
        number of frequency bins to return
    f0 : float (optional, default=0)
        The low frequency to use
    freq_factor : float (optional, default=1)
        Factor which multiplies the frequency
    use_fft : bool
        if True, use the approximate FFT algorithm to compute the result.
        This uses the FFT with Press & Rybicki's Lagrangian extirpolation.
    oversampling : int (default = 5)
        oversampling freq_factor for the approximation; roughly the number of
        time samples across the highest-frequency sinusoid. This parameter
        contains the tradeoff between accuracy and speed. Not referenced
        if use_fft is False.
    Mfft : int
        The number of adjacent points to use in the FFT approximation.
        Not referenced if use_fft is False.

    Returns
    -------
    S, C : ndarrays
        summation arrays for frequencies f = df * np.arange(1, N + 1)
    """
    df *= freq_factor
    f0 *= freq_factor
    assert df > 0
    t, h = map(np.ravel, np.broadcast_arrays(t, h))
    if use_fft:
        Mfft = int(Mfft)
        assert(Mfft > 0)
        # required size of fft is the power of 2 above the oversampling rate
        Nfft = bitceil(N * oversampling)
        t0 = t.min()
        if f0 > 0:
            # Shift the frequency origin: pre-multiplying by exp(2j pi f0 (t-t0))
            # makes the FFT grid effectively start at f0 instead of 0
            h = h * np.exp(2j * np.pi * f0 * (t - t0))
        # Map times onto the [0, Nfft) grid (modulo wraps by FFT periodicity)
        tnorm = ((t - t0) * Nfft * df) % Nfft
        # Spread the (possibly non-integer) samples onto integer grid points
        grid = extirpolate(tnorm, h, Nfft, Mfft)
        fftgrid = np.fft.ifft(grid)
        if t0 != 0:
            # Undo the time-origin shift with the corresponding phase factor
            f = f0 + df * np.arange(Nfft)
            fftgrid *= np.exp(2j * np.pi * t0 * f)
        fftgrid = fftgrid[:N]
        # ifft divides by Nfft; multiply back to get the raw sums
        C = Nfft * fftgrid.real
        S = Nfft * fftgrid.imag
    else:
        # Exact O[N^2] evaluation of the sums
        f = f0 + df * np.arange(N)
        C = np.dot(h, np.cos(2 * np.pi * f * t[:, np.newaxis]))
        S = np.dot(h, np.sin(2 * np.pi * f * t[:, np.newaxis]))
    return S, C
def lomb_scargle_fast(t, y, dy=1, f0=0, df=None, Nf=None,
                      center_data=True, fit_offset=True,
                      use_fft=True, freq_oversampling=5, nyquist_factor=2,
                      trig_sum_kwds=None):
    """Compute a lomb-scargle periodogram for the given data
    This implements both an O[N^2] method if use_fft==False, or an
    O[NlogN] method if use_fft==True.
    Parameters
    ----------
    t, y, dy : array_like
        times, values, and errors of the data points. These should be
        broadcastable to the same shape. If dy is not specified, a
        constant error will be used.
    f0, df, Nf : (float, float, int)
        parameters describing the frequency grid, f = f0 + df * arange(Nf).
        Defaults, with T = t.max() - t.min():
        - f0 = 0
        - df is set such that there are ``freq_oversampling`` points per
          peak width. ``freq_oversampling`` defaults to 5.
        - Nf is set such that the highest frequency is ``nyquist_factor``
          times the so-called "average Nyquist frequency".
          ``nyquist_factor`` defaults to 2.
        Note that for unevenly-spaced data, the periodogram can be sensitive
        to frequencies far higher than the average Nyquist frequency.
    center_data : bool (default=True)
        Specify whether to subtract the mean of the data before the fit
    fit_offset : bool (default=True)
        If True, then compute the floating-mean periodogram; i.e. let the mean
        vary with the fit.
    use_fft : bool (default=True)
        If True, then use the Press & Rybicki O[NlogN] algorithm to compute
        the result. Otherwise, use a slower O[N^2] algorithm
    Returns
    -------
    freq, power : ndarrays
        the frequency grid ``f0 + df * arange(Nf)`` and the periodogram
        power evaluated at each of those frequencies.
    Raises
    ------
    NotImplementedError
        if both ``center_data`` and ``fit_offset`` are False.
    Other Parameters
    ----------------
    freq_oversampling : float (default=5)
        Oversampling factor for the frequency bins. Only referenced if
        ``df`` is not specified
    nyquist_factor : float (default=2)
        Parameter controlling the highest probed frequency. Only referenced
        if ``Nf`` is not specified.
    trig_sum_kwds : dict or None (optional)
        extra keyword arguments to pass to the ``trig_sum`` utility.
        Options are ``oversampling`` and ``Mfft``. See documentation
        of ``trig_sum`` for details.
    References
    ----------
    .. [1] Press W.H. and Rybicki, G.B, "Fast algorithm for spectral analysis
        of unevenly sampled data". ApJ 1:338, p277, 1989
    .. [2] M. Zechmeister and M. Kurster, A&A 496, 577-584 (2009)
    .. [3] W. Press et al, Numerical Recipes in C (2002)
    """
    # Validate and setup input data
    t, y, dy = map(np.ravel, np.broadcast_arrays(t, y, dy))
    # Inverse-variance weights, normalized to sum to 1.
    w = 1. / (dy ** 2)
    w /= w.sum()
    # Validate and setup frequency grid
    if df is None:
        # A periodogram peak has width ~1/T; sample it freq_oversampling times.
        peak_width = 1. / (t.max() - t.min())
        df = peak_width / freq_oversampling
    if Nf is None:
        # "Average Nyquist frequency" for N points spread over baseline T.
        avg_Nyquist = 0.5 * len(t) / (t.max() - t.min())
        Nf = max(16, (nyquist_factor * avg_Nyquist - f0) / df)
    Nf = int(Nf)
    assert(df > 0)
    assert(Nf > 0)
    freq = f0 + df * np.arange(Nf)
    # Center the data. Even if we're fitting the offset,
    # this step makes the expressions below more succinct
    if center_data or fit_offset:
        y = y - np.dot(w, y)
    else:
        # TODO: can we relax this?
        raise NotImplementedError("lomb_scargle_fast requires either "
                                  "center_data or fit_offset to be True")
    # set up arguments to trig_sum
    kwargs = dict.copy(trig_sum_kwds or {})
    kwargs.update(f0=f0, df=df, use_fft=use_fft, N=Nf)
    #----------------------------------------------------------------------
    # 1. compute functions of the time-shift tau at each frequency
    # Sh/Ch: weighted sin/cos sums of the data; S2/C2: double-frequency sums.
    Sh, Ch = trig_sum(t, w * y, **kwargs)
    S2, C2 = trig_sum(t, w, freq_factor=2, **kwargs)
    if fit_offset:
        S, C = trig_sum(t, w, **kwargs)
        with warnings.catch_warnings():
            # Filter "invalid value in divide" warnings for zero-frequency
            if f0 == 0:
                warnings.simplefilter("ignore")
            tan_2omega_tau = (S2 - 2 * S * C) / (C2 - (C * C - S * S))
        # fix NaN at zero frequency
        if np.isnan(tan_2omega_tau[0]):
            tan_2omega_tau[0] = 0
    else:
        tan_2omega_tau = S2 / C2
    # slower/less stable way: we'll use trig identities instead
    # omega_tau = 0.5 * np.arctan(tan_2omega_tau)
    # S2w, C2w = np.sin(2 * omega_tau), np.cos(2 * omega_tau)
    # Sw, Cw = np.sin(omega_tau), np.cos(omega_tau)
    S2w = tan_2omega_tau / np.sqrt(1 + tan_2omega_tau * tan_2omega_tau)
    C2w = 1 / np.sqrt(1 + tan_2omega_tau * tan_2omega_tau)
    Cw = np.sqrt(0.5) * np.sqrt(1 + C2w)
    Sw = np.sqrt(0.5) * np.sign(S2w) * np.sqrt(1 - C2w)
    #----------------------------------------------------------------------
    # 2. Compute the periodogram, following Zechmeister & Kurster
    # and using tricks from Press & Rybicki.
    YY = np.dot(w, y ** 2)
    YC = Ch * Cw + Sh * Sw
    YS = Sh * Cw - Ch * Sw
    CC = 0.5 * (1 + C2 * C2w + S2 * S2w)
    SS = 0.5 * (1 - C2 * C2w - S2 * S2w)
    if fit_offset:
        # Floating-mean correction terms (Zechmeister & Kurster eqns).
        CC -= (C * Cw + S * Sw) ** 2
        SS -= (S * Cw - C * Sw) ** 2
    with warnings.catch_warnings():
        # Filter "invalid value in divide" warnings for zero-frequency
        if fit_offset and f0 == 0:
            warnings.simplefilter("ignore")
        power = (YC * YC / CC + YS * YS / SS) / YY
    # fix NaN and INF at zero frequency
    if np.isnan(power[0]) or np.isinf(power[0]):
        power[0] = 0
    return freq, power
class LombScargleFast(LombScargle):
    """Fast FFT-based Lomb-Scargle Periodogram Implementation
    This implements the O[N log N] lomb-scargle periodogram, described in
    Press & Rybicki (1989) [1].
    To compute the periodogram via the fast algorithm, use the
    ``score_frequency_grid()`` method; the ``score()`` and ``periodogram()``
    methods fall back to the slower O[N^2] algorithm.
    Parameters
    ----------
    optimizer : PeriodicOptimizer instance
        Optimizer to use to find the best period. If not specified, the
        LinearScanOptimizer will be used.
    center_data : boolean (default = True)
        If True, then compute the weighted mean of the input data and subtract
        before fitting the model.
    fit_offset : boolean (default = True)
        If True, then fit a floating-mean sinusoid model.
    use_fft : boolean (default = True)
        Specify whether to use the Press & Rybicki FFT algorithm to compute
        the result
    ls_kwds : dict
        Dictionary of keywords to pass to the ``lomb_scargle_fast`` routine.
    fit_period : bool (optional)
        If True, then fit for the best period when fit() method is called.
    optimizer_kwds : dict (optional)
        Dictionary of keyword arguments for constructing the optimizer
    Examples
    --------
    >>> rng = np.random.RandomState(0)
    >>> t = 100 * rng.rand(100)
    >>> dy = 0.1
    >>> omega = 10
    >>> y = np.sin(omega * t) + dy * rng.randn(100)
    >>> ls = LombScargleFast().fit(t, y, dy)
    >>> ls.optimizer.period_range = (0.2, 1.2)
    >>> ls.best_period
    Finding optimal frequency:
     - Estimated peak width = 0.0639
     - Using 5 steps per peak; omega_step = 0.0128
     - User-specified period range:  0.2 to 1.2
     - Computing periods at 2051 steps
    Zooming-in on 5 candidate peaks:
     - Computing periods at 1000 steps
    0.62826265739259146
    >>> ls.predict([0, 0.5])
    array([-0.02019474, -0.92910567])
    Notes
    -----
    Currently, a NotImplementedError will be raised if both center_data
    and fit_offset are False.
    See Also
    --------
    LombScargle
    LombScargleAstroML
    References
    ----------
    .. [1] Press W.H. and Rybicki, G.B, "Fast algorithm for spectral analysis
        of unevenly sampled data". ApJ 1:338, p277, 1989
    """
    def __init__(self, optimizer=None, center_data=True, fit_offset=True,
                 use_fft=True, ls_kwds=None, Nterms=1,
                 fit_period=False, optimizer_kwds=None):
        self.use_fft = use_fft
        self.ls_kwds = ls_kwds
        if Nterms != 1:
            raise ValueError("LombScargleFast supports only Nterms = 1")
        super(LombScargleFast, self).__init__(optimizer=optimizer,
                                              center_data=center_data,
                                              fit_offset=fit_offset,
                                              Nterms=1, regularization=None,
                                              fit_period=fit_period,
                                              optimizer_kwds=optimizer_kwds)
    def _score_frequency_grid(self, f0, df, N):
        """Evaluate the periodogram on a regular grid via lomb_scargle_fast."""
        extra_kwds = self.ls_kwds or {}
        freq, power = lomb_scargle_fast(self.t, self.y, self.dy,
                                        f0=f0, df=df, Nf=N,
                                        center_data=self.center_data,
                                        fit_offset=self.fit_offset,
                                        use_fft=self.use_fft,
                                        **extra_kwds)
        return power
    def _score(self, periods):
        """Fall back to the slow O[N^2] base-class scorer, with a warning."""
        warnings.warn("The score() method defaults to a slower O[N^2] "
                      "algorithm. Use the score_frequency_grid() method "
                      "to access the fast FFT-based algorithm")
        return super(LombScargleFast, self)._score(periods)
| {
"content_hash": "0ececf3a37ef57556e28faaaffe0dc1c",
"timestamp": "",
"source": "github",
"line_count": 423,
"max_line_length": 79,
"avg_line_length": 35.35224586288416,
"alnum_prop": 0.5741607596629664,
"repo_name": "nhuntwalker/gatspy",
"id": "7d2531466b71eb4be006e008d960b81532e872a8",
"size": "14954",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gatspy/periodic/lomb_scargle_fast.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "217"
},
{
"name": "Python",
"bytes": "114268"
}
],
"symlink_target": ""
} |
#!/usr/bin/env python
#
# Copyright (c) 2014, 2016 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
import logging
from generator import Generator, ucfirst
from models import PrimitiveType, ObjectType, ArrayType, EnumType, AliasedType, Frameworks
log = logging.getLogger('global')
def join_type_and_name(type_str, name_str):
    """Concatenate a C/ObjC type string and an identifier.

    No space is inserted after pointer types (those ending in '*'), so
    'NSString *' + 'name' becomes 'NSString *name'.
    """
    separator = '' if type_str.endswith('*') else ' '
    return type_str + separator + name_str
def strip_block_comment_markers(str):
    """Remove all C-style block comment delimiters ('/*' and '*/') from *str*."""
    without_open = str.replace('/*', '')
    return without_open.replace('*/', '')
def remove_duplicate_from_str(str, possible_duplicate):
    """Collapse an immediately doubled occurrence of *possible_duplicate* into one.

    e.g. remove_duplicate_from_str('DOMDOMStorage', 'DOM') -> 'DOMStorage'.
    """
    doubled = possible_duplicate * 2
    return str.replace(doubled, possible_duplicate)
_OBJC_IDENTIFIER_RENAME_MAP = {
'this': 'thisObject', # Debugger.CallFrame.this
'description': 'stringRepresentation', # Runtime.RemoteObject.description
'id': 'identifier', # Page.Frame.id, Runtime.ExecutionContextDescription.id, Debugger.BreakpointAction.id
}
_OBJC_IDENTIFIER_REVERSE_RENAME_MAP = dict((v, k) for k, v in _OBJC_IDENTIFIER_RENAME_MAP.iteritems())
class ObjCTypeCategory:
    """Coarse classification of protocol types, used to pick the right
    Objective-C conversion strategy (simple scalar, string, object, array)."""
    Simple = 0
    String = 1
    Object = 2
    Array = 3

    @staticmethod
    def category_for_type(_type):
        """Map a protocol type model object to one of the category constants.

        Aliased and enum types are resolved to their underlying type.
        Returns None for unrecognized type models.
        """
        if (isinstance(_type, PrimitiveType)):
            # Compare strings with '==', not 'is': identity comparison of
            # strings only works by CPython interning accident.
            if _type.raw_name() == 'string':
                return ObjCTypeCategory.String
            if _type.raw_name() in ['object', 'any']:
                return ObjCTypeCategory.Object
            if _type.raw_name() == 'array':
                return ObjCTypeCategory.Array
            return ObjCTypeCategory.Simple
        if (isinstance(_type, ObjectType)):
            return ObjCTypeCategory.Object
        if (isinstance(_type, ArrayType)):
            return ObjCTypeCategory.Array
        if (isinstance(_type, AliasedType)):
            return ObjCTypeCategory.category_for_type(_type.aliased_type)
        if (isinstance(_type, EnumType)):
            return ObjCTypeCategory.category_for_type(_type.primitive_type)
        return None
# Almost all Objective-C class names require the use of a prefix that depends on the
# target framework's 'objc_prefix' setting. So, most helpers are instance methods.
class ObjCGenerator(Generator):
    """Shared helpers for generating Objective-C protocol types, command
    handlers, and event dispatchers.

    Almost all Objective-C class names require a prefix that depends on the
    target framework's 'objc_prefix' setting, so most helpers are instance
    methods rather than static methods.

    Note: throughout this class, string and category comparisons use '==';
    the previous 'is' comparisons relied on CPython string/small-int interning.
    """
    # Do not use a dynamic prefix for RWIProtocolJSONObject since it's used as a common
    # base class and needs a consistent Objective-C prefix to be in a reusable framework.
    OBJC_HELPER_PREFIX = 'RWI'
    OBJC_SHARED_PREFIX = 'Protocol'
    OBJC_STATIC_PREFIX = '%s%s' % (OBJC_HELPER_PREFIX, OBJC_SHARED_PREFIX)

    def __init__(self, *args, **kwargs):
        Generator.__init__(self, *args, **kwargs)

    # The 'protocol name' is used to prefix filenames for a protocol group (a set of domains generated together).
    def protocol_name(self):
        """Return the filename prefix for this protocol group."""
        protocol_group = self.model().framework.setting('objc_protocol_group', '')
        return '%s%s' % (protocol_group, ObjCGenerator.OBJC_SHARED_PREFIX)

    # The 'ObjC prefix' is used to prefix Objective-C class names and enums with a
    # framework-specific prefix. It is separate from filename prefixes.
    def objc_prefix(self):
        """Return the framework-specific prefix for generated ObjC names, or ''."""
        framework_prefix = self.model().framework.setting('objc_prefix', None)
        if not framework_prefix:
            return ''
        else:
            return '%s%s' % (framework_prefix, ObjCGenerator.OBJC_SHARED_PREFIX)

    # Adjust identifier names that collide with ObjC keywords.
    @staticmethod
    def identifier_to_objc_identifier(name):
        return _OBJC_IDENTIFIER_RENAME_MAP.get(name, name)

    @staticmethod
    def objc_identifier_to_identifier(name):
        return _OBJC_IDENTIFIER_REVERSE_RENAME_MAP.get(name, name)

    # Generate ObjC types, command handlers, and event dispatchers for a subset of domains.
    DOMAINS_TO_GENERATE = ['CSS', 'DOM', 'DOMStorage', 'Network', 'Page', 'Automation', 'GenericTypes']

    def should_generate_types_for_domain(self, domain):
        """Return True if ObjC type declarations should be emitted for *domain*."""
        if not len(self.type_declarations_for_domain(domain)):
            return False
        # 'is' is correct here: Frameworks.Test is a singleton model object.
        if self.model().framework is Frameworks.Test:
            return True
        whitelist = set(ObjCGenerator.DOMAINS_TO_GENERATE)
        whitelist.update(set(['Console', 'Debugger', 'Runtime']))
        return domain.domain_name in whitelist

    def should_generate_commands_for_domain(self, domain):
        """Return True if ObjC command handlers should be emitted for *domain*."""
        if not len(self.commands_for_domain(domain)):
            return False
        if self.model().framework is Frameworks.Test:
            return True
        whitelist = set(ObjCGenerator.DOMAINS_TO_GENERATE)
        return domain.domain_name in whitelist

    def should_generate_events_for_domain(self, domain):
        """Return True if ObjC event dispatchers should be emitted for *domain*."""
        if not len(self.events_for_domain(domain)):
            return False
        if self.model().framework is Frameworks.Test:
            return True
        whitelist = set(ObjCGenerator.DOMAINS_TO_GENERATE)
        whitelist.add('Console')
        return domain.domain_name in whitelist

    # ObjC enum and type names.
    def objc_name_for_type(self, type):
        """Return the prefixed ObjC class name for a declared protocol type."""
        name = type.qualified_name().replace('.', '')
        name = remove_duplicate_from_str(name, type.type_domain().domain_name)
        return '%s%s' % (self.objc_prefix(), name)

    def objc_enum_name_for_anonymous_enum_declaration(self, declaration):
        domain_name = declaration.type.type_domain().domain_name
        name = '%s%s' % (domain_name, declaration.type.raw_name())
        name = remove_duplicate_from_str(name, domain_name)
        return '%s%s' % (self.objc_prefix(), name)

    def objc_enum_name_for_anonymous_enum_member(self, declaration, member):
        domain_name = member.type.type_domain().domain_name
        name = '%s%s%s' % (domain_name, declaration.type.raw_name(), ucfirst(member.member_name))
        name = remove_duplicate_from_str(name, domain_name)
        return '%s%s' % (self.objc_prefix(), name)

    def objc_enum_name_for_anonymous_enum_parameter(self, domain, event_or_command_name, parameter):
        domain_name = domain.domain_name
        name = '%s%s%s' % (domain_name, ucfirst(event_or_command_name), ucfirst(parameter.parameter_name))
        name = remove_duplicate_from_str(name, domain_name)
        return '%s%s' % (self.objc_prefix(), name)

    def objc_enum_name_for_non_anonymous_enum(self, _type):
        domain_name = _type.type_domain().domain_name
        name = _type.qualified_name().replace('.', '')
        name = remove_duplicate_from_str(name, domain_name)
        return '%s%s' % (self.objc_prefix(), name)

    # Miscellaneous name handling.
    @staticmethod
    def variable_name_prefix_for_domain(domain):
        """Return a lowerCamelCase variable prefix, keeping DOM/CSS acronyms lowercase."""
        domain_name = domain.domain_name
        if domain_name.startswith('DOM'):
            return 'dom' + domain_name[3:]
        if domain_name.startswith('CSS'):
            return 'css' + domain_name[3:]
        return domain_name[:1].lower() + domain_name[1:]

    # Type basics.
    @staticmethod
    def objc_accessor_type_for_raw_name(raw_name):
        """Return the @property attribute (copy/assign/retain) for a raw type name."""
        if raw_name in ['string', 'array']:
            return 'copy'
        if raw_name in ['integer', 'number', 'boolean']:
            return 'assign'
        if raw_name in ['any', 'object']:
            return 'retain'
        return None

    @staticmethod
    def objc_type_for_raw_name(raw_name):
        """Return the ObjC type spelling (including '*') for a raw type name."""
        if raw_name == 'string':
            return 'NSString *'
        if raw_name == 'array':
            return 'NSArray *'
        if raw_name == 'integer':
            return 'int'
        if raw_name == 'number':
            return 'double'
        if raw_name == 'boolean':
            return 'BOOL'
        if raw_name in ['any', 'object']:
            return '%sJSONObject *' % ObjCGenerator.OBJC_STATIC_PREFIX
        return None

    @staticmethod
    def objc_class_for_raw_name(raw_name):
        """Return the ObjC class name (no '*') for a raw type name."""
        if raw_name == 'string':
            return 'NSString'
        if raw_name == 'array':
            return 'NSArray'
        if raw_name in ['integer', 'number', 'boolean']:
            return 'NSNumber'
        if raw_name in ['any', 'object']:
            return '%sJSONObject' % ObjCGenerator.OBJC_STATIC_PREFIX
        return None

    # FIXME: Can these protocol_type functions be removed in favor of C++ generators functions?
    @staticmethod
    def protocol_type_for_raw_name(raw_name):
        """Return the C++ protocol type spelling for a raw type name."""
        if raw_name == 'string':
            return 'String'
        if raw_name == 'integer':
            return 'int'
        if raw_name == 'number':
            return 'double'
        if raw_name == 'boolean':
            return 'bool'
        if raw_name in ['any', 'object']:
            return 'InspectorObject'
        return None

    @staticmethod
    def protocol_type_for_type(_type):
        """Return the C++ protocol type spelling for a type model object."""
        if (isinstance(_type, AliasedType)):
            _type = _type.aliased_type
        if (isinstance(_type, PrimitiveType)):
            return ObjCGenerator.protocol_type_for_raw_name(_type.raw_name())
        if (isinstance(_type, EnumType)):
            return ObjCGenerator.protocol_type_for_type(_type.primitive_type)
        if (isinstance(_type, ObjectType)):
            return 'Inspector::Protocol::%s::%s' % (_type.type_domain().domain_name, _type.raw_name())
        if (isinstance(_type, ArrayType)):
            sub_type = ObjCGenerator.protocol_type_for_type(_type.element_type)
            return 'Inspector::Protocol::Array<%s>' % sub_type
        return None

    @staticmethod
    def is_type_objc_pointer_type(_type):
        """Return True if the type maps to an ObjC object pointer, False for scalars."""
        if (isinstance(_type, AliasedType)):
            _type = _type.aliased_type
        if (isinstance(_type, PrimitiveType)):
            return _type.raw_name() in ['string', 'array', 'any', 'object']
        if (isinstance(_type, EnumType)):
            return False
        if (isinstance(_type, ObjectType)):
            return True
        if (isinstance(_type, ArrayType)):
            return True
        return None

    def objc_class_for_type(self, _type):
        """Return the ObjC class name for a type, annotating array element types."""
        if (isinstance(_type, AliasedType)):
            _type = _type.aliased_type
        if (isinstance(_type, PrimitiveType)):
            return ObjCGenerator.objc_class_for_raw_name(_type.raw_name())
        if (isinstance(_type, EnumType)):
            return ObjCGenerator.objc_class_for_raw_name(_type.primitive_type.raw_name())
        if (isinstance(_type, ObjectType)):
            return self.objc_name_for_type(_type)
        if (isinstance(_type, ArrayType)):
            sub_type = strip_block_comment_markers(self.objc_class_for_type(_type.element_type))
            return 'NSArray/*<%s>*/' % sub_type
        return None

    def objc_class_for_array_type(self, _type):
        """Return the ObjC class of an array type's elements, or None for non-arrays."""
        if isinstance(_type, AliasedType):
            _type = _type.aliased_type
        if isinstance(_type, ArrayType):
            return self.objc_class_for_type(_type.element_type)
        return None

    def objc_accessor_type_for_member(self, member):
        return self.objc_accessor_type_for_member_internal(member.type)

    def objc_accessor_type_for_member_internal(self, _type):
        if (isinstance(_type, AliasedType)):
            _type = _type.aliased_type
        if (isinstance(_type, PrimitiveType)):
            return self.objc_accessor_type_for_raw_name(_type.raw_name())
        if (isinstance(_type, EnumType)):
            return 'assign'
        if (isinstance(_type, ObjectType)):
            return 'retain'
        if (isinstance(_type, ArrayType)):
            return 'copy'
        return None

    def objc_type_for_member(self, declaration, member):
        return self.objc_type_for_member_internal(member.type, declaration, member)

    def objc_type_for_member_internal(self, _type, declaration, member):
        if (isinstance(_type, AliasedType)):
            _type = _type.aliased_type
        if (isinstance(_type, PrimitiveType)):
            return self.objc_type_for_raw_name(_type.raw_name())
        if (isinstance(_type, EnumType)):
            if (_type.is_anonymous):
                return self.objc_enum_name_for_anonymous_enum_member(declaration, member)
            return self.objc_enum_name_for_non_anonymous_enum(_type)
        if (isinstance(_type, ObjectType)):
            return self.objc_name_for_type(_type) + ' *'
        if (isinstance(_type, ArrayType)):
            sub_type = strip_block_comment_markers(self.objc_class_for_type(_type.element_type))
            return 'NSArray/*<%s>*/ *' % sub_type
        return None

    def objc_type_for_param(self, domain, event_or_command_name, parameter, respect_optional=True):
        """Return the ObjC type for a parameter; optional scalars get an extra '*'."""
        objc_type = self.objc_type_for_param_internal(parameter.type, domain, event_or_command_name, parameter)
        if respect_optional and parameter.is_optional:
            if objc_type.endswith('*'):
                return objc_type + '*'
            return objc_type + ' *'
        return objc_type

    def objc_type_for_param_internal(self, _type, domain, event_or_command_name, parameter):
        if (isinstance(_type, AliasedType)):
            _type = _type.aliased_type
        if (isinstance(_type, PrimitiveType)):
            return self.objc_type_for_raw_name(_type.raw_name())
        if (isinstance(_type, EnumType)):
            if _type.is_anonymous:
                return self.objc_enum_name_for_anonymous_enum_parameter(domain, event_or_command_name, parameter)
            return self.objc_enum_name_for_non_anonymous_enum(_type)
        if (isinstance(_type, ObjectType)):
            return self.objc_name_for_type(_type) + ' *'
        if (isinstance(_type, ArrayType)):
            sub_type = strip_block_comment_markers(self.objc_class_for_type(_type.element_type))
            return 'NSArray/*<%s>*/ *' % sub_type
        return None

    # ObjC <-> Protocol conversion for commands and events.
    #   - convert a command call parameter received from Protocol to ObjC for handler
    #   - convert a command return parameter in callback block from ObjC to Protocol to send
    #   - convert an event parameter from ObjC API to Protocol to send
    def objc_protocol_export_expression_for_variable(self, var_type, var_name):
        """Return a C++/ObjC expression converting *var_name* from ObjC to Protocol form."""
        category = ObjCTypeCategory.category_for_type(var_type)
        if category in [ObjCTypeCategory.Simple, ObjCTypeCategory.String]:
            if isinstance(var_type, EnumType):
                return 'toProtocolString(%s)' % var_name
            return var_name
        if category == ObjCTypeCategory.Object:
            return '[%s toInspectorObject]' % var_name
        if category == ObjCTypeCategory.Array:
            protocol_type = ObjCGenerator.protocol_type_for_type(var_type.element_type)
            objc_class = self.objc_class_for_type(var_type.element_type)
            if protocol_type == 'Inspector::Protocol::Array<String>':
                return 'inspectorStringArrayArray(%s)' % var_name
            if protocol_type == 'String' and objc_class == 'NSString':
                return 'inspectorStringArray(%s)' % var_name
            if protocol_type == 'int' and objc_class == 'NSNumber':
                return 'inspectorIntegerArray(%s)' % var_name
            if protocol_type == 'double' and objc_class == 'NSNumber':
                return 'inspectorDoubleArray(%s)' % var_name
            return 'inspectorObjectArray(%s)' % var_name

    def objc_protocol_import_expression_for_member(self, name, declaration, member):
        if isinstance(member.type, EnumType):
            if member.type.is_anonymous:
                return 'fromProtocolString<%s>(%s)' % (self.objc_enum_name_for_anonymous_enum_member(declaration, member), name)
            return 'fromProtocolString<%s>(%s)' % (self.objc_enum_name_for_non_anonymous_enum(member.type), name)
        return self.objc_protocol_import_expression_for_variable(member.type, name)

    def objc_protocol_import_expression_for_parameter(self, name, domain, event_or_command_name, parameter):
        if isinstance(parameter.type, EnumType):
            if parameter.type.is_anonymous:
                return 'fromProtocolString<%s>(%s)' % (self.objc_enum_name_for_anonymous_enum_parameter(domain, event_or_command_name, parameter), name)
            return 'fromProtocolString<%s>(%s)' % (self.objc_enum_name_for_non_anonymous_enum(parameter.type), name)
        return self.objc_protocol_import_expression_for_variable(parameter.type, name)

    def objc_protocol_import_expression_for_variable(self, var_type, var_name):
        """Return an expression converting *var_name* from Protocol to ObjC form."""
        category = ObjCTypeCategory.category_for_type(var_type)
        if category in [ObjCTypeCategory.Simple, ObjCTypeCategory.String]:
            return var_name
        if category == ObjCTypeCategory.Object:
            objc_class = self.objc_class_for_type(var_type)
            return '[[[%s alloc] initWithInspectorObject:%s] autorelease]' % (objc_class, var_name)
        if category == ObjCTypeCategory.Array:
            objc_class = self.objc_class_for_type(var_type.element_type)
            if objc_class == 'NSString':
                return 'objcStringArray(%s)' % var_name
            if objc_class == 'NSNumber':  # FIXME: Integer or Double?
                return 'objcIntegerArray(%s)' % var_name
            return 'objcArray<%s>(%s)' % (objc_class, var_name)

    # ObjC <-> JSON object conversion for types getters/setters.
    #   - convert a member setter from ObjC API to JSON object setter
    #   - convert a member getter from JSON object to ObjC API
    def objc_to_protocol_expression_for_member(self, declaration, member, sub_expression):
        category = ObjCTypeCategory.category_for_type(member.type)
        if category in [ObjCTypeCategory.Simple, ObjCTypeCategory.String]:
            if isinstance(member.type, EnumType):
                return 'toProtocolString(%s)' % sub_expression
            return sub_expression
        if category == ObjCTypeCategory.Object:
            return sub_expression
        if category == ObjCTypeCategory.Array:
            objc_class = self.objc_class_for_type(member.type.element_type)
            if objc_class == 'NSString':
                return 'inspectorStringArray(%s)' % sub_expression
            if objc_class == 'NSNumber':
                protocol_type = ObjCGenerator.protocol_type_for_type(member.type.element_type)
                if protocol_type == 'double':
                    return 'inspectorDoubleArray(%s)' % sub_expression
                return 'inspectorIntegerArray(%s)' % sub_expression
            return 'inspectorObjectArray(%s)' % sub_expression

    def protocol_to_objc_expression_for_member(self, declaration, member, sub_expression):
        category = ObjCTypeCategory.category_for_type(member.type)
        if category in [ObjCTypeCategory.Simple, ObjCTypeCategory.String]:
            if isinstance(member.type, EnumType):
                if member.type.is_anonymous:
                    return 'fromProtocolString<%s>(%s).value()' % (self.objc_enum_name_for_anonymous_enum_member(declaration, member), sub_expression)
                return 'fromProtocolString<%s>(%s).value()' % (self.objc_enum_name_for_non_anonymous_enum(member.type), sub_expression)
            return sub_expression
        if category == ObjCTypeCategory.Object:
            raise Exception("protocol_to_objc_expression_for_member does not support an Object type. See: protocol_to_objc_code_block_for_object_member")
        if category == ObjCTypeCategory.Array:
            protocol_type = ObjCGenerator.protocol_type_for_type(member.type.element_type)
            objc_class = self.objc_class_for_type(member.type.element_type)
            if objc_class == 'NSString':
                return 'objcStringArray(%s)' % sub_expression
            if objc_class == 'NSNumber':
                protocol_type = ObjCGenerator.protocol_type_for_type(member.type.element_type)
                if protocol_type == 'double':
                    return 'objcDoubleArray(%s)' % sub_expression
                return 'objcIntegerArray(%s)' % sub_expression
            return 'objcArray<%s>(%s)' % (objc_class, sub_expression)

    def protocol_to_objc_code_block_for_object_member(self, declaration, member, sub_expression):
        """Return a multi-line code block converting an Object member to ObjC."""
        # NOTE(review): the generated return line re-evaluates sub_expression
        # rather than using the local 'object' it just null-checked — looks
        # intentional in the emitted code shape, but worth confirming.
        objc_class = self.objc_class_for_type(member.type)
        lines = []
        lines.append('    %sJSONObject *object = %s;' % (ObjCGenerator.OBJC_STATIC_PREFIX, sub_expression))
        lines.append('    if (!object)')
        lines.append('        return nil;')
        lines.append('    return [[%s alloc] initWithInspectorObject:[%s toInspectorObject].get()];' % (objc_class, sub_expression))
        return '\n'.join(lines)

    def payload_to_objc_expression_for_member(self, declaration, member):
        """Return an expression extracting *member* from an NSDictionary payload."""
        _type = member.type
        if isinstance(_type, AliasedType):
            _type = _type.aliased_type
        if isinstance(_type, PrimitiveType):
            sub_expression = 'payload[@"%s"]' % member.member_name
            raw_name = _type.raw_name()
            if raw_name == 'boolean':
                return '[%s boolValue]' % sub_expression
            if raw_name == 'integer':
                return '[%s integerValue]' % sub_expression
            if raw_name == 'number':
                return '[%s doubleValue]' % sub_expression
            if raw_name in ['any', 'object', 'array', 'string']:
                return sub_expression  # The setter will check the incoming value.
            return None
        # NOTE(review): the branches below test member.type rather than the
        # alias-resolved _type; an aliased enum would fall through differently.
        if isinstance(member.type, EnumType):
            sub_expression = 'payload[@"%s"]' % member.member_name
            if member.type.is_anonymous:
                return 'fromProtocolString<%s>(%s)' % (self.objc_enum_name_for_anonymous_enum_member(declaration, member), sub_expression)
            else:
                return 'fromProtocolString<%s>(%s)' % (self.objc_enum_name_for_non_anonymous_enum(member.type), sub_expression)
        if isinstance(_type, ObjectType):
            objc_class = self.objc_class_for_type(member.type)
            return '[[%s alloc] initWithPayload:payload[@"%s"]]' % (objc_class, member.member_name)
        if isinstance(_type, ArrayType):
            element_type = member.type.element_type
            if isinstance(element_type, EnumType):
                element_type = element_type.primitive_type
            # In this case, there is no conversion that needs to be done, the array already contains an ObjC type.
            if isinstance(element_type, PrimitiveType):
                return 'payload[@"%s"]' % member.member_name
            else:
                objc_class = self.objc_class_for_type(element_type)
                return 'objcArrayFromPayload<%s>(payload[@"%s"])' % (objc_class, member.member_name)

    # JSON object setter/getter selectors for types.
    @staticmethod
    def objc_setter_method_for_member(declaration, member):
        return ObjCGenerator.objc_setter_method_for_member_internal(member.type, declaration, member)

    @staticmethod
    def objc_setter_method_for_member_internal(_type, declaration, member):
        """Return the RWIProtocolJSONObject setter selector for a member's type."""
        if (isinstance(_type, AliasedType)):
            _type = _type.aliased_type
        if (isinstance(_type, PrimitiveType)):
            raw_name = _type.raw_name()
            if raw_name == 'boolean':
                return 'setBool'
            if raw_name == 'integer':
                return 'setInteger'
            if raw_name == 'number':
                return 'setDouble'
            if raw_name == 'string':
                return 'setString'
            if raw_name in ['any', 'object']:
                return 'setObject'
            if raw_name == 'array':
                return 'setInspectorArray'
            return None
        if (isinstance(_type, EnumType)):
            return 'setString'
        if (isinstance(_type, ObjectType)):
            return 'setObject'
        if (isinstance(_type, ArrayType)):
            return 'setInspectorArray'
        return None

    @staticmethod
    def objc_getter_method_for_member(declaration, member):
        return ObjCGenerator.objc_getter_method_for_member_internal(member.type, declaration, member)

    @staticmethod
    def objc_getter_method_for_member_internal(_type, declaration, member):
        """Return the RWIProtocolJSONObject getter selector for a member's type."""
        if (isinstance(_type, AliasedType)):
            _type = _type.aliased_type
        if (isinstance(_type, PrimitiveType)):
            raw_name = _type.raw_name()
            if raw_name == 'boolean':
                return 'boolForKey'
            if raw_name == 'integer':
                return 'integerForKey'
            if raw_name == 'number':
                return 'doubleForKey'
            if raw_name == 'string':
                return 'stringForKey'
            if raw_name in ['any', 'object']:
                return 'objectForKey'
            if raw_name == 'array':
                return 'inspectorArrayForKey'
            return None
        if (isinstance(_type, EnumType)):
            return 'stringForKey'
        if (isinstance(_type, ObjectType)):
            return 'objectForKey'
        if (isinstance(_type, ArrayType)):
            return 'inspectorArrayForKey'
        return None
| {
"content_hash": "4c487a5c439cf3ac6f09962d2838d6b9",
"timestamp": "",
"source": "github",
"line_count": 570,
"max_line_length": 153,
"avg_line_length": 45.96666666666667,
"alnum_prop": 0.6290599595435289,
"repo_name": "hxxft/lynx-native",
"id": "944d2e614143189abbbc8b63b721c20be472eb72",
"size": "26201",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Core/third_party/JavaScriptCore/inspector/scripts/codegen/objc_generator.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "8846"
},
{
"name": "C++",
"bytes": "1644015"
},
{
"name": "CMake",
"bytes": "16371"
},
{
"name": "HTML",
"bytes": "132541"
},
{
"name": "Java",
"bytes": "545560"
},
{
"name": "JavaScript",
"bytes": "6469482"
},
{
"name": "Objective-C",
"bytes": "58832"
},
{
"name": "Objective-C++",
"bytes": "174916"
},
{
"name": "Python",
"bytes": "55792"
},
{
"name": "Ruby",
"bytes": "1520"
},
{
"name": "Shell",
"bytes": "515"
},
{
"name": "TypeScript",
"bytes": "1851"
},
{
"name": "Vue",
"bytes": "37901"
}
],
"symlink_target": ""
} |
from zope.i18nmessageid import MessageFactory
# Set up the i18n message factory for our package.
# Rebinding the imported name to a domain-bound factory is the conventional
# Zope/Plone idiom: message ids created with it belong to the
# 'xdash.boards' translation domain.
MessageFactory = MessageFactory('xdash.boards')
| {
"content_hash": "3dc2b7deb032fb73b2974bb2ea753ad4",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 49,
"avg_line_length": 36.25,
"alnum_prop": 0.8206896551724138,
"repo_name": "potzenheimer/xdash",
"id": "7b0e25c0166666a929e219b344eb2d88bcea4882",
"size": "145",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/xdash.boards/xdash/boards/__init__.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "290985"
},
{
"name": "JavaScript",
"bytes": "1332757"
},
{
"name": "Python",
"bytes": "193370"
},
{
"name": "VCL",
"bytes": "4519"
}
],
"symlink_target": ""
} |
import sys
import os

# -- General configuration ------------------------------------------------

# Only reStructuredText sources, with index.rst as the root document
source_suffix = '.rst'
master_doc = 'index'

# Project metadata (placeholder values -- override per document)
project = u'Document Title'
copyright = u'years, Author Name'
version = '0.0'
release = version

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    'papersize': 'letterpaper',
    'pointsize': '11pt',
    # Use Droid Sans and make sans-serif the default document family
    'fontpkg': '\\usepackage[defaultsans]{droidsans}\n\\renewcommand*\\familydefault{\\sfdefault}',
    # Disable the "fancy chapter" headings
    'fncychap': '',
    # Pull in the local strat.sty (shipped via latex_additional_files below)
    'preamble': '\\usepackage{strat}',
}

# (source start file, target name, title, author, documentclass)
latex_documents = [
    ('index', 'output_file.tex', '', u'Author Name', 'howto'),
]

# Extra files copied alongside the generated LaTeX sources
latex_additional_files = ['strat.sty']
| {
"content_hash": "400937f86df5b98805e8c468e49366ba",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 95,
"avg_line_length": 20.12121212121212,
"alnum_prop": 0.5737951807228916,
"repo_name": "allisonrandal/sphinx-strat-doc-base",
"id": "785c5e24f32ff8f2c4c215ced091fd054f979040",
"size": "793",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "source/conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "793"
},
{
"name": "TeX",
"bytes": "1371"
}
],
"symlink_target": ""
} |
# -*- coding: utf-8 -*-
"""
    Organization Registry - Controllers
"""
# web2py exposes the current request globally: `module` is this controller's
# name and `resourcename` the function being invoked
module = request.controller
resourcename = request.function
# Fail fast with a 404 if this module is disabled in deployment settings
if not settings.has_module(module):
    raise HTTP(404, body="Module disabled: %s" % module)
# -----------------------------------------------------------------------------
def index():
    """ Module's Home Page """
    # Default to the summary view of Organisations rather than a CMS page
    redirect(URL(f="organisation", args="summary"))
# -----------------------------------------------------------------------------
def index_alt():
    """
    Fallback module homepage for non-Admin users when no CMS content
    is found.
    """
    # @ToDo: Move this to the Template (separate deployment_setting or else
    #        a customise for non-REST controllers)
    if settings.get_template() == "SandyRelief":
        # SandyRelief sends users straight to the Facilities
        redirect(URL(f="facility"))
    # Everyone else: redirect raised above otherwise, so this is the default
    redirect(URL(f="organisation", args="summary"))
# -----------------------------------------------------------------------------
def group():
    """ RESTful CRUD controller """
    # Organisation Groups; rendered with the standard org resource header
    return s3_rest_controller(rheader = s3db.org_rheader)
# -----------------------------------------------------------------------------
def group_membership():
    """ RESTful CRUD controller for options.s3json lookups """
    # Only serve the s3json representation used for options lookups
    if auth.permission.format != "s3json":
        return ""
    # Restrict to the "options" method
    # (same lambda-prep style as group_person() for consistency)
    s3.prep = lambda r: r.method == "options"
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def group_membership_status():
    """ RESTful CRUD controller """
    # Plain lookup-table resource; no custom prep needed
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def group_person():
    """ REST controller for options.s3json lookups """
    # Only allow s3json "options" requests; everything else is rejected
    s3.prep = lambda r: r.representation == "s3json" and r.method == "options"
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def group_person_status():
    """ RESTful CRUD controller """
    # Plain lookup-table resource; no custom prep needed
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def region():
    """ RESTful CRUD controller """
    # Organisation Regions; default REST behaviour
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def sector():
    """ RESTful CRUD controller """
    def _prep(r):
        # Restrict the listing by any active Location filter
        s3db.gis_location_filter(r)
        return True
    s3.prep = _prep
    output = s3_rest_controller()
    return output
# -----------------------------------------------------------------------------
def subsector():
    """ RESTful CRUD controller """
    # Sub-sectors of org_sector; default REST behaviour
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def site():
    """
    RESTful CRUD controller
    - used by S3SiteAutocompleteWidget
      which doesn't yet support filtering to just updateable sites
    - used by site_contact_person()
    - used by S3OptionsFilter (e.g. Asset Log)
    """
    allowed_methods = ("search_ac", "search_address_ac", "site_contact_person")
    def prep(r):
        # Serve only JSON or the autocomplete/contact-person methods
        if r.representation == "json" or r.method in allowed_methods:
            # Restrict by any active Location filter
            s3db.gis_location_filter(r)
            return True
        return False
    s3.prep = prep
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def sites_for_org():
    """
    Used to provide the list of Sites for an Organisation
    - used in User Registration & Assets

    Returns a JSON array of {site_id, name} rows, or a JSON error
    message (HTTP 400 semantics) when the Org argument is missing
    or not an integer id.
    """
    try:
        org = request.args[0]
    # Was a bare "except:", which also swallowed SystemExit etc.;
    # only a missing URL argument can fail here
    except IndexError:
        result = current.xml.json_message(False, 400, "No Org provided!")
    else:
        try:
            org = int(org)
        # Non-numeric (or non-string) argument
        except (ValueError, TypeError):
            result = current.xml.json_message(False, 400, "Invalid Org provided!")
        else:
            stable = s3db.org_site
            if settings.get_org_branches():
                # Find all branches for this Organisation & include them
                btable = s3db.org_organisation_branch
                query = (btable.organisation_id == org) & \
                        (btable.deleted != True)
                rows = db(query).select(btable.branch_id)
                org_ids = [row.branch_id for row in rows] + [org]
                query = (stable.organisation_id.belongs(org_ids)) & \
                        (stable.deleted != True)
            else:
                query = (stable.organisation_id == org) & \
                        (stable.deleted != True)
            rows = db(query).select(stable.site_id,
                                    stable.name,
                                    orderby=stable.name)
            result = rows.json()
    finally:
        # Always emit JSON, whether success or error message
        response.headers["Content-Type"] = "application/json"
    return result
# -----------------------------------------------------------------------------
def facility():
    """ RESTful CRUD controller """
    # Shared controller defined in the model (used from multiple modules)
    return s3db.org_facility_controller()
# -----------------------------------------------------------------------------
def facility_type():
    """ RESTful CRUD controller """
    # Facility Types lookup table; default REST behaviour
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def office_type():
    """ RESTful CRUD controller """
    # Office Types lookup table; default REST behaviour
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def organisation_type():
    """ RESTful CRUD controller """
    # Organisation Types lookup table; default REST behaviour
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def organisation():
    """ RESTful CRUD controller """
    # Defined in the Model for use from Multiple Controllers for unified menus
    return s3db.org_organisation_controller()
# -----------------------------------------------------------------------------
def org_search():
    """
    Organisation REST controller
    - limited to just search_ac for use in Autocompletes
    - allows differential access permissions
    """
    # Reject everything except the autocomplete search method
    s3.prep = lambda r: r.method == "search_ac"
    return s3_rest_controller(module, "organisation")
# -----------------------------------------------------------------------------
def organisation_list_represent(l, max_length=4):
    """
    Represent a list of Organisation ids as a readable string

    @param l: list of org_organisation record ids (may be empty/None)
    @param max_length: maximum number of names to spell out before the
                       list is truncated with ", etc" (default 4,
                       preserving the previous hard-coded behaviour)
    """
    organisation_represent = s3db.org_organisation_represent
    if l:
        if len(l) > max_length:
            # Truncate long lists to keep the representation compact
            return "%s, etc" % \
                   organisation_represent.multiple(l[:max_length])
        else:
            return organisation_represent.multiple(l)
    else:
        # Standard "empty" representation
        return NONE
# -----------------------------------------------------------------------------
def office():
    """ RESTful CRUD controller """
    # Defined in the Model for use from Multiple Controllers for unified menus
    return s3db.org_office_controller()
# -----------------------------------------------------------------------------
def person():
    """ Person controller for AddPersonWidget """
    def prep(r):
        # Only the s3json representation is served by this controller
        if r.representation == "s3json":
            # Include record ids in the output for the widget
            current.xml.show_ids = True
            return True
        return False
    s3.prep = prep
    return s3_rest_controller("pr", "person")
# -----------------------------------------------------------------------------
def room():
    """ RESTful CRUD controller """
    def prep(r):
        # Expose the site link, which is not shown by default
        site_id = r.table.site_id
        site_id.readable = True
        site_id.writable = True
        return True
    s3.prep = prep
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def mailing_list():
    """
    RESTful CRUD controller

    Mailing Lists are pr_group records with group_type == 5; this
    controller presents them with mailing-list-specific labels and,
    when viewed from an Organisation, with the org rheader/tabs.
    """
    tablename = "pr_group"
    table = s3db[tablename]
    # Only groups with a group_type of 5 (= Mailing List)
    s3.filter = (table.group_type == 5)
    # The type is fixed here, so hide it from forms and lists
    table.group_type.writable = False
    table.group_type.readable = False
    table.name.label = T("Mailing List Name")
    s3.crud_strings[tablename] = s3.pr_mailing_list_crud_strings
    # define the list_fields
    list_fields = s3db.configure(tablename,
                                 list_fields = ["id",
                                                "name",
                                                "description",
                                                ])
    # Default resource header/tabs (person registry style)
    _rheader = s3db.pr_rheader
    _tabs = [(T("Organization"), "organisation/"),
             (T("Mailing List Details"), None),
             ]
    if len(request.args) > 0:
        # A specific record is addressed => show its Members tab too
        _tabs.append((T("Members"), "group_membership"))
    if "viewing" in request.vars:
        # Embedded inside another resource's page (e.g. an Organisation)
        tablename, record_id = request.vars.viewing.rsplit(".", 1)
        if tablename == "org_organisation":
            table = s3db[tablename]
            _rheader = s3db.org_rheader
            _tabs = []
    s3db.add_components("pr_group", pr_group_membership="group_id")
    rheader = lambda r: _rheader(r, tabs = _tabs)
    return s3_rest_controller("pr",
                              "group",
                              rheader=rheader)
# -----------------------------------------------------------------------------
def donor():
    """ RESTful CRUD controller """
    tablename = "org_donor"
    # Accessing the table ensures the model is loaded
    # (was assigned twice before -- duplicate removed)
    table = s3db[tablename]
    # CRUD strings for the Donor resource
    s3.crud_strings[tablename] = Storage(
        label_create = ADD_DONOR,
        title_display = T("Donor Details"),
        title_list = T("Donors Report"),
        title_update = T("Edit Donor"),
        label_list_button = T("List Donors"),
        label_delete_button = T("Delete Donor"),
        msg_record_created = T("Donor added"),
        msg_record_modified = T("Donor updated"),
        msg_record_deleted = T("Donor deleted"),
        msg_list_empty = T("No Donors currently registered"))
    # Disable the inline list-add form
    s3db.configure(tablename, listadd=False)
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def resource():
    """ RESTful CRUD controller """
    def prep(r):
        if r.interactive and r.method in ("create", "update"):
            # Apply defaults from the context of a Profile page, if any
            table = r.table
            for context_var, fieldname in (("(location)", "location_id"),
                                           ("(organisation)", "organisation_id"),
                                           ):
                value = get_vars.get(context_var, None)
                if value:
                    field = table[fieldname]
                    field.default = value
                    field.readable = field.writable = False
        return True
    s3.prep = prep
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def resource_type():
    """ RESTful CRUD controller """
    # Resource Types lookup table; default REST behaviour
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def service():
    """ RESTful CRUD controller """
    # Organisation Services lookup table; default REST behaviour
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def req_match():
    """ Match Requests for Sites """
    # Delegates to the shared implementation in the req model
    return s3db.req_match()
# -----------------------------------------------------------------------------
def incoming():
    """
    Incoming Shipments for Sites
    Used from Requests rheader when looking at Transport Status
    """
    # @ToDo: Create this function!
    # NOTE(review): s3db.inv_incoming does not exist yet per the ToDo above,
    # so calling this controller will currently fail -- confirm.
    return s3db.inv_incoming()
# -----------------------------------------------------------------------------
def facility_geojson():
    """
    Create GeoJSON[P] of Facilities for use by a high-traffic website
    - controller just for testing
    - function normally run on a schedule
    """
    # Side-effect only: writes the GeoJSON output; nothing is returned
    s3db.org_facility_geojson()
# END =========================================================================
| {
"content_hash": "ae7ecf1edfd8716c4a439bab962f605d",
"timestamp": "",
"source": "github",
"line_count": 380,
"max_line_length": 113,
"avg_line_length": 31.81578947368421,
"alnum_prop": 0.4566583953680728,
"repo_name": "flavour/Turkey",
"id": "acbd83980ab708866200b48833b326a292ec6399",
"size": "12090",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "controllers/org.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2021594"
},
{
"name": "HTML",
"bytes": "1310585"
},
{
"name": "JavaScript",
"bytes": "19245058"
},
{
"name": "PHP",
"bytes": "15220"
},
{
"name": "Perl",
"bytes": "500"
},
{
"name": "Python",
"bytes": "28627483"
},
{
"name": "Ruby",
"bytes": "2051"
},
{
"name": "Shell",
"bytes": "4860"
},
{
"name": "XSLT",
"bytes": "2678742"
}
],
"symlink_target": ""
} |
import os
import fixtures
from oslo_config import cfg
from oslo_policy import _parser
from oslo_policy import opts as policy_opts
from magnum.common import policy as magnum_policy
from magnum.tests import fake_policy
# Module-level handle on the global oslo.config configuration object
CONF = cfg.CONF
class PolicyFixture(fixtures.Fixture):
    """Fixture that points oslo.policy at a temporary policy file.

    The file is generated from ``magnum.tests.fake_policy`` so that tests
    run against a known rule set instead of the deployed policy.json.
    """

    def __init__(self, compat=None):
        # Forwarded to fake_policy.get_policy_data() to select an
        # alternative/compatibility rule set; None means the default.
        self.compat = compat

    def _setUp(self):
        # Write the fake policy data into a temp dir and re-point config at it
        self.policy_dir = self.useFixture(fixtures.TempDir())
        self.policy_file_name = os.path.join(self.policy_dir.path,
                                             'policy.json')
        with open(self.policy_file_name, 'w') as policy_file:
            policy_file.write(fake_policy.get_policy_data(self.compat))
        policy_opts.set_defaults(CONF)
        CONF.set_override('policy_file', self.policy_file_name, 'oslo_policy')
        # Drop any cached enforcer so the new policy_file is picked up;
        # init() rebuilds it, and its rules are cleared again on teardown
        magnum_policy._ENFORCER = None
        self.addCleanup(magnum_policy.init().clear)

    def set_rules(self, rules):
        # Replace the enforcer's rules with the given {name: rule-string} map
        policy = magnum_policy._ENFORCER
        policy.set_rules({k: _parser.parse_rule(v)
                          for k, v in rules.items()})
| {
"content_hash": "2efd17dab30a73c429f2f02ea2673da8",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 78,
"avg_line_length": 33.65625,
"alnum_prop": 0.6388115134633241,
"repo_name": "paulczar/magnum",
"id": "574a03a156ce52e6e264e3dc1deb758d839862d2",
"size": "1684",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "magnum/tests/policy_fixture.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "569"
},
{
"name": "Mako",
"bytes": "349"
},
{
"name": "Python",
"bytes": "1685855"
},
{
"name": "Shell",
"bytes": "22511"
}
],
"symlink_target": ""
} |
"""
[path]
cd /Users/brunoflaven/Documents/01_work/blog_articles/extending_streamlit_usage/001_nlp_spacy_python_realp/
[file]
python 006_nlp_spacy_python.py
# source
Source: https://realpython.com/natural-language-processing-spacy-python/
"""
import spacy
# EN
nlp = spacy.load('en_core_web_sm')
# FR
# nlp = spacy.load('fr_core_news_sm')
print("\n --- result_1")
# EN
print("EN spacy loaded")
# FR
# print("FR spacy loaded")
# STOP WORDS
# EN
spacy_stopwords = spacy.lang.en.stop_words.STOP_WORDS
# FR
# spacy_stopwords = spacy.lang.fr.stop_words.STOP_WORDS
nb_spacy_stopwords = len(spacy_stopwords)
print("\n --- nb_spacy_stopwords")
print(nb_spacy_stopwords)
all_file_text = (
'Bruno Flaven has been a Project Manager in a wide variety of Internet business applications both in Mobile and in Desktop for 20 years now.'
'You can find more information about his professional life on his personal website (www.flaven.fr) or his linkedin profile: https://fr.linkedin.com/in/brunoflaven. He is currently working for a Paris-base In France Media Monde (FMM) mostly on mobile applications (iOS and Android). He is currently P.O for a Backoffice project made with Symfony.'
'He also made few trainings to facilitate the handling of the tools that he helps to make.')
# file_name = 'article_bf_1.txt'
# file_name = 'article_bf_2.txt'
# all_file_text = open(file_name).read()
"""
# Stop words like is, a, for, the, and in are not printed in the output
print("\n --- result_2")
all_file_doc = nlp(all_file_text)
for token in all_file_doc:
if not token.is_stop:
print(token)
"""
print("\n --- result_3")
# You can also create a list of tokens not containing stop words.
all_file_doc = nlp(all_file_text)
all_file_no_stopword_doc = [
token for token in all_file_doc if not token.is_stop]
print(all_file_no_stopword_doc)
| {
"content_hash": "8172e3c084ca94a9341702f3cc5a2c00",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 346,
"avg_line_length": 26.041666666666668,
"alnum_prop": 0.7061333333333333,
"repo_name": "bflaven/BlogArticlesExamples",
"id": "e551639e538e1ba50f58c700690058843312859d",
"size": "1927",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "extending_streamlit_usage/001_nlp_spacy_python_realp/006_nlp_spacy_python.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "AppleScript",
"bytes": "2756"
},
{
"name": "CSS",
"bytes": "3497"
},
{
"name": "CoffeeScript",
"bytes": "1785"
},
{
"name": "Dockerfile",
"bytes": "993"
},
{
"name": "HTML",
"bytes": "23687927"
},
{
"name": "JavaScript",
"bytes": "12838"
},
{
"name": "Jupyter Notebook",
"bytes": "2918640"
},
{
"name": "Makefile",
"bytes": "4058"
},
{
"name": "PHP",
"bytes": "223161"
},
{
"name": "Python",
"bytes": "1461699"
},
{
"name": "Shell",
"bytes": "12291"
}
],
"symlink_target": ""
} |
import unittest
from datetime import timedelta
import pytest
from airflow import models
from airflow.api.common.experimental.mark_tasks import (
_create_dagruns,
set_dag_run_state_to_failed,
set_dag_run_state_to_running,
set_dag_run_state_to_success,
set_state,
)
from airflow.models import DagRun
from airflow.utils import timezone
from airflow.utils.dates import days_ago
from airflow.utils.session import create_session, provide_session
from airflow.utils.state import State
from airflow.utils.types import DagRunType
from tests.test_utils.db import clear_db_runs
DEV_NULL = "/dev/null"
class TestMarkTasks(unittest.TestCase):
    """Tests for the experimental mark-tasks API (``set_state``).

    Verifies that marking TaskInstances SUCCESS/FAILED honours the
    upstream/downstream/future/past flags and the ``commit`` switch,
    using DAGs loaded from the example/test dagbags and a real
    metadata DB session.
    """

    @classmethod
    def setUpClass(cls):
        # Serialize the example DAGs to the DB, then read them back so
        # the tests operate on DB-backed DAGs (as the API would see them)
        models.DagBag(include_examples=True, read_dags_from_db=False).sync_to_db()
        dagbag = models.DagBag(include_examples=False, read_dags_from_db=True)
        cls.dag1 = dagbag.get_dag('miscellaneous_test_dag')
        cls.dag2 = dagbag.get_dag('example_subdag_operator')
        cls.dag3 = dagbag.get_dag('example_trigger_target_dag')
        cls.execution_dates = [days_ago(2), days_ago(1)]
        start_date3 = cls.dag3.start_date
        # dag3 is triggered manually, so use explicit dates from its start
        cls.dag3_execution_dates = [
            start_date3,
            start_date3 + timedelta(days=1),
            start_date3 + timedelta(days=2),
        ]

    def setUp(self):
        # Start each test from a clean slate of DagRuns
        clear_db_runs()
        drs = _create_dagruns(
            self.dag1, self.execution_dates, state=State.RUNNING, run_type=DagRunType.SCHEDULED
        )
        for dr in drs:
            dr.dag = self.dag1
        drs = _create_dagruns(
            self.dag2, [self.dag2.start_date], state=State.RUNNING, run_type=DagRunType.SCHEDULED
        )
        for dr in drs:
            dr.dag = self.dag2
        drs = _create_dagruns(
            self.dag3, self.dag3_execution_dates, state=State.SUCCESS, run_type=DagRunType.MANUAL
        )
        for dr in drs:
            dr.dag = self.dag3

    def tearDown(self):
        clear_db_runs()

    @staticmethod
    def snapshot_state(dag, execution_dates):
        # Capture the current TaskInstances of `dag` for the given dates,
        # to later verify that untouched instances kept their state
        TI = models.TaskInstance
        with create_session() as session:
            return (
                session.query(TI)
                .filter(TI.dag_id == dag.dag_id, TI.execution_date.in_(execution_dates))
                .all()
            )

    @provide_session
    def verify_state(self, dag, task_ids, execution_dates, state, old_tis, session=None):
        # Assert that every (task_id x execution_date) named is in `state`
        # and that all other task instances kept their snapshotted state;
        # `session` is injected by @provide_session
        TI = models.TaskInstance
        tis = session.query(TI).filter(TI.dag_id == dag.dag_id, TI.execution_date.in_(execution_dates)).all()
        assert len(tis) > 0
        for ti in tis:
            assert ti.operator == dag.get_task(ti.task_id).task_type
            if ti.task_id in task_ids and ti.execution_date in execution_dates:
                assert ti.state == state
                if state in State.finished:
                    # Finished states must have an end_date recorded
                    assert ti.end_date is not None
            else:
                for old_ti in old_tis:
                    if old_ti.task_id == ti.task_id and old_ti.execution_date == ti.execution_date:
                        assert ti.state == old_ti.state

    def test_mark_tasks_now(self):
        """set_state with no flags alters exactly the addressed task."""
        # set one task to success but do not commit
        snapshot = TestMarkTasks.snapshot_state(self.dag1, self.execution_dates)
        task = self.dag1.get_task("runme_1")
        altered = set_state(
            tasks=[task],
            execution_date=self.execution_dates[0],
            upstream=False,
            downstream=False,
            future=False,
            past=False,
            state=State.SUCCESS,
            commit=False,
        )
        assert len(altered) == 1
        self.verify_state(self.dag1, [task.task_id], [self.execution_dates[0]], None, snapshot)
        # set one and only one task to success
        altered = set_state(
            tasks=[task],
            execution_date=self.execution_dates[0],
            upstream=False,
            downstream=False,
            future=False,
            past=False,
            state=State.SUCCESS,
            commit=True,
        )
        assert len(altered) == 1
        self.verify_state(self.dag1, [task.task_id], [self.execution_dates[0]], State.SUCCESS, snapshot)
        # set no tasks (already SUCCESS, so nothing to alter)
        altered = set_state(
            tasks=[task],
            execution_date=self.execution_dates[0],
            upstream=False,
            downstream=False,
            future=False,
            past=False,
            state=State.SUCCESS,
            commit=True,
        )
        assert len(altered) == 0
        self.verify_state(self.dag1, [task.task_id], [self.execution_dates[0]], State.SUCCESS, snapshot)
        # set task to other than success
        altered = set_state(
            tasks=[task],
            execution_date=self.execution_dates[0],
            upstream=False,
            downstream=False,
            future=False,
            past=False,
            state=State.FAILED,
            commit=True,
        )
        assert len(altered) == 1
        self.verify_state(self.dag1, [task.task_id], [self.execution_dates[0]], State.FAILED, snapshot)
        # don't alter other tasks
        snapshot = TestMarkTasks.snapshot_state(self.dag1, self.execution_dates)
        task = self.dag1.get_task("runme_0")
        altered = set_state(
            tasks=[task],
            execution_date=self.execution_dates[0],
            upstream=False,
            downstream=False,
            future=False,
            past=False,
            state=State.SUCCESS,
            commit=True,
        )
        assert len(altered) == 1
        self.verify_state(self.dag1, [task.task_id], [self.execution_dates[0]], State.SUCCESS, snapshot)
        # set one task as FAILED. dag3 has schedule_interval None
        snapshot = TestMarkTasks.snapshot_state(self.dag3, self.dag3_execution_dates)
        task = self.dag3.get_task("run_this")
        altered = set_state(
            tasks=[task],
            execution_date=self.dag3_execution_dates[1],
            upstream=False,
            downstream=False,
            future=False,
            past=False,
            state=State.FAILED,
            commit=True,
        )
        # exactly one TaskInstance should have been altered
        assert len(altered) == 1
        # task should have been marked as failed
        self.verify_state(self.dag3, [task.task_id], [self.dag3_execution_dates[1]], State.FAILED, snapshot)
        # tasks on other days should be unchanged
        self.verify_state(self.dag3, [task.task_id], [self.dag3_execution_dates[0]], None, snapshot)
        self.verify_state(self.dag3, [task.task_id], [self.dag3_execution_dates[2]], None, snapshot)

    def test_mark_downstream(self):
        """downstream=True also marks the task's downstream relatives."""
        # test downstream
        snapshot = TestMarkTasks.snapshot_state(self.dag1, self.execution_dates)
        task = self.dag1.get_task("runme_1")
        relatives = task.get_flat_relatives(upstream=False)
        task_ids = [t.task_id for t in relatives]
        task_ids.append(task.task_id)
        altered = set_state(
            tasks=[task],
            execution_date=self.execution_dates[0],
            upstream=False,
            downstream=True,
            future=False,
            past=False,
            state=State.SUCCESS,
            commit=True,
        )
        assert len(altered) == 3
        self.verify_state(self.dag1, task_ids, [self.execution_dates[0]], State.SUCCESS, snapshot)

    def test_mark_upstream(self):
        """upstream=True also marks the task's upstream relatives."""
        # test upstream
        snapshot = TestMarkTasks.snapshot_state(self.dag1, self.execution_dates)
        task = self.dag1.get_task("run_after_loop")
        relatives = task.get_flat_relatives(upstream=True)
        task_ids = [t.task_id for t in relatives]
        task_ids.append(task.task_id)
        altered = set_state(
            tasks=[task],
            execution_date=self.execution_dates[0],
            upstream=True,
            downstream=False,
            future=False,
            past=False,
            state=State.SUCCESS,
            commit=True,
        )
        assert len(altered) == 4
        self.verify_state(self.dag1, task_ids, [self.execution_dates[0]], State.SUCCESS, snapshot)

    def test_mark_tasks_future(self):
        """future=True marks the task on this and all later run dates."""
        # set one task to success towards end of scheduled dag runs
        snapshot = TestMarkTasks.snapshot_state(self.dag1, self.execution_dates)
        task = self.dag1.get_task("runme_1")
        altered = set_state(
            tasks=[task],
            execution_date=self.execution_dates[0],
            upstream=False,
            downstream=False,
            future=True,
            past=False,
            state=State.SUCCESS,
            commit=True,
        )
        assert len(altered) == 2
        self.verify_state(self.dag1, [task.task_id], self.execution_dates, State.SUCCESS, snapshot)
        snapshot = TestMarkTasks.snapshot_state(self.dag3, self.dag3_execution_dates)
        task = self.dag3.get_task("run_this")
        altered = set_state(
            tasks=[task],
            execution_date=self.dag3_execution_dates[1],
            upstream=False,
            downstream=False,
            future=True,
            past=False,
            state=State.FAILED,
            commit=True,
        )
        assert len(altered) == 2
        self.verify_state(self.dag3, [task.task_id], [self.dag3_execution_dates[0]], None, snapshot)
        self.verify_state(self.dag3, [task.task_id], self.dag3_execution_dates[1:], State.FAILED, snapshot)

    def test_mark_tasks_past(self):
        """past=True marks the task on this and all earlier run dates."""
        # set one task to success towards end of scheduled dag runs
        snapshot = TestMarkTasks.snapshot_state(self.dag1, self.execution_dates)
        task = self.dag1.get_task("runme_1")
        altered = set_state(
            tasks=[task],
            execution_date=self.execution_dates[1],
            upstream=False,
            downstream=False,
            future=False,
            past=True,
            state=State.SUCCESS,
            commit=True,
        )
        assert len(altered) == 2
        self.verify_state(self.dag1, [task.task_id], self.execution_dates, State.SUCCESS, snapshot)
        snapshot = TestMarkTasks.snapshot_state(self.dag3, self.dag3_execution_dates)
        task = self.dag3.get_task("run_this")
        altered = set_state(
            tasks=[task],
            execution_date=self.dag3_execution_dates[1],
            upstream=False,
            downstream=False,
            future=False,
            past=True,
            state=State.FAILED,
            commit=True,
        )
        assert len(altered) == 2
        self.verify_state(self.dag3, [task.task_id], self.dag3_execution_dates[:2], State.FAILED, snapshot)
        self.verify_state(self.dag3, [task.task_id], [self.dag3_execution_dates[2]], None, snapshot)

    def test_mark_tasks_multiple(self):
        """Multiple tasks can be marked in a single call."""
        # set multiple tasks to success
        snapshot = TestMarkTasks.snapshot_state(self.dag1, self.execution_dates)
        tasks = [self.dag1.get_task("runme_1"), self.dag1.get_task("runme_2")]
        altered = set_state(
            tasks=tasks,
            execution_date=self.execution_dates[0],
            upstream=False,
            downstream=False,
            future=False,
            past=False,
            state=State.SUCCESS,
            commit=True,
        )
        assert len(altered) == 2
        self.verify_state(
            self.dag1, [task.task_id for task in tasks], [self.execution_dates[0]], State.SUCCESS, snapshot
        )

    # TODO: this backend should be removed once a fixing solution is found later
    # We skip it here because this test case is working with Postgres & SQLite
    # but not with MySQL
    @pytest.mark.backend("sqlite", "postgres")
    def test_mark_tasks_subdag(self):
        """Marking a SubDagOperator downstream cascades into the subdag."""
        # set one task to success towards end of scheduled dag runs
        task = self.dag2.get_task("section-1")
        relatives = task.get_flat_relatives(upstream=False)
        task_ids = [t.task_id for t in relatives]
        task_ids.append(task.task_id)
        altered = set_state(
            tasks=[task],
            execution_date=self.execution_dates[0],
            upstream=False,
            downstream=True,
            future=False,
            past=False,
            state=State.SUCCESS,
            commit=True,
        )
        assert len(altered) == 14
        # cannot use snapshot here as that will require drilling down the
        # sub dag tree essentially recreating the same code as in the
        # tested logic.
        self.verify_state(self.dag2, task_ids, [self.execution_dates[0]], State.SUCCESS, [])
class TestMarkDAGRun(unittest.TestCase):
INITIAL_TASK_STATES = {
'runme_0': State.SUCCESS,
'runme_1': State.SKIPPED,
'runme_2': State.UP_FOR_RETRY,
'also_run_this': State.QUEUED,
'run_after_loop': State.RUNNING,
'run_this_last': State.FAILED,
}
@classmethod
def setUpClass(cls):
dagbag = models.DagBag(include_examples=True, read_dags_from_db=False)
cls.dag1 = dagbag.dags['miscellaneous_test_dag']
cls.dag1.sync_to_db()
cls.dag2 = dagbag.dags['example_subdag_operator']
cls.dag2.sync_to_db()
cls.execution_dates = [days_ago(2), days_ago(1), days_ago(0)]
def setUp(self):
clear_db_runs()
def _get_num_tasks_with_starting_state(self, state: State, inclusion: bool):
"""
If ``inclusion=True``, get num tasks with initial state ``state``.
Otherwise, get number tasks with initial state not equal to ``state``
:param state: State to compare against
:param inclusion: whether to look for inclusion or exclusion
:return: number of tasks meeting criteria
"""
states = self.INITIAL_TASK_STATES.values()
def compare(x, y):
return x == y if inclusion else x != y
return len([s for s in states if compare(s, state)])
def _set_default_task_instance_states(self, dr):
for task_id, state in self.INITIAL_TASK_STATES.items():
dr.get_task_instance(task_id).set_state(state)
def _verify_task_instance_states_remain_default(self, dr):
for task_id, state in self.INITIAL_TASK_STATES.items():
assert dr.get_task_instance(task_id).state == state
@provide_session
def _verify_task_instance_states(self, dag, date, state, session=None):
TI = models.TaskInstance
tis = session.query(TI).filter(TI.dag_id == dag.dag_id, TI.execution_date == date)
for ti in tis:
assert ti.state == state
def _create_test_dag_run(self, state, date):
return self.dag1.create_dagrun(
run_type=DagRunType.MANUAL, state=state, start_date=date, execution_date=date
)
def _verify_dag_run_state(self, dag, date, state):
drs = models.DagRun.find(dag_id=dag.dag_id, execution_date=date)
dr = drs[0]
assert dr.get_state() == state
@provide_session
def _verify_dag_run_dates(self, dag, date, state, middle_time, session=None):
# When target state is RUNNING, we should set start_date,
# otherwise we should set end_date.
DR = DagRun
dr = session.query(DR).filter(DR.dag_id == dag.dag_id, DR.execution_date == date).one()
if state == State.RUNNING:
# Since the DAG is running, the start_date must be updated after creation
assert dr.start_date > middle_time
# If the dag is still running, we don't have an end date
assert dr.end_date is None
else:
# If the dag is not running, there must be an end time
assert dr.start_date < middle_time
assert dr.end_date > middle_time
def test_set_running_dag_run_to_success(self):
date = self.execution_dates[0]
dr = self._create_test_dag_run(State.RUNNING, date)
middle_time = timezone.utcnow()
self._set_default_task_instance_states(dr)
altered = set_dag_run_state_to_success(self.dag1, date, commit=True)
# All except the SUCCESS task should be altered.
expected = self._get_num_tasks_with_starting_state(State.SUCCESS, inclusion=False)
assert len(altered) == expected
self._verify_dag_run_state(self.dag1, date, State.SUCCESS)
self._verify_task_instance_states(self.dag1, date, State.SUCCESS)
self._verify_dag_run_dates(self.dag1, date, State.SUCCESS, middle_time)
def test_set_running_dag_run_to_failed(self):
date = self.execution_dates[0]
dr = self._create_test_dag_run(State.RUNNING, date)
middle_time = timezone.utcnow()
self._set_default_task_instance_states(dr)
altered = set_dag_run_state_to_failed(self.dag1, date, commit=True)
# Only running task should be altered.
expected = self._get_num_tasks_with_starting_state(State.RUNNING, inclusion=True)
assert len(altered) == expected
self._verify_dag_run_state(self.dag1, date, State.FAILED)
assert dr.get_task_instance('run_after_loop').state == State.FAILED
self._verify_dag_run_dates(self.dag1, date, State.FAILED, middle_time)
def test_set_running_dag_run_to_running(self):
date = self.execution_dates[0]
dr = self._create_test_dag_run(State.RUNNING, date)
middle_time = timezone.utcnow()
self._set_default_task_instance_states(dr)
altered = set_dag_run_state_to_running(self.dag1, date, commit=True)
# None of the tasks should be altered, only the dag itself
assert len(altered) == 0
self._verify_dag_run_state(self.dag1, date, State.RUNNING)
self._verify_task_instance_states_remain_default(dr)
self._verify_dag_run_dates(self.dag1, date, State.RUNNING, middle_time)
def test_set_success_dag_run_to_success(self):
date = self.execution_dates[0]
dr = self._create_test_dag_run(State.SUCCESS, date)
middle_time = timezone.utcnow()
self._set_default_task_instance_states(dr)
altered = set_dag_run_state_to_success(self.dag1, date, commit=True)
# All except the SUCCESS task should be altered.
expected = self._get_num_tasks_with_starting_state(State.SUCCESS, inclusion=False)
assert len(altered) == expected
self._verify_dag_run_state(self.dag1, date, State.SUCCESS)
self._verify_task_instance_states(self.dag1, date, State.SUCCESS)
self._verify_dag_run_dates(self.dag1, date, State.SUCCESS, middle_time)
def test_set_success_dag_run_to_failed(self):
date = self.execution_dates[0]
dr = self._create_test_dag_run(State.SUCCESS, date)
middle_time = timezone.utcnow()
self._set_default_task_instance_states(dr)
altered = set_dag_run_state_to_failed(self.dag1, date, commit=True)
# Only running task should be altered.
expected = self._get_num_tasks_with_starting_state(State.RUNNING, inclusion=True)
assert len(altered) == expected
self._verify_dag_run_state(self.dag1, date, State.FAILED)
assert dr.get_task_instance('run_after_loop').state == State.FAILED
self._verify_dag_run_dates(self.dag1, date, State.FAILED, middle_time)
def test_set_success_dag_run_to_running(self):
    """Reverting a SUCCESS dag run to RUNNING changes the dag run object only."""
    exec_date = self.execution_dates[0]
    dag_run = self._create_test_dag_run(State.SUCCESS, exec_date)
    snapshot_time = timezone.utcnow()
    self._set_default_task_instance_states(dag_run)

    altered = set_dag_run_state_to_running(self.dag1, exec_date, commit=True)

    # No task instances are altered, only the dag run's own state.
    assert not altered
    self._verify_dag_run_state(self.dag1, exec_date, State.RUNNING)
    self._verify_task_instance_states_remain_default(dag_run)
    self._verify_dag_run_dates(self.dag1, exec_date, State.RUNNING, snapshot_time)
def test_set_failed_dag_run_to_success(self):
    """Marking a FAILED dag run SUCCESS alters every task not already SUCCESS."""
    date = self.execution_dates[0]
    # Bug fix: this test exercises a *failed* dag run, so create it in
    # State.FAILED (the original created a SUCCESS run by copy-paste from
    # the test_set_success_* group above).
    dr = self._create_test_dag_run(State.FAILED, date)
    middle_time = timezone.utcnow()
    self._set_default_task_instance_states(dr)

    altered = set_dag_run_state_to_success(self.dag1, date, commit=True)

    # All task instances except the one already in SUCCESS should be altered.
    expected = self._get_num_tasks_with_starting_state(State.SUCCESS, inclusion=False)
    assert len(altered) == expected
    self._verify_dag_run_state(self.dag1, date, State.SUCCESS)
    self._verify_task_instance_states(self.dag1, date, State.SUCCESS)
    self._verify_dag_run_dates(self.dag1, date, State.SUCCESS, middle_time)
def test_set_failed_dag_run_to_failed(self):
    """Re-marking a FAILED dag run FAILED only flips the running task."""
    date = self.execution_dates[0]
    # Bug fix: start from a FAILED dag run to match the test's name and
    # intent (the original created a SUCCESS run by copy-paste).
    dr = self._create_test_dag_run(State.FAILED, date)
    middle_time = timezone.utcnow()
    self._set_default_task_instance_states(dr)

    altered = set_dag_run_state_to_failed(self.dag1, date, commit=True)

    # Only the task that was RUNNING should be altered.
    expected = self._get_num_tasks_with_starting_state(State.RUNNING, inclusion=True)
    assert len(altered) == expected
    self._verify_dag_run_state(self.dag1, date, State.FAILED)
    assert dr.get_task_instance('run_after_loop').state == State.FAILED
    self._verify_dag_run_dates(self.dag1, date, State.FAILED, middle_time)
def test_set_failed_dag_run_to_running(self):
    """Reverting a FAILED dag run to RUNNING changes the dag run object only."""
    date = self.execution_dates[0]
    # Bug fix: start from a FAILED dag run to match the test's name and
    # intent (the original created a SUCCESS run by copy-paste).
    dr = self._create_test_dag_run(State.FAILED, date)
    middle_time = timezone.utcnow()
    self._set_default_task_instance_states(dr)

    altered = set_dag_run_state_to_running(self.dag1, date, commit=True)

    # No task instances are altered, only the dag run itself.
    assert len(altered) == 0
    self._verify_dag_run_state(self.dag1, date, State.RUNNING)
    self._verify_task_instance_states_remain_default(dr)
    self._verify_dag_run_dates(self.dag1, date, State.RUNNING, middle_time)
def test_set_state_without_commit(self):
    """With commit=False the helpers report would-be changes but persist none."""
    exec_date = self.execution_dates[0]
    dag_run = self._create_test_dag_run(State.RUNNING, exec_date)
    self._set_default_task_instance_states(dag_run)

    # to_running: nothing beyond the dag run itself would change.
    pending = set_dag_run_state_to_running(self.dag1, exec_date, commit=False)
    assert not pending
    self._verify_dag_run_state(self.dag1, exec_date, State.RUNNING)
    self._verify_task_instance_states_remain_default(dag_run)

    # to_failed: only the running task would be touched.
    pending = set_dag_run_state_to_failed(self.dag1, exec_date, commit=False)
    expected = self._get_num_tasks_with_starting_state(State.RUNNING, inclusion=True)
    assert len(pending) == expected
    self._verify_dag_run_state(self.dag1, exec_date, State.RUNNING)
    self._verify_task_instance_states_remain_default(dag_run)

    # to_success: everything except the already-successful task would be touched.
    pending = set_dag_run_state_to_success(self.dag1, exec_date, commit=False)
    expected = self._get_num_tasks_with_starting_state(State.SUCCESS, inclusion=False)
    assert len(pending) == expected
    self._verify_dag_run_state(self.dag1, exec_date, State.RUNNING)
    self._verify_task_instance_states_remain_default(dag_run)
@provide_session
def test_set_state_with_multiple_dagruns(self, session=None):
    """Marking one dag run SUCCESS must not leak into sibling dag runs."""
    # Create three dag runs at distinct execution dates; order matters only
    # in that each date maps to the state listed here.
    for exec_date, state in (
        (self.execution_dates[0], State.FAILED),
        (self.execution_dates[1], State.FAILED),
        (self.execution_dates[2], State.RUNNING),
    ):
        self.dag2.create_dagrun(
            run_type=DagRunType.MANUAL,
            state=state,
            execution_date=exec_date,
            session=session,
        )

    altered = set_dag_run_state_to_success(self.dag2, self.execution_dates[1], commit=True)

    def count_dag_tasks(dag):
        # Tasks of the dag itself plus, recursively, all of its subdags.
        return len(dag.tasks) + sum(count_dag_tasks(subdag) for subdag in dag.subdags)

    assert len(altered) == count_dag_tasks(self.dag2)
    self._verify_dag_run_state(self.dag2, self.execution_dates[1], State.SUCCESS)

    # The sibling dag runs keep their original states.
    models.DagRun.find(dag_id=self.dag2.dag_id, execution_date=self.execution_dates[0])
    self._verify_dag_run_state(self.dag2, self.execution_dates[0], State.FAILED)
    models.DagRun.find(dag_id=self.dag2.dag_id, execution_date=self.execution_dates[2])
    self._verify_dag_run_state(self.dag2, self.execution_dates[2], State.RUNNING)
def test_set_dag_run_state_edge_cases(self):
    """State setters are no-ops for a missing dag/date and raise ValueError
    for execution dates that cannot be matched against an existing dag run."""
    # Dag does not exist: nothing to alter.
    altered = set_dag_run_state_to_success(None, self.execution_dates[0])
    assert len(altered) == 0
    altered = set_dag_run_state_to_failed(None, self.execution_dates[0])
    assert len(altered) == 0
    altered = set_dag_run_state_to_running(None, self.execution_dates[0])
    assert len(altered) == 0

    # Invalid (None) execution date: nothing to alter.
    altered = set_dag_run_state_to_success(self.dag1, None)
    assert len(altered) == 0
    altered = set_dag_run_state_to_failed(self.dag1, None)
    assert len(altered) == 0
    altered = set_dag_run_state_to_running(self.dag1, None)
    assert len(altered) == 0

    # A naive datetime raises ValueError (cannot be reconciled with the
    # dag's latest_execution_date).  The original garbled comment and the
    # commented-out dead call have been removed.
    with pytest.raises(ValueError):
        set_dag_run_state_to_success(self.dag2, timezone.make_naive(self.execution_dates[0]))

    # No DagRun exists for dag2 at this date, so ValueError is raised too.
    with pytest.raises(ValueError):
        set_dag_run_state_to_success(self.dag2, self.execution_dates[0])
def test_set_dag_run_state_to_failed_no_running_tasks(self):
    """
    set_dag_run_state_to_failed when there are no running tasks to update
    """
    exec_date = self.execution_dates[0]
    dag_run = self._create_test_dag_run(State.SUCCESS, exec_date)
    # Force every task instance to SUCCESS so nothing is left running.
    for task in self.dag1.tasks:
        dag_run.get_task_instance(task.task_id).set_state(State.SUCCESS)
    # Must not raise even though there is nothing to flip.
    set_dag_run_state_to_failed(self.dag1, exec_date)
def tearDown(self):
    """Clear both test dags and wipe every persisted dag run / task instance."""
    for dag in (self.dag1, self.dag2):
        dag.clear()
    with create_session() as session:
        session.query(models.DagRun).delete()
        session.query(models.TaskInstance).delete()
| {
"content_hash": "35e8ae29a3cdbd94453400f26749692b",
"timestamp": "",
"source": "github",
"line_count": 666,
"max_line_length": 109,
"avg_line_length": 40.3963963963964,
"alnum_prop": 0.6148528099910794,
"repo_name": "dhuang/incubator-airflow",
"id": "49008d386ba200d3b0117a97aff10338b244546f",
"size": "27692",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/api/common/experimental/test_mark_tasks.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "109698"
},
{
"name": "HTML",
"bytes": "264851"
},
{
"name": "JavaScript",
"bytes": "1988427"
},
{
"name": "Mako",
"bytes": "1037"
},
{
"name": "Python",
"bytes": "3357958"
},
{
"name": "Shell",
"bytes": "34442"
}
],
"symlink_target": ""
} |
"""
GPT2 SeeKeR Agent for Language Modeling.
"""
from typing import Optional, Tuple, List, Union
import torch
import torch.nn
from parlai.agents.fid.fid import (
FidModel,
FidAgent,
SearchQuerySearchEngineFiDAgent,
WizIntGoldDocRetrieverFiDAgent,
)
from parlai.agents.hugging_face.gpt2 import Gpt2Agent
from parlai.agents.rag.rag import BaseGenerationAgentMixin
from parlai.agents.rag.retrievers import Document
from parlai.core.dict import DictionaryAgent
from parlai.core.message import Message
from parlai.core.opt import Opt
from parlai.core.params import ParlaiParser
from parlai.core.torch_agent import Batch, Output
from parlai.core.torch_generator_agent import TorchGeneratorAgent
from parlai.utils.torch import padded_tensor
from projects.seeker.agents.seeker import SeekerAgent
from projects.seeker.agents.gpt2_seeker_modules import (
GPT2WithRetrieverModel,
ComboGPT2Model,
)
class GPT2WithRetrieverAgentBase(Gpt2Agent, BaseGenerationAgentMixin):
    """GPT2 generation agent usable as the generation backbone inside RAG/FiD."""

    @staticmethod
    def build_rag_model(
        opt: Opt, dictionary: DictionaryAgent
    ) -> "GPT2WithRetrieverModel":
        # Factory hook called by the RAG framework to construct the model.
        return GPT2WithRetrieverModel(opt, dictionary)
class GPT2WithRetrieverAgent(FidAgent, GPT2WithRetrieverAgentBase):
    """
    GPT2 with Retriever agent.

    This agent packs in the retrieved documents and input context as one big "prompt" to
    the language model.
    """

    def __init__(self, opt, shared=None):
        # Force the generation backbone to GPT2 before FiD setup runs.
        opt['generation_model'] = 'gpt2'
        super().__init__(opt, shared)

    @classmethod
    def add_cmdline_args(
        cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None
    ) -> ParlaiParser:
        # Both parents are invoked explicitly (not via super()) so each
        # contributes its argument groups exactly once.
        FidAgent.add_cmdline_args(parser, partial_opt)
        Gpt2Agent.add_cmdline_args(parser, partial_opt)
        group = parser.add_argument_group('GPT2 Retriever Agent')
        group.add_argument(
            '--filter-docs-with-label',
            type='bool',
            default=False,
            help='If true, we make sure that we do not retrieve any docs containing '
            'label string VERBATIM.',
        )
        return parser

    @staticmethod
    def build_rag_model(
        opt: Opt, dictionary: DictionaryAgent
    ) -> "GPT2WithRetrieverModel":
        # Factory used by the RAG machinery to construct the underlying model.
        return GPT2WithRetrieverModel(opt, dictionary)

    def build_model(self) -> FidModel:
        # Build the concrete model from this agent's opt and dictionary.
        return GPT2WithRetrieverModel(self.opt, self.dict)

    @property
    def generation_model(self) -> str:
        # Name of the generation backbone configured for this agent.
        return self._generation_model

    @generation_model.setter
    def generation_model(self, model: str):
        """
        Override to always be GPT2.
        """
        # Whatever name is assigned, the generation *agent* class is pinned
        # to GPT2WithRetrieverAgentBase.
        self._generation_model = model
        self._generation_agent = GPT2WithRetrieverAgentBase

    def _pad_tensor(
        self, items: List[Union[List[int], torch.LongTensor]], is_label: bool = False
    ) -> Tuple[torch.LongTensor, List[int]]:
        """
        Override to always set fp16friendly to False and left_pad to True.
        """
        # Left padding keeps every prompt flush against the generation
        # position, as decoder-only models require; is_label is ignored.
        return padded_tensor(
            items, pad_idx=self.NULL_IDX, left_padded=True, fp16friendly=False
        )

    def _model_input(
        self, batch: Batch
    ) -> Tuple[
        torch.LongTensor,
        torch.LongTensor,
        torch.LongTensor,
        torch.LongTensor,
        Optional[torch.LongTensor],
    ]:
        """
        Override model input to return target lengths as well.

        Returns (text, text lengths, query vec, input turn counts,
        label lengths or None).
        """
        return (
            batch.text_vec,
            batch.text_vec.ne(self.NULL_IDX).sum(1),
            batch.query_vec,
            batch.input_turn_cnt_vec,
            batch.label_vec.ne(self.NULL_IDX).sum(1)
            if batch.label_vec is not None
            else None,
        )

    def _encoder_input(
        self, batch: Batch
    ) -> Tuple[
        torch.LongTensor,
        torch.LongTensor,
        torch.LongTensor,
        torch.LongTensor,
        Optional[torch.LongTensor],
    ]:
        """
        During generation, we send the label truncation to the model.
        """
        # Same shape as _model_input, but the last element is a per-example
        # tensor filled with the configured label truncation length.
        return (
            batch.text_vec,
            batch.text_vec.ne(self.NULL_IDX).sum(1),
            batch.query_vec,
            batch.input_turn_cnt_vec,
            batch.text_vec.new(batch.batchsize).fill_(self.label_truncate),
        )

    def eval_step(self, batch: Batch) -> Optional[Output]:
        """
        Override to cache the labels for retrieval.
        """
        # When label filtering is enabled, hand the gold labels to the model
        # so retrieval can exclude documents containing them verbatim.
        if batch.label_vec is not None and self.opt.get('filter_docs_with_label'):
            self.model_api.set_labels(batch.label_vec)
        output = TorchGeneratorAgent.eval_step(self, batch)
        return output
class GPT2WithRetrieverSearchQueryAgent(
    GPT2WithRetrieverAgent, SearchQuerySearchEngineFiDAgent
):
    """GPT2 retriever agent whose documents come from a search engine."""

    @classmethod
    def add_cmdline_args(
        cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None
    ) -> ParlaiParser:
        # Call each parent explicitly so both argument sets are registered.
        GPT2WithRetrieverAgent.add_cmdline_args(parser, partial_opt)
        SearchQuerySearchEngineFiDAgent.add_cmdline_args(parser, partial_opt)
        return parser
class GPT2WithRetrieverGoldDocumentAgent(
    GPT2WithRetrieverAgent, WizIntGoldDocRetrieverFiDAgent
):
    """GPT2 retriever agent that uses gold (annotated) documents."""

    @classmethod
    def add_cmdline_args(
        cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None
    ) -> ParlaiParser:
        # Call each parent explicitly so both argument sets are registered.
        GPT2WithRetrieverAgent.add_cmdline_args(parser, partial_opt)
        WizIntGoldDocRetrieverFiDAgent.add_cmdline_args(parser, partial_opt)
        return parser
####################
# Combo Agent Code #
####################
class GPT2ComboAgent(GPT2WithRetrieverAgent):
    """
    Combo GPT2 Agent.

    This agent can handle retrieval for some contexts, and vanilla decoder-only
    computation for others; the per-example choice is driven by the
    ``skip_retrieval`` key in the observation.
    """

    @classmethod
    def add_cmdline_args(
        cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None
    ) -> ParlaiParser:
        """
        Add Combo Args.
        """
        super().add_cmdline_args(parser, partial_opt)
        gpt2_combo = parser.add_argument_group('GPT2 Combo Group')
        gpt2_combo.add_argument(
            '--skip-retrieval-key',
            type=str,
            default='skip_retrieval',
            help='key in observation determining whether to skip retrieval.',
        )
        # Bug fix: return the parser to honor the declared return type and to
        # match every sibling add_cmdline_args implementation in this module.
        return parser

    def build_model(self) -> ComboGPT2Model:
        """
        Build and return ComboGPT2Model.
        """
        model = ComboGPT2Model(self.opt, self.dict)
        if self.opt['embedding_type'] != 'random':
            self._copy_embeddings(
                model.encoder.embeddings.weight, self.opt['embedding_type']
            )
        return model

    def batchify(self, obs_batch: List[Message], sort: bool = False) -> Batch:
        """
        Overrides batchify to add skip retrieval input vec.
        """
        batch = super().batchify(obs_batch, sort)
        valid_exs = [ex for ex in obs_batch if self.is_valid(ex)]
        if valid_exs:
            # One bool per valid example: True means "bypass the retriever".
            skip_retrieval = [
                ex.get(self.opt['skip_retrieval_key'], False) for ex in valid_exs
            ]
            batch.skip_retrieval_vec = torch.BoolTensor(skip_retrieval)
        return batch

    def _model_input(
        self, batch: Batch
    ) -> Tuple[
        torch.LongTensor,
        torch.LongTensor,
        torch.LongTensor,
        torch.LongTensor,
        torch.BoolTensor,
        Optional[torch.LongTensor],
    ]:
        """
        Override _model_input to add skip_retrieval_vec.

        Returns (text, text lengths, query vec, input turn counts,
        skip-retrieval mask, label lengths or None).
        """
        return (
            batch.text_vec,
            batch.text_vec.ne(self.NULL_IDX).sum(1),
            batch.query_vec,
            batch.input_turn_cnt_vec,
            batch.skip_retrieval_vec,
            batch.label_vec.ne(self.NULL_IDX).sum(1)
            if batch.label_vec is not None
            else None,
        )

    def _encoder_input(
        self, batch: Batch
    ) -> Tuple[
        torch.LongTensor,
        torch.LongTensor,
        torch.LongTensor,
        torch.LongTensor,
        torch.BoolTensor,
        Optional[torch.LongTensor],
    ]:
        """
        For the encoder, during generation, we don't send label vec but rather the label
        truncation.
        """
        return (
            batch.text_vec,
            batch.text_vec.ne(self.NULL_IDX).sum(1),
            batch.query_vec,
            batch.input_turn_cnt_vec,
            batch.skip_retrieval_vec,
            batch.text_vec.new(batch.batchsize).fill_(self.label_truncate),
        )

    def get_retrieved_knowledge(self, message: Message) -> List[Document]:
        """Return retrieved docs for a message; empty when retrieval is skipped."""
        if message.get('skip_retrieval'):
            return []
        return super().get_retrieved_knowledge(message)

    def eval_step(self, batch: Batch) -> Optional[Output]:
        """
        Override to potentially filter docs with the label.

        Additionally add top docs to the output.
        """
        if batch.label_vec is not None and self.opt.get('filter_docs_with_label'):
            self.model_api.set_labels(batch.label_vec)
        output = TorchGeneratorAgent.eval_step(self, batch)
        if output is not None:
            output.top_docs = self.model_api.get_top_docs()
        return output
class GPT2ComboSearchQueryAgent(GPT2ComboAgent, SearchQuerySearchEngineFiDAgent):
    """Combo agent whose retrieved documents come from a search engine."""

    @classmethod
    def add_cmdline_args(
        cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None
    ) -> ParlaiParser:
        # Call each parent explicitly so both argument sets are registered.
        GPT2ComboAgent.add_cmdline_args(parser, partial_opt)
        SearchQuerySearchEngineFiDAgent.add_cmdline_args(parser, partial_opt)
        return parser
class GPT2ComboGpt2GoldDocumentAgent(GPT2ComboAgent, WizIntGoldDocRetrieverFiDAgent):
    """Combo agent that retrieves gold (annotated) documents."""

    @classmethod
    def add_cmdline_args(
        cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None
    ) -> ParlaiParser:
        # Call each parent explicitly so both argument sets are registered.
        GPT2ComboAgent.add_cmdline_args(parser, partial_opt)
        WizIntGoldDocRetrieverFiDAgent.add_cmdline_args(parser, partial_opt)
        return parser
class GPT2SeekerAgent(SeekerAgent):
    """SeeKeR agent variant backed by the GPT2 combo agent."""

    @classmethod
    def get_additional_agent_args(cls) -> ParlaiParser:
        """
        Return a parser with arguments sourced from several sub models.
        """
        additional_agent_parser = SeekerAgent.get_additional_agent_args()
        # Layer the combo agent's arguments on top of the base seeker args.
        GPT2ComboAgent.add_cmdline_args(additional_agent_parser)
        return additional_agent_parser
| {
"content_hash": "6add48e17f69960c5db21d2dd431dca4",
"timestamp": "",
"source": "github",
"line_count": 320,
"max_line_length": 88,
"avg_line_length": 31.990625,
"alnum_prop": 0.6283090749242942,
"repo_name": "facebookresearch/ParlAI",
"id": "83109363031a4bbb402ba4f6bdcf183ee9cbc235",
"size": "10436",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "projects/seeker/agents/gpt2_seeker.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "2000"
},
{
"name": "CSS",
"bytes": "38474"
},
{
"name": "Cuda",
"bytes": "4118"
},
{
"name": "Dockerfile",
"bytes": "1218"
},
{
"name": "HTML",
"bytes": "645771"
},
{
"name": "JavaScript",
"bytes": "405110"
},
{
"name": "Makefile",
"bytes": "289"
},
{
"name": "Python",
"bytes": "6802410"
},
{
"name": "Shell",
"bytes": "26147"
}
],
"symlink_target": ""
} |
from thumbor.filters import BaseFilter, filter_method
from thumbor.ext.filters import _sharpen
class Filter(BaseFilter):
    """Thumbor filter that sharpens an image only when it was downscaled
    beyond a configurable ratio threshold."""

    @filter_method(
        BaseFilter.DecimalNumber,
        BaseFilter.DecimalNumber,
        BaseFilter.Boolean,
        BaseFilter.DecimalNumber
    )
    def conditional_sharpen(
            self,
            amount,
            radius,
            luminance_only,
            resize_ratio_threshold):
        """Apply sharpening with the given amount/radius when the
        destination-to-source size ratio falls below the threshold.

        The ratio compares (width + height) of the output against the
        original dimensions; a small ratio means heavy downscaling.
        """
        width, height = self.engine.size
        # The original dimensions may come from the request or, failing
        # that, from the engine itself.
        try:
            original_width = self.context.request.source_width
        except AttributeError:
            original_width = self.engine.source_width
        try:
            original_height = self.context.request.source_height
        except AttributeError:
            original_height = self.engine.source_height
        source_sum = float(original_width + original_height)
        destination_sum = float(width + height)
        resize_ratio = destination_sum / source_sum
        if resize_ratio < resize_ratio_threshold:
            # Perf fix: only extract the RGB pixel data when we will
            # actually sharpen (the extraction was previously done
            # unconditionally, even when this branch was skipped).
            mode, data = self.engine.image_data_as_rgb()
            imgdata = _sharpen.apply(
                mode, width, height, amount, radius,
                luminance_only, data
            )
            self.engine.set_image_data(imgdata)
| {
"content_hash": "4ebea1781e469a52378e9ccc57ed5971",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 64,
"avg_line_length": 30.634146341463413,
"alnum_prop": 0.6035031847133758,
"repo_name": "wikimedia/thumbor-conditional-sharpen",
"id": "f0e48f3ad1d56bd61d4c6aa24b2f69a50ec0faff",
"size": "1754",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wikimedia_thumbor_conditional_sharpen/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2673"
}
],
"symlink_target": ""
} |
from mixbox import fields
import cybox.bindings.win_hook_object as win_hook_binding
from cybox.objects.library_object import Library
from cybox.objects.win_handle_object import WinHandle
from cybox.common import ObjectProperties, String, NonNegativeInteger
class WinHook(ObjectProperties):
    """CybOX object properties for a Windows hook (WindowsHookObjectType)."""

    # Generated-binding plumbing: module, binding class, XML namespace/type.
    _binding = win_hook_binding
    _binding_class = win_hook_binding.WindowsHookObjectType
    _namespace = 'http://cybox.mitre.org/objects#WinHookObject-1'
    _XSI_NS = "WinHookObj"
    _XSI_TYPE = "WindowsHookObjectType"

    # Typed fields map one-to-one onto the binding's XML elements.
    type_ = fields.TypedField("Type", String)
    handle = fields.TypedField("Handle", WinHandle)
    hooking_function_name = fields.TypedField("Hooking_Function_Name", String)
    hooking_module = fields.TypedField("Hooking_Module", Library)
    thread_id = fields.TypedField("Thread_ID", NonNegativeInteger)
| {
"content_hash": "3ad44055cdb170b566842d142f310170",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 78,
"avg_line_length": 41.45,
"alnum_prop": 0.7611580217129071,
"repo_name": "CybOXProject/python-cybox",
"id": "55b66d4b5c08f8b2f6dd9d35552380c0d0a495c4",
"size": "934",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cybox/objects/win_hook_object.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "4610747"
}
],
"symlink_target": ""
} |
import sys
import os
import codecs
import traceback
from xml.sax.saxutils import unescape
import untangle
def getFiles(directoryName):
    """Recursively collect and return every file path under ``directoryName``."""
    collected = []
    getFilesIter(directoryName, collected)
    return collected
def getFilesIter(directoryName, item_list):
    """Depth-first walk that appends file paths into ``item_list`` (mutated in place)."""
    if not os.path.isdir(directoryName):
        raise Exception("directory is not found")
    for entry in list_dir(directoryName):
        path = os.path.join(directoryName, entry)
        if os.path.isdir(path):
            # recurse into subdirectories
            getFilesIter(path, item_list)
        else:
            item_list.append(path)
def list_dir(directoryName):
    """List directory entries, sorted numerically when every name is an integer.

    Falls back to os.listdir's native order when any entry is non-numeric.
    """
    raw_list = os.listdir(directoryName)
    try:
        # Sort "1", "2", "10" numerically instead of lexicographically.
        return sorted(raw_list, key=int)
    except ValueError:
        # Narrowed from a broad `except Exception as e` (binding unused):
        # int() on a non-numeric entry name raises ValueError only.
        return raw_list
class EmotionCheck:
    """Maps Chinese emotion labels to coarse sentiment classes.

    Raw label ids: anger=0, disgust=1, fear=2, sadness=3, happiness=4,
    like=5, surprise=6, empty string=7.
    NOTE: this module is Python 2 (`unicode`, `print` statements elsewhere).
    """

    emotion_dict = {
        u'愤怒': 0,
        u'厌恶': 1,
        u'恐惧': 2,
        u'悲伤': 3,
        u'高兴': 4,
        u'喜好': 5,
        u'惊讶': 6,
        u'': 7
    }

    def __init__(self):
        pass

    @staticmethod
    def get_emotion_type(emotion):
        # Collapse the 8 raw labels into 3 classes:
        #   0 = negative (ids 0-3), 1 = positive (ids 4-6),
        #   2 = no emotion (empty label, id 7).
        # Raises Exception for unknown labels or non-unicode input.
        if isinstance(emotion, unicode):
            if emotion in EmotionCheck.emotion_dict:
                emotion_type = EmotionCheck.emotion_dict[emotion]
            else:
                raise Exception("input {} is not a valid emotion type".format(emotion.encode('utf-8')))
            if emotion_type == 7:
                return 2
            if emotion_type > 3:
                return 1
            else:
                return 0
        else:
            raise Exception("input {} is not unicode".format(emotion))
def extractXmlFile(filename):
# with codecs.open(filename, "r", encoding="utf-16-be") as f:
# content = f.read()
# obj = untangle.parse(content)
obj = untangle.parse(filename)
weibo_list = obj.TestingData.weibo
comment_list = []
sentiment_list = []
for weibo in weibo_list:
weibo_id = int(weibo['id'])
for sentence in weibo.sentence:
sentence_id = int(sentence['id'])
cdata = unescape(sentence.cdata, {"'": "'", """: '"'})
comment_list.append(cdata)
if sentence["opinionated"] != "Y":
# drop sentences which opinion are N
sentiment_list.append(3)
else:
emotion = sentence["emotion-1-type"]
try:
sentiment = EmotionCheck.get_emotion_type(emotion)
except Exception as e:
print "weibo {} sentence {} error: {}".format(weibo_id, sentence_id, str(e))
sentiment_list.append(sentiment)
return comment_list, sentiment_list
if __name__ == '__main__':
    # reload(sys)
    # sys.setdefaultencoding('utf-8')
    # Collect every labelled XML file under dataset 13 and flatten it into a
    # single "text|label" file, one sentence per line.
    fileNames = getFiles("../dataset/13")
    # fileNames = ["dataset/raw/jiu_lin_hou_dang_jiao_shou.xml"]
    comment_list = []
    sentiment_list = []
    for fileName in fileNames:
        (comment, sentiment) = extractXmlFile(fileName)
        comment_list += comment
        sentiment_list += sentiment
    print len(comment_list)
    with open("13.txt", "wb") as f:
        for i in range(len(comment_list)):
            content = comment_list[i]
            # '|' separates UTF-8 encoded sentence text from its numeric label
            f.write("{}|{}\n".format(content.encode('utf-8'), sentiment_list[i]))
    print 'hello'
| {
"content_hash": "c38c4ae9ff390810fe8726b9d4bfc19f",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 103,
"avg_line_length": 27.716666666666665,
"alnum_prop": 0.5640408899579074,
"repo_name": "kevinlee9/cnn-text-classification-tf",
"id": "b5dfa23e21cbe8a6626a2a5c414a969de508de35",
"size": "3378",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "preprocessing/13_text_preprocess.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "52800"
}
],
"symlink_target": ""
} |
from db_manager import db
import datetime
class Membership(db.Model):
    """SQLAlchemy model for a point-of-sale member/user account."""

    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(100), unique=True)
    # NOTE(review): stored credential — presumably hashed by the caller; verify.
    password = db.Column(db.String(100))
    email = db.Column(db.String(255), unique=True)
    # roles stored as a single string — format (e.g. comma-separated) not visible here
    roles = db.Column(db.String(100))
    # audit columns: who/when created and last updated
    created_at = db.Column(db.DateTime, nullable=False)
    created_by = db.Column(db.Integer, nullable=False)
    updated_at = db.Column(db.DateTime, nullable=True)
    updated_by = db.Column(db.Integer, nullable=True)
    # activation flag; True means the account is active
    status = db.Column(db.Boolean, default=True)
status = db.Column(db.Boolean, default=True) | {
"content_hash": "043ee94b45b918c91c1e196f59a9cf25",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 55,
"avg_line_length": 37.93333333333333,
"alnum_prop": 0.6994727592267135,
"repo_name": "cackharot/ngen-milk-pos",
"id": "7b459c2f8061a994f50f5a0783dbaf83a414e3e7",
"size": "569",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mpos/models/membership.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "204718"
},
{
"name": "Python",
"bytes": "142162"
},
{
"name": "Scheme",
"bytes": "6249"
},
{
"name": "Shell",
"bytes": "3290"
}
],
"symlink_target": ""
} |
from basic_context import BasicGameWorld
from basic_context import BasicGameEngine
from basic_context import NORTH, SOUTH, EAST, WEST, UP, DOWN, RIGHT, LEFT, IN, OUT, FORWARD, BACK, NORTH_WEST, NORTH_EAST, SOUTH_WEST, SOUTH_EAST, NOT_DIRECTION
from basic_context import Object

# Build the game world: clouds -> museum -> hallway / dark room.
world = BasicGameWorld()

puffy_cloud = world.create_location("puffy cloud", "a cloud with a tall ladder reaching up to more clouds.")
# Bug fix: the key was created twice and the first instance discarded; create
# it exactly once and keep the reference for the locked box below.
key = puffy_cloud.create_object("key", "small rubbed-down silver key")

gray_rain_cloud = world.create_location(
    "gray rain cloud",
    """a larger cloud that looks gray and sad. small raindrops are falling. It starts raining hard now, and there is no other way to go but down with the rain""")
world.create_connection("ladder", puffy_cloud, gray_rain_cloud, [IN, UP], [OUT, DOWN])

museum = world.create_location("museum", "the rain takes you down as the cloud falls apart. you fall and fall untill you are sucked through a chimney leading into a museum. You climb out ad find yourself in a huge room with dinosaur skeletons everywhere. There's a hallway to your right, door to your left, and a flight of stairs leading up")
# The rain connection is one-way: you cannot climb back up into the cloud.
world.create_connection("rain", gray_rain_cloud, museum, [IN, DOWN], [NOT_DIRECTION])

hallway = world.create_location("hallway", "a long and dark hallway. You are not sure what it leads to.")
dark_room = world.create_location("dark room", "a small dark room. You can't see anything.")
world.create_connection("spacebetweenroomandhallway", museum, hallway, [IN, RIGHT], [OUT, LEFT])
world.create_connection("door", museum, dark_room, [IN, LEFT], [OUT, RIGHT])

end_of_hallway = world.create_location("end of hallway", "the hallway ends")
world.create_connection("spacebetweenhandeoh", hallway, end_of_hallway, [IN, FORWARD], [OUT, BACK])

# Bug fix: the box was also created twice with the first result discarded;
# create the single locked box and require the silver key to open it.
small_wooden_box = end_of_hallway.create_container("small wooden box", "a box at the end of the hallway. its a dead end. there is a lock on it.")
small_wooden_box.make_requirement(key)
end_of_hallway.create_object("carrot", "an orange carrot")

# Bug fix: removed a stray bare `insert` statement that raised NameError at
# import time and prevented the game from starting.

game = BasicGameEngine(world)
game.run()
| {
"content_hash": "2b3f3c7561a335a5e6166266b952eb7a",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 342,
"avg_line_length": 48.702127659574465,
"alnum_prop": 0.7391874180865007,
"repo_name": "sleepinghungry/wwif",
"id": "f58d07ee8e5b854d8ba6038718bdb3254a23b0c3",
"size": "2289",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "students/flannery/firstgame.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "643"
},
{
"name": "HTML",
"bytes": "54502"
},
{
"name": "Makefile",
"bytes": "1246"
},
{
"name": "Python",
"bytes": "10872274"
},
{
"name": "Shell",
"bytes": "429"
}
],
"symlink_target": ""
} |
from fastapi import FastAPI
from fastapi.middleware.gzip import GZipMiddleware
# Application with response compression: any response body of at least
# 1000 bytes is gzip-compressed when the client sends Accept-Encoding: gzip.
app = FastAPI()
app.add_middleware(GZipMiddleware, minimum_size=1000)


@app.get("/")
async def main():
    # Example payload; compressed only if it reaches minimum_size.
    return "somebigcontent"
| {
"content_hash": "521990ca21d73f30b49d003391ebbbe5",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 53,
"avg_line_length": 19.363636363636363,
"alnum_prop": 0.7605633802816901,
"repo_name": "tiangolo/fastapi",
"id": "b99e3edd19bffe5f96a85399555a6a660b1ac129",
"size": "213",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs_src/advanced_middleware/tutorial003.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "25"
},
{
"name": "HTML",
"bytes": "187"
},
{
"name": "Python",
"bytes": "1928986"
},
{
"name": "Shell",
"bytes": "1383"
}
],
"symlink_target": ""
} |
import json
import operator
import logging
import re
import time
from socket import socket, AF_INET, SOCK_DGRAM
logger = logging.getLogger(__name__)
def ip():
    """Return the host's default outbound IPv4 address.

    "Connects" a UDP socket to a multicast address (no packet is sent) and
    reads back the local address the kernel selected for that route.

    Raises RuntimeError when no usable route/IP can be determined.
    """
    ip = None
    s = socket(AF_INET, SOCK_DGRAM)
    try:
        s.connect(('239.255.0.0', 9))
        ip = s.getsockname()[0]
    except IOError:
        # Bug fix: `socket` here is the from-imported *class*, so the
        # original `except socket.error` raised AttributeError whenever an
        # error occurred.  socket.error is an IOError alias/subclass, so
        # catching IOError covers it on both Python 2.6+ and 3.
        raise RuntimeError("Cannot determine host IP")
    finally:
        # Close explicitly instead of relying on `del` + refcounting.
        s.close()
    return ip
def serialize(data):
    """JSON-encode ``data``, which must be a dict; return '{}' on any failure.

    Best-effort contract: callers always receive a valid JSON object string.
    """
    try:
        # isinstance (instead of `type(data) != dict`) also accepts dict
        # subclasses such as OrderedDict, which json.dumps handles fine.
        if not isinstance(data, dict):
            raise TypeError('Must be a dict')
        return json.dumps(data)
    except Exception as e:  # `as` form works on Python 2.6+ and 3
        logger.warn('Cannot serialize: %s [%s]', data, e)
        return '{}'
def unserialize(serialized):
    """Decode a JSON object string into a dict.

    Returns {} for falsy input or anything that fails to decode to a dict.
    """
    if not serialized:
        return {}
    try:
        data = json.loads(serialized)
        # json.loads can yield lists/scalars; only dicts are acceptable.
        if not isinstance(data, dict):
            raise TypeError('Not a dict')
        return data
    except Exception as e:  # `as` form works on Python 2.6+ and 3
        logger.warn('Cannot unserialize: %s [%s]', serialized, e)
        return {}
def dict_get_path(the_dict, path):
    """Look up a dotted ``path`` (e.g. 'a.b.c') in nested dicts.

    Returns None when any component is missing or an intermediate value
    is not indexable.
    """
    current = the_dict
    for component in path.split('.'):
        try:
            current = current[component]
        except (TypeError, KeyError, IndexError):
            # Bug fix: was a bare `except:` (swallowing e.g.
            # KeyboardInterrupt) around a py2-only bare `reduce` call;
            # the explicit loop works on both Python 2 and 3.
            return None
    return current
def dict_set_path(the_dict, path, value):
    """Set ``value`` at dotted ``path``, creating intermediate dicts as needed.

    Any missing or non-dict intermediate is replaced with a fresh dict.
    """
    components = path.split('.')
    parents, leaf = components[:-1], components[-1]
    node = the_dict
    for component in parents:
        if component not in node or type(node[component]) != dict:
            node[component] = {}
        node = node[component]
    node[leaf] = value
def dict_filter(the_dict, field_or_fields=None):
    """Project ``the_dict`` onto one dotted path, a list of paths, or return it whole.

    With None, the dict is returned unchanged; with a list, a {path: value}
    mapping is built; with a string path, the single value is returned.
    """
    if field_or_fields is None:
        return the_dict
    if type(field_or_fields) == list:
        return dict((f, dict_get_path(the_dict, f)) for f in field_or_fields)
    if isinstance(field_or_fields, (str, unicode)):
        return dict_get_path(the_dict, field_or_fields)
    raise TypeError('Invalid type for field path: %s' % type(field_or_fields))
def get_operator(op):
    """Map a comparison token ("==", "=", "!=", ">=", "<=", ">", "<") to its
    function from the operator module; raise ValueError for anything else."""
    mapping = {
        "==": operator.eq,
        "=": operator.eq,
        "!=": operator.ne,
        ">=": operator.ge,
        "<=": operator.le,
        ">": operator.gt,
        "<": operator.lt,
    }
    try:
        return mapping[op]
    except KeyError:
        raise ValueError('Unknown operator: %s' % op)
def match_predicates(predicates, the_dict):
    """Return True iff ``the_dict`` satisfies every predicate (AND semantics).

    Each predicate is {'path', 'op', 'value'}; both sides are coerced to int
    when both parse as integers, so "5" matches 5 numerically.
    """
    for predicate in predicates:
        actual = dict_get_path(the_dict, predicate['path'])
        expected = predicate['value']
        if actual is None and expected is not None:
            return False
        try:
            # probe both sides first so we coerce either both or neither
            int(actual)
            int(expected)
            actual, expected = int(actual), int(expected)
        except (ValueError, TypeError):
            pass
        if not predicate['op'](actual, expected):
            return False
    return True
def create_filter(filters):
    """Compile a comma-separated "path<op>value" expression into a predicate.

    A bare "field" clause requires the field to exist; "!field" requires it
    to be absent. Falsy input yields a match-everything predicate.
    """
    if not filters:
        return lambda a_dict: True
    predicates = []
    for clause in filters.replace(' ', '').split(','):
        match = re.split('(!?[^><!=]+)(?:(>=|<=|!=|=|<|>)(.*))?', clause, 2)
        path, op_token, value = match[1], match[2], match[3]
        if op_token:
            predicate = {'path': path, 'op': get_operator(op_token), 'value': value}
        elif path[0] == '!':
            # "!field": the field must be absent (resolve to None)
            predicate = {'path': path[1:], 'op': operator.is_, 'value': None}
        else:
            # bare "field": the field must exist (resolve to non-None)
            predicate = {'path': path, 'op': operator.is_not, 'value': None}
        predicates.append(predicate)
    return lambda the_dict: match_predicates(predicates, the_dict)
class ColorizingStreamHandler(logging.StreamHandler):
"""Provide a nicer logging output to error output with colors"""
colors = ['black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white']
color_map = dict([(x, colors.index(x)) for x in colors])
level_map = {
logging.DEBUG: (None, 'blue', " DBG"),
logging.INFO: (None, 'green', "INFO"),
logging.WARNING: (None, 'yellow', "WARN"),
logging.ERROR: (None, 'red', " ERR"),
logging.CRITICAL: ('red', 'white', "CRIT")
}
csi = '\x1b['
reset = '\x1b[0m'
@property
def is_tty(self):
isatty = getattr(self.stream, 'isatty', None)
return isatty and isatty()
def colorize(self, message, record):
if record.levelno in self.level_map:
params = []
if bg in self.color_map:
params.append(str(self.color_map[bg] + 40))
if fg in self.color_map:
params.append(str(self.color_map[fg] + 30))
if bold:
params.append('1')
if params:
message = ''.join((self.csi, ';'.join(params),
'm', message, self.reset))
return message
def format(self, record):
message = logging.StreamHandler.format(self, record)
# Build the prefix
params = []
levelno = record.levelno
if levelno not in self.level_map:
levelno = logging.WARNING
bg, fg, level = self.level_map[levelno]
if bg in self.color_map:
params.append(str(self.color_map[bg] + 40))
if fg in self.color_map:
params.append(str(self.color_map[fg] + 30))
params.append("1m")
level = "[%s]" % level
return "\n".join(["%s %s: %s" % (
time.strftime("%Y-%m-%dT%H:%M:%S"),
self.is_tty and params and ''.join((self.csi, ';'.join(params),
level, self.reset)) or level,
line)
for line in message.split('\n')])
| {
"content_hash": "87090b683edb654bf67996d5f6acfbb4",
"timestamp": "",
"source": "github",
"line_count": 181,
"max_line_length": 87,
"avg_line_length": 31.773480662983424,
"alnum_prop": 0.524604416623196,
"repo_name": "rs/zkfarmer",
"id": "8c9e997111be8ef007bcbfa3a2b62a56aee89ed0",
"size": "5751",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zkfarmer/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "99032"
}
],
"symlink_target": ""
} |
"""
Data structures for representing market data.
"""
import datetime
from string import Template
from emds import __version__
from emds.exceptions import ItemAlreadyPresentError
from emds.common_utils import check_for_naive_dtime
class MarketOrderList(object):
    """
    A list of MarketOrder objects, with some added features for assisting
    with serializing to the Unified Uploader Data Interchange format.

    :attr list_type: This may be used in your logic to separate orders from history.
    """

    list_type = "orders"

    def __init__(self, upload_keys=None, order_generator=None,
                 *args, **kwargs):
        """
        :keyword list upload_keys: Upload key dicts for the serialized message.
        :keyword dict order_generator: Identifies the generating software.
        """
        # Maps '<region_id>_<type_id>' -> MarketItemsInRegionList so orders
        # stay grouped per item+region combo.
        self._orders = {}
        self.upload_keys = upload_keys or []
        if not isinstance(self.upload_keys, list):
            raise TypeError('upload_keys must be a list.')
        self.order_generator = order_generator or {'name': 'EMDS', 'version': __version__}
        if not isinstance(self.order_generator, dict):
            raise TypeError('order_generator must be a dict.')

    def __iter__(self):
        """
        Iterates over the grouped Item+Region combos, in the form of
        :py:class:`MarketItemsInRegionList` instances (same as
        :py:meth:`get_all_order_groups`).

        .. note:: This is a generator!

        :rtype: generator
        :returns: Generates a list of :py:class:`MarketItemsInRegionList`
            instances, which contain :py:class:`MarketOrder` instances.
        """
        return self.get_all_order_groups()

    def __repr__(self):
        """
        Basic string representation of the order.
        """
        list_repr = "<MarketOrderList: \n"
        for order_list in [olist.orders for olist in self._orders.values()]:
            for order in order_list:
                list_repr += repr(order)
        return list_repr

    def __len__(self):
        """
        Implements a somewhat inefficient length tracking method. Returns the
        total number of orders contained within.

        :rtype: int
        :returns: The number of orders contained within the list.
        """
        total = 0
        for orders in self._orders.values():
            total += len(orders)
        return total

    def __contains__(self, item):
        """
        Used for checking whether an order ID is contained within the order list.

        :param item: The MarketOrder or order ID to look for.
        :type item: int or MarketOrder
        :rtype: bool
        :returns: True if the given order can be found, False if not.
        """
        if isinstance(item, MarketOrder):
            order_id = item.order_id
        else:
            order_id = int(item)
        for order in self.get_all_orders_ungrouped():
            if order.order_id == order_id:
                return True
        # No matches.
        return False

    def get_all_orders_ungrouped(self):
        """
        Uses a generator to return all orders within. :py:class:`MarketOrder`
        objects are yielded directly, instead of being grouped in
        :py:class:`MarketItemsInRegionList` instances.

        .. note:: This is a generator!

        :rtype: generator
        :returns: Generates a list of :py:class:`MarketOrder` instances.
        """
        for olist in self._orders.values():
            for order in olist.orders:
                yield order

    def get_all_order_groups(self):
        """
        Uses a generator to return all grouped Item+Region combos, in the form
        of :py:class:`MarketItemsInRegionList` instances. This is useful in
        that it is possible to express a lack of an item in a specific region.

        .. note:: This is a generator!

        :rtype: generator
        :returns: Generates a list of :py:class:`MarketItemsInRegionList`
            instances, which contain :py:class:`MarketOrder` instances.
        """
        for olist in self._orders.values():
            yield olist

    def add_order(self, order):
        """
        Adds a MarketOrder instance to the list of market orders contained
        within this order list. Does some behind-the-scenes magic to get it
        all ready for serialization.

        :param MarketOrder order: The order to add to this order list.
        """
        # This key is used to group the orders based on region.
        key = '%s_%s' % (order.region_id, order.type_id)
        # dict.has_key() was removed in Python 3; 'in' works on both.
        if key not in self._orders:
            # We don't have any orders for this yet. Prep the region+item
            # combo by instantiating a new MarketItemsInRegionList for
            # the MarketOrders.
            self.set_empty_region(
                order.region_id,
                order.type_id,
                order.generated_at
            )
        # The MarketOrder gets stuffed into the MarketItemsInRegionList for this
        # item+region combo.
        self._orders[key].add_order(order)

    def set_empty_region(self, region_id, type_id, generated_at,
                         error_if_orders_present=True):
        """
        Prepares for the given region+item combo by instantiating a
        :py:class:`MarketItemsInRegionList` instance, which will track
        region ID, type ID, and generated time. This is mostly used for
        the JSON deserialization process in case there are no orders for
        the given region+item combo.

        :param int region_id: The region ID.
        :param int type_id: The item's type ID.
        :param datetime.datetime generated_at: The time that the order set
            was generated.
        :keyword bool error_if_orders_present: If True, raise an exception if
            an order already exists for this item+region combo when this is
            called. This failsafe may be disabled by passing False here.
        """
        key = '%s_%s' % (region_id, type_id)
        if error_if_orders_present and key in self._orders:
            raise ItemAlreadyPresentError(
                "Orders already exist for the given region and type ID. "
                "Pass error_if_orders_present=False to disable this failsafe, "
                "if desired."
            )
        self._orders[key] = MarketItemsInRegionList(
            region_id, type_id, generated_at)
class MarketItemsInRegionList(object):
    """
    Intermediary container for the MarketOrders of one item+region combo.
    It exists chiefly so a generation time can be recorded even for combos
    that have no orders at all (where there is no first order to read the
    time from).

    :attr orders: A list of MarketOrder objects.
    """

    def __init__(self, region_id, type_id, generated_at):
        """
        :param int region_id: The region ID that the data set pertains to.
        :param int type_id: The type ID of the item contained in the order set.
        :param datetime.datetime generated_at: When the data set was first
            generated.
        """
        if region_id:
            self.region_id = int(region_id)
        else:
            # A falsy region ID means the client lacked the data.
            self.region_id = None
        self.type_id = int(type_id)
        if not isinstance(generated_at, datetime.datetime):
            raise TypeError('generated_at should be a datetime.')
        self.generated_at = check_for_naive_dtime(generated_at)
        self.orders = []

    def __len__(self):
        """
        :rtype: int
        :returns: The number of orders contained within the region list.
        """
        return len(self.orders)

    def __iter__(self):
        """
        Yield every :py:class:`MarketOrder` held in this combo.

        :rtype: generator
        """
        for market_order in self.orders:
            yield market_order

    def __contains__(self, item):
        """
        Check whether an order ID is contained within the order list.

        :param item: The MarketOrder or order ID to look for.
        :type item: int or MarketOrder
        :rtype: bool
        :returns: True if the given order can be found, False if not.
        """
        wanted_id = item.order_id if isinstance(item, MarketOrder) else int(item)
        return any(order.order_id == wanted_id for order in self.orders)

    def add_order(self, order):
        """
        Add a :py:class:`MarketOrder` instance to this region+item list.

        :param MarketOrder order: The order to add.
        """
        self.orders.append(order)
class MarketOrder(object):
    """
    Represents a single market buy or sell order.
    """

    def __init__(self, order_id, is_bid, region_id, solar_system_id,
                 station_id, type_id, price, volume_entered, volume_remaining,
                 minimum_volume, order_issue_date, order_duration, order_range,
                 generated_at):
        """
        :param int order_id: The unique order ID for this order.
        :param bool is_bid: ``True`` for a bid (buy order), ``False`` for a
            sell order.
        :param int region_id: The region the order is in (falsy -> None).
        :param int solar_system_id: The solar system the order is in
            (falsy -> None).
        :param int station_id: The station the order is in.
        :param int type_id: The item type of the order.
        :param float price: The buy/sell price per item.
        :param int volume_entered: The original amount of the buy/sell order.
        :param int volume_remaining: The quantity remaining in the order.
        :param int minimum_volume: The minimum volume that may remain
            before the order is removed.
        :param datetime.datetime order_issue_date: The time at which the order
            was first posted.
        :param int order_duration: The duration (in days) of the order.
        :param int order_range: The range of the order.
        :param datetime.datetime generated_at: Time of generation.
        """
        self.order_id = int(order_id)
        if not isinstance(is_bid, bool):
            raise TypeError('is_bid should be a bool.')
        self.is_bid = is_bid
        # A falsy id means the uploading client lacked the data for the
        # result rows; store None rather than 0.
        self.region_id = int(region_id) if region_id else None
        self.solar_system_id = int(solar_system_id) if solar_system_id else None
        self.station_id = int(station_id)
        self.type_id = int(type_id)
        self.price = float(price)
        self.volume_entered = int(volume_entered)
        self.volume_remaining = int(volume_remaining)
        self.minimum_volume = int(minimum_volume)
        if not isinstance(order_issue_date, datetime.datetime):
            raise TypeError('order_issue_date should be a datetime.')
        self.order_issue_date = check_for_naive_dtime(order_issue_date)
        self.order_duration = int(order_duration)
        self.order_range = int(order_range)
        if not isinstance(generated_at, datetime.datetime):
            raise TypeError('generated_at should be a datetime.')
        self.generated_at = check_for_naive_dtime(generated_at)

    def __repr__(self):
        """
        Basic string representation of the order.
        """
        template = Template(
            "<Market Order: \n"
            " order_id: $order_id\n"
            " is_bid: $is_bid\n"
            " region_id: $region_id\n"
            " solar_system_id: $solar_system_id\n"
            " station_id: $station_id\n"
            " type_id: $type_id\n"
            " price: $price\n"
            " volume_entered: $volume_entered\n"
            " volume_remaining: $volume_remaining\n"
            " minimum_volume: $minimum_volume\n"
            " order_issue_date: $order_issue_date\n"
            " order_duration: $order_duration\n"
            " order_range: $order_range>\n"
        )
        # Every template placeholder is an instance attribute, so the
        # instance __dict__ doubles as the substitution mapping.
        return template.substitute(vars(self))
class MarketHistoryList(object):
    """
    A class for storing market order history for serialization.

    :attr list_type: This may be used in your logic to separate orders from history.
    """

    list_type = "history"

    def __init__(self, upload_keys=None, history_generator=None,
                 *args, **kwargs):
        """
        :keyword list upload_keys: Upload key dicts for the serialized message.
        :keyword dict history_generator: Identifies the generating software.
        """
        # Will hold an organized store of history items, keyed by
        # '<region_id>_<type_id>'.
        self._history = {}
        self.upload_keys = upload_keys or []
        if not isinstance(self.upload_keys, list):
            raise TypeError('upload_keys must be a list.')
        self.history_generator = history_generator or {'name': 'EMDS', 'version': __version__}
        if not isinstance(self.history_generator, dict):
            raise TypeError('history_generator must be a dict.')

    def __len__(self):
        """
        Implements a somewhat inefficient length tracking method. Returns the
        total number of entries contained within.

        :rtype: int
        """
        total = 0
        for orders in self._history.values():
            total += len(orders)
        return total

    def __iter__(self):
        """
        Uses a generator to return all history entries within.

        .. note:: This is a generator!

        :rtype: generator
        :returns: Generates a list of :py:class:`MarketHistoryEntry` instances.
        """
        for entry_list in self._history.values():
            for entry in entry_list:
                yield entry

    def __contains__(self, item):
        """
        Used for checking whether a type ID is contained within the history list.

        :param item: The MarketHistoryEntry or type ID to look for.
        :type item: int or MarketHistoryEntry
        :rtype: bool
        :returns: True if the given type ID can be found, False if not.
        """
        if isinstance(item, MarketHistoryEntry):
            type_id = item.type_id
        else:
            type_id = int(item)
        for entry in self.get_all_entries_ungrouped():
            if entry.type_id == type_id:
                return True
        # No matches.
        return False

    def __repr__(self):
        """
        Basic string representation of the history.
        """
        list_repr = "<MarketHistoryList: \n"
        for history_entry_list in self._history.values():
            for entry in history_entry_list:
                list_repr += repr(entry)
        return list_repr

    def get_all_entries_ungrouped(self):
        """
        Uses a generator to return all history entries within.
        :py:class:`MarketHistoryEntry` objects are yielded directly, instead of
        being grouped in :py:class:`HistoryItemsInRegion` instances.

        .. note:: This is a generator!

        :rtype: generator
        :returns: Generates a list of :py:class:`MarketHistoryEntry` instances.
        """
        for entry_list in self._history.values():
            for entry in entry_list:
                yield entry

    def get_all_entries_grouped(self):
        """
        Uses a generator to return all grouped Item+Region combos, in the form
        of :py:class:`HistoryItemsInRegion` instances. This is useful in
        that it is possible to express a lack of an item in a specific region.

        .. note:: This is a generator!

        :rtype: generator
        :returns: Generates a list of :py:class:`HistoryItemsInRegion`
            instances, which contain :py:class:`MarketHistoryEntry` instances.
        """
        for history_entry_list in self._history.values():
            yield history_entry_list

    def add_entry(self, entry):
        """
        Adds a MarketHistoryEntry instance to the list of market history entries
        contained within this instance. Does some behind-the-scenes magic to
        get it all ready for serialization.

        :param MarketHistoryEntry entry: The history entry to add to
            instance.
        """
        # This key is used to group the orders based on region.
        key = '%s_%s' % (entry.region_id, entry.type_id)
        # dict.has_key() was removed in Python 3; 'in' works on both.
        if key not in self._history:
            # We don't have any entries for this yet. Prep the region+item
            # combo by instantiating a new HistoryItemsInRegionList for
            # the entries.
            self.set_empty_region(
                entry.region_id,
                entry.type_id,
                entry.generated_at
            )
        # The entry gets stuffed into the HistoryItemsInRegionList for this
        # item+region combo.
        self._history[key].add_entry(entry)

    def set_empty_region(self, region_id, type_id, generated_at,
                         error_if_entries_present=True):
        """
        Prepares for the given region+item combo by instantiating a
        :py:class:`HistoryItemsInRegionList` instance, which will track
        region ID, type ID, and generated time. This is mostly used for
        the JSON deserialization process in case there are no orders for
        the given region+item combo.

        :param int region_id: The region ID.
        :param int type_id: The item's type ID.
        :param datetime.datetime generated_at: The time that the order set
            was generated.
        :keyword bool error_if_entries_present: If True, raise an exception if
            an entry already exists for this item+region combo when this is
            called. This failsafe may be disabled by passing False here.
        """
        key = '%s_%s' % (region_id, type_id)
        if error_if_entries_present and key in self._history:
            # The message previously named the (non-existent) keyword
            # error_if_orders_present; it now matches the real signature.
            raise ItemAlreadyPresentError(
                "Entries already exist for the given region and type ID. "
                "Pass error_if_entries_present=False to disable this "
                "failsafe, if desired."
            )
        self._history[key] = HistoryItemsInRegionList(
            region_id, type_id, generated_at)
class HistoryItemsInRegionList(object):
    """
    Intermediary container for the MarketHistoryEntry objects of a single
    item+region combo. It exists chiefly so a generation time can be
    recorded even for combos with no entries at all (where there is no
    first entry to read the time from).

    :attr entries: A list of MarketHistoryEntry objects.
    """

    def __init__(self, region_id, type_id, generated_at):
        """
        :param int region_id: The region ID that the data set pertains to.
        :param int type_id: The type ID of the item contained in the order set.
        :param datetime.datetime generated_at: When the data set was first
            generated.
        """
        if region_id:
            self.region_id = int(region_id)
        else:
            # A falsy region ID means the client lacked the data.
            self.region_id = None
        self.type_id = int(type_id)
        if not isinstance(generated_at, datetime.datetime):
            raise TypeError('generated_at should be a datetime.')
        self.generated_at = check_for_naive_dtime(generated_at)
        self.entries = []

    def __iter__(self):
        """
        Yield every :py:class:`MarketHistoryEntry` held in this combo.

        :rtype: generator
        """
        for history_entry in self.entries:
            yield history_entry

    def __len__(self):
        """
        :rtype: int
        :returns: The number of entries contained within the region list.
        """
        return len(self.entries)

    def add_entry(self, entry):
        """
        Add a :py:class:`MarketHistoryEntry` instance to this region+item list.

        :param MarketHistoryEntry entry: The history entry to add.
        """
        self.entries.append(entry)
class MarketHistoryEntry(object):
    """
    Represents a single point of market history data.
    """

    def __init__(self, type_id, region_id, historical_date, num_orders,
                 low_price, high_price, average_price, total_quantity,
                 generated_at):
        """
        :param int type_id: The item type the history point describes.
        :param int region_id: The region (falsy -> None, client lacked data).
        :param datetime.datetime historical_date: The day the point covers.
        :param int num_orders: Number of orders on that day.
        :param float low_price: Lowest price seen.
        :param float high_price: Highest price seen.
        :param float average_price: Average price.
        :param int total_quantity: Total quantity moved.
        :param datetime.datetime generated_at: Time of generation.
        """
        self.type_id = int(type_id)
        # A falsy region id means the client lacked the data for the
        # result rows; store None rather than 0.
        self.region_id = int(region_id) if region_id else None
        if not isinstance(historical_date, datetime.datetime):
            raise TypeError('historical_date should be a datetime, not %s.' % type(historical_date))
        self.historical_date = check_for_naive_dtime(historical_date)
        self.num_orders = int(num_orders)
        self.low_price = float(low_price)
        self.high_price = float(high_price)
        self.average_price = float(average_price)
        self.total_quantity = int(total_quantity)
        if not isinstance(generated_at, datetime.datetime):
            raise TypeError('generated_at should be a datetime.')
        self.generated_at = check_for_naive_dtime(generated_at)

    def __repr__(self):
        """
        Basic string representation of the history entry.
        """
        template = Template(
            "<Market History Entry: \n"
            " type_id: $type_id\n"
            " region_id: $region_id\n"
            " historical_date: $historical_date\n"
            " num_orders: $num_orders\n"
            " low_price: $low_price\n"
            " high_price: $high_price\n"
            " average_price: $average_price\n"
            " total_quantity: $total_quantity\n"
            " generated_at: $generated_at\n"
        )
        # Every template placeholder is an instance attribute, so the
        # instance __dict__ doubles as the substitution mapping.
        return template.substitute(vars(self))
"content_hash": "b8d2a23b4f31413bc38d6e62138f6462",
"timestamp": "",
"source": "github",
"line_count": 609,
"max_line_length": 100,
"avg_line_length": 36.97865353037767,
"alnum_prop": 0.5972912966252221,
"repo_name": "gtaylor/EVE-Market-Data-Structures",
"id": "7414e66d25e8dc6f1727e4dc9b47eeab4e2c3005",
"size": "22520",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "emds/data_structures.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "76021"
}
],
"symlink_target": ""
} |
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import matplotlib.pyplot as plt
import numpy as np
from IPython import embed
from helper import color
def get_surface_data():
    """Build a demo surface Z = sin(R) over a [-5, 5) x [-5, 5) grid, then
    drop into an embedded IPython shell so the user can inspect (and alter)
    the intermediate variables before the surface is clipped and returned.
    """
    Y = np.arange(-5, 5, 0.25)
    X = np.arange(-5, 5, 0.25)
    X, Y = np.meshgrid(X, Y)
    R = np.sqrt(X ** 2 + Y ** 2)
    Z = np.sin(R)
    print(color("Estamos en la función: get_surface_data()"))
    print(color("\tExplora que variables existe (%who) y cambia sus valores"))
    print(color("\tPara salir de IPython utiliza Ctr-D."))
    embed()
    # Flatten the surface outside radius 3 (after the interactive session,
    # so the user saw the unclipped Z).
    Z[R > 3.0] = 0
    return X, Y, Z
def get_points():
    """Return three arrays (x, y, z) of 10 random points above the surface,
    pausing in an embedded IPython shell so the user can inspect them.
    """
    N = 10
    x = -5 + 10 * np.random.rand(N)
    y = -5 + 10 * np.random.rand(N)
    z = 1 + np.random.rand(N)
    print(color("Estamos en la función: get_points()"))
    print(color("\tExplora los valores de las variables (%whos)"))
    print(color("\tPara salir de IPython utiliza Ctr-D."))
    embed()
    return x, y, z
def plot_surface_data(X,Y,Z):
    """Render the surface on the shared 3-D axes and attach a colorbar.

    NOTE(review): relies on the module-level globals ``fig`` and ``ax``
    created in the script body below -- it must be called after those exist.
    """
    surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, alpha=0.5, cmap=cm.coolwarm)
    ax.set_zlim(-1.01, 1.01)
    # Ten evenly spaced z ticks, formatted with two decimals.
    ax.zaxis.set_major_locator(LinearLocator(10))
    ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
    fig.colorbar(surf, shrink=0.5, aspect=5)
    print color("Estamos en la función: plot_surface_data()")
    print color("\t¿Todavía existe la variable R? ¿Que le pasó?")
    print color("\tPara salir de IPython utiliza Ctr-D.")
    # Interactive pause; execution resumes when the user exits IPython.
    embed()
    return
def plot_points(x,y,z):
    """Scatter the random points over the surface on the shared 3-D axes.

    NOTE(review): relies on the module-level global ``ax`` created in the
    script body below -- it must be called after it exists.
    """
    ax.scatter(x, y, z, s=40)
    ax.set_ylabel("eje y")
    print color("Estamos en la función: plot_points()")
    print color("\t¿Que pasó con X, Y y Z? ¿Porque las variables están en minúsculas?")
    print color("\tPara salir de IPython utiliza Ctr-D.")
    # Interactive pause; execution resumes when the user exits IPython.
    embed()
    return
# Helping mat281 students
# Intro banner (Spanish): explains that this demo showcases IPython.embed.
print color("Este es un programa muy sencillo para mostrar las capacidades de IPython.embed")
print color("Los mensajes de ayuda de este ejemplo estarán en verde.")
print color("Para salir de IPython utiliza Ctr-D.")
print color("El programa continuará su ejecución normalmente.")
print color("")
# Get the data
X,Y,Z = get_surface_data()
x,y,z = get_points()
print color("Estamos en el contexto principal (fuera de toda función)")
print color("\tExplora que variables existe (%who) y cambia sus valores")
print color("\tPara salir de IPython utiliza Ctr-D.")
# Interactive pause at module level before plotting.
embed()
# Do the plots
# fig/ax are module-level globals read by plot_surface_data/plot_points above.
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
plot_surface_data(X,Y,Z)
plot_points(x,y,z)
plt.show()
| {
"content_hash": "004f9610d5c26af43d943d5548ffb9f2",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 93,
"avg_line_length": 32.857142857142854,
"alnum_prop": 0.6758893280632411,
"repo_name": "usantamaria/ipynb_para_docencia",
"id": "41e82cd68188e33ffd9480e3a6d3dd6ff7e822d9",
"size": "2571",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "05_python_errores/code/ejemplo_ipython.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3856"
},
{
"name": "Jupyter Notebook",
"bytes": "904167"
},
{
"name": "Python",
"bytes": "9719"
}
],
"symlink_target": ""
} |
from ...errors.httpconflictexception import HttpConflictException
import saklient
# module saklient.cloud.errors.deleteipv6netsfirstexception
class DeleteIpV6NetsFirstException(HttpConflictException):
    """HTTP 409: the requested operation cannot be performed.

    The (Japanese) default message states that the IPv6 network allocation
    must be released before the router can be deleted.
    """

    def __init__(self, status, code=None, message=""):
        """
        :param int status: HTTP status code.
        :param str code: API error code.
        :param str message: Optional override for the default error message.
        """
        if message is None or message == "":
            message = "要求された操作を行えません。ルータを削除する前に、IPv6ネットワークの割当を解除してください。"
        super(DeleteIpV6NetsFirstException, self).__init__(status, code, message)
| {
"content_hash": "3dfd46ae13fd97ce1a2064e1faebafaa",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 173,
"avg_line_length": 41.714285714285715,
"alnum_prop": 0.7397260273972602,
"repo_name": "hnakamur/saklient.python",
"id": "fc918637d5c54e3cdbc5f0c3f595eed17500a8e6",
"size": "784",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "saklient/cloud/errors/deleteipv6netsfirstexception.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "539448"
},
{
"name": "Shell",
"bytes": "874"
}
],
"symlink_target": ""
} |
"""
This code implements the streaming version of scikit learn's IPCA algorithm -
https://github.com/scikit-learn/scikit-learn/blob/7813f7efb/sklearn/decomposition/incremental_pca.py
"""
import numpy as np
import random
import pickle as p
from scipy import linalg
from sklearn.utils.extmath import svd_flip
import sys
sys.path.append("../")
from IoTPy.core.stream import Stream, StreamArray, run
from IoTPy.agent_types.op import map_window
from IoTPy.agent_types.source import source_list, source_list_to_stream
from IoTPy.agent_types.sink import sink_window
from IoTPy.helper_functions.recent_values import recent_values
from IoTPy.helper_functions.print_stream import print_stream
from IoTPy.concurrency.multicore import get_processes_and_procs
from IoTPy.concurrency.multicore import terminate_stream, extend_stream
from examples.PCA.IPCAutils import _incremental_mean_and_var
source_list = [[1,5,0.1],[3,1,-0.1],[3,7,0.2],[-4,4,-0.3],
[-1,-5,0.1],[2,0,0.2],[3,3,-0.2],[-4,8,0],
[-7,-2,0.1],'eof']
#source_list = np.array(source_list)
def source(out_stream):
    """Feed the module-level ``source_list`` test data into *out_stream*,
    returning whatever source_list_to_stream returns (the source handle)."""
    return source_list_to_stream(source_list, out_stream)
## def make_and_run_process(compute_func):
## proc = shared_memory_process(
## compute_func=compute_func,
## in_stream_names=['in'],
## out_stream_names=[],
## connect_sources=[('in', source)],
## connect_actuators=[],
## name='proc')
## mp = Multiprocess(processes=[proc], connections=[])
## mp.run()
def IPCA(k = 5,batches = 25):
    """Run streaming incremental PCA over the module-level ``source_list``.

    :param k: Number of principal components to keep.
    :param batches: Window and step size, i.e. how many samples are consumed
        per incremental update.
    """
    class IPCAValues(object):
        # Mutable holder for the running IPCA model state; the attribute
        # names mirror sklearn.decomposition.IncrementalPCA.
        def __init__(self,k):
            self.n_components_ = k
            self.n_samples_seen_ = 0
            self.components_ = None
            self.mean_ = .0
            self.var_ = .0
            self.singular_values_ = None
            self.explained_variance_ = None
            self.explained_variance_ratio_ = None
            self.singular_values_ = None
            self.noise_variance_ = None
            self.count = 0
    state = IPCAValues(k)
    def gh(in_stream):
        # Attach a sink agent that feeds non-overlapping windows of
        # `batches` samples into partial_fit.
        def partial_fit(X, state):
            """Incremental fit with X. All of X is processed as a single batch.

            Parameters
            ----------
            X : array-like, shape (n_samples, n_features)
                Training data, where n_samples is the number of samples and
                n_features is the number of features.

            Returns
            -------
            state : IPCAValues
                The updated running model state.
            """
            end = False
            print(X[0])
            # The literal 'eof' sentinel marks the end of the stream.
            if X[-1] == 'eof':
                end = True
                X= X[:-1]
            X = np.array(X,dtype = 'float64')
            n_samples, n_features = X.shape
            state.count += n_samples
            # Update stats - they are 0 if this is the first step
            col_mean, col_var, n_total_samples = \
                _incremental_mean_and_var(
                    X, last_mean=state.mean_, last_variance=state.var_,
                    last_sample_count=np.repeat(state.n_samples_seen_, X.shape[1]))
            n_total_samples = n_total_samples[0]
            # Whitening
            if state.n_samples_seen_ == 0:
                # If it is the first step, simply whiten X
                X -= col_mean
            else:
                col_batch_mean = np.mean(X, axis=0)
                X -= col_batch_mean
                # Build matrix of combined previous basis and new data
                mean_correction = \
                    np.sqrt((state.n_samples_seen_ * n_samples) /
                            n_total_samples) * (state.mean_ - col_batch_mean)
                X = np.vstack((state.singular_values_.reshape((-1, 1)) *
                               state.components_, X, mean_correction))
            U, S, V = linalg.svd(X, full_matrices=False)
            # Sign correction to make the SVD deterministic.
            U, V = svd_flip(U, V, u_based_decision=False)
            explained_variance = S ** 2 / (n_total_samples - 1)
            explained_variance_ratio = S ** 2 / np.sum(col_var * n_total_samples)
            state.n_samples_seen_ = n_total_samples
            state.components_ = V[:state.n_components_]
            state.singular_values_ = S[:state.n_components_]
            state.mean_ = col_mean
            state.var_ = col_var
            state.explained_variance_ = explained_variance[:state.n_components_]
            state.explained_variance_ratio_ = \
                explained_variance_ratio[:state.n_components_]
            if state.n_components_ < n_features:
                state.noise_variance_ = \
                    explained_variance[state.n_components_:].mean()
            else:
                state.noise_variance_ = 0.
            # NOTE(review): the 999 threshold looks tied to an expected
            # ~1000-sample stream -- confirm before reusing elsewhere.
            if state.count >= 999:
                state.count = 0
                print (state.components_)
            if end:
                # Persist the final components when the stream ends.
                np.savetxt("IPCA.dat", state.components_)
            return state
        sink_window(func = partial_fit, in_stream = in_stream,
                    window_size = batches, step_size=batches, state = state)
    s = Stream('s')
    gh(s)
    s.extend(source_list)
    run()
if __name__ == '__main__':
    # The old messages referred to a "reverberation test" copied from a
    # different example; they now describe what actually runs here.
    print('Running the incremental PCA (IPCA) test ... ')
    IPCA(k = 2,batches = 5)
    print('IPCA test done')
| {
"content_hash": "78fb55f5af3c144f23bfadff1c5692e5",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 100,
"avg_line_length": 33.63354037267081,
"alnum_prop": 0.5397968605724839,
"repo_name": "AssembleSoftware/IoTPy",
"id": "f0a50e639770d0f143cc946baf594fab5fbdb5b7",
"size": "5415",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/PCA/IPCA.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "373452"
},
{
"name": "Python",
"bytes": "786724"
}
],
"symlink_target": ""
} |
import flask
import functools
import redis
import time
from discograph import exceptions
redis_client = redis.StrictRedis()
def limit(max_requests=10, period=60):
    """Flask view decorator enforcing a fixed-window rate limit per
    endpoint + client IP, backed by a Redis counter.

    :param max_requests: Requests allowed per window.
    :param period: Window length in seconds (used as the Redis key TTL).
    Raises exceptions.RateLimitError once the budget is exhausted.
    """
    def decorator(f):
        @functools.wraps(f)
        def wrapped(*args, **kwargs):
            # For testing error handlers:
            # max_requests = 2
            key = 'ratelimit:{}:{}'.format(
                flask.request.endpoint,
                flask.request.remote_addr,
            )
            # get() returns None for a missing key, making int() raise
            # TypeError; the except branch then initialises the counter
            # with a fresh TTL.
            try:
                remaining = max_requests - int(redis_client.get(key))
            except (ValueError, TypeError):
                remaining = max_requests
                redis_client.setex(key, period, 0)
            ttl = redis_client.ttl(key)
            # NOTE(review): redis-py returns -1/-2 (not 0/None) for keys
            # without an expiry, which this falsy check would miss --
            # confirm intended behaviour against the deployed redis-py.
            if not ttl:
                redis_client.expire(key, period)
                ttl = period
            # Expose (limit, remaining, reset-epoch) so an after-request
            # hook can emit rate-limit response headers.
            flask.g.view_limits = (max_requests, remaining - 1, time.time() + ttl)
            if 0 < remaining:
                redis_client.incr(key, 1)
                # NOTE(review): debug print left in; consider logging.
                print(key, remaining, ttl)
                return f(*args, **kwargs)
            else:
                print(key, remaining, ttl)
                raise exceptions.RateLimitError()
        return wrapped
    return decorator
"content_hash": "be084bdae390fdb2d21a584eddc09015",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 82,
"avg_line_length": 26.382978723404257,
"alnum_prop": 0.5241935483870968,
"repo_name": "josiah-wolf-oberholtzer/discograph",
"id": "9d38b09b4535e28b31548ddec5848c73ddb49336",
"size": "1266",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "discograph/decorators.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "15028"
},
{
"name": "HTML",
"bytes": "21647"
},
{
"name": "JavaScript",
"bytes": "118075"
},
{
"name": "Makefile",
"bytes": "128"
},
{
"name": "Python",
"bytes": "322033"
}
],
"symlink_target": ""
} |
import bisect
def find_highest_lesser(item, seq):
    """Return the largest element of sorted *seq* strictly less than *item*,
    or None when no such element exists.

    Previously used bisect_right, which returned *item* itself whenever it
    was present in *seq* -- not a "lesser" element as the name promises.
    bisect_left yields the index of the first element >= *item*, so the
    element just before it is the strictly-lesser neighbour.

    :param item: The probe value.
    :param seq: A sequence sorted in ascending order.
    """
    index = bisect.bisect_left(seq, item)
    if index == 0:
        return None
    return seq[index - 1]
def find_lowest_higher(item, seq):
    """Return the smallest element of sorted *seq* strictly greater than
    *item*, or None when no such element exists.

    Fixes two bugs in the original: ``seq[index + 1]`` skipped the correct
    element and raised IndexError when the answer was the last element;
    bisect_right already gives the index of the first element > *item*.

    :param item: The probe value.
    :param seq: A sequence sorted in ascending order.
    """
    index = bisect.bisect_right(seq, item)
    if index == len(seq):
        return None
    return seq[index]
| {
"content_hash": "965970de36700e6f960ac0f2b6c2c1ae",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 40,
"avg_line_length": 21.733333333333334,
"alnum_prop": 0.6196319018404908,
"repo_name": "Bolt64/my_code",
"id": "32094ea90b28224aa8c0e885bf1c237364a21fa6",
"size": "350",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "other_contests/hackerrank/strange_function/strange_function.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "57094"
},
{
"name": "C++",
"bytes": "12255"
},
{
"name": "Haskell",
"bytes": "27215"
},
{
"name": "Jupyter Notebook",
"bytes": "18418"
},
{
"name": "Python",
"bytes": "308871"
},
{
"name": "Racket",
"bytes": "3888"
},
{
"name": "Rust",
"bytes": "22856"
},
{
"name": "Scala",
"bytes": "51026"
},
{
"name": "Shell",
"bytes": "514"
},
{
"name": "Vim script",
"bytes": "341"
}
],
"symlink_target": ""
} |
import logging
logger = logging.getLogger(__name__)
from objbrowser.utils import cut_off_str
# Maximum number of characters used in the __str__ method to represent the underlying object
MAX_OBJ_STR_LEN = 50
def name_is_special(method_name):
    """Return True when the name starts AND ends with a double underscore
    (i.e. a Python "dunder" name such as ``__init__``)."""
    has_prefix = method_name.startswith('__')
    return has_prefix and method_name.endswith('__')
class TreeItem(object):
    """ Tree node class that can be used to build trees of objects.
    """
    def __init__(self, obj, name, obj_path, is_attribute, parent=None):
        # The wrapped Python object and its display name/path.
        self.obj = obj
        self.obj_name = str(name)
        self.obj_path = str(obj_path)
        self.is_attribute = is_attribute
        self.parent_item = parent
        self.child_items = []
        # Lazy-loading bookkeeping: assume children until proven otherwise.
        self.has_children = True
        self.children_fetched = False

    def __str__(self):
        if self.child_items:
            return "<TreeItem(0x{:x}): {} ({:d} children)>" \
                .format(id(self.obj), self.obj_path, len(self.child_items))
        return "<TreeItem(0x{:x}): {} = {}>" \
            .format(id(self.obj), self.obj_path, cut_off_str(self.obj, MAX_OBJ_STR_LEN))

    def __repr__(self):
        return "<TreeItem(0x{:x}): {} ({:d} children)>" \
            .format(id(self.obj), self.obj_path, len(self.child_items))

    @property
    def is_special_attribute(self):
        " Return true if the items is an attribute and its name begins and end with 2 underscores"
        return self.is_attribute and name_is_special(self.obj_name)

    @property
    def is_callable_attribute(self):
        " Return true if the items is an attribute and it is callable."
        return self.is_attribute and self.is_callable

    @property
    def is_callable(self):
        " Return true if the underlying object is callable "
        return callable(self.obj)

    def append_child(self, item):
        # Re-parent on insertion so child.parent() stays consistent.
        item.parent_item = self
        self.child_items.append(item)

    def insert_children(self, idx, items):
        self.child_items[idx:idx] = items
        for inserted in items:
            inserted.parent_item = self

    def child(self, row):
        return self.child_items[row]

    def child_count(self):
        return len(self.child_items)

    def parent(self):
        return self.parent_item

    def row(self):
        if not self.parent_item:
            # Root items report row 0 by convention.
            return 0
        return self.parent_item.child_items.index(self)

    def pretty_print(self, indent=0):
        # Recursively log the subtree, one level of indent per depth.
        logger.debug(indent * " " + str(self))
        for child_item in self.child_items:
            child_item.pretty_print(indent + 1)
| {
"content_hash": "cd61827a1be890bd0d94d9f1308c69a6",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 99,
"avg_line_length": 30.548387096774192,
"alnum_prop": 0.5874692009855684,
"repo_name": "titusjan/objbrowser",
"id": "94c174aa00368a4132dc7bb09d230a5c1fe11d41",
"size": "3072",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "objbrowser/treeitem.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "92"
},
{
"name": "Python",
"bytes": "127266"
},
{
"name": "Shell",
"bytes": "176"
}
],
"symlink_target": ""
} |
from oslo_config import cfg
import oslo_messaging as messaging
from oslo_messaging import rpc
from oslo_messaging import target
CONF = cfg.CONF
TRANSPORT = None
class ApiClient(object):
    """RPC client for reporting deployment results back on the murano
    'results' queue.
    """

    def __init__(self, transport):
        results_target = target.Target('murano', 'results')
        self._client = rpc.RPCClient(transport, results_target, timeout=15)

    def process_result(self, result, environment_id):
        """Synchronously deliver a deployment *result* for *environment_id*."""
        return self._client.call(
            {}, 'process_result',
            result=result, environment_id=environment_id)
class EngineClient(object):
    """RPC client for dispatching work to the murano engine on the
    'tasks' queue.
    """

    def __init__(self, transport):
        tasks_target = target.Target('murano', 'tasks')
        self._client = rpc.RPCClient(transport, tasks_target, timeout=15)

    def handle_task(self, task):
        """Fire-and-forget dispatch of a task (cast, no reply expected)."""
        return self._client.cast({}, 'handle_task', task=task)

    def call_static_action(self, task):
        """Synchronously run a static-action task and return its result."""
        return self._client.call({}, 'call_static_action', task=task)

    def generate_schema(self, credentials, class_name, method_names=None,
                        class_version=None, package_name=None):
        """Synchronously ask the engine to build a JSON schema for a class."""
        return self._client.call(
            credentials, 'generate_schema',
            class_name=class_name,
            method_names=method_names,
            class_version=class_version,
            package_name=package_name)
def _shared_transport():
    """Create the process-wide messaging transport on first use."""
    global TRANSPORT
    if TRANSPORT is None:
        TRANSPORT = messaging.get_transport(CONF)
    return TRANSPORT


def api():
    """Return an ApiClient bound to the shared transport."""
    return ApiClient(_shared_transport())


def engine():
    """Return an EngineClient bound to the shared transport."""
    return EngineClient(_shared_transport())
| {
"content_hash": "04d52ebdee076dac03d7acde78fef453",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 74,
"avg_line_length": 29.017857142857142,
"alnum_prop": 0.6443076923076924,
"repo_name": "DavidPurcell/murano_temp",
"id": "1167e11bda93ff3014c3459f1cda6efa92e6e6ac",
"size": "2239",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "murano/common/rpc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "304"
},
{
"name": "Mako",
"bytes": "2026"
},
{
"name": "PowerShell",
"bytes": "2966"
},
{
"name": "Puppet",
"bytes": "86"
},
{
"name": "Python",
"bytes": "1758483"
},
{
"name": "Ruby",
"bytes": "444"
},
{
"name": "Shell",
"bytes": "34202"
}
],
"symlink_target": ""
} |
from selenium.webdriver.firefox.webdriver import WebDriver
import unittest
from group import Group
def is_alert_present(wd):
    """Return True if a JavaScript alert is currently open in *wd*.

    Probing the alert text raises when no alert exists.  The previous
    bare ``except:`` swallowed *every* exception (including
    KeyboardInterrupt/SystemExit); catch Exception instead.
    """
    try:
        wd.switch_to_alert().text
        return True
    except Exception:
        return False
class test_add_group(unittest.TestCase):
def setUp(self):
self.wd = WebDriver()
self.wd.implicitly_wait(60)
def open_home_page(self, wd):
wd.get("http://localhost/addressbook/")
def login(self, wd, username, password):
wd.find_element_by_name("user").click()
wd.find_element_by_name("user").clear()
wd.find_element_by_name("user").send_keys(username)
wd.find_element_by_name("pass").click()
wd.find_element_by_name("pass").clear()
wd.find_element_by_name("pass").send_keys("%s" % password)
wd.find_element_by_css_selector("input[type=\"submit\"]").click()
def open_groups_page(self, wd):
wd.find_element_by_link_text("groups").click()
def create_group(self, wd, group):
# init group creation
wd.find_element_by_name("new").click()
# fill group name
wd.find_element_by_name("group_name").click()
wd.find_element_by_name("group_name").clear()
wd.find_element_by_name("group_name").send_keys("%s" % group.name)
wd.find_element_by_name("group_header").click()
wd.find_element_by_name("group_header").clear()
wd.find_element_by_name("group_header").send_keys("%s" % group.header)
wd.find_element_by_name("group_footer").click()
wd.find_element_by_name("group_footer").clear()
wd.find_element_by_name("group_footer").send_keys("%s" % group.footer)
# submit group creation
wd.find_element_by_name("submit").click()
def return_to_groups_page(self, wd):
wd.find_element_by_link_text("group page").click()
def logout(self, wd):
wd.find_element_by_link_text("Logout").click()
def test_add_group(self):
wd = self.wd
self.open_home_page(wd)
self.login(wd, username="admin", password="secret")
self.open_groups_page(wd)
self.create_group(wd, Group(name="10", header="11", footer="12"))
self.return_to_groups_page(wd)
self.logout(wd)
def test_add_empty_group(self):
wd = self.wd
self.open_home_page(wd)
self.login(wd, username="admin", password="secret")
self.open_groups_page(wd)
self.create_group(wd, Group(name="", header="", footer=""))
self.return_to_groups_page(wd)
self.logout(wd)
def tearDown(self):
self.wd.quit()
# Run the tests when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "eba01f809fee524c63f1e4605bd81b71",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 78,
"avg_line_length": 35.05263157894737,
"alnum_prop": 0.6073573573573574,
"repo_name": "VladimirShvidler/python_training",
"id": "84a1fe8b6c7b884597b629054ec3f2b9ee202ecc",
"size": "2688",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test_add_group.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2853"
}
],
"symlink_target": ""
} |
from unittest import mock
from openstack import proxy
from openstack.network.v2 import floating_ip
from openstack.tests.unit import base
IDENTIFIER = 'IDENTIFIER'
# Canonical floating-IP attribute payload exercised by the tests below.
# Values are arbitrary placeholders; only their round-trip onto the
# resource's attributes is checked.
EXAMPLE = {
    'created_at': '0',
    'fixed_ip_address': '1',
    'floating_ip_address': '127.0.0.1',
    'floating_network_id': '3',
    'id': IDENTIFIER,
    'port_id': '5',
    'qos_policy_id': '51',
    'tenant_id': '6',
    'router_id': '7',
    'description': '8',
    'dns_domain': '9',
    'dns_name': '10',
    'status': 'ACTIVE',
    'revision_number': 12,
    'updated_at': '13',
    'subnet_id': '14',
    'tags': ['15', '16']
}
class TestFloatingIP(base.TestCase):
    """Unit tests for the FloatingIP resource definition and its
    ``find_available`` class helper.
    """

    def test_basic(self):
        fip = floating_ip.FloatingIP()
        self.assertEqual('floatingip', fip.resource_key)
        self.assertEqual('floatingips', fip.resources_key)
        self.assertEqual('/floatingips', fip.base_path)
        # Every CRUD-style operation is enabled for floating IPs.
        for allowed in (fip.allow_create, fip.allow_fetch, fip.allow_commit,
                        fip.allow_delete, fip.allow_list):
            self.assertTrue(allowed)

    def test_make_it(self):
        fip = floating_ip.FloatingIP(**EXAMPLE)
        # Most attributes mirror the EXAMPLE keys one-to-one; 'tenant_id'
        # is surfaced under the renamed 'project_id' attribute.
        attr_for_key = {key: key for key in EXAMPLE}
        attr_for_key['tenant_id'] = 'project_id'
        for key, attr in attr_for_key.items():
            self.assertEqual(EXAMPLE[key], getattr(fip, attr))

    def _session_returning(self, ip_dicts):
        # Build a proxy-shaped mock whose GET answers with the given list
        # of floating-IP dicts as a JSON body.
        session = mock.Mock(spec=proxy.Proxy)
        session.default_microversion = None
        response = mock.Mock()
        response.json = mock.Mock(
            return_value={floating_ip.FloatingIP.resources_key: ip_dicts})
        response.status_code = 200
        session.get = mock.Mock(return_value=response)
        return session

    def test_find_available(self):
        session = self._session_returning(
            [{'id': 'one', 'floating_ip_address': '10.0.0.1'}])
        session.get_filter = mock.Mock(return_value={})
        session.session = self.cloud.session

        found = floating_ip.FloatingIP.find_available(session)

        self.assertEqual('one', found.id)
        session.get.assert_called_with(
            floating_ip.FloatingIP.base_path,
            headers={'Accept': 'application/json'},
            params={},
            microversion=None)

    def test_find_available_nada(self):
        session = self._session_returning([])
        self.assertIsNone(floating_ip.FloatingIP.find_available(session))
| {
"content_hash": "2b5abcc7f5bed3845fa8d6649fc45e09",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 78,
"avg_line_length": 38.8494623655914,
"alnum_prop": 0.6324384168281206,
"repo_name": "stackforge/python-openstacksdk",
"id": "d443884b596559d9870c0d6ec1612c324cbf2fc3",
"size": "4159",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openstack/tests/unit/network/v2/test_floating_ip.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1138292"
},
{
"name": "Shell",
"bytes": "1792"
}
],
"symlink_target": ""
} |
"""
Provides classes for handling soap multirefs.
"""
from logging import getLogger
log = getLogger(__name__)
soapenc = (None, 'http://schemas.xmlsoap.org/soap/encoding/')
class MultiRef:
    """
    Resolves and replaces multirefs.
    @ivar nodes: A list of non-multiref nodes.
    @type nodes: list
    @ivar catalog: A dictionary of multiref nodes by id.
    @type catalog: dict
    """

    def __init__(self):
        self.nodes = []
        self.catalog = {}

    def process(self, body):
        """
        Process the specified soap envelope body and replace I{multiref} node
        references with the contents of the referenced node.
        @param body: A soap envelope body node.
        @type body: L{Element}
        @return: The processed I{body}
        @rtype: L{Element}
        """
        # Reset per-call state so the same MultiRef can process many bodies.
        self.nodes = []
        self.catalog = {}
        self.build_catalog(body)
        self.update(body)
        # Keep only the non-multiref roots; the multiref nodes themselves
        # have been folded into their referents and are dropped here.
        body.children = self.nodes
        return body

    def update(self, node):
        """
        Update the specified I{node} by replacing the I{multiref} references
        with the contents of the referenced nodes and remove the I{href}
        attribute.
        @param node: A node to update.
        @type node: L{Element}
        @return: The updated node
        @rtype: L{Element}
        """
        # Resolve this node first, then descend depth-first into children
        # (which now include any content merged in from the referent).
        self.replace_references(node)
        for c in node.children:
            self.update(c)
        return node

    def replace_references(self, node):
        """
        Replacing the I{multiref} references with the contents of the
        referenced nodes and remove the I{href} attribute. Warning: since
        the I{ref} is not cloned, its children and attributes end up shared
        by every node that references it.
        @param node: A node to update.
        @type node: L{Element}
        """
        href = node.getAttribute('href')
        if href is None:
            # Not a reference node; nothing to do.
            return
        id = href.getValue()
        ref = self.catalog.get(id)
        if ref is None:
            # Dangling reference: log it and leave the node untouched.
            log.error('soap multiref: %s, not-resolved', id)
            return
        node.append(ref.children)
        node.setText(ref.getText())
        for a in ref.attributes:
            # Copy everything except the 'id' marker attribute.
            if a.name != 'id':
                node.append(a)
        node.remove(href)

    def build_catalog(self, body):
        """
        Create the I{catalog} of multiref nodes by id and the list of
        non-multiref nodes.
        @param body: A soap envelope body node.
        @type body: L{Element}
        """
        for child in body.children:
            if self.soaproot(child):
                self.nodes.append(child)
            id = child.get('id')
            if id is None:
                # Not a multiref itself; recurse to find nested ones.
                self.build_catalog(child)
            else:
                # Key matches the '#<id>' form used by href values.
                key = '#%s' % id
                self.catalog[key] = child

    def soaproot(self, node):
        """
        Get whether the specified I{node} is a soap encoded root.
        This is determined by examining @soapenc:root='1'.
        The node is considered to be a root when the attribute
        is not specified.
        @param node: A node to evaluate.
        @type node: L{Element}
        @return: True if a soap encoded root.
        @rtype: bool
        """
        root = node.getAttribute('root', ns=soapenc)
        if root is None:
            # Attribute absent: treated as a root by default.
            return True
        else:
            return root.value == '1'
| {
"content_hash": "46e9ac73bb2d721f87c84d0d2b75a2c1",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 77,
"avg_line_length": 29.954954954954953,
"alnum_prop": 0.553984962406015,
"repo_name": "ronreiter/interactive-tutorials",
"id": "9f82e16aa9c59eb7727027f336f39c5f5e03c5b4",
"size": "4157",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "suds/bindings/multiref.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "58440"
},
{
"name": "Dockerfile",
"bytes": "304"
},
{
"name": "HTML",
"bytes": "38066"
},
{
"name": "JavaScript",
"bytes": "263446"
},
{
"name": "Makefile",
"bytes": "225"
},
{
"name": "Python",
"bytes": "634473"
}
],
"symlink_target": ""
} |
import os
import sys
import signal
import logging
from optparse import OptionParser
import crypt
from twisted.internet import reactor, ssl, task
from twisted.application.internet import TCPServer
from twisted.application.service import Application
from twisted.web import server, resource, http, guard, static
from twisted.web.util import redirectTo
from twisted.python import log
from OpenSSL import SSL
from twisted.cred.portal import Portal, IRealm
from twisted.cred.checkers import FilePasswordDB
from zope.interface import implements
for path in [
os.path.join('opt', 'qasino', 'lib'),
os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'lib'))
]:
if os.path.exists(os.path.join(path, '__init__.py')):
sys.path.insert(0, path)
break
from txzmq import ZmqFactory
import sql_receiver
import data_manager
import zmq_receiver
import zmq_requestor
import http_receiver
import http_receiver_ui
import zmq_publisher
import constants
import util
def signal_handler(signum, frame):
    """Log the received signal, shut down the data manager (if one has
    been created yet) and stop the Twisted reactor.

    :param signum: numeric signal received (e.g. signal.SIGTERM)
    :param frame: current stack frame (unused)
    """
    # Map signal numbers back to their SIG* names for readable logging.
    # (Python 2 dict-iteration API, matching the rest of this file.)
    sig_names = dict((number, name) for name, number in signal.__dict__.iteritems() if name.startswith('SIG'))
    logging.info("Caught %s. Exiting...", sig_names[signum])
    # Until __main__ rebinds it, 'data_manager' is still the imported
    # module (always truthy), so the old 'if data_manager:' guard could
    # raise AttributeError for a signal delivered during startup.  Guard
    # on the attribute instead.
    if data_manager is not None and hasattr(data_manager, 'shutdown'):
        data_manager.shutdown()
    reactor.stop()
if __name__ == "__main__":

    # Timestamped logging to the root logger; Twisted's own logging is
    # only enabled via the commented-out startLogging call below.
    logging.basicConfig(format="%(asctime)s %(message)s", datefmt="%Y-%m-%d %H:%M:%S",
                        level=logging.INFO)

    # Command-line options.
    parser = OptionParser()

    parser.add_option("-i", "--identity", dest="identity",
                      help="Use IDENTITY as identity", metavar="IDENTITY")

    parser.add_option("-f", "--db-file", dest="db_file",
                      help="Use FILE as the sqlite database", metavar="FILE")

    parser.add_option("-d", "--db-dir", dest="db_dir", default="/ramdisk/qasino/dbs",
                      help="Use DIR as the sqlite database", metavar="DIR")

    parser.add_option("-k", "--archive-db-dir", dest="archive_db_dir",
                      help="Save database files to DIR after finished (otherwise they are deleted).",
                      metavar="DIR")

    # NOTE(review): optparse passes command-line values through as strings;
    # the int default only applies when the option is omitted — confirm
    # DataManager accepts both.
    parser.add_option("-g", "--generation-duration", dest="generation_duration_s", default=30,
                      help="The length of a collection interval (generation) in seconds.",
                      metavar="SECONDS")

    parser.add_option("-v", "--views-file", dest="views_file", default='views.conf',
                      help="A file containing a list of views to create.", metavar="FILE")

    parser.add_option("-K", "--keys-dir", dest="keys_dir", default='/opt/qasino/etc/keys/',
                      help="Directory where server keys can be found.", metavar="DIR")

    parser.add_option("-p", "--htpasswd-file", dest="htpasswd_file",
                      default='/opt/qasino/etc/htpasswd',
                      help="Path to htpasswd file.", metavar="FILE")

    parser.add_option("-s", "--static-content-dir", dest="static_content_dir",
                      default='/opt/qasino/etc/htdocs/static',
                      help="Path to static content dir.", metavar="DIR")

    parser.add_option("-t", "--templates-dir", dest="templates_dir",
                      default='/opt/qasino/etc/htdocs/templates',
                      help="Path to template dir.", metavar="DIR")

    (options, args) = parser.parse_args()

    logging.info("Qasino server starting")

    if options.identity != None:
        util.Identity.set_identity(options.identity)

    logging.info("Identity is %s", util.Identity.get_identity())

    if not os.path.exists(options.db_dir):
        logging.info("Making directory: %s", options.db_dir)
        os.makedirs(options.db_dir)

    # Catch signals
    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)

    # For verbose adbapi logging...
    ##log.startLogging(sys.stdout)

    # Create a ZMQ factory
    zmq_factory = ZmqFactory()

    # Create a Pub/sub channel to blast out new generation signals.
    # NOTE(review): at this point 'data_manager' still names the imported
    # module; it is rebound to a DataManager instance just below — confirm
    # ZmqPublisher only stores the reference it is handed here.
    logging.info("Listening for ZeroMQ pub/sub clients on port %d.", constants.ZMQ_PUBSUB_PORT)
    zmq_publisher = zmq_publisher.ZmqPublisher(zmq_factory, constants.ZMQ_PUBSUB_PORT, data_manager)

    # Create a Data Manager instance that changes the sql backend's
    # pointers for which db is queried and which db is updated.
    # (This rebinds the module name 'data_manager' to the instance.)
    data_manager = data_manager.DataManager(options.db_file, db_dir=options.db_dir,
                                            signal_channel=zmq_publisher,
                                            archive_db_dir=options.archive_db_dir,
                                            generation_duration_s=options.generation_duration_s)

    def reread_views(views_file):
        # Reload view definitions whenever the file's mtime advances;
        # a missing/unreadable file is silently ignored until it appears.
        try:
            mtime = os.path.getmtime(views_file)
        except:
            return

        if reread_views.last_mtime < mtime:
            logging.info("Reading views file '%s'.", views_file)
            reread_views.last_mtime = mtime
            data_manager.read_views(views_file)

    # Function attribute tracks the last seen mtime between invocations.
    reread_views.last_mtime = 0

    if options.views_file != None:
        #data_manager.read_views(options.views_file)
        reread_views_task = task.LoopingCall(reread_views, options.views_file)
        reread_views_task.start(10.0)

    # Open a listener to receive SQL queries.
    logging.info("Listening for SQL queries on port %d", constants.SQL_PORT)
    reactor.listenTCP(constants.SQL_PORT, sql_receiver.SqlReceiverFactory(data_manager))

    # Create a listener for responding to http requests.
    logging.info("Listening for HTTP requests on port %d", constants.HTTP_PORT)

    http_root = resource.Resource()
    http_root.putChild("request", http_receiver.HttpReceiver(data_manager))
    http_root.putChild("static", static.File(options.static_content_dir))
    http_root.putChild("", http_receiver_ui.UIResourceTables(options.templates_dir, data_manager))
    http_root.putChild("tables", http_receiver_ui.UIResourceTables(options.templates_dir, data_manager))
    http_root.putChild("desc", http_receiver_ui.UIResourceDesc(options.templates_dir, data_manager))
    http_root.putChild("query", http_receiver_ui.UIResourceQuery(options.templates_dir, data_manager))

    # Use a channel subclass that routes request logging through our logger.
    http.HTTPFactory.protocol = http_receiver.MyLoggingHTTPChannel

    site = server.Site(http_root)
    reactor.listenTCP(constants.HTTP_PORT, site)

    logging.info("Listening for HTTPS requests on port %d", constants.HTTPS_PORT)

    class SimpleRealm(object):
        """
        A realm which gives out L{GuardedResource} instances for authenticated
        users.
        """
        implements(IRealm)

        def requestAvatar(self, avatarId, mind, *interfaces):
            # Serve the same resource tree as plain HTTP once authenticated.
            if resource.IResource in interfaces:
                return resource.IResource, http_root, lambda: None
            raise NotImplementedError()

    def cmp_pass(uname, password, storedpass):
        # htpasswd comparison: a 13-character hash is traditional DES
        # crypt; anything else is treated as an Apache MD5 ($apr1$) hash.
        sizeof_hash = len(storedpass)
        if sizeof_hash == 13:
            return crypt.crypt(password, storedpass[:2])
        else:
            return util.get_apache_md5(password, storedpass)

    checkers = [ FilePasswordDB(options.htpasswd_file, hash=cmp_pass) ]

    # Wrap the resource tree in HTTP basic auth for the SSL site only.
    wrapper = guard.HTTPAuthSessionWrapper( Portal(SimpleRealm(), checkers),
                                            [ guard.BasicCredentialFactory('qasino.com') ])

    ssl_site = server.Site(wrapper)

    try:
        if not os.path.isfile(options.htpasswd_file):
            raise Exception("htpasswd file '%s' does not exist" % options.htpasswd_file)

        reactor.listenSSL(constants.HTTPS_PORT,
                          ssl_site,
                          ssl.DefaultOpenSSLContextFactory(options.keys_dir + 'server.key',
                                                           options.keys_dir + 'server.crt')
                          )
    except Exception as e:
        # HTTPS is best-effort; the server keeps running without it.
        logging.info("Failed to listen on SSL port %d, continuing anyway (%s).",
                     constants.HTTPS_PORT, str(e))

    # If the http port isn't port 80 make port 80 redirect to SSL.
    if constants.HTTP_PORT != 80:

        class SimpleRedirect(resource.Resource):
            isLeaf = True

            def render_GET(self, request):
                return redirectTo('https://{}:{}'.format(request.getRequestHostname(), constants.HTTPS_PORT), request)

        try:
            reactor.listenTCP(80, server.Site(SimpleRedirect()))
            logging.info("Listening for HTTP requests on port 80 to redirect to 443")
        except Exception as e:
            logging.info("Warning: failed to listen for HTTP requests on port 80 to redirect to 443: {}".format(str(e)))

    # Create a listener for responding to ZeroMQ requests.
    logging.info("Listening for ZeroMQ rpc clients on port %d", constants.ZMQ_RPC_PORT)
    zmq_receiver = zmq_receiver.ZmqReceiver(constants.ZMQ_RPC_PORT, zmq_factory, data_manager)

    # For testing connect to ourselves...
    # zmq_requestor = zmq_requestor.ZmqRequestor('127.0.0.1', constants.ZMQ_RPC_PORT, zmq_factory, data_manager)

    # Request metadata at fixed intervals.
    # request_metadata_task = task.LoopingCall(zmq_requestor.request_metadata)
    # request_metadata_task.start(8.0)

    # Run the event loop
    reactor.run()

    logging.info("Qasino server exiting")
| {
"content_hash": "cfa8af4010a3eb70cc07df3feedc6fbc",
"timestamp": "",
"source": "github",
"line_count": 245,
"max_line_length": 120,
"avg_line_length": 37.86938775510204,
"alnum_prop": 0.6376374218581591,
"repo_name": "MediaMath/qasino",
"id": "21626e9cab15826b7825c121edb1035d5a962f3a",
"size": "9908",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bin/qasino_server.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "330"
},
{
"name": "Makefile",
"bytes": "527"
},
{
"name": "Python",
"bytes": "169951"
},
{
"name": "Shell",
"bytes": "314"
}
],
"symlink_target": ""
} |
import alsaaudio, time
from array import array
from itertools import izip_longest
def _chunks(n, iterable, padvalue=None):
return izip_longest(*[iter(iterable)]*n, fillvalue=padvalue)
class PlayFloatBuffer:
    """Splits a float sample buffer into fixed-size chunks and plays them
    through an ALSA PCM device (Python 2 / pyalsaaudio).
    """

    CHUNK = 32
    FORMAT = alsaaudio.PCM_FORMAT_FLOAT_LE
    CARD = "default"

    def __init__(self, buffer, framerate, chunk = CHUNK):
        """ Init audio stream """
        self.chunk = chunk
        if (len(buffer) % self.chunk):
            print("Warning: buffer length is not a multiple of {0}, padding with zeros".format(self.chunk))
        # Pre-pack every chunk of samples into raw little-endian float bytes.
        packed = []
        for piece in _chunks(self.chunk, buffer, 0.):
            packed.append(array('f', piece).tostring())
        self.buffer = packed
        self.pcm = alsaaudio.PCM(card=self.CARD)
        self.framerate = framerate
        self._setup()

    def _setup(self):
        # Mono, caller-supplied rate, 32-bit float LE samples, fixed period.
        pcm = self.pcm
        pcm.setchannels(1)
        pcm.setrate(self.framerate)
        pcm.setformat(self.FORMAT)
        pcm.setperiodsize(self.chunk)

    def play(self):
        """ Play entire buffer """
        for packed_chunk in self.buffer:
            # write() returns 0 while the device buffer is full: yield the
            # CPU and retry until the chunk is accepted.
            while self.pcm.write(packed_chunk) == 0:
                time.sleep(0)
if __name__=='__main__':
    # Demo: play a 600 Hz sine tone for two seconds at an 8 kHz sample rate.
    from math import sin, pi
    f = 2 * pi / 8000 * 600  # angular step per sample for a 600 Hz tone
    sine = [sin(f*x) for x in xrange(16000)]  # 16000 samples = 2 s @ 8 kHz
    PlayFloatBuffer(sine,8000).play()
| {
"content_hash": "299b5e5577a561979f9a28634f7c7dd5",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 101,
"avg_line_length": 29.121951219512194,
"alnum_prop": 0.6490787269681743,
"repo_name": "DominikAuras/pyUnnenberg",
"id": "e3fc0733e3bc6ab1c762990bf200686bc68d235a",
"size": "1241",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyUnnenberg/play_buffer.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "23944"
}
],
"symlink_target": ""
} |
"""
Django settings for postero project.
Generated by 'django-admin startproject' using Django 1.9.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Get the environment: 'Production' selects the Heroku/production profile,
# anything else (default 'Development') means local development.
ENV = os.environ.get('ENV', default='Development')

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))


# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# The hard-coded value is a development-only fallback; production must
# supply SECRET_KEY via the environment.
SECRET_KEY = os.environ.get('SECRET_KEY',
                            default='0mbp$m$ibl@b%mqd(6)zj8#hcks&u@e*zadt_nt)!ijsrdke99')

# SECURITY WARNING: don't run with debug turned on in production!
if ENV == 'Production':
    DEBUG = False
else:
    DEBUG = True

if ENV == 'Production':
    ALLOWED_HOSTS = ['portal-postero.herokuapp.com', 'portal.postero.com.br']
else:
    ALLOWED_HOSTS = []


# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'portal.apps.PortalConfig'
]

MIDDLEWARE_CLASSES = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'postero.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'postero.wsgi.application'


# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
# SQLite is the development default; production overrides it below
# from $DATABASE_URL.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

if ENV == 'Production':
    # Update database configuration with $DATABASE_URL.
    import dj_database_url
    db_from_env = dj_database_url.config(conn_max_age=500)
    DATABASES['default'].update(db_from_env)

# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'America/Fortaleza'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/

STATIC_URL = '/static/'

if ENV == 'Production':
    PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
    STATIC_ROOT = os.path.join(PROJECT_ROOT, 'staticfiles')

    # Extra places for collectstatic to find static files.
    # NOTE(review): STATICFILES_DIRS is only defined in production here;
    # confirm whether development is meant to define it as well.
    STATICFILES_DIRS = (
        os.path.join(PROJECT_ROOT, 'static'),
    )

# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
if ENV == 'Production':
    STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
| {
"content_hash": "a009fd5a6d9ae313aeba862f99a8defa",
"timestamp": "",
"source": "github",
"line_count": 151,
"max_line_length": 91,
"avg_line_length": 27.649006622516556,
"alnum_prop": 0.6905389221556886,
"repo_name": "PosteroCompany/postero.com.br",
"id": "4e8bb0825d302ed6d94ad4cb1bd236ce59483890",
"size": "4175",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "postero/postero/settings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "31652"
},
{
"name": "HTML",
"bytes": "15069"
},
{
"name": "JavaScript",
"bytes": "2217"
},
{
"name": "Python",
"bytes": "6285"
}
],
"symlink_target": ""
} |
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base.domain import Domain
from twilio.rest.verify.v2 import V2
class Verify(Domain):
    """Entry point for the Twilio Verify REST domain.

    Creates the v2 API version lazily and forwards its resource lists.
    """

    def __init__(self, twilio):
        """Initialize the Verify Domain.

        :returns: Domain for Verify
        :rtype: twilio.rest.verify.Verify
        """
        super(Verify, self).__init__(twilio)
        self.base_url = 'https://verify.twilio.com'
        # Versions: created on first access via the 'v2' property.
        self._v2 = None

    @property
    def v2(self):
        """Version v2 of verify, created lazily on first access.

        :rtype: twilio.rest.verify.v2.V2
        """
        if self._v2 is None:
            self._v2 = V2(self)
        return self._v2

    @property
    def forms(self):
        """:rtype: twilio.rest.verify.v2.form.FormList"""
        return self.v2.forms

    @property
    def safelist(self):
        """:rtype: twilio.rest.verify.v2.safelist.SafelistList"""
        return self.v2.safelist

    @property
    def services(self):
        """:rtype: twilio.rest.verify.v2.service.ServiceList"""
        return self.v2.services

    @property
    def templates(self):
        """:rtype: twilio.rest.verify.v2.template.TemplateList"""
        return self.v2.templates

    @property
    def verification_attempts(self):
        """:rtype: twilio.rest.verify.v2.verification_attempt.VerificationAttemptList"""
        return self.v2.verification_attempts

    @property
    def verification_attempts_summary(self):
        """:rtype: twilio.rest.verify.v2.verification_attempts_summary.VerificationAttemptsSummaryList"""
        return self.v2.verification_attempts_summary

    def __repr__(self):
        """Provide a friendly representation.

        :rtype: str
        """
        return '<Twilio.Verify>'
| {
"content_hash": "9b89990743b00e9e66f1ad3350a36d4a",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 99,
"avg_line_length": 22.804597701149426,
"alnum_prop": 0.5554435483870968,
"repo_name": "twilio/twilio-python",
"id": "1dae555fd8f5d0e945a731fc31276c5e9888046e",
"size": "1999",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "twilio/rest/verify/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "234"
},
{
"name": "Makefile",
"bytes": "2157"
},
{
"name": "Python",
"bytes": "11241545"
}
],
"symlink_target": ""
} |
import sys
import copy
import rospy
import moveit_commander
import moveit_msgs.msg
import geometry_msgs.msg
if __name__ == '__main__':
    # Boot a MoveIt commander session and print a random reachable pose
    # for the "arm" planning group (Python 2 / ROS script).
    moveit_commander.roscpp_initialize(sys.argv)
    rospy.init_node('move_group_python_interface_tutorial',
                    anonymous=True)

    robot = moveit_commander.RobotCommander()  # whole-robot model/state interface
    scene = moveit_commander.PlanningSceneInterface()  # planning-scene access
    group = moveit_commander.MoveGroupCommander("arm")  # commander for the 'arm' group

    # Publisher RViz subscribes to for displaying planned trajectories.
    display_trajectory_publisher = rospy.Publisher(
        '/move_group/display_planned_path',
        moveit_msgs.msg.DisplayTrajectory)

    print "============ Printing robot state"
    # print robot.get_current_state()
    print "============"

    print group.get_random_pose()
| {
"content_hash": "54911c4b27c1f35a8300f892dc8637e0",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 59,
"avg_line_length": 31.304347826086957,
"alnum_prop": 0.6763888888888889,
"repo_name": "pouyaAB/ros_teleoperate",
"id": "f23565be43c973b6f8fd3b0865a65f6e12ba14b7",
"size": "720",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mico_controller/scripts/RNN_control.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "506306"
},
{
"name": "CMake",
"bytes": "6273"
},
{
"name": "Python",
"bytes": "64583"
}
],
"symlink_target": ""
} |
from django.urls import re_path
from .views import (
ArchivedView,
CaseCRUDL,
CaseExportCRUDL,
ClosedCasesView,
FlaggedView,
InboxView,
OpenCasesView,
PartnerCRUDL,
PingView,
SentView,
StatusView,
UnlabelledView,
)
# Routes generated by the CRUDL controllers for cases, case exports and
# partners, followed by the hand-written folder views and the internal
# status/ping monitoring endpoints.
urlpatterns = (
    CaseCRUDL().as_urlpatterns()
    + CaseExportCRUDL().as_urlpatterns()
    + PartnerCRUDL().as_urlpatterns()
)

urlpatterns += [
    re_path(r"^$", InboxView.as_view(), name="cases.inbox"),
    re_path(r"^flagged/$", FlaggedView.as_view(), name="cases.flagged"),
    re_path(r"^archived/$", ArchivedView.as_view(), name="cases.archived"),
    re_path(r"^unlabelled/$", UnlabelledView.as_view(), name="cases.unlabelled"),
    re_path(r"^sent/$", SentView.as_view(), name="cases.sent"),
    re_path(r"^open/$", OpenCasesView.as_view(), name="cases.open"),
    re_path(r"^closed/$", ClosedCasesView.as_view(), name="cases.closed"),
    re_path(r"^status$", StatusView.as_view(), name="internal.status"),
    re_path(r"^ping$", PingView.as_view(), name="internal.ping"),
]
| {
"content_hash": "0b8e145eb0dced43fe2381390119f61f",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 81,
"avg_line_length": 33.21875,
"alnum_prop": 0.6613358419567262,
"repo_name": "rapidpro/casepro",
"id": "7b762a7ccc4c6ea5c0d815b33b937a8001b3a411",
"size": "1063",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "casepro/cases/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CoffeeScript",
"bytes": "196784"
},
{
"name": "HTML",
"bytes": "10550"
},
{
"name": "Haml",
"bytes": "98371"
},
{
"name": "Less",
"bytes": "3180"
},
{
"name": "PLpgSQL",
"bytes": "4274"
},
{
"name": "Python",
"bytes": "914926"
},
{
"name": "Shell",
"bytes": "816"
}
],
"symlink_target": ""
} |
from kivy.clock import Clock
from kivy.event import EventDispatcher
from kivy.logger import Logger
from functools import partial
from jnius import autoclass, PythonJavaClass, java_method, cast
from android import activity
from android.runnable import run_on_ui_thread

# Java classes reflected at runtime through pyjnius.
Builder = autoclass('android.app.AlertDialog$Builder')
DialogFragment = autoclass('android.support.v4.app.DialogFragment')
String = autoclass('java.lang.String')
# The current Android Activity hosting the Kivy app (python-for-android runtime).
context = autoclass('org.renpy.android.PythonActivity').mActivity
class _OnClickListener(PythonJavaClass):
    """pyjnius proxy implementing DialogInterface.OnClickListener.

    Forwards the Java-side click event to the supplied Python callable.
    """

    __javainterfaces__ = ['android.content.DialogInterface$OnClickListener', ]
    __javacontext__ = 'app'

    def __init__(self, action):
        super(_OnClickListener, self).__init__()
        # Python callable invoked when the Java dialog button is pressed.
        self.action = action

    @java_method('(Landroid/content/DialogInterface;I)V')
    def onClick(self, dialog, which):
        self.action()
class AndroidDialog(EventDispatcher):
    """Native Android yes/no alert dialog driven from Kivy.

    Builds an ``AlertDialog`` on the Android UI thread and reports the
    user's choice through ``callback(True/False)``.

    :param callback: callable invoked with True (positive button) or
        False (negative button); may be None, in which case answers are
        ignored.
    :param action_name: label for the positive button.
    :param cancel_name: label for the negative button.
    :param text: dialog body text.
    :param title: dialog title.
    """

    __events__ = ('on_dismiss',)

    def __init__(self,
                 callback,
                 action_name='okay',
                 cancel_name='cancel',
                 text='Are you sure?',
                 title='Alert!',
                 **kwargs):
        # Bug fix: EventDispatcher.__init__ must run so the events declared
        # in __events__ are registered; previously it was never called and
        # **kwargs was silently discarded.
        super(AndroidDialog, self).__init__(**kwargs)
        self.callback = callback if callback else lambda *args: None
        self.title = title
        self.text = text
        self.action_name = action_name
        self.cancel_name = cancel_name

    def answer(self, yesno):
        '''Callbacks in prompts that open prompts lead to errant clicks'''
        # Clock.schedule_once(lambda dt: self.callback(yesno), 1/30.0)
        self.callback(yesno)

    @run_on_ui_thread
    def open(self):
        '''Create and show the dialog; must run on the Android UI thread.'''
        # using dialog builder. simplest way
        builder = self.builder = Builder(
            cast('android.app.Activity', context))
        builder.setMessage(String(self.text))
        builder.setTitle(String(self.title))
        # Keep references to the listeners so they are not garbage-collected
        # while the Java dialog still holds them.
        self.positive = _OnClickListener(partial(self.answer, True))
        self.negative = _OnClickListener(partial(self.answer, False))
        builder.setPositiveButton(String(self.action_name),
                                  self.positive)
        builder.setNegativeButton(String(self.cancel_name),
                                  self.negative)
        self.dialog = builder.create()
        self.dialog.show()

    def dismiss(self):
        """Fire the on_dismiss event."""
        self.dispatch('on_dismiss')

    def on_dismiss(self):
        # guessing fragment activity will get back button
        # instead of app like my usual setup
        pass
| {
"content_hash": "05e5b5404a6133d982c38e77773c5742",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 77,
"avg_line_length": 34.013513513513516,
"alnum_prop": 0.6249503377036154,
"repo_name": "knappador/kivy-dialog",
"id": "0cbf8e8c38448fa1f40dbb1014358350cc1a23c8",
"size": "2517",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/dialog/androiddialog.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6096"
},
{
"name": "Shell",
"bytes": "782"
}
],
"symlink_target": ""
} |
"""Archive each per-run log directory under the DR7 logs tree as a tarball."""
import glob
import os
import subprocess
import sys

# Location of the per-run log directories to archive.
LOGS_DIR = "/global/projecta/projectdirs/cosmo/work/legacysurvey/dr7-attic/logs/"


def main():
    os.chdir(LOGS_DIR)
    for dirpath in glob.glob("./*"):
        # basename is robust, unlike slicing off a hard-coded "./" prefix.
        dirname = os.path.basename(dirpath)
        filename = "legacysurvey_dr7_logs_" + dirname + ".tar.gz"
        # List-form invocation avoids shell quoting issues in directory names.
        subprocess.call(["tar", "-czvf", filename, dirname])


if __name__ == "__main__":
    main()
| {
"content_hash": "b5a7812acd9d02cfa7708a096f07f90e",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 80,
"avg_line_length": 24.285714285714285,
"alnum_prop": 0.6617647058823529,
"repo_name": "legacysurvey/legacypipe",
"id": "3ae5d1ce14236e247425d54b96acfa6e7cba9632",
"size": "340",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "bin/tar-logfiles.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "22009"
},
{
"name": "Jupyter Notebook",
"bytes": "63901618"
},
{
"name": "Makefile",
"bytes": "2283"
},
{
"name": "Python",
"bytes": "3506201"
},
{
"name": "Shell",
"bytes": "218351"
}
],
"symlink_target": ""
} |
import datetime
import threading
import json
import hashlib
import time
import codecs
import urllib
import os
import io
import random
import re
import string
import sys
import unicodedata
import logging
import roman
import requests
from num2words import num2words
from fuzzywuzzy import fuzz, process
from ConfigParser import SafeConfigParser
from .cache import KodiCache
# Module-wide logger.
log = logging.getLogger(__name__)

# Canned JSON-RPC "sort" descriptors passed to Kodi list queries.
SORT_RATING = {"method": "rating", "order": "descending"}
SORT_RANDOM = {"method": "random", "order": "descending"}
SORT_YEAR = {"method": "year", "order": "descending"}
SORT_TITLE = {"method": "title", "order": "ascending"}
SORT_DATEADDED = {"method": "dateadded", "order": "descending"}
SORT_LASTPLAYED = {"method": "lastplayed", "order": "descending"}
SORT_EPISODE = {"method": "episode", "order": "ascending"}

# Playcount-based filters: "unwatched" is playcount < 1, "watched" is playcount != 0.
FILTER_UNWATCHED = {"operator": "lessthan", "field": "playcount", "value": "1"}
FILTER_WATCHED = {"operator": "isnot", "field": "playcount", "value": "0"}

# For recommendations, we sort (not filter) the results on rating and then
# choose a random item from the results. To keep the quality high, we want to
# limit the number of results before picking a random item.
LIMIT_RECOMMENDED_MOVIES = (0, 40)
LIMIT_RECOMMENDED_SHOWS = (0, 20)
LIMIT_RECOMMENDED_EPISODES = (0, 100)
LIMIT_RECOMMENDED_MUSICVIDEOS = (0, 40)
LIMIT_RECOMMENDED_ARTISTS = (0, 20)
LIMIT_RECOMMENDED_ALBUMS = (0, 40)
LIMIT_RECOMMENDED_SONGS = (0, 100)
def sanitize_name(media_name, normalize=True):
    """Sanitize a media title for use as an Alexa slot value.

    Optionally ASCII-folds the name, strips characters Amazon disallows in
    slot values, and truncates to the 140-character slot limit on a word
    boundary.

    :param media_name: the title to clean
    :param normalize: when True, NFKD-normalize and drop non-ASCII characters
    :returns: the sanitized title
    """
    if normalize:
        try:
            # Fold accented characters to their ASCII base form.  Decode back
            # to text: .encode() yields bytes, and the regex below operates
            # on text (fixes a bytes/str TypeError under Python 3).
            name = unicodedata.normalize('NFKD', media_name).encode('ASCII', 'ignore').decode('ASCII')
        except:
            name = media_name
    else:
        name = media_name
    # Remove invalid characters, per Amazon:
    # Slot type values can contain alphanumeric characters, spaces, commas,
    # apostrophes, periods, hyphens, ampersands and the @ symbol only.
    name = re.sub(r'[`~!#$%^*()_=+\[\]{}\\|;:"<>/?]', '', name)
    # Slot items cannot exceed 140 chars, per Amazon
    if len(name) > 140:
        # Cut at the limit, then back off to the previous word boundary.
        name = name[:140].rsplit(' ', 1)[0]
    name = name.strip()
    return name
# Remove extra slashes
def http_normalize_slashes(url):
    """Collapse duplicate slashes in *url* and ensure an http scheme.

    Empty path segments are dropped, an "http:" scheme is prepended when
    the first segment carries no http scheme, and the segments are
    rejoined with single slashes (the scheme keeps its double slash).
    """
    parts = [piece for piece in str(url).split('/') if piece != '']
    if str(parts[0]).find('http') == -1:
        parts = ['http:'] + parts
    # Re-add the second slash of the "scheme://" separator.
    parts[0] = parts[0] + '/'
    return '/'.join(parts)
def RPCString(method, params=None, sort=None, filters=None, fields=None, limits=None, filtertype=None):
    """Serialize a Kodi JSON-RPC 2.0 request to a JSON string.

    :param method: JSON-RPC method name
    :param params: base params dict (augmented in place with the options below)
    :param sort: sort descriptor dict
    :param filters: list of filter dicts; combined under *filtertype*
        ("and" by default) when more than one is given
    :param fields: list of property names to request
    :param limits: (start, end) tuple limiting the result window
    :param filtertype: "and"/"or" combinator for multiple filters
    :returns: the request encoded with json.dumps
    """
    rpc_params = params if params else {}
    if sort:
        rpc_params["sort"] = sort
    if filters:
        combinator = filtertype if filtertype else "and"
        if len(filters) > 1:
            rpc_params["filter"] = {combinator: filters}
        else:
            # A single filter is passed through without a combinator.
            rpc_params["filter"] = filters[0]
    if fields:
        rpc_params["properties"] = fields
    if limits:
        rpc_params["limits"] = {"start": limits[0], "end": limits[1]}
    request = {"jsonrpc": "2.0", "method": method, "id": 1, "params": rpc_params}
    return json.dumps(request)
# Replace digits with word-form numbers.
def digits2words(phrase, lang='en'):
    """Spell out any purely-numeric tokens in *phrase* (e.g. "2" -> "two")."""
    converted = []
    for token in phrase.split():
        if token.isnumeric():
            token = num2words(float(token), lang=lang)
        converted.append(token)
    return " ".join(converted)
# Replace word-form numbers with digits.
def words2digits(phrase, lang='en'):
    """Convert spelled-out numbers in *phrase* to digit strings.

    Number-word tables are loaded from NUMWORDS.<lang>.txt, a pipe-delimited
    file with rows labelled connectors/units/tens/scales; each known word
    maps to a (scale, increment, level) tuple used by the accumulator below.

    NOTE(review): Python 2 only as written — relies on str.decode() and
    sys.maxint.
    """
    numwords = {}
    # The word table ships next to this module, one file per language.
    numwords_file = os.path.join(os.path.dirname(__file__), "NUMWORDS." + lang + ".txt")
    f = codecs.open(numwords_file, 'rb', 'utf-8')
    for line in f:
        l = line.encode("utf-8").strip().split('|')
        if l[0] == 'connectors':
            # e.g. "and": contributes nothing but keeps the number running.
            connectors = l[1:]
            for words in connectors:
                for word in words.strip().split():
                    numwords[word.decode('utf-8')] = (1, 0, 0)
        if l[0] == 'units':
            # zero..nineteen: increment equals the column index.
            units = l[1:]
            for idx, words in enumerate(units):
                for word in words.strip().split():
                    numwords[word.decode('utf-8')] = (1, idx, 1)
        if l[0] == 'tens':
            # twenty, thirty, ...: increment is idx * 10.
            tens = l[1:]
            for idx, words in enumerate(tens):
                for word in words.strip().split():
                    numwords[word.decode('utf-8')] = (1, idx * 10, 2)
        if l[0] == 'scales':
            # hundred, thousand, million, ...: multiplicative scales.
            scales = l[1:]
            for idx, words in enumerate(scales):
                for word in words.strip().split():
                    numwords[word.decode('utf-8')] = (10 ** (idx * 3 or 2), 0, 3)
    f.close()
    wordified = ''
    # current/result accumulate the in-progress number; prev_level tracks the
    # magnitude class of the previous word so runs of unit words are split.
    current = result = 0
    prev_level = sys.maxint
    in_number = False
    # Treat hyphenated numbers ("twenty-one") as separate words.
    phrase = re.sub(r'[-]', ' ', phrase)
    for word in phrase.split():
        if word not in numwords:
            # Non-number word: flush any accumulated number first.
            if in_number:
                wordified = wordified + str(current + result) + " "
                current = result = 0
                prev_level = sys.maxint
                in_number = False
            wordified = wordified + word + " "
        else:
            in_number = True
            scale, increment, level = numwords[word]
            # Handle things like "nine o two one o" (9 0 2 1 0)
            if level == prev_level == 1:
                wordified = wordified + str(current) + " "
                current = result = 0
            prev_level = level
            # account for things like "hundred fifty" vs "one hundred fifty"
            if scale >= 100 and current == 0:
                current = 1
            current = current * scale + increment
            if scale > 100:
                # Bank completed thousands/millions groups into the result.
                result += current
                current = 0
    # Flush a number that ends the phrase.
    if in_number:
        wordified = wordified + str(current + result) + " "
    return wordified[:-1]
# Replace digits with roman numerals.
def digits2roman(phrase, lang='en'):
    """Convert numeric tokens in *phrase* to Roman numerals (e.g. "4" -> "IV")."""
    pieces = []
    for token in phrase.split():
        if token.isnumeric():
            token = roman.toRoman(int(token))
        pieces.append(token)
    return " ".join(pieces)
# Replace word-form numbers with roman numerals.
def words2roman(phrase, lang='en'):
    """Convert spelled-out numbers in *phrase* to Roman numerals."""
    as_digits = words2digits(phrase, lang=lang)
    return digits2roman(as_digits, lang=lang)
# Provide a map from ISO code (both bibliographic and terminologic)
# in ISO 639-2 to a dict with the two letter ISO 639-2 codes (alpha2)
# English and french names
#
# "bibliographic" iso codes are derived from English word for the language
# "terminologic" iso codes are derived from the pronunciation in the target
# language (if different to the bibliographic code)
#
# Source
# http://stackoverflow.com/questions/2879856/get-system-language-in-iso-639-3-letter-codes-in-python/2879958#2879958
#
# Usage
# country_dic = getisocodes_dict()
# print country_dic['eng']
def getisocodes_dict():
    """Load ISO-639-2 language data into a lookup dict.

    Returns a dict keyed by bibliographic, terminologic and alpha2 codes,
    each mapping to the same record holding English/French/German names.
    Empty columns are normalized to None.
    """
    table = {}
    country_dic_file = os.path.join(os.path.dirname(__file__), "ISO-639-2_utf-8.txt")
    f = codecs.open(country_dic_file, 'rb', 'utf-8')
    for line in f:
        record = {}
        (record['bibliographic'], record['terminologic'], record['alpha2'],
         record['en'], record['fr'], record['de']) = line.encode("utf-8").strip().split('|')
        table[record['bibliographic']] = record
        # Alternate codes (when present) alias the same record.
        if record['terminologic']:
            table[record['terminologic']] = record
        if record['alpha2']:
            table[record['alpha2']] = record
        for key in record:
            # Assign `None` when columns not available from the data
            record[key] = record[key] or None
    f.close()
    return table
class KodiConfigParser(SafeConfigParser):
    """Config parser that can fall back to environment variables.

    Defaults are seeded from the bundled kodi.config.example.  When
    *config_file* does not exist (typical for Heroku-style deployments),
    configuration is pulled from environment variables instead; otherwise
    the file itself is read.
    """

    # (environment variable, config section, option) triples mirrored into
    # the parser when no config file is present.  A value that is unset or
    # the literal string 'None' leaves the example default in place.
    _ENV_OPTIONS = [
        ('KODI_SCHEME', 'DEFAULT', 'scheme'),
        ('KODI_SUBPATH', 'DEFAULT', 'subpath'),
        ('KODI_ADDRESS', 'DEFAULT', 'address'),
        ('KODI_PORT', 'DEFAULT', 'port'),
        ('KODI_USERNAME', 'DEFAULT', 'username'),
        ('KODI_PASSWORD', 'DEFAULT', 'password'),
        ('CACHE_BUCKET', 'DEFAULT', 'cache_bucket'),
        ('S3_CACHE_AWS_ACCESS_KEY_ID', 'DEFAULT', 's3_cache_aws_access_key_id'),
        ('S3_CACHE_AWS_SECRET_ACCESS_KEY', 'DEFAULT', 's3_cache_aws_secret_access_key'),
        ('OWNCLOUD_CACHE_URL', 'DEFAULT', 'owncloud_cache_url'),
        ('OWNCLOUD_CACHE_USER', 'DEFAULT', 'owncloud_cache_user'),
        ('OWNCLOUD_CACHE_PASSWORD', 'DEFAULT', 'owncloud_cache_password'),
        ('READ_TIMEOUT', 'DEFAULT', 'read_timeout'),
        ('READ_TIMEOUT_ASYNC', 'DEFAULT', 'read_timeout_async'),
        ('SHUTDOWN_MEANS_QUIT', 'DEFAULT', 'shutdown'),
        ('SKILL_TZ', 'DEFAULT', 'timezone'),
        ('ACCEPT_MUSIC_WARNING', 'DEFAULT', 'accept_music_warning'),
        ('USE_PROXY', 'DEFAULT', 'use_proxy'),
        ('ALT_PROXY', 'DEFAULT', 'alt_proxy'),
        ('MONGODB_URI', 'DEFAULT', 'mongodb_uri'),
        ('DEEP_SEARCH', 'global', 'deep_search'),
        ('PLAYLIST_ITEMS', 'global', 'playlist_max_items'),
        ('MAX_UNWATCHED_SHOWS', 'global', 'unwatched_shows_max_results'),
        ('MAX_UNWATCHED_EPISODES', 'global', 'unwatched_episodes_max_results'),
        ('MAX_UNWATCHED_MOVIES', 'global', 'unwatched_movies_max_results'),
        ('SKILL_LOGLEVEL', 'global', 'loglevel'),
        ('SKILL_APPID', 'alexa', 'skill_id'),
        ('slot_items_max', 'alexa', 'slot_items_max'),
    ]

    def __init__(self, config_file=None, *args, **kwargs):
        SafeConfigParser.__init__(self, *args, **kwargs)

        # Seed the default values from the example
        self.config_file = os.path.join(os.path.dirname(__file__), "kodi.config.example")
        self.read(self.config_file)

        if not os.path.isfile(config_file):
            # No config file: fill out the rest of the config based on
            # environment variables (previously one hand-written stanza per
            # option; a table keeps the mapping in one place).
            for env_name, section, option in self._ENV_OPTIONS:
                value = os.getenv(env_name)
                if value and value != 'None':
                    self.set(section, option, value)

            # LANGUAGE falls back to English rather than the example default.
            LANGUAGE = os.getenv('LANGUAGE', 'en')
            if LANGUAGE and LANGUAGE != 'None':
                self.set('global', 'language', LANGUAGE)

            # unconditionally disable this as Heroku deployments don't need to see the device IDs,
            # since we don't support device mapping with Heroku.
            self.set('alexa', 'logsensitive', 'no')
        else:
            self.config_file = config_file
            self.read(self.config_file)
class Kodi:
    """Thin client for a Kodi media center's JSON-RPC interface."""

    def __init__(self, config=None, context=None):
        """Bind the client to the device-specific configuration section.

        :param config: a KodiConfigParser instance
        :param context: Alexa request context (source of the device id)
        """
        self.config = config
        self.config_error = False

        # When testing from the web simulator there is no context object (04/2017)
        try:
            self.deviceId = context.System.device.deviceId
        except:
            self.deviceId = 'Unknown Device'

        # Per-device configuration section, falling back to DEFAULT.
        if self.config.has_section(self.deviceId):
            self.dev_cfg_section = self.deviceId
        else:
            self.dev_cfg_section = 'DEFAULT'

        self.language = self.config.get('global', 'language').lower()

        self.playlist_limit = self.config.get('global', 'playlist_max_items')
        if self.playlist_limit and self.playlist_limit != 'None':
            self.playlist_limit = int(self.playlist_limit)
        else:
            # No configured cap: effectively unlimited.
            self.playlist_limit = sys.maxint
        self.max_unwatched_shows = int(self.config.get('global', 'unwatched_shows_max_results'))
        self.max_unwatched_episodes = int(self.config.get('global', 'unwatched_episodes_max_results'))
        self.max_unwatched_movies = int(self.config.get('global', 'unwatched_movies_max_results'))
        self.logsensitive = self.config.getboolean('global', 'logsensitive')

        # Pre-seed the connection attributes so a failed lookup below cannot
        # leave them undefined: the validity check used to raise
        # AttributeError instead of flagging config_error.
        self.scheme = self.subpath = self.address = self.port = None
        self.username = self.password = None
        self.read_timeout = self.read_timeout_async = None
        try:
            self.scheme = self.config.get(self.dev_cfg_section, 'scheme')
            self.subpath = self.config.get(self.dev_cfg_section, 'subpath')
            self.address = self.config.get(self.dev_cfg_section, 'address')
            self.port = self.config.get(self.dev_cfg_section, 'port')
            self.username = self.config.get(self.dev_cfg_section, 'username')
            self.password = self.config.get(self.dev_cfg_section, 'password')
            self.read_timeout = float(self.config.get(self.dev_cfg_section, 'read_timeout'))
            self.read_timeout_async = float(self.config.get(self.dev_cfg_section, 'read_timeout_async'))
        except:
            self.config_error = True
        if not self.scheme or not self.address or not self.port or not self.username or not self.password:
            self.config_error = True

        # Response-cache backends (S3 and/or ownCloud); unset options map to None.
        cache_bucket = self._cfg_or_none('cache_bucket')
        s3_cache_key_id = self._cfg_or_none('s3_cache_aws_access_key_id')
        s3_cache_key = self._cfg_or_none('s3_cache_aws_secret_access_key')
        oc_cache_url = self._cfg_or_none('owncloud_cache_url')
        oc_cache_user = self._cfg_or_none('owncloud_cache_user')
        oc_cache_pass = self._cfg_or_none('owncloud_cache_password')
        self.cache = KodiCache(cache_bucket,
                               aws_access_key_id=s3_cache_key_id, aws_secret_access_key=s3_cache_key,
                               oc_url=oc_cache_url, oc_user=oc_cache_user, oc_password=oc_cache_pass)
        try:
            # On a successful cache hit, this variable tells the skill to fetch a fresh
            # copy from Kodi in the background on a worker thread.
            #
            # This is an undocumented/hidden option because a) it provides minimal real
            # value, b) can be a source of confusion for the user, and c) doesn't work
            # on most cloud deployments because the main thread terminates before the
            # worker thread completes.
            self.cache_bg_update = self.config.getboolean(self.dev_cfg_section, 'cache_bg_update')
        except:
            self.cache_bg_update = False

    def _cfg_or_none(self, option):
        """Read a device-section option, mapping ''/'None' to None."""
        value = self.config.get(self.dev_cfg_section, option)
        if not value or value == 'None':
            return None
        return value
    # Construct the JSON-RPC message and send it to the Kodi player
    def SendCommand(self, command, wait_resp=True, cache_resp=False):
        """Send a JSON-RPC *command* string to Kodi, optionally via the cache.

        :param command: serialized JSON-RPC request (see RPCString)
        :param wait_resp: when False, use a tiny read timeout to fake an
            asynchronous fire-and-forget call
        :param cache_resp: when True (and caching is enabled), serve/store
            the response through the KodiCache backend
        :returns: the response object (from cache or from Kodi)
        """
        # Join the configuration variables into a url
        url = "%s://%s:%s/%s/%s" % (self.scheme, self.address, self.port, self.subpath, 'jsonrpc')

        # Remove any double slashes in the url
        url = http_normalize_slashes(url)

        log.info('Received request from device %s', self.deviceId if self.logsensitive else '[hidden]')
        log.info('Sending request to %s', url if self.logsensitive else '[hidden]')
        log.debug(command)

        timeout = (10, self.read_timeout)
        if not wait_resp:
            # set the read timeout (the second value here) to something really small
            # to 'fake' a non-blocking call. we want the connect and transmit to
            # block, but just ignore the response from Kodi.
            timeout = (10, self.read_timeout_async)

        # Try to fetch from cache
        r = None
        cache_file = None
        if self.cache.enabled and cache_resp and wait_resp:
            # Cache key is the SHA1 of the command + target url.
            h = hashlib.sha1()
            h.update(command)
            h.update(url)
            cache_file = h.hexdigest()
            del h
            r = self.cache.get(cache_file)

        auth = (self.username, self.password)
        if self.cache.enabled and r:
            # fetched the response from cache, so let's return it immediately but
            # update the cache object in the background.
            if self.cache_bg_update:
                # Daemon thread so a stuck refresh never blocks shutdown.
                t = threading.Thread(target=self.cache.add, args=(cache_file, url, auth, command, (60, 120)))
                t.daemon = True
                t.start()
            return r
        else:
            # no cached response found, so send the command directly to Kodi and,
            # if caching is enabled, cache the response.
            return self.cache.add(cache_file, url, auth, command, timeout, wait_resp)
    # Utilities

    def sanitize_name(self, *args, **kwargs):
        """Instance-method convenience wrapper around module-level sanitize_name()."""
        return sanitize_name(*args, **kwargs)
    # Helpers to find media

    # Match heard string to something in the results
    def matchHeard(self, heard, results, lookingFor='label', limit=10):
        """Match the *heard* utterance against Kodi library *results*.

        Tries cheap exact/ASCII comparisons first; if nothing matches,
        falls back to fuzzywuzzy matching over several numeric spellings
        of the utterance (digits, words, Roman numerals).

        :param heard: the phrase Alexa heard
        :param results: list of dicts from a Kodi library query
        :param lookingFor: dict key to compare against (e.g. 'label')
        :param limit: maximum number of matches to return
        :returns: up to *limit* matching result dicts, best first
        """
        located = []
        heard_lower = heard.lower()

        # Very ugly hack for German Alexa. In English, if a user specifies
        # 'percent', she converts it to a '%' symbol. In German, for whatever
        # reason, she leaves it unconverted as 'prozent'. Let's convert here.
        heard_lower = re.sub(r'prozent(?=[.,\s]|$)', '%', heard_lower)

        log.info('Trying to match: %s', heard_lower.encode("utf-8"))
        heard_ascii = sanitize_name(heard_lower)
        for result in results:
            result_lower = result[lookingFor].lower()

            # Direct comparison
            if type(heard_lower) is type(result_lower):
                if result_lower == heard_lower:
                    log.info('Simple match on direct comparison')
                    located.append(result)
                    continue

            # Strip out non-ascii symbols
            result_name = sanitize_name(result_lower)

            # Direct comparison (ASCII)
            if result_name == heard_ascii:
                log.info('Simple match on direct comparison (ASCII)')
                located.append(result)
                continue

        if not located:
            log.info('Simple match failed, trying fuzzy match')
            log.info('Processing %d items with fuzzywuzzy...', len(results))

            # Generate alternate spellings of the utterance (as heard, as
            # Roman numerals, as digits, as words) to widen the net.
            match_strings = []
            for f in (None, digits2roman, words2roman, words2digits, digits2words):
                try:
                    if f is not None:
                        match_string = f(heard_lower, self.language)
                        match_func = f.__name__
                    else:
                        match_string = heard_lower
                        match_func = 'heard'
                    match_strings.append(match_string)
                    log.info('  %s -> "%s"', match_func, match_string.encode("utf-8"))
                except:
                    # A converter may not support the phrase/language; skip it.
                    continue

            fuzzy_results = []
            for ms in set(match_strings):
                log.info('  Trying with "%s"', ms.encode("utf-8"))
                matches = process.extractBests(ms, [d[lookingFor] for d in results], limit=limit, scorer=fuzz.UQRatio, score_cutoff=75)
                if matches:
                    log.info('  Best score %d%%', matches[0][1])
                    fuzzy_results += matches

            # Got a match?
            if fuzzy_results:
                # Highest score across all spellings wins.
                winners = sorted(fuzzy_results, key=lambda x: x[1], reverse=True)
                log.info('BEST MATCH: "%s" @ %d%%', winners[0][0].encode("utf-8"), winners[0][1])
                for winner in winners:
                    # NOTE(review): .next() is Python 2 generator syntax.
                    located.append((item for item in results if item[lookingFor] == winner[0]).next())
        else:
            log.info('BEST MATCH: "%s"', located[0][lookingFor].encode("utf-8"))

        return located[:limit]
    def FindVideoPlaylist(self, heard_search):
        """Fuzzy-match *heard_search* against saved video playlists.

        :returns: list of (file, label) tuples for the best matches.
        """
        log.info('Searching for video playlist "%s"', heard_search.encode("utf-8"))
        located = []
        playlists = self.GetVideoPlaylists()
        if 'result' in playlists and 'files' in playlists['result']:
            ll = self.matchHeard(heard_search, playlists['result']['files'])
            if ll:
                located = [(item['file'], item['label']) for item in ll]
        return located

    def FindAudioPlaylist(self, heard_search):
        """Fuzzy-match *heard_search* against saved audio playlists.

        :returns: list of (file, label) tuples for the best matches.
        """
        log.info('Searching for audio playlist "%s"', heard_search.encode("utf-8"))
        located = []
        playlists = self.GetMusicPlaylists()
        if 'result' in playlists and 'files' in playlists['result']:
            ll = self.matchHeard(heard_search, playlists['result']['files'])
            if ll:
                located = [(item['file'], item['label']) for item in ll]
        return located

    def FindVideoGenre(self, heard_search, genretype='movie'):
        """Fuzzy-match *heard_search* against video genres of *genretype*.

        :returns: list of (genreid, label) tuples for the best matches.
        """
        log.info('Searching for %s genre "%s"', genretype, heard_search.encode("utf-8"))
        located = []
        genres = self.GetVideoGenres(genretype)
        if 'result' in genres and 'genres' in genres['result']:
            ll = self.matchHeard(heard_search, genres['result']['genres'])
            if ll:
                located = [(item['genreid'], item['label']) for item in ll]
        return located
def FindMovie(self, heard_search):
log.info('Searching for movie "%s"', heard_search.encode("utf-8"))
located = []
movies = self.GetMovies()
if 'result' in movies and 'movies' in movies['result']:
ll = self.matchHeard(heard_search, movies['result']['movies'])
if ll:
located = [(item['movieid'], item['label']) for item in ll]
return located
    def FindTvShow(self, heard_search):
        """Fuzzy-match *heard_search* against the TV show library.

        :returns: list of (tvshowid, label) tuples for the best matches.
        """
        log.info('Searching for show "%s"', heard_search.encode("utf-8"))
        located = []
        shows = self.GetShows()
        if 'result' in shows and 'tvshows' in shows['result']:
            ll = self.matchHeard(heard_search, shows['result']['tvshows'])
            if ll:
                located = [(item['tvshowid'], item['label']) for item in ll]
        return located
    # There is no JSON-RPC method for VideoLibrary.GetArtists, so we need a way
    # to filter the library results here.
    def FilterMusicVideosByArtist(self, results, artist):
        """Return the music-video entries whose (first) artist matches *artist*."""
        log.info('Searching for music videos by "%s"', artist.encode("utf-8"))
        # Kodi.matchHeard() expects to match on strings, but Kodi gives us arrays
        # for the artist fields. I'm not entirely sure, but I presume it's for
        # alternate artist names. For simplicity (and until someone complains),
        # let's just choose the first artist label to match on.
        artistvideos = [{k: (v if k != u'artist' else v[0]) for k, v in d.items()} for d in results]
        return self.matchHeard(artist, artistvideos, 'artist', sys.maxint)

    def FindMusicVideo(self, heard_search, heard_artist=None):
        """Fuzzy-match *heard_search* against music videos, optionally by artist.

        :returns: list of (musicvideoid, label) tuples for the best matches.
        """
        log.info('Searching for music video "%s"', heard_search.encode("utf-8"))
        located = []
        mvs = self.GetMusicVideos()
        if 'result' in mvs and 'musicvideos' in mvs['result']:
            if heard_artist:
                musicvideos = self.FilterMusicVideosByArtist(mvs['result']['musicvideos'], heard_artist)
            else:
                musicvideos = mvs['result']['musicvideos']
            ll = self.matchHeard(heard_search, musicvideos)
            if ll:
                located = [(item['musicvideoid'], item['label']) for item in ll]
        return located

    def FindMusicGenre(self, heard_search):
        """Fuzzy-match *heard_search* against music genres.

        :returns: list of (genreid, label) tuples for the best matches.
        """
        log.info('Searching for music genre "%s"', heard_search.encode("utf-8"))
        located = []
        genres = self.GetMusicGenres()
        if 'result' in genres and 'genres' in genres['result']:
            ll = self.matchHeard(heard_search, genres['result']['genres'])
            if ll:
                located = [(item['genreid'], item['label']) for item in ll]
        return located

    def FindArtist(self, heard_search):
        """Fuzzy-match *heard_search* against music artists.

        :returns: list of (artistid, label) tuples for the best matches.
        """
        log.info('Searching for artist "%s"', heard_search.encode("utf-8"))
        located = []
        artists = self.GetMusicArtists()
        if 'result' in artists and 'artists' in artists['result']:
            ll = self.matchHeard(heard_search, artists['result']['artists'], 'artist')
            if ll:
                located = [(item['artistid'], item['label']) for item in ll]
        return located
def FindAlbum(self, heard_search, artist_id=None):
log.info('Searching for album "%s"', heard_search.encode("utf-8"))
located = []
if artist_id:
albums = self.GetArtistAlbums(artist_id)
else:
albums = self.GetAlbums()
if 'result' in albums and 'albums' in albums['result']:
albums_list = albums['result']['albums']
ll = self.matchHeard(heard_search, albums['result']['albums'])
if ll:
located = [(item['albumid'], item['label']) for item in ll]
return located
    def FindSong(self, heard_search, artist_id=None, album_id=None):
        """Fuzzy-match *heard_search* against songs.

        :param artist_id: restrict the search to this artist's songs
        :param album_id: restrict the search to this album's songs
            (takes precedence over artist_id)
        :returns: list of (songid, label) tuples for the best matches.
        """
        log.info('Searching for song "%s"', heard_search.encode("utf-8"))
        located = []
        if album_id:
            songs = self.GetAlbumSongs(album_id)
        elif artist_id:
            songs = self.GetArtistSongs(artist_id)
        else:
            songs = self.GetSongs()
        if 'result' in songs and 'songs' in songs['result']:
            ll = self.matchHeard(heard_search, songs['result']['songs'])
            if ll:
                located = [(item['songid'], item['label']) for item in ll]
        return located
def FindAddon(self, heard_search):
log.info('Searching for addon "%s"', heard_search.encode("utf-8"))
located = []
for content in ['video', 'audio', 'image', 'executable']:
addons = self.GetAddons(content)
if 'result' in addons and 'addons' in addons['result']:
ll = self.matchHeard(heard_search, addons['result']['addons'], 'name')
if ll:
located = [(item['addonid'], item['name']) for item in ll]
return located
    # Playlists

    def ClearAudioPlaylist(self):
        """Empty the audio playlist (playlistid 0)."""
        return self.SendCommand(RPCString("Playlist.Clear", {"playlistid": 0}))

    def AddSongToPlaylist(self, song_id):
        """Append a single song to the audio playlist."""
        return self.SendCommand(RPCString("Playlist.Add", {"playlistid": 0, "item": {"songid": int(song_id)}}))
def AddSongsToPlaylist(self, song_ids, shuffle=False):
songs_array = []
if shuffle:
random.shuffle(song_ids)
songs_array = [dict(songid=song_id) for song_id in song_ids[:self.playlist_limit]]
# Segment the requests into chunks that Kodi will accept in a single call
for a in [songs_array[x:x+2000] for x in range(0, len(songs_array), 2000)]:
log.info('Adding %d items to the queue...', len(a))
res = self.SendCommand(RPCString("Playlist.Add", {"playlistid": 0, "item": a}))
return res
def AddAlbumToPlaylist(self, album_id, shuffle=False):
songs_result = self.GetAlbumSongs(album_id)
songs = songs_result['result']['songs']
songs_array = []
for song in songs:
songs_array.append(song['songid'])
return self.AddSongsToPlaylist(songs_array, shuffle)
    def GetAudioPlaylistItems(self):
        """Return the current contents of the audio playlist (playlistid 0)."""
        return self.SendCommand(RPCString("Playlist.GetItems", {"playlistid": 0}))

    def StartAudioPlaylist(self, playlist_file=None):
        """Begin playback of *playlist_file*, or of the queued audio playlist."""
        if playlist_file:
            # Note that subsequent shuffle commands won't work with this, as Kodi
            # considers a playlist to be a single item.
            #
            # Further, Kodi seems to completely ignore "options":{"shuffled":True} here
            return self.SendCommand(RPCString("Player.Open", {"item": {"file": playlist_file}}), False)
        else:
            return self.SendCommand(RPCString("Player.Open", {"item": {"playlistid": 0}}), False)

    def ClearVideoPlaylist(self):
        """Empty the video playlist (playlistid 1)."""
        return self.SendCommand(RPCString("Playlist.Clear", {"playlistid": 1}))

    def AddEpisodeToPlayList(self, ep_id):
        """Append a single episode to the video playlist."""
        return self.SendCommand(RPCString("Playlist.Add", {"playlistid": 1, "item": {"episodeid": int(ep_id)}}))
def AddEpisodesToPlaylist(self, episode_ids, shuffle=False):
if shuffle:
random.shuffle(episode_ids)
episodes_array = [dict(episodeid=episode_id) for episode_id in episode_ids[:self.playlist_limit]]
# Segment the requests into chunks that Kodi will accept in a single call
episode_groups = [episodes_array[x:x+2000] for x in range(0, len(episodes_array), 2000)]
for a in episode_groups:
log.info('Adding %d items to the queue...', len(a))
res = self.SendCommand(RPCString("Playlist.Add", {"playlistid": 1, "item": a}))
return res
def AddMusicVideosToPlaylist(self, musicvideo_ids, shuffle=False):
if shuffle:
random.shuffle(musicvideo_ids)
musicvideos_array = [dict(musicvideoid=musicvideo_id) for musicvideo_id in musicvideo_ids[:self.playlist_limit]]
# Segment the requests into chunks that Kodi will accept in a single call
musicvideo_groups = [musicvideos_array[x:x+2000] for x in range(0, len(musicvideos_array), 2000)]
for a in musicvideo_groups:
log.info('Adding %d items to the queue...', len(a))
res = self.SendCommand(RPCString("Playlist.Add", {"playlistid": 1, "item": a}))
return res
    def AddMovieToPlaylist(self, movie_id):
        """Append a single movie to the video playlist."""
        return self.SendCommand(RPCString("Playlist.Add", {"playlistid": 1, "item": {"movieid": int(movie_id)}}))
def AddVideosToPlaylist(self, video_files, shuffle=False):
if shuffle:
random.shuffle(video_files)
videos_array = [dict(file=video_file) for video_file in video_files[:self.playlist_limit]]
# Segment the requests into chunks that Kodi will accept in a single call
video_groups = [videos_array[x:x+2000] for x in range(0, len(videos_array), 2000)]
for a in video_groups:
log.info('Adding %d items to the queue...', len(a))
res = self.SendCommand(RPCString("Playlist.Add", {"playlistid": 1, "item": a}))
return res
    def GetVideoPlaylistItems(self):
        """Return the current contents of the video playlist (playlistid 1)."""
        return self.SendCommand(RPCString("Playlist.GetItems", {"playlistid": 1}))

    # Note that subsequent shuffle commands won't work with this, as Kodi
    # considers a playlist to be a single item.
    def StartVideoPlaylist(self, playlist_file=None):
        """Begin playback of *playlist_file*, or of the queued video playlist."""
        if playlist_file:
            return self.SendCommand(RPCString("Player.Open", {"item": {"file": playlist_file}}), False)
        else:
            return self.SendCommand(RPCString("Player.Open", {"item": {"playlistid": 1}}), False)
    # Direct plays

    def PlayFile(self, path):
        """Open and play an arbitrary file/URL (fire-and-forget)."""
        return self.SendCommand(RPCString("Player.Open", {"item": {"file": path}}), False)

    def PlayEpisode(self, ep_id, resume=True):
        """Play a library episode, resuming a partial watch by default."""
        return self.SendCommand(RPCString("Player.Open", {"item": {"episodeid": ep_id}, "options": {"resume": resume}}), False)

    def PlayMovie(self, movie_id, resume=True):
        """Play a library movie, resuming a partial watch by default."""
        return self.SendCommand(RPCString("Player.Open", {"item": {"movieid": movie_id}, "options": {"resume": resume}}), False)

    def PlayMusicVideo(self, musicvideo_id):
        """Play a library music video."""
        return self.SendCommand(RPCString("Player.Open", {"item": {"musicvideoid": musicvideo_id}}), False)

    def PartyPlayMusic(self):
        """Start music party mode."""
        return self.SendCommand(RPCString("Player.Open", {"item": {"partymode": "music"}}), False)
# Tell Kodi to update its video or music libraries
def UpdateVideo(self):
    """Trigger a video library scan; clears the local response cache first."""
    self.cache.clear()
    return self.SendCommand(RPCString("VideoLibrary.Scan"), False)
def CleanVideo(self):
    """Trigger a video library clean; clears the local response cache first."""
    self.cache.clear()
    return self.SendCommand(RPCString("VideoLibrary.Clean"), False)
def UpdateMusic(self):
    """Trigger a music library scan; clears the local response cache first."""
    self.cache.clear()
    return self.SendCommand(RPCString("AudioLibrary.Scan"), False)
def CleanMusic(self):
    """Trigger a music library clean; clears the local response cache first."""
    self.cache.clear()
    return self.SendCommand(RPCString("AudioLibrary.Clean"), False)
# Perform UI actions that match the normal remote control buttons
def PageUp(self):
    """Scroll the UI one page up."""
    return self.SendCommand(RPCString("Input.ExecuteAction", {"action": "pageup"}), False)
def PageDown(self):
    """Scroll the UI one page down."""
    return self.SendCommand(RPCString("Input.ExecuteAction", {"action": "pagedown"}), False)
def ToggleWatched(self):
    """Toggle the watched flag on the highlighted library item."""
    return self.SendCommand(RPCString("Input.ExecuteAction", {"action": "togglewatched"}))
def Info(self):
    """Show the information dialog for the current item."""
    return self.SendCommand(RPCString("Input.Info"), False)
def Menu(self):
    """Open the context menu."""
    return self.SendCommand(RPCString("Input.ContextMenu"), False)
def Home(self):
    """Go to the home screen."""
    return self.SendCommand(RPCString("Input.Home"), False)
def Select(self):
    """Activate the highlighted UI element."""
    return self.SendCommand(RPCString("Input.Select"), False)
def Up(self):
    """Navigate up."""
    return self.SendCommand(RPCString("Input.Up"), False)
def Down(self):
    """Navigate down."""
    return self.SendCommand(RPCString("Input.Down"), False)
def Left(self):
    """Navigate left."""
    return self.SendCommand(RPCString("Input.Left"), False)
def Right(self):
    """Navigate right."""
    return self.SendCommand(RPCString("Input.Right"), False)
def Back(self):
    """Navigate back."""
    return self.SendCommand(RPCString("Input.Back"), False)
def DownloadSubtitles(self):
    """Open the subtitle search window."""
    return self.SendCommand(RPCString("GUI.ActivateWindow", {"window": "subtitlesearch"}), False)
def ShowMovies(self, genre_id=None):
    """Open the movie library, optionally filtered to one genre id."""
    if genre_id:
        win = 'videodb://movies/genres/%d/' % (genre_id)
    else:
        win = 'MovieTitles'
    return self.SendCommand(RPCString("GUI.ActivateWindow", {"window": "videos", "parameters": [win, "return"]}), False)
def ShowTvShows(self, genre_id=None):
    """Open the TV show library, optionally filtered to one genre id."""
    if genre_id:
        win = 'videodb://tvshows/genres/%d/' % (genre_id)
    else:
        win = 'TVShowTitles'
    return self.SendCommand(RPCString("GUI.ActivateWindow", {"window": "videos", "parameters": [win, "return"]}), False)
def ShowMusicVideos(self, genre_id=None):
    """Open the music video library, optionally filtered to one genre id."""
    if genre_id:
        win = 'videodb://musicvideos/genres/%d/' % (genre_id)
    else:
        win = 'MusicVideoTitles'
    return self.SendCommand(RPCString("GUI.ActivateWindow", {"window": "videos", "parameters": [win, "return"]}), False)
def ShowMusic(self, genre_id=None):
    """Open the music library, optionally filtered to one genre id."""
    if genre_id:
        win = 'musicdb://genres/%d/' % (genre_id)
        return self.SendCommand(RPCString("GUI.ActivateWindow", {"window": "music", "parameters": [win, "return"]}), False)
    else:
        return self.SendCommand(RPCString("GUI.ActivateWindow", {"window": "music"}), False)
def ShowMusicArtists(self):
    """Open the music library at the artist listing."""
    return self.SendCommand(RPCString("GUI.ActivateWindow", {"window": "music", "parameters": ["Artists", "return"]}), False)
def ShowMusicAlbums(self):
    """Open the music library at the album listing."""
    return self.SendCommand(RPCString("GUI.ActivateWindow", {"window": "music", "parameters": ["Albums", "return"]}), False)
def ShowVideoPlaylist(self, playlist_path):
    """Open the videos window on a playlist path."""
    return self.SendCommand(RPCString("GUI.ActivateWindow", {"window": "videos", "parameters": [playlist_path, "return"]}), False)
def ShowMusicPlaylist(self, playlist_path):
    """Open the music window on a playlist path."""
    return self.SendCommand(RPCString("GUI.ActivateWindow", {"window": "music", "parameters": [playlist_path, "return"]}), False)
def ToggleFullscreen(self):
    """Toggle GUI fullscreen mode."""
    return self.SendCommand(RPCString("GUI.SetFullscreen", {"fullscreen": "toggle"}), False)
def ToggleStereoscopicMode(self):
    """Toggle the stereoscopic (3D) display mode."""
    return self.SendCommand(RPCString("Input.ExecuteAction", {"action": "togglestereomode"}))
def ToggleAudioPassthrough(self):
    """Toggle digital audio passthrough."""
    return self.SendCommand(RPCString("Input.ExecuteAction", {"action": "audiotoggledigital"}), False)
def ToggleMute(self):
    """Toggle application mute."""
    return self.SendCommand(RPCString("Application.SetMute", {"mute": "toggle"}), False)
def GetCurrentVolume(self):
    """Return the application's current volume level and mute state."""
    return self.SendCommand(RPCString("Application.GetProperties", fields=["volume", "muted"]))
def VolumeUp(self):
    """Raise the application volume to the next multiple of ten, capped at 100."""
    status = self.GetCurrentVolume()
    level = status['result']['volume']
    # Values already sitting on a ten boundary advance one full step;
    # anything else rounds up to the next boundary.  Never exceed 100.
    level = min(100, (level // 10 + 1) * 10)
    return self.SendCommand(RPCString("Application.SetVolume", {"volume": level}))
def VolumeDown(self):
    """Lower the application volume to the previous multiple of ten, floored at 0."""
    status = self.GetCurrentVolume()
    level = status['result']['volume']
    # Round up to a ten boundary first, then step down one — so 73 goes to
    # 70 rather than skipping to 60.  Never go below 0.
    level = max(0, ((level + 9) // 10) * 10 - 10)
    return self.SendCommand(RPCString("Application.SetVolume", {"volume": level}))
def VolumeSet(self, vol, percent=True):
    """Set the volume to *vol* on a 0-100 scale, or 0-10 when percent is False."""
    # Negative requests clamp to silence; the 0-10 scale is converted to
    # percent; anything above full volume clamps to 100.
    scale = 1 if percent else 10
    level = min(max(0, vol) * scale, 100)
    return self.SendCommand(RPCString("Application.SetVolume", {"volume": level}))
def SendText(self, send_text):
    """Type text into the currently focused input field (leaves it open)."""
    return self.SendCommand(RPCString("Input.SendText", {"done": False, "text": send_text}))
# Player controls
def PlayerPlayPause(self):
    """Toggle play/pause on the first active player, if any."""
    playerid = self.GetPlayerID()
    if playerid is not None:
        return self.SendCommand(RPCString("Player.PlayPause", {"playerid": playerid}), False)
def PlayerSkip(self):
    """Jump to the next item in the active player's playlist."""
    playerid = self.GetPlayerID()
    if playerid is not None:
        return self.SendCommand(RPCString("Player.GoTo", {"playerid": playerid, "to": "next"}), False)
def PlayerPrev(self):
    """Jump to the previous playlist item."""
    playerid = self.GetPlayerID()
    if playerid is not None:
        # NOTE(review): "previous" is sent twice — presumably because the
        # first press only restarts the current item; confirm against Kodi
        # behavior before simplifying.
        self.SendCommand(RPCString("Player.GoTo", {"playerid": playerid, "to": "previous"}))
        return self.SendCommand(RPCString("Player.GoTo", {"playerid": playerid, "to": "previous"}), False)
def PlayerStartOver(self):
    """Seek the active player back to the beginning of the current item."""
    playerid = self.GetPlayerID()
    if playerid is not None:
        return self.SendCommand(RPCString("Player.Seek", {"playerid": playerid, "value": 0}), False)
def PlayerStop(self):
    """Stop the active player."""
    playerid = self.GetPlayerID()
    if playerid is not None:
        return self.SendCommand(RPCString("Player.Stop", {"playerid": playerid}))
def PlayerSeek(self, seconds):
    """Seek the active player by a relative number of seconds.

    Fixed: the guard is now ``is not None``.  Kodi's audio player has
    playerid 0, which is falsy, so the old ``if playerid:`` silently
    skipped seeking during music playback; the sibling player methods
    above already use the identity test.
    """
    playerid = self.GetPlayerID()
    if playerid is not None:
        return self.SendCommand(RPCString("Player.Seek", {"playerid": playerid, "value": {"seconds": seconds}}), False)
def PlayerSeekSmallForward(self):
    """Seek a small step forward (same playerid-0 guard fix as PlayerSeek)."""
    playerid = self.GetPlayerID()
    if playerid is not None:
        return self.SendCommand(RPCString("Player.Seek", {"playerid": playerid, "value": "smallforward"}), False)
def PlayerSeekSmallBackward(self):
    """Seek a small step backward (same playerid-0 guard fix as PlayerSeek)."""
    playerid = self.GetPlayerID()
    if playerid is not None:
        return self.SendCommand(RPCString("Player.Seek", {"playerid": playerid, "value": "smallbackward"}), False)
def PlayerSeekBigForward(self):
    """Seek a big step forward (same playerid-0 guard fix as PlayerSeek)."""
    playerid = self.GetPlayerID()
    if playerid is not None:
        return self.SendCommand(RPCString("Player.Seek", {"playerid": playerid, "value": "bigforward"}), False)
def PlayerSeekBigBackward(self):
    """Seek a big step backward (same playerid-0 guard fix as PlayerSeek)."""
    playerid = self.GetPlayerID()
    if playerid is not None:
        return self.SendCommand(RPCString("Player.Seek", {"playerid": playerid, "value": "bigbackward"}), False)
def PlayerShuffleOn(self):
    """Enable shuffle on the active player."""
    playerid = self.GetPlayerID()
    if playerid is not None:
        return self.SendCommand(RPCString("Player.SetShuffle", {"playerid": playerid, "shuffle": True}))
def PlayerShuffleOff(self):
    """Disable shuffle on the active player."""
    playerid = self.GetPlayerID()
    if playerid is not None:
        return self.SendCommand(RPCString("Player.SetShuffle", {"playerid": playerid, "shuffle": False}))
def PlayerLoopOn(self):
    """Cycle the repeat mode on the active player."""
    playerid = self.GetPlayerID()
    if playerid is not None:
        return self.SendCommand(RPCString("Player.SetRepeat", {"playerid": playerid, "repeat": "cycle"}))
def PlayerLoopOff(self):
    """Turn repeat off on the active player."""
    playerid = self.GetPlayerID()
    if playerid is not None:
        return self.SendCommand(RPCString("Player.SetRepeat", {"playerid": playerid, "repeat": "off"}))
def PlayerSubtitlesOn(self):
    """Enable subtitles on the active video player."""
    playerid = self.GetVideoPlayerID()
    if playerid:
        return self.SendCommand(RPCString("Player.SetSubtitle", {"playerid": playerid, "subtitle": "on"}))
def PlayerSubtitlesOff(self):
    """Disable subtitles on the active video player."""
    playerid = self.GetVideoPlayerID()
    if playerid:
        return self.SendCommand(RPCString("Player.SetSubtitle", {"playerid": playerid, "subtitle": "off"}))
def PlayerSubtitlesNext(self):
    """Switch to (and enable) the next subtitle stream."""
    playerid = self.GetVideoPlayerID()
    if playerid:
        return self.SendCommand(RPCString("Player.SetSubtitle", {"playerid": playerid, "subtitle": "next", "enable": True}))
def PlayerSubtitlesPrevious(self):
    """Switch to (and enable) the previous subtitle stream."""
    playerid = self.GetVideoPlayerID()
    if playerid:
        return self.SendCommand(RPCString("Player.SetSubtitle", {"playerid": playerid, "subtitle": "previous", "enable": True}))
def PlayerAudioStreamNext(self):
    """Switch to the next audio stream."""
    playerid = self.GetVideoPlayerID()
    if playerid:
        return self.SendCommand(RPCString("Player.SetAudioStream", {"playerid": playerid, "stream": "next"}))
def PlayerAudioStreamPrevious(self):
    """Switch to the previous audio stream."""
    playerid = self.GetVideoPlayerID()
    if playerid:
        return self.SendCommand(RPCString("Player.SetAudioStream", {"playerid": playerid, "stream": "previous"}))
def PlayerMoveUp(self):
    """Pan the displayed picture up."""
    playerid = self.GetPicturePlayerID()
    if playerid:
        return self.SendCommand(RPCString("Player.Move", {"playerid": playerid, "direction": "up"}), False)
def PlayerMoveDown(self):
    """Pan the displayed picture down."""
    playerid = self.GetPicturePlayerID()
    if playerid:
        return self.SendCommand(RPCString("Player.Move", {"playerid": playerid, "direction": "down"}), False)
def PlayerMoveLeft(self):
    """Pan the displayed picture left."""
    playerid = self.GetPicturePlayerID()
    if playerid:
        return self.SendCommand(RPCString("Player.Move", {"playerid": playerid, "direction": "left"}), False)
def PlayerMoveRight(self):
    """Pan the displayed picture right."""
    playerid = self.GetPicturePlayerID()
    if playerid:
        return self.SendCommand(RPCString("Player.Move", {"playerid": playerid, "direction": "right"}), False)
def PlayerZoom(self, lvl=0):
    """Zoom the picture to an absolute level; only levels 1-10 are accepted."""
    playerid = self.GetPicturePlayerID()
    if playerid and lvl > 0 and lvl < 11:
        return self.SendCommand(RPCString("Player.Zoom", {"playerid": playerid, "zoom": lvl}), False)
def PlayerZoomIn(self):
    """Zoom the picture in one step."""
    playerid = self.GetPicturePlayerID()
    if playerid:
        return self.SendCommand(RPCString("Player.Zoom", {"playerid": playerid, "zoom": "in"}), False)
def PlayerZoomOut(self):
    """Zoom the picture out one step."""
    playerid = self.GetPicturePlayerID()
    if playerid:
        return self.SendCommand(RPCString("Player.Zoom", {"playerid": playerid, "zoom": "out"}), False)
def PlayerRotateClockwise(self):
    """Rotate the picture clockwise."""
    playerid = self.GetPicturePlayerID()
    if playerid:
        return self.SendCommand(RPCString("Player.Rotate", {"playerid": playerid, "value": "clockwise"}), False)
def PlayerRotateCounterClockwise(self):
    """Rotate the picture counterclockwise."""
    playerid = self.GetPicturePlayerID()
    if playerid:
        return self.SendCommand(RPCString("Player.Rotate", {"playerid": playerid, "value": "counterclockwise"}), False)
# Addons
def AddonExecute(self, addon_id, params=None):
    """Execute a Kodi addon, optionally passing it parameters.

    Fixed: the default was a shared mutable dict (``params={}``); a None
    sentinel with a per-call empty dict sends the identical RPC payload.
    """
    return self.SendCommand(RPCString("Addons.ExecuteAddon", {"addonid": addon_id, "params": params if params is not None else {}}))
def AddonGlobalSearch(self, needle=''):
    """Launch the Global Search addon searching for *needle*."""
    # NOTE(review): .encode('utf-8') suggests Python 2-era code; under
    # Python 3 this passes bytes to the addon — verify the target runtime.
    return self.AddonExecute("script.globalsearch", {"searchstring": needle.encode("utf-8")})
def AddonCinemaVision(self):
    """Start a CinemaVision 'experience' via its addon."""
    return self.AddonExecute("script.cinemavision", ["experience"])
# Library queries
# mediatype should be one of:
# movies, tvshows, episodes, musicvideos, artists, albums, songs
#
# returns a list like:
# [type, label, library_id, genre]
#
# where type is one of:
# movie, tvshow, episode, musicvideo, artist, album, song
def GetRecommendedItem(self, mediatype=None, mediagenre=None):
    """Pick one random well-rated library item of *mediatype*, optionally by genre.

    Returns ``[type, label, library_id, genre]`` where type is the
    singular media kind ('movie', 'tvshow', 'episode', 'musicvideo',
    'artist', 'album', 'song') or '' when nothing matched.  Movie, show
    and episode lookups prefer unwatched items and fall back to the whole
    library; the other kinds query the library directly.
    """
    answer = ['', '', 0, mediagenre]
    if not mediatype:
        return answer
    m = []
    if mediatype == 'movies':
        if mediagenre:
            m = self.GetUnwatchedMoviesByGenre(mediagenre, sort=SORT_RATING, limits=LIMIT_RECOMMENDED_MOVIES)
        else:
            m = self.GetUnwatchedMovies(sort=SORT_RATING, limits=LIMIT_RECOMMENDED_MOVIES)
        if not m:
            # Fall back to all movies if no unwatched available
            if mediagenre:
                movies = self.GetMoviesByGenre(mediagenre, sort=SORT_RATING, limits=LIMIT_RECOMMENDED_MOVIES)
            else:
                movies = self.GetMovies(sort=SORT_RATING, limits=LIMIT_RECOMMENDED_MOVIES)
            if 'result' in movies and 'movies' in movies['result']:
                m = movies['result']['movies']
        if m:
            r = random.choice(m)
            answer[0] = 'movie'
            answer[1] = r['label']
            answer[2] = r['movieid']
    elif mediatype == 'tvshows':
        if mediagenre:
            m = self.GetUnwatchedShowsByGenre(mediagenre, sort=SORT_RATING, limits=LIMIT_RECOMMENDED_SHOWS)
        else:
            m = self.GetUnwatchedShows(sort=SORT_RATING, limits=LIMIT_RECOMMENDED_SHOWS)
        if not m:
            # Fall back to all shows if no unwatched available
            if mediagenre:
                shows = self.GetShowsByGenre(mediagenre, sort=SORT_RATING, limits=LIMIT_RECOMMENDED_SHOWS)
            else:
                shows = self.GetShows(sort=SORT_RATING, limits=LIMIT_RECOMMENDED_SHOWS)
            if 'result' in shows and 'tvshows' in shows['result']:
                m = shows['result']['tvshows']
        if m:
            r = random.choice(m)
            answer[0] = 'tvshow'
            answer[1] = r['label']
            answer[2] = r['tvshowid']
    elif mediatype == 'episodes':
        # Episodes recommend via a show first: pick a random unwatched
        # show, then its first unwatched episode.
        if mediagenre:
            shows = self.GetUnwatchedShowsByGenre(mediagenre, sort=SORT_RATING, limits=LIMIT_RECOMMENDED_SHOWS)
        else:
            shows = self.GetUnwatchedShows(sort=SORT_RATING, limits=LIMIT_RECOMMENDED_SHOWS)
        if shows:
            r = random.choice(shows)
            m = self.GetUnwatchedEpisodesFromShow(r['tvshowid'], limits=(0, 1))
        if not m:
            # Fall back to all episodes if no unwatched available
            if mediagenre:
                episodes = self.GetEpisodesByGenre(mediagenre, sort=SORT_RATING, limits=LIMIT_RECOMMENDED_EPISODES)
            else:
                episodes = self.GetEpisodes(sort=SORT_RATING, limits=LIMIT_RECOMMENDED_EPISODES)
            if 'result' in episodes and 'episodes' in episodes['result']:
                m = episodes['result']['episodes']
        if m:
            r = random.choice(m)
            answer[0] = 'episode'
            answer[1] = r['label']
            answer[2] = r['episodeid']
    elif mediatype == 'musicvideos':
        if mediagenre:
            musicvideos = self.GetMusicVideosByGenre(mediagenre, sort=SORT_RATING, limits=LIMIT_RECOMMENDED_MUSICVIDEOS)
        else:
            musicvideos = self.GetMusicVideos(sort=SORT_RATING, limits=LIMIT_RECOMMENDED_MUSICVIDEOS)
        if 'result' in musicvideos and 'musicvideos' in musicvideos['result']:
            m = musicvideos['result']['musicvideos']
        if m:
            r = random.choice(m)
            answer[0] = 'musicvideo'
            answer[1] = r['label']
            answer[2] = r['musicvideoid']
    elif mediatype == 'artists':
        if mediagenre:
            artists = self.GetMusicArtistsByGenre(mediagenre, sort=SORT_RATING, limits=LIMIT_RECOMMENDED_ARTISTS)
        else:
            artists = self.GetMusicArtists(sort=SORT_RATING, limits=LIMIT_RECOMMENDED_ARTISTS)
        if 'result' in artists and 'artists' in artists['result']:
            m = artists['result']['artists']
        if m:
            r = random.choice(m)
            answer[0] = 'artist'
            answer[1] = r['label']
            answer[2] = r['artistid']
    elif mediatype == 'albums':
        if mediagenre:
            albums = self.GetAlbumsByGenre(mediagenre, sort=SORT_RATING, limits=LIMIT_RECOMMENDED_ALBUMS)
        else:
            albums = self.GetAlbums(sort=SORT_RATING, limits=LIMIT_RECOMMENDED_ALBUMS)
        if 'result' in albums and 'albums' in albums['result']:
            m = albums['result']['albums']
        if m:
            r = random.choice(m)
            answer[0] = 'album'
            answer[1] = r['label']
            answer[2] = r['albumid']
    elif mediatype == 'songs':
        if mediagenre:
            songs = self.GetSongsByGenre(mediagenre, sort=SORT_RATING, limits=LIMIT_RECOMMENDED_SONGS)
        else:
            songs = self.GetSongs(sort=SORT_RATING, limits=LIMIT_RECOMMENDED_SONGS)
        if 'result' in songs and 'songs' in songs['result']:
            m = songs['result']['songs']
        if m:
            r = random.choice(m)
            answer[0] = 'song'
            answer[1] = r['label']
            answer[2] = r['songid']
    return answer
def GetRecommendedVideoItem(self):
    """Pick one random recommendation across the video media types."""
    answer = []
    items = []
    for content in ['movies', 'tvshows', 'episodes', 'musicvideos']:
        item = self.GetRecommendedItem(content)
        if item[0]:
            items.append(item)
    if items:
        answer = random.choice(items)
    return answer
def GetRecommendedAudioItem(self):
    """Pick one random recommendation across the audio media types."""
    answer = []
    items = []
    for content in ['musicvideos', 'artists', 'albums', 'songs']:
        item = self.GetRecommendedItem(content)
        if item[0]:
            items.append(item)
    if items:
        answer = random.choice(items)
    return answer
# content can be: video, audio, image, executable, or unknown
def GetAddons(self, content):
    """List installed addons, optionally restricted to one content type."""
    if content:
        return self.SendCommand(RPCString("Addons.GetAddons", {"content": content}, fields=["name"]))
    else:
        return self.SendCommand(RPCString("Addons.GetAddons", fields=["name"]))
def GetAddonDetails(self, addon_id):
    """Return name, version, description and summary for one addon."""
    return self.SendCommand(RPCString("Addons.GetAddonDetails", {"addonid": addon_id}, fields=["name", "version", "description", "summary"]))
def GetPlaylistItems(self, playlist_file):
    """List the entries of a playlist file."""
    return self.SendCommand(RPCString("Files.GetDirectory", {"directory": playlist_file}))
def GetMusicPlaylists(self):
    """List the user's music playlists."""
    return self.SendCommand(RPCString("Files.GetDirectory", {"directory": "special://musicplaylists"}))
def GetMusicArtists(self, sort=None, filters=None, filtertype=None, limits=None):
    """Return music artists (all artists, not just album artists); cached."""
    return self.SendCommand(RPCString("AudioLibrary.GetArtists", {"albumartistsonly": False}, sort=sort, filters=filters, filtertype=filtertype, limits=limits), cache_resp=True)
def GetMusicArtistsByGenre(self, genre, sort=None, limits=None):
    """Return artists whose genre matches *genre* exactly."""
    return self.GetMusicArtists(sort=sort, filters=[{"field": "genre", "operator": "is", "value": genre}], limits=limits)
def GetMusicGenres(self):
    """Return all music genres; cached."""
    return self.SendCommand(RPCString("AudioLibrary.GetGenres"), cache_resp=True)
def GetArtistAlbums(self, artist_id):
    """Return the albums of one artist; cached."""
    return self.SendCommand(RPCString("AudioLibrary.GetAlbums", filters=[{"artistid": int(artist_id)}]), cache_resp=True)
def GetNewestAlbumFromArtist(self, artist_id):
    """Return the albumid of the artist's newest album (by year), or None."""
    # NOTE(review): assumes the request succeeded — a missing 'result' key
    # would raise KeyError here rather than returning None; confirm
    # SendCommand's failure behavior.
    data = self.SendCommand(RPCString("AudioLibrary.GetAlbums", sort=SORT_YEAR, filters=[{"artistid": int(artist_id)}], limits=(0, 1)), cache_resp=True)
    if 'albums' in data['result']:
        album = data['result']['albums'][0]
        return album['albumid']
    else:
        return None
def GetSongs(self, sort=None, filters=None, filtertype=None, limits=None):
    """Return songs from the music library; cached."""
    return self.SendCommand(RPCString("AudioLibrary.GetSongs", sort=sort, filters=filters, filtertype=filtertype, limits=limits), cache_resp=True)
def GetSongsByGenre(self, genre, sort=None, limits=None):
    """Return songs whose genre matches *genre* exactly."""
    return self.GetSongs(sort=sort, filters=[{"field": "genre", "operator": "is", "value": genre}], limits=limits)
def GetSongsPath(self):
    """Return all songs including their file paths."""
    return self.SendCommand(RPCString("AudioLibrary.GetSongs", fields=["file"]))
def GetSongIdPath(self, song_id):
    """Return one song's details including its file path."""
    return self.SendCommand(RPCString("AudioLibrary.GetSongDetails", {"songid": int(song_id)}, fields=["file"]))
def GetSongDetails(self, song_id):
    """Return one song's details (incl. artist) as the inner dict."""
    data = self.SendCommand(RPCString("AudioLibrary.GetSongDetails", {"songid": int(song_id)}, fields=["artist"]))
    return data['result']['songdetails']
def GetArtistSongs(self, artist_id, sort=None, limits=None):
    """Return the songs of one artist by id."""
    return self.GetSongs(sort=sort, filters=[{"artistid": int(artist_id)}], limits=limits)
def GetArtistSongsByGenre(self, artist, genre, sort=None, limits=None):
    """Return songs matching both an artist name and a genre."""
    return self.GetSongs(sort=sort, filters=[{"field": "artist", "operator": "is", "value": artist}, {"field": "genre", "operator": "is", "value": genre}], limits=limits)
def GetArtistSongsPath(self, artist_id):
    """Return one artist's songs including file paths; cached."""
    return self.SendCommand(RPCString("AudioLibrary.GetSongs", filters=[{"artistid": int(artist_id)}], fields=["file"]), cache_resp=True)
def GetAlbums(self, sort=None, filters=None, filtertype=None, limits=None):
    """Return albums from the music library; cached."""
    return self.SendCommand(RPCString("AudioLibrary.GetAlbums", sort=sort, filters=filters, filtertype=filtertype, limits=limits), cache_resp=True)
def GetAlbumsByGenre(self, genre, sort=None, limits=None):
    """Return albums whose genre matches *genre* exactly."""
    return self.GetAlbums(sort=sort, filters=[{"field": "genre", "operator": "is", "value": genre}], limits=limits)
def GetAlbumDetails(self, album_id):
    """Return one album's details (incl. artist) as the inner dict."""
    data = self.SendCommand(RPCString("AudioLibrary.GetAlbumDetails", {"albumid": int(album_id)}, fields=["artist"]))
    return data['result']['albumdetails']
def GetAlbumSongs(self, album_id, sort=None, limits=None):
    """Return the songs of one album by id."""
    return self.GetSongs(sort=sort, filters=[{"albumid": int(album_id)}], limits=limits)
def GetAlbumSongsPath(self, album_id):
    """Return one album's songs including file paths; cached."""
    return self.SendCommand(RPCString("AudioLibrary.GetSongs", filters=[{"albumid": int(album_id)}], fields=["file"]), cache_resp=True)
def GetRecentlyAddedAlbums(self):
    """Return recently added albums (incl. artist)."""
    return self.SendCommand(RPCString("AudioLibrary.GetRecentlyAddedAlbums", fields=["artist"]))
def GetRecentlyAddedSongs(self):
    """Return recently added songs (incl. artist)."""
    return self.SendCommand(RPCString("AudioLibrary.GetRecentlyAddedSongs", fields=["artist"]))
def GetRecentlyAddedSongsPath(self):
    """Return recently added songs including file paths."""
    return self.SendCommand(RPCString("AudioLibrary.GetRecentlyAddedSongs", fields=["artist", "file"]))
def GetVideoPlaylists(self):
    """List the user's video playlists."""
    return self.SendCommand(RPCString("Files.GetDirectory", {"directory": "special://videoplaylists"}))
def GetVideoGenres(self, genretype='movie'):
    """Return video genres for a library section ('movie', 'tvshow', ...); cached."""
    return self.SendCommand(RPCString("VideoLibrary.GetGenres", {"type": genretype}), cache_resp=True)
def GetMusicVideos(self, sort=None, filters=None, filtertype=None, limits=None):
    """Return music videos (incl. artist); cached."""
    return self.SendCommand(RPCString("VideoLibrary.GetMusicVideos", fields=["artist"], sort=sort, filters=filters, filtertype=filtertype, limits=limits), cache_resp=True)
def GetMusicVideosByGenre(self, genre, sort=None, limits=None):
    """Return music videos of *genre*.

    Fixed: the caller-supplied ``limits`` was being discarded —
    ``limits=None`` was hard-coded instead of forwarding ``limits=limits``
    as every sibling *ByGenre helper does.
    """
    return self.GetMusicVideos(sort=sort, filters=[{"genre": genre}], limits=limits)
def GetMusicVideoDetails(self, mv_id):
    """Return one music video's details (incl. artist) as the inner dict."""
    data = self.SendCommand(RPCString("VideoLibrary.GetMusicVideoDetails", {"musicvideoid": int(mv_id)}, fields=["artist"]))
    return data['result']['musicvideodetails']
def GetMovies(self, sort=None, filters=None, filtertype=None, limits=None):
    """Return movies from the video library; cached."""
    return self.SendCommand(RPCString("VideoLibrary.GetMovies", sort=sort, filters=filters, filtertype=filtertype, limits=limits), cache_resp=True)
def GetMoviesByGenre(self, genre, sort=None, limits=None):
    """Return movies of *genre*.

    Fixed: the filter was passed as the misspelled keyword ``fiters=``,
    which made every call raise TypeError (GetMovies accepts no such
    parameter) — now ``filters=`` like the sibling *ByGenre helpers.
    """
    return self.GetMovies(sort=sort, filters=[{"genre": genre}], limits=limits)
def GetMovieDetails(self, movie_id):
    """Return one movie's details (resume point, trailer) as the inner dict."""
    data = self.SendCommand(RPCString("VideoLibrary.GetMovieDetails", {"movieid": movie_id}, fields=["resume", "trailer"]))
    return data['result']['moviedetails']
def GetShows(self, sort=None, filters=None, filtertype=None, limits=None):
    """Return TV shows from the video library; cached."""
    return self.SendCommand(RPCString("VideoLibrary.GetTVShows", sort=sort, filters=filters, filtertype=filtertype, limits=limits), cache_resp=True)
def GetShowsByGenre(self, genre, sort=None, limits=None):
    """Return TV shows of *genre*."""
    return self.GetShows(sort=sort, filters=[{"genre": genre}], limits=limits)
def GetShowDetails(self, show_id):
    """Return one show's details (incl. art) as the inner dict."""
    data = self.SendCommand(RPCString("VideoLibrary.GetTVShowDetails", {"tvshowid": show_id}, fields=["art"]))
    return data['result']['tvshowdetails']
def GetEpisodes(self, sort=None, filters=None, filtertype=None, limits=None):
    """Return episodes from the video library; cached."""
    return self.SendCommand(RPCString("VideoLibrary.GetEpisodes", sort=sort, filters=filters, filtertype=filtertype, limits=limits), cache_resp=True)
def GetEpisodesByGenre(self, genre, sort=None, limits=None):
    """Return episodes whose genre matches *genre* exactly."""
    return self.GetEpisodes(sort=sort, filters=[{"field": "genre", "operator": "is", "value": genre}], limits=limits)
def GetEpisodesFromShow(self, show_id):
    """Return all episodes of one show; cached."""
    return self.SendCommand(RPCString("VideoLibrary.GetEpisodes", {"tvshowid": int(show_id)}), cache_resp=True)
def GetEpisodeDetails(self, ep_id):
    """Return one episode's details (show, season/episode, resume) as the inner dict."""
    data = self.SendCommand(RPCString("VideoLibrary.GetEpisodeDetails", {"episodeid": int(ep_id)}, fields=["showtitle", "season", "episode", "resume"]))
    return data['result']['episodedetails']
def GetNewestEpisodeFromShow(self, show_id):
    """Return the episodeid of the most recently added episode, or None."""
    data = self.SendCommand(RPCString("VideoLibrary.GetEpisodes", {"tvshowid": int(show_id)}, sort=SORT_DATEADDED, limits=(0, 1)))
    if 'episodes' in data['result']:
        episode = data['result']['episodes'][0]
        return episode['episodeid']
    else:
        return None
def GetNextUnwatchedEpisode(self, show_id):
    """Return the episodeid of the first unwatched episode in order, or None."""
    data = self.SendCommand(RPCString("VideoLibrary.GetEpisodes", {"tvshowid": int(show_id)}, filters=[FILTER_UNWATCHED], sort=SORT_EPISODE, fields=["playcount"], limits=(0, 1)))
    if 'episodes' in data['result']:
        episode = data['result']['episodes'][0]
        return episode['episodeid']
    else:
        return None
def GetLastWatchedShow(self):
    """Return the most recently played (watched) episode with its show info."""
    return self.SendCommand(RPCString("VideoLibrary.GetEpisodes", sort=SORT_LASTPLAYED, filters=[FILTER_WATCHED, {"field": "lastplayed", "operator": "isnot", "value": "0"}], fields=["tvshowid", "showtitle"], limits=(0, 1)))
def GetSpecificEpisode(self, show_id, season, episode):
    """Return the episodeid for show/season/episode numbers, or None if absent."""
    data = self.SendCommand(RPCString("VideoLibrary.GetEpisodes", {"tvshowid": int(show_id), "season": int(season)}, fields=["season", "episode"]))
    if 'episodes' in data['result']:
        correct_id = None
        for episode_data in data['result']['episodes']:
            if int(episode_data['episode']) == int(episode):
                correct_id = episode_data['episodeid']
                break
        return correct_id
    else:
        return None
def GetEpisodesFromShowDetails(self, show_id):
    """Return one show's episodes with their season/episode numbers."""
    return self.SendCommand(RPCString("VideoLibrary.GetEpisodes", {"tvshowid": int(show_id)}, fields=["season", "episode"]))
# Returns a list of dictionaries with information about unwatched movies. Useful for
# telling/showing users what's ready to be watched. Setting max to very high values
# can take a long time.
def GetUnwatchedMovies(self, sort=SORT_DATEADDED, limits=None):
    """Return info dicts for unwatched movies; limits defaults to (0, max_unwatched_movies)."""
    if not limits:
        limits = (0, self.max_unwatched_movies)
    data = self.SendCommand(RPCString("VideoLibrary.GetMovies", sort=sort, filters=[FILTER_UNWATCHED], fields=["title", "playcount", "dateadded"], limits=limits))
    answer = []
    if 'movies' in data['result']:
        for d in data['result']['movies']:
            answer.append({'title': d['title'], 'movieid': d['movieid'], 'label': d['label'], 'dateadded': datetime.datetime.strptime(d['dateadded'], "%Y-%m-%d %H:%M:%S")})
    return answer
# Returns a list of dictionaries with information about unwatched movies in a particular genre. Useful for
# telling/showing users what's ready to be watched. Setting max to very high values
# can take a long time.
def GetUnwatchedMoviesByGenre(self, genre, sort=SORT_DATEADDED, limits=None):
    """Return info dicts for unwatched movies whose genre contains *genre*."""
    if not limits:
        limits = (0, self.max_unwatched_movies)
    data = self.SendCommand(RPCString("VideoLibrary.GetMovies", sort=sort, filters=[FILTER_UNWATCHED, {"field": "genre", "operator": "contains", "value": genre}], fields=["title", "playcount", "dateadded"], limits=limits))
    answer = []
    if 'movies' in data['result']:
        for d in data['result']['movies']:
            answer.append({'title': d['title'], 'movieid': d['movieid'], 'label': d['label'], 'dateadded': datetime.datetime.strptime(d['dateadded'], "%Y-%m-%d %H:%M:%S")})
    return answer
# Returns a list of dictionaries with information about unwatched shows. Useful for
# telling/showing users what's ready to be watched. Setting max to very high values
# can take a long time.
def GetUnwatchedShows(self, sort=SORT_DATEADDED, limits=None):
    """Return info dicts for shows with unwatched episodes; limits defaults to (0, max_unwatched_shows)."""
    if not limits:
        limits = (0, self.max_unwatched_shows)
    data = self.SendCommand(RPCString("VideoLibrary.GetTVShows", sort=sort, filters=[FILTER_UNWATCHED], fields=["title", "playcount", "dateadded"], limits=limits))
    answer = []
    if 'tvshows' in data['result']:
        for d in data['result']['tvshows']:
            answer.append({'title': d['title'], 'tvshowid': d['tvshowid'], 'label': d['label'], 'dateadded': datetime.datetime.strptime(d['dateadded'], "%Y-%m-%d %H:%M:%S")})
    return answer
# Returns a list of dictionaries with information about unwatched shows in a particular genre. Useful for
# telling/showing users what's ready to be watched. Setting max to very high values
# can take a long time.
def GetUnwatchedShowsByGenre(self, genre, sort=SORT_DATEADDED, limits=None):
    """Return info dicts for unwatched shows whose genre contains *genre*."""
    if not limits:
        limits = (0, self.max_unwatched_shows)
    data = self.SendCommand(RPCString("VideoLibrary.GetTVShows", sort=sort, filters=[FILTER_UNWATCHED, {"field": "genre", "operator": "contains", "value": genre}], fields=["title", "playcount", "dateadded"], limits=limits))
    answer = []
    if 'tvshows' in data['result']:
        for d in data['result']['tvshows']:
            answer.append({'title': d['title'], 'tvshowid': d['tvshowid'], 'label': d['label'], 'dateadded': datetime.datetime.strptime(d['dateadded'], "%Y-%m-%d %H:%M:%S")})
    return answer
# Returns a list of dictionaries with information about episodes that have been watched.
def GetWatchedEpisodes(self, sort=None, limits=None):
    """Return watched episodes with playcount/show/season/episode/lastplayed fields."""
    return self.SendCommand(RPCString("VideoLibrary.GetEpisodes", sort=sort, filters=[FILTER_WATCHED], fields=["playcount", "showtitle", "season", "episode", "lastplayed"], limits=limits))
# Returns a list of dictionaries with information about unwatched episodes. Useful for
# telling/showing users what's ready to be watched. Setting max to very high values
# can take a long time.
def GetUnwatchedEpisodes(self, sort=SORT_DATEADDED, limits=None):
    """Return info dicts for unwatched episodes.

    limits defaults to (0, self.max_unwatched_shows).  Each entry carries
    title, episodeid, show, label and a parsed ``dateadded`` datetime.

    Fixed: the original prefetched GetShowDetails for every distinct show
    and bound it to an unused local — one wasted RPC round-trip per show;
    that dead work is removed and the returned value is unchanged.
    """
    if not limits:
        limits = (0, self.max_unwatched_shows)
    data = self.SendCommand(RPCString("VideoLibrary.GetEpisodes", sort=sort, filters=[FILTER_UNWATCHED], fields=["title", "playcount", "showtitle", "tvshowid", "dateadded"], limits=limits))
    answer = []
    if 'episodes' in data['result']:
        for d in data['result']['episodes']:
            answer.append({'title': d['title'], 'episodeid': d['episodeid'], 'show': d['showtitle'], 'label': d['label'], 'dateadded': datetime.datetime.strptime(d['dateadded'], "%Y-%m-%d %H:%M:%S")})
    return answer
def GetUnwatchedEpisodesFromShow(self, show_id, limits=None):
    """Return info dicts for the unwatched episodes of one show."""
    data = self.SendCommand(RPCString("VideoLibrary.GetEpisodes", {"tvshowid": int(show_id)}, filters=[FILTER_UNWATCHED], fields=["title", "playcount", "showtitle", "tvshowid", "dateadded"], limits=limits))
    answer = []
    if 'episodes' in data['result']:
        for d in data['result']['episodes']:
            answer.append({'title': d['title'], 'episodeid': d['episodeid'], 'show': d['showtitle'], 'label': d['label'], 'dateadded': datetime.datetime.strptime(d['dateadded'], "%Y-%m-%d %H:%M:%S")})
    return answer
# System commands
def ApplicationQuit(self):
    """Ask Kodi to exit."""
    return self.SendCommand(RPCString("Application.Quit"))
def SystemHibernate(self):
    """Hibernate the host system."""
    return self.SendCommand(RPCString("System.Hibernate"))
def SystemReboot(self):
    """Reboot the host system."""
    return self.SendCommand(RPCString("System.Reboot"))
def SystemShutdown(self):
    """Shut down the host system."""
    return self.SendCommand(RPCString("System.Shutdown"))
def SystemSuspend(self):
    """Suspend the host system."""
    return self.SendCommand(RPCString("System.Suspend"))
def SystemEjectMedia(self):
    """Eject the optical drive."""
    return self.SendCommand(RPCString("System.EjectOpticalDrive"))
# Misc helpers
# Prepare file url for streaming
def PrepareDownload(self, path=""):
    """Build an authenticated Kodi vfs streaming URL for *path*, optionally proxied."""
    # NOTE(review): urllib.quote is the Python 2 API (urllib.parse.quote
    # in Python 3) — confirm the target runtime before porting.
    path = urllib.quote(path.encode('utf-8')).decode('utf-8')
    # Join the environment variables into a url
    url = "%s://%s:%s@%s:%s/%s/vfs" % (self.scheme, self.username, self.password, self.address, self.port, self.subpath)
    # Remove any double slashes in the url
    url = http_normalize_slashes(url)
    url = url + '/' + path
    accepted_answers = ['y', 'yes', 'Y', 'Yes', 'YES', 'true', 'True']
    # Route through the shared proxy, a user-configured proxy, or directly.
    if self.config.get(self.dev_cfg_section, 'use_proxy') in accepted_answers:
        stream_url = 'https://proxy.lexigr.am/proxy?file=' + url
    elif self.config.get(self.dev_cfg_section, 'alt_proxy'):
        stream_url = self.config.get(self.dev_cfg_section, 'alt_proxy') + url
    else:
        stream_url = url
    return stream_url
# Get the first active player.
def GetPlayerID(self, playertype=['picture', 'audio', 'video']):
    """Return the playerid of the first active player of an accepted type, else None."""
    data = self.SendCommand(RPCString("Player.GetActivePlayers"))
    result = data.get("result", [])
    if result:
        for curitem in result:
            if curitem.get("type") in playertype:
                return curitem.get("playerid")
    return None
# Get the first active Video player.
def GetVideoPlayerID(self, playertype=['video']):
    """Return the playerid of the active video player, else None."""
    data = self.SendCommand(RPCString("Player.GetActivePlayers"))
    result = data.get("result", [])
    if result:
        for curitem in result:
            if curitem.get("type") in playertype:
                return curitem.get("playerid")
    return None
# Get the first active Audio player.
def GetAudioPlayerID(self, playertype=['audio']):
    """Return the playerid of the active audio player, else None."""
    data = self.SendCommand(RPCString("Player.GetActivePlayers"))
    result = data.get("result", [])
    if result:
        for curitem in result:
            if curitem.get("type") in playertype:
                return curitem.get("playerid")
    return None
# Get the first active Picture player.
def GetPicturePlayerID(self, playertype=['picture']):
    """Return the playerid of the active picture player, else None."""
    data = self.SendCommand(RPCString("Player.GetActivePlayers"))
    result = data.get("result", [])
    if result:
        for curitem in result:
            if curitem.get("type") in playertype:
                return curitem.get("playerid")
    return None
# Information about the video or audio that's currently playing
def GetActivePlayItem(self):
    """Return the item dict of whatever is playing (implicitly None when idle)."""
    playerid = self.GetPlayerID()
    if playerid is not None:
        data = self.SendCommand(RPCString("Player.GetItem", {"playerid": playerid}, fields=["title", "album", "artist", "season", "episode", "showtitle", "tvshowid", "description"]))
        return data['result']['item']
def GetActivePlayProperties(self):
    """Return the active player's properties (streams, shuffle/repeat/zoom caps)."""
    playerid = self.GetPlayerID()
    if playerid is not None:
        data = self.SendCommand(RPCString("Player.GetProperties", {"playerid": playerid}, fields=["currentaudiostream", "currentsubtitle", "canshuffle", "shuffled", "canrepeat", "repeat", "canzoom", "canrotate", "canmove"]))
        return data['result']
# Returns current subtitles as a speakable string
def GetCurrentSubtitles(self):
subs = ""
country_dic = getisocodes_dict()
curprops = self.GetActivePlayProperties()
if curprops is not None:
try:
# gets 3 character country code e.g. fre
lang = curprops['currentsubtitle']['language']
# looks up 3 character code in the dictionary e.g. fre|fra|fr|French|francais
subslang = country_dic[lang]
# matches 3 character code with the lang name
subs = subslang[self.language]
# joins full language name with the name of the subtitle file e.g. French External
name = curprops['currentsubtitle']['name']
if name:
subs += " " + name
except:
pass
return subs
# Returns current audio stream as a speakable string
def GetCurrentAudioStream(self):
stream = ""
country_dic = getisocodes_dict()
curprops = self.GetActivePlayProperties()
if curprops is not None:
try:
# gets 3 character country code e.g. fre
lang = curprops['currentaudiostream']['language']
# looks up 3 character code in the dictionary e.g. fre|fra|fr|French|francais
streamlang = country_dic[lang]
# matches 3 character code with the lang name
stream = streamlang[self.language]
# joins full language name with the name of the subtitle file e.g. French External
name = curprops['currentaudiostream']['name']
if name:
stream += " " + name
except:
pass
return stream
  # Returns information useful for building a progress bar to show an item's play time
  def GetPlayerStatus(self):
    """Return play-state/progress info for the active video or audio player.

    For an active player the dict contains: 'state' ('play' or 'pause'),
    'time'/'totaltime' as formatted strings (H:MM:SS when the total is
    an hour or longer, else MM:SS), the elapsed/total hours and minutes
    as integers, and 'pct' (percentage complete).  Returns
    {'state': 'stop'} when nothing is playing.
    """
    # Prefer the video player; fall back to audio.
    playerid = self.GetVideoPlayerID()
    if playerid is None:
      playerid = self.GetAudioPlayerID()
    if playerid is not None:
      data = self.SendCommand(RPCString("Player.GetProperties", {"playerid": playerid}, fields=["percentage", "speed", "time", "totaltime"]))
      if 'result' in data:
        hours_total = data['result']['totaltime']['hours']
        hours_cur = data['result']['time']['hours']
        # Minutes totals fold the hours in (e.g. 1h02m -> 62 minutes).
        mins_total = hours_total * 60 + data['result']['totaltime']['minutes']
        mins_cur = hours_cur * 60 + data['result']['time']['minutes']
        # speed == 0 means paused; any non-zero speed counts as playing.
        speed = data['result']['speed']
        if hours_total > 0:
          total = '%d:%02d:%02d' % (hours_total, data['result']['totaltime']['minutes'], data['result']['totaltime']['seconds'])
          cur = '%d:%02d:%02d' % (data['result']['time']['hours'], data['result']['time']['minutes'], data['result']['time']['seconds'])
        else:
          total = '%02d:%02d' % (data['result']['totaltime']['minutes'], data['result']['totaltime']['seconds'])
          cur = '%02d:%02d' % (data['result']['time']['minutes'], data['result']['time']['seconds'])
        return {'state': 'play' if speed > 0 else 'pause', 'time': cur, 'time_hours': hours_cur, 'time_mins': mins_cur, 'totaltime': total, 'total_hours': hours_total, 'total_mins': mins_total, 'pct': data['result']['percentage']}
    return {'state': 'stop'}
| {
"content_hash": "5475eef846415195954278e8d365e5c5",
"timestamp": "",
"source": "github",
"line_count": 1712,
"max_line_length": 230,
"avg_line_length": 40.78621495327103,
"alnum_prop": 0.6627760433076505,
"repo_name": "m0ngr31/kodi-voice",
"id": "c5e6f3e4cf5c2bdce7c6a1730679775145b7338b",
"size": "69930",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kodi_voice/kodi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "76893"
}
],
"symlink_target": ""
} |
from time import sleep
import time
import sys
import os
sys.path.insert(0, os.path.realpath(".."))
import swood
assert(os.path.realpath(swood.__file__) ==
os.path.realpath("../swood/__init__.py"))
def find_program(prog):
    """Locate *prog* on the PATH.

    Each PATH entry is checked for the bare name first and, on Windows,
    for the name with ".exe" appended.  Returns the first match or None
    when the program cannot be found.
    """
    suffixes = [""]
    if os.name == "nt":
        suffixes.append(".exe")
    for directory in os.environ["PATH"].split(os.pathsep):
        base = os.path.join(directory.strip('"'), prog)
        for suffix in suffixes:
            candidate = base + suffix
            if os.path.isfile(candidate):
                return candidate
    return None
def play_audio(clip):
    """Launch an external media player on *clip* and return its Popen handle.

    Windows-only: tries VLC at its default install path, then on the
    PATH, then Windows Media Player the same way; raises
    FileNotFoundError when none of them can be found.
    NOTE(review): on non-Windows systems the function falls through and
    returns None silently -- confirm whether that is intended.
    """
    import subprocess
    if os.name == "nt":
        if os.path.isfile("C:/Program Files (x86)/VideoLAN/VLC/vlc.exe"):
            return subprocess.Popen(["C:/Program Files (x86)/VideoLAN/VLC/vlc.exe", clip, "vlc://quit"])
        elif find_program("vlc.exe"):
            return subprocess.Popen([find_program("vlc.exe"), clip, "vlc://quit"])
        elif os.path.isfile("C:/Program Files (x86)/Windows Media Player/wmplayer.exe"):
            return subprocess.Popen(["C:/Program Files (x86)/Windows Media Player/wmplayer.exe", clip, "/Play", "/Close"])
        elif find_program("wmplayer.exe"):
            return subprocess.Popen([find_program("wmplayer.exe"), clip, "/Play", "/Close"])
        else:
            raise FileNotFoundError("Can't find an audio player.")
# Handle of the currently playing preview process, reused across run() calls.
running_player = None


def run(midi, *args, play=False, wait=False, soundfont=False):
    """Render midis/<midi>.mid to outputs/<midi>.wav through swood and time it.

    :param args: extra command-line switches passed straight to swood
    :param play: preview the result; the previous preview (if any) is
        waited on and its output file deleted first
    :param wait: block until playback finishes, then delete the output
    :param soundfont: render with samples/test.swood instead of the
        default samples/doot.wav
    NOTE(review): wait=True without play=True would call .wait() on a
    possibly-None running_player -- confirm callers never do that.
    """
    global running_player
    print("~~~~~~~~~~ Testing '{}' ~~~~~~~~~~".format(midi))
    out = "outputs/" + midi + ".wav"
    start = time.perf_counter()
    swood.run_cmd(argv=["samples/test.swood" if soundfont else "samples/doot.wav",
                        "midis/" + midi + ".mid", out, "--no-pbar", *args])
    print("Finished '{}' in {} seconds.".format(
        midi, round(time.perf_counter() - start, 2)))
    if play:
        # swood may have produced nothing (e.g. on error); skip playback then.
        if not os.path.isfile(out):
            return
        if running_player:
            running_player.wait()
            os.remove(running_player.args[1])
        running_player = play_audio(out)
        if wait:
            running_player.wait()
            os.remove(out)
# Crude CLI: test.py playall|all|bend
# NOTE(review): sys.argv[1] is accessed unguarded -- running with no
# argument raises IndexError instead of printing usage.
if sys.argv[1] == "playall":
    run("dummy", play=True)
    run("beethoven", play=True)
    run("finalfantasy", play=True, soundfont=True)
    #run("pitchbend", play=True, wait=True)
elif sys.argv[1] == "all":
    run("dummy")
    run("beethoven")
    run("finalfantasy", soundfont=True)
    # run("pitchbend")
elif sys.argv[1] == "bend":
    run("pitchbend")
| {
"content_hash": "b9eaebfd44ca7538d474543ad16f4d53",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 122,
"avg_line_length": 35.885714285714286,
"alnum_prop": 0.5935509554140127,
"repo_name": "milkey-mouse/swood",
"id": "b1d68cdf704007a91d0f0da189b55c91d35a0634",
"size": "2512",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "NSIS",
"bytes": "5748"
},
{
"name": "Python",
"bytes": "115815"
}
],
"symlink_target": ""
} |
# Minimal ElementTree demo: parse an XML snippet, read an element's text
# and an attribute.  Python 2 syntax (print statements).
import xml.etree.ElementTree as ET

data = '''
<person>
<name>Chuck</name>
<phone type="intl">
+1 734 303 4456
</phone>
<email hide="yes"/>
</person>'''

tree = ET.fromstring(data)
# find() returns the first matching child element.
print 'Name:',tree.find('name').text
# get() reads an attribute from the matched element.
print 'Attr:',tree.find('email').get('hide')
| {
"content_hash": "04c86bce4b6c5885fa4cc5e94debd457",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 44,
"avg_line_length": 19.857142857142858,
"alnum_prop": 0.6223021582733813,
"repo_name": "johanfrisk/Python_at_web",
"id": "4fea58ebc2b7eda1a77935f262c2529a2bbad74b",
"size": "278",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "notebooks/code/xml1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "300"
},
{
"name": "CSS",
"bytes": "108"
},
{
"name": "Gherkin",
"bytes": "2062"
},
{
"name": "HTML",
"bytes": "8426"
},
{
"name": "JavaScript",
"bytes": "451061"
},
{
"name": "Jupyter Notebook",
"bytes": "54813"
},
{
"name": "Python",
"bytes": "291515"
},
{
"name": "Shell",
"bytes": "182"
}
],
"symlink_target": ""
} |
from django.db import models
from django.contrib.auth.models import User
from course.models import Course
class News(models.Model):
    """A news/announcement item posted by a user for a course."""

    user = models.ForeignKey(User)  # author; pre-1.10 Django style (no on_delete)
    course = models.ForeignKey(Course)  # course the announcement belongs to
    text = models.TextField(blank=True, default='')  # body text; may be empty
    pub_date = models.DateTimeField(auto_now_add=True)  # set once at creation
"content_hash": "4dd87116ad64ba9b01f8d86e25500ac1",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 54,
"avg_line_length": 28.636363636363637,
"alnum_prop": 0.7492063492063492,
"repo_name": "starkdee/courseware",
"id": "d8fc74fcc869b3453b14f6d33d3dbf4e1915acff",
"size": "315",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "news/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3670"
},
{
"name": "HTML",
"bytes": "24011"
},
{
"name": "JavaScript",
"bytes": "3653"
},
{
"name": "Python",
"bytes": "61761"
},
{
"name": "Ruby",
"bytes": "977"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import collections
import logging
import os
import platform
import re
import subprocess
import types
import util
import json
from ebstall.versions import Version
from ebstall.util import normalize_string
logger = logging.getLogger(__name__)
CLI_DEFAULTS_DEFAULT = dict(
packager='source'
)
CLI_DEFAULTS_DEBIAN = dict(
packager='apt-get'
)
CLI_DEFAULTS_CENTOS = dict(
packager='yum'
)
CLI_DEFAULTS_DARWIN = dict(
packager='source'
)
FLAVORS = {
'debian': 'debian',
'ubuntu': 'debian',
'kubuntu': 'debian',
'kali': 'debian',
'centos': 'redhat',
'centos linux': 'redhat',
'fedora': 'redhat',
'red hat enterprise linux server': 'redhat',
'rhel': 'redhat',
'amazon': 'redhat',
'amzn': 'redhat',
'gentoo': 'gentoo',
'gentoo base system': 'gentoo',
'darwin': 'darwin',
'opensuse': 'suse',
'suse': 'suse',
}
CLI_DEFAULTS = {
"default": CLI_DEFAULTS_DEFAULT,
"debian": CLI_DEFAULTS_DEBIAN,
"ubuntu": CLI_DEFAULTS_DEBIAN,
"centos": CLI_DEFAULTS_CENTOS,
"centos linux": CLI_DEFAULTS_CENTOS,
"fedora": CLI_DEFAULTS_CENTOS,
"red hat enterprise linux server": CLI_DEFAULTS_CENTOS,
"rhel": CLI_DEFAULTS_CENTOS,
"amazon": CLI_DEFAULTS_CENTOS,
"amzn": CLI_DEFAULTS_CENTOS,
"gentoo": CLI_DEFAULTS_DEFAULT,
"gentoo base system": CLI_DEFAULTS_DEFAULT,
"darwin": CLI_DEFAULTS_DARWIN,
"opensuse": CLI_DEFAULTS_DEFAULT,
"suse": CLI_DEFAULTS_DEFAULT,
}
"""CLI defaults."""
# Start system
START_INITD = 'init.d'
START_SYSTEMD = 'systemd'
# Pkg manager
PKG_YUM = 'yum'
PKG_APT = 'apt-get'
FAMILY_REDHAT = 'redhat'
FAMILY_DEBIAN = 'debian'
# redhat / debian
YUMS = ['redhat', 'fedora', 'centos', 'rhel', 'amzn', 'amazon']
DEBS = ['debian', 'ubuntu', 'kali']
class OSInfo(object):
    """OS information, name, version, like - similarity"""

    # Attribute names in the order they appear in the JSON serialisation.
    _JSON_FIELDS = ('name', 'long_name', 'version_major', 'version', 'like',
                    'family', 'packager', 'start_system', 'has_os_release',
                    'fallback_detection')

    def __init__(self, name=None, version=None, version_major=None, like=None, family=None,
                 packager=None, start_system=None, has_os_release=False, fallback_detection=False, long_name=None,
                 *args, **kwargs):
        self.name = name
        self.long_name = long_name
        self.version_major = version_major
        self.version = version
        self.like = like
        self.family = family
        self.packager = packager
        self.start_system = start_system
        self.has_os_release = has_os_release
        self.fallback_detection = fallback_detection

    def __str__(self):
        return 'OSInfo(%r)' % json.dumps(self.to_json())

    # repr and str are intentionally identical.
    __repr__ = __str__

    def to_json(self):
        """
        Converts to the JSON
        :return:
        """
        return collections.OrderedDict(
            (field, getattr(self, field)) for field in self._JSON_FIELDS)
class PackageInfo(object):
    """
    Basic information about particular package
    """

    def __init__(self, name, version, arch, repo, size=None, section=None):
        self._version = None
        self.name = name
        self.version = version  # goes through the property setter -> Version()
        self.arch = arch
        self.repo = repo
        self.size = size
        self.section = section

    @property
    def version(self):
        """Package version as a comparable Version object."""
        return self._version

    @version.setter
    def version(self, val):
        # Normalise whatever the caller passes into a Version instance.
        self._version = Version(val)

    def __str__(self):
        return '%s-%s.%s' % (self.name, self.version, self.arch)

    def __repr__(self):
        return 'PackageInfo(name=%r, version=%r, arch=%r, repo=%r, size=%r, section=%r)' \
               % (self.name, self.version, self.arch, self.repo, self.size, self.section)

    def to_json(self):
        """
        Converts to the JSON
        :return:
        """
        js = collections.OrderedDict([
            ('name', self.name),
            ('version', str(self.version)),
            ('arch', self.arch),
            ('repo', self.repo),
        ])
        # Optional fields are emitted only when set.
        for optional in ('size', 'section'):
            value = getattr(self, optional)
            if value is not None:
                js[optional] = value
        return js

    @classmethod
    def from_json(cls, js):
        """
        Converts json dict to the object
        :param js:
        :return:
        """
        obj = cls(name=js['name'], version=js['version'], arch=js['arch'], repo=js['repo'])
        for optional in ('size', 'section'):
            if optional in js:
                setattr(obj, optional, js[optional])
        return obj
def get_os():
    """
    Returns basic information about the OS.

    Detection order: systemd /etc/os-release first, then the legacy
    /etc/redhat-release, /etc/debian_version and /etc/issue files when
    os-release left gaps, then "like"/family derivation, major-version
    extraction, packager and init-system probes.
    NOTE(review): the os-release path sets ros.like to a *list* of IDs
    while the fallback detectors set a plain string -- downstream code
    appears to handle both, but confirm.
    :return: OSInfo
    """
    # At first - parse os-release
    ros = OSInfo()
    os_release_path = '/etc/os-release'
    if os.path.isfile(os_release_path):
        ros.name = _get_systemd_os_release_var("ID", filepath=os_release_path)
        ros.version = _get_systemd_os_release_var("VERSION_ID", filepath=os_release_path)
        ros.like = _get_systemd_os_release_var("ID_LIKE", os_release_path).split(" ")
        ros.long_name = _get_systemd_os_release_var("PRETTY_NAME", filepath=os_release_path)
        ros.has_os_release = True

        if not ros.long_name:
            ros.long_name = _get_systemd_os_release_var("NAME", filepath=os_release_path)

    # Try /etc/redhat-release and /etc/debian_version
    if not ros.has_os_release or ros.like is None or ros.version is None or ros.name is None:
        os_redhat_release(ros)
        os_debian_version(ros)
        os_issue(ros)

        # like detection
        os_like_detect(ros)

    os_family_detect(ros)

    # Major version
    os_major_version(ros)

    # Packager detection - yum / apt-get
    os_packager(ros)

    # Start system - init.d / systemd
    os_start_system(ros)

    return ros
def os_family_detect(ros):
    """
    OS Family (redhat, debian, ...)

    Fills ros.family from the ID_LIKE list first, then refines from the
    distribution name, mirroring os_packager().  Two guards are fixed
    relative to the original: ros.like may be None (it was fed to
    util.startswith unguarded) and the name-based branch was gated on
    ``ros.family is not None`` instead of ``ros.name is not None``,
    which both skipped name-based detection when "like" was absent and
    crashed on a missing name.
    :param ros:
    :return:
    """
    if ros.like is not None:
        if util.startswith(ros.like, YUMS):
            ros.family = FAMILY_REDHAT
        if util.startswith(ros.like, DEBS):
            ros.family = FAMILY_DEBIAN

    if ros.name is not None:
        if sum([1 for x in YUMS if ros.name.lower().startswith(x)]) > 0:
            ros.family = FAMILY_REDHAT
        if sum([1 for x in DEBS if ros.name.lower().startswith(x)]) > 0:
            ros.family = FAMILY_DEBIAN
    return
def os_packager(ros):
    """Detect the package manager (yum / apt-get) and store it on ros.

    Detection order: the ID_LIKE list (returns immediately when it
    applied), then the distribution name, then filesystem probes for
    /etc/yum and /etc/apt/sources.list.
    NOTE(review): only the "like" branch returns ros; the name branch
    returns None and the probe branch falls through -- callers
    (get_os) ignore the return value, but the inconsistency is worth
    confirming before relying on the result.
    """
    if ros.like is not None:
        if util.startswith(ros.like, YUMS):
            ros.packager = PKG_YUM
        if util.startswith(ros.like, DEBS):
            ros.packager = PKG_APT
        return ros

    if ros.name is not None:
        if sum([1 for x in YUMS if ros.name.lower().startswith(x)]) > 0:
            ros.packager = PKG_YUM
        if sum([1 for x in DEBS if ros.name.lower().startswith(x)]) > 0:
            ros.packager = PKG_APT
        return

    if os.path.exists('/etc/yum'):
        ros.packager = PKG_YUM
    if os.path.exists('/etc/apt/sources.list'):
        ros.packager = PKG_APT
def os_start_system(ros):
    """Detect the init system: systemd when /etc/systemd exists, else init.d."""
    has_systemd = os.path.exists('/etc/systemd')
    ros.start_system = START_SYSTEMD if has_systemd else START_INITD
    return ros
def os_issue(ros):
    """Fill ros.long_name / ros.version from the first line of /etc/issue.

    Strips getty escape sequences (backslash codes like \\n, \\l), then
    tries "<name> release <version>" and "<name> <x.y> [LTS]" formats;
    falls back to storing the whole cleaned line as the long name.
    """
    if os.path.exists('/etc/issue'):
        with open('/etc/issue', 'r') as fh:
            issue = fh.readline().strip()
            # Remove getty escapes such as \n \l before matching.
            issue = re.sub(r'\\[a-z]', '', issue).strip()

            match1 = re.match(r'^(.+?)\s+release\s+(.+?)$', issue, re.IGNORECASE)
            match2 = re.match(r'^(.+?)\s+([0-9.]+)\s*(LTS)?$', issue, re.IGNORECASE)
            if match1:
                ros.long_name = match1.group(1).strip()
                ros.version = match1.group(2).strip()
            elif match2:
                ros.long_name = match2.group(1).strip()
                ros.version = match2.group(2).strip()
            else:
                ros.long_name = issue
    return ros
def os_debian_version(ros):
    """Detect a Debian-family system from /etc/debian_version.

    Marks the OS as debian-like and uses the file's first line as the
    version unless one was already detected.
    NOTE(review): sets ros.like to the string 'debian', whereas the
    os-release path stores a list of IDs -- confirm consumers accept both.
    """
    if os.path.exists('/etc/debian_version'):
        with open('/etc/debian_version', 'r') as fh:
            debver = fh.readline().strip()
            ros.like = 'debian'
            ros.family = FAMILY_DEBIAN

            if ros.version is None:
                ros.version = debver.strip()
    return ros
def os_redhat_release(ros):
    """Detect a RedHat-family system from /etc/redhat-release.

    Parses "<name> release <version>" from the file's first line; when
    the line does not match, the whole line becomes the long name.
    NOTE(review): sets ros.like to the string 'redhat', whereas the
    os-release path stores a list of IDs -- confirm consumers accept both.
    """
    if os.path.exists('/etc/redhat-release'):
        with open('/etc/redhat-release', 'r') as fh:
            redhatrel = fh.readline().strip()
            ros.like = 'redhat'
            ros.family = FAMILY_REDHAT

            match = re.match(r'^(.+?)\s+release\s+(.+?)$', redhatrel, re.IGNORECASE)
            if match is not None:
                ros.long_name = match.group(1).strip()
                ros.version = match.group(2).strip()
            else:
                ros.long_name = redhatrel
    return ros
def os_like_detect(ros):
    """Derive ros.like from the FLAVORS lookup table.

    Tries the short name first, then the pretty/long name; leaves
    ros.like untouched once any value is set or when neither name is
    known.  Uses dict.get() instead of the original bare ``except:``
    clauses so unrelated errors are no longer silently swallowed.
    :param ros:
    :return:
    """
    if not ros.like and ros.name is not None:
        found = FLAVORS.get(ros.name.lower())
        if found:
            ros.like = found

    if not ros.like and ros.long_name is not None:
        found = FLAVORS.get(ros.long_name.lower())
        if found:
            ros.like = found

    return ros
def os_major_version(ros):
    """Extract the major version (e.g. "7" from "7.4.1708") into ros.version_major.

    Only the text before the first "." or "/" counts; a version without
    such a separator leaves version_major unchanged.
    """
    version = ros.version
    if version is not None:
        head = re.match(r'(.+?)[/.]', version)
        if head is not None:
            ros.version_major = head.group(1)
    return ros
def get_os_info(filepath="/etc/os-release"):
    """
    Get OS name and version

    :param str filepath: File path of os-release file
    :returns: (os_name, os_version)
    :rtype: `tuple` of `str`
    """
    if os.path.isfile(filepath):
        # Systemd os-release parsing might be viable
        os_name, os_version = get_systemd_os_info(filepath=filepath)
        if os_name:
            return (os_name, os_version)

    # Fallback to platform module (see get_python_os_info)
    return get_python_os_info()
def get_os_info_ua(filepath="/etc/os-release"):
    """
    Get OS name and version string for User Agent

    :param str filepath: File path of os-release file
    :returns: os_ua
    :rtype: `str`
    """
    if os.path.isfile(filepath):
        # Prefer the pretty name; fall back to the plain NAME variable.
        for variable in ("PRETTY_NAME", "NAME"):
            os_ua = _get_systemd_os_release_var(variable, filepath=filepath)
            if os_ua:
                return os_ua

    # Fallback: join the platform-module name and version.
    return " ".join(get_python_os_info())
def get_systemd_os_info(filepath="/etc/os-release"):
    """
    Parse systemd /etc/os-release for distribution information

    :param str filepath: File path of os-release file
    :returns: (os_name, os_version)
    :rtype: `tuple` of `str`
    """
    return (_get_systemd_os_release_var("ID", filepath=filepath),
            _get_systemd_os_release_var("VERSION_ID", filepath=filepath))
def get_systemd_os_like(filepath="/etc/os-release"):
    """
    Get a list of strings that indicate the distribution likeness to
    other distributions.

    :param str filepath: File path of os-release file
    :returns: List of distribution acronyms
    :rtype: `list` of `str`
    """
    like_value = _get_systemd_os_release_var("ID_LIKE", filepath)
    return like_value.split(" ")
def _get_systemd_os_release_var(varname, filepath="/etc/os-release"):
    """
    Get single value from systemd /etc/os-release

    :param str varname: Name of variable to fetch
    :param str filepath: File path of os-release file
    :returns: requested value, "" when the file or variable is missing
    :rtype: `str`
    """
    prefix = varname + "="
    if not os.path.isfile(filepath):
        return ""

    with open(filepath, 'r') as fh:
        for raw_line in fh:
            line = raw_line.strip()
            if line.startswith(prefix):
                # Return the value of var, normalized (quotes stripped etc.)
                return normalize_string(line[len(prefix):])
    return ""
def get_python_os_info():
    """
    Get Operating System type/distribution and major version
    using python platform module

    NOTE(review): relies on platform.linux_distribution(), which was
    removed in Python 3.8 -- this code path targets Python 2 / <=3.7.
    The darwin branch also rstrips '\n' from subprocess output, which
    only works on str (py2) output, not bytes.
    :returns: (os_name, os_version)
    :rtype: `tuple` of `str`
    """
    info = platform.system_alias(
        platform.system(),
        platform.release(),
        platform.version()
    )
    os_type, os_ver, _ = info
    os_type = os_type.lower()
    if os_type.startswith('linux'):
        info = platform.linux_distribution()
        # On arch, platform.linux_distribution() is reportedly ('','',''),
        # so handle it defensively
        if info[0]:
            os_type = info[0]
        if info[1]:
            os_ver = info[1]
    elif os_type.startswith('darwin'):
        # macOS: ask sw_vers for the product version (e.g. "10.13.6").
        os_ver = subprocess.Popen(
            ["sw_vers", "-productVersion"],
            stdout=subprocess.PIPE
        ).communicate()[0].rstrip('\n')
    elif os_type.startswith('freebsd'):
        # eg "9.3-RC3-p1"
        os_ver = os_ver.partition("-")[0]
        os_ver = os_ver.partition(".")[0]
    elif platform.win32_ver()[1]:
        os_ver = platform.win32_ver()[1]
    else:
        # Cases known to fall here: Cygwin python
        os_ver = ''
    return os_type, os_ver
def os_like(key):
    """
    Tries to transform OS ID to LIKE_ID

    :param key:
    :return: string or None when the ID is unknown
    """
    return FLAVORS.get(key.lower())
def os_constant(key):
    """
    Get a constant value for operating system

    Looks up CLI_DEFAULTS by the detected OS name, falling back to the
    ID_LIKE-based constants and finally the generic defaults.
    NOTE(review): calls get_os_info() (the (name, version) tuple helper),
    not get_os() -- easy to confuse given the similar names.
    :param key: name of cli constant
    :return: value of constant for active os
    """
    os_info = get_os_info()
    try:
        constants = CLI_DEFAULTS[os_info[0].lower()]
    except KeyError:
        constants = os_like_constants()
        if not constants:
            constants = CLI_DEFAULTS["default"]
    return constants[key]
def os_like_constants():
    """
    Try to get constants for distribution with
    similar layout and configuration, indicated by
    /etc/os-release variable "LIKE"

    The local variable is renamed from ``os_like``, which shadowed the
    module-level os_like() function, and the redundant ``.keys()`` /
    emptiness pre-check are dropped (iterating an empty list is a no-op).
    :returns: Constants dictionary, {} when nothing similar is known
    :rtype: `dict`
    """
    like_ids = get_systemd_os_like()
    for os_name in like_ids:
        if os_name in CLI_DEFAULTS:
            return CLI_DEFAULTS[os_name]
    return {}
def get_yum_packages(out):
    """
    List of all packages parsing

    Parses ``yum list``-style output lines of the form
    "name.arch  version  repo" into PackageInfo objects; lines that do
    not match are skipped.
    NOTE(review): ``types.ListType`` exists only on Python 2 -- this
    module targets py2 (see the __future__ import at the top).
    :param out: command output as a string or list of lines
    :return: list of PackageInfo
    """
    ret = []
    lines = out if isinstance(out, types.ListType) else out.split('\n')
    for line in lines:
        line = line.strip()
        # "name[.arch]  version  repo" -- three whitespace-separated columns.
        match = re.match(r'^([a-zA-Z0-9.\-_]+)[\s\t]+([a-zA-Z0-9.:\-_]+)[\s\t]+([@a-zA-Z0-9.\-_]+)$', line)
        if match is None:
            continue

        package = match.group(1).strip()
        version = match.group(2).strip()
        repo = match.group(3).strip()
        arch = None

        # Architecture extract (trailing ".arch" suffix of the name column)
        match_arch = re.match(r'^(.+?)\.([^.]+)$', package)
        if match_arch:
            package = match_arch.group(1).strip()
            arch = match_arch.group(2).strip()

        pkg = PackageInfo(name=package, version=version, arch=arch, repo=repo)
        ret.append(pkg)
    return ret
def get_yum_packages_update(out):
    """
    List of packages to update parsing

    Parses ``yum update`` transaction-summary output.  The package table
    starts after the second "=====" separator line; section headings such
    as "Installing:" / "Updating:" are tracked and attached to each
    package.  Rows are "name  arch  version  repo  size".
    NOTE(review): ``types.ListType`` is Python 2 only, as elsewhere in
    this module.
    :param out: command output as a string or list of lines
    :return: list of PackageInfo with size/section filled in
    """
    ret = []
    eqline = 0
    cur_section = None
    lines = out if isinstance(out, types.ListType) else out.split('\n')
    for line in lines:
        line = line.strip()
        if line.startswith('====='):
            eqline += 1
            continue

        # Process lines only after 2nd ====== line - should be the package list.
        if eqline != 2:
            continue

        # Section heading, e.g. "Installing:" / "Updating:"
        lmatch = re.match(r'^([a-zA-Z\s]+):$', line)
        if lmatch is not None:
            cur_section = lmatch.group(1)
            continue

        # "name  arch  version  repo  size" columns.
        match = re.match(r'^([a-zA-Z0-9.\-_]+)[\s\t]+([a-zA-Z0-9.\-_]+)[\s\t]+([a-zA-Z0-9.:\-_]+)'
                         r'[\s\t]+([@a-zA-Z0-9.:\-_]+)[\s\t]+([a-zA-Z0-9.\-_\s]+?)$', line)
        if match is None:
            continue

        package = match.group(1).strip()
        version = match.group(3).strip()
        repo = match.group(4).strip()
        arch = match.group(2).strip()
        size = match.group(5).strip()

        pkg = PackageInfo(name=package, version=version, arch=arch, repo=repo, size=size, section=cur_section)
        ret.append(pkg)
    return ret
def check_package_restrictions(yum_output_packages, allowed_packages):
    """
    Checks list of the yum output packages vs. allowed packages

    A yum package is "new" when no allowed package shares its name, and
    "conflicting" when its version exceeds the highest allowed version
    for that name.
    :param yum_output_packages: packages parsed from yum output
    :param allowed_packages: permitted packages, possibly several versions per name
    :return: (conflicting packages, new packages)
    """
    conflicting, new = [], []
    for candidate in yum_output_packages:
        same_name = [p for p in allowed_packages if p.name == candidate.name]
        if not same_name:
            new.append(candidate)
            continue

        # Compare against the highest allowed version for this name.
        best_allowed = max(same_name, key=lambda p: p.version)
        if candidate.version > best_allowed.version:
            conflicting.append(candidate)
    return conflicting, new
def package_diff(a, b, only_in_b=False):
    """
    Package diff a - b

    A package from *a* is dropped when *b* contains the same
    name+architecture at an equal or higher version.  With
    only_in_b=True, packages absent from *b* entirely are also dropped
    (keep only genuine upgrades of something present in *b*).
    Used for removing already installed packages (b) from the packages
    to install (a).
    :param a:
    :param b:
    :param only_in_b: if True the element in a has to be in the b in the lower version.
    :return:
    """
    kept = []
    for candidate in a:
        matches = [p for p in b
                   if p.name == candidate.name and p.arch == candidate.arch]

        # Brand new package, not present in b at all.
        if not matches:
            if not only_in_b:
                kept.append(candidate)
            continue

        # b contains a lower version only -> candidate survives the diff.
        newest_in_b = max(matches, key=lambda p: p.version)
        if newest_in_b.version < candidate.version:
            kept.append(candidate)
    return kept
| {
"content_hash": "25f343afab5c8e8efdfd30b635233cc7",
"timestamp": "",
"source": "github",
"line_count": 662,
"max_line_length": 114,
"avg_line_length": 27.71752265861027,
"alnum_prop": 0.5760531909095864,
"repo_name": "EnigmaBridge/ebstall.py",
"id": "5ceac0030f36df09f41ec05f7d5af93d85e49034",
"size": "18396",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ebstall/osutil.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "PHP",
"bytes": "277"
},
{
"name": "Python",
"bytes": "603787"
},
{
"name": "Shell",
"bytes": "30275"
}
],
"symlink_target": ""
} |
# Package initialiser: re-exports the core API at package level.
__all__ = ['constants','ising','models','phi','core','utils','plotting']

from .core import *

__author__ = "Alan R. Lowe"
__email__ = "a.lowe@ucl.ac.uk"
# NOTE(review): `constants` is never imported in this module; the line
# below relies on `from .core import *` (or core's own imports) putting
# it into scope -- confirm.
__version__ = constants.VERSION
| {
"content_hash": "6b847b12b8bbdd2b44caca2f873f6c7d",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 72,
"avg_line_length": 26.571428571428573,
"alnum_prop": 0.6021505376344086,
"repo_name": "quantumjot/PyFolding",
"id": "d5728088c698b82ce48372822b9452d821bc2b9b",
"size": "186",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyfolding/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "129818"
}
],
"symlink_target": ""
} |
from flask import Blueprint

# Blueprint for the alert subsystem, mounted under /alert.
info = Blueprint('alert_system', __name__, url_prefix='/alert')


@info.context_processor
def reuse_config():
    # return dict(coordinator=current_user.coordinator)
    # NOTE(review): a Flask context processor is expected to return a
    # dict; this stub returns None (bare return) -- confirm whether it
    # should return {} until implemented.
    return


@info.route('/', methods=['GET', 'POST'])
def index():
    # Stub endpoint: returns None, which Flask treats as an invalid
    # response -- presumably a placeholder to be implemented.
    return
"content_hash": "a1b835a6b013c210f699d00ed3332365",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 63,
"avg_line_length": 22.583333333333332,
"alnum_prop": 0.6863468634686347,
"repo_name": "leitelm/RISE_scada",
"id": "3651a709a355b95cbb82aa0776775fb4ce6b6090",
"size": "286",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/alert_system/controller.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "435649"
},
{
"name": "HTML",
"bytes": "72668"
},
{
"name": "JavaScript",
"bytes": "127162"
},
{
"name": "Python",
"bytes": "4846"
}
],
"symlink_target": ""
} |
# URL configuration (legacy Django 1.x style: patterns() was removed in 1.10).
from django.conf.urls import patterns, include, url

# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()

urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'crawler.views.home', name='home'),
    # url(r'^crawler/', include('crawler.foo.urls')),

    # Uncomment the admin/doc line below to enable admin documentation:
    url(r'^admin/doc/', include('django.contrib.admindocs.urls')),

    # Uncomment the next line to enable the admin:
    url(r'^admin/', include(admin.site.urls)),
    url(r'^',include('spider.urls',namespace='spider')),
    url(r'^account/',include('account.urls',namespace='account')),
)

# Static file serving and comments.
# NOTE(review): ('') below is just the empty string (parentheses do not
# make a tuple) -- it works like the '' prefix above but reads as if a
# tuple were intended.  The static document_root is a hard-coded
# machine-specific path; confirm it is meant for development only.
urlpatterns += patterns((''),
    (r'^static/(?P<path>.*)$', 'django.views.static.serve',
        {'document_root': '/home/gs/blog/static'}
    ),
    (r'^comments/', include('django.contrib.comments.urls')),
)
"content_hash": "4a9996d5383473a6e702c03c1a557e7b",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 71,
"avg_line_length": 35.56,
"alnum_prop": 0.6546681664791901,
"repo_name": "seraph0017/max-x.net",
"id": "1127831d399b6b869b9a1ebf62fd0cf41049227f",
"size": "889",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "crawler/crawler/urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "49946"
},
{
"name": "JavaScript",
"bytes": "95948"
},
{
"name": "Python",
"bytes": "55794"
},
{
"name": "Shell",
"bytes": "1081"
}
],
"symlink_target": ""
} |
import psycopg2
import networkx as nx

# Connect to the MusicBrainz database and load graph from disk
connection = psycopg2.connect(database="musicbrainz", user="musicbrainz", password="", host="musicbrainz", port="5432")
cursor = connection.cursor()
graph = nx.read_gexf("graph/sdn-unweighted.gexf")

# Prepare the database
cursor.execute("DROP TABLE IF EXISTS snoopdogg_number_bfs;")
cursor.execute("""
CREATE TABLE snoopdogg_number_bfs (
    artist TEXT NOT NULL,
    distance INTEGER NOT NULL,
    path TEXT NOT NULL,
    PRIMARY KEY(artist)
);
""")

# Initialize dictionary with the Snoop Dogg as the base case
# TODO: Create class for storing artists' SDN and path.
sdn = {"Snoop Dogg" : (0, ["Snoop Dogg"])}

# Traverse the graph breadth-first and compute every artist's Snoop Dogg Number in O(V + E)
# Each BFS edge (parent, child) extends the parent's already-known
# shortest distance/path by one hop.
for edge in nx.bfs_edges(graph, "Snoop Dogg"):
    parent = edge[0]
    child = edge[1]
    dist_to_snoopdogg = sdn[parent][0] + 1
    path_to_snoopdogg = sdn[parent][1] + [child]
    sdn[child] = (dist_to_snoopdogg, path_to_snoopdogg)

# Insert the data via one long query - this is an order of magnitude faster than one query per row
# NOTE(review): sdn[artist][1] is a Python list; psycopg2 adapts lists to
# SQL ARRAY syntax, while the `path` column is TEXT -- confirm the insert
# actually succeeds, or whether the path should be joined into a string.
data_string = ','.join(cursor.mogrify('(%s,%s,%s)', (artist, sdn[artist][0], sdn[artist][1])) for artist in sdn) # mogrify requires python2
cursor.execute('INSERT INTO snoopdogg_number_bfs VALUES ' + data_string)

# TODO: Run query that adds all the artists from "nodes" table that have no path to Snoop Dogg.

# Apply all changes to the database
connection.commit()
connection.close()

print("Done!")
| {
"content_hash": "38eea6ef26fa350e3eedebde0544c4ad",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 139,
"avg_line_length": 38.4390243902439,
"alnum_prop": 0.7017766497461929,
"repo_name": "basimr/snoop-dogg-number",
"id": "d6d4963b67d8067d78bbbe32d026ae2a20bc60f7",
"size": "2137",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "compute_snoopdogg_number_bfs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "577"
},
{
"name": "Python",
"bytes": "17183"
}
],
"symlink_target": ""
} |
from pyexcel_io.sheet import SheetReader
from pyexcel_io.plugins import NEW_READERS
from pyexcel_io._compact import OrderedDict
def clean_keywords(keywords):
    """Split user keywords into sheet-level and native-reader keywords.

    Keys understood by the sheet layer (start_row, row_limit, ...) go
    into the first dict; everything else is passed through untouched to
    the underlying file-type reader.
    """
    sheet_args = frozenset((
        "start_row",
        "row_limit",
        "start_column",
        "column_limit",
        "skip_column_func",
        "skip_row_func",
        "skip_empty_rows",
        "row_renderer",
        "keep_trailing_empty_cells",
    ))
    sheet_keywords = {}
    native_sheet_keywords = {}
    for key, value in keywords.items():
        target = sheet_keywords if key in sheet_args else native_sheet_keywords
        target[key] = value
    return sheet_keywords, native_sheet_keywords
class Reader(object):
    """Facade that locates a file-type specific reader plugin and reads
    sheets from a file name, raw content or a stream.

    Plugin lookup is deferred until one of the open* methods runs;
    ``reader_class`` may be set externally beforehand to bypass the
    plugin registry.
    """

    def __init__(self, file_type, library=None):
        self.file_type = file_type
        self.library = library
        self.keywords = None
        # if you know which reader class to use, this attribute allows
        # you to set reader class externally. Since there is no
        # so call private field in Python, I am not going to create
        # useless setter and getter functions like Java.
        # in pyexcel, this attribute is mainly used for testing
        self.reader_class = None

    def open(self, file_name, **keywords):
        """Open a file on disk; returns the underlying plugin reader."""
        if self.reader_class is None:
            self.reader_class = NEW_READERS.get_a_plugin(
                self.file_type, location="file", library=self.library
            )
        self.keywords, native_sheet_keywords = clean_keywords(keywords)
        self.reader = self.reader_class(
            file_name, self.file_type, **native_sheet_keywords
        )
        return self.reader

    def open_content(self, file_content, **keywords):
        """Open in-memory file content; returns the underlying plugin reader."""
        self.keywords, native_sheet_keywords = clean_keywords(keywords)
        if self.reader_class is None:
            self.reader_class = NEW_READERS.get_a_plugin(
                self.file_type, location="content", library=self.library
            )
        self.reader = self.reader_class(
            file_content, self.file_type, **native_sheet_keywords
        )
        return self.reader

    def open_stream(self, file_stream, **keywords):
        """Open a file-like stream; returns the underlying plugin reader."""
        self.keywords, native_sheet_keywords = clean_keywords(keywords)
        if self.reader_class is None:
            self.reader_class = NEW_READERS.get_a_plugin(
                self.file_type, location="memory", library=self.library
            )
        self.reader = self.reader_class(
            file_stream, self.file_type, **native_sheet_keywords
        )
        return self.reader

    def read_sheet_by_name(self, sheet_name):
        """
        read a named sheet from a excel data book
        """
        sheet_names = self.reader.sheet_names()
        # Raises ValueError when the name is unknown (list.index semantics).
        index = sheet_names.index(sheet_name)
        return self.read_sheet_by_index(index)

    def read_sheet_by_index(self, sheet_index):
        """Read one sheet; returns {sheet_name: two-dimensional array}."""
        sheet_reader = self.reader.read_sheet(sheet_index)
        sheet_names = self.reader.sheet_names()
        sheet = EncapsulatedSheetReader(sheet_reader, **self.keywords)
        return {sheet_names[sheet_index]: sheet.to_array()}

    def read_all(self):
        """
        read everything from a excel data book
        """
        result = OrderedDict()
        for sheet_index in range(len(self.reader)):
            content_dict = self.read_sheet_by_index(sheet_index)
            result.update(content_dict)
        return result

    def read_many(self, sheets):
        """
        read the selected sheets from a excel data book; each entry in
        *sheets* is either an integer index or a sheet name
        """
        result = OrderedDict()
        for sheet in sheets:
            if isinstance(sheet, int):
                result.update(self.read_sheet_by_index(sheet))
            else:
                result.update(self.read_sheet_by_name(sheet))
        return result

    def close(self):
        """Close the underlying plugin reader."""
        return self.reader.close()

    def __enter__(self):
        return self

    def __exit__(self, a_type, value, traceback):
        self.close()
class EncapsulatedSheetReader(SheetReader):
    """SheetReader adapter that delegates iteration to the wrapped native sheet."""

    def row_iterator(self):
        yield from self._native_sheet.row_iterator()

    def column_iterator(self, row):
        yield from self._native_sheet.column_iterator(row)
| {
"content_hash": "e23812fbaa92ca8fc392f9d45c4d6b9f",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 72,
"avg_line_length": 33.17460317460318,
"alnum_prop": 0.6035885167464115,
"repo_name": "chfw/pyexcel-io",
"id": "f0276fac849e5bd88a4323b0f19cb1b22ef8eb36",
"size": "4180",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pyexcel_io/reader.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "129"
},
{
"name": "Makefile",
"bytes": "145"
},
{
"name": "Python",
"bytes": "109782"
},
{
"name": "Shell",
"bytes": "152"
}
],
"symlink_target": ""
} |
"""
Fine-tuning XLNet for question answering with beam search using a slightly adapted version of the 🤗 Trainer.
"""
# You can also adapt this script on your own question answering task. Pointers for this are left as comments.
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
from datasets import load_dataset
import evaluate
import transformers
from trainer_qa import QuestionAnsweringTrainer
from transformers import (
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TrainingArguments,
XLNetConfig,
XLNetForQuestionAnswering,
XLNetTokenizerFast,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
from utils_qa import postprocess_qa_predictions_with_beam_search
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.25.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/question-answering/requirements.txt")
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    # Required: checkpoint path or hub identifier of the XLNet model to fine-tune.
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    # Optional overrides; when None the model_name_or_path is reused.
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    # Git revision on the hub (branch, tag or commit id) to download from.
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    # Either a hub dataset name or local train/validation/test files must be
    # provided; __post_init__ enforces this.
    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    test_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input test data file to test the perplexity on (a text file)."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_seq_length: int = field(
        default=384,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    pad_to_max_length: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. If False, will pad the samples dynamically when"
                " batching to the maximum length in the batch (which can be faster on GPU but will be slower on TPU)."
            )
        },
    )
    # The three max_*_samples knobs exist purely for debugging / smoke tests.
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
    # SQuAD v2-style setting: examples may legitimately have no answer.
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, some of the examples do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0,
        metadata={
            "help": (
                "The threshold used to select the null answer: if the best answer has a score that is less than "
                "the score of the null answer minus this threshold, the null answer is selected for this example. "
                "Only useful when `version_2_with_negative=True`."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    n_best_size: int = field(
        default=20,
        metadata={"help": "The total number of n-best predictions to generate when looking for an answer."},
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )

    def __post_init__(self):
        # Validate the data-source arguments: at least one of dataset_name or
        # a local file must be given, and local files must be csv/json.
        if (
            self.dataset_name is None
            and self.train_file is None
            and self.validation_file is None
            and self.test_file is None
        ):
            raise ValueError("Need either a dataset name or a training/validation/test file.")
        else:
            if self.train_file is not None:
                extension = self.train_file.split(".")[-1]
                assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            if self.validation_file is not None:
                extension = self.validation_file.split(".")[-1]
                assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
            if self.test_file is not None:
                extension = self.test_file.split(".")[-1]
                assert extension in ["csv", "json"], "`test_file` should be a csv or a json file."
def main():
    """Fine-tune XLNet for extractive QA with beam-search decoding.

    Parses model/data/training arguments, loads the dataset and pretrained
    model, tokenizes with overflowing windows, then runs the requested
    train / eval / predict phases through ``QuestionAnsweringTrainer``.
    """
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_qa_beam_search", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    # NOTE(review): the two f-strings are concatenated without a separator, so
    # the message reads "...n_gpu: 1distributed training..." — cosmetic only.
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).
    #
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
            extension = data_args.train_file.split(".")[-1]
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
            extension = data_args.validation_file.split(".")[-1]
        if data_args.test_file is not None:
            data_files["test"] = data_args.test_file
            extension = data_args.test_file.split(".")[-1]
        raw_datasets = load_dataset(
            extension,
            data_files=data_files,
            field="data",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = XLNetConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = XLNetTokenizerFast.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = XLNetForQuestionAnswering.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # Preprocessing the datasets.
    # Preprocessing is slighlty different for training and evaluation.
    if training_args.do_train:
        column_names = raw_datasets["train"].column_names
    elif training_args.do_eval:
        column_names = raw_datasets["validation"].column_names
    else:
        column_names = raw_datasets["test"].column_names
    # Fall back to positional columns when the SQuAD-style names are absent.
    question_column_name = "question" if "question" in column_names else column_names[0]
    context_column_name = "context" if "context" in column_names else column_names[1]
    answer_column_name = "answers" if "answers" in column_names else column_names[2]

    # Padding side determines if we do (question|context) or (context|question).
    pad_on_right = tokenizer.padding_side == "right"

    if data_args.max_seq_length > tokenizer.model_max_length:
        logger.warning(
            f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
            f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
        )
    max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    # Training preprocessing
    def prepare_train_features(examples):
        """Tokenize a batch of training examples and label answer spans."""
        # Some of the questions have lots of whitespace on the left, which is not useful and will make the
        # truncation of the context fail (the tokenized question will take a lots of space). So we remove that
        # left whitespace
        examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]

        # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
        # in one example possible giving several features when a context is long, each of those features having a
        # context that overlaps a bit the context of the previous feature.
        tokenized_examples = tokenizer(
            examples[question_column_name if pad_on_right else context_column_name],
            examples[context_column_name if pad_on_right else question_column_name],
            truncation="only_second" if pad_on_right else "only_first",
            max_length=max_seq_length,
            stride=data_args.doc_stride,
            return_overflowing_tokens=True,
            return_offsets_mapping=True,
            return_special_tokens_mask=True,
            return_token_type_ids=True,
            padding="max_length",
        )

        # Since one example might give us several features if it has a long context, we need a map from a feature to
        # its corresponding example. This key gives us just that.
        sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")
        # The offset mappings will give us a map from token to character position in the original context. This will
        # help us compute the start_positions and end_positions.
        offset_mapping = tokenized_examples.pop("offset_mapping")
        # The special tokens will help us build the p_mask (which indicates the tokens that can't be in answers).
        special_tokens = tokenized_examples.pop("special_tokens_mask")

        # Let's label those examples!
        tokenized_examples["start_positions"] = []
        tokenized_examples["end_positions"] = []
        tokenized_examples["is_impossible"] = []
        tokenized_examples["cls_index"] = []
        tokenized_examples["p_mask"] = []

        for i, offsets in enumerate(offset_mapping):
            # We will label impossible answers with the index of the CLS token.
            input_ids = tokenized_examples["input_ids"][i]
            cls_index = input_ids.index(tokenizer.cls_token_id)
            tokenized_examples["cls_index"].append(cls_index)

            # Grab the sequence corresponding to that example (to know what is the context and what is the question).
            sequence_ids = tokenized_examples["token_type_ids"][i]
            # Mark special tokens with segment id 3 so they are excluded below.
            for k, s in enumerate(special_tokens[i]):
                if s:
                    sequence_ids[k] = 3
            context_idx = 1 if pad_on_right else 0

            # Build the p_mask: non special tokens and context gets 0.0, the others get 1.0.
            # The cls token gets 1.0 too (for predictions of empty answers).
            tokenized_examples["p_mask"].append(
                [
                    0.0 if (not special_tokens[i][k] and s == context_idx) or k == cls_index else 1.0
                    for k, s in enumerate(sequence_ids)
                ]
            )

            # One example can give several spans, this is the index of the example containing this span of text.
            sample_index = sample_mapping[i]
            answers = examples[answer_column_name][sample_index]
            # If no answers are given, set the cls_index as answer.
            if len(answers["answer_start"]) == 0:
                tokenized_examples["start_positions"].append(cls_index)
                tokenized_examples["end_positions"].append(cls_index)
                tokenized_examples["is_impossible"].append(1.0)
            else:
                # Start/end character index of the answer in the text.
                start_char = answers["answer_start"][0]
                end_char = start_char + len(answers["text"][0])

                # Start token index of the current span in the text.
                token_start_index = 0
                while sequence_ids[token_start_index] != context_idx:
                    token_start_index += 1

                # End token index of the current span in the text.
                token_end_index = len(input_ids) - 1
                while sequence_ids[token_end_index] != context_idx:
                    token_end_index -= 1
                # Detect if the answer is out of the span (in which case this feature is labeled with the CLS index).
                if not (offsets[token_start_index][0] <= start_char and offsets[token_end_index][1] >= end_char):
                    tokenized_examples["start_positions"].append(cls_index)
                    tokenized_examples["end_positions"].append(cls_index)
                    tokenized_examples["is_impossible"].append(1.0)
                else:
                    # Otherwise move the token_start_index and token_end_index to the two ends of the answer.
                    # Note: we could go after the last offset if the answer is the last word (edge case).
                    while token_start_index < len(offsets) and offsets[token_start_index][0] <= start_char:
                        token_start_index += 1
                    tokenized_examples["start_positions"].append(token_start_index - 1)
                    while offsets[token_end_index][1] >= end_char:
                        token_end_index -= 1
                    tokenized_examples["end_positions"].append(token_end_index + 1)
                    tokenized_examples["is_impossible"].append(0.0)

        return tokenized_examples

    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            # Select samples from Dataset, This will help to decrease processing time
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        # Create Training Features
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                prepare_train_features,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                remove_columns=column_names,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on train dataset",
            )
        if data_args.max_train_samples is not None:
            # Select samples from dataset again since Feature Creation might increase number of features
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))

    # Validation preprocessing
    def prepare_validation_features(examples):
        """Tokenize eval/test examples, keeping offsets and example ids for post-processing."""
        # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
        # in one example possible giving several features when a context is long, each of those features having a
        # context that overlaps a bit the context of the previous feature.
        tokenized_examples = tokenizer(
            examples[question_column_name if pad_on_right else context_column_name],
            examples[context_column_name if pad_on_right else question_column_name],
            truncation="only_second" if pad_on_right else "only_first",
            max_length=max_seq_length,
            stride=data_args.doc_stride,
            return_overflowing_tokens=True,
            return_offsets_mapping=True,
            return_special_tokens_mask=True,
            return_token_type_ids=True,
            padding="max_length",
        )

        # Since one example might give us several features if it has a long context, we need a map from a feature to
        # its corresponding example. This key gives us just that.
        sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")

        # The special tokens will help us build the p_mask (which indicates the tokens that can't be in answers).
        special_tokens = tokenized_examples.pop("special_tokens_mask")

        # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
        # corresponding example_id and we will store the offset mappings.
        tokenized_examples["example_id"] = []

        # We still provide the index of the CLS token and the p_mask to the model, but not the is_impossible label.
        tokenized_examples["cls_index"] = []
        tokenized_examples["p_mask"] = []

        for i, input_ids in enumerate(tokenized_examples["input_ids"]):
            # Find the CLS token in the input ids.
            cls_index = input_ids.index(tokenizer.cls_token_id)
            tokenized_examples["cls_index"].append(cls_index)

            # Grab the sequence corresponding to that example (to know what is the context and what is the question).
            sequence_ids = tokenized_examples["token_type_ids"][i]
            for k, s in enumerate(special_tokens[i]):
                if s:
                    sequence_ids[k] = 3
            context_idx = 1 if pad_on_right else 0

            # Build the p_mask: non special tokens and context gets 0.0, the others 1.0.
            tokenized_examples["p_mask"].append(
                [
                    0.0 if (not special_tokens[i][k] and s == context_idx) or k == cls_index else 1.0
                    for k, s in enumerate(sequence_ids)
                ]
            )

            # One example can give several spans, this is the index of the example containing this span of text.
            sample_index = sample_mapping[i]
            tokenized_examples["example_id"].append(examples["id"][sample_index])

            # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
            # position is part of the context or not.
            tokenized_examples["offset_mapping"][i] = [
                (o if sequence_ids[k] == context_idx else None)
                for k, o in enumerate(tokenized_examples["offset_mapping"][i])
            ]

        return tokenized_examples

    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_examples = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            # Selecting Eval Samples from Dataset
            max_eval_samples = min(len(eval_examples), data_args.max_eval_samples)
            eval_examples = eval_examples.select(range(max_eval_samples))
        # Create Features from Eval Dataset
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_examples.map(
                prepare_validation_features,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                remove_columns=column_names,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on validation dataset",
            )
        if data_args.max_eval_samples is not None:
            # Selecting Samples from Dataset again since Feature Creation might increase samples size
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))

    if training_args.do_predict:
        if "test" not in raw_datasets:
            raise ValueError("--do_predict requires a test dataset")
        predict_examples = raw_datasets["test"]
        if data_args.max_predict_samples is not None:
            # We will select sample from whole data
            predict_examples = predict_examples.select(range(data_args.max_predict_samples))
        # Test Feature Creation
        with training_args.main_process_first(desc="prediction dataset map pre-processing"):
            predict_dataset = predict_examples.map(
                prepare_validation_features,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                remove_columns=column_names,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on prediction dataset",
            )
        if data_args.max_predict_samples is not None:
            # During Feature creation dataset samples might increase, we will select required samples again
            max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
            predict_dataset = predict_dataset.select(range(max_predict_samples))

    # Data collator
    # We have already padded to max length if the corresponding flag is True, otherwise we need to pad in the data
    # collator.
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
    )

    # Post-processing:
    def post_processing_function(examples, features, predictions, stage="eval"):
        """Convert raw beam-search model outputs into metric-ready text predictions."""
        # Post-processing: we match the start logits and end logits to answers in the original context.
        predictions, scores_diff_json = postprocess_qa_predictions_with_beam_search(
            examples=examples,
            features=features,
            predictions=predictions,
            version_2_with_negative=data_args.version_2_with_negative,
            n_best_size=data_args.n_best_size,
            max_answer_length=data_args.max_answer_length,
            start_n_top=model.config.start_n_top,
            end_n_top=model.config.end_n_top,
            output_dir=training_args.output_dir,
            log_level=log_level,
            prefix=stage,
        )
        # Format the result to the format the metric expects.
        if data_args.version_2_with_negative:
            formatted_predictions = [
                {"id": k, "prediction_text": v, "no_answer_probability": scores_diff_json[k]}
                for k, v in predictions.items()
            ]
        else:
            formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()]

        references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
        return EvalPrediction(predictions=formatted_predictions, label_ids=references)

    metric = evaluate.load("squad_v2" if data_args.version_2_with_negative else "squad")

    def compute_metrics(p: EvalPrediction):
        """Compute SQuAD (v1 or v2) metrics for a batch of formatted predictions."""
        return metric.compute(predictions=p.predictions, references=p.label_ids)

    # Initialize our Trainer
    trainer = QuestionAnsweringTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        eval_examples=eval_examples if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
        post_process_function=post_processing_function,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        # An explicit --resume_from_checkpoint takes precedence over auto-detection.
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload

        metrics = train_result.metrics

        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate()

        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Prediction
    if training_args.do_predict:
        logger.info("*** Predict ***")
        results = trainer.predict(predict_dataset, predict_examples)
        metrics = results.metrics

        max_predict_samples = (
            data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset)
        )
        metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset))

        trainer.log_metrics("predict", metrics)
        trainer.save_metrics("predict", metrics)

    # Metadata for the auto-generated model card / hub upload.
    kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "question-answering"}
    if data_args.dataset_name is not None:
        kwargs["dataset_tags"] = data_args.dataset_name
        if data_args.dataset_config_name is not None:
            kwargs["dataset_args"] = data_args.dataset_config_name
            kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}"
        else:
            kwargs["dataset"] = data_args.dataset_name

    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    """Entry point for ``xla_spawn`` (TPU multiprocessing); *index* is the process ordinal, unused here."""
    # For xla_spawn (TPUs)
    main()
# Run the full fine-tuning pipeline when executed as a script.
if __name__ == "__main__":
    main()
| {
"content_hash": "9e87eb9bc875f30dd29d0c7411d5d613",
"timestamp": "",
"source": "github",
"line_count": 699,
"max_line_length": 119,
"avg_line_length": 46.379113018598,
"alnum_prop": 0.632283537431753,
"repo_name": "huggingface/transformers",
"id": "0c5d69629364f290b44e9b54453edd8b3b697f9a",
"size": "33064",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "examples/pytorch/question-answering/run_qa_beam_search.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "6021"
},
{
"name": "C++",
"bytes": "12959"
},
{
"name": "Cuda",
"bytes": "175419"
},
{
"name": "Dockerfile",
"bytes": "18218"
},
{
"name": "Jsonnet",
"bytes": "937"
},
{
"name": "Makefile",
"bytes": "3430"
},
{
"name": "Python",
"bytes": "35742012"
},
{
"name": "Shell",
"bytes": "30374"
}
],
"symlink_target": ""
} |
import os
import tempfile
import sys
import codecs
# Shell template that restores the submit environment, then runs the
# executor and propagates its exit status.
RUN_SCRIPT = """
#!/bin/bash
{env}
python {file}
exit $?
"""

# Both variables are injected by the submitter; a missing one is a hard
# configuration error, so a KeyError here is intentional.
INIT_ENV = os.environ["__PY_T_SUBMIT_ENV"]
ROOT_DIR = os.environ["__PY_T_SUBMIT_DIRNAME"]

EXECUTOR = os.path.join(ROOT_DIR, 'torque_executor.py')

init_file = tempfile.NamedTemporaryFile(suffix=".sh", delete=False,)
# Close the OS-level handle right away: we only need the path, and the
# original code leaked this descriptor by never closing it.
init_file.close()
try:
    with codecs.open(init_file.name, 'w', encoding='utf-8') as f:
        f.write(RUN_SCRIPT.format(env=INIT_ENV, file=EXECUTOR))
    status = os.system("bash " + init_file.name)
    # os.system returns a raw wait status, not an exit code; passing it to
    # sys.exit directly mangles the code (e.g. child exit 1 -> status 256).
    exit_code = os.WEXITSTATUS(status) if os.name == "posix" else status
finally:
    # The script was created with delete=False, so remove it ourselves.
    os.unlink(init_file.name)
sys.exit(exit_code)
"content_hash": "6646dd0224c91d557952e27b09fbc6ca",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 68,
"avg_line_length": 20.08,
"alnum_prop": 0.6892430278884463,
"repo_name": "jbzdak/torque-submitter",
"id": "48bbc6f64e0145d0c0e5861b18784b943e56ca03",
"size": "549",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "torqsubmit/torque_wrapper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "19691"
}
],
"symlink_target": ""
} |
"""
This file contains all the functions to extract DOIs of citations from arXiv
papers.
"""
from libbmc.citations import bbl
from libbmc.repositories import arxiv
def get_plaintext_citations(arxiv_id):
    """
    Fetch the plaintext citations of a given arXiv preprint.

    .. note::
        Bulk download of sources from arXiv is not permitted by their API. \
        You should have a look at http://arxiv.org/help/bulk_data_s3.

    :param arxiv_id: The arXiv id (e.g. ``1401.2910`` or ``1401.2910v1``) in \
            a canonical form.
    :returns: A list of cleaned plaintext citations.
    """
    citations = []
    # Every .bbl file attached to the preprint contributes its own citations.
    for bbl_file in arxiv.get_bbl(arxiv_id):
        citations.extend(bbl.get_plaintext_citations(bbl_file))
    return citations
def get_cited_dois(arxiv_id):
    """Resolve the DOIs of the papers cited by a given arXiv preprint.

    .. note::

        Bulk download of sources from arXiv is not permitted by their API. \
        You should have a look at http://arxiv.org/help/bulk_data_s3.

    :param arxiv_id: The arXiv id (e.g. ``1401.2910`` or ``1401.2910v1``) in \
            a canonical form.
    :returns: A dict of cleaned plaintext citations and their associated DOI.
    """
    # Merge the citation -> DOI mappings produced for every .bbl file
    # attached to this preprint.
    dois = {}
    for bbl_content in arxiv.get_bbl(arxiv_id):
        dois.update(bbl.get_cited_dois(bbl_content))
    return dois
| {
"content_hash": "c4b0ede58dd4c1e725f6fdb15a0b4ce9",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 78,
"avg_line_length": 32.88,
"alnum_prop": 0.6496350364963503,
"repo_name": "Phyks/libbmc",
"id": "1a86df696d30623fbc31ac8420473ec60dc701ad",
"size": "1644",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "libbmc/citations/repositories/arxiv.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "72957"
}
],
"symlink_target": ""
} |
"""
fizzbuzz.py
Author: Daniel Wilson
Credit: Morgan M
Assignment:
Write a program that prints the numbers from 1 to 100. But for
multiples of three print “Fizz” instead of the number and for
the multiples of five print “Buzz”. For numbers which are multiples
of both three and five print “FizzBuzz”.
We will use a variation of this test in which the last number of
the series isn't necessarily 100, and the two numbers being tested
for multiples aren't necessarily three and five. For example, your
program should behave just like this:
How many numbers shall we print? 25
For multiples of what number shall we print 'Fizz'? 3
For multiples of what number shall we print 'Buzz'? 5
1
2
Fizz
4
Buzz
Fizz
7
8
Fizz
Buzz
11
Fizz
13
14
FizzBuzz
16
17
Fizz
19
Buzz
Fizz
22
23
Fizz
Buzz
"""
def fizzbuzz(count, fizz, buzz):
    """Return the FizzBuzz series from 1 to ``count`` as a list of strings.

    :param count: last number of the series (inclusive).
    :param fizz: multiples of this number become "Fizz".
    :param buzz: multiples of this number become "Buzz".
    :returns: list of printable lines; multiples of both become "FizzBuzz".
    """
    lines = []
    for x in range(1, count + 1):
        if x % fizz == 0 and x % buzz == 0:
            lines.append("FizzBuzz")
        elif x % fizz == 0:
            lines.append("Fizz")
        elif x % buzz == 0:
            lines.append("Buzz")
        else:
            lines.append(str(x))
    return lines


if __name__ == "__main__":
    # int(float(...)) keeps accepting inputs such as "25.0", as before.
    num = int(float(input("How many numbers shall we print? ")))
    f = int(float(input("For multiples of what number shall we print 'Fizz'? ")))
    b = int(float(input("For multiples of what number shall we print 'Buzz'? ")))
    for line in fizzbuzz(num, f, b):
        print(line)
| {
"content_hash": "3eb3b29ef587b04610e22d1a43b0695d",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 77,
"avg_line_length": 20.033333333333335,
"alnum_prop": 0.6980033277870217,
"repo_name": "danielwilson2017/fizzbuzz",
"id": "8505fc547a08c137b9b60327a077c3207de4932e",
"size": "1214",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fizzbuzz.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1214"
},
{
"name": "Shell",
"bytes": "194"
}
],
"symlink_target": ""
} |
from collections import namedtuple
from oslo_log import log
import sqlalchemy as sa
from sqlalchemy.ext import baked
from gbpservice.neutron.db.grouppolicy.extensions import (
apic_auto_ptg_db as auto_ptg_db)
from gbpservice.neutron.db.grouppolicy.extensions import (
apic_segmentation_label_db as seg_label_db)
from gbpservice.neutron.db.grouppolicy import group_policy_mapping_db as gpmdb
from gbpservice.neutron.plugins.ml2plus.drivers.apic_aim import (
constants as md_const)
# Module logger, also used by the baked-query cache size alert below.
LOG = log.getLogger(__name__)
# Process-wide cache of baked (pre-compiled) SQLAlchemy queries; the
# alert fires if the cache outgrows its configured size, which would
# mean queries are not being reused as intended.
BAKERY = baked.bakery(_size_alert=lambda c: LOG.warning(
    "sqlalchemy baked query cache size exceeded in %s", __name__))
# Row type returned by AIMMappingRPCMixin._query_pt_info: the scalar
# policy state associated with one endpoint's policy target.
EndpointPtInfo = namedtuple(
    'EndpointPtInfo',
    ['pt_id',
     'ptg_id',
     'apg_id',
     'inject_default_route',
     'l3p_project_id',
     'is_auto_ptg'])
class AIMMappingRPCMixin(object):
    """GBP-side helpers for the apic_aim endpoint-details RPC."""
    # The query_endpoint_rpc_info and update_endpoint_rpc_details
    # methods below are called by the apic_aim mechanism driver while
    # handling the request_endpoint_details (aka get_gbp_details) RPC
    # from the agent.
    def query_endpoint_rpc_info(self, session, info):
        """Collects GBP state for a port into the info dict.

        Runs inside the caller's DB transaction. Adds the
        'gbp_pt_info' and 'gbp_segmentation_labels' keys to info when
        the port is owned by a policy target; otherwise leaves info
        untouched.
        """
        # This method is called within a transaction from the apic_aim
        # MD's request_endpoint_details RPC handler to retrieve GBP
        # state needed to build the RPC response, after the info param
        # has already been populated with the data available within
        # Neutron itself.
        # Query for all needed scalar (non-list) state for the
        # policies associated with the port, and make sure the port is
        # owned by a policy target before continuing.
        pt_infos = self._query_pt_info(
            session, info['port_info'].port_id)
        if not pt_infos:
            return
        # A list was returned by the PT info query, like all the other
        # endpoint RPC queries, here and in the mechanism
        # driver. Currently, there will be at most a single item in
        # this list, but a join may later be added to this query in
        # order to eliminate another query's round-trip to the DB
        # server, resulting in multiple rows being returned. For now,
        # we just need that single row.
        pt_info = pt_infos[0]
        info['gbp_pt_info'] = pt_info
        # Query for policy target's segmentation labels.
        info['gbp_segmentation_labels'] = self._query_segmentation_labels(
            session, pt_info.pt_id)
    def _query_pt_info(self, session, port_id):
        """Returns a list of EndpointPtInfo rows for the PT owning port_id.

        Uses baked queries so the SQL is compiled only once per process.
        """
        query = BAKERY(lambda s: s.query(
            gpmdb.PolicyTargetMapping.id,
            gpmdb.PolicyTargetMapping.policy_target_group_id,
            gpmdb.PolicyTargetGroupMapping.application_policy_group_id,
            gpmdb.L2PolicyMapping.inject_default_route,
            gpmdb.L3PolicyMapping.project_id,
            auto_ptg_db.ApicAutoPtgDB.is_auto_ptg,
        ))
        query += lambda q: q.join(
            gpmdb.PolicyTargetGroupMapping,
            gpmdb.PolicyTargetGroupMapping.id ==
            gpmdb.PolicyTargetMapping.policy_target_group_id)
        query += lambda q: q.join(
            gpmdb.L2PolicyMapping,
            gpmdb.L2PolicyMapping.id ==
            gpmdb.PolicyTargetGroupMapping.l2_policy_id)
        query += lambda q: q.join(
            gpmdb.L3PolicyMapping,
            gpmdb.L3PolicyMapping.id ==
            gpmdb.L2PolicyMapping.l3_policy_id)
        # Outer join: not every PTG has an auto-PTG row.
        query += lambda q: q.outerjoin(
            auto_ptg_db.ApicAutoPtgDB,
            auto_ptg_db.ApicAutoPtgDB.policy_target_group_id ==
            gpmdb.PolicyTargetMapping.policy_target_group_id)
        query += lambda q: q.filter(
            gpmdb.PolicyTargetMapping.port_id == sa.bindparam('port_id'))
        return [EndpointPtInfo._make(row) for row in
                query(session).params(
                    port_id=port_id)]
    def _query_segmentation_labels(self, session, pt_id):
        """Returns the list of segmentation labels of a policy target."""
        query = BAKERY(lambda s: s.query(
            seg_label_db.ApicSegmentationLabelDB.segmentation_label))
        query += lambda q: q.filter(
            seg_label_db.ApicSegmentationLabelDB.policy_target_id ==
            sa.bindparam('pt_id'))
        return [x for x, in query(session).params(
            pt_id=pt_id)]
    def update_endpoint_rpc_details(self, info, details):
        """Merges the GBP state gathered earlier into the RPC response.

        Mutates details['gbp_details'] in place; no-op when the port is
        not owned by a policy target.
        """
        # This method is called outside a transaction from the
        # apic_aim MD's request_endpoint_details RPC handler to add or
        # update details within the RPC response, using data stored in
        # info by query_endpoint_rpc_info.
        # First, make sure the port is owned by a PolicyTarget before
        # continuing.
        pt_info = info.get('gbp_pt_info')
        if not pt_info:
            return
        gbp_details = details['gbp_details']
        # Replace EPG identity if not auto_ptg.
        if not pt_info.is_auto_ptg:
            gbp_details['app_profile_name'] = (
                self.name_mapper.application_policy_group(
                    None, pt_info.apg_id) if pt_info.apg_id
                else self.aim_mech_driver.ap_name)
            gbp_details['endpoint_group_name'] = pt_info.ptg_id
            gbp_details['ptg_tenant'] = (
                self.name_mapper.project(None, pt_info.l3p_project_id))
        # Update subnet gateway_ip and default_routes if needed.
        if not pt_info.inject_default_route:
            for subnet in gbp_details['subnets']:
                del subnet['gateway_ip']
                subnet['host_routes'] = [
                    r for r in subnet['host_routes']
                    if r['destination'] not in
                    [md_const.IPV4_ANY_CIDR, md_const.IPV4_METADATA_CIDR]]
        # Add segmentation labels.
        gbp_details['segmentation_labels'] = (
            info.get('gbp_segmentation_labels'))
        # REVISIT: If/when support for the proxy_group extension is
        # added to the aim_mapping PD, update promiscuous_mode to True
        # if this PT has a cluster_id that identifies a different PT
        # whose group_default_gateway set.
| {
"content_hash": "703e2290d668ba426aa939758a29d6ad",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 78,
"avg_line_length": 41.641379310344824,
"alnum_prop": 0.6298443193110301,
"repo_name": "noironetworks/group-based-policy",
"id": "b50c0ea36502d16b941f87a08c1a112191320eb9",
"size": "6611",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gbpservice/neutron/services/grouppolicy/drivers/cisco/apic/aim_mapping_rpc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1893"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "3947895"
},
{
"name": "Shell",
"bytes": "31729"
}
],
"symlink_target": ""
} |
from .class_aware_sampler import ClassAwareSampler
from .distributed_sampler import DistributedSampler
from .group_sampler import DistributedGroupSampler, GroupSampler
from .infinite_sampler import InfiniteBatchSampler, InfiniteGroupBatchSampler
# Public API of the samplers subpackage, as re-exported by
# ``from ...samplers import *``.
__all__ = [
    'DistributedSampler', 'DistributedGroupSampler', 'GroupSampler',
    'InfiniteGroupBatchSampler', 'InfiniteBatchSampler', 'ClassAwareSampler'
]
| {
"content_hash": "a93b7332611f166c5da254bda7983e05",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 77,
"avg_line_length": 45.22222222222222,
"alnum_prop": 0.8230958230958231,
"repo_name": "open-mmlab/mmdetection",
"id": "a4c7ea135af652712e5a9f14a2002c516c44a16b",
"size": "455",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mmdet/datasets/samplers/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2540"
},
{
"name": "Python",
"bytes": "4811377"
},
{
"name": "Shell",
"bytes": "47911"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from requests.auth import HTTPBasicAuth
import requests
from optparse import OptionParser
def subiterator(substats, master=False):
    """Second level of iterator, based on use or not of group in parameters.

    Prints each stat as "subkey_key:value " (or "master_subkey_key:value "
    when ``master`` is given), skipping "description" entries. Values are
    converted to int when possible, otherwise printed as-is.
    """
    # .items() works on both Python 2 and 3; the original .iteritems()
    # was Python-2-only.
    for subkey, subvalue in substats.items():
        for key, value in subvalue.items():
            if key == "description":
                continue
            # int() fails for non-numeric values; fall back to the raw
            # value. The original bare ``except:`` also swallowed
            # KeyboardInterrupt and friends.
            try:
                value = int(value)
            except (TypeError, ValueError):
                pass
            if master:
                print("%s_%s_%s:%s " % (master, subkey, key, value), end='')
            else:
                print("%s_%s:%s " % (subkey, key, value), end='')
def iterator(options, stats):
    """Iterate over the server response.

    If a stats group was requested on the command line, only that group
    is printed; otherwise every group is printed, prefixed by its name.
    """
    if options.group:
        subiterator(stats[options.group])
    else:
        # .items() works on both Python 2 and 3; the original
        # .iteritems() was Python-2-only.
        for master, subdict in stats.items():
            subiterator(subdict, master)
def connToDb(options):
    """Fetch the /_stats document from the CouchDB server.

    Builds the URL from the hostname/port options and authenticates with
    HTTP basic auth when both username and password are provided.
    """
    url = 'http://%s:%s/_stats' % (options.hostname, options.port)
    auth = None
    if options.username and options.password:
        auth = HTTPBasicAuth(options.username, options.password)
    # auth=None is requests' default, so the unauthenticated call is
    # identical to calling requests.get(url) alone.
    r = requests.get(url, auth=auth)
    # check if the response was valid
    if r.status_code != 200:
        raise Exception("connection failed. response: %s" % r.text)
    return r
if __name__ == '__main__':
    # Command line interface: connection parameters plus an optional
    # stats group filter (see iterator()).
    parser = OptionParser(usage="usage: %prog [-h] [-H HOSTNAME] [-P PORT]")
    parser.add_option("-H", "--hostname",
                      dest="hostname",
                      action="store",
                      default="127.0.0.1",
                      help="default to localhost")
    parser.add_option("-P", "--port",
                      dest="port",
                      action="store",
                      default="5984",
                      help="default port [5984]")
    parser.add_option("-u", "--username",
                      dest="username",
                      action="store",
                      default=False,
                      help="Username [optional]")
    parser.add_option("-p", "--password",
                      dest="password",
                      action="store",
                      default=False,
                      help="Password [optional]")
    parser.add_option("-g", "--group",
                      dest="group",
                      action="store",
                      default=False,
                      help="group [couchd/httpd]")
    (options, args) = parser.parse_args()
    # Fetch /_stats from the server and print the flattened metrics.
    r = connToDb(options)
    iterator(options, r.json())
| {
"content_hash": "6c1ac9874f32c9b6bb78311802d2cbab",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 79,
"avg_line_length": 36.22222222222222,
"alnum_prop": 0.4368098159509202,
"repo_name": "reiven/python-couchdb-stats",
"id": "edaec624b5f3977b4a9c2bab243bcd41d8b9d454",
"size": "3283",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "couchdb_stats.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "3283"
}
],
"symlink_target": ""
} |
# Deployment-local overrides of the common Taiga backend settings.
# The {{ ... }} placeholders are rendered by the deployment's template
# engine (this file lives under an Ansible role's templates directory).
from .common import *
# Public URLs under which media, static files and the admin assets are
# served, built from the deployment's scheme and host name.
MEDIA_URL = "{{ scheme }}://{{ host_fqdn }}/media/"
STATIC_URL = "{{ scheme }}://{{ host_fqdn }}/static/"
ADMIN_MEDIA_PREFIX = "{{ scheme }}://{{ host_fqdn }}/static/admin/"
SITES["front"]["scheme"] = "{{ scheme }}"
SITES["front"]["domain"] = "{{ host_fqdn }}"
# NOTE(review): a fixed, publicly known SECRET_KEY is unsafe for any
# non-local deployment; it should be templated per deployment - confirm.
SECRET_KEY = "taiga_localdomain"
DEBUG = True
TEMPLATE_DEBUG = False
PUBLIC_REGISTER_ENABLED = True
DEFAULT_FROM_EMAIL = "no-reply@{{ host_fqdn }}"
SERVER_EMAIL = DEFAULT_FROM_EMAIL
# Uncomment and populate with proper connection parameters
# for enable email sending.
#EMAIL_BACKEND = "django.core.mail.backends.smtp.EmailBackend"
#EMAIL_USE_TLS = False
#EMAIL_HOST = "localhost"
#EMAIL_HOST_USER = ""
#EMAIL_HOST_PASSWORD = ""
#EMAIL_PORT = 25
# Uncomment and populate with proper connection parameters
# for enable github login/singin.
#GITHUB_API_CLIENT_ID = "yourgithubclientid"
#GITHUB_API_CLIENT_SECRET = "yourgithubclientsecret"
| {
"content_hash": "a8361a053721ea27f922dddfe4a223fd",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 67,
"avg_line_length": 30.866666666666667,
"alnum_prop": 0.6954643628509719,
"repo_name": "padajuan/taiga-deployer",
"id": "afe9968e698760e533136cc8882e6465ab083ae3",
"size": "926",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ansible/roles/taiga-backend/templates/local.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "926"
},
{
"name": "Ruby",
"bytes": "1199"
},
{
"name": "Shell",
"bytes": "230"
}
],
"symlink_target": ""
} |
from django.core.management.base import BaseCommand, CommandError
from django.core.management import call_command
import datetime
class Command(BaseCommand):
    # Shown by "manage.py help build_all".
    help = 'Runs all build functions'
    def add_arguments(self, parser):
        """Registers the command-line options of the build_all command."""
        parser.add_argument('-p', '--proc',
            type=int,
            action='store',
            dest='proc',
            default=1,
            help='Number of processes to run')
        parser.add_argument('-t', '--test',
            action='store_true',
            dest='test',
            default=False,
            help='Include only a subset of data for testing')
        # parser.add_argument('--hommod',
        #     action='store_true',
        #     dest='hommod',
        #     default=False,
        #     help='Include build of homology models')
    def handle(self, *args, **options):
        """Runs every build sub-command in order, timestamping each step.

        Each ``commands`` entry is [name], [name, kwargs] or
        [name, args, kwargs]; the entry's length decides how
        call_command is invoked below.
        """
        if options['test']:
            print('Running in test mode')
        commands = [
            ['build_common'],
            ['build_human_proteins'],
            ['build_blast_database'],
            ['build_other_proteins', {'constructs_only': options['test'] ,'proc': options['proc']}], # build only constructs in test mode
            ['build_annotation', {'proc': options['proc']}],
            ['build_blast_database'],
            ['build_links'],
            ['build_construct_proteins'],
            ['build_structures', {'proc': options['proc']}],
            ['build_structure_angles'],
            ['build_distance_representative'],
            ['build_contact_representative'],
            ['build_construct_data'],
            ['update_construct_mutations'],
            ['build_ligands_from_cache', {'proc': options['proc'], 'test_run': options['test']}],
            ['build_ligand_assays', {'proc': options['proc'], 'test_run': options['test']}],
            ['build_mutant_data', {'proc': options['proc'], 'test_run': options['test']}],
            ['build_protein_sets'],
            ['build_consensus_sequences', {'proc': options['proc']}],
            ['build_g_proteins'],
            ['build_arrestins'],
            ['build_signprot_complex'],
            ['build_g_protein_structures'],
            ['build_drugs'],
            ['build_nhs'],
            ['build_mutational_landscape'],
            ['build_residue_sets'],
            ['build_dynamine_annotation', {'proc': options['proc']}],
            ['build_blast_database'],
            ['build_complex_interactions'],
            # ['build_homology_models', ['--update', '-z'], {'proc': options['proc'], 'test_run': options['test']}],
            ['build_text'],
            ['build_release_notes'],
        ]
        for c in commands:
            print('{} Running {}'.format(
                datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d %H:%M:%S'), c[0]))
            if len(c) == 2:
                call_command(c[0], **c[1])
            elif len(c) == 3:
                call_command(c[0], *c[1], **c[2])
            else:
                call_command(c[0])
        print('{} Build completed'.format(datetime.datetime.strftime(
            datetime.datetime.now(), '%Y-%m-%d %H:%M:%S')))
| {
"content_hash": "b73d1fcdb55e78bfa21ec8af4b90dbe7",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 137,
"avg_line_length": 42.51898734177215,
"alnum_prop": 0.47692765704078594,
"repo_name": "cmunk/protwis",
"id": "4ca1c7ed1efec2e1a68a58337c2fef512ed3a503",
"size": "3359",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "build/management/commands/build_all.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "139292"
},
{
"name": "HTML",
"bytes": "2035504"
},
{
"name": "JavaScript",
"bytes": "2234465"
},
{
"name": "Python",
"bytes": "3400624"
},
{
"name": "Shell",
"bytes": "386"
}
],
"symlink_target": ""
} |
"""Fichier contenant le paramètre 'liste' de la commande 'chantier'."""
from primaires.interpreteur.masque.parametre import Parametre
class PrmListe(Parametre):
    """Command 'chantier liste' (list the player's ships).

    Lists the ships owned by the character that are currently in the
    waters of the shipyard the character is standing in.
    """
    def __init__(self):
        """Parameter constructor."""
        Parametre.__init__(self, "liste", "list")
        self.aide_courte = "consulte vos navires"
        self.aide_longue = \
            "Cette commande vous permet de consulter la liste des navires " \
            "que vous possédez, si ils se trouvent dans les eaux du chantier " \
            "naval dans lequel vous vous trouvez. Si le navire sur " \
            "lequel vous souhaitez effectuer une opération (changer son " \
            "nom par exemple), il doit se trouver dans le chantier " \
            "naval. Cette commande affiche la liste avec chaque navire " \
            "numéroté. Ces numéros vous permettront d'effectuer d'autres " \
            "actions dans le chantier naval et vous devrez le préciser " \
            "lors des autres commandes."
    def interpreter(self, personnage, dic_masques):
        """Interpret the parameter: render the numbered ship table."""
        salle = personnage.salle
        chantier = importeur.navigation.get_chantier_naval(salle)
        if chantier is None:
            personnage << "|err|Vous ne vous trouvez pas dans un chantier " \
                    "naval.|ff|"
            return
        if salle.magasin is None:
            personnage << "|err|Vous ne vous trouvez pas dans un chantier " \
                    "naval.|ff|"
            return
        magasin = salle.magasin
        if magasin.vendeur is None:
            personnage << "|err|Aucun vendeur n'est présent pour l'instant.|ff|"
            return
        navires = chantier.get_navires_possedes(personnage)
        if navires:
            # ASCII borders of the three-column table (ID, type, name).
            en_tete = "+-" + "-" * 2 + "-+-" + "-" * 25 + "-+-" + "-" * 25 + \
                    "-+"
            msg = en_tete + "\n"
            msg += "| ID | " + "Type".ljust(25) + " | "
            msg += "Nom".ljust(25) + " |\n" + en_tete
            for i, navire in enumerate(navires):
                msg += "\n| " + "{:>2} | ".format(i + 1)
                msg += navire.nom.ljust(25) + " | "
                if navire.nom_personnalise:
                    nom_personnalise = navire.nom_personnalise
                else:
                    nom_personnalise = "Non précisé"
                msg += nom_personnalise.ljust(25) + " |"
            msg += "\n" + en_tete
            personnage << msg
        else:
            personnage << "Vous n'avez aucun navire dans ce " \
                    "chantier naval."
| {
"content_hash": "fe4f31731334419681a1f311d93b7fe9",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 80,
"avg_line_length": 40.6,
"alnum_prop": 0.5316407730200834,
"repo_name": "vlegoff/tsunami",
"id": "7479061c719a18048d55133bbcdda1c37bc2ea38",
"size": "4221",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/secondaires/navigation/commandes/chantier/liste.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "7930908"
},
{
"name": "Ruby",
"bytes": "373"
}
],
"symlink_target": ""
} |
"""
Withstand the test of time.
"""
from itertools import permutations
from enon import Enon, enon
def test_str():
    """str() of the Enon singleton is the empty string."""
    # Compare with == rather than ``is``: identity with a string literal
    # relies on CPython's interning of '' and is not a language guarantee.
    assert str(enon) == ''
def test_int():
    """Enon converts to integer zero."""
    assert int(enon) == 0


def test_float():
    """Enon converts to float zero."""
    assert float(enon) == 0.0


def test_iter():
    """Iterating an Enon yields no items at all."""
    count = sum(1 for _ in enon)
    assert count == 0
    assert list(enon) == []


def test_len():
    """An Enon is empty."""
    assert len(enon) == 0


def test_getitem():
    """Any subscript access (key, index or slice) returns the Enon."""
    for key in ('test', 1, slice(-1, 3, -2)):
        assert enon[key] is enon


def test_setitem():
    """Item assignment is silently accepted."""
    enon[1] = 'test'
    enon['test'] = 'test'


def test_get_set_attr():
    """Attribute reads return the Enon; attribute writes are ignored."""
    assert enon.test is enon
    enon.test = 'test'
    assert enon.test is enon
def test_call():
    """Calling an Enon with any arguments returns that same Enon."""
    args = (1, 'two', range(3))
    kwargs = {'four': set([4])}
    assert enon(*args, **kwargs) is enon
    other_enon = Enon()
    other_enon_result = other_enon(*args, **kwargs)
    # The original compared other_enon with itself (a tautology) and
    # never used other_enon_result; the intent is that calling returns
    # the *same* instance, which is distinct from the module singleton.
    assert (other_enon_result is other_enon) and (other_enon is not enon)
def test_contains():
    """Membership tests always report absence."""
    for item in (1, 'test'):
        assert item not in enon


def test_append():
    """append() is accepted and does nothing observable."""
    enon.append(1)


def test_context_manager():
    """``with enon`` yields the Enon itself."""
    with enon as ctx:
        assert ctx is enon


def test_open():
    """open() returns the Enon and also works as a context manager."""
    assert enon.open() is enon
    with enon.open() as ctx:
        assert ctx is enon


def test_close():
    """close() returns a falsy value."""
    assert not enon.close()


def test_write():
    """write() is accepted and does nothing observable."""
    enon.write('test')


def test_flush():
    """flush() is accepted and does nothing observable."""
    enon.flush()
def test_comps():
    """Enon is neither equal, smaller nor greater than anything."""
    operable = (
        set(),
        -1, 0, 1,
        -1.5, 0.0, 1.5,
        '', 'test',
        list(), tuple(),
    )
    for val in operable:
        # Check both operand orders for every comparison operator.
        for left, right in permutations((val, enon)):
            comparisons = (
                left == right,
                left < right,
                left <= right,
                left > right,
                left >= right,
            )
            assert not any(comparisons)
def test_ops():
    """Enon is a neutral element for subtraction and addition."""
    numbers = (-1, 0, 1, -1.5, 0.0, 1.5)
    # Sets support subtraction but not addition, hence the asymmetry.
    for val in (set(),) + numbers:
        assert (enon - val) == val
        assert (val - enon) == val
    for val in numbers + ('', 'test', list(), tuple()):
        assert (enon + val) == val
        assert (val + enon) == val
def test_usage_example():
    """End-to-end tour of the Enon no-op API."""
    # NOTE: this local ``enon`` deliberately shadows (and repeatedly
    # rebinds over) the module-level singleton.
    enon = Enon()
    with enon.open() as enon:
        enon.append(enon['horse'])
        enon_items = enon[1:5]
        zero = len(enon_items)
        enon.zero = zero
        if enon.zero is zero:
            raise Enon('Attribute assignment does nothing!')
        elif enon.zero is enon:
            # But this is correct.
            enon.write(str(enon))
    other_enon = Enon('Init ignores...', all='Arguments')
    if enon is other_enon:
        raise Enon('Not possible! They are not the same Enon!')
    # NOTE(review): per test_comps, ``other_enon == enon`` is falsy, so
    # the branch below is never entered - confirm this is intended.
    elif other_enon == enon:
        same_enon = other_enon('Enon is callable.',
                               more='It always returns itself.')
        assert other_enon is same_enon
| {
"content_hash": "9be7ae312898cfc254687d225e82ee4b",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 66,
"avg_line_length": 20.205479452054796,
"alnum_prop": 0.5454237288135593,
"repo_name": "ssangervasi/enon",
"id": "4642ffe38ca1bc85b1c083d0366d5adfcd99c494",
"size": "2950",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test_enon.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6123"
}
],
"symlink_target": ""
} |
"""Tests for control_flow module."""
import collections
import numpy as np
from tensorflow.python.autograph.converters import break_statements
from tensorflow.python.autograph.converters import continue_statements
from tensorflow.python.autograph.converters import control_flow
from tensorflow.python.autograph.core import converter_testing
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.platform import test
from tensorflow.python.util import nest
# Module-level variables mutated by the tests below to exercise
# AutoGraph's handling of ``global`` statements inside converted code.
for_unaffected_global = None
for_mixed_globals_nonglobals = None
for_test_global_local = None
class ControlFlowTestBase(converter_testing.TestCase):
  """Shared assertion helpers for the control-flow converter tests."""
  def assertValuesEqual(self, actual, expected):
    """Asserts structural equality, evaluating any TF tensors first."""
    values = nest.map_structure(
        lambda x: self.evaluate(x) if tensor_util.is_tf_type(x) else x,
        actual)
    self.assertAllEqual(values, expected)
  def assertTransformedResult(self, f, inputs, expected):
    """Converts f with the control_flow transform and checks its output.

    inputs may be a single value or a tuple of positional arguments.
    """
    if not isinstance(inputs, tuple):
      inputs = (inputs,)
    tr = self.transform(f, control_flow)
    returns = tr(*inputs)
    self.assertValuesEqual(returns, expected)
class NestedControlFlowTest(ControlFlowTestBase):
  """Tests conversion of loops nested inside other control flow."""
  def test_basic(self):
    """Nested while loops over purely local state."""
    def f(n):
      i = 0
      j = 0
      s = 0
      while i < n:
        while j < i:
          j += 3
        u = i + j  # 'u' is not defined within the inner loop
        s += u
        i += 1
        j = 0
      return s, i, j, n
    self.assertTransformedResult(f, constant_op.constant(5),
                                 (25, 5, 0, 5))
  def test_mixed_globals_nonglobals(self):
    """Nested loops whose state mixes a module global with locals."""
    def f(n):
      global for_mixed_globals_nonglobals
      i = 0
      j = 0
      for_mixed_globals_nonglobals = 0
      while i < n:
        while j < i:
          j += 3
        u = i + j  # 'u' is not defined within the inner loop
        for_mixed_globals_nonglobals += u
        i += 1
        j = 0
      return for_mixed_globals_nonglobals, i, j, n
    self.assertTransformedResult(f, constant_op.constant(5),
                                 (25, 5, 0, 5))
  def test_composite_state_complex(self):
    """Deeply nested attribute/dict state mutated inside the loop."""
    class TestClassX(object):
      def __init__(self, x):
        self.x = x
    class TestClassY(object):
      def __init__(self, y):
        self.y = y
    def f(n):
      tc = TestClassX(TestClassY({'z': TestClassX(n)}))
      if n > 0:
        while n > 0:
          if n < 2:
            tc.x.y['z'].x += 1
          n -= 1
      return n, tc
    tr = self.transform(f, control_flow)
    n, tc = tr(constant_op.constant(5))
    self.assertValuesEqual((n, tc.x.y['z'].x), (0, 6))
class WhileStatementTest(ControlFlowTestBase):
  """Tests conversion of single (non-nested) while statements."""
  def test_basic(self):
    """Simple accumulator loop over a tensor bound."""
    def f(n):
      i = 0
      s = 0
      while i < n:
        s += i
        i += 1
      return s, i, n
    self.assertTransformedResult(f, constant_op.constant(5), (10, 5, 5))
  def test_single_output(self):
    def f(n):
      while n > 0:
        n -= 1
      return n
    self.assertTransformedResult(f, constant_op.constant(5), 0)
  def test_composite_state_attr(self):
    """Object attribute used as loop state."""
    class TestClass(object):
      def __init__(self):
        self.x = constant_op.constant(3)
    def f(n):
      tc = TestClass()
      while n > 0:
        tc.x += 1
        n -= 1
      return n
    self.assertTransformedResult(f, constant_op.constant(5), 0)
  def test_composite_state_slice(self):
    """Dict entry addressed through a variable key as loop state."""
    def f(n):
      d = {'a': n}
      k = 'a'
      while n > 0:
        d[k] += 1
        n -= 1
      return d[k], n
    self.assertTransformedResult(f, constant_op.constant(5), (10, 0))
  def test_composite_state_literal_slice(self):
    """Dict entry addressed through a literal key as loop state."""
    def f(n):
      d = {'a': n}
      while n > 0:
        d['a'] += 1
        n -= 1
      return d['a'], n
    self.assertTransformedResult(f, constant_op.constant(5), (10, 0))
  def test_composite_state_attr_initialized_in_loop(self):
    """Attribute first set inside the loop: ok unstaged, error staged."""
    class TestClass(object):
      pass
    def f(n, x):
      tc = TestClass()
      while n < 5:
        if n == 0:
          tc.subattr = x
        else:
          tc.subattr = tc.subattr + 1
        n += 1
      return tc.subattr
    self.assertTransformedResult(f, (0, constant_op.constant(10)), 14)
    tr = self.transform(f, control_flow)
    with self.assertRaisesRegex(
        ValueError, "'tc.subattr' must be defined before the loop"):
      tr(constant_op.constant(0), 0)
  def test_composite_state_slice_initialized_in_loop(self):
    """Dict entry (variable key) first set inside the loop."""
    def f(n, x):
      d = {}
      k = 'subkey'
      while n < 5:
        if n == 0:
          d[k] = x
        else:
          d[k] = d[k] + 1
        n += 1
      return d
    self.assertTransformedResult(f, (0, constant_op.constant(10)),
                                 {'subkey': 14})
    tr = self.transform(f, control_flow)
    with self.assertRaisesRegex(
        ValueError, r"'d\[k\]' must be defined before the loop"):
      tr(constant_op.constant(0), 0)
  def test_composite_state_literal_slice_initialized_in_loop(self):
    """Dict entry (literal key) first set inside the loop."""
    def f(n, x):
      d = {}
      while n < 5:
        if n == 0:
          d['subkey'] = x
        else:
          d['subkey'] = d['subkey'] + 1
        n += 1
      return d
    self.assertTransformedResult(f, (0, constant_op.constant(10)),
                                 {'subkey': 14})
    tr = self.transform(f, control_flow)
    with self.assertRaisesRegex(
        ValueError, r"'d\['subkey'\]' must be defined before the loop"):
      tr(constant_op.constant(0), 0)
  def test_composite_state_slice_aliased_to_local(self):
    """Dict entry keyed by a loop-local alias leaks a loop tensor."""
    def f(n, x):
      d = {}
      while n < 5:
        k = 'subkey'
        d[k] = x + 1
        n += 1
      return d
    self.assertTransformedResult(f, (0, constant_op.constant(10)),
                                 {'subkey': 11})
    tr = self.transform(f, control_flow)
    # TODO(b/136999953): Better error message.
    # Note that this error happens at execution time.
    with self.assertRaises(errors.InaccessibleTensorError):
      graph_fn = def_function.function(tr, autograph=False)
      self.evaluate(
          graph_fn(constant_op.constant(0), constant_op.constant(5)))
  def test_local_composite_attr(self):
    """Attribute of a loop-local object is not loop state."""
    class TestClass(object):
      def __init__(self):
        self.x = constant_op.constant(3)
    def f(n):
      while n > 0:
        tc = TestClass()
        tc.x = tc.x
        n -= 1
      return n
    self.assertTransformedResult(f, constant_op.constant(5), 0)
  def test_local_composite_slice(self):
    """Dict entry (variable key) of a loop-local dict is not loop state."""
    def f(n):
      while n > 0:
        d = {'x': n}
        k = 'x'
        d[k] = d[k]
        n -= 1
      return n
    self.assertTransformedResult(f, constant_op.constant(5), 0)
  def test_local_composite_literal_slice(self):
    """Dict entry (literal key) of a loop-local dict is not loop state."""
    def f(n):
      while n > 0:
        d = {'x': n}
        d['x'] = d['x']
        n -= 1
      return n
    self.assertTransformedResult(f, constant_op.constant(5), 0)
  def test_non_tensor_state(self):
    # This class is ok to be in a tf.while's state.
    class TestClass(collections.namedtuple('TestClass', ('x'))):
      pass
    def f(n):
      tc = TestClass([constant_op.constant(0)])
      while n > 0:
        tc = TestClass([constant_op.constant(3)])
        tc.x[0] = tc.x[0] + 1
        n -= 1
      return tc.x[0]
    self.assertTransformedResult(f, constant_op.constant(5), 4)
  def test_non_tensor_state_illegal_type(self):
    """A plain object in loop state raises a conversion error."""
    class TestClass(object):
      def __init__(self):
        self.x = [constant_op.constant(3)]
    def f(n):
      while n > 0:
        tc = TestClass()
        tc.x[0] = tc.x[0] + 1
        n -= 1
      return tc.x[0]
    tr = self.transform(f, control_flow)
    # The tested function would require `tc` to become part of the while loop
    # state, but TensorFlow doesn't support classes at the moment.
    with self.assertRaisesRegex(
        ValueError, 'tc.*must be defined before the loop'):
      tr(constant_op.constant(5))
  def test_dispatches_by_cond_only(self):
    """Only the loop condition decides whether the loop is staged."""
    class TensorIncompatibleNumeric(object):
      """Works in arithmetic expression, but errors out with TF ops."""
      def __init__(self, val):
        self.val = val
      def __add__(self, other):
        return TensorIncompatibleNumeric(self.val + other)
    def f(n, s):
      while n > 0:
        n -= 1
        s += n
      return s
    self.assertTransformedResult(f, (constant_op.constant(5), 0), 10)
    tr = self.transform(f, control_flow)
    # n alone controls the staging. When the loop is not staged, Python
    # knows how to add the two objects. But when staged, tf.while will
    # not know how to deal with the TensorIncompatibleNumeric object.
    self.assertEqual(tr(5, TensorIncompatibleNumeric(0)).val, 10)
    with self.assertRaises(TypeError):
      tr(constant_op.constant(5), TensorIncompatibleNumeric(0))
class IfStatementTest(ControlFlowTestBase):
def test_basic(self):
def f(n):
a = 0
b = 0
if n > 0:
a = -n
else:
b = 2 * n
return a, b
self.assertTransformedResult(f, constant_op.constant(1), (-1, 0))
self.assertTransformedResult(f, constant_op.constant(-1), (0, -2))
def test_sparse_tensor(self):
def f(cond, a):
if cond:
a = -a
return a
st = sparse_tensor.SparseTensor(
indices=((0,),), values=(0,), dense_shape=(1,))
self.assertTransformedResult(f, (st, constant_op.constant(1)), -1)
self.assertTransformedResult(f, (None, constant_op.constant(1)), 1)
def test_complex_outputs(self):
class TestClass(object):
def __init__(self, a, b):
self.a = a
self.b = b
def f(n, obj):
obj.a = 0
obj.b = 0
if n > 0:
obj.a = -n
else:
obj.b = 2 * n
return obj
tr = self.transform(f, control_flow)
res_obj = tr(constant_op.constant(1), TestClass(0, 0))
self.assertValuesEqual((res_obj.a, res_obj.b), (-1, 0))
res_obj = tr(constant_op.constant(-1), TestClass(0, 0))
self.assertValuesEqual((res_obj.a, res_obj.b), (0, -2))
def test_single_output(self):
def f(n):
if n > 0:
n = -n
return n
self.assertTransformedResult(f, constant_op.constant(1), -1)
def test_unbalanced(self):
def f(n):
if n > 0:
n = 3
return n
self.assertTransformedResult(f, constant_op.constant(2), 3)
self.assertTransformedResult(f, constant_op.constant(-3), -3)
def test_unbalanced_raising(self):
def f(n):
if n > 0:
n = n + 1
raise ValueError()
return n
self.assertTransformedResult(f, -3, -3)
tr = self.transform(f, control_flow)
with self.assertRaises(ValueError):
tr(1)
def test_local_var(self):
def f(n):
if n > 0:
b = 4
n = b + 1
return n
self.assertTransformedResult(f, constant_op.constant(1), 5)
self.assertTransformedResult(f, constant_op.constant(-1), -1)
def test_local_remains_local(self):
def f(n):
if n > 0:
b = 4
n = b + 1
return n
self.assertTransformedResult(f, constant_op.constant(1), 5)
self.assertTransformedResult(f, constant_op.constant(-1), -1)
def test_global_local(self):
def f(n):
if n > 0:
global for_test_global_local
if for_test_global_local is None:
for_test_global_local = 1
else:
for_test_global_local += 1
n += for_test_global_local
return n
tr = self.transform(f, control_flow)
assert for_test_global_local is None
self.assertEqual(tr(1), 2)
self.assertEqual(for_test_global_local, 1)
def test_no_outputs(self):
def f(n):
if n > 0:
b = 4 # pylint:disable=unused-variable
return n
self.assertTransformedResult(f, constant_op.constant(1), 1)
self.assertTransformedResult(f, constant_op.constant(-1), -1)
def test_created_outputs(self):
  # A variable first created inside the `if` (defined in both branches) is a
  # valid output of the conversion.

  def f(i):
    if i == 0:
      result = i - 1
    else:
      result = i + 1
    return result

  self.assertTransformedResult(f, 0, -1)
  self.assertTransformedResult(f, 1, 2)
def test_created_loop_local_outputs(self):
  # `result` is created inside the loop body and consumed within the same
  # iteration; only `x` is loop-carried.

  def f(n, x):
    for i in n:
      if i == 0:
        result = i - 1
      else:
        result = i + 1
      if result > 0:
        x += 1
    return x

  self.assertTransformedResult(f, (range(5), 10), 14)
def test_created_loop_variable(self):
  # `result` is only assigned on the first iteration but read by later
  # iterations, so it becomes a loop-carried variable.

  def f(n, x):
    for i in n:
      if i == 0:
        result = i - 1
      if i > 0:  # Using the result from previous iteration.
        if result < 0:
          x += 1
    return x

  self.assertTransformedResult(f, (range(5), 10), 14)
def test_unaffected_global(self):
  # A global assigned only in the untaken branch keeps its prior value; when
  # the branch is taken the module-level variable is actually updated.
  global for_unaffected_global
  for_unaffected_global = 3

  def f(i):
    global for_unaffected_global
    if i == 0:
      for_unaffected_global = i - 1
    return for_unaffected_global

  self.assertTransformedResult(f, 1, 3)
  self.assertTransformedResult(f, 0, -1)
  self.assertEqual(for_unaffected_global, -1)
def test_unaffected_nonlocal(self):
  # A `nonlocal` assigned only in an untaken branch of a nested function must
  # not clobber the enclosing scope's value.

  def f(i):

    def inner_fn():
      nonlocal n
      if i == 0:
        n = i - 1

    n = 3
    inner_fn()
    return n

  self.assertTransformedResult(f, 1, 3)
  self.assertTransformedResult(f, 0, -1)
def test_output_defined_in_prior_except(self):
  # A variable first bound inside an `except` handler is still a legal input
  # to a later converted `if`.

  def f(i):
    try:
      raise ValueError()
    except ValueError:
      x = 1
    if i == 0:
      x = i - 1
    return x

  self.assertTransformedResult(f, 1, 1)
  self.assertTransformedResult(f, 0, -1)
def test_unbalanced_multiple_composites(self):
  # Several attributes of the same object mutated in a single unbalanced
  # branch, together with a plain local.

  class Foo(object):

    def __init__(self):
      self.b = 2
      self.c = 3

  def f(x, condition):
    z = 5
    if condition:
      x.b = 7
      x.c = 11
      z = 13
    return x.b, x.c, z

  self.assertTransformedResult(f, (Foo(), constant_op.constant(True)),
                               (7, 11, 13))
  self.assertTransformedResult(f, (Foo(), constant_op.constant(False)),
                               (2, 3, 5))
def test_unbalanced_composite(self):
  # One object attribute plus one local mutated in an unbalanced branch.

  class Foo(object):

    def __init__(self):
      self.b = 2

  def f(x, condition):
    z = 5
    if condition:
      x.b = 7
      z = 13
    return x.b, z

  self.assertTransformedResult(f, (Foo(), constant_op.constant(True)),
                               (7, 13))
  self.assertTransformedResult(f, (Foo(), constant_op.constant(False)),
                               (2, 5))
class ForStatementTest(ControlFlowTestBase):
  """Tests conversion of `for` statements, including empty iterables."""

  def test_basic(self):
    # Two loop-carried accumulators; also checked against an empty vector.
    def f(l):
      s1 = 0
      s2 = 0
      for e in l:
        s1 += e
        s2 += e * e
      return s1, s2

    self.assertTransformedResult(f, constant_op.constant([1, 3]), (4, 10))
    empty_vector = constant_op.constant([], shape=(0,), dtype=dtypes.int32)
    self.assertTransformedResult(f, empty_vector, (0, 0))

  def test_single_output(self):
    # A single loop-carried accumulator.
    def f(l):
      s = 0
      for e in l:
        s += e
      return s

    self.assertTransformedResult(f, constant_op.constant([1, 3]), 4)
    empty_vector = constant_op.constant([], shape=(0,), dtype=dtypes.int32)
    self.assertTransformedResult(f, empty_vector, 0)

  def test_iterated_expression(self):
    # The iterated expression must be evaluated exactly once.
    eval_count = [0]

    def count_evals(x):
      eval_count[0] += 1
      return x

    def f(n):
      s = 0
      for e in count_evals(range(n)):
        s += e
      return s

    tr = self.transform(f, control_flow)

    self.assertEqual(tr(5), 10)
    self.assertEqual(eval_count[0], 1)

  def test_composite_state_initialized_in_loop(self):
    # An attribute first defined inside the loop works for Python iteration,
    # but must raise a clear error for tensor-backed loops, where loop state
    # needs a value before the loop starts.

    class TestClass(object):
      pass

    def f(n, x):
      tc = TestClass()
      for i in n:
        if i == 0:
          tc.x = x
        else:
          tc.x = tc.x + i
      return tc.x

    self.assertTransformedResult(f, (range(5), constant_op.constant(10)), 20)
    tr = self.transform(f, control_flow)

    with self.assertRaisesRegex(
        ValueError, "'tc.x' must be defined before the loop"):
      tr(constant_op.constant(list(range(5))), 0)

  def test_tuple_unpacking(self):
    # Loop target that unpacks a tuple (enumerate).
    def f(x_list):
      z = constant_op.constant(0)  # pylint:disable=undefined-variable
      for i, x in enumerate(x_list):
        z = z + x + i
      return z

    self.assertTransformedResult(f, [3, 3], 7)

  def test_with_comprehension_in_body(self):
    # A list comprehension inside the loop body must survive conversion.
    def f(l, n):
      s = constant_op.constant(list(range(n)))
      for _ in l:
        s += constant_op.constant([a for a in range(n)])
      return s

    self.assertTransformedResult(f, (constant_op.constant([1, 2, 3]), 5),
                                 np.array(range(5)) * 4)
class AdvancedControlFlowTest(ControlFlowTestBase):
  """Tests control flow conversion combined with break/continue lowering."""

  def assertTransformedEquivalent(self, f, *inputs):
    # Runs the break/continue converters before control_flow, then checks the
    # transformed function agrees with plain Python execution.
    tr = self.transform(
        f, (break_statements, continue_statements, control_flow))
    self.assertEqual(f(*inputs), tr(*inputs))

  def test_while_with_else(self):
    # `while ... else` where the else always runs (no break).
    def f(x):
      while x > 2:
        x /= 2
      else:
        x += 1
      return x

    self.assertTransformedEquivalent(f, 4)
    self.assertTransformedEquivalent(f, 2)

  def test_while_with_else_and_break(self):
    # `while ... else` where a break may skip the else clause.
    def f(cond1):
      x = 8
      while x > 2:
        x /= 2
        if cond1:
          break
      else:
        x += 1
      return x

    self.assertTransformedEquivalent(f, True)
    self.assertTransformedEquivalent(f, False)

  def test_for_with_else(self):
    # `for ... else` without break; else runs for empty and non-empty lists.
    def f(l):
      res = 0
      for x in l:
        res += x
      else:
        res += 1
      return res

    self.assertTransformedEquivalent(f, [])
    self.assertTransformedEquivalent(f, [1, 2])

  def test_for_with_else_and_break(self):
    # `for ... else` where a break may skip the else clause.
    def f(flag):
      l = [1, 2, 3]
      res = 0
      for x in l:
        res += x
        if flag:
          break
      else:
        res += 1
      return res

    self.assertTransformedEquivalent(f, True)
    self.assertTransformedEquivalent(f, False)
# Script entry point: run the test suite when executed directly.
if __name__ == '__main__':
  test.main()
| {
"content_hash": "e087070c12eb0fb2184e75ea07607807",
"timestamp": "",
"source": "github",
"line_count": 772,
"max_line_length": 77,
"avg_line_length": 23.496113989637305,
"alnum_prop": 0.5691603726776558,
"repo_name": "Intel-Corporation/tensorflow",
"id": "a66af59cea443d2e06e4d30a7d4c61402c1cb559",
"size": "18847",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/autograph/converters/control_flow_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7481"
},
{
"name": "C",
"bytes": "183416"
},
{
"name": "C++",
"bytes": "24549804"
},
{
"name": "CMake",
"bytes": "160888"
},
{
"name": "Go",
"bytes": "849081"
},
{
"name": "HTML",
"bytes": "681293"
},
{
"name": "Java",
"bytes": "307123"
},
{
"name": "Jupyter Notebook",
"bytes": "1833659"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "37393"
},
{
"name": "Objective-C",
"bytes": "7037"
},
{
"name": "Objective-C++",
"bytes": "64142"
},
{
"name": "Protocol Buffer",
"bytes": "218430"
},
{
"name": "Python",
"bytes": "21875003"
},
{
"name": "Shell",
"bytes": "337846"
},
{
"name": "TypeScript",
"bytes": "849555"
}
],
"symlink_target": ""
} |
import io
import re
import jinja2
import os
import ruamel.yaml
import collections
import requests
import tempfile
import tarfile
from .utils import tmp_directory, render_meta_yaml
class Str(ruamel.yaml.scalarstring.ScalarString):
    """Plain scalar string that also records its source position in ``lc``."""

    # One-element tuple: the original spelling ('lc') is a bare string, which
    # __slots__ accepts as a single slot name, but the tuple form is the
    # unambiguous, conventional spelling.
    __slots__ = ('lc',)

    # Plain (unquoted) scalar style when dumped back out.
    style = ""

    def __new__(cls, value):
        return ruamel.yaml.scalarstring.ScalarString.__new__(cls, value)
class MyPreservedScalarString(ruamel.yaml.scalarstring.PreservedScalarString):
    """Block-literal (``|``) scalar that additionally carries an ``lc`` position."""
    __slots__ = ('lc')
class MyDoubleQuotedScalarString(ruamel.yaml.scalarstring.DoubleQuotedScalarString):
    """Double-quoted scalar that additionally carries an ``lc`` position."""
    __slots__ = ('lc')
class MySingleQuotedScalarString(ruamel.yaml.scalarstring.SingleQuotedScalarString):
    """Single-quoted scalar that additionally carries an ``lc`` position."""
    __slots__ = ('lc')
class MyConstructor(ruamel.yaml.constructor.RoundTripConstructor):
    """Round-trip constructor that attaches line/column info to every scalar.

    Scalars are constructed as the lc-carrying subclasses above so the rest
    of this module can locate each value in the original recipe text.
    """

    def construct_scalar(self, node):
        # type: (Any) -> Any
        if not isinstance(node, ruamel.yaml.nodes.ScalarNode):
            raise ruamel.yaml.constructor.ConstructorError(
                None, None,
                "expected a scalar node, but found %s" % node.id,
                node.start_mark)

        # Pick the subclass matching the scalar's original quoting/block
        # style so round-tripping preserves formatting.
        if node.style == '|' and isinstance(node.value, ruamel.yaml.compat.text_type):
            ret_val = MyPreservedScalarString(node.value)
        elif bool(self._preserve_quotes) and isinstance(node.value, ruamel.yaml.compat.text_type):
            if node.style == "'":
                ret_val = MySingleQuotedScalarString(node.value)
            elif node.style == '"':
                ret_val = MyDoubleQuotedScalarString(node.value)
            else:
                ret_val = Str(node.value)
        else:
            ret_val = Str(node.value)
        # Record where the scalar started in the source document.
        ret_val.lc = ruamel.yaml.comments.LineCol()
        ret_val.lc.line = node.start_mark.line
        ret_val.lc.col = node.start_mark.column
        return ret_val
class Section:
    """A view over a parsed-YAML mapping plus the source-line span it covers."""

    def __init__(self, section, start, end):
        """Wrap *section*, whose keys and values carry ``lc`` positions."""
        self.section = section
        self.start = start
        self.end = end

    def __getitem__(self, item):
        """Return the sub-Section for key *item*, or ``None`` when absent.

        The child's span starts at its value's own source line and ends at
        the first key in this mapping that appears on a later line, falling
        back to this section's own end when no such key exists.
        """
        if item not in self.section:
            return None
        value = self.section[item]
        first_line = value.lc.line
        for key in self.section:
            if key.lc.line > first_line:
                return Section(value, first_line, key.lc.line)
        return Section(value, first_line, self.end)
def get_compilers(url):
    '''
    Download the source archive at *url* and inspect its contents.

    Checks for C/C++/Fortran sources and whether any ``setup.py`` calls
    ``numpy.get_include()`` / ``np.get_include()`` (implying a numpy
    build-time dependency).

    Parameters
    ----------
    url : str or list of str
        Source URL, or a list of candidate URLs; the first reachable one
        is used.

    Returns
    -------
    tuple of bool
        ``(need_f, need_c, need_cxx, need_numpy_pin)``
    '''
    # A recipe may list several mirror URLs; use the first one that responds.
    if isinstance(url, list):
        for u in url:
            r = requests.get(u, allow_redirects=True)
            if r.ok:
                url = u
                break
    else:
        r = requests.get(url, allow_redirects=True)
    fname = url.split('/')[-1]
    with tmp_directory() as tmp_dir:
        with open(os.path.join(tmp_dir, fname), 'wb') as f:
            f.write(r.content)
        need_numpy_pin = False
        with tarfile.open(os.path.join(tmp_dir, fname)) as tf:
            # getmembers() scans the archive once; reuse the list for all
            # extension checks instead of re-iterating the TarFile.
            members = tf.getmembers()
            need_f = any(m.name.lower().endswith(('.f', '.f90', '.f77'))
                         for m in members)
            # Fortran builds use CC to perform the link (they do not call the
            # linker directly), so Fortran implies a C compiler as well.
            need_c = need_f or any(
                m.name.lower().endswith(('.c', '.pyx')) for m in members)
            need_cxx = any(
                m.name.lower().endswith(('.cxx', '.cpp', '.cc', '.c++'))
                for m in members)
            for m in members:
                if m.name.lower().endswith('setup.py'):
                    try:
                        content = tf.extractfile(m).read().decode("utf-8")
                        if 'numpy.get_include()' in content or 'np.get_include()' in content:
                            need_numpy_pin = True
                    except Exception:
                        # Best effort: skip unreadable or non-UTF-8 setup.py
                        # files instead of aborting the whole scan.  (The
                        # original bare `except:` also swallowed
                        # KeyboardInterrupt/SystemExit.)
                        pass
    return need_f, need_c, need_cxx, need_numpy_pin
def update_cb3(recipe_path, conda_build_config_path):
    '''
    Update the recipe in `recipe_path` to use conda-build=3 features according
    to conda-build config yaml in `conda_build_config_path`.
    Returns the updated recipe and a message about the changes made.

    The recipe text is edited line-by-line: `change_lines` maps a 0-based
    line index to a ``(old_line, new_line)`` pair, where ``new_line`` of
    ``None`` deletes the line.  `messages` is an ordered set of unique,
    human-readable change descriptions.
    '''
    yaml = ruamel.yaml.YAML()
    yaml.Constructor = MyConstructor
    yaml.allow_duplicate_keys = True

    with io.open(recipe_path, 'rt') as fh:
        lines = list(fh)
        orig_content = ''.join(lines)

    content = orig_content
    # Blank out jinja2 statement blocks ({% ... %}) character-by-character so
    # line/column positions reported by the YAML parser still match the file.
    jinjas = re.findall('{%(?:.+?)%}', content, re.DOTALL)
    for j in jinjas:
        new_j = ''
        for c in j:
            if c == '\n':
                new_j += '\n'
            else:
                new_j += ' '
        content = content.replace(j, new_j)

    content = render_meta_yaml(content)
    content2 = render_meta_yaml(orig_content)

    meta_ = yaml.load(content)
    orig_meta = yaml.load(content2)
    content2 = content2.split('\n')
    change_lines = {}
    meta = Section(meta_, 0, len(content.split('\n')))

    build_section = meta['build']
    messages = collections.OrderedDict()

    requirements_section = meta['requirements']
    if not requirements_section:
        return orig_content, ''

    reqbuild_section = requirements_section['build']
    if not reqbuild_section:
        return orig_content, ''
    reqbuild_s = reqbuild_section.start
    reqbuild_line = lines[reqbuild_s-1]
    # conda-build 3 splits `requirements/build` into build (tools) and host
    # (link-time deps); the old build: list becomes host:.
    messages['Renamed build with host'] = True
    change_lines[reqbuild_s-1] = (reqbuild_line, reqbuild_line.replace('build:', 'host:'))

    url = orig_meta['source']['url']
    need_f, need_c, need_cxx, need_numpy_pin = get_compilers(url)
    #need_f, need_c, need_cxx, need_numpy_pin = False, False, False, False
    need_mingw_c = False

    with io.open(conda_build_config_path, 'r') as fh:
        config = ''.join(fh)
    # Only the pinned-packages portion of the config is parsed.
    ind = config.index('# Pinning packages')
    config = config[ind:]
    config = yaml.load(config)
    pinned_packages = list(config.keys())

    build_lines = []
    build_space = ''
    need_boost_pin = False
    python_win_matrix = False
    python_dep = False

    section = 'build'
    reqs = {'build': [], 'run': []}
    # Setup requirements
    for i in range(requirements_section.start, requirements_section.end+1):
        line = lines[i].strip()
        if line == 'run:':
            section = 'run'
        if line.startswith('- '):
            line = content2[i].strip()[2:].strip()
            req = line.split(' ')[0]
            reqs[section].append(req)

    section = 'build'
    # Remove build stuff
    for i in range(requirements_section.start, requirements_section.end+1):
        line = lines[i].strip()
        if line == 'run:':
            section = 'run'
        if line.startswith('- '):
            build_space = ' ' * (len(lines[i]) - len(lines[i].lstrip())) + '- '
            line = lines[i].strip()[2:].strip()
            # req is the raw (possibly jinja-templated) name; req_rendered is
            # the name after jinja rendering.
            req = line.replace('{{ ', '{{').replace(' }}', '}}').split(' ')[0]
            req_rendered = content2[i].strip()[2:].strip().split(' ')[0].strip()
            if len(req_rendered) == 0 or req_rendered not in req:
                req_rendered = req
            if req == 'libgfortran':
                need_f = True
            # Explicit toolchain packages are superseded by {{ compiler() }}.
            if req_rendered in ['toolchain', 'gcc', 'libgcc', 'libgfortran', 'vc', 'm2w64-toolchain',
                                'mingwpy', 'system', 'gcc-libs', 'm2w64-gcc-libs']:
                messages['Removing {} in favour of compiler()'.format(req)] = True
                change_lines[i] = (lines[i], None)
                need_c = True
                if req in ['m2w64-toolchain', 'mingwpy'] or \
                        (req != req_rendered and req_rendered == 'toolchain'):
                    need_mingw_c = True
                continue
            if req_rendered == 'cython' and not (need_c or need_cxx or need_f):
                messages['Found cython requirement. Adding compiler'] = True
                need_c = True
            # Build tools stay in the (new) build section.
            if req in ['ninja', 'jom', 'cmake', 'automake', 'autoconf', 'libtool',
                       'make', 'pkg-config', 'automake-wrapper', 'posix'] \
                    or req.startswith("{{p") or req.startswith("m2-") \
                    or (req_rendered in ['perl', 'texlive-core', 'curl', 'openssl', 'tar', 'gzip', 'patch']
                        and section == 'build' and req_rendered not in reqs['run']):
                messages['Moving {} from host to build'.format(req)] = True
                build_lines.append(lines[i].rstrip())
                change_lines[i] = (lines[i], None)
                continue
            # `python  # [win]` was a trick to get a VC version matrix.
            if req == 'python' and '# [win]' in line:
                messages['Moving `python # [win]` which was used for vc matrix'.format(req)] = True
                change_lines[i] = (lines[i], None)
                python_win_matrix = True
                continue
            if req == 'python':
                python_dep = True
            # Drop inline version pins for anything pinned centrally in
            # conda_build_config.yaml.
            if req.replace('-', '_') in pinned_packages or \
                    (req_rendered.replace('-', '_') in pinned_packages):
                s = list(filter(None, lines[i].strip().split(' ')))
                if len(s) > 2 and not s[2].startswith('#') and i not in change_lines:
                    if not req.replace('-', '_') in pinned_packages and \
                            not ('m2w64-' + req_rendered.replace('-', '_')) in pinned_packages and \
                            ('# [not win]' not in line and '# [unix]' not in line):
                        msg = 'Not sure how to remove pinnings for {}'.format(req)
                    else:
                        change_lines[i] = (lines[i], lines[i].replace(s[2], ' '*len(s[2])))
                        msg = 'Removing pinnings for {} to use values from conda_build_config.yaml'.format(req)
                    if req == 'numpy':
                        if s[2].startswith('1') or s[2].startswith('x.x'):
                            need_numpy_pin = True
                        # Only host/run numpy entries get pin_compatible.
                        if need_numpy_pin and i > reqbuild_section.end:
                            line = lines[i].replace(s[2], ' '*len(s[2]))
                            msg = 'Pinning numpy using pin_compatible'
                            change_lines[i] = (lines[i], line.replace('numpy'+' '*len(s[2]),
                                                                     "{{ pin_compatible('numpy') }}"))
                    messages[msg] = True

    # NOTE(review): build_section may be None when the recipe has no build
    # section; this lookup assumes it exists — confirm against callers.
    skip_lines = [(i, line) for i, line in enumerate(lines) if i >= build_section.start and
                  i <= build_section.end and line.strip().startswith('skip:')]

    # Skips that only implemented the python/vc matrix are removed or turned
    # into vc-based skips.
    if python_win_matrix and not python_dep:
        for i, line in skip_lines:
            skip_line = line.strip()
            skip_line = skip_line[skip_line.find('#'):]
            if len(skip_lines) == 1 and skip_line in [
                    '# [win and py36]',
                    '# [win and py35]',
                    '# [win and py>35]',
                    '# [win and py>=36]',
            ]:
                messages["Removed skip for one of py35 or py36 as it's used for vc skipping"] = True
                change_lines[i] = skip_line, None
            if len(skip_lines) == 1 and skip_line in [
                    '# [win and py27]',
                    '# [win and py2k]',
                    '# [win and not py3k]',
                    '# [win and py<33]',
                    '# [win and py<34]',
                    '# [win and py<35]',
                    '# [win and not py35]',
                    '# [win and not py36]',
            ]:
                messages["Removed skip for py2k and added skip for vc<14"] = True
                change_lines[i] = line, line[:line.find('#')] + '# [win and vc<14]'

    # An explicit msvc_compiler pin becomes a vc-based skip.
    for i, line in enumerate(lines):
        vc14 = 'msvc_compiler: 14.0'
        if line.strip().startswith(vc14):
            need_c = True
            messages["Removed {} and added a skip".format(vc14)] = True
            change_lines[i] = line, line.replace(vc14, 'skip: True # [vc<14]')

    features_section = build_section['features']
    remove_features_section = True
    # Remove vc features
    if features_section is not None:
        for i in range(features_section.start, features_section.end):
            line = lines[i].strip()
            if line.startswith('-'):
                line = line[2:]
            if line.startswith('vc'):
                messages['Removing vc features'] = True
                change_lines[i] = (lines[i], None)
                need_c = True
            elif len(line) > 0:
                # Some non-vc feature remains; keep the section header.
                remove_features_section = False
        if remove_features_section:
            messages['Removing features section as it is empty'] = True
            change_lines[features_section.start-1] = (lines[features_section.start-1], None)

    def add_compiler(name, p_name):
        # Append a {{ compiler(...) }} entry; mingw recipes need separate
        # unix/win selectors.
        if need_mingw_c:
            build_lines.append(build_space + "{{ compiler('"+ name + "') }} # [unix]")
            build_lines.append(build_space + "{{ compiler('m2w64_"+ name + "') }} # [win]")
            messages['Adding ' + p_name + ' compiler with mingw for windows'] = True
        else:
            build_lines.append(build_space + "{{ compiler('"+ name + "') }}")
            messages['Adding ' + p_name + ' compiler'] = True

    if need_f:
        add_compiler('fortran', 'Fortran')
    if need_c:
        add_compiler('c', 'C')
    if need_cxx:
        add_compiler('cxx', 'C++')

    # Insert the new `build:` list just before the requirements section.
    if build_lines:
        build_lines = [' '*(len(reqbuild_line) - len(reqbuild_line.lstrip())) +'build:'] + build_lines
        pos = requirements_section.start - 1
        change_lines[pos] = lines[pos], lines[pos] + '\n'.join(build_lines)

    # Apply the collected edits (None removes the line).
    new_lines = []
    for i, line in enumerate(lines):
        if i in change_lines:
            if change_lines[i][1]:
                new_lines.append(change_lines[i][1].rstrip())
        else:
            new_lines.append(line.rstrip())

    new_lines = ('\n'.join(new_lines)).split('\n')

    # Translate remaining python-version selectors into vc selectors when the
    # python dependency only existed for the vc matrix.
    if python_win_matrix and not python_dep:
        for i, line in enumerate(new_lines):
            l = line.strip()
            ind = l.find('#')
            if ind != -1:
                select = l[ind:]
                for x in ['py27', 'py<33', 'py<34', 'py<35', 'py2k', 'py<=27', 'py==27']:
                    if x in select:
                        new_lines[i] = line.replace(x, 'vc<14')
                        messages['Changed {} in selector {} to vc<14'.format(x, select)] = True
                for x in ['py3k', 'py>27', 'py>=35', 'py>34', 'py>=34', 'py>=33', 'py>33']:
                    if x in select:
                        new_lines[i] = line.replace(x, 'vc==14')
                        messages['Changed {} in selector {} to vc==14'.format(x, select)] = True

    return '\n'.join(new_lines) + '\n', '\n'.join(messages.keys())
# Command-line entry point: update a recipe and write the result to a file.
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("recipe", help="Path to recipe meta.yaml")
    parser.add_argument("output", help="Path where updated recipe is stored")
    parser.add_argument("config", help="Path to conda_build_config.yaml file")
    args = parser.parse_args()
    new_meta, msg = update_cb3(args.recipe, args.config)
    with io.open(args.output, 'w') as fh:
        fh.write(new_meta)
    print(msg)
| {
"content_hash": "2896002ac2868434223b24d530d10636",
"timestamp": "",
"source": "github",
"line_count": 379,
"max_line_length": 111,
"avg_line_length": 40.3377308707124,
"alnum_prop": 0.5232862375719518,
"repo_name": "shadowwalkersb/conda-smithy",
"id": "9c40a44da3565c8c473c2cdd57d3bcb646b5512a",
"size": "15288",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "conda_smithy/update_cb3.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "29"
},
{
"name": "Python",
"bytes": "278199"
},
{
"name": "Shell",
"bytes": "7983"
}
],
"symlink_target": ""
} |
# xml.etree test. This file contains enough tests to make sure that
# all included components work as they should.
# Large parts are extracted from the upstream test suite.
#
# PLEASE write all new tests using the standard unittest infrastructure and
# not doctest.
#
# IMPORTANT: the same tests are run from "test_xml_etree_c" in order
# to ensure consistency between the C implementation and the Python
# implementation.
#
# For this purpose, the module-level "ET" symbol is temporarily
# monkey-patched when running the "test_xml_etree_c" test suite.
# Don't re-import "xml.etree.ElementTree" module in the docstring,
# except if the test is specific to the Python implementation.
import html
import io
import sys
import unittest
import weakref
from test import support
from test.support import TESTFN, findfile, unlink, import_fresh_module, gc_collect
# ET is monkey-patched by the test runner: the Python implementation here,
# the C implementation from test_xml_etree_c.
pyET = None
ET = None

SIMPLE_XMLFILE = findfile("simple.xml", subdir="xmltestdata")
try:
    SIMPLE_XMLFILE.encode("utf-8")
except UnicodeEncodeError:
    # The doctests open this path; skip the whole module if the filesystem
    # name cannot round-trip through UTF-8.
    raise unittest.SkipTest("filename is not encodable to utf8")
SIMPLE_NS_XMLFILE = findfile("simple-ns.xml", subdir="xmltestdata")
SAMPLE_XML = """\
<body>
<tag class='a'>text</tag>
<tag class='b' />
<section>
<tag class='b' id='inner'>subtext</tag>
</section>
</body>
"""
SAMPLE_SECTION = """\
<section>
<tag class='b' id='inner'>subtext</tag>
<nexttag />
<nextsection>
<tag />
</nextsection>
</section>
"""
SAMPLE_XML_NS = """
<body xmlns="http://effbot.org/ns">
<tag>text</tag>
<tag />
<section>
<tag>subtext</tag>
</section>
</body>
"""
SAMPLE_XML_NS_ELEMS = """
<root>
<h:table xmlns:h="hello">
<h:tr>
<h:td>Apples</h:td>
<h:td>Bananas</h:td>
</h:tr>
</h:table>
<f:table xmlns:f="foo">
<f:name>African Coffee Table</f:name>
<f:width>80</f:width>
<f:length>120</f:length>
</f:table>
</root>
"""
# Doctest: verifies the etree submodules are importable at all.
def sanity():
    """
    Import sanity.

    >>> from xml.etree import ElementTree
    >>> from xml.etree import ElementInclude
    >>> from xml.etree import ElementPath
    """
def check_method(method):
    """Print a diagnostic when *method* lacks a ``__call__`` attribute."""
    has_call = hasattr(method, '__call__')
    if not has_call:
        print(method, "not callable")
def serialize(elem, to_string=True, encoding='unicode', **options):
    """Serialize *elem* via ``ElementTree.write`` into an in-memory buffer.

    Returns the buffer contents (str for 'unicode', bytes otherwise) when
    *to_string* is true; otherwise returns the buffer rewound to the start.
    Extra keyword *options* are forwarded to ``write``.
    """
    # Uses the module-level `io` import; the original re-imported io locally,
    # which was redundant.  `buf` also avoids the shadowing name `file`.
    if encoding != 'unicode':
        buf = io.BytesIO()
    else:
        buf = io.StringIO()
    tree = ET.ElementTree(elem)
    tree.write(buf, encoding=encoding, **options)
    if to_string:
        return buf.getvalue()
    else:
        buf.seek(0)
        return buf
def summarize(elem):
    """Return a short description of *elem*: "<Comment>" or its tag."""
    return "<Comment>" if elem.tag == ET.Comment else elem.tag
def summarize_list(seq):
    """Apply :func:`summarize` to every element in *seq*."""
    return list(map(summarize, seq))
def normalize_crlf(tree):
    """Rewrite CRLF line endings to LF in every text/tail value of *tree*."""
    for node in tree.iter():
        if node.text:
            node.text = node.text.replace("\r\n", "\n")
        if node.tail:
            node.tail = node.tail.replace("\r\n", "\n")
def normalize_exception(func, *args, **kwargs):
    """Call *func* and print any exception without its ``__module__`` prefix."""
    # Ignore the exception __module__
    try:
        func(*args, **kwargs)
    except Exception as exc:
        print("Traceback (most recent call last):")
        print("{}: {}".format(type(exc).__name__, exc))
def check_string(string):
    """Smoke-test that *string* supports the core str operations."""
    len(string)
    for ch in string:
        if len(ch) != 1:
            print("expected one-character string, got %r" % ch)
    _ = string + ""
    _ = string + " "
    string[:0]
def check_mapping(mapping):
    """Smoke-test that *mapping* supports len/keys/items/getitem/setitem.

    Note: mutates *mapping* by setting ``mapping["key"] = "value"``.
    """
    len(mapping)
    all_keys = mapping.keys()
    all_items = mapping.items()
    for k in all_keys:
        mapping[k]
    mapping["key"] = "value"
    if mapping["key"] != "value":
        print("expected value string, got %r" % mapping["key"])
def check_element(element):
    """Recursively verify that *element* exposes the element interface."""
    if not ET.iselement(element):
        print("not an element")
    # Check the four core members, printing the same diagnostics as before.
    for member in ("tag", "attrib", "text", "tail"):
        if not hasattr(element, member):
            print("no %s member" % member)
    check_string(element.tag)
    check_mapping(element.attrib)
    if element.text is not None:
        check_string(element.text)
    if element.tail is not None:
        check_string(element.tail)
    for child in element:
        check_element(child)
# --------------------------------------------------------------------
# element tree tests
# Doctest: checks the presence of the public Element/ElementTree API surface.
def interface():
    """
    Test element tree interface.

    >>> element = ET.Element("tag")
    >>> check_element(element)
    >>> tree = ET.ElementTree(element)
    >>> check_element(tree.getroot())

    >>> element = ET.Element("t\\xe4g", key="value")
    >>> tree = ET.ElementTree(element)
    >>> repr(element)   # doctest: +ELLIPSIS
    "<Element 't\\xe4g' at 0x...>"
    >>> element = ET.Element("tag", key="value")

    Make sure all standard element methods exist.

    >>> check_method(element.append)
    >>> check_method(element.extend)
    >>> check_method(element.insert)
    >>> check_method(element.remove)
    >>> check_method(element.getchildren)
    >>> check_method(element.find)
    >>> check_method(element.iterfind)
    >>> check_method(element.findall)
    >>> check_method(element.findtext)
    >>> check_method(element.clear)
    >>> check_method(element.get)
    >>> check_method(element.set)
    >>> check_method(element.keys)
    >>> check_method(element.items)
    >>> check_method(element.iter)
    >>> check_method(element.itertext)
    >>> check_method(element.getiterator)

    These methods return an iterable. See bug 6472.

    >>> check_method(element.iterfind("tag").__next__)
    >>> check_method(element.iterfind("*").__next__)
    >>> check_method(tree.iterfind("tag").__next__)
    >>> check_method(tree.iterfind("*").__next__)

    These aliases are provided:

    >>> assert ET.XML == ET.fromstring
    >>> assert ET.PI == ET.ProcessingInstruction
    >>> assert ET.XMLParser == ET.XMLTreeBuilder
    """
# Doctest: append/remove/insert/extend plus slicing semantics on elements.
def simpleops():
    """
    Basic method sanity checks.

    >>> elem = ET.XML("<body><tag/></body>")
    >>> serialize(elem)
    '<body><tag /></body>'
    >>> e = ET.Element("tag2")
    >>> elem.append(e)
    >>> serialize(elem)
    '<body><tag /><tag2 /></body>'
    >>> elem.remove(e)
    >>> serialize(elem)
    '<body><tag /></body>'
    >>> elem.insert(0, e)
    >>> serialize(elem)
    '<body><tag2 /><tag /></body>'
    >>> elem.remove(e)
    >>> elem.extend([e])
    >>> serialize(elem)
    '<body><tag /><tag2 /></body>'
    >>> elem.remove(e)

    >>> element = ET.Element("tag", key="value")
    >>> serialize(element) # 1
    '<tag key="value" />'
    >>> subelement = ET.Element("subtag")
    >>> element.append(subelement)
    >>> serialize(element) # 2
    '<tag key="value"><subtag /></tag>'
    >>> element.insert(0, subelement)
    >>> serialize(element) # 3
    '<tag key="value"><subtag /><subtag /></tag>'
    >>> element.remove(subelement)
    >>> serialize(element) # 4
    '<tag key="value"><subtag /></tag>'
    >>> element.remove(subelement)
    >>> serialize(element) # 5
    '<tag key="value" />'
    >>> element.remove(subelement)
    Traceback (most recent call last):
    ValueError: list.remove(x): x not in list
    >>> serialize(element) # 6
    '<tag key="value" />'
    >>> element[0:0] = [subelement, subelement, subelement]
    >>> serialize(element[1])
    '<subtag />'
    >>> element[1:9] == [element[1], element[2]]
    True
    >>> element[:9:2] == [element[0], element[2]]
    True
    >>> del element[1:2]
    >>> serialize(element)
    '<tag key="value"><subtag /><subtag /></tag>'
    """
# Doctest: CDATA sections parse to plain text.
def cdata():
    """
    Test CDATA handling (etc).

    >>> serialize(ET.XML("<tag>hello</tag>"))
    '<tag>hello</tag>'
    >>> serialize(ET.XML("<tag>&#104;&#101;&#108;&#108;&#111;</tag>"))
    '<tag>hello</tag>'
    >>> serialize(ET.XML("<tag><![CDATA[hello]]></tag>"))
    '<tag>hello</tag>'
    """
# Doctest: find/findtext/findall, including the supported XPath subset and
# namespace-qualified tags.
def find():
    """
    Test find methods (including xpath syntax).

    >>> elem = ET.XML(SAMPLE_XML)
    >>> elem.find("tag").tag
    'tag'
    >>> ET.ElementTree(elem).find("tag").tag
    'tag'
    >>> elem.find("section/tag").tag
    'tag'
    >>> elem.find("./tag").tag
    'tag'
    >>> ET.ElementTree(elem).find("./tag").tag
    'tag'
    >>> ET.ElementTree(elem).find("/tag").tag
    'tag'
    >>> elem[2] = ET.XML(SAMPLE_SECTION)
    >>> elem.find("section/nexttag").tag
    'nexttag'
    >>> ET.ElementTree(elem).find("section/tag").tag
    'tag'
    >>> ET.ElementTree(elem).find("tog")
    >>> ET.ElementTree(elem).find("tog/foo")
    >>> elem.findtext("tag")
    'text'
    >>> elem.findtext("section/nexttag")
    ''
    >>> elem.findtext("section/nexttag", "default")
    ''
    >>> elem.findtext("tog")
    >>> elem.findtext("tog", "default")
    'default'
    >>> ET.ElementTree(elem).findtext("tag")
    'text'
    >>> ET.ElementTree(elem).findtext("tog/foo")
    >>> ET.ElementTree(elem).findtext("tog/foo", "default")
    'default'
    >>> ET.ElementTree(elem).findtext("./tag")
    'text'
    >>> ET.ElementTree(elem).findtext("/tag")
    'text'
    >>> elem.findtext("section/tag")
    'subtext'
    >>> ET.ElementTree(elem).findtext("section/tag")
    'subtext'
    >>> summarize_list(elem.findall("."))
    ['body']
    >>> summarize_list(elem.findall("tag"))
    ['tag', 'tag']
    >>> summarize_list(elem.findall("tog"))
    []
    >>> summarize_list(elem.findall("tog/foo"))
    []
    >>> summarize_list(elem.findall("*"))
    ['tag', 'tag', 'section']
    >>> summarize_list(elem.findall(".//tag"))
    ['tag', 'tag', 'tag', 'tag']
    >>> summarize_list(elem.findall("section/tag"))
    ['tag']
    >>> summarize_list(elem.findall("section//tag"))
    ['tag', 'tag']
    >>> summarize_list(elem.findall("section/*"))
    ['tag', 'nexttag', 'nextsection']
    >>> summarize_list(elem.findall("section//*"))
    ['tag', 'nexttag', 'nextsection', 'tag']
    >>> summarize_list(elem.findall("section/.//*"))
    ['tag', 'nexttag', 'nextsection', 'tag']
    >>> summarize_list(elem.findall("*/*"))
    ['tag', 'nexttag', 'nextsection']
    >>> summarize_list(elem.findall("*//*"))
    ['tag', 'nexttag', 'nextsection', 'tag']
    >>> summarize_list(elem.findall("*/tag"))
    ['tag']
    >>> summarize_list(elem.findall("*/./tag"))
    ['tag']
    >>> summarize_list(elem.findall("./tag"))
    ['tag', 'tag']
    >>> summarize_list(elem.findall(".//tag"))
    ['tag', 'tag', 'tag', 'tag']
    >>> summarize_list(elem.findall("././tag"))
    ['tag', 'tag']
    >>> summarize_list(elem.findall(".//tag[@class]"))
    ['tag', 'tag', 'tag']
    >>> summarize_list(elem.findall(".//tag[@class='a']"))
    ['tag']
    >>> summarize_list(elem.findall(".//tag[@class='b']"))
    ['tag', 'tag']
    >>> summarize_list(elem.findall(".//tag[@id]"))
    ['tag']
    >>> summarize_list(elem.findall(".//section[tag]"))
    ['section']
    >>> summarize_list(elem.findall(".//section[element]"))
    []
    >>> summarize_list(elem.findall("../tag"))
    []
    >>> summarize_list(elem.findall("section/../tag"))
    ['tag', 'tag']
    >>> summarize_list(ET.ElementTree(elem).findall("./tag"))
    ['tag', 'tag']

    Following example is invalid in 1.2.
    A leading '*' is assumed in 1.3.

    >>> elem.findall("section//") == elem.findall("section//*")
    True

    ET's Path module handles this case incorrectly; this gives
    a warning in 1.3, and the behaviour will be modified in 1.4.

    >>> summarize_list(ET.ElementTree(elem).findall("/tag"))
    ['tag', 'tag']

    >>> elem = ET.XML(SAMPLE_XML_NS)
    >>> summarize_list(elem.findall("tag"))
    []
    >>> summarize_list(elem.findall("{http://effbot.org/ns}tag"))
    ['{http://effbot.org/ns}tag', '{http://effbot.org/ns}tag']
    >>> summarize_list(elem.findall(".//{http://effbot.org/ns}tag"))
    ['{http://effbot.org/ns}tag', '{http://effbot.org/ns}tag', '{http://effbot.org/ns}tag']
    """
# Doctest: ElementTree(file=...) accepts both file objects and path names.
def file_init():
    """
    >>> import io

    >>> stringfile = io.BytesIO(SAMPLE_XML.encode("utf-8"))
    >>> tree = ET.ElementTree(file=stringfile)
    >>> tree.find("tag").tag
    'tag'
    >>> tree.find("section/tag").tag
    'tag'

    >>> tree = ET.ElementTree(file=SIMPLE_XMLFILE)
    >>> tree.find("element").tag
    'element'
    >>> tree.find("element/../empty-element").tag
    'empty-element'
    """
# Doctest: absolute paths are rejected by findall on an element.
def bad_find():
    """
    Check bad or unsupported path expressions.

    >>> elem = ET.XML(SAMPLE_XML)
    >>> elem.findall("/tag")
    Traceback (most recent call last):
    SyntaxError: cannot use absolute path on element
    """
# Doctest: ElementPath's compiled-path cache grows with new paths, is reused
# for repeated ones, and is bounded well below 500 entries.
def path_cache():
    """
    Check that the path cache behaves sanely.

    >>> from xml.etree import ElementPath

    >>> elem = ET.XML(SAMPLE_XML)
    >>> for i in range(10): ET.ElementTree(elem).find('./'+str(i))
    >>> cache_len_10 = len(ElementPath._cache)
    >>> for i in range(10): ET.ElementTree(elem).find('./'+str(i))
    >>> len(ElementPath._cache) == cache_len_10
    True
    >>> for i in range(20): ET.ElementTree(elem).find('./'+str(i))
    >>> len(ElementPath._cache) > cache_len_10
    True
    >>> for i in range(600): ET.ElementTree(elem).find('./'+str(i))
    >>> len(ElementPath._cache) < 500
    True
    """
# Doctest: copy.copy shares children (mutation is visible), copy.deepcopy
# does not.
def copy():
    """
    Test copy handling (etc).

    >>> import copy
    >>> e1 = ET.XML("<tag>hello<foo/></tag>")
    >>> e2 = copy.copy(e1)
    >>> e3 = copy.deepcopy(e1)
    >>> e1.find("foo").tag = "bar"
    >>> serialize(e1)
    '<tag>hello<bar /></tag>'
    >>> serialize(e2)
    '<tag>hello<bar /></tag>'
    >>> serialize(e3)
    '<tag>hello<foo /></tag>'
    """
# Doctest: attribute get/set, attrib-dict construction variants, and the
# precedence of keyword arguments over a positional attrib dict.
def attrib():
    """
    Test attribute handling.

    >>> elem = ET.Element("tag")
    >>> elem.get("key") # 1.1
    >>> elem.get("key", "default") # 1.2
    'default'
    >>> elem.set("key", "value")
    >>> elem.get("key") # 1.3
    'value'

    >>> elem = ET.Element("tag", key="value")
    >>> elem.get("key") # 2.1
    'value'
    >>> elem.attrib # 2.2
    {'key': 'value'}

    >>> attrib = {"key": "value"}
    >>> elem = ET.Element("tag", attrib)
    >>> attrib.clear() # check for aliasing issues
    >>> elem.get("key") # 3.1
    'value'
    >>> elem.attrib # 3.2
    {'key': 'value'}

    >>> attrib = {"key": "value"}
    >>> elem = ET.Element("tag", **attrib)
    >>> attrib.clear() # check for aliasing issues
    >>> elem.get("key") # 4.1
    'value'
    >>> elem.attrib # 4.2
    {'key': 'value'}

    >>> elem = ET.Element("tag", {"key": "other"}, key="value")
    >>> elem.get("key") # 5.1
    'value'
    >>> elem.attrib # 5.2
    {'key': 'value'}

    >>> elem = ET.Element('test')
    >>> elem.text = "aa"
    >>> elem.set('testa', 'testval')
    >>> elem.set('testb', 'test2')
    >>> ET.tostring(elem)
    b'<test testa="testval" testb="test2">aa</test>'
    >>> sorted(elem.keys())
    ['testa', 'testb']
    >>> sorted(elem.items())
    [('testa', 'testval'), ('testb', 'test2')]
    >>> elem.attrib['testb']
    'test2'
    >>> elem.attrib['testb'] = 'test1'
    >>> elem.attrib['testc'] = 'test2'
    >>> ET.tostring(elem)
    b'<test testa="testval" testb="test1" testc="test2">aa</test>'
    """
# Doctest: makeelement must copy the attrib dict (no aliasing) and the
# created element behaves like any other child.
def makeelement():
    """
    Test makeelement handling.

    >>> elem = ET.Element("tag")
    >>> attrib = {"key": "value"}
    >>> subelem = elem.makeelement("subtag", attrib)
    >>> if subelem.attrib is attrib:
    ...     print("attrib aliasing")
    >>> elem.append(subelem)
    >>> serialize(elem)
    '<tag><subtag key="value" /></tag>'

    >>> elem.clear()
    >>> serialize(elem)
    '<tag />'
    >>> elem.append(subelem)
    >>> serialize(elem)
    '<tag><subtag key="value" /></tag>'
    >>> elem.extend([subelem, subelem])
    >>> serialize(elem)
    '<tag><subtag key="value" /><subtag key="value" /><subtag key="value" /></tag>'
    >>> elem[:] = [subelem]
    >>> serialize(elem)
    '<tag><subtag key="value" /></tag>'
    >>> elem[:] = tuple([subelem])
    >>> serialize(elem)
    '<tag><subtag key="value" /></tag>'
    """
def parsefile():
    # Doctest-only container exercising ET.parse, XMLParser, the legacy
    # XMLTreeBuilder alias, and an explicit TreeBuilder target.
    # NOTE(review): indentation inside the expected serializations appears to
    # have been lost in this copy -- verify against SIMPLE_XMLFILE's contents.
    """
    Test parsing from file.

    >>> tree = ET.parse(SIMPLE_XMLFILE)
    >>> normalize_crlf(tree)
    >>> tree.write(sys.stdout, encoding='unicode')
    <root>
    <element key="value">text</element>
    <element>text</element>tail
    <empty-element />
    </root>
    >>> tree = ET.parse(SIMPLE_NS_XMLFILE)
    >>> normalize_crlf(tree)
    >>> tree.write(sys.stdout, encoding='unicode')
    <ns0:root xmlns:ns0="namespace">
    <ns0:element key="value">text</ns0:element>
    <ns0:element>text</ns0:element>tail
    <ns0:empty-element />
    </ns0:root>

    >>> with open(SIMPLE_XMLFILE) as f:
    ...     data = f.read()

    >>> parser = ET.XMLParser()
    >>> parser.version  # doctest: +ELLIPSIS
    'Expat ...'
    >>> parser.feed(data)
    >>> print(serialize(parser.close()))
    <root>
    <element key="value">text</element>
    <element>text</element>tail
    <empty-element />
    </root>

    >>> parser = ET.XMLTreeBuilder() # 1.2 compatibility
    >>> parser.feed(data)
    >>> print(serialize(parser.close()))
    <root>
    <element key="value">text</element>
    <element>text</element>tail
    <empty-element />
    </root>

    >>> target = ET.TreeBuilder()
    >>> parser = ET.XMLParser(target=target)
    >>> parser.feed(data)
    >>> print(serialize(parser.close()))
    <root>
    <element key="value">text</element>
    <element>text</element>tail
    <empty-element />
    </root>
    """
def parseliteral():
    # Doctest-only container: parsing from strings/sequences and the
    # tostring/tostringlist/XMLID entry points; body intentionally empty.
    """
    >>> element = ET.XML("<html><body>text</body></html>")
    >>> ET.ElementTree(element).write(sys.stdout, encoding='unicode')
    <html><body>text</body></html>
    >>> element = ET.fromstring("<html><body>text</body></html>")
    >>> ET.ElementTree(element).write(sys.stdout, encoding='unicode')
    <html><body>text</body></html>
    >>> sequence = ["<html><body>", "text</bo", "dy></html>"]
    >>> element = ET.fromstringlist(sequence)
    >>> ET.tostring(element)
    b'<html><body>text</body></html>'
    >>> b"".join(ET.tostringlist(element))
    b'<html><body>text</body></html>'
    >>> ET.tostring(element, "ascii")
    b"<?xml version='1.0' encoding='ascii'?>\\n<html><body>text</body></html>"
    >>> _, ids = ET.XMLID("<html><body>text</body></html>")
    >>> len(ids)
    0
    >>> _, ids = ET.XMLID("<html><body id='body'>text</body></html>")
    >>> len(ids)
    1
    >>> ids["body"].tag
    'body'
    """
def iterparse():
    # Doctest-only container for the iterparse() interface; the function body
    # is intentionally empty.  FIX: the BytesIO source line was restored to
    # the b"...\xe9ffbot..." escape form -- a Python 3 bytes literal cannot
    # contain a raw non-ASCII character (it is a SyntaxError).
    """
    Test iterparse interface.

    >>> iterparse = ET.iterparse

    >>> context = iterparse(SIMPLE_XMLFILE)
    >>> action, elem = next(context)
    >>> print(action, elem.tag)
    end element
    >>> for action, elem in context:
    ...     print(action, elem.tag)
    end element
    end empty-element
    end root
    >>> context.root.tag
    'root'

    >>> context = iterparse(SIMPLE_NS_XMLFILE)
    >>> for action, elem in context:
    ...     print(action, elem.tag)
    end {namespace}element
    end {namespace}element
    end {namespace}empty-element
    end {namespace}root

    >>> events = ()
    >>> context = iterparse(SIMPLE_XMLFILE, events)
    >>> for action, elem in context:
    ...     print(action, elem.tag)

    >>> events = ()
    >>> context = iterparse(SIMPLE_XMLFILE, events=events)
    >>> for action, elem in context:
    ...     print(action, elem.tag)

    >>> events = ("start", "end")
    >>> context = iterparse(SIMPLE_XMLFILE, events)
    >>> for action, elem in context:
    ...     print(action, elem.tag)
    start root
    start element
    end element
    start element
    end element
    start empty-element
    end empty-element
    end root

    >>> events = ("start", "end", "start-ns", "end-ns")
    >>> context = iterparse(SIMPLE_NS_XMLFILE, events)
    >>> for action, elem in context:
    ...     if action in ("start", "end"):
    ...         print(action, elem.tag)
    ...     else:
    ...         print(action, elem)
    start-ns ('', 'namespace')
    start {namespace}root
    start {namespace}element
    end {namespace}element
    start {namespace}element
    end {namespace}element
    start {namespace}empty-element
    end {namespace}empty-element
    end {namespace}root
    end-ns None

    >>> events = ("start", "end", "bogus")
    >>> with open(SIMPLE_XMLFILE, "rb") as f:
    ...     iterparse(f, events)
    Traceback (most recent call last):
    ValueError: unknown event 'bogus'

    >>> import io

    >>> source = io.BytesIO(
    ...     b"<?xml version='1.0' encoding='iso-8859-1'?>\\n"
    ...     b"<body xmlns='http://\\xe9ffbot.org/ns'\\n"
    ...     b" xmlns:cl\\xe9='http://effbot.org/ns'>text</body>\\n")
    >>> events = ("start-ns",)
    >>> context = iterparse(source, events)
    >>> for action, elem in context:
    ...     print(action, elem)
    start-ns ('', 'http://\\xe9ffbot.org/ns')
    start-ns ('cl\\xe9', 'http://effbot.org/ns')

    >>> source = io.StringIO("<document />junk")
    >>> try:
    ...     for action, elem in iterparse(source):
    ...         print(action, elem.tag)
    ... except ET.ParseError as v:
    ...     print(v)
    end document
    junk after document element: line 1, column 12
    """
def writefile():
    # Doctest-only container: serialization of text, subelements, comments
    # and processing instructions; the function body is intentionally empty.
    """
    >>> elem = ET.Element("tag")
    >>> elem.text = "text"
    >>> serialize(elem)
    '<tag>text</tag>'
    >>> ET.SubElement(elem, "subtag").text = "subtext"
    >>> serialize(elem)
    '<tag>text<subtag>subtext</subtag></tag>'

    Test tag suppression
    >>> elem.tag = None
    >>> serialize(elem)
    'text<subtag>subtext</subtag>'
    >>> elem.insert(0, ET.Comment("comment"))
    >>> serialize(elem) # assumes 1.3
    'text<!--comment--><subtag>subtext</subtag>'
    >>> elem[0] = ET.PI("key", "value")
    >>> serialize(elem)
    'text<?key value?><subtag>subtext</subtag>'
    """
def custom_builder():
    # Doctest-only container: XMLParser with a user-supplied target object
    # (start/end/data, plus optional pi/comment handlers); body intentionally
    # empty.
    """
    Test parser w. custom builder.

    >>> with open(SIMPLE_XMLFILE) as f:
    ...     data = f.read()
    >>> class Builder:
    ...     def start(self, tag, attrib):
    ...         print("start", tag)
    ...     def end(self, tag):
    ...         print("end", tag)
    ...     def data(self, text):
    ...         pass
    >>> builder = Builder()
    >>> parser = ET.XMLParser(target=builder)
    >>> parser.feed(data)
    start root
    start element
    end element
    start element
    end element
    start empty-element
    end empty-element
    end root

    >>> with open(SIMPLE_NS_XMLFILE) as f:
    ...     data = f.read()
    >>> class Builder:
    ...     def start(self, tag, attrib):
    ...         print("start", tag)
    ...     def end(self, tag):
    ...         print("end", tag)
    ...     def data(self, text):
    ...         pass
    ...     def pi(self, target, data):
    ...         print("pi", target, repr(data))
    ...     def comment(self, data):
    ...         print("comment", repr(data))
    >>> builder = Builder()
    >>> parser = ET.XMLParser(target=builder)
    >>> parser.feed(data)
    pi pi 'data'
    comment ' comment '
    start {namespace}root
    start {namespace}element
    end {namespace}element
    start {namespace}element
    end {namespace}element
    start {namespace}empty-element
    end {namespace}empty-element
    end {namespace}root
    """
def getchildren():
    # Doctest-only container for the deprecated Element.getchildren() /
    # getiterator() API and child slicing; body intentionally empty.
    """
    Test Element.getchildren()

    >>> with open(SIMPLE_XMLFILE, "rb") as f:
    ...     tree = ET.parse(f)
    >>> for elem in tree.getroot().iter():
    ...     summarize_list(elem.getchildren())
    ['element', 'element', 'empty-element']
    []
    []
    []
    >>> for elem in tree.getiterator():
    ...     summarize_list(elem.getchildren())
    ['element', 'element', 'empty-element']
    []
    []
    []

    >>> elem = ET.XML(SAMPLE_XML)
    >>> len(elem.getchildren())
    3
    >>> len(elem[2].getchildren())
    1
    >>> elem[:] == elem.getchildren()
    True
    >>> child1 = elem[0]
    >>> child2 = elem[2]
    >>> del elem[1:2]
    >>> len(elem.getchildren())
    2
    >>> child1 == elem[0]
    True
    >>> child2 == elem[1]
    True
    >>> elem[0:2] = [child2, child1]
    >>> child2 == elem[0]
    True
    >>> child1 == elem[1]
    True
    >>> child1 == elem[0]
    False
    >>> elem.clear()
    >>> elem.getchildren()
    []
    """
def writestring():
    # Doctest-only container: XML/fromstring round-trips through tostring;
    # the function body is intentionally empty.
    """
    >>> elem = ET.XML("<html><body>text</body></html>")
    >>> ET.tostring(elem)
    b'<html><body>text</body></html>'
    >>> elem = ET.fromstring("<html><body>text</body></html>")
    >>> ET.tostring(elem)
    b'<html><body>text</body></html>'
    """
def check_encoding(encoding):
    """
    >>> check_encoding("ascii")
    >>> check_encoding("us-ascii")
    >>> check_encoding("iso-8859-1")
    >>> check_encoding("iso-8859-15")
    >>> check_encoding("cp437")
    >>> check_encoding("mac-roman")
    """
    # Parse a minimal document declaring the given encoding; an unsupported
    # encoding would raise ParseError here, failing the doctest.
    ET.XML("<?xml version='1.0' encoding='%s'?><xml />" % encoding)
def methods():
    # Doctest-only container for the xml/html/text serialization methods.
    # FIX: restored the '&lt;' escapes -- a raw '<' inside XML character data
    # is not well-formed (ET.XML would raise ParseError), and the xml method
    # escapes '<' on output, while the html and text methods emit it raw.
    r"""
    Test serialization methods.

    >>> e = ET.XML("<html><link/><script>1 &lt; 2</script></html>")
    >>> e.tail = "\n"
    >>> serialize(e)
    '<html><link /><script>1 &lt; 2</script></html>\n'
    >>> serialize(e, method=None)
    '<html><link /><script>1 &lt; 2</script></html>\n'
    >>> serialize(e, method="xml")
    '<html><link /><script>1 &lt; 2</script></html>\n'
    >>> serialize(e, method="html")
    '<html><link><script>1 < 2</script></html>\n'
    >>> serialize(e, method="text")
    '1 < 2\n'
    """
# Document referencing an undefined entity via an unresolvable external
# parameter-entity DTD; used by entity() to exercise entity error paths.
ENTITY_XML = """\
<!DOCTYPE points [
<!ENTITY % user-entities SYSTEM 'user-entities.xml'>
%user-entities;
]>
<document>&entity;</document>
"""
def entity():
    # Doctest-only container for entity handling; body intentionally empty.
    # FIX: restored the &#33328; character reference in the us-ascii
    # expectation -- a Python 3 bytes literal cannot contain the raw
    # non-ASCII character (it is a SyntaxError); the us-ascii serializer
    # emits non-ASCII characters as numeric references (0x8230 == 33328).
    """
    Test entity handling.

    1) good entities

    >>> e = ET.XML("<document title='舰'>test</document>")
    >>> serialize(e, encoding="us-ascii")
    b'<document title="&#33328;">test</document>'
    >>> serialize(e)
    '<document title="\u8230">test</document>'

    2) bad entities

    >>> normalize_exception(ET.XML, "<document>&entity;</document>")
    Traceback (most recent call last):
    ParseError: undefined entity: line 1, column 10

    >>> normalize_exception(ET.XML, ENTITY_XML)
    Traceback (most recent call last):
    ParseError: undefined entity &entity;: line 5, column 10

    3) custom entity

    >>> parser = ET.XMLParser()
    >>> parser.entity["entity"] = "text"
    >>> parser.feed(ENTITY_XML)
    >>> root = parser.close()
    >>> serialize(root)
    '<document>text</document>'
    """
def namespace():
    # Doctest-only container for namespace serialization; body intentionally
    # empty.  NOTE(review): indentation in the final expected serialization
    # appears to have been lost in this copy -- verify against SAMPLE_XML_NS.
    """
    Test namespace issues.

    1) xml namespace

    >>> elem = ET.XML("<tag xml:lang='en' />")
    >>> serialize(elem) # 1.1
    '<tag xml:lang="en" />'

    2) other "well-known" namespaces

    >>> elem = ET.XML("<rdf:RDF xmlns:rdf='http://www.w3.org/1999/02/22-rdf-syntax-ns#' />")
    >>> serialize(elem) # 2.1
    '<rdf:RDF xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" />'
    >>> elem = ET.XML("<html:html xmlns:html='http://www.w3.org/1999/xhtml' />")
    >>> serialize(elem) # 2.2
    '<html:html xmlns:html="http://www.w3.org/1999/xhtml" />'
    >>> elem = ET.XML("<soap:Envelope xmlns:soap='http://schemas.xmlsoap.org/soap/envelope' />")
    >>> serialize(elem) # 2.3
    '<ns0:Envelope xmlns:ns0="http://schemas.xmlsoap.org/soap/envelope" />'

    3) unknown namespaces

    >>> elem = ET.XML(SAMPLE_XML_NS)
    >>> print(serialize(elem))
    <ns0:body xmlns:ns0="http://effbot.org/ns">
    <ns0:tag>text</ns0:tag>
    <ns0:tag />
    <ns0:section>
    <ns0:tag>subtext</ns0:tag>
    </ns0:section>
    </ns0:body>
    """
def qname():
    # Doctest-only container for QName handling in tags, attribute keys and
    # attribute values; the function body is intentionally empty.
    """
    Test QName handling.

    1) decorated tags

    >>> elem = ET.Element("{uri}tag")
    >>> serialize(elem) # 1.1
    '<ns0:tag xmlns:ns0="uri" />'
    >>> elem = ET.Element(ET.QName("{uri}tag"))
    >>> serialize(elem) # 1.2
    '<ns0:tag xmlns:ns0="uri" />'
    >>> elem = ET.Element(ET.QName("uri", "tag"))
    >>> serialize(elem) # 1.3
    '<ns0:tag xmlns:ns0="uri" />'
    >>> elem = ET.Element(ET.QName("uri", "tag"))
    >>> subelem = ET.SubElement(elem, ET.QName("uri", "tag1"))
    >>> subelem = ET.SubElement(elem, ET.QName("uri", "tag2"))
    >>> serialize(elem) # 1.4
    '<ns0:tag xmlns:ns0="uri"><ns0:tag1 /><ns0:tag2 /></ns0:tag>'

    2) decorated attributes

    >>> elem.clear()
    >>> elem.attrib["{uri}key"] = "value"
    >>> serialize(elem) # 2.1
    '<ns0:tag xmlns:ns0="uri" ns0:key="value" />'
    >>> elem.clear()
    >>> elem.attrib[ET.QName("{uri}key")] = "value"
    >>> serialize(elem) # 2.2
    '<ns0:tag xmlns:ns0="uri" ns0:key="value" />'

    3) decorated values are not converted by default, but the
    QName wrapper can be used for values

    >>> elem.clear()
    >>> elem.attrib["{uri}key"] = "{uri}value"
    >>> serialize(elem) # 3.1
    '<ns0:tag xmlns:ns0="uri" ns0:key="{uri}value" />'
    >>> elem.clear()
    >>> elem.attrib["{uri}key"] = ET.QName("{uri}value")
    >>> serialize(elem) # 3.2
    '<ns0:tag xmlns:ns0="uri" ns0:key="ns0:value" />'
    >>> elem.clear()
    >>> subelem = ET.Element("tag")
    >>> subelem.attrib["{uri1}key"] = ET.QName("{uri2}value")
    >>> elem.append(subelem)
    >>> elem.append(subelem)
    >>> serialize(elem) # 3.3
    '<ns0:tag xmlns:ns0="uri" xmlns:ns1="uri1" xmlns:ns2="uri2"><tag ns1:key="ns2:value" /><tag ns1:key="ns2:value" /></ns0:tag>'

    4) Direct QName tests

    >>> str(ET.QName('ns', 'tag'))
    '{ns}tag'
    >>> str(ET.QName('{ns}tag'))
    '{ns}tag'
    >>> q1 = ET.QName('ns', 'tag')
    >>> q2 = ET.QName('ns', 'tag')
    >>> q1 == q2
    True
    >>> q2 = ET.QName('ns', 'other-tag')
    >>> q1 == q2
    False
    >>> q1 == 'ns:tag'
    False
    >>> q1 == '{ns}tag'
    True
    """
def doctype_public():
    # Doctest-only container: parsing a document with a PUBLIC doctype must
    # not raise; the function body is intentionally empty.
    """
    Test PUBLIC doctype.

    >>> elem = ET.XML('<!DOCTYPE html PUBLIC'
    ...     ' "-//W3C//DTD XHTML 1.0 Transitional//EN"'
    ...     ' "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">'
    ...     '<html>text</html>')
    """
def xpath_tokenizer(p):
    """
    Test the XPath tokenizer.

    >>> # tests from the xml specification
    >>> xpath_tokenizer("*")
    ['*']
    >>> xpath_tokenizer("text()")
    ['text', '()']
    >>> xpath_tokenizer("@name")
    ['@', 'name']
    >>> xpath_tokenizer("@*")
    ['@', '*']
    >>> xpath_tokenizer("para[1]")
    ['para', '[', '1', ']']
    >>> xpath_tokenizer("para[last()]")
    ['para', '[', 'last', '()', ']']
    >>> xpath_tokenizer("*/para")
    ['*', '/', 'para']
    >>> xpath_tokenizer("/doc/chapter[5]/section[2]")
    ['/', 'doc', '/', 'chapter', '[', '5', ']', '/', 'section', '[', '2', ']']
    >>> xpath_tokenizer("chapter//para")
    ['chapter', '//', 'para']
    >>> xpath_tokenizer("//para")
    ['//', 'para']
    >>> xpath_tokenizer("//olist/item")
    ['//', 'olist', '/', 'item']
    >>> xpath_tokenizer(".")
    ['.']
    >>> xpath_tokenizer(".//para")
    ['.', '//', 'para']
    >>> xpath_tokenizer("..")
    ['..']
    >>> xpath_tokenizer("../@lang")
    ['..', '/', '@', 'lang']
    >>> xpath_tokenizer("chapter[title]")
    ['chapter', '[', 'title', ']']
    >>> xpath_tokenizer("employee[@secretary and @assistant]")
    ['employee', '[', '@', 'secretary', '', 'and', '', '@', 'assistant', ']']
    >>> # additional tests
    >>> xpath_tokenizer("{http://spam}egg")
    ['{http://spam}egg']
    >>> xpath_tokenizer("./spam.egg")
    ['.', '/', 'spam.egg']
    >>> xpath_tokenizer(".//{http://spam}egg")
    ['.', '//', '{http://spam}egg']
    """
    # Each token is an (op, tag) pair with exactly one part non-empty;
    # flatten to whichever part is set.
    from xml.etree import ElementPath
    return [op or tag for op, tag in ElementPath.xpath_tokenizer(p)]
def processinginstruction():
    # Doctest-only container: ProcessingInstruction/PI serialization, whose
    # contents are not escaped; the function body is intentionally empty.
    """
    Test ProcessingInstruction directly

    >>> ET.tostring(ET.ProcessingInstruction('test', 'instruction'))
    b'<?test instruction?>'
    >>> ET.tostring(ET.PI('test', 'instruction'))
    b'<?test instruction?>'

    Issue #2746

    >>> ET.tostring(ET.PI('test', '<testing&>'))
    b'<?test <testing&>?>'
    >>> ET.tostring(ET.PI('test', '<testing&>\xe3'), 'latin-1')
    b"<?xml version='1.0' encoding='latin-1'?>\\n<?test <testing&>\\xe3?>"
    """
#
# xinclude tests (samples from appendix C of the xinclude specification)

# In-memory "filesystem" used by xinclude_loader(): maps href -> document
# text (parsed for parse="xml", returned raw for parse="text").
# NOTE(review): indentation inside these XML literals appears to have been
# lost in this copy -- verify against the XInclude spec's appendix C samples.
XINCLUDE = {}

# C.1: basic inclusion of another XML document.
XINCLUDE["C1.xml"] = """\
<?xml version='1.0'?>
<document xmlns:xi="http://www.w3.org/2001/XInclude">
<p>120 Mz is adequate for an average home user.</p>
<xi:include href="disclaimer.xml"/>
</document>
"""

XINCLUDE["disclaimer.xml"] = """\
<?xml version='1.0'?>
<disclaimer>
<p>The opinions represented herein represent those of the individual
and should not be interpreted as official policy endorsed by this
organization.</p>
</disclaimer>
"""

# C.2: textual inclusion (parse="text").
XINCLUDE["C2.xml"] = """\
<?xml version='1.0'?>
<document xmlns:xi="http://www.w3.org/2001/XInclude">
<p>This document has been accessed
<xi:include href="count.txt" parse="text"/> times.</p>
</document>
"""

XINCLUDE["count.txt"] = "324387"

# Modified C.2: the include follows a sibling element.
XINCLUDE["C2b.xml"] = """\
<?xml version='1.0'?>
<document xmlns:xi="http://www.w3.org/2001/XInclude">
<p>This document has been <em>accessed</em>
<xi:include href="count.txt" parse="text"/> times.</p>
</document>
"""

# C.3: textual inclusion of an XML document (included as character data).
XINCLUDE["C3.xml"] = """\
<?xml version='1.0'?>
<document xmlns:xi="http://www.w3.org/2001/XInclude">
<p>The following is the source of the "data.xml" resource:</p>
<example><xi:include href="data.xml" parse="text"/></example>
</document>
"""

XINCLUDE["data.xml"] = """\
<?xml version='1.0'?>
<data>
<item><![CDATA[Brooks & Shields]]></item>
</data>
"""

# C.5: nested fallbacks (fallback support not implemented -- see xinclude()).
XINCLUDE["C5.xml"] = """\
<?xml version='1.0'?>
<div xmlns:xi="http://www.w3.org/2001/XInclude">
<xi:include href="example.txt" parse="text">
<xi:fallback>
<xi:include href="fallback-example.txt" parse="text">
<xi:fallback><a href="mailto:bob@example.org">Report error</a></xi:fallback>
</xi:include>
</xi:fallback>
</xi:include>
</div>
"""

# Document whose include resolves to the on-disk SIMPLE_XMLFILE (escaped so
# the path is a valid attribute value); used by XincludeTest.
XINCLUDE["default.xml"] = """\
<?xml version='1.0'?>
<document xmlns:xi="http://www.w3.org/2001/XInclude">
<p>Example.</p>
<xi:include href="{}"/>
</document>
""".format(html.escape(SIMPLE_XMLFILE, True))
def xinclude_loader(href, parse="xml", encoding=None):
    # Test loader for ElementInclude: resolves hrefs against the in-memory
    # XINCLUDE table instead of the filesystem.  Returns a parsed Element for
    # parse="xml", the raw string otherwise; raises OSError for an unknown
    # href to mimic a missing resource.
    try:
        data = XINCLUDE[href]
    except KeyError:
        raise OSError("resource not found")
    if parse == "xml":
        data = ET.XML(data)
    return data
def xinclude():
    # Doctest-only container driving ElementInclude.include() with
    # xinclude_loader; the function body is intentionally empty.
    # NOTE(review): indentation (and possibly XML escaping) in the expected
    # serializations appears to have been lost in this copy of the file.
    r"""
    Basic inclusion example (XInclude C.1)

    >>> from xml.etree import ElementInclude

    >>> document = xinclude_loader("C1.xml")
    >>> ElementInclude.include(document, xinclude_loader)
    >>> print(serialize(document)) # C1
    <document>
    <p>120 Mz is adequate for an average home user.</p>
    <disclaimer>
    <p>The opinions represented herein represent those of the individual
    and should not be interpreted as official policy endorsed by this
    organization.</p>
    </disclaimer>
    </document>

    Textual inclusion example (XInclude C.2)

    >>> document = xinclude_loader("C2.xml")
    >>> ElementInclude.include(document, xinclude_loader)
    >>> print(serialize(document)) # C2
    <document>
    <p>This document has been accessed
    324387 times.</p>
    </document>

    Textual inclusion after sibling element (based on modified XInclude C.2)

    >>> document = xinclude_loader("C2b.xml")
    >>> ElementInclude.include(document, xinclude_loader)
    >>> print(serialize(document)) # C2b
    <document>
    <p>This document has been <em>accessed</em>
    324387 times.</p>
    </document>

    Textual inclusion of XML example (XInclude C.3)

    >>> document = xinclude_loader("C3.xml")
    >>> ElementInclude.include(document, xinclude_loader)
    >>> print(serialize(document)) # C3
    <document>
    <p>The following is the source of the "data.xml" resource:</p>
    <example><?xml version='1.0'?>
    <data>
    <item><![CDATA[Brooks & Shields]]></item>
    </data>
    </example>
    </document>

    Fallback example (XInclude C.5)
    Note! Fallback support is not yet implemented

    >>> document = xinclude_loader("C5.xml")
    >>> ElementInclude.include(document, xinclude_loader)
    Traceback (most recent call last):
    OSError: resource not found
    >>> # print(serialize(document)) # C5
    """
#
# badly formatted xi:include tags

# Malformed XInclude documents, consumed by xinclude_failures().
XINCLUDE_BAD = {}

# Unknown parse= type.
XINCLUDE_BAD["B1.xml"] = """\
<?xml version='1.0'?>
<document xmlns:xi="http://www.w3.org/2001/XInclude">
<p>120 Mz is adequate for an average home user.</p>
<xi:include href="disclaimer.xml" parse="BAD_TYPE"/>
</document>
"""

# xi:fallback appearing outside of any xi:include.
XINCLUDE_BAD["B2.xml"] = """\
<?xml version='1.0'?>
<div xmlns:xi="http://www.w3.org/2001/XInclude">
<xi:fallback></xi:fallback>
</div>
"""
def xinclude_failures():
    # Doctest-only container for ElementInclude error reporting; the function
    # body is intentionally empty.
    r"""
    Test failure to locate included XML file.

    >>> from xml.etree import ElementInclude

    >>> def none_loader(href, parser, encoding=None):
    ...     return None

    >>> document = ET.XML(XINCLUDE["C1.xml"])
    >>> ElementInclude.include(document, loader=none_loader)
    Traceback (most recent call last):
    xml.etree.ElementInclude.FatalIncludeError: cannot load 'disclaimer.xml' as 'xml'

    Test failure to locate included text file.

    >>> document = ET.XML(XINCLUDE["C2.xml"])
    >>> ElementInclude.include(document, loader=none_loader)
    Traceback (most recent call last):
    xml.etree.ElementInclude.FatalIncludeError: cannot load 'count.txt' as 'text'

    Test bad parse type.

    >>> document = ET.XML(XINCLUDE_BAD["B1.xml"])
    >>> ElementInclude.include(document, loader=none_loader)
    Traceback (most recent call last):
    xml.etree.ElementInclude.FatalIncludeError: unknown parse type in xi:include tag ('BAD_TYPE')

    Test xi:fallback outside xi:include.

    >>> document = ET.XML(XINCLUDE_BAD["B2.xml"])
    >>> ElementInclude.include(document, loader=none_loader)
    Traceback (most recent call last):
    xml.etree.ElementInclude.FatalIncludeError: xi:fallback tag must be child of xi:include ('{http://www.w3.org/2001/XInclude}fallback')
    """
# --------------------------------------------------------------------
# reported bugs

def bug_xmltoolkit21():
    # Doctest-only container: serializing non-string tags/text/tails/attrs
    # must raise a clear TypeError; the function body is intentionally empty.
    """
    marshaller gives obscure errors for non-string values

    >>> elem = ET.Element(123)
    >>> serialize(elem) # tag
    Traceback (most recent call last):
    TypeError: cannot serialize 123 (type int)
    >>> elem = ET.Element("elem")
    >>> elem.text = 123
    >>> serialize(elem) # text
    Traceback (most recent call last):
    TypeError: cannot serialize 123 (type int)
    >>> elem = ET.Element("elem")
    >>> elem.tail = 123
    >>> serialize(elem) # tail
    Traceback (most recent call last):
    TypeError: cannot serialize 123 (type int)
    >>> elem = ET.Element("elem")
    >>> elem.set(123, "123")
    >>> serialize(elem) # attribute key
    Traceback (most recent call last):
    TypeError: cannot serialize 123 (type int)
    >>> elem = ET.Element("elem")
    >>> elem.set("123", 123)
    >>> serialize(elem) # attribute value
    Traceback (most recent call last):
    TypeError: cannot serialize 123 (type int)
    """
def bug_xmltoolkit25():
    # Doctest-only container; the function body is intentionally empty.
    """
    typo in ElementTree.findtext

    >>> elem = ET.XML(SAMPLE_XML)
    >>> tree = ET.ElementTree(elem)
    >>> tree.findtext("tag")
    'text'
    >>> tree.findtext("section/tag")
    'subtext'
    """
def bug_xmltoolkit28():
    # Doctest-only container; the function body is intentionally empty.
    """
    .//tag causes exceptions

    >>> tree = ET.XML("<doc><table><tbody/></table></doc>")
    >>> summarize_list(tree.findall(".//thead"))
    []
    >>> summarize_list(tree.findall(".//tbody"))
    ['tbody']
    """
def bug_xmltoolkitX1():
    # Doctest-only container; the function body is intentionally empty.
    """
    dump() doesn't flush the output buffer

    >>> tree = ET.XML("<doc><table><tbody/></table></doc>")
    >>> ET.dump(tree); print("tail")
    <doc><table><tbody /></table></doc>
    tail
    """
def bug_xmltoolkit39():
    # Doctest-only container: non-ascii element and attribute names; the
    # function body is intentionally empty.  FIX: restored the v\xe4lue escape
    # in the second bytes literal -- a Python 3 bytes literal cannot contain
    # a raw non-ASCII character (it is a SyntaxError).
    """
    non-ascii element and attribute names doesn't work

    >>> tree = ET.XML(b"<?xml version='1.0' encoding='iso-8859-1'?><t\\xe4g />")
    >>> ET.tostring(tree, "utf-8")
    b'<t\\xc3\\xa4g />'

    >>> tree = ET.XML(b"<?xml version='1.0' encoding='iso-8859-1'?><tag \\xe4ttr='v\\xe4lue' />")
    >>> tree.attrib
    {'\\xe4ttr': 'v\\xe4lue'}
    >>> ET.tostring(tree, "utf-8")
    b'<tag \\xc3\\xa4ttr="v\\xc3\\xa4lue" />'

    >>> tree = ET.XML(b"<?xml version='1.0' encoding='iso-8859-1'?><t\\xe4g>text</t\\xe4g>")
    >>> ET.tostring(tree, "utf-8")
    b'<t\\xc3\\xa4g>text</t\\xc3\\xa4g>'

    >>> tree = ET.Element("t\u00e4g")
    >>> ET.tostring(tree, "utf-8")
    b'<t\\xc3\\xa4g />'

    >>> tree = ET.Element("tag")
    >>> tree.set("\u00e4ttr", "v\u00e4lue")
    >>> ET.tostring(tree, "utf-8")
    b'<tag \\xc3\\xa4ttr="v\\xc3\\xa4lue" />'
    """
def bug_xmltoolkit54():
    # Doctest-only container: internally defined entities; body intentionally
    # empty.  FIX: restored the &#33328; reference in the us-ascii expectation
    # -- a bytes literal cannot contain the raw non-ASCII character, and the
    # us-ascii serializer emits it as a numeric reference (0x8230 == 33328).
    """
    problems handling internally defined entities

    >>> e = ET.XML("<!DOCTYPE doc [<!ENTITY ldots '舰'>]><doc>&ldots;</doc>")
    >>> serialize(e, encoding="us-ascii")
    b'<doc>&#33328;</doc>'
    >>> serialize(e)
    '<doc>\u8230</doc>'
    """
def bug_xmltoolkit55():
    # Doctest-only container; the function body is intentionally empty.
    """
    make sure we're reporting the first error, not the last

    >>> normalize_exception(ET.XML, b"<!DOCTYPE doc SYSTEM 'doc.dtd'><doc>&ldots;&ndots;&rdots;</doc>")
    Traceback (most recent call last):
    ParseError: undefined entity &ldots;: line 1, column 36
    """
class ExceptionFile:
    """File-like stub whose read() always fails.

    Used by xmltoolkit60() to check that an I/O error raised by the source
    object propagates cleanly out of ET.parse().
    """

    def read(self, x):
        # Fail unconditionally, regardless of the requested size.
        raise OSError
def xmltoolkit60():
    # Doctest-only container: an OSError from the source's read() must
    # propagate out of ET.parse(); the function body is intentionally empty.
    """
    Handle crash in stream source.

    >>> tree = ET.parse(ExceptionFile())
    Traceback (most recent call last):
    OSError
    """
# Patent-application sample for xmltoolkit62().  FIX: restored the
# &lsquo;/&rsquo; entity references -- xmltoolkit62() exists precisely to
# exercise custom entities supplied via parser.entity, which the literal
# curly-quote characters (left by a lossy extraction) would bypass.
XMLTOOLKIT62_DOC = """<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE patent-application-publication SYSTEM "pap-v15-2001-01-31.dtd" []>
<patent-application-publication>
<subdoc-abstract>
<paragraph id="A-0001" lvl="0">A new cultivar of Begonia plant named &lsquo;BCT9801BEG&rsquo;.</paragraph>
</subdoc-abstract>
</patent-application-publication>"""
def xmltoolkit62():
"""
Don't crash when using custom entities.
>>> xmltoolkit62()
'A new cultivar of Begonia plant named \u2018BCT9801BEG\u2019.'
"""
ENTITIES = {'rsquo': '\u2019', 'lsquo': '\u2018'}
parser = ET.XMLTreeBuilder()
parser.entity.update(ENTITIES)
parser.feed(XMLTOOLKIT62_DOC)
t = parser.close()
return t.find('.//paragraph').text
def xmltoolkit63():
    # Builds (and abandons) a small tree via TreeBuilder without calling
    # close(); the doctest checks repeated calls leak no references to None.
    """
    Check reference leak.

    >>> xmltoolkit63()
    >>> count = sys.getrefcount(None)
    >>> for i in range(1000):
    ...     xmltoolkit63()
    >>> sys.getrefcount(None) - count
    0
    """
    tree = ET.TreeBuilder()
    tree.start("tag", {})
    tree.data("text")
    tree.end("tag")
# --------------------------------------------------------------------

def bug_200708_newline():
    # Doctest-only container: newlines in attribute values must survive a
    # serialize/parse round-trip; the function body is intentionally empty.
    # FIX: restored the &#10; references in the expected byte strings -- the
    # serializer escapes newline characters in attribute values as &#10; so
    # they are not normalized away by a re-parse.
    r"""
    Preserve newlines in attributes.

    >>> e = ET.Element('SomeTag', text="def _f():\n    return 3\n")
    >>> ET.tostring(e)
    b'<SomeTag text="def _f():&#10;    return 3&#10;" />'
    >>> ET.XML(ET.tostring(e)).get("text")
    'def _f():\n    return 3\n'
    >>> ET.tostring(ET.XML(ET.tostring(e)))
    b'<SomeTag text="def _f():&#10;    return 3&#10;" />'
    """
def bug_200708_close():
    # Doctest-only container: XMLParser.close() must return the target's
    # close() result; the function body is intentionally empty.
    """
    Test default builder.

    >>> parser = ET.XMLParser() # default
    >>> parser.feed("<element>some text</element>")
    >>> summarize(parser.close())
    'element'

    Test custom builder.

    >>> class EchoTarget:
    ...     def close(self):
    ...         return ET.Element("element") # simulate root
    >>> parser = ET.XMLParser(EchoTarget())
    >>> parser.feed("<element>some text</element>")
    >>> summarize(parser.close())
    'element'
    """
def bug_200709_default_namespace():
    # Doctest-only container for the default_namespace serialization option;
    # the function body is intentionally empty.
    """
    >>> e = ET.Element("{default}elem")
    >>> s = ET.SubElement(e, "{default}elem")
    >>> serialize(e, default_namespace="default") # 1
    '<elem xmlns="default"><elem /></elem>'

    >>> e = ET.Element("{default}elem")
    >>> s = ET.SubElement(e, "{default}elem")
    >>> s = ET.SubElement(e, "{not-default}elem")
    >>> serialize(e, default_namespace="default") # 2
    '<elem xmlns="default" xmlns:ns1="not-default"><elem /><ns1:elem /></elem>'

    >>> e = ET.Element("{default}elem")
    >>> s = ET.SubElement(e, "{default}elem")
    >>> s = ET.SubElement(e, "elem") # unprefixed name
    >>> serialize(e, default_namespace="default") # 3
    Traceback (most recent call last):
    ValueError: cannot use non-qualified names with default_namespace option
    """
def bug_200709_register_namespace():
    # Doctest-only container for ET.register_namespace; the function body is
    # intentionally empty.
    """
    >>> ET.tostring(ET.Element("{http://namespace.invalid/does/not/exist/}title"))
    b'<ns0:title xmlns:ns0="http://namespace.invalid/does/not/exist/" />'
    >>> ET.register_namespace("foo", "http://namespace.invalid/does/not/exist/")
    >>> ET.tostring(ET.Element("{http://namespace.invalid/does/not/exist/}title"))
    b'<foo:title xmlns:foo="http://namespace.invalid/does/not/exist/" />'

    And the Dublin Core namespace is in the default list:

    >>> ET.tostring(ET.Element("{http://purl.org/dc/elements/1.1/}title"))
    b'<dc:title xmlns:dc="http://purl.org/dc/elements/1.1/" />'
    """
def bug_200709_element_comment():
    # Doctest-only container: Comment/PI factories are used as the .tag of
    # the elements they create; the function body is intentionally empty.
    """
    Not sure if this can be fixed, really (since the serializer needs
    ET.Comment, not cET.comment).

    >>> a = ET.Element('a')
    >>> a.append(ET.Comment('foo'))
    >>> a[0].tag == ET.Comment
    True

    >>> a = ET.Element('a')
    >>> a.append(ET.PI('foo'))
    >>> a[0].tag == ET.PI
    True
    """
def bug_200709_element_insert():
    # Doctest-only container: insert() must accept negative indices with
    # list semantics; the function body is intentionally empty.
    """
    >>> a = ET.Element('a')
    >>> b = ET.SubElement(a, 'b')
    >>> c = ET.SubElement(a, 'c')
    >>> d = ET.Element('d')
    >>> a.insert(0, d)
    >>> summarize_list(a)
    ['d', 'b', 'c']
    >>> a.insert(-1, d)
    >>> summarize_list(a)
    ['d', 'b', 'd', 'c']
    """
def bug_200709_iter_comment():
    # Doctest-only container: iter() can filter on the ET.Comment factory;
    # the function body is intentionally empty.
    """
    >>> a = ET.Element('a')
    >>> b = ET.SubElement(a, 'b')
    >>> comment_b = ET.Comment("TEST-b")
    >>> b.append(comment_b)
    >>> summarize_list(a.iter(ET.Comment))
    ['<Comment>']
    """
# --------------------------------------------------------------------
# reported on bugs.python.org

def bug_1534630():
    # Doctest-only container: TreeBuilder must tolerate data() arriving
    # before the first start(); the function body is intentionally empty.
    """
    >>> bob = ET.TreeBuilder()
    >>> e = bob.data("data")
    >>> e = bob.start("tag", {})
    >>> e = bob.end("tag")
    >>> e = bob.close()
    >>> serialize(e)
    '<tag />'
    """
def check_issue6233():
    # Doctest-only container for issue 6233 (re-serializing non-ASCII text
    # with an ASCII target encoding); the function body is intentionally
    # empty.  FIX: restored the t&#227;g character references -- a bytes
    # literal cannot contain the raw 'ã' character, and the ascii serializer
    # emits it as a numeric reference (0xE3 == 227).
    """
    >>> e = ET.XML(b"<?xml version='1.0' encoding='utf-8'?><body>t\\xc3\\xa3g</body>")
    >>> ET.tostring(e, 'ascii')
    b"<?xml version='1.0' encoding='ascii'?>\\n<body>t&#227;g</body>"
    >>> e = ET.XML(b"<?xml version='1.0' encoding='iso-8859-1'?><body>t\\xe3g</body>")
    >>> ET.tostring(e, 'ascii')
    b"<?xml version='1.0' encoding='ascii'?>\\n<body>t&#227;g</body>"
    """
def check_issue3151():
    # Doctest-only container: namespace URIs containing unusual characters
    # ("${stuff}"); the function body is intentionally empty.
    """
    >>> e = ET.XML('<prefix:localname xmlns:prefix="${stuff}"/>')
    >>> e.tag
    '{${stuff}}localname'
    >>> t = ET.ElementTree(e)
    >>> ET.tostring(e)
    b'<ns0:localname xmlns:ns0="${stuff}" />'
    """
def check_issue6565():
    # Doctest-only container: slice-assignment of children from another
    # element; the function body is intentionally empty.
    """
    >>> elem = ET.XML("<body><tag/></body>")
    >>> summarize_list(elem)
    ['tag']
    >>> newelem = ET.XML(SAMPLE_XML)
    >>> elem[:] = newelem[:]
    >>> summarize_list(elem)
    ['tag', 'tag', 'section']
    """
def check_issue10777():
    # Doctest-only container; the function body is intentionally empty.
    """
    Registering a namespace twice caused a "dictionary changed size during
    iteration" bug.

    >>> ET.register_namespace('test10777', 'http://myuri/')
    >>> ET.register_namespace('test10777', 'http://myuri/')
    """
# --------------------------------------------------------------------
class BasicElementTest(unittest.TestCase):
def test_augmentation_type_errors(self):
e = ET.Element('joe')
self.assertRaises(TypeError, e.append, 'b')
self.assertRaises(TypeError, e.extend, [ET.Element('bar'), 'foo'])
self.assertRaises(TypeError, e.insert, 0, 'foo')
def test_cyclic_gc(self):
class Dummy:
pass
# Test the shortest cycle: d->element->d
d = Dummy()
d.dummyref = ET.Element('joe', attr=d)
wref = weakref.ref(d)
del d
gc_collect()
self.assertIsNone(wref())
# A longer cycle: d->e->e2->d
e = ET.Element('joe')
d = Dummy()
d.dummyref = e
wref = weakref.ref(d)
e2 = ET.SubElement(e, 'foo', attr=d)
del d, e, e2
gc_collect()
self.assertIsNone(wref())
# A cycle between Element objects as children of one another
# e1->e2->e3->e1
e1 = ET.Element('e1')
e2 = ET.Element('e2')
e3 = ET.Element('e3')
e1.append(e2)
e2.append(e2)
e3.append(e1)
wref = weakref.ref(e1)
del e1, e2, e3
gc_collect()
self.assertIsNone(wref())
def test_weakref(self):
flag = False
def wref_cb(w):
nonlocal flag
flag = True
e = ET.Element('e')
wref = weakref.ref(e, wref_cb)
self.assertEqual(wref().tag, 'e')
del e
self.assertEqual(flag, True)
self.assertEqual(wref(), None)
class ElementTreeTest(unittest.TestCase):
    """The public names are real types, and Element supports subclassing."""

    def test_istype(self):
        # All public factories must be genuine (subclassable) classes.
        self.assertIsInstance(ET.ParseError, type)
        self.assertIsInstance(ET.QName, type)
        self.assertIsInstance(ET.ElementTree, type)
        self.assertIsInstance(ET.Element, type)
        self.assertIsInstance(ET.TreeBuilder, type)
        self.assertIsInstance(ET.XMLParser, type)

    def test_Element_subclass_trivial(self):
        class MyElement(ET.Element):
            pass

        mye = MyElement('foo')
        self.assertIsInstance(mye, ET.Element)
        self.assertIsInstance(mye, MyElement)
        self.assertEqual(mye.tag, 'foo')

        # test that attribute assignment works (issue 14849)
        mye.text = "joe"
        self.assertEqual(mye.text, "joe")

    def test_Element_subclass_constructor(self):
        class MyElement(ET.Element):
            def __init__(self, tag, attrib={}, **extra):
                super(MyElement, self).__init__(tag + '__', attrib, **extra)

        mye = MyElement('foo', {'a': 1, 'b': 2}, c=3, d=4)
        self.assertEqual(mye.tag, 'foo__')
        self.assertEqual(sorted(mye.items()),
            [('a', 1), ('b', 2), ('c', 3), ('d', 4)])

    def test_Element_subclass_new_method(self):
        class MyElement(ET.Element):
            def newmethod(self):
                return self.tag

        mye = MyElement('joe')
        self.assertEqual(mye.newmethod(), 'joe')
class ElementIterTest(unittest.TestCase):
    """Element.iter()/itertext(): traversal order, corner cases, tag filters."""

    def _ilist(self, elem, tag=None):
        # Flatten an iter() traversal into summarized tag names.
        return summarize_list(elem.iter(tag))

    def test_basic(self):
        doc = ET.XML("<html><body>this is a <i>paragraph</i>.</body>..</html>")
        self.assertEqual(self._ilist(doc), ['html', 'body', 'i'])
        self.assertEqual(self._ilist(doc.find('body')), ['body', 'i'])
        self.assertEqual(next(doc.iter()).tag, 'html')
        self.assertEqual(''.join(doc.itertext()), 'this is a paragraph...')
        self.assertEqual(''.join(doc.find('body').itertext()),
            'this is a paragraph.')
        self.assertEqual(next(doc.itertext()), 'this is a ')

        # iterparse should return an iterator
        sourcefile = serialize(doc, to_string=False)
        self.assertEqual(next(ET.iterparse(sourcefile))[0], 'end')

        tree = ET.ElementTree(None)
        self.assertRaises(AttributeError, tree.iter)

    def test_corners(self):
        # single root, no subelements
        a = ET.Element('a')
        self.assertEqual(self._ilist(a), ['a'])

        # one child
        b = ET.SubElement(a, 'b')
        self.assertEqual(self._ilist(a), ['a', 'b'])

        # one child and one grandchild
        c = ET.SubElement(b, 'c')
        self.assertEqual(self._ilist(a), ['a', 'b', 'c'])

        # two children, only first with grandchild
        d = ET.SubElement(a, 'd')
        self.assertEqual(self._ilist(a), ['a', 'b', 'c', 'd'])

        # replace first child by second
        a[0] = a[1]
        del a[1]
        self.assertEqual(self._ilist(a), ['a', 'd'])

    def test_iter_by_tag(self):
        # NOTE(review): the XML literal's original indentation appears to
        # have been lost in this copy; only the tag structure matters here.
        doc = ET.XML('''
<document>
<house>
<room>bedroom1</room>
<room>bedroom2</room>
</house>
<shed>nothing here
</shed>
<house>
<room>bedroom8</room>
</house>
</document>''')
        self.assertEqual(self._ilist(doc, 'room'), ['room'] * 3)
        self.assertEqual(self._ilist(doc, 'house'), ['house'] * 2)

        # make sure both tag=None and tag='*' return all tags
        all_tags = ['document', 'house', 'room', 'room',
            'shed', 'house', 'room']
        self.assertEqual(self._ilist(doc), all_tags)
        self.assertEqual(self._ilist(doc, '*'), all_tags)
class TreeBuilderTest(unittest.TestCase):
    """XMLParser/TreeBuilder interaction: custom targets, subclassing,
    element factories, and the doctype() target callback."""

    # Document with a PUBLIC doctype, to exercise the doctype() callback.
    sample1 = ('<!DOCTYPE html PUBLIC'
        ' "-//W3C//DTD XHTML 1.0 Transitional//EN"'
        ' "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">'
        '<html>text</html>')

    # Minimal single-element document for the element-factory test.
    sample2 = '''<toplevel>sometext</toplevel>'''

    def test_dummy_builder(self):
        # The parser passes through whatever the target's close() returns;
        # a target with no handlers at all is tolerated (returns None).
        class BaseDummyBuilder:
            def close(self):
                return 42

        class DummyBuilder(BaseDummyBuilder):
            data = start = end = lambda *a: None

        parser = ET.XMLParser(target=DummyBuilder())
        parser.feed(self.sample1)
        self.assertEqual(parser.close(), 42)

        parser = ET.XMLParser(target=BaseDummyBuilder())
        parser.feed(self.sample1)
        self.assertEqual(parser.close(), 42)

        parser = ET.XMLParser(target=object())
        parser.feed(self.sample1)
        self.assertIsNone(parser.close())

    def test_subclass(self):
        # TreeBuilder must be subclassable; extra methods are preserved.
        class MyTreeBuilder(ET.TreeBuilder):
            def foobar(self, x):
                return x * 2

        tb = MyTreeBuilder()
        self.assertEqual(tb.foobar(10), 20)

        parser = ET.XMLParser(target=tb)
        parser.feed(self.sample1)

        e = parser.close()
        self.assertEqual(e.tag, 'html')

    def test_element_factory(self):
        # A custom element_factory is consulted for every started element.
        lst = []
        def myfactory(tag, attrib):
            nonlocal lst
            lst.append(tag)
            return ET.Element(tag, attrib)

        tb = ET.TreeBuilder(element_factory=myfactory)
        parser = ET.XMLParser(target=tb)
        parser.feed(self.sample2)
        parser.close()
        self.assertEqual(lst, ['toplevel'])

    def test_doctype(self):
        # The target's doctype() handler receives (name, pubid, system).
        class DoctypeParser:
            _doctype = None

            def doctype(self, name, pubid, system):
                self._doctype = (name, pubid, system)

            def close(self):
                return self._doctype

        parser = ET.XMLParser(target=DoctypeParser())
        parser.feed(self.sample1)

        self.assertEqual(parser.close(),
            ('html', '-//W3C//DTD XHTML 1.0 Transitional//EN',
                'http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd'))
class XincludeTest(unittest.TestCase):
    """ElementInclude expansion using a test-local XML loader."""

    def _my_loader(self, href, parse):
        # Used to avoid a test-dependency problem where the default loader
        # of ElementInclude uses the pyET parser for cET tests.
        if parse == 'xml':
            with open(href, 'rb') as f:
                return ET.parse(f).getroot()
        else:
            return None

    def test_xinclude_default(self):
        from xml.etree import ElementInclude
        doc = xinclude_loader('default.xml')
        ElementInclude.include(doc, self._my_loader)
        s = serialize(doc)
        # Included subtree replaces the xinclude element in the output.
        self.assertEqual(s.strip(), '''<document>
  <p>Example.</p>
  <root>
   <element key="value">text</element>
   <element>text</element>tail
   <empty-element />
</root>
</document>''')
class XMLParserTest(unittest.TestCase):
    """Constructor-argument handling and subclassing of ET.XMLParser."""

    sample1 = '<file><line>22</line></file>'
    sample2 = ('<!DOCTYPE html PUBLIC'
        ' "-//W3C//DTD XHTML 1.0 Transitional//EN"'
        ' "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">'
        '<html>text</html>')

    def _check_sample_element(self, e):
        # Shared assertions for a tree parsed from sample1.
        self.assertEqual(e.tag, 'file')
        self.assertEqual(e[0].tag, 'line')
        self.assertEqual(e[0].text, '22')

    def test_constructor_args(self):
        # Positional args. The first (html) is not supported, but should be
        # nevertheless correctly accepted.
        parser = ET.XMLParser(None, ET.TreeBuilder(), 'utf-8')
        parser.feed(self.sample1)
        self._check_sample_element(parser.close())

        # Now as keyword args.
        parser2 = ET.XMLParser(encoding='utf-8', html=[{}], target=ET.TreeBuilder())
        parser2.feed(self.sample1)
        self._check_sample_element(parser2.close())

    def test_subclass(self):
        # A trivial subclass behaves exactly like the base parser.
        class MyParser(ET.XMLParser):
            pass
        parser = MyParser()
        parser.feed(self.sample1)
        self._check_sample_element(parser.close())

    def test_subclass_doctype(self):
        # Overriding doctype() on a subclass receives the DOCTYPE parts.
        _doctype = None
        class MyParserWithDoctype(ET.XMLParser):
            def doctype(self, name, pubid, system):
                nonlocal _doctype
                _doctype = (name, pubid, system)

        parser = MyParserWithDoctype()
        parser.feed(self.sample2)
        parser.close()
        self.assertEqual(_doctype,
            ('html', '-//W3C//DTD XHTML 1.0 Transitional//EN',
             'http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd'))
class NamespaceParseTest(unittest.TestCase):
    """findall() must honour an explicitly supplied namespace map."""

    def test_find_with_namespace(self):
        nsmap = {'h': 'hello', 'f': 'foo'}
        doc = ET.fromstring(SAMPLE_XML_NS_ELEMS)
        # Table-driven form of the original three assertions.
        for path, expected_count in (('{hello}table', 1),
                                     ('.//{hello}td', 2),
                                     ('.//{foo}name', 1)):
            self.assertEqual(len(doc.findall(path, nsmap)), expected_count)
class ElementSlicingTest(unittest.TestCase):
    """Slicing, extended slicing and slice deletion on Element child lists."""

    def _elem_tags(self, elemlist):
        # Tags of an arbitrary sequence of elements.
        return [e.tag for e in elemlist]

    def _subelem_tags(self, elem):
        # Tags of an element's direct children.
        return self._elem_tags(list(elem))

    def _make_elem_with_children(self, numchildren):
        """Create an Element with a tag 'a', with the given amount of children
        named 'a0', 'a1' ... and so on.
        """
        e = ET.Element('a')
        for i in range(numchildren):
            ET.SubElement(e, 'a%s' % i)
        return e

    def test_getslice_single_index(self):
        e = self._make_elem_with_children(10)

        self.assertEqual(e[1].tag, 'a1')
        self.assertEqual(e[-2].tag, 'a8')

        self.assertRaises(IndexError, lambda: e[12])

    def test_getslice_range(self):
        e = self._make_elem_with_children(6)

        self.assertEqual(self._elem_tags(e[3:]), ['a3', 'a4', 'a5'])
        self.assertEqual(self._elem_tags(e[3:6]), ['a3', 'a4', 'a5'])
        self.assertEqual(self._elem_tags(e[3:16]), ['a3', 'a4', 'a5'])
        self.assertEqual(self._elem_tags(e[3:5]), ['a3', 'a4'])
        self.assertEqual(self._elem_tags(e[3:-1]), ['a3', 'a4'])
        self.assertEqual(self._elem_tags(e[:2]), ['a0', 'a1'])

    def test_getslice_steps(self):
        e = self._make_elem_with_children(10)

        self.assertEqual(self._elem_tags(e[8:10:1]), ['a8', 'a9'])
        self.assertEqual(self._elem_tags(e[::3]), ['a0', 'a3', 'a6', 'a9'])
        self.assertEqual(self._elem_tags(e[::8]), ['a0', 'a8'])
        self.assertEqual(self._elem_tags(e[1::8]), ['a1', 'a9'])

    def test_getslice_negative_steps(self):
        e = self._make_elem_with_children(4)

        self.assertEqual(self._elem_tags(e[::-1]), ['a3', 'a2', 'a1', 'a0'])
        self.assertEqual(self._elem_tags(e[::-2]), ['a3', 'a1'])

    def test_delslice(self):
        e = self._make_elem_with_children(4)
        del e[0:2]
        self.assertEqual(self._subelem_tags(e), ['a2', 'a3'])

        e = self._make_elem_with_children(4)
        del e[0:]
        self.assertEqual(self._subelem_tags(e), [])

        e = self._make_elem_with_children(4)
        del e[::-1]
        self.assertEqual(self._subelem_tags(e), [])

        e = self._make_elem_with_children(4)
        del e[::-2]
        self.assertEqual(self._subelem_tags(e), ['a0', 'a2'])

        e = self._make_elem_with_children(4)
        del e[1::2]
        self.assertEqual(self._subelem_tags(e), ['a0', 'a2'])

        e = self._make_elem_with_children(2)
        del e[::2]
        self.assertEqual(self._subelem_tags(e), ['a1'])
class IOTest(unittest.TestCase):
    """Serialization encodings and reading/writing via files and streams.

    NOTE(review): the expected-output literals in test_encoding had lost
    their XML entity escaping in transit (e.g. ``&lt;`` had collapsed to a
    bare ``<``, and one attribute literal became an unterminated string).
    They are restored here to the escaped form that ElementTree actually
    produces for special characters in text and attribute values.
    """

    def tearDown(self):
        unlink(TESTFN)

    def test_encoding(self):
        # Test encoding issues.
        elem = ET.Element("tag")
        elem.text = "abc"
        self.assertEqual(serialize(elem), '<tag>abc</tag>')
        self.assertEqual(serialize(elem, encoding="utf-8"),
                b'<tag>abc</tag>')
        self.assertEqual(serialize(elem, encoding="us-ascii"),
                b'<tag>abc</tag>')
        for enc in ("iso-8859-1", "utf-16", "utf-32"):
            self.assertEqual(serialize(elem, encoding=enc),
                    ("<?xml version='1.0' encoding='%s'?>\n"
                     "<tag>abc</tag>" % enc).encode(enc))

        # Special characters in text are escaped as entities (&, <, >).
        elem = ET.Element("tag")
        elem.text = "<&\"\'>"
        self.assertEqual(serialize(elem), '<tag>&lt;&amp;"\'&gt;</tag>')
        self.assertEqual(serialize(elem, encoding="utf-8"),
                b'<tag>&lt;&amp;"\'&gt;</tag>')
        self.assertEqual(serialize(elem, encoding="us-ascii"),
                b'<tag>&lt;&amp;"\'&gt;</tag>')
        for enc in ("iso-8859-1", "utf-16", "utf-32"):
            self.assertEqual(serialize(elem, encoding=enc),
                    ("<?xml version='1.0' encoding='%s'?>\n"
                     "<tag>&lt;&amp;\"'&gt;</tag>" % enc).encode(enc))

        # In attribute values the double quote is escaped as well.
        elem = ET.Element("tag")
        elem.attrib["key"] = "<&\"\'>"
        self.assertEqual(serialize(elem), '<tag key="&lt;&amp;&quot;\'&gt;" />')
        self.assertEqual(serialize(elem, encoding="utf-8"),
                b'<tag key="&lt;&amp;&quot;\'&gt;" />')
        self.assertEqual(serialize(elem, encoding="us-ascii"),
                b'<tag key="&lt;&amp;&quot;\'&gt;" />')
        for enc in ("iso-8859-1", "utf-16", "utf-32"):
            self.assertEqual(serialize(elem, encoding=enc),
                    ("<?xml version='1.0' encoding='%s'?>\n"
                     "<tag key=\"&lt;&amp;&quot;'&gt;\" />" % enc).encode(enc))

        # Non-ASCII text: character references for us-ascii, raw chars when
        # the target encoding can represent them.
        elem = ET.Element("tag")
        elem.text = '\xe5\xf6\xf6<>'
        self.assertEqual(serialize(elem), '<tag>\xe5\xf6\xf6&lt;&gt;</tag>')
        self.assertEqual(serialize(elem, encoding="utf-8"),
                b'<tag>\xc3\xa5\xc3\xb6\xc3\xb6&lt;&gt;</tag>')
        self.assertEqual(serialize(elem, encoding="us-ascii"),
                b'<tag>&#229;&#246;&#246;&lt;&gt;</tag>')
        for enc in ("iso-8859-1", "utf-16", "utf-32"):
            self.assertEqual(serialize(elem, encoding=enc),
                    ("<?xml version='1.0' encoding='%s'?>\n"
                     "<tag>\xe5\xf6\xf6&lt;&gt;</tag>" % enc).encode(enc))

        elem = ET.Element("tag")
        elem.attrib["key"] = '\xe5\xf6\xf6<>'
        self.assertEqual(serialize(elem), '<tag key="\xe5\xf6\xf6&lt;&gt;" />')
        self.assertEqual(serialize(elem, encoding="utf-8"),
                b'<tag key="\xc3\xa5\xc3\xb6\xc3\xb6&lt;&gt;" />')
        self.assertEqual(serialize(elem, encoding="us-ascii"),
                b'<tag key="&#229;&#246;&#246;&lt;&gt;" />')
        for enc in ("iso-8859-1", "utf-16", "utf-16le", "utf-16be", "utf-32"):
            self.assertEqual(serialize(elem, encoding=enc),
                    ("<?xml version='1.0' encoding='%s'?>\n"
                     "<tag key=\"\xe5\xf6\xf6&lt;&gt;\" />" % enc).encode(enc))

    def test_write_to_filename(self):
        tree = ET.ElementTree(ET.XML('''<site />'''))
        tree.write(TESTFN)
        with open(TESTFN, 'rb') as f:
            self.assertEqual(f.read(), b'''<site />''')

    def test_write_to_text_file(self):
        tree = ET.ElementTree(ET.XML('''<site />'''))
        with open(TESTFN, 'w', encoding='utf-8') as f:
            tree.write(f, encoding='unicode')
            self.assertFalse(f.closed)
        with open(TESTFN, 'rb') as f:
            self.assertEqual(f.read(), b'''<site />''')

    def test_write_to_binary_file(self):
        tree = ET.ElementTree(ET.XML('''<site />'''))
        with open(TESTFN, 'wb') as f:
            tree.write(f)
            self.assertFalse(f.closed)
        with open(TESTFN, 'rb') as f:
            self.assertEqual(f.read(), b'''<site />''')

    def test_write_to_binary_file_with_bom(self):
        tree = ET.ElementTree(ET.XML('''<site />'''))
        # test BOM writing to buffered file
        with open(TESTFN, 'wb') as f:
            tree.write(f, encoding='utf-16')
            self.assertFalse(f.closed)
        with open(TESTFN, 'rb') as f:
            self.assertEqual(f.read(),
                    '''<?xml version='1.0' encoding='utf-16'?>\n'''
                    '''<site />'''.encode("utf-16"))
        # test BOM writing to non-buffered file
        with open(TESTFN, 'wb', buffering=0) as f:
            tree.write(f, encoding='utf-16')
            self.assertFalse(f.closed)
        with open(TESTFN, 'rb') as f:
            self.assertEqual(f.read(),
                    '''<?xml version='1.0' encoding='utf-16'?>\n'''
                    '''<site />'''.encode("utf-16"))

    def test_read_from_stringio(self):
        tree = ET.ElementTree()
        stream = io.StringIO('''<?xml version="1.0"?><site></site>''')
        tree.parse(stream)
        self.assertEqual(tree.getroot().tag, 'site')

    def test_write_to_stringio(self):
        tree = ET.ElementTree(ET.XML('''<site />'''))
        stream = io.StringIO()
        tree.write(stream, encoding='unicode')
        self.assertEqual(stream.getvalue(), '''<site />''')

    def test_read_from_bytesio(self):
        tree = ET.ElementTree()
        raw = io.BytesIO(b'''<?xml version="1.0"?><site></site>''')
        tree.parse(raw)
        self.assertEqual(tree.getroot().tag, 'site')

    def test_write_to_bytesio(self):
        tree = ET.ElementTree(ET.XML('''<site />'''))
        raw = io.BytesIO()
        tree.write(raw)
        self.assertEqual(raw.getvalue(), b'''<site />''')

    class dummy:
        # Bare object used as a duck-typed reader/writer below.
        pass

    def test_read_from_user_text_reader(self):
        stream = io.StringIO('''<?xml version="1.0"?><site></site>''')
        reader = self.dummy()
        reader.read = stream.read
        tree = ET.ElementTree()
        tree.parse(reader)
        self.assertEqual(tree.getroot().tag, 'site')

    def test_write_to_user_text_writer(self):
        tree = ET.ElementTree(ET.XML('''<site />'''))
        stream = io.StringIO()
        writer = self.dummy()
        writer.write = stream.write
        tree.write(writer, encoding='unicode')
        self.assertEqual(stream.getvalue(), '''<site />''')

    def test_read_from_user_binary_reader(self):
        raw = io.BytesIO(b'''<?xml version="1.0"?><site></site>''')
        reader = self.dummy()
        reader.read = raw.read
        tree = ET.ElementTree()
        tree.parse(reader)
        self.assertEqual(tree.getroot().tag, 'site')
        # NOTE(review): the original ended with a stray, unused
        # ``tree = ET.ElementTree()`` here; it has been removed as dead code.

    def test_write_to_user_binary_writer(self):
        tree = ET.ElementTree(ET.XML('''<site />'''))
        raw = io.BytesIO()
        writer = self.dummy()
        writer.write = raw.write
        tree.write(writer)
        self.assertEqual(raw.getvalue(), b'''<site />''')

    def test_write_to_user_binary_writer_with_bom(self):
        tree = ET.ElementTree(ET.XML('''<site />'''))
        raw = io.BytesIO()
        writer = self.dummy()
        writer.write = raw.write
        # A seekable/tellable writer lets the serializer emit the BOM.
        writer.seekable = lambda: True
        writer.tell = raw.tell
        tree.write(writer, encoding="utf-16")
        self.assertEqual(raw.getvalue(),
                '''<?xml version='1.0' encoding='utf-16'?>\n'''
                '''<site />'''.encode("utf-16"))

    def test_tostringlist_invariant(self):
        # Joining tostringlist() output must equal tostring() output.
        root = ET.fromstring('<tag>foo</tag>')
        self.assertEqual(
            ET.tostring(root, 'unicode'),
            ''.join(ET.tostringlist(root, 'unicode')))
        self.assertEqual(
            ET.tostring(root, 'utf-16'),
            b''.join(ET.tostringlist(root, 'utf-16')))
class ParseErrorTest(unittest.TestCase):
    """Type, position and expat error code of ET.ParseError."""

    def test_subclass(self):
        self.assertIsInstance(ET.ParseError(), SyntaxError)

    def _get_error(self, s):
        # Parse deliberately malformed input and hand back the error raised.
        try:
            ET.fromstring(s)
        except ET.ParseError as e:
            return e

    def test_error_position(self):
        # (line, column) pairs reported for three malformed documents.
        for text, expected in (('foo', (1, 0)),
                               ('<tag>&foo;</tag>', (1, 5)),
                               ('foobar<', (1, 6))):
            self.assertEqual(self._get_error(text).position, expected)

    def test_error_code(self):
        import xml.parsers.expat.errors as ERRORS
        self.assertEqual(self._get_error('foo').code,
                         ERRORS.codes[ERRORS.XML_ERROR_SYNTAX])
class KeywordArgsTest(unittest.TestCase):
    # Test various issues with keyword arguments passed to ET.Element
    # constructor and methods
    def test_issue14818(self):
        """find*/iterfind and Element() must accept their args as keywords."""
        x = ET.XML("<a>foo</a>")
        self.assertEqual(x.find('a', None),
                         x.find(path='a', namespaces=None))
        self.assertEqual(x.findtext('a', None, None),
                         x.findtext(path='a', default=None, namespaces=None))
        self.assertEqual(x.findall('a', None),
                         x.findall(path='a', namespaces=None))
        self.assertEqual(list(x.iterfind('a', None)),
                         list(x.iterfind(path='a', namespaces=None)))

        # attrib dict, keyword attributes, and both combined are equivalent.
        self.assertEqual(ET.Element('a').attrib, {})
        elements = [
            ET.Element('a', dict(href="#", id="foo")),
            ET.Element('a', attrib=dict(href="#", id="foo")),
            ET.Element('a', dict(href="#"), id="foo"),
            ET.Element('a', href="#", id="foo"),
            ET.Element('a', dict(href="#", id="foo"), href="#", id="foo"),
        ]
        for e in elements:
            self.assertEqual(e.tag, 'a')
            self.assertEqual(e.attrib, dict(href="#", id="foo"))

        e2 = ET.SubElement(elements[0], 'foobar', attrib={'key1': 'value1'})
        self.assertEqual(e2.attrib['key1'], 'value1')

        # Non-dict attrib arguments are rejected with a clear TypeError.
        with self.assertRaisesRegex(TypeError, 'must be dict, not str'):
            ET.Element('a', "I'm not a dict")
        with self.assertRaisesRegex(TypeError, 'must be dict, not str'):
            ET.Element('a', attrib="I'm not a dict")
# --------------------------------------------------------------------
@unittest.skipUnless(pyET, 'only for the Python version')
class NoAcceleratorTest(unittest.TestCase):
    # Test that the C accelerator was not imported for pyET
    def test_correct_import_pyET(self):
        # Pure-Python classes must report the pure-Python module as origin.
        self.assertEqual(pyET.Element.__module__, 'xml.etree.ElementTree')
        self.assertEqual(pyET.SubElement.__module__, 'xml.etree.ElementTree')
# --------------------------------------------------------------------
class CleanContext(object):
    """Provide default namespace mapping and path cache."""
    # Warning-checking context installed in __init__, entered/exited with us.
    checkwarnings = None

    def __init__(self, quiet=False):
        if sys.flags.optimize >= 2:
            # under -OO, doctests cannot be run and therefore not all warnings
            # will be emitted
            quiet = True
        deprecations = (
            # Search behaviour is broken if search path starts with "/".
            ("This search is broken in 1.3 and earlier, and will be fixed "
             "in a future version. If you rely on the current behaviour, "
             "change it to '.+'", FutureWarning),
            # Element.getchildren() and Element.getiterator() are deprecated.
            ("This method will be removed in future versions. "
             "Use .+ instead.", DeprecationWarning),
            ("This method will be removed in future versions. "
             "Use .+ instead.", PendingDeprecationWarning))
        self.checkwarnings = support.check_warnings(*deprecations, quiet=quiet)

    def __enter__(self):
        from xml.etree import ElementPath
        self._nsmap = ET.register_namespace._namespace_map
        # Copy the default namespace mapping
        self._nsmap_copy = self._nsmap.copy()
        # Copy the path cache (should be empty)
        self._path_cache = ElementPath._cache
        ElementPath._cache = self._path_cache.copy()
        self.checkwarnings.__enter__()

    def __exit__(self, *args):
        from xml.etree import ElementPath
        # Restore mapping and path cache
        self._nsmap.clear()
        self._nsmap.update(self._nsmap_copy)
        ElementPath._cache = self._path_cache
        self.checkwarnings.__exit__(*args)
def test_main(module=None):
    """Run the suite against *module*, defaulting to the pure-Python ET."""
    # When invoked without a module, runs the Python ET tests by loading pyET.
    # Otherwise, uses the given module as the ET.
    if module is None:
        global pyET
        pyET = import_fresh_module('xml.etree.ElementTree',
                                   blocked=['_elementtree'])
        module = pyET

    # The tests read the module under test through the ET global.
    global ET
    ET = module

    test_classes = [
        ElementSlicingTest,
        BasicElementTest,
        IOTest,
        ParseErrorTest,
        XincludeTest,
        ElementTreeTest,
        ElementIterTest,
        TreeBuilderTest,
        ]

    # These tests will only run for the pure-Python version that doesn't import
    # _elementtree. We can't use skipUnless here, because pyET is filled in only
    # after the module is loaded.
    if pyET:
        test_classes.extend([
            NoAcceleratorTest,
            ])

    try:
        support.run_unittest(*test_classes)

        # XXX the C module should give the same warnings as the Python module
        with CleanContext(quiet=(module is not pyET)):
            support.run_doctest(sys.modules[__name__], verbosity=True)
    finally:
        # don't interfere with subsequent tests
        ET = pyET = None
# Direct invocation exercises the pure-Python ElementTree implementation.
if __name__ == '__main__':
    test_main()
| {
"content_hash": "45e6af3d240f54e7266ef5de1cf14591",
"timestamp": "",
"source": "github",
"line_count": 2449,
"max_line_length": 137,
"avg_line_length": 30.14169048591262,
"alnum_prop": 0.560859964506821,
"repo_name": "MalloyPower/parsing-python",
"id": "da1ad0968686edcc9f11c65ff763ccd23f140924",
"size": "73823",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "front-end/testsuite-python-lib/Python-3.3.0/Lib/test/test_xml_etree.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1963"
},
{
"name": "Lex",
"bytes": "238458"
},
{
"name": "Makefile",
"bytes": "4513"
},
{
"name": "OCaml",
"bytes": "412695"
},
{
"name": "Python",
"bytes": "17319"
},
{
"name": "Rascal",
"bytes": "523063"
},
{
"name": "Yacc",
"bytes": "429659"
}
],
"symlink_target": ""
} |
from cc_cedict import phrases_dict
class Counter:
    """Minimal frequency counter (hand-rolled stand-in for collections.Counter)."""

    def __init__(self):
        # item -> number of occurrences
        self._data = {}

    def add(self, item):
        """Record one occurrence of *item*."""
        # dict.get collapses the membership test + init into a single lookup.
        self._data[item] = self._data.get(item, 0) + 1

    def most_common(self):
        """Return (item, count) pairs sorted by descending count.

        sorted() is stable, so items with equal counts keep insertion order.
        """
        return sorted(self._data.items(), key=lambda kv: kv[1], reverse=True)
def han_to_code(han):
    """Return the Unicode code point of *han* in 'U+XXXX' notation."""
    # '{:X}' is the uppercase-hex presentation, equivalent to
    # hex(...)[2:].upper() without the slicing.
    return 'U+{:X}'.format(ord(han))
def main():
    """Aggregate per-character pinyin frequencies and print one line per han."""
    # han character -> Counter of its pinyin readings
    readings = {}
    for phrase, pinyin_list in phrases_dict.items():
        # skip entries whose pinyin list doesn't line up with the characters
        if len(phrase) != len(pinyin_list):
            continue
        for han, pinyins in zip(phrase, pinyin_list):
            counter = readings.setdefault(han, Counter())
            for pinyin in pinyins:
                counter.add(pinyin)

    for han, counter in sorted(readings.items(), key=lambda kv: ord(kv[0])):
        joined = ','.join(entry[0] for entry in counter.most_common())
        # 'xx' marks an unknown reading; drop those entries entirely
        if joined == 'xx':
            continue
        print('{0}: {1} # {2}'.format(han_to_code(han), joined, han))
# Emit the generated mapping when run as a script.
if __name__ == '__main__':
    main()
| {
"content_hash": "63962446c033730d8351e8952845c9f2",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 77,
"avg_line_length": 26.8,
"alnum_prop": 0.5190713101160862,
"repo_name": "mozillazg/pinyin-data",
"id": "5df3d97102cd2a87ca7501472765545cf9eee631",
"size": "1230",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/gen_cc_cedict.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "902"
},
{
"name": "Python",
"bytes": "18391"
},
{
"name": "Shell",
"bytes": "421"
}
],
"symlink_target": ""
} |
from patterns.pattern_class import PatternClass
from exceptions import NotRegisteredPatternException
class PatternRepository(dict):
    """Registry mapping pattern names to pattern objects."""

    def add(self, pattern):
        """Register *pattern* under its ``name``; returns self for chaining."""
        self[pattern.name] = pattern
        return self

    def find(self, pattern_name):
        """Return the registered pattern, raising when the name is unknown.

        Raises:
            NotRegisteredPatternException: if *pattern_name* was never added.
        """
        # EAFP: a single lookup, and a registered-but-falsy pattern is no
        # longer misreported as missing (the original tested truthiness).
        try:
            return self[pattern_name]
        except KeyError:
            raise NotRegisteredPatternException(pattern_name)

    def extract(self, pattern_name):
        """Return the ``extract`` attribute of the named pattern."""
        return self.find(pattern_name).extract

    def extract_all(self, pattern_name):
        """Return the ``extract_all`` attribute of the named pattern."""
        return self.find(pattern_name).extract_all
# Module-level singleton registry, pre-populated with the default pattern.
pattern_repository = PatternRepository()
pattern_repository.add(PatternClass())
| {
"content_hash": "b6d6fa92c93994bc518dabbc85d220f0",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 61,
"avg_line_length": 27.53846153846154,
"alnum_prop": 0.6941340782122905,
"repo_name": "harukaeru/SimplePythonParser",
"id": "bee8c983f1d782823d8cfea3d6affe6a7f65fef7",
"size": "716",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "repository.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3289"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import wx
class Base(object):
    """Interface for autocomplete choice providers.

    Subclasses implement get_choices() and get_display_text(); see Model
    below for the reference implementation.
    """

    def get_choices(self, partial_text):
        # Return an iterable of (data, display_text) pairs for the typed text.
        raise NotImplementedError

    def get_display_text(self, data):
        # Return the text shown to the user for one result object.
        raise NotImplementedError
class Model(Base):
    """Choice provider backed by a SQLAlchemy-style query.

    NOTE(review): Python 2 code (uses ``unicode``) tied to a wx application
    holding the DB session — confirm against the surrounding project.
    """

    def __init__(self, model=None, field=None, query=None, limit=None):
        # Derive the model class from the field when possible.
        if model is None and field is not None and hasattr(field, 'class_'):
            model = field.class_
        if not query:
            if model is None:
                raise Exception('No query provided and could not derive on as '
                                'the model was missing.')
            query = wx.GetApp().session.query(model)
        self.field = field
        self.query = query          # query object, or callable(partial_text)
        self.limit = limit          # max number of results, or None

    def get_choices(self, partial_text=None):
        """Return (data, display_text) pairs matching *partial_text*."""
        # Empty (but not None) search text short-circuits to no results.
        if partial_text is not None and not partial_text:
            return []
        query = self.query
        if callable(query):
            query = query(partial_text)
        if self.field is not None:
            # '%%%s%%' renders as %<text>% — a substring LIKE match.
            query = query.filter(self.field.like('%%%s%%' % partial_text))
        if self.limit:
            query = query[0:self.limit]
        return [(data, self.get_display_text(data)) for data in query]

    def get_display_text(self, data):
        # Default display: the object's unicode form, empty string for None.
        return unicode(data or '')
| {
"content_hash": "7a96dffbf8ddb0d843d43fedaf755027",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 79,
"avg_line_length": 32.175,
"alnum_prop": 0.5843045843045843,
"repo_name": "Bouke/mvvm",
"id": "f4111c683f192b5d4450ca74d86590cbca77f391",
"size": "1287",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "mvvm/viewmodel/choice_provider.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "63877"
}
],
"symlink_target": ""
} |
from congress.api import versions
from congress.api import webservice
class APIRouterV1(object):
    """Registers all Congress API v1 data models and URL handlers.

    The original monolithic constructor is decomposed into per-resource
    helpers; models, handlers, and their registration order are unchanged.
    """

    def __init__(self, resource_mgr, process_dict):
        """Bootstrap data models and handlers for the API definition."""
        # Setup /v1/
        version_v1_handler = versions.VersionV1Handler(r'/v1[/]?')
        resource_mgr.register_handler(version_v1_handler)

        policy_path, rule_path = self._register_policies(resource_mgr,
                                                         process_dict)
        ds_path = self._register_data_sources(resource_mgr, process_dict)
        self._register_statuses(resource_mgr, process_dict,
                                ds_path, policy_path, rule_path)
        self._register_tables(resource_mgr, process_dict,
                              ds_path, policy_path)
        self._register_system(resource_mgr, process_dict)

    @staticmethod
    def _register_policies(resource_mgr, process_dict):
        """Set up /v1/policies and the nested rule handlers.

        Returns (policy_path, rule_path) URL regexes for reuse elsewhere.
        """
        policies = process_dict['api-policy']
        resource_mgr.register_model('policies', policies)
        policy_collection_handler = webservice.CollectionHandler(
            r'/v1/policies',
            policies)
        resource_mgr.register_handler(policy_collection_handler)
        policy_path = r'/v1/policies/(?P<policy_id>[^/]+)'
        # Policies may be created/deleted but not updated or replaced.
        policy_element_handler = webservice.ElementHandler(
            policy_path,
            policies,
            policy_collection_handler,
            allow_update=False,
            allow_replace=False)
        resource_mgr.register_handler(policy_element_handler)

        policy_rules = process_dict['api-rule']
        resource_mgr.register_model('rules', policy_rules)
        rule_collection_handler = webservice.CollectionHandler(
            r'/v1/policies/(?P<policy_id>[^/]+)/rules',
            policy_rules,
            "{policy_id}")
        resource_mgr.register_handler(rule_collection_handler)
        rule_path = (r'/v1/policies/(?P<policy_id>[^/]+)' +
                     r'/rules/(?P<rule_id>[^/]+)')
        rule_element_handler = webservice.ElementHandler(
            rule_path,
            policy_rules,
            "{policy_id}")
        resource_mgr.register_handler(rule_element_handler)
        return policy_path, rule_path

    @staticmethod
    def _register_data_sources(resource_mgr, process_dict):
        """Set up /v1/data-sources plus nested schema and action handlers.

        Returns the data-source element URL regex.
        """
        data_sources = process_dict['api-datasource']
        resource_mgr.register_model('data_sources', data_sources)
        ds_collection_handler = webservice.CollectionHandler(
            r'/v1/data-sources',
            data_sources)
        resource_mgr.register_handler(ds_collection_handler)

        # Setup /v1/data-sources/<ds_id>
        ds_path = r'/v1/data-sources/(?P<ds_id>[^/]+)'
        ds_element_handler = webservice.ElementHandler(ds_path, data_sources)
        resource_mgr.register_handler(ds_element_handler)

        # Setup /v1/data-sources/<ds_id>/schema
        schema = process_dict['api-schema']
        schema_path = "%s/schema" % ds_path
        schema_element_handler = webservice.ElementHandler(schema_path, schema)
        resource_mgr.register_handler(schema_element_handler)

        # Setup /v1/data-sources/<ds_id>/tables/<table_id>/spec
        table_schema_path = "%s/tables/(?P<table_id>[^/]+)/spec" % ds_path
        table_schema_element_handler = webservice.ElementHandler(
            table_schema_path,
            schema)
        resource_mgr.register_handler(table_schema_element_handler)

        # Setup action handlers
        actions = process_dict['api-action']
        ds_actions_path = "%s/actions" % ds_path
        ds_actions_collection_handler = webservice.CollectionHandler(
            ds_actions_path, actions)
        resource_mgr.register_handler(ds_actions_collection_handler)
        return ds_path

    @staticmethod
    def _register_statuses(resource_mgr, process_dict,
                           ds_path, policy_path, rule_path):
        """Set up /status handlers for data sources, policies and rules."""
        statuses = process_dict['api-status']
        ds_status_path = "%s/status" % ds_path
        ds_status_element_handler = webservice.ElementHandler(ds_status_path,
                                                              statuses)
        resource_mgr.register_handler(ds_status_element_handler)

        policy_status_path = "%s/status" % policy_path
        policy_status_element_handler = webservice.ElementHandler(
            policy_status_path,
            statuses)
        resource_mgr.register_handler(policy_status_element_handler)

        rule_status_path = "%s/status" % rule_path
        rule_status_element_handler = webservice.ElementHandler(
            rule_status_path,
            statuses)
        resource_mgr.register_handler(rule_status_element_handler)

    @staticmethod
    def _register_tables(resource_mgr, process_dict, ds_path, policy_path):
        """Set up table and row handlers under data sources and policies."""
        tables = process_dict['api-table']
        resource_mgr.register_model('tables', tables)
        tables_path = "(%s|%s)/tables" % (ds_path, policy_path)
        table_collection_handler = webservice.CollectionHandler(
            tables_path,
            tables)
        resource_mgr.register_handler(table_collection_handler)
        table_path = "%s/(?P<table_id>[^/]+)" % tables_path
        table_element_handler = webservice.ElementHandler(table_path, tables)
        resource_mgr.register_handler(table_element_handler)

        table_rows = process_dict['api-row']
        resource_mgr.register_model('table_rows', table_rows)
        rows_path = "%s/rows" % table_path
        row_collection_handler = webservice.CollectionHandler(
            rows_path,
            table_rows)
        resource_mgr.register_handler(row_collection_handler)
        row_path = "%s/(?P<row_id>[^/]+)" % rows_path
        row_element_handler = webservice.ElementHandler(row_path, table_rows)
        resource_mgr.register_handler(row_element_handler)

    @staticmethod
    def _register_system(resource_mgr, process_dict):
        """Set up /v1/system/drivers handlers."""
        system = process_dict['api-system']
        resource_mgr.register_model('system', system)
        # NOTE(arosen): start url out with datasource-drivers since we don't
        # yet implement /v1/system/ yet.
        system_collection_handler = webservice.CollectionHandler(
            r'/v1/system/drivers',
            system)
        resource_mgr.register_handler(system_collection_handler)

        # Setup /v1/system/datasource-drivers/<driver_id>
        driver_path = r'/v1/system/drivers/(?P<driver_id>[^/]+)'
        driver_element_handler = webservice.ElementHandler(driver_path, system)
        resource_mgr.register_handler(driver_element_handler)
| {
"content_hash": "d0dab0ce20fa8315f3cc11ef8b04fac1",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 79,
"avg_line_length": 43.86923076923077,
"alnum_prop": 0.6247588988251798,
"repo_name": "ekcs/congress",
"id": "9978304c51ace45ec5e9c3c73392a18f85bda816",
"size": "6322",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "congress/api/router.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2744"
},
{
"name": "GAP",
"bytes": "7778"
},
{
"name": "HTML",
"bytes": "19644"
},
{
"name": "JavaScript",
"bytes": "9896"
},
{
"name": "Makefile",
"bytes": "503"
},
{
"name": "Mako",
"bytes": "1043"
},
{
"name": "Python",
"bytes": "1874341"
},
{
"name": "Shell",
"bytes": "8824"
}
],
"symlink_target": ""
} |
import pytest
import vcr
from django.core.management import call_command
from elvanto_sync import elvanto
from elvanto_sync.models import ElvantoGroup, ElvantoPerson
from elvanto_sync.tests.conftest import elvanto_vcr
@pytest.mark.django_db
class TestElvanto():
    """VCR-backed integration tests for the Elvanto pull helpers."""

    @elvanto_vcr
    def test_pull_groups_names(self):
        """Pulled groups expose their Elvanto name via str()."""
        # NOTE(review): renamed from ``test_pull_groups`` — a second method
        # with the same name below shadowed this one, so it never ran.
        elvanto.pull_groups()
        grp = ElvantoGroup.objects.get(e_id='7ebd2605-d3c7-11e4-95ba-068b656294b7')
        assert str(grp) == 'All'

    @elvanto_vcr
    def test_pull_people(self):
        """People are imported with display names and (possibly empty) emails."""
        elvanto.pull_people()
        calvin = ElvantoPerson.objects.get(e_id='f7cfa258-d3c6-11e4-95ba-068b656294b7')
        assert str(calvin) == 'John Calvin'
        assert calvin.email == 'john.calvin@geneva.com'
        chalmers = ElvantoPerson.objects.get(e_id='5a0a1cbc-d3c7-11e4-95ba-068b656294b7')
        assert str(chalmers) == 'Thomas Chalmers'
        assert chalmers.email == 'thomas.chalmers@edinburgh.com'
        knox = ElvantoPerson.objects.get(e_id='c1136264-d3c7-11e4-95ba-068b656294b7')
        assert str(knox) == 'John Knox'
        assert knox.email == ''
        owen = ElvantoPerson.objects.get(e_id='48366137-d3c7-11e4-95ba-068b656294b7')
        assert str(owen) == 'John Owen'
        assert owen.email == 'john.owen@cambridge.com'

    @elvanto_vcr
    def test_pull_groups(self):
        """Group membership and member emails after pulling people and groups."""
        elvanto.pull_people()
        elvanto.pull_groups()
        assert ElvantoGroup.objects.count() == 5
        grp_all = ElvantoGroup.objects.get(e_id='7ebd2605-d3c7-11e4-95ba-068b656294b7')
        e_emails = grp_all.elvanto_emails()
        assert 'john.calvin@geneva.com' in e_emails
        assert 'john.owen@cambridge.com' in e_emails
        assert 'thomas.chalmers@edinburgh.com' in e_emails
        assert grp_all.group_members.count() == 3

    @elvanto_vcr
    def test_refresh_data(self):
        """refresh_elvanto_data() completes against the recorded cassettes."""
        elvanto.refresh_elvanto_data()

    @elvanto_vcr
    def test_refresh_pull_management_command(self):
        """The pull_from_elvanto management command runs end to end."""
        call_command('pull_from_elvanto')

    @elvanto_vcr
    def test_delete_old_groups(self):
        """Groups missing from fresh Elvanto data are deleted; people remain."""
        elvanto.refresh_elvanto_data()
        assert ElvantoGroup.objects.count() == 5
        assert ElvantoPerson.objects.count() == 11
        # construct synthetic elvanto data:
        data = {
            'groups': {
                'group': [{
                    'id': '7ebd2605-d3c7-11e4-95ba-068b656294b7',
                }]
            }
        }
        elvanto.delete_missing_groups(data)
        # check:
        assert ElvantoGroup.objects.count() == 1
        assert ElvantoPerson.objects.count() == 11
| {
"content_hash": "cbfdbb7c39002a4f28a7da870f0469e8",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 89,
"avg_line_length": 36.04225352112676,
"alnum_prop": 0.6381398983978116,
"repo_name": "monty5811/elvanto_mail_sync",
"id": "61eb4ab0769640f548be51ff7b8e900ba61179cc",
"size": "2559",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "elvanto_sync/tests/test_elvanto.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Elm",
"bytes": "30814"
},
{
"name": "HTML",
"bytes": "1638"
},
{
"name": "JavaScript",
"bytes": "1881"
},
{
"name": "Python",
"bytes": "48461"
},
{
"name": "Shell",
"bytes": "419"
}
],
"symlink_target": ""
} |
from decimal import Decimal
_ = lambda x:x
#from i18n import _
from electrum_cesc.wallet import WalletStorage, Wallet
from electrum_cesc.util import format_satoshis, set_verbosity, StoreDict
from electrum_cesc.bitcoin import is_valid, COIN, TYPE_ADDRESS
from electrum_cesc.network import filter_protocol
import sys, getpass, datetime
# minimal fdisk like gui for console usage
# written by rofl0r, with some bits stolen from the text gui (ncurses)
class ElectrumGui:
    def __init__(self, config, daemon, plugins):
        """Wire the console UI to the daemon's network and open the wallet.

        Exits the process when no wallet file exists yet (Python 2 code:
        note the print statement below).
        """
        self.config = config
        self.network = daemon.network
        storage = WalletStorage(config.get_wallet_path())
        if not storage.file_exists:
            print "Wallet not found. try 'electrum-cesc create'"
            exit()

        self.done = 0
        self.last_balance = ""      # last balance line printed, to avoid repeats

        set_verbosity(False)

        # Fields of the pending payment order, filled in by enter_order().
        self.str_recipient = ""
        self.str_description = ""
        self.str_amount = ""
        self.str_fee = ""

        self.wallet = Wallet(storage)
        self.wallet.start_threads(self.network)
        self.contacts = StoreDict(self.config, 'contacts')

        self.network.register_callback(self.on_network, ['updated', 'banner'])
        self.commands = [_("[h] - displays this help text"), \
                         _("[i] - display transaction history"), \
                         _("[o] - enter payment order"), \
                         _("[p] - print stored payment order"), \
                         _("[s] - send stored payment order"), \
                         _("[r] - show own receipt addresses"), \
                         _("[c] - display contacts"), \
                         _("[b] - print server banner"), \
                         _("[q] - quit") ]
        self.num_commands = len(self.commands)
def on_network(self, event, *args):
if event == 'updated':
self.updated()
elif event == 'banner':
self.print_banner()
def main_command(self):
self.print_balance()
c = raw_input("enter command: ")
if c == "h" : self.print_commands()
elif c == "i" : self.print_history()
elif c == "o" : self.enter_order()
elif c == "p" : self.print_order()
elif c == "s" : self.send_order()
elif c == "r" : self.print_addresses()
elif c == "c" : self.print_contacts()
elif c == "b" : self.print_banner()
elif c == "n" : self.network_dialog()
elif c == "e" : self.settings_dialog()
elif c == "q" : self.done = 1
else: self.print_commands()
def updated(self):
s = self.get_balance()
if s != self.last_balance:
print(s)
self.last_balance = s
return True
    def print_commands(self):
        """Display the help text listing all available commands."""
        self.print_list(self.commands, "Available commands")
def print_history(self):
width = [20, 40, 14, 14]
delta = (80 - sum(width) - 4)/3
format_str = "%"+"%d"%width[0]+"s"+"%"+"%d"%(width[1]+delta)+"s"+"%" \
+ "%d"%(width[2]+delta)+"s"+"%"+"%d"%(width[3]+delta)+"s"
b = 0
messages = []
for item in self.wallet.get_history():
tx_hash, confirmations, value, timestamp, balance = item
if confirmations:
try:
time_str = datetime.datetime.fromtimestamp(timestamp).isoformat(' ')[:-3]
except Exception:
time_str = "unknown"
else:
time_str = 'unconfirmed'
label = self.wallet.get_label(tx_hash)
messages.append( format_str%( time_str, label, format_satoshis(value, whitespaces=True), format_satoshis(balance, whitespaces=True) ) )
self.print_list(messages[::-1], format_str%( _("Date"), _("Description"), _("Amount"), _("Balance")))
def print_balance(self):
print(self.get_balance())
def get_balance(self):
if self.wallet.network.is_connected():
if not self.wallet.up_to_date:
msg = _( "Synchronizing..." )
else:
c, u, x = self.wallet.get_balance()
msg = _("Balance")+": %f "%(Decimal(c) / COIN)
if u:
msg += " [%f unconfirmed]"%(Decimal(u) / COIN)
if x:
msg += " [%f unmatured]"%(Decimal(x) / COIN)
else:
msg = _( "Not connected" )
return(msg)
def print_contacts(self):
messages = map(lambda x: "%20s %45s "%(x[0], x[1][1]), self.contacts.items())
self.print_list(messages, "%19s %25s "%("Key", "Value"))
def print_addresses(self):
messages = map(lambda addr: "%30s %30s "%(addr, self.wallet.labels.get(addr,"")), self.wallet.addresses())
self.print_list(messages, "%19s %25s "%("Address", "Label"))
def print_order(self):
print("send order to " + self.str_recipient + ", amount: " + self.str_amount \
+ "\nfee: " + self.str_fee + ", desc: " + self.str_description)
def enter_order(self):
self.str_recipient = raw_input("Pay to: ")
self.str_description = raw_input("Description : ")
self.str_amount = raw_input("Amount: ")
self.str_fee = raw_input("Fee: ")
def send_order(self):
self.do_send()
def print_banner(self):
for i, x in enumerate( self.wallet.network.banner.split('\n') ):
print( x )
def print_list(self, list, firstline):
self.maxpos = len(list)
if not self.maxpos: return
print(firstline)
for i in range(self.maxpos):
msg = list[i] if i < len(list) else ""
print(msg)
def main(self):
while self.done == 0: self.main_command()
def do_send(self):
if not is_valid(self.str_recipient):
print(_('Invalid Cryptoescudo address'))
return
try:
amount = int(Decimal(self.str_amount) * COIN)
except Exception:
print(_('Invalid Amount'))
return
try:
fee = int(Decimal(self.str_fee) * COIN)
except Exception:
print(_('Invalid Fee'))
return
if self.wallet.use_encryption:
password = self.password_dialog()
if not password:
return
else:
password = None
c = ""
while c != "y":
c = raw_input("ok to send (y/n)?")
if c == "n": return
try:
tx = self.wallet.mktx([(TYPE_ADDRESS, self.str_recipient, amount)], password, self.config, fee)
except Exception as e:
print(str(e))
return
if self.str_description:
self.wallet.labels[tx.hash()] = self.str_description
print(_("Please wait..."))
status, msg = self.network.broadcast(tx)
if status:
print(_('Payment sent.'))
#self.do_clear()
#self.update_contacts_tab()
else:
print(_('Error'))
def network_dialog(self):
print("use 'electrum-cesc setconfig server/proxy' to change your network settings")
return True
def settings_dialog(self):
print("use 'electrum-cesc setconfig' to change your settings")
return True
def password_dialog(self):
return getpass.getpass()
# XXX unused
def run_receive_tab(self, c):
#if c == 10:
# out = self.run_popup('Address', ["Edit label", "Freeze", "Prioritize"])
return
def run_contacts_tab(self, c):
pass
| {
"content_hash": "c2121daf3814eb587da9bd86d61dc2af",
"timestamp": "",
"source": "github",
"line_count": 229,
"max_line_length": 147,
"avg_line_length": 33.23580786026201,
"alnum_prop": 0.5255551175929576,
"repo_name": "Marcdnd/electrum-cesc",
"id": "e0e7cc9de9ccb978483a46d221073831c7f90c1b",
"size": "7611",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gui/stdio.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3536"
},
{
"name": "GLSL",
"bytes": "289"
},
{
"name": "HTML",
"bytes": "3354"
},
{
"name": "Makefile",
"bytes": "849"
},
{
"name": "NSIS",
"bytes": "6970"
},
{
"name": "PHP",
"bytes": "404"
},
{
"name": "Protocol Buffer",
"bytes": "2354"
},
{
"name": "Python",
"bytes": "2163404"
},
{
"name": "Shell",
"bytes": "7908"
}
],
"symlink_target": ""
} |
from uuid import uuid4
import plistlib
from flask import current_app, render_template, abort, Blueprint, make_response, url_for, request, g
import os
from commandment.enroll import AllDeviceAttributes
from commandment.enroll.profiles import ca_trust_payload_from_configuration, scep_payload_from_configuration
from commandment.profiles.models import MDMPayload, Profile, PEMCertificatePayload, DERCertificatePayload, SCEPPayload
from commandment.profiles import PROFILE_CONTENT_TYPE, plist_schema as profile_schema, PayloadScope
from commandment.models import db, Organization, SCEPConfig
from sqlalchemy.orm.exc import NoResultFound, MultipleResultsFound
from commandment.plistutil.nonewriter import dumps as dumps_none
from commandment.enroll.util import generate_enroll_profile
from commandment.cms.decorators import verify_cms_signers
# Flask blueprint for the /enroll endpoints; registered by the app factory.
enroll_app = Blueprint('enroll_app', __name__)
@enroll_app.route('/')
def index():
    """Render the interactive enrollment landing page."""
    template_name = 'enroll.html'
    return render_template(template_name)
def base64_to_pem(crypto_type, b64_text, width=76):
    """Wrap base64 text at *width* columns between PEM BEGIN/END markers.

    :param crypto_type: label for the markers, e.g. 'CERTIFICATE'.
    :param b64_text: base64 payload without line breaks.
    :param width: line-wrap column (PEM convention is 64 or 76).
    """
    wrapped = ''.join(
        b64_text[start:start + width] + '\n'
        for start in range(0, len(b64_text), width)
    )
    return '-----BEGIN %s-----\n%s-----END %s-----' % (crypto_type, wrapped, crypto_type)
@enroll_app.route('/trust.mobileconfig', methods=['GET'])
def trust_mobileconfig():
    """Generate a trust profile, if one is required.

    :resheader Content-Type: application/x-apple-aspen-config
    :statuscode 200:
    :statuscode 500: The system has not been configured, so we can't produce anything.
    """
    try:
        org = db.session.query(Organization).one()
    except NoResultFound:
        abort(500, 'No organization is configured, cannot generate enrollment profile.')
    except MultipleResultsFound:
        abort(500, 'Multiple organizations, backup your database and start again')
    profile = Profile(
        identifier=org.payload_prefix + '.trust',
        uuid=uuid4(),
        display_name='Commandment Trust Profile',
        description='Allows your device to trust the MDM server',
        organization=org.name,
        version=1,
        scope=PayloadScope.System,
    )
    if 'CA_CERTIFICATE' in current_app.config:
        # If you specified a CA certificate, we assume it isn't a CA trusted by Apple devices.
        ca_payload = ca_trust_payload_from_configuration()
        profile.payloads.append(ca_payload)
    if 'SSL_CERTIFICATE' in current_app.config:
        # SSL_CERTIFICATE is resolved relative to this package's directory.
        basepath = os.path.dirname(__file__)
        certpath = os.path.join(basepath, current_app.config['SSL_CERTIFICATE'])
        with open(certpath, 'rb') as fd:
            pem_payload = PEMCertificatePayload(
                uuid=uuid4(),
                identifier=org.payload_prefix + '.ssl',
                payload_content=fd.read(),
                display_name='Web Server Certificate',
                description='Required for your device to trust the server',
                type='com.apple.security.pkcs1',
                version=1
            )
            profile.payloads.append(pem_payload)
    schema = profile_schema.ProfileSchema()
    result = schema.dump(profile)
    # dumps_none is a plist writer that skips None values (plistlib would raise)
    plist_data = dumps_none(result.data, skipkeys=True)
    return plist_data, 200, {'Content-Type': PROFILE_CONTENT_TYPE,
                             'Content-Disposition': 'attachment; filename="trust.mobileconfig"'}
@enroll_app.route('/profile', methods=['GET', 'POST'])
def enroll():
    """Serialize a freshly generated enrollment profile as a plist response."""
    enrollment_profile = generate_enroll_profile()
    dumped = profile_schema.ProfileSchema().dump(enrollment_profile)
    payload = dumps_none(dumped.data, skipkeys=True)
    headers = {'Content-Type': PROFILE_CONTENT_TYPE}
    return payload, 200, headers
@enroll_app.route('/ota')
def ota_enroll():
    """Over-The-Air Profile Delivery Phase 1.5.

    This endpoint represents the delivery of the `Profile Service` profile that should be delivered AFTER the user has
    successfully authenticated.
    """
    try:
        org = db.session.query(Organization).one()
    except NoResultFound:
        abort(500, 'No organization is configured, cannot generate enrollment profile.')
    except MultipleResultsFound:
        abort(500, 'Multiple organizations, backup your database and start again')
    # A 'Profile Service' profile is a plain dict, not a Profile model: it only
    # tells the device where to POST its attributes for OTA phase 2/3.
    profile = {
        'PayloadType': 'Profile Service',
        'PayloadIdentifier': org.payload_prefix + '.ota.enroll',
        'PayloadUUID': str(uuid4()),
        'PayloadVersion': 1,
        'PayloadDisplayName': 'Commandment Profile Service',
        'PayloadDescription': 'Enrolls your device with Commandment',
        'PayloadContent': [{
            'URL': 'https://{}:{}/enroll/ota_authenticate'.format(current_app.config['PUBLIC_HOSTNAME'], current_app.config['PORT']),
            'DeviceAttributes': list(AllDeviceAttributes),
            # TODO: static placeholder challenge — should be a per-request nonce
            'Challenge': 'TODO',
        }],
    }
    plist_data = dumps_none(profile)
    return plist_data, 200, {'Content-Type': PROFILE_CONTENT_TYPE}
@enroll_app.route('/ota_authenticate', methods=['POST'])
@verify_cms_signers
def ota_authenticate():
    """Over-The-Air Profile Delivery Phase 3 and 4.

    This endpoint represents the OTA Phase 3 and 4, "/profile" endpoint as specified in apples document "Over-The-Air
    Profile Delivery".

    There are two types of requests made here:

    - The first request is signed by the iPhone Device CA and contains the challenge in the `Profile Service` payload,
      we respond with the SCEP detail.
    - The second request is signed by the issued SCEP certificate. We should respond with an enrollment profile.
      It also contains the same device attributes sent in the previous step, but this time they are authenticated by
      our SCEP CA.

    Examples:
        Signed plist given in the first request::

            {
                'CHALLENGE': '<CHALLENGE FROM PROFILE HERE>',
                'IMEI': 'empty if macOS',
                'MEID': 'empty if macOS',
                'NotOnConsole': False,
                'PRODUCT': 'MacPro6,1',
                'SERIAL': 'C020000000000',
                'UDID': '00000000-0000-0000-0000-000000000000',
                'UserID': '00000000-0000-0000-0000-000000000000',
                'UserLongName': 'Joe User',
                'UserShortName': 'juser',
                'VERSION': '16F73'
            }

    See Also:
        - `Over-the-Air Profile Delivery and Configuration <https://developer.apple.com/library/content/documentation/NetworkingInternet/Conceptual/iPhoneOTAConfiguration/Introduction/Introduction.html#//apple_ref/doc/uid/TP40009505-CH1-SW1>`_.
    """
    # Verified CMS payload and signer certs extracted by @verify_cms_signers.
    signed_data = g.signed_data
    signers = g.signers
    # TODO: This should Validate to iPhone Device CA but we can't because:
    # http://www.openradar.me/31423312
    device_attributes = plistlib.loads(signed_data)
    current_app.logger.debug(device_attributes)
    try:
        org = db.session.query(Organization).one()
    except NoResultFound:
        abort(500, 'No organization is configured, cannot generate enrollment profile.')
    except MultipleResultsFound:
        abort(500, 'Multiple organizations, backup your database and start again')
    # Reply SCEP
    # NOTE(review): both phases currently receive the same SCEP payload; the
    # CHALLENGE value is not verified here yet.
    profile = Profile(
        identifier=org.payload_prefix + '.ota.phase3',
        uuid=uuid4(),
        display_name='Commandment OTA SCEP Enrollment',
        description='Retrieves a SCEP Certificate to complete OTA Enrollment',
        organization=org.name,
        version=1,
        scope=PayloadScope.System,
    )
    scep_payload = scep_payload_from_configuration()
    profile.payloads.append(scep_payload)
    schema = profile_schema.ProfileSchema()
    result = schema.dump(profile)
    plist_data = dumps_none(result.data, skipkeys=True)
    return plist_data, 200, {'Content-Type': PROFILE_CONTENT_TYPE}
| {
"content_hash": "5d7dead44a56f0cf061b6b62c160e719",
"timestamp": "",
"source": "github",
"line_count": 200,
"max_line_length": 244,
"avg_line_length": 38.85,
"alnum_prop": 0.6718146718146718,
"repo_name": "mosen/commandment",
"id": "138629e4e45e9f2070b1434ff7e699579b199c7b",
"size": "7770",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "commandment/enroll/app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2988"
},
{
"name": "HTML",
"bytes": "1265"
},
{
"name": "JavaScript",
"bytes": "6113"
},
{
"name": "Mako",
"bytes": "1110"
},
{
"name": "Python",
"bytes": "420945"
},
{
"name": "Shell",
"bytes": "148"
},
{
"name": "TypeScript",
"bytes": "292822"
}
],
"symlink_target": ""
} |
from pyplatformerengine.utilities.Color import Color
from pyplatformerengine.models.SpritesheetLoader import SpritesheetLoader
import pygame
"""
Class utility to build sprites.
"""
class SpritesheetFactory:
    """Builds sprite images for a visual game object from a configuration dict.

    Fixes vs. original: the triple-quoted strings that sat *above* each method
    (discarded bare-string statements) are now real docstrings, and the color
    name lookup uses a dict instead of an if/elif chain.
    """

    def createImages(self, obj):
        """Create a dict of images keyed by label, as described by *obj*.

        `obj["spriteSheetImages"]` entries of type PYGAME_SURFACE become
        solid-color surfaces filled with `obj["spritesheetFill"]`; SPRITE_IMG
        entries are cut from a spritesheet file (BLACK used as the colorkey).
        """
        color = self.choosePredefinedColor(obj["spritesheetFill"])
        spriteStages = {}
        for img in obj["spriteSheetImages"]:
            if img["type"] == "PYGAME_SURFACE":
                spriteStages[img["label"]] = self.createGenericSurface(img, color)
            elif img["type"] == "SPRITE_IMG":
                spriteStages[img["label"]] = self.loadSpriteAnimations(img, Color.BLACK)
        return spriteStages

    def choosePredefinedColor(self, colorName):
        """Map a color name string to its Color constant (BLACK if unknown)."""
        named_colors = {
            "WHITE": Color.WHITE,
            "RED": Color.RED,
            "PINK": Color.PINK,
            "BLUE": Color.BLUE,
            "GREEN": Color.GREEN,
        }
        return named_colors.get(colorName, Color.BLACK)

    def createGenericSurface(self, img, color):
        """Create a plain pygame surface of the configured size, filled with *color*."""
        image = pygame.Surface((img["width"], img["height"]))
        image.fill(color)
        return image

    def loadSpriteAnimations(self, img, color):
        """Cut the configured rectangle out of the spritesheet file in *img*.

        *color* is passed through as the transparency colorkey.
        """
        spritesheetLoader = SpritesheetLoader()
        spriteMap = spritesheetLoader.loadSpriteMap(img["file"])
        return spritesheetLoader.imageAt(
            spriteMap,
            (img["x"], img["y"], img["width"], img["height"]),
            color,
        )
| {
"content_hash": "d2126582653779fd56c55362cdd98355",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 110,
"avg_line_length": 31.96551724137931,
"alnum_prop": 0.5911542610571737,
"repo_name": "maximx1/PyPlatformerEngine",
"id": "8434d93630eb43cd35f1835537eec4a9b8c85603",
"size": "1854",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyplatformerengine/entities/SpritesheetFactory.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42190"
}
],
"symlink_target": ""
} |
"""Auth Components for Consoles."""
import time
from oslo.config import cfg
from nova.cells import rpcapi as cells_rpcapi
from nova.compute import rpcapi as compute_rpcapi
from nova import manager
from nova.openstack.common.gettextutils import _
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import memorycache
LOG = logging.getLogger(__name__)

# oslo.config options controlling token lifetime and manager selection;
# registered on the global CONF below.
consoleauth_opts = [
    cfg.IntOpt('console_token_ttl',
               default=600,
               help='How many seconds before deleting tokens'),
    cfg.StrOpt('consoleauth_manager',
               default='nova.consoleauth.manager.ConsoleAuthManager',
               help='Manager for console auth'),
    ]

CONF = cfg.CONF
CONF.register_opts(consoleauth_opts)
# cells.enable decides whether validation goes through the cells RPC API
CONF.import_opt('enable', 'nova.cells.opts', group='cells')
class ConsoleAuthManager(manager.Manager):
    """Manages token based authentication."""

    RPC_API_VERSION = '1.2'

    def __init__(self, scheduler_driver=None, *args, **kwargs):
        super(ConsoleAuthManager, self).__init__(service_name='consoleauth',
                                                 *args, **kwargs)
        # memorycache client storing token -> connection-info JSON with a TTL
        self.mc = memorycache.get_client()
        self.compute_rpcapi = compute_rpcapi.ComputeAPI()
        self.cells_rpcapi = cells_rpcapi.CellsAPI()

    def _get_tokens_for_instance(self, instance_uuid):
        # Tokens are indexed per instance as a JSON-encoded list.
        tokens_str = self.mc.get(instance_uuid.encode('UTF-8'))
        if not tokens_str:
            tokens = []
        else:
            tokens = jsonutils.loads(tokens_str)
        return tokens

    def authorize_console(self, context, token, console_type, host, port,
                          internal_access_path, instance_uuid=None):
        """Store a console token (with TTL) and index it by instance uuid."""
        token_dict = {'token': token,
                      'instance_uuid': instance_uuid,
                      'console_type': console_type,
                      'host': host,
                      'port': port,
                      'internal_access_path': internal_access_path,
                      'last_activity_at': time.time()}
        data = jsonutils.dumps(token_dict)
        self.mc.set(token.encode('UTF-8'), data, CONF.console_token_ttl)
        if instance_uuid is not None:
            # NOTE(review): the per-instance token list has no TTL; expired
            # tokens are only purged by delete_tokens_for_instance().
            tokens = self._get_tokens_for_instance(instance_uuid)
            tokens.append(token)
            self.mc.set(instance_uuid.encode('UTF-8'),
                        jsonutils.dumps(tokens))
        LOG.audit(_("Received Token: %(token)s, %(token_dict)s"),
                  {'token': token, 'token_dict': token_dict})

    def _validate_token(self, context, token):
        # Confirm the console port recorded in the token is still valid for
        # the instance; False when the token has no instance attached.
        instance_uuid = token['instance_uuid']
        if instance_uuid is None:
            return False
        # NOTE(comstud): consoleauth was meant to run in API cells. So,
        # if cells is enabled, we must call down to the child cell for
        # the instance.
        if CONF.cells.enable:
            return self.cells_rpcapi.validate_console_port(context,
                    instance_uuid, token['port'], token['console_type'])
        instance = self.db.instance_get_by_uuid(context, instance_uuid)
        return self.compute_rpcapi.validate_console_port(context,
                                                         instance,
                                                         token['port'],
                                                         token['console_type'])

    def check_token(self, context, token):
        """Return the token dict if present and still valid, else None."""
        token_str = self.mc.get(token.encode('UTF-8'))
        token_valid = (token_str is not None)
        LOG.audit(_("Checking Token: %(token)s, %(token_valid)s"),
                  {'token': token, 'token_valid': token_valid})
        if token_valid:
            token = jsonutils.loads(token_str)
            if self._validate_token(context, token):
                return token

    def delete_tokens_for_instance(self, context, instance_uuid):
        # Drop every token issued for the instance plus the index entry itself.
        tokens = self._get_tokens_for_instance(instance_uuid)
        for token in tokens:
            self.mc.delete(token.encode('UTF-8'))
        self.mc.delete(instance_uuid.encode('UTF-8'))

    # NOTE(russellb) This method can be removed in 2.0 of this API. It is
    # deprecated in favor of the method in the base API.
    def get_backdoor_port(self, context):
        return self.backdoor_port
| {
"content_hash": "ee15819d0555e3d50c4e58b3aac19381",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 76,
"avg_line_length": 38.486486486486484,
"alnum_prop": 0.5894194756554307,
"repo_name": "plumgrid/plumgrid-nova",
"id": "021914b8fe103af98de0baeb44c5afd535d99fb2",
"size": "4978",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/consoleauth/manager.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "11944269"
},
{
"name": "Shell",
"bytes": "17148"
}
],
"symlink_target": ""
} |
from flask import url_for
import pytest
@pytest.fixture
def response(client):
    # One shared GET of the healthcheck endpoint for the assertions below.
    return client.get(url_for('healthcheck.check_health'))
class WhenBrowsingToHealthcheckEndpoint(object):
    """Spec-style checks for GET on the healthcheck endpoint."""

    def it_exists(self, response):
        # 200 proves the route is registered and reachable
        assert response.status_code == 200

    def it_reports_site_ok(self, response):
        # the JSON body advertises overall site health
        assert response.json['site'] is True
| {
"content_hash": "4c0a91ccaa70f9cf7f6e55fce49a6b36",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 58,
"avg_line_length": 22.4375,
"alnum_prop": 0.7186629526462396,
"repo_name": "crossgovernmentservices/csd-notes",
"id": "ad53db094746387fb98ce23d210027f33a211cff",
"size": "359",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/spec/test_healthcheck.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11735"
},
{
"name": "HTML",
"bytes": "4213"
},
{
"name": "JavaScript",
"bytes": "1199"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "52219"
}
],
"symlink_target": ""
} |
import hashlib
import base64
import re
import hmac
import os
import json
import ecdsa
import pyaes
from .util import bfh, bh2u, to_string
from . import version
from .util import print_error, InvalidPassword, assert_bytes, to_bytes, inv_dict
from . import segwit_addr
def read_json_dict(filename):
    """Load a JSON file located next to this module.

    Returns the parsed object, or {} when the file is missing or unparsable.
    Fixes vs. original: the file handle is closed via a with-statement (it
    leaked on the success path), and the bare `except:` is narrowed so
    SystemExit/KeyboardInterrupt are no longer swallowed.
    """
    path = os.path.join(os.path.dirname(__file__), filename)
    try:
        with open(path, 'r') as f:
            r = json.loads(f.read())
    except (OSError, ValueError):
        # OSError: missing/unreadable file; ValueError: malformed JSON
        r = {}
    return r
# Version numbers for BIP32 extended keys
# standard: xprv, xpub
# segwit in p2sh: yprv, ypub
# native segwit: zprv, zpub
XPRV_HEADERS = {
    'standard': 0x0488ade4,
    'segwit_p2sh': 0x049d7878,
    'segwit': 0x4b2430c
}
XPUB_HEADERS = {
    'standard': 0x0488b21e,
    'segwit_p2sh': 0x049d7cb2,
    'segwit': 0x4b24746
}

# Bitcoin network constants (mainnet defaults; see set_testnet() below,
# which rebinds these globals for testnet)
TESTNET = False
ADDRTYPE_P2PKH = 0   # base58 version byte for P2PKH addresses
ADDRTYPE_P2SH = 5    # base58 version byte for P2SH addresses
SEGWIT_HRP = "bc"    # bech32 human-readable prefix
HEADERS_URL = "https://headers.electrum.org/blockchain_headers"
GENESIS = "000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f"
SERVERLIST = 'servers.json'
DEFAULT_PORTS = {'t':'50001', 's':'50002'}  # t = tcp, s = ssl
DEFAULT_SERVERS = read_json_dict('servers.json')
def set_testnet():
    """Switch module-level network constants to Bitcoin testnet.

    Mutates globals in place, so it must run at startup before any of the
    constants are read or captured by other modules.
    """
    global ADDRTYPE_P2PKH, ADDRTYPE_P2SH
    global TESTNET, HEADERS_URL
    global GENESIS
    global SEGWIT_HRP
    global DEFAULT_PORTS, SERVERLIST, DEFAULT_SERVERS
    TESTNET = True
    ADDRTYPE_P2PKH = 111
    ADDRTYPE_P2SH = 196
    SEGWIT_HRP = "tb"
    HEADERS_URL = "https://headers.electrum.org/testnet_headers"
    GENESIS = "000000000933ea01ad0ee984209779baaec3ced90fa3f408719526f8d77f4943"
    SERVERLIST = 'servers_testnet.json'
    DEFAULT_PORTS = {'t':'51001', 's':'51002'}
    DEFAULT_SERVERS = read_json_dict('servers_testnet.json')
################################## transactions

# fee-related constants (satoshi units; FEE_TARGETS are confirmation targets)
FEE_STEP = 10000
MAX_FEE_RATE = 300000
FEE_TARGETS = [25, 10, 5, 2]

COINBASE_MATURITY = 100   # blocks before a coinbase output is spendable
COIN = 100000000          # satoshis per coin

# supported types of transaction outputs
TYPE_ADDRESS = 0
TYPE_PUBKEY = 1
TYPE_SCRIPT = 2

# AES encryption: prefer the fast PyCryptodome implementation when installed;
# AES = None makes the aes_* helpers fall back to pure-python pyaes.
try:
    from Cryptodome.Cipher import AES
except:
    AES = None
def aes_encrypt_with_iv(key, iv, data):
    """AES-CBC encrypt *data* (bytes) with PKCS#7 padding.

    Uses PyCryptodome's AES when available, else the pure-python pyaes.
    Bug fix: the original appended a str (`chr(padlen) * padlen`) to a bytes
    object, which raises TypeError on Python 3; the padding is now bytes.
    The dead `if padlen == 0` branch was removed (16 - (n % 16) is 1..16).
    """
    assert_bytes(key, iv, data)
    if AES:
        padlen = 16 - (len(data) % 16)
        data += bytes([padlen]) * padlen
        e = AES.new(key, AES.MODE_CBC, iv).encrypt(data)
        return e
    else:
        aes_cbc = pyaes.AESModeOfOperationCBC(key, iv=iv)
        aes = pyaes.Encrypter(aes_cbc)
        e = aes.feed(data) + aes.feed()  # empty aes.feed() appends pkcs padding
        return e
def aes_decrypt_with_iv(key, iv, data):
    """AES-CBC decrypt *data* and strip/verify PKCS#7 padding.

    Raises InvalidPassword when the padding is inconsistent (the usual
    symptom of decrypting with a wrong key).
    Bug fix: on Python 3 indexing bytes yields ints, so the original's
    `ord(data[-1])` / `ord(i)` raised TypeError; an explicit padlen range
    check was also added.
    """
    assert_bytes(key, iv, data)
    if AES:
        cipher = AES.new(key, AES.MODE_CBC, iv)
        data = cipher.decrypt(data)
        padlen = data[-1]
        if padlen < 1 or padlen > 16:
            raise InvalidPassword()
        for i in data[-padlen:]:
            if i != padlen:
                raise InvalidPassword()
        return data[0:-padlen]
    else:
        aes_cbc = pyaes.AESModeOfOperationCBC(key, iv=iv)
        aes = pyaes.Decrypter(aes_cbc)
        s = aes.feed(data) + aes.feed()  # empty aes.feed() strips pkcs padding
        return s
def EncodeAES(secret, s):
    """Encrypt bytes *s* under *secret*; returns base64(iv || ciphertext)."""
    assert_bytes(s)
    iv = bytes(os.urandom(16))
    ciphertext = aes_encrypt_with_iv(secret, iv, s)
    return base64.b64encode(iv + ciphertext)
def DecodeAES(secret, e):
    """Invert EncodeAES: base64-decode, split off the 16-byte iv, decrypt."""
    raw = bytes(base64.b64decode(e))
    iv, ciphertext = raw[:16], raw[16:]
    return aes_decrypt_with_iv(secret, iv, ciphertext)
def pw_encode(s, password):
    """Encrypt string *s* with *password*; a falsy password returns *s* as-is."""
    if not password:
        return s
    secret = Hash(password)
    return EncodeAES(secret, to_bytes(s, "utf8")).decode('utf8')
def pw_decode(s, password):
    """Decrypt *s* with *password*; password=None returns *s* unchanged.

    Raises InvalidPassword on any decryption/decoding failure.  NOTE: this is
    asymmetric with pw_encode, which also skips encryption for an *empty*
    password, whereas an empty string here IS used as a key.
    """
    if password is None:
        return s
    secret = Hash(password)
    try:
        decrypted = to_string(DecodeAES(secret, s), "utf8")
    except Exception:
        raise InvalidPassword()
    return decrypted
def rev_hex(s):
    """Return hex string *s* with its byte order reversed."""
    raw = bfh(s)
    return bh2u(raw[::-1])
def int_to_hex(i, length=1):
    """Serialize non-negative int *i* as little-endian hex of *length* bytes.

    Bug fix: when *i* did not fit into *length* bytes the original either
    raised from an odd-length hex string or, worse, silently returned a
    wrong-width encoding; int.to_bytes now raises OverflowError consistently.
    """
    assert isinstance(i, int)
    return i.to_bytes(length, byteorder='little').hex()
def var_int(i):
    """Bitcoin variable-length integer ("CompactSize") encoding, as hex.

    https://en.bitcoin.it/wiki/Protocol_specification#Variable_length_integer
    """
    if i < 0xfd:
        return int_to_hex(i)
    if i <= 0xffff:
        return "fd" + int_to_hex(i, 2)
    if i <= 0xffffffff:
        return "fe" + int_to_hex(i, 4)
    return "ff" + int_to_hex(i, 8)
def op_push(i):
    """Return the hex opcode(s) that push *i* bytes onto the script stack.

    Bug fix: the original compared with strict '<' against 0xff and 0xffff,
    so pushes of exactly 255 (or 65535) bytes used a larger OP_PUSHDATA form
    than necessary, producing non-minimal (non-standard) pushes.
    """
    if i < 0x4c:            # direct push opcodes 0x01-0x4b
        return int_to_hex(i)
    elif i <= 0xff:         # OP_PUSHDATA1
        return '4c' + int_to_hex(i)
    elif i <= 0xffff:       # OP_PUSHDATA2
        return '4d' + int_to_hex(i, 2)
    else:                   # OP_PUSHDATA4
        return '4e' + int_to_hex(i, 4)
def push_script(x):
    """Prefix hex script data *x* with the opcode that pushes it."""
    nbytes = len(x) // 2
    return op_push(nbytes) + x
def sha256(x):
    """Single SHA-256 of *x* (str is UTF-8 encoded first); returns bytes."""
    data = to_bytes(x, 'utf8')
    return bytes(hashlib.sha256(data).digest())
def Hash(x):
    """Double SHA-256 ("HASH256"), used for checksums and signing digests."""
    data = to_bytes(x, 'utf8')
    return bytes(sha256(sha256(data)))
def hash_encode(x):
    """Hex-encode bytes in reversed (display) byte order."""
    return bh2u(x[::-1])


def hash_decode(x):
    """Decode display-order hex back into internal byte order."""
    return bfh(x)[::-1]


def hmac_sha_512(x, y):
    """HMAC-SHA512 of message *y* under key *x*; returns the raw digest.

    Fix: these were lambda assignments (PEP 8 E731); proper defs give real
    names in tracebacks and room for documentation.
    """
    return hmac.new(x, y, hashlib.sha512).digest()
def is_new_seed(x, prefix=version.SEED_PREFIX):
    """True if *x* is a "new"-style Electrum seed whose version tag starts with *prefix*."""
    from . import mnemonic
    normalized = mnemonic.normalize_text(x)
    version_tag = bh2u(hmac_sha_512(b"Seed version", normalized.encode('utf8')))
    return version_tag.startswith(prefix)
def is_old_seed(seed):
    """True if *seed* looks like a pre-2.0 Electrum seed (word list or raw hex)."""
    from . import old_mnemonic
    words = seed.strip().split()
    try:
        old_mnemonic.mn_decode(words)
        uses_electrum_words = True
    except Exception:
        uses_electrum_words = False
    try:
        is_hex = len(bfh(seed)) in (16, 32)
    except Exception:
        is_hex = False
    return is_hex or (uses_electrum_words and len(words) in (12, 24))
def seed_type(x):
    """Classify a seed phrase: 'old', 'standard', 'segwit', '2fa', or ''."""
    if is_old_seed(x):
        return 'old'
    if is_new_seed(x):
        return 'standard'
    if is_new_seed(x, version.SEED_PREFIX_SW):
        return 'segwit'
    if is_new_seed(x, version.SEED_PREFIX_2FA):
        return '2fa'
    return ''
def is_seed(x):
    """True if *x* is recognized as any supported seed type.

    Fix: was a lambda assignment (PEP 8 E731); same callable interface.
    """
    return bool(seed_type(x))
# pywallet openssl private key implementation
def i2o_ECPublicKey(pubkey, compressed=False):
    """SEC1-serialize an ecdsa public key point.

    Compressed form: 0x02/0x03 prefix (parity of y) + 32-byte X.
    Uncompressed form: 0x04 + 32-byte X + 32-byte Y.
    """
    x = pubkey.point.x()
    y = pubkey.point.y()
    if compressed:
        parity_prefix = '03' if y & 1 else '02'
        key = parity_prefix + '%064x' % x
    else:
        key = '04' + '%064x' % x + '%064x' % y
    return bfh(key)
# end pywallet openssl private key implementation
############ functions from pywallet #####################
def hash_160(public_key):
    """RIPEMD160(SHA256(public_key)) — Bitcoin's HASH160."""
    try:
        digest = hashlib.new('ripemd160')
        digest.update(sha256(public_key))
        return digest.digest()
    except BaseException:
        # OpenSSL builds without ripemd160: fall back to the bundled
        # pure-python implementation.
        from . import ripemd
        return ripemd.new(sha256(public_key)).digest()
def hash160_to_b58_address(h160, addrtype, witness_program_version=1):
    """Base58check-encode version byte + hash160.

    NOTE(review): witness_program_version is unused; kept only so existing
    call sites continue to work.
    """
    s = bytes([addrtype])
    s += h160
    return base_encode(s+Hash(s)[0:4], base=58)
def b58_address_to_hash160(addr):
    """Decode a 25-byte base58check address into (version_byte, 20-byte hash160)."""
    raw = base_decode(to_bytes(addr, 'ascii'), 25, base=58)
    return raw[0], raw[1:21]
def hash160_to_p2pkh(h160):
    """Base58 P2PKH address for a 20-byte hash160."""
    return hash160_to_b58_address(h160, ADDRTYPE_P2PKH)
def hash160_to_p2sh(h160):
    """Base58 P2SH address for a 20-byte script hash."""
    return hash160_to_b58_address(h160, ADDRTYPE_P2SH)
def public_key_to_p2pkh(public_key):
    """P2PKH address for a serialized public key (bytes)."""
    h160 = hash_160(public_key)
    return hash160_to_p2pkh(h160)
def hash_to_segwit_addr(h):
    """Bech32-encode witness program *h* under the current HRP, witness version 0."""
    return segwit_addr.encode(SEGWIT_HRP, 0, h)
def public_key_to_p2wpkh(public_key):
    """Native segwit P2WPKH address for a serialized public key (bytes)."""
    witness_program = hash_160(public_key)
    return hash_to_segwit_addr(witness_program)
def script_to_p2wsh(script):
    """Native segwit P2WSH address for a hex witness script."""
    witness_program = sha256(bfh(script))
    return hash_to_segwit_addr(witness_program)
def p2wpkh_nested_script(pubkey):
    """Hex redeem script (OP_0 <pubkeyhash>) that nests P2WPKH inside P2SH."""
    pubkey_hash_hex = bh2u(hash_160(bfh(pubkey)))
    return '00' + push_script(pubkey_hash_hex)
def p2wsh_nested_script(witness_script):
    """Hex redeem script (OP_0 <scripthash>) that nests P2WSH inside P2SH."""
    script_hash_hex = bh2u(sha256(bfh(witness_script)))
    return '00' + push_script(script_hash_hex)
def pubkey_to_address(txin_type, pubkey):
    """Derive the address of hex *pubkey* for the given script type.

    Raises NotImplementedError for unsupported txin types.
    """
    if txin_type == 'p2pkh':
        return public_key_to_p2pkh(bfh(pubkey))
    if txin_type == 'p2wpkh':
        return hash_to_segwit_addr(hash_160(bfh(pubkey)))
    if txin_type == 'p2wpkh-p2sh':
        nested = p2wpkh_nested_script(pubkey)
        return hash160_to_p2sh(hash_160(bfh(nested)))
    raise NotImplementedError(txin_type)
def redeem_script_to_address(txin_type, redeem_script):
    """Derive the address paying to hex *redeem_script* for the given type.

    Raises NotImplementedError for unsupported txin types.
    """
    if txin_type == 'p2sh':
        return hash160_to_p2sh(hash_160(bfh(redeem_script)))
    if txin_type == 'p2wsh':
        return script_to_p2wsh(redeem_script)
    if txin_type == 'p2wsh-p2sh':
        nested = p2wsh_nested_script(redeem_script)
        return hash160_to_p2sh(hash_160(bfh(nested)))
    raise NotImplementedError(txin_type)
def address_to_script(addr):
    """Return the hex scriptPubKey paying to *addr* (bech32 or base58).

    Fixes vs. original: raises ValueError instead of BaseException for an
    unknown address version (BaseException was uncatchable via
    `except Exception`), and the local no longer shadows the hash_160()
    function.
    """
    witver, witprog = segwit_addr.decode(SEGWIT_HRP, addr)
    if witprog is not None:
        assert (0 <= witver <= 16)
        # witness version 1..16 map to OP_1..OP_16; version 0 is OP_0
        OP_n = witver + 0x50 if witver > 0 else 0
        script = bh2u(bytes([OP_n]))
        script += push_script(bh2u(bytes(witprog)))
        return script
    addrtype, h160 = b58_address_to_hash160(addr)
    if addrtype == ADDRTYPE_P2PKH:
        script = '76a9'                          # OP_DUP OP_HASH160
        script += push_script(bh2u(h160))
        script += '88ac'                         # OP_EQUALVERIFY OP_CHECKSIG
    elif addrtype == ADDRTYPE_P2SH:
        script = 'a9'                            # OP_HASH160
        script += push_script(bh2u(h160))
        script += '87'                           # OP_EQUAL
    else:
        raise ValueError('unknown address type')
    return script
def address_to_scripthash(addr):
    """Electrum-protocol scripthash: byte-reversed sha256 of the scriptPubKey."""
    script_bytes = bytes.fromhex(address_to_script(addr))
    digest = sha256(script_bytes)[0:32]
    return bh2u(bytes(reversed(digest)))
def public_key_to_p2pk_script(pubkey):
    """Hex P2PK script: push the pubkey, then OP_CHECKSIG (0xac)."""
    return push_script(pubkey) + 'ac'
# base58 (Bitcoin) and base43 (compact QR) alphabets used by base_encode/decode
__b58chars = b'123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
assert len(__b58chars) == 58

__b43chars = b'0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ$*+-./:'
assert len(__b43chars) == 43
def base_encode(v, base):
    """Encode bytes *v* in base58 (or base43); returns str."""
    assert_bytes(v)
    assert base in (58, 43)
    chars = __b43chars if base == 43 else __b58chars
    # treat the bytes as one big-endian integer
    long_value = int.from_bytes(v, byteorder='big')
    result = bytearray()
    while long_value >= base:
        long_value, mod = divmod(long_value, base)
        result.append(chars[mod])
    result.append(chars[long_value])
    # Bitcoin's leading-zero compression: each leading 0x00 byte becomes one
    # leading first-alphabet character ('1' for base58).
    n_pad = 0
    for byte in v:
        if byte != 0x00:
            break
        n_pad += 1
    result.extend([chars[0]] * n_pad)
    result.reverse()
    return result.decode('ascii')
def base_decode(v, length, base):
    """Decode base58/base43 str-or-bytes *v* into bytes.

    Returns None when *length* is given and the decoded result has a
    different length.
    Bug fix: an unknown character used to be silently folded in as -1
    (bytes.find returning -1), corrupting the decoded value; it now raises
    ValueError.
    """
    v = to_bytes(v, 'ascii')
    assert base in (58, 43)
    chars = __b43chars if base == 43 else __b58chars
    long_value = 0
    for (i, c) in enumerate(v[::-1]):
        digit = chars.find(bytes([c]))
        if digit == -1:
            raise ValueError('forbidden character {} for base {}'.format(c, base))
        long_value += digit * (base**i)
    result = bytearray()
    while long_value >= 256:
        long_value, mod = divmod(long_value, 256)
        result.append(mod)
    result.append(long_value)
    # restore leading zero bytes (encoded as leading first-alphabet chars)
    n_pad = 0
    for c in v:
        if c == chars[0]:
            n_pad += 1
        else:
            break
    result.extend(b'\x00' * n_pad)
    if length is not None and len(result) != length:
        return None
    result.reverse()
    return bytes(result)
def EncodeBase58Check(vchIn):
    """Append the 4-byte double-SHA256 checksum and base58-encode."""
    checksum = Hash(vchIn)[0:4]
    return base_encode(vchIn + checksum, base=58)
def DecodeBase58Check(psz):
    """Base58-decode *psz* and verify its 4-byte checksum.

    Returns the payload without the checksum, or None on mismatch.
    """
    decoded = base_decode(psz, None, base=58)
    payload, csum = decoded[0:-4], decoded[-4:]
    if Hash(payload)[0:4] != csum:
        return None
    return payload
# extended key export format for segwit
# Script-type codes folded into the WIF version byte (code + 128) by
# serialize_privkey / deserialize_privkey.
SCRIPT_TYPES = {
    'p2pkh':0,
    'p2wpkh':1,
    'p2wpkh-p2sh':2,
    'p2sh':5,
    'p2wsh':6,
    'p2wsh-p2sh':7
}
def serialize_privkey(secret, compressed, txin_type):
    """WIF-serialize a 32-byte *secret*, folding the script type into the version byte.

    A trailing 0x01 byte marks a compressed public key, as in standard WIF.
    """
    version_byte = bytes([(SCRIPT_TYPES[txin_type] + 128) & 255])
    compression_suffix = b'\x01' if compressed else b''
    return EncodeBase58Check(version_byte + secret + compression_suffix)
def deserialize_privkey(key):
    """Parse a WIF string into (txin_type, 32-byte secret, compressed_flag).

    Fixes vs. original: raises ValueError instead of BaseException on
    undecodable input, and the minikey check now runs before the (useless in
    that case) base58check decode.
    """
    # whether the pubkey is compressed should be visible from the keystore
    if is_minikey(key):
        return 'p2pkh', minikey_to_private_key(key), True
    vch = DecodeBase58Check(key)
    if not vch:
        raise ValueError('cannot deserialize privkey {}'.format(key))
    txin_type = inv_dict(SCRIPT_TYPES)[vch[0] - 128]
    assert len(vch) in [33, 34]
    compressed = len(vch) == 34
    return txin_type, vch[1:33], compressed
def regenerate_key(pk):
    """Construct an EC_KEY from a 32-byte private key."""
    assert len(pk) == 32
    return EC_KEY(pk)
def GetPubKey(pubkey, compressed=False):
    # Thin wrapper kept for pywallet-compatibility naming.
    return i2o_ECPublicKey(pubkey, compressed)
def GetSecret(pkey):
    # Serialize the private scalar as 32 big-endian bytes.
    return bfh('%064x' % pkey.secret)
def is_compressed(sec):
    """True if the WIF key *sec* encodes a compressed public key."""
    _txin_type, _privkey, compressed = deserialize_privkey(sec)
    return compressed
def public_key_from_private_key(pk, compressed):
    """Hex-encoded public key for the 32-byte secret *pk*."""
    eckey = regenerate_key(pk)
    serialized = GetPubKey(eckey.pubkey, compressed)
    return bh2u(serialized)
def address_from_private_key(sec):
    """Address corresponding to WIF private key *sec*."""
    txin_type, privkey, compressed = deserialize_privkey(sec)
    pubkey_hex = public_key_from_private_key(privkey, compressed)
    return pubkey_to_address(txin_type, pubkey_hex)
def is_segwit_address(addr):
    """True if *addr* bech32-decodes under the current human-readable prefix."""
    _witver, witprog = segwit_addr.decode(SEGWIT_HRP, addr)
    return witprog is not None
def is_b58_address(addr):
    """True if *addr* is a well-formed base58check P2PKH/P2SH address."""
    try:
        addrtype, h160 = b58_address_to_hash160(addr)
    except Exception:
        return False
    if addrtype not in (ADDRTYPE_P2PKH, ADDRTYPE_P2SH):
        return False
    # round-trip re-encode to reject malformed checksums/padding
    return addr == hash160_to_b58_address(h160, addrtype)
def is_address(addr):
    """True for any recognized address form (segwit bech32 or base58)."""
    if is_segwit_address(addr):
        return True
    return is_b58_address(addr)
def is_private_key(key):
    """True if *key* parses as a WIF private key.

    Fix: the bare `except:` (which also swallowed SystemExit and
    KeyboardInterrupt) is narrowed to Exception; the original
    `return k is not False` was always True on success and is simplified.
    """
    try:
        deserialize_privkey(key)
        return True
    except Exception:
        return False
########### end pywallet functions #######################
def is_minikey(text):
    """Heuristic test for a Casascius-style minikey.

    Minikeys are typically 22 or 30 characters, but any length of 20 or more
    is permitted: the key must start with 'S', be base58, and sha256 of the
    text suffixed with '?' must begin with a zero byte.
    """
    if len(text) < 20 or not text.startswith('S'):
        return False
    if not all(ord(c) in __b58chars for c in text):
        return False
    return sha256(text + '?')[0] == 0x00
def minikey_to_private_key(text):
    # The minikey's secret is simply sha256 of its text.
    return sha256(text)
from ecdsa.ecdsa import curve_secp256k1, generator_secp256k1
from ecdsa.curves import SECP256k1
from ecdsa.ellipticcurve import Point
from ecdsa.util import string_to_number, number_to_string
def msg_magic(message):
    """Wrap *message* in the standard Bitcoin signed-message envelope."""
    prefix = b"\x18Bitcoin Signed Message:\n"
    return prefix + bfh(var_int(len(message))) + message
def verify_message(address, sig, message):
    """Verify compact signature *sig* over *message* against *address*.

    Returns True on success; returns False (never raises) on any failure.
    """
    assert_bytes(sig, message)
    try:
        h = Hash(msg_magic(message))
        public_key, compressed = pubkey_from_signature(sig, h)
        # check public key using the address: the recovered pubkey must map
        # to the given address under one of the supported script types.
        pubkey = point_to_ser(public_key.pubkey.point, compressed)
        for txin_type in ['p2pkh','p2wpkh','p2wpkh-p2sh']:
            addr = pubkey_to_address(txin_type, bh2u(pubkey))
            if address == addr:
                break
        else:
            # for/else: no script type produced a matching address
            raise Exception("Bad signature")
        # check message
        public_key.verify_digest(sig[1:], h, sigdecode = ecdsa.util.sigdecode_string)
        return True
    except Exception as e:
        print_error("Verification error: {0}".format(e))
        return False
def encrypt_message(message, pubkey):
    """Module-level convenience wrapper around EC_KEY.encrypt_message."""
    return EC_KEY.encrypt_message(message, bfh(pubkey))
def chunks(l, n):
    """Split sequence *l* into consecutive pieces of length *n* (last piece
    may be shorter)."""
    pieces = []
    for start in range(0, len(l), n):
        pieces.append(l[start:start + n])
    return pieces
def ECC_YfromX(x, curved=curve_secp256k1, odd=True):
    """Solve the curve equation for y given x.

    Returns [y, offset], where offset is the smallest non-negative integer
    such that x+offset lies on the curve and y has the requested parity
    (odd=True selects the odd root). Raises if no point is found within
    128 offsets.
    """
    _p = curved.p()
    _a = curved.a()
    _b = curved.b()
    for offset in range(128):
        Mx = x + offset
        # Short Weierstrass form: y^2 = x^3 + a*x + b (mod p).  The original
        # expression computed x^3 + a*x^2 + (b % p), which only gave correct
        # results because a == 0 for secp256k1; this is the general form.
        My2 = (pow(Mx, 3, _p) + _a * Mx + _b) % _p
        # For p % 4 == 3 (true of secp256k1), a square root mod p is
        # My2^((p+1)/4) mod p.
        My = pow(My2, (_p + 1) // 4, _p)
        if curved.contains_point(Mx, My):
            if odd == bool(My & 1):
                return [My, offset]
            return [_p - My, offset]
    raise Exception('ECC_YfromX: No Y found')
def negative_point(P):
    """Return -P: the point mirrored across the x-axis."""
    return Point(P.curve(), P.x(), -P.y(), P.order())
def point_to_ser(P, comp=True):
    """SEC1-serialize point *P*: 33 bytes compressed, 65 uncompressed."""
    if comp:
        parity_prefix = '%02x' % (2 + (P.y() & 1))
        return bfh(parity_prefix + '%064x' % P.x())
    return bfh('04' + '%064x' % P.x() + '%064x' % P.y())
def ser_to_point(Aser):
    """Deserialize a SEC1-encoded secp256k1 point (compressed or not)."""
    curve = curve_secp256k1
    generator = generator_secp256k1
    _r = generator.order()
    # 0x04 = uncompressed; 0x02/0x03 = compressed with even/odd y parity.
    assert Aser[0] in [0x02, 0x03, 0x04]
    if Aser[0] == 0x04:
        return Point( curve, string_to_number(Aser[1:33]), string_to_number(Aser[33:]), _r )
    Mx = string_to_number(Aser[1:])
    # Recover y from x; the parity byte selects between the two roots.
    return Point( curve, Mx, ECC_YfromX(Mx, curve, Aser[0] == 0x03)[0], _r )
class MyVerifyingKey(ecdsa.VerifyingKey):
    """VerifyingKey extended with public-key recovery from a recoverable
    signature (numbered comments follow SEC1 section 4.1.6 steps)."""
    @classmethod
    def from_signature(klass, sig, recid, h, curve):
        """ See http://www.secg.org/download/aid-780/sec1-v2.pdf, chapter 4.1.6 """
        from ecdsa import util, numbertheory
        from . import msqr
        curveFp = curve.curve
        G = curve.generator
        order = G.order()
        # extract r,s from signature
        r, s = util.sigdecode_string(sig, order)
        # 1.1
        x = r + (recid//2) * order
        # 1.3
        alpha = ( x * x * x + curveFp.a() * x + curveFp.b() ) % curveFp.p()
        beta = msqr.modular_sqrt(alpha, curveFp.p())
        y = beta if (beta - recid) % 2 == 0 else curveFp.p() - beta
        # 1.4 the constructor checks that nR is at infinity
        R = Point(curveFp, x, y, order)
        # 1.5 compute e from message:
        e = string_to_number(h)
        minus_e = -e % order
        # 1.6 compute Q = r^-1 (sR - eG)
        inv_r = numbertheory.inverse_mod(r,order)
        Q = inv_r * ( s * R + minus_e * G )
        return klass.from_public_point( Q, curve )
def pubkey_from_signature(sig, h):
    """Recover (verifying_key, compressed_flag) from a 65-byte compact
    signature over digest *h*."""
    if len(sig) != 65:
        raise Exception("Wrong encoding")
    header = sig[0]
    if header < 27 or header >= 35:
        raise Exception("Bad encoding")
    # Header bytes 31..34 indicate a compressed public key.
    compressed = header >= 31
    if compressed:
        header -= 4
    recid = header - 27
    vk = MyVerifyingKey.from_signature(sig[1:], recid, h, curve=SECP256k1)
    return vk, compressed
class MySigningKey(ecdsa.SigningKey):
    """Enforce low S values in signatures"""
    def sign_number(self, number, entropy=None, k=None):
        # Canonicalize: if s falls in the upper half of the group order,
        # substitute order - s so the produced signature is low-S.
        curve = SECP256k1
        G = curve.generator
        order = G.order()
        r, s = ecdsa.SigningKey.sign_number(self, number, entropy, k)
        if s > order//2:
            s = order - s
        return r, s
class EC_KEY(object):
    """secp256k1 keypair: Bitcoin signed-message signing/verification and
    ECIES ('BIE1') encryption/decryption."""
    def __init__( self, k ):
        # k: raw 32-byte private key; stored as the integer secret exponent.
        secret = string_to_number(k)
        self.pubkey = ecdsa.ecdsa.Public_key( generator_secp256k1, generator_secp256k1 * secret )
        self.privkey = ecdsa.ecdsa.Private_key( self.pubkey, secret )
        self.secret = secret
    def get_public_key(self, compressed=True):
        """Hex-encoded SEC1 serialization of the public point."""
        return bh2u(point_to_ser(self.pubkey.point, compressed))
    def sign(self, msg_hash):
        """Deterministic (RFC 6979-style) signature over a message digest."""
        private_key = MySigningKey.from_secret_exponent(self.secret, curve = SECP256k1)
        public_key = private_key.get_verifying_key()
        signature = private_key.sign_digest_deterministic(msg_hash, hashfunc=hashlib.sha256, sigencode = ecdsa.util.sigencode_string)
        # Sanity check: the signature must verify against our own pubkey.
        assert public_key.verify_digest(signature, msg_hash, sigdecode = ecdsa.util.sigdecode_string)
        return signature
    def sign_message(self, message, is_compressed):
        """Produce a 65-byte compact signature, brute-forcing the recovery id."""
        message = to_bytes(message, 'utf8')
        signature = self.sign(Hash(msg_magic(message)))
        # Try each of the four possible recovery ids until one verifies.
        for i in range(4):
            sig = bytes([27 + i + (4 if is_compressed else 0)]) + signature
            try:
                self.verify_message(sig, message)
                return sig
            except Exception as e:
                continue
        else:
            raise Exception("error: cannot sign message")
    def verify_message(self, sig, message):
        """Raise unless *sig* is this key's valid signature over *message*."""
        assert_bytes(message)
        h = Hash(msg_magic(message))
        public_key, compressed = pubkey_from_signature(sig, h)
        # check public key
        if point_to_ser(public_key.pubkey.point, compressed) != point_to_ser(self.pubkey.point, compressed):
            raise Exception("Bad signature")
        # check message
        public_key.verify_digest(sig[1:], h, sigdecode = ecdsa.util.sigdecode_string)
    # ECIES encryption/decryption methods; AES-128-CBC with PKCS7 is used as the cipher; hmac-sha256 is used as the mac
    @classmethod
    def encrypt_message(self, message, pubkey):
        """ECIES-encrypt *message* to *pubkey*; returns a base64 'BIE1' blob."""
        assert_bytes(message)
        pk = ser_to_point(pubkey)
        if not ecdsa.ecdsa.point_is_valid(generator_secp256k1, pk.x(), pk.y()):
            raise Exception('invalid pubkey')
        # Fresh ephemeral key per message; ECDH shared point -> SHA512 KDF.
        ephemeral_exponent = number_to_string(ecdsa.util.randrange(pow(2,256)), generator_secp256k1.order())
        ephemeral = EC_KEY(ephemeral_exponent)
        ecdh_key = point_to_ser(pk * ephemeral.privkey.secret_multiplier)
        key = hashlib.sha512(ecdh_key).digest()
        iv, key_e, key_m = key[0:16], key[16:32], key[32:]
        ciphertext = aes_encrypt_with_iv(key_e, iv, message)
        ephemeral_pubkey = bfh(ephemeral.get_public_key(compressed=True))
        encrypted = b'BIE1' + ephemeral_pubkey + ciphertext
        # Encrypt-then-MAC: the tag covers magic + ephemeral pubkey + ciphertext.
        mac = hmac.new(key_m, encrypted, hashlib.sha256).digest()
        return base64.b64encode(encrypted + mac)
    def decrypt_message(self, encrypted):
        """Inverse of encrypt_message; raises InvalidPassword on MAC mismatch."""
        encrypted = base64.b64decode(encrypted)
        # Minimum size: 4 magic + 33 pubkey + 16 ciphertext + 32 mac.
        if len(encrypted) < 85:
            raise Exception('invalid ciphertext: length')
        magic = encrypted[:4]
        ephemeral_pubkey = encrypted[4:37]
        ciphertext = encrypted[37:-32]
        mac = encrypted[-32:]
        if magic != b'BIE1':
            raise Exception('invalid ciphertext: invalid magic bytes')
        try:
            ephemeral_pubkey = ser_to_point(ephemeral_pubkey)
        except AssertionError as e:
            raise Exception('invalid ciphertext: invalid ephemeral pubkey')
        if not ecdsa.ecdsa.point_is_valid(generator_secp256k1, ephemeral_pubkey.x(), ephemeral_pubkey.y()):
            raise Exception('invalid ciphertext: invalid ephemeral pubkey')
        ecdh_key = point_to_ser(ephemeral_pubkey * self.privkey.secret_multiplier)
        key = hashlib.sha512(ecdh_key).digest()
        iv, key_e, key_m = key[0:16], key[16:32], key[32:]
        # Verify the MAC before decrypting.
        if mac != hmac.new(key_m, encrypted[:-32], hashlib.sha256).digest():
            raise InvalidPassword()
        return aes_decrypt_with_iv(key_e, iv, ciphertext)
###################################### BIP32 ##############################
# Hex string of n random bits.  NOTE(review): padded to 32 hex chars but can
# exceed that width for n > 128 -- confirm intended usage.
random_seed = lambda n: "%032x"%ecdsa.util.randrange( pow(2,n) )
# Top bit of a BIP32 child index marks hardened ("prime") derivation.
BIP32_PRIME = 0x80000000
def get_pubkeys_from_secret(secret):
    """Return (uncompressed_pubkey, compressed_pubkey) bytes for a raw secret."""
    # public key
    signing_key = ecdsa.SigningKey.from_string(secret, curve=SECP256k1)
    verifying_key = signing_key.get_verifying_key()
    K = verifying_key.to_string()
    K_compressed = GetPubKey(verifying_key.pubkey, True)
    return K, K_compressed
# Child private key derivation function (from master private key)
# k = master private key (32 bytes)
# c = master chain code (extra entropy for key derivation) (32 bytes)
# n = the index of the key we want to derive. (only 32 bits will be used)
# If n is negative (i.e. the 32nd bit is set), the resulting private key's
# corresponding public key can NOT be determined without the master private key.
# However, if n is positive, the resulting private key's corresponding
# public key can be determined without the master private key.
def CKD_priv(k, c, n):
    """BIP32 private child derivation: hardened iff index *n* has its top
    bit set."""
    hardened = n & BIP32_PRIME
    return _CKD_priv(k, c, bfh(rev_hex(int_to_hex(n, 4))), hardened)
def _CKD_priv(k, c, s, is_prime):
    """Core BIP32 CKDpriv: derive (child_key, child_chaincode).

    k: parent private key bytes; c: parent chain code; s: serialized child
    index; is_prime: truthy selects hardened derivation.
    """
    order = generator_secp256k1.order()
    keypair = EC_KEY(k)
    cK = GetPubKey(keypair.pubkey,True)
    # Hardened: HMAC over 0x00||k||index; normal: over compressed_pubkey||index.
    data = bytes([0]) + k + s if is_prime else cK + s
    I = hmac.new(c, data, hashlib.sha512).digest()
    # Child key = (IL + parent_key) mod n, per BIP32.
    k_n = number_to_string( (string_to_number(I[0:32]) + string_to_number(k)) % order , order )
    c_n = I[32:]
    return k_n, c_n
# Child public key derivation function (from public key only)
# K = master public key
# c = master chain code
# n = index of key we want to derive
# This function allows us to find the nth public key, as long as n is
# non-negative. If n is negative, we need the master private key to find it.
def CKD_pub(cK, c, n):
    """BIP32 public child derivation; only non-hardened indexes are possible.

    cK: parent compressed public key; c: parent chain code; n: child index.
    """
    if n & BIP32_PRIME:
        # Hardened derivation needs the private key.  The original bare
        # `raise` here (with no active exception) produced an uninformative
        # RuntimeError; raise an explicit, descriptive exception instead.
        raise Exception("CKD_pub: cannot derive a hardened child from a public key")
    return _CKD_pub(cK, c, bfh(rev_hex(int_to_hex(n, 4))))
# helper function, callable with arbitrary string
def _CKD_pub(cK, c, s):
    """Core BIP32 CKDpub: derive (child_pubkey, child_chaincode)."""
    order = generator_secp256k1.order()
    I = hmac.new(c, cK + s, hashlib.sha512).digest()
    curve = SECP256k1
    # Child point = IL*G + parent_point.
    pubkey_point = string_to_number(I[0:32])*curve.generator + ser_to_point(cK)
    public_key = ecdsa.VerifyingKey.from_public_point( pubkey_point, curve = SECP256k1 )
    c_n = I[32:]
    cK_n = GetPubKey(public_key.pubkey,True)
    return cK_n, c_n
def xprv_header(xtype):
    """4-byte version prefix for an extended private key of type *xtype*."""
    return bfh("%08x" % XPRV_HEADERS[xtype])
def xpub_header(xtype):
    """4-byte version prefix for an extended public key of type *xtype*."""
    return bfh("%08x" % XPUB_HEADERS[xtype])
def serialize_xprv(xtype, c, k, depth=0, fingerprint=b'\x00'*4, child_number=b'\x00'*4):
    """Base58check-encode an extended private key (BIP32 xprv layout)."""
    payload = (xprv_header(xtype) + bytes([depth]) + fingerprint +
               child_number + c + bytes([0]) + k)
    return EncodeBase58Check(payload)
def serialize_xpub(xtype, c, cK, depth=0, fingerprint=b'\x00'*4, child_number=b'\x00'*4):
    """Base58check-encode an extended public key (BIP32 xpub layout)."""
    payload = (xpub_header(xtype) + bytes([depth]) + fingerprint +
               child_number + c + cK)
    return EncodeBase58Check(payload)
def deserialize_xkey(xkey, prv):
    """Decode a base58check extended key into its BIP32 fields.

    Returns (xtype, depth, fingerprint, child_number, chain_code, key):
    key is the 32-byte private key (leading 0x00 pad dropped) when prv is
    true, otherwise the 33-byte compressed public key.
    """
    xkey = DecodeBase58Check(xkey)
    if len(xkey) != 78:
        raise BaseException('Invalid length')
    depth = xkey[4]
    fingerprint = xkey[5:9]
    child_number = xkey[9:13]
    c = xkey[13:13+32]
    header = int('0x' + bh2u(xkey[0:4]), 16)
    headers = XPRV_HEADERS if prv else XPUB_HEADERS
    if header not in headers.values():
        # NOTE(review): message says 'xpub' even when deserializing an xprv.
        raise BaseException('Invalid xpub format', hex(header))
    # Reverse-lookup the xtype name from the version header value.
    xtype = list(headers.keys())[list(headers.values()).index(header)]
    n = 33 if prv else 32
    K_or_k = xkey[13+n:]
    return xtype, depth, fingerprint, child_number, c, K_or_k
def deserialize_xpub(xkey):
    """Decode an extended public key; see deserialize_xkey for the fields."""
    return deserialize_xkey(xkey, False)
def deserialize_xprv(xkey):
    """Decode an extended private key; see deserialize_xkey for the fields."""
    return deserialize_xkey(xkey, True)
def xpub_type(x):
    """The script-type tag encoded in an xpub's version header."""
    return deserialize_xpub(x)[0]
def is_xpub(text):
    """Return True iff *text* parses as an extended public key.

    Narrowed from a bare `except:` (which also swallowed BaseException,
    e.g. KeyboardInterrupt) to `except Exception`.
    """
    try:
        deserialize_xpub(text)
        return True
    except Exception:
        return False
def is_xprv(text):
    """Return True iff *text* parses as an extended private key.

    Narrowed from a bare `except:` (which also swallowed BaseException)
    to `except Exception`.
    """
    try:
        deserialize_xprv(text)
        return True
    except Exception:
        return False
def xpub_from_xprv(xprv):
    """Compute the xpub matching *xprv* (same depth/fingerprint/child number)."""
    xtype, depth, fingerprint, child_number, c, k = deserialize_xprv(xprv)
    _K, cK = get_pubkeys_from_secret(k)
    return serialize_xpub(xtype, c, cK, depth, fingerprint, child_number)
def bip32_root(seed, xtype):
    """Derive the BIP32 master node from *seed*; returns (xprv, xpub)."""
    I = hmac.new(b"Bitcoin seed", seed, hashlib.sha512).digest()
    master_k, master_c = I[0:32], I[32:]
    _K, cK = get_pubkeys_from_secret(master_k)
    return (serialize_xprv(xtype, master_c, master_k),
            serialize_xpub(xtype, master_c, cK))
def xpub_from_pubkey(xtype, cK):
    """Wrap a bare compressed pubkey in an xpub with a zero chain code."""
    assert cK[0] in (0x02, 0x03)
    return serialize_xpub(xtype, b'\x00'*32, cK)
def bip32_derivation(s):
    """Yield integer child indexes for a path like "m/44'/0'/0"; a trailing
    apostrophe marks a hardened index."""
    assert s.startswith('m/')
    for part in s[2:].split('/'):
        if not part:
            continue
        if part[-1] == "'":
            yield int(part[:-1]) + BIP32_PRIME
        else:
            yield int(part)
def is_bip32_derivation(x):
    """Return True iff *x* parses as a BIP32 derivation path.

    Exhausts the generator without building a throwaway list, and narrows
    the original bare `except:` to `except Exception` so BaseException
    (e.g. KeyboardInterrupt) is not swallowed.
    """
    try:
        for _ in bip32_derivation(x):
            pass
        return True
    except Exception:
        return False
def bip32_private_derivation(xprv, branch, sequence):
    """Derive (xprv, xpub) at path *sequence* relative to *branch*."""
    assert sequence.startswith(branch)
    if branch == sequence:
        return xprv, xpub_from_xprv(xprv)
    xtype, depth, fingerprint, child_number, c, k = deserialize_xprv(xprv)
    sequence = sequence[len(branch):]
    for n in sequence.split('/'):
        if n == '': continue
        # Trailing apostrophe marks a hardened index.
        i = int(n[:-1]) + BIP32_PRIME if n[-1] == "'" else int(n)
        parent_k = k
        k, c = CKD_priv(k, c, i)
        depth += 1
    # Fingerprint and child number reflect the final derivation step.
    _, parent_cK = get_pubkeys_from_secret(parent_k)
    fingerprint = hash_160(parent_cK)[0:4]
    child_number = bfh("%08X"%i)
    K, cK = get_pubkeys_from_secret(k)
    xpub = serialize_xpub(xtype, c, cK, depth, fingerprint, child_number)
    xprv = serialize_xprv(xtype, c, k, depth, fingerprint, child_number)
    return xprv, xpub
def bip32_public_derivation(xpub, branch, sequence):
    """Derive the xpub at path *sequence* relative to *branch*.

    Only non-hardened indexes are supported: int(n) raises on "n'".
    """
    xtype, depth, fingerprint, child_number, c, cK = deserialize_xpub(xpub)
    assert sequence.startswith(branch)
    sequence = sequence[len(branch):]
    for n in sequence.split('/'):
        if n == '': continue
        i = int(n)
        parent_cK = cK
        cK, c = CKD_pub(cK, c, i)
        depth += 1
    # Fingerprint and child number reflect the final derivation step.
    fingerprint = hash_160(parent_cK)[0:4]
    child_number = bfh("%08X"%i)
    return serialize_xpub(xtype, c, cK, depth, fingerprint, child_number)
def bip32_private_key(sequence, k, chain):
    """Walk the child indexes in *sequence* from (k, chain); return the
    final derived private key."""
    for child_index in sequence:
        k, chain = CKD_priv(k, chain, child_index)
    return k
| {
"content_hash": "a31442fc6b267db1c9a536e5271100f9",
"timestamp": "",
"source": "github",
"line_count": 975,
"max_line_length": 133,
"avg_line_length": 30.575384615384614,
"alnum_prop": 0.6144040790312301,
"repo_name": "dabura667/electrum",
"id": "33ae75bd703c4d3bc54f09156edab13356df1958",
"size": "30978",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/bitcoin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "GLSL",
"bytes": "289"
},
{
"name": "HTML",
"bytes": "3867"
},
{
"name": "Makefile",
"bytes": "837"
},
{
"name": "NSIS",
"bytes": "7125"
},
{
"name": "PHP",
"bytes": "404"
},
{
"name": "Protocol Buffer",
"bytes": "2373"
},
{
"name": "Python",
"bytes": "1313010"
},
{
"name": "Shell",
"bytes": "6888"
}
],
"symlink_target": ""
} |
import os
import json
import csv
from jinja2 import Markup
from db import export_sql
def render(vis, request, info):
    """Build the context for the explore-field view.

    Reads query parameters from *request*, runs a grouped count query via
    export_sql (cached to CSV), converts the CSV to the JSON structure the
    front end expects, and fills *info* (message, datfile, title) in place.
    """
    info["message"] = []
    reload = int(request.args.get("reload", 0))
    table = request.args.get("table", '')
    where = request.args.get("where", '1=1')
    field = request.args.get("field", '')
    view = request.args.get("view", '')
    start = request.args.get("start", '0')  # start at 0
    limit = request.args.get("limit", '1000')
    # NOTE(review): args.get returns a single value; the .append calls below
    # suggest these were meant to be lists (args.getlist?) -- confirm.
    sfield = request.args.get("sfield", [])
    pfield = request.args.get("pfield", [])
    if len(sfield) == 1:
        sfield.append(' count(*) ')
        pfield.append(' count(*) ')
    if len(table) == 0 or len(field) < 1:
        info["message"].append("table or field missing.")
        info["message_class"] = "failure"
    else:
        # NOTE(review): parameters are interpolated directly into the SQL
        # string; presumably export_sql/callers sanitize them -- verify.
        sql = "select %s as n from %s where %s group by 1 order by n desc limit %s offset %s"\
            % (field, table, where, limit, start)
        (datfile, reload, result) = export_sql(sql, vis.config, reload, None, view)
        if len(result) > 0:
            info["message"].append(result)
            info["message_class"] = "failure"
        else:
            info["message_class"] = "success"
            if reload > 0:
                info["message"].append("Loaded fresh.")
            else:
                info["message"].append("Loading from cache. Use reload=1 to reload.")
            info["datfile"] = datfile
            json_file = datfile.replace('csv', 'json')
            if reload > 0 or (not os.path.exists(os.path.realpath(json_file))):
                # csv to json conversion
                # NOTE(review): the reader's file handle is never closed, and
                # the bare except hides all conversion errors, not just a
                # missing CSV.
                try:
                    reader = csv.DictReader(open(datfile, 'r'), fieldnames=( "name", "size" ))
                    out = [obj for obj in reader if len(obj['name']) > 0]
                    with open(json_file, 'w') as jf:
                        json.dump({"name": 'flare', "children": out}, jf)
                except:
                    info["message"].append("Couldn't find CSV file")
                    info["message_class"] = "failure"
            info["datfile"] = json_file
            info["title"] = "FIELDS: <em>%s</em> from <br />TABLE: <em>%s</em>"\
                % (', '.join(pfield[:2]), table)
            info["title"] = Markup(info["title"])
    info["message"] = Markup(''.join('<p>%s</p>' % m for m in info["message"] if len(m) > 0))
"content_hash": "42e7984546fc06b3b0c002e0216e468c",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 94,
"avg_line_length": 36.857142857142854,
"alnum_prop": 0.5361757105943152,
"repo_name": "garthee/gnot",
"id": "722061b2698e75295a0980d09c1245baa29ec905",
"size": "2322",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modules/explore_field.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "83634"
},
{
"name": "JavaScript",
"bytes": "866514"
},
{
"name": "Python",
"bytes": "113699"
}
],
"symlink_target": ""
} |
__author__ = 'Jwely'
from build_tex_figs_by_run import *
from build_tex_tables import *
from synthesize_piv_uncertainty_images import *
from test_piv_dynamic_plots import *
from test_piv_plots import *
| {
"content_hash": "a6452d85285359da2be151b5a949a031",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 47,
"avg_line_length": 29,
"alnum_prop": 0.7536945812807881,
"repo_name": "Jwely/pivpr",
"id": "e0c1adfa04a2d4aa9a6740497c34e6d35bf478a2",
"size": "203",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "py/controler/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "MATLAB",
"bytes": "137127"
},
{
"name": "Python",
"bytes": "183435"
},
{
"name": "TeX",
"bytes": "1267545"
}
],
"symlink_target": ""
} |
import pika
from subprocess import check_output, CalledProcessError
import json
import os
from pymongo import MongoClient
import logging
logging.basicConfig()
def create_channel(connection, exchange, queue, key, type = 'direct'):
    """Open a channel on *connection*, declare *exchange* and *queue*, and
    bind them with routing key *key*.  Returns the channel."""
    channel = connection.channel()
    channel.exchange_declare(exchange=exchange, type=type)
    channel.queue_declare(queue)
    channel.queue_bind(exchange=exchange, queue=queue, routing_key=key)
    return channel
# Shared connections: MongoDB stores HAR results; RabbitMQ carries work items.
dbcon = MongoClient()
# phantomjs script used to capture a HAR file for a URL.
NETSNIFF_UTIL = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'tools', 'netsniff.js')
exchange = 'perfmonitor'
queue_read = 'perf'
key_read = 'perftest'
queue_write = 'reinject'
key_write = 'perfreinject'
queuecon = pika.BlockingConnection(pika.ConnectionParameters(
        'localhost'))
channelread = create_channel(queuecon, exchange, queue_read, key_read)
channelwrite = create_channel(queuecon, exchange, queue_write, key_write)
print ' [*] Waiting for messages. To exit press CTRL+C'
def sendback(msg):
    """Requeue *msg* on the reinject queue while it still has requests left."""
    if msg['nb'] > 0:
        channelwrite.basic_publish(exchange=exchange,
                routing_key=key_write,
                body=json.dumps(msg))
        print ' [x] Message sent to queue with count ', msg['nb']
def send_ack(ch, method):
    """Positively acknowledge the delivery so RabbitMQ drops it."""
    ch.basic_ack(delivery_tag = method.delivery_tag)
    print " [x] Acknoledgment sent"
def send_nack(ch, method):
    """Negatively acknowledge the delivery (typically causing redelivery)."""
    ch.basic_nack(delivery_tag = method.delivery_tag)
    print " [x] Nacknoledgment sent"
def callback(ch, method, properties, body):
    """Consume one work item: run phantomjs/netsniff on the URL, store the
    resulting HAR in MongoDB, and requeue the item with a decremented
    count.  Any failure path falls through to a negative ack."""
    print " [x] Received %r" % (body,)
    content = json.loads(body)
    # if the current count reached 0
    # there are no more requests left
    if content['nb'] <= 0:
        # acknowledge the msg and quit
        send_ack(ch, method)
        print ' [x] No more requests left'
        return
    print ' [x] Executing browser', content['url']
    try:
        harcontent = check_output(['phantomjs', NETSNIFF_UTIL, content['url'], content['agent']])
    except CalledProcessError:
        print ' [x] Sub-process failed'
        harcontent = None
    if harcontent:
        try:
            jscontent = json.loads(harcontent)
        except:
            print ' [x] Unable to parse JSON output'
            jscontent = None
        if jscontent:
            # Tag the HAR with the originating site/agent before persisting.
            jscontent['site'] = content['site']
            jscontent['agent'] = content['agent']
            try:
                dbcon.perfmonitor.har.insert(jscontent)
                print ' [x] HAR response saved'
                send_ack(ch, method)
                content['nb'] -= 1
                if content['nb'] > 0:
                    sendback(content)
                return
            except:
                print ' [x] Unable to save HAR response, sending back'
        else:
            print ' [x] Unable to parse HAR file from sub-process, sending back'
    else:
        print ' [x] Unable to generate HAR file, sending back'
    # Reached only on failure: nack so the item is retried.
    send_nack(ch, method)
# Main consume loop; Ctrl-C stops consuming cleanly.
try:
    channelread.basic_consume(callback,
            queue=queue_read)
    channelread.start_consuming()
except pika.exceptions.ConnectionClosed, e:
    print 'Connection closed', e
except KeyboardInterrupt:
    channelread.stop_consuming()
| {
"content_hash": "1d21dc2d4f02238b9c1c1df976b4e5ef",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 97,
"avg_line_length": 28.616071428571427,
"alnum_prop": 0.6249609984399376,
"repo_name": "leibowitz/perfmonitor",
"id": "736337af931ba5eef1359da352707eddb47bd140",
"size": "3227",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bin/receive.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "8833"
},
{
"name": "JavaScript",
"bytes": "585980"
},
{
"name": "PHP",
"bytes": "177733"
},
{
"name": "Python",
"bytes": "10182"
}
],
"symlink_target": ""
} |
'''
Extract _("...") strings for translation and convert to Qt4 stringdefs so that
they can be picked up by Qt linguist.
'''
from subprocess import Popen, PIPE
import glob
import operator
# Generated C++ file that Qt Linguist picks the strings up from.
OUT_CPP="src/qt/hongkongcoinstrings.cpp"
# xgettext emits an empty msgid for the PO header; entries equal to this
# sentinel are skipped when writing output.
EMPTY=['""']
def parse_po(text):
    """
    Parse 'po' format produced by xgettext.
    Return a list of (msgid, msgstr) tuples, where each element is the list
    of raw quoted line fragments making up that field.
    """
    entries = []
    cur_id, cur_str = [], []
    state = None  # None until the first keyword, then 'msgid' or 'msgstr'
    for raw in text.split('\n'):
        line = raw.rstrip('\r')
        if line.startswith('msgid '):
            # A new msgid closes the previous entry (if one was complete).
            if state == 'msgstr':
                entries.append((cur_id, cur_str))
            state = 'msgid'
            cur_id = [line[6:]]
        elif line.startswith('msgstr '):
            state = 'msgstr'
            cur_str = [line[7:]]
        elif line.startswith('"'):
            # Continuation line belongs to whichever field is open.
            if state == 'msgid':
                cur_id.append(line)
            elif state == 'msgstr':
                cur_str.append(line)
    if state == 'msgstr':
        entries.append((cur_id, cur_str))
    return entries
# Collect all translatable sources and extract _("...") strings via xgettext.
files = glob.glob('src/*.cpp') + glob.glob('src/*.h')
# xgettext -n --keyword=_ $FILES
child = Popen(['xgettext','--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
# NOTE(review): under Python 3 communicate() returns bytes; this script
# appears to target Python 2 where `out` is str.
messages = parse_po(out)
f = open(OUT_CPP, 'w')
f.write("""#include <QtGlobal>
// Automatically generated by extract_strings.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *hongkongcoin_strings[] = {\n')
# Sort by msgid for a stable, diff-friendly output file.
messages.sort(key=operator.itemgetter(0))
for (msgid, msgstr) in messages:
    if msgid != EMPTY:
        f.write('QT_TRANSLATE_NOOP("hongkongcoin-core", %s),\n' % ('\n'.join(msgid)))
f.write('};')
f.close()
| {
"content_hash": "bb8158daef17650c3e080a6705776880",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 85,
"avg_line_length": 25.929577464788732,
"alnum_prop": 0.5741444866920152,
"repo_name": "digiwhite/hongkongcoin",
"id": "55c245ba1ece33a71983a9c1a9aa372c6b9d4744",
"size": "1859",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "share/qt/extract_strings_qt.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "94741"
},
{
"name": "C++",
"bytes": "2435947"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "IDL",
"bytes": "13918"
},
{
"name": "Objective-C++",
"bytes": "2734"
},
{
"name": "Python",
"bytes": "3818"
},
{
"name": "Shell",
"bytes": "848"
},
{
"name": "TypeScript",
"bytes": "5260834"
}
],
"symlink_target": ""
} |
import os
from setuptools import setup
# Fallback description used when README.md cannot be read.
LONG_DESCRIPTION = """
Django confirmaction is a battery for confirm actions via email, sms etc.
"""
def long_description():
    """Return the README contents, or LONG_DESCRIPTION when unreadable.

    Fix: the original opened the file without a context manager, leaking
    the file handle; `with` guarantees it is closed.
    """
    readme_path = os.path.join(os.path.dirname(__file__), "README.md")
    try:
        with open(readme_path) as readme_file:
            return readme_file.read()
    except IOError:
        return LONG_DESCRIPTION
# Package metadata for PyPI / pip installation.
setup(
    name = "django-confirmaction",
    version = "0.0.3",
    author = "Aleksandr Aibulatov",
    author_email = "zap.aibulatov@gmail.com",
    description = "Django battery for confirm some action via email, sms, etc",
    license = "BSD",
    keywords = "django, battery, confirm action",
    url = "https://github.com/Zapix/django-confirmaction",
    # Only the app package and its migrations are shipped.
    packages=[
        'confirmaction',
        'confirmaction.migrations'
    ],
    long_description=long_description(),
    install_requires=[
        'django>=1.6',
        'south>=0.8.4',
        'pycrypto'
    ],
    classifiers=[
        "Development Status :: 2 - Pre-Alpha",
        "Framework :: Django",
        "Programming Language :: Python :: 2.7",
        "Topic :: Utilities",
        "License :: OSI Approved :: BSD License",
    ],
)
"content_hash": "1faed46d51e79b2c7c52e62a1a0be4b6",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 79,
"avg_line_length": 26.53488372093023,
"alnum_prop": 0.5977212971078002,
"repo_name": "Zapix/django-confirmaction",
"id": "b42fd0f5569032e424a53a9b28823d42885001d6",
"size": "1165",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "46390"
},
{
"name": "Shell",
"bytes": "282"
}
],
"symlink_target": ""
} |
import copy
import json
import pytest
import tempfile
import subprocess
@pytest.fixture(scope="session")
def built_json_file():
    """Run the build script once per session and return its parsed JSON.

    Fix: the scope must be passed as the `scope` keyword --
    `pytest.fixture("session")` passes it positionally, which modern pytest
    rejects (positional arguments to fixture() were removed).
    """
    with tempfile.NamedTemporaryFile(mode="r") as f:
        # Build into the temp file, then parse it while the handle is open.
        subprocess.call(["python", "scripts/build.py", "-V", "v1.0.1", "-f", f.name, "src"])
        data = json.load(f)
    return data
@pytest.fixture
def built_json(built_json_file):
    # Deep copy per test so tests can mutate the data without affecting the
    # session-scoped original.
    return copy.deepcopy(built_json_file)
| {
"content_hash": "788e2896a31c276a14e669e12a320747",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 92,
"avg_line_length": 18.681818181818183,
"alnum_prop": 0.6715328467153284,
"repo_name": "virtool/virtool-database",
"id": "4adf2a93592c8d3a2077f7b28a12f68907605abc",
"size": "411",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/conftest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8733"
}
],
"symlink_target": ""
} |
"""Color functionality for Linux and Mac systems."""
| {
"content_hash": "2984f58f3996ea6bf595074a4815473c",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 52,
"avg_line_length": 53,
"alnum_prop": 0.7358490566037735,
"repo_name": "MisanthropicBit/colorise",
"id": "22b23c70f47b1a30ab885295e1d7efa996bea5b6",
"size": "53",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/colorise/nix/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "69152"
}
],
"symlink_target": ""
} |
# Counts used in the comparisons below.
people = 20
cats = 30
dogs = 15
# Each `if` is evaluated independently; when the values are equal neither
# of the strict comparisons prints.
if people < cats:
    print("Too many cats! The world is doomed!")
if people > cats:
    print("Not many cats! The world is saved!")
if people < dogs:
    print("The world is drooled on!")
if people > dogs:
    print("The world is dry!")
# After this increment people == dogs, so all three checks below fire.
dogs += 5
if people >= dogs:
    print("People are greater than or equal to dogs.")
if people <= dogs:
    print("People are less than or equal to dogs.")
if people == dogs:
    print("People are dogs")
| {
"content_hash": "ed89f8aec27f7628101603d57ce1fe4d",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 54,
"avg_line_length": 18.5,
"alnum_prop": 0.6340956340956341,
"repo_name": "davvi/Hardway3",
"id": "7bc5fa82f6d263b7f79d7b62f8e4115e66f62b81",
"size": "504",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ex29.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "24375"
}
],
"symlink_target": ""
} |
import torch
from examples.speech_recognition.data.replabels import pack_replabels
from fairseq import utils
from fairseq.criterions import FairseqCriterion, register_criterion
@register_criterion("asg_loss")
class ASGCriterion(FairseqCriterion):
    """fairseq criterion wrapping flashlight's ASG (Auto Segmentation) loss,
    with optional LinSeg target initialization for the first N updates."""
    @staticmethod
    def add_args(parser):
        """Register ASG-specific command line arguments."""
        group = parser.add_argument_group("ASG Loss")
        group.add_argument(
            "--asg-transitions-init",
            help="initial diagonal value of transition matrix",
            type=float,
            default=0.0,
        )
        group.add_argument(
            "--max-replabel", help="maximum # of replabels", type=int, default=2
        )
        group.add_argument(
            "--linseg-updates",
            help="# of training updates to use LinSeg initialization",
            type=int,
            default=0,
        )
        group.add_argument(
            "--hide-linseg-messages",
            help="hide messages about LinSeg initialization",
            action="store_true",
        )
    def __init__(
        self,
        task,
        silence_token,
        asg_transitions_init,
        max_replabel,
        linseg_updates,
        hide_linseg_messages,
    ):
        from flashlight.lib.sequence.criterion import ASGLoss, CriterionScaleMode
        super().__init__(task)
        self.tgt_dict = task.target_dictionary
        self.eos = self.tgt_dict.eos()
        # Silence label is optional: only used if present in the dictionary.
        self.silence = (
            self.tgt_dict.index(silence_token)
            if silence_token in self.tgt_dict
            else None
        )
        self.max_replabel = max_replabel
        num_labels = len(self.tgt_dict)
        self.asg = ASGLoss(num_labels, scale_mode=CriterionScaleMode.TARGET_SZ_SQRT)
        # Learnable transition matrix, initialized to a scaled identity.
        self.asg.trans = torch.nn.Parameter(
            asg_transitions_init * torch.eye(num_labels), requires_grad=True
        )
        # Non-trainable parameter so the LinSeg counter persists in checkpoints.
        self.linseg_progress = torch.nn.Parameter(
            torch.tensor([0], dtype=torch.int), requires_grad=False
        )
        self.linseg_maximum = linseg_updates
        self.linseg_message_state = "none" if hide_linseg_messages else "start"
    @classmethod
    def build_criterion(cls, args, task):
        """Factory used by fairseq to build the criterion from parsed args."""
        return cls(
            task,
            args.silence_token,
            args.asg_transitions_init,
            args.max_replabel,
            args.linseg_updates,
            args.hide_linseg_messages,
        )
    def linseg_step(self):
        """Advance the LinSeg counter; return True while LinSeg is active."""
        if not self.training:
            return False
        if self.linseg_progress.item() < self.linseg_maximum:
            if self.linseg_message_state == "start":
                print("| using LinSeg to initialize ASG")
                self.linseg_message_state = "finish"
            self.linseg_progress.add_(1)
            return True
        elif self.linseg_message_state == "finish":
            print("| finished LinSeg initialization")
            self.linseg_message_state = "none"
        return False
    def replace_eos_with_silence(self, tgt):
        """Drop a trailing EOS, substituting the silence label when configured
        (and not already preceded by silence)."""
        if tgt[-1] != self.eos:
            return tgt
        elif self.silence is None or (len(tgt) > 1 and tgt[-2] == self.silence):
            return tgt[:-1]
        else:
            return tgt[:-1] + [self.silence]
    def forward(self, model, sample, reduce=True):
        """Compute the loss for the given sample.
        Returns a tuple with three elements:
        1) the loss
        2) the sample size, which is used as the denominator for the gradient
        3) logging outputs to display while training
        """
        net_output = model(**sample["net_input"])
        emissions = net_output["encoder_out"].transpose(0, 1).contiguous()
        B = emissions.size(0)
        T = emissions.size(1)
        device = emissions.device
        target = torch.IntTensor(B, T)
        target_size = torch.IntTensor(B)
        using_linseg = self.linseg_step()
        for b in range(B):
            initial_target_size = sample["target_lengths"][b].item()
            if initial_target_size == 0:
                raise ValueError("target size cannot be zero")
            tgt = sample["target"][b, :initial_target_size].tolist()
            tgt = self.replace_eos_with_silence(tgt)
            tgt = pack_replabels(tgt, self.tgt_dict, self.max_replabel)
            tgt = tgt[:T]
            if using_linseg:
                # LinSeg: spread the target labels uniformly over the T frames.
                tgt = [tgt[t * len(tgt) // T] for t in range(T)]
            target[b][: len(tgt)] = torch.IntTensor(tgt)
            target_size[b] = len(tgt)
        loss = self.asg.forward(emissions, target.to(device), target_size.to(device))
        if reduce:
            loss = torch.sum(loss)
        sample_size = (
            sample["target"].size(0) if self.args.sentence_avg else sample["ntokens"]
        )
        logging_output = {
            "loss": utils.item(loss.data) if reduce else loss.data,
            "ntokens": sample["ntokens"],
            "nsentences": sample["target"].size(0),
            "sample_size": sample_size,
        }
        return loss, sample_size, logging_output
    @staticmethod
    def aggregate_logging_outputs(logging_outputs):
        """Aggregate logging outputs from data parallel training."""
        loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
        ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
        nsentences = sum(log.get("nsentences", 0) for log in logging_outputs)
        sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
        agg_output = {
            "loss": loss_sum / nsentences,
            "ntokens": ntokens,
            "nsentences": nsentences,
            "sample_size": sample_size,
        }
        return agg_output
| {
"content_hash": "d5d2da38a1d3ad70238706b9fb6a0400",
"timestamp": "",
"source": "github",
"line_count": 163,
"max_line_length": 85,
"avg_line_length": 34.77300613496933,
"alnum_prop": 0.5730416372618208,
"repo_name": "pytorch/fairseq",
"id": "41f50bbd70388ce723f2d316d4e9776bcd6be3c9",
"size": "5870",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "examples/speech_recognition/criterions/ASG_loss.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "21106"
},
{
"name": "Cuda",
"bytes": "38166"
},
{
"name": "Cython",
"bytes": "13294"
},
{
"name": "Lua",
"bytes": "4210"
},
{
"name": "Python",
"bytes": "3699357"
},
{
"name": "Shell",
"bytes": "2182"
}
],
"symlink_target": ""
} |
from redhat_support_tool.helpers.confighelper import _
from redhat_support_tool.symptoms import AnalyzerPlugin
try:
    from pyparsing import Word, Suppress, Combine, SkipTo, Regex, Literal
except ImportError:
    # Fall back to the bundled pyparsing copy when the system one is absent.
    from redhat_support_tool.tools.pyparsing import \
        Word, Suppress, Combine, SkipTo, Regex, Literal
__author__ = 'Dan Varga <dvarga@redhat.com>'
class BtMinusA(AnalyzerPlugin):
    '''
    Analyzer plugin whose pyparsing expression matches crash `bt -a`
    output embedded in a text file, from the "KERNEL:" header line through
    the terminating "crash> quit".
    '''
    @classmethod
    def get_symptom(self):
        # Combine everything from a line containing "KERNEL:" up to (and
        # consuming, but suppressing) the "crash> quit" terminator.
        quitline = Literal("crash> quit")
        analyze_expression = Combine(Regex(".*KERNEL:") +
                                SkipTo(Suppress(quitline), include=True))
        return analyze_expression
    @classmethod
    def get_desc(cls):
        '''
        Simple explanation on what the expression tries to find at a high level
        '''
        return _('This analyzer attempts to locate bt -a output.')
    @classmethod
    def get_sample(cls):
        '''
        A sample pattern that would produce a match
        '''
        return '''
Matched Pattern Example:
KERNEL: /var/lib/redhat-support-tool/debugkernels/2.6.32-279.el6.x86_65-vmlinux
DUMPFILE: /tmp/task_2FNtazG.vmcore [PARTIAL DUMP]
CPUS: 2
DATE: Thu Nov 29 14:29:44 2012
...
R10: 00000000ffffffff R11: 0000000000000246 R12: 0000000000000002
R13: 00007f072163f780 R14: 0000000000000002 R15: 0000000000000000
ORIG_RAX: 0000000000000001 CS: 0033 SS: 002b
crash> quit
'''
    @classmethod
    def get_name(cls):
        '''
        Human readable name for the expression
        '''
        return _('Crash bt -a Analyzer')
| {
"content_hash": "9c244112eec5a09811fbd422a50ba1d9",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 79,
"avg_line_length": 30.490909090909092,
"alnum_prop": 0.654144305307096,
"repo_name": "redhataccess/redhat-support-tool",
"id": "9f901cdb27066cc888a5c2b1962487f76ef743dc",
"size": "2294",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/redhat_support_tool/symptoms/btminusa.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Gettext Catalog",
"bytes": "54422"
},
{
"name": "Python",
"bytes": "539404"
},
{
"name": "Shell",
"bytes": "26"
}
],
"symlink_target": ""
} |
"""The setup script."""
from setuptools import find_packages, setup
def _read_text(path):
    """Return the whole contents of *path* as a single string."""
    with open(path) as stream:
        return stream.read()


def _read_requirements(path, skip=0):
    """Return the requirement lines of *path*, skipping *skip* header lines."""
    with open(path) as stream:
        return stream.read().splitlines()[skip:]


readme = _read_text('README.md')
history = _read_text('HISTORY.rst')

requirements = _read_requirements('requirements.txt')
# The first line of requirements_dev.txt repeats the general requirements,
# so it is skipped.
requirements_dev = _read_requirements('requirements_dev.txt', skip=1)
requirements_doc = _read_requirements('requirements_doc.txt')
requirements_vectorfile = _read_requirements('requirements_vectorfile.txt')
requirements_proxy = _read_requirements('requirements_proxy.txt')

setup(
    name='pydov',
    version='2.2.0',
    description=("A Python package to download data from Databank Ondergrond "
                 "Vlaanderen (DOV)."),
    long_description=readme,
    long_description_content_type='text/markdown',
    author="DOV-Vlaanderen",
    author_email='dov@vlaanderen.be',
    url='https://github.com/DOV-Vlaanderen/pydov',
    packages=find_packages(include=['pydov']),
    include_package_data=True,
    install_requires=requirements,
    license="MIT license",
    zip_safe=False,
    keywords='pydov',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Natural Language :: Dutch',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Programming Language :: Python :: 3.10',
        'Topic :: Scientific/Engineering',
    ],
    test_suite='tests',
    tests_require=requirements_dev,
    extras_require={
        'docs': requirements_doc,
        'devs': requirements_dev,
        'vectorfile': requirements_vectorfile,
        'proxy': requirements_proxy,
    },
)
| {
"content_hash": "42909df653d5d6a2dd17a512da4753a7",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 78,
"avg_line_length": 33.9344262295082,
"alnum_prop": 0.6280193236714976,
"repo_name": "DOV-Vlaanderen/pydov",
"id": "e52e52b2f48df71da8f2164c9217dba6e537c0e8",
"size": "2117",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "724748"
}
],
"symlink_target": ""
} |
from django.utils.translation import ugettext_lazy as _
import horizon
from horizon.dashboards.settings import dashboard
class Password(horizon.Panel):
    """Settings-dashboard panel for changing the current user's password."""

    slug = 'password'
    name = _("Password")


# Make the panel available under the Settings dashboard.
dashboard.Settings.register(Password)
"content_hash": "c8d0aa3749ee298ee2f0d819372b61d5",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 55,
"avg_line_length": 21.818181818181817,
"alnum_prop": 0.7708333333333334,
"repo_name": "kionetworks/openstack-dashboard-essex",
"id": "2820276ec62241c33eace1c554373f5e6fcaa21d",
"size": "240",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openstack_dashboard/overrides/password/panel.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "415799"
},
{
"name": "JavaScript",
"bytes": "133515"
},
{
"name": "Python",
"bytes": "72486"
}
],
"symlink_target": ""
} |
from setuptools import setup
# Package metadata, kept in a module-level mapping and splatted into setup().
setup_kwargs = dict(
    name='mach9',
    author='38elements',
    url='https://github.com/silver-castle/mach9',
    description='a web application framework based ASGI and async/await.',
    version='0.0.4',
    license='MIT License',
    packages=['mach9'],
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Environment :: Web Environment',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3.6',
    ],
    install_requires=[
        'httptools',
        'uvloop',
        'ujson',
        'aiofiles',
        'websockets',
    ],
)

setup(**setup_kwargs)
| {
"content_hash": "d905f9d377be56d7031b023fcf45f850",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 77,
"avg_line_length": 25.185185185185187,
"alnum_prop": 0.5558823529411765,
"repo_name": "silver-castle/mach9",
"id": "749b3a4e2b462cf635274dd29b5394b782c40f52",
"size": "680",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "235646"
}
],
"symlink_target": ""
} |
from functools import partial
# from ..config_new import BTE_FILTERS
BTE_FILTERS = ["nodeDegree", "ngd", "drugPhase", "survivalProbability"]


def filter_response(res, criteria):
    """
    Filter API response based on filtering criteria

    :param res: API Response (a list of record dicts)
    :param criteria: filtering criteria -- a mapping of field name to a dict
        keyed by operator ("=", ">", "<") with the value to compare against

    Behaviour notes (all preserved from the original implementation):
      * non-list or empty responses, and non-dict criteria, are returned
        unchanged
      * for fields NOT in ``BTE_FILTERS`` an "=" test takes precedence and
        suppresses any ">"/"<" tests; for fields in ``BTE_FILTERS`` the "="
        operator is ignored
      * a list-valued record field is collapsed (mutating the record!) to its
        first element before comparison
      * the record's value is coerced to the criterion value's type; records
        whose values are falsy, missing, or fail coercion/comparison are
        dropped
    """
    def _keep(record, key, wanted, op):
        # Falsy or absent values never match (mirrors the original
        # truthiness check on record.get(key)).
        if not record.get(key):
            return False
        if isinstance(record.get(key), list):
            # NOTE: intentionally mutates the record, as the original did.
            record[key] = record[key][0]
        try:
            observed = type(wanted)(record[key])
            if op == "=":
                return observed == wanted
            if op == ">":
                return observed > wanted
            if op == "<":
                return observed < wanted
            return False
        except (ValueError, TypeError):
            return False

    if not isinstance(res, list) or not res:
        return res
    if not isinstance(criteria, dict):
        return res
    for field, spec in criteria.items():
        if not isinstance(spec, dict):
            continue
        if field not in BTE_FILTERS and "=" in spec:
            # Equality wins for ordinary fields and skips ">"/"<".
            res = [rec for rec in res if _keep(rec, field, spec["="], "=")]
            continue
        if ">" in spec:
            res = [rec for rec in res if _keep(rec, field, spec[">"], ">")]
        elif "<" in spec:
            res = [rec for rec in res if _keep(rec, field, spec["<"], "<")]
    return res
| {
"content_hash": "1c968154d1f411ccc33c57c85047fd80",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 87,
"avg_line_length": 32.416666666666664,
"alnum_prop": 0.4370179948586118,
"repo_name": "biothings/biothings_explorer",
"id": "332e03ecc6051c0506ed891c5c653041cb38df1a",
"size": "1945",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "biothings_explorer/call_apis/filter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "2401542"
},
{
"name": "Jupyter Notebook",
"bytes": "14468811"
},
{
"name": "Python",
"bytes": "585318"
}
],
"symlink_target": ""
} |
class SC2ReaderError(Exception):
    """Base exception for all sc2reader errors."""
    pass


class SC2ReaderLocalizationError(SC2ReaderError):
    pass


class CorruptTrackerFileError(SC2ReaderError):
    pass


class MPQError(SC2ReaderError):
    pass


class NoMatchingFilesError(SC2ReaderError):
    pass


class MultipleMatchingFilesError(SC2ReaderError):
    pass


class ReadError(SC2ReaderError):
    def __init__(self, msg, type, location, replay=None, game_events=None, buffer=None):
        """Store the read-failure context for post-mortem inspection.

        ``game_events`` now defaults to ``None`` and is replaced by a fresh
        list per instance: the old ``game_events=[]`` default was a single
        shared list mutated across every ReadError.  The old
        ``self.__dict__.update(locals())`` also stored a ``self``->``self``
        reference cycle; attributes are now assigned explicitly.
        """
        self.msg = msg
        # `type` shadows the builtin; the parameter name is kept for
        # backward compatibility with existing callers.
        self.type = type
        self.location = location
        self.replay = replay
        self.game_events = [] if game_events is None else game_events
        self.buffer = buffer
        super().__init__(msg)

    def __str__(self):
        return f"{self.msg}, Type: {self.type}"


class ParseError(SC2ReaderError):
    pass


class ProcessError(SC2ReaderError):
    pass


class FileError(SC2ReaderError):
    pass
| {
"content_hash": "70da4ef643d19009377e76a02a437494",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 86,
"avg_line_length": 16.72093023255814,
"alnum_prop": 0.6981919332406119,
"repo_name": "ggtracker/sc2reader",
"id": "5dc20e81926971611cbcf34993e183ecc80e2002",
"size": "719",
"binary": false,
"copies": "1",
"ref": "refs/heads/upstream",
"path": "sc2reader/exceptions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "6677"
},
{
"name": "Python",
"bytes": "433764"
}
],
"symlink_target": ""
} |
"""Behavioural FIFOs for myhdl.
"""
__author__ = 'Uri Nix'
__all__ = ['DCFifo', 'SCFifo']
### Module Globals ###########################################################
from Queue import Queue
from myhdl import always, instances
### Building Block Units #####################################################
class DCFifo(object):
    """Dual-clock FIFO behavioural model with rdy/valid handshakes.

    NOTE(review): both clock domains share one plain Python ``Queue``, so
    this models behaviour only -- there is no clock-domain-crossing
    synchronisation; confirm it is never used as a synthesis template.
    """
    def __init__(self, depth):
        """
        Dual Clock FIFO using rdy/valid.
        Parameters:
        -----------
        depth: int
            maximum size of FIFO.
        Returns:
        --------
        None
        """
        # Writes are refused once qsize reaches depth-1, one slot short of
        # the Queue's capacity -- presumably margin for the one-cycle rdy
        # registration below; TODO confirm.
        self.depth_m1 = depth - 1
        self.queue = Queue(maxsize=depth)
    def generate(self,
                 i_wrclk, o_wrrdy, i_wrvalid, i_wrdata,
                 i_rdclk, i_rdrdy, o_rdvalid, o_rddata,
                 o_fullness):
        """
        Generate instance.
        Ports:
        ------
        i_*clk: Signal(bool)
            access clock
        o_wrrdy: Signal(bool)
            FIFO ready to accept data from source on next cycle
        i_rdrdy: Signal(bool)
            Sink ready to accept data from FIFO on next cycle
        i_wrdata, o_rddata: Signal(any)
        i_wrvalid, o_rdvalid: Signal(bool)
            signify that applicable data lines can be sampled
        o_fullness: Signal(int)
            number of elements in FIFO
        """
        # Write side: runs on the write clock; the push below tests the
        # current (previous-cycle) value of o_wrrdy, not the value assigned
        # to .next on this edge.
        @always(i_wrclk.posedge)
        def wr_access():
            o_wrrdy.next = (self.queue.qsize() < self.depth_m1)
            if i_wrvalid and o_wrrdy:
                self.queue.put_nowait(i_wrdata.val)
                o_fullness.next = self.queue.qsize()
        # Read side: runs on the read clock; o_rdvalid is asserted only on
        # cycles where a word was actually popped.
        @always(i_rdclk.posedge)
        def rd_access():
            if i_rdrdy and (not self.queue.empty()):
                o_rddata.next = self.queue.get_nowait()
                o_fullness.next = self.queue.qsize()
                o_rdvalid.next = True
            else:
                o_rdvalid.next = False
        return instances()
class SCFifo(object):
    """Single-clock FIFO behavioural model with rdy/valid handshakes.

    Structurally identical to DCFifo except that both the read and write
    processes are clocked by the same ``i_clk``.
    """
    def __init__(self, depth):
        """
        Single Clock FIFO using rdy/valid.
        Parameters:
        -----------
        depth: int
            maximum size of FIFO.
        Returns:
        --------
        None
        """
        # Writes are refused once qsize reaches depth-1, one slot short of
        # the Queue's capacity -- presumably margin for the one-cycle rdy
        # registration below; TODO confirm.
        self.depth_m1 = depth - 1
        self.queue = Queue(maxsize=depth)
    def generate(self, i_clk,
                 o_wrrdy, i_wrvalid, i_wrdata,
                 i_rdrdy, o_rdvalid, o_rddata,
                 o_fullness):
        """
        Generate instance.
        Ports:
        ------
        i_clk: Signal(bool)
            access clock
        o_wrrdy: Signal(bool)
            FIFO ready to accept data from source on next cycle
        i_rdrdy: Signal(bool)
            Sink ready to accept data from FIFO on next cycle
        i_wrdata, o_rddata: Signal(any)
        i_wrvalid, o_rdvalid: Signal(bool)
            signify that applicable data lines can be sampled
        o_fullness: Signal(int)
            number of elements in FIFO
        """
        # Write process: the push tests the current (previous-cycle) value
        # of o_wrrdy, not the value assigned to .next on this edge.
        @always(i_clk.posedge)
        def wr_access():
            o_wrrdy.next = (self.queue.qsize() < self.depth_m1)
            if i_wrvalid and o_wrrdy:
                self.queue.put_nowait(i_wrdata.val)
                o_fullness.next = self.queue.qsize()
        # Read process: o_rdvalid is asserted only on cycles where a word
        # was actually popped.
        @always(i_clk.posedge)
        def rd_access():
            if i_rdrdy and (not self.queue.empty()):
                o_rddata.next = self.queue.get_nowait()
                o_fullness.next = self.queue.qsize()
                o_rdvalid.next = True
            else:
                o_rdvalid.next = False
        return instances()
| {
"content_hash": "1cf95f1458d3e46a04130119007de5f7",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 78,
"avg_line_length": 28.118110236220474,
"alnum_prop": 0.49453934472136657,
"repo_name": "unixie/myhdl_arch",
"id": "5561d1c865c103ba6acd3a7ef2b50415eee370c8",
"size": "3571",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "myhdl_arch/fifos/_fifos.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "21759"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from ruamel.yaml.emitter import Emitter
from ruamel.yaml.serializer import Serializer
from ruamel.yaml.representer import Representer, SafeRepresenter, BaseRepresenter, \
RoundTripRepresenter
from ruamel.yaml.resolver import Resolver, BaseResolver, VersionedResolver
if False: # MYPY
from typing import Any, Dict, List, Union # NOQA
from ruamel.yaml.compat import StreamType, VersionType # NOQA
__all__ = ['BaseDumper', 'SafeDumper', 'Dumper', 'RoundTripDumper']
class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver):
    """Dumper facade combining the base emitter/serializer/representer/resolver.

    Each base class is initialised explicitly (not via ``super()``) with the
    subset of keyword arguments it understands; ``dumper=self`` wires the
    components back to this facade.

    NOTE(review): ``top_level_colon_align`` and ``prefix_colon`` are accepted
    here but never forwarded to any base class (compare RoundTripDumper,
    which passes them to Emitter) -- confirm that is intentional.
    """
    def __init__(self, stream,
                 default_style=None, default_flow_style=None,
                 canonical=None, indent=None, width=None,
                 allow_unicode=None, line_break=None,
                 encoding=None, explicit_start=None, explicit_end=None,
                 version=None, tags=None, block_seq_indent=None,
                 top_level_colon_align=None, prefix_colon=None):
        # type: (Any, StreamType, Any, Any, bool, Union[None, int], Union[None, int], bool, Any, Any, Union[None, bool], Union[None, bool], Any, Any, Any, Any, Any) -> None  # NOQA
        Emitter.__init__(self, stream, canonical=canonical,
                         indent=indent, width=width,
                         allow_unicode=allow_unicode, line_break=line_break,
                         block_seq_indent=block_seq_indent,
                         dumper=self)
        Serializer.__init__(self, encoding=encoding,
                            explicit_start=explicit_start,
                            explicit_end=explicit_end,
                            version=version, tags=tags,
                            dumper=self)
        BaseRepresenter.__init__(self, default_style=default_style,
                                 default_flow_style=default_flow_style,
                                 dumper=self)
        BaseResolver.__init__(self, loadumper=self)
class SafeDumper(Emitter, Serializer, SafeRepresenter, Resolver):
    """Dumper facade using SafeRepresenter (basic Python types only).

    Each base class is initialised explicitly with the keyword arguments it
    understands; ``dumper=self`` wires the components back to this facade.

    NOTE(review): ``top_level_colon_align`` and ``prefix_colon`` are accepted
    but not forwarded by this class -- confirm that is intentional.
    """
    def __init__(self, stream,
                 default_style=None, default_flow_style=None,
                 canonical=None, indent=None, width=None,
                 allow_unicode=None, line_break=None,
                 encoding=None, explicit_start=None, explicit_end=None,
                 version=None, tags=None, block_seq_indent=None,
                 top_level_colon_align=None, prefix_colon=None):
        # type: (Any, StreamType, Any, Any, bool, Union[None, int], Union[None, int], bool, Any, Any, Union[None, bool], Union[None, bool], Any, Any, Any, Any, Any) -> None  # NOQA
        Emitter.__init__(self, stream, canonical=canonical,
                         indent=indent, width=width,
                         allow_unicode=allow_unicode, line_break=line_break,
                         block_seq_indent=block_seq_indent,
                         dumper=self)
        Serializer.__init__(self, encoding=encoding,
                            explicit_start=explicit_start,
                            explicit_end=explicit_end,
                            version=version, tags=tags,
                            dumper=self)
        SafeRepresenter.__init__(self, default_style=default_style,
                                 default_flow_style=default_flow_style,
                                 dumper=self)
        Resolver.__init__(self, loadumper=self)
class Dumper(Emitter, Serializer, Representer, Resolver):
    """Default dumper facade using the full Representer.

    Each base class is initialised explicitly with the keyword arguments it
    understands; ``dumper=self`` wires the components back to this facade.

    NOTE(review): ``top_level_colon_align`` and ``prefix_colon`` are accepted
    but not forwarded by this class -- confirm that is intentional.
    """
    def __init__(self, stream,
                 default_style=None, default_flow_style=None,
                 canonical=None, indent=None, width=None,
                 allow_unicode=None, line_break=None,
                 encoding=None, explicit_start=None, explicit_end=None,
                 version=None, tags=None, block_seq_indent=None,
                 top_level_colon_align=None, prefix_colon=None):
        # type: (Any, StreamType, Any, Any, bool, Union[None, int], Union[None, int], bool, Any, Any, Union[None, bool], Union[None, bool], Any, Any, Any, Any, Any) -> None  # NOQA
        Emitter.__init__(self, stream, canonical=canonical,
                         indent=indent, width=width,
                         allow_unicode=allow_unicode, line_break=line_break,
                         block_seq_indent=block_seq_indent,
                         dumper=self)
        Serializer.__init__(self, encoding=encoding,
                            explicit_start=explicit_start,
                            explicit_end=explicit_end,
                            version=version, tags=tags,
                            dumper=self)
        Representer.__init__(self, default_style=default_style,
                             default_flow_style=default_flow_style,
                             dumper=self)
        Resolver.__init__(self, loadumper=self)
class RoundTripDumper(Emitter, Serializer, RoundTripRepresenter, VersionedResolver):
    """Dumper facade that preserves round-trip (comment/format) information.

    Unlike the other dumpers in this module, this one forwards
    ``top_level_colon_align`` and ``prefix_colon`` to the Emitter.

    NOTE(review): VersionedResolver is initialised with ``loader=self``
    whereas the other dumpers pass ``loadumper=self`` to their resolver --
    confirm the differing keyword is intended.
    """
    def __init__(self, stream,
                 default_style=None, default_flow_style=None,
                 canonical=None, indent=None, width=None,
                 allow_unicode=None, line_break=None,
                 encoding=None, explicit_start=None, explicit_end=None,
                 version=None, tags=None, block_seq_indent=None,
                 top_level_colon_align=None, prefix_colon=None):
        # type: (Any, StreamType, Any, bool, Union[None, int], Union[None, int], bool, Any, Any, Union[None, bool], Union[None, bool], Any, Any, Any, Any, Any) -> None  # NOQA
        Emitter.__init__(self, stream, canonical=canonical,
                         indent=indent, width=width,
                         allow_unicode=allow_unicode, line_break=line_break,
                         block_seq_indent=block_seq_indent,
                         top_level_colon_align=top_level_colon_align,
                         prefix_colon=prefix_colon,
                         dumper=self)
        Serializer.__init__(self, encoding=encoding,
                            explicit_start=explicit_start,
                            explicit_end=explicit_end,
                            version=version, tags=tags,
                            dumper=self)
        RoundTripRepresenter.__init__(self, default_style=default_style,
                                      default_flow_style=default_flow_style,
                                      dumper=self)
        VersionedResolver.__init__(self, loader=self)
| {
"content_hash": "8a4e7cbee480d028dbea9c096468e3c5",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 181,
"avg_line_length": 56.06956521739131,
"alnum_prop": 0.5527295285359801,
"repo_name": "Samuel789/MediPi",
"id": "b43260c334b6d4e9a90bb8e5770f0b19f8b5b3ae",
"size": "6465",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "MedManagementWeb/env/lib/python3.5/site-packages/ruamel/yaml/dumper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "10012"
},
{
"name": "CSS",
"bytes": "847678"
},
{
"name": "HTML",
"bytes": "4238145"
},
{
"name": "Java",
"bytes": "1942198"
},
{
"name": "JavaScript",
"bytes": "2308166"
},
{
"name": "PHP",
"bytes": "1684"
},
{
"name": "Python",
"bytes": "66091"
},
{
"name": "Ruby",
"bytes": "1183"
},
{
"name": "Shell",
"bytes": "17053"
}
],
"symlink_target": ""
} |
import matplotlib.pyplot as plt
import numpy as np
# Load the data: columns 1-4 hold the four room-corner temperature sensors.
data = np.loadtxt('room-temperature.csv', skiprows=1, usecols=[1, 2, 3, 4],
                  delimiter=',')
front_left = data[:, 0]
front_right = data[:, 1]
back_left = data[:, 2]
back_right = data[:, 3]

# Plot each sensor as a function of time, one subplot per sensor.
figur, axarr = plt.subplots(4, 1, figsize=(10, 10))
sensor_series = [front_left, front_right, back_left, back_right]
sensor_titles = ['Front left', 'Front right', 'Back left', 'Back right']
for axis, series, title in zip(axarr, sensor_series, sensor_titles):
    axis.plot(series)
    axis.set_title(title)
    axis.set_ylabel('Temperature (AU)')
    axis.set_xlim([0, len(front_left)])
    axis.set_ylim([290, 300])
# Only the bottom subplot carries the shared time axis label.
axarr[3].set_xlabel('Time (AU)')
figur.subplots_adjust(hspace=0.5)
plt.savefig('room.pdf')
plt.close()
# Realiza PCA, retorna los valores propios, vectores propios y los datos en la base de los vectores propios (scores)
# Tambien imprime los mensajes necesarios
def pca(data_matrix):
    '''Principal component analysis.

    data_matrix must be the data matrix by COLUMNS i.e. a column is a
    variable and a row is an observation.

    Returns
    -------
    values : eigenvalues of the covariance matrix sorted in decreasing
        order, so values[0] belongs to the first principal component.
    vectors : matching eigenvectors as columns (vectors[:, k] is the k-th
        principal direction).
    scores : the observations expressed in the eigenvector basis.

    Also prints the covariance matrix, the two leading components and the
    fraction of variance each one explains.
    '''
    # np.cov expects variables as rows, so hand it the transposed matrix.
    cov_matrix = np.cov(data_matrix.T)
    print('La matriz de covarianza es:')
    print(cov_matrix)
    print('')
    # Fix: np.linalg.eig returns eigenpairs in no particular order, so the
    # "principal" components printed/plotted before were not guaranteed to
    # be the dominant ones.  The covariance matrix is symmetric, so use
    # eigh (real eigenvalues, ascending) and flip to descending order.
    values, vectors = np.linalg.eigh(cov_matrix)
    order = np.argsort(values)[::-1]
    values = values[order]
    vectors = vectors[:, order]
    print('Las dos componentes principales en orden descendente son:')
    print(vectors[:, 0], ' con valor ', values[0])
    print(vectors[:, 1], ' con valor ', values[1])
    total_values = np.sum(values)
    print('\nLa primera componente explica el',
          values[0] / total_values * 100, '% de la varianza')
    print('La segunda componente explica el',
          values[1] / total_values * 100, '% de la varianza')
    # Project the observations (rows) onto the eigenvector basis.
    scores = np.dot(data_matrix, vectors)
    return values, vectors, scores
# Escala y centra los datos, retorna la matriz de datos reescalada.
def center_scale(data):
    """Standardize *data* column-wise and return the rescaled matrix.

    Each column has its mean subtracted and is divided by its population
    standard deviation (np.std default, ddof=0 -- same as the original
    element-wise loop).

    Improvements over the loop version: fully vectorized, and the input is
    promoted to float first, so integer input no longer silently truncates
    the standardized values (np.zeros_like kept the integer dtype before).
    """
    data = np.asarray(data, dtype=float)
    return (data - data.mean(axis=0)) / data.std(axis=0)
# Grafica la variable j vs la variable i del los datos junto con las dos componentes principales proyectadas en
# el plano de dichas variables.
def plot_eigen(data, i, j, vectors, labels, name):
    """Scatter variable *j* against variable *i* and overlay the first two
    principal components projected onto that plane, saving the figure to
    *name*."""
    xi = data[:, i]
    xj = data[:, j]
    plt.scatter(xi, xj)
    span = np.linspace(min(xi), max(xi))
    # Each eigenvector appears in this plane as a line through the origin
    # with slope vectors[j, k] / vectors[i, k].
    plt.plot(span, span * vectors[j, 0] / vectors[i, 0],
             linewidth=1.0, c='r', label='Primer vector')
    plt.plot(span, span * vectors[j, 1] / vectors[i, 1],
             linewidth=1.0, c='y', label='Segundo Vector')
    plt.title(labels[j] + ' vs. ' + labels[i])
    plt.xlabel(labels[i])
    plt.ylabel(labels[j])
    plt.ylim(min(xj) - 1, max(xj) + 1)
    plt.legend(loc=0)
    plt.savefig(name)
    plt.close()
# Standardize the sensor data (column-wise center and scale).
data_matrix = center_scale(data)
# Run the principal component analysis.
values, vectors, scores = pca(data_matrix)
# Plot the eigenvectors over pairs of standardized variables.
labels = ['Front Left', 'Front Right', 'Back left', 'Back right']
plot_eigen(data_matrix, 0, 1, vectors, labels, 'pca_fr_fl.pdf')
plot_eigen(data_matrix, 0, 2, vectors, labels, 'pca_bl_fl.pdf')
| {
"content_hash": "1b9b3d401964338f59504f37fbf020cd",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 116,
"avg_line_length": 35.53465346534654,
"alnum_prop": 0.6642518807467261,
"repo_name": "ComputoCienciasUniandes/MetodosComputacionalesLaboratorio",
"id": "2ac46f043a5bd5671e83205ada9e2f191d03d05b",
"size": "3617",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "2017-1/lab7_EJ2/lab7SOL_EJ2/pca_room.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "17061"
},
{
"name": "C++",
"bytes": "6440"
},
{
"name": "Jupyter Notebook",
"bytes": "3003146"
},
{
"name": "Python",
"bytes": "28797"
},
{
"name": "Shell",
"bytes": "2731"
},
{
"name": "TeX",
"bytes": "67938"
}
],
"symlink_target": ""
} |
import deuce
from deuce.model import Block
from deuce.model import Vault
from deuce.util import log as logging
from deuce.drivers.metadatadriver import ConstraintError
import deuce.transport.wsgi.errors as errors
logger = logging.getLogger(__name__)
class BlockStorage(object):
    """Model for a block as it exists in storage, including blocks that are
    orphaned (present in storage but unknown to metadata)."""

    @staticmethod
    def get(vault_id):
        """Return a BlockStorage for *vault_id*, or None if the vault is
        not found."""
        vault = Vault.get(vault_id)
        if not vault:
            return None
        return BlockStorage(vault)

    def __init__(self, vault, storage_block_id=None):
        self.Vault = vault
        self.storage_block_id = storage_block_id

    @property
    def vault_id(self):
        """ID of the vault this storage block belongs to."""
        return self.Vault.id

    def get_metadata_id(self, storage_block_id):
        """Look up the metadata block id mapped to *storage_block_id*."""
        return deuce.metadata_driver.get_block_metadata_id(self.vault_id,
                                                           storage_block_id)

    def delete_block(self, storage_block_id):
        """Delete an orphaned storage block.

        Raises ConstraintError when metadata still references the block.
        """
        metadata_id = self.get_metadata_id(storage_block_id)
        if not metadata_id:
            # No metadata references this storage object; safe to delete.
            return deuce.storage_driver.delete_block(self.vault_id,
                                                     storage_block_id)
        block = Block(self.vault_id, metadata_id,
                      storage_block_id=storage_block_id)
        raise ConstraintError(
            deuce.context.project_id,
            self.vault_id,
            "Storage ID: {0} has {1} "
            "reference(s) in metadata".format(storage_block_id,
                                              block.get_ref_count()))

    def head_block(self, storage_block_id):
        """Return descriptive info for a storage block, or None if absent.

        The dict reports reference count/modification time, both ids, the
        block length, and whether the block is orphaned from metadata.
        """
        # Fetching through get_block verifies the block exists in storage.
        block = self.get_block(storage_block_id)
        if block is None:
            logger.debug('Unable to locate block {0}'.format(
                storage_block_id))
            return None
        if block.metadata_block_id is None:
            # Orphaned: only storage knows about this block.
            return {
                'reference': {'count': 0, 'modified': None},
                'id': {'storage': storage_block_id, 'metadata': None},
                'length': deuce.storage_driver.get_block_object_length(
                    self.vault_id, storage_block_id),
                'orphaned': True,
            }
        return {
            'reference': {
                'count': block.get_ref_count(),
                'modified': block.get_ref_modified(),
            },
            'id': {
                'storage': storage_block_id,
                'metadata': block.metadata_block_id,
            },
            'length': block.get_block_length(),
            'orphaned': False,
        }

    def get_block(self, storage_block_id):
        """Get a block directly from storage; None when storage has no
        such object."""
        metadata_id = self.get_metadata_id(storage_block_id)
        obj = deuce.storage_driver.get_block_obj(self.vault_id,
                                                 storage_block_id)
        if not obj:
            return None
        return Block(self.vault_id, metadata_id, obj=obj,
                     storage_block_id=storage_block_id)

    def get_blocks_generator(self, marker, limit):
        """Lazily yield a BlockStorage per storage block id in the vault."""
        storage_ids = deuce.storage_driver.get_vault_block_list(
            self.vault_id, limit, marker)
        # Vault.get is re-evaluated per element, matching the original
        # generator expression.
        return (BlockStorage(Vault.get(self.vault_id), sid)
                for sid in storage_ids)
| {
"content_hash": "84ba3752e03cb53479bfe294069c5bdd",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 78,
"avg_line_length": 34.834782608695654,
"alnum_prop": 0.5184722915626561,
"repo_name": "rackerlabs/deuce",
"id": "2b5fa78cbde19c84d4f4af6ca56f6f17e0956087",
"size": "4006",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "deuce/model/blockstorage.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "536028"
}
],
"symlink_target": ""
} |
"""
cn_client.tests
~~~~~~~~~~~~~~~~~~~
Tests of the ConsumerNotebook client API
"""
import ConfigParser
import json
import unittest
from functions import get_products, get_lists, get_users
# Test settings (API key and endpoint) are read from a local tests.cfg so
# credentials are not hard-coded in the repository.
config = ConfigParser.ConfigParser()
config.read('tests.cfg')
API_KEY = config.get('Params', 'api_key')
BASE_URL = config.get("Params", 'base_url')
def lprint(response):
    """Used to save html responses to help debug.

    Writes ``response.content`` to ``results.html`` in the working
    directory.
    """
    # Context manager guarantees the file is closed even if the write
    # raises (the old open/close pair leaked the handle on error).
    with open('results.html', 'w') as f:
        f.write(response.content)
class TestFunctions(unittest.TestCase):
    """Live tests for the ConsumerNotebook API client functions.

    Only test_https is active; the triple-quoted string below holds a set of
    disabled tests (a no-op string expression, not executed).

    NOTE(review): ``assertEquals`` is a deprecated alias of ``assertEqual``;
    also, test_https exercises a plain http:// base URL -- confirm the name
    and scheme are both intended.
    """
    def test_https(self):
        # Let's get a list of products
        response = get_products(API_KEY, base_url="http://consumernotebook.com/api/v1/")
        self.assertEquals(response.status_code, 200)
        # We should just have one page
        response = get_products(API_KEY)
        self.assertEquals(response.status_code, 200)
    """
    def test_lots(self):
        response = get_lists(API_KEY, USERNAME, PASSWORD, base_url=BASE_URL)
        self.assertEquals(response.status_code, 200)
    def test_grids(self):
        # Let's get a list of products
        response = get_grids(API_KEY, USERNAME, PASSWORD, base_url=BASE_URL)
        self.assertEquals(response.status_code, 200)
        response = get_grids(API_KEY, USERNAME, PASSWORD, base_url=BASE_URL, depth=1)
    def test_users(self):
        response = get_users(API_KEY, USERNAME, PASSWORD, target_username="audreyr", base_url=BASE_URL)
        self.assertEquals(response.status_code, 200)
    def test_unfollow(self):
        response = post_unfollow(API_KEY, USERNAME, PASSWORD, target_username="audreyr", base_url=BASE_URL)
        self.assertEquals(response.status_code, 202)
    def test_follow(self):
        # this line makes sure we are not following audreyr
        response = post_unfollow(API_KEY, USERNAME, PASSWORD, target_username="audreyr", base_url=BASE_URL)
        # Follow audreyr
        response = post_follow(API_KEY, USERNAME, PASSWORD, target_username="audreyr", base_url=BASE_URL)
        self.assertEquals(response.status_code, 201)
        # Follow audreyr and get a 400 because we are already doing so
        response = post_follow(API_KEY, USERNAME, PASSWORD, target_username="audreyr", base_url=BASE_URL)
        self.assertEquals(response.status_code, 400)
    """
# Run the suite when the module is executed directly: ``python tests.py``.
if __name__ == "__main__":
    unittest.main()
"content_hash": "52467983c4ecda11cf2bb7bc8e1cafc3",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 123,
"avg_line_length": 32.5921052631579,
"alnum_prop": 0.6378683891804602,
"repo_name": "consumerio/pycn",
"id": "dc83e421e173e4ee6283c46bf058e6e8bd5186db",
"size": "2502",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pycn/tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16027"
},
{
"name": "Shell",
"bytes": "5116"
}
],
"symlink_target": ""
} |
"""Post-processing operations on detected boxes."""
import tensorflow as tf
from object_detection.core import box_list
from object_detection.core import box_list_ops
from object_detection.core import standard_fields as fields
import sys
def multiclass_non_max_suppression(boxes,
                                   scores,
                                   score_thresh,
                                   iou_thresh,
                                   max_size_per_class,
                                   max_total_size=0,
                                   clip_window=None,
                                   change_coordinate_frame=False,
                                   masks=None,
                                   transcriptions=None,
                                   additional_fields=None,
                                   scope=None):
  """Multi-class version of non maximum suppression.

  This op greedily selects a subset of detection bounding boxes, pruning
  away boxes that have high IOU (intersection over union) overlap (> thresh)
  with already selected boxes.  It operates independently for each class for
  which scores are provided (via the scores field of the input box_list),
  pruning boxes with score less than a provided threshold prior to
  applying NMS.

  Please note that this operation is performed on *all* classes, therefore any
  background classes should be removed prior to calling this function.

  Args:
    boxes: A [k, q, 4] float32 tensor containing k detections. `q` can be either
      number of classes or 1 depending on whether a separate box is predicted
      per class.
    scores: A [k, num_classes] float32 tensor containing the scores for each of
      the k detections.
    score_thresh: scalar threshold for score (low scoring boxes are removed).
    iou_thresh: scalar threshold for IOU (new boxes that have high IOU overlap
      with previously selected boxes are removed).
    max_size_per_class: maximum number of retained boxes per class.
    max_total_size: maximum number of boxes retained over all classes. By
      default returns all boxes retained after capping boxes per class.
    clip_window: A float32 tensor of the form [y_min, x_min, y_max, x_max]
      representing the window to clip and normalize boxes to before performing
      non-max suppression.
    change_coordinate_frame: Whether to normalize coordinates after clipping
      relative to clip_window (this can only be set to True if a clip_window
      is provided)
    masks: (optional) a [k, q, mask_height, mask_width] float32 tensor
      containing box masks. `q` can be either number of classes or 1 depending
      on whether a separate mask is predicted per class.
    transcriptions: (optional) a tensor of transcriptions associated with the
      detections; it is attached to each per-class BoxList as the
      `transcriptions` field, so its first dimension should be `k` to stay
      aligned with the boxes through filtering and NMS gathering.
      NOTE(review): unlike `masks`, this tensor is not sliced per class --
      confirm that is intended when class-specific boxes are used.
    additional_fields: (optional) If not None, a dictionary that maps keys to
      tensors whose first dimensions are all of size `k`. After non-maximum
      suppression, all tensors corresponding to the selected boxes will be
      added to resulting BoxList.
    scope: name scope.

  Returns:
    a BoxList holding M boxes with a rank-1 scores field representing
    corresponding scores for each box with scores sorted in decreasing order
    and a rank-1 classes field representing a class label for each box.
    If masks, keypoints, keypoint_heatmaps is not None, the boxlist will
    contain masks, keypoints, keypoint_heatmaps corresponding to boxes.

  Raises:
    ValueError: if iou_thresh is not in [0, 1] or if input boxlist does not have
      a valid scores field.
  """
  if not 0 <= iou_thresh <= 1.0:
    raise ValueError('iou_thresh must be between 0 and 1')
  if scores.shape.ndims != 2:
    raise ValueError('scores field must be of rank 2')
  if scores.shape[1].value is None:
    raise ValueError('scores must have statically defined second '
                     'dimension')
  if boxes.shape.ndims != 3:
    raise ValueError('boxes must be of rank 3.')
  if not (boxes.shape[1].value == scores.shape[1].value or
          boxes.shape[1].value == 1):
    raise ValueError('second dimension of boxes must be either 1 or equal '
                     'to the second dimension of scores')
  if boxes.shape[2].value != 4:
    raise ValueError('last dimension of boxes must be of size 4.')
  if change_coordinate_frame and clip_window is None:
    raise ValueError('if change_coordinate_frame is True, then a clip_window'
                     'must be specified.')
  # (Removed leftover Python-2 debug ``print`` statements that dumped the
  # boxes/transcriptions tensors to stdout on every call.)
  with tf.name_scope(scope, 'MultiClassNonMaxSuppression'):
    num_boxes = tf.shape(boxes)[0]
    num_scores = tf.shape(scores)[0]
    num_classes = scores.get_shape()[1]

    # Runtime check that boxes and scores agree on k (first dimension).
    length_assert = tf.Assert(
        tf.equal(num_boxes, num_scores),
        ['Incorrect scores field length: actual vs expected.',
         num_scores, num_boxes])

    selected_boxes_list = []
    per_class_boxes_list = tf.unstack(boxes, axis=1)
    if masks is not None:
      per_class_masks_list = tf.unstack(masks, axis=1)
    # When q == 1 the same (shared) boxes/masks slice is reused for every
    # class; otherwise each class gets its own slice.
    boxes_ids = (range(num_classes) if len(per_class_boxes_list) > 1
                 else [0] * num_classes)
    for class_idx, boxes_idx in zip(range(num_classes), boxes_ids):
      per_class_boxes = per_class_boxes_list[boxes_idx]
      boxlist_and_class_scores = box_list.BoxList(per_class_boxes)
      with tf.control_dependencies([length_assert]):
        class_scores = tf.reshape(
            tf.slice(scores, [0, class_idx], tf.stack([num_scores, 1])), [-1])
      boxlist_and_class_scores.add_field(fields.BoxListFields.scores,
                                         class_scores)
      if masks is not None:
        per_class_masks = per_class_masks_list[boxes_idx]
        boxlist_and_class_scores.add_field(fields.BoxListFields.masks,
                                           per_class_masks)
      if transcriptions is not None:
        boxlist_and_class_scores.add_field(fields.BoxListFields.transcriptions,
                                           transcriptions)
      if additional_fields is not None:
        for key, tensor in additional_fields.items():
          boxlist_and_class_scores.add_field(key, tensor)
      # Drop low-scoring boxes before clipping/NMS.
      boxlist_filtered = box_list_ops.filter_greater_than(
          boxlist_and_class_scores, score_thresh)
      if clip_window is not None:
        boxlist_filtered = box_list_ops.clip_to_window(
            boxlist_filtered, clip_window)
        if change_coordinate_frame:
          boxlist_filtered = box_list_ops.change_coordinate_frame(
              boxlist_filtered, clip_window)
      max_selection_size = tf.minimum(max_size_per_class,
                                      boxlist_filtered.num_boxes())
      selected_indices = tf.image.non_max_suppression(
          boxlist_filtered.get(),
          boxlist_filtered.get_field(fields.BoxListFields.scores),
          max_selection_size,
          iou_threshold=iou_thresh)
      nms_result = box_list_ops.gather(boxlist_filtered, selected_indices)
      # Tag every surviving box of this iteration with its class index.
      nms_result.add_field(
          fields.BoxListFields.classes, (tf.zeros_like(
              nms_result.get_field(fields.BoxListFields.scores)) + class_idx))
      selected_boxes_list.append(nms_result)
    # Merge per-class survivors, order by descending score, and optionally
    # cap the total number of returned boxes.
    selected_boxes = box_list_ops.concatenate(selected_boxes_list)
    sorted_boxes = box_list_ops.sort_by_field(selected_boxes,
                                              fields.BoxListFields.scores)
    if max_total_size:
      max_total_size = tf.minimum(max_total_size,
                                  sorted_boxes.num_boxes())
      sorted_boxes = box_list_ops.gather(sorted_boxes,
                                         tf.range(max_total_size))
    return sorted_boxes
def batch_multiclass_non_max_suppression(boxes,
                                         scores,
                                         score_thresh,
                                         iou_thresh,
                                         max_size_per_class,
                                         max_total_size=0,
                                         clip_window=None,
                                         change_coordinate_frame=False,
                                         num_valid_boxes=None,
                                         masks=None,
                                         transcriptions=None,
                                         scope=None):
  """Multi-class version of non maximum suppression that operates on a batch.

  This op is similar to `multiclass_non_max_suppression` but operates on a batch
  of boxes and scores. See documentation for `multiclass_non_max_suppression`
  for details.

  Args:
    boxes: A [batch_size, num_anchors, q, 4] float32 tensor containing
      detections. If `q` is 1 then same boxes are used for all classes
      otherwise, if `q` is equal to number of classes, class-specific boxes
      are used.
    scores: A [batch_size, num_anchors, num_classes] float32 tensor containing
      the scores for each of the `num_anchors` detections.
    score_thresh: scalar threshold for score (low scoring boxes are removed).
    iou_thresh: scalar threshold for IOU (new boxes that have high IOU overlap
      with previously selected boxes are removed).
    max_size_per_class: maximum number of retained boxes per class.
    max_total_size: maximum number of boxes retained over all classes. By
      default returns all boxes retained after capping boxes per class.
    clip_window: A float32 tensor of the form [y_min, x_min, y_max, x_max]
      representing the window to clip boxes to before performing non-max
      suppression.
    change_coordinate_frame: Whether to normalize coordinates after clipping
      relative to clip_window (this can only be set to True if a clip_window
      is provided)
    num_valid_boxes: (optional) a Tensor of type `int32`. A 1-D tensor of shape
      [batch_size] representing the number of valid boxes to be considered
      for each image in the batch. This parameter allows for ignoring zero
      paddings.
    masks: (optional) a [batch_size, num_anchors, q, mask_height, mask_width]
      float32 tensor containing box masks. `q` can be either number of classes
      or 1 depending on whether a separate mask is predicted per class.
    transcriptions: (optional) a [batch_size, num_anchors, ...] float32 tensor
      of per-box transcriptions. Each per-image slice is reshaped to [-1, 16]
      below, so the trailing dimension is presumably a fixed transcription
      length of 16 — TODO confirm against the callers.
    scope: tf scope name.

  Returns:
    A dictionary containing the following entries:
    'detection_boxes': A [batch_size, max_detections, 4] float32 tensor
      containing the non-max suppressed boxes.
    'detection_scores': A [batch_size, max_detections] float32 tensor containing
      the scores for the boxes.
    'detection_classes': A [batch_size, max_detections] float32 tensor
      containing the class for boxes.
    'num_detections': A [batch_size] float32 tensor indicating the number of
      valid detections per batch item. Only the top num_detections[i] entries in
      nms_boxes[i], nms_scores[i] and nms_class[i] are valid. the rest of the
      entries are zero paddings.
    'detection_masks': (optional) a
      [batch_size, max_detections, mask_height, mask_width] float32 tensor
      containing masks for each selected box.
    'detection_transcriptions': (optional) a [batch_size, max_detections, ...]
      tensor containing transcriptions for each selected box; present only
      when `transcriptions` is provided.

  Raises:
    ValueError: if iou_thresh is not in [0, 1] or if input boxlist does not have
    a valid scores field.
  """
  q = boxes.shape[2].value
  num_classes = scores.shape[2].value
  if q != 1 and q != num_classes:
    raise ValueError('third dimension of boxes must be either 1 or equal '
                     'to the third dimension of scores')
  with tf.name_scope(scope, 'BatchMultiClassNonMaxSuppression'):
    # Unstack along the batch dimension so each image can be run through the
    # single-image NMS independently.
    per_image_boxes_list = tf.unstack(boxes)
    per_image_scores_list = tf.unstack(scores)
    # Lists of None placeholders keep the zip below uniform when the optional
    # inputs are absent.
    num_valid_boxes_list = len(per_image_boxes_list) * [None]
    per_image_masks_list = len(per_image_boxes_list) * [None]
    per_image_transcriptions_list = len(per_image_boxes_list) * [None]
    if num_valid_boxes is not None:
      num_valid_boxes_list = tf.unstack(num_valid_boxes)
    if masks is not None:
      per_image_masks_list = tf.unstack(masks)
    if transcriptions is not None:
      per_image_transcriptions_list = tf.unstack(transcriptions)
    detection_boxes_list = []
    detection_scores_list = []
    detection_classes_list = []
    num_detections_list = []
    detection_masks_list = []
    detection_transcriptions_list = []
    # NOTE: the loop variable `num_valid_boxes` shadows the function argument;
    # inside the loop it holds the current image's valid-box count (or None).
    for (per_image_boxes, per_image_scores, per_image_masks, per_image_transcriptions, num_valid_boxes
        ) in zip(per_image_boxes_list, per_image_scores_list,
                 per_image_masks_list, per_image_transcriptions_list, num_valid_boxes_list):
      if num_valid_boxes is not None:
        # Strip zero paddings: keep only the first num_valid_boxes entries of
        # every per-image tensor before running NMS.
        per_image_boxes = tf.reshape(
            tf.slice(per_image_boxes, 3*[0],
                     tf.stack([num_valid_boxes, -1, -1])), [-1, q, 4])
        per_image_scores = tf.reshape(
            tf.slice(per_image_scores, [0, 0],
                     tf.stack([num_valid_boxes, -1])), [-1, num_classes])
        if masks is not None:
          per_image_masks = tf.reshape(
              tf.slice(per_image_masks, 4*[0],
                       tf.stack([num_valid_boxes, -1, -1, -1])),
              [-1, q, masks.shape[3].value, masks.shape[4].value])
        if transcriptions is not None:
          # Hard-coded trailing dimension of 16 — presumably the fixed
          # transcription length; TODO confirm.
          per_image_transcriptions = tf.reshape(
              tf.slice(per_image_transcriptions, [0, 0],
                       tf.stack([num_valid_boxes, -1])),
              [-1, 16])
      # Per-image multi-class NMS (defined earlier in this module).
      nmsed_boxlist = multiclass_non_max_suppression(
          per_image_boxes,
          per_image_scores,
          score_thresh,
          iou_thresh,
          max_size_per_class,
          max_total_size,
          masks=per_image_masks,
          transcriptions=per_image_transcriptions,
          clip_window=clip_window,
          change_coordinate_frame=change_coordinate_frame)
      num_detections_list.append(tf.to_float(nmsed_boxlist.num_boxes()))
      # Pad (or clip) each image's result to max_total_size so results can be
      # stacked back into fixed-shape batch tensors.
      padded_boxlist = box_list_ops.pad_or_clip_box_list(nmsed_boxlist,
                                                         max_total_size)
      detection_boxes_list.append(padded_boxlist.get())
      detection_scores_list.append(
          padded_boxlist.get_field(fields.BoxListFields.scores))
      detection_classes_list.append(
          padded_boxlist.get_field(fields.BoxListFields.classes))
      if masks is not None:
        detection_masks_list.append(
            padded_boxlist.get_field(fields.BoxListFields.masks))
      if transcriptions is not None:
        detection_transcriptions_list.append(
            padded_boxlist.get_field(fields.BoxListFields.transcriptions))
    nms_dict = {
        'detection_boxes': tf.stack(detection_boxes_list),
        'detection_scores': tf.stack(detection_scores_list),
        'detection_classes': tf.stack(detection_classes_list),
        'num_detections': tf.stack(num_detections_list)
    }
    if masks is not None:
      nms_dict['detection_masks'] = tf.stack(detection_masks_list)
    if transcriptions is not None:
      nms_dict['detection_transcriptions'] = tf.stack(detection_transcriptions_list)
    return nms_dict
| {
"content_hash": "23298870b3b9eb8e309b3d7a342e297b",
"timestamp": "",
"source": "github",
"line_count": 311,
"max_line_length": 102,
"avg_line_length": 48.17363344051447,
"alnum_prop": 0.63082365505273,
"repo_name": "zbigniewwojna/text-rcnn",
"id": "e721066c298d4187d117f99211be0fde2a88ec21",
"size": "15672",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/post_processing.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1919352"
},
{
"name": "Protocol Buffer",
"bytes": "48648"
},
{
"name": "Python",
"bytes": "1471246"
}
],
"symlink_target": ""
} |
from netpyne import specs
from netpyne.batch import Batch
''' Example of adaptive stochastic descent algorithm optimization of a network using NetPyNE
'simple' network, 3 parameters are optimized to match target firing rates in 2 populations
To run use: mpiexec -np [num_cores] nrniv -mpi batchRun.py
'''
def batchASD():
    """Optimize the 'simple' NetPyNE network with adaptive stochastic descent.

    Tunes three connectivity parameters ('prob', 'weight', 'delay') so that
    the firing rates of populations 'S' and 'M' approach their targets, then
    launches the batch via MPI (see module docstring for the run command).
    """
    # Parameter space to explore: [min, max, starting value(s)].
    ## simple net
    params = specs.ODict()
    params['prob'] = [0.01, 0.5, [0.4, 0.3]]  # can add 3rd value for starting value (0)
    params['weight'] = [0.001, 0.1, [0.1, 0.2]]
    params['delay'] = [1, 20, [5, 3]]

    # Per-population fitness targets: target rate (Hz), tolerance width, and
    # the minimum rate below which the worst (max) fitness is assigned.
    pops = {}
    pops['S'] = {'target': 5, 'width': 2, 'min': 2}
    pops['M'] = {'target': 15, 'width': 2, 'min': 0.2}

    # Arguments forwarded to the fitness function by the optimizer.
    fitnessFuncArgs = {}
    fitnessFuncArgs['pops'] = pops
    fitnessFuncArgs['maxFitness'] = 1000

    def fitnessFunc(simData, **kwargs):
        """Mean over populations of exp(|target - rate| / width), capped at
        maxFitness; a population firing below its 'min' rate scores maxFitness."""
        import numpy as np
        pops = kwargs['pops']
        maxFitness = kwargs['maxFitness']
        # (A dead `popFitness = [None ...]` initialization that was immediately
        # overwritten has been removed here.)
        popFitness = [min(np.exp( abs(v['target'] - simData['popRates'][k]) / v['width']), maxFitness)
            if simData["popRates"][k]>v['min'] else maxFitness for k,v in pops.items()]
        fitness = np.mean(popFitness)
        # NOTE(review): 'fit=%1.f' prints zero decimals (field width 1); likely
        # intended '%.1f' as used for rate — confirm before changing output.
        popInfo = '; '.join(['%s rate=%.1f fit=%1.f'%(p,r,f) for p,r,f in zip(list(simData['popRates'].keys()), list(simData['popRates'].values()), popFitness)])
        print(' '+popInfo)
        return fitness

    # create Batch object with parameters to modify, and specifying files to use
    b = Batch(params=params)

    # Set output folder, optimization method (ASD), and run configuration
    b.batchLabel = 'simple'
    b.saveFolder = './'+b.batchLabel
    b.method = 'asd'
    b.runCfg = {
        'type': 'mpi_direct',#'hpc_slurm',
        'script': 'init.py',
        # options required only for mpi_direct or hpc
        'mpiCommand': 'mpiexec',
        'nodes': 1,
        'coresPerNode': 2,
        # 'allocation': 'default',
        # 'email': 'salvadordura@gmail.com',
        # 'reservation': None,
        # 'folder': '/home/salvadord/evol'
        #'custom': 'export LD_LIBRARY_PATH="$HOME/.openmpi/lib"' # only for conda users
    }
    b.optimCfg = {
        'fitnessFunc': fitnessFunc, # fitness expression (should read simData)
        'fitnessFuncArgs': fitnessFuncArgs,
        'maxFitness': fitnessFuncArgs['maxFitness'],
        'stepsize': 0.1, # Initial step size as a fraction of each parameter
        'sinc': 2, # Step size learning rate (increase)
        'sdec': 2, # Step size learning rate (decrease)
        'pinc': 2, # Parameter selection learning rate (increase)
        'pdec': 2, # Parameter selection learning rate (decrease)
        #'pinitial': None, # Set initial parameter selection probabilities
        #'sinitial': None, # Set initial step sizes; if empty, calculated from stepsize instead
        'maxiters': 2, # Maximum number of iterations (1 iteration = 1 function evaluation)
        'maxtime': 3600, # Maximum time allowed, in seconds
        'abstol': 1e-6, # Minimum absolute change in objective function
        'reltol': 1e-3, # Minimum relative change in objective function
        #'stalliters': 10*len(params)*len(params), # Number of iterations over which to calculate TolFun (n = number of parameters)
        #'stoppingfunc': None, # External method that can be used to stop the calculation from the outside.
        #'randseed': None, # The random seed to use
        'verbose': 2, # How much information to print during the run
        #'label': None # A label to use to annotate the output
        'maxiter_wait': 10,
        'time_sleep': 5,
        'popsize': 1
    }

    # Run batch simulations
    b.run()
# Main entry point: build and launch the ASD optimization batch when executed
# as a script (see the module docstring for the mpiexec invocation).
if __name__ == '__main__':
    batchASD() # optimizes the 'simple' network
| {
"content_hash": "de88aafb6e8d1f7cc0e7043b3b8d4b6d",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 161,
"avg_line_length": 44.611111111111114,
"alnum_prop": 0.5880448318804483,
"repo_name": "Neurosim-lab/netpyne",
"id": "d21f1b1d6a476b31bfaa24bd5302dc8ca5d3db25",
"size": "4015",
"binary": false,
"copies": "1",
"ref": "refs/heads/development",
"path": "examples/asdOptim/batch.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "AMPL",
"bytes": "25324"
},
{
"name": "Jupyter Notebook",
"bytes": "2588467"
},
{
"name": "Python",
"bytes": "1802020"
},
{
"name": "Shell",
"bytes": "915"
}
],
"symlink_target": ""
} |
"""File containing the Variable class, detailed below."""
import re
from .expression import Expression
from .delimiteurs import DELIMITEURS
# Constants
RE_VARIABLE = re.compile(r"^[A-Za-z][A-Za-z0-9_]*$")
class Variable(Expression):

    """Variable expression.

    Represents a reference to a scripting variable by name. Parsing stops at
    the first delimiter (from DELIMITEURS) found in the input string.
    """

    nom = "variable"

    def __init__(self):
        """Expression constructor."""
        Expression.__init__(self)
        self.nom = None

    def __repr__(self):
        return "Variable({})".format(self.nom)

    def __str__(self):
        return "|vr|" + self.nom + "|ff|"

    @classmethod
    def parsable(cls, chaine):
        """Return a truthy match object if the string is parsable, None otherwise."""
        chaine = chaine.lstrip()
        fins = [chaine.index(delimiteur) for delimiteur in DELIMITEURS \
                if delimiteur in chaine]
        # Explicit test instead of the previous `fins and min(fins) or None`
        # idiom, which wrongly yields None when the first delimiter sits at
        # index 0 (and is inconsistent with parser() below).
        if fins:
            fin = min(fins)
        else:
            fin = None
        chaine = chaine[:fin]
        return RE_VARIABLE.search(chaine)

    @classmethod
    def parser(cls, chaine):
        """Parse the string.

        Return the created object and the uninterpreted part of the string.
        """
        objet = cls()
        chaine = chaine.lstrip()
        # Cut the candidate name at the first delimiter, if any.
        fins = [chaine.index(delimiteur) for delimiteur in DELIMITEURS \
                if delimiteur in chaine]
        if fins:
            fin = min(fins)
        else:
            fin = None
        chaine_interpreter = chaine[:fin]
        objet.nom = chaine_interpreter
        return objet, chaine[len(chaine_interpreter):]

    def get_valeur(self, evt):
        """Return the variable's value or raise ValueError if it is absent."""
        espace = evt.espaces.variables
        if self.nom in espace:
            return espace[self.nom]
        else:
            # Error message kept in French: it is user-facing runtime text.
            raise ValueError("la variable {} est introuvable".format(self.nom))

    @property
    def code_python(self):
        """Return the associated Python code."""
        return "variables['" + self.nom + "']"
| {
"content_hash": "51486a289a42e7a288c38870ffb6016c",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 79,
"avg_line_length": 29.52238805970149,
"alnum_prop": 0.5733063700707786,
"repo_name": "vlegoff/tsunami",
"id": "c137e93bf40ee31f6d0921c07a3b27c53c978386",
"size": "3560",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/primaires/scripting/parser/variable.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "7930908"
},
{
"name": "Ruby",
"bytes": "373"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.