"""Model Analyzer.
Analyze model, including shape, params, time, memory, structure, etc.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.tfprof.python.tools.tfprof import pywrap_tensorflow_print_model_analysis_lib as print_mdl
from tensorflow.contrib.tfprof.python.tools.tfprof import tfprof_logger
from tensorflow.python.framework import errors
from tensorflow.tools.tfprof import tfprof_options_pb2
from tensorflow.tools.tfprof import tfprof_output_pb2
# pylint: disable=bad-whitespace
# pylint: disable=bad-continuation
# 2 example tfprof_options for print_model_analysis API.
#
# Show the parameter statistics of trainable variables.
TRAINABLE_VARS_PARAMS_STAT_OPTIONS = {
'max_depth': 10000,
'min_bytes': 0,
'min_micros': 0,
'min_params': 0,
'min_float_ops': 0,
'order_by': 'name',
'account_type_regexes': [tfprof_logger.TRAINABLE_VARIABLES],
'start_name_regexes': ['.*'],
'trim_name_regexes': [],
'show_name_regexes': ['.*'],
'hide_name_regexes': [],
'account_displayed_op_only': True,
'select': ['params'],
'output': 'stdout',
'dump_to_file': ''
}
# Show the number of float operations.
FLOAT_OPS_OPTIONS = {
'max_depth': 10000,
'min_bytes': 0,
'min_micros': 0,
'min_params': 0,
'min_float_ops': 1,
'order_by': 'float_ops',
'account_type_regexes': ['.*'],
'start_name_regexes': ['.*'],
'trim_name_regexes': [],
'show_name_regexes': ['.*'],
'hide_name_regexes': [],
'account_displayed_op_only': True,
'select': ['float_ops'],
'output': 'stdout',
'dump_to_file': ''
}
# Show the number of parameters on parameter server 0.
# It is recommended to provide the `run_meta` argument
# to have complete device placement info.
PRINT_PARAMS_ON_DEVICE = {
'max_depth': 1,
'min_bytes': 0,
'min_micros': 0,
'min_params': 0,
'min_float_ops': 0,
'order_by': 'name',
'account_type_regexes': ['.*ps.*task:0.*'],
'start_name_regexes': ['.*'],
'trim_name_regexes': [],
'show_name_regexes': ['.*'],
'hide_name_regexes': [],
'account_displayed_op_only': False,
'select': ['device', 'params'],
'output': 'stdout',
'dump_to_file': ''
}
# Show the timing stats and memory demands.
PRINT_ALL_TIMING_MEMORY = {
'max_depth': 10000,
'min_bytes': 1, # Only >=1
'min_micros': 1, # Only >=1
'min_params': 0,
'min_float_ops': 0,
'order_by': 'name',
'account_type_regexes': ['.*'],
'start_name_regexes': ['.*'],
'trim_name_regexes': [],
'show_name_regexes': ['.*'],
'hide_name_regexes': [],
'account_displayed_op_only': True,
'select': ['micros', 'bytes'],
'output': 'stdout',
'dump_to_file': ''
}
# pylint: enable=bad-whitespace
# pylint: enable=bad-continuation
def _build_options(tfprof_options):
"""Build tfprof.OptionsProto.
Args:
tfprof_options: A dictionary of options.
Returns:
tfprof.OptionsProto.
"""
opts = tfprof_options_pb2.OptionsProto()
opts.max_depth = tfprof_options.get('max_depth', 10)
opts.min_bytes = tfprof_options.get('min_bytes', 0)
opts.min_micros = tfprof_options.get('min_micros', 0)
opts.min_params = tfprof_options.get('min_params', 0)
opts.min_float_ops = tfprof_options.get('min_float_ops', 0)
opts.min_occurrence = tfprof_options.get('min_occurrence', 0)
opts.step = tfprof_options.get('step', -1)
opts.order_by = tfprof_options.get('order_by', 'name')
for p in tfprof_options.get('account_type_regexes', []):
opts.account_type_regexes.append(p)
for p in tfprof_options.get('start_name_regexes', []):
opts.start_name_regexes.append(p)
for p in tfprof_options.get('trim_name_regexes', []):
opts.trim_name_regexes.append(p)
for p in tfprof_options.get('show_name_regexes', []):
opts.show_name_regexes.append(p)
for p in tfprof_options.get('hide_name_regexes', []):
opts.hide_name_regexes.append(p)
opts.account_displayed_op_only = tfprof_options.get(
'account_displayed_op_only', False)
for p in tfprof_options.get('select', []):
opts.select.append(p)
opts.output = tfprof_options.get('output', 'stdout')
opts.dump_to_file = tfprof_options.get('dump_to_file', '')
return opts
class Profiler(object):
"""TensorFlow multi-step profiler.
See go/tfprof or README for details.
Typical use case:
    # Currently only one profiler can be created per process.
    profiler = Profiler(sess.graph)
for i in xrange(total_steps):
if i % 10000 == 0:
run_meta = tf.RunMetadata()
_ = sess.run(...,
options=tf.RunOptions(
trace_level=tf.RunOptions.FULL_TRACE),
run_metadata=run_meta)
profiler.add_step(i, run_meta)
# Profile the parameters of your model.
profiler.profile_name_scope(options=TRAINABLE_VARS_PARAMS_STAT_OPTIONS)
# Or profile the timing of your model operations.
opts = PRINT_ALL_TIMING_MEMORY.copy()
opts['order_by'] = 'micros'
opts['select'] = ['micros', 'occurrence']
opts['max_depth'] = 20
profiler.profile_operations(options=opts)
# Or you can generate a timeline:
opts = PRINT_ALL_TIMING_MEMORY.copy()
opts['output'] = 'timeline:outfile=' + filename
opts['step'] = i
profiler.profile_graph(options=opts)
else:
_ = sess.run(...)
"""
def __init__(self, graph, op_log=None):
"""Constructor.
Args:
graph: tf.Graph.
op_log: optional. tensorflow::tfprof::OpLog proto. Used to define
extra op types.
"""
self._graph = graph
# pylint: disable=protected-access
op_log = tfprof_logger._merge_default_with_oplog(
self._graph, op_log=op_log)
# pylint: enable=protected-access
print_mdl.NewProfiler(
self._graph.as_graph_def().SerializeToString(),
op_log.SerializeToString())
def __del__(self):
print_mdl.DeleteProfiler()
def add_step(self, step, run_meta):
"""Add statistics of a step.
Args:
      step: A uint64 step id used to identify the RunMetadata. Must be
          different across different add_step() calls.
run_meta: RunMetadata proto that contains statistics of a session run.
"""
# pylint: disable=protected-access
op_log = tfprof_logger._merge_default_with_oplog(
self._graph, run_meta=run_meta, add_trace=False,
add_trainable_var=False)
# pylint: enable=protected-access
print_mdl.AddStep(
step, run_meta.SerializeToString(), op_log.SerializeToString())
def profile_python_codes(self, options):
"""Profile the statistics of the Python codes.
Hint: set options['show_name_regexes'] = ['.*my_code.py.*']
Args:
options: A dict of profiler options.
Returns:
a TFMultiGraphNodeProto that records the results.
"""
opts = _build_options(options)
tfprof_node = tfprof_output_pb2.TFMultiGraphNodeProto()
tfprof_node.ParseFromString(
print_mdl.Profile('code'.encode('utf-8'), opts.SerializeToString()))
return tfprof_node
def profile_operations(self, options):
"""Profile the statistics of the Operation types (e.g. MatMul, Conv2D).
Args:
options: A dict of profiler options.
Returns:
a TFMultiGraphNodeProto that records the results.
"""
opts = _build_options(options)
tfprof_node = tfprof_output_pb2.TFMultiGraphNodeProto()
tfprof_node.ParseFromString(
print_mdl.Profile('op'.encode('utf-8'), opts.SerializeToString()))
return tfprof_node
def profile_name_scope(self, options):
"""Profile the statistics of graph nodes, organized by name scope.
Args:
options: A dict of profiler options.
Returns:
a TFGraphNodeProto that records the results.
"""
opts = _build_options(options)
tfprof_node = tfprof_output_pb2.TFGraphNodeProto()
tfprof_node.ParseFromString(
print_mdl.Profile('scope'.encode('utf-8'), opts.SerializeToString()))
return tfprof_node
def profile_graph(self, options):
"""Profile the statistics of graph nodes, organized by dataflow graph.
Args:
options: A dict of profiler options.
Returns:
a TFGraphNodeProto that records the results.
"""
opts = _build_options(options)
tfprof_node = tfprof_output_pb2.TFGraphNodeProto()
tfprof_node.ParseFromString(
print_mdl.Profile('graph'.encode('utf-8'), opts.SerializeToString()))
return tfprof_node
def print_model_analysis(graph,
run_meta=None,
op_log=None,
tfprof_cmd='scope',
tfprof_options=TRAINABLE_VARS_PARAMS_STAT_OPTIONS):
"""Print model statistics.
See go/tfprof or README for examples and tutorials.
Run tfprof tool for help:
'bazel run third_party/tensorflow/tools/tfprof help'
Args:
graph: tf.Graph.
run_meta: tensorflow::RunMetadata proto. When provided, also shows valid
timing and memory information when 'select' option contains
'micros' and 'bytes'.
    op_log: tensorflow::tfprof::OpLog proto. Users can use this proto to
        group ops together and use an op_type to select the group.
tfprof_cmd: string. Either 'op', 'scope', 'graph', 'code'.
        'op' view organizes outputs by operation type (e.g. MatMul).
        'scope' view organizes outputs by graph node name scope.
        'graph' view organizes outputs by graph node inputs/outputs.
        'code' view organizes outputs by Python call stack.
tfprof_options: See 'tfprof help' for details.
Returns:
    If tfprof_cmd is 'scope' or 'graph', returns a TFGraphNodeProto proto.
    If tfprof_cmd is 'op' or 'code', returns a TFMultiGraphNodeProto proto.
Side effect: stdout/file/timeline.json depending on tfprof_options['output']
"""
# pylint: disable=protected-access
op_log = tfprof_logger._merge_default_with_oplog(
graph, op_log, run_meta, add_trace=tfprof_cmd == 'code')
# pylint: enable=protected-access
opts = _build_options(tfprof_options)
run_meta_str = run_meta.SerializeToString() if run_meta else b''
if tfprof_cmd == 'code' or tfprof_cmd == 'op':
tfprof_node = tfprof_output_pb2.TFMultiGraphNodeProto()
tfprof_node.ParseFromString(
print_mdl.PrintModelAnalysis(
graph.as_graph_def().SerializeToString(),
run_meta_str,
op_log.SerializeToString(),
tfprof_cmd.encode('utf-8'),
opts.SerializeToString()))
elif tfprof_cmd == 'graph' or tfprof_cmd == 'scope':
tfprof_node = tfprof_output_pb2.TFGraphNodeProto()
tfprof_node.ParseFromString(
print_mdl.PrintModelAnalysis(
graph.as_graph_def().SerializeToString(),
run_meta_str,
op_log.SerializeToString(),
tfprof_cmd.encode('utf-8'),
opts.SerializeToString()))
else:
raise errors.InvalidArgumentError(
None, None, 'unknown tfprof_cmd: %s\n' % tfprof_cmd)
return tfprof_node
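# A minimal usage sketch (not part of the original module): calling
# print_model_analysis with one of the canned option dicts above. The
# session argument and the helper name are illustrative assumptions.
def _example_profile_float_ops(sess):
  # Copy the canned options so the module-level dict is not mutated.
  opts = FLOAT_OPS_OPTIONS.copy()
  opts['max_depth'] = 5
  return print_model_analysis(
      sess.graph, tfprof_cmd='scope', tfprof_options=opts)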
|
{
"content_hash": "68d082c4d1be62ba2542b465b218ff17",
"timestamp": "",
"source": "github",
"line_count": 336,
"max_line_length": 113,
"avg_line_length": 33.36309523809524,
"alnum_prop": 0.639964317573595,
"repo_name": "jhaux/tensorflow",
"id": "b640fa75931fecd53fe196029c97b4c10e313981",
"size": "11899",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/tfprof/python/tools/tfprof/model_analyzer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7908"
},
{
"name": "C",
"bytes": "186881"
},
{
"name": "C++",
"bytes": "25375441"
},
{
"name": "CMake",
"bytes": "166479"
},
{
"name": "Go",
"bytes": "858855"
},
{
"name": "HTML",
"bytes": "593130"
},
{
"name": "Java",
"bytes": "319061"
},
{
"name": "JavaScript",
"bytes": "1399"
},
{
"name": "Jupyter Notebook",
"bytes": "1833659"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "37393"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "63700"
},
{
"name": "Protocol Buffer",
"bytes": "227623"
},
{
"name": "Python",
"bytes": "22404212"
},
{
"name": "Ruby",
"bytes": "327"
},
{
"name": "Shell",
"bytes": "338633"
},
{
"name": "TypeScript",
"bytes": "801168"
}
],
"symlink_target": ""
}
|
"""empty message
Revision ID: 465785295fcb
Revises: 40f2531a009a
Create Date: 2016-10-14 16:33:36.961293
"""
# revision identifiers, used by Alembic.
revision = '465785295fcb'
down_revision = '40f2531a009a'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('geographies',
sa.Column('geography_id', sa.SmallInteger(), nullable=False),
sa.Column('geography_type', sa.String(), nullable=False),
sa.Column('geography_name', sa.String(), nullable=False),
sa.PrimaryKeyConstraint('geography_id')
)
op.create_table('quarters',
sa.Column('quarter_id', sa.SmallInteger(), nullable=False),
sa.Column('year', sa.Integer(), nullable=False),
sa.Column('quarter', sa.Integer(), nullable=False),
sa.PrimaryKeyConstraint('quarter_id')
)
op.create_table('jobs_importance',
sa.Column('quarter_id', sa.SmallInteger(), nullable=False),
sa.Column('geography_id', sa.SmallInteger(), nullable=False),
sa.Column('job_uuid', sa.String(), nullable=False),
sa.Column('importance', sa.Float(), nullable=True),
sa.ForeignKeyConstraint(['geography_id'], ['geographies.geography_id'], ),
sa.ForeignKeyConstraint(['quarter_id'], ['quarters.quarter_id'], ),
sa.PrimaryKeyConstraint('quarter_id', 'geography_id', 'job_uuid')
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('jobs_importance')
op.drop_table('quarters')
op.drop_table('geographies')
### end Alembic commands ###
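# Usage sketch (assumptions: a configured alembic.ini; the revision ids come
# from the header above). Apply or roll back this migration from the CLI:
#   alembic upgrade 465785295fcb
#   alembic downgrade 40f2531a009a
# Note that rows in jobs_importance must reference existing quarters and
# geographies rows, per the foreign keys created in upgrade().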
|
{
"content_hash": "f2ca2e861de917c1482e8629bca434ed",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 78,
"avg_line_length": 33.833333333333336,
"alnum_prop": 0.6785714285714286,
"repo_name": "agileronin/skills-api",
"id": "863e3869269aa01d53a6d1c7c105c6972feb2152",
"size": "1624",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "migrations/versions/465785295fcb_.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "97395"
},
{
"name": "Shell",
"bytes": "11947"
}
],
"symlink_target": ""
}
|
import datetime
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from django.db import models
from django.db.models.signals import post_init, post_save
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import User
from symposion.conference.models import Conference
from symposion.sponsorship.managers import SponsorManager
class SponsorLevel(models.Model):
conference = models.ForeignKey(Conference, verbose_name=_("conference"))
name = models.CharField(_("name"), max_length=100)
order = models.IntegerField(_("order"), default=0)
cost = models.PositiveIntegerField(_("cost"))
description = models.TextField(_("description"), blank=True, help_text=_("This is private."))
class Meta:
ordering = ["conference", "order"]
verbose_name = _("sponsor level")
verbose_name_plural = _("sponsor levels")
def __unicode__(self):
return self.name
def sponsors(self):
return self.sponsor_set.filter(active=True).order_by("added")
class Sponsor(models.Model):
applicant = models.ForeignKey(User, related_name="sponsorships", verbose_name=_("applicant"), null=True)
name = models.CharField(_("Sponsor Name"), max_length=100)
external_url = models.URLField(_("external URL"))
annotation = models.TextField(_("annotation"), blank=True)
contact_name = models.CharField(_("Contact Name"), max_length=100)
contact_email = models.EmailField(_(u"Contact Email"))
level = models.ForeignKey(SponsorLevel, verbose_name=_("level"))
added = models.DateTimeField(_("added"), default=datetime.datetime.now)
active = models.BooleanField(_("active"), default=False)
# Denormalization (this assumes only one logo)
sponsor_logo = models.ForeignKey("SponsorBenefit", related_name="+", null=True, blank=True, editable=False)
objects = SponsorManager()
def __unicode__(self):
return self.name
class Meta:
verbose_name = _("sponsor")
verbose_name_plural = _("sponsors")
def get_absolute_url(self):
if self.active:
return reverse("sponsor_detail", kwargs={"pk": self.pk})
return reverse("sponsor_list")
@property
def website_logo(self):
if self.sponsor_logo is None:
benefits = self.sponsor_benefits.filter(benefit__type="weblogo", upload__isnull=False)[:1]
if benefits.count():
if benefits[0].upload:
self.sponsor_logo = benefits[0]
self.save()
return self.sponsor_logo.upload
@property
def listing_text(self):
if not hasattr(self, "_listing_text"):
self._listing_text = None
# @@@ better than hard-coding a pk but still not good
benefits = self.sponsor_benefits.filter(benefit__name="Sponsor Description")
if benefits.count():
self._listing_text = benefits[0].text
return self._listing_text
def reset_benefits(self):
"""
Reset all benefits for this sponsor to the defaults for their
sponsorship level.
"""
level = None
try:
level = self.level
except SponsorLevel.DoesNotExist:
pass
allowed_benefits = []
if level:
for benefit_level in level.benefit_levels.all():
# Create all needed benefits if they don't exist already
sponsor_benefit, created = SponsorBenefit.objects.get_or_create(
sponsor=self, benefit=benefit_level.benefit)
# and set to default limits for this level.
sponsor_benefit.max_words = benefit_level.max_words
sponsor_benefit.other_limits = benefit_level.other_limits
# and set to active
sponsor_benefit.active = True
# @@@ We don't call sponsor_benefit.clean here. This means
# that if the sponsorship level for a sponsor is adjusted
# downwards, an existing too-long text entry can remain,
# and won't raise a validation error until it's next
# edited.
sponsor_benefit.save()
allowed_benefits.append(sponsor_benefit.pk)
# Any remaining sponsor benefits that don't normally belong to
# this level are set to inactive
self.sponsor_benefits.exclude(pk__in=allowed_benefits).update(active=False, max_words=None, other_limits="")
def send_coordinator_emails(self):
pass # @@@ should this just be done centrally?
def _store_initial_level(sender, instance, **kwargs):
if instance:
instance._initial_level_id = instance.level_id
post_init.connect(_store_initial_level, sender=Sponsor)
def _check_level_change(sender, instance, created, **kwargs):
if instance and (created or instance.level_id != instance._initial_level_id):
instance.reset_benefits()
post_save.connect(_check_level_change, sender=Sponsor)
BENEFIT_TYPE_CHOICES = [
("text", "Text"),
("file", "File"),
("weblogo", "Web Logo"),
("simple", "Simple")
]
class Benefit(models.Model):
name = models.CharField(_("name"), max_length=100)
description = models.TextField(_("description"), blank=True)
type = models.CharField(_("type"), choices=BENEFIT_TYPE_CHOICES, max_length=10, default="simple")
def __unicode__(self):
return self.name
class BenefitLevel(models.Model):
benefit = models.ForeignKey(Benefit, related_name="benefit_levels", verbose_name=_("benefit"))
level = models.ForeignKey(SponsorLevel, related_name="benefit_levels", verbose_name=_("level"))
# default limits for this benefit at given level
max_words = models.PositiveIntegerField(_("max words"), blank=True, null=True)
other_limits = models.CharField(_("other limits"), max_length=200, blank=True)
class Meta:
ordering = ["level"]
def __unicode__(self):
return u"%s - %s" % (self.level, self.benefit)
class SponsorBenefit(models.Model):
sponsor = models.ForeignKey(Sponsor, related_name="sponsor_benefits", verbose_name=_("sponsor"))
benefit = models.ForeignKey(Benefit, related_name="sponsor_benefits", verbose_name=_("benefit"))
active = models.BooleanField(default=True)
# Limits: will initially be set to defaults from corresponding BenefitLevel
max_words = models.PositiveIntegerField(_("max words"), blank=True, null=True)
other_limits = models.CharField(_("other limits"), max_length=200, blank=True)
# Data: zero or one of these fields will be used, depending on the
# type of the Benefit (text, file, or simple)
text = models.TextField(_("text"), blank=True)
upload = models.FileField(_("file"), blank=True, upload_to="sponsor_files")
class Meta:
ordering = ["-active"]
def __unicode__(self):
return u"%s - %s" % (self.sponsor, self.benefit)
def clean(self):
num_words = len(self.text.split())
if self.max_words and num_words > self.max_words:
raise ValidationError(
"Sponsorship level only allows for %s words, you provided %d." % (
self.max_words, num_words))
def data_fields(self):
"""
Return list of data field names which should be editable for
this ``SponsorBenefit``, depending on its ``Benefit`` type.
"""
if self.benefit.type == "file" or self.benefit.type == "weblogo":
return ["upload"]
elif self.benefit.type == "text":
return ["text"]
return []
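# Illustrative sketch (hypothetical object names, not part of the app):
# changing a sponsor's level and saving fires _check_level_change via the
# post_save signal wired up above, which calls reset_benefits() so the
# sponsor's SponsorBenefit rows are re-seeded from the new level's
# BenefitLevel defaults.
#
#   sponsor = Sponsor.objects.get(name="Acme Corp")
#   sponsor.level = SponsorLevel.objects.get(name="Gold")
#   sponsor.save()  # post_save -> reset_benefits()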
|
{
"content_hash": "d4fffa7b91c1eeb1334d20ab8802108c",
"timestamp": "",
"source": "github",
"line_count": 212,
"max_line_length": 116,
"avg_line_length": 37.27358490566038,
"alnum_prop": 0.6223740825107568,
"repo_name": "mbrochh/symposion",
"id": "24a12d31728397c86e83ebd261b0e708d81d1b4e",
"size": "7902",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "symposion/sponsorship/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "13131"
},
{
"name": "JavaScript",
"bytes": "79954"
},
{
"name": "Python",
"bytes": "154962"
}
],
"symlink_target": ""
}
|
from django.shortcuts import render
# Create your views here.
def aefat(request):
return render(request, 'aefat_pages/home.html')
def home(request):
return render(request, 'aefat_pages/home.html')
|
{
"content_hash": "99cdf33f02218fac63b29a1141c10e29",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 51,
"avg_line_length": 26.625,
"alnum_prop": 0.7183098591549296,
"repo_name": "ouedraog/aefat",
"id": "cc4a518aa9e3df0b31756ffcf609dc0e8106e93e",
"size": "213",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aefat/aefat_pages/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "987524"
},
{
"name": "JavaScript",
"bytes": "108597"
},
{
"name": "Python",
"bytes": "73573"
}
],
"symlink_target": ""
}
|
from test_framework.mininode import *
from test_framework.test_framework import AureusTestFramework
from test_framework.util import *
from test_framework.blocktools import create_block, create_coinbase
'''
SendHeadersTest -- test behavior of headers messages to announce blocks.
Setup:
- Two nodes, two p2p connections to node0. One p2p connection should only ever
receive inv's (omitted from testing description below, this is our control).
Second node is used for creating reorgs.
Part 1: No headers announcements before "sendheaders"
a. node mines a block [expect: inv]
send getdata for the block [expect: block]
b. node mines another block [expect: inv]
send getheaders and getdata [expect: headers, then block]
c. node mines another block [expect: inv]
peer mines a block, announces with header [expect: getdata]
d. node mines another block [expect: inv]
Part 2: After "sendheaders", headers announcements should generally work.
a. peer sends sendheaders [expect: no response]
peer sends getheaders with current tip [expect: no response]
b. node mines a block [expect: tip header]
c. for N in 1, ..., 10:
* for announce-type in {inv, header}
- peer mines N blocks, announces with announce-type
[ expect: getheaders/getdata or getdata, deliver block(s) ]
- node mines a block [ expect: 1 header ]
Part 3: Headers announcements stop after large reorg and resume after getheaders or inv from peer.
- For response-type in {inv, getheaders}
* node mines a 7 block reorg [ expect: headers announcement of 8 blocks ]
* node mines an 8-block reorg [ expect: inv at tip ]
* peer responds with getblocks/getdata [expect: inv, blocks ]
* node mines another block [ expect: inv at tip, peer sends getdata, expect: block ]
* node mines another block at tip [ expect: inv ]
* peer responds with getheaders with an old hashstop more than 8 blocks back [expect: headers]
* peer requests block [ expect: block ]
* node mines another block at tip [ expect: inv, peer sends getdata, expect: block ]
* peer sends response-type [expect headers if getheaders, getheaders/getdata if mining new block]
* node mines 1 block [expect: 1 header, peer responds with getdata]
Part 4: Test direct fetch behavior
a. Announce 2 old block headers.
Expect: no getdata requests.
b. Announce 3 new blocks via 1 headers message.
Expect: one getdata request for all 3 blocks.
(Send blocks.)
c. Announce 1 header that forks off the last two blocks.
Expect: no response.
d. Announce 1 more header that builds on that fork.
Expect: one getdata request for two blocks.
e. Announce 16 more headers that build on that fork.
Expect: getdata request for 14 more blocks.
f. Announce 1 more header that builds on that fork.
Expect: no response.
'''
class BaseNode(NodeConnCB):
def __init__(self):
NodeConnCB.__init__(self)
self.connection = None
self.last_inv = None
self.last_headers = None
self.last_block = None
self.ping_counter = 1
self.last_pong = msg_pong(0)
self.last_getdata = None
self.sleep_time = 0.05
self.block_announced = False
def clear_last_announcement(self):
with mininode_lock:
self.block_announced = False
self.last_inv = None
self.last_headers = None
def add_connection(self, conn):
self.connection = conn
# Request data for a list of block hashes
def get_data(self, block_hashes):
msg = msg_getdata()
for x in block_hashes:
msg.inv.append(CInv(2, x))
self.connection.send_message(msg)
def get_headers(self, locator, hashstop):
msg = msg_getheaders()
msg.locator.vHave = locator
msg.hashstop = hashstop
self.connection.send_message(msg)
def send_block_inv(self, blockhash):
msg = msg_inv()
msg.inv = [CInv(2, blockhash)]
self.connection.send_message(msg)
# Wrapper for the NodeConn's send_message function
def send_message(self, message):
self.connection.send_message(message)
def on_inv(self, conn, message):
self.last_inv = message
self.block_announced = True
def on_headers(self, conn, message):
self.last_headers = message
self.block_announced = True
def on_block(self, conn, message):
self.last_block = message.block
self.last_block.calc_sha256()
def on_getdata(self, conn, message):
self.last_getdata = message
def on_pong(self, conn, message):
self.last_pong = message
# Test whether the last announcement we received had the
# right header or the right inv
# inv and headers should be lists of block hashes
def check_last_announcement(self, headers=None, inv=None):
expect_headers = headers if headers != None else []
expect_inv = inv if inv != None else []
test_function = lambda: self.block_announced
self.sync(test_function)
with mininode_lock:
self.block_announced = False
success = True
compare_inv = []
if self.last_inv != None:
compare_inv = [x.hash for x in self.last_inv.inv]
if compare_inv != expect_inv:
success = False
hash_headers = []
if self.last_headers != None:
# treat headers as a list of block hashes
hash_headers = [ x.sha256 for x in self.last_headers.headers ]
if hash_headers != expect_headers:
success = False
self.last_inv = None
self.last_headers = None
return success
# Syncing helpers
def sync(self, test_function, timeout=60):
while timeout > 0:
with mininode_lock:
if test_function():
return
time.sleep(self.sleep_time)
timeout -= self.sleep_time
raise AssertionError("Sync failed to complete")
def sync_with_ping(self, timeout=60):
self.send_message(msg_ping(nonce=self.ping_counter))
test_function = lambda: self.last_pong.nonce == self.ping_counter
self.sync(test_function, timeout)
self.ping_counter += 1
return
def wait_for_block(self, blockhash, timeout=60):
test_function = lambda: self.last_block != None and self.last_block.sha256 == blockhash
self.sync(test_function, timeout)
return
def wait_for_getdata(self, hash_list, timeout=60):
if hash_list == []:
return
test_function = lambda: self.last_getdata != None and [x.hash for x in self.last_getdata.inv] == hash_list
self.sync(test_function, timeout)
return
def send_header_for_blocks(self, new_blocks):
headers_message = msg_headers()
headers_message.headers = [ CBlockHeader(b) for b in new_blocks ]
self.send_message(headers_message)
def send_getblocks(self, locator):
getblocks_message = msg_getblocks()
getblocks_message.locator.vHave = locator
self.send_message(getblocks_message)
# InvNode: This peer should only ever receive inv's, because it doesn't ever send a
# "sendheaders" message.
class InvNode(BaseNode):
def __init__(self):
BaseNode.__init__(self)
# TestNode: This peer is the one we use for most of the testing.
class TestNode(BaseNode):
def __init__(self):
BaseNode.__init__(self)
class SendHeadersTest(AureusTestFramework):
def setup_chain(self):
initialize_chain_clean(self.options.tmpdir, 2)
def setup_network(self):
self.nodes = []
self.nodes = start_nodes(2, self.options.tmpdir, [["-debug", "-logtimemicros=1"]]*2)
connect_nodes(self.nodes[0], 1)
# mine count blocks and return the new tip
def mine_blocks(self, count):
# Clear out last block announcement from each p2p listener
[ x.clear_last_announcement() for x in self.p2p_connections ]
self.nodes[0].generate(count)
return int(self.nodes[0].getbestblockhash(), 16)
# mine a reorg that invalidates length blocks (replacing them with
# length+1 blocks).
# Note: we clear the state of our p2p connections after the
# to-be-reorged-out blocks are mined, so that we don't break later tests.
# return the list of block hashes newly mined
def mine_reorg(self, length):
self.nodes[0].generate(length) # make sure all invalidated blocks are node0's
sync_blocks(self.nodes, wait=0.1)
[x.clear_last_announcement() for x in self.p2p_connections]
tip_height = self.nodes[1].getblockcount()
hash_to_invalidate = self.nodes[1].getblockhash(tip_height-(length-1))
self.nodes[1].invalidateblock(hash_to_invalidate)
all_hashes = self.nodes[1].generate(length+1) # Must be longer than the orig chain
sync_blocks(self.nodes, wait=0.1)
return [int(x, 16) for x in all_hashes]
def run_test(self):
# Setup the p2p connections and start up the network thread.
inv_node = InvNode()
test_node = TestNode()
self.p2p_connections = [inv_node, test_node]
connections = []
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], inv_node))
# Set nServices to 0 for test_node, so no block download will occur outside of
# direct fetching
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node, services=0))
inv_node.add_connection(connections[0])
test_node.add_connection(connections[1])
NetworkThread().start() # Start up network handling in another thread
# Test logic begins here
inv_node.wait_for_verack()
test_node.wait_for_verack()
tip = int(self.nodes[0].getbestblockhash(), 16)
# PART 1
# 1. Mine a block; expect inv announcements each time
print "Part 1: headers don't start before sendheaders message..."
for i in xrange(4):
old_tip = tip
tip = self.mine_blocks(1)
assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
assert_equal(test_node.check_last_announcement(inv=[tip]), True)
# Try a few different responses; none should affect next announcement
if i == 0:
# first request the block
test_node.get_data([tip])
test_node.wait_for_block(tip, timeout=5)
elif i == 1:
# next try requesting header and block
test_node.get_headers(locator=[old_tip], hashstop=tip)
test_node.get_data([tip])
test_node.wait_for_block(tip)
test_node.clear_last_announcement() # since we requested headers...
elif i == 2:
# this time announce own block via headers
height = self.nodes[0].getblockcount()
last_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time']
block_time = last_time + 1
new_block = create_block(tip, create_coinbase(height+1), block_time)
new_block.solve()
test_node.send_header_for_blocks([new_block])
test_node.wait_for_getdata([new_block.sha256], timeout=5)
test_node.send_message(msg_block(new_block))
test_node.sync_with_ping() # make sure this block is processed
inv_node.clear_last_announcement()
test_node.clear_last_announcement()
print "Part 1: success!"
print "Part 2: announce blocks with headers after sendheaders message..."
# PART 2
# 2. Send a sendheaders message and test that headers announcements
# commence and keep working.
test_node.send_message(msg_sendheaders())
prev_tip = int(self.nodes[0].getbestblockhash(), 16)
test_node.get_headers(locator=[prev_tip], hashstop=0L)
test_node.sync_with_ping()
# Now that we've synced headers, headers announcements should work
tip = self.mine_blocks(1)
assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
assert_equal(test_node.check_last_announcement(headers=[tip]), True)
height = self.nodes[0].getblockcount()+1
block_time += 10 # Advance far enough ahead
for i in xrange(10):
# Mine i blocks, and alternate announcing either via
# inv (of tip) or via headers. After each, new blocks
# mined by the node should successfully be announced
# with block header, even though the blocks are never requested
for j in xrange(2):
blocks = []
for b in xrange(i+1):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
if j == 0:
# Announce via inv
test_node.send_block_inv(tip)
test_node.wait_for_getdata([tip], timeout=5)
# Test that duplicate inv's won't result in duplicate
# getdata requests, or duplicate headers announcements
inv_node.send_block_inv(tip)
# Should have received a getheaders as well!
test_node.send_header_for_blocks(blocks)
test_node.wait_for_getdata([x.sha256 for x in blocks[0:-1]], timeout=5)
[ inv_node.send_block_inv(x.sha256) for x in blocks[0:-1] ]
inv_node.sync_with_ping()
else:
# Announce via headers
test_node.send_header_for_blocks(blocks)
test_node.wait_for_getdata([x.sha256 for x in blocks], timeout=5)
# Test that duplicate headers won't result in duplicate
# getdata requests (the check is further down)
inv_node.send_header_for_blocks(blocks)
inv_node.sync_with_ping()
[ test_node.send_message(msg_block(x)) for x in blocks ]
test_node.sync_with_ping()
inv_node.sync_with_ping()
# This block should not be announced to the inv node (since it also
# broadcast it)
assert_equal(inv_node.last_inv, None)
assert_equal(inv_node.last_headers, None)
tip = self.mine_blocks(1)
assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
assert_equal(test_node.check_last_announcement(headers=[tip]), True)
height += 1
block_time += 1
print "Part 2: success!"
print "Part 3: headers announcements can stop after large reorg, and resume after headers/inv from peer..."
# PART 3. Headers announcements can stop after large reorg, and resume after
# getheaders or inv from peer.
for j in xrange(2):
# First try mining a reorg that can propagate with header announcement
new_block_hashes = self.mine_reorg(length=7)
tip = new_block_hashes[-1]
assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
assert_equal(test_node.check_last_announcement(headers=new_block_hashes), True)
block_time += 8
# Mine a too-large reorg, which should be announced with a single inv
new_block_hashes = self.mine_reorg(length=8)
tip = new_block_hashes[-1]
assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
assert_equal(test_node.check_last_announcement(inv=[tip]), True)
block_time += 9
fork_point = self.nodes[0].getblock("%02x" % new_block_hashes[0])["previousblockhash"]
fork_point = int(fork_point, 16)
# Use getblocks/getdata
test_node.send_getblocks(locator = [fork_point])
assert_equal(test_node.check_last_announcement(inv=new_block_hashes), True)
test_node.get_data(new_block_hashes)
test_node.wait_for_block(new_block_hashes[-1])
for i in xrange(3):
# Mine another block, still should get only an inv
tip = self.mine_blocks(1)
assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
assert_equal(test_node.check_last_announcement(inv=[tip]), True)
if i == 0:
# Just get the data -- shouldn't cause headers announcements to resume
test_node.get_data([tip])
test_node.wait_for_block(tip)
elif i == 1:
# Send a getheaders message that shouldn't trigger headers announcements
# to resume (best header sent will be too old)
test_node.get_headers(locator=[fork_point], hashstop=new_block_hashes[1])
test_node.get_data([tip])
test_node.wait_for_block(tip)
elif i == 2:
test_node.get_data([tip])
test_node.wait_for_block(tip)
# This time, try sending either a getheaders to trigger resumption
# of headers announcements, or mine a new block and inv it, also
# triggering resumption of headers announcements.
if j == 0:
test_node.get_headers(locator=[tip], hashstop=0L)
test_node.sync_with_ping()
else:
test_node.send_block_inv(tip)
test_node.sync_with_ping()
# New blocks should now be announced with header
tip = self.mine_blocks(1)
assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
assert_equal(test_node.check_last_announcement(headers=[tip]), True)
print "Part 3: success!"
print "Part 4: Testing direct fetch behavior..."
tip = self.mine_blocks(1)
height = self.nodes[0].getblockcount() + 1
last_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time']
block_time = last_time + 1
# Create 2 blocks. Send the blocks, then send the headers.
blocks = []
for b in xrange(2):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
inv_node.send_message(msg_block(blocks[-1]))
inv_node.sync_with_ping() # Make sure blocks are processed
test_node.last_getdata = None
test_node.send_header_for_blocks(blocks)
test_node.sync_with_ping()
# should not have received any getdata messages
with mininode_lock:
assert_equal(test_node.last_getdata, None)
# This time, direct fetch should work
blocks = []
for b in xrange(3):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
test_node.send_header_for_blocks(blocks)
test_node.sync_with_ping()
test_node.wait_for_getdata([x.sha256 for x in blocks], timeout=test_node.sleep_time)
[ test_node.send_message(msg_block(x)) for x in blocks ]
test_node.sync_with_ping()
# Now announce a header that forks the last two blocks
tip = blocks[0].sha256
height -= 1
blocks = []
# Create extra blocks for later
for b in xrange(20):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
# Announcing one block on fork should not trigger direct fetch
# (less work than tip)
test_node.last_getdata = None
test_node.send_header_for_blocks(blocks[0:1])
test_node.sync_with_ping()
with mininode_lock:
assert_equal(test_node.last_getdata, None)
# Announcing one more block on fork should trigger direct fetch for
# both blocks (same work as tip)
test_node.send_header_for_blocks(blocks[1:2])
test_node.sync_with_ping()
test_node.wait_for_getdata([x.sha256 for x in blocks[0:2]], timeout=test_node.sleep_time)
# Announcing 16 more headers should trigger direct fetch for 14 more
# blocks
test_node.send_header_for_blocks(blocks[2:18])
test_node.sync_with_ping()
test_node.wait_for_getdata([x.sha256 for x in blocks[2:16]], timeout=test_node.sleep_time)
# Announcing 1 more header should not trigger any response
test_node.last_getdata = None
test_node.send_header_for_blocks(blocks[18:19])
test_node.sync_with_ping()
with mininode_lock:
assert_equal(test_node.last_getdata, None)
print "Part 4: success!"
# Finally, check that the inv node never received a getdata request,
# throughout the test
assert_equal(inv_node.last_getdata, None)
if __name__ == '__main__':
SendHeadersTest().main()
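# Usage sketch (assumption: a built daemon and the qa test framework set up
# as for the other rpc-tests in this directory):
#   python qa/rpc-tests/sendheaders.py --srcdir=../../src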
|
{
"content_hash": "47319a3530d3ff3266ca2fe7a2cd1b73",
"timestamp": "",
"source": "github",
"line_count": 511,
"max_line_length": 115,
"avg_line_length": 42.342465753424655,
"alnum_prop": 0.6046124693811527,
"repo_name": "hideoussquid/aureus-12-bitcore",
"id": "861e8d7a8daecf66e8cd390635875166aec23ab6",
"size": "21806",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qa/rpc-tests/sendheaders.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "619477"
},
{
"name": "C++",
"bytes": "4294663"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "HTML",
"bytes": "50621"
},
{
"name": "Java",
"bytes": "2099"
},
{
"name": "M4",
"bytes": "141054"
},
{
"name": "Makefile",
"bytes": "90935"
},
{
"name": "Objective-C",
"bytes": "2785"
},
{
"name": "Objective-C++",
"bytes": "7236"
},
{
"name": "Python",
"bytes": "703055"
},
{
"name": "QMake",
"bytes": "2019"
},
{
"name": "Roff",
"bytes": "3753"
},
{
"name": "Shell",
"bytes": "35693"
}
],
"symlink_target": ""
}
|
"""Filename globbing utility."""
import sys
import os
import re
import fnmatch
try:
_unicode = unicode
except NameError:
# If Python is built without Unicode support, the unicode type
# will not exist. Fake one.
class _unicode(object):
pass
__all__ = ["glob", "iglob"]
def glob(pathname):
"""Return a list of paths matching a pathname pattern.
The pattern may contain simple shell-style wildcards a la
fnmatch. However, unlike fnmatch, filenames starting with a
dot are special cases that are not matched by '*' and '?'
patterns.
"""
return list(iglob(pathname))
def iglob(pathname):
"""Return an iterator which yields the paths matching a pathname pattern.
The pattern may contain simple shell-style wildcards a la
fnmatch. However, unlike fnmatch, filenames starting with a
dot are special cases that are not matched by '*' and '?'
patterns.
"""
dirname, basename = os.path.split(pathname)
if not has_magic(pathname):
if basename:
if os.path.lexists(pathname):
yield pathname
else:
# Patterns ending with a slash should match only directories
if os.path.isdir(dirname):
yield pathname
return
if not dirname:
for name in glob1(os.curdir, basename):
yield name
return
# `os.path.split()` returns the argument itself as a dirname if it is a
# drive or UNC path. Prevent an infinite recursion if a drive or UNC path
# contains magic characters (i.e. r'\\?\C:').
if dirname != pathname and has_magic(dirname):
dirs = iglob(dirname)
else:
dirs = [dirname]
if has_magic(basename):
glob_in_dir = glob1
else:
glob_in_dir = glob0
for dirname in dirs:
for name in glob_in_dir(dirname, basename):
yield os.path.join(dirname, name)
# These 2 helper functions non-recursively glob inside a literal directory.
# They return a list of basenames. `glob1` accepts a pattern while `glob0`
# takes a literal basename (so it only has to check for its existence).
def glob1(dirname, pattern):
if not dirname:
dirname = os.curdir
if isinstance(pattern, _unicode) and not isinstance(dirname, unicode):
dirname = unicode(dirname, sys.getfilesystemencoding() or
sys.getdefaultencoding())
try:
names = os.listdir(dirname)
except os.error:
return []
if pattern[0] != '.':
names = filter(lambda x: x[0] != '.', names)
return fnmatch.filter(names, pattern)
def glob0(dirname, basename):
if basename == '':
# `os.path.split()` returns an empty basename for paths ending with a
# directory separator. 'q*x/' should match only directories.
if os.path.isdir(dirname):
return [basename]
else:
if os.path.lexists(os.path.join(dirname, basename)):
return [basename]
return []
magic_check = re.compile('[*?[]')
def has_magic(s):
return magic_check.search(s) is not None
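# Illustrative examples (hypothetical directory contents):
#   glob('*.py')     might return ['glob.py', 'setup.py'] (dotfiles excluded)
#   glob('.*rc')     matches dotfiles only when the pattern starts with '.'
#   iglob('src/*.c') yields the same matches lazily, one at a time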
|
{
"content_hash": "08b5c29831ec137ddaa1c3aa3f58d0a8",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 78,
"avg_line_length": 32.14,
"alnum_prop": 0.6113876789047915,
"repo_name": "kubaszostak/gdal-dragndrop",
"id": "b50e24ae640bfadc6fa053d688d908c1c3c76001",
"size": "3214",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "osgeo/apps/Python27/Lib/glob.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "13000"
},
{
"name": "C",
"bytes": "5038286"
},
{
"name": "C#",
"bytes": "14671"
},
{
"name": "C++",
"bytes": "2529439"
},
{
"name": "CMake",
"bytes": "90844"
},
{
"name": "Fortran",
"bytes": "8281"
},
{
"name": "HTML",
"bytes": "1285524"
},
{
"name": "Objective-C",
"bytes": "45668"
},
{
"name": "Python",
"bytes": "16415309"
}
],
"symlink_target": ""
}
|
"""
Created on Mon Dec 14 11:37:39 2014
@author: sm1fg
"""
##============================================================================
##option parameters
##============================================================================
def set_options(model, l_mpi, l_gdf=True):
"""This module assigns the logical options for the model. If adding
new models with additional logical arguments add it to the default
list as false, include an if statement for True update the
dictionary option_pars
"""
#default arguments
option_pars = {
'l_hdonly': False,# set mag field zero to check background
'l_ambB': False,# include some ambient magnetic field b_z
'l_spruit': False,# thin flux tube model to check Spruit
'l_const': False,# axial Alfven speed const Z-depend (Spruit)
'l_sqrt': False,# axial Alfven speed sqrt Z-depend (Spruit)
'l_linear': False,# axial Alfven speed linear Z-depend (Spruit)
'l_square': False,# axial Alfven speed square Z-depend (Spruit)
'l_B0_expz': False,# Z-depend of Bz(r=0) exponentials
'l_B0_quadz': False,# Z-depend of Bz(r=0) polynomials + exponential
'l_B0_rootz': False,# Z-depend of Bz(r=0) sqrt polynomials
'l_single': False,# only one flux tube
        'l_hmi': False,# construct photospheric map of Bz from HMI/SDI
'l_tube_pair': False,# pair of flux tubes
'l_multi_netwk': False,# multiple flux tubes as described in GFE (2014)
'l_multi_lanes': False,# multiple flux tubes as described in GFE (2014)
'l_multi_twist': False,# multiple flux tubes as described in GFE (2014)
'l_2D_loop': False,# make a 2D loop with sinusoidal Bz(x,0,0)
'l_mfe': False,# model Viktor's model from MFE (2014)
'l_atmos_val3c_mtw':False,# interpolate composite VAL3c+MTW atmosphere
'suffix': '.gdf'
}
#revise optional parameters depending on configuration required
if model['model'] == 'hmi_model':
option_pars['l_hmi'] = True
option_pars['l_B0_expz'] = True
option_pars['l_atmos_val3c_mtw'] = True
if model['model'] == 'mfe_setup':
option_pars['l_single'] = True
option_pars['l_mfe'] = True
option_pars['l_B0_expz'] = True
option_pars['l_atmos_val3c_mtw'] = True
if model['model'] == 'spruit':
option_pars['l_single'] = True
option_pars['l_spruit'] = True
if model['model'] == 'paper1':
option_pars['l_ambB'] = True
option_pars['l_B0_expz'] = True
option_pars['l_single'] = True
option_pars['l_atmos_val3c_mtw'] = True
if model['model'] == 'paper2a':
option_pars['l_ambB'] = True
option_pars['l_B0_expz'] = True
option_pars['l_tube_pair'] = True
option_pars['l_atmos_val3c_mtw'] = True
if model['model'] == 'paper2b':
option_pars['l_ambB'] = True
option_pars['l_B0_expz'] = True
        option_pars['l_multi_twist'] = True
option_pars['l_atmos_val3c_mtw'] = True
if model['model'] == 'paper2c':
option_pars['l_ambB'] = True
option_pars['l_B0_expz'] = True
option_pars['l_multi_netwk'] = True
option_pars['l_atmos_val3c_mtw'] = True
if model['model'] == 'paper2d':
option_pars['l_ambB'] = True
option_pars['l_B0_expz'] = True
        option_pars['l_multi_lanes'] = True
option_pars['l_atmos_val3c_mtw'] = True
if model['model'] == 'hmi_model':
option_pars['l_B0_quadz'] = True
option_pars['l_single'] = True
option_pars['l_hmi'] = True
option_pars['l_atmos_val3c_mtw'] = True
if model['model'] == 'loop_model':
option_pars['l_B0_quadz'] = True
option_pars['l_single'] = True
option_pars['l_2D_loop'] = True
option_pars['l_atmos_val3c_mtw'] = True
if l_mpi:
option_pars['l_mpi'] = True
else:
option_pars['l_mpi'] = False
return option_pars
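# Minimal usage sketch (assumption: 'model' is the only key of the model
# dict that set_options reads; l_mpi=False selects a serial run):
#   opts = set_options({'model': 'mfe_setup'}, l_mpi=False)
#   opts['l_single'] and opts['l_mfe']  # -> True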
|
{
"content_hash": "510953605aa226e420013b72b97f5d8b",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 79,
"avg_line_length": 47.064516129032256,
"alnum_prop": 0.5204477952935801,
"repo_name": "SWAT-Sheffield/pysac",
"id": "6c0c0ccb19102085e475e314ef51ec616ed75fd3",
"size": "4401",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pysac/mhs_atmosphere/parameters/options.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "251979"
}
],
"symlink_target": ""
}
|
__author__ = "TOSUKUi"
from . import steam_initialize
|
{
"content_hash": "41c998274bf90ab406d97b2351db6927",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 30,
"avg_line_length": 14,
"alnum_prop": 0.6785714285714286,
"repo_name": "TOSUKUi/yasukagiCrawler",
"id": "91b34c30df36c93e4539f31e43d1474facaa9def",
"size": "56",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/crowlexecuter/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "39861"
}
],
"symlink_target": ""
}
|
__author__ = 'cgomezfandino@gmail.com'
import datetime as dt
import v20
from configparser import ConfigParser
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn
# Create an object config
config = ConfigParser()
# Read the config
config.read("../API_Connection_Oanda/pyalgo.cfg")
account_id = config['oanda_v20']['account_id']
access_token = config['oanda_v20']['access_token']
class MomentumStream(object):
    def __init__(self, momentum, instrument, units, *args, **kwargs):
self.ticks = 0
self.position = 0
self.data = pd.DataFrame()
self.momentum = momentum
self.account_id = account_id
        self.instrument = instrument
self.units = units
self.ctx = v20.Context(
'api-fxpractice.oanda.com',
443,
True,
application='sample_code',
token= access_token,
datetime_format= 'RFC3339'
)
self.ctx_stream = v20.Context(
'stream-fxpractice.oanda.com',
443,
True,
application='sample_code',
token = access_token,
datetime_format= 'RFC3339'
)
def create_order(self, units):
''' Places orders with Oanda'''
request = self.ctx.order.market(
self.account_id,
            instrument = self.instrument,
units = units,
)
order = request.get('orderFillTransaction')
print('\n\n', order.dict(), '\n')
def start(self):
''' Starts the streaming of data and the triggering of action'''
response = self.ctx_stream.pricing.stream(
self.account_id,
snapshot=True,
            instruments=self.instrument
)
for msg_type, msg in response.parts():
if msg_type == 'pricing.Price':
self.on_success(msg.time, msg.asks[0].price)
if self.ticks == 255:
if self.position == 1:
self.create_order(-self.units)
elif self.position == -1:
self.create_order(self.units)
return 'Completed.'
def on_success(self, time, ask):
''' Takes action when new tick data arrives.'''
self.ticks += 1
        print(self.ticks)
self.data = self.data.append(
pd.DataFrame({'time': [time], 'ask': [ask]})
)
self.data.index = pd.DatetimeIndex(self.data['time'])
resam = self.data.resample('1min').last()
# resam = resam.ffill()
resam['returns'] = np.log(resam['ask'] / resam['ask'].shift(1))
resam['position'] = np.sign(resam['returns'].rolling(self.momentum).mean())
# print(resam[['ask', 'returns', 'position']].tail())
if resam['position'].ix[-1] == 1:
if self.position == 0:
self.create_order(self.units)
elif self.position == -1:
self.create_order(self.units * 2)
self.position = 1
elif resam['position'].ix[-1] == -1:
if self.position == 0:
self.create_order(-self.units)
elif self.position == 1:
self.create_order(-self.units * 2)
self.position = -1
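# Standalone sketch of the signal computed in on_success() above (assumption:
# `ask` is a pandas Series of 1-minute ask prices and `momentum` is the
# rolling window length in bars).
def momentum_signal(ask, momentum=6):
    # Log returns of consecutive bars, then the sign of their rolling mean.
    returns = np.log(ask / ask.shift(1))
    return np.sign(returns.rolling(momentum).mean())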
mtStream = MomentumStream(momentum=6, instrument='EUR_USD', units=50000)
mtStream.start()
|
{
"content_hash": "4ec97ac2ac803ce1fddbe86f7580dc7e",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 83,
"avg_line_length": 30.871559633027523,
"alnum_prop": 0.5521545319465082,
"repo_name": "cgomezfandino/Project_PTX",
"id": "714418981c11ac301b7a10e496e7ccc47a04e342",
"size": "3365",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Stream/momt_stream_Oanda.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "111017"
}
],
"symlink_target": ""
}
|
def givetimes(schedarr, times):
import datetime
now_time = datetime.datetime.now() #Get time
x = 0 #Counter variable
hn = now_time.hour
mn = now_time.minute
    commin = (hn * 60) + mn #Minutes elapsed since the start of the day
    try: #Catch an IndexError if we run past the end of the schedule array
while int(schedarr[x][0])<hn :
x += 1
while(int(schedarr[x][0]) * 60) + int(schedarr[x][1]) <= commin :
x += 1
except:
print "--:--"
else:
try:
while times>=0:
print str(schedarr[x][0])+":"+str(schedarr[x][1])
x += 1
times -= 1
except:
print "--:--"
|
{
"content_hash": "df12c6de23bf774c5fd79b801ca0717d",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 68,
"avg_line_length": 24.82608695652174,
"alnum_prop": 0.5989492119089317,
"repo_name": "CRImier/RSEasy",
"id": "385a3f6fd5221cd5b4b4586ef6627f30699006bd",
"size": "586",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "givetime.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6863"
}
],
"symlink_target": ""
}
|
import wx
import armid
from BasePanel import BasePanel
from Borg import Borg
from ThreatEnvironmentPanel import ThreatEnvironmentPanel
class ThreatPanel(BasePanel):
def __init__(self,parent):
BasePanel.__init__(self,parent,armid.THREAT_ID)
b = Borg()
self.dbProxy = b.dbProxy
self.theLikelihoods = self.dbProxy.getDimensionNames('likelihood')
def buildControls(self,isCreate,isUpdateable=True):
mainSizer = wx.BoxSizer(wx.VERTICAL)
mainSizer.Add(self.buildTextSizer('Name',(87,30),armid.THREAT_TEXTNAME_ID),0,wx.EXPAND)
mainSizer.Add(self.buildTagCtrlSizer((87,30),armid.THREAT_TAGS_ID),0,wx.EXPAND)
threatTypes = self.dbProxy.getDimensionNames('threat_type')
mainSizer.Add(self.buildComboSizerList('Type',(87,30),armid.THREAT_THREATTYPE_ID,threatTypes),0,wx.EXPAND)
mainSizer.Add(self.buildMLTextSizer('Method',(87,60),armid.THREAT_TEXTMETHOD_ID),0,wx.EXPAND)
mainSizer.Add(ThreatEnvironmentPanel(self,self.dbProxy),1,wx.EXPAND)
if (isUpdateable):
mainSizer.Add(self.buildCommitButtonSizer(armid.THREAT_BUTTONCOMMIT_ID,isCreate),0,wx.ALIGN_CENTRE)
self.SetSizer(mainSizer)
def loadControls(self,threat,isReadOnly = False):
nameCtrl = self.FindWindowById(armid.THREAT_TEXTNAME_ID)
tagsCtrl = self.FindWindowById(armid.THREAT_TAGS_ID)
typeCtrl = self.FindWindowById(armid.THREAT_THREATTYPE_ID)
methodCtrl = self.FindWindowById(armid.THREAT_TEXTMETHOD_ID)
environmentCtrl = self.FindWindowById(armid.THREAT_PANELENVIRONMENT_ID)
nameCtrl.SetValue(threat.name())
tagsCtrl.set(threat.tags())
typeCtrl.SetValue(threat.type())
methodCtrl.SetValue(threat.method())
environmentCtrl.loadControls(threat)
|
{
"content_hash": "6b2ad4533ddce210ca165c21b5d12f1b",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 110,
"avg_line_length": 43.794871794871796,
"alnum_prop": 0.7587822014051522,
"repo_name": "RobinQuetin/CAIRIS-web",
"id": "ce298eadaad14db8688cb88f1c34c3f67e17e990",
"size": "2507",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "cairis/cairis/ThreatPanel.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "11265"
},
{
"name": "Mako",
"bytes": "13226"
},
{
"name": "Python",
"bytes": "3313365"
},
{
"name": "Shell",
"bytes": "19461"
},
{
"name": "XSLT",
"bytes": "35522"
}
],
"symlink_target": ""
}
|
from oslo.config import cfg
from examples.webhooks import version
api_opts = [
cfg.StrOpt('host', default='0.0.0.0', help='Simple-app API server host'),
cfg.IntOpt('port', default=8988, help='Simple-app API server port')
]
CONF = cfg.CONF
CONF.register_opts(api_opts, group='api')
def parse_args(args=None, usage=None, default_config_files=None):
CONF(args=args,
project='mistral-demo',
version=version,
usage=usage,
default_config_files=default_config_files)
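# Usage sketch (hypothetical entry point): after parse_args() the registered
# options are available on the 'api' group.
#   parse_args()
#   print(CONF.api.host, CONF.api.port)  # '0.0.0.0' 8988 by default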
|
{
"content_hash": "b4e8cbabdf9cc9956a97fe5c7d8a99e1",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 77,
"avg_line_length": 25.7,
"alnum_prop": 0.6731517509727627,
"repo_name": "nmakhotkin/mistral-extra",
"id": "eeb3e78caed1daa8060909cb66abf7743cbee769",
"size": "1148",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/webhooks/config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('autofilter07.xlsx')
self.set_text_file('autofilter_data.txt')
def test_create_file(self):
"""
Test the creation of a simple XlsxWriter file with an autofilter.
Test autofilters where column filter ids are relative to autofilter
range.
"""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
# Set the autofilter.
worksheet.autofilter('D3:G53')
# Add filter criteria.
worksheet.filter_column('D', 'region == East')
# Open a text file with autofilter example data.
textfile = open(self.txt_filename)
# Read the headers from the first line of the input file.
headers = textfile.readline().strip("\n").split()
# Write out the headers.
worksheet.write_row('D3', headers)
# Start writing data after the headers.
row = 3
# Read the rest of the text file and write it to the worksheet.
for line in textfile:
# Split the input data based on whitespace.
data = line.strip("\n").split()
# Convert the number data from the text file.
for i, item in enumerate(data):
try:
data[i] = float(item)
except ValueError:
pass
# Get some of the field data.
region = data[0]
# Check for rows that match the filter.
if region == 'East':
# Row matches the filter, no further action required.
pass
else:
# We need to hide rows that don't match the filter.
worksheet.set_row(row, options={'hidden': True})
# Write out the row data.
worksheet.write_row(row, 3, data)
# Move on to the next worksheet row.
row += 1
textfile.close()
workbook.close()
self.assertExcelEqual()
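        # Note: XlsxWriter stores the filter criteria but does not evaluate
        # them, so (as in the loop above) rows that fail the filter must be
        # hidden explicitly with set_row(row, options={'hidden': True}).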
|
{
"content_hash": "61da58603cf47168830c255741d47352",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 75,
"avg_line_length": 29.2987012987013,
"alnum_prop": 0.5695921985815603,
"repo_name": "jmcnamara/XlsxWriter",
"id": "fcc9ac6d1ad4c0ac09571db8d3737eec45f5c2d2",
"size": "2469",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "xlsxwriter/test/comparison/test_autofilter07.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5113"
},
{
"name": "CSS",
"bytes": "16544"
},
{
"name": "HTML",
"bytes": "13100"
},
{
"name": "Makefile",
"bytes": "7748"
},
{
"name": "Perl",
"bytes": "3503"
},
{
"name": "Python",
"bytes": "2807230"
},
{
"name": "Shell",
"bytes": "7964"
}
],
"symlink_target": ""
}
|
from typing import Dict, List, Set, Callable, Optional
import numpy as np
import tensorflow as tf
from typeguard import check_argument_types
from neuralmonkey.decoders.sequence_regressor import SequenceRegressor
from neuralmonkey.model.model_part import ModelPart
from neuralmonkey.runners.base_runner import (
BaseRunner, Executable, ExecutionResult, NextExecute)
# pylint: disable=invalid-name
Postprocessor = Callable[[List[float]], List[float]]
# pylint: enable=invalid-name
class RegressionRunExecutable(Executable):
def __init__(self,
all_coders: Set[ModelPart],
fetches: Dict[str, tf.Tensor],
postprocess: Optional[Postprocessor]) -> None:
self._all_coders = all_coders
self._fetches = fetches
self._postprocess = postprocess
self.result = None # type: Optional[ExecutionResult]
def next_to_execute(self) -> NextExecute:
"""Get the feedables and tensors to run."""
return self._all_coders, self._fetches, []
def collect_results(self, results: List[Dict]) -> None:
predictions_sum = np.zeros_like(results[0]["prediction"])
mse_loss = 0.
for sess_result in results:
if "mse" in sess_result:
mse_loss += sess_result["mse"]
predictions_sum += sess_result["prediction"]
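        # Average the summed predictions over the number of sessions
        # (simple ensembling across sessions).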
predictions = predictions_sum / len(results)
if self._postprocess is not None:
predictions = self._postprocess(predictions)
self.result = ExecutionResult(
outputs=predictions.tolist(),
losses=[mse_loss],
scalar_summaries=None,
histogram_summaries=None,
image_summaries=None)
class RegressionRunner(BaseRunner[SequenceRegressor]):
"""A runnner that takes the predictions of a sequence regressor."""
def __init__(self,
output_series: str,
decoder: SequenceRegressor,
postprocess: Postprocessor = None) -> None:
check_argument_types()
BaseRunner[SequenceRegressor].__init__(self, output_series, decoder)
self._postprocess = postprocess
# pylint: disable=unused-argument
def get_executable(self,
compute_losses: bool,
summaries: bool,
num_sessions: int) -> Executable:
fetches = {"prediction": self._decoder.predictions}
if compute_losses:
fetches["mse"] = self._decoder.cost
return RegressionRunExecutable(
self.all_coders, fetches, self._postprocess)
# pylint: enable=unused-argument
@property
def loss_names(self) -> List[str]:
return ["mse"]
|
{
"content_hash": "a98a5216769cb054c78d409147820d5f",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 76,
"avg_line_length": 33,
"alnum_prop": 0.6254107338444688,
"repo_name": "juliakreutzer/bandit-neuralmonkey",
"id": "4da6df67c3ccbf23b94a19f351597b4059275978",
"size": "2739",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neuralmonkey/runners/regression_runner.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "13780"
},
{
"name": "HTML",
"bytes": "3116"
},
{
"name": "JavaScript",
"bytes": "2070"
},
{
"name": "Makefile",
"bytes": "2564"
},
{
"name": "Mathematica",
"bytes": "1874"
},
{
"name": "Perl",
"bytes": "45129"
},
{
"name": "Python",
"bytes": "723799"
},
{
"name": "Shell",
"bytes": "4358"
}
],
"symlink_target": ""
}
|
from django.conf import settings
from django.contrib.auth import models
from django.contrib.auth.decorators import login_required, permission_required
# Trigger CustomUser perm creation:
from django.contrib.auth.tests.custom_user import CustomUser
from django.contrib.auth.tests.test_views import AuthViewsTestCase
from django.contrib.auth.tests.utils import skipIfCustomUser
from django.core.exceptions import PermissionDenied
from django.http import HttpResponse
from django.test import TestCase
from django.test.client import RequestFactory
@skipIfCustomUser
class LoginRequiredTestCase(AuthViewsTestCase):
"""
Tests the login_required decorators
"""
urls = 'django.contrib.auth.tests.urls'
def testCallable(self):
"""
Check that login_required is assignable to callable objects.
"""
class CallableView(object):
def __call__(self, *args, **kwargs):
pass
login_required(CallableView())
def testView(self):
"""
Check that login_required is assignable to normal views.
"""
def normal_view(request):
pass
login_required(normal_view)
def testLoginRequired(self, view_url='/login_required/', login_url=None):
"""
Check that login_required works on a simple view wrapped in a
login_required decorator.
"""
if login_url is None:
login_url = settings.LOGIN_URL
response = self.client.get(view_url)
self.assertEqual(response.status_code, 302)
self.assertTrue(login_url in response.url)
self.login()
response = self.client.get(view_url)
self.assertEqual(response.status_code, 200)
def testLoginRequiredNextUrl(self):
"""
Check that login_required works on a simple view wrapped in a
login_required decorator with a login_url set.
"""
self.testLoginRequired(view_url='/login_required_login_url/',
login_url='/somewhere/')
class PermissionsRequiredDecoratorTest(TestCase):
"""
Tests for the permission_required decorator
"""
def setUp(self):
self.user = models.User.objects.create(username='joe', password='qwerty')
self.factory = RequestFactory()
# Add permissions auth.add_customuser and auth.change_customuser
perms = models.Permission.objects.filter(codename__in=('add_customuser', 'change_customuser'))
self.user.user_permissions.add(*perms)
def test_many_permissions_pass(self):
@permission_required(['auth.add_customuser', 'auth.change_customuser'])
def a_view(request):
return HttpResponse()
request = self.factory.get('/rand')
request.user = self.user
resp = a_view(request)
self.assertEqual(resp.status_code, 200)
def test_single_permission_pass(self):
@permission_required('auth.add_customuser')
def a_view(request):
return HttpResponse()
request = self.factory.get('/rand')
request.user = self.user
resp = a_view(request)
self.assertEqual(resp.status_code, 200)
def test_permissioned_denied_redirect(self):
@permission_required(['auth.add_customuser', 'auth.change_customuser', 'non-existant-permission'])
def a_view(request):
return HttpResponse()
request = self.factory.get('/rand')
request.user = self.user
resp = a_view(request)
self.assertEqual(resp.status_code, 302)
def test_permissioned_denied_exception_raised(self):
@permission_required(['auth.add_customuser', 'auth.change_customuser', 'non-existant-permission'], raise_exception=True)
def a_view(request):
return HttpResponse()
request = self.factory.get('/rand')
request.user = self.user
self.assertRaises(PermissionDenied, a_view, request)
|
{
"content_hash": "676be35a8019f4e70e8d5379572829d6",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 128,
"avg_line_length": 36.055045871559635,
"alnum_prop": 0.660559796437659,
"repo_name": "ericholscher/django",
"id": "35a2203ba9b62528c3b5fba19b4b9c9ffb30131d",
"size": "3930",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django/contrib/auth/tests/test_decorators.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "51177"
},
{
"name": "JavaScript",
"bytes": "102377"
},
{
"name": "Python",
"bytes": "9011891"
},
{
"name": "Shell",
"bytes": "12137"
}
],
"symlink_target": ""
}
|
import psycopg2
from optparse import OptionParser
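# Map of Zabbix tables to the partition granularity ('daily' or 'monthly')
# passed to the create_zbx_partitions() stored procedure below.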
tables = {
'history':'daily',
'history_sync':'daily',
'history_uint':'daily',
'history_uint_sync':'daily',
'history_str':'daily',
'history_str_sync':'daily',
'history_log':'daily',
'history_text':'daily',
'trends':'monthly',
'trends_uint':'monthly',
'acknowledges':'monthly',
'alerts':'monthly',
'auditlog':'monthly',
'events':'monthly',
'service_alarms':'monthly',
}
# Change these settings
db_user = 'zabbix'
db_pw = 'zabbix'
db = 'zabbix'
db_host = 'localhost'
#####
parser = OptionParser()
parser.add_option("-i", "--init", dest="init",help="partitioning init",action="store_true", default=False)
(options, args) = parser.parse_args()
if options.init:
init = 1
else:
init = 0
db_connection = psycopg2.connect(database=db, user=db_user, password=db_pw,host=db_host)
db_cursor = db_connection.cursor()
for table_key, table_value in tables.items():  # .items() for Python 3 compatibility
db_cursor.execute('''select create_zbx_partitions(%s,%s,%s)''',[table_key,table_value,init])
db_connection.commit()
db_cursor.close()
db_connection.close()
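# Typical invocation (a sketch): run once with --init to create the initial
# partitions, then on a schedule without it:
#
#   python zabbix-partitioning.py --init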
|
{
"content_hash": "f80d87e5b34283b4408f1c225f3194db",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 106,
"avg_line_length": 22.836734693877553,
"alnum_prop": 0.6657730116175157,
"repo_name": "lesovsky/uber-scripts",
"id": "3ab5e2bb4f738700520514a9707a3aebe2d067f6",
"size": "1137",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "service-configs/zabbix/partitioning/zabbix-partitioning.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "1312"
},
{
"name": "PLpgSQL",
"bytes": "9087"
},
{
"name": "Python",
"bytes": "1137"
},
{
"name": "Shell",
"bytes": "91533"
},
{
"name": "Smalltalk",
"bytes": "62122"
}
],
"symlink_target": ""
}
|
import unittest
from test import support
from test import test_urllib
import os
import io
import socket
import array
import sys
import urllib.request
# The proxy bypass method imported below has logic specific to the OSX
# proxy config data structure but is testable on all platforms.
from urllib.request import (Request, OpenerDirector, HTTPBasicAuthHandler,
HTTPPasswordMgrWithPriorAuth, _parse_proxy,
_proxy_bypass_macosx_sysconf)
from urllib.parse import urlparse
import urllib.error
import http.client
# XXX
# Request
# CacheFTPHandler (hard to write)
# parse_keqv_list, parse_http_list, HTTPDigestAuthHandler
class TrivialTests(unittest.TestCase):
def test___all__(self):
# Verify which names are exposed
for module in 'request', 'response', 'parse', 'error', 'robotparser':
context = {}
exec('from urllib.%s import *' % module, context)
del context['__builtins__']
if module == 'request' and (os.name == 'nt' or os.name == 'uwp_os'):
u, p = context.pop('url2pathname'), context.pop('pathname2url')
self.assertEqual(u.__module__, 'nturl2path')
self.assertEqual(p.__module__, 'nturl2path')
for k, v in context.items():
self.assertEqual(v.__module__, 'urllib.%s' % module,
"%r is exposed in 'urllib.%s' but defined in %r" %
(k, module, v.__module__))
def test_trivial(self):
# A couple trivial tests
self.assertRaises(ValueError, urllib.request.urlopen, 'bogus url')
# XXX Name hacking to get this to work on Windows.
fname = os.path.abspath(urllib.request.__file__).replace(os.sep, '/')
if os.name == 'nt' or os.name == 'uwp_os':
file_url = "file:///%s" % fname
else:
file_url = "file://%s" % fname
f = urllib.request.urlopen(file_url)
f.read()
f.close()
def test_parse_http_list(self):
tests = [
('a,b,c', ['a', 'b', 'c']),
('path"o,l"og"i"cal, example', ['path"o,l"og"i"cal', 'example']),
('a, b, "c", "d", "e,f", g, h',
['a', 'b', '"c"', '"d"', '"e,f"', 'g', 'h']),
('a="b\\"c", d="e\\,f", g="h\\\\i"',
['a="b"c"', 'd="e,f"', 'g="h\\i"'])]
for string, list in tests:
self.assertEqual(urllib.request.parse_http_list(string), list)
def test_URLError_reasonstr(self):
err = urllib.error.URLError('reason')
self.assertIn(err.reason, str(err))
class RequestHdrsTests(unittest.TestCase):
def test_request_headers_dict(self):
"""
The Request.headers dictionary is not a documented interface. It
        should stay that way, because the complete set of headers is only
accessible through the .get_header(), .has_header(), .header_items()
interface. However, .headers pre-dates those methods, and so real code
will be using the dictionary.
The introduction in 2.4 of those methods was a mistake for the same
reason: code that previously saw all (urllib2 user)-provided headers in
.headers now sees only a subset.
"""
url = "http://example.com"
self.assertEqual(Request(url,
headers={"Spam-eggs": "blah"}
).headers["Spam-eggs"], "blah")
self.assertEqual(Request(url,
headers={"spam-EggS": "blah"}
).headers["Spam-eggs"], "blah")
def test_request_headers_methods(self):
"""
Note the case normalization of header names here, to
.capitalize()-case. This should be preserved for
backwards-compatibility. (In the HTTP case, normalization to
.title()-case is done by urllib2 before sending headers to
http.client).
Note that e.g. r.has_header("spam-EggS") is currently False, and
r.get_header("spam-EggS") returns None, but that could be changed in
future.
Method r.remove_header should remove items both from r.headers and
r.unredirected_hdrs dictionaries
"""
url = "http://example.com"
req = Request(url, headers={"Spam-eggs": "blah"})
self.assertTrue(req.has_header("Spam-eggs"))
self.assertEqual(req.header_items(), [('Spam-eggs', 'blah')])
req.add_header("Foo-Bar", "baz")
self.assertEqual(sorted(req.header_items()),
[('Foo-bar', 'baz'), ('Spam-eggs', 'blah')])
self.assertFalse(req.has_header("Not-there"))
self.assertIsNone(req.get_header("Not-there"))
self.assertEqual(req.get_header("Not-there", "default"), "default")
req.remove_header("Spam-eggs")
self.assertFalse(req.has_header("Spam-eggs"))
req.add_unredirected_header("Unredirected-spam", "Eggs")
self.assertTrue(req.has_header("Unredirected-spam"))
req.remove_header("Unredirected-spam")
self.assertFalse(req.has_header("Unredirected-spam"))
def test_password_manager(self):
mgr = urllib.request.HTTPPasswordMgr()
add = mgr.add_password
find_user_pass = mgr.find_user_password
add("Some Realm", "http://example.com/", "joe", "password")
add("Some Realm", "http://example.com/ni", "ni", "ni")
add("c", "http://example.com/foo", "foo", "ni")
add("c", "http://example.com/bar", "bar", "nini")
add("b", "http://example.com/", "first", "blah")
add("b", "http://example.com/", "second", "spam")
add("a", "http://example.com", "1", "a")
add("Some Realm", "http://c.example.com:3128", "3", "c")
add("Some Realm", "d.example.com", "4", "d")
add("Some Realm", "e.example.com:3128", "5", "e")
self.assertEqual(find_user_pass("Some Realm", "example.com"),
('joe', 'password'))
#self.assertEqual(find_user_pass("Some Realm", "http://example.com/ni"),
# ('ni', 'ni'))
self.assertEqual(find_user_pass("Some Realm", "http://example.com"),
('joe', 'password'))
self.assertEqual(find_user_pass("Some Realm", "http://example.com/"),
('joe', 'password'))
self.assertEqual(
find_user_pass("Some Realm", "http://example.com/spam"),
('joe', 'password'))
self.assertEqual(
find_user_pass("Some Realm", "http://example.com/spam/spam"),
('joe', 'password'))
self.assertEqual(find_user_pass("c", "http://example.com/foo"),
('foo', 'ni'))
self.assertEqual(find_user_pass("c", "http://example.com/bar"),
('bar', 'nini'))
self.assertEqual(find_user_pass("b", "http://example.com/"),
('second', 'spam'))
# No special relationship between a.example.com and example.com:
self.assertEqual(find_user_pass("a", "http://example.com/"),
('1', 'a'))
self.assertEqual(find_user_pass("a", "http://a.example.com/"),
(None, None))
# Ports:
self.assertEqual(find_user_pass("Some Realm", "c.example.com"),
(None, None))
self.assertEqual(find_user_pass("Some Realm", "c.example.com:3128"),
('3', 'c'))
self.assertEqual(
find_user_pass("Some Realm", "http://c.example.com:3128"),
('3', 'c'))
self.assertEqual(find_user_pass("Some Realm", "d.example.com"),
('4', 'd'))
self.assertEqual(find_user_pass("Some Realm", "e.example.com:3128"),
('5', 'e'))
def test_password_manager_default_port(self):
"""
The point to note here is that we can't guess the default port if
there's no scheme. This applies to both add_password and
find_user_password.
"""
mgr = urllib.request.HTTPPasswordMgr()
add = mgr.add_password
find_user_pass = mgr.find_user_password
add("f", "http://g.example.com:80", "10", "j")
add("g", "http://h.example.com", "11", "k")
add("h", "i.example.com:80", "12", "l")
add("i", "j.example.com", "13", "m")
self.assertEqual(find_user_pass("f", "g.example.com:100"),
(None, None))
self.assertEqual(find_user_pass("f", "g.example.com:80"),
('10', 'j'))
self.assertEqual(find_user_pass("f", "g.example.com"),
(None, None))
self.assertEqual(find_user_pass("f", "http://g.example.com:100"),
(None, None))
self.assertEqual(find_user_pass("f", "http://g.example.com:80"),
('10', 'j'))
self.assertEqual(find_user_pass("f", "http://g.example.com"),
('10', 'j'))
self.assertEqual(find_user_pass("g", "h.example.com"), ('11', 'k'))
self.assertEqual(find_user_pass("g", "h.example.com:80"), ('11', 'k'))
self.assertEqual(find_user_pass("g", "http://h.example.com:80"),
('11', 'k'))
self.assertEqual(find_user_pass("h", "i.example.com"), (None, None))
self.assertEqual(find_user_pass("h", "i.example.com:80"), ('12', 'l'))
self.assertEqual(find_user_pass("h", "http://i.example.com:80"),
('12', 'l'))
self.assertEqual(find_user_pass("i", "j.example.com"), ('13', 'm'))
self.assertEqual(find_user_pass("i", "j.example.com:80"),
(None, None))
self.assertEqual(find_user_pass("i", "http://j.example.com"),
('13', 'm'))
self.assertEqual(find_user_pass("i", "http://j.example.com:80"),
(None, None))
class MockOpener:
addheaders = []
def open(self, req, data=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
self.req, self.data, self.timeout = req, data, timeout
def error(self, proto, *args):
self.proto, self.args = proto, args
class MockFile:
def read(self, count=None):
pass
def readline(self, count=None):
pass
def close(self):
pass
class MockHeaders(dict):
def getheaders(self, name):
return list(self.values())
class MockResponse(io.StringIO):
def __init__(self, code, msg, headers, data, url=None):
io.StringIO.__init__(self, data)
self.code, self.msg, self.headers, self.url = code, msg, headers, url
def info(self):
return self.headers
def geturl(self):
return self.url
class MockCookieJar:
def add_cookie_header(self, request):
self.ach_req = request
def extract_cookies(self, response, request):
self.ec_req, self.ec_r = request, response
class FakeMethod:
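    # Records every call made through a MockHandler and forwards it to the
    # handler's handle() method together with the method name and action.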
def __init__(self, meth_name, action, handle):
self.meth_name = meth_name
self.handle = handle
self.action = action
def __call__(self, *args):
return self.handle(self.meth_name, self.action, *args)
class MockHTTPResponse(io.IOBase):
def __init__(self, fp, msg, status, reason):
self.fp = fp
self.msg = msg
self.status = status
self.reason = reason
self.code = 200
def read(self):
return ''
def info(self):
return {}
def geturl(self):
return self.url
class MockHTTPClass:
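    # Minimal stand-in for http.client.HTTPConnection: records the request
    # method, selector, headers and body for later assertions.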
def __init__(self):
self.level = 0
self.req_headers = []
self.data = None
self.raise_on_endheaders = False
self.sock = None
self._tunnel_headers = {}
def __call__(self, host, timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
self.host = host
self.timeout = timeout
return self
def set_debuglevel(self, level):
self.level = level
def set_tunnel(self, host, port=None, headers=None):
self._tunnel_host = host
self._tunnel_port = port
if headers:
self._tunnel_headers = headers
else:
self._tunnel_headers.clear()
def request(self, method, url, body=None, headers=None):
self.method = method
self.selector = url
if headers is not None:
self.req_headers += headers.items()
self.req_headers.sort()
if body:
self.data = body
if self.raise_on_endheaders:
raise OSError()
def getresponse(self):
return MockHTTPResponse(MockFile(), {}, 200, "OK")
def close(self):
pass
class MockHandler:
# useful for testing handler machinery
# see add_ordered_mock_handlers() docstring
handler_order = 500
def __init__(self, methods):
self._define_methods(methods)
def _define_methods(self, methods):
for spec in methods:
if len(spec) == 2:
name, action = spec
else:
name, action = spec, None
meth = FakeMethod(name, action, self.handle)
setattr(self.__class__, name, meth)
def handle(self, fn_name, action, *args, **kwds):
self.parent.calls.append((self, fn_name, args, kwds))
if action is None:
return None
elif action == "return self":
return self
elif action == "return response":
res = MockResponse(200, "OK", {}, "")
return res
elif action == "return request":
return Request("http://blah/")
elif action.startswith("error"):
code = action[action.rfind(" ")+1:]
try:
code = int(code)
except ValueError:
pass
res = MockResponse(200, "OK", {}, "")
return self.parent.error("http", args[0], res, code, "", {})
elif action == "raise":
raise urllib.error.URLError("blah")
assert False
def close(self):
pass
def add_parent(self, parent):
self.parent = parent
self.parent.calls = []
def __lt__(self, other):
if not hasattr(other, "handler_order"):
# No handler_order, leave in original order. Yuck.
return True
return self.handler_order < other.handler_order
def add_ordered_mock_handlers(opener, meth_spec):
"""Create MockHandlers and add them to an OpenerDirector.
meth_spec: list of lists of tuples and strings defining methods to define
on handlers. eg:
[["http_error", "ftp_open"], ["http_open"]]
defines methods .http_error() and .ftp_open() on one handler, and
.http_open() on another. These methods just record their arguments and
return None. Using a tuple instead of a string causes the method to
perform some action (see MockHandler.handle()), eg:
[["http_error"], [("http_open", "return request")]]
defines .http_error() on one handler (which simply returns None), and
.http_open() on another handler, which returns a Request object.
"""
handlers = []
count = 0
for meths in meth_spec:
class MockHandlerSubclass(MockHandler):
pass
h = MockHandlerSubclass(meths)
h.handler_order += count
h.add_parent(opener)
count = count + 1
handlers.append(h)
opener.add_handler(h)
return handlers
def build_test_opener(*handler_instances):
opener = OpenerDirector()
for h in handler_instances:
opener.add_handler(h)
return opener
class MockHTTPHandler(urllib.request.BaseHandler):
# useful for testing redirections and auth
# sends supplied headers and code as first response
# sends 200 OK as second response
def __init__(self, code, headers):
self.code = code
self.headers = headers
self.reset()
def reset(self):
self._count = 0
self.requests = []
def http_open(self, req):
import email, http.client, copy
self.requests.append(copy.deepcopy(req))
if self._count == 0:
self._count = self._count + 1
name = http.client.responses[self.code]
msg = email.message_from_string(self.headers)
return self.parent.error(
"http", req, MockFile(), self.code, name, msg)
else:
self.req = req
msg = email.message_from_string("\r\n\r\n")
return MockResponse(200, "OK", msg, "", req.get_full_url())
class MockHTTPSHandler(urllib.request.AbstractHTTPHandler):
# Useful for testing the Proxy-Authorization request by verifying the
# properties of httpcon
def __init__(self):
urllib.request.AbstractHTTPHandler.__init__(self)
self.httpconn = MockHTTPClass()
def https_open(self, req):
return self.do_open(self.httpconn, req)
class MockHTTPHandlerCheckAuth(urllib.request.BaseHandler):
# useful for testing auth
# sends supplied code response
# checks if auth header is specified in request
def __init__(self, code):
self.code = code
self.has_auth_header = False
def reset(self):
self.has_auth_header = False
def http_open(self, req):
if req.has_header('Authorization'):
self.has_auth_header = True
name = http.client.responses[self.code]
return MockResponse(self.code, name, MockFile(), "", req.get_full_url())
class MockPasswordManager:
def add_password(self, realm, uri, user, password):
self.realm = realm
self.url = uri
self.user = user
self.password = password
def find_user_password(self, realm, authuri):
self.target_realm = realm
self.target_url = authuri
return self.user, self.password
class OpenerDirectorTests(unittest.TestCase):
def test_add_non_handler(self):
class NonHandler(object):
pass
self.assertRaises(TypeError,
OpenerDirector().add_handler, NonHandler())
def test_badly_named_methods(self):
# test work-around for three methods that accidentally follow the
# naming conventions for handler methods
# (*_open() / *_request() / *_response())
# These used to call the accidentally-named methods, causing a
# TypeError in real code; here, returning self from these mock
# methods would either cause no exception, or AttributeError.
from urllib.error import URLError
o = OpenerDirector()
meth_spec = [
[("do_open", "return self"), ("proxy_open", "return self")],
[("redirect_request", "return self")],
]
add_ordered_mock_handlers(o, meth_spec)
o.add_handler(urllib.request.UnknownHandler())
for scheme in "do", "proxy", "redirect":
self.assertRaises(URLError, o.open, scheme+"://example.com/")
def test_handled(self):
# handler returning non-None means no more handlers will be called
o = OpenerDirector()
meth_spec = [
["http_open", "ftp_open", "http_error_302"],
["ftp_open"],
[("http_open", "return self")],
[("http_open", "return self")],
]
handlers = add_ordered_mock_handlers(o, meth_spec)
req = Request("http://example.com/")
r = o.open(req)
# Second .http_open() gets called, third doesn't, since second returned
# non-None. Handlers without .http_open() never get any methods called
# on them.
# In fact, second mock handler defining .http_open() returns self
# (instead of response), which becomes the OpenerDirector's return
# value.
self.assertEqual(r, handlers[2])
calls = [(handlers[0], "http_open"), (handlers[2], "http_open")]
for expected, got in zip(calls, o.calls):
handler, name, args, kwds = got
self.assertEqual((handler, name), expected)
self.assertEqual(args, (req,))
def test_handler_order(self):
o = OpenerDirector()
handlers = []
for meths, handler_order in [([("http_open", "return self")], 500),
(["http_open"], 0)]:
class MockHandlerSubclass(MockHandler):
pass
h = MockHandlerSubclass(meths)
h.handler_order = handler_order
handlers.append(h)
o.add_handler(h)
o.open("http://example.com/")
# handlers called in reverse order, thanks to their sort order
self.assertEqual(o.calls[0][0], handlers[1])
self.assertEqual(o.calls[1][0], handlers[0])
def test_raise(self):
# raising URLError stops processing of request
o = OpenerDirector()
meth_spec = [
[("http_open", "raise")],
[("http_open", "return self")],
]
handlers = add_ordered_mock_handlers(o, meth_spec)
req = Request("http://example.com/")
self.assertRaises(urllib.error.URLError, o.open, req)
self.assertEqual(o.calls, [(handlers[0], "http_open", (req,), {})])
def test_http_error(self):
# XXX http_error_default
# http errors are a special case
o = OpenerDirector()
meth_spec = [
[("http_open", "error 302")],
[("http_error_400", "raise"), "http_open"],
[("http_error_302", "return response"), "http_error_303",
"http_error"],
[("http_error_302")],
]
handlers = add_ordered_mock_handlers(o, meth_spec)
class Unknown:
def __eq__(self, other):
return True
req = Request("http://example.com/")
o.open(req)
assert len(o.calls) == 2
calls = [(handlers[0], "http_open", (req,)),
(handlers[2], "http_error_302",
(req, Unknown(), 302, "", {}))]
for expected, got in zip(calls, o.calls):
handler, method_name, args = expected
self.assertEqual((handler, method_name), got[:2])
self.assertEqual(args, got[2])
def test_processors(self):
# *_request / *_response methods get called appropriately
o = OpenerDirector()
meth_spec = [
[("http_request", "return request"),
("http_response", "return response")],
[("http_request", "return request"),
("http_response", "return response")],
]
handlers = add_ordered_mock_handlers(o, meth_spec)
req = Request("http://example.com/")
o.open(req)
# processor methods are called on *all* handlers that define them,
# not just the first handler that handles the request
calls = [
(handlers[0], "http_request"), (handlers[1], "http_request"),
(handlers[0], "http_response"), (handlers[1], "http_response")]
for i, (handler, name, args, kwds) in enumerate(o.calls):
if i < 2:
# *_request
self.assertEqual((handler, name), calls[i])
self.assertEqual(len(args), 1)
self.assertIsInstance(args[0], Request)
else:
# *_response
self.assertEqual((handler, name), calls[i])
self.assertEqual(len(args), 2)
self.assertIsInstance(args[0], Request)
# response from opener.open is None, because there's no
# handler that defines http_open to handle it
if args[1] is not None:
self.assertIsInstance(args[1], MockResponse)
def sanepathname2url(path):
try:
path.encode("utf-8")
except UnicodeEncodeError:
raise unittest.SkipTest("path is not encodable to utf8")
urlpath = urllib.request.pathname2url(path)
if (os.name == "nt" or os.name == 'uwp_os') and urlpath.startswith("///"):
urlpath = urlpath[2:]
# XXX don't ask me about the mac...
return urlpath
class HandlerTests(unittest.TestCase):
def test_ftp(self):
class MockFTPWrapper:
def __init__(self, data):
self.data = data
def retrfile(self, filename, filetype):
self.filename, self.filetype = filename, filetype
return io.StringIO(self.data), len(self.data)
def close(self):
pass
class NullFTPHandler(urllib.request.FTPHandler):
def __init__(self, data):
self.data = data
def connect_ftp(self, user, passwd, host, port, dirs,
timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
self.user, self.passwd = user, passwd
self.host, self.port = host, port
self.dirs = dirs
self.ftpwrapper = MockFTPWrapper(self.data)
return self.ftpwrapper
import ftplib
data = "rheum rhaponicum"
h = NullFTPHandler(data)
h.parent = MockOpener()
for url, host, port, user, passwd, type_, dirs, filename, mimetype in [
("ftp://localhost/foo/bar/baz.html",
"localhost", ftplib.FTP_PORT, "", "", "I",
["foo", "bar"], "baz.html", "text/html"),
("ftp://parrot@localhost/foo/bar/baz.html",
"localhost", ftplib.FTP_PORT, "parrot", "", "I",
["foo", "bar"], "baz.html", "text/html"),
("ftp://%25parrot@localhost/foo/bar/baz.html",
"localhost", ftplib.FTP_PORT, "%parrot", "", "I",
["foo", "bar"], "baz.html", "text/html"),
("ftp://%2542parrot@localhost/foo/bar/baz.html",
"localhost", ftplib.FTP_PORT, "%42parrot", "", "I",
["foo", "bar"], "baz.html", "text/html"),
("ftp://localhost:80/foo/bar/",
"localhost", 80, "", "", "D",
["foo", "bar"], "", None),
("ftp://localhost/baz.gif;type=a",
"localhost", ftplib.FTP_PORT, "", "", "A",
[], "baz.gif", None), # XXX really this should guess image/gif
]:
req = Request(url)
req.timeout = None
r = h.ftp_open(req)
# ftp authentication not yet implemented by FTPHandler
self.assertEqual(h.user, user)
self.assertEqual(h.passwd, passwd)
self.assertEqual(h.host, socket.gethostbyname(host))
self.assertEqual(h.port, port)
self.assertEqual(h.dirs, dirs)
self.assertEqual(h.ftpwrapper.filename, filename)
self.assertEqual(h.ftpwrapper.filetype, type_)
headers = r.info()
self.assertEqual(headers.get("Content-type"), mimetype)
self.assertEqual(int(headers["Content-length"]), len(data))
def test_file(self):
import email.utils
h = urllib.request.FileHandler()
o = h.parent = MockOpener()
TESTFN = support.TESTFN
urlpath = sanepathname2url(os.path.abspath(TESTFN))
towrite = b"hello, world\n"
urls = [
"file://localhost%s" % urlpath,
"file://%s" % urlpath,
"file://%s%s" % (socket.gethostbyname('localhost'), urlpath),
]
try:
localaddr = socket.gethostbyname(socket.gethostname())
except socket.gaierror:
localaddr = ''
if localaddr:
urls.append("file://%s%s" % (localaddr, urlpath))
for url in urls:
f = open(TESTFN, "wb")
try:
try:
f.write(towrite)
finally:
f.close()
r = h.file_open(Request(url))
try:
data = r.read()
headers = r.info()
respurl = r.geturl()
finally:
r.close()
stats = os.stat(TESTFN)
modified = email.utils.formatdate(stats.st_mtime, usegmt=True)
finally:
os.remove(TESTFN)
self.assertEqual(data, towrite)
self.assertEqual(headers["Content-type"], "text/plain")
self.assertEqual(headers["Content-length"], "13")
self.assertEqual(headers["Last-modified"], modified)
self.assertEqual(respurl, url)
for url in [
"file://localhost:80%s" % urlpath,
"file:///file_does_not_exist.txt",
"file://not-a-local-host.com//dir/file.txt",
"file://%s:80%s/%s" % (socket.gethostbyname('localhost'),
os.getcwd(), TESTFN),
"file://somerandomhost.ontheinternet.com%s/%s" %
(os.getcwd(), TESTFN),
]:
try:
f = open(TESTFN, "wb")
try:
f.write(towrite)
finally:
f.close()
self.assertRaises(urllib.error.URLError,
h.file_open, Request(url))
finally:
os.remove(TESTFN)
h = urllib.request.FileHandler()
o = h.parent = MockOpener()
# XXXX why does // mean ftp (and /// mean not ftp!), and where
# is file: scheme specified? I think this is really a bug, and
# what was intended was to distinguish between URLs like:
# file:/blah.txt (a file)
# file://localhost/blah.txt (a file)
# file:///blah.txt (a file)
# file://ftp.example.com/blah.txt (an ftp URL)
for url, ftp in [
("file://ftp.example.com//foo.txt", False),
("file://ftp.example.com///foo.txt", False),
# XXXX bug: fails with OSError, should be URLError
("file://ftp.example.com/foo.txt", False),
("file://somehost//foo/something.txt", False),
("file://localhost//foo/something.txt", False),
]:
req = Request(url)
try:
h.file_open(req)
# XXXX remove OSError when bug fixed
except (urllib.error.URLError, OSError):
self.assertFalse(ftp)
else:
self.assertIs(o.req, req)
self.assertEqual(req.type, "ftp")
self.assertEqual(req.type == "ftp", ftp)
def test_http(self):
h = urllib.request.AbstractHTTPHandler()
o = h.parent = MockOpener()
url = "http://example.com/"
for method, data in [("GET", None), ("POST", b"blah")]:
req = Request(url, data, {"Foo": "bar"})
req.timeout = None
req.add_unredirected_header("Spam", "eggs")
http = MockHTTPClass()
r = h.do_open(http, req)
# result attributes
r.read; r.readline # wrapped MockFile methods
r.info; r.geturl # addinfourl methods
r.code, r.msg == 200, "OK" # added from MockHTTPClass.getreply()
hdrs = r.info()
hdrs.get; hdrs.__contains__ # r.info() gives dict from .getreply()
self.assertEqual(r.geturl(), url)
self.assertEqual(http.host, "example.com")
self.assertEqual(http.level, 0)
self.assertEqual(http.method, method)
self.assertEqual(http.selector, "/")
self.assertEqual(http.req_headers,
[("Connection", "close"),
("Foo", "bar"), ("Spam", "eggs")])
self.assertEqual(http.data, data)
# check OSError converted to URLError
http.raise_on_endheaders = True
self.assertRaises(urllib.error.URLError, h.do_open, http, req)
# Check for TypeError on POST data which is str.
req = Request("http://example.com/","badpost")
self.assertRaises(TypeError, h.do_request_, req)
# check adding of standard headers
o.addheaders = [("Spam", "eggs")]
for data in b"", None: # POST, GET
req = Request("http://example.com/", data)
r = MockResponse(200, "OK", {}, "")
newreq = h.do_request_(req)
if data is None: # GET
self.assertNotIn("Content-length", req.unredirected_hdrs)
self.assertNotIn("Content-type", req.unredirected_hdrs)
else: # POST
self.assertEqual(req.unredirected_hdrs["Content-length"], "0")
self.assertEqual(req.unredirected_hdrs["Content-type"],
"application/x-www-form-urlencoded")
# XXX the details of Host could be better tested
self.assertEqual(req.unredirected_hdrs["Host"], "example.com")
self.assertEqual(req.unredirected_hdrs["Spam"], "eggs")
# don't clobber existing headers
req.add_unredirected_header("Content-length", "foo")
req.add_unredirected_header("Content-type", "bar")
req.add_unredirected_header("Host", "baz")
req.add_unredirected_header("Spam", "foo")
newreq = h.do_request_(req)
self.assertEqual(req.unredirected_hdrs["Content-length"], "foo")
self.assertEqual(req.unredirected_hdrs["Content-type"], "bar")
self.assertEqual(req.unredirected_hdrs["Host"], "baz")
self.assertEqual(req.unredirected_hdrs["Spam"], "foo")
# Check iterable body support
def iterable_body():
yield b"one"
yield b"two"
yield b"three"
for headers in {}, {"Content-Length": 11}:
req = Request("http://example.com/", iterable_body(), headers)
if not headers:
# Having an iterable body without a Content-Length should
# raise an exception
self.assertRaises(ValueError, h.do_request_, req)
else:
newreq = h.do_request_(req)
# A file object.
# Test only Content-Length attribute of request.
file_obj = io.BytesIO()
file_obj.write(b"Something\nSomething\nSomething\n")
for headers in {}, {"Content-Length": 30}:
req = Request("http://example.com/", file_obj, headers)
if not headers:
# Having an iterable body without a Content-Length should
# raise an exception
self.assertRaises(ValueError, h.do_request_, req)
else:
newreq = h.do_request_(req)
self.assertEqual(int(newreq.get_header('Content-length')), 30)
file_obj.close()
# array.array Iterable - Content Length is calculated
iterable_array = array.array("I",[1,2,3,4])
for headers in {}, {"Content-Length": 16}:
req = Request("http://example.com/", iterable_array, headers)
newreq = h.do_request_(req)
self.assertEqual(int(newreq.get_header('Content-length')),16)
def test_http_doubleslash(self):
# Checks the presence of any unnecessary double slash in url does not
# break anything. Previously, a double slash directly after the host
# could cause incorrect parsing.
h = urllib.request.AbstractHTTPHandler()
h.parent = MockOpener()
data = b""
ds_urls = [
"http://example.com/foo/bar/baz.html",
"http://example.com//foo/bar/baz.html",
"http://example.com/foo//bar/baz.html",
"http://example.com/foo/bar//baz.html"
]
for ds_url in ds_urls:
ds_req = Request(ds_url, data)
# Check whether host is determined correctly if there is no proxy
np_ds_req = h.do_request_(ds_req)
self.assertEqual(np_ds_req.unredirected_hdrs["Host"], "example.com")
# Check whether host is determined correctly if there is a proxy
ds_req.set_proxy("someproxy:3128", None)
p_ds_req = h.do_request_(ds_req)
self.assertEqual(p_ds_req.unredirected_hdrs["Host"], "example.com")
def test_full_url_setter(self):
# Checks to ensure that components are set correctly after setting the
# full_url of a Request object
urls = [
'http://example.com?foo=bar#baz',
'http://example.com?foo=bar&spam=eggs#bash',
'http://example.com',
]
# testing a reusable request instance, but the url parameter is
# required, so just use a dummy one to instantiate
r = Request('http://example.com')
for url in urls:
r.full_url = url
parsed = urlparse(url)
self.assertEqual(r.get_full_url(), url)
# full_url setter uses splittag to split into components.
# splittag sets the fragment as None while urlparse sets it to ''
self.assertEqual(r.fragment or '', parsed.fragment)
self.assertEqual(urlparse(r.get_full_url()).query, parsed.query)
def test_full_url_deleter(self):
r = Request('http://www.example.com')
del r.full_url
self.assertIsNone(r.full_url)
self.assertIsNone(r.fragment)
self.assertEqual(r.selector, '')
def test_fixpath_in_weirdurls(self):
        # Issue4493: urllib2 should supply '/' for URLs whose path does not
        # start with '/'
h = urllib.request.AbstractHTTPHandler()
h.parent = MockOpener()
weird_url = 'http://www.python.org?getspam'
req = Request(weird_url)
newreq = h.do_request_(req)
self.assertEqual(newreq.host, 'www.python.org')
self.assertEqual(newreq.selector, '/?getspam')
url_without_path = 'http://www.python.org'
req = Request(url_without_path)
newreq = h.do_request_(req)
self.assertEqual(newreq.host, 'www.python.org')
self.assertEqual(newreq.selector, '')
def test_errors(self):
h = urllib.request.HTTPErrorProcessor()
o = h.parent = MockOpener()
url = "http://example.com/"
req = Request(url)
# all 2xx are passed through
r = MockResponse(200, "OK", {}, "", url)
newr = h.http_response(req, r)
self.assertIs(r, newr)
self.assertFalse(hasattr(o, "proto")) # o.error not called
r = MockResponse(202, "Accepted", {}, "", url)
newr = h.http_response(req, r)
self.assertIs(r, newr)
self.assertFalse(hasattr(o, "proto")) # o.error not called
r = MockResponse(206, "Partial content", {}, "", url)
newr = h.http_response(req, r)
self.assertIs(r, newr)
self.assertFalse(hasattr(o, "proto")) # o.error not called
# anything else calls o.error (and MockOpener returns None, here)
r = MockResponse(502, "Bad gateway", {}, "", url)
self.assertIsNone(h.http_response(req, r))
self.assertEqual(o.proto, "http") # o.error called
self.assertEqual(o.args, (req, r, 502, "Bad gateway", {}))
def test_cookies(self):
cj = MockCookieJar()
h = urllib.request.HTTPCookieProcessor(cj)
h.parent = MockOpener()
req = Request("http://example.com/")
r = MockResponse(200, "OK", {}, "")
newreq = h.http_request(req)
self.assertIs(cj.ach_req, req)
self.assertIs(cj.ach_req, newreq)
self.assertEqual(req.origin_req_host, "example.com")
self.assertFalse(req.unverifiable)
newr = h.http_response(req, r)
self.assertIs(cj.ec_req, req)
self.assertIs(cj.ec_r, r)
self.assertIs(r, newr)
def test_redirect(self):
from_url = "http://example.com/a.html"
to_url = "http://example.com/b.html"
h = urllib.request.HTTPRedirectHandler()
o = h.parent = MockOpener()
# ordinary redirect behaviour
for code in 301, 302, 303, 307:
for data in None, "blah\nblah\n":
method = getattr(h, "http_error_%s" % code)
req = Request(from_url, data)
req.timeout = socket._GLOBAL_DEFAULT_TIMEOUT
req.add_header("Nonsense", "viking=withhold")
if data is not None:
req.add_header("Content-Length", str(len(data)))
req.add_unredirected_header("Spam", "spam")
try:
method(req, MockFile(), code, "Blah",
MockHeaders({"location": to_url}))
except urllib.error.HTTPError:
# 307 in response to POST requires user OK
self.assertEqual(code, 307)
self.assertIsNotNone(data)
self.assertEqual(o.req.get_full_url(), to_url)
try:
self.assertEqual(o.req.get_method(), "GET")
except AttributeError:
self.assertFalse(o.req.data)
# now it's a GET, there should not be headers regarding content
# (possibly dragged from before being a POST)
headers = [x.lower() for x in o.req.headers]
self.assertNotIn("content-length", headers)
self.assertNotIn("content-type", headers)
self.assertEqual(o.req.headers["Nonsense"],
"viking=withhold")
self.assertNotIn("Spam", o.req.headers)
self.assertNotIn("Spam", o.req.unredirected_hdrs)
# loop detection
req = Request(from_url)
req.timeout = socket._GLOBAL_DEFAULT_TIMEOUT
def redirect(h, req, url=to_url):
h.http_error_302(req, MockFile(), 302, "Blah",
MockHeaders({"location": url}))
# Note that the *original* request shares the same record of
# redirections with the sub-requests caused by the redirections.
# detect infinite loop redirect of a URL to itself
req = Request(from_url, origin_req_host="example.com")
count = 0
req.timeout = socket._GLOBAL_DEFAULT_TIMEOUT
try:
while 1:
redirect(h, req, "http://example.com/")
count = count + 1
except urllib.error.HTTPError:
# don't stop until max_repeats, because cookies may introduce state
self.assertEqual(count, urllib.request.HTTPRedirectHandler.max_repeats)
# detect endless non-repeating chain of redirects
req = Request(from_url, origin_req_host="example.com")
count = 0
req.timeout = socket._GLOBAL_DEFAULT_TIMEOUT
try:
while 1:
redirect(h, req, "http://example.com/%d" % count)
count = count + 1
except urllib.error.HTTPError:
self.assertEqual(count,
urllib.request.HTTPRedirectHandler.max_redirections)
def test_invalid_redirect(self):
from_url = "http://example.com/a.html"
valid_schemes = ['http','https','ftp']
invalid_schemes = ['file','imap','ldap']
schemeless_url = "example.com/b.html"
h = urllib.request.HTTPRedirectHandler()
o = h.parent = MockOpener()
req = Request(from_url)
req.timeout = socket._GLOBAL_DEFAULT_TIMEOUT
for scheme in invalid_schemes:
invalid_url = scheme + '://' + schemeless_url
self.assertRaises(urllib.error.HTTPError, h.http_error_302,
req, MockFile(), 302, "Security Loophole",
MockHeaders({"location": invalid_url}))
for scheme in valid_schemes:
valid_url = scheme + '://' + schemeless_url
h.http_error_302(req, MockFile(), 302, "That's fine",
MockHeaders({"location": valid_url}))
self.assertEqual(o.req.get_full_url(), valid_url)
def test_relative_redirect(self):
from_url = "http://example.com/a.html"
relative_url = "/b.html"
h = urllib.request.HTTPRedirectHandler()
o = h.parent = MockOpener()
req = Request(from_url)
req.timeout = socket._GLOBAL_DEFAULT_TIMEOUT
valid_url = urllib.parse.urljoin(from_url,relative_url)
h.http_error_302(req, MockFile(), 302, "That's fine",
MockHeaders({"location": valid_url}))
self.assertEqual(o.req.get_full_url(), valid_url)
def test_cookie_redirect(self):
# cookies shouldn't leak into redirected requests
from http.cookiejar import CookieJar
from test.test_http_cookiejar import interact_netscape
cj = CookieJar()
interact_netscape(cj, "http://www.example.com/", "spam=eggs")
hh = MockHTTPHandler(302, "Location: http://www.cracker.com/\r\n\r\n")
hdeh = urllib.request.HTTPDefaultErrorHandler()
hrh = urllib.request.HTTPRedirectHandler()
cp = urllib.request.HTTPCookieProcessor(cj)
o = build_test_opener(hh, hdeh, hrh, cp)
o.open("http://www.example.com/")
self.assertFalse(hh.req.has_header("Cookie"))
def test_redirect_fragment(self):
redirected_url = 'http://www.example.com/index.html#OK\r\n\r\n'
hh = MockHTTPHandler(302, 'Location: ' + redirected_url)
hdeh = urllib.request.HTTPDefaultErrorHandler()
hrh = urllib.request.HTTPRedirectHandler()
o = build_test_opener(hh, hdeh, hrh)
fp = o.open('http://www.example.com')
self.assertEqual(fp.geturl(), redirected_url.strip())
def test_proxy(self):
o = OpenerDirector()
ph = urllib.request.ProxyHandler(dict(http="proxy.example.com:3128"))
o.add_handler(ph)
meth_spec = [
[("http_open", "return response")]
]
handlers = add_ordered_mock_handlers(o, meth_spec)
req = Request("http://acme.example.com/")
self.assertEqual(req.host, "acme.example.com")
o.open(req)
self.assertEqual(req.host, "proxy.example.com:3128")
self.assertEqual([(handlers[0], "http_open")],
[tup[0:2] for tup in o.calls])
def test_proxy_no_proxy(self):
os.environ['no_proxy'] = 'python.org'
o = OpenerDirector()
ph = urllib.request.ProxyHandler(dict(http="proxy.example.com"))
o.add_handler(ph)
req = Request("http://www.perl.org/")
self.assertEqual(req.host, "www.perl.org")
o.open(req)
self.assertEqual(req.host, "proxy.example.com")
req = Request("http://www.python.org")
self.assertEqual(req.host, "www.python.org")
o.open(req)
self.assertEqual(req.host, "www.python.org")
del os.environ['no_proxy']
def test_proxy_no_proxy_all(self):
os.environ['no_proxy'] = '*'
o = OpenerDirector()
ph = urllib.request.ProxyHandler(dict(http="proxy.example.com"))
o.add_handler(ph)
req = Request("http://www.python.org")
self.assertEqual(req.host, "www.python.org")
o.open(req)
self.assertEqual(req.host, "www.python.org")
del os.environ['no_proxy']
def test_proxy_https(self):
o = OpenerDirector()
ph = urllib.request.ProxyHandler(dict(https="proxy.example.com:3128"))
o.add_handler(ph)
meth_spec = [
[("https_open", "return response")]
]
handlers = add_ordered_mock_handlers(o, meth_spec)
req = Request("https://www.example.com/")
self.assertEqual(req.host, "www.example.com")
o.open(req)
self.assertEqual(req.host, "proxy.example.com:3128")
self.assertEqual([(handlers[0], "https_open")],
[tup[0:2] for tup in o.calls])
def test_proxy_https_proxy_authorization(self):
o = OpenerDirector()
ph = urllib.request.ProxyHandler(dict(https='proxy.example.com:3128'))
o.add_handler(ph)
https_handler = MockHTTPSHandler()
o.add_handler(https_handler)
req = Request("https://www.example.com/")
req.add_header("Proxy-Authorization", "FooBar")
req.add_header("User-Agent", "Grail")
self.assertEqual(req.host, "www.example.com")
self.assertIsNone(req._tunnel_host)
o.open(req)
# Verify Proxy-Authorization gets tunneled to request.
# httpsconn req_headers do not have the Proxy-Authorization header but
# the req will have.
self.assertNotIn(("Proxy-Authorization", "FooBar"),
https_handler.httpconn.req_headers)
self.assertIn(("User-Agent", "Grail"),
https_handler.httpconn.req_headers)
self.assertIsNotNone(req._tunnel_host)
self.assertEqual(req.host, "proxy.example.com:3128")
self.assertEqual(req.get_header("Proxy-authorization"), "FooBar")
# TODO: This should be only for OSX
@unittest.skipUnless(sys.platform == 'darwin', "only relevant for OSX")
def test_osx_proxy_bypass(self):
bypass = {
'exclude_simple': False,
'exceptions': ['foo.bar', '*.bar.com', '127.0.0.1', '10.10',
'10.0/16']
}
# Check hosts that should trigger the proxy bypass
for host in ('foo.bar', 'www.bar.com', '127.0.0.1', '10.10.0.1',
'10.0.0.1'):
self.assertTrue(_proxy_bypass_macosx_sysconf(host, bypass),
'expected bypass of %s to be True' % host)
# Check hosts that should not trigger the proxy bypass
for host in ('abc.foo.bar', 'bar.com', '127.0.0.2', '10.11.0.1',
'notinbypass'):
self.assertFalse(_proxy_bypass_macosx_sysconf(host, bypass),
'expected bypass of %s to be False' % host)
# Check the exclude_simple flag
bypass = {'exclude_simple': True, 'exceptions': []}
self.assertTrue(_proxy_bypass_macosx_sysconf('test', bypass))
def test_basic_auth(self, quote_char='"'):
opener = OpenerDirector()
password_manager = MockPasswordManager()
auth_handler = urllib.request.HTTPBasicAuthHandler(password_manager)
realm = "ACME Widget Store"
http_handler = MockHTTPHandler(
401, 'WWW-Authenticate: Basic realm=%s%s%s\r\n\r\n' %
(quote_char, realm, quote_char))
opener.add_handler(auth_handler)
opener.add_handler(http_handler)
self._test_basic_auth(opener, auth_handler, "Authorization",
realm, http_handler, password_manager,
"http://acme.example.com/protected",
"http://acme.example.com/protected",
)
def test_basic_auth_with_single_quoted_realm(self):
self.test_basic_auth(quote_char="'")
def test_basic_auth_with_unquoted_realm(self):
opener = OpenerDirector()
password_manager = MockPasswordManager()
auth_handler = urllib.request.HTTPBasicAuthHandler(password_manager)
realm = "ACME Widget Store"
http_handler = MockHTTPHandler(
401, 'WWW-Authenticate: Basic realm=%s\r\n\r\n' % realm)
opener.add_handler(auth_handler)
opener.add_handler(http_handler)
with self.assertWarns(UserWarning):
self._test_basic_auth(opener, auth_handler, "Authorization",
realm, http_handler, password_manager,
"http://acme.example.com/protected",
"http://acme.example.com/protected",
)
def test_proxy_basic_auth(self):
opener = OpenerDirector()
ph = urllib.request.ProxyHandler(dict(http="proxy.example.com:3128"))
opener.add_handler(ph)
password_manager = MockPasswordManager()
auth_handler = urllib.request.ProxyBasicAuthHandler(password_manager)
realm = "ACME Networks"
http_handler = MockHTTPHandler(
407, 'Proxy-Authenticate: Basic realm="%s"\r\n\r\n' % realm)
opener.add_handler(auth_handler)
opener.add_handler(http_handler)
self._test_basic_auth(opener, auth_handler, "Proxy-authorization",
realm, http_handler, password_manager,
"http://acme.example.com:3128/protected",
"proxy.example.com:3128",
)
def test_basic_and_digest_auth_handlers(self):
# HTTPDigestAuthHandler raised an exception if it couldn't handle a 40*
# response (http://python.org/sf/1479302), where it should instead
# return None to allow another handler (especially
# HTTPBasicAuthHandler) to handle the response.
# Also (http://python.org/sf/14797027, RFC 2617 section 1.2), we must
# try digest first (since it's the strongest auth scheme), so we record
# order of calls here to check digest comes first:
class RecordingOpenerDirector(OpenerDirector):
def __init__(self):
OpenerDirector.__init__(self)
self.recorded = []
def record(self, info):
self.recorded.append(info)
class TestDigestAuthHandler(urllib.request.HTTPDigestAuthHandler):
def http_error_401(self, *args, **kwds):
self.parent.record("digest")
urllib.request.HTTPDigestAuthHandler.http_error_401(self,
*args, **kwds)
class TestBasicAuthHandler(urllib.request.HTTPBasicAuthHandler):
def http_error_401(self, *args, **kwds):
self.parent.record("basic")
urllib.request.HTTPBasicAuthHandler.http_error_401(self,
*args, **kwds)
opener = RecordingOpenerDirector()
password_manager = MockPasswordManager()
digest_handler = TestDigestAuthHandler(password_manager)
basic_handler = TestBasicAuthHandler(password_manager)
realm = "ACME Networks"
http_handler = MockHTTPHandler(
401, 'WWW-Authenticate: Basic realm="%s"\r\n\r\n' % realm)
opener.add_handler(basic_handler)
opener.add_handler(digest_handler)
opener.add_handler(http_handler)
# check basic auth isn't blocked by digest handler failing
self._test_basic_auth(opener, basic_handler, "Authorization",
realm, http_handler, password_manager,
"http://acme.example.com/protected",
"http://acme.example.com/protected",
)
# check digest was tried before basic (twice, because
# _test_basic_auth called .open() twice)
self.assertEqual(opener.recorded, ["digest", "basic"]*2)
def test_unsupported_auth_digest_handler(self):
opener = OpenerDirector()
# While using DigestAuthHandler
digest_auth_handler = urllib.request.HTTPDigestAuthHandler(None)
http_handler = MockHTTPHandler(
401, 'WWW-Authenticate: Kerberos\r\n\r\n')
opener.add_handler(digest_auth_handler)
opener.add_handler(http_handler)
self.assertRaises(ValueError, opener.open, "http://www.example.com")
def test_unsupported_auth_basic_handler(self):
# While using BasicAuthHandler
opener = OpenerDirector()
basic_auth_handler = urllib.request.HTTPBasicAuthHandler(None)
http_handler = MockHTTPHandler(
401, 'WWW-Authenticate: NTLM\r\n\r\n')
opener.add_handler(basic_auth_handler)
opener.add_handler(http_handler)
self.assertRaises(ValueError, opener.open, "http://www.example.com")
def _test_basic_auth(self, opener, auth_handler, auth_header,
realm, http_handler, password_manager,
request_url, protected_url):
import base64
user, password = "wile", "coyote"
# .add_password() fed through to password manager
auth_handler.add_password(realm, request_url, user, password)
self.assertEqual(realm, password_manager.realm)
self.assertEqual(request_url, password_manager.url)
self.assertEqual(user, password_manager.user)
self.assertEqual(password, password_manager.password)
opener.open(request_url)
# should have asked the password manager for the username/password
self.assertEqual(password_manager.target_realm, realm)
self.assertEqual(password_manager.target_url, protected_url)
# expect one request without authorization, then one with
self.assertEqual(len(http_handler.requests), 2)
self.assertFalse(http_handler.requests[0].has_header(auth_header))
userpass = bytes('%s:%s' % (user, password), "ascii")
auth_hdr_value = ('Basic ' +
base64.encodebytes(userpass).strip().decode())
self.assertEqual(http_handler.requests[1].get_header(auth_header),
auth_hdr_value)
self.assertEqual(http_handler.requests[1].unredirected_hdrs[auth_header],
auth_hdr_value)
# if the password manager can't find a password, the handler won't
# handle the HTTP auth error
password_manager.user = password_manager.password = None
http_handler.reset()
opener.open(request_url)
self.assertEqual(len(http_handler.requests), 1)
self.assertFalse(http_handler.requests[0].has_header(auth_header))
def test_basic_prior_auth_auto_send(self):
# Assume already authenticated if is_authenticated=True
        # for APIs like GitHub that don't return 401
user, password = "wile", "coyote"
request_url = "http://acme.example.com/protected"
http_handler = MockHTTPHandlerCheckAuth(200)
pwd_manager = HTTPPasswordMgrWithPriorAuth()
auth_prior_handler = HTTPBasicAuthHandler(pwd_manager)
auth_prior_handler.add_password(
None, request_url, user, password, is_authenticated=True)
is_auth = pwd_manager.is_authenticated(request_url)
self.assertTrue(is_auth)
opener = OpenerDirector()
opener.add_handler(auth_prior_handler)
opener.add_handler(http_handler)
opener.open(request_url)
# expect request to be sent with auth header
self.assertTrue(http_handler.has_auth_header)
def test_basic_prior_auth_send_after_first_success(self):
# Auto send auth header after authentication is successful once
user, password = 'wile', 'coyote'
request_url = 'http://acme.example.com/protected'
realm = 'ACME'
pwd_manager = HTTPPasswordMgrWithPriorAuth()
auth_prior_handler = HTTPBasicAuthHandler(pwd_manager)
auth_prior_handler.add_password(realm, request_url, user, password)
is_auth = pwd_manager.is_authenticated(request_url)
self.assertFalse(is_auth)
opener = OpenerDirector()
opener.add_handler(auth_prior_handler)
http_handler = MockHTTPHandler(
401, 'WWW-Authenticate: Basic realm="%s"\r\n\r\n' % None)
opener.add_handler(http_handler)
opener.open(request_url)
is_auth = pwd_manager.is_authenticated(request_url)
self.assertTrue(is_auth)
http_handler = MockHTTPHandlerCheckAuth(200)
self.assertFalse(http_handler.has_auth_header)
opener = OpenerDirector()
opener.add_handler(auth_prior_handler)
opener.add_handler(http_handler)
# After getting 200 from MockHTTPHandler
# Next request sends header in the first request
opener.open(request_url)
# expect request to be sent with auth header
self.assertTrue(http_handler.has_auth_header)
def test_http_closed(self):
"""Test the connection is cleaned up when the response is closed"""
for (transfer, data) in (
("Connection: close", b"data"),
("Transfer-Encoding: chunked", b"4\r\ndata\r\n0\r\n\r\n"),
("Content-Length: 4", b"data"),
):
header = "HTTP/1.1 200 OK\r\n{}\r\n\r\n".format(transfer)
conn = test_urllib.fakehttp(header.encode() + data)
handler = urllib.request.AbstractHTTPHandler()
req = Request("http://dummy/")
req.timeout = None
with handler.do_open(conn, req) as resp:
resp.read()
self.assertTrue(conn.fakesock.closed,
"Connection not closed with {!r}".format(transfer))
def test_invalid_closed(self):
"""Test the connection is cleaned up after an invalid response"""
conn = test_urllib.fakehttp(b"")
handler = urllib.request.AbstractHTTPHandler()
req = Request("http://dummy/")
req.timeout = None
with self.assertRaises(http.client.BadStatusLine):
handler.do_open(conn, req)
self.assertTrue(conn.fakesock.closed, "Connection not closed")
class MiscTests(unittest.TestCase):
def opener_has_handler(self, opener, handler_class):
self.assertTrue(any(h.__class__ == handler_class
for h in opener.handlers))
def test_build_opener(self):
class MyHTTPHandler(urllib.request.HTTPHandler):
pass
class FooHandler(urllib.request.BaseHandler):
def foo_open(self):
pass
class BarHandler(urllib.request.BaseHandler):
def bar_open(self):
pass
build_opener = urllib.request.build_opener
o = build_opener(FooHandler, BarHandler)
self.opener_has_handler(o, FooHandler)
self.opener_has_handler(o, BarHandler)
# can take a mix of classes and instances
o = build_opener(FooHandler, BarHandler())
self.opener_has_handler(o, FooHandler)
self.opener_has_handler(o, BarHandler)
# subclasses of default handlers override default handlers
o = build_opener(MyHTTPHandler)
self.opener_has_handler(o, MyHTTPHandler)
# a particular case of overriding: default handlers can be passed
# in explicitly
o = build_opener()
self.opener_has_handler(o, urllib.request.HTTPHandler)
o = build_opener(urllib.request.HTTPHandler)
self.opener_has_handler(o, urllib.request.HTTPHandler)
o = build_opener(urllib.request.HTTPHandler())
self.opener_has_handler(o, urllib.request.HTTPHandler)
# Issue2670: multiple handlers sharing the same base class
class MyOtherHTTPHandler(urllib.request.HTTPHandler):
pass
o = build_opener(MyHTTPHandler, MyOtherHTTPHandler)
self.opener_has_handler(o, MyHTTPHandler)
self.opener_has_handler(o, MyOtherHTTPHandler)
@unittest.skipUnless(support.is_resource_enabled('network'),
'test requires network access')
def test_issue16464(self):
with support.transient_internet("http://www.example.com/"):
opener = urllib.request.build_opener()
request = urllib.request.Request("http://www.example.com/")
self.assertEqual(None, request.data)
opener.open(request, "1".encode("us-ascii"))
self.assertEqual(b"1", request.data)
self.assertEqual("1", request.get_header("Content-length"))
opener.open(request, "1234567890".encode("us-ascii"))
self.assertEqual(b"1234567890", request.data)
self.assertEqual("10", request.get_header("Content-length"))
def test_HTTPError_interface(self):
"""
Issue 13211 reveals that HTTPError didn't implement the URLError
interface even though HTTPError is a subclass of URLError.
"""
msg = 'something bad happened'
url = code = fp = None
hdrs = 'Content-Length: 42'
err = urllib.error.HTTPError(url, code, msg, hdrs, fp)
self.assertTrue(hasattr(err, 'reason'))
self.assertEqual(err.reason, 'something bad happened')
self.assertTrue(hasattr(err, 'headers'))
self.assertEqual(err.headers, 'Content-Length: 42')
expected_errmsg = 'HTTP Error %s: %s' % (err.code, err.msg)
self.assertEqual(str(err), expected_errmsg)
expected_errmsg = '<HTTPError %s: %r>' % (err.code, err.msg)
self.assertEqual(repr(err), expected_errmsg)
def test_parse_proxy(self):
parse_proxy_test_cases = [
('proxy.example.com',
(None, None, None, 'proxy.example.com')),
('proxy.example.com:3128',
(None, None, None, 'proxy.example.com:3128')),
('proxy.example.com', (None, None, None, 'proxy.example.com')),
('proxy.example.com:3128',
(None, None, None, 'proxy.example.com:3128')),
# The authority component may optionally include userinfo
            # (assumed to be username:password):
('joe:password@proxy.example.com',
(None, 'joe', 'password', 'proxy.example.com')),
('joe:password@proxy.example.com:3128',
(None, 'joe', 'password', 'proxy.example.com:3128')),
            # Examples with URLs
('http://proxy.example.com/',
('http', None, None, 'proxy.example.com')),
('http://proxy.example.com:3128/',
('http', None, None, 'proxy.example.com:3128')),
('http://joe:password@proxy.example.com/',
('http', 'joe', 'password', 'proxy.example.com')),
('http://joe:password@proxy.example.com:3128',
('http', 'joe', 'password', 'proxy.example.com:3128')),
# Everything after the authority is ignored
('ftp://joe:password@proxy.example.com/rubbish:3128',
('ftp', 'joe', 'password', 'proxy.example.com')),
# Test for no trailing '/' case
('http://joe:password@proxy.example.com',
('http', 'joe', 'password', 'proxy.example.com'))
]
for tc, expected in parse_proxy_test_cases:
self.assertEqual(_parse_proxy(tc), expected)
        self.assertRaises(ValueError, _parse_proxy, 'file:/ftp.example.com')
class RequestTests(unittest.TestCase):
class PutRequest(Request):
method = 'PUT'
def setUp(self):
self.get = Request("http://www.python.org/~jeremy/")
self.post = Request("http://www.python.org/~jeremy/",
"data",
headers={"X-Test": "test"})
self.head = Request("http://www.python.org/~jeremy/", method='HEAD')
self.put = self.PutRequest("http://www.python.org/~jeremy/")
self.force_post = self.PutRequest("http://www.python.org/~jeremy/",
method="POST")
def test_method(self):
self.assertEqual("POST", self.post.get_method())
self.assertEqual("GET", self.get.get_method())
self.assertEqual("HEAD", self.head.get_method())
self.assertEqual("PUT", self.put.get_method())
self.assertEqual("POST", self.force_post.get_method())
def test_data(self):
self.assertFalse(self.get.data)
self.assertEqual("GET", self.get.get_method())
self.get.data = "spam"
self.assertTrue(self.get.data)
self.assertEqual("POST", self.get.get_method())
# issue 16464
# if we change data we need to remove content-length header
# (cause it's most probably calculated for previous value)
def test_setting_data_should_remove_content_length(self):
self.assertNotIn("Content-length", self.get.unredirected_hdrs)
self.get.add_unredirected_header("Content-length", 42)
self.assertEqual(42, self.get.unredirected_hdrs["Content-length"])
self.get.data = "spam"
self.assertNotIn("Content-length", self.get.unredirected_hdrs)
# issue 17485 same for deleting data.
def test_deleting_data_should_remove_content_length(self):
self.assertNotIn("Content-length", self.get.unredirected_hdrs)
self.get.data = 'foo'
self.get.add_unredirected_header("Content-length", 3)
self.assertEqual(3, self.get.unredirected_hdrs["Content-length"])
del self.get.data
self.assertNotIn("Content-length", self.get.unredirected_hdrs)
def test_get_full_url(self):
self.assertEqual("http://www.python.org/~jeremy/",
self.get.get_full_url())
def test_selector(self):
self.assertEqual("/~jeremy/", self.get.selector)
req = Request("http://www.python.org/")
self.assertEqual("/", req.selector)
def test_get_type(self):
self.assertEqual("http", self.get.type)
def test_get_host(self):
self.assertEqual("www.python.org", self.get.host)
def test_get_host_unquote(self):
req = Request("http://www.%70ython.org/")
self.assertEqual("www.python.org", req.host)
def test_proxy(self):
self.assertFalse(self.get.has_proxy())
self.get.set_proxy("www.perl.org", "http")
self.assertTrue(self.get.has_proxy())
self.assertEqual("www.python.org", self.get.origin_req_host)
self.assertEqual("www.perl.org", self.get.host)
def test_wrapped_url(self):
req = Request("<URL:http://www.python.org>")
self.assertEqual("www.python.org", req.host)
def test_url_fragment(self):
req = Request("http://www.python.org/?qs=query#fragment=true")
self.assertEqual("/?qs=query", req.selector)
req = Request("http://www.python.org/#fun=true")
self.assertEqual("/", req.selector)
# Issue 11703: geturl() omits fragment in the original URL.
url = 'http://docs.python.org/library/urllib2.html#OK'
req = Request(url)
self.assertEqual(req.get_full_url(), url)
def test_url_fullurl_get_full_url(self):
urls = ['http://docs.python.org',
'http://docs.python.org/library/urllib2.html#OK',
'http://www.python.org/?qs=query#fragment=true']
for url in urls:
req = Request(url)
self.assertEqual(req.get_full_url(), req.full_url)
if __name__ == "__main__":
unittest.main()
|
{
"content_hash": "73bc1a220d4d2731d3fd19999b0e6776",
"timestamp": "",
"source": "github",
"line_count": 1782,
"max_line_length": 83,
"avg_line_length": 39.67732884399551,
"alnum_prop": 0.5706102821582633,
"repo_name": "ms-iot/python",
"id": "d1423ccc486e1c50bb135ab87c724597ae6fec3d",
"size": "70705",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "cpython/Lib/test/test_urllib2.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "481852"
},
{
"name": "Batchfile",
"bytes": "35616"
},
{
"name": "C",
"bytes": "15555469"
},
{
"name": "C#",
"bytes": "1231"
},
{
"name": "C++",
"bytes": "726292"
},
{
"name": "CSS",
"bytes": "2839"
},
{
"name": "Common Lisp",
"bytes": "24481"
},
{
"name": "DIGITAL Command Language",
"bytes": "26402"
},
{
"name": "HTML",
"bytes": "130698"
},
{
"name": "JavaScript",
"bytes": "10616"
},
{
"name": "M4",
"bytes": "223087"
},
{
"name": "Makefile",
"bytes": "197108"
},
{
"name": "Objective-C",
"bytes": "2098686"
},
{
"name": "PLSQL",
"bytes": "22886"
},
{
"name": "PostScript",
"bytes": "13803"
},
{
"name": "PowerShell",
"bytes": "1372"
},
{
"name": "Python",
"bytes": "24948876"
},
{
"name": "Roff",
"bytes": "254942"
},
{
"name": "Shell",
"bytes": "437386"
},
{
"name": "TeX",
"bytes": "323102"
},
{
"name": "Visual Basic",
"bytes": "481"
}
],
"symlink_target": ""
}
|
import pytest
@pytest.fixture(scope="function")
def workspace():
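    # Function scope: every test receives its own fresh Workspace.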
from structurizr import Workspace
return Workspace("Name", "Description")
|
{
"content_hash": "29d47d6986ad5db7926ceb724a4e93c6",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 43,
"avg_line_length": 18.75,
"alnum_prop": 0.7333333333333333,
"repo_name": "sixty-north/structurizr-python",
"id": "1bb1db2b82bbde807aed282851b11d8ddc7f082a",
"size": "150",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/core/conftest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "33222"
}
],
"symlink_target": ""
}
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'VKAuction.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^admin/', include(admin.site.urls)),
)
|
{
"content_hash": "4a3e6d5e5ca69a5152db4b5ae528ce97",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 54,
"avg_line_length": 25,
"alnum_prop": 0.6533333333333333,
"repo_name": "immzz/VKAuction",
"id": "288d07c97f68ce287734e934b4db9008403c4153",
"size": "300",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "VKAuction/VKAuction/urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2926"
}
],
"symlink_target": ""
}
|
import socket
import logging
class Run:
def __init__(self, sshClient):
self._sshClient = sshClient
self._logger = logging.getLogger('ssh')
def script(self, bashScript, outputTimeout=20 * 60):
self._logger.debug("Running bash script:\n\n%(bashScript)s\n", dict(bashScript=bashScript))
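        # Wrap the script in a quoted heredoc so the remote shell takes it
        # verbatim, and fold stderr into stdout with 2>&1 so a single
        # stream carries all output.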
command = "\n".join([
"sh 2>&1 << 'RACKATTACK_SSH_RUN_SCRIPT_EOF'",
bashScript,
"RACKATTACK_SSH_RUN_SCRIPT_EOF\n"])
transport = self._sshClient.get_transport()
chan = transport.open_session()
try:
chan.exec_command(command)
chan.settimeout(outputTimeout)
stdin = chan.makefile('wb', -1)
stdout = chan.makefile('rb', -1)
stderr = chan.makefile_stderr('rb', -1)
stdin.close()
outputArray = []
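            # Drain stdout in 4 KiB chunks until EOF; the settimeout() above
            # raises socket.timeout if no output arrives within outputTimeout.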
try:
while True:
segment = stdout.read(4 * 1024)
if segment == "":
break
outputArray.append(segment)
except socket.timeout:
output = "".join(outputArray)
e = socket.timeout(
"Timeout running '%s', no input for timeout of '%s'. Partial output was\n:%s" % (
bashScript, outputTimeout, output))
e.output = output
raise e
output = "".join(outputArray)
status = chan.recv_exit_status()
stderr.read()
stdout.close()
stderr.close()
self._logger.debug("Bash script output:\n\n%(output)s\n", dict(output=output))
if status != 0:
e = Exception("Failed running '%s', status '%s', output was:\n%s" % (
bashScript, status, output))
e.output = output
raise e
return output
finally:
chan.close()
def backgroundScript(self, bashScript):
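        # Launch detached via nohup with output discarded, so the script
        # outlives this SSH channel; the exit status reflects only the launch.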
command = "\n".join([
"nohup sh << 'RACKATTACK_SSH_RUN_SCRIPT_EOF' >& /dev/null &",
bashScript,
"RACKATTACK_SSH_RUN_SCRIPT_EOF\n"])
transport = self._sshClient.get_transport()
chan = transport.open_session()
try:
chan.exec_command(command)
status = chan.recv_exit_status()
if status != 0:
raise Exception("Failed running '%s', status '%s'" % (bashScript, status))
finally:
chan.close()
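# A minimal usage sketch (assumes a connected paramiko.SSHClient named `client`):
#   output = Run(client).script("echo hello")
#   Run(client).backgroundScript("sleep 60")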
|
{
"content_hash": "9a3818248f4dc7cd76e450422b43fcd6",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 101,
"avg_line_length": 37.76119402985075,
"alnum_prop": 0.5035573122529644,
"repo_name": "noam-stratoscale/rackattack-api",
"id": "83b1dc46445e4951a06c68dc91a9b2de422be0ea",
"size": "2530",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "py/rackattack/ssh/run.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "680"
},
{
"name": "Python",
"bytes": "62464"
}
],
"symlink_target": ""
}
|
from pyjamas.ui.Sink import Sink, SinkInfo
from pyjamas.ui.ListBox import ListBox
from pyjamas.ui.HorizontalPanel import HorizontalPanel
from pyjamas.ui.VerticalPanel import VerticalPanel
from pyjamas.ui import HasAlignment
from pyjamas.ui.Label import Label
from pyjamas.ui.Widget import Widget
class Lists(Sink):
def __init__(self):
Sink.__init__(self)
self.sStrings=[["foo0", "bar0", "baz0", "toto0", "tintin0"],
["foo1", "bar1", "baz1", "toto1", "tintin1"],
["foo2", "bar2", "baz2", "toto2", "tintin2"],
["foo3", "bar3", "baz3", "toto3", "tintin3"],
["foo4", "bar4", "baz4", "toto4", "tintin4"]]
self.combo=ListBox(VisibleItemCount=1)
self.list=ListBox(MultipleSelect=True, VisibleItemCount=10)
self.echo=Label()
self.combo.addChangeListener(self)
for i in range(len(self.sStrings)):
txt = "List %d" % i
self.combo.addItem(txt)
# test setItemText
self.combo.setItemText(i, txt + " using set text")
self.combo.setSelectedIndex(0)
self.fillList(0)
self.list.setItemSelected(0, False)
self.list.setItemSelected(1, True)
self.list.addChangeListener(self)
horz = HorizontalPanel(VerticalAlignment=HasAlignment.ALIGN_TOP,
Spacing=8)
horz.add(self.combo)
horz.add(self.list)
panel = VerticalPanel(HorizontalAlignment=HasAlignment.ALIGN_LEFT)
panel.add(horz)
panel.add(self.echo)
self.initWidget(panel)
self.echoSelection()
def onChange(self, sender):
if sender == self.combo:
self.fillList(self.combo.getSelectedIndex())
elif sender == self.list:
self.echoSelection()
def onShow(self):
pass
def fillList(self, idx):
self.list.clear()
strings = self.sStrings[idx]
for i in range(len(strings)):
self.list.addItem(strings[i])
self.echoSelection()
def echoSelection(self):
msg = "Selected items: "
for i in range(self.list.getItemCount()):
if self.list.isItemSelected(i):
msg += self.list.getItemText(i) + " "
self.echo.setText(msg)
def init():
text="Here is the ListBox widget in its two major forms."
return SinkInfo("Lists", text, Lists)
|
{
"content_hash": "014ef98a1602f377d56370fe684eef79",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 74,
"avg_line_length": 31.2987012987013,
"alnum_prop": 0.6012448132780083,
"repo_name": "Hasimir/pyjs",
"id": "e1bd23dbbe5c11befc291c0841d73799a3760cce",
"size": "2410",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "examples/kitchensink/sink/Lists.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "4640"
},
{
"name": "Groff",
"bytes": "6633"
},
{
"name": "HTML",
"bytes": "10106"
},
{
"name": "JavaScript",
"bytes": "63385"
},
{
"name": "Makefile",
"bytes": "453"
},
{
"name": "Python",
"bytes": "5515375"
},
{
"name": "Shell",
"bytes": "4264"
}
],
"symlink_target": ""
}
|
"""
Base settings file for development.
"""
import os
from webassets import Bundle
DEBUG = True
PROJECT_DIR = os.path.normpath(
os.path.join(
os.path.abspath(os.path.dirname(__file__)),
"..",
)
)
# Common site name and domain to use available in templates
SITE_NAME = "{{ cookiecutter.project_name }}"
SITE_DOMAIN = "localhost"
# Sources directory where the assets will be searched
SOURCES_DIR = os.path.join(PROJECT_DIR, "sources")
# Templates directory
TEMPLATES_DIR = os.path.join(SOURCES_DIR, "templates")
# Directory where everything will be built
PUBLISH_DIR = os.path.join(PROJECT_DIR, "_build", "dev")
# Path where all the static files will be moved, usually a directory inside
# the ``PUBLISH_DIR``
STATIC_DIR = os.path.join(PROJECT_DIR, PUBLISH_DIR, "static")
# Path to the i18n messages catalog directory
LOCALES_DIR = os.path.join(PROJECT_DIR, "locale")
# Python path to views module which enable the page views to build
PAGES_MAP = "views"
# Locale name for default language to use for Pages
LANGUAGE_CODE = "en_US"
# A list of locale name for all available languages to manage with PO files
LANGUAGES = (LANGUAGE_CODE, "fr_FR")
# The static url to use in templates and with webassets
# This can be a full URL like http://, a relative path or an absolute path
STATIC_URL = "static/"
# Extra or custom bundles
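# Each Bundle maps source assets to the output file they are built into;
# "filters=None" means the sources are combined as-is, without minification.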
BUNDLES = {
"modernizr_js": Bundle(
"js/modernizr.src.js", filters=None, output="js/modernizr.min.js"
),
"app_css": Bundle("css/app.css", filters=None, output="css/app.min.css"),
"app_js": Bundle("js/app.js", filters=None, output="js/app.min.js"),
}
# Sources files or directory to synchronize within the static directory
FILES_TO_SYNC = (
# Synchronize compiled CSS
"css",
# Synchronize images if any
# "images",
)
|
{
"content_hash": "3e79d6a356f8441be1eb1fda9382a954",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 79,
"avg_line_length": 30.283333333333335,
"alnum_prop": 0.7006053935057788,
"repo_name": "sveetch/Optimus",
"id": "9843a4237ed145df78464afc7305a96adef3c3c1",
"size": "1841",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "optimus/starters/basic/{{cookiecutter.package_name}}/project/settings/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "14380"
},
{
"name": "HTML",
"bytes": "16553"
},
{
"name": "JavaScript",
"bytes": "101904"
},
{
"name": "Makefile",
"bytes": "1564"
},
{
"name": "Python",
"bytes": "245913"
},
{
"name": "Ruby",
"bytes": "855"
},
{
"name": "Smarty",
"bytes": "8827"
}
],
"symlink_target": ""
}
|
import gevent.monkey
gevent.monkey.patch_all()
import multiprocessing
#bind = 'unix:/path/to/june.sock'
bind = '127.0.0.1:8000'
workers = multiprocessing.cpu_count() * 2 + 1
worker_class = 'egg:gunicorn#gevent'
# you should change this
user = 'lepture'
# you may prefer the 'error' log level
accesslog = '-'
loglevel = 'warning'
errorlog = '-'
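# Headers the fronting proxy sets to mark a forwarded request as HTTPS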
secure_scheme_headers = {
'X-SCHEME': 'https',
'X-FORWARDED-PROTOCOL': 'ssl',
'X-FORWARDED-PROTO': 'https',
'X-FORWARDED-SSL': 'on',
}
x_forwarded_for_header = 'X-FORWARDED-FOR'
|
{
"content_hash": "32901448ac6141ef03efa2b247df1fff",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 45,
"avg_line_length": 21.916666666666668,
"alnum_prop": 0.6711026615969582,
"repo_name": "lepture/june",
"id": "05cf75ed8d92ea3e738f152b0809c91d146ddae3",
"size": "549",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "etc/gunicorn.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "62114"
},
{
"name": "Python",
"bytes": "71538"
},
{
"name": "Shell",
"bytes": "1081"
}
],
"symlink_target": ""
}
|
"""
Created on Thu Jun 23 09:45:44 2016
@author: Arturo
"""
import signal
import sys
import time
import pyupm_grove as grove
import pyupm_i2clcd as lcd
def interruptHandler(signal, frame):
sys.exit(0)
if __name__ == '__main__':
signal.signal(signal.SIGINT, interruptHandler)
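    # Grove RGB LCD (JHD1313M1) on bus 0: display at I2C 0x3E, backlight at 0x62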
myLcd = lcd.Jhd1313m1(0, 0x3E, 0x62)
sensortemp = grove.GroveTemp(0)
    colorR = 255
    colorG = 0
    colorB = 0
myLcd.setColor(colorR,colorG,colorB)
# Read the input and print, waiting 1/2 second between readings
while True:
        valorSensor = sensortemp.value()
myLcd.setCursor(0,0)
myLcd.write('%6d'% valorSensor)
time.sleep(0.5)
del sensortemp
|
{
"content_hash": "54ffd74434b4d9934132fec525a01cab",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 63,
"avg_line_length": 21.294117647058822,
"alnum_prop": 0.6298342541436464,
"repo_name": "pasc-04/CursoGalileo",
"id": "1a5abcefcd67523991b3bb1f96b62146ee3e30a5",
"size": "749",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sensortemp.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6548"
}
],
"symlink_target": ""
}
|
import unittest
from conans.search.binary_html_table import RowResult, Headers, Results
class RowResultTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
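        # Minimal package entry as consumed by RowResult; note that the
        # 'extra' key is never read.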
data = {'id': '1234',
'outdated': True,
'extra': 'never used',
'settings': {'os': 'Windows'},
'options': {'opt.key1': 23},
'requires': ['pkg1/version:1234', 'pkg2/version@user/channel:12345']}
cls.row_result = RowResult("remote", "name/version@user/testing", data)
def test_basic(self):
self.assertEqual(self.row_result.remote, "remote")
self.assertEqual(self.row_result.reference, "name/version@user/testing")
self.assertEqual(self.row_result.recipe, "name/version@user/testing")
self.assertEqual(self.row_result.package_id, "1234")
self.assertEqual(self.row_result.outdated, True)
def test_row(self):
headers = Headers(settings=['os', 'os.api'], options=['opt.key1'], requires=True,
keys=['remote', 'reference', 'outdated', 'package_id'])
row = list(self.row_result.row(headers))
self.assertEqual(row, ['remote', 'name/version@user/testing', True, '1234', # Keys
'Windows', None, # Settings
23, # Options
'pkg1/version, pkg2/version@user/channel' # Requires
])
class HeadersTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
settings = ['build_type', 'os', 'other', 'compiler', 'compiler.version',
'compiler.libcxx', 'os.api', ]
options = ['opt.key1', 'opt2']
requires = True
keys = ['remote', 'reference', 'outdated', 'package_id']
cls.headers = Headers(settings, options, requires, keys)
def test_settings_ordering(self):
self.assertEqual(self.headers.settings, ['os', 'os.api', 'compiler', 'compiler.libcxx',
'compiler.version', 'build_type', 'other'])
def test_1row(self):
row = self.headers.row(n_rows=1)
# Order: keys, settings, options and requires
self.assertEqual(row, [
'remote', 'reference', 'outdated', 'package_id',
'os', 'os.api', 'compiler', 'compiler.libcxx', 'compiler.version', 'build_type', 'other',
'opt.key1', 'opt2',
'requires'])
def test_2row(self):
row = self.headers.row(n_rows=2)
self.assertEqual(row, [
# Keys
('remote', ['']), ('reference', ['']), ('outdated', ['']), ('package_id', ['']),
# Settings
('os', ['', 'api']), ('compiler', ['', 'libcxx', 'version']), ('build_type', ['']),
('other', ['']),
# Options
('options', ['opt.key1', 'opt2']),
# Requires
('requires', [''])
])
class ResultsTestCase(unittest.TestCase):
def test_gather_data(self):
# Data returned by the API protocol
json = [
{
'remote': 'remote1',
'items': [{
'recipe': {'id': 'name/version@user/channel'},
'packages': [
{
'settings': {'os': 'Windows', 'os.api': 23},
'options': {'opt.key1': 'option_value'},
'requires': []
},
{
'settings': {'os': 'Windows', 'compiler': 'VS'},
'options': {},
'requires': ['pkgA/vv:1234', 'pkgB/vv@user/testing:12345']
}
]
}]
},
{
'remote': 'remote2',
'items': [{'packages': []}]
}
]
results = Results(json)
self.assertListEqual(sorted(results.settings), sorted(['os.api', 'os', 'compiler']))
self.assertListEqual(results.options, ['opt.key1'])
self.assertEqual(results.requires, True)
self.assertListEqual(sorted(results.remotes), sorted(['remote1', 'remote2']))
|
{
"content_hash": "dba3bbb5e3f54dba031595c731553d2b",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 101,
"avg_line_length": 40.75238095238095,
"alnum_prop": 0.485627483056789,
"repo_name": "conan-io/conan",
"id": "c78f62ebd556ff9328a27529c6bf81d369a5af64",
"size": "4279",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "conans/test/unittests/search/binary_html_table_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "264"
},
{
"name": "C++",
"bytes": "425"
},
{
"name": "CMake",
"bytes": "447"
},
{
"name": "Python",
"bytes": "8209945"
}
],
"symlink_target": ""
}
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
from telemetry import decorators
from telemetry.core import exceptions
from telemetry.core import forwarders
from telemetry.core import util
from telemetry.core.backends.chrome import chrome_browser_backend
from telemetry.core.backends.chrome import misc_web_contents_backend
class CrOSBrowserBackend(chrome_browser_backend.ChromeBrowserBackend):
def __init__(self, cros_platform_backend, browser_options, cri, is_guest,
extensions_to_load):
super(CrOSBrowserBackend, self).__init__(
cros_platform_backend, supports_tab_control=True,
supports_extensions=not is_guest,
browser_options=browser_options,
output_profile_path=None, extensions_to_load=extensions_to_load)
assert browser_options.IsCrosBrowserOptions()
# Initialize fields so that an explosion during init doesn't break in Close.
self._cri = cri
self._is_guest = is_guest
self._forwarder = None
self.wpr_port_pairs = forwarders.PortPairs(
http=forwarders.PortPair(self.wpr_port_pairs.http.local_port,
self._platform_backend.GetRemotePort(
self.wpr_port_pairs.http.local_port)),
https=forwarders.PortPair(self.wpr_port_pairs.https.local_port,
self._platform_backend.GetRemotePort(
self.wpr_port_pairs.http.local_port)),
dns=None)
self._remote_debugging_port = self._cri.GetRemotePort()
self._port = self._remote_debugging_port
# Copy extensions to temp directories on the device.
# Note that we also perform this copy locally to ensure that
# the owner of the extensions is set to chronos.
for e in extensions_to_load:
extension_dir = cri.RunCmdOnDevice(
['mktemp', '-d', '/tmp/extension_XXXXX'])[0].rstrip()
e.local_path = os.path.join(extension_dir, os.path.basename(e.path))
cri.PushFile(e.path, extension_dir)
cri.Chown(extension_dir)
self._cri.RestartUI(self.browser_options.clear_enterprise_policy)
util.WaitFor(self.IsBrowserRunning, 20)
# Delete test user's cryptohome vault (user data directory).
if not self.browser_options.dont_override_profile:
self._cri.RunCmdOnDevice(['cryptohome', '--action=remove', '--force',
'--user=%s' % self._username])
if self.browser_options.profile_dir:
cri.RmRF(self.profile_directory)
cri.PushFile(self.browser_options.profile_dir + '/Default',
self.profile_directory)
cri.Chown(self.profile_directory)
def GetBrowserStartupArgs(self):
args = super(CrOSBrowserBackend, self).GetBrowserStartupArgs()
args.extend([
'--enable-smooth-scrolling',
'--enable-threaded-compositing',
'--enable-per-tile-painting',
# Allow devtools to connect to chrome.
'--remote-debugging-port=%i' % self._remote_debugging_port,
# Open a maximized window.
'--start-maximized',
# Skip user image selection screen, and post login screens.
'--oobe-skip-postlogin',
# Debug logging.
'--vmodule=*/chromeos/net/*=2,*/chromeos/login/*=2'])
# Disable GAIA services unless we're using GAIA login, or if there's an
# explicit request for it.
if (self.browser_options.disable_gaia_services and
not self.browser_options.gaia_login):
args.append('--disable-gaia-services')
return args
@property
def pid(self):
return self._cri.GetChromePid()
@property
def browser_directory(self):
result = self._cri.GetChromeProcess()
if result and 'path' in result:
return os.path.dirname(result['path'])
return None
@property
def profile_directory(self):
return '/home/chronos/Default'
def __del__(self):
self.Close()
def Start(self):
# Escape all commas in the startup arguments we pass to Chrome
# because dbus-send delimits array elements by commas
startup_args = [a.replace(',', '\\,') for a in self.GetBrowserStartupArgs()]
# Restart Chrome with the login extension and remote debugging.
logging.info('Restarting Chrome with flags and login')
args = ['dbus-send', '--system', '--type=method_call',
'--dest=org.chromium.SessionManager',
'/org/chromium/SessionManager',
'org.chromium.SessionManagerInterface.EnableChromeTesting',
'boolean:true',
'array:string:"%s"' % ','.join(startup_args)]
self._cri.RunCmdOnDevice(args)
if not self._cri.local:
self._port = util.GetUnreservedAvailableLocalPort()
self._forwarder = self._platform_backend.forwarder_factory.Create(
forwarders.PortPairs(
http=forwarders.PortPair(self._port, self._remote_debugging_port),
https=None,
dns=None), use_remote_port_forwarding=False)
# Wait for oobe.
self._WaitForBrowserToComeUp()
self._InitDevtoolsClientBackend(
remote_devtools_port=self._remote_debugging_port)
util.WaitFor(lambda: self.oobe_exists, 10)
if self.browser_options.auto_login:
try:
if self._is_guest:
pid = self.pid
self.oobe.NavigateGuestLogin()
# Guest browsing shuts down the current browser and launches an
# incognito browser in a separate process, which we need to wait for.
util.WaitFor(lambda: pid != self.pid, 10)
elif self.browser_options.gaia_login:
self.oobe.NavigateGaiaLogin(self._username, self._password)
else:
self.oobe.NavigateFakeLogin(self._username, self._password)
self._WaitForLogin()
except util.TimeoutException:
self._cri.TakeScreenShot('login-screen')
raise exceptions.LoginException('Timed out going through login screen')
logging.info('Browser is up!')
def Close(self):
super(CrOSBrowserBackend, self).Close()
if self._cri:
self._cri.RestartUI(False) # Logs out.
self._cri.CloseConnection()
util.WaitFor(lambda: not self._IsCryptohomeMounted(), 180)
if self._forwarder:
self._forwarder.Close()
self._forwarder = None
if self._cri:
for e in self._extensions_to_load:
self._cri.RmRF(os.path.dirname(e.local_path))
self._cri = None
def IsBrowserRunning(self):
return bool(self.pid)
def GetStandardOutput(self):
return 'Cannot get standard output on CrOS'
def GetStackTrace(self):
return 'Cannot get stack trace on CrOS'
@property
@decorators.Cache
def misc_web_contents_backend(self):
"""Access to chrome://oobe/login page."""
return misc_web_contents_backend.MiscWebContentsBackend(self)
@property
def oobe(self):
return self.misc_web_contents_backend.GetOobe()
@property
def oobe_exists(self):
return self.misc_web_contents_backend.oobe_exists
@property
def _username(self):
return self.browser_options.username
@property
def _password(self):
return self.browser_options.password
def _IsCryptohomeMounted(self):
username = '$guest' if self._is_guest else self._username
return self._cri.IsCryptohomeMounted(username, self._is_guest)
def _IsLoggedIn(self):
"""Returns True if cryptohome has mounted, the browser is
responsive to devtools requests, and the oobe has been dismissed."""
return (self._IsCryptohomeMounted() and
self.HasBrowserFinishedLaunching() and
not self.oobe_exists)
def _WaitForLogin(self):
# Wait for cryptohome to mount.
util.WaitFor(self._IsLoggedIn, 60)
# For incognito mode, the session manager actually relaunches chrome with
# new arguments, so we have to wait for the browser to come up.
self._WaitForBrowserToComeUp()
# Wait for extensions to load.
if self._supports_extensions:
self._WaitForExtensionsToLoad()
|
{
"content_hash": "a345a64c1240909b1a6dcf3d069edeb8",
"timestamp": "",
"source": "github",
"line_count": 223,
"max_line_length": 80,
"avg_line_length": 36.47085201793722,
"alnum_prop": 0.6648223287839665,
"repo_name": "CTSRD-SOAAP/chromium-42.0.2311.135",
"id": "a5c14c33cba8e4ffcfd2c59486c89e7924c9eb5f",
"size": "8133",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tools/telemetry/telemetry/core/backends/chrome/cros_browser_backend.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "8402"
},
{
"name": "Assembly",
"bytes": "241154"
},
{
"name": "C",
"bytes": "12370053"
},
{
"name": "C++",
"bytes": "266788423"
},
{
"name": "CMake",
"bytes": "27829"
},
{
"name": "CSS",
"bytes": "813488"
},
{
"name": "Emacs Lisp",
"bytes": "2360"
},
{
"name": "Go",
"bytes": "13628"
},
{
"name": "Groff",
"bytes": "5283"
},
{
"name": "HTML",
"bytes": "20131029"
},
{
"name": "Java",
"bytes": "8495790"
},
{
"name": "JavaScript",
"bytes": "12980966"
},
{
"name": "LLVM",
"bytes": "1169"
},
{
"name": "Logos",
"bytes": "6893"
},
{
"name": "Lua",
"bytes": "16189"
},
{
"name": "Makefile",
"bytes": "208709"
},
{
"name": "Objective-C",
"bytes": "1509363"
},
{
"name": "Objective-C++",
"bytes": "7960581"
},
{
"name": "PLpgSQL",
"bytes": "215882"
},
{
"name": "Perl",
"bytes": "63937"
},
{
"name": "Protocol Buffer",
"bytes": "432373"
},
{
"name": "Python",
"bytes": "11147426"
},
{
"name": "Ragel in Ruby Host",
"bytes": "104923"
},
{
"name": "Scheme",
"bytes": "10604"
},
{
"name": "Shell",
"bytes": "1207731"
},
{
"name": "Standard ML",
"bytes": "4965"
},
{
"name": "VimL",
"bytes": "4075"
},
{
"name": "nesC",
"bytes": "18347"
}
],
"symlink_target": ""
}
|
from collections import OrderedDict
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.http import HttpResponseForbidden, JsonResponse, HttpResponse, HttpResponseBadRequest
from django.shortcuts import render, get_object_or_404
from django.template import loader
from django.template.context_processors import csrf
from django.urls import reverse
from herders.decorators import username_case_redirect
from herders.filters import ArtifactInstanceFilter
from herders.forms import FilterArtifactForm, ArtifactInstanceForm, AssignArtifactForm
from herders.models import Summoner, MonsterInstance, ArtifactInstance, ArtifactCraftInstance
@username_case_redirect
def artifacts(request, profile_name):
try:
summoner = Summoner.objects.select_related('user').get(user__username=profile_name)
except Summoner.DoesNotExist:
return render(request, 'herders/profile/not_found.html')
is_owner = (request.user.is_authenticated and summoner.user == request.user)
filter_form = FilterArtifactForm(auto_id="filter_id_%s")
filter_form.helper.form_action = reverse('herders:artifact_inventory', kwargs={'profile_name': profile_name})
context = {
'view': 'artifacts',
'profile_name': profile_name,
'summoner': summoner,
'is_owner': is_owner,
'filter_form': filter_form,
}
if is_owner or summoner.public:
return render(request, 'herders/profile/artifacts/base.html', context)
else:
return render(request, 'herders/profile/not_public.html', context)
@username_case_redirect
def inventory(request, profile_name, box_grouping=None):
    # If a box grouping was passed in, store it in the session and acknowledge
if box_grouping:
request.session['artifact_inventory_box_method'] = box_grouping.lower()
if request.session.modified:
return HttpResponse("Artifact view mode cookie set")
box_grouping = request.session.get('artifact_inventory_box_method', 'slot').lower()
try:
summoner = Summoner.objects.select_related('user').get(user__username=profile_name)
except Summoner.DoesNotExist:
return HttpResponseBadRequest()
is_owner = (request.user.is_authenticated and summoner.user == request.user)
artifact_queryset = ArtifactInstance.objects.filter(
owner=summoner
).select_related(
'assigned_to', 'assigned_to__monster'
).order_by('-quality', '-level')
total_count = artifact_queryset.count()
form = FilterArtifactForm(request.GET or None)
if form.is_valid():
artifact_filter = ArtifactInstanceFilter(form.cleaned_data, queryset=artifact_queryset)
else:
artifact_filter = ArtifactInstanceFilter(None, queryset=artifact_queryset)
filtered_count = artifact_filter.qs.count()
context = {
'artifacts': artifact_filter.qs,
'total_count': total_count,
'filtered_count': filtered_count,
'profile_name': profile_name,
'summoner': summoner,
'is_owner': is_owner,
}
if is_owner or summoner.public:
artifact_box = []
if box_grouping == 'slot':
# Element + archetype
for slot_val, slot_desc in ArtifactInstance.NORMAL_ELEMENT_CHOICES:
artifact_box.append({
'name': slot_desc,
'artifacts': artifact_filter.qs.filter(element=slot_val).order_by('main_stat', '-level')
})
for slot_val, slot_desc in ArtifactInstance.ARCHETYPE_CHOICES:
artifact_box.append({
'name': slot_desc,
'artifacts': artifact_filter.qs.filter(archetype=slot_val).order_by('main_stat', '-level')
})
elif box_grouping == 'quality':
for qual_val, qual_desc in reversed(ArtifactInstance.QUALITY_CHOICES):
artifact_box.append({
'name': qual_desc,
'artifacts': artifact_filter.qs.filter(quality=qual_val)
})
elif box_grouping == 'orig. quality':
for qual_val, qual_desc in reversed(ArtifactInstance.QUALITY_CHOICES):
artifact_box.append({
'name': qual_desc,
'artifacts': artifact_filter.qs.filter(original_quality=qual_val)
})
elif box_grouping == 'equipped':
artifact_box.append({
'name': 'Not Equipped',
'artifacts': artifact_filter.qs.filter(assigned_to__isnull=True)
})
            # Create a dictionary of monster PKs and their equipped artifacts
            monsters = OrderedDict()
            assigned_artifacts = artifact_filter.qs.filter(
                assigned_to__isnull=False
            ).select_related(
                'assigned_to',
                'assigned_to__monster'
            ).order_by(
                'assigned_to__monster__name',
                'slot'
            )
            for artifact in assigned_artifacts:
                if artifact.assigned_to.pk not in monsters:
                    monsters[artifact.assigned_to.pk] = {
                        'name': str(artifact.assigned_to),
                        'artifacts': []
                    }
                monsters[artifact.assigned_to.pk]['artifacts'].append(artifact)
            for monster_artifacts in monsters.values():
                artifact_box.append(monster_artifacts)
context['artifacts'] = artifact_box
context['box_grouping'] = box_grouping
return render(request, 'herders/profile/artifacts/inventory.html', context)
else:
return render(request, 'herders/profile/not_public.html', context)
@username_case_redirect
@login_required
def add(request, profile_name):
form = ArtifactInstanceForm(request.POST or None)
form.helper.form_action = reverse('herders:artifact_add', kwargs={'profile_name': profile_name})
template = loader.get_template('herders/profile/artifacts/form.html')
if request.method == 'POST':
if form.is_valid():
            # Create the artifact instance
new_artifact = form.save(commit=False)
new_artifact.owner = request.user.summoner
new_artifact.save()
if new_artifact.assigned_to:
new_artifact.assigned_to.default_build.assign_artifact(new_artifact)
messages.success(request, 'Added ' + str(new_artifact))
# Send back blank form
form = ArtifactInstanceForm()
form.helper.form_action = reverse('herders:artifact_add', kwargs={'profile_name': profile_name})
context = {'form': form}
context.update(csrf(request))
response_data = {
'code': 'success',
'html': template.render(context)
}
else:
context = {'form': form}
context.update(csrf(request))
response_data = {
'code': 'error',
'html': template.render(context)
}
else:
# Check for any pre-filled GET parameters
element = request.GET.get('element', None)
archetype = request.GET.get('archetype', None)
specific_slot = request.GET.get('slot', None)
if specific_slot:
if specific_slot in [v[0] for v in ArtifactInstance.NORMAL_ELEMENT_CHOICES]:
slot = ArtifactInstance.SLOT_ELEMENTAL
element = specific_slot
archetype = None
elif specific_slot in [v[0] for v in ArtifactInstance.ARCHETYPE_CHOICES]:
slot = ArtifactInstance.SLOT_ARCHETYPE
archetype = specific_slot
element = None
else:
if element is not None:
slot = ArtifactInstance.SLOT_ELEMENTAL
elif archetype is not None:
slot = ArtifactInstance.SLOT_ARCHETYPE
else:
slot = None
assigned_to = request.GET.get('assigned_to', None)
try:
assigned_monster = MonsterInstance.objects.get(owner=request.user.summoner, pk=assigned_to)
except MonsterInstance.DoesNotExist:
assigned_monster = None
form = ArtifactInstanceForm(initial={
'assigned_to': assigned_monster,
'slot': slot,
'element': element,
'archetype': archetype,
})
form.helper.form_action = reverse('herders:artifact_add', kwargs={'profile_name': profile_name})
# Return form filled in and errors shown
context = {'form': form}
context.update(csrf(request))
response_data = {
'html': template.render(context)
}
return JsonResponse(response_data)
@username_case_redirect
@login_required
def edit(request, profile_name, artifact_id):
artifact = get_object_or_404(ArtifactInstance, pk=artifact_id)
try:
summoner = Summoner.objects.select_related('user').get(user__username=profile_name)
except Summoner.DoesNotExist:
return HttpResponseBadRequest()
is_owner = (request.user.is_authenticated and summoner.user == request.user)
form = ArtifactInstanceForm(request.POST or None, instance=artifact)
form.helper.form_action = reverse('herders:artifact_edit', kwargs={'profile_name': profile_name, 'artifact_id': artifact_id})
template = loader.get_template('herders/profile/artifacts/form.html')
if is_owner:
context = {'form': form}
context.update(csrf(request))
if request.method == 'POST' and form.is_valid():
orig_assigned_to = ArtifactInstance.objects.get(pk=form.instance.pk).assigned_to
artifact = form.save()
if orig_assigned_to and artifact.assigned_to != orig_assigned_to:
orig_assigned_to.default_build.artifacts.remove(artifact)
artifact.assigned_to.default_build.assign_artifact(artifact)
elif not orig_assigned_to and artifact.assigned_to:
artifact.assigned_to.default_build.assign_artifact(artifact)
elif orig_assigned_to and artifact.assigned_to == orig_assigned_to:
artifact.assigned_to.default_build.clear_cache_properties()
artifact.assigned_to.default_build.update_stats()
artifact.assigned_to.default_build.save()
messages.success(request, 'Saved changes to ' + str(artifact))
form = ArtifactInstanceForm()
form.helper.form_action = reverse('herders:artifact_edit', kwargs={'profile_name': profile_name, 'artifact_id': artifact_id})
context = {'form': form}
context.update(csrf(request))
response_data = {
'code': 'success',
'html': template.render(context)
}
else:
context = {'form': form}
context.update(csrf(request))
# Return form filled in and errors shown
response_data = {
'code': 'error',
'html': template.render(context)
}
return JsonResponse(response_data)
else:
return HttpResponseForbidden()
@username_case_redirect
@login_required
def assign(request, profile_name, instance_id, slot=None):
qs = ArtifactInstance.objects.filter(owner=request.user.summoner)
filter_form = AssignArtifactForm(request.POST or None, initial={'slot': [slot]}, prefix='assign')
filter_form.helper.form_action = reverse('herders:artifact_assign', kwargs={'profile_name': profile_name, 'instance_id': instance_id})
if request.method == 'POST' and filter_form.is_valid():
filter = ArtifactInstanceFilter(filter_form.cleaned_data, queryset=qs)
template = loader.get_template('herders/profile/artifacts/assign_results.html')
context = {
'filter': filter.qs,
'profile_name': profile_name,
'instance_id': instance_id,
}
context.update(csrf(request))
response_data = {
'code': 'results',
'html': template.render(context)
}
else:
filter = ArtifactInstanceFilter({'slot': [slot]}, queryset=qs)
template = loader.get_template('herders/profile/artifacts/assign_form.html')
context = {
'filter': filter.qs,
'form': filter_form,
'profile_name': profile_name,
'instance_id': instance_id,
}
context.update(csrf(request))
response_data = {
'code': 'success',
'html': template.render(context)
}
return JsonResponse(response_data)
@username_case_redirect
@login_required
def assign_choice(request, profile_name, instance_id, artifact_id):
monster = get_object_or_404(MonsterInstance, pk=instance_id)
build = monster.default_build
artifact = get_object_or_404(ArtifactInstance, pk=artifact_id)
build.assign_artifact(artifact)
response_data = {
'code': 'success',
}
return JsonResponse(response_data)
@username_case_redirect
@login_required
def unassign(request, profile_name, artifact_id):
try:
summoner = Summoner.objects.select_related('user').get(user__username=profile_name)
except Summoner.DoesNotExist:
return HttpResponseBadRequest()
is_owner = (request.user.is_authenticated and summoner.user == request.user)
if is_owner:
artifact = get_object_or_404(ArtifactInstance, pk=artifact_id)
monster = artifact.assigned_to
if monster:
monster.default_build.artifacts.remove(artifact)
response_data = {
'code': 'success',
}
return JsonResponse(response_data)
else:
return HttpResponseForbidden()
@username_case_redirect
@login_required
def delete(request, profile_name, artifact_id):
try:
summoner = Summoner.objects.select_related('user').get(user__username=profile_name)
except Summoner.DoesNotExist:
return HttpResponseBadRequest()
is_owner = (request.user.is_authenticated and summoner.user == request.user)
if is_owner:
artifact = get_object_or_404(ArtifactInstance, pk=artifact_id)
mon = artifact.assigned_to
if mon:
mon.default_build.artifacts.remove(artifact)
artifact.delete()
messages.warning(request, 'Deleted ' + str(artifact))
if mon:
mon.save()
response_data = {
'code': 'success',
}
return JsonResponse(response_data)
else:
return HttpResponseForbidden()
|
{
"content_hash": "1b47f53681646fb84ae7bfae01b39a98",
"timestamp": "",
"source": "github",
"line_count": 402,
"max_line_length": 138,
"avg_line_length": 36.669154228855724,
"alnum_prop": 0.6173936639305339,
"repo_name": "porksmash/swarfarm",
"id": "fcec86e791645f78ab32ed415280de9107414e12",
"size": "14741",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "herders/views/artifacts.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "29358"
},
{
"name": "HTML",
"bytes": "349774"
},
{
"name": "JavaScript",
"bytes": "80827"
},
{
"name": "Python",
"bytes": "932930"
},
{
"name": "Shell",
"bytes": "3018"
}
],
"symlink_target": ""
}
|
from .elements import element_line, element_rect, element_text, element_blank
from .theme import theme
from .theme_gray import theme_gray
class theme_linedraw(theme_gray):
"""
A theme with only black lines of various widths on white backgrounds
Parameters
----------
base_size : int, optional
Base font size. All text sizes are a scaled versions of
the base font size. Default is 11.
base_family : str, optional
Base font family.
"""
def __init__(self, base_size=11, base_family=None):
theme_gray.__init__(self, base_size, base_family)
self += theme(
axis_text=element_text(color='black', size=base_size*0.8),
axis_ticks=element_line(color='black', size=0.5),
axis_ticks_minor=element_blank(),
legend_key=element_rect(color='black', size=0.5),
panel_background=element_rect(fill='white'),
panel_border=element_rect(fill='None', color='black', size=1),
panel_grid_major=element_line(color='black', size=0.1),
panel_grid_minor=element_line(color='black', size=0.02),
strip_background=element_rect(fill='black', color='black', size=1),
strip_text_x=element_text(color='white'),
strip_text_y=element_text(color='white', angle=-90)
)
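# A minimal usage sketch (assumes plotnine's public ggplot API):
#   from plotnine import ggplot, aes, geom_point
#   from plotnine.data import mtcars
#   ggplot(mtcars, aes('wt', 'mpg')) + geom_point() + theme_linedraw()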
|
{
"content_hash": "3b02647a5c35c44fda45a4f12db86986",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 79,
"avg_line_length": 40.75757575757576,
"alnum_prop": 0.6178438661710037,
"repo_name": "has2k1/plotnine",
"id": "d560bd5f40b865ea48daad20c5e4ec3e8da14803",
"size": "1345",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "plotnine/themes/theme_linedraw.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1729"
},
{
"name": "Python",
"bytes": "991031"
},
{
"name": "Shell",
"bytes": "2997"
}
],
"symlink_target": ""
}
|
import os
from twitter.pants.targets.exportable_jvm_library import ExportableJvmLibrary
class JavaProtobufLibrary(ExportableJvmLibrary):
"""Defines a target that builds java stubs from a protobuf IDL file."""
def __init__(self,
name,
sources,
provides = None,
dependencies = None,
excludes = None,
buildflags = None,
is_meta = False):
"""name: The name of this module target, addressable via pants via the portion of the spec
following the colon
sources: A list of paths containing the protobuf source files this modules jar is compiled from
provides: An optional Dependency object indicating the The ivy artifact to export
dependencies: An optional list of Dependency objects specifying the binary (jar) dependencies of
this module.
excludes: An optional list of dependency exclude patterns to filter all of this module's
transitive dependencies against.
buildflags: A list of additional command line arguments to pass to the underlying build system
for this target"""
ExportableJvmLibrary.__init__(self,
name,
sources,
provides,
dependencies,
excludes,
buildflags,
is_meta)
self.add_label('java')
self.add_label('codegen')
def _as_jar_dependency(self):
return ExportableJvmLibrary._as_jar_dependency(self).with_sources()
def _create_template_data(self):
allsources = []
if self.sources:
allsources += list(os.path.join(self.target_base, src) for src in self.sources)
return ExportableJvmLibrary._create_template_data(self).extend(
allsources = allsources,
)
|
{
"content_hash": "057e70a1cab2900cb642cd8e433708a8",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 100,
"avg_line_length": 39.16326530612245,
"alnum_prop": 0.6039603960396039,
"repo_name": "foursquare/commons-old",
"id": "5191aeaf87adafa7e85d12914d20219041c6ce0a",
"size": "2820",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/python/twitter/pants/targets/java_protobuf_library.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "2164475"
},
{
"name": "Python",
"bytes": "1285839"
},
{
"name": "Scala",
"bytes": "24999"
},
{
"name": "Shell",
"bytes": "6233"
},
{
"name": "Smalltalk",
"bytes": "10614"
}
],
"symlink_target": ""
}
|
"""
Copyright (c) 2015, BROCADE COMMUNICATIONS SYSTEMS, INC
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
@authors: Sergei Garbuzov
@status: Development
@version: 1.1.0
"""
from pybvc.controller.controller import Controller
from pybvc.common.status import STATUS
from pybvc.common.utils import load_dict_from_file
if __name__ == "__main__":
f = "cfg.yml"
d = {}
if(load_dict_from_file(f, d) is False):
print("Config file '%s' read error: " % f)
exit()
try:
ctrlIpAddr = d['ctrlIpAddr']
ctrlPortNum = d['ctrlPortNum']
ctrlUname = d['ctrlUname']
ctrlPswd = d['ctrlPswd']
nodeName = d['nodeName']
nodeIpAddr = d['nodeIpAddr']
nodePortNum = d['nodePortNum']
nodeUname = d['nodeUname']
nodePswd = d['nodePswd']
except:
print ("Failed to get Controller device attributes")
exit(0)
print "\n"
print ("<<< NETCONF nodes configured on the Controller")
ctrl = Controller(ctrlIpAddr, ctrlPortNum, ctrlUname, ctrlPswd)
result = ctrl.get_netconf_nodes_in_config()
status = result.get_status()
if(status.eq(STATUS.OK)):
print "Nodes configured:"
nlist = result.get_data()
for item in nlist:
print " '{}'".format(item)
else:
print ("\n")
print ("!!!Failed, reason: %s" % status.brief().lower())
exit(0)
print "\n"
print ("<<< NETCONF nodes connection status on the Controller")
result = ctrl.get_netconf_nodes_conn_status()
status = result.get_status()
if(status.eq(STATUS.OK)):
print "Nodes connection status:"
nlist = result.get_data()
for item in nlist:
status = ""
if (item['connected']):
status = "connected"
else:
status = "not connected"
print " '{}' is {}".format(item['node'], status)
else:
print ("Failed, reason: %s" % status.brief().lower())
exit(0)
print "\n"
|
{
"content_hash": "18d50012cf4b0d690115db2dea2e9a3f",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 76,
"avg_line_length": 34.06930693069307,
"alnum_prop": 0.6730601569311246,
"repo_name": "tnadeau/pybvc",
"id": "1c644e5ac5950f79856e10b06af8bee53b5e73c0",
"size": "3460",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "samples/samplenetconf/cmds/show_mount.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Perl",
"bytes": "2452"
},
{
"name": "Python",
"bytes": "436853"
}
],
"symlink_target": ""
}
|
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.coverage', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Steel'
copyright = u'2011, Marty Alchin'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1a'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# The name of the language Pygments should highlight.
highlight_language = 'python3'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Steeldoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Steel.tex', u'Steel: File Formats Made Easy',
u'Marty Alchin', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'steel', u'Steel: File Formats Made Easy',
[u'Marty Alchin'], 1)
]
|
{
"content_hash": "38abc60b018d1a6d958a42ff66a2fcfd",
"timestamp": "",
"source": "github",
"line_count": 207,
"max_line_length": 103,
"avg_line_length": 33.55555555555556,
"alnum_prop": 0.6847106248200403,
"repo_name": "gulopine/steel",
"id": "0556c53551de4a9e3e4f309d11bfec2585a3c50a",
"size": "7373",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "103498"
}
],
"symlink_target": ""
}
|
from test import test_support
import unittest
import sys
import difflib
# A very basic example. If this fails, we're in deep trouble.
def basic():
return 1
basic.events = [(0, 'call'),
(1, 'line'),
(1, 'return')]
# Armin Rigo's failing example:
def arigo_example():
x = 1
del x
while 0:
pass
x = 1
arigo_example.events = [(0, 'call'),
(1, 'line'),
(2, 'line'),
(3, 'line'),
(5, 'line'),
(5, 'return')]
# check that lines consisting of just one instruction get traced:
def one_instr_line():
x = 1
del x
x = 1
one_instr_line.events = [(0, 'call'),
(1, 'line'),
(2, 'line'),
(3, 'line'),
(3, 'return')]
def no_pop_tops(): # 0
x = 1 # 1
for a in range(2): # 2
if a: # 3
x = 1 # 4
else: # 5
x = 1 # 6
no_pop_tops.events = [(0, 'call'),
(1, 'line'),
(2, 'line'),
(3, 'line'),
(6, 'line'),
(2, 'line'),
(3, 'line'),
(4, 'line'),
(2, 'line'),
(2, 'return')]
def no_pop_blocks():
while 0:
bla
x = 1
no_pop_blocks.events = [(0, 'call'),
(1, 'line'),
(3, 'line'),
(3, 'return')]
def called(): # line -3
x = 1
def call(): # line 0
called()
call.events = [(0, 'call'),
(1, 'line'),
(-3, 'call'),
(-2, 'line'),
(-2, 'return'),
(1, 'return')]
def raises():
raise Exception
def test_raise():
try:
raises()
except Exception, exc:
x = 1
test_raise.events = [(0, 'call'),
(1, 'line'),
(2, 'line'),
(-3, 'call'),
(-2, 'line'),
(-2, 'exception'),
(-2, 'return'),
(2, 'exception'),
(3, 'line'),
(4, 'line'),
(4, 'return')]
def _settrace_and_return(tracefunc):
sys.settrace(tracefunc)
sys._getframe().f_back.f_trace = tracefunc
def settrace_and_return(tracefunc):
_settrace_and_return(tracefunc)
settrace_and_return.events = [(1, 'return')]
def _settrace_and_raise(tracefunc):
sys.settrace(tracefunc)
sys._getframe().f_back.f_trace = tracefunc
raise RuntimeError
def settrace_and_raise(tracefunc):
try:
_settrace_and_raise(tracefunc)
except RuntimeError, exc:
pass
settrace_and_raise.events = [(2, 'exception'),
(3, 'line'),
(4, 'line'),
(4, 'return')]
# implicit return example
def ireturn_example():
a = 5
b = 5
if a == b:
b = a+1
else:
pass
ireturn_example.events = [(0, 'call'),
(1, 'line'),
(2, 'line'),
(3, 'line'),
(4, 'line'),
(4, 'return')]
# Tight loop with while(1) example (SF #765624)
def tightloop_example():
items = range(0, 3)
try:
i = 0
while 1:
b = items[i]; i+=1
except IndexError:
pass
tightloop_example.events = [(0, 'call'),
(1, 'line'),
(2, 'line'),
(3, 'line'),
(4, 'line'),
(5, 'line'),
(5, 'line'),
(5, 'line'),
(5, 'line'),
(5, 'exception'),
(6, 'line'),
(7, 'line'),
(7, 'return')]
def tighterloop_example():
items = range(1, 4)
try:
i = 0
while 1: i = items[i]
except IndexError:
pass
tighterloop_example.events = [(0, 'call'),
(1, 'line'),
(2, 'line'),
(3, 'line'),
(4, 'line'),
(4, 'line'),
(4, 'line'),
(4, 'line'),
(4, 'exception'),
(5, 'line'),
(6, 'line'),
(6, 'return')]
class Tracer:
def __init__(self):
self.events = []
def trace(self, frame, event, arg):
self.events.append((frame.f_lineno, event))
return self.trace
class TraceTestCase(unittest.TestCase):
def compare_events(self, line_offset, events, expected_events):
events = [(l - line_offset, e) for (l, e) in events]
if events != expected_events:
self.fail(
"events did not match expectation:\n" +
"\n".join(difflib.ndiff(map(str, expected_events),
map(str, events))))
def run_test(self, func):
tracer = Tracer()
sys.settrace(tracer.trace)
func()
sys.settrace(None)
self.compare_events(func.func_code.co_firstlineno,
tracer.events, func.events)
def run_test2(self, func):
tracer = Tracer()
func(tracer.trace)
sys.settrace(None)
self.compare_events(func.func_code.co_firstlineno,
tracer.events, func.events)
def test_01_basic(self):
self.run_test(basic)
def test_02_arigo(self):
self.run_test(arigo_example)
def test_03_one_instr(self):
self.run_test(one_instr_line)
def test_04_no_pop_blocks(self):
self.run_test(no_pop_blocks)
def test_05_no_pop_tops(self):
self.run_test(no_pop_tops)
def test_06_call(self):
self.run_test(call)
def test_07_raise(self):
self.run_test(test_raise)
def test_08_settrace_and_return(self):
self.run_test2(settrace_and_return)
def test_09_settrace_and_raise(self):
self.run_test2(settrace_and_raise)
def test_10_ireturn(self):
self.run_test(ireturn_example)
def test_11_tightloop(self):
self.run_test(tightloop_example)
def test_12_tighterloop(self):
self.run_test(tighterloop_example)
class RaisingTraceFuncTestCase(unittest.TestCase):
def trace(self, frame, event, arg):
"""A trace function that raises an exception in response to a
specific trace event."""
if event == self.raiseOnEvent:
raise ValueError # just something that isn't RuntimeError
else:
return self.trace
def f(self):
"""The function to trace; raises an exception if that's the case
we're testing, so that the 'exception' trace event fires."""
if self.raiseOnEvent == 'exception':
x = 0
y = 1/x
else:
return 1
def run_test_for_event(self, event):
"""Tests that an exception raised in response to the given event is
handled OK."""
self.raiseOnEvent = event
try:
for i in xrange(sys.getrecursionlimit() + 1):
sys.settrace(self.trace)
try:
self.f()
except ValueError:
pass
else:
self.fail("exception not thrown!")
except RuntimeError:
self.fail("recursion counter not reset")
# Test the handling of exceptions raised by each kind of trace event.
def test_call(self):
self.run_test_for_event('call')
def test_line(self):
self.run_test_for_event('line')
def test_return(self):
self.run_test_for_event('return')
def test_exception(self):
self.run_test_for_event('exception')
def test_trash_stack(self):
def f():
for i in range(5):
print i # line tracing will raise an exception at this line
def g(frame, why, extra):
if (why == 'line' and
frame.f_lineno == f.func_code.co_firstlineno + 2):
raise RuntimeError, "i am crashing"
return g
sys.settrace(g)
try:
f()
except RuntimeError:
# the test is really that this doesn't segfault:
import gc
gc.collect()
else:
self.fail("exception not propagated")
# 'Jump' tests: assigning to frame.f_lineno within a trace function
# moves the execution position - it's how debuggers implement a Jump
# command (aka. "Set next statement").
class JumpTracer:
"""Defines a trace function that jumps from one place to another,
with the source and destination lines of the jump being defined by
the 'jump' property of the function under test."""
def __init__(self, function):
self.function = function
self.jumpFrom = function.jump[0]
self.jumpTo = function.jump[1]
self.done = False
def trace(self, frame, event, arg):
if not self.done and frame.f_code == self.function.func_code:
firstLine = frame.f_code.co_firstlineno
if frame.f_lineno == firstLine + self.jumpFrom:
# Cope with non-integer self.jumpTo (because of
# no_jump_to_non_integers below).
try:
frame.f_lineno = firstLine + self.jumpTo
except TypeError:
frame.f_lineno = self.jumpTo
self.done = True
return self.trace
# The first set of 'jump' tests are for things that are allowed:
def jump_simple_forwards(output):
output.append(1)
output.append(2)
output.append(3)
jump_simple_forwards.jump = (1, 3)
jump_simple_forwards.output = [3]
def jump_simple_backwards(output):
output.append(1)
output.append(2)
jump_simple_backwards.jump = (2, 1)
jump_simple_backwards.output = [1, 1, 2]
def jump_out_of_block_forwards(output):
for i in 1, 2:
output.append(2)
for j in [3]: # Also tests jumping over a block
output.append(4)
output.append(5)
jump_out_of_block_forwards.jump = (3, 5)
jump_out_of_block_forwards.output = [2, 5]
def jump_out_of_block_backwards(output):
output.append(1)
for i in [1]:
output.append(3)
for j in [2]: # Also tests jumping over a block
output.append(5)
output.append(6)
output.append(7)
jump_out_of_block_backwards.jump = (6, 1)
jump_out_of_block_backwards.output = [1, 3, 5, 1, 3, 5, 6, 7]
def jump_to_codeless_line(output):
output.append(1)
# Jumping to this line should skip to the next one.
output.append(3)
jump_to_codeless_line.jump = (1, 2)
jump_to_codeless_line.output = [3]
def jump_to_same_line(output):
output.append(1)
output.append(2)
output.append(3)
jump_to_same_line.jump = (2, 2)
jump_to_same_line.output = [1, 2, 3]
# Tests jumping within a finally block, and over one.
def jump_in_nested_finally(output):
try:
output.append(2)
finally:
output.append(4)
try:
output.append(6)
finally:
output.append(8)
output.append(9)
jump_in_nested_finally.jump = (4, 9)
jump_in_nested_finally.output = [2, 9]
# The second set of 'jump' tests are for things that are not allowed:
def no_jump_too_far_forwards(output):
try:
output.append(2)
output.append(3)
except ValueError, e:
output.append('after' in str(e))
no_jump_too_far_forwards.jump = (3, 6)
no_jump_too_far_forwards.output = [2, True]
def no_jump_too_far_backwards(output):
try:
output.append(2)
output.append(3)
except ValueError, e:
output.append('before' in str(e))
no_jump_too_far_backwards.jump = (3, -1)
no_jump_too_far_backwards.output = [2, True]
# Test each kind of 'except' line.
def no_jump_to_except_1(output):
try:
output.append(2)
except:
e = sys.exc_info()[1]
output.append('except' in str(e))
no_jump_to_except_1.jump = (2, 3)
no_jump_to_except_1.output = [True]
def no_jump_to_except_2(output):
try:
output.append(2)
except ValueError:
e = sys.exc_info()[1]
output.append('except' in str(e))
no_jump_to_except_2.jump = (2, 3)
no_jump_to_except_2.output = [True]
def no_jump_to_except_3(output):
try:
output.append(2)
except ValueError, e:
output.append('except' in str(e))
no_jump_to_except_3.jump = (2, 3)
no_jump_to_except_3.output = [True]
def no_jump_to_except_4(output):
try:
output.append(2)
except (ValueError, RuntimeError), e:
output.append('except' in str(e))
no_jump_to_except_4.jump = (2, 3)
no_jump_to_except_4.output = [True]
def no_jump_forwards_into_block(output):
try:
output.append(2)
for i in 1, 2:
output.append(4)
except ValueError, e:
output.append('into' in str(e))
no_jump_forwards_into_block.jump = (2, 4)
no_jump_forwards_into_block.output = [True]
def no_jump_backwards_into_block(output):
try:
for i in 1, 2:
output.append(3)
output.append(4)
except ValueError, e:
output.append('into' in str(e))
no_jump_backwards_into_block.jump = (4, 3)
no_jump_backwards_into_block.output = [3, 3, True]
def no_jump_into_finally_block(output):
try:
try:
output.append(3)
x = 1
finally:
output.append(6)
except ValueError, e:
output.append('finally' in str(e))
no_jump_into_finally_block.jump = (4, 6)
no_jump_into_finally_block.output = [3, 6, True] # The 'finally' still runs
def no_jump_out_of_finally_block(output):
try:
try:
output.append(3)
finally:
output.append(5)
output.append(6)
except ValueError, e:
output.append('finally' in str(e))
no_jump_out_of_finally_block.jump = (5, 1)
no_jump_out_of_finally_block.output = [3, True]
# This verifies the line-numbers-must-be-integers rule.
def no_jump_to_non_integers(output):
try:
output.append(2)
except ValueError, e:
output.append('integer' in str(e))
no_jump_to_non_integers.jump = (2, "Spam")
no_jump_to_non_integers.output = [True]
# This verifies that you can't set f_lineno via _getframe or similar
# trickery.
def no_jump_without_trace_function():
try:
previous_frame = sys._getframe().f_back
previous_frame.f_lineno = previous_frame.f_lineno
except ValueError, e:
# This is the exception we wanted; make sure the error message
# talks about trace functions.
if 'trace' not in str(e):
raise
else:
# Something's wrong - the expected exception wasn't raised.
raise RuntimeError, "Trace-function-less jump failed to fail"
class JumpTestCase(unittest.TestCase):
def compare_jump_output(self, expected, received):
if received != expected:
self.fail( "Outputs don't match:\n" +
"Expected: " + repr(expected) + "\n" +
"Received: " + repr(received))
def run_test(self, func):
tracer = JumpTracer(func)
sys.settrace(tracer.trace)
output = []
func(output)
sys.settrace(None)
self.compare_jump_output(func.output, output)
def test_01_jump_simple_forwards(self):
self.run_test(jump_simple_forwards)
def test_02_jump_simple_backwards(self):
self.run_test(jump_simple_backwards)
def test_03_jump_out_of_block_forwards(self):
self.run_test(jump_out_of_block_forwards)
def test_04_jump_out_of_block_backwards(self):
self.run_test(jump_out_of_block_backwards)
def test_05_jump_to_codeless_line(self):
self.run_test(jump_to_codeless_line)
def test_06_jump_to_same_line(self):
self.run_test(jump_to_same_line)
def test_07_jump_in_nested_finally(self):
self.run_test(jump_in_nested_finally)
def test_08_no_jump_too_far_forwards(self):
self.run_test(no_jump_too_far_forwards)
def test_09_no_jump_too_far_backwards(self):
self.run_test(no_jump_too_far_backwards)
def test_10_no_jump_to_except_1(self):
self.run_test(no_jump_to_except_1)
def test_11_no_jump_to_except_2(self):
self.run_test(no_jump_to_except_2)
def test_12_no_jump_to_except_3(self):
self.run_test(no_jump_to_except_3)
def test_13_no_jump_to_except_4(self):
self.run_test(no_jump_to_except_4)
def test_14_no_jump_forwards_into_block(self):
self.run_test(no_jump_forwards_into_block)
def test_15_no_jump_backwards_into_block(self):
self.run_test(no_jump_backwards_into_block)
def test_16_no_jump_into_finally_block(self):
self.run_test(no_jump_into_finally_block)
def test_17_no_jump_out_of_finally_block(self):
self.run_test(no_jump_out_of_finally_block)
def test_18_no_jump_to_non_integers(self):
self.run_test(no_jump_to_non_integers)
def test_19_no_jump_without_trace_function(self):
no_jump_without_trace_function()
def test_main():
test_support.run_unittest(
TraceTestCase,
RaisingTraceFuncTestCase,
JumpTestCase
)
if __name__ == "__main__":
test_main()
|
{
"content_hash": "c2dacecc1757103a8fadd46b6063b706",
"timestamp": "",
"source": "github",
"line_count": 607,
"max_line_length": 76,
"avg_line_length": 29.40362438220758,
"alnum_prop": 0.532328552218736,
"repo_name": "loongson-community/EFI-MIPS",
"id": "7f866fbcf539ddbc2d7472e9abca07fd6e3d8766",
"size": "17884",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "ToolKit/cmds/python/Lib/test/bad/test_trace.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "271282"
},
{
"name": "Batchfile",
"bytes": "318"
},
{
"name": "C",
"bytes": "32642014"
},
{
"name": "C++",
"bytes": "1058125"
},
{
"name": "CSS",
"bytes": "2547"
},
{
"name": "GAP",
"bytes": "111381"
},
{
"name": "Groff",
"bytes": "1245691"
},
{
"name": "HTML",
"bytes": "1328432"
},
{
"name": "Lex",
"bytes": "14559"
},
{
"name": "M",
"bytes": "748"
},
{
"name": "Makefile",
"bytes": "468567"
},
{
"name": "Mask",
"bytes": "3420"
},
{
"name": "NSIS",
"bytes": "8743"
},
{
"name": "Objective-C",
"bytes": "3415447"
},
{
"name": "Pascal",
"bytes": "3368"
},
{
"name": "Python",
"bytes": "7763565"
},
{
"name": "R",
"bytes": "546"
},
{
"name": "Shell",
"bytes": "10084"
},
{
"name": "Yacc",
"bytes": "30661"
}
],
"symlink_target": ""
}
|
from zirkon.program_config import ModuleConfig
CONF = ModuleConfig("""\
opt0 = Int(default=10)
opt1 = Float(default=1.0)
""")
|
{
"content_hash": "6fa7fe8f16eba3d0b9b393eabd562b5e",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 46,
"avg_line_length": 18.285714285714285,
"alnum_prop": 0.7109375,
"repo_name": "simone-campagna/daikon",
"id": "71e4b2b6b077c41b1c28a6fb50cb524a604b3b3d",
"size": "128",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "test_zirkon/pack5/comp_relocate_above/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "486219"
}
],
"symlink_target": ""
}
|
from ..groups import *
from ...models import Statistic, State, Timer, Sequence, UUID, StatisticBarPlot
TRACKABLES = {'Statistic': Statistic, 'State': State, 'Timer': Timer, 'Sequence': Sequence}
@app.route('/usage/plots', methods=['GET', 'POST'])
@flask_login.login_required
def usage_plots():
group = flask_login.current_user.group
if group is None:
return 'You are not in a group.'
state_trackables = [q.name for q in db.session.query(State.name.distinct().label('name'))]
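    # State names are queried globally, while statistic bar plots are defined
    # per group, so the plot query is filtered by the current user's group.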
    statistic_trackables = db.session.query(StatisticBarPlot.id, StatisticBarPlot.name)\
                                     .filter(StatisticBarPlot.group_id == group.id).all()
html = render_template('anonymous_usage.html', user=flask_login.current_user,
statistics=statistic_trackables, states=state_trackables)
return html
|
{
"content_hash": "a8dce9b93dff316ec087768bd0a12593",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 100,
"avg_line_length": 46.526315789473685,
"alnum_prop": 0.6583710407239819,
"repo_name": "lobocv/crashreporter_hq",
"id": "0bb4ed778ff36368cfc9a27b544ff6b265ae3ea0",
"size": "884",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "crashreporter_hq/views/usage_stats/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "7477"
},
{
"name": "HTML",
"bytes": "60876"
},
{
"name": "JavaScript",
"bytes": "71126"
},
{
"name": "Python",
"bytes": "100447"
},
{
"name": "Shell",
"bytes": "51"
}
],
"symlink_target": ""
}
|
from exceptions import Exception
class Eclipse2017Exception(Exception):
pass
class CloudStorageError(Eclipse2017Exception):
pass
class CouldNotObtainCredentialsError(Eclipse2017Exception):
pass
class FailedToRenameFileError(Eclipse2017Exception):
pass
class FailedToSaveToDatastoreError(Eclipse2017Exception):
pass
class FailedToUploadToGCSError(Eclipse2017Exception):
pass
class UserDeletedError(Eclipse2017Exception):
pass
class ApplicationIdentityError(Exception):
pass
class MissingCredentialTokenError(Exception):
pass
class MissingUserError(Exception):
pass
|
{
"content_hash": "7e8c33c50c6759e8c71ca51db5ea7285",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 59,
"avg_line_length": 16.756756756756758,
"alnum_prop": 0.8064516129032258,
"repo_name": "google/eclipse2017",
"id": "9629b74e7d4db88194287e57b53be32e7c494107",
"size": "1198",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "common/eclipse2017_exceptions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1210"
},
{
"name": "HTML",
"bytes": "174182"
},
{
"name": "JavaScript",
"bytes": "72747"
},
{
"name": "Python",
"bytes": "665417"
},
{
"name": "Shell",
"bytes": "47103"
}
],
"symlink_target": ""
}
|
"""Prototype decorator for defining legacy-graph-mode functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import weakref
from tensorflow.python.eager import def_function
from tensorflow.python.eager import function
from tensorflow.python.eager import lift_to_graph
from tensorflow.python.framework import func_graph
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.training.tracking import data_structures
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
class VariableHolder(object):
"""Holds variables for a python function."""
def __init__(self, fn=None, share_variables=False):
self._fn = fn
self._share_variables = share_variables
self._variables_by_name = data_structures.Mapping()
@property
def variables(self):
return self._variables_by_name
def variable_creator_scope(self, next_creator, **kwargs):
"""Creates variables & adds them to collections to match legacy code."""
collections = kwargs.pop("collections", None)
v = None
# Get expected variable name.
with ops.name_scope(kwargs.get("name", None), "Variable") as name:
variable_name = ops.name_from_scope_name(name)
kwargs["name"] = name
if self._share_variables:
v = self._variables_by_name.get(variable_name, None)
if v is None:
v = next_creator(**kwargs)
self._variables_by_name[variable_name] = v
if collections is None:
collections = [ops.GraphKeys.GLOBAL_VARIABLES]
if v.trainable and ops.GraphKeys.TRAINABLE_VARIABLES not in collections:
collections = list(collections) + [ops.GraphKeys.TRAINABLE_VARIABLES]
ops.add_to_collections(collections, v)
return v
def __call__(self, *args, **kwargs):
return self.call_with_variable_creator_scope(self._fn)(*args, **kwargs)
def call_with_variable_creator_scope(self, fn):
def wrapped(*args, **kwargs):
with variable_scope.variable_creator_scope(self.variable_creator_scope):
return fn(*args, **kwargs)
return wrapped
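# A minimal usage sketch for VariableHolder (hypothetical; not part of this
# module's API surface): variables created under the holder's creator scope
# are recorded by name in `holder.variables`.
#
#   holder = VariableHolder(share_variables=True)
#   def build():
#     return variable_scope.get_variable("w", shape=[])
#   wrapped_build = holder.call_with_variable_creator_scope(build)
#   # Tracing `wrapped_build` inside a graph records "w" in holder.variables.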
# TODO(allenl): make this trackable
class WrappedFunction(function.ConcreteFunction):
"""Wraps a tf V1 piece of code in a function."""
def __init__(self, fn_graph, variable_holder, attrs=None, signature=None):
super(WrappedFunction, self).__init__(
fn_graph, attrs=attrs, signature=signature)
self._variable_holder = variable_holder
if ops.executing_eagerly_outside_functions():
# TODO(allenl): Make this work in 1.x?
self._lift_unlifted_variables()
def _lift_unlifted_variables(self):
"""Finds resource variables and lifts them into the outer context.
When we import a GraphDef inside a wrap_function, no Python graph building
code runs. This means we get VarHandleOps which create variable resources,
but no corresponding Python objects. Leaving them like this works but gives
the user no way to interact with or modify the variables outside the graph.
This method searches for variables and lifts them out as regular variable
objects when possible, indicating to the FuncGraph that they are captures.
"""
with self.graph.as_default():
collection_variables = (
ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
+ ops.get_collection(ops.GraphKeys.LOCAL_VARIABLES))
existing_captures = set(self.graph.internal_captures)
lifted_variables = {}
for old_variable in collection_variables:
if (old_variable._in_graph_mode # pylint: disable=protected-access
and isinstance(old_variable,
resource_variable_ops.ResourceVariable)):
if old_variable.handle in existing_captures:
continue
new_variable = def_function.UnliftedInitializerVariable(
array_ops.placeholder(
name="unused_{}_initializer".format(old_variable.op.name),
shape=old_variable.shape,
dtype=old_variable.dtype),
name=old_variable.op.name,
trainable=old_variable.trainable)
self.graph.captures[new_variable.handle] = old_variable.handle
existing_captures.add(old_variable.handle)
lifted_variables[old_variable] = new_variable
# pylint: disable=protected-access
variable_name = new_variable.name.split(":")[0]
self._variable_holder._variables_by_name[variable_name] = new_variable
self.graph._weak_variables.append(weakref.ref(new_variable))
# pylint: enable=protected-access
# Update the graph's collections, partly for the user and partly so this
# function is idempotent when it runs again in prune() calls.
for collection_name in [ops.GraphKeys.GLOBAL_VARIABLES,
ops.GraphKeys.LOCAL_VARIABLES]:
mutable_collection = ops.get_collection_ref(collection_name)
for index, current in enumerate(mutable_collection):
mutable_collection[index] = lifted_variables.get(current, current)
def prune(self, feeds, fetches, name=None, input_signature=None):
# TODO(b/129646028): Add support for CompositeTensors.
name = name or "pruned"
flat_feeds, flat_fetches = nest.flatten(feeds), nest.flatten(fetches)
for f in flat_feeds:
if not isinstance(f, ops.Tensor):
raise ValueError("Feeds must be tensors.")
# Ignoring all feeds that are captures allows prune to be called
# using wrapped_func.inputs even when it uses variables
internal_captures = self.graph.internal_captures
flat_feeds = [f for f in flat_feeds
if f not in internal_captures]
tensor_fetches = []
operation_fetches = []
for f in flat_fetches:
if isinstance(f, ops.Tensor):
tensor_fetches.append(f)
elif isinstance(f, ops.Operation):
operation_fetches.append(f)
else:
raise ValueError("Fetches must be tensors or operations.")
for f in flat_feeds + flat_fetches:
if f.graph is not self._func_graph:
raise ValueError(
"Can only prune function whose feeds and fetches "
"are from this graph (%s). Tensor %s from graph %s" % (
self._func_graph, f, f.graph))
with self._func_graph.as_default():
pruned_graph = func_graph.FuncGraph(name)
with ops.control_dependencies(operation_fetches):
if tensor_fetches:
identity_fetches = array_ops.identity_n(tensor_fetches)
sink_tensor = identity_fetches[0]
else:
identity_fetches = []
sink_tensor = array_ops.zeros([])
lift_map = lift_to_graph.lift_to_graph(
[sink_tensor], pruned_graph, sources=flat_feeds + internal_captures)
for original_fetch, identity_fetch in zip(
tensor_fetches, identity_fetches):
lift_map[original_fetch] = lift_map[identity_fetch]
pruned_graph.outputs.extend(
lift_map[x] for x in flat_fetches if isinstance(x, ops.Tensor))
pruned_graph.control_outputs.extend(
[lift_map[operation] for operation in operation_fetches])
for external_capture, internal_capture in self.graph.captures.items():
pruned_graph.captures[external_capture] = lift_map[internal_capture]
pruned_graph.inputs.extend(lift_map[x] for x in flat_feeds)
pruned_graph.inputs.extend(pruned_graph.captures.values())
pruned_graph.variables = self.graph.variables
def _structured_output_mapping(fetched):
lifted = lift_map[fetched]
if isinstance(lifted, ops.Operation):
return None
return lifted
pruned_graph.structured_outputs = nest.map_structure(
_structured_output_mapping, fetches)
pruned_graph.structured_input_signature = input_signature
pruned_fn = WrappedFunction(
pruned_graph, variable_holder=self._variable_holder)
pruned_fn._num_positional_args = len(flat_feeds) # pylint: disable=protected-access
# TODO(kathywu): Enable keyword arguments if an input signature is specified
pruned_fn._arg_keywords = [] # pylint: disable=protected-access
return pruned_fn
def _filter_returned_ops(fn):
"""Filtering out any ops returned by function.
Args:
fn: a function
Returns:
A tuple of (
Wrapped function that returns `None` in place of any ops,
dict that maps the index in the flat output structure to the returned op
)
"""
returned_ops = {}
def wrap_and_filter_returned_ops(*args, **kwargs):
outputs = fn(*args, **kwargs)
flat_outputs = nest.flatten(outputs)
for n in range(len(flat_outputs)):
output = flat_outputs[n]
if isinstance(output, ops.Operation):
returned_ops[n] = output
flat_outputs[n] = None
return nest.pack_sequence_as(outputs, flat_outputs)
return wrap_and_filter_returned_ops, returned_ops
class WrappedGraph(object):
"""Class for wrapping multiple TF 1.X functions in a single graph.
Maintains a dictionary mapping names to wrapped functions. See
`tf.compat.v1.wrap_function` to learn more about wrapping V1 functions.
Functions wrapped using this class have access to variables and collections
created in other wrapped functions, using the standard TF 1.X API (
`tf.compat.v1.get_variable` or
`tf.compat.v1.get_default_graph().get_collection(...)`)
Outside a function, variables and collections may be accessed using the
`variables` and `graph` properties.
Example:
```
def add_v1(x):
with tf.compat.v1.variable_scope('vars', reuse=tf.AUTO_REUSE):
v = tf.compat.v1.get_variable('v', shape=[], dtype=tf.int32)
return v + x
def increment_var_v1(x):
with tf.compat.v1.variable_scope('vars', reuse=tf.AUTO_REUSE):
v = tf.compat.v1.get_variable('v', shape=[], dtype=tf.int32)
return v.assign_add(x)
g = WrappedGraph()
add = g.wrap_function(add_v1, [tf.TensorSpec([], tf.int32)])
increment_var = g.wrap_function(increment_var_v1,
[tf.TensorSpec([], tf.int32)])
assert len(g.variables) == 1
assert g.variables[0].numpy() == 0
increment_var(tf.constant(5))
assert g.variables[0].numpy() == 5
```
"""
def __init__(self, variable_holder=None, **kwargs):
self._variable_holder = (
variable_holder or VariableHolder(share_variables=True))
name = kwargs.pop("name", "wrapped_function_graph")
# Always start with empty collections, unless otherwise specified. Setting
# `collections=None` will copy the collections from the outer graph.
collections = kwargs.pop("collections", {})
self.graph = func_graph.FuncGraph(name, collections=collections, **kwargs)
self._wrapped_function = WrappedFunction(self.graph, self._variable_holder)
self._functions = {}
@property
def functions(self):
return self._functions
@property
def variables(self):
return self._variable_holder.variables
def wrap_function(self, fn, signature, name=None):
"""Wraps a TF 1.X function and returns an eager-compatible function.
All functions wrapped in the same `WrappedGraph` will have access to the
same graph (`tf.get_default_graph` to get the graph object within a
function, or `WrappedGraph.graph` to get the graph outside a function).
Variables created within the function will be added to the `variables` list.
Function inputs: All inputs to the function must be tensors (nested ok),
with their shapes and dtypes defined in the `signature` argument.
Function outputs:
* The 1.X function may return tensors, variables, and ops. The wrapped
eager-compatible function will always return tensors in the same nested
structure.
* Variables are replaced with a tensor containing the latest read values.
* Returned ops are executed, and replaced with None.
* The order of op execution and variable reads in the return is
nondeterministic. For example:
```
def update_var(x):
v = tf.Variable(0)
op = tf.compat.v1.assign(v, x).op
return v, op
g = WrappedGraph()
fn = g.wrap_function(update_var)
read_value, _ = fn(tf.constant(3))
print(read_value.numpy()) # could be 0 or 3
print(g.variables[0].numpy()) # always 3
```
To ensure that ops in the function are executed (e.g. ops added to the
`tf.GraphKeys.UPDATE_OPS` collection), include them in the function returns.
Args:
fn: a 1.X tensorflow function.
signature: a possibly nested sequence of `TensorSpecs` specifying the
shapes and dtypes of the arguments.
name: an optional string name for the function. The function will be saved
with key `name` in the `functions` dictionary.
Returns:
An eager-compatible function.
"""
return self._wrap_function(fn, signature=signature, name=name)
def _wrap_function(
self, fn, args=None, kwargs=None, signature=None, name=None):
"""Internal wrap function method with extended func_graph arguments."""
fn_with_filter_and_scope, returned_ops = _filter_returned_ops(
self._variable_holder.call_with_variable_creator_scope(fn))
func_graph.func_graph_from_py_func(
None, # Name is unused.
fn_with_filter_and_scope,
args=args, kwargs=kwargs, signature=signature,
add_control_dependencies=False,
func_graph=self.graph)
    # This code relies on questionable behavior from `func_graph_from_py_func`.
    # If an existing FuncGraph is passed into the `func_graph` arg, the inputs
    # and structured outputs are overwritten. This is likely a bug, because
    # the structured outputs no longer match up with the actual outputs.
fn_inputs = self.graph.inputs[:-len(self.graph.captures)]
# Return filtered ops to the flattened outputs.
flat_fn_outputs = nest.flatten(self.graph.structured_outputs)
for index, op in returned_ops.items():
flat_fn_outputs[index] = op
fn_outputs = nest.pack_sequence_as(self.graph.structured_outputs,
flat_fn_outputs)
name = name or fn.__name__
wrapped_function = self._wrapped_function.prune(
fn_inputs, fn_outputs, name, self.graph.structured_input_signature)
self._functions[name] = wrapped_function
return wrapped_function
@tf_export(v1=["wrap_function"])
def wrap_function(fn, signature, name=None):
"""Wraps the TF 1.x function fn into a graph function.
The python function `fn` will be called once with symbolic arguments specified
in the `signature`, traced, and turned into a graph function. Any variables
created by `fn` will be owned by the object returned by `wrap_function`. The
resulting graph function can be called with tensors which match the
signature.
```python
def f(x, do_add):
v = tf.Variable(5.0)
if do_add:
op = v.assign_add(x)
else:
op = v.assign_sub(x)
with tf.control_dependencies([op]):
return v.read_value()
f_add = tf.compat.v1.wrap_function(f, [tf.TensorSpec((), tf.float32), True])
assert float(f_add(1.0)) == 6.0
assert float(f_add(1.0)) == 7.0
# Can call tf.compat.v1.wrap_function again to get a new trace, a new set
# of variables, and possibly different non-template arguments.
  f_sub = tf.compat.v1.wrap_function(f, [tf.TensorSpec((), tf.float32), False])
assert float(f_sub(1.0)) == 4.0
assert float(f_sub(1.0)) == 3.0
```
Both `tf.compat.v1.wrap_function` and `tf.function` create a callable
TensorFlow graph. But while `tf.function` runs all stateful operations
(e.g. `tf.print`) and sequences operations to provide the same semantics as
eager execution, `wrap_function` is closer to the behavior of `session.run` in
TensorFlow 1.x. It will not run any operations unless they are required to
compute the function's outputs, either through a data dependency or a control
dependency. Nor will it sequence operations.
Unlike `tf.function`, `wrap_function` will only trace the Python function
once. As with placeholders in TF 1.x, shapes and dtypes must be provided to
`wrap_function`'s `signature` argument.
Since it is only traced once, variables and state may be created inside the
function and owned by the function wrapper object.
Args:
fn: python function to be wrapped
signature: the placeholder and python arguments to be passed to the
wrapped function
name: Optional. The name of the function.
Returns:
the wrapped graph function.
"""
holder = VariableHolder(fn)
func_graph_name = "wrapped_function"
if name is not None:
func_graph_name = "wrapped_function_" + name
return WrappedFunction(
func_graph.func_graph_from_py_func(
func_graph_name,
holder,
args=None, kwargs=None, signature=signature,
add_control_dependencies=False,
collections={}),
variable_holder=holder,
signature=signature)
def function_from_graph_def(graph_def, inputs, outputs):
"""Creates a ConcreteFunction from a GraphDef.
Args:
graph_def: A GraphDef to make a function out of.
inputs: A Tensor name or nested structure of names in `graph_def` which
should be inputs to the function.
outputs: A Tensor name or nested structure of names in `graph_def` which
should be outputs of the function.
Returns:
A ConcreteFunction.
"""
def _imports_graph_def():
importer.import_graph_def(graph_def, name="")
wrapped_import = wrap_function(_imports_graph_def, [])
import_graph = wrapped_import.graph
return wrapped_import.prune(
nest.map_structure(import_graph.as_graph_element, inputs),
nest.map_structure(import_graph.as_graph_element, outputs))
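# A minimal usage sketch (hypothetical tensor names; assumes `graph_def` is a
# GraphDef, e.g. loaded from a frozen model, containing tensors "x:0" and
# "y:0"):
#
#   fn = function_from_graph_def(graph_def, inputs="x:0", outputs="y:0")
#   y = fn(tf.constant(1.0))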
|
{
"content_hash": "6d937b4cce2118fd3810fd3f74a60db5",
"timestamp": "",
"source": "github",
"line_count": 463,
"max_line_length": 88,
"avg_line_length": 38.8768898488121,
"alnum_prop": 0.6851666666666667,
"repo_name": "kevin-coder/tensorflow-fork",
"id": "ec7ecf8985b08b78ea2d8c1f092177f2ada2c555",
"size": "18729",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/eager/wrap_function.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "9117"
},
{
"name": "C",
"bytes": "340300"
},
{
"name": "C++",
"bytes": "39383425"
},
{
"name": "CMake",
"bytes": "194940"
},
{
"name": "Go",
"bytes": "1046987"
},
{
"name": "HTML",
"bytes": "4680032"
},
{
"name": "Java",
"bytes": "567239"
},
{
"name": "Jupyter Notebook",
"bytes": "1940883"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "48231"
},
{
"name": "Objective-C",
"bytes": "12456"
},
{
"name": "Objective-C++",
"bytes": "94385"
},
{
"name": "PHP",
"bytes": "2140"
},
{
"name": "Perl",
"bytes": "6179"
},
{
"name": "Perl 6",
"bytes": "1357"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "33617202"
},
{
"name": "Ruby",
"bytes": "533"
},
{
"name": "Shell",
"bytes": "425910"
}
],
"symlink_target": ""
}
|
"""Search API module."""
from search import AddDocumentError
from search import AddDocumentResult
from search import AtomField
from search import DateField
from search import DeleteDocumentError
from search import DeleteDocumentResult
from search import Document
from search import DocumentOperationResult
from search import Error
from search import Field
from search import FieldExpression
from search import HtmlField
from search import Index
from search import InternalError
from search import InvalidRequest
from search import ListDocumentsResponse
from search import list_indexes
from search import ListIndexesResponse
from search import NumberField
from search import SearchResponse
from search import SearchResult
from search import SortSpec
from search import TextField
from search import TransientError
|
{
"content_hash": "dce15cb064282dbef1ee0dd1a67580ca",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 42,
"avg_line_length": 31.26923076923077,
"alnum_prop": 0.8671586715867159,
"repo_name": "adviti/melange",
"id": "11e69a2dd11c0a30fdf3f75a583cdd7002682a81",
"size": "1418",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "thirdparty/google_appengine/google/appengine/api/search/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
__profile__ = {
"id": "test_versioning",
"driver": "nose",
"test_path": "fuel_plugin/tests/functional/dummy_tests/test_versioning.py",
"description": "Test suite that contains fake tests for versioning check",
"deployment_tags": ["releases_comparison"],
"test_runs_ordering_priority": 13,
"exclusive_testsets": [],
"available_since_release": "2015.2-6.0",
}
import unittest2
class TestVersioning(unittest2.TestCase):
def test_simple_fake_first(self):
"""This is simple fake test
for versioning checking.
It should be discovered for
releases == of >= 2015.2-6.0
Available since release: 2015.2-6.0
Deployment tags: releases_comparison
"""
self.assertTrue(True)
def test_simple_fake_second(self):
"""This is simple fake test
for versioning checking.
It should be discovered for
releases == of >= 2015.2-6.1
Available since release: 2015.2-6.1
Deployment tags: releases_comparison
"""
self.assertTrue(True)
|
{
"content_hash": "4c24ca08bed119f084e57b0b29fde56b",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 79,
"avg_line_length": 31.647058823529413,
"alnum_prop": 0.6282527881040892,
"repo_name": "mcloudv/fuel-ostf",
"id": "3a53cb6e9f23c4e2d65092021ee226acad0256ec",
"size": "1687",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "fuel_plugin/testing/fixture/dummy_tests/test_versioning.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "404"
},
{
"name": "Python",
"bytes": "594650"
},
{
"name": "Shell",
"bytes": "6024"
}
],
"symlink_target": ""
}
|
from PyQt4 import QtGui, QtCore
from core.domainobjects import MovieModel
from core.domainobjects import MovieDatabase  # assumed module path; MovieDatabase is used below
import core.Constants as Constants
import ui.DefaultComponents
SAVE_BUTTON_LABEL = " Save "
UPDATE_BUTTON_LABEL = " Update "
class AddMovieRequest(QtGui.QFrame):
def __init__(self, mainWindow):
super(AddMovieRequest, self).__init__()
self.mainWindow = mainWindow
self.setStyleSheet(Constants.WIDGET_STYLE_WITH_BORDER)
self.setFrameStyle(QtGui.QFrame.Panel | QtGui.QFrame.Raised)
self.createWidget(False)
def showUpdateView(self, movieModel):
self.nameText.setText(movieModel.name)
self.nameText.setReadOnly(True)
self.spokenLanguageText.updateCurrentIndexFor(movieModel.spokenLanguage)
self.statusText.updateCurrentIndexFor(movieModel.status)
self.storageText.updateCurrentIndexFor(movieModel.storage)
self.commentsText.setDocument(QtGui.QTextDocument(movieModel.comments))
self.okButton.setText(UPDATE_BUTTON_LABEL)
        self.show()
def showAddView(self):
self.clear()
self.okButton.setText(SAVE_BUTTON_LABEL)
self.show()
def createWidget(self, withErrors):
nameLabel = ui.DefaultComponents.SystemLabel(self,"Name")
spokenLanguageLabel = ui.DefaultComponents.SystemLabel(self, "Spoken Language")
statusLabel = ui.DefaultComponents.SystemLabel(self, "Status")
storageLabel = ui.DefaultComponents.SystemLabel(self, "Storage")
commentsLabel = ui.DefaultComponents.SystemLabel(self, "Comments")
self.nameText = QtGui.QLineEdit()
self.spokenLanguageText = PropertyComboBox(["Hindi", "English"])
self.statusText = PropertyComboBox(["Downloaded", "Seen", "Not Downloaded", "Not Found", "Need To Purchage"])
self.storageText = PropertyComboBox(["me@homework", "manu's laptop", "me@store", "maanu@store"])
self.commentsText = QtGui.QTextEdit()
        row = 0
        column = 0
        if withErrors:
            pass
self.okButton = SaveButton(self)
self.cancelButton = CancelButton(self)
# Details of the movie
movieDetailBox = QtGui.QGridLayout()
movieDetailBox.setSpacing(5)
movieDetailBox.addWidget(nameLabel, 0,0)
movieDetailBox.addWidget(self.nameText,0,2)
movieDetailBox.addWidget(statusLabel, 1, 0)
movieDetailBox.addWidget(self.statusText,1, 2)
movieDetailBox.addWidget(storageLabel, 2, 0)
movieDetailBox.addWidget(self.storageText, 2, 2)
movieDetailBox.addWidget(spokenLanguageLabel, 3, 0)
movieDetailBox.addWidget(self.spokenLanguageText, 3, 2)
# Comments
commentsBox = QtGui.QGridLayout()
commentsBox.addWidget(commentsLabel, 1, 4)
commentsBox.addWidget(self.commentsText, 2, 4)
# Buttons: Actions for this page.
actionBox = QtGui.QHBoxLayout()
actionBox.addWidget(self.okButton)
actionBox.addStretch(2)
actionBox.addWidget(self.cancelButton)
mainGrid = QtGui.QGridLayout()
mainGrid.addLayout(movieDetailBox, 1, 1)
mainGrid.addLayout(commentsBox, 1, 2)
mainGrid.addLayout(actionBox, 3, 1, 1, 2)
self.setLayout(mainGrid)
    def save(self):
        movieModel = self.bindModel()
        errors = movieModel.validate()
        if len(errors) > 0:
            # Surface validation errors to the user (assumes validate()
            # returns a list of message strings).
            QtGui.QMessageBox.warning(self, "Validation errors",
                                      "\n".join(errors))
        else:
            # Save the movie to the database.
            MovieDatabase().addMovie(movieModel)
            # Refresh the center view.
            movies = MovieDatabase().loadAllMovies()
            self.mainWindow.refreshTableView(movies)
            # Now hide and reset this form.
            self.cancel()
    def update(self):
        toUpdate = self.bindModel()
        errors = toUpdate.validate()
        if len(errors) > 0:
            # Surface validation errors to the user (assumes validate()
            # returns a list of message strings).
            QtGui.QMessageBox.warning(self, "Validation errors",
                                      "\n".join(errors))
        else:
            moviedb = MovieDatabase()
            movieModel = moviedb.findByName(toUpdate.name)
            movieModel = movieModel.copyProperties(toUpdate, movieModel)
            # Save the updated movie to the database.
            moviedb.updateMovie(movieModel)
            # Refresh the center view.
            movies = MovieDatabase().loadAllMovies()
            self.mainWindow.refreshTableView(movies)
            # Now hide and reset this form.
            self.cancel()
def bindModel(self):
movieModel = MovieModel()
movieModel.name = str(self.nameText.text())
movieModel.status = str(self.statusText.currentText())
movieModel.spokenLanguage =str(self.spokenLanguageText.currentText())
movieModel.storage =str(self.storageText.currentText())
movieModel.comments = str(self.commentsText.toPlainText())
return movieModel
def cancel(self):
        self.clear()
self.hide()
def clear(self):
self.nameText.setText("")
self.commentsText.setDocument(QtGui.QTextDocument(""))
self.statusText.setCurrentIndex(0)
self.spokenLanguageText.setCurrentIndex(0)
self.storageText.setCurrentIndex(0)
class SaveButton(ui.DefaultComponents.SystemButton):
def __init__(self, parentWidget):
super(SaveButton, self).__init__(self, SAVE_BUTTON_LABEL)
self.parentWidget = parentWidget
self.connect(self, QtCore.SIGNAL('clicked()'), self.saveMovie)
def saveMovie(self):
if(self.text() == SAVE_BUTTON_LABEL):
self.parentWidget.save()
elif(self.text() == UPDATE_BUTTON_LABEL):
self.parentWidget.update()
class CancelButton(ui.DefaultComponents.SystemButton):
def __init__(self, parentWidget):
super(CancelButton, self).__init__(self, " Cancel ")
self.parentWidget = parentWidget
self.connect(self, QtCore.SIGNAL('clicked()'), self.cancelAddMovieRequest)
def cancelAddMovieRequest(self):
self.parentWidget.cancel()
class PropertyComboBox(QtGui.QComboBox):
def __init__(self, items):
self.defaultItem = "-- Select --"
super(PropertyComboBox, self).__init__()
self.addItem(self.defaultItem)
self.addItems(items)
self.setStyleSheet("background :none;")
        self.setStyle(QtGui.QStyleFactory.create("macintosh"))
self.setSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
def currentText(self):
text = super(PropertyComboBox, self).currentText()
if(self.defaultItem != text):
return text
else:
return ""
def updateCurrentIndexFor(self, itemText):
index = self.findText(itemText)
self.setCurrentIndex(index)
|
{
"content_hash": "069fdeae9957c78f3b37e5611897b226",
"timestamp": "",
"source": "github",
"line_count": 212,
"max_line_length": 117,
"avg_line_length": 33.43396226415094,
"alnum_prop": 0.6058126410835214,
"repo_name": "vshandubey/movie-database",
"id": "2620353fe247d62454000dc216205ebc6c4e6ad5",
"size": "7088",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ui/home/AddMovieRequest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "43297"
}
],
"symlink_target": ""
}
|
from tempest.api.network import base
from tempest.common.utils import data_utils
from tempest import config
from tempest import exceptions
from tempest import test
CONF = config.CONF
class VPNaaSTestJSON(base.BaseNetworkTest):
_interface = 'json'
"""
Tests the following operations in the Neutron API using the REST client for
Neutron:
List VPN Services
Show VPN Services
Create VPN Services
Update VPN Services
Delete VPN Services
List, Show, Create, Delete, and Update IKE policy
"""
@classmethod
@test.safe_setup
def setUpClass(cls):
if not test.is_extension_enabled('vpnaas', 'network'):
msg = "vpnaas extension not enabled."
raise cls.skipException(msg)
super(VPNaaSTestJSON, cls).setUpClass()
cls.network = cls.create_network()
cls.subnet = cls.create_subnet(cls.network)
cls.router = cls.create_router(
data_utils.rand_name("router-"),
external_network_id=CONF.network.public_network_id)
cls.create_router_interface(cls.router['id'], cls.subnet['id'])
cls.vpnservice = cls.create_vpnservice(cls.subnet['id'],
cls.router['id'])
cls.ikepolicy = cls.create_ikepolicy(
data_utils.rand_name("ike-policy-"))
cls.ipsecpolicy = cls.create_ipsecpolicy(
data_utils.rand_name("ipsec-policy-"))
def _delete_ike_policy(self, ike_policy_id):
        # Deletes an IKE policy and verifies that it is deleted
ike_list = list()
resp, all_ike = self.client.list_ikepolicies()
for ike in all_ike['ikepolicies']:
ike_list.append(ike['id'])
if ike_policy_id in ike_list:
resp, _ = self.client.delete_ikepolicy(ike_policy_id)
self.assertEqual(204, resp.status)
        # Assert that the policy is not found in the list after deletion
resp, ikepolicies = self.client.list_ikepolicies()
ike_id_list = list()
for i in ikepolicies['ikepolicies']:
ike_id_list.append(i['id'])
self.assertNotIn(ike_policy_id, ike_id_list)
def _delete_ipsec_policy(self, ipsec_policy_id):
        # Deletes an ipsec policy if it exists
try:
self.client.delete_ipsecpolicy(ipsec_policy_id)
except exceptions.NotFound:
pass
def _assertExpected(self, expected, actual):
        # Check that the expected keys/values exist in the actual response body
for key, value in expected.iteritems():
self.assertIn(key, actual)
self.assertEqual(value, actual[key])
@test.attr(type='smoke')
def test_list_vpn_services(self):
# Verify the VPN service exists in the list of all VPN services
resp, body = self.client.list_vpnservices()
self.assertEqual('200', resp['status'])
vpnservices = body['vpnservices']
self.assertIn(self.vpnservice['id'], [v['id'] for v in vpnservices])
@test.attr(type='smoke')
def test_create_update_delete_vpn_service(self):
# Creates a VPN service
name = data_utils.rand_name('vpn-service-')
resp, body = self.client.create_vpnservice(subnet_id=self.subnet['id'],
router_id=self.router['id'],
name=name,
admin_state_up=True)
self.assertEqual('201', resp['status'])
vpnservice = body['vpnservice']
        # Assert that the created VPN service is present in the list of services
resp, body = self.client.list_vpnservices()
vpn_services = [vs['id'] for vs in body['vpnservices']]
self.assertIsNotNone(vpnservice['id'])
self.assertIn(vpnservice['id'], vpn_services)
# TODO(raies): implement logic to update vpnservice
# VPNaaS client function to update is implemented.
# But precondition is that current state of vpnservice
# should be "ACTIVE" not "PENDING*"
# Verification of vpn service delete
resp, body = self.client.delete_vpnservice(vpnservice['id'])
self.assertEqual('204', resp['status'])
        # Assert that the VPN service is no longer listed after deletion
resp, body = self.client.list_vpnservices()
vpn_services = [vs['id'] for vs in body['vpnservices']]
self.assertNotIn(vpnservice['id'], vpn_services)
@test.attr(type='smoke')
def test_show_vpn_service(self):
# Verifies the details of a vpn service
resp, body = self.client.show_vpnservice(self.vpnservice['id'])
self.assertEqual('200', resp['status'])
vpnservice = body['vpnservice']
self.assertEqual(self.vpnservice['id'], vpnservice['id'])
self.assertEqual(self.vpnservice['name'], vpnservice['name'])
self.assertEqual(self.vpnservice['description'],
vpnservice['description'])
self.assertEqual(self.vpnservice['router_id'], vpnservice['router_id'])
self.assertEqual(self.vpnservice['subnet_id'], vpnservice['subnet_id'])
self.assertEqual(self.vpnservice['tenant_id'], vpnservice['tenant_id'])
@test.attr(type='smoke')
def test_list_ike_policies(self):
# Verify the ike policy exists in the list of all IKE policies
resp, body = self.client.list_ikepolicies()
self.assertEqual('200', resp['status'])
ikepolicies = body['ikepolicies']
self.assertIn(self.ikepolicy['id'], [i['id'] for i in ikepolicies])
@test.attr(type='smoke')
def test_create_update_delete_ike_policy(self):
        # Creates an IKE policy
name = data_utils.rand_name('ike-policy-')
resp, body = (self.client.create_ikepolicy(
name=name,
ike_version="v1",
encryption_algorithm="aes-128",
auth_algorithm="sha1"))
self.assertEqual('201', resp['status'])
ikepolicy = body['ikepolicy']
self.addCleanup(self._delete_ike_policy, ikepolicy['id'])
# Verification of ike policy update
description = "Updated ike policy"
new_ike = {'description': description, 'pfs': 'group5',
'name': data_utils.rand_name("New-IKE-")}
resp, body = self.client.update_ikepolicy(ikepolicy['id'],
**new_ike)
self.assertEqual('200', resp['status'])
updated_ike_policy = body['ikepolicy']
self.assertEqual(updated_ike_policy['description'], description)
# Verification of ike policy delete
resp, body = self.client.delete_ikepolicy(ikepolicy['id'])
self.assertEqual('204', resp['status'])
@test.attr(type='smoke')
def test_show_ike_policy(self):
        # Verifies the details of an IKE policy
resp, body = self.client.show_ikepolicy(self.ikepolicy['id'])
self.assertEqual('200', resp['status'])
ikepolicy = body['ikepolicy']
self.assertEqual(self.ikepolicy['id'], ikepolicy['id'])
self.assertEqual(self.ikepolicy['name'], ikepolicy['name'])
self.assertEqual(self.ikepolicy['description'],
ikepolicy['description'])
self.assertEqual(self.ikepolicy['encryption_algorithm'],
ikepolicy['encryption_algorithm'])
self.assertEqual(self.ikepolicy['auth_algorithm'],
ikepolicy['auth_algorithm'])
self.assertEqual(self.ikepolicy['tenant_id'],
ikepolicy['tenant_id'])
self.assertEqual(self.ikepolicy['pfs'],
ikepolicy['pfs'])
self.assertEqual(self.ikepolicy['phase1_negotiation_mode'],
ikepolicy['phase1_negotiation_mode'])
self.assertEqual(self.ikepolicy['ike_version'],
ikepolicy['ike_version'])
@test.attr(type='smoke')
def test_list_ipsec_policies(self):
# Verify the ipsec policy exists in the list of all ipsec policies
resp, body = self.client.list_ipsecpolicies()
self.assertEqual('200', resp['status'])
ipsecpolicies = body['ipsecpolicies']
self.assertIn(self.ipsecpolicy['id'], [i['id'] for i in ipsecpolicies])
@test.attr(type='smoke')
def test_create_update_delete_ipsec_policy(self):
# Creates an ipsec policy
ipsec_policy_body = {'name': data_utils.rand_name('ipsec-policy'),
'pfs': 'group5',
'encryption_algorithm': "aes-128",
'auth_algorithm': 'sha1'}
resp, resp_body = self.client.create_ipsecpolicy(**ipsec_policy_body)
self.assertEqual('201', resp['status'])
ipsecpolicy = resp_body['ipsecpolicy']
self.addCleanup(self._delete_ipsec_policy, ipsecpolicy['id'])
self._assertExpected(ipsec_policy_body, ipsecpolicy)
# Verification of ipsec policy update
new_ipsec = {'description': 'Updated ipsec policy',
'pfs': 'group2',
'name': data_utils.rand_name("New-IPSec"),
'encryption_algorithm': "aes-256",
'lifetime': {'units': "seconds", 'value': '2000'}}
resp, body = self.client.update_ipsecpolicy(ipsecpolicy['id'],
**new_ipsec)
self.assertEqual('200', resp['status'])
updated_ipsec_policy = body['ipsecpolicy']
self._assertExpected(new_ipsec, updated_ipsec_policy)
# Verification of ipsec policy delete
resp, _ = self.client.delete_ipsecpolicy(ipsecpolicy['id'])
self.assertEqual('204', resp['status'])
self.assertRaises(exceptions.NotFound,
self.client.delete_ipsecpolicy, ipsecpolicy['id'])
@test.attr(type='smoke')
def test_show_ipsec_policy(self):
# Verifies the details of an ipsec policy
resp, body = self.client.show_ipsecpolicy(self.ipsecpolicy['id'])
self.assertEqual('200', resp['status'])
ipsecpolicy = body['ipsecpolicy']
self._assertExpected(self.ipsecpolicy, ipsecpolicy)
class VPNaaSTestXML(VPNaaSTestJSON):
_interface = 'xml'
|
{
"content_hash": "08a08d59c145a147e3ef94ececcbcc68",
"timestamp": "",
"source": "github",
"line_count": 229,
"max_line_length": 79,
"avg_line_length": 45.427947598253276,
"alnum_prop": 0.5974238200519081,
"repo_name": "vmahuli/tempest",
"id": "d1fe15c25bd276117c71172c6cb3ee236b7811b7",
"size": "11039",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tempest/api/network/test_vpnaas_extensions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3087389"
},
{
"name": "Shell",
"bytes": "17930"
}
],
"symlink_target": ""
}
|
from urllib import urlencode
import urllib2
from wham.httmock import urlmatch
from os import listdir
from os.path import isfile, join
from urlparse import urlparse, parse_qsl
# def mock_function_builder(scheme, netloc, url_name, settings, responses_dir):
# url_path = settings['url_path']
# params = settings.get('params', {})
# method = settings.get('method', 'GET')
# @urlmatch(scheme=scheme, netloc=netloc, path=url_path, method=method, params=params)
# def mock_function(_url, _request):
# with open(join(responses_dir, url_name)) as f:
# return f.read()
# return mock_function
def build_httmock_function(scheme, netloc, url_path, response_content, method='GET', params=None):
if params is None:
params = {}
@urlmatch(scheme=scheme, netloc=netloc, path=url_path, method=method, params=params)
def mock_function(_url, _request):
return response_content
return mock_function
def build_httmock_functions(mock_response_dir):
print 'building mock functions'
functions = []
for filename in listdir(mock_response_dir):
filepath = join(mock_response_dir,filename)
if isfile(filepath):
method = None
for _method in ('GET', 'POST', 'PUT', 'DELETE', 'PATCH'):
if filename.startswith(_method):
filename = filename[len(_method):]
method = _method
url = urllib2.unquote(filename)
parts = urlparse(url)
params = {}
if parts.query:
print parts.query
params = dict(parse_qsl(parts.query))
print params
with open(filepath) as f:
content = f.read()
functions.append(build_httmock_function(
parts.scheme, parts.netloc, parts.path, content, params=params, method=method))
return functions
def make_mock_response_file(url, content, output_dir, method='GET', extra_params=None):
if extra_params:
url += '?' + urlencode(extra_params)
path = output_dir + method + urllib2.quote(url, safe='')
print path
with open(path, 'w') as f:
f.write(content)
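# Illustrative pairing of the two helpers above (a sketch; the URL, the paths
# and the HTTMock context manager from the httmock package are assumptions,
# not part of this module): record a canned response once, then rebuild
# matching mock functions from the same directory inside a test.
#
#   make_mock_response_file('http://api.example.com/items?page=1',
#                           '{"items": []}', '/tmp/responses/')
#   with HTTMock(*build_httmock_functions('/tmp/responses/')):
#       ...  # code under test performs HTTP requests here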
|
{
"content_hash": "d8d30c10f9eac7b18ee05f8e1eadfd1b",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 99,
"avg_line_length": 35.714285714285715,
"alnum_prop": 0.616,
"repo_name": "mbylstra/django-wham",
"id": "69bc0fd3ad7646875c140cf0b5eb868259f127e8",
"size": "2250",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wham/tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "594"
},
{
"name": "Python",
"bytes": "68066"
}
],
"symlink_target": ""
}
|
import datetime
import errno
import os
import os.path
import re
import subprocess
import sys
import time
def mkdir_p(directory):
"""Make the directory, and all its ancestors as required. Any of the
directories are allowed to already exist."""
if directory == "":
# We're being asked to make the current directory.
return
try:
os.makedirs(directory)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(directory):
pass
else:
raise
def command_output(cmd, directory):
"""Runs a command in a directory and returns its standard output stream.
Captures the standard error stream.
Raises a RuntimeError if the command fails to launch or otherwise fails.
"""
p = subprocess.Popen(cmd,
cwd=directory,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(stdout, _) = p.communicate()
if p.returncode != 0:
raise RuntimeError('Failed to run %s in %s' % (cmd, directory))
return stdout
def deduce_software_version(directory):
"""Returns a software version number parsed from the CHANGES file
in the given directory.
The CHANGES file describes most recent versions first.
"""
# Match the first well-formed version-and-date line.
    # Allow trailing whitespace in case the checked-out source code has
# unexpected carriage returns on a linefeed-only system such as
# Linux.
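    # For example (illustrative value), a CHANGES line like
    #   "v2016.2-dev 2016-12-31"
    # matches and yields "v2016.2-dev".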
pattern = re.compile(r'^(v\d+\.\d+(-dev)?) \d\d\d\d-\d\d-\d\d\s*$')
changes_file = os.path.join(directory, 'CHANGES')
with open(changes_file, mode='r') as f:
for line in f.readlines():
match = pattern.match(line)
if match:
return match.group(1)
raise Exception('No version number found in {}'.format(changes_file))
def describe(directory):
"""Returns a string describing the current Git HEAD version as descriptively
as possible.
    Runs 'git describe', or alternatively 'git rev-parse HEAD', in directory. If
    successful, returns the output; otherwise returns 'unknown hash, <date>'."""
try:
# decode() is needed here for Python3 compatibility. In Python2,
# str and bytes are the same type, but not in Python3.
# Popen.communicate() returns a bytes instance, which needs to be
# decoded into text data first in Python3. And this decode() won't
# hurt Python2.
return command_output(['git', 'describe'], directory).rstrip().decode()
except:
try:
return command_output(
['git', 'rev-parse', 'HEAD'], directory).rstrip().decode()
except:
# This is the fallback case where git gives us no information,
# e.g. because the source tree might not be in a git tree.
# In this case, usually use a timestamp. However, to ensure
# reproducible builds, allow the builder to override the wall
# clock time with environment variable SOURCE_DATE_EPOCH
# containing a (presumably) fixed timestamp.
timestamp = int(os.environ.get('SOURCE_DATE_EPOCH', time.time()))
formatted = datetime.datetime.utcfromtimestamp(timestamp).isoformat()
return 'unknown hash, {}'.format(formatted)
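# A note on the fallback above (a sketch, assuming git yields no information):
# with the environment variable SOURCE_DATE_EPOCH set to 0, describe() returns
# 'unknown hash, 1970-01-01T00:00:00'.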
def main():
if len(sys.argv) != 3:
print('usage: {} <spirv-tools-dir> <output-file>'.format(sys.argv[0]))
sys.exit(1)
output_file = sys.argv[2]
mkdir_p(os.path.dirname(output_file))
software_version = deduce_software_version(sys.argv[1])
new_content = '"{}", "SPIRV-Tools {} {}"\n'.format(
software_version, software_version,
describe(sys.argv[1]).replace('"', '\\"'))
if os.path.isfile(output_file):
with open(output_file, 'r') as f:
if new_content == f.read():
return
with open(output_file, 'w') as f:
f.write(new_content)
if __name__ == '__main__':
main()
|
{
"content_hash": "d8183b29ed996abe6b85b9bf9dc1e8e8",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 81,
"avg_line_length": 34.452991452991455,
"alnum_prop": 0.6187050359712231,
"repo_name": "endlessm/chromium-browser",
"id": "321de74bdcf8147c00dd7ba39a68f3cad7086810",
"size": "5348",
"binary": false,
"copies": "13",
"ref": "refs/heads/master",
"path": "third_party/swiftshader/third_party/SPIRV-Tools/utils/update_build_version.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
import logging
from desktop.lib.python_util import force_dict_to_strings
from exception import SqoopException
from form import Form
class Connection(object):
"""
Sqoop connection object.
  Example of a sqoop connection dictionary received by the server: {
"id": -1,
"updated": 1371245829436,
"created": 1371245829436,
"name": "test1",
"connector": [
{
"id": 1,
"inputs": [
{
"id": 1,
"name": "connection.jdbcDriver",
"value": "org.apache.derby.jdbc.EmbeddedDriver",
"type": "STRING",
"size": 128,
"sensitive": false
},
{
"id": 2,
"name": "connection.connectionString",
"value": "jdbc%3Aderby%3A%2Ftmp%2Ftest",
"type": "STRING",
"size": 128,
"sensitive": false
},
{
"id": 3,
"name": "connection.username",
"type": "STRING",
"size": 40,
"sensitive": false
},
{
"id": 4,
"name": "connection.password",
"type": "STRING",
"size": 40,
"sensitive": true
},
{
"id": 5,
"name": "connection.jdbcProperties",
"type": "MAP",
"sensitive": false
}
],
"name": "connection",
"type": "CONNECTION"
}
],
"connector-id": 1,
"framework": [
{
"id": 4,
"inputs": [
{
"id": 16,
"name": "security.maxConnections",
"type": "INTEGER",
"sensitive": false
}
],
"name": "security",
"type": "CONNECTION"
}
]
}
Some of the key-value pairs are structured and others are not.
For example, every connection will have a name, id, and connector-id key,
but the values of the ``connector`` key will vary given the chosen connector.
The same is true for the ``framework`` key.
The connection object will have a single framework component
and a single connector, for the moment.
@see sqoop.client.form for more information on unstructured forms in sqoop.
"""
SKIP = ('id', 'creation_date', 'creation_user', 'update_date', 'update_user')
def __init__(self, name, connector_id, connector=None, framework=None, enabled=True, creation_user='hue', creation_date=0, update_user='hue', update_date=0, **kwargs):
self.id = kwargs.setdefault('id', -1)
self.creation_user = creation_user
self.creation_date = creation_date
self.update_user = update_user
self.update_date = update_date
self.enabled = enabled
self.name = name
self.connector_id = connector_id
self.connector = connector
self.framework = framework
@staticmethod
def from_dict(connection_dict):
connection_dict.setdefault('connector', [])
connection_dict['connector'] = [ Form.from_dict(con_form_dict) for con_form_dict in connection_dict['connector'] ]
connection_dict.setdefault('framework', [])
connection_dict['framework'] = [ Form.from_dict(framework_form_dict) for framework_form_dict in connection_dict['framework'] ]
    if 'connector_id' not in connection_dict:
      connection_dict['connector_id'] = connection_dict.setdefault('connector-id', -1)
    if 'creation_user' not in connection_dict:
      connection_dict['creation_user'] = connection_dict.setdefault('creation-user', 'hue')
    if 'creation_date' not in connection_dict:
      connection_dict['creation_date'] = connection_dict.setdefault('creation-date', 0)
    if 'update_user' not in connection_dict:
      connection_dict['update_user'] = connection_dict.setdefault('update-user', 'hue')
    if 'update_date' not in connection_dict:
      connection_dict['update_date'] = connection_dict.setdefault('update-date', 0)
return Connection(**force_dict_to_strings(connection_dict))
def to_dict(self):
d = {
'id': self.id,
'name': self.name,
'creation-user': self.creation_user,
'creation-date': self.creation_date,
'update-user': self.update_user,
'update-date': self.update_date,
'connector-id': self.connector_id,
'connector': [ connector.to_dict() for connector in self.connector ],
'framework': [ framework.to_dict() for framework in self.framework ],
'enabled': self.enabled
}
return d
def update_from_dict(self, connection_dict):
self.update(Connection.from_dict(connection_dict))
def update(self, connection):
for key in self.__dict__:
if key not in Connection.SKIP:
if hasattr(connection, key):
setattr(self, key, getattr(connection, key))
class SqoopConnectionException(SqoopException):
"""
This is what the server generally responds with:
{
"connector": {
"status": "UNACCEPTABLE",
"messages": {
"connection": {
"message": "Can't connect to the database with given credentials: No suitable driver found for test",
"status": "ACCEPTABLE"
},
"connection.connectionString": {
"message": "This do not seem as a valid JDBC URL",
"status": "UNACCEPTABLE"
}
}
},
"framework": {
"status": "FINE",
"messages": {}
}
}
"""
def __init__(self, connector, framework):
self.connector = connector
self.framework = framework
@classmethod
def from_dict(cls, error_dict):
return SqoopConnectionException(**force_dict_to_strings(error_dict))
def to_dict(self):
return {
'connector': self.connector,
'framework': self.framework
}
def __str__(self):
return 'Connector: %s\nFramework: %s\n' % (self.connector, self.framework)
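# Illustrative round trip (a sketch; the field values are hypothetical):
# Connection.from_dict() maps the server's hyphenated keys onto constructor
# arguments, and to_dict() maps them back into the form the server expects.
#
#   conn = Connection.from_dict({'name': 'test1', 'connector-id': 1,
#                                'connector': [], 'framework': []})
#   assert conn.to_dict()['connector-id'] == 1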
|
{
"content_hash": "7c029f6ea888cd8ea6cafacb3873e7ef",
"timestamp": "",
"source": "github",
"line_count": 194,
"max_line_length": 169,
"avg_line_length": 29.95876288659794,
"alnum_prop": 0.5874053682037165,
"repo_name": "yongshengwang/builthue",
"id": "2c90f240c41edc1138099fd0bacaa402e300d0c0",
"size": "6582",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "apps/sqoop/src/sqoop/client/connection.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "207947"
},
{
"name": "C",
"bytes": "10774013"
},
{
"name": "C++",
"bytes": "184593"
},
{
"name": "CSS",
"bytes": "655282"
},
{
"name": "Emacs Lisp",
"bytes": "14875"
},
{
"name": "GAP",
"bytes": "11337"
},
{
"name": "Java",
"bytes": "3080564"
},
{
"name": "JavaScript",
"bytes": "2418037"
},
{
"name": "Makefile",
"bytes": "86977"
},
{
"name": "Perl",
"bytes": "161801"
},
{
"name": "PigLatin",
"bytes": "282"
},
{
"name": "Prolog",
"bytes": "4590"
},
{
"name": "Python",
"bytes": "29990389"
},
{
"name": "Shell",
"bytes": "38643"
},
{
"name": "TeX",
"bytes": "129526"
},
{
"name": "Thrift",
"bytes": "99710"
},
{
"name": "XSLT",
"bytes": "367778"
}
],
"symlink_target": ""
}
|
"""
A GTK+ implementation for a user interface. PhotoInfo window
"""
__program__ = "photoplace"
__author__ = "Jose Riguera Lopez <jriguera@gmail.com>"
__version__ = "0.6.1"
__date__ = "Dec 2014"
__license__ = "Apache 2.0"
__copyright__ = "(c) Jose Riguera"
import os
import re
import codecs
import xml.dom.minidom
import StringIO
import warnings
warnings.filterwarnings('ignore', module='gtk')
try:
import pygtk
pygtk.require("2.0")
import gtk
import gobject
except Exception as e:
warnings.resetwarnings()
print("Warning: %s" % str(e))
print("You don't have the PyGTK 2.0 module installed")
raise
warnings.resetwarnings()
from PhotoPlace.definitions import *
from GTKUIdefinitions import *
# ############################
# Autocompletion for textviews
# ############################
class TextViewCompleter(object):
def __init__(self, textview, position, completion, size=TEXVIEWCOMPLETER_SIZE):
object.__init__(self)
self.textview = textview
self.completion = completion
self.position = position
self.popup = gtk.Window(gtk.WINDOW_POPUP)
parent = textview.get_toplevel()
self.popup.set_transient_for(parent)
self.popup.set_destroy_with_parent(True)
frame = gtk.Frame()
sw = gtk.ScrolledWindow()
sw.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
model = gtk.ListStore(gobject.TYPE_STRING)
for item in self.completion:
ite = model.append()
model.set(ite, 0, item)
self.list_view = gtk.TreeView(model)
self.list_view.set_property("headers-visible", False)
selection = self.list_view.get_selection()
selection.select_path((0,))
column = gtk.TreeViewColumn("", gtk.CellRendererText(), text=0)
self.list_view.append_column(column)
sw.add(self.list_view)
frame.add(sw)
self.popup.add(frame)
self.popup.set_size_request(size[0], size[1])
self.show_popup()
def hide_popup(self, *args, **kwargs):
self.popup.hide()
def show_popup(self):
tbuffer = self.textview.get_buffer()
ite = tbuffer.get_iter_at_mark(tbuffer.get_insert())
rectangle = self.textview.get_iter_location(ite)
absX, absY = self.textview.buffer_to_window_coords(gtk.TEXT_WINDOW_TEXT,
rectangle.x + rectangle.width + 0 ,
rectangle.y + rectangle.height + 70)
parent = self.textview.get_parent()
self.popup.move(self.position[0] + absX, self.position[1] + absY)
self.popup.show_all()
def prev(self):
sel = self.list_view.get_selection()
model, ite = sel.get_selected()
mite = model.get_path(ite)
if mite != None and mite[0] > 0:
path = (mite[0] - 1,)
self.list_view.set_cursor(path)
def next(self):
sel = self.list_view.get_selection()
model, ite = sel.get_selected()
mite = model.iter_next(ite)
if mite != None:
path = model.get_path(mite)
self.list_view.set_cursor(path)
def confirm(self):
sel = self.list_view.get_selection()
selection = self.select(sel)
self.destroy()
return selection
def select(self, selection):
model, ite = selection.get_selected()
name = model.get_value(ite, 0)
return name
def destroy(self):
self.popup.hide()
self.popup.destroy()
# ###########################
# Window Editor for Templates
# ###########################
class TemplateEditorGUI(gobject.GObject):
"""
GTK Editor Window
"""
_instance = None
__gsignals__ = {
'load' : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE,
(gobject.TYPE_STRING, gobject.TYPE_STRING)),
'save' : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE,
(gobject.TYPE_STRING, gobject.TYPE_STRING)),
'_save': (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE,
(gobject.TYPE_STRING, gobject.TYPE_STRING)),
'close': (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE,
(gobject.TYPE_STRING, gobject.TYPE_STRING)),
'new' : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, ()),
}
# Singleton
def __new__(cls, *args, **kwargs):
if not cls._instance:
cls._instance = super(TemplateEditorGUI, cls).__new__(cls)
return cls._instance
def __init__(self, resourcedir=None, parentwindow=None):
if resourcedir:
gobject.GObject.__init__(self)
guifile = os.path.join(resourcedir, GTKUI_RESOURCE_TemplateEditorGUIXML)
self.builder = gtk.Builder()
self.builder.set_translation_domain(GTKUI_GETTEXT_DOMAIN)
self.builder.add_from_file(guifile)
self.window = self.builder.get_object("window")
self.window.set_transient_for(parentwindow)
self.window.set_destroy_with_parent(True)
self.statusbar = self.builder.get_object("statusbar")
self.textbuffer = self.builder.get_object("textbuffer")
self.textview = self.builder.get_object("textview")
tag = self.textbuffer.create_tag('attr')
tag.set_property('foreground', "green")
tag.set_property('family', "Monospace")
tag = self.textbuffer.create_tag('defaults')
tag.set_property('foreground', "red")
tag.set_property('family', "Monospace")
tag = self.textbuffer.create_tag('photo')
tag.set_property('foreground', "blue")
tag.set_property('family', "Monospace")
        self.tooltip = _("You can use simple HTML tags like <i>list</i> (<i>li</i>, "
            "<i>ul</i>) or <i>table</i> and use expressions like "
            "<b>%(Variable|<i>DEFAULT</i>)s</b> to get values. <b><i>DEFAULT</i></b> is the "
            "value to set up when <b>Variable</b> has no value; if <b><i>DEFAULT</i></b> is "
            "none (not a character, even space) <b>Variable</b> will not be shown. "
            "You can use all the global variables defined in the same way.\n"
            "\nTo get all supported variables press <b>&lt;ctl&gt;&lt;space&gt;</b>\n")
self.ready = False
def __getitem__(self, key):
return self.builder.get_object(key)
def __setitem__(self, key, value):
raise ValueError("Cannot set key!")
def init(self, userfacade):
self.templatefile = None
self.recoverfile = None
self.savefile = None
self.popup = None
self.cansave = True
self.canrecover = True
self.canload = True
self.canvalidate = True
self.userfacade = userfacade
self.textview.add_events(gtk.gdk.KEY_PRESS_MASK)
self.signals = {
"on_window_delete_event": self.close,
"on_textview_key_press_event": self._key_press,
"on_toolbutton-wintemplates-exit_clicked": self.close,
"on_toolbutton-wintemplates-load_clicked": self.load,
"on_toolbutton-wintemplates-save_clicked": self.save,
"on_textbuffer_mark_set": self._update_statusbar,
"on_textbuffer_changed": self._update_statusbar,
"on_toolbutton-wintemplates-new_clicked": self.new,
"on_toolbutton-wintemplates-recover_clicked": self.recover,
"on_toolbutton-wintemplates-check_clicked": self.validate,
}
self.builder.connect_signals(self.signals)
self._signals = {
'load' : [],
'save' : [],
'close': [],
'new' : [],
}
self.ready = True
def connect(self, name, *args):
if self.ready:
retval = None
if name.startswith('_'):
retval = gobject.GObject.connect(self, name, *args)
else:
retval = gobject.GObject.connect(self, name, *args)
self._signals[name].append(retval)
return retval
def disconnect(self, identifier=None):
if self.ready:
if identifier:
for signal in self._signals:
if identifier in self._signals[signal]:
self._signals[signal].remove(identifier)
gobject.GObject.disconnect(self, identifier)
else:
for signal in self._signals:
for i in self._signals[signal]:
gobject.GObject.disconnect(self, i)
self._signals[signal].remove(i)
self._signals[signal] = list()
def show(self, text='', template=None, save=None, recover=None,
completions=[], tooltip='', cansave=True, canrecover=True, canload=True, canvalidate=True):
if not self.ready:
return False
self.popup = None
dgettext = dict()
dgettext['program'] = PhotoPlace_name
can_save = cansave
can_recover = canrecover
can_load = canload
can_validate = canvalidate
self.templatefile = template
if template:
dgettext['template'] = os.path.basename(template)
self.templatefile = self.userfacade.state.get_template(template)
self.savefile = save
if not save and cansave:
self.savefile = self.userfacade.state.get_savepath(template)
self.recoverfile = recover
if not recover and canrecover:
self.recoverfile = self.userfacade.state.get_recoverpath(template)
else:
can_save = False
can_recover = False
self.savefile = None
            self.recoverfile = None
if text:
fd = StringIO.StringIO(text)
self._load(fd)
fd.close()
else:
self.load(None, self.templatefile)
self.autocompletions = list()
for item in self.userfacade.options['defaults'].iterkeys():
self.autocompletions.append("%(" + item + "|)s")
self.autocompletions += completions
self.textview.set_tooltip_markup(self.tooltip + tooltip)
self.window.show_all()
if not can_save:
self["toolbutton-wintemplates-save"].hide()
self.window.set_title(_('%(program)s: Editing description') % dgettext)
else:
if dgettext.has_key('template'):
self.window.set_title(_('%(program)s: Editing template <%(template)s>') % dgettext)
else:
self.window.set_title(_('%(program)s: Editing template') % dgettext)
self.cansave = can_save
if not can_recover:
self["toolbutton-wintemplates-recover"].hide()
self.canrecover = can_recover
if not can_load:
self["toolbutton-wintemplates-load"].hide()
self.canload = can_load
if not can_validate:
self["toolbutton-wintemplates-check"].hide()
self.canvalidate = can_validate
return True
def load(self, widget=None, template_file=None):
if not self.canload:
return False
if template_file == None:
dialog = gtk.FileChooserDialog(title=_("Select file to load ..."),
parent=self.window, action=gtk.FILE_CHOOSER_ACTION_OPEN,
buttons=(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_OPEN, gtk.RESPONSE_OK))
ffilter = gtk.FileFilter()
ffilter.set_name(_("All files"))
ffilter.add_pattern("*")
dialog.add_filter(ffilter)
filename = None
if dialog.run() == gtk.RESPONSE_OK:
filename = dialog.get_filename()
dialog.destroy()
else:
filename = template_file
self.statusbar.pop(0)
fd = None
try:
fd = codecs.open(filename, "r", encoding="utf-8")
self._load(fd)
self.statusbar.push(0,_("Template from file '%s' loaded") % os.path.basename(filename))
except Exception as exception:
self.statusbar.push(0, str(exception))
return False
finally:
if fd != None:
fd.close()
ite_start, ite_end = self.textbuffer.get_bounds()
text = self.textbuffer.get_text(ite_start, ite_end)
self.emit('load', text, filename)
return True
def _load(self, fd):
tbuffer = self.textbuffer
ite_end = tbuffer.get_iter_at_mark(tbuffer.get_insert())
begin = True
lines = 0
for line in fd:
for part in re.split(r"(%\([a-zA-Z0-9_\.]+\|?[a-zA-Z0-9 \?¿_.,:;=!@$&\-\+\*]*\).)", line):
if part.startswith('%('):
key = re.match(r"%\(([a-zA-Z0-9_\.]+)\|?.*\).", part).group(1)
if key in self.userfacade.options['defaults']:
tbuffer.insert_with_tags_by_name(ite_end, part, 'defaults')
elif key in PhotoPlace_TEMPLATE_VARS:
tbuffer.insert_with_tags_by_name(ite_end, part, 'photo')
else:
tbuffer.insert_with_tags_by_name(ite_end, part, 'attr')
else:
tbuffer.insert(ite_end, part)
ite_end = tbuffer.get_iter_at_mark(tbuffer.get_insert())
lines += 1
# Delete last template div, if it exists!
nline = lines
while nline > 0:
ite_nline = tbuffer.get_iter_at_line(nline)
text = tbuffer.get_text(ite_nline, ite_end).strip()
if text.startswith('</div>'):
tbuffer.delete(ite_nline, ite_end)
break
elif len(text) > 1:
# Not a valid template
break
else:
tbuffer.delete(ite_nline, ite_end)
ite_end = ite_nline
nline -= 1
# Delete first template div, if it exists!
ite_start = tbuffer.get_start_iter()
nline = 0
while nline <= lines:
ite_nline = tbuffer.get_iter_at_line(nline)
text = tbuffer.get_text(ite_start, ite_nline).strip()
search = re.search(r'<div\s+mode=.(\w+).\s*>', text)
if search:
tbuffer.delete(ite_start, ite_nline)
mode = search.group(1)
break
elif len(text) > 1:
# Not a valid template
break
else:
tbuffer.delete(ite_start, ite_nline)
ite_start = ite_nline
nline += 1
def save(self, widget=None):
if not self.cansave:
return False
self.statusbar.pop(0)
start, end = self.textbuffer.get_bounds()
template = self.textbuffer.get_text(start, end)
fd = None
error = False
savedir = os.path.dirname(self.savefile)
try:
if not os.path.exists(savedir):
os.makedirs(savedir)
fd = codecs.open(self.savefile, "w", encoding="utf-8")
fd.write("<div mode='cdata'>\n")
fd.write(template)
fd.write("\n</div>\n")
except Exception as exception:
self.statusbar.push(0, str(exception))
error = True
finally:
if fd != None:
fd.close()
if not error:
self.emit('save', template, self.savefile)
self.emit('_save', template, self.savefile)
self.statusbar.push(0,_('Template saved without problems'))
return True
else:
self.statusbar.push(0,_('Error processing template'))
return False
def _key_press(self, textview, event):
if self.popup != None:
if event.keyval == gtk.gdk.keyval_from_name("Up"):
self.popup.prev()
return True
elif event.keyval == gtk.gdk.keyval_from_name("Down"):
self.popup.next()
return True
elif event.keyval == gtk.gdk.keyval_from_name("Return"):
value = self.popup.confirm()
tbuffer = self.textbuffer
end = tbuffer.get_iter_at_mark(tbuffer.get_insert())
start = end.copy()
start.backward_char()
while start.get_char() not in " ,()[]<>|/\\\"\'\n\t":
start.backward_char()
start.forward_char()
tbuffer.delete(start, end)
ite = tbuffer.get_iter_at_mark(tbuffer.get_insert())
key = re.match(r"%\(([a-zA-Z0-9_\.]+)\|?.*]*\).", value).group(1)
if key in self.userfacade.options['defaults']:
tbuffer.insert_with_tags_by_name(ite, value, 'defaults')
elif key in PhotoPlace_TEMPLATE_VARS:
tbuffer.insert_with_tags_by_name(ite, value, 'photo')
else:
tbuffer.insert_with_tags_by_name(ite, value, 'attr')
self.popup = None
return True
else:
self.popup.destroy()
self.popup = None
else:
if event.keyval == gtk.gdk.keyval_from_name("space") \
and event.state & gtk.gdk.CONTROL_MASK:
return self._autocomplete(self.textbuffer)
elif gtk.gdk.keyval_from_name("percent") == event.keyval:
return self._autocomplete(self.textbuffer)
return False
def _autocomplete(self, textbuffer):
if self.autocompletions:
position = self.window.window.get_root_origin()
self.popup = TextViewCompleter(self.textview, position, self.autocompletions)
return True
return False
def _update_statusbar(self, textbuffer, *args, **kwargs):
self.statusbar.pop(0)
count = textbuffer.get_char_count()
ite = textbuffer.get_iter_at_mark(textbuffer.get_insert())
row = ite.get_line()
col = ite.get_line_offset()
dgettext = {}
dgettext['line'] = row + 1
dgettext['column'] = col
dgettext['chars'] = count
self.statusbar.push(0,
_('Line %(line)d, column %(column)d (%(chars)d chars in document)') % dgettext)
def new(self, widget=None):
self.statusbar.pop(0)
start, end = self.textbuffer.get_bounds()
self.textbuffer.delete(start, end)
if self.templatefile:
self.statusbar.push(0, _('New empty template'))
else:
self.statusbar.push(0, _('Empty description'))
self.emit('new')
def recover(self, widget=None, filename=None):
if not self.canrecover:
return False
template = self.recoverfile
if filename:
template = filename
if not template:
self.statusbar.pop(0)
self.statusbar.push(0, _('Cannot be recovered!'))
return False
self.new()
if os.path.isfile(template):
if self.load(None, template):
return self.save()
return True
else:
self.statusbar.pop(0)
self.statusbar.push(0, _('Cannot find system template!'))
return False
def validate(self, widget=None):
if not self.canvalidate:
return False
start, end = self.textbuffer.get_bounds()
template = self.textbuffer.get_text(start, end)
template = "<div mode='cdata'>\n" + template + "\n</div>"
self.statusbar.pop(0)
try:
tdom = xml.dom.minidom.parseString(template)
tdom.normalize()
except Exception as exception:
text = str(exception)
line = re.search(r'line\s+(\d+)', text, re.IGNORECASE)
if line:
# correct line numbers ...
pos = int(line.group(1)) - 1
text = re.sub(r'(.+line )(\d+)(.+)', r"\1 %s\3" % pos, text)
ins = self.textbuffer.get_iter_at_line(pos - 1)
bound = self.textbuffer.get_iter_at_line(pos)
self.textbuffer.select_range(ins, bound)
self.statusbar.push(0, _('XML error: %s') % text)
return False
else:
self.statusbar.push(0, _('Perfect! template is ok!'))
return True
def close(self, widget=None, data=None):
if self.popup != None:
self.popup.destroy()
self.popup = None
start, end = self.textbuffer.get_bounds()
text = self.textbuffer.get_text(start, end)
self.textbuffer.set_text('')
self.window.hide()
self.emit('close', text, self.templatefile)
self.templatefile = None
self.recoverfile = None
self.savefile = None
self.autocompletions = list()
self.disconnect()
return True
# EOF
|
{
"content_hash": "4a6ac7c481ea9600caba82c14de49617",
"timestamp": "",
"source": "github",
"line_count": 572,
"max_line_length": 102,
"avg_line_length": 36.86188811188811,
"alnum_prop": 0.5475456485653308,
"repo_name": "jriguera/photoplace",
"id": "669cf21bd358c84c5518a532c43fa085d6eb9d43",
"size": "21795",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "photoplace/lib/PhotoPlace/UserInterface/GTKTemplateEditor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1153"
},
{
"name": "C",
"bytes": "23405"
},
{
"name": "HTML",
"bytes": "206909"
},
{
"name": "NSIS",
"bytes": "9577"
},
{
"name": "Python",
"bytes": "630139"
},
{
"name": "Shell",
"bytes": "1341"
}
],
"symlink_target": ""
}
|
import os, shutil
import tempfile
from io import StringIO
from pathlib import Path, PurePath
from sys import executable
from collections.abc import Iterable
import pytest
from doit.exceptions import TaskError
from doit.exceptions import BaseFail
from doit import action
from doit import task
from doit.task import Stream
#path to test folder
TEST_PATH = os.path.dirname(__file__)
PROGRAM = "%s %s/sample_process.py" % (executable, TEST_PATH)
class TestStream():
def test_from_task(self):
# use value from task, not global from Stream
v0 = Stream(0)
assert v0.effective_verbosity(1) == 1
assert v0.effective_verbosity(2) == 2
v2 = Stream(2)
assert v2.effective_verbosity(0) == 0
assert v2.effective_verbosity(1) == 1
def test_force_global(self):
        # force_global makes the Stream verbosity override the task's value
v0 = Stream(0, force_global=True)
assert v0.effective_verbosity(2) == 0
v2 = Stream(2, force_global=True)
assert v2.effective_verbosity(0) == 2
def test_task_verbosity_not_specified(self):
# default
v0 = Stream(None)
assert v0.effective_verbosity(None) == 1
v2 = Stream(2)
assert v2.effective_verbosity(None) == 2
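# Semantics exercised above (a summary of the Stream API as tested here):
# Stream(v) holds a global verbosity; effective_verbosity(task_v) returns
# task_v, falling back to the global value (default 1) when task_v is None,
# and force_global=True makes the global value always win.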
class TestTaskCheckInput(object):
def testOkType(self):
task.Task.check_attr('xxx', 'attr', [], ((int, list),()))
def testOkTypeABC(self):
task.Task.check_attr('xxx', 'attr', {}, ((Iterable,),()))
def testOkValue(self):
task.Task.check_attr('xxx', 'attr', None, ((list,), (None,)))
def testFailType(self):
pytest.raises(task.InvalidTask, task.Task.check_attr, 'xxx',
'attr', int, ((list,), (False,)))
def testFailValue(self):
pytest.raises(task.InvalidTask, task.Task.check_attr, 'xxx',
'attr', True, ((list,), (False,)))
class TestTaskCompare(object):
def test_equal(self):
# only task name is used to compare for equality
t1 = task.Task("foo", None)
t2 = task.Task("bar", None)
t3 = task.Task("foo", None)
assert t1 != t2
assert t1 == t3
def test_lt(self):
# task name is used to compare/sort tasks
t1 = task.Task("foo", None)
t2 = task.Task("bar", None)
t3 = task.Task("gee", None)
assert t1 > t2
sorted_names = sorted(t.name for t in (t1,t2,t3))
assert sorted_names == ['bar', 'foo', 'gee']
class TestTaskInit(object):
def test_groupTask(self):
# group tasks have no action
t = task.Task("taskX", None)
assert t.actions == []
def test_dependencySequenceIsValid(self):
task.Task("Task X", ["taskcmd"], file_dep=["123","456"])
# dependency must be a sequence or bool.
# give proper error message when anything else is used.
def test_dependencyNotSequence(self):
filePath = "data/dependency1"
pytest.raises(task.InvalidTask, task.Task,
"Task X",["taskcmd"], file_dep=filePath)
def test_options(self):
# when task is created, options contain the default values
p1 = {'name':'p1', 'default':'p1-default'}
p2 = {'name':'p2', 'default':'', 'short':'m'}
t = task.Task("MyName", None, params=[p1, p2], pos_arg='pos')
t.execute(Stream(0))
assert 'p1-default' == t.options['p1']
assert '' == t.options['p2']
assert 'pos' == t.pos_arg
assert None == t.pos_arg_val # always uninitialized
def test_options_from_cfg(self):
# Ensure that doit.cfg can specify task options.
p1 = {'name': 'x', 'long': 'x', 'default': None}
t = task.Task("MyName", None, params=[p1])
t.cfg_values = {'x': 1}
assert t.options is None
t.init_options()
assert t.options is not None
assert 1 == t.options['x']
def test_options_from_cfg_override(self):
# Ensure that doit.cfg specified task options can be replaced by
# command line specified options.
p1 = {'name': 'x', 'long': 'x', 'default': None, 'type': int}
p2 = {'name': 'y', 'long': 'y', 'default': 2, 'type': int}
t = task.Task("MyName", None, params=[p1, p2])
t.cfg_values = {'x': 1}
assert t.options is None
t.init_options(['--x=2'])
assert t.options is not None
assert 2 == t.options['x']
assert 2 == t.options['y']
def test_setup(self):
t = task.Task("task5", ['action'], setup=["task2"])
assert ["task2"] == t.setup_tasks
def test_forbid_equal_sign_on_name(self):
pytest.raises(task.InvalidTask,
task.Task, "a=1", ["taskcmd"])
class TestTaskValueSavers(object):
def test_execute_value_savers(self):
t = task.Task("Task X", ["taskcmd"])
t.value_savers.append(lambda: {'v1':1})
t.save_extra_values()
assert 1 == t.values['v1']
class TestTaskUpToDate(object):
def test_FalseRunalways(self):
t = task.Task("Task X", ["taskcmd"], uptodate=[False])
assert t.uptodate == [(False, None, None)]
def test_NoneIgnored(self):
t = task.Task("Task X", ["taskcmd"], uptodate=[None])
assert t.uptodate == [(None, None, None)]
def test_callable_function(self):
def custom_check(): return True
t = task.Task("Task X", ["taskcmd"], uptodate=[custom_check])
assert t.uptodate[0] == (custom_check, [], {})
def test_callable_instance_method(self):
class Base(object):
def check(self): return True
base = Base()
t = task.Task("Task X", ["taskcmd"], uptodate=[base.check])
assert t.uptodate[0] == (base.check, [], {})
def test_tuple(self):
def custom_check(pos_arg, xxx=None): return True
t = task.Task("Task X", ["taskcmd"],
uptodate=[(custom_check, [123], {'xxx':'yyy'})])
assert t.uptodate[0] == (custom_check, [123], {'xxx':'yyy'})
def test_str(self):
t = task.Task("Task X", ["taskcmd"], uptodate=['my-cmd xxx'])
assert t.uptodate[0] == ('my-cmd xxx', [], {})
def test_object_with_configure(self):
class Check(object):
def __call__(self): return True
def configure_task(self, task):
task.task_dep.append('y1')
check = Check()
t = task.Task("Task X", ["taskcmd"], uptodate=[check])
assert (check, [], {}) == t.uptodate[0]
assert ['y1'] == t.task_dep
def test_invalid(self):
pytest.raises(task.InvalidTask,
task.Task, "Task X", ["taskcmd"], uptodate=[{'x':'y'}])
class TestTaskExpandFileDep(object):
def test_dependencyStringIsFile(self):
my_task = task.Task("Task X", ["taskcmd"], file_dep=["123","456"])
assert set(["123","456"]) == my_task.file_dep
def test_file_dep_path(self):
my_task = task.Task("Task X", ["taskcmd"],
file_dep=["123", Path("456"), PurePath("789")])
assert {"123", "456", "789"} == my_task.file_dep
def test_file_dep_str(self):
pytest.raises(task.InvalidTask, task.Task, "Task X", ["taskcmd"],
file_dep=[['aaaa']])
def test_file_dep_unicode(self):
unicode_name = "中文"
my_task = task.Task("Task X", ["taskcmd"], file_dep=[unicode_name])
assert unicode_name in my_task.file_dep
class TestTaskDeps(object):
def test_task_dep(self):
my_task = task.Task("Task X", ["taskcmd"], task_dep=["123","4*56"])
assert ["123"] == my_task.task_dep
assert ["4*56"] == my_task.wild_dep
def test_calc_dep(self):
my_task = task.Task("Task X", ["taskcmd"], calc_dep=["123"])
assert set(["123"]) == my_task.calc_dep
def test_update_deps(self):
my_task = task.Task("Task X", ["taskcmd"], file_dep=["fileX"],
calc_dep=["calcX"], uptodate=[None])
my_task.update_deps({'file_dep': ['fileY'],
'task_dep': ['taskY'],
'calc_dep': ['calcX', 'calcY'],
'uptodate': [True],
'to_be_ignored': 'asdf',
})
assert set(['fileX', 'fileY']) == my_task.file_dep
assert ['taskY'] == my_task.task_dep
assert set(['calcX', 'calcY']) == my_task.calc_dep
assert [(None, None, None), (True, None, None)] == my_task.uptodate
class TestTaskTargets(object):
def test_targets_can_be_path(self):
my_task = task.Task("Task X", ["taskcmd"],
targets=["123", Path("456"), PurePath("789")])
assert ["123", "456", "789"] == my_task.targets
def test_targets_should_be_string_or_path(self):
assert pytest.raises(task.InvalidTask, task.Task, "Task X", ["taskcmd"],
targets=["123", Path("456"), 789])
class TestTask_Loader(object):
def test_delayed_after_execution(self):
        # the `executed` param creates an implicit task_dep
delayed = task.DelayedLoader(lambda: None, executed='foo')
t1 = task.Task('bar', None, loader=delayed)
assert t1.task_dep == ['foo']
class TestTask_Getargs(object):
def test_ok(self):
getargs = {'x' : ('t1','x'), 'y': ('t2','z')}
t = task.Task('t3', None, getargs=getargs)
assert len(t.uptodate) == 2
assert ['t1', 't2'] == sorted([t.uptodate[0][0].dep_name,
t.uptodate[1][0].dep_name])
def test_invalid_desc(self):
getargs = {'x' : 't1'}
assert pytest.raises(task.InvalidTask, task.Task,
't3', None, getargs=getargs)
def test_invalid_desc_tuple(self):
getargs = {'x' : ('t1',)}
assert pytest.raises(task.InvalidTask, task.Task,
't3', None, getargs=getargs)
class TestTaskTitle(object):
def test_title(self):
t = task.Task("MyName",["MyAction"])
assert "MyName" == t.title()
def test_custom_title(self):
t = task.Task("MyName",["MyAction"], title=(lambda x: "X%sX" % x.name))
assert "X%sX"%str(t.name) == t.title(), t.title()
class TestTaskRepr(object):
def test_repr(self):
t = task.Task("taskX",None,('t1','t2'))
assert "<Task: taskX>" == repr(t), repr(t)
class TestTaskActions(object):
def test_success(self):
t = task.Task("taskX", [PROGRAM])
t.execute(Stream(0))
def test_result(self):
# task.result is the value of last action
t = task.Task('t1', ["%s hi_list hi1" % PROGRAM,
"%s hi_list hi2" % PROGRAM])
t.dep_changed = []
t.execute(Stream(0))
assert "hi_listhi2" == t.result
def test_values(self):
def return_dict(d): return d
# task.result is the value of last action
t = task.Task('t1', [(return_dict, [{'x':5}]),
(return_dict, [{'y':10}]),])
t.execute(Stream(0))
assert {'x':5, 'y':10} == t.values
def test_failure(self):
t = task.Task("taskX", ["%s 1 2 3" % PROGRAM])
got = t.execute(Stream(0))
assert isinstance(got, TaskError)
# make sure all cmds are being executed.
def test_many(self):
t = task.Task("taskX",["%s hi_stdout hi2" % PROGRAM,
"%s hi_list hi6" % PROGRAM])
t.dep_changed = []
t.execute(Stream(0))
got = "".join([a.out for a in t.actions])
assert "hi_stdouthi_list" == got, repr(got)
def test_fail_first(self):
t = task.Task("taskX", ["%s 1 2 3" % PROGRAM, PROGRAM])
got = t.execute(Stream(0))
assert isinstance(got, TaskError)
def test_fail_second(self):
t = task.Task("taskX", ["%s 1 2" % PROGRAM, "%s 1 2 3" % PROGRAM])
got = t.execute(Stream(0))
assert isinstance(got, TaskError)
# python and commands mixed on same task
def test_mixed(self):
def my_print(msg):
print(msg, end='')
t = task.Task("taskX",["%s hi_stdout hi2" % PROGRAM,
(my_print,['_PY_']),
"%s hi_list hi6" % PROGRAM])
t.dep_changed = []
t.execute(Stream(0))
got = "".join([a.out for a in t.actions])
assert "hi_stdout_PY_hi_list" == got, repr(got)
class TestTaskTeardown(object):
def test_ok(self):
got = []
def put(x):
got.append(x)
t = task.Task('t1', [], teardown=[(put, [1]), (put, [2])])
t.execute(Stream(0))
assert None == t.execute_teardown(Stream(0))
assert [1,2] == got
def test_fail(self):
def my_raise():
raise Exception('hoho')
t = task.Task('t1', [], teardown=[(my_raise,)])
t.execute(Stream(0))
got = t.execute_teardown(Stream(0))
assert isinstance(got, BaseFail)
class TestTaskClean(object):
@pytest.fixture
def tmpdir(self, request):
tmpdir = {}
tmpdir['dir'] = tempfile.mkdtemp(prefix='doit-')
tmpdir['subdir'] = tempfile.mkdtemp(dir=tmpdir['dir'])
files = [os.path.join(tmpdir['dir'], fname)
for fname in ['a.txt',
'b.txt',
os.path.join(tmpdir['subdir'], 'c.txt')]]
tmpdir['files'] = files
# create empty files
for filename in tmpdir['files']:
open(filename, 'a').close()
def remove_tmpdir():
if os.path.exists(tmpdir['dir']):
shutil.rmtree(tmpdir['dir'])
request.addfinalizer(remove_tmpdir)
return tmpdir
def test_clean_nothing(self, tmpdir):
t = task.Task("xxx", None)
assert False == t._remove_targets
assert 0 == len(t.clean_actions)
t.clean(StringIO(), False)
for filename in tmpdir['files']:
assert os.path.exists(filename)
def test_clean_targets(self, tmpdir):
t = task.Task("xxx", None, targets=tmpdir['files'], clean=True)
assert True == t._remove_targets
assert 0 == len(t.clean_actions)
t.clean(StringIO(), False)
for filename in tmpdir['files']:
assert not os.path.exists(filename), filename
def test_clean_non_existent_targets(self):
t = task.Task('xxx', None, targets=["i_dont_exist"], clean=True)
t.clean(StringIO(), False)
# nothing is raised
def test_clean_empty_dirs(self, tmpdir):
# Remove empty directories listed in targets
targets = tmpdir['files'] + [tmpdir['subdir']]
t = task.Task("xxx", None, targets=targets, clean=True)
assert True == t._remove_targets
assert 0 == len(t.clean_actions)
t.clean(StringIO(), False)
for filename in tmpdir['files']:
assert not os.path.exists(filename)
assert not os.path.exists(tmpdir['subdir'])
assert os.path.exists(tmpdir['dir'])
def test_keep_non_empty_dirs(self, tmpdir):
# Keep non empty directories listed in targets
targets = [tmpdir['files'][0], tmpdir['dir']]
t = task.Task("xxx", None, targets=targets, clean=True)
assert True == t._remove_targets
assert 0 == len(t.clean_actions)
t.clean(StringIO(), False)
for filename in tmpdir['files']:
expected = not filename in targets
assert expected == os.path.exists(filename)
assert os.path.exists(tmpdir['dir'])
def test_clean_any_order(self, tmpdir):
# Remove targets in reverse lexical order so that subdirectories' order
# in the targets array is irrelevant
targets = tmpdir['files'] + [tmpdir['dir'], tmpdir['subdir']]
t = task.Task("xxx", None, targets=targets, clean=True)
assert True == t._remove_targets
assert 0 == len(t.clean_actions)
t.clean(StringIO(), False)
for filename in tmpdir['files']:
assert not os.path.exists(filename)
assert not os.path.exists(tmpdir['dir'])
assert not os.path.exists(tmpdir['subdir'])
def test_clean_actions(self, tmpdir):
# a clean action can be anything, it can even not clean anything!
c_path = tmpdir['files'][0]
def say_hello():
fh = open(c_path, 'a')
fh.write("hello!!!")
fh.close()
t = task.Task("xxx",None,targets=tmpdir['files'], clean=[(say_hello,)])
assert False == t._remove_targets
assert 1 == len(t.clean_actions)
t.clean(StringIO(), False)
for filename in tmpdir['files']:
assert os.path.exists(filename)
fh = open(c_path, 'r')
got = fh.read()
fh.close()
assert "hello!!!" == got
def test_clean_action_error(self, capsys):
def fail_clean():
5/0
t = task.Task("xxx", None, clean=[(fail_clean,)])
assert 1 == len(t.clean_actions)
t.clean(StringIO(), dryrun=False)
err = capsys.readouterr()[1]
assert "PythonAction Error" in err
def test_clean_action_kwargs(self):
def fail_clean(dryrun):
print('hello %s' % dryrun)
t = task.Task("xxx", None, clean=[(fail_clean,)])
assert 1 == len(t.clean_actions)
out = StringIO()
t.clean(out, dryrun=False)
assert "hello False" in out.getvalue()
def test_dryrun_file(self, tmpdir):
t = task.Task("xxx", None, targets=tmpdir['files'], clean=True)
assert True == t._remove_targets
assert 0 == len(t.clean_actions)
t.clean(StringIO(), True)
# files are NOT removed
for filename in tmpdir['files']:
assert os.path.exists(filename), filename
def test_dryrun_dir(self, tmpdir):
targets = tmpdir['files'] + [tmpdir['dir']]
for filename in tmpdir['files']:
os.remove(filename)
t = task.Task("xxx", None, targets=targets, clean=True)
assert True == t._remove_targets
assert 0 == len(t.clean_actions)
t.clean(StringIO(), True)
assert os.path.exists(tmpdir['dir'])
def test_dryrun_actions_not_executed(self, tmpdir):
# clean action is not executed at all if it does not contain
# a `dryrun` parameter
self.executed = False
def say_hello(): self.executed = True
t = task.Task("xxx", None, targets=tmpdir['files'],
clean=[(say_hello,)])
assert False == t._remove_targets
assert 1 == len(t.clean_actions)
t.clean(StringIO(), True)
assert not self.executed
def test_dryrun_actions_with_param_true(self, tmpdir):
        # clean action is executed and receives dryrun=True when it declares
        # a `dryrun` parameter
self.executed = False
self.dryrun_val = None
def say_hello(dryrun):
self.executed = True
self.dryrun_val = dryrun
t = task.Task("xxx", None, targets=tmpdir['files'],
clean=[(say_hello,)])
assert False == t._remove_targets
assert 1 == len(t.clean_actions)
t.clean(StringIO(), dryrun=True)
assert self.executed is True
assert self.dryrun_val is True
def test_dryrun_actions_with_param_false(self, tmpdir):
        # clean action is executed and receives dryrun=False when it declares
        # a `dryrun` parameter
self.executed = False
self.dryrun_val = None
def say_hello(dryrun):
self.executed = True
self.dryrun_val = dryrun
t = task.Task("xxx", None, targets=tmpdir['files'],
clean=[(say_hello,)])
assert False == t._remove_targets
assert 1 == len(t.clean_actions)
t.clean(StringIO(), dryrun=False)
assert self.executed is True
assert self.dryrun_val is False
class TestTaskDoc(object):
def test_no_doc(self):
t = task.Task("name", ["action"])
assert '' == t.doc
def test_single_line(self):
t = task.Task("name", ["action"], doc=" i am doc")
assert "i am doc" == t.doc
def test_multiple_lines(self):
t = task.Task("name", ["action"], doc="i am doc \n with many lines\n")
assert "i am doc" == t.doc
def test_start_with_empty_lines(self):
t = task.Task("name", ["action"], doc="\n\n i am doc \n")
assert "i am doc" == t.doc
def test_just_new_line(self):
t = task.Task("name", ["action"], doc=" \n \n\n")
assert "" == t.doc
class TestTaskPickle(object):
    def test_getstate(self):
t = task.Task("my_name", ["action"])
pd = t.__getstate__()
assert None == pd['uptodate']
assert None == pd['_action_instances']
def test_safedict(self):
t = task.Task("my_name", ["action"])
pd = t.pickle_safe_dict()
assert 'uptodate' not in pd
assert '_action_instances' not in pd
assert 'value_savers' not in pd
assert 'clean_actions' not in pd
class TestTaskUpdateFromPickle(object):
def test_change_value(self):
t = task.Task("my_name", ["action"])
assert {} == t.values
class FakePickle():
def __init__(self):
self.values = [1,2,3]
t.update_from_pickle(FakePickle().__dict__)
assert [1,2,3] == t.values
assert 'my_name' == t.name
class TestDictToTask(object):
def testDictOkMinimum(self):
dict_ = {'name':'simple','actions':['xpto 14']}
assert isinstance(task.dict_to_task(dict_), task.Task)
def testDictFieldTypo(self):
dict_ = {'name':'z','actions':['xpto 14'],'typo_here':['xxx']}
pytest.raises(action.InvalidTask, task.dict_to_task, dict_)
def testDictMissingFieldAction(self):
pytest.raises(action.InvalidTask, task.dict_to_task, {'name':'xpto 14'})
class TestResultDep(object):
def test_single(self, dep_manager):
tasks = {'t1': task.Task("t1", None, uptodate=[task.result_dep('t2')]),
't2': task.Task("t2", None),
}
# _config_task was executed and t2 added as task_dep
assert ['t2'] == tasks['t1'].task_dep
# first t2 result
tasks['t2'].result = 'yes'
dep_manager.save_success(tasks['t2'])
assert 'run' == dep_manager.get_status(tasks['t1'], tasks).status # first time
tasks['t1'].save_extra_values()
dep_manager.save_success(tasks['t1'])
assert 'up-to-date' == dep_manager.get_status(tasks['t1'], tasks).status
# t2 result changed
tasks['t2'].result = '222'
dep_manager.save_success(tasks['t2'])
assert 'run' == dep_manager.get_status(tasks['t1'], tasks).status
tasks['t1'].save_extra_values()
dep_manager.save_success(tasks['t1'])
assert 'up-to-date' == dep_manager.get_status(tasks['t1'], tasks).status
def test_group(self, dep_manager):
tasks = {'t1': task.Task("t1", None, uptodate=[task.result_dep('t2')]),
't2': task.Task("t2", None, task_dep=['t2:a', 't2:b'],
has_subtask=True),
't2:a': task.Task("t2:a", None),
't2:b': task.Task("t2:b", None),
}
# _config_task was executed and t2 added as task_dep
assert ['t2'] == tasks['t1'].task_dep
# first t2 result
tasks['t2:a'].result = 'yes1'
dep_manager.save_success(tasks['t2:a'])
tasks['t2:b'].result = 'yes2'
dep_manager.save_success(tasks['t2:b'])
assert 'run' == dep_manager.get_status(tasks['t1'], tasks).status # first time
tasks['t1'].save_extra_values()
dep_manager.save_success(tasks['t1'])
assert 'up-to-date' == dep_manager.get_status(tasks['t1'], tasks).status
# t2 result changed
tasks['t2:a'].result = '222'
dep_manager.save_success(tasks['t2:a'])
assert 'run' == dep_manager.get_status(tasks['t1'], tasks).status
tasks['t1'].save_extra_values()
dep_manager.save_success(tasks['t1'])
assert 'up-to-date' == dep_manager.get_status(tasks['t1'], tasks).status
|
{
"content_hash": "65cbe486d190abee00251908a34ce01e",
"timestamp": "",
"source": "github",
"line_count": 694,
"max_line_length": 87,
"avg_line_length": 35.17579250720461,
"alnum_prop": 0.5543175487465181,
"repo_name": "pydoit/doit",
"id": "b31f64e7d8587c50f574af181d230cfa09a870ae",
"size": "24416",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_task.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "442"
},
{
"name": "Python",
"bytes": "561336"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tempfile
import numpy as np
from six.moves import range
import tensorflow as tf
from tensorflow import flags
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.lite.experimental.examples.lstm.rnn import bidirectional_dynamic_rnn
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
FLAGS = flags.FLAGS
# Number of steps to train model.
TRAIN_STEPS = 1
CONFIG = tf.ConfigProto(device_count={"GPU": 0})
class BidirectionalSequenceRnnTest(test_util.TensorFlowTestCase):
def __init__(self, *args, **kwargs):
super(BidirectionalSequenceRnnTest, self).__init__(*args, **kwargs)
# Define constants
# Unrolled through 28 time steps
self.time_steps = 28
# Rows of 28 pixels
self.n_input = 28
# Learning rate for Adam optimizer
self.learning_rate = 0.001
    # MNIST is meant to be classified into 10 classes (0-9).
self.n_classes = 10
# Batch size
self.batch_size = 16
# Rnn Units.
self.num_units = 16
def setUp(self):
super(BidirectionalSequenceRnnTest, self).setUp()
# Import MNIST dataset
data_dir = tempfile.mkdtemp(dir=FLAGS.test_tmpdir)
self.mnist = input_data.read_data_sets(data_dir, one_hot=True)
def buildRnnLayer(self):
return tf.keras.layers.StackedRNNCells([
tf.lite.experimental.nn.TfLiteRNNCell(self.num_units, name="rnn1"),
tf.lite.experimental.nn.TfLiteRNNCell(self.num_units, name="rnn2")
])
def buildModel(self,
fw_rnn_layer,
bw_rnn_layer,
is_dynamic_rnn,
is_inference,
use_sequence_length=False):
"""Build Mnist recognition model.
Args:
fw_rnn_layer: The forward rnn layer either a single rnn cell or a multi
rnn cell.
bw_rnn_layer: The backward rnn layer either a single rnn cell or a multi
rnn cell.
      is_dynamic_rnn: Use dynamic_rnn or not.
      is_inference: Whether the model is built for inference; if so, the
        batch size is fixed to 1.
      use_sequence_length: Whether to use sequence length or not. Defaults to
        False.
Returns:
A tuple containing:
- Input tensor of the model.
- Prediction tensor of the model.
- Output class tensor of the model.
"""
# Weights and biases for output softmax layer.
out_weights = tf.Variable(
tf.random.normal([self.num_units * 2, self.n_classes]))
out_bias = tf.Variable(tf.random.normal([self.n_classes]))
batch_size = self.batch_size
if is_inference:
batch_size = 1
# input image placeholder
x = tf.placeholder(
"float", [batch_size, self.time_steps, self.n_input],
name="INPUT_IMAGE")
sequence_length = None
if use_sequence_length:
sequence_length = [self.time_steps] * batch_size
if is_dynamic_rnn:
rnn_inputs = tf.transpose(x, [1, 0, 2])
outputs, _ = bidirectional_dynamic_rnn(
fw_rnn_layer,
bw_rnn_layer,
rnn_inputs,
sequence_length,
dtype="float32",
time_major=True)
fw_outputs, bw_outputs = outputs
output = tf.concat([fw_outputs, bw_outputs], 2)
output = tf.unstack(output, axis=0)
output = output[-1]
else:
rnn_inputs = tf.unstack(x, self.time_steps, 1)
      # Sequence length is not supported for the static rnn since we don't
      # have a wrapper for it. At training phase we can still have
      # sequence_length, but at inference phase we change it to None.
if is_inference:
sequence_length = None
outputs, _, _ = tf.nn.static_bidirectional_rnn(
fw_rnn_layer,
bw_rnn_layer,
rnn_inputs,
dtype="float32",
sequence_length=sequence_length)
output = outputs[-1]
# Compute logits by multiplying output of shape [batch_size,num_units*2]
# by the softmax layer's out_weight of shape [num_units*2,n_classes]
# plus out_bias
prediction = tf.matmul(output, out_weights) + out_bias
output_class = tf.nn.softmax(prediction, name="OUTPUT_CLASS")
return x, prediction, output_class
def trainModel(self, x, prediction, output_class, sess):
"""Train the model.
Args:
x: The input tensor.
prediction: The prediction class tensor.
output_class: The output tensor.
sess: The graph session.
"""
# input label placeholder
y = tf.placeholder("float", [None, self.n_classes])
# Loss function
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))
# Optimization
opt = tf.train.AdamOptimizer(
learning_rate=self.learning_rate).minimize(loss)
# Initialize variables
init = tf.global_variables_initializer()
sess.run(init)
for _ in range(TRAIN_STEPS):
batch_x, batch_y = self.mnist.train.next_batch(
batch_size=self.batch_size, shuffle=False)
batch_x = batch_x.reshape((self.batch_size, self.time_steps,
self.n_input))
sess.run(opt, feed_dict={x: batch_x, y: batch_y})
def saveAndRestoreModel(self,
fw_rnn_layer,
bw_rnn_layer,
sess,
saver,
is_dynamic_rnn,
use_sequence_length=False):
"""Saves and restores the model to mimic the most common use case.
Args:
fw_rnn_layer: The forward rnn layer either a single rnn cell or a multi
rnn cell.
bw_rnn_layer: The backward rnn layer either a single rnn cell or a multi
rnn cell.
sess: Old session.
saver: Saver created by tf.compat.v1.train.Saver()
is_dynamic_rnn: Use dynamic_rnn or not.
      use_sequence_length: Whether to use sequence length or not. Defaults to
        False.
Returns:
A tuple containing:
- Input tensor of the restored model.
- Prediction tensor of the restored model.
      - Output tensor, which is the softmax result of the prediction tensor.
- new session of the restored model.
"""
model_dir = tempfile.mkdtemp(dir=FLAGS.test_tmpdir)
saver.save(sess, model_dir)
# Reset the graph.
tf.reset_default_graph()
x, prediction, output_class = self.buildModel(
fw_rnn_layer, bw_rnn_layer, is_dynamic_rnn, True, use_sequence_length)
new_sess = tf.compat.v1.Session(config=CONFIG)
saver = tf.train.Saver()
saver.restore(new_sess, model_dir)
return x, prediction, output_class, new_sess
def getInferenceResult(self, x, output_class, sess):
"""Get inference result given input tensor and output tensor.
Args:
x: The input tensor.
output_class: The output tensor.
sess: Current session.
Returns:
A tuple containing:
- Input of the next batch, batch size is 1.
- Expected output.
"""
b1, _ = self.mnist.train.next_batch(batch_size=1)
sample_input = np.reshape(b1, (1, self.time_steps, self.n_input))
expected_output = sess.run(output_class, feed_dict={x: sample_input})
return sample_input, expected_output
def tfliteInvoke(self,
sess,
test_inputs,
input_tensor,
output_tensor,
use_mlir_converter=False):
"""Get tflite inference result.
    This method converts the TensorFlow session to a tflite model, then runs
    tflite inference on the given inputs and returns the results.
Args:
sess: Current tensorflow session.
test_inputs: The test inputs for tflite.
input_tensor: The input tensor of tensorflow graph.
output_tensor: The output tensor of tensorflow graph.
use_mlir_converter: Whether or not to use MLIRConverter to convert the
model.
Returns:
The tflite inference result.
"""
converter = tf.lite.TFLiteConverter.from_session(sess, [input_tensor],
[output_tensor])
    converter.experimental_new_converter = use_mlir_converter
    tflite = converter.convert()
interpreter = tf.lite.Interpreter(model_content=tflite)
interpreter.allocate_tensors()
input_index = interpreter.get_input_details()[0]["index"]
interpreter.set_tensor(input_index, test_inputs)
interpreter.invoke()
output_index = interpreter.get_output_details()[0]["index"]
result = interpreter.get_tensor(output_index)
# Reset all variables so it will not pollute other inferences.
interpreter.reset_all_variables()
return result
def testStaticRnnMultiRnnCell(self):
sess = tf.compat.v1.Session(config=CONFIG)
x, prediction, output_class = self.buildModel(
self.buildRnnLayer(), self.buildRnnLayer(), False, is_inference=False)
self.trainModel(x, prediction, output_class, sess)
saver = tf.train.Saver()
x, prediction, output_class, new_sess = self.saveAndRestoreModel(
self.buildRnnLayer(), self.buildRnnLayer(), sess, saver, False)
test_inputs, expected_output = self.getInferenceResult(
x, output_class, new_sess)
# Test Toco-converted model.
result = self.tfliteInvoke(new_sess, test_inputs, x, output_class, False)
self.assertTrue(np.allclose(expected_output, result, rtol=1e-6, atol=1e-2))
def testStaticRnnMultiRnnCellWithSequenceLength(self):
sess = tf.compat.v1.Session(config=CONFIG)
x, prediction, output_class = self.buildModel(
self.buildRnnLayer(),
self.buildRnnLayer(),
False,
is_inference=False,
use_sequence_length=True)
self.trainModel(x, prediction, output_class, sess)
saver = tf.train.Saver()
x, prediction, output_class, new_sess = self.saveAndRestoreModel(
self.buildRnnLayer(),
self.buildRnnLayer(),
sess,
saver,
False,
use_sequence_length=True)
test_inputs, expected_output = self.getInferenceResult(
x, output_class, new_sess)
# Test Toco-converted model.
result = self.tfliteInvoke(new_sess, test_inputs, x, output_class, False)
self.assertTrue(np.allclose(expected_output, result, rtol=1e-6, atol=1e-2))
@test_util.enable_control_flow_v2
def testDynamicRnnMultiRnnCell(self):
sess = tf.compat.v1.Session(config=CONFIG)
x, prediction, output_class = self.buildModel(
self.buildRnnLayer(), self.buildRnnLayer(), True, is_inference=False)
self.trainModel(x, prediction, output_class, sess)
saver = tf.train.Saver()
x, prediction, output_class, new_sess = self.saveAndRestoreModel(
self.buildRnnLayer(),
self.buildRnnLayer(),
sess,
saver,
is_dynamic_rnn=True)
test_inputs, expected_output = self.getInferenceResult(
x, output_class, new_sess)
# Test Toco-converted model.
result = self.tfliteInvoke(new_sess, test_inputs, x, output_class, False)
self.assertTrue(np.allclose(expected_output, result, rtol=1e-6, atol=1e-2))
@test_util.enable_control_flow_v2
def testDynamicRnnMultiRnnCellWithSequenceLength(self):
sess = tf.compat.v1.Session(config=CONFIG)
x, prediction, output_class = self.buildModel(
self.buildRnnLayer(),
self.buildRnnLayer(),
True,
is_inference=False,
use_sequence_length=True)
self.trainModel(x, prediction, output_class, sess)
saver = tf.train.Saver()
x, prediction, output_class, new_sess = self.saveAndRestoreModel(
self.buildRnnLayer(),
self.buildRnnLayer(),
sess,
saver,
is_dynamic_rnn=True,
use_sequence_length=True)
test_inputs, expected_output = self.getInferenceResult(
x, output_class, new_sess)
# Test Toco-converted model.
result = self.tfliteInvoke(new_sess, test_inputs, x, output_class, False)
self.assertTrue(np.allclose(expected_output, result, rtol=1e-6, atol=1e-2))
if __name__ == "__main__":
test.main()
|
{
"content_hash": "a6b5486cbfb6fe0d6cf2c63eaebf83b1",
"timestamp": "",
"source": "github",
"line_count": 360,
"max_line_length": 84,
"avg_line_length": 33.480555555555554,
"alnum_prop": 0.6470588235294118,
"repo_name": "adit-chandra/tensorflow",
"id": "606f969b92a32b2b3ea9955d8dc26bce27119ea7",
"size": "12770",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tensorflow/lite/experimental/examples/lstm/bidirectional_sequence_rnn_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "5003"
},
{
"name": "Batchfile",
"bytes": "45988"
},
{
"name": "C",
"bytes": "773694"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "76734263"
},
{
"name": "CMake",
"bytes": "6545"
},
{
"name": "Dockerfile",
"bytes": "81136"
},
{
"name": "Go",
"bytes": "1679107"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "952944"
},
{
"name": "Jupyter Notebook",
"bytes": "567243"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "1299322"
},
{
"name": "Makefile",
"bytes": "61397"
},
{
"name": "Objective-C",
"bytes": "104706"
},
{
"name": "Objective-C++",
"bytes": "297753"
},
{
"name": "PHP",
"bytes": "24055"
},
{
"name": "Pascal",
"bytes": "3752"
},
{
"name": "Pawn",
"bytes": "17546"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "38764318"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "7459"
},
{
"name": "Shell",
"bytes": "643787"
},
{
"name": "Smarty",
"bytes": "34727"
},
{
"name": "Swift",
"bytes": "62814"
}
],
"symlink_target": ""
}
|
from kazoo.client import KazooClient, KazooState
from kazoo.exceptions import ZookeeperError, KazooException
from threading import Thread
import argparse
import sys
from time import sleep
class ZookeeperWatcher():
zoo_client = None # The KazooClient to manage the config
point_path = None # Zookeeper path to the pointed-to file
pointed_at_expired = None # is True when the assignment has been set to
# None but we cannot remove the config listener
valid_handler = None # the function to call when the validity changes
config_handler = None # the function to call when the config changes
error_handler = None # the function to call when an error occurs in reading
valid_file = False # the current state of the ConfigWatcher with ZK
do_not_restart = False # used when closing via ^C
old_data = '' # The current file contents, to see if a change occurred
old_pointed = '' # the current pointed path, to see if change occurred
INVALID_PATH = "Invalid pointer path"
INVALID_GET = "Invalid get on file path"
BAD_CONNECTION = "Connection interrupted with Zookeeper, re-establishing"
def __init__(self, hosts, filepath, valid_handler=None,
config_handler=None, error_handler=None, pointer=False,
ensure=False, valid_init=True):
'''
Zookeeper file watcher, used to tell a program their zookeeper file has
changed. Can be used to watch a single file, or both a file and path of
its contents. Manages all connections, drops, reconnections for you.
@param hosts: The zookeeper hosts to use
@param filepath: The full path to the file to watch
@param valid_handler: The method to call for a 'is valid' state change
@param config_handler: The method to call when a content change occurs
@param error_handler: The method to call when an error occurs
@param pointer: Set to true if the file contents are actually a path to
another zookeeper file, where the real config resides
@param ensure: Set to true for the ZooWatcher to create the watched file
@param valid_init: Ensure the client can connect to Zookeeper first try
Ex 1. /stuff/A: "stuff I care about"
Ex 2. /stuff/A: "/other/stuff", /other/stuff: "contents I care about"
- in Ex 2 you care about /other/stuff contents
but are only aware of your assignment /stuff/A
You can use this class as any combination of event driven or polling.
Polling:
In the main loop of your program, check if is_valid() is
True, otherwise clear your contents as there is some ZK error.
Event:
You will be notified via the various handlers when content changes.
'''
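# A minimal polling sketch (host/path values are made up; see main()
# below for a fuller event-driven example):
#   zw = ZookeeperWatcher('zk-host:2181', '/stuff/A')
#   while True:
#       if zw.is_valid():
#           handle(zw.get_file_contents())  # handle() is hypothetical
#       sleep(1)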
self.hosts = hosts
self.my_file = filepath
self.pointer = pointer
self.ensure = ensure
self.valid_handler = valid_handler
self.config_handler = config_handler
self.error_handler = error_handler
if valid_init:
# this will throw an exception if it can't start right away
self.zoo_client = KazooClient(hosts=self.hosts)
self.zoo_client.start()
self.threaded_start(no_init=True)
def threaded_start(self, no_init=False):
'''
Spawns a worker thread to set up the zookeeper connection
'''
thread = Thread(target=self.init_connections, kwargs={
'no_init': no_init})
thread.setDaemon(True)
thread.start()
thread.join()
def init_connections(self, no_init=False):
'''
Sets up the initial Kazoo Client and watches
'''
success = False
self.set_valid(False)
if not no_init:
if self.zoo_client:
self.zoo_client.remove_listener(self.state_listener)
self.old_data = ''
self.old_pointed = ''
while not success:
try:
if self.zoo_client is None:
self.zoo_client = KazooClient(hosts=self.hosts)
self.zoo_client.start()
else:
# self.zoo_client.stop()
self.zoo_client._connection.connection_stopped.set()
self.zoo_client.close()
self.zoo_client = KazooClient(hosts=self.hosts)
self.zoo_client.start()
except Exception, e:
print "ZKWatcher Exception:", e
sleep(1)
continue
self.setup()
success = self.update_file(self.my_file)
sleep(5)
else:
self.setup()
self.update_file(self.my_file)
def setup(self):
'''
Ensures the path to the watched file exists and we have a state
listener
'''
self.zoo_client.add_listener(self.state_listener)
if self.ensure:
self.zoo_client.ensure_path(self.my_file)
def state_listener(self, state):
'''
Restarts the session if we get anything besides CONNECTED
'''
if state == KazooState.SUSPENDED:
self.set_valid(False)
self.call_error(self.BAD_CONNECTION)
elif state == KazooState.LOST and not self.do_not_restart:
self.threaded_start()
elif state == KazooState.CONNECTED:
# This is going to throw a SUSPENDED kazoo error
# which will cause the sessions to be wiped and re established.
# Used b/c of massive connection pool issues
self.zoo_client.stop()
def is_valid(self):
'''
@return: True if the currently watch file is valid
'''
return self.valid_file
def ping(self):
'''
Simple command to test if the zookeeper session is able to connect
at this very moment
'''
try:
# dummy ping to ensure we are still connected
self.zoo_client.server_version()
return True
except KazooException:
return False
def close(self, kill_restart=True):
'''
Use when you would like to close everything down
@param kill_restart: Prevent kazoo from restarting
'''
self.do_not_restart = kill_restart
self.zoo_client.stop()
self.zoo_client.close()
def get_file_contents(self, pointer=False):
'''
Gets any file contents you care about. Defaults to the main file
@param pointer: Return the contents of the pointer file, not the
pointed-at file
@return: A string of the contents
'''
if self.pointer:
if pointer:
return self.old_pointed
else:
return self.old_data
else:
return self.old_data
def watch_file(self, event):
'''
Fired when changes made to the file
'''
if not self.update_file(self.my_file):
self.threaded_start()
def update_file(self, path):
'''
Updates the file watcher and calls the appropriate method for results
@return: False if we need to keep trying the connection
'''
try:
# grab the file
result, stat = self.zoo_client.get(path, watch=self.watch_file)
except ZookeeperError:
self.set_valid(False)
self.call_error(self.INVALID_GET)
return False
if self.pointer:
if result is not None and len(result) > 0:
self.pointed_at_expired = False
# file is a pointer, go update and watch other file
self.point_path = result
if self.compare_pointer(result):
self.update_pointed()
else:
self.pointed_at_expired = True
self.old_pointed = ''
self.old_data = ''
self.set_valid(False)
self.call_error(self.INVALID_PATH)
else:
# file is not a pointer, return contents
if self.compare_data(result):
self.call_config(result)
self.set_valid(True)
return True
def watch_pointed(self, event):
'''
Fired when changes made to pointed file
'''
self.update_pointed()
def update_pointed(self):
'''
Grabs the latest file contents based on the pointer uri
'''
# only grab file if our pointer is still good (not None)
if not self.pointed_at_expired:
try:
conf_string, stat2 = self.zoo_client.get(self.point_path,
watch=self.watch_pointed)
except ZookeeperError:
self.old_data = ''
self.set_valid(False)
self.pointed_at_expired = True
self.call_error(self.INVALID_PATH)
return
if self.compare_data(conf_string):
self.call_config(conf_string)
self.set_valid(True)
def set_valid(self, boolean):
'''
Sets the state and calls the change if needed
@param boolean: The state (true or false)
'''
old_state = self.is_valid()
self.valid_file = boolean
if old_state != self.valid_file:
self.call_valid(self.valid_file)
def call_valid(self, state):
'''
Calls the valid change function passed in
@param state: The new validity state
'''
if self.valid_handler is not None:
self.valid_handler(self.is_valid())
def call_config(self, new_config):
'''
Calls the config function passed in
@param new_config: The new config
'''
if self.config_handler is not None:
self.config_handler(new_config)
def call_error(self, message):
'''
Calls the error function passed in
@param message: The message to throw
'''
if self.error_handler is not None:
self.error_handler(message)
def compare_data(self, data):
'''
Compares the string data
@return: True if the data is different
'''
if self.old_data != data:
self.old_data = data
return True
return False
def compare_pointer(self, data):
'''
Compares the string data
@return: True if the data is different
'''
if self.old_pointed != data:
self.old_pointed = data
return True
return False
def main():
parser = argparse.ArgumentParser(
description="Zookeeper file watcher")
parser.add_argument('-z', '--zoo-keeper', action='store', required=True,
help="The Zookeeper connection <host>:<port>")
parser.add_argument('-f', '--file', action='store', required=True,
help="The full path to the file to watch in Zookeeper")
parser.add_argument('-p', '--pointer', action='store_const', const=True,
help="The file contents point to another file")
parser.add_argument('-s', '--sleep', nargs='?', const=1, default=1,
type=int, help="The time to sleep between poll checks")
parser.add_argument('-v', '--valid-init', action='store_false',
help="Do not ensure zookeeper is up upon initial setup",
default=True)
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--poll', action='store_true', help="Polling example")
group.add_argument('--event', action='store_true',
help="Event driven example")
args = vars(parser.parse_args())
hosts = args['zoo_keeper']
file = args['file']
pointer = args['pointer']
sleep_time = args['sleep']
poll = args['poll']
event = args['event']
valid = args['valid_init']
def valid_file(state):
print "The valid state is now", state
def change_file(conf_string):
print "Your file contents:", conf_string
def error_file(message):
print "An error was thrown:", message
# You can use any or all of these, polling + handlers, some handlers, etc
if pointer:
if poll:
zoo_watcher = ZookeeperWatcher(hosts, file, pointer=True)
elif event:
zoo_watcher = ZookeeperWatcher(hosts, file,
valid_handler=valid_file,
config_handler=change_file,
error_handler=error_file,
pointer=True, valid_init=valid)
else:
if poll:
zoo_watcher = ZookeeperWatcher(hosts, file)
elif event:
zoo_watcher = ZookeeperWatcher(hosts, file,
valid_handler=valid_file,
config_handler=change_file,
error_handler=error_file,
valid_init=valid)
try:
while True:
if poll:
print "Valid File?", zoo_watcher.is_valid()
print "Contents:", zoo_watcher.get_file_contents()
sleep(sleep_time)
except:
pass
zoo_watcher.close()
if __name__ == "__main__":
sys.exit(main())
|
{
"content_hash": "5702139cfdd47063f70d1646bcdaa425",
"timestamp": "",
"source": "github",
"line_count": 376,
"max_line_length": 80,
"avg_line_length": 36.630319148936174,
"alnum_prop": 0.5600087126987584,
"repo_name": "quixey/scrapy-cluster",
"id": "07832e82ed217974c8b9411798fac8bcf6ca9912",
"size": "13773",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "utils/scutils/zookeeper_watcher.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "46003"
},
{
"name": "Makefile",
"bytes": "165"
},
{
"name": "Python",
"bytes": "292759"
},
{
"name": "Shell",
"bytes": "8082"
}
],
"symlink_target": ""
}
|
from flask import (
render_template,
redirect,
url_for,
request,
session,
current_app,
g)
from flask.ext.login import (
login_user,
logout_user,
current_user,
login_required)
from flask.ext.principal import (
identity_changed,
identity_loaded,
AnonymousIdentity,
Identity,
RoleNeed,
UserNeed)
from flask.ext.babel import gettext
from .. import app, admin_permission, babel
from ..forms.login_form import LoginForm
from .navigations import Navigation, Dropdown, Divider
@app.before_request
def before_request():
""" Set the current flask-login user to g.user
"""
g.user = current_user
@identity_loaded.connect_via(app)
def on_identity_loaded(sender, identity):
''' Set the correct roles for the current user
:sender: Not used
:identity: The identity to set the user and the user roles
'''
# set the identity user object
identity.user = current_user
# Add the UserNeed to the identity
if hasattr(current_user, 'id'):
identity.provides.add(UserNeed(current_user.id))
if hasattr(current_user, 'groups'):
for group in current_user.groups:
identity.provides.add(RoleNeed(group.name))
add_parents(identity, group)
def add_parents(identity, group):
''' Add the parent groups to the Roles
:identity: The identity to add the roles to
:group: The group object to get the parents from
'''
for parent in group.parents:
identity.provides.add(RoleNeed(parent.name))
add_parents(identity, parent)
@app.route('/')
@app.route('/index')
def index():
''' The start page '''
if g.user.is_authenticated():
return render_template(
"index.html", title=gettext("App"), data=gettext("DATA"),
navigations=[
Navigation(gettext('Test'), '#test'),
Navigation(gettext('About'), '#about'),
Dropdown('[{0}]'.format(g.user.name), [
Navigation(gettext('Settings'), '/index'), Divider(),
Navigation(gettext('Logout'), '/logout')])])
else:
return render_template(
"index.html", title="App", data="DATA", navigations=[
Navigation(gettext('Test'), '#test'),
Navigation(gettext('About'), '#about'),
Navigation(gettext('Login'), '/login')])
@app.route('/admin')
@admin_permission.require(403)
def admin():
''' The admin start page '''
return render_template(
'admin.html',
text=gettext("Admin welcome to the Matrix %(name)s", name=g.user.name),
title="App [Admin]",
navigations=[Navigation(gettext('Test'), '#test'),
Navigation(gettext('About'), '#about'),
Dropdown('[{0}]'.format(g.user.name), [
Navigation(gettext('Settings'), '/index'), Divider(),
Navigation(gettext('Logout'), '/logout')])])
@app.route('/login', methods=['GET', 'POST'])
def login():
''' The Login handler for all users. '''
form = LoginForm()
if form.validate_on_submit():
login_user(form.user, remember=form.remember_me)
identity_changed.send(current_app._get_current_object(),
identity=Identity(form.user.id))
return redirect(request.args.get('next') or url_for('index'))
return render_template(
'login.html', form=form, navigations=[
Navigation(gettext('Test'), '#test'),
Navigation(gettext('About'), '#about')])
@app.route('/logout')
@login_required
def logout():
''' Logout the current user. '''
logout_user()
for key in ('identity.name', 'identity.auth_type'):
session.pop(key, None)
# Tell Flask-Prinicpal the user is anonymous
identity_changed.send(current_app._get_current_object(),
identity=AnonymousIdentity())
return redirect(url_for('index'))
@app.errorhandler(404)
def page_not_found(e):
return render_template('404.html'), 404
@app.errorhandler(403)
def access_denied(e):
return render_template('403.html'), 403
@babel.localeselector
def get_local():
translations = [str(tr) for tr in babel.list_translations()] + ['en']
return request.accept_languages.best_match(translations)
|
{
"content_hash": "c54aac9d9125c285ec037771d1afe364",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 79,
"avg_line_length": 30.412587412587413,
"alnum_prop": 0.6084157277535066,
"repo_name": "mweb/flask-starter",
"id": "becffd513ccf36fef0cde61aa5ad18fdc8218f48",
"size": "5720",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/views/main.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "PHP",
"bytes": "38"
},
{
"name": "Python",
"bytes": "49187"
}
],
"symlink_target": ""
}
|
from morsel.entities import Vector
from morsel.quad_field import Field
from morsel.pathfinder import Pathfinder
import matplotlib.pyplot as plt
import networkx as nx
def print_samples(samples):
for sample in samples:
print sample.get_json_string()
sample_field = Field(
Vector(1, 6),
Vector(6, 11.1),
Vector(11.2, 6),
Vector(6, 1))
#---How to generate stratified random samples inside a field: ---
srs = sample_field.get_stratified_random_samples(10, 1, plt=plt)
Pathfinder.tsp_path(srs)
# G = Pathfinder.get_nn_roadmap(srs, 10)
# pos = nx.spring_layout(G)
# nx.draw_networkx_nodes(G, pos, node_size=7)
# nx.draw_networkx_edges(G, pos, width=2)
# plt.show()
|
{
"content_hash": "bfddd7554bc8d50cac05eb294b1ed5e7",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 65,
"avg_line_length": 27.6,
"alnum_prop": 0.7072463768115942,
"repo_name": "NickTikhonov/morsel",
"id": "08b5152cac41165a8054ec4871c6adfe9d3552ed",
"size": "710",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/sanity.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "5777"
},
{
"name": "Python",
"bytes": "14949"
},
{
"name": "Shell",
"bytes": "40"
}
],
"symlink_target": ""
}
|
import os
import sys
import unittest
from test.support import run_unittest, import_module
# Skip tests if we don't have threading.
import_module('threading')
# Skip tests if we don't have concurrent.futures.
import_module('concurrent.futures')
def suite():
tests = unittest.TestSuite()
loader = unittest.TestLoader()
for fn in os.listdir(os.path.dirname(__file__)):
if fn.startswith("test") and fn.endswith(".py"):
mod_name = 'test.test_asyncio.' + fn[:-3]
try:
__import__(mod_name)
except unittest.SkipTest:
pass
else:
mod = sys.modules[mod_name]
tests.addTests(loader.loadTestsFromModule(mod))
return tests
def test_main():
run_unittest(suite())
|
{
"content_hash": "90507582aeb5cc772549b1f758ba76a7",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 63,
"avg_line_length": 27.344827586206897,
"alnum_prop": 0.6090794451450189,
"repo_name": "ArcherCraftStore/ArcherVMPeridot",
"id": "82158af77ddcb6b3f44630f4f3d88bbb57c99820",
"size": "793",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "Python/Lib/test/test_asyncio/__init__.py",
"mode": "33261",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
import logging
from django.http import Http404
from rest_framework import status
from rest_framework.generics import get_object_or_404
from rest_framework.mixins import CreateModelMixin, UpdateModelMixin, DestroyModelMixin
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.viewsets import GenericViewSet
from apps.core.api import mixins
from apps.core.api.serializers import GeoModelSerializer
from apps.core.exceptions import AlreadyExistException, GameAlreadyStartedException, GameAlreadyFinishedException, \
GameNotStartedException
from apps.core.models import PortalUser, Location
from apps.ctf.api.serializers.common import ItemSerializer
from apps.ctf.api.serializers.games import GameSerializer, MarkerSerializer
from apps.ctf.models import Game, Item
__author__ = 'mkr'
logger = logging.getLogger("root")
class GameViewSet(mixins.ModelPermissionsMixin,
CreateModelMixin,
UpdateModelMixin,
DestroyModelMixin,
mixins.RetrieveModelMixin,
mixins.ListModelMixin,
GenericViewSet):
serializer_class = GameSerializer
model = Game
def pre_save(self, obj):
user = self.request.user
setattr(obj, "owner", user)
class ItemViewSet(mixins.ModelPermissionsMixin,
CreateModelMixin,
UpdateModelMixin,
DestroyModelMixin,
mixins.RetrieveModelMixin,
mixins.ListModelMixin,
GenericViewSet):
serializer_class = ItemSerializer
model = Item
class InGameLocation(APIView):
def post(self, request, pk, format=None):
user = request.user
try:
logger.debug("looking for a game with id: '%s'", pk)
game = user.joined_games.get(id=pk)
except Game.DoesNotExist, e:
logger.error(e)
raise Http404
else:
serializer = GeoModelSerializer(data=request.DATA)
if serializer.is_valid():
user = request.user
lat = serializer.object.get('lat')
lon = serializer.object.get('lon')
user.location = Location(lat, lon)
user.save()
logger.debug("location: %s", user.location)
context = {'request': request}
markers = game.get_markers(user, context)
logger.debug("markers size: %d", len(markers))
logger.debug("markers: %s", markers)
serializer = MarkerSerializer(markers, context=context, many=True)
data = serializer.data
logger.debug("data: %s type(%s)", data, type(data))
json_data = {
"markers": data,
"game": {
"red_team_points": game.red_team_points,
"blue_team_points": game.blue_team_points,
"time_to_end": game.get_time_to_end(),
"status": game.status
}
}
return Response(data=json_data, status=status.HTTP_200_OK)
return Response(serializer.errors, status.HTTP_400_BAD_REQUEST)
class JoinToGame(APIView):
def post(self, request, pk):
logger.debug("joining player to selected game...")
game = get_object_or_404(Game, pk=pk)
user = request.user
try:
game.add_player(user)
except AlreadyExistException, e:
return Response(data={"error": e.message}, status=status.HTTP_400_BAD_REQUEST)
except AssertionError, e:
# todo: add error code
return Response(data={"error": e.message}, status=status.HTTP_400_BAD_REQUEST)
except Exception, e:
# todo: add error code
return Response(data={"error": e.message}, status=status.HTTP_400_BAD_REQUEST)
else:
logger.info("Player '%s' was added into the game '%s'", user.username, game.name)
return Response(status=status.HTTP_200_OK)
def delete(self, request, pk):
logger.debug("removing player from selected game...")
game = get_object_or_404(Game, pk=pk)
user = request.user
try:
game.remove_player(user)
except PortalUser.DoesNotExist, e:
return Response(data={"error": e.message}, status=status.HTTP_404_NOT_FOUND)
except AssertionError, e:
return Response(data={"error": e.message}, status=status.HTTP_400_BAD_REQUEST)
else:
logger.info("Player '%s' is no longer in game '%s'", user.username, game.name)
return Response(status=status.HTTP_200_OK)
class StartGame(APIView):
def post(self, request, pk):
logger.debug("starting selected game...")
game = get_object_or_404(Game, pk=pk)
user = request.user
try:
game.start()
except GameAlreadyStartedException:
logger.error("Game: '%s' already started", game)
return Response(data={"error": "Game already started"}, status=status.HTTP_400_BAD_REQUEST)
except GameAlreadyFinishedException:
logger.error("Game: '%s' already finished", game)
return Response(data={"error": "Game already finish"}, status=status.HTTP_400_BAD_REQUEST)
except Exception, e:
# todo: add error code
return Response(data={"error": e.message}, status=status.HTTP_400_BAD_REQUEST)
else:
logger.info("Player '%s' was started the game '%s'", user.username, game.name)
return Response(status=status.HTTP_200_OK)
class StopGame(APIView):
def post(self, request, pk):
logger.debug("stopping selected game...")
game = get_object_or_404(Game, pk=pk)
user = request.user
try:
game.stop()
except GameAlreadyFinishedException:
logger.error("Game: '%s' already finished", game)
return Response(data={"error": "Game already finished"}, status=status.HTTP_400_BAD_REQUEST)
except GameNotStartedException:
logger.error("Game: '%s' not started", game)
return Response(data={"error": "Game not started"}, status=status.HTTP_400_BAD_REQUEST)
except Exception, e:
# todo: add error code
return Response(data={"error": e.message}, status=status.HTTP_400_BAD_REQUEST)
else:
logger.info("Player '%s' was stopped the game '%s'", user.username, game.name)
return Response(status=status.HTTP_200_OK)
|
{
"content_hash": "edff325807dd640f48fadaebd5bc8d20",
"timestamp": "",
"source": "github",
"line_count": 176,
"max_line_length": 116,
"avg_line_length": 38.11363636363637,
"alnum_prop": 0.6046511627906976,
"repo_name": "blstream/CaptureTheFlag",
"id": "9bc89c1a6e56edeb2dab34f39f0695b3df71d97b",
"size": "6708",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ctf-web-app/apps/ctf/api/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "232"
},
{
"name": "Python",
"bytes": "82182"
}
],
"symlink_target": ""
}
|
"""Elementary Rules of Usage.
---
layout: post
source: Strunk & White
source_url: ???
title: Elementary Principles of Composition
date: 2014-06-10 12:31:19
categories: writing
---
Strunk & White say:
1. Choose a suitable design and hold to it.
* MDPNB: Sounds like a principle of `consistency`.
2. Make the paragraph the unit of composition.
* MDPNB: This can be generalized to say something about variability in the
length of paragraphs and sentences. When any device is too often used it
becomes a mannerism.
* MDPNB: Sounds like a principle of `variation`.
3. Use the active voice.
4. Put statements in positive form.
* MDPNB: In some cases this will apply as an invective against the use of
a double negative.
* Ex: He was not very often on time. -> He usually came late.
* Ex:
4.1. Placing negative and positive in opposition makes for a stronger
structure.
* Ex. Not charity, but simple justice.
* Not that I loved Caesar less, but that I loved Rome more.
4.2. Do not use unnecessary auxiliaries or conditionals.
5. Use definite, specific, concrete language.
* A period of unfavorable weather set in. ->It rained every day for a week.
6. Omit needless words.
* `The fact that` is particularly pernicious.
* `who is, which was` and the like are often superfluous
7. Avoid a succession of loose sentences.
* MDPNB Principle of brevity. Take 2.
8. Express coordinate ideas in similar form.
* MDPNB: Principle of parallel structure.
* MDPNB: This one will be hard...
9. Keep related words together.
* MDPNB: Principle of localism in semantics.
10. In summaries, keep to one tense.
* MDPNB: Principle of temporal consistency.
11. Place the emphatic word of a sentence at the end.
* MDPNB: Principle of recency.
"""
from proselint.tools import memoize, preferred_forms_check
@memoize
def check(text):
"""Suggest the preferred forms."""
err = "strunk_white.composition"
msg = "Try '{}' instead of '{}'."
bad_forms = [
# Put statements in positive form
["dishonest", ["not honest"]],
["trifling", ["not important"]],
["forgot", ["did not remember"]],
["ignored", ["did not pay (any )?attention to"]],
["distrusted", ["did not have much confidence in"]],
# Omit needless words
["whether", ["the question as to whether"]],
["no doubt", ["there is no doubt but that"]],
["used for fuel", ["used for fuel purposes"]],
["he", ["he is a man who"]],
["hastily", ["in a hasty manner"]],
["this subject", ["this is a subject that"]],
["Her story is strange.", ["Her story is a strange one."]],
["because", ["the reason why is that"]],
["because / since", ["owing to the fact that"]],
["although / though", ["in spite of the fact that"]],
["remind you / notify you",
["call your attention to the fact that"]],
["I did not know that / I was unaware that",
["I was unaware of the fact that"]],
["his failure", ["the fact that he had not succeeded"]],
["my arrival", ["the fact that i had arrived"]]
]
return preferred_forms_check(text, bad_forms, err, msg)
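# A minimal usage sketch (assumption: check modules are importable
# directly; the exact error-tuple layout is proselint-internal):
#   from proselint.checks.misc import composition
#   errors = composition.check("The reason why is that it rained.")
#   # flags the span where 'because' is the preferred form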
|
{
"content_hash": "1689b6ff5505005857f94142e07683cf",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 79,
"avg_line_length": 41.5,
"alnum_prop": 0.6012621916236374,
"repo_name": "amperser/proselint",
"id": "9f8b744127069cb73724b452e6a97f3748cdcc2d",
"size": "3486",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "proselint/checks/misc/composition.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "2558"
},
{
"name": "HTML",
"bytes": "241413"
},
{
"name": "JavaScript",
"bytes": "249832"
},
{
"name": "Procfile",
"bytes": "83"
},
{
"name": "Python",
"bytes": "333207"
},
{
"name": "Ruby",
"bytes": "364"
},
{
"name": "SCSS",
"bytes": "30668"
},
{
"name": "Shell",
"bytes": "1830"
}
],
"symlink_target": ""
}
|
from datetime import datetime
import glob
from itertools import groupby
import os
import re
import sys
import time
import traceback
# project
from checks import LaconicFilter
import modules
from util import windows_friendly_colon_split
from utils.tailfile import TailFile
def partition(s, sep):
pos = s.find(sep)
if pos == -1:
return (s, sep, '')
else:
return s[0:pos], sep, s[pos + len(sep):]
def point_sorter(p):
# Sort and group by timestamp, metric name, host_name, device_name
return (p[1], p[0], p[3].get('host_name', None), p[3].get('device_name', None))
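# e.g. a hypothetical point ("disk.used", 1432836000, 0.42,
# {"host_name": "web-1"}) sorts under the key
# (1432836000, "disk.used", "web-1", None)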
class EventDefaults(object):
EVENT_TYPE = 'dogstream_event'
EVENT_OBJECT = 'dogstream_event:default'
class Dogstreams(object):
@classmethod
def init(cls, logger, config):
dogstreams_config = config.get('dogstreams', None)
if dogstreams_config:
dogstreams = cls._instantiate_dogstreams(logger, config, dogstreams_config)
else:
dogstreams = []
logger.info("Dogstream parsers: %s" % repr(dogstreams))
return cls(logger, dogstreams)
def __init__(self, logger, dogstreams):
self.logger = logger
self.dogstreams = dogstreams
@classmethod
def _instantiate_dogstreams(cls, logger, config, dogstreams_config):
"""
Expecting dogstreams config value to look like:
<dogstream value>, <dog stream value>, ...
Where <dogstream value> looks like:
<log path>
or
<log path>:<module>:<parser function>
"""
dogstreams = []
# Create a Dogstream object for each <dogstream value>
for config_item in dogstreams_config.split(','):
try:
config_item = config_item.strip()
parts = windows_friendly_colon_split(config_item)
if len(parts) == 2:
logger.warn("Invalid dogstream: %s" % ':'.join(parts))
continue
log_path = cls._get_dogstream_log_paths(parts[0]) if len(parts) else []
parser_spec = ':'.join(parts[1:3]) if len(parts) >= 3 else None
parser_args = parts[3:] if len(parts) >= 3 else None
for path in log_path:
dogstreams.append(Dogstream.init(
logger,
log_path=path,
parser_spec=parser_spec,
parser_args=parser_args,
config=config))
except Exception:
logger.exception("Cannot build dogstream")
return dogstreams
@classmethod
def _get_dogstream_log_paths(cls, path):
"""
Paths may include wildcard *'s and ?'s.
"""
if '*' not in path:
return [path]
return glob.glob(path)
def check(self, agentConfig, move_end=True):
if not self.dogstreams:
return {}
output = {}
for dogstream in self.dogstreams:
try:
result = dogstream.check(agentConfig, move_end)
# result may contain {"dogstream": [new]}.
# If output contains {"dogstream": [old]}, that old value will get concatenated with the new value
assert type(result) == type(output), "dogstream.check must return a dictionary"
for k in result:
if k in output:
output[k].extend(result[k])
else:
output[k] = result[k]
except Exception:
self.logger.exception("Error in parsing %s" % (dogstream.log_path))
return output
class Dogstream(object):
@classmethod
def init(cls, logger, log_path, parser_spec=None, parser_args=None, config=None):
class_based = False
parse_func = None
parse_args = tuple(parser_args or ())
if parser_spec:
try:
parse_func = modules.load(parser_spec)
if isinstance(parse_func, type):
logger.info('Instantiating class-based dogstream')
parse_func = parse_func(
user_args=parse_args or (),
logger=logger,
log_path=log_path,
config=config,
)
parse_args = ()
class_based = True
else:
logger.info('Instantiating function-based dogstream')
except Exception:
logger.exception(traceback.format_exc())
logger.error('Could not load Dogstream line parser "%s" PYTHONPATH=%s' % (
parser_spec,
os.environ.get('PYTHONPATH', ''))
)
logger.info("dogstream: parsing %s with %s (requested %s)" % (log_path, parse_func, parser_spec))
else:
logger.info("dogstream: parsing %s with default parser" % log_path)
return cls(logger, log_path, parse_func, parse_args, class_based=class_based)
def __init__(self, logger, log_path, parse_func=None, parse_args=(), class_based=False):
self.logger = logger
self.class_based = class_based
# Apply LaconicFilter to avoid log flooding
self.logger.addFilter(LaconicFilter("dogstream"))
self.log_path = log_path
self.parse_func = parse_func or self._default_line_parser
self.parse_args = parse_args
self._gen = None
self._values = None
self._freq = 15 # Will get updated on each check()
self._error_count = 0L
self._line_count = 0L
self.parser_state = {}
def check(self, agentConfig, move_end=True):
if self.log_path:
self._freq = int(agentConfig.get('check_freq', 15))
self._values = []
self._events = []
# Build our tail -f
if self._gen is None:
self._gen = TailFile(self.logger, self.log_path, self._line_parser).tail(line_by_line=False, move_end=move_end)
# read until the end of file
try:
self._gen.next()
self.logger.debug("Done dogstream check for file {0}".format(self.log_path))
self.logger.debug("Found {0} metric points".format(len(self._values)))
except StopIteration, e:
self.logger.exception(e)
self.logger.warn("Can't tail %s file" % self.log_path)
check_output = self._aggregate(self._values)
if self._events:
check_output.update({"dogstreamEvents": self._events})
self.logger.debug("Found {0} events".format(len(self._events)))
return check_output
else:
return {}
def _line_parser(self, line):
try:
# alq - Allow parser state to be kept between invocations
# This means a new argument can be passed the custom parsing function
# to store context that can be shared between parsing of lines.
# One example is a running counter, which is incremented each time
# a line is processed.
parsed = None
if self.class_based:
parsed = self.parse_func.parse_line(line)
else:
try:
parsed = self.parse_func(self.logger, line, self.parser_state, *self.parse_args)
except TypeError:
# Arity of parse_func is 3 (old-style), not 4
parsed = self.parse_func(self.logger, line)
self._line_count += 1
if parsed is None:
return
if isinstance(parsed, (tuple, dict)):
parsed = [parsed]
for datum in parsed:
# Check if it's an event
if isinstance(datum, dict):
# An event requires at least a title or a body
if 'msg_title' not in datum and 'msg_text' not in datum:
continue
# Populate the default fields
if 'event_type' not in datum:
datum['event_type'] = EventDefaults.EVENT_TYPE
if 'timestamp' not in datum:
datum['timestamp'] = time.time()
# Make sure event_object and aggregation_key (synonyms) are set
# FIXME when the backend treats those as true synonyms, we can
# deprecate event_object.
if 'event_object' in datum or 'aggregation_key' in datum:
datum['aggregation_key'] = datum.get('event_object', datum.get('aggregation_key'))
else:
datum['aggregation_key'] = EventDefaults.EVENT_OBJECT
datum['event_object'] = datum['aggregation_key']
self._events.append(datum)
continue
# Otherwise, assume it's a metric
try:
metric, ts, value, attrs = datum
except Exception:
continue
# Validation
invalid_reasons = []
try:
# Bucket points into 15 second buckets
ts = (int(float(ts)) / self._freq) * self._freq
date = datetime.fromtimestamp(ts)
assert date.year > 1990
except Exception:
invalid_reasons.append('invalid timestamp')
try:
value = float(value)
except Exception:
invalid_reasons.append('invalid metric value')
if invalid_reasons:
self.logger.debug('Invalid parsed values %s (%s): "%s"',
repr(datum), ', '.join(invalid_reasons), line)
else:
self._values.append((metric, ts, value, attrs))
except Exception:
self.logger.debug("Error while parsing line %s" % line, exc_info=True)
self._error_count += 1
self.logger.error("Parser error: %s out of %s" % (self._error_count, self._line_count))
def _default_line_parser(self, logger, line):
sep = ' '
metric, _, line = partition(line.strip(), sep)
timestamp, _, line = partition(line.strip(), sep)
value, _, line = partition(line.strip(), sep)
attributes = {}
try:
while line:
keyval, _, line = partition(line.strip(), sep)
key, val = keyval.split('=', 1)
attributes[key] = val
except Exception:
logger.debug(traceback.format_exc())
return metric, timestamp, value, attributes
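# A hypothetical line in the default format (space-separated metric,
# timestamp, value, then key=value attributes):
#   "users.online 1432836000 42 metric_type=gauge host=web-1"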
def _aggregate(self, values):
""" Aggregate values down to the second and store as:
{
"dogstream": [(metric, timestamp, value, {key: val})]
}
If there are many values per second for a metric, take the median
"""
output = []
values.sort(key=point_sorter)
for (timestamp, metric, host_name, device_name), val_attrs in groupby(values, key=point_sorter):
attributes = {}
vals = []
for _metric, _timestamp, v, a in val_attrs:
try:
v = float(v)
vals.append(v)
attributes.update(a)
except Exception:
self.logger.debug("Could not convert %s into a float", v)
if len(vals) == 1:
val = vals[0]
elif len(vals) > 1:
val = vals[-1]
else: # len(vals) == 0
continue
metric_type = str(attributes.get('metric_type', '')).lower()
if metric_type == 'gauge':
val = float(val)
elif metric_type == 'counter':
val = sum(vals)
output.append((metric, timestamp, val, attributes))
if output:
return {"dogstream": output}
else:
return {}
# Allow a smooth uninstall of previous version
class RollupLP:
pass
class DdForwarder(object):
QUEUE_SIZE = "queue_size"
QUEUE_COUNT = "queue_count"
RE_QUEUE_STAT = re.compile(r"\[.*\] Queue size: at (.*), (\d+) transaction\(s\), (\d+) KB")
def __init__(self, logger, config):
self.log_path = config.get('ddforwarder_log', '/var/log/ddforwarder.log')
self.logger = logger
self._gen = None
def _init_metrics(self):
self.metrics = {}
def _add_metric(self, name, value, ts):
if name in self.metrics:
self.metrics[name].append((ts, value))
else:
self.metrics[name] = [(ts, value)]
def _parse_line(self, line):
try:
m = self.RE_QUEUE_STAT.match(line)
if m is not None:
ts, count, size = m.groups()
self._add_metric(self.QUEUE_SIZE, size, round(float(ts)))
self._add_metric(self.QUEUE_COUNT, count, round(float(ts)))
except Exception, e:
self.logger.exception(e)
def check(self, agentConfig, move_end=True):
if self.log_path and os.path.isfile(self.log_path):
#reset metric points
self._init_metrics()
# Build our tail -f
if self._gen is None:
self._gen = TailFile(self.logger, self.log_path, self._parse_line).tail(line_by_line=False,
move_end=move_end)
# read until the end of file
try:
self._gen.next()
self.logger.debug("Done ddforwarder check for file %s" % self.log_path)
except StopIteration, e:
self.logger.exception(e)
self.logger.warn("Can't tail %s file" % self.log_path)
return {'ddforwarder': self.metrics}
else:
self.logger.debug("Can't tail datadog forwarder log file: %s" % self.log_path)
return {}
def testddForwarder():
import logging
logger = logging.getLogger("ddagent.checks.datadog")
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.StreamHandler())
config = {'api_key':'my_apikey', 'ddforwarder_log': sys.argv[1]}
dd = DdForwarder(logger, config)
m = dd.check(config, move_end=False)
while True:
print m
time.sleep(5)
m = dd.check(config)
if __name__ == '__main__':
testddForwarder()
|
{
"content_hash": "bc1a4ff486db8e2d7f84fd5bba5d6f4c",
"timestamp": "",
"source": "github",
"line_count": 418,
"max_line_length": 127,
"avg_line_length": 35.303827751196174,
"alnum_prop": 0.5258521379684218,
"repo_name": "gphat/dd-agent",
"id": "ddbd7642e33704405674ac1af03c5897a5d8fb3d",
"size": "14873",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "checks/datadog.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "2717"
},
{
"name": "Go",
"bytes": "2389"
},
{
"name": "HTML",
"bytes": "8758"
},
{
"name": "Nginx",
"bytes": "3908"
},
{
"name": "PowerShell",
"bytes": "2665"
},
{
"name": "Python",
"bytes": "1898335"
},
{
"name": "Ruby",
"bytes": "97701"
},
{
"name": "Shell",
"bytes": "51885"
},
{
"name": "XSLT",
"bytes": "2222"
}
],
"symlink_target": ""
}
|
from rest_framework.authentication import SessionAuthentication
from rest_framework.permissions import IsAdminUser
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework import status
from seaserv import seafile_api
from .utils import check_time_period_valid, \
get_log_events_by_type_and_time
from seahub.api2.authentication import TokenAuthentication
from seahub.api2.throttling import UserRateThrottle
from seahub.api2.utils import api_error
from seahub.base.templatetags.seahub_tags import email2nickname
from seahub.utils.timeutils import datetime_to_isoformat_timestr
from seahub.utils import is_pro_version
class FileUpdate(APIView):
authentication_classes = (TokenAuthentication, SessionAuthentication)
permission_classes = (IsAdminUser,)
throttle_classes = (UserRateThrottle,)
def get(self, request):
if not is_pro_version():
error_msg = 'Feature disabled.'
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
# check the date format, should be like '2015-10-10'
start = request.GET.get('start', None)
end = request.GET.get('end', None)
if not check_time_period_valid(start, end):
error_msg = 'start or end date invalid.'
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
result = []
events = get_log_events_by_type_and_time('file_update', start, end)
if events:
for ev in events:
tmp_repo = seafile_api.get_repo(ev.repo_id)
tmp_repo_name = tmp_repo.name if tmp_repo else ''
result.append({
'commit_id': ev.commit_id,
'repo_id': ev.repo_id,
'repo_name': tmp_repo_name,
'time': datetime_to_isoformat_timestr(ev.timestamp),
'file_operation': ev.file_oper,
'user_name': email2nickname(ev.user),
'user_email': ev.user
})
return Response(result)
|
{
"content_hash": "76bbbdcd9a64d035e4e68b064fae1f4a",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 75,
"avg_line_length": 36.50877192982456,
"alnum_prop": 0.6444017299375301,
"repo_name": "saukrIppl/seahub",
"id": "03870c9af383c52e272817e145b54582fffcbbf5",
"size": "2081",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "seahub/api2/endpoints/admin/file_update.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "329387"
},
{
"name": "CoffeeScript",
"bytes": "21"
},
{
"name": "HTML",
"bytes": "722728"
},
{
"name": "Java",
"bytes": "307193"
},
{
"name": "JavaScript",
"bytes": "7293422"
},
{
"name": "Makefile",
"bytes": "1097"
},
{
"name": "PLpgSQL",
"bytes": "19598"
},
{
"name": "Python",
"bytes": "9050702"
},
{
"name": "Shell",
"bytes": "9695"
}
],
"symlink_target": ""
}
|
from .bugzilla import Bugzilla, parse_bug_id, parse_bug_id_from_changelog
# Unclear if Bug and Attachment need to be public classes.
from .bug import Bug
from .attachment import Attachment
|
{
"content_hash": "86352351cd9ee41efd2ed6b66a8a6b72",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 73,
"avg_line_length": 47.25,
"alnum_prop": 0.798941798941799,
"repo_name": "mogoweb/webkit_for_android5.1",
"id": "bde67c6d5d5627b06f3de396381e3efae6daf56d",
"size": "341",
"binary": false,
"copies": "15",
"ref": "refs/heads/master",
"path": "webkit/Tools/Scripts/webkitpy/common/net/bugzilla/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "AppleScript",
"bytes": "6772"
},
{
"name": "Assembly",
"bytes": "26025"
},
{
"name": "Awk",
"bytes": "2800"
},
{
"name": "Batchfile",
"bytes": "57337"
},
{
"name": "C",
"bytes": "7713030"
},
{
"name": "C++",
"bytes": "153178707"
},
{
"name": "CMake",
"bytes": "192330"
},
{
"name": "CSS",
"bytes": "483041"
},
{
"name": "Common Lisp",
"bytes": "9920"
},
{
"name": "DIGITAL Command Language",
"bytes": "5243"
},
{
"name": "DTrace",
"bytes": "1931"
},
{
"name": "Go",
"bytes": "3744"
},
{
"name": "HTML",
"bytes": "14998422"
},
{
"name": "Java",
"bytes": "1522083"
},
{
"name": "JavaScript",
"bytes": "18008829"
},
{
"name": "Lex",
"bytes": "42554"
},
{
"name": "Lua",
"bytes": "13768"
},
{
"name": "M4",
"bytes": "49839"
},
{
"name": "Makefile",
"bytes": "476166"
},
{
"name": "Module Management System",
"bytes": "9756"
},
{
"name": "Objective-C",
"bytes": "2798053"
},
{
"name": "Objective-C++",
"bytes": "7846322"
},
{
"name": "PHP",
"bytes": "66595"
},
{
"name": "Perl",
"bytes": "1130475"
},
{
"name": "Perl 6",
"bytes": "445215"
},
{
"name": "Python",
"bytes": "5503045"
},
{
"name": "QML",
"bytes": "3331"
},
{
"name": "QMake",
"bytes": "294800"
},
{
"name": "R",
"bytes": "290"
},
{
"name": "Roff",
"bytes": "273562"
},
{
"name": "Ruby",
"bytes": "81928"
},
{
"name": "Scheme",
"bytes": "10604"
},
{
"name": "Shell",
"bytes": "488223"
},
{
"name": "Yacc",
"bytes": "153801"
},
{
"name": "xBase",
"bytes": "328"
}
],
"symlink_target": ""
}
|
from django.shortcuts import render
from django.http import HttpResponse
from django.views.generic import View
from django.shortcuts import redirect
from .scripts.data_management import create_new_user, update_cache, remove_picture_cache, load_user, load_links, get_page
from models import User
# Create your views here.
def search_page(request):
list = User.objects.all()[:5]
return render(request, 'index.html', {'user': list})
class check_user(View):
def post(self, request):
query = request.POST.get('username')
if not User.objects.filter(user_name=query).exists(): # checking if this is a new user
print "New user creation"
user = create_new_user(query) # creating new user in a DB
update_cache(user) # getting list of latest pictures for the user
elif User.objects.get(user_name=query).cache_outdated(): # Checking if the user exists, but cache is older than 10 days
print "User exists, cache outdated"
user = load_user(query) # loading user
remove_picture_cache(user) # removing cached images from DB
update_cache(user) # loading new images and updating cache stamp
else: # user exists and cache is still valid
print "Using existing user"
return redirect("Instagram:result", view=0, user_name=query)
class MyView(View):
def get(self, request, view, user_name):
# <view logic>
user = load_user(user_name) # loading the user
links = load_links(user) # loading the links
context = {
'username': user.user_name,
'id': user.id,
'profile_picture': user.profile_picture,
'urls': links
}
page = get_page(view)
return render(request, page, context)
class MyViewBla(View):
def get(self, request,view,user_id):
# <view logic>
return HttpResponse('result')
# def LoadUser(request, view="0"):
#
# query = request.POST.get('username')
#
# if not User.objects.filter(user_name=query).exists(): # checking if this is a new user
# print "New user creation"
# user = create_new_user(query) # creating new user in a DB
# pictures = update_cache(user) # getting list of latest pictures for the user
#
# context = generate_package(user, pictures) # generating context dict
# elif User.objects.get(user_name=query).cache_outdated(): # Checking if the user exists, but cache is older than 10 days
# print "User exists, cache outdated"
# user = load_user(query) # loading user
# remove_picture_cache(user) # removing cached images from DB
# pictures = update_cache(user) # loading new images and updating cache stamp
#
# context = generate_package(user, pictures) # generating context dict
# else: # user exists and cache is still valid
# print "Using existing user"
# user = load_user(query) # loading the user
# links = load_links(user) # loading the links
#
# context = {
# 'username': user.user_name,
# 'id': user.id,
# 'profile_picture': user.profile_picture,
# 'urls': links
# }
#
# if view:
# page = get_page(view)
# else:
# page = 'content_0.html'
#
# return render(request, page, context)
# def picture_detail(request):
|
{
"content_hash": "ed1e39d0fae79db1f360337e71b95084",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 128,
"avg_line_length": 34.707070707070706,
"alnum_prop": 0.6239813736903376,
"repo_name": "Lord-Phoenix/Instam",
"id": "03efb2bf59210158d86e3679e1f860363c5b3de5",
"size": "3436",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Instagram/views.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1959"
},
{
"name": "HTML",
"bytes": "7774"
},
{
"name": "Python",
"bytes": "14115"
}
],
"symlink_target": ""
}
|
"""Runs YCSB against Cloud Bigtable.
Cloud Bigtable (https://cloud.google.com/bigtable/) is a managed NoSQL database
with an HBase-compatible API.
Compared to hbase_ycsb, this benchmark:
* Modifies hbase-site.xml to work with Cloud Bigtable.
* Adds the Bigtable client JAR.
* Adds alpn-boot-7.0.0.v20140317.jar to the bootclasspath, required to
operate.
This benchmark requires a Cloud Bigtable cluster to be provisioned before
running.
The benchmark will fail if the specified cluster is not found.
"""
import json
import logging
import os
import pipes
import posixpath
import subprocess
from perfkitbenchmarker import configs
from perfkitbenchmarker import data
from perfkitbenchmarker import flags
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.linux_benchmarks import hbase_ycsb_benchmark \
as hbase_ycsb
from perfkitbenchmarker.linux_packages import hbase
from perfkitbenchmarker.linux_packages import ycsb
FLAGS = flags.FLAGS
flags.DEFINE_string('google_bigtable_endpoint', 'bigtable.googleapis.com',
'Google API endpoint for Cloud Bigtable.')
flags.DEFINE_string('google_bigtable_admin_endpoint',
'bigtabletableadmin.googleapis.com',
'Google API endpoint for Cloud Bigtable table '
'administration.')
flags.DEFINE_string('google_bigtable_zone_name', 'us-central1-b',
'Bigtable zone.')
flags.DEFINE_string('google_bigtable_cluster_name', None,
'Bigtable cluster name.')
flags.DEFINE_string(
'google_bigtable_alpn_jar_url',
'http://central.maven.org/maven2/org/mortbay/jetty/alpn/'
'alpn-boot/7.1.3.v20150130/alpn-boot-7.1.3.v20150130.jar',
'URL for the ALPN boot JAR, required for HTTP2')
flags.DEFINE_string(
'google_bigtable_hbase_jar_url',
'https://oss.sonatype.org/service/local/repositories/releases/content/'
'com/google/cloud/bigtable/bigtable-hbase-1.0/'
'0.2.1/bigtable-hbase-1.0-0.2.1.jar',
'URL for the Bigtable-HBase client JAR.')
BENCHMARK_NAME = 'cloud_bigtable_ycsb'
BENCHMARK_CONFIG = """
cloud_bigtable_ycsb:
description: >
Run YCSB against an existing Cloud Bigtable
cluster. Configure the number of client VMs via --num_vms.
vm_groups:
default:
vm_spec: *default_single_core
vm_count: null
"""
HBASE_SITE = 'cloudbigtable/hbase-site.xml.j2'
HBASE_CONF_FILES = [HBASE_SITE]
YCSB_HBASE_LIB = posixpath.join(ycsb.YCSB_DIR, 'hbase-binding', 'lib')
YCSB_HBASE_CONF = posixpath.join(ycsb.YCSB_DIR, 'hbase-binding', 'conf')
REQUIRED_SCOPES = (
'https://www.googleapis.com/auth/bigtable.admin',
'https://www.googleapis.com/auth/bigtable.data')
# TODO(connormccoy): Make table parameters configurable.
COLUMN_FAMILY = 'cf'
def GetConfig(user_config):
return configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
def CheckPrerequisites():
"""Verifies that the required resources are present.
Raises:
perfkitbenchmarker.data.ResourceNotFound: On missing resource.
"""
for resource in HBASE_CONF_FILES:
data.ResourcePath(resource)
hbase.CheckPrerequisites()
ycsb.CheckPrerequisites()
for scope in REQUIRED_SCOPES:
if scope not in FLAGS.gcloud_scopes:
raise ValueError('Scope {0} required.'.format(scope))
# TODO: extract from gcloud config if available.
if not FLAGS.google_bigtable_cluster_name:
raise ValueError('Missing --google_bigtable_cluster_name')
if not FLAGS.google_bigtable_zone_name:
raise ValueError('Missing --google_bigtable_zone_name')
cluster = _GetClusterDescription(FLAGS.project or _GetDefaultProject(),
FLAGS.google_bigtable_zone_name,
FLAGS.google_bigtable_cluster_name)
logging.info('Found cluster: %s', cluster)
def _GetALPNLocalPath():
bn = os.path.basename(FLAGS.google_bigtable_alpn_jar_url)
if not bn.endswith('.jar'):
bn = 'alpn.jar'
return posixpath.join(vm_util.VM_TMP_DIR, bn)
def _GetClusterDescription(project, zone, cluster_name):
"""Gets the description for a Cloud Bigtable cluster.
Args:
project: str. Name of the project in which the cluster was created.
zone: str. Zone of the project in which the cluster was created.
cluster_name: str. Cluster ID of the desired Bigtable cluster.
Returns:
A dictionary containing a cluster description.
Raises:
KeyError: when the cluster was not found.
"""
env = {'CLOUDSDK_CORE_DISABLE_PROMPTS': '1'}
env.update(os.environ)
cmd = [FLAGS.gcloud_path, 'alpha', 'bigtable', 'clusters', 'list', '--quiet',
'--format', 'json', '--project', project]
stdout, stderr, returncode = vm_util.IssueCommand(cmd, env=env)
if returncode:
raise IOError('Command "{0}" failed:\nSTDOUT:\n{1}\nSTDERR:\n{2}'.format(
' '.join(cmd), stdout, stderr))
result = json.loads(stdout)
clusters = {cluster['name']: cluster for cluster in result['clusters']}
expected_cluster_name = 'projects/{0}/zones/{1}/clusters/{2}'.format(
project, zone, cluster_name)
try:
return clusters[expected_cluster_name]
except KeyError:
raise KeyError('Cluster {0} not found in {1}'.format(
expected_cluster_name, list(clusters)))
def _GetTableName():
return 'ycsb{0}'.format(FLAGS.run_uri)
def _GetDefaultProject():
cmd = [FLAGS.gcloud_path, 'config', 'list', '--format', 'json']
stdout, stderr, return_code = vm_util.IssueCommand(cmd)
if return_code:
raise subprocess.CalledProcessError(return_code, cmd, stdout)
config = json.loads(stdout)
try:
return config['core']['project']
except KeyError:
raise KeyError('No default project found in {0}'.format(config))
def _Install(vm):
"""Install YCSB and HBase on 'vm'."""
vm.Install('hbase')
vm.Install('ycsb')
vm.Install('curl')
hbase_lib = posixpath.join(hbase.HBASE_DIR, 'lib')
for url in [FLAGS.google_bigtable_hbase_jar_url]:
jar_name = os.path.basename(url)
jar_path = posixpath.join(YCSB_HBASE_LIB, jar_name)
vm.RemoteCommand('curl -Lo {0} {1}'.format(jar_path, url))
vm.RemoteCommand('cp {0} {1}'.format(jar_path, hbase_lib))
vm.RemoteCommand('curl -Lo {0} {1}'.format(
_GetALPNLocalPath(),
FLAGS.google_bigtable_alpn_jar_url))
vm.RemoteCommand(('echo "export JAVA_HOME=/usr\n'
'export HBASE_OPTS=-Xbootclasspath/p:{0}"'
' >> {1}/hbase-env.sh').format(_GetALPNLocalPath(),
hbase.HBASE_CONF_DIR))
context = {
'google_bigtable_endpoint': FLAGS.google_bigtable_endpoint,
'google_bigtable_admin_endpoint': FLAGS.google_bigtable_admin_endpoint,
'project': FLAGS.project or _GetDefaultProject(),
'cluster': FLAGS.google_bigtable_cluster_name,
'zone': FLAGS.google_bigtable_zone_name,
}
for file_name in HBASE_CONF_FILES:
file_path = data.ResourcePath(file_name)
remote_path = posixpath.join(hbase.HBASE_CONF_DIR,
os.path.basename(file_name))
if file_name.endswith('.j2'):
vm.RenderTemplate(file_path, os.path.splitext(remote_path)[0], context)
else:
vm.RemoteCopy(file_path, remote_path)
def Prepare(benchmark_spec):
"""Prepare the virtual machines to run cloud bigtable.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
benchmark_spec.always_call_cleanup = True
vms = benchmark_spec.vms
vm_util.RunThreaded(_Install, vms)
# Create table
hbase_ycsb.CreateYCSBTable(vms[0], table_name=_GetTableName(),
use_snappy=False, limit_filesize=False)
def Run(benchmark_spec):
"""Spawn YCSB and gather the results.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Returns:
A list of sample.Sample instances.
"""
vms = benchmark_spec.vms
table_name = _GetTableName()
# Add hbase conf dir to the classpath, ALPN to the bootclasspath.
  ycsb_memory = min(vms[0].total_memory_kb // 1024, 4096)
jvm_args = pipes.quote('-Xmx{0}m -Xbootclasspath/p:{1}'.format(
ycsb_memory, _GetALPNLocalPath()))
executor_flags = {'cp': hbase.HBASE_CONF_DIR,
'jvm-args': jvm_args,
'table': table_name}
executor = ycsb.YCSBExecutor('hbase-10', **executor_flags)
cluster_info = _GetClusterDescription(FLAGS.project or _GetDefaultProject(),
FLAGS.google_bigtable_zone_name,
FLAGS.google_bigtable_cluster_name)
metadata = {'ycsb_client_vms': len(vms),
'bigtable_nodes': cluster_info.get('serveNodes')}
# By default YCSB uses a BufferedMutator for Puts / Deletes.
  # This leads to incorrect update latencies, since the call returns
# before the request is acked by the server.
# Disable this behavior during the benchmark run.
run_kwargs = {
'columnfamily': COLUMN_FAMILY,
'clientbuffering': 'false'}
load_kwargs = run_kwargs.copy()
load_kwargs['clientbuffering'] = 'true'
samples = list(executor.LoadAndRun(vms,
load_kwargs=load_kwargs,
run_kwargs=run_kwargs))
for sample in samples:
sample.metadata.update(metadata)
return samples
def Cleanup(benchmark_spec):
"""Cleanup.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
vm = benchmark_spec.vms[0]
# Delete table
command = ("""echo 'disable "{0}"; drop "{0}"; exit' | """
"""{1}/hbase shell""").format(_GetTableName(), hbase.HBASE_BIN)
vm.RemoteCommand(command, should_log=True, ignore_failure=True)
|
{
"content_hash": "2353ebb9c624abe425a6d59c42b91cb8",
"timestamp": "",
"source": "github",
"line_count": 287,
"max_line_length": 79,
"avg_line_length": 34.390243902439025,
"alnum_prop": 0.6728470111448834,
"repo_name": "mateusz-blaszkowski/PerfKitBenchmarker",
"id": "cc656fd16a028f07b2ff6a671b25dc63895ff461",
"size": "10481",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "perfkitbenchmarker/linux_benchmarks/cloud_bigtable_ycsb_benchmark.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Lua",
"bytes": "1547"
},
{
"name": "Python",
"bytes": "1282006"
},
{
"name": "Shell",
"bytes": "23160"
}
],
"symlink_target": ""
}
|
"""
A multi-heart heartbeat system using PUB and ROUTER sockets. Pings are sent out on the PUB,
and hearts are tracked based on their DEALER identities.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import print_function
import time
import uuid
import zmq
from zmq.devices import ThreadDevice, ThreadMonitoredQueue
from zmq.eventloop import ioloop, zmqstream
from IPython.config.configurable import LoggingConfigurable
from IPython.utils.py3compat import str_to_bytes
from IPython.utils.traitlets import Set, Instance, CFloat, Integer, Dict, Bool
from IPython.parallel.util import log_errors
class Heart(object):
"""A basic heart object for responding to a HeartMonitor.
This is a simple wrapper with defaults for the most common
Device model for responding to heartbeats.
It simply builds a threadsafe zmq.FORWARDER Device, defaulting to using
SUB/DEALER for in/out.
You can specify the DEALER's IDENTITY via the optional heart_id argument."""
device=None
id=None
def __init__(self, in_addr, out_addr, mon_addr=None, in_type=zmq.SUB, out_type=zmq.DEALER, mon_type=zmq.PUB, heart_id=None):
if mon_addr is None:
self.device = ThreadDevice(zmq.FORWARDER, in_type, out_type)
else:
self.device = ThreadMonitoredQueue(in_type, out_type, mon_type, in_prefix=b"", out_prefix=b"")
# do not allow the device to share global Context.instance,
# which is the default behavior in pyzmq > 2.1.10
self.device.context_factory = zmq.Context
self.device.daemon=True
self.device.connect_in(in_addr)
self.device.connect_out(out_addr)
if mon_addr is not None:
self.device.connect_mon(mon_addr)
if in_type == zmq.SUB:
self.device.setsockopt_in(zmq.SUBSCRIBE, b"")
if heart_id is None:
heart_id = uuid.uuid4().bytes
self.device.setsockopt_out(zmq.IDENTITY, heart_id)
self.id = heart_id
def start(self):
return self.device.start()
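# A minimal usage sketch for Heart (hypothetical addresses; assumes a monitor
# process serves pings on a PUB socket and collects pongs on a ROUTER):
#
#   heart = Heart('tcp://127.0.0.1:5555', 'tcp://127.0.0.1:5556',
#                 heart_id=b'engine-0')
#   heart.start()  # forwards each ping back out the DEALER as b'engine-0'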
class HeartMonitor(LoggingConfigurable):
"""A basic HeartMonitor class
pingstream: a PUB stream
    pongstream: a ROUTER stream
period: the period of the heartbeat in milliseconds"""
debug = Bool(False, config=True,
help="""Whether to include every heartbeat in debugging output.
Has to be set explicitly, because there will be *a lot* of output.
"""
)
period = Integer(3000, config=True,
help='The frequency at which the Hub pings the engines for heartbeats '
'(in ms)',
)
max_heartmonitor_misses = Integer(10, config=True,
help='Allowed consecutive missed pings from controller Hub to engine before unregistering.',
)
pingstream=Instance('zmq.eventloop.zmqstream.ZMQStream')
pongstream=Instance('zmq.eventloop.zmqstream.ZMQStream')
loop = Instance('zmq.eventloop.ioloop.IOLoop')
def _loop_default(self):
return ioloop.IOLoop.instance()
# not settable:
hearts=Set()
responses=Set()
on_probation=Dict()
last_ping=CFloat(0)
_new_handlers = Set()
_failure_handlers = Set()
lifetime = CFloat(0)
tic = CFloat(0)
def __init__(self, **kwargs):
super(HeartMonitor, self).__init__(**kwargs)
self.pongstream.on_recv(self.handle_pong)
def start(self):
self.tic = time.time()
self.caller = ioloop.PeriodicCallback(self.beat, self.period, self.loop)
self.caller.start()
def add_new_heart_handler(self, handler):
"""add a new handler for new hearts"""
self.log.debug("heartbeat::new_heart_handler: %s", handler)
self._new_handlers.add(handler)
def add_heart_failure_handler(self, handler):
"""add a new handler for heart failure"""
self.log.debug("heartbeat::new heart failure handler: %s", handler)
self._failure_handlers.add(handler)
def beat(self):
self.pongstream.flush()
self.last_ping = self.lifetime
toc = time.time()
self.lifetime += toc-self.tic
self.tic = toc
if self.debug:
self.log.debug("heartbeat::sending %s", self.lifetime)
goodhearts = self.hearts.intersection(self.responses)
missed_beats = self.hearts.difference(goodhearts)
newhearts = self.responses.difference(goodhearts)
for heart in newhearts:
self.handle_new_heart(heart)
heartfailures, on_probation = self._check_missed(missed_beats, self.on_probation,
self.hearts)
for failure in heartfailures:
self.handle_heart_failure(failure)
self.on_probation = on_probation
self.responses = set()
#print self.on_probation, self.hearts
# self.log.debug("heartbeat::beat %.3f, %i beating hearts", self.lifetime, len(self.hearts))
self.pingstream.send(str_to_bytes(str(self.lifetime)))
# flush stream to force immediate socket send
self.pingstream.flush()
def _check_missed(self, missed_beats, on_probation, hearts):
"""Update heartbeats on probation, identifying any that have too many misses.
"""
failures = []
new_probation = {}
for cur_heart in (b for b in missed_beats if b in hearts):
miss_count = on_probation.get(cur_heart, 0) + 1
self.log.info("heartbeat::missed %s : %s" % (cur_heart, miss_count))
if miss_count > self.max_heartmonitor_misses:
failures.append(cur_heart)
else:
new_probation[cur_heart] = miss_count
return failures, new_probation
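    # A worked example of the probation bookkeeping above (illustrative only):
    # with max_heartmonitor_misses = 10, a heart that has already missed ten
    # consecutive pings fails on the eleventh miss; otherwise its count grows:
    #
    #   self._check_missed({b'h'}, {b'h': 10}, {b'h'})  # -> ([b'h'], {})
    #   self._check_missed({b'h'}, {b'h': 3}, {b'h'})   # -> ([], {b'h': 4})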
def handle_new_heart(self, heart):
if self._new_handlers:
for handler in self._new_handlers:
handler(heart)
else:
self.log.info("heartbeat::yay, got new heart %s!", heart)
self.hearts.add(heart)
def handle_heart_failure(self, heart):
if self._failure_handlers:
for handler in self._failure_handlers:
try:
handler(heart)
except Exception as e:
self.log.error("heartbeat::Bad Handler! %s", handler, exc_info=True)
pass
else:
self.log.info("heartbeat::Heart %s failed :(", heart)
self.hearts.remove(heart)
@log_errors
def handle_pong(self, msg):
"a heart just beat"
current = str_to_bytes(str(self.lifetime))
last = str_to_bytes(str(self.last_ping))
if msg[1] == current:
delta = time.time()-self.tic
if self.debug:
self.log.debug("heartbeat::heart %r took %.2f ms to respond", msg[0], 1000*delta)
self.responses.add(msg[0])
elif msg[1] == last:
delta = time.time()-self.tic + (self.lifetime-self.last_ping)
self.log.warn("heartbeat::heart %r missed a beat, and took %.2f ms to respond", msg[0], 1000*delta)
self.responses.add(msg[0])
else:
self.log.warn("heartbeat::got bad heartbeat (possibly old?): %s (current=%.3f)", msg[1], self.lifetime)
|
{
"content_hash": "b7c7ba49a28906257fbae0cc590a89cf",
"timestamp": "",
"source": "github",
"line_count": 192,
"max_line_length": 128,
"avg_line_length": 37.96875,
"alnum_prop": 0.6262002743484225,
"repo_name": "wolfram74/numerical_methods_iserles_notes",
"id": "3692e5ee6049002e40b6bb89bf174c91e01fbedb",
"size": "7312",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "venv/lib/python2.7/site-packages/IPython/parallel/controller/heartmonitor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "282435"
},
{
"name": "C++",
"bytes": "59801"
},
{
"name": "CSS",
"bytes": "2038"
},
{
"name": "FORTRAN",
"bytes": "3707"
},
{
"name": "Groff",
"bytes": "6753"
},
{
"name": "HTML",
"bytes": "37522"
},
{
"name": "JavaScript",
"bytes": "1368241"
},
{
"name": "Python",
"bytes": "31296026"
},
{
"name": "Shell",
"bytes": "3869"
},
{
"name": "Smarty",
"bytes": "21425"
},
{
"name": "XSLT",
"bytes": "366202"
}
],
"symlink_target": ""
}
|
from __future__ import generators
if __name__ == '__main__':
import sys
import os
pkg_dir = os.path.split(os.path.abspath(__file__))[0]
parent_dir, pkg_name = os.path.split(pkg_dir)
is_pygame_pkg = (pkg_name == 'tests' and
os.path.split(parent_dir)[1] == 'pygame')
if not is_pygame_pkg:
sys.path.insert(0, parent_dir)
else:
is_pygame_pkg = __name__.startswith('pygame.tests.')
if is_pygame_pkg:
from pygame.tests.test_utils import test_not_implemented, unittest
else:
from test.test_utils import test_not_implemented, unittest
import pygame
from pygame.compat import long_
import math
#################################### Tests #####################################
class CameraModuleTest(unittest.TestCase):
pass
|
{
"content_hash": "91ad08e614bb3132fbfec61b87ee31fd",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 80,
"avg_line_length": 29.22222222222222,
"alnum_prop": 0.5944233206590621,
"repo_name": "sumpfgottheit/pdu1800_data_provider",
"id": "31d3ea2e2d4c6471d54ced79b72725a9ed51481f",
"size": "871",
"binary": false,
"copies": "14",
"ref": "refs/heads/master",
"path": "pygame32/pygame/tests/camera_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "642350"
},
{
"name": "Java",
"bytes": "11586"
},
{
"name": "Python",
"bytes": "3081455"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from google.cloud.speech_v1.gapic import speech_client
from google.cloud.speech_v1.gapic import enums
from google.cloud.speech_v1.helpers import SpeechHelpers
from google.cloud.speech_v1 import types
class SpeechClient(SpeechHelpers, speech_client.SpeechClient):
__doc__ = speech_client.SpeechClient.__doc__
enums = enums
types = types
__all__ = (
'enums',
'SpeechClient',
'types',
)
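# A hedged usage sketch (assumes valid Google Cloud credentials and audio in a
# GCS bucket; names follow the generated speech_v1 GAPIC surface):
#
#   client = SpeechClient()
#   config = types.RecognitionConfig(
#       encoding=enums.RecognitionConfig.AudioEncoding.LINEAR16,
#       sample_rate_hertz=16000,
#       language_code='en-US')
#   audio = types.RecognitionAudio(uri='gs://my-bucket/audio.raw')
#   response = client.recognize(config, audio)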
|
{
"content_hash": "be5c82ef739ebb80f1272d15dfbd2f48",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 62,
"avg_line_length": 22.65,
"alnum_prop": 0.7218543046357616,
"repo_name": "tseaver/gcloud-python",
"id": "272b623c510dd2551db967acbda0b077fe4236da",
"size": "1028",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "speech/google/cloud/speech_v1/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "PowerShell",
"bytes": "7195"
},
{
"name": "Protocol Buffer",
"bytes": "93642"
},
{
"name": "Python",
"bytes": "2874989"
},
{
"name": "Shell",
"bytes": "4436"
}
],
"symlink_target": ""
}
|
"""Instance Metadata information."""
import base64
import json
import os
import posixpath
from oslo.config import cfg
from nova.api.ec2 import ec2utils
from nova.api.metadata import password
from nova import block_device
from nova.compute import flavors
from nova import conductor
from nova import context
from nova import network
from nova.objects import base as obj_base
from nova.objects import block_device as block_device_obj
from nova.objects import instance as instance_obj
from nova.objects import security_group as secgroup_obj
from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova import utils
from nova.virt import netutils
metadata_opts = [
cfg.StrOpt('config_drive_skip_versions',
default=('1.0 2007-01-19 2007-03-01 2007-08-29 2007-10-10 '
'2007-12-15 2008-02-01 2008-09-01'),
help=('List of metadata versions to skip placing into the '
'config drive')),
cfg.StrOpt('vendordata_driver',
default='nova.api.metadata.vendordata_json.JsonFileVendorData',
help='Driver to use for vendor data'),
]
CONF = cfg.CONF
CONF.register_opts(metadata_opts)
CONF.import_opt('dhcp_domain', 'nova.network.manager')
VERSIONS = [
'1.0',
'2007-01-19',
'2007-03-01',
'2007-08-29',
'2007-10-10',
'2007-12-15',
'2008-02-01',
'2008-09-01',
'2009-04-04',
]
FOLSOM = '2012-08-10'
GRIZZLY = '2013-04-04'
HAVANA = '2013-10-17'
OPENSTACK_VERSIONS = [
FOLSOM,
GRIZZLY,
HAVANA,
]
VERSION = "version"
CONTENT = "content"
CONTENT_DIR = "content"
MD_JSON_NAME = "meta_data.json"
VD_JSON_NAME = "vendor_data.json"
UD_NAME = "user_data"
PASS_NAME = "password"
LOG = logging.getLogger(__name__)
class InvalidMetadataVersion(Exception):
pass
class InvalidMetadataPath(Exception):
pass
class InstanceMetadata(object):
"""Instance metadata."""
def __init__(self, instance, address=None, content=None, extra_md=None,
conductor_api=None, network_info=None, vd_driver=None):
"""Creation of this object should basically cover all time consuming
collection. Methods after that should not cause time delays due to
network operations or lengthy cpu operations.
The user should then get a single instance and make multiple method
calls on it.
"""
if not content:
content = []
ctxt = context.get_admin_context()
# NOTE(danms): This should be removed after bp:compute-manager-objects
if not isinstance(instance, instance_obj.Instance):
instance = instance_obj.Instance._from_db_object(
ctxt, instance_obj.Instance(), instance,
expected_attrs=['metadata', 'system_metadata'])
self.instance = instance
self.extra_md = extra_md
if conductor_api:
capi = conductor_api
else:
capi = conductor.API()
self.availability_zone = ec2utils.get_availability_zone_by_host(
instance['host'], capi)
self.security_groups = secgroup_obj.SecurityGroupList.get_by_instance(
ctxt, instance)
self.mappings = _format_instance_mapping(ctxt, instance)
if instance.get('user_data', None) is not None:
self.userdata_raw = base64.b64decode(instance['user_data'])
else:
self.userdata_raw = None
self.ec2_ids = capi.get_ec2_ids(ctxt,
obj_base.obj_to_primitive(instance))
self.address = address
# expose instance metadata.
self.launch_metadata = utils.instance_meta(instance)
self.password = password.extract_password(instance)
self.uuid = instance.get('uuid')
self.content = {}
self.files = []
# get network info, and the rendered network template
if network_info is None:
network_info = network.API().get_instance_nw_info(ctxt,
instance)
self.ip_info = \
ec2utils.get_ip_info_for_instance_from_nw_info(network_info)
self.network_config = None
cfg = netutils.get_injected_network_template(network_info)
if cfg:
key = "%04i" % len(self.content)
self.content[key] = cfg
self.network_config = {"name": "network_config",
'content_path': "/%s/%s" % (CONTENT_DIR, key)}
# 'content' is passed in from the configdrive code in
        # nova/virt/libvirt/driver.py. That's how we get the injected files
# (personalities) in. AFAIK they're not stored in the db at all,
# so are not available later (web service metadata time).
for (path, contents) in content:
key = "%04i" % len(self.content)
self.files.append({'path': path,
'content_path': "/%s/%s" % (CONTENT_DIR, key)})
self.content[key] = contents
if vd_driver is None:
vdclass = importutils.import_class(CONF.vendordata_driver)
else:
vdclass = vd_driver
self.vddriver = vdclass(instance=instance, address=address,
extra_md=extra_md, network_info=network_info)
self.route_configuration = None
def _route_configuration(self):
if self.route_configuration:
return self.route_configuration
path_handlers = {UD_NAME: self._user_data,
PASS_NAME: self._password,
VD_JSON_NAME: self._vendor_data,
MD_JSON_NAME: self._metadata_as_json,
VERSION: self._handle_version,
CONTENT: self._handle_content}
self.route_configuration = RouteConfiguration(path_handlers)
return self.route_configuration
def get_ec2_metadata(self, version):
if version == "latest":
version = VERSIONS[-1]
if version not in VERSIONS:
raise InvalidMetadataVersion(version)
hostname = self._get_hostname()
floating_ips = self.ip_info['floating_ips']
floating_ip = floating_ips and floating_ips[0] or ''
fixed_ips = self.ip_info['fixed_ips']
fixed_ip = fixed_ips and fixed_ips[0] or ''
fmt_sgroups = [x['name'] for x in self.security_groups]
meta_data = {
'ami-id': self.ec2_ids['ami-id'],
'ami-launch-index': self.instance['launch_index'],
'ami-manifest-path': 'FIXME',
'instance-id': self.ec2_ids['instance-id'],
'hostname': hostname,
'local-ipv4': self.address or fixed_ip,
'reservation-id': self.instance['reservation_id'],
'security-groups': fmt_sgroups}
# public keys are strangely rendered in ec2 metadata service
# meta-data/public-keys/ returns '0=keyname' (with no trailing /)
# and only if there is a public key given.
# '0=keyname' means there is a normally rendered dict at
# meta-data/public-keys/0
#
# meta-data/public-keys/ : '0=%s' % keyname
# meta-data/public-keys/0/ : 'openssh-key'
# meta-data/public-keys/0/openssh-key : '%s' % publickey
if self.instance['key_name']:
meta_data['public-keys'] = {
'0': {'_name': "0=" + self.instance['key_name'],
'openssh-key': self.instance['key_data']}}
if self._check_version('2007-01-19', version):
meta_data['local-hostname'] = hostname
meta_data['public-hostname'] = hostname
meta_data['public-ipv4'] = floating_ip
if False and self._check_version('2007-03-01', version):
# TODO(vish): store product codes
meta_data['product-codes'] = []
if self._check_version('2007-08-29', version):
instance_type = flavors.extract_flavor(self.instance)
meta_data['instance-type'] = instance_type['name']
if False and self._check_version('2007-10-10', version):
# TODO(vish): store ancestor ids
meta_data['ancestor-ami-ids'] = []
if self._check_version('2007-12-15', version):
meta_data['block-device-mapping'] = self.mappings
if 'kernel-id' in self.ec2_ids:
meta_data['kernel-id'] = self.ec2_ids['kernel-id']
if 'ramdisk-id' in self.ec2_ids:
meta_data['ramdisk-id'] = self.ec2_ids['ramdisk-id']
if self._check_version('2008-02-01', version):
meta_data['placement'] = {'availability-zone':
self.availability_zone}
if self._check_version('2008-09-01', version):
meta_data['instance-action'] = 'none'
data = {'meta-data': meta_data}
if self.userdata_raw is not None:
data['user-data'] = self.userdata_raw
return data
def get_ec2_item(self, path_tokens):
# get_ec2_metadata returns dict without top level version
data = self.get_ec2_metadata(path_tokens[0])
return find_path_in_tree(data, path_tokens[1:])
def get_openstack_item(self, path_tokens):
if path_tokens[0] == CONTENT_DIR:
return self._handle_content(path_tokens)
return self._route_configuration().handle_path(path_tokens)
def _metadata_as_json(self, version, path):
metadata = {'uuid': self.uuid}
if self.launch_metadata:
metadata['meta'] = self.launch_metadata
if self.files:
metadata['files'] = self.files
if self.extra_md:
metadata.update(self.extra_md)
if self.network_config:
metadata['network_config'] = self.network_config
if self.instance['key_name']:
metadata['public_keys'] = {
self.instance['key_name']: self.instance['key_data']
}
metadata['hostname'] = self._get_hostname()
metadata['name'] = self.instance['display_name']
metadata['launch_index'] = self.instance['launch_index']
metadata['availability_zone'] = self.availability_zone
if self._check_os_version(GRIZZLY, version):
metadata['random_seed'] = base64.b64encode(os.urandom(512))
return json.dumps(metadata)
def _handle_content(self, path_tokens):
if len(path_tokens) == 1:
raise KeyError("no listing for %s" % "/".join(path_tokens))
if len(path_tokens) != 2:
raise KeyError("Too many tokens for /%s" % CONTENT_DIR)
return self.content[path_tokens[1]]
def _handle_version(self, version, path):
# request for /version, give a list of what is available
ret = [MD_JSON_NAME]
if self.userdata_raw is not None:
ret.append(UD_NAME)
if self._check_os_version(GRIZZLY, version):
ret.append(PASS_NAME)
if self._check_os_version(HAVANA, version):
ret.append(VD_JSON_NAME)
return ret
def _user_data(self, version, path):
if self.userdata_raw is None:
raise KeyError(path)
return self.userdata_raw
def _password(self, version, path):
if self._check_os_version(GRIZZLY, version):
return password.handle_password
raise KeyError(path)
def _vendor_data(self, version, path):
if self._check_os_version(HAVANA, version):
return json.dumps(self.vddriver.get())
raise KeyError(path)
def _check_version(self, required, requested, versions=VERSIONS):
return versions.index(requested) >= versions.index(required)
def _check_os_version(self, required, requested):
return self._check_version(required, requested, OPENSTACK_VERSIONS)
def _get_hostname(self):
return "%s%s%s" % (self.instance['hostname'],
'.' if CONF.dhcp_domain else '',
CONF.dhcp_domain)
def lookup(self, path):
if path == "" or path[0] != "/":
path = posixpath.normpath("/" + path)
else:
path = posixpath.normpath(path)
# fix up requests, prepending /ec2 to anything that does not match
path_tokens = path.split('/')[1:]
if path_tokens[0] not in ("ec2", "openstack"):
if path_tokens[0] == "":
# request for /
path_tokens = ["ec2"]
else:
path_tokens = ["ec2"] + path_tokens
path = "/" + "/".join(path_tokens)
        # all values of 'path' input start with '/' and have no trailing '/'
# specifically handle the top level request
if len(path_tokens) == 1:
if path_tokens[0] == "openstack":
# NOTE(vish): don't show versions that are in the future
today = timeutils.utcnow().strftime("%Y-%m-%d")
versions = [v for v in OPENSTACK_VERSIONS if v <= today]
if OPENSTACK_VERSIONS != versions:
LOG.debug(_("future versions %s hidden in version list"),
[v for v in OPENSTACK_VERSIONS
if v not in versions])
versions += ["latest"]
else:
versions = VERSIONS + ["latest"]
return versions
try:
if path_tokens[0] == "openstack":
data = self.get_openstack_item(path_tokens[1:])
else:
data = self.get_ec2_item(path_tokens[1:])
except (InvalidMetadataVersion, KeyError):
raise InvalidMetadataPath(path)
return data
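    # Illustrative lookups for the routing above (hypothetical instance 'md'):
    #
    #   md.lookup('/')                # -> VERSIONS + ['latest']
    #   md.lookup('latest/meta-data') # rewritten to /ec2/latest/meta-data and
    #                                 # served by get_ec2_item
    #   md.lookup('/openstack/latest/meta_data.json')
    #                                 # routed to _metadata_as_json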
def metadata_for_config_drive(self):
"""Yields (path, value) tuples for metadata elements."""
# EC2 style metadata
for version in VERSIONS + ["latest"]:
if version in CONF.config_drive_skip_versions.split(' '):
continue
data = self.get_ec2_metadata(version)
if 'user-data' in data:
filepath = os.path.join('ec2', version, 'user-data')
yield (filepath, data['user-data'])
del data['user-data']
try:
del data['public-keys']['0']['_name']
except KeyError:
pass
filepath = os.path.join('ec2', version, 'meta-data.json')
yield (filepath, json.dumps(data['meta-data']))
ALL_OPENSTACK_VERSIONS = OPENSTACK_VERSIONS + ["latest"]
for version in ALL_OPENSTACK_VERSIONS:
path = 'openstack/%s/%s' % (version, MD_JSON_NAME)
yield (path, self.lookup(path))
path = 'openstack/%s/%s' % (version, UD_NAME)
if self.userdata_raw is not None:
yield (path, self.lookup(path))
if self._check_version(HAVANA, version, ALL_OPENSTACK_VERSIONS):
path = 'openstack/%s/%s' % (version, VD_JSON_NAME)
yield (path, self.lookup(path))
for (cid, content) in self.content.iteritems():
yield ('%s/%s/%s' % ("openstack", CONTENT_DIR, cid), content)
class RouteConfiguration(object):
"""Routes metadata paths to request handlers."""
def __init__(self, path_handler):
self.path_handlers = path_handler
def _version(self, version):
if version == "latest":
version = OPENSTACK_VERSIONS[-1]
if version not in OPENSTACK_VERSIONS:
raise InvalidMetadataVersion(version)
return version
def handle_path(self, path_tokens):
version = self._version(path_tokens[0])
if len(path_tokens) == 1:
path = VERSION
else:
path = '/'.join(path_tokens[1:])
path_handler = self.path_handlers[path]
if path_handler is None:
raise KeyError(path)
return path_handler(version, path)
class VendorDataDriver(object):
"""The base VendorData Drivers should inherit from."""
def __init__(self, *args, **kwargs):
"""Init method should do all expensive operations."""
self._data = {}
def get(self):
"""Return a dictionary of primitives to be rendered in metadata
        :return: A dictionary of primitives.
"""
return self._data
def get_metadata_by_address(conductor_api, address):
ctxt = context.get_admin_context()
fixed_ip = network.API().get_fixed_ip_by_address(ctxt, address)
return get_metadata_by_instance_id(conductor_api,
fixed_ip['instance_uuid'],
address,
ctxt)
def get_metadata_by_instance_id(conductor_api, instance_id, address,
ctxt=None):
ctxt = ctxt or context.get_admin_context()
instance = instance_obj.Instance.get_by_uuid(ctxt, instance_id)
return InstanceMetadata(instance, address)
def _format_instance_mapping(ctxt, instance):
bdms = block_device_obj.BlockDeviceMappingList.get_by_instance_uuid(
ctxt, instance.uuid)
return block_device.instance_block_mapping(instance, bdms)
def ec2_md_print(data):
if isinstance(data, dict):
output = ''
for key in sorted(data.keys()):
if key == '_name':
continue
if isinstance(data[key], dict):
if '_name' in data[key]:
output += str(data[key]['_name'])
else:
output += key + '/'
else:
output += key
output += '\n'
return output[:-1]
elif isinstance(data, list):
return '\n'.join(data)
else:
return str(data)
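# A small worked example of the EC2-style rendering above (illustrative only):
#
#   ec2_md_print({'hostname': 'vm1',
#                 'public-keys': {'_name': '0=mykey', 'openssh-key': '...'}})
#   # -> 'hostname\n0=mykey'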
def find_path_in_tree(data, path_tokens):
# given a dict/list tree, and a path in that tree, return data found there.
for i in range(0, len(path_tokens)):
if isinstance(data, dict) or isinstance(data, list):
if path_tokens[i] in data:
data = data[path_tokens[i]]
else:
raise KeyError("/".join(path_tokens[0:i]))
else:
if i != len(path_tokens) - 1:
raise KeyError("/".join(path_tokens[0:i]))
data = data[path_tokens[i]]
return data
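# Illustrative behavior of find_path_in_tree (not part of the original code):
#
#   find_path_in_tree({'a': {'b': 'c'}}, ['a', 'b'])  # -> 'c'
#   find_path_in_tree({'a': {'b': 'c'}}, ['a', 'x'])  # raises KeyError('a')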
|
{
"content_hash": "45f1a6ebf41e392cc1b4507b608b5c85",
"timestamp": "",
"source": "github",
"line_count": 534,
"max_line_length": 79,
"avg_line_length": 34.734082397003746,
"alnum_prop": 0.5738084968729782,
"repo_name": "spring-week-topos/nova-week",
"id": "90e8c27a47e188e6120da73e5c84dc73d0aa1afd",
"size": "19280",
"binary": false,
"copies": "3",
"ref": "refs/heads/spring-week",
"path": "nova/api/metadata/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "13843251"
},
{
"name": "Shell",
"bytes": "17451"
}
],
"symlink_target": ""
}
|
from jupyter_core.paths import jupyter_config_dir, jupyter_data_dir
import os
import sys
import glob
import shutil
data_dir = jupyter_data_dir()
dest_templates = os.path.join(data_dir, 'templates')
src_templates = os.path.join(data_dir, 'nbextensions', 'templates')
dest_extensions = os.path.join(data_dir, 'extensions')
src_extensions = os.path.join(data_dir, 'nbextensions','extensions')
# make sure destinations exist
if not os.path.exists(dest_templates):
    os.mkdir(dest_templates)
if not os.path.exists(dest_extensions):
    os.mkdir(dest_extensions)
# Finally copy templates and pre/postprocessor
for filename in glob.glob(os.path.join(src_templates,'*')):
shutil.copy2(filename,dest_templates)
for filename in glob.glob(os.path.join(src_extensions,'*')):
shutil.copy2(filename,dest_extensions)
|
{
"content_hash": "112b399bdca02e7a75701bd0b88ec1a6",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 68,
"avg_line_length": 32.84615384615385,
"alnum_prop": 0.7306791569086651,
"repo_name": "benvarkey/IPython-notebook-extensions",
"id": "dcca9c9e04e19e345253013b91ff0aae54e8f61c",
"size": "854",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "nbextensions/usability/highlighter/mv_paths.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "38375"
},
{
"name": "HTML",
"bytes": "1134905"
},
{
"name": "JavaScript",
"bytes": "516473"
},
{
"name": "Jupyter Notebook",
"bytes": "296013"
},
{
"name": "Python",
"bytes": "65298"
},
{
"name": "Shell",
"bytes": "3154"
},
{
"name": "Smarty",
"bytes": "8096"
},
{
"name": "TeX",
"bytes": "104015"
}
],
"symlink_target": ""
}
|
from typing import List, NamedTuple
from marshmallow import Schema, fields
from airflow.api_connexion.schemas.common_schema import (
ClassReferenceSchema,
ColorField,
TimeDeltaSchema,
WeightRuleField,
)
from airflow.api_connexion.schemas.dag_schema import DAGSchema
from airflow.models.baseoperator import BaseOperator
class TaskSchema(Schema):
"""Task schema"""
class_ref = fields.Method("_get_class_reference", dump_only=True)
task_id = fields.String(dump_only=True)
owner = fields.String(dump_only=True)
start_date = fields.DateTime(dump_only=True)
end_date = fields.DateTime(dump_only=True)
trigger_rule = fields.String(dump_only=True)
extra_links = fields.List(
fields.Nested(ClassReferenceSchema), dump_only=True, attribute="operator_extra_links"
)
depends_on_past = fields.Boolean(dump_only=True)
wait_for_downstream = fields.Boolean(dump_only=True)
retries = fields.Number(dump_only=True)
queue = fields.String(dump_only=True)
pool = fields.String(dump_only=True)
pool_slots = fields.Number(dump_only=True)
execution_timeout = fields.Nested(TimeDeltaSchema, dump_only=True)
retry_delay = fields.Nested(TimeDeltaSchema, dump_only=True)
retry_exponential_backoff = fields.Boolean(dump_only=True)
priority_weight = fields.Number(dump_only=True)
weight_rule = WeightRuleField(dump_only=True)
ui_color = ColorField(dump_only=True)
ui_fgcolor = ColorField(dump_only=True)
template_fields = fields.List(fields.String(), dump_only=True)
sub_dag = fields.Nested(DAGSchema, dump_only=True)
downstream_task_ids = fields.List(fields.String(), dump_only=True)
def _get_class_reference(self, obj):
result = ClassReferenceSchema().dump(obj)
return result.data if hasattr(result, "data") else result
class TaskCollection(NamedTuple):
"""List of Tasks with metadata"""
tasks: List[BaseOperator]
total_entries: int
class TaskCollectionSchema(Schema):
"""Schema for TaskCollection"""
tasks = fields.List(fields.Nested(TaskSchema))
total_entries = fields.Int()
task_schema = TaskSchema()
task_collection_schema = TaskCollectionSchema()
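# A hedged usage sketch (hypothetical operator; the .data fallback mirrors the
# marshmallow 2 vs. 3 handling in _get_class_reference above):
#
#   from airflow.operators.dummy_operator import DummyOperator
#   task = DummyOperator(task_id='example_task', owner='airflow')
#   result = task_schema.dump(task)
#   data = result.data if hasattr(result, 'data') else result
#   assert data['task_id'] == 'example_task'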
|
{
"content_hash": "04cedb03a65c7636828f6b459b559015",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 93,
"avg_line_length": 34.46875,
"alnum_prop": 0.7234814143245694,
"repo_name": "airbnb/airflow",
"id": "d87123fb5d8d4e1c6d7b31ff7c9d2df1e0278148",
"size": "2992",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "airflow/api_connexion/schemas/task_schema.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "36374"
},
{
"name": "HTML",
"bytes": "99535"
},
{
"name": "JavaScript",
"bytes": "891618"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "796220"
},
{
"name": "Shell",
"bytes": "9040"
}
],
"symlink_target": ""
}
|
from .auth import basic_auth_opener
from .site import SharePointSite
class ExitCodes(object):
MISSING_ACTION = 1
NO_SUCH_ARGUMENT = 2
NO_SUCH_LIST = 3
MISSING_ARGUMENT = 4
MISSING_CREDENTIALS = 5
INVALID_CREDENTIALS = 6
NO_SUCH_ACTION = 7
def main():
from optparse import OptionParser, OptionGroup
import os
import sys
import warnings
from lxml import etree
warnings.simplefilter("ignore")
description = ["A utility to extract data from SharePoint sites, returning ",
"XML. Available actions are 'lists' (returns a list of ",
"lists in the SharePoint site), and 'exportlists' (returns ",
"data for all or specified lists"]
parser = OptionParser(usage='%prog action [options]',
description=''.join(description))
parser.add_option('-s', '--site-url', dest='site_url', help='Root URL for the SharePoint site')
parser.add_option('-u', '--username', dest='username', help='Username')
parser.add_option('-p', '--password', dest='password', help='Password')
parser.add_option('-c', '--credentials', dest='credentials', help="File containing 'username:password'.")
parser.add_option('-n', '--pretty-print', dest='pretty_print', action='store_true', default=True)
parser.add_option('-N', '--no-pretty-print', dest='pretty_print', action='store_false')
list_options = OptionGroup(parser, 'List options')
list_options.add_option('-l', '--list-name', dest='list_names', action='append',
help='Name of a list to retrieve. Can be repeated to return multiple lists. If not present '
'at all, all lists will be returned.')
list_options.add_option('-d', '--data', dest='include_data', action='store_true', default=True,
help="Include list data in output (default for exportlists)")
list_options.add_option('-D', '--no-data', dest='include_data', action='store_false',
help="Don't include list data in output")
list_options.add_option('-f', '--fields', dest='include_field_definitions', action='store_true', default=True,
help="Include field definitions data in output (default for exportlists)")
list_options.add_option('-F', '--no-fields', dest='include_field_definitions', action='store_false',
help="Don't include field definitions data in output")
list_options.add_option('-t', '--transclude-xml', dest='transclude_xml', action='store_true', default=False,
help="Transclude linked XML files into row data")
list_options.add_option('-T', '--no-transclude-xml', dest='transclude_xml', action='store_false',
help="Don't transclude XML (default)")
list_options.add_option('--include-users', dest='include_users', action='store_true', default=False,
help="Include data about referenced users")
list_options.add_option('--no-include-users', dest='include_users', action='store_false',
help="Don't include data about users (default)")
list_options.add_option('--description', dest='description', default='',
help='Description when creating lists')
list_options.add_option('--template', dest='template', default='100',
help='List template name')
list_options.add_option('--timeout', dest='timeout', default=None, type="float",
help='Connection timeout (in seconds)')
parser.add_option_group(list_options)
options, args = parser.parse_args()
if not options.site_url:
sys.stderr.write("--site-url is a required parameter. Use -h for more information.\n")
sys.exit(ExitCodes.MISSING_ARGUMENT)
if options.credentials:
username, password = open(os.path.expanduser(options.credentials)).read().strip().split(':', 1)
else:
username, password = options.username, options.password
if not username:
username = raw_input("Username: ")
if not password:
from getpass import getpass
password = getpass()
opener = basic_auth_opener(options.site_url, username, password)
site = SharePointSite(options.site_url, opener, timeout=options.timeout)
    if len(args) != 1:
        sys.stderr.write("You must provide an action. Use -h for more information.\n")
        sys.exit(ExitCodes.MISSING_ACTION)
action, xml = args[0], None
if action == 'lists':
xml = site.as_xml(include_lists=True,
list_names=options.list_names or None,
include_list_data=False,
include_field_definitions=False)
elif action == 'exportlists':
xml = site.as_xml(include_lists=True,
include_users=options.include_users,
list_names=options.list_names or None,
include_list_data=options.include_data,
include_field_definitions=options.include_field_definitions,
transclude_xml=options.transclude_xml)
    elif action == 'deletelists':
        if not options.list_names:
            sys.stderr.write("You must specify a list. See -h for more information.\n")
            sys.exit(ExitCodes.MISSING_ARGUMENT)
        for list_name in options.list_names:
            try:
                site.lists.remove(site.lists[list_name])
            except KeyError:
                sys.stderr.write("No such list: '{0}'\n".format(list_name))
                sys.exit(ExitCodes.NO_SUCH_LIST)
        sys.exit(0)
    elif action == 'addlists':
        if not options.list_names:
            sys.stderr.write("You must specify a list. See -h for more information.\n")
            sys.exit(ExitCodes.MISSING_ARGUMENT)
        for list_name in options.list_names:
            try:
                site.lists.create(list_name, options.description, options.template)
            except KeyError:
                sys.stderr.write("No such list: '{0}'\n".format(list_name))
                sys.exit(ExitCodes.NO_SUCH_LIST)
xml = site.as_xml(list_names=options.list_names or None,
include_field_definitions=options.include_field_definitions)
elif action == 'shell':
try:
from IPython.Shell import IPShellEmbed
ipshell = IPShellEmbed()
ipshell()
except ImportError:
import code
import readline
readline.parse_and_bind("tab: complete")
shell = code.InteractiveConsole({'site': site})
shell.interact()
else:
sys.stderr.write("Unsupported action: '%s'. Use -h to discover supported actions.\n")
sys.exit(1)
if xml is not None:
sys.stdout.write(etree.tostring(xml, pretty_print=options.pretty_print))
if __name__ == '__main__':
main()
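# Example invocations (hypothetical site URL and credentials file):
#
#   python -m sharepoint.cmd lists -s https://example.sharepoint.com/sites/demo \
#       -c ~/.sharepoint-credentials
#   python -m sharepoint.cmd exportlists -s https://example.sharepoint.com/sites/demo \
#       -u alice -l 'Task List' --no-data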
|
{
"content_hash": "420e5a5f5343928f04269be8d5466bb9",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 120,
"avg_line_length": 47.91216216216216,
"alnum_prop": 0.5975179805387111,
"repo_name": "ox-it/python-sharepoint",
"id": "4c50b3b8d13c6fec0976388b0aefcb0abe256e05",
"size": "7091",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sharepoint/cmd.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "56588"
}
],
"symlink_target": ""
}
|
'''
Author: alexc89@mit.edu
IRB140LCMWrapper
This script connects the OPEN ABB Driver to LCM. It publishes the IRB140's joint and cartesian state on the IRB140STATE LCM channel and listens on the IRB140Input, IRB140JOINTCMD, and IRB140JOINTPLAN channels to control the IRB140's joints.
Please note that the unit of the joint state and command depends on the setting on the IRB140; this script DOES NOT translate the command into a specific unit like radians.
'''
import lcm
import time
import abb
from ctypes import *
import threading
#Import LCM Messages
from abblcm import *
#Message Conversion
def convertABBstate(joint_pos,joint_vel,cartesian):
msg = abb_irb140state()
msg.utime= time.time()*1000000
msg.joints = abb_irb140joints()
msg.cartesian = abb_irb140cartesian()
msg.joints.utime = msg.utime
msg.cartesian.utime= msg.utime
msg.joints.pos = joint_pos
msg.joints.vel = joint_vel
msg.cartesian.pos = cartesian[0]
msg.cartesian.quat = cartesian[1]
return msg
def convertSensordata(rawdata):
msg = abb_irb140ftsensor()
msg.utime = time.time()*1000000
    msg.hand_force = rawdata[0:3]  # values 0-2 are force; 3-5 are torque
msg.hand_torque = rawdata[3:6]
return msg
def convertACH_Command(msg):
return msg.Joints
class abbIRB140LCMWrapper:
def __init__(self):
        self.robot = abb.Robot()  # Robot connection to openABB; pass the robot's IP if needed.
self.lc = lcm.LCM("udpm://239.255.76.67:7667?ttl=1")
self.lc.subscribe("IRB140Input",self.command_handler)
self.lc.subscribe("IRB140JOINTPLAN",self.plan_handler)
self.lc.subscribe("IRB140JOINTCMD",self.command_handler)
def plan_handler(self,channel,data):
print "receive plan"
msg = abb_irb140joint_plan.decode(data)
for i in range(msg.n_cmd_times):
self.robot.addJointPosBuffer(msg.joint_cmd[i].pos)
self.robot.executeJointPosBuffer()
self.robot.clearJointPosBuffer()
def command_handler(self,channel,data):
print "receive command"
msg = abb_irb140joints.decode(data)
jointCommand = msg.pos
self.robot.setJoints(jointCommand)
def broadcast_state(self):
jointPos = self.robot.getJoints()
cartesian = self.robot.getCartesian()
#ABB drive to LCM conversion
msg = convertABBstate(jointPos,[0,0,0,0,0,0],cartesian)
self.lc.publish("IRB140STATE", msg.encode())
#sensordata = self.robot.getSensors2()
#Force Torque Sensor, Not yet Tested -AC Feb 23
#msg = convertSensordata(sensordata)
#self.lc.publish("IRB140FTSENSOR", msg.encode())
def mainLoop(self,freq):
        pauseDelay = 1.0 / freq  # In seconds.
while True:
self.broadcast_state()
self.lc.handle()
time.sleep(pauseDelay)
if __name__ == "__main__":
wrapper = abbIRB140LCMWrapper()
print "IRB140LCMWrapper finish initialization, Begin transmission to LCM"
wrapper.mainLoop(10) #Hertz
print "IRB140LCMWrapper terminated successfully."
|
{
"content_hash": "8f9f09b4f7897c77350aa0f9165b7e95",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 176,
"avg_line_length": 33.62921348314607,
"alnum_prop": 0.6795856999665887,
"repo_name": "WeirdCoder/ABB-IRB140",
"id": "50e765418b062d9a1d9ca099b95af46cd9365741",
"size": "2993",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "robotsuite-lcm-util/IRB140LCM_monitor2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "14225"
},
{
"name": "Python",
"bytes": "23956"
},
{
"name": "Shell",
"bytes": "140"
}
],
"symlink_target": ""
}
|
"""Helpers for overriding numpy functions.
We override numpy functions in `~astropy.units.Quantity.__array_function__`.
In this module, the numpy functions are split in four groups, each of
which has an associated `set` or `dict`:
1. SUBCLASS_SAFE_FUNCTIONS (set), if the numpy implementation
supports Quantity; we pass on to ndarray.__array_function__.
2. FUNCTION_HELPERS (dict), if the numpy implementation is usable
after converting quantities to arrays with suitable units,
and possibly setting units on the result.
3. DISPATCHED_FUNCTIONS (dict), if the function makes sense but
requires a Quantity-specific implementation
4. UNSUPPORTED_FUNCTIONS (set), if the function does not make sense.
For the FUNCTION_HELPERS `dict`, the value is a function that does the
unit conversion. It should take the same arguments as the numpy
function would (though one can use ``*args`` and ``**kwargs``) and
return a tuple of ``args, kwargs, unit, out``, where ``args`` and
``kwargs`` will be passed on to the numpy implementation,
``unit`` is a possible unit of the result (`None` if it should not be
converted to Quantity), and ``out`` is a possible output Quantity passed
in, which will be filled in-place.
For the DISPATCHED_FUNCTIONS `dict`, the value is a function that
implements the numpy functionality for Quantity input. It should
return a tuple of ``result, unit, out``, where ``result`` is generally
a plain array with the result, and ``unit`` and ``out`` are as above.
If unit is `None`, result gets returned directly, so one can also
return a Quantity directly using ``quantity_result, None, None``.
"""
import functools
import operator
import numpy as np
from astropy.units.core import (
UnitsError, UnitTypeError, dimensionless_unscaled)
from astropy.utils.compat import NUMPY_LT_1_17, NUMPY_LT_1_15, NUMPY_LT_1_18
from astropy.utils import isiterable
if NUMPY_LT_1_17: # pragma: no cover
# pre 1.16, overrides.py did not exist; in 1.16, it existed, but
# __array_function__ overrides were turned off by default.
ARRAY_FUNCTION_ENABLED = (hasattr(np.core, 'overrides') and
np.core.overrides.ENABLE_ARRAY_FUNCTION)
else:
# In 1.17, overrides are enabled by default, but it is still possible to
# turn them off using an environment variable. We use getattr since it
# is planned to remove that possibility in later numpy versions.
ARRAY_FUNCTION_ENABLED = getattr(np.core.overrides,
'ENABLE_ARRAY_FUNCTION', True)
SUBCLASS_SAFE_FUNCTIONS = set()
"""Functions with implementations supporting subclasses like Quantity."""
FUNCTION_HELPERS = {}
"""Functions with implementations usable with proper unit conversion."""
DISPATCHED_FUNCTIONS = {}
"""Functions for which we provide our own implementation."""
UNSUPPORTED_FUNCTIONS = set()
"""Functions that cannot sensibly be used with quantities."""
SUBCLASS_SAFE_FUNCTIONS |= {
np.shape, np.size, np.ndim,
np.reshape, np.ravel, np.moveaxis, np.rollaxis, np.swapaxes,
np.transpose, np.atleast_1d, np.atleast_2d, np.atleast_3d,
np.expand_dims, np.squeeze, np.broadcast_to, np.broadcast_arrays,
np.flip, np.fliplr, np.flipud, np.rot90,
np.argmin, np.argmax, np.argsort, np.lexsort, np.searchsorted,
np.nonzero, np.argwhere, np.flatnonzero,
np.diag_indices_from, np.triu_indices_from, np.tril_indices_from,
np.real, np.imag, np.diag, np.diagonal, np.diagflat,
np.empty_like, np.zeros_like,
np.compress, np.extract, np.delete, np.trim_zeros, np.roll, np.take,
np.put, np.fill_diagonal, np.tile, np.repeat,
np.split, np.array_split, np.hsplit, np.vsplit, np.dsplit,
np.stack, np.column_stack, np.hstack, np.vstack, np.dstack,
np.amax, np.amin, np.ptp, np.sum, np.cumsum,
np.prod, np.product, np.cumprod, np.cumproduct,
np.round, np.around,
np.fix, np.angle, np.i0, np.clip,
np.isposinf, np.isneginf, np.isreal, np.iscomplex,
np.average, np.mean, np.std, np.var, np.median, np.trace,
np.nanmax, np.nanmin, np.nanargmin, np.nanargmax, np.nanmean,
np.nanmedian, np.nansum, np.nancumsum, np.nanstd, np.nanvar,
np.nanprod, np.nancumprod,
np.einsum_path, np.trapz, np.linspace,
np.sort, np.msort, np.partition, np.meshgrid,
np.common_type, np.result_type, np.can_cast, np.min_scalar_type,
np.iscomplexobj, np.isrealobj,
np.shares_memory, np.may_share_memory,
np.apply_along_axis}
if not NUMPY_LT_1_15:
SUBCLASS_SAFE_FUNCTIONS |= {np.take_along_axis, np.put_along_axis}
if NUMPY_LT_1_18:
SUBCLASS_SAFE_FUNCTIONS |= {np.alen}
# Implemented as methods on Quantity:
# np.ediff1d is from setops, but we support it anyway; the others
# currently return NotImplementedError.
# TODO: move latter to UNSUPPORTED? Would raise TypeError instead.
SUBCLASS_SAFE_FUNCTIONS |= {
np.ediff1d,
np.all, np.any, np.sometrue, np.alltrue}
# Subclass safe, but possibly better if overridden (e.g., with different
# default arguments for isclose, allclose).
# TODO: decide on desired behaviour.
SUBCLASS_SAFE_FUNCTIONS |= {
np.isclose, np.allclose,
np.array2string, np.array_repr, np.array_str}
# Nonsensical for quantities.
UNSUPPORTED_FUNCTIONS |= {
np.packbits, np.unpackbits, np.unravel_index,
np.ravel_multi_index, np.ix_, np.cov, np.corrcoef,
np.busday_count, np.busday_offset, np.datetime_as_string,
np.is_busday}
# The following are not just unsupported, but so unlikely to be thought
# to be supported that we ignore them in testing. (Kept in a separate
# variable so that we can check consistency in the test routine -
# test_quantity_non_ufuncs.py)
IGNORED_FUNCTIONS = {
# Deprecated
np.asscalar,
# I/O - useless for Quantity, since no way to store the unit.
np.save, np.savez, np.savetxt, np.savez_compressed,
# Polynomials
np.poly, np.polyadd, np.polyder, np.polydiv, np.polyfit, np.polyint,
np.polymul, np.polysub, np.polyval, np.roots, np.vander,
# financial
np.fv, np.ipmt, np.irr, np.mirr, np.nper, np.npv, np.pmt, np.ppmt,
np.pv, np.rate}
if NUMPY_LT_1_18:
IGNORED_FUNCTIONS |= {np.rank}
else:
IGNORED_FUNCTIONS |= {np.alen}
UNSUPPORTED_FUNCTIONS |= IGNORED_FUNCTIONS
class FunctionAssigner:
def __init__(self, assignments):
self.assignments = assignments
def __call__(self, f=None, helps=None):
"""Add a helper to a numpy function.
Normally used as a decorator.
If ``helps`` is given, it should be the numpy function helped (or an
iterable of numpy functions helped).
If ``helps`` is not given, it is assumed the function helped is the
numpy function with the same name as the decorated function.
"""
if f is not None:
if helps is None:
helps = getattr(np, f.__name__)
if not isiterable(helps):
helps = (helps,)
for h in helps:
self.assignments[h] = f
return f
elif helps is not None:
return functools.partial(self.__call__, helps=helps)
else: # pragma: no cover
raise ValueError("function_helper requires at least one argument.")
function_helper = FunctionAssigner(FUNCTION_HELPERS)
dispatched_function = FunctionAssigner(DISPATCHED_FUNCTIONS)
@function_helper(helps={np.copy, np.asfarray, np.real_if_close,
np.sort_complex, np.resize})
def invariant_a_helper(a, *args, **kwargs):
return (a.view(np.ndarray),) + args, kwargs, a.unit, None
@function_helper(helps={np.tril, np.triu})
def invariant_m_helper(m, *args, **kwargs):
return (m.view(np.ndarray),) + args, kwargs, m.unit, None
# Note that ones_like does *not* work by default (unlike zeros_like) since if
# one creates an empty array with a unit, one cannot just fill it with unity.
# Indeed, in this respect, it is a bit of an odd function for Quantity. On the
# other hand, it matches the idea that a unit is the same as the quantity with
# that unit and value of 1. Also, it used to work without __array_function__.
@function_helper
def ones_like(a, *args, **kwargs):
subok = args[2] if len(args) > 2 else kwargs.pop('subok', True)
unit = a.unit if subok else None
return (a.view(np.ndarray),) + args, kwargs, unit, None
@function_helper
def sinc(x):
from astropy.units.si import radian
try:
x = x.to_value(radian)
except UnitsError:
raise UnitTypeError("Can only apply 'sinc' function to "
"quantities with angle units")
return (x,), {}, dimensionless_unscaled, None
@dispatched_function
def unwrap(p, discont=None, axis=-1):
from astropy.units.si import radian
if discont is None:
discont = np.pi << radian
p, discont = _as_quantities(p, discont)
result = np.unwrap.__wrapped__(p.to_value(radian),
discont.to_value(radian), axis=axis)
result = radian.to(p.unit, result)
return result, p.unit, None
@function_helper
def argpartition(a, *args, **kwargs):
return (a.view(np.ndarray),) + args, kwargs, None, None
@function_helper
def full_like(a, fill_value, *args, **kwargs):
unit = a.unit if kwargs.get('subok', True) else None
return (a.view(np.ndarray),
a._to_own_unit(fill_value)) + args, kwargs, unit, None
@function_helper
def putmask(a, mask, values):
from astropy.units import Quantity
if isinstance(a, Quantity):
return (a.view(np.ndarray), mask,
a._to_own_unit(values)), {}, a.unit, None
elif isinstance(values, Quantity):
return (a, mask,
values.to_value(dimensionless_unscaled)), {}, None, None
else:
raise NotImplementedError
@function_helper
def place(arr, mask, vals):
from astropy.units import Quantity
if isinstance(arr, Quantity):
return (arr.view(np.ndarray), mask,
arr._to_own_unit(vals)), {}, arr.unit, None
elif isinstance(vals, Quantity):
return (arr, mask,
vals.to_value(dimensionless_unscaled)), {}, None, None
else:
raise NotImplementedError
@function_helper
def copyto(dst, src, *args, **kwargs):
from astropy.units import Quantity
if isinstance(dst, Quantity):
return ((dst.view(np.ndarray), dst._to_own_unit(src)) + args,
kwargs, None, None)
elif isinstance(src, Quantity):
return ((dst, src.to_value(dimensionless_unscaled)) + args,
kwargs, None, None)
else:
raise NotImplementedError
@function_helper
def nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None):
nan = x._to_own_unit(nan)
if posinf is not None:
posinf = x._to_own_unit(posinf)
if neginf is not None:
neginf = x._to_own_unit(neginf)
return ((x.view(np.ndarray),),
            dict(copy=copy, nan=nan, posinf=posinf, neginf=neginf),
x.unit, None)
def _as_quantity(a):
"""Convert argument to a Quantity (or raise NotImplementedError)."""
from astropy.units import Quantity
try:
return Quantity(a, copy=False, subok=True)
except Exception:
# If we cannot convert to Quantity, we should just bail.
raise NotImplementedError
def _as_quantities(*args):
"""Convert arguments to Quantity (or raise NotImplentedError)."""
from astropy.units import Quantity
try:
return tuple(Quantity(a, copy=False, subok=True)
for a in args)
except Exception:
# If we cannot convert to Quantity, we should just bail.
raise NotImplementedError
def _quantities2arrays(*args):
"""Convert to Quantities with the unit of the first argument."""
qs = _as_quantities(*args)
unit = qs[0].unit
# Allow any units error to be raised.
arrays = tuple(q.to_value(unit) for q in qs)
return arrays, unit
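# A worked example (illustrative; u is astropy.units):
#
#   _quantities2arrays(1. * u.km, 500. * u.m)
#   # -> ((1.0, 0.5), Unit("km")) -- values converted to the first unit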
def _iterable_helper(*args, out=None, **kwargs):
"""Convert arguments to Quantity, and treat possible 'out'."""
from astropy.units import Quantity
if out is not None:
if isinstance(out, Quantity):
kwargs['out'] = out.view(np.ndarray)
else:
# TODO: for an ndarray output, we could in principle
# try converting all Quantity to dimensionless.
raise NotImplementedError
arrays, unit = _quantities2arrays(*args)
return arrays, kwargs, unit, out
@function_helper
def concatenate(arrays, axis=0, out=None):
# TODO: make this smarter by creating an appropriately shaped
# empty output array and just filling it.
arrays, kwargs, unit, out = _iterable_helper(*arrays, out=out, axis=axis)
return (arrays,), kwargs, unit, out
@dispatched_function
def block(arrays):
# We need to override block since the numpy implementation can take two
# different paths, one for concatenation, one for creating a large empty
# result array in which parts are set. Each assumes array input and
# cannot be used directly. Since it would be very costly to inspect all
# arrays and then turn them back into a nested list, we just copy here the
# second implementation, np.core.shape_base._block_slicing, since it is
# shortest and easiest.
(arrays, list_ndim, result_ndim,
final_size) = np.core.shape_base._block_setup(arrays)
shape, slices, arrays = np.core.shape_base._block_info_recursion(
arrays, list_ndim, result_ndim)
# Here, one line of difference!
arrays, unit = _quantities2arrays(*arrays)
# Back to _block_slicing
dtype = np.result_type(*[arr.dtype for arr in arrays])
F_order = all(arr.flags['F_CONTIGUOUS'] for arr in arrays)
C_order = all(arr.flags['C_CONTIGUOUS'] for arr in arrays)
order = 'F' if F_order and not C_order else 'C'
result = np.empty(shape=shape, dtype=dtype, order=order)
for the_slice, arr in zip(slices, arrays):
result[(Ellipsis,) + the_slice] = arr
return result, unit, None
@function_helper
def choose(a, choices, out=None, **kwargs):
choices, kwargs, unit, out = _iterable_helper(*choices, out=out, **kwargs)
return (a, choices,), kwargs, unit, out
@function_helper
def select(condlist, choicelist, default=0):
choicelist, kwargs, unit, out = _iterable_helper(*choicelist)
if default != 0:
default = (1 * unit)._to_own_unit(default)
return (condlist, choicelist, default), kwargs, unit, out
@dispatched_function
def piecewise(x, condlist, funclist, *args, **kw):
from astropy.units import Quantity
# Copied implementation from numpy.lib.function_base.piecewise,
# taking care of units of function outputs.
n2 = len(funclist)
# undocumented: single condition is promoted to a list of one condition
if np.isscalar(condlist) or (
not isinstance(condlist[0], (list, np.ndarray)) and x.ndim != 0):
condlist = [condlist]
if any(isinstance(c, Quantity) for c in condlist):
raise NotImplementedError
condlist = np.array(condlist, dtype=bool)
n = len(condlist)
if n == n2 - 1: # compute the "otherwise" condition.
condelse = ~np.any(condlist, axis=0, keepdims=True)
condlist = np.concatenate([condlist, condelse], axis=0)
n += 1
elif n != n2:
raise ValueError(
"with {} condition(s), either {} or {} functions are expected"
.format(n, n, n+1)
)
y = np.zeros(x.shape, x.dtype)
where = []
what = []
for k in range(n):
item = funclist[k]
if not callable(item):
where.append(condlist[k])
what.append(item)
else:
vals = x[condlist[k]]
if vals.size > 0:
where.append(condlist[k])
what.append(item(vals, *args, **kw))
what, unit = _quantities2arrays(*what)
for item, value in zip(where, what):
y[item] = value
return y, unit, None
@function_helper
def append(arr, values, *args, **kwargs):
arrays, unit = _quantities2arrays(arr, values)
return arrays + args, kwargs, unit, None
@function_helper
def insert(arr, obj, values, *args, **kwargs):
from astropy.units import Quantity
if isinstance(obj, Quantity):
raise NotImplementedError
(arr, values), unit = _quantities2arrays(arr, values)
return (arr, obj, values) + args, kwargs, unit, None
@function_helper
def pad(array, pad_width, mode='constant', **kwargs):
# pad dispatches only on array, so that must be a Quantity.
for key in 'constant_values', 'end_values':
value = kwargs.pop(key, None)
if value is None:
continue
if not isinstance(value, tuple):
value = (value,)
new_value = []
for v in value:
new_value.append(
tuple(array._to_own_unit(_v) for _v in v)
if isinstance(v, tuple) else array._to_own_unit(v))
kwargs[key] = new_value
return (array.view(np.ndarray), pad_width, mode), kwargs, array.unit, None
@function_helper
def where(condition, *args):
from astropy.units import Quantity
if isinstance(condition, Quantity) or len(args) != 2:
raise NotImplementedError
args, unit = _quantities2arrays(*args)
return (condition,) + args, {}, unit, None
# Quantile was only introduced in numpy 1.15.
@function_helper(helps=({np.quantile, np.nanquantile}
if not NUMPY_LT_1_15 else ()))
def quantile(a, q, *args, _q_unit=dimensionless_unscaled, **kwargs):
if len(args) >= 2:
out = args[1]
args = args[:1] + args[2:]
else:
out = kwargs.pop('out', None)
from astropy.units import Quantity
if isinstance(q, Quantity):
q = q.to_value(_q_unit)
(a,), kwargs, unit, out = _iterable_helper(a, out=out, **kwargs)
return (a, q) + args, kwargs, unit, out
@function_helper(helps={np.percentile, np.nanpercentile})
def percentile(a, q, *args, **kwargs):
from astropy.units import percent
return quantile(a, q, *args, _q_unit=percent, **kwargs)
@function_helper
def count_nonzero(a, *args, **kwargs):
return (a.value,) + args, kwargs, None, None
@function_helper
def array_equal(a1, a2):
args, unit = _quantities2arrays(a1, a2)
return args, {}, None, None
@function_helper
def array_equiv(a1, a2):
args, unit = _quantities2arrays(a1, a2)
return args, {}, None, None
@function_helper(helps={np.dot, np.outer})
def dot_like(a, b, out=None):
from astropy.units import Quantity
a, b = _as_quantities(a, b)
unit = a.unit * b.unit
if out is not None:
if not isinstance(out, Quantity):
raise NotImplementedError
return tuple(x.view(np.ndarray) for x in (a, b, out)), {}, unit, out
else:
return (a.view(np.ndarray), b.view(np.ndarray)), {}, unit, None
@function_helper(helps={np.cross, np.inner, np.vdot, np.tensordot, np.kron,
np.correlate, np.convolve})
def cross_like(a, b, *args, **kwargs):
a, b = _as_quantities(a, b)
unit = a.unit * b.unit
return (a.view(np.ndarray), b.view(np.ndarray)) + args, kwargs, unit, None
@function_helper
def einsum(subscripts, *operands, out=None, **kwargs):
from astropy.units import Quantity
if not isinstance(subscripts, str):
raise ValueError('only "subscripts" string mode supported for einsum.')
if out is not None:
if not isinstance(out, Quantity):
raise NotImplementedError
else:
kwargs['out'] = out.view(np.ndarray)
qs = _as_quantities(*operands)
unit = functools.reduce(operator.mul, (q.unit for q in qs),
dimensionless_unscaled)
arrays = tuple(q.view(np.ndarray) for q in qs)
return (subscripts,) + arrays, kwargs, unit, out
@function_helper
def bincount(x, weights=None, minlength=0):
from astropy.units import Quantity
if isinstance(x, Quantity):
raise NotImplementedError
return (x, weights.value, minlength), {}, weights.unit, None
@function_helper
def digitize(x, bins, *args, **kwargs):
arrays, unit = _quantities2arrays(x, bins)
return arrays + args, kwargs, None, None
def _check_bins(bins, unit):
from astropy.units import Quantity
check = _as_quantity(bins)
if check.ndim > 0:
return check.to_value(unit)
elif isinstance(bins, Quantity):
# bins should be an integer (or at least definitely not a Quantity).
raise NotImplementedError
else:
return bins
@function_helper
def histogram(a, bins=10, range=None, weights=None, density=None):
if weights is not None:
weights = _as_quantity(weights)
unit = weights.unit
weights = weights.value
else:
unit = None
a = _as_quantity(a)
if not isinstance(bins, str):
bins = _check_bins(bins, a.unit)
if density:
unit = (unit or 1) / a.unit
return ((a.value, bins, range), {'weights': weights, 'density': density},
(unit, a.unit), None)
# histogram_bin_edges was only introduced in numpy 1.15.
@function_helper(helps=np.histogram_bin_edges if not NUMPY_LT_1_15 else ())
def histogram_bin_edges(a, bins=10, range=None, weights=None):
# weights is currently unused
a = _as_quantity(a)
if not isinstance(bins, str):
bins = _check_bins(bins, a.unit)
return (a.value, bins, range, weights), {}, a.unit, None
@function_helper
def histogram2d(x, y, bins=10, range=None, weights=None, density=None):
from astropy.units import Quantity
if weights is not None:
weights = _as_quantity(weights)
unit = weights.unit
weights = weights.value
else:
unit = None
x, y = _as_quantities(x, y)
try:
n = len(bins)
except TypeError:
# bins should be an integer (or at least definitely not a Quantity).
if isinstance(bins, Quantity):
raise NotImplementedError
else:
if n == 1:
raise NotImplementedError
elif n == 2 and not isinstance(bins, Quantity):
bins = [_check_bins(b, unit)
for (b, unit) in zip(bins, (x.unit, y.unit))]
else:
bins = _check_bins(bins, x.unit)
y = y.to(x.unit)
if density:
unit = (unit or 1) / x.unit / y.unit
return ((x.value, y.value, bins, range),
{'weights': weights, 'density': density},
(unit, x.unit, y.unit), None)
@function_helper
def histogramdd(sample, bins=10, range=None, weights=None, density=None):
if weights is not None:
weights = _as_quantity(weights)
unit = weights.unit
weights = weights.value
else:
unit = None
try:
# Sample is an ND-array.
_, D = sample.shape
except (AttributeError, ValueError):
# Sample is a sequence of 1D arrays.
sample = _as_quantities(*sample)
sample_units = [s.unit for s in sample]
sample = [s.value for s in sample]
D = len(sample)
else:
sample = _as_quantity(sample)
sample_units = [sample.unit] * D
try:
M = len(bins)
except TypeError:
# bins should be an integer
from astropy.units import Quantity
if isinstance(bins, Quantity):
raise NotImplementedError
else:
if M != D:
raise ValueError(
'The dimension of bins must be equal to the dimension of the '
' sample x.')
bins = [_check_bins(b, unit)
for (b, unit) in zip(bins, sample_units)]
if density:
unit = functools.reduce(operator.truediv, sample_units, (unit or 1))
return ((sample, bins, range), {'weights': weights, 'density': density},
(unit, sample_units), None)
@function_helper
def diff(a, n=1, axis=-1, prepend=np._NoValue, append=np._NoValue):
a = _as_quantity(a)
if prepend is not np._NoValue:
prepend = _as_quantity(prepend).to_value(a.unit)
if append is not np._NoValue:
append = _as_quantity(append).to_value(a.unit)
return (a.value, n, axis, prepend, append), {}, a.unit, None
@function_helper
def gradient(f, *varargs, **kwargs):
f = _as_quantity(f)
axis = kwargs.get('axis', None)
if axis is None:
n_axis = f.ndim
elif isinstance(axis, tuple):
n_axis = len(axis)
else:
n_axis = 1
if varargs:
varargs = _as_quantities(*varargs)
if len(varargs) == 1 and n_axis > 1:
varargs = varargs * n_axis
if varargs:
units = [f.unit / q.unit for q in varargs]
varargs = tuple(q.value for q in varargs)
else:
units = [f.unit] * n_axis
if len(units) == 1:
units = units[0]
return (f.value,) + varargs, kwargs, units, None
@function_helper
def logspace(start, stop, *args, **kwargs):
from astropy.units import LogQuantity, dex
if (not isinstance(start, LogQuantity) or
not isinstance(stop, LogQuantity)):
raise NotImplementedError
# Get unit from end point as for linspace.
stop = stop.to(dex(stop.unit.physical_unit))
start = start.to(stop.unit)
unit = stop.unit.physical_unit
return (start.value, stop.value) + args, kwargs, unit, None
@function_helper
def geomspace(start, stop, *args, **kwargs):
# Get unit from end point as for linspace.
(stop, start), unit = _quantities2arrays(stop, start)
return (start, stop) + args, kwargs, unit, None
@function_helper
def interp(x, xp, fp, *args, **kwargs):
from astropy.units import Quantity
(x, xp), _ = _quantities2arrays(x, xp)
if isinstance(fp, Quantity):
unit = fp.unit
fp = fp.value
else:
unit = None
return (x, xp, fp) + args, kwargs, unit, None
@function_helper
def unique(ar, return_index=False, return_inverse=False,
return_counts=False, axis=None):
unit = ar.unit
n_index = sum(bool(i) for i in
(return_index, return_inverse, return_counts))
if n_index:
unit = [unit] + n_index * [None]
return (ar.value, return_index, return_inverse, return_counts,
axis), {}, unit, None
@function_helper
def intersect1d(ar1, ar2, assume_unique=False, return_indices=False):
(ar1, ar2), unit = _quantities2arrays(ar1, ar2)
if return_indices:
unit = [unit, None, None]
return (ar1, ar2, assume_unique, return_indices), {}, unit, None
@function_helper(helps=(np.setxor1d, np.union1d, np.setdiff1d))
def twosetop(ar1, ar2, *args, **kwargs):
(ar1, ar2), unit = _quantities2arrays(ar1, ar2)
return (ar1, ar2) + args, kwargs, unit, None
@function_helper(helps=(np.isin, np.in1d))
def setcheckop(ar1, ar2, *args, **kwargs):
# This tests whether ar1 is in ar2, so we should change the unit of
    # ar1 to that of ar2.
(ar2, ar1), unit = _quantities2arrays(ar2, ar1)
return (ar1, ar2) + args, kwargs, None, None
@dispatched_function
def apply_over_axes(func, a, axes):
# Copied straight from numpy/lib/shape_base, just to omit its
# val = asarray(a); if only it had been asanyarray, or just not there
    # since a is assumed to be an array in the next line...
# Which is what we do here - we can only get here if it is a Quantity.
val = a
N = a.ndim
if np.array(axes).ndim == 0:
axes = (axes,)
for axis in axes:
if axis < 0:
axis = N + axis
args = (val, axis)
res = func(*args)
if res.ndim == val.ndim:
val = res
else:
res = np.expand_dims(res, axis)
if res.ndim == val.ndim:
val = res
else:
raise ValueError("function is not returning "
"an array of the correct shape")
# Returning unit is None to signal nothing should happen to
# the output.
return val, None, None
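# A minimal orientation sketch (my own illustration, not part of this module):
# once the helpers above are registered, plain numpy calls keep units on
# Quantity inputs, e.g.
#     import numpy as np
#     from astropy import units as u
#     np.histogram([1, 2, 2, 3] * u.m, bins=[0, 2, 4] * u.m)
#     # -> (array([1, 3]), <Quantity [0., 2., 4.] m>)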
|
{
"content_hash": "b60410a9913c1918afd5eee67d9dac49",
"timestamp": "",
"source": "github",
"line_count": 840,
"max_line_length": 79,
"avg_line_length": 33.37023809523809,
"alnum_prop": 0.6388641147301202,
"repo_name": "bsipocz/astropy",
"id": "b9d363abf1887aae82fba50531eaff672fd1cfd5",
"size": "28254",
"binary": false,
"copies": "1",
"ref": "refs/heads/hacking",
"path": "astropy/units/quantity_helper/function_helpers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "442627"
},
{
"name": "C++",
"bytes": "1057"
},
{
"name": "HTML",
"bytes": "1172"
},
{
"name": "Objective-C",
"bytes": "615"
},
{
"name": "Python",
"bytes": "9395160"
},
{
"name": "TeX",
"bytes": "853"
}
],
"symlink_target": ""
}
|
from block import *
from BeautifulSoup import BeautifulSoup
from logging import ERROR, WARN, INFO, DEBUG
import os
import urlparse
class bookmark_query(Block):
def on_load(self, config):
self.config = config
self.add_port("list", Port.QUERY, Port.UNNAMED, [])
self.add_port("restore", Port.QUERY, Port.UNNAMED, ["url", "time"])
self.add_port("delete", Port.QUERY, Port.UNNAMED, ["url", "time"])
self.add_port("store_control", Port.QUERY, Port.UNNAMED, ["command", "args"])
self.add_port("meta_control", Port.QUERY, Port.UNNAMED, ["command", "args"])
def add_meta(self, log):
mlog = Log()
mlog.append_field("path", log.log["internet_url"])
#we only have one fingerprint per url now, so create a one element list
mlog.append_field("fingerprints", [[f] for f in log.log["fingerprint"]])
self.push("meta_store", mlog)
def add_chunks(self, log):
clog = Log()
clog.append_field("url", log.log["url"])
clog.append_field("fingerprint", log.log["fingerprint"])
self.push("store", clog)
def recv_push(self, port, log):
self.add_chunks(log)
self.add_meta(log)
def fetch_meta(self, url, time):
mlog = Log()
mlog.append_field("command", ["restore"])
mlog.append_field("args", [[(url, time)]])
retlog = self.query("meta_control", mlog)
asset_list = retlog.log["assets"]
self.log(INFO, "got assets from meta_store: %r" % asset_list)
assert(len(asset_list)==1)
return asset_list[0]
def fetch_store(self, fp):
slog = Log()
slog.append_field("command", ["restore"])
slog.append_field("args", [[fp]])
retlog = self.query("store_control", slog)
store_urls = retlog.log["chunk"]
self.log(INFO, "got urls from data_store: %r" % store_urls)
assert(len(store_urls)<=1)
if len(store_urls) == 0:
raise KeyError
return store_urls[0]
def rewrite_links(self, url, html, assets):
soup = BeautifulSoup(html)
img_links = [l.get('src') for l in soup.findAll('img')]
css_links = [l.get('href') for l in soup.findAll('link') if l.has_key('rel') and l['rel'].lower() == 'stylesheet']
#extract links with valid sources
links = [l for l in (img_links + css_links) if l != None]
local_links = {}
for l in links:
#convert relative links into absolute
try:
fp = assets[urlparse.urljoin(url, l) if l[:4] != 'http' else l]
local_links[l] = self.fetch_store(fp)
except KeyError:
self.log(WARN, "did not download url for link: %r" % l)
                #we did not download that url
local_links[l] = l
img_tags = soup.findAll('img')
for i in img_tags:
i['src'] = local_links[i['src']]
css_tags = [t for t in soup.findAll('link') if t.has_key('rel') and t['rel'].lower() == 'stylesheet']
for c in css_tags:
c['href'] = local_links[c['href']]
return soup.prettify()
def restore(self, url, time):
try:
asset_pairs = self.fetch_meta(url, time)
assets = {}
for aurl, fp in asset_pairs:
assets[aurl] = fp
local_url = assets[url]
html = BlockUtils.fetch_file_at_url(self.fetch_store(local_url),
self.ip_address)
html = self.rewrite_links(url, html, assets)
name = unicode('bookmark_restored')
with open(name, 'w') as f:
f.write(html)
path = os.path.join(os.getcwd(), name)
return BlockUtils.generate_url_for_path(path, self.ip_address)
except KeyError:
self.log(WARN, "could not restore file: %r" % url)
return ''
def delete(self, url, time):
try:
asset_pairs = self.fetch_meta(url, time)
fps = [a[1] for a in asset_pairs]
mlog = Log()
mlog.append_field("command", ["delete"])
mlog.append_field("args", [[(url, time)]])
res = self.query("meta_control", mlog).log["result"][0]
slog = Log()
slog.append_field("command", ["delete"])
slog.append_field("args", [fps])
deleteres = self.query("store_control", slog).log["result"]
for r in deleteres:
res = res and r
return res
except Exception as e:
self.log(WARN, "could not delete %r (at %r) due to %r" % (url, time, e))
return False
def list_bookmarks(self):
mlog = Log()
mlog.append_field("command", ["list"])
mlog.append_field("args", [[]])
retlog = self.query("meta_control", mlog)
return (retlog.log["path"], retlog.log["time"])
def recv_query(self, port, log):
retlog = Log()
if port == "list":
urls, times = self.list_bookmarks()
retlog.append_field("url", urls)
retlog.append_field("time", times)
elif port == "delete":
delete_res = [self.delete(u, t) for u, t in log.iter_fields("url", "time")]
retlog.append_field("result", delete_res)
elif port == "restore":
restored_urls = [self.restore(u, t) for u, t in log.iter_fields("url", "time")]
retlog.append_field("url", restored_urls)
self.return_query_res(port, retlog)
|
{
"content_hash": "f1cddf28f1e3e3da82ddf1aa88eaeae9",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 118,
"avg_line_length": 36.594202898550726,
"alnum_prop": 0.6,
"repo_name": "mpi-sws-rse/datablox",
"id": "18fadffed328fb5d4ae5d7840ae738ad319c6669",
"size": "5050",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blox/bookmark_query__1_0/b_bookmark_query.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "7801"
},
{
"name": "Python",
"bytes": "378676"
},
{
"name": "Shell",
"bytes": "7299"
}
],
"symlink_target": ""
}
|
import numpy as np
from bs4 import BeautifulSoup
from docluster.core import Model
from docluster.utils.constants import Language
from nltk import sent_tokenize, word_tokenize
from .token_filter import TokenFilter
class Preprocessor(Model):
def __init__(self, language=Language.english, lower=True, parse_html=True, token_filter=TokenFilter(language=Language.english), do_stem=False, do_lemmatize=False):
self.language = language
self.lower = lower
self.parse_html = parse_html
self.token_filter = token_filter if token_filter else None
self.do_stem = do_stem
self.do_lemmatize = do_lemmatize
self.tokens = None
self.vocab = None
def fit(self, text):
tokens = self.tokenize(text)
tokens = self.stem(tokens) if self.do_stem else tokens
tokens = self.lemmatize(tokens) if self.do_lemmatize else tokens
self.tokens = tokens
self.vocab = list(set(self.tokens))
return self.tokens
    def tokenize(self, text):
        text = BeautifulSoup(text, "html5lib").get_text() if self.parse_html else text
        sentence_tokens = np.array(
            [words for words in [word_tokenize(sent) for sent in sent_tokenize(text)]])
        if sentence_tokens.shape == (0,):
            return []
        tokens = np.hstack(sentence_tokens)
        filtered_tokens = filter(lambda token: not self.token_filter.fit(
            token), tokens) if self.token_filter else tokens
        lowered_tokens = list(map(lambda token: token.lower().split("'")[0],
                                  filtered_tokens)) if self.lower else list(filtered_tokens)
        return lowered_tokens
def stem(self, tokens):
pass
def lemmatize(self, tokens):
pass
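# Minimal usage sketch (assumes the html5lib parser and NLTK's punkt data are
# installed; the exact tokens depend on the default TokenFilter's behaviour):
#     pre = Preprocessor()
#     tokens = pre.fit("<p>The quick brown fox.</p>")
#     vocab = pre.vocab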
|
{
"content_hash": "1856bedcb60f7291b5fe92cb5c213a37",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 167,
"avg_line_length": 31.24561403508772,
"alnum_prop": 0.6445816956765862,
"repo_name": "metinsay/docluster",
"id": "b607905719fee8bdd9c82a5a3248eeb8fd4f6099",
"size": "1781",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docluster/models/preprocessing/preprocessor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2275"
},
{
"name": "Python",
"bytes": "66306"
}
],
"symlink_target": ""
}
|
"""
Read a wiggle track and print out a series of lines containing
"chrom position score". Ignores track lines, handles bed, variableStep
and fixedStep wiggle lines.
"""
import psyco_full
import sys
import bx.wiggle
if len( sys.argv ) > 1: in_file = open( sys.argv[1] )
else: in_file = sys.stdin
if len( sys.argv ) > 2: out_file = open( sys.argv[2], "w" )
else: out_file = sys.stdout
for fields in bx.wiggle.Reader( in_file ):
print " ".join( map( str, fields ) )
in_file.close()
out_file.close()
|
{
"content_hash": "1b1c4f345c7d16a0e61d6f5fa3c93ef9",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 70,
"avg_line_length": 23.045454545454547,
"alnum_prop": 0.6824457593688363,
"repo_name": "poojavade/Genomics_Docker",
"id": "d8c4f57d0d3e5faed89daa2d6df136786441cc2c",
"size": "529",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/bx_python-0.7.1-py2.7-linux-x86_64.egg/EGG-INFO/scripts/wiggle_to_simple.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "AGS Script",
"bytes": "457842"
},
{
"name": "Assembly",
"bytes": "10509"
},
{
"name": "C",
"bytes": "1265138"
},
{
"name": "C++",
"bytes": "4734960"
},
{
"name": "CSS",
"bytes": "17332"
},
{
"name": "FORTRAN",
"bytes": "10375"
},
{
"name": "GLSL",
"bytes": "493"
},
{
"name": "Groff",
"bytes": "77173"
},
{
"name": "HTML",
"bytes": "395483"
},
{
"name": "Java",
"bytes": "9223"
},
{
"name": "JavaScript",
"bytes": "783663"
},
{
"name": "Jupyter Notebook",
"bytes": "189877"
},
{
"name": "Lua",
"bytes": "28217"
},
{
"name": "Makefile",
"bytes": "77825"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Objective-C",
"bytes": "567"
},
{
"name": "Perl",
"bytes": "244796"
},
{
"name": "Python",
"bytes": "54562861"
},
{
"name": "R",
"bytes": "2568"
},
{
"name": "Shell",
"bytes": "40620"
},
{
"name": "Smarty",
"bytes": "21035"
},
{
"name": "TeX",
"bytes": "55310"
}
],
"symlink_target": ""
}
|
class something(object):
def __init__(self):
pass
def ppp(self):
print("Great, you can see this now, in ppp function")
def ppp2(self, x, y, z=0):
return x+y+z
class something2(object):
def __init__(self, mm, nn, oo, pp='pp', qq='qq'):
self.value = mm + nn + oo
self.msg = pp + qq
def get_value(self):
return self.value
def multiple(self, kk):
self.value *= kk
return self.value
def get_msg(self):
return self.msg
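# Quick usage sketch (values follow directly from the definitions above):
#     s2 = something2(1, 2, 3)   # value = 6, msg = 'ppqq'
#     s2.get_value()             # -> 6
#     s2.multiple(2)             # -> 12
#     s2.get_msg()               # -> 'ppqq'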
|
{
"content_hash": "ec4b41720adb0acf89612bd9d74a153d",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 61,
"avg_line_length": 21.583333333333332,
"alnum_prop": 0.5386100386100386,
"repo_name": "steven004/pytest_oot",
"id": "6074d189e0485ef24b982a532578806b5177b3f0",
"size": "545",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example/test/lesson8_Sample_module.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17591"
}
],
"symlink_target": ""
}
|
import os
assert 'THOR_HOME' in os.environ, 'please set the env var THOR_HOME to the absolute path of DRL/THOR'
# open display
display = False
# size of the input image to the network
net_input_width=224
net_input_height=224
THOR_HOME = os.environ['THOR_HOME']
# OSX / Linux
binary_build = THOR_HOME + '/thor_binary/thor-201705011400-OSXIntel64.app/Contents/MacOS/thor-201705011400-OSXIntel64'
# binary_build = THOR_HOME + '/thor_binary/thor-201705011400-Linux64'
target_folder = THOR_HOME + '/thor_binary/thor-challenge-targets/targets-train.json'
linux_build='not supported'
x_display="0.0"
supported_envs = ['env_t1']
action_reverse_table = {'MoveAhead': 'MoveBack',
'MoveBack': 'MoveAhead',
'RotateLeft': 'RotateRight',
'RotateRight': 'RotateLeft',
'MoveLeft': 'MoveRight',
'MoveRight': 'MoveLeft'}
position_actions = ['MoveAhead', 'MoveBack', 'MoveLeft', 'MoveRight']
supported_actions = ['MoveAhead', 'MoveBack', 'RotateLeft', 'RotateRight']
# build action to idx mapping
supported_actions_idx = {}
for i in range(len(supported_actions)):
supported_actions_idx[supported_actions[i]] = i
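# The resulting mapping, spelled out (derived from supported_actions above):
# {'MoveAhead': 0, 'MoveBack': 1, 'RotateLeft': 2, 'RotateRight': 3}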
# threshold below which two images are considered identical (used for collecting target images)
target_image_diff_threshold = 10
# number of randomly sampled targets per scene
targets_per_scene = 100
# offline-environment
target_images_folder = THOR_HOME + '/target_images'
env_db_folder = THOR_HOME + '/env_db_a' + str(len(supported_actions))
env_feat_folder = THOR_HOME + '/env_feat_a' + str(len(supported_actions))
# number of random actions taken when a new episode starts
random_start = 30 # TODO: check the value used in paper
# maximum number of steps before the episode terminates
episode_max_steps = 10000
# reward received
reward_notfound = -0.01
reward_found = 10.0
reward_notfound_notsuccess = -0.015 # don't hit the wall, it hurts
use_distance_reward = False
distance_decrease_reward = 0.001 # if distance decreases, you receive additional reward
# debug options:
diable_random_start = True
|
{
"content_hash": "e4ed39fcc6ee9cc6a57e27cc5b5e06c8",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 118,
"avg_line_length": 32.91935483870968,
"alnum_prop": 0.7329740323370897,
"repo_name": "YuMao1993/DRL",
"id": "3754a4c9ea5df7e7326f9bf17c313fbe1d724aaa",
"size": "2069",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "THOR/THORConfig.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "412836"
},
{
"name": "Shell",
"bytes": "1976"
}
],
"symlink_target": ""
}
|
"""
Created on Tue Aug 11 11:44:38 2015
@author: Kevin
"""
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import os
import sys
import scipy.integrate  # "import scipy" alone does not expose scipy.integrate.quad
data_path = r'C:\Users\Kevin\Desktop\workStuff\LDA\data' + "\\"
def createDataFrame(vbfMEList, ttbarMEList, path = data_path):
data_vbfME = np.asarray([0])
data_ttbarME = np.asarray([0])
label_vector = np.asarray([10]) # label is either 0 or 1, 1 = signal, 0 = background
Qvec_vbfME = np.asarray([10])
Qvec_ttbarME = np.asanyarray([10])
evtWeightVector = np.asarray([0])
for process in vbfMEList:
tempData = np.genfromtxt(path+process)
data_vbfME = np.append(data_vbfME, tempData[:,0])
label_vector = np.append(label_vector, np.ones_like(tempData[:,0])) if 'vbf125data' in process else np.append(label_vector,np.zeros_like(tempData[:,0]))
Qvec_vbfME = np.append(Qvec_vbfME, tempData[:,4])
evtWeightVector = np.append(evtWeightVector, tempData[:,5])
for process in ttbarMEList:
tempData = np.genfromtxt(path+process)
data_ttbarME = np.append(data_ttbarME, tempData[:,0])
Qvec_ttbarME = np.append(Qvec_ttbarME, tempData[:,4])
label_vector = np.delete(label_vector, 0)
data_vbfME = np.delete(data_vbfME, 0)
data_ttbarME = np.delete(data_ttbarME, 0)
Qvec_vbfME = np.delete(Qvec_vbfME, 0)
Qvec_ttbarME = np.delete(Qvec_ttbarME, 0)
evtWeightVector = np.delete(evtWeightVector, 0)
dataFrame = np.vstack(([data_vbfME], [data_ttbarME], [label_vector], [Qvec_vbfME], [Qvec_ttbarME], [evtWeightVector]))
return dataFrame.T
def getFeatureNames(fileNameList):
vbfList = [process for process in fileNameList if process.split('data_')[1] == 'vbf125mem.txt']
ttbarList = [process for process in fileNameList if process.split('data_')[1] == 'ttbarmem.txt']
return sorted(vbfList), sorted(ttbarList)
def getFileNames(path=data_path):
return os.listdir(path)
def writeDataFrame(dataFrame):
filename = 'dataFrame.txt'
np.savetxt(filename, dataFrame, fmt='%.10e')
def getDataFrame():
currentDirectory = os.listdir('.')
if 'dataFrame.txt' in currentDirectory:
dataFrame = np.genfromtxt('dataFrame.txt')
else:
fileNameList = getFileNames()
vbfList, ttbarList = getFeatureNames(fileNameList)
dataFrame = createDataFrame(vbfList, ttbarList)
writeDataFrame(dataFrame)
return dataFrame
def plotHisto(discmin, discmax, signalEvents, backgroundEvents, separation, sigWeights, bgWeights):
histbins = np.linspace(discmin, discmax, 25)
plt.figure(figsize=(8,6))
ax = plt.subplot(111)
print np.shape(signalEvents)
print np.shape(sigWeights)
plt.hist(signalEvents, bins = histbins, label = 'Signal',
histtype='stepfilled', fc = '#2482FF', linewidth = 2,
alpha = 0.5, normed = True, weights=sigWeights)
plt.hist(backgroundEvents, bins = histbins, label = 'Background',
histtype='stepfilled', fc = 'lightgreen', linewidth = 2, alpha = 0.5,
hatch = '//', normed = True, weights=bgWeights)
plt.xlim([discmin, discmax])
# hiding axis ticks
plt.tick_params(axis="both", which="both", bottom="off", top="off",
labelbottom="on", left="off", right="off", labelleft="on")
# adding horizontal grid lines
ax.yaxis.grid(True)
# remove axis spines
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["bottom"].set_visible(False)
ax.spines["left"].set_visible(False)
# labels
plt.xlabel('Discriminant')
plt.ylabel('Distribution Value')
plt.legend(loc='upper left', fancybox=True)
plt.savefig('converged'+"_hist_"+"S"+str(separation)+".pdf")
plt.show()
def plotROC(signalEffArray, backgroundRejArray, separation):
plt.figure(figsize=(8,6))
ax = plt.subplot(111)
plt.plot([0,1],[1,0], 'k--', alpha=0.5)
plt.plot(signalEffArray, backgroundRejArray, '-',
color='#FF8000', alpha = 0.6,
linewidth = 2, label = 'Separation: {0}'.format(separation) )
# labels
plt.xlabel('Signal Efficiency')
plt.ylabel('Background Rejection')
plt.legend(loc='best', fancybox=True, fontsize=10)
# hiding axis ticks
plt.tick_params(axis="both", which="both", bottom="off", top="off",
labelbottom="on", left="off", right="off", labelleft="on")
# adding horizontal grid lines
ax.yaxis.grid(True)
# remove axis spines
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["bottom"].set_visible(False)
ax.spines["left"].set_visible(False)
plt.savefig('converged'+"_roc_"+"S"+str(separation)+".pdf")
plt.show()
if __name__ == '__main__':
dataFrame = getDataFrame()
Q_cut = np.logical_and(dataFrame[:,3]>0.2, dataFrame[:,4]>0.2)
length_before = len(dataFrame)
dataFrame = dataFrame[Q_cut]
length_after = float(len(dataFrame))
print "\nFraction of events that failed to meet Q requirement: {}".format(1-length_after/length_before)
print "Out of a total of {} events".format(length_before)
label = dataFrame[:,2]
sigdata_sigmem = dataFrame[:,0][label==1]
bgdata_sigmem = dataFrame[:,0][label==0]
sigdata_bgmem = dataFrame[:,1][label==1]
bgdata_bgmem = dataFrame[:,1][label==0]
sig_evtW = dataFrame[:,5][label==1]
bg_evtW = dataFrame[:,5][label==0]
xmax = max(np.amax(sigdata_sigmem),np.amax(bgdata_sigmem))
ymax = max(np.amax(sigdata_bgmem),np.amax(bgdata_bgmem))
xymax = max(xmax,ymax)
signalEvents = np.log10(sigdata_sigmem) - np.log10(sigdata_bgmem)
backgroundEvents = np.log10(bgdata_sigmem) - np.log10(bgdata_bgmem)
discmin = min(np.amin(signalEvents), np.amin(backgroundEvents))
discmax = max(np.amax(signalEvents), np.amax(backgroundEvents))
signalEffArray = []
backgroundRejArray = []
discVal = np.linspace(discmin, discmax, 10000)
sigNorm = sum(sig_evtW)
bgNorm = sum(bg_evtW)
for thisVal in discVal:
signalEff = sum(sig_evtW[np.where(signalEvents >= thisVal)])
backgroundRej = sum(bg_evtW[np.where(backgroundEvents < thisVal)])
signalEffArray.append(signalEff/sigNorm)
backgroundRejArray.append(backgroundRej/bgNorm)
def roc(x):
# x: the desired signal efficiency
opt = (np.abs(np.asarray(signalEffArray)-x)).argmin()
return 1-sum(bg_evtW[np.where(backgroundEvents > discVal[opt])])/bgNorm
def getSep(fn = roc):
separation = 0
# for i in range(100): separation += scipy.integrate.quad(fn, i*0.01, (i+1)*0.01)[0]
separation = scipy.integrate.quad(fn, 0, 1)[0]
return 2*separation-1
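    # Sanity check on the formula above: a perfect discriminant has
    # roc(x) = 1, so 2 * integral_0^1 roc(x) dx - 1 = 1; a random one has
    # roc(x) = 1 - x, giving 2 * (1/2) - 1 = 0. "separation" is therefore a
    # Gini-style score in [0, 1] (negative only if worse than random).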
separation = getSep()
plotHisto(discmin, discmax, signalEvents, backgroundEvents, separation, sig_evtW, bg_evtW)
plotROC(signalEffArray, backgroundRejArray, separation)
|
{
"content_hash": "8c8668629a0179ab8635b3b28a266c33",
"timestamp": "",
"source": "github",
"line_count": 204,
"max_line_length": 164,
"avg_line_length": 34.455882352941174,
"alnum_prop": 0.6517285531370038,
"repo_name": "kvmu/SFU-workterm",
"id": "7a7c8a9bf3ee2e1d53cfb15e292e0281d77f2364",
"size": "7053",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gen_plots.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "40456"
}
],
"symlink_target": ""
}
|
import numpy as np
from utils.grabscreen import grab_screen
from utils.directkeys import PressKey,ReleaseKey,A,W,S,D
from matplotlib import pyplot as plt
from utils.grabkeys import key_check
import cv2
import time
import sys
import os
def draw_lines(img,lines):
try:
for l in lines:
coords=l[0]
cv2.line(img,(coords[0],coords[1]),(coords[2],coords[3]),[230,230,230],3)
except:
pass
def draw_circles(img,circles):
try:
for i in circles[0, :1]:
# draw the outer circle
cv2.circle(img, (i[0], i[1]), i[2], (0, 255, 0), 2)
# draw the center of the circle
cv2.circle(img, (i[0], i[1]), 2, (0, 0, 255), 3)
return [i[0],i[1]]
except:
pass
def straight():
PressKey(W)
ReleaseKey(A)
ReleaseKey(D)
def left():
PressKey(A)
ReleaseKey(W)
ReleaseKey(D)
time.sleep(0.1)
ReleaseKey(A)
def right():
PressKey(D)
ReleaseKey(A)
ReleaseKey(W)
time.sleep(0.1)
ReleaseKey(D)
def calmdown():
ReleaseKey(W)
ReleaseKey(A)
ReleaseKey(D)
def roi(img,vertices):
# blank mask:
mask = np.zeros_like(img)
# filling pixels inside the polygon defined by "vertices" with the fill color
cv2.fillPoly(mask, vertices, 255)
# returning the image only where mask pixels are nonzero
masked = cv2.bitwise_and(img, mask)
return masked
def process_img(img):
original_image=img
processed_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
processed_img = cv2.Canny(processed_img, threshold1=200, threshold2=300)
processed_img = cv2.GaussianBlur(processed_img, (3,3), 0 )
copy=processed_img
vertices = np.array([[30, 240], [30, 100], [195, 100], [195, 240]])
processed_img = roi(processed_img, np.int32([vertices]))
verticesP = np.array([[30, 270], [30, 230], [197, 230], [197, 270]])
platform = roi(copy, np.int32([verticesP]))
# edges
#lines = cv2.HoughLinesP(platform, 1, np.pi/180, 180,np.array([]), 3, 2)
#draw_lines(processed_img,lines)
#draw_lines(original_image,lines)
#Platform lines
#imgray = cv2.cvtColor(platform,cv2.COLOR_BGR2GRAY)
ret,thresh = cv2.threshold(platform,127,255,0)
im2, contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
cv2.drawContours(original_image, contours, -1, (0,255,0), 3)
try:
platformpos=contours[0][0][0]
except:
platformpos=[[0]]
circles = cv2.HoughCircles(processed_img, cv2.HOUGH_GRADIENT, 1, 20,
param1=90, param2=5, minRadius=1, maxRadius=3)
ballpos=draw_circles(original_image,circles=circles)
return processed_img,original_image,platform,platformpos,ballpos
def k(screen):
Z = screen.reshape((-1,3))
# convert to np.float32
Z = np.float32(Z)
# define criteria, number of clusters(K) and apply kmeans()
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
K = 2
ret,label,center=cv2.kmeans(Z,K,None,criteria,10,cv2.KMEANS_RANDOM_CENTERS)
# Now convert back into uint8, and make original image
center = np.uint8(center)
res = center[label.flatten()]
res2 = res.reshape((screen.shape))
return res2
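# Minimal usage sketch for k() (a hypothetical blank frame, just for shapes):
#   frame = np.zeros((90, 160, 3), dtype=np.uint8)
#   posterized = k(frame)  # same shape as input, at most K=2 distinct colours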
def main():
for i in list(range(4))[::-1]:
print(i+1)
time.sleep(1)
paused = False
c=0
last_time = time.time()
laspos=0
while(True):
if not paused:
# 800x600 windowed mode
screen = grab_screen(title='FCEUX 2.2.2: Arkanoid (USA)')
            c += 1
            if c % 10 == 0:
                print('Recording at ' + str(10 / (time.time() - last_time)) + ' fps')
                last_time = time.time()
screen = cv2.cvtColor(screen, cv2.COLOR_BGR2RGB)
screen=k(screen)
processed,original,platform,platformpos,ballpos=process_img(screen)
screen = cv2.resize(screen, (160,90))
try:
if (platformpos[0] - ballpos[0] > 0):
left()
print('moving left')
else:
right()
print('moving right')
except:
pass
cv2.imshow('window',processed)
cv2.imshow('window1',original)
cv2.imshow('window2',platform)
#cv2.imshow('window',cv2.cvtColor(screen, cv2.COLOR_BGR2RGB))
if cv2.waitKey(25) & 0xFF == ord('q'):
cv2.destroyAllWindows()
break
if __name__ == '__main__':
    main()
|
{
"content_hash": "5f9262a609d074550aca77e47f3a739f",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 93,
"avg_line_length": 29.748387096774195,
"alnum_prop": 0.5827369334200824,
"repo_name": "AlwaysLearningDeeper/OpenAI_Challenges",
"id": "8621078a2782dd91c652ee7ed0ad9312bf39b368",
"size": "4611",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "utils/oldutils/screencp.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "129029"
}
],
"symlink_target": ""
}
|
from models import Content
def delete_all_contents():
for content in get_contents():
delete_content(content._type)
def get_contents():
return Content.get_contents()
def delete_content(_type):
return Content.delete_content(_type)
def add_content(data, _type):
content = Content(data, _type)
return Content.save_content(content)
def edit_content(_type, **kwargs):
data = kwargs.get("data")
content = Content.get_content_by_type(_type)
if content is None:
return None
if data is not None:
content.data = data
Content.make_commit()
return content
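# Usage sketch (payloads are hypothetical; signatures are the ones above):
#     add_content({"title": "About"}, "about")
#     edit_content("about", data={"title": "About us"})
#     delete_content("about")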
|
{
"content_hash": "5aafe643f79252d72b3ba887f64b7b50",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 48,
"avg_line_length": 17.885714285714286,
"alnum_prop": 0.65814696485623,
"repo_name": "pygamebrasil/pygame-site",
"id": "34ced7e228abac56650ef65094ce8cf5b6154e04",
"size": "643",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/app/contents/usecase.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "32384"
},
{
"name": "HTML",
"bytes": "205963"
},
{
"name": "JavaScript",
"bytes": "20046"
},
{
"name": "PHP",
"bytes": "2274"
},
{
"name": "Python",
"bytes": "23470"
}
],
"symlink_target": ""
}
|
import re
import sys
import json
import requests
from time import sleep
from bs4 import BeautifulSoup
class NckuCrawler(object):
def __init__(self):
        self.l = []
self.start = int(sys.argv[1])
self.end = int(sys.argv[2])
    # Custom cleanup format 1: strip spaces, newlines and tabs
def format(self,unformat):
try:
format = unformat.encode('raw_unicode_escape').decode('utf-8').replace(' ', '').replace('\n', '').replace('\t', '')
return format
except:
pass
    # Custom cleanup format 2: strip newlines and tabs (keep spaces)
def format2(self,unformat):
try:
format = unformat.encode('raw_unicode_escape').decode('utf-8').replace('\n', '').replace('\t', '')
return format
except:
pass
def crawler(self,start,end):
for key in range(start,end):
print( 'index is '+str(key) )
url = "http://web.ncku.edu.tw/files/501-1000-1048-"+str(key)+".php"
resp = requests.get(url=url)
            soup = BeautifulSoup(resp.text, "html.parser")
for tag in soup.find_all("tr",class_=re.compile("row_0")):
date = self.format( tag.contents[1].string )
title = self.format2( tag.find("a") )
organization = self.format( tag.contents[5].string )
dic = {"日期":date,"標題":title,"公告單位":organization}
print(dic)
self.l.append(dic)
sleep(0.1)
sleep(3)
def storage(self,l):
json_data = json.dumps(l,ensure_ascii=False)
with open('ncku.json', 'w') as f:
f.write(json_data)
def run(self):
self.crawler(self.start,self.end)
self.storage(self.l)
# main function
def main() :
nckuCrawler = NckuCrawler()
nckuCrawler.run()
if __name__ == '__main__' :
main()
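# Usage sketch (the index range is hypothetical; pages are fetched from
# web.ncku.edu.tw as built in crawler() above):
#   python nckuCrawler.py 1 10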
|
{
"content_hash": "c2a17298ff2755565979fc639c31e568",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 119,
"avg_line_length": 24.14516129032258,
"alnum_prop": 0.633934535738143,
"repo_name": "wy36101299/nckuCrawler",
"id": "ac2e32f96d8fbaefac4f13213a894a50f47b747c",
"size": "1543",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nckuCrawler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1543"
}
],
"symlink_target": ""
}
|
from optparse import make_option
import sys
from django.conf import settings
from django.core.management.base import NoArgsCommand
from django.core.management.color import no_style
from django.core.management.sql import custom_sql_for_model, emit_post_sync_signal
from django.db import connections, router, transaction, models, DEFAULT_DB_ALIAS
from django.utils.datastructures import SortedDict
from django.utils.importlib import import_module
class Command(NoArgsCommand):
option_list = NoArgsCommand.option_list + (
make_option('--noinput', action='store_false', dest='interactive', default=True,
help='Tells Django to NOT prompt the user for input of any kind.'),
make_option('--database', action='store', dest='database',
default=DEFAULT_DB_ALIAS, help='Nominates a database to synchronize. '
'Defaults to the "default" database.'),
)
help = "Create the database tables for all apps in INSTALLED_APPS whose tables haven't already been created."
def handle_noargs(self, **options):
verbosity = int(options.get('verbosity', 1))
interactive = options.get('interactive')
show_traceback = options.get('traceback', False)
# Stealth option -- 'load_initial_data' is used by the testing setup
# process to disable initial fixture loading.
load_initial_data = options.get('load_initial_data', True)
self.style = no_style()
# Import the 'management' module within each installed app, to register
# dispatcher events.
for app_name in settings.INSTALLED_APPS:
try:
import_module('.management', app_name)
except ImportError, exc:
# This is slightly hackish. We want to ignore ImportErrors
# if the "management" module itself is missing -- but we don't
# want to ignore the exception if the management module exists
# but raises an ImportError for some reason. The only way we
# can do this is to check the text of the exception. Note that
# we're a bit broad in how we check the text, because different
# Python implementations may not use the same text.
# CPython uses the text "No module named management"
# PyPy uses "No module named myproject.myapp.management"
msg = exc.args[0]
if not msg.startswith('No module named') or 'management' not in msg:
raise
db = options.get('database', DEFAULT_DB_ALIAS)
connection = connections[db]
cursor = connection.cursor()
# Get a list of already installed *models* so that references work right.
tables = connection.introspection.table_names()
seen_models = connection.introspection.installed_models(tables)
created_models = set()
pending_references = {}
# Build the manifest of apps and models that are to be synchronized
all_models = [
(app.__name__.split('.')[-2],
[m for m in models.get_models(app, include_auto_created=True)
if router.allow_syncdb(db, m)])
for app in models.get_apps()
]
def model_installed(model):
opts = model._meta
converter = connection.introspection.table_name_converter
return not ((converter(opts.db_table) in tables) or
(opts.auto_created and converter(opts.auto_created._meta.db_table) in tables))
manifest = SortedDict(
(app_name, filter(model_installed, model_list))
for app_name, model_list in all_models
)
# Create the tables for each model
if verbosity >= 1:
print "Creating tables ..."
for app_name, model_list in manifest.items():
for model in model_list:
# Create the model's database table, if it doesn't already exist.
if verbosity >= 3:
print "Processing %s.%s model" % (app_name, model._meta.object_name)
sql, references = connection.creation.sql_create_model(model, self.style, seen_models)
seen_models.add(model)
created_models.add(model)
for refto, refs in references.items():
pending_references.setdefault(refto, []).extend(refs)
if refto in seen_models:
sql.extend(connection.creation.sql_for_pending_references(refto, self.style, pending_references))
sql.extend(connection.creation.sql_for_pending_references(model, self.style, pending_references))
if verbosity >= 1 and sql:
print "Creating table %s" % model._meta.db_table
for statement in sql:
cursor.execute(statement)
tables.append(connection.introspection.table_name_converter(model._meta.db_table))
transaction.commit_unless_managed(using=db)
# Send the post_syncdb signal, so individual apps can do whatever they need
# to do at this point.
emit_post_sync_signal(created_models, verbosity, interactive, db)
# The connection may have been closed by a syncdb handler.
cursor = connection.cursor()
# Install custom SQL for the app (but only if this
# is a model we've just created)
if verbosity >= 1:
print "Installing custom SQL ..."
for app_name, model_list in manifest.items():
for model in model_list:
if model in created_models:
custom_sql = custom_sql_for_model(model, self.style, connection)
if custom_sql:
if verbosity >= 2:
print "Installing custom SQL for %s.%s model" % (app_name, model._meta.object_name)
try:
for sql in custom_sql:
cursor.execute(sql)
except Exception, e:
sys.stderr.write("Failed to install custom SQL for %s.%s model: %s\n" % \
(app_name, model._meta.object_name, e))
if show_traceback:
import traceback
traceback.print_exc()
transaction.rollback_unless_managed(using=db)
else:
transaction.commit_unless_managed(using=db)
else:
if verbosity >= 3:
print "No custom SQL for %s.%s model" % (app_name, model._meta.object_name)
if verbosity >= 1:
print "Installing indexes ..."
        # Install SQL indices for all newly created models
for app_name, model_list in manifest.items():
for model in model_list:
if model in created_models:
index_sql = connection.creation.sql_indexes_for_model(model, self.style)
if index_sql:
if verbosity >= 2:
print "Installing index for %s.%s model" % (app_name, model._meta.object_name)
try:
for sql in index_sql:
cursor.execute(sql)
except Exception, e:
sys.stderr.write("Failed to install index for %s.%s model: %s\n" % \
(app_name, model._meta.object_name, e))
transaction.rollback_unless_managed(using=db)
else:
transaction.commit_unless_managed(using=db)
# Load initial_data fixtures (unless that has been disabled)
if load_initial_data:
from django.core.management import call_command
call_command('loaddata', 'initial_data', verbosity=verbosity, database=db)
|
{
"content_hash": "715169a179cf84ca71e5fd2aefbd1f01",
"timestamp": "",
"source": "github",
"line_count": 164,
"max_line_length": 121,
"avg_line_length": 50.640243902439025,
"alnum_prop": 0.556532209512342,
"repo_name": "rimbalinux/MSISDNArea",
"id": "a821340eec793c0ccabb32c8ec3a1edb28e89ef7",
"size": "8305",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "django/core/management/commands/syncdb.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "118069"
},
{
"name": "Python",
"bytes": "7281875"
}
],
"symlink_target": ""
}
|
import multiprocessing
import sys
_is_pypy = hasattr(sys, 'pypy_version_info')
# falcon only implements the json and plaintext tests; there are no DB or wait tests.
workers = multiprocessing.cpu_count()
bind = "0.0.0.0:8080"
keepalive = 120
if _is_pypy:
worker_class = "tornado"
else:
worker_class = "meinheld.gmeinheld.MeinheldWorker"
def post_fork(server, worker):
    # Disable access log
import meinheld.server
meinheld.server.set_access_logger(None)
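# A typical invocation for a config module like this (the app module name is
# hypothetical): gunicorn -c gunicorn_conf.py app:app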
|
{
"content_hash": "81b83bb6d7b23fd7e41325f12cac6115",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 54,
"avg_line_length": 23,
"alnum_prop": 0.6956521739130435,
"repo_name": "seem-sky/FrameworkBenchmarks",
"id": "6380907ffcbf726cbc10a881b216a5fb867b8e99",
"size": "460",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "falcon/gunicorn_conf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "838"
},
{
"name": "ApacheConf",
"bytes": "20460"
},
{
"name": "Batchfile",
"bytes": "5149"
},
{
"name": "C",
"bytes": "252520"
},
{
"name": "C#",
"bytes": "128140"
},
{
"name": "C++",
"bytes": "182779"
},
{
"name": "CSS",
"bytes": "234858"
},
{
"name": "Clojure",
"bytes": "18787"
},
{
"name": "DIGITAL Command Language",
"bytes": "34"
},
{
"name": "Dart",
"bytes": "28519"
},
{
"name": "Elixir",
"bytes": "1912"
},
{
"name": "Erlang",
"bytes": "8219"
},
{
"name": "Go",
"bytes": "26375"
},
{
"name": "Groff",
"bytes": "57"
},
{
"name": "Groovy",
"bytes": "18121"
},
{
"name": "HTML",
"bytes": "76218"
},
{
"name": "Handlebars",
"bytes": "242"
},
{
"name": "Haskell",
"bytes": "8929"
},
{
"name": "Java",
"bytes": "261012"
},
{
"name": "JavaScript",
"bytes": "390160"
},
{
"name": "Lua",
"bytes": "6991"
},
{
"name": "Makefile",
"bytes": "2915"
},
{
"name": "MoonScript",
"bytes": "2189"
},
{
"name": "Nginx",
"bytes": "100578"
},
{
"name": "Nimrod",
"bytes": "31172"
},
{
"name": "PHP",
"bytes": "17337660"
},
{
"name": "Perl",
"bytes": "5303"
},
{
"name": "PowerShell",
"bytes": "34846"
},
{
"name": "Python",
"bytes": "337598"
},
{
"name": "QMake",
"bytes": "2056"
},
{
"name": "Racket",
"bytes": "1375"
},
{
"name": "Ruby",
"bytes": "37524"
},
{
"name": "Scala",
"bytes": "58608"
},
{
"name": "Shell",
"bytes": "79156"
},
{
"name": "Smarty",
"bytes": "7730"
},
{
"name": "Volt",
"bytes": "677"
}
],
"symlink_target": ""
}
|
"""BreezeBlocks Python Querying package."""
from .database import Database
from .sql import Table
from .query_builder import QueryBuilder
from .dml_builders import InsertBuilder, UpdateBuilder, DeleteBuilder
__version__ = "0.3.2"
|
{
"content_hash": "fe4af35d568ca0b17d292b9b844da79e",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 69,
"avg_line_length": 33,
"alnum_prop": 0.7835497835497836,
"repo_name": "modimore/BreezeBlocks",
"id": "ede17988278e5774f22e685f392ae33930f54017",
"size": "231",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "package/breezeblocks/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "114783"
}
],
"symlink_target": ""
}
|
'''
Given a string, find out if its characters can be rearranged to form a palindrome.
Example
For inputString = "aabb", the output should be
palindromeRearranging(inputString) = true.
We can rearrange "aabb" to make "abba", which is a palindrome.
'''
from collections import Counter
def palindromeRearranging(inputString):
    # A palindrome rearrangement exists iff at most one character occurs
    # an odd number of times.
    count_char = Counter(inputString)
    odd = 0
    for v in count_char.values():
        if v % 2 != 0:
            odd += 1
    return odd <= 1
if __name__ == "__main__":
strng = "zyyzzzzz"
print palindromeRearranging(strng)
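    # A few sanity checks implied by the odd-count rule above:
    assert palindromeRearranging("aabb")      # "abba"
    assert palindromeRearranging("aab")       # "aba"
    assert not palindromeRearranging("abc")   # no rearrangement works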
|
{
"content_hash": "84c5d2d88fc04ddae00f7b6e1fdf5121",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 82,
"avg_line_length": 23.939393939393938,
"alnum_prop": 0.5860759493670886,
"repo_name": "mayababuji/MyCodefights",
"id": "4df79f7ea0032044b2b2f107ae5165f326b574af",
"size": "837",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "palindromeRearranging.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "21846"
}
],
"symlink_target": ""
}
|
"""Tests for subclasses of IIncludes."""
import unittest
from musicbrainz2.model import Release
from musicbrainz2.webservice import (
ArtistIncludes, ReleaseIncludes, TrackIncludes, LabelIncludes)
class ArtistIncludesTest(unittest.TestCase):
def testReleases(self):
inc1 = ArtistIncludes(aliases=True, releaseRelations=True)
tags1 = inc1.createIncludeTags()
tags1.sort()
self.assertEqual(tags1, ['aliases', 'release-rels'])
inc2 = ArtistIncludes(releaseRelations=True)
tags2 = inc2.createIncludeTags()
tags2.sort()
self.assertNotEqual(tags2, ['aliases', 'release-rels'])
inc3 = ArtistIncludes(aliases=True,
releases=(Release.TYPE_ALBUM, Release.TYPE_OFFICIAL))
tags3 = inc3.createIncludeTags()
tags3.sort()
self.assertEqual(tags3, ['aliases', 'sa-Album', 'sa-Official'])
inc4 = ArtistIncludes(aliases=True, vaReleases=('Bootleg',))
tags4 = inc4.createIncludeTags()
tags4.sort()
self.assertEqual(tags4, ['aliases', 'va-Bootleg'])
inc5 = ArtistIncludes(aliases=True,
vaReleases=(Release.TYPE_BOOTLEG,))
tags5 = inc5.createIncludeTags()
tags5.sort()
self.assertEqual(tags5, ['aliases', 'va-Bootleg'])
def testReleaseGroups(self):
inc = ArtistIncludes(releaseGroups=True)
self.assertEqual(inc.createIncludeTags(), ['release-groups'])
def testTags(self):
def check(includes_class):
inc1 = includes_class(tags=True)
tags1 = inc1.createIncludeTags()
tags1.sort()
self.assertEqual(tags1, ['tags'])
check(ArtistIncludes)
check(ReleaseIncludes)
check(TrackIncludes)
check(LabelIncludes)
class ReleaseIncludesTest(unittest.TestCase):
# Test that including isrcs in release also pulls in tracks
def testIsrcs(self):
inc = ReleaseIncludes(isrcs=True)
tags = inc.createIncludeTags()
tags.sort()
self.assertEqual(tags, ['isrcs', 'tracks'])
# Test that including labels in release also pulls in release events
def testReleaseEvents(self):
inc = ReleaseIncludes(labels=True)
tags = inc.createIncludeTags()
tags.sort()
self.assertEqual(tags, ['labels', 'release-events'])
def testReleaseGroup(self):
inc = ReleaseIncludes(releaseGroup=True)
tags = inc.createIncludeTags()
self.assertEqual(tags, ['release-groups'])
# EOF
|
{
"content_hash": "853ae003c5808b2f6ed8d4257547e5c4",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 69,
"avg_line_length": 30.397260273972602,
"alnum_prop": 0.7399729607931501,
"repo_name": "mineo/python-musicbrainz2",
"id": "30cd873ae05ce87e0e344ecfd54e26df104df5e1",
"size": "2219",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_ws_includes.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "256773"
}
],
"symlink_target": ""
}
|
"""This module contains a collection of unit tests which
validate the ..async_actions module.
"""
import unittest
import uuid
import mock
import tor_async_util
from ..async_actions import AsyncCrawlCreator
from ..async_actions import SpidersAsyncHealthCheck
from cloudfeaster_services.tests import AsyncPublishMessagePatcher
from cloudfeaster_services.tests import AsyncReadCrawlResultsPatcher
from cloudfeaster_services.tests import ConfigPatcher
class AsyncCrawlCreatorTestCase(unittest.TestCase):
def test_ctr_without_async_state(self):
docker_image_name = uuid.uuid4().hex
spider_name = uuid.uuid4().hex
crawl_args = [
uuid.uuid4().hex,
uuid.uuid4().hex,
uuid.uuid4().hex,
]
acc = AsyncCrawlCreator(docker_image_name, spider_name, crawl_args)
self.assertEqual(acc.docker_image_name, docker_image_name)
self.assertEqual(acc.spider_name, spider_name)
self.assertEqual(acc.crawl_args, crawl_args)
self.assertIsNone(acc.async_state)
self.assertIsNone(acc.create_failure_detail)
def test_ctr_with_async_state(self):
docker_image_name = uuid.uuid4().hex
spider_name = uuid.uuid4().hex
crawl_args = [
uuid.uuid4().hex,
uuid.uuid4().hex,
uuid.uuid4().hex,
]
async_state = uuid.uuid4().hex
acc = AsyncCrawlCreator(docker_image_name, spider_name, crawl_args, async_state)
self.assertEqual(acc.docker_image_name, docker_image_name)
self.assertEqual(acc.spider_name, spider_name)
self.assertEqual(acc.crawl_args, crawl_args)
self.assertEqual(acc.async_state, async_state)
self.assertIsNone(acc.create_failure_detail)
def test_get_from_cache_fails(self):
docker_image_name = uuid.uuid4().hex
spider_name = uuid.uuid4().hex
crawl_args = [
uuid.uuid4().hex,
uuid.uuid4().hex,
uuid.uuid4().hex,
]
with AsyncReadCrawlResultsPatcher(False, None):
callback = mock.Mock()
the_acc = AsyncCrawlCreator(docker_image_name, spider_name, crawl_args)
the_acc.create(callback)
self.assertEqual(
the_acc.create_failure_detail,
type(the_acc).CFD_ERROR_GETTING_CRAWL_RESULTS_FROM_CACHE)
self.assertEqual(1, callback.call_count)
self.assertEqual(1, len(callback.call_args_list))
(is_ok, crawl_id, crawl_result, acc) = callback.call_args_list[0][0]
self.assertFalse(is_ok)
self.assertIsNone(crawl_id)
self.assertIsNone(crawl_result)
self.assertTrue(acc is the_acc)
def test_happy_path_crawl_results_from_crawl_results_cache(self):
docker_image_name = uuid.uuid4().hex
spider_name = uuid.uuid4().hex
crawl_args = [
uuid.uuid4().hex,
uuid.uuid4().hex,
uuid.uuid4().hex,
]
the_crawl_result = uuid.uuid4().hex
with AsyncReadCrawlResultsPatcher(True, the_crawl_result):
callback = mock.Mock()
the_acc = AsyncCrawlCreator(docker_image_name, spider_name, crawl_args)
the_acc.create(callback)
self.assertEqual(
the_acc.create_failure_detail,
type(the_acc).CFD_OK)
self.assertEqual(1, callback.call_count)
self.assertEqual(1, len(callback.call_args_list))
(is_ok, crawl_id, crawl_result, acc) = callback.call_args_list[0][0]
self.assertTrue(is_ok)
self.assertIsNotNone(crawl_id)
self.assertIsNotNone(crawl_result)
self.assertEqual(crawl_result, the_crawl_result)
self.assertTrue(acc is the_acc)
def test_publish_fails(self):
docker_image_name = uuid.uuid4().hex
spider_name = uuid.uuid4().hex
crawl_args = [
uuid.uuid4().hex,
uuid.uuid4().hex,
uuid.uuid4().hex,
]
config = {
'google_cloud_pubsub_access_token': uuid.uuid4().hex,
}
with AsyncReadCrawlResultsPatcher(True, None):
with AsyncPublishMessagePatcher(False):
with ConfigPatcher(config):
callback = mock.Mock()
the_acc = AsyncCrawlCreator(docker_image_name, spider_name, crawl_args)
the_acc.create(callback)
self.assertEqual(
the_acc.create_failure_detail,
type(the_acc).CFD_ERROR_PUSHING_CRAWL_MESSAGE)
self.assertEqual(1, callback.call_count)
self.assertEqual(1, len(callback.call_args_list))
(is_ok, crawl_id, crawl_result, acc) = callback.call_args_list[0][0]
self.assertFalse(is_ok)
self.assertIsNone(crawl_id)
self.assertIsNone(crawl_result)
self.assertTrue(acc is the_acc)
def test_happy_path(self):
docker_image_name = uuid.uuid4().hex
spider_name = uuid.uuid4().hex
crawl_args = [
uuid.uuid4().hex,
uuid.uuid4().hex,
uuid.uuid4().hex,
]
config = {
'google_cloud_pubsub_access_token': uuid.uuid4().hex,
}
with AsyncReadCrawlResultsPatcher(True, None):
with AsyncPublishMessagePatcher(True):
with ConfigPatcher(config):
callback = mock.Mock()
the_acc = AsyncCrawlCreator(docker_image_name, spider_name, crawl_args)
the_acc.create(callback)
self.assertEqual(
the_acc.create_failure_detail,
type(the_acc).CFD_OK)
self.assertEqual(1, callback.call_count)
self.assertEqual(1, len(callback.call_args_list))
(is_ok, crawl_id, crawl_result, acc) = callback.call_args_list[0][0]
self.assertTrue(is_ok)
self.assertIsNotNone(crawl_id)
self.assertIsNone(crawl_result)
self.assertTrue(acc is the_acc)
class SpidersAsyncHealthCheckTestCase(unittest.TestCase):
def test_ctr_without_async_state(self):
is_quick = uuid.uuid4().hex
ahc = SpidersAsyncHealthCheck(is_quick)
self.assertEqual(ahc.is_quick, is_quick)
self.assertIsNone(ahc.async_state)
self.assertIsNone(ahc.check_failure_detail)
def test_ctr_with_async_state(self):
is_quick = uuid.uuid4().hex
async_state = uuid.uuid4().hex
ahc = SpidersAsyncHealthCheck(is_quick, async_state)
self.assertEqual(ahc.is_quick, is_quick)
self.assertEqual(ahc.async_state, async_state)
self.assertIsNone(ahc.check_failure_detail)
def test_is_quick_true(self):
callback = mock.Mock()
the_ahc = SpidersAsyncHealthCheck(is_quick=True)
the_ahc.check(callback)
expected_details = None
callback.assert_called_once_with(expected_details, the_ahc)
self.assertEqual(
the_ahc.check_failure_detail,
type(the_ahc).CFD_OK)
def test_is_quick_false(self):
config = {
'google_cloud_pubsub_service_account_credentials_filename': uuid.uuid4().hex,
'google_cloud_pubsub_service_account_credentials': uuid.uuid4().hex,
}
expected_health_details = {
'status': 'green',
'details': {
'google_cloud_pubsub_service_account_credentials': {
'status': 'green',
'details': {
'configured': 'green',
'readable': 'green',
},
},
},
}
with ConfigPatcher(config):
callback = mock.Mock()
the_async_action = SpidersAsyncHealthCheck(is_quick=False)
the_async_action.check(callback)
self.assertEqual(1, callback.call_count)
self.assertEqual(1, len(callback.call_args_list))
(health_details, async_action) = callback.call_args_list[0][0]
self.assertIsNotNone(health_details)
self.assertEqual(
tor_async_util._health_check_gen_response_body(health_details),
expected_health_details)
self.assertTrue(the_async_action is async_action)
self.assertEqual(
async_action.check_failure_detail,
type(async_action).CFD_OK)
|
{
"content_hash": "758df92c50f486d415299028fb56c1a3",
"timestamp": "",
"source": "github",
"line_count": 246,
"max_line_length": 91,
"avg_line_length": 35.520325203252035,
"alnum_prop": 0.5779354543373769,
"repo_name": "simonsdave/cloudfeaster_infrastructure",
"id": "b53f112915759b3a161d55564e38ebd5ad59d163",
"size": "8738",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cloudfeaster_services/spiders/tests/async_actions_unit_tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "14635"
},
{
"name": "Python",
"bytes": "292516"
},
{
"name": "RAML",
"bytes": "60187"
},
{
"name": "Ruby",
"bytes": "528"
},
{
"name": "Shell",
"bytes": "11670"
},
{
"name": "VimL",
"bytes": "502"
}
],
"symlink_target": ""
}
|
import sys
from typing import Any, Callable, Dict, IO, List, Optional, TypeVar, Union, overload
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from .._serialization import Serializer
from .._vendor import _convert_request, _format_url_section
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_by_server_request(
resource_group_name: str, server_name: str, subscription_id: str, *, expand: Optional[str] = None, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", "2020-11-01-preview")
) # type: Literal["2020-11-01-preview"]
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/advisors",
) # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
"serverName": _SERIALIZER.url("server_name", server_name, "str"),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
if expand is not None:
_params["$expand"] = _SERIALIZER.query("expand", expand, "str")
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_get_request(
resource_group_name: str, server_name: str, advisor_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", "2020-11-01-preview")
) # type: Literal["2020-11-01-preview"]
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/advisors/{advisorName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
"serverName": _SERIALIZER.url("server_name", server_name, "str"),
"advisorName": _SERIALIZER.url("advisor_name", advisor_name, "str"),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_update_request(
resource_group_name: str, server_name: str, advisor_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", "2020-11-01-preview")
) # type: Literal["2020-11-01-preview"]
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/advisors/{advisorName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
"serverName": _SERIALIZER.url("server_name", server_name, "str"),
"advisorName": _SERIALIZER.url("advisor_name", advisor_name, "str"),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
if content_type is not None:
_headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="PATCH", url=_url, params=_params, headers=_headers, **kwargs)
class ServerAdvisorsOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.sql.SqlManagementClient`'s
:attr:`server_advisors` attribute.
"""
models = _models
def __init__(self, *args, **kwargs):
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list_by_server(
self, resource_group_name: str, server_name: str, expand: Optional[str] = None, **kwargs: Any
) -> List[_models.Advisor]:
"""Gets a list of server advisors.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal. Required.
:type resource_group_name: str
:param server_name: The name of the server. Required.
:type server_name: str
:param expand: The child resources to include in the response. Default value is None.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of Advisor or the result of cls(response)
:rtype: list[~azure.mgmt.sql.models.Advisor]
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", "2020-11-01-preview")
) # type: Literal["2020-11-01-preview"]
cls = kwargs.pop("cls", None) # type: ClsType[List[_models.Advisor]]
request = build_list_by_server_request(
resource_group_name=resource_group_name,
server_name=server_name,
subscription_id=self._config.subscription_id,
expand=expand,
api_version=api_version,
template_url=self.list_by_server.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("[Advisor]", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_by_server.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/advisors"} # type: ignore
@distributed_trace
def get(self, resource_group_name: str, server_name: str, advisor_name: str, **kwargs: Any) -> _models.Advisor:
"""Gets a server advisor.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal. Required.
:type resource_group_name: str
:param server_name: The name of the server. Required.
:type server_name: str
:param advisor_name: The name of the Server Advisor. Required.
:type advisor_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Advisor or the result of cls(response)
:rtype: ~azure.mgmt.sql.models.Advisor
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", "2020-11-01-preview")
) # type: Literal["2020-11-01-preview"]
cls = kwargs.pop("cls", None) # type: ClsType[_models.Advisor]
request = build_get_request(
resource_group_name=resource_group_name,
server_name=server_name,
advisor_name=advisor_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("Advisor", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/advisors/{advisorName}"} # type: ignore
@overload
def update(
self,
resource_group_name: str,
server_name: str,
advisor_name: str,
parameters: _models.Advisor,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.Advisor:
"""Updates a server advisor.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal. Required.
:type resource_group_name: str
:param server_name: The name of the server. Required.
:type server_name: str
:param advisor_name: The name of the Server Advisor. Required.
:type advisor_name: str
:param parameters: The requested advisor resource state. Required.
:type parameters: ~azure.mgmt.sql.models.Advisor
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Advisor or the result of cls(response)
:rtype: ~azure.mgmt.sql.models.Advisor
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
def update(
self,
resource_group_name: str,
server_name: str,
advisor_name: str,
parameters: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.Advisor:
"""Updates a server advisor.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal. Required.
:type resource_group_name: str
:param server_name: The name of the server. Required.
:type server_name: str
:param advisor_name: The name of the Server Advisor. Required.
:type advisor_name: str
:param parameters: The requested advisor resource state. Required.
:type parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Advisor or the result of cls(response)
:rtype: ~azure.mgmt.sql.models.Advisor
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace
def update(
self,
resource_group_name: str,
server_name: str,
advisor_name: str,
parameters: Union[_models.Advisor, IO],
**kwargs: Any
) -> _models.Advisor:
"""Updates a server advisor.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal. Required.
:type resource_group_name: str
:param server_name: The name of the server. Required.
:type server_name: str
:param advisor_name: The name of the Server Advisor. Required.
:type advisor_name: str
:param parameters: The requested advisor resource state. Is either a model type or a IO type.
Required.
:type parameters: ~azure.mgmt.sql.models.Advisor or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Advisor or the result of cls(response)
:rtype: ~azure.mgmt.sql.models.Advisor
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", "2020-11-01-preview")
) # type: Literal["2020-11-01-preview"]
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.Advisor]
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(parameters, (IO, bytes)):
_content = parameters
else:
_json = self._serialize.body(parameters, "Advisor")
request = build_update_request(
resource_group_name=resource_group_name,
server_name=server_name,
advisor_name=advisor_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self.update.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("Advisor", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/advisors/{advisorName}"} # type: ignore
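# Sketch of an update() call with a model body (assumption: the Advisor model
# exposes an auto_execute_status field; "CreateIndex" is an illustrative
# advisor name, not one defined in this file):
#   advisor = client.server_advisors.update(
#       "my-rg", "my-server", "CreateIndex",
#       _models.Advisor(auto_execute_status="Enabled"))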
|
{
"content_hash": "31cba0a04b83ef7ad7636d3410a70643",
"timestamp": "",
"source": "github",
"line_count": 438,
"max_line_length": 183,
"avg_line_length": 42.34018264840183,
"alnum_prop": 0.6458344567268806,
"repo_name": "Azure/azure-sdk-for-python",
"id": "5c5c60e7e4f9067e084dbb6e776fda11eac508d8",
"size": "19045",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/sql/azure-mgmt-sql/azure/mgmt/sql/operations/_server_advisors_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
import unittest
import sqlite3 as sqlite
class CollationTests(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def CheckCreateCollationNotCallable(self):
con = sqlite.connect(":memory:")
try:
con.create_collation("X", 42)
self.fail("should have raised a TypeError")
except TypeError as e:
self.assertEqual(e.args[0], "parameter must be callable")
def CheckCreateCollationNotAscii(self):
con = sqlite.connect(":memory:")
try:
con.create_collation("collä", lambda x, y: (x > y) - (x < y))
self.fail("should have raised a ProgrammingError")
        except sqlite.ProgrammingError:
            pass
def CheckCollationIsUsed(self):
if sqlite.version_info < (3, 2, 1): # old SQLite versions crash on this test
return
def mycoll(x, y):
# reverse order
return -((x > y) - (x < y))
con = sqlite.connect(":memory:")
con.create_collation("mycoll", mycoll)
sql = """
select x from (
select 'a' as x
union
select 'b' as x
union
select 'c' as x
) order by x collate mycoll
"""
result = con.execute(sql).fetchall()
if result[0][0] != "c" or result[1][0] != "b" or result[2][0] != "a":
self.fail("the expected order was not returned")
con.create_collation("mycoll", None)
try:
result = con.execute(sql).fetchall()
self.fail("should have raised an OperationalError")
except sqlite.OperationalError as e:
self.assertEqual(e.args[0].lower(), "no such collation sequence: mycoll")
def CheckCollationRegisterTwice(self):
"""
Register two different collation functions under the same name.
Verify that the last one is actually used.
"""
con = sqlite.connect(":memory:")
con.create_collation("mycoll", lambda x, y: (x > y) - (x < y))
con.create_collation("mycoll", lambda x, y: -((x > y) - (x < y)))
result = con.execute("""
select x from (select 'a' as x union select 'b' as x) order by x collate mycoll
""").fetchall()
if result[0][0] != 'b' or result[1][0] != 'a':
self.fail("wrong collation function is used")
def CheckDeregisterCollation(self):
"""
Register a collation, then deregister it. Make sure an error is raised if we try
to use it.
"""
con = sqlite.connect(":memory:")
con.create_collation("mycoll", lambda x, y: (x > y) - (x < y))
con.create_collation("mycoll", None)
try:
con.execute("select 'a' as x union select 'b' as x order by x collate mycoll")
self.fail("should have raised an OperationalError")
except sqlite.OperationalError as e:
if not e.args[0].startswith("no such collation sequence"):
self.fail("wrong OperationalError raised")
class ProgressTests(unittest.TestCase):
def CheckProgressHandlerUsed(self):
"""
Test that the progress handler is invoked once it is set.
"""
con = sqlite.connect(":memory:")
progress_calls = []
def progress():
progress_calls.append(None)
return 0
con.set_progress_handler(progress, 1)
con.execute("""
create table foo(a, b)
""")
self.assertTrue(progress_calls)
def CheckOpcodeCount(self):
"""
Test that the opcode argument is respected.
"""
con = sqlite.connect(":memory:")
progress_calls = []
def progress():
progress_calls.append(None)
return 0
con.set_progress_handler(progress, 1)
curs = con.cursor()
curs.execute("""
create table foo (a, b)
""")
first_count = len(progress_calls)
progress_calls = []
con.set_progress_handler(progress, 2)
curs.execute("""
create table bar (a, b)
""")
second_count = len(progress_calls)
self.assertTrue(first_count > second_count)
def CheckCancelOperation(self):
"""
Test that returning a non-zero value stops the operation in progress.
"""
con = sqlite.connect(":memory:")
progress_calls = []
def progress():
progress_calls.append(None)
return 1
con.set_progress_handler(progress, 1)
curs = con.cursor()
self.assertRaises(
sqlite.OperationalError,
curs.execute,
"create table bar (a, b)")
def CheckClearHandler(self):
"""
Test that setting the progress handler to None clears the previously set handler.
"""
con = sqlite.connect(":memory:")
action = 0
def progress():
nonlocal action
action = 1
return 0
con.set_progress_handler(progress, 1)
con.set_progress_handler(None, 1)
con.execute("select 1 union select 2 union select 3").fetchall()
self.assertEqual(action, 0, "progress handler was not cleared")
class TraceCallbackTests(unittest.TestCase):
def CheckTraceCallbackUsed(self):
"""
Test that the trace callback is invoked once it is set.
"""
con = sqlite.connect(":memory:")
traced_statements = []
def trace(statement):
traced_statements.append(statement)
con.set_trace_callback(trace)
con.execute("create table foo(a, b)")
self.assertTrue(traced_statements)
self.assertTrue(any("create table foo" in stmt for stmt in traced_statements))
def CheckClearTraceCallback(self):
"""
Test that setting the trace callback to None clears the previously set callback.
"""
con = sqlite.connect(":memory:")
traced_statements = []
def trace(statement):
traced_statements.append(statement)
con.set_trace_callback(trace)
con.set_trace_callback(None)
con.execute("create table foo(a, b)")
self.assertFalse(traced_statements, "trace callback was not cleared")
def CheckUnicodeContent(self):
"""
Test that the statement can contain unicode literals.
"""
unicode_value = '\xf6\xe4\xfc\xd6\xc4\xdc\xdf\u20ac'
con = sqlite.connect(":memory:")
traced_statements = []
def trace(statement):
traced_statements.append(statement)
con.set_trace_callback(trace)
con.execute("create table foo(x)")
# Can't execute bound parameters as their values don't appear
# in traced statements before SQLite 3.6.21
# (cf. http://www.sqlite.org/draft/releaselog/3_6_21.html)
con.execute('insert into foo(x) values ("%s")' % unicode_value)
con.commit()
self.assertTrue(any(unicode_value in stmt for stmt in traced_statements),
"Unicode data %s garbled in trace callback: %s"
% (ascii(unicode_value), ', '.join(map(ascii, traced_statements))))
def suite():
collation_suite = unittest.makeSuite(CollationTests, "Check")
progress_suite = unittest.makeSuite(ProgressTests, "Check")
trace_suite = unittest.makeSuite(TraceCallbackTests, "Check")
return unittest.TestSuite((collation_suite, progress_suite, trace_suite))
def test():
runner = unittest.TextTestRunner()
runner.run(suite())
if __name__ == "__main__":
test()
|
{
"content_hash": "1804ac2fcbc3512253a4cac30b06dc1d",
"timestamp": "",
"source": "github",
"line_count": 216,
"max_line_length": 91,
"avg_line_length": 35.71296296296296,
"alnum_prop": 0.5767435830956702,
"repo_name": "MalloyPower/parsing-python",
"id": "3dc44f6110c3bde9fb84d2eb9e61d3801d82f2f8",
"size": "8754",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "front-end/testsuite-python-lib/Python-3.3.0/Lib/sqlite3/test/hooks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1963"
},
{
"name": "Lex",
"bytes": "238458"
},
{
"name": "Makefile",
"bytes": "4513"
},
{
"name": "OCaml",
"bytes": "412695"
},
{
"name": "Python",
"bytes": "17319"
},
{
"name": "Rascal",
"bytes": "523063"
},
{
"name": "Yacc",
"bytes": "429659"
}
],
"symlink_target": ""
}
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'commute_together.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^meeting/new/', 'commute_together.views.new_meeting', name='new_meeting'),
url(r'^meeting/(\d+)/$', 'commute_together.views.meeting', name='meeting'),
url(r'^meeting/$', 'commute_together.views.home', name='home'),
url(r'^meeting/schedule/', 'commute_together.views.schedule', name='schedule'),
url(r'^meeting/vklogin/$', 'commute_together.views.vklogin', name='vklogin'),
url(r'^meeting/logout/$', 'commute_together.views.logout_view', name='logout'),
url(r'^meeting/api/station_name_hints/', 'commute_together.views.station_name_hints_JSON', name='station_name_hints'),
url(r'^meeting/api/get_schedule/', 'commute_together.views.get_schedule_JSON', name="get_schedule"),
url(r'^meeting/api/board/', 'commute_together.views.get_board_JSON', name="get_meetings"),
)
|
{
"content_hash": "e29e89f1c2aaada624453ce3ebb02de7",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 122,
"avg_line_length": 51.76190476190476,
"alnum_prop": 0.6798528058877645,
"repo_name": "DimaWittmann/commute-together",
"id": "ff8b5a7cdbb1270cf6f16c0d77334fcda3fabe72",
"size": "1087",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "commute_together/commute_together/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "18390"
},
{
"name": "HTML",
"bytes": "9548"
},
{
"name": "JavaScript",
"bytes": "111494"
},
{
"name": "Makefile",
"bytes": "325"
},
{
"name": "Python",
"bytes": "23785"
}
],
"symlink_target": ""
}
|
__author__ = 'jbjohnso'
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2014 Lenovo Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is responsible for a client-side communication method to enable
# capabilities like measuring and rearranging the terminal window for
# wcons.
import atexit
import os
import socket
import stat
import threading
class TermHandler(object):
def __init__(self, path):
self.path = path
self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
os.remove(path)
except OSError: # if file does not exist, no big deal
pass
atexit.register(self.shutdown)
self.socket.bind(path)
os.chmod(path, stat.S_IWUSR | stat.S_IRUSR)
th = threading.Thread(target=self.sockinteract)
th.daemon = True
th.start()
def shutdown(self):
try:
os.remove(self.path)
except OSError:
pass
def sockinteract(self):
self.socket.listen(5)
while True:
connection = None
try:
connection, address = self.socket.accept()
connection.sendall(b"confetty control v1--\n")
cmd = connection.recv(8)
if b'GETWINID' == cmd:
winid = os.environ['WINDOWID']
if not isinstance(winid, bytes):
winid = winid.encode('utf8')
connection.sendall(winid)
connection.close()
except BaseException:
pass
finally:
if connection is not None:
connection.close()
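# A minimal counterpart client, as a sketch (path is whatever the server-side
# TermHandler was constructed with): read the banner, then request the X11
# window id with the 8-byte GETWINID command that sockinteract() expects.
def _example_get_window_id(path):
    client = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    try:
        client.connect(path)
        client.recv(32)  # discard the b"confetty control v1--\n" banner
        client.sendall(b'GETWINID')
        return client.recv(32)
    finally:
        client.close()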
|
{
"content_hash": "a94b65fa28ce662e1b417b30d6a6f031",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 74,
"avg_line_length": 32.44117647058823,
"alnum_prop": 0.6128739800543971,
"repo_name": "jjohnson42/confluent",
"id": "0b3b0d16ef6673822cc4f0b12451dbc05237f35b",
"size": "2206",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "confluent_client/confluent/termhandler.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "82023"
},
{
"name": "CSS",
"bytes": "2464"
},
{
"name": "Dockerfile",
"bytes": "333"
},
{
"name": "HTML",
"bytes": "352"
},
{
"name": "Makefile",
"bytes": "648"
},
{
"name": "Perl",
"bytes": "8853"
},
{
"name": "Python",
"bytes": "1822565"
},
{
"name": "Shell",
"bytes": "369359"
}
],
"symlink_target": ""
}
|
import os.path
import tornado
import tornado.auth
import markdown
import re
from gblog import config
from gblog import utils
from gblog.handlers.basehandler import BaseHandler
class HomeHandler(BaseHandler):
"""Handle URL '/'.
Subclass of BaseHandler and RequestHandler, support standard GET/POST
method.
"""
def get(self):
"""Return the home page."""
# Calculate the pages num
        entries_per_page = config.options.entries_per_page
        try:
            entries_per_page = int(entries_per_page)
        except ValueError:
            raise tornado.web.HTTPError(500)
count = self.db.get("SELECT COUNT(*) FROM entries")
count = list(count.values())[0]
pages = int((count-1)/entries_per_page + 1) # equal math.ceil(a/b)
pages = pages if pages else 1 # pages cannot be 0
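        # e.g. count=21, entries_per_page=10 -> int(20/10 + 1) = 3 pages;
        # count=20 -> int(19/10 + 1) = 2, matching math.ceil(count/per_page)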
entries = self.db.query("SELECT id,title,slug,abstract,published,\
readtimes,comments FROM entries ORDER BY published DESC \
LIMIT {0}".format(entries_per_page))
tags_list = self.db.query("SELECT * FROM tags")
dates_list = self.db.query("SELECT * FROM dates")
self.render("home.html", entries=entries, tags_list=tags_list,
dates_list=dates_list, pages=pages)
def post(self):
"""Return the entries in home page #."""
page = self.get_argument("page", None)
if not page: raise tornado.web.HTTPError(404)
        entries_per_page = int(config.options.entries_per_page)
        start = int(page) * entries_per_page - entries_per_page
entries = self.db.query("SELECT id,title,slug,abstract,published,\
readtimes,comments FROM entries ORDER BY published DESC \
LIMIT {0},{1}".format(start, entries_per_page))
self.render("modules/entry.html", entries=entries)
class AboutHandler(BaseHandler):
"""Handle URL '/about'.
"""
def get(self):
"""Return the about page."""
about_file_path = self.settings["config_dir"] + '/about.md'
        if os.path.isfile(about_file_path):
            with open(about_file_path) as f:
                content = markdown.markdown(f.read())
else:
content = None
comments = self.db.query("SELECT * FROM comments WHERE \
entry_id={0}".format(0))
        reply_map = {}
        for i, comment in enumerate(comments):
            reply_map[comment["id"]] = i
self.render("about.html", content=content, comments=comments,
entry_id=0, reply_map=reply_map)
class DefaultHandler(BaseHandler):
"""handler of default_handler_class in Application Setting.
"""
def get(self):
"""Return the 404 page."""
# search *.jpg, *.ico, *.css ....
#match = re.search('\.', self.request.uri)
#if match:
#self.send_error(400)
#else:
#self.render("404.html")
raise tornado.web.HTTPError(404)
class EntryHandler(BaseHandler):
"""Handle URL '/entry/[^/]+'.
Subclass of BaseHandler and RequestHandler, support standard GET method.
"""
def get(self, slug):
entry = self.db.get("SELECT * FROM entries WHERE slug = '{0}'"\
.format(slug))
if not entry: raise tornado.web.HTTPError(404)
# Update readtimes
if not self.current_user:
            entry.readtimes += 1
            self.db.execute("UPDATE entries SET readtimes = '{0}' WHERE \
                id = {1}".format(entry.readtimes, entry.id))
tags_list = self.db.query("SELECT * FROM tags")
dates_list = self.db.query("SELECT * FROM dates")
# Query the pre and next article
query = self.db.query("(SELECT id,slug,title FROM entries WHERE \
id<{0} ORDER BY id DESC LIMIT 1) UNION ALL (SELECT id, \
slug,title FROM entries WHERE id>{0} ORDER BY id LIMIT 1) \
".format(entry.id))
        # Use ==, not 'is': int identity only works by accident for
        # CPython's small-integer cache.
        if len(query) == 2:
            pre = query[0]
            nex = query[1]
        elif len(query) == 1:
if query[0]["id"] < entry["id"]:
pre = query[0]
nex = None
else:
pre = None
nex = query[0]
else:
pre = None
nex = None
self.render("article.html", entry=entry, tags_list=tags_list,
dates_list=dates_list, pre=pre, nex=nex)
class ArchiveHandler(BaseHandler):
"""Handle URL '/archive'.
Subclass of BaseHandler and RequestHandler, support standard GET method.
"""
def get(self):
entries = self.db.query("SELECT * FROM entries ORDER BY \
published DESC")
self.render("archive.html", entries=entries)
class CategoryHandler(BaseHandler):
"""Handle URL '/category'.
"""
def get(self):
# Check argument
        name = self.get_argument("name")
        id = self.get_argument("id")
        # Check id
        try:
            id = int(id)
        except ValueError:
            raise tornado.web.HTTPError(400)
if name == "tag":
            if id == 0:
                tagname = self.get_argument("tagname")
                tag = self.db.get("SELECT * FROM tags WHERE name = '{0}' \
                    LIMIT 1".format(tagname))
                entries = self.db.query("SELECT * FROM entries WHERE id IN \
                    (SELECT entry_id FROM tagmaps WHERE tag_id = {0})" \
                    .format(tag["id"]))
                if not entries:
                    raise tornado.web.HTTPError(404)
                self.render("category.html", entries=entries, category="tag",
                            item=tag)
                return  # without this, execution falls through and renders twice
entries = self.db.query("SELECT * FROM entries WHERE id IN (SELECT \
entry_id FROM tagmaps WHERE tag_id = {0})".format(id))
if not entries:
raise tornado.web.HTTPError(404)
tag = self.db.get("SELECT * FROM tags WHERE id = {0} LIMIT 1"\
.format(id))
self.render("category.html", entries=entries, category="tag",
item=tag)
elif name == "date":
entries = self.db.query("SELECT * FROM entries WHERE id IN (SELECT\
entry_id FROM datemaps WHERE date_id = {0})".format(id))
if not entries:
raise tornado.web.HTTPError(404)
date = self.db.get("SELECT * FROM dates WHERE id = {0} LIMIT 1"\
.format(id))
self.render("category.html", entries=entries, category="date",
item=date)
else:
raise tornado.web.HTTPError(404)
|
{
"content_hash": "485424ca8cce358b9929e252348053d2",
"timestamp": "",
"source": "github",
"line_count": 204,
"max_line_length": 80,
"avg_line_length": 33.71078431372549,
"alnum_prop": 0.5471862730841937,
"repo_name": "waterdrinker/gblog",
"id": "e37849d8d96a128f24e129fc9d48de03a1a8d7f6",
"size": "6877",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gblog/handlers/multi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "19169"
},
{
"name": "HTML",
"bytes": "16625"
},
{
"name": "JavaScript",
"bytes": "4434"
},
{
"name": "Nginx",
"bytes": "2526"
},
{
"name": "Python",
"bytes": "48977"
},
{
"name": "Shell",
"bytes": "1192"
}
],
"symlink_target": ""
}
|
from common_fixtures import * # NOQA
from test_services_lb_ssl_balancer import validate_lb_services_ssl
UPGRADE_SUBDIR = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'resources/upgrade')
pre_upgrade_stack_name = os.environ.get('PRE_UPGRADE_STACK_NAME')
post_upgrade_stack_name = os.environ.get('POST_UPGRADE_STACK_NAME')
preportsuffixnum = os.environ.get('PRE_PORT_SUFFIX_NUM')
postportsuffixnum = os.environ.get('POST_PORT_SUFFIX_NUM')
preupgrade_stacklist = []
postupgrade_stacklist = []
dom_list = ["test1.com"]
if_pre_upgrade_testing = pytest.mark.skipif(
os.environ.get('UPGRADE_TESTING') != "true" or
pre_upgrade_stack_name is None or
preportsuffixnum is None,
    reason='All parameters needed for UPGRADE_TESTING are not set')
if_post_upgrade_testing = pytest.mark.skipif(
    os.environ.get('UPGRADE_TESTING') != "true" or
    post_upgrade_stack_name is None or
    postportsuffixnum is None or
    pre_upgrade_stack_name is None or
    preportsuffixnum is None,
    reason='All parameters needed for UPGRADE_TESTING are not set')
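# Port convention used throughout these tests: each service type has a fixed
# prefix and the run's port suffix number is appended, e.g. suffix "1" gives
# lb 3001, health lb 2001, global health lb 1001, exposed service 4001,
# linked service 5001, cross-stack lb 6001 and cross-stack link 7001.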
def pre_upgrade(client):
# Create certificate to be used in the yml files
domain = dom_list[0]
create_cert(client, domain, "test1certificate")
# Create two stacks
pre_upgrade_stack1 = pre_upgrade_stack_name + "-1"
pre_upgrade_stack2 = pre_upgrade_stack_name + "-2"
create_stacks(client, pre_upgrade_stack1, pre_upgrade_stack2,
str(preportsuffixnum))
validate_stacks(client, pre_upgrade_stack_name,
preportsuffixnum, socat_containers)
def create_stacks(client, stack_name1, stack_name2,
portsuffixnum):
# Create pre-upgrade stack
print "**** In Create Stacks ****"
print "PORT SUFFIX NUM"
print portsuffixnum
lb_image_setting = get_lb_image_version(client)
print lb_image_setting
dc_config_file1 = "dc_first_stack.yml"
rc_config_file1 = "rc_first_stack.yml"
dc_config1 = readDataFile(UPGRADE_SUBDIR, dc_config_file1)
dc_config1 = dc_config1.replace("$portsuffixnum", portsuffixnum)
dc_config1 = dc_config1.replace("$lbimage", lb_image_setting)
print dc_config1
rc_config1 = readDataFile(UPGRADE_SUBDIR, rc_config_file1)
rc_config1 = rc_config1.replace("$portsuffixnum", portsuffixnum)
print rc_config1
create_stack_with_service_from_config(client, stack_name1, dc_config1,
rc_config1)
dc_config_file2 = "dc_second_stack.yml"
rc_config_file2 = "rc_second_stack.yml"
dc_config2 = readDataFile(UPGRADE_SUBDIR, dc_config_file2)
dc_config2 = dc_config2.replace("$stack", stack_name1)
dc_config2 = dc_config2.replace("$lbimage", lb_image_setting)
dc_config2 = dc_config2.replace("$portsuffixnum", portsuffixnum)
print dc_config2
rc_config2 = readDataFile(UPGRADE_SUBDIR, rc_config_file2)
rc_config2 = rc_config2.replace("$stack", stack_name1)
rc_config2 = rc_config2.replace("$portsuffixnum", portsuffixnum)
create_stack_with_service_from_config(client, stack_name2, dc_config2,
rc_config2)
def validate_stacks(client, stackname,
portsuffixnum, socat_containers):
stack1 = stackname + "-1"
stack2 = stackname + "-2"
print "In validate stacks"
# Validate the containers/lbs in the stack
stack, service1 = get_env_service_by_name(client, stack1, "service1")
assert service1['state'] == "active"
assert service1.scale == 2
# Validate LB Service
stack, lbservice = get_env_service_by_name(client, stack1, "mylb")
assert lbservice['state'] == "active"
assert lbservice.scale == 1
mylbport = "300" + str(portsuffixnum)
validate_lb_service(client, lbservice,
mylbport, [service1])
# Validate health service and health LB Service
stack, healthservice = get_env_service_by_name(client, stack1,
"healthservice")
assert service1['state'] == "active"
assert healthservice.scale == 1
assert healthservice.healthState == "healthy"
stack, healthlbservice = get_env_service_by_name(client, stack1,
"healthlb")
healthlbport = "200" + str(portsuffixnum)
healthlb_containers = get_service_container_list(client,
healthlbservice)
for con in healthlb_containers:
wait_for_condition(
client, con,
lambda x: x.healthState == 'healthy',
lambda x: 'State is: ' + x.healthState)
healthlbservice = wait_for_condition(
client, healthlbservice,
lambda x: x.healthState == 'healthy',
lambda x: 'State is: ' + x.healthState)
assert healthlbservice['state'] == "active"
assert healthlbservice.scale == 1
assert healthlbservice.healthState == "healthy"
validate_lb_service(client, healthlbservice,
healthlbport, [healthservice])
# Validate Global Health LB Service
stack, globalhealthservice = get_env_service_by_name(client, stack1,
"globalhealthservice")
assert globalhealthservice['state'] == "active"
assert globalhealthservice.healthState == "healthy"
verify_service_is_global(client, globalhealthservice)
stack, globalhealthlbservice = get_env_service_by_name(client, stack1,
"globalhealthlb")
globallbport = "100" + str(portsuffixnum)
globalhealthlb_containers = get_service_container_list(
client, globalhealthlbservice)
for con in globalhealthlb_containers:
wait_for_condition(
client, con,
lambda x: x.healthState == 'healthy',
lambda x: 'State is: ' + x.healthState)
globalhealthlbservice = wait_for_condition(
client, globalhealthlbservice,
lambda x: x.healthState == 'healthy',
lambda x: 'State is: ' + x.healthState)
assert globalhealthlbservice['state'] == "active"
assert globalhealthlbservice.healthState == "healthy"
verify_service_is_global(client,
globalhealthlbservice)
validate_lb_service(client, globalhealthlbservice,
globallbport, [globalhealthservice])
stack, service2 = get_env_service_by_name(client, stack1, "service2")
assert service2['state'] == "active"
assert service2.scale == 2
# Validate SSL LB service
stack, ssllbservice = get_env_service_by_name(client, stack1, "ssllb")
assert ssllbservice['state'] == "active"
assert ssllbservice.scale == 1
ssl_port = "40" + str(portsuffixnum)
client_port = ssl_port + "0"
port = ssl_port
domain = dom_list[0]
test_ssl_client_con = create_client_container_for_ssh(client, client_port)
print "***TEST CLIENT CONTAINER***"
print test_ssl_client_con["port"]
print test_ssl_client_con["host"]
print test_ssl_client_con["container"]
validate_lb_services_ssl(client, test_ssl_client_con,
stack, [service1, service2], ssllbservice,
port, ssl_port,
domain)
# Validate DNS Service
stack, servicewithexposedports = get_env_service_by_name(
client, stack1, "servicewithexposedports")
assert servicewithexposedports['state'] == "active"
assert servicewithexposedports.scale == 1
exposedport = "400" + str(portsuffixnum)
validate_dns_service(
client, servicewithexposedports, [service1, service2],
exposedport, "myalias")
# Validate DNS of services within a stack using dig servicename
validate_dns_service(
client, servicewithexposedports, [healthservice],
exposedport, "healthservice")
# Validate External Service
stack, extservicetohostname = get_env_service_by_name(
client, stack1, "extservicetohostname")
validate_external_service_for_hostname(
client, servicewithexposedports,
[extservicetohostname], exposedport)
# Validate Service with Link
stack, servicewithlink = get_env_service_by_name(client,
stack1, "servicewithlink")
assert servicewithlink['state'] == "active"
assert servicewithlink.scale == 1
servicelinkexposedport = "500" + str(portsuffixnum)
validate_linked_service(client, servicewithlink,
[service2], servicelinkexposedport,
linkName="mylink")
# Validate LB pointing to a service in the first stack
# (Cross stack LB validation)
stack, newstacklbservice = get_env_service_by_name(client, stack2,
"newstacklb")
assert newstacklbservice['state'] == "active"
assert newstacklbservice.scale == 1
newstacklbport = "600" + str(portsuffixnum)
validate_lb_service(client, newstacklbservice,
newstacklbport, [service2])
# Validate Service with Link in Second Stack [The link is
# pointing to a service in the first Stack]
stack, newstackservice1 = get_env_service_by_name(client, stack2,
"newstackservice1")
assert newstackservice1['state'] == "active"
assert newstackservice1.scale == 1
stack, newstackservicewithlink = get_env_service_by_name(
client, stack2, "newstackservicewithlink")
assert newstackservicewithlink['state'] == "active"
assert newstackservicewithlink.scale == 1
newstacklinkedserviceport = "700" + str(portsuffixnum)
validate_linked_service(client, newstackservicewithlink,
[service1], newstacklinkedserviceport,
linkName="mynewstacklink")
# Validate DNS of services across stack using dig servicename.stackname
dnsname = "newstackservice1." + stack2
validate_dns_service(
client, servicewithexposedports, [newstackservice1],
exposedport, dnsname)
delete_all(client, [test_ssl_client_con["container"]])
return
def post_upgrade(client):
post_upgrade_stack1 = post_upgrade_stack_name+"-1"
post_upgrade_stack2 = post_upgrade_stack_name+"-2"
pre_upgrade_stack1 = pre_upgrade_stack_name + "-1"
pre_upgrade_stack2 = pre_upgrade_stack_name + "-2"
print "***Validate Pre Stacks in Post UPGRADE ****"
validate_stacks(client, pre_upgrade_stack_name,
preportsuffixnum, socat_containers)
print "***Modify Pre Stacks in Post UPGRADE ****"
modify_preupgradestack_verify(client,
pre_upgrade_stack1, pre_upgrade_stack2)
print "****Create new Stacks in Post UPGRADE ****"
create_stacks(client, post_upgrade_stack1,
post_upgrade_stack2, postportsuffixnum)
print "****Validate new Stacks in Post UPGRADE ****"
validate_stacks(client, post_upgrade_stack_name,
postportsuffixnum, socat_containers)
def modify_preupgradestack_verify(client,
pre_upgrade_stack1, pre_upgrade_stack2):
# Increment service scale
stack, service1 = get_env_service_by_name(client,
pre_upgrade_stack1, "service1")
service1 = client.update(service1, name=service1.name, scale=3)
service1 = client.wait_success(service1, 300)
assert service1.state == "active"
assert service1.scale == 3
# Validate LB Service after service increment
stack, lbservice = get_env_service_by_name(client,
pre_upgrade_stack1, "mylb")
mylbport = "300" + str(preportsuffixnum)
validate_lb_service(client, lbservice,
mylbport, [service1])
# Increment LB scale and validate
lbservice = client.update(lbservice, name=lbservice.name, scale=2)
lbservice = client.wait_success(lbservice, 300)
assert lbservice['state'] == "active"
    assert lbservice.scale == 2
validate_lb_service(client, lbservice,
mylbport, [service1])
# Validate DNS Service after incrementing service1
stack, servicewithexposedports = get_env_service_by_name(
client, pre_upgrade_stack1,
"servicewithexposedports")
exposedport = "400" + str(preportsuffixnum)
validate_dns_service(
client, servicewithexposedports, [service1],
exposedport, "service1")
# Validate Service with Link in NewStack [The link is
# pointing to a service in Default Stack]
stack, newstackservice1 = get_env_service_by_name(
client, pre_upgrade_stack2, "newstackservice1")
assert newstackservice1.state == "active"
stack, newstackservicewithlink = get_env_service_by_name(
client, pre_upgrade_stack2, "newstackservicewithlink")
newstacklinkedserviceport = "700" + str(preportsuffixnum)
validate_linked_service(client, newstackservicewithlink,
[service1], newstacklinkedserviceport,
linkName="mynewstacklink")
# Increment scale of service2
stack, service2 = get_env_service_by_name(client,
pre_upgrade_stack1, "service2")
service2 = client.update(service2, name=service2.name, scale=3)
service2 = client.wait_success(service2, 300)
assert service2.state == "active"
assert service2.scale == 3
# Validate DNS service as service1 and service2 are incremented
validate_dns_service(
client, servicewithexposedports, [service1, service2],
exposedport, "myalias")
# Validate LB Service in the second stack after incrementing the LB
# and service2 to which it is pointing to
stack, newstacklbservice = \
get_env_service_by_name(client, pre_upgrade_stack2,
"newstacklb")
newstacklbservice = client.update(newstacklbservice,
name=newstacklbservice.name, scale=2)
newstacklbservice = client.wait_success(newstacklbservice, 300)
assert newstacklbservice['state'] == "active"
    assert newstacklbservice.scale == 2
newstacklbport = "600" + str(preportsuffixnum)
validate_lb_service(client, newstacklbservice,
newstacklbport, [service2])
# Validate linked service in the second stack after
# service1 has been incremented
stack, newstackservicewithlink = get_env_service_by_name(
client, pre_upgrade_stack2, "newstackservicewithlink")
assert newstackservicewithlink['state'] == "active"
assert newstackservicewithlink.scale == 1
newstacklinkedserviceport = "700" + str(preportsuffixnum)
validate_linked_service(client, newstackservicewithlink,
[service1], newstacklinkedserviceport,
linkName="mynewstacklink")
@if_pre_upgrade_testing
def test_pre_upgrade():
client = \
get_client_for_auth_enabled_setup(ACCESS_KEY, SECRET_KEY, PROJECT_ID)
create_socat_containers(client)
print "***PRE UPGRADE TEST***"
pre_upgrade(client)
@if_post_upgrade_testing
def test_post_upgrade():
client = \
get_client_for_auth_enabled_setup(ACCESS_KEY, SECRET_KEY, PROJECT_ID)
print "***POST UPGRADE TEST***"
create_socat_containers(client)
post_upgrade(client)
def get_lb_image_version(client):
setting = client.by_id_setting(
"lb.instance.image")
default_lb_image_setting = setting.value
return default_lb_image_setting
def verify_service_is_global(client, service):
# This method verifies if the service is global
globalservicetest = client.list_service(name=service.name,
include="instances",
uuid=service.uuid,
state="active")
print globalservicetest
print "The length of globalservicetest is:"
print len(globalservicetest)
assert len(globalservicetest) == 1
instanceslist = globalservicetest[0].instances
print "Instances list"
print instanceslist
hostlist = client.list_host(state="active")
print hostlist
hostidlist = []
for host in hostlist:
hostidlist.append(host.id)
# Verify that the number of containers of the global service
# is equal to the number of hosts
assert len(instanceslist) == len(hostlist)
print "Host id list"
print hostidlist
# Verify that there is one instance per host
for instance in instanceslist:
assert instance['hostId'] in hostidlist
hostidlist.remove(instance['hostId'])
|
{
"content_hash": "91f1e5df55f954d1741ea2a30f570fa1",
"timestamp": "",
"source": "github",
"line_count": 440,
"max_line_length": 79,
"avg_line_length": 38.43636363636364,
"alnum_prop": 0.641438032166509,
"repo_name": "rancherio/validation-tests",
"id": "b74ca3057278af65f30c5affb71139e5b69d19b4",
"size": "16912",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tests/v2_validation/cattlevalidationtest/core/test_cattle_upgrade.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1383015"
},
{
"name": "Shell",
"bytes": "4069"
}
],
"symlink_target": ""
}
|
"""
WSGI config for wedding project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "wedding.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
from whitenoise.django import DjangoWhiteNoise
application = DjangoWhiteNoise(application)
|
{
"content_hash": "add4eaaba63ee197a0be6d2868667667",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 78,
"avg_line_length": 28.294117647058822,
"alnum_prop": 0.7920997920997921,
"repo_name": "jbinney/wedding",
"id": "e9f6877f9f6cdd674afdcfdabaf7c0ff6dea3315",
"size": "481",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wedding/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "152"
},
{
"name": "HTML",
"bytes": "13912"
},
{
"name": "JavaScript",
"bytes": "568"
},
{
"name": "Python",
"bytes": "9277"
}
],
"symlink_target": ""
}
|
"""simplest class chapter 27
"""
def list_attrs(item):
return [attr for attr in item.__dict__ if not attr.startswith('__')]
class rec: pass
rec.name = "Bob"
rec.age = 45
print(rec.name)
x = rec()
y = rec()
print(x.name, y.name)
x.name = "Sue"
print(rec.name, x.name, y.name)
print(list_attrs(rec))
print(list_attrs(x))
print(list_attrs(y))
def uppername(obj):
"""return name in uppercase"""
return obj.name.upper()
rec.method = uppername
print(x.method())
print(y.method())
print(rec.method(x))
|
{
"content_hash": "6086aa6a672bf373cd257306b5301667",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 72,
"avg_line_length": 16.125,
"alnum_prop": 0.6531007751937985,
"repo_name": "skellykiernan/pylearn",
"id": "418ff762c630a7cce8035793081615fb1323399e",
"size": "516",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "VI/ch27/simplest_class.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "17015"
},
{
"name": "Shell",
"bytes": "2713"
}
],
"symlink_target": ""
}
|
from time import sleep
def test_beta0():
sleep(5)
assert True
def test_beta1():
sleep(5)
assert True
def test_beta2():
sleep(5)
assert True
def test_beta3():
sleep(5)
assert True
def test_beta4():
sleep(5)
assert True
def test_beta5():
sleep(5)
assert True
def test_beta6():
sleep(5)
assert True
def test_beta7():
sleep(5)
assert True
def test_beta8():
sleep(5)
assert True
def test_beta9():
sleep(5)
assert True
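# These deliberately uniform, slow tests exercise pytest-xdist scheduling; a
# typical invocation (sketch) groups them by module scope across two workers:
#   pytest -n 2 --dist loadscope example/loadscope/test/test_beta.py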
|
{
"content_hash": "99b8feb96259074c2278910c748250f5",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 22,
"avg_line_length": 10.058823529411764,
"alnum_prop": 0.5828460038986355,
"repo_name": "pytest-dev/pytest-xdist",
"id": "6782b19ea52a0633e54f3463be24665144e4ebb7",
"size": "513",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "example/loadscope/test/test_beta.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "220356"
}
],
"symlink_target": ""
}
|
import sys
from xml.sax import make_parser, handler
class categoryHandler(handler.ContentHandler):
def __init__(self):
self.document = None
self.in_importants = False
def startElement(self, name, attrs):
if name=="document":
self.document = Document(attrs)
if name=="category":
self.document.categories.append( Category(attrs) )
elif name=="overviews":
category = self.document.categories[-1]
assert category.overviewItems is None, "category %r already has overviews" % (category,)
category.overviewItems = OverviewItems(attrs)
elif name=="item":
item = Item(attrs)
if self.in_importants:
self.document.important.append(item)
elif self.document.categories:
category = self.document.categories[-1]
category.overviewItems.items.append(item)
else:
self.document.links.append(item)
elif name=="important":
self.in_importants = True
def endElement(self, name):
if name=="important":
self.in_importants = False
def endDocument(self):
pass
class Document:
def __init__(self, attrs):
self.__dict__.update(attrs)
self.categories = []
self.links = []
self.important = []
def __iter__(self):
return iter(self.categories)
class Category:
def __init__(self, attrs):
self.__dict__.update(attrs)
self.overviewItems = None
class OverviewItems:
def __init__(self, attrs):
self.__dict__.update(attrs)
self.items = []
def __iter__(self):
return iter(self.items)
class Item:
def __init__(self, attrs):
self.__dict__.update(attrs)
def GetDocument(fname="pywin32-document.xml"):
parser = make_parser()
    content_handler = categoryHandler()  # local name; avoid shadowing xml.sax.handler
    parser.setContentHandler(content_handler)
    parser.parse(fname)
    return content_handler.document
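# The handler implies a document of roughly this shape (illustrative sketch;
# attribute names follow the code's use of .name/.href/.id/.label):
# <document>
#   <important><item name="..." href="..."/></important>
#   <item name="..." href="..."/>
#   <category id="..." label="...">
#     <overviews><item name="..." href="..."/></overviews>
#   </category>
# </document>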
if __name__=='__main__':
doc = GetDocument()
print("Important Notes")
for link in doc.important:
print(" ", link.name, link.href)
print("Doc links")
for link in doc.links:
print(" ", link.name, link.href)
print("Doc categories")
for c in doc:
print(" ", c.id, c.label)
|
{
"content_hash": "53a15499399d996acff6ebfe003ddf97",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 100,
"avg_line_length": 29.25,
"alnum_prop": 0.5837606837606838,
"repo_name": "mollstam/UnrealPy",
"id": "f9181f8f19670d99eb946ca26fd1500d41c60493",
"size": "2340",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/pywin32-219/AutoDuck/document_object.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "APL",
"bytes": "587"
},
{
"name": "ASP",
"bytes": "2753"
},
{
"name": "ActionScript",
"bytes": "5686"
},
{
"name": "Ada",
"bytes": "94225"
},
{
"name": "Agda",
"bytes": "3154"
},
{
"name": "Alloy",
"bytes": "6579"
},
{
"name": "ApacheConf",
"bytes": "12482"
},
{
"name": "AppleScript",
"bytes": "421"
},
{
"name": "Assembly",
"bytes": "1093261"
},
{
"name": "AutoHotkey",
"bytes": "3733"
},
{
"name": "AutoIt",
"bytes": "667"
},
{
"name": "Awk",
"bytes": "63276"
},
{
"name": "Batchfile",
"bytes": "147828"
},
{
"name": "BlitzBasic",
"bytes": "185102"
},
{
"name": "BlitzMax",
"bytes": "2387"
},
{
"name": "Boo",
"bytes": "1111"
},
{
"name": "Bro",
"bytes": "7337"
},
{
"name": "C",
"bytes": "108397183"
},
{
"name": "C#",
"bytes": "156749"
},
{
"name": "C++",
"bytes": "13535833"
},
{
"name": "CLIPS",
"bytes": "6933"
},
{
"name": "CMake",
"bytes": "12441"
},
{
"name": "COBOL",
"bytes": "114812"
},
{
"name": "CSS",
"bytes": "430375"
},
{
"name": "Ceylon",
"bytes": "1387"
},
{
"name": "Chapel",
"bytes": "4366"
},
{
"name": "Cirru",
"bytes": "2574"
},
{
"name": "Clean",
"bytes": "9679"
},
{
"name": "Clojure",
"bytes": "23871"
},
{
"name": "CoffeeScript",
"bytes": "20149"
},
{
"name": "ColdFusion",
"bytes": "9006"
},
{
"name": "Common Lisp",
"bytes": "49017"
},
{
"name": "Coq",
"bytes": "66"
},
{
"name": "Cucumber",
"bytes": "390"
},
{
"name": "Cuda",
"bytes": "776"
},
{
"name": "D",
"bytes": "7556"
},
{
"name": "DIGITAL Command Language",
"bytes": "425938"
},
{
"name": "DTrace",
"bytes": "6706"
},
{
"name": "Dart",
"bytes": "591"
},
{
"name": "Dylan",
"bytes": "6343"
},
{
"name": "Ecl",
"bytes": "2599"
},
{
"name": "Eiffel",
"bytes": "2145"
},
{
"name": "Elixir",
"bytes": "4340"
},
{
"name": "Emacs Lisp",
"bytes": "18303"
},
{
"name": "Erlang",
"bytes": "5746"
},
{
"name": "F#",
"bytes": "19156"
},
{
"name": "FORTRAN",
"bytes": "38458"
},
{
"name": "Factor",
"bytes": "10194"
},
{
"name": "Fancy",
"bytes": "2581"
},
{
"name": "Fantom",
"bytes": "25331"
},
{
"name": "GAP",
"bytes": "29880"
},
{
"name": "GLSL",
"bytes": "450"
},
{
"name": "Gnuplot",
"bytes": "11501"
},
{
"name": "Go",
"bytes": "5444"
},
{
"name": "Golo",
"bytes": "1649"
},
{
"name": "Gosu",
"bytes": "2853"
},
{
"name": "Groff",
"bytes": "3458639"
},
{
"name": "Groovy",
"bytes": "2586"
},
{
"name": "HTML",
"bytes": "92126540"
},
{
"name": "Haskell",
"bytes": "49593"
},
{
"name": "Haxe",
"bytes": "16812"
},
{
"name": "Hy",
"bytes": "7237"
},
{
"name": "IDL",
"bytes": "2098"
},
{
"name": "Idris",
"bytes": "2771"
},
{
"name": "Inform 7",
"bytes": "1944"
},
{
"name": "Inno Setup",
"bytes": "18796"
},
{
"name": "Ioke",
"bytes": "469"
},
{
"name": "Isabelle",
"bytes": "21392"
},
{
"name": "Jasmin",
"bytes": "9428"
},
{
"name": "Java",
"bytes": "4040623"
},
{
"name": "JavaScript",
"bytes": "223927"
},
{
"name": "Julia",
"bytes": "27687"
},
{
"name": "KiCad",
"bytes": "475"
},
{
"name": "Kotlin",
"bytes": "971"
},
{
"name": "LSL",
"bytes": "160"
},
{
"name": "Lasso",
"bytes": "18650"
},
{
"name": "Lean",
"bytes": "6921"
},
{
"name": "Limbo",
"bytes": "9891"
},
{
"name": "Liquid",
"bytes": "862"
},
{
"name": "LiveScript",
"bytes": "972"
},
{
"name": "Logos",
"bytes": "19509"
},
{
"name": "Logtalk",
"bytes": "7260"
},
{
"name": "Lua",
"bytes": "8677"
},
{
"name": "Makefile",
"bytes": "2053844"
},
{
"name": "Mask",
"bytes": "815"
},
{
"name": "Mathematica",
"bytes": "191"
},
{
"name": "Max",
"bytes": "296"
},
{
"name": "Modelica",
"bytes": "6213"
},
{
"name": "Modula-2",
"bytes": "23838"
},
{
"name": "Module Management System",
"bytes": "14798"
},
{
"name": "Monkey",
"bytes": "2587"
},
{
"name": "Moocode",
"bytes": "3343"
},
{
"name": "MoonScript",
"bytes": "14862"
},
{
"name": "Myghty",
"bytes": "3939"
},
{
"name": "NSIS",
"bytes": "7663"
},
{
"name": "Nemerle",
"bytes": "1517"
},
{
"name": "NewLisp",
"bytes": "42726"
},
{
"name": "Nimrod",
"bytes": "37191"
},
{
"name": "Nit",
"bytes": "55581"
},
{
"name": "Nix",
"bytes": "2448"
},
{
"name": "OCaml",
"bytes": "42416"
},
{
"name": "Objective-C",
"bytes": "104883"
},
{
"name": "Objective-J",
"bytes": "15340"
},
{
"name": "Opa",
"bytes": "172"
},
{
"name": "OpenEdge ABL",
"bytes": "49943"
},
{
"name": "PAWN",
"bytes": "6555"
},
{
"name": "PHP",
"bytes": "68611"
},
{
"name": "PLSQL",
"bytes": "45772"
},
{
"name": "Pan",
"bytes": "1241"
},
{
"name": "Pascal",
"bytes": "349743"
},
{
"name": "Perl",
"bytes": "5931502"
},
{
"name": "Perl6",
"bytes": "113623"
},
{
"name": "PigLatin",
"bytes": "6657"
},
{
"name": "Pike",
"bytes": "8479"
},
{
"name": "PostScript",
"bytes": "18216"
},
{
"name": "PowerShell",
"bytes": "14236"
},
{
"name": "Prolog",
"bytes": "43750"
},
{
"name": "Protocol Buffer",
"bytes": "3401"
},
{
"name": "Puppet",
"bytes": "130"
},
{
"name": "Python",
"bytes": "122886305"
},
{
"name": "QML",
"bytes": "3912"
},
{
"name": "R",
"bytes": "49247"
},
{
"name": "Racket",
"bytes": "11341"
},
{
"name": "Rebol",
"bytes": "17708"
},
{
"name": "Red",
"bytes": "10536"
},
{
"name": "Redcode",
"bytes": "830"
},
{
"name": "Ruby",
"bytes": "91403"
},
{
"name": "Rust",
"bytes": "6788"
},
{
"name": "SAS",
"bytes": "15603"
},
{
"name": "SaltStack",
"bytes": "1040"
},
{
"name": "Scala",
"bytes": "730"
},
{
"name": "Scheme",
"bytes": "50346"
},
{
"name": "Scilab",
"bytes": "943"
},
{
"name": "Shell",
"bytes": "2925518"
},
{
"name": "ShellSession",
"bytes": "320"
},
{
"name": "Smali",
"bytes": "832"
},
{
"name": "Smalltalk",
"bytes": "158636"
},
{
"name": "Smarty",
"bytes": "523"
},
{
"name": "SourcePawn",
"bytes": "130"
},
{
"name": "Standard ML",
"bytes": "36869"
},
{
"name": "Swift",
"bytes": "2035"
},
{
"name": "SystemVerilog",
"bytes": "265"
},
{
"name": "Tcl",
"bytes": "6077233"
},
{
"name": "TeX",
"bytes": "487999"
},
{
"name": "Tea",
"bytes": "391"
},
{
"name": "TypeScript",
"bytes": "535"
},
{
"name": "VHDL",
"bytes": "4446"
},
{
"name": "VimL",
"bytes": "32053"
},
{
"name": "Visual Basic",
"bytes": "19441"
},
{
"name": "XQuery",
"bytes": "4289"
},
{
"name": "XS",
"bytes": "178055"
},
{
"name": "XSLT",
"bytes": "1995174"
},
{
"name": "Xtend",
"bytes": "727"
},
{
"name": "Yacc",
"bytes": "25665"
},
{
"name": "Zephir",
"bytes": "485"
},
{
"name": "eC",
"bytes": "31545"
},
{
"name": "mupad",
"bytes": "2442"
},
{
"name": "nesC",
"bytes": "23697"
},
{
"name": "xBase",
"bytes": "3349"
}
],
"symlink_target": ""
}
|
"""Transformer pass that removes empty moments from a circuit."""
from typing import Optional, TYPE_CHECKING
from cirq.transformers import transformer_api, transformer_primitives
if TYPE_CHECKING:
import cirq
@transformer_api.transformer
def drop_empty_moments(
circuit: 'cirq.AbstractCircuit', *, context: Optional['cirq.TransformerContext'] = None
) -> 'cirq.Circuit':
"""Removes empty moments from a circuit.
Args:
circuit: Input circuit to transform.
context: `cirq.TransformerContext` storing common configurable options for transformers.
Returns:
Copy of the transformed input circuit.
"""
if context is None:
context = transformer_api.TransformerContext()
return transformer_primitives.map_moments(
        circuit.unfreeze(copy=False),
        lambda m, _: m if m.operations else [],
deep=context.deep,
tags_to_ignore=context.tags_to_ignore,
)
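# A minimal usage sketch (assumes a working cirq installation; the circuit is
# illustrative only):
#   import cirq
#   q = cirq.LineQubit(0)
#   circuit = cirq.Circuit(
#       [cirq.Moment([cirq.X(q)]), cirq.Moment(), cirq.Moment([cirq.X(q)])]
#   )
#   cleaned = cirq.drop_empty_moments(circuit)
#   assert len(cleaned) == 2  # the empty middle moment is removed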
|
{
"content_hash": "202e8414515ab9094ff11d71681be014",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 98,
"avg_line_length": 31.066666666666666,
"alnum_prop": 0.6920600858369099,
"repo_name": "quantumlib/Cirq",
"id": "889dfe842530099a31c31d3dd8fad45e8b0d0a05",
"size": "1517",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cirq-core/cirq/transformers/drop_empty_moments.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "4616"
},
{
"name": "HTML",
"bytes": "262"
},
{
"name": "JavaScript",
"bytes": "660"
},
{
"name": "Jupyter Notebook",
"bytes": "672675"
},
{
"name": "Makefile",
"bytes": "634"
},
{
"name": "Python",
"bytes": "8643017"
},
{
"name": "Scilab",
"bytes": "735"
},
{
"name": "Shell",
"bytes": "64230"
},
{
"name": "TypeScript",
"bytes": "91766"
}
],
"symlink_target": ""
}
|
"""A menu item that opens a submenu."""
from typing import TYPE_CHECKING, Any, Optional
from cursesmenu.items.menu_item import MenuItem
if TYPE_CHECKING:
from cursesmenu.curses_menu import CursesMenu
else:
CursesMenu = Any
class SubmenuItem(MenuItem):
"""
A menu item that opens a submenu.
:param text: The text of the item
:param submenu: A CursesMenu to be displayed when the item is selected
:param menu: The menu that this item belongs to
    :param should_exit: Whether the menu will exit when this item is selected
    :param override_index: Optional text shown in place of the item's generated index
    """
def __init__(
self,
text: str,
submenu: Optional[CursesMenu] = None,
menu: Optional[CursesMenu] = None,
should_exit: bool = False,
override_index: Optional[str] = None,
):
"""Initialize the item."""
self._submenu: Optional[CursesMenu] = submenu
self._menu: Optional[CursesMenu] = menu
if self._submenu:
self._submenu.parent = menu
super(SubmenuItem, self).__init__(
text=text,
menu=menu,
should_exit=should_exit,
override_index=override_index,
)
@property
def submenu(self) -> Optional[CursesMenu]:
"""Get the submenu associated with this item."""
return self._submenu
@submenu.setter
def submenu(self, submenu: Optional[CursesMenu]) -> None:
"""Set the submenu and update its parent."""
self._submenu = submenu
if self._submenu is not None:
self._submenu.parent = self._menu
@property # type: ignore[override]
def menu(self) -> Optional[CursesMenu]: # type: ignore[override]
"""Get the menu that this item belongs to."""
return self._menu
@menu.setter
def menu(self, menu: Optional[CursesMenu]) -> None:
"""Set the menu for the item and update the submenu's parent."""
self._menu = menu
if self._submenu is not None:
self._submenu.parent = menu
def set_up(self) -> None:
"""Set the screen up for the submenu."""
assert self.menu is not None
self.menu.pause()
self.menu.clear_screen()
def action(self) -> None:
"""Start the submenu."""
assert self.submenu is not None
self.submenu.start()
def clean_up(self) -> None:
"""Block until the submenu is done and then return to the parent."""
assert self.menu is not None
assert self.submenu is not None
self.submenu.join()
self.submenu.clear_screen()
self.menu.resume()
def get_return(self) -> Any:
"""Get the returned value from the submenu."""
if self.submenu is not None:
return self.submenu.returned_value
return None
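# A minimal usage sketch (assumes the curses-menu package's CursesMenu with its
# append_item()/show() API; the menu titles are illustrative):
#   from cursesmenu import CursesMenu
#   main_menu = CursesMenu("Main Menu")
#   settings = CursesMenu("Settings")
#   main_menu.append_item(SubmenuItem("Open settings", submenu=settings, menu=main_menu))
#   main_menu.show()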
|
{
"content_hash": "7b4aebec0b0e1cca194ef60973639239",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 77,
"avg_line_length": 31.066666666666666,
"alnum_prop": 0.6051502145922747,
"repo_name": "pmbarrett314/curses-menu",
"id": "37a779cd170c218a6a7765eb7c7721f8e4e0b002",
"size": "2796",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cursesmenu/items/submenu_item.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "88730"
}
],
"symlink_target": ""
}
|
"""
Organize a mess of dicoms by exam and series.
This is non-destructive regardless of the operation.
Example
-------
Sort ./mess/of/dicoms into ./sort/path and create a tarfile of each series in ./tar/path:
.. code-block:: bash
    dicomsort.py tarsort ./mess/of/dicoms ./sort/path ./tar/path
Get usage help:
.. code-block:: bash
    $ dicomsort.py -h
"""
import os
import json
import time
import dicom
import hashlib
import tarfile
import argparse
def create_archive(path, content, arcname, **kwargs):
def add_to_archive(archive, content, arcname):
archive.add(content, arcname, recursive=False)
if os.path.isdir(content):
for fn in sorted(os.listdir(content), key=lambda fn: not fn.endswith('.json')):
add_to_archive(archive, os.path.join(content, fn), os.path.join(arcname, fn))
with tarfile.open(path, 'w:gz', **kwargs) as archive:
add_to_archive(archive, content, arcname)
def write_json_file(path, json_document):
with open(path, 'w') as json_file:
json.dump(json_document, json_file)
def checksum(path):
hash_ = hashlib.sha1()
with open(path, 'rb') as fd:
for chunk in iter(lambda: fd.read(1048577 * hash_.block_size), ''):
hash_.update(chunk)
return hash_.digest()
def sort(args):
if not os.path.isdir(args.sort_path):
os.makedirs(args.sort_path)
if not os.access(args.sort_path, os.W_OK):
print 'error: sort_path is not a writable directory'
files = []
print 'inspecting %s' % args.path
for dirpath, dirnames, filenames in os.walk(args.path):
for filepath in [dirpath + '/' + fn for fn in filenames if not fn.startswith('.')]:
if not os.path.islink(filepath):
files.append(filepath)
file_cnt = len(files)
cnt_width = len(str(file_cnt))
print 'found %d files to sort (ignoring symlinks and dotfiles)' % file_cnt
time.sleep(2)
for i, filepath in enumerate(files):
if args.verbose:
print '%*d/%d' % (cnt_width, i+1, file_cnt),
try:
dcm = dicom.read_file(filepath, stop_before_pixels=True)
except:
print 'not a DICOM file: %s' % filepath
else:
if dcm.get('Manufacturer').upper() != 'SIEMENS':
acq_name = '%s_%s_%s_dicoms' % (dcm.StudyID, dcm.SeriesNumber, int(dcm.get('AcquisitionNumber', 1)))
else:
acq_name = '%s_%s_dicoms' % (dcm.StudyID, dcm.SeriesNumber)
acq_path = os.path.join(args.sort_path, dcm.StudyInstanceUID, acq_name)
if not os.path.isdir(acq_path):
os.makedirs(acq_path)
new_filepath = os.path.join(acq_path, os.path.basename(filepath))
if not os.path.isfile(new_filepath):
if args.verbose:
print 'sorting %s' % filepath
os.rename(filepath, new_filepath)
elif checksum(filepath) == checksum(new_filepath):
print 'deleting duplicate %s' % filepath
os.remove(filepath)
else:
print 'retaining non-identical duplicate %s of %s' % (filepath, new_filepath)
def tar(args):
if not os.path.isdir(args.tar_path):
os.makedirs(args.tar_path)
if not os.access(args.tar_path, os.W_OK):
print 'error: tar_path is not a writable directory'
dirs = []
print 'inspecting %s' % args.sort_path
for dirpath, dirnames, filenames in os.walk(args.sort_path):
if not dirnames and not os.path.basename(dirpath).startswith('.') and not os.path.islink(dirpath):
dirs.append(dirpath)
dir_cnt = len(dirs)
cnt_width = len(str(dir_cnt))
print 'found %d directories to compress (ignoring symlinks and dotfiles)' % dir_cnt
time.sleep(2)
metadata = {'filetype': 'dicom'}
if args.group:
if not args.project:
args.project='unknown'
overwrite = {'overwrite': { 'group_name': args.group, 'project_name': args.project }}
metadata.update(overwrite)
for i, dirpath in enumerate(dirs):
dirname = os.path.basename(dirpath)
dir_relpath = os.path.relpath(dirpath, args.sort_path)
if args.verbose:
print '%*d/%d compressing %s' % (cnt_width, i+1, dir_cnt, dir_relpath)
write_json_file(dirpath + '/metadata.json', metadata)
create_archive(os.path.join(args.tar_path, dir_relpath.replace('/', '_') + '.tgz'), dirpath, dirname, compresslevel=6)
def tarsort(args):
sort(args)
print
tar(args)
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(help='operation to perform')
sort_parser = subparsers.add_parser(
name='sort',
    help='sort all dicom files in a directory tree',
)
sort_parser.add_argument('path', help='input path of unsorted data')
sort_parser.add_argument('sort_path', help='output path for sorted data')
sort_parser.add_argument('-v','--verbose', action='store_true', help='provide stream of files as they are sorted')
sort_parser.set_defaults(func=sort)
tar_parser = subparsers.add_parser(
name='tar',
help='tar a sorted directory tree of dicoms',
)
tar_parser.add_argument('sort_path', help='input path of sorted data')
tar_parser.add_argument('tar_path', help='output path for tar\'ed data')
tar_parser.add_argument('--group', type=str, help='name of group to sort data into')
tar_parser.add_argument('--project', type=str, help='name of project to sort data into')
tar_parser.add_argument('-v','--verbose', action='store_true', help='provide stream of tar files as they are tar\'d' )
tar_parser.set_defaults(func=tar)
tarsort_parser = subparsers.add_parser(
name='tarsort',
    help='sort all dicom files in a directory tree and tar the result',
)
tarsort_parser.add_argument('path', help='input path of unsorted data')
tarsort_parser.add_argument('sort_path', help='path for sorted data (written by sort, read by tar)')
tarsort_parser.add_argument('tar_path', help='output path for tar\'ed data')
tarsort_parser.add_argument('--group', type=str, help='name of group to sort data into')
tarsort_parser.add_argument('--project', type=str, help='name of project to sort data into')
tarsort_parser.add_argument('-v','--verbose', action='store_true', help='provide stream of files as they are sorted')
tarsort_parser.set_defaults(func=tarsort)
args = parser.parse_args()
args.func(args)
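# Example invocations (directory paths are hypothetical):
#   dicomsort.py sort ./incoming ./sorted -v
#   dicomsort.py tar ./sorted ./tars --group mylab --project mystudy
#   dicomsort.py tarsort ./incoming ./sorted ./tars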
|
{
"content_hash": "fec5bffc6b7d0919cec7979417c14112",
"timestamp": "",
"source": "github",
"line_count": 174,
"max_line_length": 126,
"avg_line_length": 36.9080459770115,
"alnum_prop": 0.6431018374338212,
"repo_name": "scitran/utilities",
"id": "73e60f4af32325e1fbc987597bdfee1d1f29d822",
"size": "6471",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "dicomsort.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "57736"
}
],
"symlink_target": ""
}
|
import os
def createHeaderDTOEntity(entity, directory):
fileName = entity.name + 'DTO.h'
file = open(os.path.join(directory, fileName), 'w+')
file.write('#import <CoreData/CoreData.h>')
file.write('\n')
for r in entity.relationships:
file.write('@class {en}DTO;'.format(en = r.entity.name))
file.write('\n')
file.write('\n')
file.write('@interface {className}DTO : NSObject'.format(className = entity.name))
file.write('\n')
file.write('@property (nonatomic, strong) NSManagedObjectID *objectID;\n')
for field in entity.fields:
type = None
mutateAttr = None
if field.type == 'string':
type = 'NSString'
mutateAttr = 'strong'
if field.type == 'float':
type = 'float'
mutateAttr = 'assign'
if field.type == 'boolean':
type = 'BOOL'
mutateAttr = 'assign'
if field.type == 'date':
type = 'NSDate'
mutateAttr = 'strong'
if field.type == 'double':
type = 'double'
mutateAttr = 'assign'
if field.type == 'data':
type = 'NSData'
mutateAttr = 'strong'
if field.type == 'integer32':
type = 'int32_t'
mutateAttr = 'assign'
if field.type == 'integer64':
type = 'int64_t'
mutateAttr = 'assign'
        if type is None:
            print('Bad type "{t}"'.format(t=field.type))
pointer = '*' if (mutateAttr == 'strong') else ''
file.write('@property (nonatomic, {ma}) {t} {p}{n};'.format(ma = mutateAttr, t = type, p = pointer, n = field.name))
file.write('\n')
for r in entity.relationships:
if r.type == 'toMany':
file.write('@property (nonatomic, strong) NSArray *{name};'.format(name = r.name))
if r.type == 'toOne':
file.write('@property (nonatomic, strong) {e}DTO *{n};'.format(e = r.entity.name, n = r.name))
file.write('\n')
file.write('@end')
file.write('\n')
file.close()
def createSourceFileDTOEntity(entity, directory):
fileName = entity.name + 'DTO.m'
file = open(os.path.join(directory, fileName), 'w+')
file.write('#import "{className}DTO.h"'.format(className = entity.name))
file.write('\n')
file.write('\n')
file.write('@implementation {className}DTO'.format(className = entity.name))
file.write('\n')
file.write('@end')
file.write('\n')
file.close()
def createDTOEntities(entities, directory):
if not os.path.exists(directory):
os.makedirs(directory)
for entity in entities:
createHeaderDTOEntity(entity, directory)
createSourceFileDTOEntity(entity, directory)
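# A usage sketch; the _Field/_Entity classes below are hypothetical stand-ins
# for whatever the schema parser produces, since the generator only relies on
# the attributes accessed above:
#   class _Field(object):
#       def __init__(self, name, type):
#           self.name, self.type = name, type
#   class _Entity(object):
#       def __init__(self, name, fields, relationships=()):
#           self.name, self.fields, self.relationships = name, fields, list(relationships)
#   person = _Entity('Person', [_Field('name', 'string'), _Field('age', 'integer32')])
#   createDTOEntities([person], './generated')  # writes PersonDTO.h and PersonDTO.m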
|
{
"content_hash": "01f3fb4249fb529d5e298cadab1c8c3a",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 124,
"avg_line_length": 34.370370370370374,
"alnum_prop": 0.5589080459770115,
"repo_name": "denmorozov/DataTransferORM",
"id": "c8d76f8f1033f29debcc1699af53c259cee2b636",
"size": "2784",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ORM-source/ORM/schema_parser/create_dto_entities.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Objective-C",
"bytes": "21950"
},
{
"name": "Python",
"bytes": "40189"
}
],
"symlink_target": ""
}
|
import unittest
from tests import testlib
from splunklib import results
class TestCSC(testlib.SDKTestCase):
def test_eventing_app(self):
app_name = "eventing_app"
self.assertTrue(app_name in self.service.apps, msg="%s is not installed." % app_name)
# Fetch the app
app = self.service.apps[app_name]
app.refresh()
# Extract app info
access = app.access
content = app.content
state = app.state
# App info assertions
self.assertEqual(access.app, "system")
self.assertEqual(access.can_change_perms, "1")
self.assertEqual(access.can_list, "1")
self.assertEqual(access.can_share_app, "1")
self.assertEqual(access.can_share_global, "1")
self.assertEqual(access.can_share_user, "0")
self.assertEqual(access.can_write, "1")
self.assertEqual(access.modifiable, "1")
self.assertEqual(access.owner, "nobody")
self.assertEqual(access.sharing, "app")
self.assertEqual(access.perms.read, ['*'])
self.assertEqual(access.perms.write, ['admin', 'power'])
self.assertEqual(access.removable, "0")
self.assertEqual(content.author, "Splunk")
self.assertEqual(content.configured, "0")
self.assertEqual(content.description, "Eventing custom search commands example")
self.assertEqual(content.label, "Eventing App")
self.assertEqual(content.version, "1.0.0")
self.assertEqual(content.visible, "1")
self.assertEqual(state.title, "eventing_app")
jobs = self.service.jobs
stream = jobs.oneshot('search index="_internal" | head 4000 | eventingcsc status=200 | head 10',
output_mode='json')
result = results.JSONResultsReader(stream)
ds = list(result)
self.assertEqual(result.is_preview, False)
self.assertTrue(isinstance(ds[0], (dict, results.Message)))
nonmessages = [d for d in ds if isinstance(d, dict)]
self.assertTrue(len(nonmessages) <= 10)
def test_generating_app(self):
app_name = "generating_app"
self.assertTrue(app_name in self.service.apps, msg="%s is not installed." % app_name)
# Fetch the app
app = self.service.apps[app_name]
app.refresh()
# Extract app info
access = app.access
content = app.content
state = app.state
# App info assertions
self.assertEqual(access.app, "system")
self.assertEqual(access.can_change_perms, "1")
self.assertEqual(access.can_list, "1")
self.assertEqual(access.can_share_app, "1")
self.assertEqual(access.can_share_global, "1")
self.assertEqual(access.can_share_user, "0")
self.assertEqual(access.can_write, "1")
self.assertEqual(access.modifiable, "1")
self.assertEqual(access.owner, "nobody")
self.assertEqual(access.sharing, "app")
self.assertEqual(access.perms.read, ['*'])
self.assertEqual(access.perms.write, ['admin', 'power'])
self.assertEqual(access.removable, "0")
self.assertEqual(content.author, "Splunk")
self.assertEqual(content.configured, "0")
self.assertEqual(content.description, "Generating custom search commands example")
self.assertEqual(content.label, "Generating App")
self.assertEqual(content.version, "1.0.0")
self.assertEqual(content.visible, "1")
self.assertEqual(state.title, "generating_app")
jobs = self.service.jobs
stream = jobs.oneshot('| generatingcsc count=4', output_mode='json')
result = results.JSONResultsReader(stream)
ds = list(result)
self.assertTrue(len(ds) == 4)
def test_reporting_app(self):
app_name = "reporting_app"
self.assertTrue(app_name in self.service.apps, msg="%s is not installed." % app_name)
# Fetch the app
app = self.service.apps[app_name]
app.refresh()
# Extract app info
access = app.access
content = app.content
state = app.state
# App info assertions
self.assertEqual(access.app, "system")
self.assertEqual(access.can_change_perms, "1")
self.assertEqual(access.can_list, "1")
self.assertEqual(access.can_share_app, "1")
self.assertEqual(access.can_share_global, "1")
self.assertEqual(access.can_share_user, "0")
self.assertEqual(access.can_write, "1")
self.assertEqual(access.modifiable, "1")
self.assertEqual(access.owner, "nobody")
self.assertEqual(access.sharing, "app")
self.assertEqual(access.perms.read, ['*'])
self.assertEqual(access.perms.write, ['admin', 'power'])
self.assertEqual(access.removable, "0")
self.assertEqual(content.author, "Splunk")
self.assertEqual(content.configured, "0")
self.assertEqual(content.description, "Reporting custom search commands example")
self.assertEqual(content.label, "Reporting App")
self.assertEqual(content.version, "1.0.0")
self.assertEqual(content.visible, "1")
self.assertEqual(state.title, "reporting_app")
jobs = self.service.jobs
# All above 150
stream = jobs.oneshot(
'| makeresults count=10 | eval math=100, eng=100, cs=100 | reportingcsc cutoff=150 math eng cs',
output_mode='json')
result = results.JSONResultsReader(stream)
ds = list(result)
self.assertTrue(len(ds) > 0)
self.assertTrue(ds[0].values() is not None)
self.assertTrue(len(ds[0].values()) > 0)
no_of_students = int(list(ds[0].values())[0])
self.assertTrue(no_of_students == 10)
# All below 150
stream = jobs.oneshot(
'| makeresults count=10 | eval math=45, eng=45, cs=45 | reportingcsc cutoff=150 math eng cs',
output_mode='json')
result = results.JSONResultsReader(stream)
ds = list(result)
self.assertTrue(len(ds) > 0)
self.assertTrue(ds[0].values() is not None)
self.assertTrue(len(ds[0].values()) > 0)
no_of_students = int(list(ds[0].values())[0])
self.assertTrue(no_of_students == 0)
def test_streaming_app(self):
app_name = "streaming_app"
self.assertTrue(app_name in self.service.apps, msg="%s is not installed." % app_name)
# Fetch the app
app = self.service.apps[app_name]
app.refresh()
# Extract app info
access = app.access
content = app.content
state = app.state
# App info assertions
self.assertEqual(access.app, "system")
self.assertEqual(access.can_change_perms, "1")
self.assertEqual(access.can_list, "1")
self.assertEqual(access.can_share_app, "1")
self.assertEqual(access.can_share_global, "1")
self.assertEqual(access.can_share_user, "0")
self.assertEqual(access.can_write, "1")
self.assertEqual(access.modifiable, "1")
self.assertEqual(access.owner, "nobody")
self.assertEqual(access.sharing, "app")
self.assertEqual(access.perms.read, ['*'])
self.assertEqual(access.perms.write, ['admin', 'power'])
self.assertEqual(access.removable, "0")
self.assertEqual(content.author, "Splunk")
self.assertEqual(content.configured, "0")
self.assertEqual(content.description, "Streaming custom search commands example")
self.assertEqual(content.label, "Streaming App")
self.assertEqual(content.version, "1.0.0")
self.assertEqual(content.visible, "1")
self.assertEqual(state.title, "streaming_app")
jobs = self.service.jobs
stream = jobs.oneshot('| makeresults count=5 | eval celsius = 35 | streamingcsc', output_mode='json')
result = results.JSONResultsReader(stream)
ds = list(result)
self.assertTrue(len(ds) == 5)
self.assertTrue('_time' in ds[0])
self.assertTrue('celsius' in ds[0])
self.assertTrue('fahrenheit' in ds[0])
self.assertTrue(ds[0]['celsius'] == '35')
self.assertTrue(ds[0]['fahrenheit'] == '95.0')
self.assertTrue(len(ds) == 5)
if __name__ == "__main__":
unittest.main()
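# These tests assume a live Splunk instance with the eventing_app, generating_app,
# reporting_app, and streaming_app example apps installed; from the SDK checkout
# they can be run with, for example:
#   python -m unittest tests.searchcommands.test_csc_apps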
|
{
"content_hash": "9de8e8faeb345477ccb2415c0295b2c0",
"timestamp": "",
"source": "github",
"line_count": 223,
"max_line_length": 109,
"avg_line_length": 37.412556053811656,
"alnum_prop": 0.6201606136881218,
"repo_name": "splunk/splunk-sdk-python",
"id": "b15574d1c8e1ca079d56f49cdff76629625d953b",
"size": "8949",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/searchcommands/test_csc_apps.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "2203"
},
{
"name": "Python",
"bytes": "793887"
},
{
"name": "Shell",
"bytes": "475"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import re
import sys
from operator import lt, gt, eq, le, ge
from os.path import (
abspath,
dirname,
join,
)
from distutils.version import StrictVersion
from setuptools import (
Extension,
find_packages,
setup,
)
class LazyCythonizingList(list):
cythonized = False
def lazy_cythonize(self):
if self.cythonized:
return
self.cythonized = True
from Cython.Build import cythonize
from numpy import get_include
self[:] = cythonize(
[
Extension(*ext_args, include_dirs=[get_include()])
for ext_args in self
]
)
def __iter__(self):
self.lazy_cythonize()
return super(LazyCythonizingList, self).__iter__()
def __getitem__(self, num):
self.lazy_cythonize()
return super(LazyCythonizingList, self).__getitem__(num)
ext_modules = LazyCythonizingList([
('zipline.assets._assets', ['zipline/assets/_assets.pyx']),
('zipline.lib.adjusted_array', ['zipline/lib/adjusted_array.pyx']),
('zipline.lib.adjustment', ['zipline/lib/adjustment.pyx']),
(
'zipline.data.ffc.loaders._us_equity_pricing',
['zipline/data/ffc/loaders/_us_equity_pricing.pyx']
),
])
STR_TO_CMP = {
'<': lt,
'<=': le,
'=': eq,
'==': eq,
'>': gt,
'>=': ge,
}
def _filter_requirements(lines_iter):
for line in lines_iter:
line = line.strip()
if not line or line.startswith('#'):
continue
# pip install -r understands line with ;python_version<'3.0', but
# whatever happens inside extras_requires doesn't. Parse the line
# manually and conditionally add it if needed.
if ';' not in line:
yield line
continue
requirement, version_spec = line.split(';')
try:
groups = re.match(
"(python_version)([<>=]{1,2})(')([0-9\.]+)(')(.*)",
version_spec,
).groups()
comp = STR_TO_CMP[groups[1]]
version_spec = StrictVersion(groups[3])
except Exception as e:
# My kingdom for a 'raise from'!
raise AssertionError(
"Couldn't parse requirement line; '%s'\n"
"Error was:\n"
"%r" % (line, e)
)
sys_version = '.'.join(list(map(str, sys.version_info[:3])))
if comp(sys_version, version_spec):
yield requirement
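# Sketch of how _filter_requirements handles an environment-marker line (the
# requirement names here are hypothetical):
#   reqs = ["numpy==1.9.2", "unittest2==1.0.1;python_version<'3.0'"]
#   list(_filter_requirements(reqs))
#   -> ['numpy==1.9.2'] on Python 3; on Python 2.7 it also yields 'unittest2==1.0.1'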
def read_requirements(path):
"""
Read a requirements.txt file, expressed as a path relative to Zipline root.
"""
real_path = join(dirname(abspath(__file__)), path)
with open(real_path) as f:
return list(_filter_requirements(f.readlines()))
def install_requires():
return read_requirements('etc/requirements.txt')
def extras_requires():
dev_reqs = read_requirements('etc/requirements_dev.txt')
talib_reqs = ['TA-Lib==0.4.9']
return {
'dev': dev_reqs,
'talib': talib_reqs,
'all': dev_reqs + talib_reqs,
}
def module_requirements(requirements_path, module_names):
module_names = set(module_names)
found = set()
module_lines = []
parser = re.compile("([^=<>]+)([<=>]{1,2})(.*)")
for line in read_requirements(requirements_path):
match = parser.match(line)
if match is None:
raise AssertionError("Could not parse requirement: '%s'" % line)
groups = match.groups()
name = groups[0]
if name in module_names:
found.add(name)
module_lines.append(line)
if found != module_names:
        raise AssertionError(
            "No requirements found for %s." % (module_names - found)
        )
return module_lines
def pre_setup():
if not set(sys.argv) & {'install', 'develop', 'egg_info', 'bdist_wheel'}:
return
try:
import pip
if StrictVersion(pip.__version__) < StrictVersion('7.1.0'):
raise AssertionError(
"Zipline installation requires pip>=7.1.0, but your pip "
"version is {version}. \n"
"You can upgrade your pip with "
"'pip install --upgrade pip'.".format(
version=pip.__version__,
)
)
except ImportError:
raise AssertionError("Zipline installation requires pip")
required = ('Cython', 'numpy')
for line in module_requirements('etc/requirements.txt', required):
pip.main(['install', line])
pre_setup()
setup(
name='zipline',
version='0.8.0rc1',
description='A backtester for financial algorithms.',
author='Quantopian Inc.',
author_email='opensource@quantopian.com',
packages=find_packages('.', include=['zipline', 'zipline.*']),
ext_modules=ext_modules,
scripts=['scripts/run_algo.py'],
include_package_data=True,
license='Apache 2.0',
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Operating System :: OS Independent',
'Intended Audience :: Science/Research',
'Topic :: Office/Business :: Financial',
'Topic :: Scientific/Engineering :: Information Analysis',
'Topic :: System :: Distributed Computing',
],
install_requires=install_requires(),
extras_require=extras_requires(),
url="http://zipline.io"
)
|
{
"content_hash": "0822a4fe4202ca4fc88f3bb339a562c1",
"timestamp": "",
"source": "github",
"line_count": 200,
"max_line_length": 79,
"avg_line_length": 28.375,
"alnum_prop": 0.5709251101321586,
"repo_name": "wubr2000/zipline",
"id": "1bcca3596005fe892438306354161997ed5da3ac",
"size": "6279",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "564"
},
{
"name": "Emacs Lisp",
"bytes": "138"
},
{
"name": "Python",
"bytes": "1303856"
},
{
"name": "Shell",
"bytes": "4065"
}
],
"symlink_target": ""
}
|
import sys
import atrshmlog
result = atrshmlog.attach()
count = atrshmlog.get_prealloc_buffer_count()
print('prealloc count : ' + str(count) + ' : ')
oldid = atrshmlog.set_prealloc_buffer_count(42)
print('prealloc count : ' + str(oldid) + ' : ')
count = atrshmlog.get_prealloc_buffer_count()
print('prealloc count : ' + str(count) + ' : ')
oldid = atrshmlog.set_prealloc_buffer_count(128)
print('prealloc count : ' + str(oldid) + ' : ')
count = atrshmlog.get_prealloc_buffer_count()
print('prealloc count : ' + str(count) + ' : ')
print(' ')
exit(0)
# end of test
|
{
"content_hash": "ffa6e5e78357704401bf530f85683700",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 48,
"avg_line_length": 18.15625,
"alnum_prop": 0.6523235800344234,
"repo_name": "atrsoftgmbh/atrshmlog",
"id": "18a1184c4f40f2cbe3dfeff39137d707e03b07a2",
"size": "711",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/src/tests/t_prealloc_count.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "1262"
},
{
"name": "Batchfile",
"bytes": "15450"
},
{
"name": "C",
"bytes": "5425692"
},
{
"name": "C++",
"bytes": "183835"
},
{
"name": "CSS",
"bytes": "50850"
},
{
"name": "HTML",
"bytes": "451865"
},
{
"name": "Java",
"bytes": "896522"
},
{
"name": "JavaScript",
"bytes": "16280"
},
{
"name": "POV-Ray SDL",
"bytes": "1092"
},
{
"name": "Perl",
"bytes": "222533"
},
{
"name": "Python",
"bytes": "35540"
},
{
"name": "Roff",
"bytes": "350790"
},
{
"name": "Ruby",
"bytes": "33603"
},
{
"name": "Shell",
"bytes": "989385"
},
{
"name": "Tcl",
"bytes": "1071"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class HovertemplatesrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self, plotly_name="hovertemplatesrc", parent_name="funnelarea", **kwargs
):
super(HovertemplatesrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs,
)
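# A usage sketch; validators like this are normally instantiated by plotly's
# generated graph_objs machinery, and validate_coerce() is inherited from
# _plotly_utils.basevalidators:
#   v = HovertemplatesrcValidator()
#   v.validate_coerce("grid-column-id")  # returns the value when it is a valid src string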
|
{
"content_hash": "136469bf3ee58ee0055ffc9b0edc890b",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 80,
"avg_line_length": 33.69230769230769,
"alnum_prop": 0.6255707762557078,
"repo_name": "plotly/plotly.py",
"id": "c0d020c9efecde4b7ab1178f32d4dca3c42df42a",
"size": "438",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/funnelarea/_hovertemplatesrc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
}
|
try:
from my_func import print_hello
print_hello()
except ImportError as e:
print "Module my_func could not be found."
|
{
"content_hash": "95e9552ad693635cb803f048f903e9a9",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 43,
"avg_line_length": 24.6,
"alnum_prop": 0.7398373983739838,
"repo_name": "LinuxFlyGuy/PFNE",
"id": "6af3cf5994f052c1f3572a54a57c8ea25a31ddf9",
"size": "146",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "importtest.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "569624"
}
],
"symlink_target": ""
}
|