Columns: repo_name (string, 5–100 chars) | path (string, 4–231 chars) | language (1 class: Python) | license (15 classes) | size (int64, 6–947k) | score (float64, 0–0.34) | prefix (string, 0–8.16k chars) | middle (string, 3–512 chars) | suffix (string, 0–8.17k chars)

Each record below lists repo_name | path | language | license | size | score, followed by the file content (prefix, middle, and suffix).

rory/openstreetmap-remove-tags | osmrmtags/__init__.py | Python | agpl-3.0 | 1,564 | 0.004476

import sys

from imposm.parser import OSMParser
from osmwriter import OSMWriter
def rm_tags(tags, tags_to_keep):
new_tags = {}
for ok_tag in tags_to_keep:
if ok_tag in tags:
new_tags[ok_tag] = tags[ok_tag]
return new_tags
class TagRemover(object):
def __init__(self, output_writer, tags_to_keep):
self.output_writer = output_writer
self.tags_to_keep = tags_to_keep
def nodes(self, nodes):
for node in nodes:
id, tags, (lat, lon) = node
new_tags = rm_tags(tags, self.tags_to_keep)
self.output_writer.node(id, lat, lon, new_tags)
def ways(self, ways):
for way in ways:
id, tags, nodes = way
new_tags = rm_tags(tags, self.tags_to_keep)
if len(tags) > 0:
self.output_writer.way(id, new_tags, nodes)
def remove_tags(input_filename, output_fp, tags_to_keep, close_output_fp=True):
output_writer = OSMWriter(fp=output_fp)
remover = TagRemover(output_writer, tags_to_keep)
parser = OSMParser(ways_callback=remover.ways, nodes_callback=remover.nodes, concurrency=1)
parser.parse(input_filename)
output_writer.close(close_file=close_output_fp)
def main(argv=None):
argv = argv or sys.argv[1:]
input_filename, output_filename, tags_to_keep = argv
tags_to_keep = tags_to_keep.split(",")
with open(output_filename, "w") as output_fp:
remove_tags(input_filename, output_fp, tags_to_keep, close_output_fp=True)
if __name__ == '__main__':
main(sys.argv[1:])
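
A minimal usage sketch for the module above; the file names and tag list are placeholders, and imposm.parser and osmwriter are assumed to be installed:

# Keep only the 'name' and 'highway' tags. The with-block closes the file,
# so close_output_fp is passed as False here.
with open("filtered.osm", "w") as out_fp:
    remove_tags("input.osm", out_fp, ["name", "highway"], close_output_fp=False)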

bytedance/fedlearner | fedlearner/model/tree/trainer.py | Python | apache-2.0 | 21,533 | 0.000557

# Copyright 2020 The FedLearner Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
import os
import csv
import queue
import logging
import argparse
import traceback
import itertools
import numpy as np
import tensorflow.compat.v1 as tf
from fedlearner.common.argparse_util import str_as_bool
from fedlearner.trainer.bridge import Bridge
from fedlearner.model.tree.tree import BoostingTreeEnsamble
from fedlearner.model.tree.trainer_master_client import LocalTrainerMasterClient
from fedlearner.model.tree.trainer_master_client import DataBlockInfo
def create_argument_parser():
parser = argparse.ArgumentParser(
description='FedLearner Tree Model Trainer.')
parser.add_argument('role', type=str,
help="Role of this trainer in {'local', "
"'leader', 'follower'}")
parser.add_argument('--local-addr', type=str,
help='Listen address of the local bridge, ' \
'in [IP]:[PORT] format')
parser.add_argument('--peer-addr', type=str,
help='Address of peer\'s bridge, ' \
'in [IP]:[PORT] format')
parser.add_argument('--application-id', type=str, default=None,
help='application id on distributed ' \
'training.')
parser.add_argument('--worker-rank', type=int, default=0,
help='rank of the current worker')
parser.add_argument('--num-workers', type=int, default=1,
help='total number of workers')
parser.add_argument('--mode', type=str, default='train',
help='Running mode in train, test or eval.')
parser.add_argument('--data-path', type=str, default=None,
help='Path to data file.')
parser.add_argument('--validation-data-path', type=str, default=None,
help='Path to validation data file. ' \
'Only used in train mode.')
parser.add_argument('--no-data', type=str_as_bool,
default=False, const=True, nargs='?',
help='Run prediction without data.')
parser.add_argument('--file-ext', type=str, default='.csv',
help='File extension to use')
parser.add_argument('--file-type', type=str, default='csv',
help='input file type: csv or tfrecord')
parser.add_argument('--load-model-path',
type=str,
default=None,
help='Path load saved models.')
parser.add_argument('--export-path',
type=str,
default=None,
help='Path to save exported models.')
parser.add_argument('--checkpoint-path',
type=str,
default=None,
help='Path to save model checkpoints.')
parser.add_argument('--output-path',
type=str,
default=None,
help='Path to save prediction output.')
parser.add_argument('--verbosity',
type=int,
default=1,
help='Controls the amount of logs to print.')
parser.add_argument('--loss-type',
default='logistic',
choices=['logistic', 'mse'],
help='What loss to use for training.')
parser.add_argument('--learning-rate',
type=float,
default=0.3,
help='Learning rate (shrinkage).')
parser.add_argument('--max-iters',
type=int,
default=5,
help='Number of boosting iterations.')
parser.add_argument('--max-depth',
type=int,
default=3,
help='Max depth of decision trees.')
parser.add_argument('--l2-regularization',
type=float,
default=1.0,
help='L2 regularization parameter.')
parser.add_argument('--max-bins',
type=int,
default=33,
help='Max number of histogram bins.')
parser.add_argument('--num-parallel',
type=int,
default=1,
help='Number of parallel threads.')
parser.add_argument('--verify-example-ids',
type=str_as_bool,
default=False, const=True, nargs='?',
help='If set to true, the first column of the '
'data will be treated as example ids that '
'must match between leader and follower')
parser.add_argument('--ignore-fields',
type=str,
default='',
help='Ignore data fields by name')
parser.add_argument('--cat-fields',
type=str,
default='',
help='Field names of categorical features. Feature'
' values should be non-negative integers')
parser.add_argument('--use-streaming',
type=str_as_bool,
default=False, const=True, nargs='?',
help='Whether to use streaming transmit.')
parser.add_argument('--send-scores-to-follower',
type=str_as_bool,
default=False, const=True, nargs='?',
help='Whether to send prediction scores to follower.')
parser.add_argument('--send-metrics-to-follower',
type=str_as_bool,
default=False, const=True, nargs='?',
help='Whether to send metrics to follower.')
parser.add_argument('--enable-packing',
type=str_as_bool,
default=False, const=True, nargs='?',
help='Whether to enable packing grad and hess')
parser.add_argument('--label-field',
type=str,
default='label',
help='selected label name')
return parser
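# A quick sanity check of the parser defined above, wrapped in a helper so it
# does not run at import time. The helper name and flag values are
# illustrative only; argparse maps dashed flags to underscored attributes.
def _demo_parse_args():
    parser = create_argument_parser()
    args = parser.parse_args(['local', '--data-path', 'data/train.csv',
                              '--max-iters', '10', '--max-depth', '4'])
    assert args.role == 'local'
    assert args.max_iters == 10 and args.max_depth == 4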
def parse_tfrecord(record):
example = tf.train.Example()
example.ParseFromString(record)
parsed = {}
for key, value in example.features.feature.items():
kind = value.WhichOneof('kind')
if kind == 'float_list':
assert len(value.float_list.value) == 1, "Invalid tfrecord format"
parsed[key] = value.float_list.value[0]
elif kind == 'int64_list':
assert len(value.int64_list.value) == 1, "Invalid tfrecord format"
parsed[key] = value.int64_list.value[0]
elif kind == 'bytes_list':
assert len(value.bytes_list.value) == 1, "Invalid tfrecord format"
parsed[key] = value.bytes_list.value[0]
else:
raise ValueError("Invalid tfrecord format")
return parsed
def extract_field(field_names, field_name, required):
if field_name in field_names:
return []
assert not required, \
"Field %s is required but missing in data"%field_name
return None
def read_data(file_type, filename, require_example_ids, require_labels,
ignore_fields, cat_fields, label_field):
logging.debug('Reading data file from %s',

bfontaine/edt2ics | edt2ics/ical.py | Python | mit | 3,060 | 0.002941

#! /usr/bin/env python
# -*- coding: UTF-8 -*-
from datetime import date, datetime, timedelta
from icalendar import Calendar, Event, vRecur
import json
import os.path
from os.path import dirname
from uuid import uuid4
class iCalSchedule(object):
DAYS = ['MO', 'TU', 'WE', 'TH', 'FR', 'SA', 'SU']
def __init__(self, scraper, startdate=None, enddate=None):
if startdate is None or enddate is None:
dts, dte = self._get_dates(scraper.period, scraper.year,
scraper.semester)
if startdate is None:
startdate = dts
if enddate is None:
enddate = dte
self.startdate = startdate
# The 'DTEND' property is exclusive, we then must end one day later
self.enddate = enddate + timedelta(days=1)
self._first_weekdays = {} # cache
self._init_ical()
for ev in scraper.get_events():
self.add_event(ev)
def _init_ical(self):
cal = Calendar()
cal.add('version', '2.0')
cal.add('prodid', '-//edt2ics//bfontaine.net//')
cal.add('method', 'publish')
self.cal = cal
def _recur_params(self, wday):
return {
'freq': 'weekly',
'wkst': self.DAYS[0],
'byday': self.DAYS[wday],
'until': self.enddate,
}
def _get_first_weekday(self, day):
"""
Return the first date after ``self.startdate`` which is on the given
weekday (0=Monday, 1=Tuesday, etc)
"""
if day not in self._first_weekdays:
start_wd = self.startdate.weekday()
delta = (day - start_wd + 7) % 7
self._first_weekdays[day] = self.startdate + timedelta(days=delta)
return self._first_weekdays[day]
def _get_dates(self, period, year, semester):
source = os.path.join(dirname(__file__), 'dates.json')
with open(source, 'r') as f:
data = json.loads(f.read())
dates = data['dates'][period][str(semester)][year]
start, end = dates['start'], dates['end']
return self._str2date(start), self._str2date(end)
def _str2date(self, s):
return date(*map(lambda e: int(e, 10), s.split('-')))
def add_event(self, ev):
"""
Add a new recurring event to this schedule
"""
day = self._get_first_weekday(ev.day)
dtstart = datetime.combine(day, ev.tstart)
dtend = datetime.combine(day, ev.tend)
tz_params = {'tzid': 'Europe/Paris'}
iev = Event()
iev.add('uid', str(uuid4()))
iev.add('status', 'confirmed')
iev.add('dtstart', dtstart, parameters=tz_params)
iev.add('dtend', dtend, parameters=tz_params)
iev.add('rrule', vRecur(self._recur_params(ev.day)))
iev.add('summary', '%s %s' % (ev.type_, ev.title))
iev.add('location', ev.room)
iev.add('description', ev.description)
self.cal.add_component(iev)
def to_ical(self):
return self.cal.to_ical()
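
A standalone check of the weekday arithmetic used in _get_first_weekday above; the dates are arbitrary examples:

from datetime import date, timedelta

startdate = date(2014, 9, 3)               # a Wednesday, so weekday() == 2
delta = (0 - startdate.weekday() + 7) % 7  # days from startdate to the next Monday (0)
assert startdate + timedelta(days=delta) == date(2014, 9, 8)  # the following Monday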

vrutkovs/atomic-reactor | tests/plugins/test_compare_components.py | Python | bsd-3-clause | 4,488 | 0.000891

"""
Copyright (c) 2017 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from __future__ import print_function, unicode_literals
import os
import json
from flexmock import flexmock
from atomic_reactor.constants import (PLUGIN_FETCH_WORKER_METADATA_KEY,
PLUGIN_COMPARE_COMPONENTS_KEY)
from atomic_reactor.core import DockerTasker
from atomic_reactor.inner import DockerBuildWorkflow
from atomic_reactor.plugin import PostBuildPluginsRunner, PluginFailedException
from atomic_reactor.util import ImageName
from tests.constants import MOCK_SOURCE, TEST_IMAGE, INPUT_IMAGE, FILES
from tests.docker_mock import mock_docker
import pytest
class MockSource(object):
def __init__(self, tmpdir):
tmpdir = str(tmpdir)
self.dockerfile_path = os.path.join(tmpdir, 'Dockerfile')
self.path = tmpdir
def get_dockerfile_path(self):
return self.dockerfile_path, self.path
class MockInsideBuilder(object):
def __init__(self):
mock_docker()
self.tasker = DockerTasker()
self.base_image = ImageName(repo='fedora', tag='25')
self.image_id = 'image_id'
self.image = INPUT_IMAGE
self.df_path = 'df_path'
self.df_dir = 'df_dir'
def simplegen(x, y):
yield "some\u2018".encode('utf-8')
flexmock(self.tasker, build_image_from_path=simplegen)
def get_built_image_info(self):
return {'Id': 'some'}
def inspect_built_image(self):
return None
def ensure_not_built(self):
pass
def mock_workflow(tmpdir):
workflow = DockerBuildWorkflow(MOCK_SOURCE, TEST_IMAGE)
setattr(workflow, 'builder', MockInsideBuilder())
setattr(workflow, 'source', MockSource(tmpdir))
setattr(workflow.builder, 'source', MockSource(tmpdir))
setattr(workflow, 'postbuild_results', {})
return workflow
def mock_metadatas():
json_x_path = os.path.join(FILES, "example-koji-metadata-x86_64.json")
json_p_path = os.path.join(FILES, "example-koji-metadata-ppc64le.json")
with open(json_x_path) as json_data:
metadatas_x = json.load(json_data)
with open(json_p_path) as json_data:
metadatas_p = json.load(json_data)
# need to keep data separate otherwise deepcopy and edit 'arch'
worker_metadatas = {
'x86_64': metadatas_x,
'ppc64le': metadatas_p,
}
return worker_metadatas
@pytest.mark.parametrize('fail', [True, False])
def test_compare_components_plugin(tmpdir, fail):
workflow = mock_workflow(tmpdir)
worker_metadatas = mock_metadatas()
if fail:
# example data has 2 log items before component item hence output[2]
worker_metadatas['ppc64le']['output'][2]['components'][0]['version'] = "bacon"
workflow.postbuild_results[PLUGIN_FETCH_WORKER_METADATA_KEY] = worker_metadatas
runner = PostBuildPluginsRunner(
None,
workflow,
[{
'name': PLUGIN_COMPARE_COMPONENTS_KEY,
"args": {}
}]
)
if fail:
with pytest.raises(PluginFailedException):
runner.run()
else:
runner.run()
def test_no_components(tmpdir):
workflow = mock_workflow(tmpdir)
worker_metadatas = mock_metadatas()
# example data has 2 log items before component item hence output[2]
del worker_metadatas['x86_64']['output'][2]['components']
del worker_metadatas['ppc64le']['output'][2]['components']
workflow.postbuild_results[PLUGIN_FETCH_WORKER_METADATA_KEY] = worker_metadatas
runner = PostBuildPluginsRunner(
None,
workflow,
[{
'name': PLUGIN_COMPARE_COMPONENTS_KEY,
"args": {}
}]
)
with pytest.raises(PluginFailedException):
runner.run()
def test_bad_component_type(tmpdir):
workflow = mock_workflow(tmpdir)
worker_metadatas = mock_metadatas()
# example data has 2 log items before component item hence output[2]
worker_metadatas['x86_64']['output'][2]['components'][0]['type'] = "foo"
workflow.postbuild_results[PLUGIN_FETCH_WORKER_METADATA_KEY] = worker_metadatas
runner = PostBuildPluginsRunner(
None,
workflow,
[{
'name': PLUGIN_COMPARE_COMPONENTS_KEY,
"args": {}
}]
)
with pytest.raises(PluginFailedException):
runner.run()

Tayamarn/socorro | webapp-django/crashstats/crashstats/tests/test_pipelinecompilers.py | Python | mpl-2.0 | 1,590 | 0

import os
import shutil
import tempfile
from crashstats.base.tests.testbase import DjangoTestCase
from crashstats.crashstats.pipelinecompilers import GoogleAnalyticsCompiler
from crashstats import crashstats
SOURCE_FILE = os.path.join(
crashstats.__path__[0], # dir of the module
'static/crashstats/js/socorro/google_analytics.js'
)
assert os.path.isfile(SOURCE_FILE), SOURCE_FILE
class TestGoogleAnalyticsCompiler(DjangoTestCase):
def setUp(self):
super(TestGoogleAnalyticsCompiler, self).setUp()
self.tmp_static = tempfile.mkdtemp('static')
def tearDown(self):
super(TestGoogleAnalyticsCompiler, self).tearDown()
shutil.rmtree(self.tmp_static)
def test_match(self):
compiler = GoogleAnalyticsCompiler(False, None)
assert compiler.match_file('/foo/google_analytics.js')
assert not compiler.match_file('/foo/bar.js')
def test_compile(self):
compiler = GoogleAnalyticsCompiler(False, None)
with self.settings(GOOGLE_ANALYTICS_ID='UA-12345-6'):
outfile = os.path.join(self.tmp_static, 'google-analytics.min.js')
assert not os.path.isfile(outfile)
compiler.compile_file(SOURCE_FILE, outfile)
assert not os.path.isfile(outfile)
# Try again
compiler.compile_file(SOURCE_FILE, outfile, outdated=True)
assert os.path.isfile(outfile)
# now the outfile should have been created
with open(outfile) as f:
content = f.read()
assert 'UA-12345-6' in content

keithhamilton/blackmaas | bin/pilconvert.py | Python | bsd-3-clause | 2,387 | 0.002095

#!/Users/keith.hamilton/Documents/GitHub/keithhamilton/blackmaas/bin/python
#
# The Python Imaging Library.
# $Id$
#
# convert image files
#
# History:
# 0.1 96-04-20 fl Created
# 0.2 96-10-04 fl Use draft mode when converting images
# 0.3 96-12-30 fl Optimize output (PNG, JPEG)
# 0.4 97-01-18 fl Made optimize an option (PNG, JPEG)
# 0.5 98-12-30 fl Fixed -f option (from Anthony Baxter)
#
from __future__ import print_function
import site
import getopt, string, sys
from PIL import Image
def usage():
print("PIL Convert 0.5/1998-12-30 -- convert image files")
print("Usage: pilconvert [option] infile outfile")
print()
print("Options:")
print()
print(" -c <format> convert to format (default is given by extension)")
print()
print(" -g convert to greyscale")
print(" -p convert to palette image (using standard palette)")
print(" -r convert to rgb")
print()
print(" -o optimize output (trade speed for size)")
print(" -q <value> set compression quality (0-100, JPEG only)")
print()
print(" -f list supported file formats")
sys.exit(1)
if len(sys.argv) == 1:
usage()
try:
opt, argv = getopt.getopt(sys.argv[1:], "c:dfgopq:r")
except getopt.error as v:
print(v)
sys.exit(1)
format = None
convert = None
options = { }
for o, a in opt:
if o == "-f":
Image.init()
id = sorted(Image.ID)
print("Supported formats (* indicates output format):")
for i in id:
if i in Image.SAVE:
print(i+"*", end=' ')
else:
print(i, end=' ')
sys.exit(1)
elif o == "-c":
format = a
if o == "-g":
convert = "L"
elif o == "-p":
convert = "P"
elif o == "-r":
convert = "RGB"
elif o == "-o":
options["optimize"] = 1
elif o == "-q":
options["quality"] = string.atoi(a)
if len(argv) != 2:
usage()
try:
im = Image.open(argv[0])
if convert and im.mode != convert:
im.draft(convert, im.size)
im = im.convert(convert)
if format:
im.save(argv[1], format, **options)
else:
im.save(argv[1], **options)
except:
print("cannot convert image", end=' ')
print("(%s:%s)" % (sys.exc_info()[0], sys.exc_info()[1]))

Giftingnation/GN-Oscar-Custom | oscar/apps/wishlists/abstract_models.py | Python | bsd-3-clause | 4,545 | 0.00044

import hashlib
import random
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.core.urlresolvers import reverse
from oscar.core.compat import AUTH_USER_MODEL
class AbstractWishList(models.Model):
"""
Represents a user's wish lists of products.
A user can have multiple wish lists, move products between them, etc.
"""
# Only authenticated users can have wishlists
owner = models.ForeignKey(AUTH_USER_MODEL, related_name='wishlists',
verbose_name=_('Owner'))
name = models.CharField(verbose_name=_('Name'), default=_('Default'),
max_length=255)
#: This key acts as primary key and is used instead of an int to make it
#: harder to guess
key = models.CharField(_('Key'), max_length=6, db_index=True, unique=True,
editable=False)
# Oscar core does not support public or shared wishlists at the moment, but
# all the right hooks should be there
PUBLIC, PRIVATE, SHARED = ('Public', 'Private', 'Shared')
VISIBILITY_CHOICES = (
(PRIVATE, _('Private - Only the owner can see the wish list')),
(SHARED, _('Shared - Only the owner and people with access to the obfuscated link can see the wish list')),
(PUBLIC, _('Public - Everybody can see the wish list')),
)
visibility = models.CharField(
_('Visibility'), max_length=20, default=PRIVATE, choices=VISIBILITY_CHOICES)
# Convention: A user can have multiple wish lists. The last created wish
# list for a user shall be her "default" wish list.
# If a UI element only allows adding to wish list without
# specifying which one, one shall use the default one.
# That is a rare enough case to handle it by convention instead of a
# BooleanField.
date_created = models.DateTimeField(
_('Date created'), auto_now_add=True, editable=False)
def __unicode__(self):
return u"%s's Wish List '%s'" % (self.owner, self.name)
def save(self, *args, **kwargs):
if not self.pk or kwargs.get('force_insert', False):
self.key = self.__class__.random_key()
super(AbstractWishList, self).save(*args, **kwargs)
@classmethod
def random_key(cls, length=6):
"""
Get a unique random generated key based on SHA-1 and owner
"""
while True:
key = hashlib.sha1(str(random.random())).hexdigest()[:length]
if cls._default_manager.filter(key=key).count() == 0:
return key
def is_allowed_to_see(self, user):
if self.visibility in (self.PUBLIC, self.SHARED):
return True
else:
return user == self.owner
def is_allowed_to_edit(self, user):
# currently only the owner can edit her wish list
return user == self.owner
class Meta:
ordering = ('owner', 'date_created', )
verbose_name = _('Wish List')
abstract = True
def get_absolute_url(self):
return reverse('customer:wishlists-detail', kwargs={
'key': self.key})
def add(self, product):
"""
Add a product to this wishlist
"""
lines = self.lines.filter(product=product)
if len(lines) == 0:
self.lines.create(
product=product, title=product.get_title())
else:
line = lines[0]
line.quantity += 1
line.save()
class AbstractLine(models.Model):
"""
One entry in a wish list. Similar to order lines or basket lines.
"""
wishlist = models.ForeignKey('wishlists.WishList', related_name='lines',
verbose_name=_('Wish List'))
product = models.ForeignKey(
'catalogue.Product', verbose_name=_('Product'),
related_name='wishlists_lines', on_delete=models.SET_NULL,
blank=True, null=True)
quantity = models.PositiveIntegerField(_('Quantity'), default=1)
#: Store the title in case product gets deleted
title = models.CharField(_("Title"), max_length=255)
def __unicode__(self):
return u'%sx %s on %s' % (self.quantity, self.title,
self.wishlist.name)
def get_title(self):
if self.product:
return self.product.get_title()
else:
return self.title
class Meta:
abstract = True
verbose_name = _('Wish list line')
unique_together = (('wishlist', 'product'), )
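
A hypothetical sketch of how a concrete WishList model built from these abstract classes behaves; user and product are assumed to already exist:

wishlist = WishList.objects.create(owner=user, name="Birthday")  # save() assigns a random key
wishlist.add(product)  # first call creates a line with quantity 1
wishlist.add(product)  # second call increments the same line to quantity 2
assert wishlist.lines.get(product=product).quantity == 2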

ellisdg/3DUnetCNN | unet3d/models/pytorch/fcn/__init__.py | Python | mit | 21 | 0

from .fcn import FCN

aikramer2/spaCy | spacy/tests/doc/test_doc_api.py | Python | mit | 10,763 | 0.001487

# coding: utf-8
from __future__ import unicode_literals
from ..util import get_doc
from ...tokens import Doc
from ...vocab import Vocab
from ...attrs import LEMMA
from ...tokens import Span
import pytest
import numpy
@pytest.mark.parametrize('text', [["one", "two", "three"]])
def test_doc_api_compare_by_string_position(en_vocab, text):
doc = get_doc(en_vocab, text)
# Get the tokens in this order, so their ID ordering doesn't match the idx
token3 = doc[-1]
token2 = doc[-2]
token1 = doc[-1]
token1, token2, token3 = doc
assert token1 < token2 < token3
assert not token1 > token2
assert token2 > token1
assert token2 <= token3
assert token3 >= token1
def test_doc_api_getitem(en_tokenizer):
text = "Give it back! He pleaded."
tokens = en_tokenizer(text)
assert tokens[0].text == 'Give'
assert tokens[-1].text == '.'
with pytest.raises(IndexError):
tokens[len(tokens)]
def to_str(span):
return '/'.join(token.text for token in span)
span = tokens[1:1]
assert not to_str(span)
span = tokens[1:4]
assert to_str(span) == 'it/back/!'
span = tokens[1:4:1]
assert to_str(span) == 'it/back/!'
with pytest.raises(ValueError):
tokens[1:4:2]
with pytest.raises(ValueError):
tokens[1:4:-1]
span = tokens[-3:6]
assert to_str(span) == 'He/pleaded'
span = tokens[4:-1]
assert to_str(span) == 'He/pleaded'
span = tokens[-5:-3]
assert to_str(span) == 'back/!'
span = tokens[5:4]
assert span.start == span.end == 5 and not to_str(span)
span = tokens[4:-3]
assert span.start == span.end == 4 and not to_str(span)
span = tokens[:]
assert to_str(span) == 'Give/it/back/!/He/pleaded/.'
span = tokens[4:]
assert to_str(span) == 'He/pleaded/.'
span = tokens[:4]
assert to_str(span) == 'Give/it/back/!'
span = tokens[:-3]
assert to_str(span) == 'Give/it/back/!'
span = tokens[-3:]
assert to_str(span) == 'He/pleaded/.'
span = tokens[4:50]
assert to_str(span) == 'He/pleaded/.'
span = tokens[-50:4]
assert to_str(span) == 'Give/it/back/!'
span = tokens[-50:-40]
assert span.start == span.end == 0 and not to_str(span)
span = tokens[40:50]
assert span.start == span.end == 7 and not to_str(span)
span = tokens[1:4]
assert span[0].orth_ == 'it'
subspan = span[:]
assert to_str(subspan) == 'it/back/!'
subspan = span[:2]
assert to_str(subspan) == 'it/back'
subspan = span[1:]
assert to_str(subspan) == 'back/!'
subspan = span[:-1]
assert to_str(subspan) == 'it/back'
subspan = span[-2:]
assert to_str(subspan) == 'back/!'
subspan = span[1:2]
assert to_str(subspan) == 'back'
subspan = span[-2:-1]
assert to_str(subspan) == 'back'
subspan = span[-50:50]
assert to_str(subspan) == 'it/back/!'
subspan = span[50:-50]
assert subspan.start == subspan.end == 4 and not to_str(subspan)
@pytest.mark.parametrize('text', ["Give it back! He pleaded.",
" Give it back! He pleaded. "])
def test_doc_api_serialize(en_tokenizer, text):
tokens = en_tokenizer(text)
new_tokens = get_doc(tokens.vocab).from_bytes(tokens.to_bytes())
assert tokens.text == new_tokens.text
assert [t.text for t in tokens] == [t.text for t in new_tokens]
assert [t.orth for t in tokens] == [t.orth for t in new_tokens]
new_tokens = get_doc(tokens.vocab).from_bytes(
tokens.to_bytes(tensor=False), tensor=False)
assert tokens.text == new_tokens.text
assert [t.text for t in tokens] == [t.text for t in new_tokens]
assert [t.orth for t in tokens] == [t.orth for t in new_tokens]
new_tokens = get_doc(tokens.vocab).from_bytes(
tokens.to_bytes(sentiment=False), sentiment=False)
assert tokens.text == new_tokens.text
assert [t.text for t in tokens] == [t.text for t in new_tokens]
assert [t.orth for t in tokens] == [t.orth for t in new_tokens]
def test_doc_api_set_ents(en_tokenizer):
text = "I use goggle chrone to surf the web"
tokens = en_tokenizer(text)
assert len(tokens.ents) == 0
tokens.ents = [(tokens.vocab.strings['PRODUCT'], 2, 4)]
assert len(list(tokens.ents)) == 1
assert [t.ent_iob for t in tokens] == [0, 0, 3, 1, 0, 0, 0, 0]
assert tokens.ents[0].label_ == 'PRODUCT'
assert tokens.ents[0].start == 2
assert tokens.ents[0].end == 4
def test_doc_api_merge(en_tokenizer):
text = "WKRO played songs by the beach boys all night"
# merge 'The Beach Boys'
doc = en_tokenizer(text)
assert len(doc) == 9
doc.merge(doc[4].idx, doc[6].idx + len(doc[6]), tag='NAMED', lemma='LEMMA',
ent_type='TYPE')
assert len(doc) == 7
assert doc[4].text == 'the beach boys'
assert doc[4].text_with_ws == 'the beach boys '
assert doc[4].tag_ == 'NAMED'
# merge 'all night'
doc = en_tokenizer(text)
assert len(doc) == 9
doc.merge(doc[7].idx, doc[8].idx + len(doc[8]), tag='NAMED', lemma='LEMMA',
ent_type='TYPE')
assert len(doc) == 8
assert doc[7].text == 'all night'
assert doc[7].text_with_ws == 'all night'
# merge both with bulk merge
doc = en_tokenizer(text)
assert len(doc) == 9
with doc.retokenize() as retokenizer:
retokenizer.merge(doc[4: 7], attrs={'tag':'NAMED', 'lemma':'LEMMA',
'ent_type':'TYPE'})
retokenizer.merge(doc[7: 9], attrs={'tag':'NAMED', 'lemma':'LEMMA',
'ent_type':'TYPE'})
assert len(doc) == 6
assert doc[4].text == 'the beach boys'
assert doc[4].text_with_ws == 'the beach boys '
assert doc[4].tag_ == 'NAMED'
assert doc[5].text == 'all night'
assert doc[5].text_with_ws == 'all night'
assert doc[5].tag_ == 'NAMED'
def test_doc_api_merge_children(en_tokenizer):
"""Test that attachments work correctly after merging."""
text = "WKRO played songs by the beach boys all night"
doc = en_tokenizer(text)
assert len(doc) == 9
doc.merge(doc[4].idx, doc[6].idx + len(doc[6]), tag='NAMED', lemma='LEMMA',
ent_type='TYPE')
for word in doc:
if word.i < word.head.i:
assert word in list(word.head.lefts)
elif word.i > word.head.i:
assert word in list(word.head.rights)
def test_doc_api_merge_hang(en_tokenizer):
text = "through North and South Carolina"
doc = en_tokenizer(text)
doc.merge(18, 32, tag='', lemma='', ent_type='ORG')
doc.merge(8, 32, tag='', lemma='', ent_type='ORG')
def test_doc_api_retokenizer(en_tokenizer):
doc = en_tokenizer("WKRO played songs by the beach boys all night")
with doc.retokenize() as retokenizer:
retokenizer.merge(doc[4:7])
assert len(doc) == 7
assert doc[4].text == 'the beach boys'
def test_doc_api_retokenizer_attrs(en_tokenizer):
doc = en_tokenizer("WKRO played songs by the beach boys all night")
# test both string and integer attributes and values
attrs = {LEMMA: 'boys', 'ENT_TYPE': doc.vocab.strings['ORG']}
with doc.retokenize() as retokenizer:
retokenizer.merge(doc[4:7], attrs=attrs)
assert len(doc) == 7
assert doc[4].text == 'the beach boys'
assert doc[4].lemma_ == 'boys'
assert doc[4].ent_type_ == 'ORG'
def test_doc_api_sents_empty_string(en_tokenizer):
doc = en_tokenizer("")
doc.is_parsed = True
sents = list(doc.sents)
assert len(sents) == 0
def test_doc_api_runtime_error(en_tokenizer):
# Example that caused run-time error while parsing Reddit
text = "67% of black households are single parent \n\n72% of all black babies born out of wedlock \n\n50% of all black kids don\u2019t finish high school"
deps = ['nsubj', 'prep', 'amod', 'pobj', 'ROOT', 'amod', 'attr', '',
'nummod', 'prep', 'det', 'amod', 'pobj', 'acl', 'prep', 'prep',
'pobj', '', 'nummod', 'prep', 'det', 'amod', 'pobj', 'aux', 'neg',
'ROOT', 'amod', 'dobj']
tokens = en_tokenizer(text)
doc = get_doc(tokens.vocab, [t.text for t in tokens], deps=deps)
nps = []
for np in doc.noun_chunks:
while

Exgibichi/statusquo | test/functional/create_cache.py | Python | mit | 841 | 0.003567

#!/usr/bin/env python3
# Copyright (c) 2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Create a blockchain cache.
Creating a cache of the blockchain speeds up test execution when running
multiple functional tests. This helper script is executed by test_runner when multiple
tests are being run in parallel.
"""
from test_framework.test_framework import StatusquoTestFramework
class CreateCache(StatusquoTestFramework):
def __init__(self):
super().__init__()
# Test network and test nodes are not required:
self.num_nodes = 0
self.nodes = []
def setup_network(self):
pass
def run_test(self):
pass
if __name__ == '__main__':
CreateCache().main()

jss-emr/openerp-7-src | openerp/addons/mail/wizard/mail_compose_message.py | Python | agpl-3.0 | 12,527 | 0.003193

# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010-Today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
import base64
import re
from openerp import tools
from openerp.osv import osv
from openerp.osv import fields
from openerp.tools.safe_eval import safe_eval as eval
from openerp.tools.translate import _
# main mako-like expression pattern
EXPRESSION_PATTERN = re.compile('(\$\{.+?\})')
class mail_compose_message(osv.TransientModel):
""" Generic message composition wizard. You may inherit from this wizard
at model and view levels to provide specific features.
The behavior of the wizard depends on the composition_mode field:
- 'reply': reply to a previous message. The wizard is pre-populated
via ``get_message_data``.
- 'comment': new post on a record. The wizard is pre-populated via
``get_record_data``
- 'mass_mail': wizard in mass mailing mode where the mail details can
contain template placeholders that will be merged with actual data
before being sent to each recipient.
"""
_name = 'mail.compose.message'
_inherit = 'mail.message'
_description = 'Email composition wizard'
_log_access = True
def default_get(self, cr, uid, fields, context=None):
""" Handle composition mode. Some details about context keys:
- comment: default mode, model and ID of a record the user comments
- default_model or active_model
- default_res_id or active_id
- reply: active_id of a message the user replies to
- default_parent_id or message_id or active_id: ID of the
mail.message we reply to
- message.res_model or default_model
- message.res_id or default_res_id
- mass_mail: model and IDs of records the user mass-mails
- active_ids: record IDs
- default_model or active_model
"""
if context is None:
context = {}
result = super(mail_compose_message, self).default_get(cr, uid, fields, context=context)
# get some important values from context
composition_mode = context.get('default_composition_mode', context.get('mail.compose.message.mode'))
model = context.get('default_model', context.get('active_model'))
res_id = context.get('default_res_id', context.get('active_id'))
message_id = context.get('default_parent_id', context.get('message_id', context.get('active_id')))
active_ids = context.get('active_ids')
# get default values according to the composition mode
if composition_mode == 'reply':
vals = self.get_message_data(cr, uid, message_id, context=context)
elif composition_mode == 'comment' and model and res_id:
vals = self.get_record_data(cr, uid, model, res_id, context=context)
elif composition_mode == 'mass_mail' and model and active_ids:
vals = {'model': model, 'res_id': res_id}
else:
vals = {'model': model, 'res_id': res_id}
if composition_mode:
vals['composition_mode'] = composition_mode
for field in vals:
if field in fields:
result[field] = vals[field]
# TDE HACK: as mailboxes used default_model='res.users' and default_res_id=uid
# (because of lack of an accessible pid), creating a message on its own
# profile may crash (res_users does not allow writing on it)
# Posting on its own profile works (res_users redirect to res_partner)
# but when creating the mail.message to create the mail.compose.message
# access rights issues may rise
# We therefore directly change the model and res_id
if result.get('model') == 'res.users' and result.get('res_id') == uid:
result['model'] = 'res.partner'
result['res_id'] = self.pool.get('res.users').browse(cr, uid, uid).partner_id.id
return result
def _get_composition_mode_selection(self, cr, uid, context=None):
return [('comment', 'Comment a document'), ('reply', 'Reply to a message'), ('mass_mail', 'Mass mailing')]
_columns = {
'composition_mode': fields.selection(
lambda s, *a, **k: s._get_composition_mode_selection(*a, **k),
string='Composition mode'),
'partner_ids': fields.many2many('res.partner',
'mail_compose_message_res_partner_rel',
'wizard_id', 'partner_id', 'Additional contacts'),
'attachment_ids': fields.many2many('ir.attachment',
'mail_compose_message_ir_attachments_rel',
'wizard_id', 'attachment_id', 'Attachments'),
'filter_id': fields.many2one('ir.filters', 'Filters'),
}
_defaults = {
'composition_mode': 'comment',
'body': lambda self, cr, uid, ctx={}: '',
'subject': lambda self, cr, uid, ctx={}: False,
'partner_ids': lambda self, cr, uid, ctx={}: [],
}
def _notify(self, cr, uid, newid, context=None):
""" Override specific notify method of mail.message, because we do
not want that feature in the wizard. """
return
def get_record_data(self, cr, uid, model, res_id, context=None):
""" Returns a defaults-like dict with initial values for the composition
wizard when sending an email related to the document record
identified by ``model`` and ``res_id``.
:param str model: model name of the document record this mail is
related to.
:param int res_id: id of the document record this mail is related to
"""
doc_name_get = self.pool.get(model).name_get(cr, uid, [res_id], context=context)
if doc_name_get:
record_name = doc_name_get[0][1]
else:
record_name = False
return {'model': model, 'res_id': res_id, 'record_name': record_name}
def get_message_data(self, cr, uid, message_id, context=None):
""" Returns a defaults-like dict with initial values for the composition
wizard when replying to the given message (e.g. including the quote
of the initial message, and the correct recipients).
:param int message_id: id of the mail.message to which the user
is replying.
"""
if not message_id:
return {}
if context is None:
context = {}
message_data = self.pool.get('mail.message').browse(cr, uid, message_id, context=context)
# create subject
re_prefix = _('Re:')
reply_subject = tools.ustr(message_data.subject or '')
if not (reply_subject.startswith('Re:') or reply_subject.startswith(re_prefix)) and message_data.subject:
reply_subject = "%s %s" % (re_prefix, reply_subject)
# get partner_ids from original message
partner_ids = [partner.id for partner in message_data.partner_ids] if message_data.partner_ids else []
partner_ids += context.get('default_partner_ids', [])
# update the result
result = {
'record_name': message_data.record_name,
'model': message_data.model,
'res_id': message_data.res_id,

vpp-dev/vpp | test/test_util.py | Python | apache-2.0 | 512 | 0

#!/usr/bin/env python3
"""Test framework utility functions tests"""
import unittest
from framework import VppTestRunner
from vpp_papi import mac_pton, mac_ntop
class TestUtil (unittest.TestCase):
""" MAC to binary and back """
def test_mac_to_binary(self):
mac = 'aa:bb:cc:dd:ee:ff'
b = mac_pton(mac)
mac2 = mac_ntop(b)
self.assertEqual(type(mac), type(mac2))
self.assertEqual(mac2, mac)
if __name__ == '__main__':
unittest.main(testRunner=VppTestRunner)

flupzor/newsdiffs | news/parsers/tests/test_utils.py | Python | mit | 1,938 | 0

from django.test import TestCase
from pyquery import PyQuery as pq
from ..utils import collapse_whitespace, html_to_text
class UtilsTests(TestCase):
def test_collapse_whitespace(self):
text = '\r\n\f\u200b \n\t '
self.assertEqual(collapse_whitespace(text), "")
def test_html_to_text_one_element(self):
html = pq("<div>A simple test case ©</div>")
text = html_to_text(html)
self.assertEqual("\u0041 simple test case \u00A9\n", text)
def test_html_to_text_with_script(self):
html = pq("<div>A simple test case ©<script>This should be "
"ignored.</script> with a tail ©</div>")
text = html_to_text(html)
self.assertEqual(
"\u0041 simple test case \u00A9 with a tail \u00A9\n", text
)
def test_html_to_text_with_script_with_children(self):
html = pq("<div>A simple test case ©<script><p>This should "
"be ignored as well</p>This should be ignored.</script> "
"with
|
a tail ©</div>")
text = html_to_text(html)
self.assertEqual(
"\u0041 simple test case \u00A9 with a tail \u00A9\n", text
)
def test_html_to_text_multiple_levels(self):
html = pq("<div>A test case © <div><p>with multiple "
"levels </p>and a tail ©</div> and another tail "
"©</div>")
text = html_to_text(html)
expected = "A test case \u00A9 with multiple levels \nand " \
"a tail \u00A9\nand another tail \u00A9\n"
self.assertEqual(text, expected)
def test_html_to_text_with_comments(self):
html = pq("<!-- IGNORE --><div>text<p><!-- comment which should be "
"ignored --> and more text</p></div>")
text = html_to_text(html)
self.assertEqual("text and more text\n", text)

denverfoundation/storybase | apps/storybase_help/admin.py | Python | mit | 1,189 | 0.003364

from django import forms
from django.conf import settings
from django.contrib import admin
from storybase.admin import StorybaseModelAdmin, StorybaseStackedInline
from storybase_help.models import HelpTranslation, Help
class HelpTranslationAdminForm(forms.ModelForm):
class Meta:
model = HelpTranslation
if 'tinymce' in settings.INSTALLED_APPS:
from tinymce.widgets import TinyMCE
widgets = {
'body': TinyMCE(
attrs={'cols': 80, 'rows': 30},
mce_attrs={'theme': 'advanced', 'force_p_newlines': False, 'forced_root_block': '', 'theme_advanced_toolbar_location': 'top', 'plugins': 'table', 'theme_advanced_buttons3_add': 'tablecontrols', 'theme_advanced_statusbar_location': 'bottom', 'theme_advanced_resizing': True},
),
}
class HelpTranslationInline(StorybaseStackedInline):
model = HelpTranslation
form = HelpTranslationAdminForm
extra = 1
class HelpAdmin(StorybaseModelAdmin):
inlines = [HelpTranslationInline]
prefix_inline_classes = ['HelpTranslationInline']
readonly_fields = ['help_id']
admin.site.register(Help, HelpAdmin)

buffer/thug | thug/DOM/Crypto.py | Python | gpl-2.0 | 1,112 | 0.001799

#!/usr/bin/env python
#
# Crypto.py
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
from .JSClass import JSClass
class Crypto(JSClass):
def __init__(self):
pass
@property
def enableSmartCardEvents(self):
return False
@property
def version(self):
return "2.4"
def disableRightClick(self):
pass
def importUserCertificates(self, nickname, cmmfResponse, forceToBackUp): # pylint:disable=unused-argument
return ""
def logout(self):
pass

pauliacomi/pyGAPS | src/pygaps/modelling/henry.py | Python | mit | 3,834 | 0.000261

"""Henry isotherm model."""
import numpy
from pygaps.modelling.base_model import IsothermBaseModel
class Henry(IsothermBaseModel):
r"""
Henry's law. Assumes a linear dependence of adsorbed amount with pressure.
.. math::
n(p) = K_H p
Notes
-----
The simplest method of describing adsorption on a
surface is Henry’s law. It assumes only interactions
with the adsorbate surface and is described by a
linear dependence of adsorbed amount with
increasing pressure.
It is derived from the Gibbs isotherm, by substituting
a two dimensional analogue to the ideal gas law.
From a physical standpoint, Henry's law is unrealistic as
adsorption sites
will saturate at higher pressures. However, the constant kH,
or Henry’s constant, can be thought of as a measure of the strength
of the interaction of the probe gas with the surface. At very
low concentrations of gas there is a
thermodynamic requirement for the applicability of Henry's law.
Therefore, most models reduce to the Henry equation
as :math:`\lim_{p \to 0} n(p)`.
Usually, Henry's law is unrealistic because the adsorption sites
will saturate at higher pressures.
Only use if your data is linear.
"""
# Model parameters
name = 'Henry'
formula = r"n(p) = K_H p"
calculates = 'loading'
param_names = ["K"]
param_bounds = {
"K": [0, numpy.inf],
}
def loading(self, pressure):
"""
Calculate loading at specified pressure.
Parameters
----------
pressure : float
The pressure at which to calculate the loading.
Returns
-------
float
Loading at specified pressure.
"""
return self.params["K"] * pressure
def pressure(self, loading):
"""
Calculate pressure at specified loading.
For the Henry model, a direct relationship can be found
by rearranging the function.
.. math::
p = n / K_H
Parameters
----------
loading : float
The loading at which to calculate the pressure.
Returns
-------
float
Pressure at specified loading.
"""
return loading / self.params["K"]
def spreading_pressure(self, pressure):
r"""
Calculate spreading pressure at specified gas pressure.
Function that calculates spreading pressure by solving the
following integral at each point i.
.. math::
\pi = \int_{0}^{p_i} \frac{n_i(p_i)}{p_i} dp_i
The integral for the Henry model is solved analytically.
.. math::
\pi = K_H p
Parameters
----------
pressure : float
The pressure at which to calculate the spreading pressure.
Returns
-------
float
Spreading pressure at specified pressure.
"
|
""
return self.params["K"] * pressure
def initial_guess(self, pressure, loading):
"""
Return initial guess for fitting.
Parameters
----------
pressure : ndarray
Pressure data.
loading : ndarray
Loading data.
Returns
-------
dict
Dictionary of initial guesses for the parameters.
"""
saturation_loading, langmuir_k = super().initial_guess(pressure, loading)
guess = {"K": saturation_loading * langmuir_k}
for param in guess:
if guess[param] < self.param_bounds[param][0]:
guess[param] = self.param_bounds[param][0]
if guess[param] > self.param_bounds[param][1]:
guess[param] = self.param_bounds[param][1]
return guess
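
A small sketch of the model's closed-form relations; it assumes params can be set directly on an instance, and the K value is made up:

henry = Henry()
henry.params = {"K": 2.0}
assert henry.loading(1.5) == 3.0             # n = K_H * p
assert henry.pressure(3.0) == 1.5            # p = n / K_H
assert henry.spreading_pressure(1.5) == 3.0  # pi = K_H * p (analytic integral)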

knorby/boxeeremotelib | boxeeremotelib/utils.py | Python | bsd-2-clause | 951 | 0.005258

import random
class MultiResultCallbackHandler(object):
def __init__(self, cb=None):
self._count = 0
self._results = []
self._cb = cb
def result_cb(res):
self._results.append(res)
if len(self._results)==self._count:
self._fire()
self._result_cb = result_cb
def _fire(self):
if self._cb:
self._cb(self._results)
def get_cb(self):
self._count+=1
return self._result_cb
class MultiBoolCallbackHandler(MultiResultCallbackHandler):
def _fire(self):
if self._cb:
self._cb(all(self._results))
def get_random_string(min_length=5, max_length=15):
chrs = [chr(x) for x in range(ord('A'), ord('Z')+1)]
chrs.extend([chr(x) for x in range(ord('a'), ord('z')+1)])
return ''.join(random.choice(chrs) for i in
xrange(random.randint(min_length, max_length)))
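
A minimal sketch of the fan-in pattern implemented above; no external dependencies are assumed:

collected = []
handler = MultiResultCallbackHandler(cb=collected.append)
cb1 = handler.get_cb()  # each get_cb() call raises the expected result count
cb2 = handler.get_cb()
cb1('first')            # 1 of 2 results in: the callback does not fire yet
cb2('second')           # 2 of 2: _fire() passes ['first', 'second'] to cb
assert collected == [['first', 'second']]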

noironetworks/neutron | neutron/tests/unit/cmd/upgrade_checks/test_checks.py | Python | apache-2.0 | 1,114 | 0

# Copyright 2018 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_upgradecheck.upgradecheck import Code
from neutron.cmd.upgrade_checks import checks
from neutron.tests import base
class TestChecks(base.BaseTestCase):
def setUp(self):
super(TestChecks, self).setUp()
self.checks = checks.CoreChecks()
def test_get_checks_list(self):
self.assertIsInstance(self.checks.get_checks(), tuple)
def test_noop_check(self):
check_result = checks.CoreChecks.noop_check(mock.Mock())
self.assertEqual(Code.SUCCESS, check_result.code)

bowen0701/algorithms_data_structures | alg_shortest_game.py | Python | bsd-2-clause | 1,558 | 0.002567

"""Shortest game.
When we play games, we always bet in one of two ways in each game:
- betting one chip
- betting all-in
Wins are paid equal to the wager, so if we bet C chips and win,
we get 2C chips back.
Suppose yesterday was a lucky day for us: we won every game we played,
starting with 1 chip and leaving the game with N chips, and we played
all-in no more than K times.
Given the integers N and K, return the minimum number of games that
are necessary for us to play.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
def shortest_game(N, K):
# Apply top-down recursion, which is efficient with no repetition.
if N <= 2 or K == 0:
# Base cases: If N is 1 or 2, or K is 0, bet N-1 times of 1 chip.
return N - 1
if N % 2 == 0:
# If N is even, bet 1 all-in, and
# continue playing game for N//2 with K-1 all-in opportunities.
return 1 + shortest_game(N // 2, K - 1)
else:
# If N is odd, bet 1 chip, and
# continue playing game for N-1 with K all-in opportunities.
return 1 + shortest_game(N - 1, K)
def main():
# Output: 7
N = 8
K = 0
print(shortest_game(N, K))
# Output: 6
N = 18
K = 2
print(shortest_game(N, K))
# Output: 4
N = 10
K = 10
print(shortest_game(N, K))
# Output: 0
N = 1
K = 0
print(shortest_game(N, K))
# Output: 8
N = 100
K = 5
print(shortest_game(N, K))
if __name__ == '__main__':
main()
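
A hand trace of the recursion for N=18, K=2, agreeing with the expected output noted in main():

assert shortest_game(18, 2) == 1 + shortest_game(9, 1)  # 18 even: all-in halves it
assert shortest_game(9, 1) == 1 + shortest_game(8, 1)   # 9 odd: bet a single chip
assert shortest_game(8, 1) == 1 + shortest_game(4, 0)   # 8 even: all-in again
assert shortest_game(4, 0) == 3                         # K exhausted: N - 1 one-chip bets
assert shortest_game(18, 2) == 6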

indirectlylit/whattowatch | data-utils/_fetch_new_rt_data.py | Python | mit | 1,292 | 0.001548

import json
import os
import time
from rottentomatoes import RT
BOX_OFFICE_COUNTRIES = [
"us",
"in",
"uk",
"nl",
]
LIMIT = 50 # max allowed by rotten tomatoes
OUTPUT_FILE = "download/more_movies.json"
def main():
assert os.environ["RT_KEY"], "Your Rotten Tomatoes API key should be stored in the RT_KEY env var!"
rt = RT()
# NOTE: you should have your API key stored in RT_KEY before this will work
movies = []
link_template = ""
for country in BOX_OFFICE_COUNTRIES:
print "requesting box office hits for {}".format(country)
r = rt.lists('movies', 'box_office', limit=LIMIT, country=country)
movies += r['movies']
link_template = link_template or r['link_template']
time.sleep(10) # respect our API limits!
# to maintain compatibility with movies.json fields, our top level dict
# should have the following fields:
# total (int)
# movies (list)
# link_template (string)
total = len(movies)
result = {
"total": total,
"movies": movies,
"link_template": link_template,
}
with open(OUTPUT_FILE, "w") as f:
json.dump(result, f, indent=2, sort_keys=True)
if __name__ == "__main__":
main()

mjuric/duplicity | duplicity/backends/lftpbackend.py | Python | gpl-2.0 | 9,388 | 0.001172

# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright 2002 Ben Escoto <ben@emerose.org>
# Copyright 2007 Kenneth Loafman <kenneth@loafman.com>
# Copyright 2010 Marcel Pennewiss <opensource@pennewiss.de>
# Copyright 2014 Edgar Soldin
# - webdav, fish, sftp support
# - https cert verification switches
# - debug output
#
# This file is part of duplicity.
#
# Duplicity is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# Duplicity is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with duplicity; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import os
import os.path
import re
import urllib
try:
from shlex import quote as cmd_quote
except ImportError:
from pipes import quote as cmd_quote
import duplicity.backend
from duplicity import globals
from duplicity import log
from duplicity import tempdir
class LFTPBackend(duplicity.backend.Backend):
"""Connect to remote store using File Transfer Protocol"""
def __init__(self, parsed_url):
duplicity.backend.Backend.__init__(self, parsed_url)
# we expect an output
try:
p = os.popen("lftp --version")
fout = p.read()
ret = p.close()
except Exception:
pass
# there is no output if lftp not found
if not fout:
log.FatalError("LFTP not found: Please install LFTP.",
log.ErrorCode.ftps_lftp_missing)
# version is the second word of the second part of the first line
version = fout.split('\n')[0].split(' | ')[1].split()[1]
log.Notice("LFTP version is %s" % version)
self.parsed_url = parsed_url
self.scheme = duplicity.backend.strip_prefix(parsed_url.scheme, 'lftp').lower()
self.scheme = re.sub('^webdav', 'http', self.scheme)
self.url_string = self.scheme + '://' + parsed_url.hostname
if parsed_url.port:
self.url_string += ":%s" % parsed_url.port
self.remote_path = re.sub('^/', '', parsed_url.path)
# Fix up an empty remote path
if len(self.remote_path) == 0:
self.remote_path = '/'
# Use an explicit directory name.
if self.remote_path[-1] != '/':
self.remote_path += '/'
self.authflag = ''
if self.parsed_url.username:
self.username = self.parsed_url.username
self.password = self.get_password()
self.authflag = "-u '%s,%s'" % (self.username, self.password)
if globals.ftp_connection == 'regular':
self.conn_opt = 'off'
else:
self.conn_opt = 'on'
# check for cacert file if https
self.cacert_file = globals.ssl_cacert_file
if self.scheme == 'https' and not globals.ssl_no_check_certificate:
cacert_candidates = ["~/.duplicity/cacert.pem",
"~/duplicity_cacert.pem",
"/etc/duplicity/cacert.pem"]
# look for a default cacert file
if not self.cacert_file:
for path in cacert_candidates:
path = os.path.expanduser(path)
if (os.path.isfile(path)):
self.cacert_file = path
break
# save config into a reusable temp file
self.tempfile, self.tempname = tempdir.default().mkstemp()
os.write(self.tempfile, "set ssl:verify-certificate " +
("false" if globals.ssl_no_check_certificate else "true") + "\n")
if self.cacert_file:
os.write(self.tempfile, "set ssl:ca-file " + cmd_quote(self.cacert_file) + "\n")
if globals.ssl_cacert_path:
os.write(self.tempfile, "set ssl:ca-path " + cmd_quote(globals.ssl_cacert_path) + "\n")
if self.parsed_url.scheme == 'ftps':
os.write(self.tempfile, "set ftp:ssl-allow true\n")
os.write(self.tempfile, "set ftp:ssl-protect-data true\n")
os.write(self.tempfile, "set ftp:ssl-protect-list true\n")
elif self.parsed_url.scheme == 'ftpes':
os.write(self.tempfile, "set ftp:ssl-force on\n")
os.write(self.tempfile, "set ftp:ssl-protect-data on\n")
os.write(self.tempfile, "set ftp:ssl-protect-list on\n")
else:
os.write(self.tempfile, "set ftp:ssl-allow false\n")
os.write(self.tempfile, "set http:use-propfind true\n")
os.write(self.tempfile, "set net:timeout %s\n" % globals.timeout)
os.write(self.tempfile, "set net:max-retries %s\n" % globals.num_retries)
os.write(self.tempfile, "set ftp:passive-mode %s\n" % self.conn_opt)
if log.getverbosity() >= log.DEBUG:
os.write(self.tempfile, "debug\n")
if self.parsed_url.scheme == 'ftpes':
os.write(self.tempfile, "open %s %s\n" % (self.authflag, self.url_string.replace('ftpes', 'ftp')))
else:
os.write(self.tempfile, "open %s %s\n" % (self.authflag, self.url_string))
os.close(self.tempfile)
# print settings in debug mode
if log.getverbosity() >= log.DEBUG:
f = open(self.tempname, 'r')
log.Debug("SETTINGS: \n"
"%s" % f.read())
def _put(self, source_path, remote_filename):
commandline = "lftp -c \"source %s; mkdir -p %s; put %s -o %s\"" % (
self.tempname,
cmd_quote(self.remote_path),
cmd_quote(source_path.name),
cmd_quote(self.remote_path) + remote_filename
)
log.Debug("CMD: %s" % commandline)
s, l, e = self.subprocess_popen(commandline)
log.Debug("STATUS: %s" % s)
log.Debug("STDERR:\n"
"%s" % (e))
log.Debug("STDOUT:\n"
"%s" % (l))
def _get(self, remote_filename, local_path):
commandline = "lftp -c \"source %s; get %s -o %s\"" % (
cmd_quote(self.tempname),
cmd_quote(self.remote_path) + remote_filename,
cmd_quote(local_path.name)
)
log.Debug("CMD: %s" % commandline)
_, l, e = self.subprocess_popen(commandline)
log.Debug("STDERR:\n"
"%s" % (e))
log.Debug("STDOUT:\n"
"%s" % (l))
def _list(self):
# Do a long listing to avoid connection reset
# remote_dir = urllib.unquote(self.parsed_url.path.lstrip('/')).rstrip()
remote_dir = urllib.unquote(self.parsed_url.path)
# print remote_dir
quoted_path = cmd_quote(self.remote_path)
# failing to cd into the folder might be because it was not created already
commandline = "lftp -c \"source %s; ( cd %s && ls ) || ( mkdir -p %s && cd %s && ls )\"" % (
cmd_quote(self.tempname),
quoted_path, quoted_path, quoted_path
)
log.Debug("CMD: %s" % commandline)
_, l, e = self.subprocess_popen(commandline)
log.Debug("STDERR:\n"
"%s" % (e))
log.Debug("STDOUT:\n"
"%s" % (l))
# Look for our files as the last element of a long list line
return [x.split()[-1] for x in l.split('\n') if x]
def _delete(self, filename):
commandline = "lftp -c \"source %s; cd %s; rm %s\"" % (
cmd_quote(self.tempname),
cmd_quote(self.remote_path),
cmd_quote(filename)
)
log.Debug("CMD: %s" % commandline)
_, l, e = self.subprocess_popen(commandline)
log.Debug("STDERR:\n"
"%s" % (e))
log.Debug("STDOUT:\n"
"%s" % (

lavakyan/mstm-spectrum | doc/source/scripting/mie_contrib.py | Python | gpl-3.0 | 567 | 0.003527

#!/usr/bin/python
# -*- coding: utf-8 -*-
from mstm_studio.contributions import MieLognormSpheres
from mstm_studio.alloy_AuAg import AlloyAuAg
import numpy as np
mie = MieLognormSpheres(name='mie', wavelengths=np.linspace(300, 800, 51))
mie.set_material(AlloyAuAg(x_Au=1), 1.5) # golden sphere in glass
values = [1, 1.5, 0.5] # scale, mu, sigma
fig, _ = mie.plot(values)
fig.savefig('mie_contrib.png', bbox_inches='tight')
mie.MAX_DIAMETER_TO_PLOT = 20  # the default is 100 nm
fig, _ = mie.plot_distrib(values)
fig.savefig('mie_distrib.png', bbox_inches='tight')

tysonholub/twilio-python | tests/integration/preview/acc_security/service/test_verification.py | Python | mit | 1,803 | 0.002773

# coding=utf-8
r"""
This code was generated by
\ / _    _  _|   _  _
 | (_)\/(_)(_|\/| |(/_  v1.0.0
      /       /
"""
from tests import IntegrationTestCase
from tests.holodeck import Request
from twilio.base.exceptions import TwilioException
from twilio.http.response import Response
class VerificationTestCase(IntegrationTestCase):
def test_create_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.preview.acc_security.services(sid="VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.verifications.create(to="to", chan
|
nel="channel")
values = {'To': "to", 'Channel': "channel", }
self.holodeck.assert_has_request(Request(
'post',
'https://preview.twilio.com/Verification/Services/VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Verifications',
data=values,
))
def test_create_verification_response(self):
self.holodeck.mock(Response(
201,
'''
{
"sid": "VEaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"service_sid": "VAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"to": "+15017122661",
"channel": "sms",
"status": "pending",
"valid": null,
"date_created": "2015-07-30T20:00:00Z",
"date_updated": "2015-07-30T20:00:00Z"
}
'''
))
actual = self.client.preview.acc_security.services(sid="VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.verifications.create(to="to", channel="channel")
self.assertIsNotNone(actual)
|
neviim/logporsocket
|
nvmtools.py
|
Python
|
mit
| 3,224
| 0.036036
|
#!/usr/bin/python
# -*-coding: UTF-8-*-
import random, platform, subprocess
import sys, os, time, getopt
dev=[{ "Nome": "Neviim",
"Status": {"list":[ {"Data": "18/02/2013"},
{"Update": "20/02/2014"},
{"Versao": "0.1"}]}}]
class MacTools:
"""documentação para classe MacTools"""
def __init__(self, mac=""):
self.mac = mac
def isLinux(self):
"""Verifica se a corrente plataforma é Linux"""
OS = platform.system()
return OS == 'Linux'
def isOSx(self):
"""Verifica se a corrente plataforma é OSx"""
OS = platform.system()
return OS == 'Darwin'
def isRoot(self):
"""Verifica se o corrente usar é root"""
return os.getuid() & os.getgid() == 0
    def checkOSxMac(self, device, mac):
        """Return True if the device's current MAC address matches the given one"""
output = subprocess.Popen(["ifconfig", "%s" % device], stdout=subprocess.PIPE).communicate()[0]
index = output.find('ether') + len('ether ')
localAddr = output[index:index+17].lower()
return mac == localAddr
def checkLinuxMac(self,device,mac):
"""Returna true se o corrente mac address correspponde ao mac address enviado"""
output = subprocess.Popen(["ifconfig", "%s" % device], stdout=subprocess.PIPE).communicate()[0]
index = output.find('HWaddr') + len('HWaddr ')
localAddr = output[index:index+17].lower()
return mac == localAddr
def getOSxIP(self,device):
"""Returna o IP corrente do device recebido"""
output = subprocess.Popen(["ifconfig", "%s" % device], stdout=subprocess.PIPE).communicate()[0]
index1 = output.find('inet ') + len('inet ')
index2 = output.find('netmask ') + len('netmask')
localIP = output[index1:index1-(index1-index2)-len('netmask ')].lower()
return localIP
def getLinuxIP(self,device):
"""Returna o IP corrente do device recebido"""
output = subprocess.Popen(["ifconfig", "%s" % device], stdout=subprocess.PIPE).communicate()[0]
index1 = output.find('addr:') + len('addr:')
index2 = output.find('Bcast:') + len('Bcast:')
localIP = output[index1:index1-(index1-index2)-len('Bcast: ')].lower()
return localIP
def getOSxMac(self,device):
"""Returna o corrente mac address correspponde ao device enviado"""
output = subprocess.Popen(["ifconfig", "%s" % device], stdout=subprocess.PIPE).communicate()[0]
index = output.find('ether') + len('ether ')
localAddr = output[index:index+17].lower()
return localAddr
def getLinuxMac(self,device):
"""Returna o corrente mac address correspponde ao device enviado"""
output = subprocess.Popen(["ifconfig", "%s" % device], stdout=subprocess.PIPE).communicate()[0]
index = output.find('HWaddr') + len('HWaddr ')
localAddr = output[index:index+17].lower()
return localAddr
def setLinuxMAC(self,device,mac):
"""Sets um novo mac address para esta maquina, se for um sistema Linux"""
subprocess.check_call(["ifconfig","%s" % device, "up"])
subprocess.check_call(["ifconfig","%s" % device, "hw", "ether","%s" % mac])
def setOSxMAC(self,device,mac):
"""Sets um novo mac address para esta maquina, se for um sistema Darwin"""
subprocess.check_call(["ifconfig","%s" % device,"up"])
subprocess.check_call(["ifconfig","%s" % device,"lladdr","%s" % mac])
|
zkan/microservices-with-swarm-101
|
services/front_gateway/front_gateway/wsgi.py
|
Python
|
mit
| 392
| 0
|
"""
WSGI config for bangkok project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "bangkok.settings")
application = get_wsgi_application()
|
jonfoster/pyxb-upstream-mirror
|
tests/trac/test-trac-0111.py
|
Python
|
apache-2.0
| 1,835
| 0.010899
|
# -*- coding: utf-8 -*-
import logging
if __name__ == '__main__':
logging.basicConfig()
_log = logging.getLogger(__name__)
import pyxb.binding.generate
import pyxb.utils.domutils
from xml.dom import Node
import os.path
xsd='''<?xml version="1.0" encoding="UTF-8"?>
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
<xs:simpleType name="cards">
<xs:restriction base="xs:string">
<xs:enumeration value="clubs"/>
<xs:enumeration value="hearts"/>
<xs:enumeration value="diamonds"/>
<xs:enumeration value="spades"/>
</xs:restriction>
</xs:simpleType>
<xs:element name="card" type="cards"/>
</xs:schema>'''
code = pyxb.binding.generate.GeneratePython(schema_text=xsd)
#open('code.py', 'w').write(code)
rv = compile(code, 'test', 'exec')
eval(rv)
from pyxb.exceptions_ import *
import unittest
class TestTrac0111 (unittest.TestCase):
Expected = set( ('clubs', 'hearts', 'diamonds', 'spades') )
def testItems (self):
vals = set()
for ee in cards.iteritems():
self.assertTrue(isinstance(ee, cards._CF_enumeration._CollectionFacet_itemType))
vals.add(ee.value())
self.assertEqual(self.Expected, vals)
    def testIterItems (self):
vals = set()
for ee in cards.iteritems():
self.assertTrue(isinstance(ee, cards._CF_enumeration._CollectionFacet_itemType))
vals.add(ee.value())
self.assertEqual(self.Expected, vals)
def testValues (self):
vals = set()
for e in cards.itervalues():
vals.add(e)
self.assertEqual(self.Expected, vals)
def testIterValues (self):
vals = set()
for e in cards.itervalues():
vals.add(e)
self.assertEqual(self.Expected, vals)
if __name__ == '__main__':
unittest.main()
|
porduna/appcomposer
|
run_celery_single_queue.py
|
Python
|
bsd-2-clause
| 153
| 0
|
#!/usr/bin/python
from appcomposer.translator.tasks import cel
import sys
cel.worker_main(sys.argv + ['--concurrency=1', '--queues=single-sync-tasks'])
|
qmlc/qmlc
|
qmc/qmc-rccpro.py
|
Python
|
lgpl-2.1
| 5,020
| 0.003386
|
#!/usr/bin/python
import sys
if len(sys.argv) < 7:
print "Usage: " + sys.argv[0] + "pri-file in-dir out-dir qrc-var deps-var qmc-flags qrc-files..."
print " pri-file : File to create. To be included in calling project file."
print " in-dir : Source directory. Use $$PWD."
print " out-dir : Output directory. Use $$OUT_PWD."
print " qrc-var : Variable to which to add qrc-files."
print " deps-var : Variable to which to add dependency
|
file list."
print " Pass \\\" \\\" if none."
print " qmc-flags: Command-line options for qmc. Pass \\\" \\\" if none."
print " qrc-files: Names of the qrc files to process."
print "Example: __qmc-res.pri $$PWD $$OUT_PWD RESOURCES \\\" \\\" \"-g \" res.qrc res2.qrc"
print "Example: __qmc-res.pri $$PWD $$OUT_PWD RESVAR DEPSVAR \\\" \\\" res.qrc"
exit(1)
import os
import time
import subprocess
import xml.etree.ElementTree as et
outName = sys.argv[1].strip()
inDir = sys.argv[2].strip()
outDir = sys.argv[3].strip()
varName = sys.argv[4].strip()
depName = sys.argv[5].strip()
qmcFlags = sys.argv[6].strip()
qrcFiles = sys.argv[7:]
toInFromOut = os.path.relpath(inDir, outDir)
if toInFromOut == ".":
toInFromOut = ""
if len(outName) == 0 or len(varName) == 0:
print "Error: pri-name or variable must not be empty"
exit(6)
def writeFile(name, lines):
try:
f = open(name, "w")
for line in lines:
f.write(line + "\n")
f.close()
except Exception, e:
print "Error: failed to write: " + name
print e
return False
return True
def qmcCommand(name, asList=False):
c = "qmc " + qmcFlags + " " + name
if not asList:
return c
out = []
for p in c.split(" "):
if len(p):
out.append(p)
return out
def addOutVar(name, first=False):
outPri.append(varName + (" = " if first else " += ") + name)
tgtNum = 0
def targetName():
global tgtNum
tgtNum += 1
return "__qmc_" + varName + str(tgtNum)
rootDir = os.getcwd()
outPri = [ "QMAKE_DISTCLEAN += " + outName ]
addOutVar("", True)
if len(depName) != 0:
outPri.append(depName + " =")
outData = set()
touchables = []
for res in qrcFiles:
# Read source file.
try:
qrc = et.parse(res)
except Exception, e:
print e
print "Error: failed to read: " + res
exit(4)
toHere, n = os.path.split(res)
name = os.path.join(toHere, "__qmc_" + n)
if len(toHere):
os.chdir(toHere)
# Shadow build requires relative path to work.
toHere = os.path.join(toInFromOut, toHere)
changed = False
deps = []
for elem in qrc.iter():
if elem.tag != "file":
continue
source = ""
target = ""
if elem.text.endswith(".js"):
source = elem.text
elem.text += "c"
target = elem.text
if "alias" in elem.attrib:
elem.attrib["alias"] = elem.attrib["alias"] + "c"
changed = True
elif elem.text.endswith(".qml"):
source = elem.text
elem.text = elem.text[:-1] + "c"
target = elem.text
if "alias" in elem.attrib:
elem.attrib["alias"] = elem.attrib["alias"][:-1] + "c"
changed = True
else:
deps.append(os.path.join(toHere, elem.text))
continue
deps.append(os.path.join(toHere, elem.text))
src = os.path.join(toHere, source)
outData.add((src, os.path.join(toHere, target),))
if not os.path.isfile(target):
touchables.append(src)
open(target, "w").close()
os.chdir(rootDir)
if changed:
try:
qrc.write(name)
except Exception, e:
print e
print "Error: failed to write modified " + res + " to " + name
exit(2)
addOutVar(name)
outPri.append("QMAKE_DISTCLEAN += " + name)
tgtFile = name
else:
addOutVar(res)
tgtFile = outName
tgt = targetName()
outPri.extend([
tgt + ".target = " + tgtFile,
tgt + ".depends = " + res,
tgt + '.commands = echo && echo "Resource list file changed. Re-run qmake" && echo && exit 1',
"QMAKE_EXTRA_TARGETS += " + tgt ])
if len(depName):
for d in deps:
outPri.append(depName + " += " + d)
for src, dst in outData:
tgt = targetName()
outPri.extend([
tgt + ".target = " + dst,
tgt + ".depends = " + src,
tgt + ".commands = " + qmcCommand(src),
"QMAKE_CLEAN += " + dst,
"QMAKE_EXTRA_TARGETS += " + tgt ])
if not writeFile(outName, outPri):
exit(3)
# Ensure that file times differ.
time.sleep(1)
for t in touchables:
if not os.path.isfile(t):
print "Warning: missing qrc source: " + t
continue
cmd = [ "touch", t ]
if subprocess.call(cmd) != 0:
print "Error touching source: " + " ".join(cmd)
exit(5)
|
uhliarik/rebase-helper
|
test/test_base_output.py
|
Python
|
gpl-2.0
| 3,601
| 0.001944
|
# -*- coding: utf-8 -*-
#
# This tool helps you to rebase package to the latest version
# Copyright (C) 2013-2014 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Authors: Petr Hracek <phracek@redhat.com>
# Tomas Hozza <thozza@redhat.com>
import six
from rebasehelper.base_output import OutputLogger
class TestBaseOutput(object):
"""
Class is used for testing OutputTool
"""
old_rpm_data = {'rpm': ['rpm-0.1.0.x86_64.rpm', ' rpm-devel-0.1.0.x86_64.rpm'],
'srpm': 'rpm-0.1.0.src.rpm',
                    'logs': ['logfile1.log', 'logfile2.log']}
new_rpm_data = {'rpm': ['rpm-0.2.0.x86_64.rpm', ' rpm-devel-0.2.0.x86_64.rpm'],
'srpm': 'rpm-0.2.0.src.rpm',
'logs': ['logfile3.log', 'logfile4.log']}
patches_data = {'deleted': ['del_patch1.patch', 'del_patch2.patch'],
'modified': ['mod_patch1.patch', 'mod_patch2.patch']}
info_data = {'Information text': 'some information text'}
info_data2 = {'Next Information': 'some another information text'}
def setup(self):
OutputLogger.set_info_text('Information text', 'some information text')
OutputLogger.set_info_text('Next Information', 'some another information text')
OutputLogger.set_patch_output('Patches:', self.patches_data)
OutputLogger.set_build_data('old', self.old_rpm_data)
OutputLogger.set_build_data('new', self.new_rpm_data)
def test_base_output_global(self):
expect_dict = self.info_data
expect_dict.update(self.info_data2)
build_dict = {'old': self.old_rpm_data,
'new': self.new_rpm_data}
expected_result = {'build': build_dict,
'patch': self.patches_data,
'information': expect_dict}
for key, value in six.iteritems(expected_result):
assert value == expected_result[key]
def test_base_output_info(self):
"""
Test Output logger info
:return:
"""
info_results = OutputLogger.get_summary_info()
expect_dict = self.info_data
expect_dict.update(self.info_data2)
assert info_results == expect_dict
def test_base_output_patches(self):
"""
Test Output logger patches
:return:
"""
patch_results = OutputLogger.get_patches()
expected_patches = self.patches_data
assert patch_results == expected_patches
def test_base_output_builds_old(self):
"""
Test Output logger old builds
:return:
"""
build_results = OutputLogger.get_build('old')
assert build_results == self.old_rpm_data
def test_base_output_builds_new(self):
"""
Test Output logger new builds
:return:
"""
build_results = OutputLogger.get_build('new')
assert build_results == self.new_rpm_data
|
mozilla/firefox-flicks
|
vendor-local/lib/python/celery/backends/database/dfd042c7.py
|
Python
|
bsd-3-clause
| 1,554
| 0
|
# -*- coding: utf-8 -*-
"""
celery.backends.database.dfd042c7
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
SQLAlchemy 0.5.8 version of :mod:`~celery.backends.database.a805d4bd`,
see the docstring of that module for an explanation of why we need
this workaround.
"""
from __future__ import absolute_import
from sqlalchemy.types import PickleType as _PickleType
from sqlalchemy import util
class PickleType(_PickleType): # pragma: no cover
def process_bind_param(self, value, dialect):
dumps = self.pickler.dumps
protocol = self.protocol
if value is not None:
return dumps(value, protocol)
def process_result_value(self, value, dialect):
loads = self.pickler.loads
if value is not None:
return loads(str(value))
def copy_value(self, value):
if self.mutable:
return self.pickler.loads(self.pickler.dumps(value, self.protocol))
else:
return value
def compare_values(self, x, y):
if self.comparator:
return self.comparator(x, y)
elif self.mutable and not hasattr(x, '__eq__') and x is not None:
util.warn_deprecated(
'Objects stored with PickleType when mutable=True '
                'must implement __eq__() for reliable comparison.')
a = self.pickler.dumps(x, self.protocol)
b = self.pickler.dumps(y, self.protocol)
return a == b
else:
return x == y
def is_mutable(self):
return self.mutable
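# A minimal sketch of how this type would typically be used in a table
# definition, assuming the SQLAlchemy 0.5.x API this module targets (the
# table and column names are illustrative):
#
#   from sqlalchemy import Table, MetaData, Column, Integer
#   results = Table('taskmeta', MetaData(),
#                   Column('id', Integer, primary_key=True),
#                   Column('result', PickleType(mutable=True)))
#
# process_bind_param pickles values on the way into the database and
# process_result_value unpickles them on the way out.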
|
egor-tensin/vk-scripts
|
vk/platform.py
|
Python
|
mit
| 1,582
| 0
|
# Copyright (c) 2016 Egor Tensin <Egor.Tensin@gmail.com>
# This file is part of the "VK scripts" project.
# For details, see https://github.com/egor-tensin/vk-scripts.
# Distributed under the MIT License.
from enum import Enum
import re
class Platform(Enum):
MOBILE = 1
IPHONE = 2
IPAD = 3
ANDROID = 4
WINDOWS_PHONE = 5
WINDOWS8 = 6
WEB = 7
VK_MOBILE = 8
@staticmethod
def from_string(s):
return Platform(int(s))
def __str__(self):
return str(self.value)
@staticmethod
def _capitalize_first_letter(s):
m = re.search(r'\w', s)
if m is None:
return s
return s[:m.start()] + m.group().upper() + s[m.end():]
def get_descr_header(self):
return self._capitalize_first_letter(_PLATFORM_DESCRIPTIONS[self])
def get_descr_text(self):
s = _PLATFORM_DESCRIPTIONS[self]
if self == Platform.VK_MOBILE:
return s
s = s.replace('unrecognized', 'an unrecognized')
return 'the ' + s
def get_descr_text_capitalized(self):
return self._capitalize_first_letter(self.get_descr_text())
_PLATFORM_DESCRIPTIONS = {
Platform.MOBILE: '"mobile" web version (or unrecognized mobile app)',
Platform.IPHONE: 'official iPhone app',
Platform.IPAD: 'official iPad app',
Platform.ANDROID: 'official Android app',
Platform.WINDOWS_PHONE: 'official Windows Phone app',
Platform.WINDOWS8: 'official Windows 8 app',
Platform.WEB: 'web version (or unrecognized app)',
Platform.VK_MOBILE: 'VK Mobile',
}
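# A minimal usage sketch; the numeric string '2' is an illustrative input
# mirroring how VK encodes the platform in its API responses:
#
#   p = Platform.from_string('2')   # -> Platform.IPHONE
#   p.get_descr_header()            # -> 'Official iPhone app'
#   p.get_descr_text()              # -> 'the official iPhone app'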
|
flaviocpontes/ffmpymedia
|
setup.py
|
Python
|
mit
| 1,809
| 0
|
from setuptools import setup
from codecs import open
from os import path
from ffmpymedia import __version__
here = path.abspath(path.dirname(__file__))
# Get the long description from the relevant file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='ffmpymedia',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# http://packaging.python.org/en/latest/tutorial.html#version
version=__version__,
description='Wrapper around the FFMPEG utility',
long_description=long_description,
packages=['ffmpymedia'],
# The project's main homepage.
url='https://github.com/flaviocpontes/ffmpymedia',
download_url='https://github.com/flaviocpontes/ffmpymedia/tarball/0.3.2.2',
# Choose your license
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
# Indicate who your project is intended for
'Intended Audience :: Developers',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
        # that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
author='Flávio Cardoso Pontes',
author_email='flaviopontes@acerp.org.br',
keywords=['media', 'ffmpeg']
)
|
SINGROUP/pycp2k
|
pycp2k/classes/_xalpha1.py
|
Python
|
lgpl-3.0
| 368
| 0.002717
|
from pycp2k.inputsection import InputSection
class _xalpha1(InputSection):
    def __init__(self):
InputSection.__init__(self)
self.Section_parameters = None
self.Xa = None
self.Scale_x = None
self._name = "XALPHA"
self._keywords = {'Xa': 'XA', 'Scale_x': 'SCALE_X'}
self._attributes = ['Section_parameters']
|
ceelian/Flatty
|
src/flatty/__init__.py
|
Python
|
bsd-3-clause
| 423
| 0.002364
|
"""flatty - marshaller/unmarshaller for light-schema python obje
|
cts"""
VERSION = (0, 1, 2)
__version__ = ".".join(map(str, VERSION))
__author__ = "Christian Haintz"
__contact__ = "christian.haintz@orangelabs.at"
__homepage__ = "http://packages.python.org/flatty"
__docformat__ = "restructuredtext"
from flatty import *
try:
import mongo
except ImportError:
pass
try:
    import couch
except ImportError:
pass
|
chandler14362/panda3d
|
contrib/src/sceneeditor/seBlendAnimPanel.py
|
Python
|
bsd-3-clause
| 28,869
| 0.014445
|
#################################################################
# seBlendAnimPanel.py
# Written by Yi-Hong Lin, yihhongl@andrew.cmu.edu, 2004
#################################################################
# Import Tkinter, Pmw, and the floater code from this directory tree.
from direct.tkwidgets.AppShell import *
from direct.showbase.TkGlobal import *
import string
import math
import types
from direct.task import Task
if sys.version_info >= (3, 0):
from tkinter.simpledialog import askfloat
else:
from tkSimpleDialog import askfloat
FRAMES = 0
SECONDS = 1
#####################################################################################
# BlendAnimPanel(AppShell)
# This panel allows the user to blend two animations
# that have already been loaded for this actor.
# user can play and manipulate this blended animation
# just like in the animation panel. And, they can save this blended animation.
#####################################################################################
class BlendAnimPanel(AppShell):
# Override class variables
appname = 'Blend Anim Panel'
frameWidth = 575
frameHeight = 450
usecommandarea = 0
usestatusarea = 0
index = 0
dragMode = False
blendRatio = 0
rateList= ['1/24.0', '0.1', '0.5', '1.0', '2.0', '5.0' , '10.0']
enableBlend = False
currentBlendName = None
def __init__(self, aNode = None, blendDict={}, parent = None, **kw):
INITOPT = Pmw.INITOPT
self.id = 'BlendAnimPanel '+ aNode.getName()
self.appname = self.id
self.actorNode = aNode
self.blendDict = blendDict.copy()
if len(blendDict)>0:
self.blendList = blendDict.keys()
else:
self.blendList = []
optiondefs = (
('title', self.appname, None),
('actor', aNode, None),
            ('animList',         [], None),
('blendAnimList', self.blendList, None),
)
self.defineoptions(kw, optiondefs)
self.id = 'Blend AnimPanel '+ aNode.getName()
        self.nodeName = aNode.getName()
# Initialize the superclass
AppShell.__init__(self)
# Execute option callbacks
self.initialiseoptions(BlendAnimPanel)
self.currTime = 0.0
self.animNameA = None
self.animNameB = None
self.parent.resizable(False,False) ## Disable the ability to resize for this Window.
def createInterface(self):
# Handle to the toplevels interior
interior = self.interior()
self.menuBar.destroy()
# show the actor's name
actorFrame = Frame(interior)
name_label = Label(actorFrame, text= self.nodeName,font=('MSSansSerif', 14),
relief = SUNKEN, borderwidth=3)
name_label.pack(side = TOP, expand = False)
actorFrame.pack(side = TOP, expand = False, fill = X)
        # Create a frame showing any pre-blended animations, plus the save, remove and rename buttons.
group = Pmw.Group(interior, tag_pyclass=None)
actorFrame = group.interior()
group.pack(side = TOP, expand = False, fill = X)
Label(actorFrame, text= "Blended:", font=('MSSansSerif', 10)).pack(side=LEFT)
self.blendAnimEntry = self.createcomponent(
'Blended Animation', (), None,
Pmw.ComboBox, (actorFrame,),
labelpos = W, entry_width = 20, selectioncommand = self.setBlendAnim,
scrolledlist_items = self['blendAnimList'])
self.blendAnimEntry.pack(side=LEFT)
Label(actorFrame, text= " ", font=('MSSansSerif', 10)).pack(side=LEFT)
button = Button(actorFrame, text="Save", font=('MSSansSerif', 10),width = 12,
command = self.saveButtonPushed).pack(side=LEFT)
button = Button(actorFrame, text="Remove", font=('MSSansSerif', 10),width = 12,
command = self.removeButtonPushed).pack(side=LEFT)
button = Button(actorFrame, text="Rename", font=('MSSansSerif', 10),width = 12,
command = self.renameButtonPushed).pack(side=LEFT)
actorFrame.pack(side = TOP, expand = False, fill = X)
# Create a frame to hold all the animation setting
group = Pmw.Group(interior, tag_pyclass=None)
actorFrame = group.interior()
group.pack(side = TOP, expand = False, fill = X)
Label(actorFrame, text= "Animation A:", font=('MSSansSerif', 10)).pack(side=LEFT)
self['animList'] = self['actor'].getAnimNames()
self.AnimEntryA = self.createcomponent(
'AnimationMenuA', (), None,
Pmw.ComboBox, (actorFrame,),
labelpos = W, entry_width = 20, entry_state = DISABLED,
selectioncommand = lambda name, a = 'a' : self.setAnimation(name, AB=a),
scrolledlist_items = self['animList'])
self.AnimEntryA.pack(side=LEFT)
Label(actorFrame, text= " ", font=('MSSansSerif', 10)).pack(side=LEFT,)
Label(actorFrame, text= "Animation B:", font=('MSSansSerif', 10)).pack(side=LEFT)
self['animList'] = self['actor'].getAnimNames()
self.AnimEntryB = self.createcomponent(
'AnimationMenuB', (), None,
Pmw.ComboBox, (actorFrame,),
labelpos = W, entry_width = 20, entry_state = DISABLED,
selectioncommand = lambda name, a = 'b' : self.setAnimation(name, AB=a),
scrolledlist_items = self['animList'])
self.AnimEntryB.pack(side=LEFT)
actorFrame.pack(side = TOP, expand = False, fill = X)
### Blend Enable checkbox
actorFrame = Frame(interior, relief = SUNKEN, bd = 1)
Label(actorFrame, text= "Enable Blending:", font=('MSSansSerif', 10)).pack(side=LEFT,)
self.blendVar = IntVar()
self.blendVar.set(0)
self.blendButton = self.createcomponent(
'blendButton', (), None,
Checkbutton, (actorFrame,),
variable = self.blendVar,
command = self.toggleBlend)
self.blendButton.pack(side=LEFT)
actorFrame.pack(side = TOP, expand = False, fill = X)
## Ratio control
actorFrame = Frame(interior)
frameFrame = Frame(actorFrame, relief = SUNKEN, bd = 1)
minRatioLabel = self.createcomponent(
'minRatioLabel', (), 'sLabel',
Label, (frameFrame,),
text = 0.00)
minRatioLabel.pack(side = LEFT)
self.ratioControl = self.createcomponent(
'ratio', (), None,
Scale, (frameFrame,),
from_ = 0.0, to = 1.0, resolution = 0.01,
command = self.setRatio, length = 500,
orient = HORIZONTAL, showvalue = 1)
self.ratioControl.pack(side = LEFT, expand = 1)
self.ratioControl.set(1.0)
self.maxRatioLabel = self.createcomponent(
'maxRatioLabel', (), 'sLabel',
Label, (frameFrame,),
text = 1.00)
self.maxRatioLabel.pack(side = LEFT)
frameFrame.pack(side = LEFT, expand = 1, fill = X)
actorFrame.pack(side = TOP, expand = True, fill = X)
###################################################################################
###################################################################################
actorFrame = Frame(interior)
Label(actorFrame, text= "Play Rate:", font=('MSSansSerif', 10)).pack(side=LEFT)
self.playRateEntry = self.createcomponent(
'playRateMenu', (), None,
Pmw.ComboBox, (actorFrame,),
labelpos = W, entry_width = 20, selectioncommand = self.setPlayRate,
scrolledlist_items = self.rateList)
self.playRateEntry.pack(side=LEFT)
self.playRateEntry.selectitem('1.0')
### Loop checkbox
Label(actorFrame, text= " ", font=('MSSansSerif', 10)).pack(side=LEFT,)
Label(actorFrame, text= "Loop:", font=('MSSansSerif', 10)).pack(side=LEFT,)
self.loopVar = IntVar()
self.loopVar.set(0)
self.loopButton = self.createcomponent(
|
meng89/epubuilder
|
epubaker/metas/epub3_meta.py
|
Python
|
mit
| 570
| 0
|
# coding=utf-8
from .attrs import Attrs, AltScript, Dir, FileAs, Id, Scheme, Lang
from .dcmes import Base
class Property(Attrs):
@property
def property(self):
"""xml attribute: `property`"""
return self._attrs.setdefault('property')
@property.setter
    def property(self, value):
self._attrs['property'] = value
class Meta3(Base, AltScript, Dir, FileAs, Id, Property, Scheme, Lang):
"""meta for Epub3.metadata"""
def __init__(self, property_, text):
Base.__init__(self, text)
self.property = property_
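# A minimal usage sketch; the property/value pair is an illustrative piece
# of EPUB 3 package metadata:
#
#   meta = Meta3('dcterms:modified', '2016-01-01T00:00:00Z')
#   meta.property   # -> 'dcterms:modified'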
|
sserrot/champion_relationships
|
venv/Lib/site-packages/testpath/asserts.py
|
Python
|
mit
| 6,826
| 0.003516
|
import os
import stat
try:
from pathlib import Path
except ImportError:
try:
# Python 2 backport
from pathlib2 import Path
except ImportError:
class Path(object):
"""Dummy for isinstance checks"""
pass
__all__ = ['assert_path_exists', 'assert_not_path_exists',
'assert_isfile', 'assert_not_isfile',
'assert_isdir', 'assert_not_isdir',
'assert_islink', 'assert_not_islink',
'assert_ispipe', 'assert_not_ispipe',
'assert_issocket', 'assert_not_issocket',
]
if hasattr(os, 'fspath'):
_strpath = os.fspath
else:
def _strpath(p):
if hasattr(p, '__fspath__'):
return p.__fspath__()
elif isinstance(p, Path):
return str(p)
return p
def _stat_for_assert(path, follow_symlinks=True, msg=None):
stat = os.stat if follow_symlinks else os.lstat
try:
return stat(path)
except OSError:
if msg is None:
msg = "Path does not exist, or can't be stat-ed: %r" % path
raise AssertionError(msg)
def assert_path_exists(path, msg=None):
"""Assert that something exists at the given path.
"""
_stat_for_assert(_strpath(path), True, msg)
def assert_not_path_exists(path, msg=None):
"""Assert that nothing exists at the given path.
"""
path = _strpath(path)
if os.path.exists(path):
if msg is None:
msg = "Path exists: %r" % path
raise AssertionError(msg)
def assert_isfile(path, follow_symlinks=True, msg=None):
"""Assert that path exists and is a regular file.
With follow_symlinks=True, the default, this will pass if path is a symlink
to a regular file. With follow_symlinks=False, it will fail in that case.
"""
path = _strpath(path)
st = _stat_for_assert(path, follow_symlinks, msg)
if not stat.S_ISREG(st.st_mode):
if msg is None:
msg = "Path exists, but is not a regular file: %r" % path
raise AssertionError(msg)
def assert_not_isfile(path, follow_symlinks=True, msg=None):
"""Assert that path exists but is not a regular file.
With follow_symlinks=True, the default, this will fail if path is a symlink
to a regular file. With follow_symlinks=False, it will pass in that case.
"""
path = _strpath(path)
st = _stat_for_assert(path, follow_symlinks, msg)
if stat.S_ISREG(st.st_mode):
if msg is None:
msg = "Path is a regular file: %r" % path
raise AssertionError(msg)
def assert_isdir(path, follow_symlinks=True, msg=None):
"""Assert that path exists and is a directory.
With follow_symlinks=True, the default, this will pass if path is a symlink
to a directory. With follow_symlinks=False, it will fail in that case.
"""
path = _strpath(path)
st = _stat_for_assert(path, follow_symlinks, msg)
if not stat.S_ISDIR(st.st_mode):
if msg is None:
msg = "Path exists, but is not a directory: %r" % path
raise AssertionError(msg)
def assert_not_isdir(path, follow_symlinks=True, msg=None):
"""Assert that path exists but is not a directory.
With follow_symlinks=True, the default, this will fail if path is a symlink
to a directory. With follow_symlinks=False, it will pass in that case.
"""
path = _strpath(path)
    st = _stat_for_assert(path, follow_symlinks, msg)
if stat.S_ISDIR(st.st_mode):
if msg is None:
msg = "Path is a directory: %r" % path
raise AssertionError(msg)
_link_target_msg = """Symlink target of:
{path}
Expected:
{expected}
Actual:
{actual}
"""
def assert_islink(path, to=None, msg=None):
"""Assert
|
that path exists and is a symlink.
If to is specified, also check that it is the target of the symlink.
"""
path = _strpath(path)
st = _stat_for_assert(path, False, msg)
if not stat.S_ISLNK(st.st_mode):
if msg is None:
msg = "Path exists, but is not a symlink: %r" % path
raise AssertionError(msg)
if to is not None:
to = _strpath(to)
target = os.readlink(path)
# TODO: Normalise the target to an absolute path?
if target != to:
if msg is None:
msg = _link_target_msg.format(path=path, expected=to, actual=target)
raise AssertionError(msg)
def assert_not_islink(path, msg=None):
"""Assert that path exists but is not a symlink.
"""
path = _strpath(path)
st = _stat_for_assert(path, False, msg)
if stat.S_ISLNK(st.st_mode):
if msg is None:
msg = "Path is a symlink: %r" % path
raise AssertionError(msg)
def assert_ispipe(path, follow_symlinks=True, msg=None):
"""Assert that path exists and is a named pipe (FIFO).
With follow_symlinks=True, the default, this will pass if path is a symlink
to a named pipe. With follow_symlinks=False, it will fail in that case.
"""
path = _strpath(path)
st = _stat_for_assert(path, follow_symlinks, msg)
if not stat.S_ISFIFO(st.st_mode):
if msg is None:
msg = "Path exists, but is not a named pipe: %r" % path
raise AssertionError(msg)
def assert_not_ispipe(path, follow_symlinks=True, msg=None):
"""Assert that path exists but is not a named pipe (FIFO).
With follow_symlinks=True, the default, this will fail if path is a symlink
to a named pipe. With follow_symlinks=False, it will pass in that case.
"""
path = _strpath(path)
st = _stat_for_assert(path, follow_symlinks, msg)
if stat.S_ISFIFO(st.st_mode):
if msg is None:
msg = "Path is a named pipe: %r" % path
raise AssertionError(msg)
def assert_issocket(path, follow_symlinks=True, msg=None):
"""Assert that path exists and is a Unix domain socket.
With follow_symlinks=True, the default, this will pass if path is a symlink
to a Unix domain socket. With follow_symlinks=False, it will fail in that case.
"""
path = _strpath(path)
st = _stat_for_assert(path, follow_symlinks, msg)
if not stat.S_ISSOCK(st.st_mode):
if msg is None:
msg = "Path exists, but is not a socket: %r" % path
raise AssertionError(msg)
def assert_not_issocket(path, follow_symlinks=True, msg=None):
"""Assert that path exists but is not a Unix domain socket.
With follow_symlinks=True, the default, this will fail if path is a symlink
to a Unix domain socket. With follow_symlinks=False, it will pass in that case.
"""
path = _strpath(path)
st = _stat_for_assert(path, follow_symlinks, msg)
if stat.S_ISSOCK(st.st_mode):
if msg is None:
msg = "Path is a socket: %r" % path
raise AssertionError(msg)
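# A minimal usage sketch (the paths are illustrative); each assertion raises
# AssertionError with a descriptive message when its check fails:
#
#   assert_isfile('setup.py')
#   assert_isdir('tests')
#   assert_not_path_exists('tests/no-such-file')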
|
AdeshAtole/coala
|
coalib/output/ConfWriter.py
|
Python
|
agpl-3.0
| 4,015
| 0
|
from itertools import chain
from pyprint.ClosableObject import ClosableObject
from coalib.parsing.StringProcessing import escape
from coalib.settings.Section import Section
class ConfWriter(ClosableObject):
def __init__(self,
file_name,
key_value_delimiters=('=',),
comment_seperators=('#',),
key_delimiters=(',', ' '),
section_name_surroundings=None,
section_override_delimiters=(".",),
unsavable_keys=("save",)):
        section_name_surroundings = section_name_surroundings or {"[": "]"}
ClosableObject.__init__(self)
self.__file_name = file_name
self.__file = open(self.__file_name, "w")
self.__key_value_delimiters = key_value_delimiters
self.__comment_seperators = comment_seperators
self.__key_delimiters = key_delimiters
self.__section_name_surroundings = section_name_surroundings
self.__section_override_delimiters = section_override_delimiters
self.__unsavable_keys = unsavable_keys
self.__wrote_newline = True
self.__closed = False
self.__key_delimiter = self.__key_delimiters[0]
self.__key_value_delimiter = self.__key_value_delimiters[0]
(self.__section_name_surrounding_beg,
self.__section_name_surrounding_end) = (
tuple(self.__section_name_surroundings.items())[0])
def _close(self):
self.__file.close()
def write_sections(self, sections):
assert not self.__closed
self.__wrote_newline = True
for section in sections:
self.write_section(sections[section])
def write_section(self, section):
assert not self.__closed
if not isinstance(section, Section):
raise TypeError
self.__write_section_name(section.name)
keys = []
val = None
section_iter = section.__iter__(ignore_defaults=True)
try:
while True:
setting = section[next(section_iter)]
if (str(setting) == val and
not self.is_comment(setting.key) and
(
(setting.key not in self.__unsavable_keys) or
(not setting.from_cli))):
keys.append(setting.key)
elif ((setting.key not in self.__unsavable_keys) or
(not setting.from_cli)):
self.__write_key_val(keys, val)
keys = [setting.key]
val = str(setting)
except StopIteration:
self.__write_key_val(keys, val)
def __write_section_name(self, name):
assert not self.__closed
if not self.__wrote_newline:
self.__file.write("\n")
self.__file.write(self.__section_name_surrounding_beg + name +
self.__section_name_surrounding_end + '\n')
self.__wrote_newline = False
def __write_key_val(self, keys, val):
assert not self.__closed
if keys == []:
return
if all(self.is_comment(key) for key in keys):
self.__file.write(val + "\n")
self.__wrote_newline = val == ""
return
# Add escape characters as appropriate
keys = [escape(key, chain(['\\'],
self.__key_value_delimiters,
self.__comment_seperators,
self.__key_delimiters,
self.__section_override_delimiters))
for key in keys]
val = escape(val, chain(['\\'], self.__comment_seperators))
self.__file.write((self.__key_delimiter + " ").join(keys) + " " +
self.__key_value_delimiter + " " + val + "\n")
self.__wrote_newline = False
@staticmethod
def is_comment(key):
return key.lower().startswith("comment")
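# A minimal usage sketch, assuming coala's Section/Setting API for building
# the section to be written (the file name and contents are illustrative):
#
#   from coalib.settings.Section import Section
#   from coalib.settings.Setting import Setting
#   section = Section('default')
#   section.append(Setting('bears', 'SpaceConsistencyBear'))
#   writer = ConfWriter('.coafile')
#   writer.write_section(section)
#   writer.close()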
|
ODM2/ODM2WebSDL
|
src/accounts/models.py
|
Python
|
bsd-3-clause
| 910
| 0
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.contrib.auth.models import AbstractUser
from dataloader.models import Affiliation
class User(AbstractUser):
affiliation_id = models.IntegerField(null=True) # Temporarily nullable
email = models.EmailField(max_length=254, unique=True, blank=False)
organization_code = models.CharField(max_length=50, blank=True, null=True)
    organization_name = models.CharField(max_length=255, blank=True, null=True)
@property
def affiliation(self):
return Affiliation.objects.get(pk=self.affiliation_id)
def owns_site(self, registration):
return registration.django_user == self
def can_administer_site(self, registration):
return self.is_staff or registration.django_user == self
class Meta:
db_table = 'auth_user'
|
khchine5/book
|
lino_book/projects/properties/models.py
|
Python
|
bsd-2-clause
| 4,443
| 0.008103
|
"""
Module `lino_xl.lib.properties`
-------------------------------
Imagine that we are doing a study about alimentary habits. We observe a
defined series of properties on the people who participate in our study.
Here are the properties that we are going to observe::
>>> weight = properties.INT.create_property(name='weight')
>>> weight.save()
>>> married = properties.BOOL.create_property(name='married')
>>> married.save()
>>> favdish = properties.CHAR.create_property(name='favdish',label='favorite dish')
>>> favdish.save()
>>> favdish.create_value("Cookies").save()
>>> v = favdish.create_value("Fish").save()
>>> favdish.create_value("Meat").save()
>>> favdish.create_value("Vegetables").save()
Now we have setup the properties. Let's have a look at this metadata::
>>> print favdish.choices_list()
[u'Cookies', u'Fish', u'Meat', u'Vegetables']
>>> qs = properties.Property.objects.all()
>>> ["%s (%s)" % (p.name,','.join(map(str,p.choices_list()))) for p in qs]
[u'weight ()', u'married (True,False)', u'favdish (Cookies,Fish,Meat,Vegetables)']
PropValuesByOwner is a report that cannot be rendered into a normal grid because the 'value' column has a variable data type, but its render_to_dict() method is used to fill an `Ext.grid.PropertyGrid`:
>>> properties.PropValuesByOwner().request(master=Person).render_to_dict()
{'count': 3, 'rows': [{'name': u'favdish', 'value': ''}, {'name': u'married', 'value': None}, {'name': u'weight', 'value': None}], 'title': u'Properties for persons'}
Here are the people we are going to analyze::
>>> chris = Person(name='Chris')
>>> chris.save()
>>> fred = Person(name='Fred')
>>> fred.save()
>>> vera = Person(name='Vera')
>>> vera.save()
>>> mary = Person(name='Mary')
>>> mary.save()
Now we are ready to fill in some real data. Chris, Fred and Vera
answered together to each question. First we asked them "What's
your weight?", and they answered:
>>> weight.set_value_for(chris,70)
>>> weight.set_value_for(fred,110)
>>> weight.set_value_for(vera,60)
When asked whether they were married, they answered:
>>> married.set_value_for(chris,True)
>>> married.set_value_for(fred,False)
>>> married.set_value_for(vera,True)
And about their favourite dish they answered:
>>> favdish.set_value_for(chris,'Cookies')
>>> favdish.set_value_for(fred,'Fish')
>>> favdish.set_value_for(vera,'Vegetables')
Mary came later. She answered all questions at once, which we can enter
in one line of code:
>>> properties.set_value_for(mary,married=True,favdish='Meat')
Note that Mary didn't know her weight.
To see the property values of a person, we can use a manual query...
>>> qs = properties.PropValue.objects.filter(owner_id=fred.pk).order_by('prop__name')
>>> [v.by_owner() for v in qs]
[u'favdish: Fish', u'married: False', u'weight: 110']
... or use the `PropValuesByOwner` report:
>>> properties.PropValuesByOwner().request(master_instance=fred).render_to_dict()
{'count': 3, 'rows': [{'name': u'favdish', 'value': u'Fish'}, {'name': u'married', 'value': False}, {'name': u'weight', 'value': 110}], 'title': u'Properties for Fred'}
Note how properties.PropValuesByOwner also returns 3 rows for Mary although we don't know her weight:
>>> properties.PropValuesByOwner().request(master_instance=mary).render_to_dict()
{'count': 3, 'rows': [{'name': u'favdish', 'value': u'Meat'}, {'name': u'married', 'value': True}, {'name': u'weight', 'value': None}], 'title': u'Properties for Mary'}
Query by property:
>>> qs = properties.PropValue.objects.filter(prop=weight)
>>> [v.by_property() for v in qs]
[u'Chris: 70', u'Fred: 110', u'Vera: 60']
>>> qs = weight.values_query().order_by('value')
>>> [v.by_property() for v in qs]
[u'Vera: 60', u'Chris: 70', u'Fred: 110']
`Report.as_text()` is currently broken:
>>> #properties.PropValuesByOwner().as_text(fred)
"""
from django.db import models
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
from lino_xl.lib.properties import models as properties
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Person(models.Model):
name = models.CharField(max_length=20)
properties = generic.GenericRelation(properties.Property)
def __str__(self):
return self.name
|
robert-impey/tree-sorter
|
randomtrees.py
|
Python
|
mit
| 2,051
| 0.000488
|
#!/usr/bin/env python3
"""
Generates random trees
"""
import random
import argparse
alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
def generate_random_item(length=8, chars=alphabet):
item = ""
for i in range(length):
index = random.randint(0, len(chars) - 1)
item += chars[index]
return item
def generate_random_tree_lines(
depth,
items,
length,
chars=alphabet,
current_indentation=''):
lines = []
if depth > 0:
remaining_items_to_add = items
while remaining_items_to_add > 0:
lines.append('{0}{1}'.format(current_indentation, generate_random_item(length, chars)))
remaining_items_to_add -= 1
sub_lines = generate_random_tree_lines(
depth - 1,
items,
length,
chars,
current_indentation + ' ')
for sub_line in sub_lines:
lines.append(sub_line)
return lines
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Tree sorting Stress Test')
parser.add_argument('--Depth',
help='The depth of the trees.',
type=int,
default=3)
parser.add_argument('--Items',
help='The number of items for each node of the tree.',
type=int,
default=10)
parser.add_argument('--Length',
help='The length of each item.',
type=int,
default=8)
parser.add_argument('--Alphabet',
help='The alphabet of allowed characters.',
type=str,
default=alphabet)
    args = parser.parse_args()
random_tree_lines = generate_random_tree_lines(
args.Depth,
args.Items,
args.Length,
args.Alphabet)
for line in random_tree_lines:
print(line)
|
elkingtowa/alphacoin
|
Bitcoin/ngcccbase-master/ngcccbase/p2ptrade/comm.py
|
Python
|
mit
| 3,132
| 0.003831
|
import urllib2
import json
import time
import threading
import Queue
from utils import make_random_id, LOGINFO, LOGDEBUG, LOGERROR
class CommBase(object):
def __init__(self):
self.agents = []
def add_agent(self, agent):
self.agents.append(agent)
class HTTPComm(CommBase):
def __init__(self, config, url = 'http://localhost:8080/messages'):
super(HTTPComm, self).__init__()
self.config = config
self.lastpoll = -1
self.url = url
self.own_msgids = set()
def post_message(self, content):
msgid = make_random_id()
content['msgid'] = msgid
self.own_msgids.add(msgid)
LOGDEBUG( "----- POSTING MESSAGE ----")
data = json.dumps(content)
LOGDEBUG(data)
u = urllib2.urlopen(self.url, data)
return u.read() == 'Success'
def poll_and_dispatch(self):
url = self.url
if self.lastpoll == -1:
url = url + "?from_timestamp_rel=%s" % self.config['offer_expiry_interval']
else:
url = url + '?from_serial=%s' % (self.lastpoll+1)
print (url)
u = urllib2.urlopen(url)
resp = json.loads(u.read())
for x in resp:
if int(x.get('serial',0)) > self.lastpoll: self.lastpoll = int(x.get('serial',0))
content = x.get('content',None)
if content and not content.get('msgid', '') in self.own_msgids:
for a in self.agents:
a.dispatch_message(content)
class ThreadedComm(CommBase):
class AgentProxy(object):
def __init__(self, tc):
self.tc = tc
def dispatch_message(self, content):
self.tc.receive_queue.put(content)
def __init__(self, upstream_comm):
super(ThreadedComm, self).__init__()
self.upstream_comm = upstream_comm
self.send_queue = Queue.Queue()
self.receive_queue = Queue.Queue()
self.comm_thread = CommThread(self, upstream_comm)
upstream_comm.add_agent(self.AgentProxy(self))
def post_message(self, content):
self.send_queue.put(content)
def poll_and_dispatch(self):
while not self.receive_queue.empty():
content = self.receive_queue.get()
for a in self.agents:
a.dispatch_message(content)
def start(self):
self.comm_thread.start()
def stop(self):
self.comm_thread.stop()
self.comm_thread.join()
class CommThread(threading.Thread):
def __init__(self, threaded_comm, upstream_comm):
threading.Thread.__init__(self)
self._stop = threading.Event()
self.threaded_comm = threaded_comm
self.upstream_comm = upstream_comm
def run(self):
        send_queue = self.threaded_comm.send_queue
receive_queue = self.threaded_comm.receive_queue
while not self._stop.is_set():
while not send_queue.empty():
self.upstream_comm.post_message(send_queue.get())
            self.upstream_comm.poll_and_dispatch()
time.sleep(1)
def stop(self):
self._stop.set()
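# A minimal usage sketch wiring the threaded wrapper around the HTTP
# transport (the config values and message content are illustrative):
#
#   comm = ThreadedComm(HTTPComm({'offer_expiry_interval': 30}))
#   comm.start()
#   comm.post_message({'oid': make_random_id()})  # queued, sent by CommThread
#   comm.poll_and_dispatch()                      # dispatches received messages
#   comm.stop()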
|
svamp/rp_management
|
roleplaying/wsgi.py
|
Python
|
gpl-3.0
| 399
| 0
|
"""
WSGI config for roleplaying project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "roleplaying.settings")
application = get_wsgi_application()
|
aaronbassett/djangocon-pusher
|
talk/todo/models.py
|
Python
|
mit
| 891
| 0
|
# -*- coding: utf-8 -*-
# Django
from django.db import models
from .mixins import SelfPublishModel
from .serializers import TodoListSerializer, TodoItemSerializer
class TodoList(SelfPublishModel, models.Model):
serializer_class = TodoListSerializer
channel_name = u"todo-list"
name = models.CharField(max_length=100)
description = models.TextField()
def __unicode__(self):
return u"{name}".format(
name=self.name,
)
class TodoItem(SelfPublishModel, models.Model):
serializer_class = TodoItemSerializer
channel_name = u"todo-item"
todo_list = models.ForeignKey(TodoList)
    done = models.BooleanField(default=False)
text = models.CharField(max_length=100)
def __unicode__(self):
return u"{text} ({status})".format(
text=self.text,
status=(u"✓" if self.done else u"×")
)
|
ddsc/ddsc-core
|
ddsc_core/migrations/0078_auto__chg_field_alarm_last_checked.py
|
Python
|
mit
| 28,517
| 0.007925
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Alarm.last_checked'
db.alter_column(u'ddsc_core_alarm', 'last_checked', self.gf('django.db.models.fields.DateTimeField')(null=True))
def backwards(self, orm):
# Changing field 'Alarm.last_checked'
db.alter_column(u'ddsc_core_alarm', 'last_checked', self.gf('django.db.models.fields.DateTimeField')())
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'ddsc_core.alarm': {
'Meta': {'object_name': 'Alarm'},
'active_status': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'date_cr': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'description': ('django.db.models.fields.TextField', [], {'default': "u''", 'null': 'True', 'blank': 'True'}),
'first_born': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'frequency': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_checked': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'logical_check': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'message_type': ('django.db.models.fields.IntegerField', [], {'default': '4'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'previous_alarm': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ddsc_core.Alarm']", 'null': 'True', 'blank': 'True'}),
'single_or_group': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'to': "orm['contenttypes.ContentType']"}),
'template': ('django.db.models.fields.TextField', [], {'default': "u'this is a alarm message template'"}),
'urgency': ('django.db.models.fields.IntegerField', [], {'default': '2'})
},
u'ddsc_core.alarm_active': {
'Meta': {'object_name': 'Alarm_Active'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'alarm': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ddsc_core.Alarm']"}),
'deactivated_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(1900, 1, 1, 0, 0)'}),
'first_triggered_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(1900, 1, 1, 0, 0)'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {})
},
u'ddsc_core.alarm_item': {
'Meta': {'object_name': 'Alarm_Item'},
'alarm': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ddsc_core.Alarm']"}),
'alarm_type': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'to': "orm['contenttypes.ContentType']"}),
'comparision': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'first_born': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'logical_check': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
            'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'value_bool': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'value_double': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'value_int': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'value_text': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'value_type': ('django.db.models.fields.IntegerField', [], {'default': '1'})
},
u'ddsc_core.compartment': {
'Meta': {'ordering': "[u'description']", 'object_name': 'Compartment'},
'begin_date': ('django.db.models.fields.DateField', [], {}),
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '12'}),
'description': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
'end_date': ('django.db.models.fields.DateField', [], {}),
'group': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'numeric_code': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True'}),
'source': ('django.db.m
|
Richard-West/RootTheBox
|
handlers/StaticFileHandler.py
|
Python
|
apache-2.0
| 2,151
| 0
|
# -*- coding: utf-8 -*-
'''
Created on Mar 13, 2012
@author: moloch
Copyright 2012 Root the Box
Licensed under the Apache License, Version 2.0 (the "Lice
|
nse");
you may
|
not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-------------
Modification of the tornado web StaticFileHandler
'''
import logging
from tornado.web import StaticFileHandler as DefaultStaticHandler
from tornado.options import options
class StaticFileHandler(DefaultStaticHandler):
'''
Same as the normal Tornado StaticFileHandler with a
couple overloaded methods.
'''
session = None
config = options
def set_default_headers(self):
'''
We need to add the security headers here too, especially the
        X-Content-Type-Options header, since we whitelist file extensions.
        This should prevent anyone from serving html/etc from the static
        handler.
'''
self.set_header("Server", "Microsoft-IIS/7.5")
self.add_header("X-AspNetMvc-Version", "3.0")
self.add_header("X-AspNet-Version", "4.0.30319")
self.add_header("X-Powered-By", "ASP.NET")
self.add_header("X-Frame-Options", "DENY")
self.add_header("X-XSS-Protection", "1; mode=block")
self.add_header("X-Content-Type-Options", "nosniff")
if self.config.ssl:
self.add_header("Strict-Transport-Security", 'max-age=31536000;')
def write_error(self, status_code, **kwargs):
''' Render a generic error page '''
logging.error("Static file request from %s resulted in %d status" % (
self.request.remote_ip, status_code
))
        # Regardless of error, send a 404
self.render('public/404.html')
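# A minimal usage sketch (the route and directory are illustrative) showing
# how this subclass drops into a tornado Application in place of the default:
#
#   from tornado.web import Application
#   app = Application([
#       (r'/static/(.*)', StaticFileHandler, {'path': 'static/'}),
#   ])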
|
iulian787/spack
|
var/spack/repos/builtin/packages/launchmon/package.py
|
Python
|
lgpl-2.1
| 1,541
| 0.001947
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Launchmon(AutotoolsPackage):
"""Software infrastructure that enables HPC run-time tools to
co-locate tool daemons with a parallel job."""
homepage = "https://github.com/LLNL/LaunchMON"
url = "https://github.com/LLNL/LaunchMON/releases/download/v1.0.2/launchmon-v1.0.2.tar.gz"
git = "https://github.com/llnl/launchmon.git"
version('master', branch='master')
version('1.0.2', sha256='1d301ccccfe0873efcd66da87ed5e4d7bafc560b00aee396d8a9365f53b3a33a')
depends_on('autoconf', type='build', when='@master')
depends_on('automake', type='build', when='@master')
depends_on('libtool', type='build', when='@master')
    depends_on('pkgconfig', type='build')
depends_on('libgcrypt')
depends_on('libgpg-error')
depends_on("elf", type='link')
depends_on("boost")
depends_on("spectrum-mpi", when='arch=ppc64le')
patch('launchmon-char-conv.patch', when='@1.0.2')
    patch('for_aarch64.patch', when='@:1.0.2 target=aarch64:')
def setup_build_environment(self, env):
if self.spec.satisfies('@master'):
# automake for launchmon requires the AM_PATH_LIBGCRYPT macro
# which is defined in libgcrypt.m4
env.prepend_path('ACLOCAL_PATH',
self.spec['libgcrypt'].prefix.share.aclocal)
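# Hedged usage note (not part of the original recipe): a package file like
# this is normally exercised from the Spack CLI, e.g.
#     spack install launchmon@1.0.2
#     spack install launchmon@master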
|
gonrin/gatco
|
gatco/exceptions.py
|
Python
|
mit
| 78
| 0.025641
|
from sanic.exceptions import *
class GatcoException(SanicException):
    pass
|
|
zyga/debian.plainbox
|
plainbox/impl/secure/rfc822.py
|
Python
|
gpl-3.0
| 16,235
| 0
|
# This file is part of Checkbox.
#
# Copyright 2012, 2013 Canonical Ltd.
# Written by:
# Sylvain Pineau <sylvain.pineau@canonical.com>
# Zygmunt Krynicki <zygmunt.krynicki@canonical.com>
#
# Checkbox is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Checkbox is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Checkbox. If not, see <http://www.gnu.org/licenses/>.
"""
:mod:`plainbox.impl.secure.rfc822` -- RFC822 parser
===================================================
Implementation of rfc822 serializer and deserializer.
.. warning::
THIS MODULE DOES NOT HAVE STABLE PUBLIC API
"""
from functools import total_ordering
from inspect import cleandoc
import inspect
import logging
from plainbox.abc import ITextSource
logger = logging.getLogger("plainbox.secure.rfc822")
class UnknownTextSource(ITextSource):
"""
A :class:`ITextSource` subclass indicating that the source of text is
unknown.
    Instances of this class are constructed by gen_rfc822_records() when
    no explicit source is provided and the stream has no name. They serve as
non-None values to prevent constructing :class:`PythonFileTextSource` with
origin computed from :meth:`Origin.get_caller_origin()`
"""
def __str__(self):
return "???"
def __repr__(self):
return "<{}>".format(self.__class__.__name__)
def __eq__(self, other):
if isinstance(other, UnknownTextSource):
return True
else:
return False
def __gt__(self, other):
return NotImplemented
@total_ordering
class FileTextSource(ITextSource):
"""
A :class:`ITextSource` subclass indicating that text came from a file.
:ivar filename:
name of the file something comes from
"""
def __init__(self, filename):
self.filename = filename
def __str__(self):
return self.filename
def __repr__(self):
return "<{} filename:{!r}>".format(
self.__class__.__name__, self.filename)
def __eq__(self, other):
if isinstance(other, FileTextSource):
return self.filename == other.filename
else:
return False
def __gt__(self, other):
if isinstance(other, FileTextSource):
return self.filename > other.filename
else:
return NotImplemented
class PythonFileTextSource(FileTextSource):
"""
A :class:`FileTextSource` subclass indicating the file was a python file.
    It implements no new behaviour, but in some contexts it might be helpful to
    differentiate on the type of the source field in the origin of a job
definition record.
:ivar filename:
name of the python filename that something comes from
"""
@total_ordering
class Origin:
"""
Simple class for tracking where something came from
:ivar source:
    something that describes where the text came from; technically it
should be a :class:`~plainbox.abc.ITextSource` subclass but that
interface defines just the intent, not any concrete API.
:ivar line_start:
the number of the line where the record begins
:ivar line_end:
the number of the line where the record ends
"""
__slots__ = ['source', 'line_start', 'line_end']
def __init__(self, source, line_start, line_end):
self.source = source
self.line_start = line_start
self.line_end = line_end
def __repr__(self):
return "<{} source:{!r} line_start:{} line_end:{}>".format(
self.__class__.__name__,
self.source, self.line_start, self.line_end)
def __str__(self):
return "{}:{}-{}".format(
self.source, self.line_start, self.line_end)
def __eq__(self, other):
if isinstance(other, Origin):
return ((self.source, self.line_start, self.line_end) ==
(other.source, other.line_start, other.line_end))
else:
return NotImplemented
def __gt__(self, other):
if isinstance(other, Origin):
return ((self.source, self.line_start, self.line_end) >
(other.source, other.line_start, other.line_end))
else:
return NotImplemented
@classmethod
def get_caller_origin(cls, back=0):
"""
Create an Origin instance pointing at the call site of this method.
"""
# Create an Origin instance that pinpoints the place that called
# get_caller_origin().
caller_frame, filename, lineno = inspect.stack(0)[2 + back][:3]
try:
source = PythonFileTextSource(filename)
origin = Origin(source, lineno, lineno)
finally:
# Explicitly delete the frame object, this breaks the
# reference cycle and makes this part of the code deterministic
# with regards to the CPython garbage collector.
#
# As recommended by the python documentation:
# http://docs.python.org/3/library/inspect.html#the-interpreter-stack
del caller_frame
return origin
class RFC822Record:
"""
    Class for tracking RFC822 records
This is a simple container for the dictionary of data.
Each instance also holds the origin of the data
"""
    def __init__(self, data, origin=None):
"""
Initialize a new record.
:param data:
A dictionary with record data
:param origin:
A :class:`Origin` instance that describes where the data came from
"""
if origin is None:
origin = Origin.get_caller_origin()
self._data = data
self._origin = origin
def __repr__(self):
return "<{} data:{!r} origin:{!r}>".format(
self.__class__.__name__, self._data, self._origin)
def __eq__(self, other):
if isinstance(other, RFC822Record):
return (self._data, self._origin) == (other._data, other._origin)
return NotImplemented
def __ne__(self, other):
if isinstance(other, RFC822Record):
return (self._data, self._origin) != (other._data, other._origin)
return NotImplemented
@property
def data(self):
"""
The data set (dictionary)
"""
return self._data
@property
def origin(self):
"""
The origin of the record.
"""
return self._origin
def dump(self, stream):
"""
Dump this record to a stream
"""
def _dump_part(stream, key, values):
stream.write("%s:\n" % key)
for value in values:
if not value:
stream.write(" .\n")
elif value == ".":
stream.write(" ..\n")
else:
stream.write(" %s\n" % value)
for key, value in self._data.items():
if isinstance(value, (list, tuple)):
_dump_part(stream, key, value)
elif isinstance(value, str) and "\n" in value:
values = value.split("\n")
if not values[-1]:
values = values[:-1]
_dump_part(stream, key, values)
else:
stream.write("%s: %s\n" % (key, value))
stream.write("\n")
class RFC822SyntaxError(SyntaxError):
"""
SyntaxError subclass for RFC822 parsing functions
"""
def __init__(self, filename, lineno, msg):
self.filename = filename
self.lineno = lineno
self.msg = msg
def __eq__(self, other):
if isinstance(other, RFC822SyntaxError):
            return ((self.filename, self.lineno, self.msg) ==
                    (other.filename, other.lineno, other.msg))
|
rookuu/AdventOfCode-2015
|
Day 9/Puzzle 1 and 2.py
|
Python
|
mit
| 998
| 0.001002
|
#!/usr/bin/env python
"""
Solution to Day 9 - Puzzles 1 and 2 of the Advent Of Code 2015 series of challenges.
--- Day 9: All in a Single Night ---
Finds the shortest and longest routes that visit every location exactly once.
-----------------------------------------
Author: Luke "rookuu" Roberts
"""
from collections import defaultdict
from itertools import permutations
inputFile = open('input.txt')
dataFromFile = inputFile.read().splitlines()
places = set()
graph = defaultdict(dict)
distances = []
for line in dataFromFile:
values = line.split(" ")
places.add(values[0])
places.add(values[2])
graph[values[0]][values[2]] = int(values[4])
graph[values[2]][values[0]] = int(values[4])
for perm in permutations(places):
distance = 0
for i, elem in enumerate(perm):
if i != len(perm) - 1:
distance += graph[perm[i]][perm[i+1]]
distances.append(distance)
print "The minimum distance that Santa can take is: " + str(min(distances))
print "The maximum distance that Santa can take is: " + str(max(distances))
|
lukas-krecan/tensorflow
|
tensorflow/python/__init__.py
|
Python
|
apache-2.0
| 3,483
| 0.001723
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=wildcard-import,unused-import,g-bad-import-order,line-too-long
"""Import core names of TensorFlow.
Programs that want to build Brain Ops and Graphs without having to import the
constructors and utilities individually can import this file:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.python.platform
import tensorflow as tf
"""
import inspect
import traceback
# pylint: disable=g-import-not-at-top
try:
import tensorflow.python.platform
from tensorflow.core.framework.graph_pb2 import *
except ImportError:
msg = """%s\n\nError importing tensorflow. Unless you are using bazel,
you should not try to import tensorflow from its source directory;
please exit the tensorflow source tree, and relaunch your python interpreter
from there.""" % traceback.format_exc()
raise ImportError(msg)
from tensorflow.core.framework.summary_pb2 import *
from tensorflow.core.framework.attr_value_pb2 import *
from tensorflow.core.framework.config_pb2 import *
from tensorflow.core.util.event_pb2 import *
# Import things out of contrib
from tensorflow import contrib
# Framework
from tensorflow.python.framework.framework_lib import *
from tensorflow.python.framework.versions import *
from tensorflow.python.framework import errors
# Session
from tensorflow.python.client.client_lib import *
# Ops
from tensorflow.python.ops.standard_ops import *
# Bring in subpackages
from tensorflow.python.ops import learn
from tensorflow.python.ops import nn
from tensorflow.python.ops import image_ops as image
from tensorflow.python.user_ops import user_ops
from tensorflow.python.util import compat
from tensorflow.python import unsupported
# Import the names from python/training.py as train.Name.
from tensorflow.python.training import training as train
# Sub-package for performing i/o directly instead of via ops in a graph.
from tensorflow.python.lib.io import python_io
# Make some application and test modules available.
from tensorflow.python.platform import app
from tensorflow.python.platform import flags
from tensorflow.python.platform import logging
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import test
# Don't export modules except for the few we really want
_whitelist = set([app, compat, contrib, errors, flags, image, learn, logging, nn,
python_io, resource_loader, test, train, unsupported,
user_ops])
# TODO(b/25561952): tf.tensor_util is DEPRECATED. Please avoid.
_whitelist.update([tensor_util]) # pylint: disable=undefined-variable
__all__ = [name for name, x in locals().items() if not name.startswith('_') and
(not inspect.ismodule(x) or x in _whitelist)]
__all__.append('__version__')
|
yekeqiang/mypython
|
time_1.py
|
Python
|
gpl-2.0
| 438
| 0.004566
|
#!/usr/bin/env python
import time
class Timer(object):
    def __init__(self, verbose=False):
self.verbose = verbose
def __enter__(self):
self.start = time.time()
return self
def __exit__(self, *args):
self.end = time.time()
self.secs = self.end - self.start
self.msecs = self.secs * 1000 # millisecs
if self.verbose:
print 'elapsed time: %f ms' % self.msecs
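# Hedged usage sketch (not part of the original file): timing a block with
# the context manager above; the summation is just a stand-in workload.
def _demo_timer():
    with Timer(verbose=True) as t:
        sum(xrange(1000000))
    return t.secs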
|
iulian787/spack
|
var/spack/repos/builtin/packages/py-pytz/package.py
|
Python
|
lgpl-2.1
| 1,493
| 0.007368
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyPytz(PythonPackage):
"""World timezone definitions, modern and historical."""
homepage = "http://pythonhosted.org/pytz"
url = "https://pypi.io/packages/source/p/pytz/pytz-2019.3.tar.gz"
    import_modules = ['pytz']
version('2020.1', sha256='c35965d010ce31b23eeb663ed3cc8c906275d6be1a34393a1d73a41febf4a048')
version('2019.3', sha256='b02c06db6cf09c12dd25137e563b31700d3b80fcc4ad23abb7a315f2789819be')
version('2019.1', sha256='d747dd3d23d77ef44c6a3526e274af6efeb0a6f1afd5a69ba4d5be4098c8e141')
version('2018.4', sha256='c06425302f2cf668f1bba7a0a03f3c1d34d4ebeef2c72003da308b3947c7f749')
    version('2016.10', sha256='9a43e20aa537cfad8fe7a1715165c91cb4a6935d40947f2d070e4c80f2dcd22b')
version('2016.6.1', sha256='6f57732f0f8849817e9853eb9d50d85d1ebb1404f702dbc44ee627c642a486ca')
version('2014.10', sha256='a94138b638907491f473c875e8c95203a6a02efef52b6562be302e435016f4f3')
version('2014.9', sha256='c5bcbd11cf9847096ae1eb4e83dde75d10ac62efe6e73c4600f3f980968cdbd2')
version('2015.4', sha256='c4ee70cb407f9284517ac368f121cf0796a7134b961e53d9daf1aaae8f44fb90')
version('2016.3', sha256='3449da19051655d4c0bb5c37191331748bcad15804d81676a88451ef299370a8')
depends_on('py-setuptools', type='build')
|
avinassh/random-python-scripts
|
Modern-Combat-4/MC4.py
|
Python
|
mit
| 242
| 0.008264
|
import requests
from time import sleep
URL = 'http://anonymouse.org/cgi-bin/anon-www.cgi/http://www.ign.com/private/prime/promo/modern-combat-4/code'
for i in range(1, 1000):
r = requests.get(URL)
print r.json()['code']
    sleep(2)
|
iamaziz/PyDataset
|
pydataset/datasets_handler.py
|
Python
|
mit
| 1,747
| 0
|
# datasets_handler.py
# dataset handling file
import pandas as pd
from .utils import html2text
from .locate_datasets import __items_dict, __docs_dict, __get_data_folder_path
items = __items_dict()
docs = __docs_dict()
# make dataframe layout (of __datasets_desc()) terminal-friendly
pd.set_option('display.max_rows', 170)
pd.set_option('display.max_colwidth', 90)
# for terminal, auto-detect
pd.set_option('display.width', None)
# HELPER
def __filter_doc(raw):
note = "PyDataset Documentation (adopted from R Documentation. " \
"The displayed examples are in R)"
txt = raw.replace('R Documentation', note)
return txt
def __read_docs(path):
# raw html
html = open(path, 'r').read()
# html handler
h = html2text.HTML2Text()
h.ignore_links = True
h.ignore_images = True
txt = h.handle(html)
return txt
# MAIN
def __get_csv_path(item):
"""return the full path of the item's csv file"""
return items[item]
def __read_csv(item):
path = __get_csv_path(item)
df = pd.read_csv(path, index_col=0)
# display 'optional' log msg "loaded: Titanic <class 'numpy.ndarray'>"
# print('loaded: {} {}'.format(item, type(df)))
return df
def __get_doc_path(item):
return docs[item]
def __print_item_docs(item):
path = __get_doc_path(item)
    doc = __read_docs(path)  # html format
    txt = __filter_doc(doc)  # edit R related txt
print(txt)
def __datasets_desc():
"""return a df of the available datasets with description"""
datasets = __get_data_folder_path() + 'datasets.csv'
df = pd.read_csv(datasets)
df = df[['Item', 'Title']]
df.columns = ['dataset_id', 'title']
# print('a list of the available datasets:')
return df
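# Hedged usage sketch (not part of the original module): how the helpers
# above fit together; 'iris' is assumed to be among the bundled datasets.
def _demo_handler():
    df = __read_csv('iris')       # load one dataset as a DataFrame
    __print_item_docs('iris')     # print its converted R documentation
    return df.head()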
|
gppeixoto/pcc
|
kmp/paguso/kmp.py
|
Python
|
mit
| 2,708
| 0.016248
|
import sys
verbose = 1
def brute_force(txt, pat):
n = len(txt)
m = len(pat)
# assumption: n > m
i = 0 # store window current position
occ = []
while i < n-m+1:
if verbose:
print " %si=%d" % (i*" ", i)
print "T: %s" % txt
j = 0 # store how many matches until current
while j < m and txt[i+j]==pat[j]:
j += 1
if verbose:
print " %s%s%s" % (i*" ", (j)*"=", "" if (j==m) else "!")
print "P: %s%s" % (i*" ", pat)
print " %sj=%d" % (i*" ", i)
if j==m: #match starting in i
occ.append(i)
i += 1
    return occ
def init_next(pat):
"""
returns table with strict borders for each prefix length
from prefix[:0]="" until prefix[:]=pat
"""
m = len(pat)
B = (m+1)*[-1]
if m==1 or (m > 1 and pat[0]!=pat[1]):
B[1] = 0
i = 1
j = 0
while i < m:
while i+j < m and pat[j]==pat[i+j]:
j += 1
if i+j==m or (pat[i+j]!=pat[j]): #strict border
B[i+j] = j
else:
B[i+j]=B[j]
if B[j]==-1 and pat[0]!=pat[i+j]:
B[i+1]=0
i += j-B[j]
j = max(0, B[j])
return B
def sbrd(pat, j):
m = len(pat)
l = j-1
while l >= 0:
k = 0 # how many matches
while k < l and pat[k]==pat[j-l+k]:
k += 1
if k==l and (j==m or pat[k]!=pat[j]): #strict border
return l
l -= 1
return -1
def kmp(txt, pat):
nxt = init_next(pat)
n = len(txt)
m = len(pat)
# assumption: n > m
i = 0 # store window current position
occ = []
while i < n-m+1:
if verbose:
print " %si=%d" % (i*" ", i)
print "T: %s" % txt
j = 0 # store how many matches until current
while j < m and txt[i+j]==pat[j]:
j += 1
if verbose:
print " %s%s%s" % (i*" ", (j)*"=", "" if (j==m) else "!")
print "P: %s%s" % (i*" ", pat)
print " %sj=%d" % (i*" ", i)
if j==m: #match starting in i
occ.append(i)
sb = sbrd(pat, j)
assert(sb==nxt[j])
if verbose:
print " %ssb=%d" % (i*" ", sb)
i += j - sb
j = max(0, sb)
return occ
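# Hedged sketch (not part of the original file): expected behaviour of kmp()
# on a small input, with the verbose tracing switched off.
def _demo_kmp():
    global verbose
    verbose = 0
    return kmp("ababcabab", "abab")  # expected occurrences: [0, 5]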
def main():
with open(sys.argv[1], "r") as ftxt:
txt = ""
for line in ftxt:
txt += line
print "TXT=\"%s\"" % txt
with open(sys.argv[2], "r") as fpat:
for pat in fpat:
print "PAT=\"%s\"" % pat
print "OCC={0}".format(str(kmp(txt, pat)))
if __name__ == "__main__":
main()
|
StellarCN/py-stellar-base
|
examples/payment_muxed_account.py
|
Python
|
apache-2.0
| 1,168
| 0.001712
|
import pprint
from stellar_sdk import (
Asset,
Keypair,
MuxedAccount,
Network,
Server,
TransactionBuilder,
)
horizon_url = "https://horizon-testnet.stellar.org/"
network_passphrase = Network.TESTNET_NETWORK_PASSPHRASE
alice_secret = "SAHN2RCKC5I7NFDCIUKA3BG4H4T6WMLLGSAZVDKUHF7PQXHMYWD7UAIH"
bob_account = MuxedAccount(
account_id="GBZSQ3YZMZEWL5ZRCEQ5CCSOTXCFCMKDGFFP4IEQN2KN6LCHCLI46UMF",
account_muxed_id=1234,
)
print(f"account_id_muxed: {bob_account.account_muxed}")
# You can also use addresses starting with M.
# bob_account = "MBZSQ3YZMZEWL5ZRCEQ5CCSOTXCFCMKDGFFP4IEQN2KN6LCHCLI46AAAAAAAAAAE2L2QE"
alice_keypair = Keypair.from_secret(alice_secret)
server = Server(horizon_url=horizon_url)
alice_account = server.load_account(alice_keypair.public_key)
transaction = (
TransactionBuilder(
source_account=alice_account,
network_passphrase=network_passphrase,
base_fee=100,
)
.append_payment_op(destination=bob_account, amount="100", asset=Asset.native())
.set_timeout(30)
.build()
)
transaction.sign(alice_keypair)
resp = server.submit_transaction(transaction)
pprint.pprint(resp)
|
daStrauss/subsurface
|
src/expts/paramSplitFieldBoth.py
|
Python
|
apache-2.0
| 1,344
| 0.013403
|
'''
Created on Oct 3, 2012
Copyright © 2013
The Board of Trustees of The Leland Stanford Junior University.
All Rights Reserved
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
@author: dstrauss
'''
import numpy as np
D = {'solverType':'splitField', 'flavor':'both', 'numRuns':500, 'expt':'intParameters', 'numProcs':16}
def getMyVars(parseNumber, D):
'''routine to return the parameters to test at the current iteration.'''
rhos, xis = np.meshgrid(np.logspace(2,4,10), np.logspace(-4,-2,10))
rhos = rhos.flatten()
xis = xis.flatten()
noFreqs = np.array(8)
bkg = 0
D['freqs'] = np.round(np.logspace(np.log10(1000), np.log10(50000), noFreqs))
D['inc'] = np.array([45*np.pi/180.0])
D['rho'] = rhos[parseNumber%100]
D['xi'] = xis[parseNumber%100]
D['bkgNo'] = int(parseNumber/100) + 100
return D
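# Hedged sketch (not part of the original file): parseNumber walks the
# flattened 10x10 (rho, xi) grid, and each block of 100 runs advances the
# background number.
def _demo_params():
    d = getMyVars(123, dict(D))
    return d['rho'], d['xi'], d['bkgNo']  # grid cell 23, bkgNo 101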
|
Evert-Arends/AuroraPlusClient
|
run.py
|
Python
|
mit
| 5,859
| 0.003072
|
#!/usr/bin/env python2
# Imports
import json
import datetime
from bin import monitor, register
from ClientSettings import ClientSettings
from ClientSettings import constants
import dataCollector
import settings
import time
import sys
Monitor = monitor.Monitor()
message1 = "Client script running on version: {0}".format(ClientSettings.VERSION)
message2 = "Your version is still maintained: {0}".format('True')
class StartMonitor:
def __init__(self):
print message1, message2
print 'Initialising...'
def monitor(self):
while 1:
MonitorData = self.collect_data()
MonitorData = self.check_need_to_log(MonitorData)
self.write_data(MonitorData)
self.upload_data()
self.update_count_requests()
self.print_data(MonitorData)
print
@staticmethod
def collect_data():
MonitorData = dataCollector.get_all_data()
return MonitorData
@staticmethod
def print_data(MonitorData):
print '\-----------------------------------System Statistics--------------------------------------\\'
print ' Your Network load is at the moment sent: {0} Bytes, and received: {1} Bytes.'.format(MonitorData[3][0],
MonitorData[3][1])
print ' Your CPU load is at the moment: {0}%.'.format(MonitorData[2])
print ' Your RAM usage is at the moment: {0}%.'.format(MonitorData[4])
print ' Your DISK usage is at the moment: {0}%.'.format(MonitorData[5])
print ' You currently have {0} read, and {1} written.'.format(MonitorData[6][0], MonitorData[6][1])
print ' Your ServerId is: {0}.'.format(MonitorData[1])
print ' Your HostName is: {0}.'.format(MonitorData[0])
print ' Last reported message ID is: {0}.'.format(MonitorData[8])
print ' Last reported message is: {0}.'.format(MonitorData[7])
print ' This is request: {0}.'.format(constants.REQUEST_COUNT)
print '\------------------------------------------------------------------------------------------\\'
@staticmethod
def write_data(MonitorData):
with open(settings.JSON_FILE, 'r+') as f:
json_data = json.load(f)
json_data["RequestDetails"]["Time"]["RequestSent"] = str(time.time())
json_data["Server"]["ServerDetails"]["NetworkLoad"]["Sent"] = MonitorData[3][0]
json_data["Server"]["ServerDetails"]["NetworkLoad"]["Received"] = MonitorData[3][1]
json_data["Server"]["ServerDetails"]["ServerName"] = MonitorData[0]
json_data["Server"]["ServerDetails"]["CPU_Usage"] = MonitorData[2]
json_data["Server"]["ServerDetails"]["ServerKey"] = MonitorData[1]
json_data["Server"]["ServerDetails"]["Ram_Usage"] = MonitorData[4]
json_data["Server"]["ServerDetails"]["Disk_Usage"] = MonitorData[5]
json_data["Server"]["ServerDetails"]["Disk_Load"]["Read"] = MonitorData[6][0]
json_data["Server"]["Ser
|
verDetails"]["Disk_Load"]["Write"] = MonitorData[6][1]
if MonitorData[8]:
if Monitor.getLastLogID() < float(MonitorData[8]):
json_data["Server"]["Messages"]["Log"] = MonitorData[7]
json_data["Server"]["Messages"]["AlertID"] = MonitorData[8]
json_data["Server"]["Messages"]["Alert"] = True
else:
json_data["Server"]["Messages"]["Alert"] = False
f.seek(0)
f.write(json.dumps(json_data))
f.truncate()
@staticmethod
def upload_data():
# print 'Sending json.'
Monitor.SendJsonToServer()
@staticmethod
def update_count_requests():
constants.REQUEST_COUNT += 1
return constants.REQUEST_COUNT
def check_need_to_log(self, MonitorData):
if not MonitorData:
pass
CPU = MonitorData[2]
RAM = MonitorData[4]
if float(CPU) > 75:
MonitorData = self.log_message(target='CPU', spike=CPU, MonitorData=MonitorData)
elif float(RAM) > 75:
MonitorData = self.log_message(target='RAM', spike=RAM, MonitorData=MonitorData)
else:
MonitorData = MonitorData
MonitorData[7] = 'None'
MonitorData[8] = 0
self.log_message(target='None', spike=0, MonitorData=MonitorData)
return MonitorData
@staticmethod
def log_message(target, spike, MonitorData):
if target == 'CPU':
message = 'There has been a CPU usage spike of: {0}%!'.format(spike)
elif target == 'RAM':
message = 'There has been a RAM usage spike of: {0}%!'.format(spike)
elif target == 'None':
message = ''
else:
message = 'There is an unexpected spike, we are not sure where it is coming from, ' \
'but the value is: {0}'.format(spike)
MonitorData[7] = message
LastID = Monitor.getLastLogID()
LastID += 1
MonitorData[8] = LastID
return MonitorData
if __name__ == '__main__':
Arguments = sys.argv
try:
if Arguments[1].lower() == '-r':
print 'Registering...'
register = register.Register()
key = register.register_agent()
with open(ClientSettings.FILE_DIR + 'details.json', 'r+') as f:
json_data = json.load(f)
json_data["ServerDetails"]["ServerKey"] = key
f.seek(0)
f.write(json.dumps(json_data))
f.truncate()
else:
constants.REGISTER = False
except IndexError:
constants.REGISTER = False
StartMonitor = StartMonitor()
StartMonitor.monitor()
|
EricssonResearch/calvin-base
|
calvin/actorstore/docobject.py
|
Python
|
apache-2.0
| 12,850
| 0.003735
|
import json
import inspect
import pystache
from calvin.utilities.calvinlogger import get_logger
_log = get_logger(__name__)
class DocObject(object):
"""docstring for DocObject"""
use_links = False
COMPACT_FMT = "{{{qualified_name}}} : {{{short_desc}}}"
DETAILED_FMT_MD = "{{{e_qualified_name}}} : {{{e_short_desc}}}"
DETAILED_FMT_PLAIN = COMPACT_FMT
def __init__(self, namespace, name=None, docs=None):
super(DocObject, self).__init__()
self.ns = namespace
self.name = name
if type(docs) is list:
docs = "\n".join(docs)
self.label = "DocObject"
self.docs = docs.rstrip() or ""
#
# Allow templates to use e_attr to access an escaped version of attribute attr
#
def __getattr__(self, synthetic_attr):
if not synthetic_attr.startswith('e_'):
raise AttributeError("No such attribute: %s" % synthetic_attr)
_, attr = synthetic_attr.split('_', 1)
if not hasattr(self, attr):
raise AttributeError("No such attribute: %s" % attr)
x = getattr(self, attr)
# N.B. Kind of escape should depend on desired output
return self._escape_text(x)
def _escape_text(self, txt):
def _escape_line(line):
if line.startswith(' '):
return line
for c in "\\<>*_{}[]()#+-.!":
line = line.replace(c, "\\"+c)
return line
lines_in = txt.split('\n')
lines_out = [_escape_line(line) for line in lines_in]
return "\n".join(lines_out)
@property
def has_actors(self):
return False
@property
def has_modules(self):
return False
@property
def qualified_name(self):
if self.name:
return "{}.{}".format(self.ns, self.name)
return self.ns
@property
def own_name(self):
return self.name or self.ns
@property
def short_desc(self):
short_desc, _, _ = self.docs.partition('\n')
return short_desc
@property
def slug(self):
return self.qualified_name.replace('.', '_')
#
# "API" to produce output from a DocObject
#
def compact(self):
fmt = inspect.cleandoc(self.COMPACT_FMT)
return pystache.render(fmt, self)
def detailed(self):
fmt = inspect.cleandoc(self.DETAILED_FMT_PLAIN)
return pystache.render(fmt, self)
def markdown(self):
DocObject.use_links = False
fmt = inspect.cleandoc(self.DETAILED_FMT_MD)
return pystache.render(fmt, self)
def markdown_links(self):
DocObject.use_links = True
fmt = inspect.cleandoc(self.DETAILED_FMT_MD)
return pystache.render(fmt, self)
def metadata(self):
return {'is_known': False}
def __repr__(self):
def _convert(x):
try:
return x.name or x.ns
except:
return None
r = {'type':str(self.__class__.__name__)}
r.update(self.__dict__)
return json.dumps(r, default=_convert)
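# Hedged sketch (not part of the original module): the synthetic e_* attributes
# resolved by __getattr__ above return a markdown-escaped copy of the text.
def _demo_docobject():
    d = DocObject('std', 'Sum', 'Adds *tokens*.')
    return d.docs, d.e_docs  # the second item has '*' and '.' backslash-escaped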
class ErrorDoc(DocObject):
"""docstring for ErrDoc"""
COMPACT_FMT = "({{{label}}}) {{{qualified_name}}} : {{{short_desc}}}"
DETAILED_FMT_MD = "({{{label}}}) {{{e_qualified_name}}} : {{{e_short_desc}}}"
DETAILED_FMT_PLAIN = COMPACT_FMT
    def __init__(self, namespace, name, short_desc):
docs = short_desc or "Unknown error"
super(ErrorDoc, self).__init__(namespace, name, docs)
self.label = "Error"
def search(self, search_list):
_log.debug("Actor module {}/ is missing file __init__.py".format(self.ns))
return self
class ModuleDoc(DocObject):
"""docstring for ModuleDoc"""
COMPACT_FMT = """
{{{qualified_name}}}
{{{short_desc}}}
{{#modules_compact}}
Modules: {{{modules_compact}}}
{{/modules_compact}}
{{#actors_compact}}
Actors: {{{actors_compact}}}
{{/actors_compact}}
"""
DETAILED_FMT_PLAIN = """
============================================================
{{{label}}}: {{{qualified_name}}}
============================================================
{{{docs}}}
{{#has_modules}}
Modules:
{{/has_modules}}
{{#modules}}
{{{own_name}}} : {{{short_desc}}}
{{/modules}}
{{#has_actors}}
Actors:
{{/has_actors}}
{{#actors}}
{{{own_name}}} : {{{short_desc}}}
{{/actors}}
"""
DETAILED_FMT_MD = """
## {{{label}}}: {{{e_qualified_name}}} {{#use_links}}<a name="{{{slug}}}"></a>{{/use_links}}
{{{e_docs}}}
{{#has_modules}}
### Modules:
{{/has_modules}}
{{#modules}}
{{#use_links}}[**{{{e_own_name}}}**](#{{{slug}}}){{/use_links}}{{^use_links}}**{{{e_own_name}}}**{{/use_links}} : {{{e_short_desc}}}
{{/modules}}
{{#has_actors}}
### Actors:
{{/has_actors}}
{{#actors}}
{{#use_links}}[**{{{e_own_name}}}**](#{{{slug}}}){{/use_links}}{{^use_links}}**{{{e_own_name}}}**{{/use_links}} : {{{e_short_desc}}}
{{/actors}}
{{#use_links}}[\[Top\]](#Calvin){{/use_links}}
***
"""
def __init__(self, namespace, modules, actors, doclines):
super(ModuleDoc, self).__init__(namespace, None, doclines)
self.modules = modules
self.actors = actors
self.label = "Module"
@property
def has_actors(self):
return bool(self.actors)
@property
def has_modules(self):
return bool(self.modules)
@property
def modules_compact(self):
return ", ".join([x.own_name for x in self.modules if type(x) is not ErrorDoc])
@property
def actors_compact(self):
return ", ".join([x.own_name for x in self.actors if type(x) is not ErrorDoc])
def search(self, search_list):
if not search_list:
return self
name = search_list.pop(0)
for x in self.modules:
if name == x.ns:
return x.search(search_list)
for x in self.actors:
if name == x.name:
if not search_list:
return x
return None # Error
return None
def metadata(self):
metadata = super(ModuleDoc, self).metadata()
metadata['modules'] = [x.ns for x in self.modules]
metadata['actors'] = [x.name for x in self.actors]
return metadata
class ActorDoc(DocObject):
"""docstring for ActorDoc"""
COMPACT_FMT = """
{{{qualified_name}}}({{{fargs}}})
{{{short_desc}}}
{{#has_inports}}Inports: {{{inports_compact}}}{{/has_inports}}
{{#has_outports}}Outports: {{{outports_compact}}}{{/has_outports}}
{{#has_requirements}}Requires: {{{requires_compact}}}{{/has_requirements}}
"""
DETAILED_FMT_PLAIN = """
============================================================
{{{label}}}: {{{qualified_name}}}({{{fargs}}})
============================================================
{{{docs}}}
{{#has_inports}}
Inports:
{{/has_inports}}
{{#inports}}
{{{name}}} : {{{docs}}} {{#props}}Properties({{{props}}}){{/props}}
{{/inports}}
{{#has_outports}}
Outports:
{{/has_outports}}
{{#outports}}
{{{name}}} : {{{docs}}} {{#props}}Properties({{{props}}}){{/props}}
{{/outports}}
{{#has_requirements}}
Requires:
{{{requires_compact}}}
{{/has_requirements}}
"""
DETAILED_FMT_MD = """
## {{{label}}}: {{{e_qualified_name}}}({{{e_fargs}}}) {{#use_links}}<a name="{{{slug}}}"></a>{{/use_links}}
{{{e_docs}}}
{{#has_inports}}
### Inports:
{{/has_inports}}
{{#inports}}
**{{{e_name}}}** : {{{e_docs}}} {{#props}}_Properties({{{e_props}}})_{{/props}}
{{/inports}}
{{#has_outports}}
### Outports:
{{/has_outports}}
{{#outports}}
**{{{e_name}}}** : {{{e_docs}}} {{#props}}_Properties({{{e_props}}})_{{/props}}
{{/outports}}
{{#has_requirements}}
### Requires:
{{{e_requires_compact}}}
{{/has_requirements}}
{{#use_links}}[\[Top\]](#Calvin) [\[Module: {{{e_ns}}}\]](#{{{ns}}}){{/use_links}}
***
"""
def __init__(self,
|
ROGUE-JCTD/vida
|
vida/firestation/api.py
|
Python
|
mit
| 4,167
| 0.00336
|
import json
import logging
from .forms import StaffingForm
from .models import FireStation, Staffing, FireDepartment
from django.core.serializers.json import DjangoJSONEncoder
from tastypie import fields
from tastypie.authentication import SessionAuthentication, ApiKeyAuthentication, MultiAuthentication
from tastypie.authorization import DjangoAuthorization
from tastypie.cache import SimpleCache
from tastypie.constants import ALL
from tastypie.contrib.gis.resources import ModelResource
from tastypie.serializers import Serializer
from tastypie.validation import FormValidation
logger = logging.getLogger(__name__)
# Note: this is from one of my other projects; not sure whether it is actually needed here.
class SessionAuth(SessionAuthentication):
"""
    This is a hack to fix a bug where SessionAuthentication occasionally raises a TypeError.
About:
Every now and then the super class' get_identifier returns a TypeError (getattr(): attribute name must be string).
It seems that the logic that returns the string used for the username sometimes returns None.
"""
def get_identifier(self, request):
"""
Provides a unique string identifier for the requestor.
This implementation returns the user's username.
"""
try:
return super(SessionAuth, self).get_identifier(request)
except TypeError:
return getattr(request.user, 'username')
class PrettyJSONSerializer(Serializer):
json_indent = 2
def to_json(self, data, options=None):
options = options or {}
        data = self.to_simple(data, options)
return json.dumps(data,
cls=DjangoJSONEncoder,
sort_keys=True, ensure_ascii=False, indent=self.json_indent)
class FireDepartmentResource(ModelResource):
"""
The Fire Department API.
"""
class Meta:
resource_name = 'fire-departments'
queryset = FireDepartment.objects.all()
authorization = DjangoAuthorization()
authentication = MultiAuthentication(SessionAuthentication(), ApiKeyAuthentication())
cache = SimpleCache()
list_allowed_methods = ['get']
detail_allowed_methods = ['get']
filtering = {'state': ALL, 'featured': ALL}
serializer = PrettyJSONSerializer()
limit = 120
def determine_format(self, request):
return 'application/json'
class FireStationResource(ModelResource):
"""
The Fire Station API.
"""
department = fields.ForeignKey(FireDepartmentResource, 'department', null=True)
class Meta:
resource_name = 'firestations'
queryset = FireStation.objects.all()
cache = SimpleCache()
list_allowed_methods = ['get']
detail_allowed_methods = ['get']
filtering = {'department': ('exact',), 'state': ('exact',)}
excludes = ['addressbuildingname', 'complex_id', 'data_security', 'distribution_policy', 'fcode', 'foot_id',
'ftype', 'globalid', 'gnis_id', 'islandmark', 'loaddate', 'objectid', 'permanent_identifier',
'pointlocationtype', 'source_datadesc', 'source_datasetid', 'source_featureid', 'source_originator',
'admintype', 'district'
]
serializer = PrettyJSONSerializer()
limit = 120
def determine_format(self, request):
return 'application/json'
class StaffingResource(ModelResource):
"""
The ResponseCapability API.
"""
firestation = fields.ForeignKey(FireStationResource, 'firestation')
class Meta:
resource_name = 'staffing'
queryset = Staffing.objects.all()
authorization = DjangoAuthorization()
authentication = MultiAuthentication(SessionAuthentication(), ApiKeyAuthentication())
filtering = {'firestation': ALL}
validation = FormValidation(form_class=StaffingForm)
list_allowed_methods = ['get', 'post']
detail_allowed_methods = ['get', 'put', 'delete']
serializer = PrettyJSONSerializer()
always_return_data = True
def determine_format(self, request):
return 'application/json'
|
ScreamingUdder/mantid
|
Testing/SystemTests/tests/analysis/SANSDiagnosticPageTest.py
|
Python
|
gpl-3.0
| 6,474
| 0.001545
|
# pylint: disable=too-many-public-methods, invalid-name, too-many-arguments
from __future__ import (absolute_import, division, print_function)
import unittest
import os
import stresstesting
import mantid
from sans.state.data import get_data_builder
from sans.common.enums import (DetectorType, SANSFacility, IntegralEnum)
from sans.user_file.state_director import StateDirectorISIS
from sans.common.constants import EMPTY_NAME
from sans.common.general_functions import create_unmanaged_algorithm
from sans.gui_logic.models.diagnostics_page_model import run_integral
# -----------------------------------------------
# Tests for the SANSDiagnosticPage
# -----------------------------------------------
class SANSDiagnosticPageTest(unittest.TestCase):
def _compare_workspace(self, workspace, reference_file_name):
# Load the reference file
load_name = "LoadNexusProcessed"
load_options = {"Filename": reference_file_name,
"OutputWorkspace": EMPTY_NAME}
load_alg = create_unmanaged_algorithm(load_name, **load_options)
load_alg.execute()
reference_workspace = load_alg.getProperty("OutputWorkspace").value
# Save the workspace out and reload it again. This equalizes it with the reference workspace
f_name = os.path.join(mantid.config.getString('defaultsave.directory'),
'SANS_temp_single_core_reduction_testout.nxs')
save_name = "SaveNexus"
save_options = {"Filename": f_name,
"InputWorkspace": workspace}
save_alg = create_unmanaged_algorithm(save_name, **save_options)
save_alg.execute()
load_alg.setProperty("Filename", f_name)
load_alg.setProperty("OutputWorkspace", EMPTY_NAME)
load_alg.execute()
ws = load_alg.getProperty("OutputWorkspace").value
# Compare reference file with the output_workspace
# We need to disable the instrument comparison, it takes way too long
        # We need to disable the sample comparison -- since the sample has been modified (more
        # logs are being written), which changes how many entries can be found in the sample logs
compare_name = "CompareWorkspaces"
compare_options = {"Workspace1": ws,
"Workspace2": reference_workspace,
"Tolerance": 1e-6,
"CheckInstrument": False,
"CheckSample": False,
"ToleranceRelErr": True,
"CheckAllData": True,
"CheckMasking": True,
"CheckType": True,
"CheckAxes": True,
"CheckSpectraMap": True}
compare_alg = create_unmanaged_algorithm(compare_name, **compare_options)
compare_alg.setChild(False)
compare_alg.execute()
result = compare_alg.getProperty("Result").value
self.assertTrue(result)
# Remove file
if os.path.exists(f_name):
os.remove(f_name)
def test_that_produces_correct_workspace_for_SANS2D(self):
# Arrange
# Build the data information
        data_builder = get_data_builder(SANSFacility.ISIS)
data_builder.set_sample_scatter("SANS2D00034484")
data_builder.set_calibration("TUBE_SANS2D_BOTH_31681_25Sept15.nxs")
data_state = data_builder.build()
# Get the rest of the state from the user file
user_file_director = StateDirectorISIS(data_state)
user_file_director.set_user_file("USER_SANS2D_154E_2p4_4m_M3_Xpress_8mm_SampleChanger.txt")
        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# COMPATIBILITY BEGIN -- Remove when appropriate
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# Since we are dealing with event based data but we want to compare it with histogram data from the
# old reduction system we need to enable the compatibility mode
user_file_director.set_compatibility_builder_use_compatibility_mode(True)
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# COMPATIBILITY END
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# Construct the final state
state = user_file_director.construct()
# Act
output_workspaces = run_integral('', True, IntegralEnum.Horizontal, DetectorType.LAB, state)
# Evaluate it up to a defined point
reference_file_name = "SANS2D_ws_diagnostic_reference.nxs"
self._compare_workspace(output_workspaces[0], reference_file_name)
def test_that_produces_correct_workspace_multiperiod_LARMOR(self):
# Arrange
# Build the data information
data_builder = get_data_builder(SANSFacility.ISIS)
data_builder.set_sample_scatter("LARMOR00013065")
data_builder.set_calibration("80tubeCalibration_1-05-2015_r3157-3160.nxs")
data_state = data_builder.build()
# Get the rest of the state from the user file
user_file_director = StateDirectorISIS(data_state)
user_file_director.set_user_file("USER_LARMOR_151B_LarmorTeam_80tubes_BenchRot1p4_M4_r3699.txt")
# Construct the final state
state = user_file_director.construct()
# Act
output_workspaces = run_integral('', True, IntegralEnum.Horizontal, DetectorType.LAB, state)
# Evaluate it up to a defined point
reference_file_name = "LARMOR_ws_diagnostic_reference.nxs"
self._compare_workspace(output_workspaces[0], reference_file_name)
class SANSDiagnosticPageRunnerTest(stresstesting.MantidStressTest):
def __init__(self):
stresstesting.MantidStressTest.__init__(self)
self._success = False
def runTest(self):
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(SANSDiagnosticPageTest, 'test'))
runner = unittest.TextTestRunner()
res = runner.run(suite)
if res.wasSuccessful():
self._success = True
def requiredMemoryMB(self):
return 2000
def validate(self):
return self._success
if __name__ == '__main__':
unittest.main()
|
LennonChin/Django-Practices
|
MxShop/apps/trade/migrations/0004_auto_20171023_2354.py
|
Python
|
apache-2.0
| 1,497
| 0.002088
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-10-23 23:54
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('goods', '0002_auto_20171017_2017'),
        ('trade', '0003_auto_20171022_1507'),
]
operations = [
migrations.AlterField(
model_name='ordergoods',
name='order',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='goods', to='trade.OrderInfo', verbose_name='订单信息'),
),
migrations.AlterField(
model_name='orderinfo',
name='order_sn',
field=models.CharField(blank=True, max_length=30, null=True, unique=True, verbose_name='订单号'),
),
migrations.AlterField(
model_name='orderinfo',
name='pay_status',
field=models.CharField(choices=[('TRADE_SUCCESS', '成功'), ('TRADE_CLOSE', '超时关闭'), ('WAIT_BUYER_PAY', '交易创建,等待付款'), ('TRADE_FINISHED', '交易结束')], default='paying', max_length=30, verbose_name='订单状态'),
),
migrations.AlterUniqueTogether(
name='shoppingcart',
unique_together=set([('user', 'goods')]),
),
]
|
DaKnOb/mwhois
|
netaddr/strategy/eui48.py
|
Python
|
mit
| 8,693
| 0.003681
|
#-----------------------------------------------------------------------------
# Copyright (c) 2008-2015, David P. D. Moss. All rights reserved.
#
# Released under the BSD license. See the LICENSE file for details.
#-----------------------------------------------------------------------------
"""
IEEE 48-bit EUI (MAC address) logic.
Supports numerous MAC string formats including Cisco's triple hextet as well
as bare MACs containing no delimiters.
"""
import struct as _struct
import re as _re
# Check whether we need to use fallback code or not.
try:
from socket import AF_LINK
except ImportError:
AF_LINK = 48
from netaddr.core import AddrFormatError
from netaddr.strategy import \
valid_words as _valid_words, \
int_to_words as _int_to_words, \
words_to_int as _words_to_int, \
valid_bits as _valid_bits, \
bits_to_int as _bits_to_int, \
int_to_bits as _int_to_bits, \
valid_bin as _valid_bin, \
int_to_bin as _int_to_bin, \
bin_to_int as _bin_to_int
from netaddr.compat import _is_str
#: The width (in bits) of this address type.
width = 48
#: The AF_* constant value of this address type.
family = AF_LINK
#: A friendly string name address type.
family_name = 'MAC'
#: The version of this address type.
version = 48
#: The maximum integer value that can be represented by this address type.
max_int = 2 ** width - 1
#-----------------------------------------------------------------------------
# Dialect classes.
#-----------------------------------------------------------------------------
class mac_eui48(object):
"""A standard IEEE EUI-48 dialect class."""
#: The individual word size (in bits) of this address type.
word_size = 8
#: The number of words in this address type.
num_words = width // word_size
#: The maximum integer value for an individual word in this address type.
max_word = 2 ** word_size - 1
#: The separator character used between each word.
word_sep = '-'
#: The format string to be used when converting words to string values.
word_fmt = '%.2X'
#: The number base to be used when interpreting word values as integers.
word_base = 16
class mac_unix(mac_eui48):
"""A UNIX-style MAC address dialect class."""
word_size = 8
num_words = width // word_size
word_sep = ':'
word_fmt = '%x'
word_base = 16
class mac_unix_expanded(mac_unix):
"""A UNIX-style MAC address dialect class with leading zeroes."""
word_fmt = '%.2x'
class mac_cisco(mac_eui48):
"""A Cisco 'triple hextet' MAC address dialect class."""
word_size = 16
num_words = width // word_size
word_sep = '.'
word_fmt = '%.4x'
word_base = 16
class mac_bare(mac_eui48):
"""A bare (no delimiters) MAC address dialect class."""
    word_size = 48
    num_words = width // word_size
word_sep = ''
word_fmt = '%.12X'
word_base = 16
class mac_pgsql(mac_eui48):
"""A PostgreSQL style (2 x 24-bit words) MAC address dialect class."""
word_size = 24
num_words = width // word_size
word_sep = ':'
word_fmt = '%.6x'
word_base = 16
#: The default dialect to be used when not specified by the user.
DEFAULT_DIALECT = mac_eui48
#-----------------------------------------------------------------------------
#: Regular expressions to match all supported MAC address formats.
RE_MAC_FORMATS = (
# 2 bytes x 6 (UNIX, Windows, EUI-48)
'^' + ':'.join(['([0-9A-F]{1,2})'] * 6) + '$',
'^' + '-'.join(['([0-9A-F]{1,2})'] * 6) + '$',
# 4 bytes x 3 (Cisco)
'^' + ':'.join(['([0-9A-F]{1,4})'] * 3) + '$',
'^' + '-'.join(['([0-9A-F]{1,4})'] * 3) + '$',
'^' + '\.'.join(['([0-9A-F]{1,4})'] * 3) + '$',
# 6 bytes x 2 (PostgreSQL)
'^' + '-'.join(['([0-9A-F]{5,6})'] * 2) + '$',
'^' + ':'.join(['([0-9A-F]{5,6})'] * 2) + '$',
# 12 bytes (bare, no delimiters)
'^(' + ''.join(['[0-9A-F]'] * 12) + ')$',
'^(' + ''.join(['[0-9A-F]'] * 11) + ')$',
)
# For efficiency, each string regexp converted in place to its compiled
# counterpart.
RE_MAC_FORMATS = [_re.compile(_, _re.IGNORECASE) for _ in RE_MAC_FORMATS]
def valid_str(addr):
"""
:param addr: An IEEE EUI-48 (MAC) address in string form.
:return: ``True`` if MAC address string is valid, ``False`` otherwise.
"""
for regexp in RE_MAC_FORMATS:
try:
match_result = regexp.findall(addr)
if len(match_result) != 0:
return True
except TypeError:
pass
return False
def str_to_int(addr):
"""
:param addr: An IEEE EUI-48 (MAC) address in string form.
:return: An unsigned integer that is equivalent to value represented
by EUI-48/MAC string address formatted according to the dialect
settings.
"""
words = []
if _is_str(addr):
found_match = False
for regexp in RE_MAC_FORMATS:
match_result = regexp.findall(addr)
if len(match_result) != 0:
found_match = True
if isinstance(match_result[0], tuple):
words = match_result[0]
else:
words = (match_result[0],)
break
if not found_match:
raise AddrFormatError('%r is not a supported MAC format!' % addr)
else:
raise TypeError('%r is not str() or unicode()!' % addr)
int_val = None
if len(words) == 6:
# 2 bytes x 6 (UNIX, Windows, EUI-48)
int_val = int(''.join(['%.2x' % int(w, 16) for w in words]), 16)
elif len(words) == 3:
# 4 bytes x 3 (Cisco)
int_val = int(''.join(['%.4x' % int(w, 16) for w in words]), 16)
elif len(words) == 2:
# 6 bytes x 2 (PostgreSQL)
int_val = int(''.join(['%.6x' % int(w, 16) for w in words]), 16)
elif len(words) == 1:
# 12 bytes (bare, no delimiters)
int_val = int('%012x' % int(words[0], 16), 16)
else:
raise AddrFormatError('unexpected word count in MAC address %r!' \
% addr)
return int_val
def int_to_str(int_val, dialect=None):
"""
:param int_val: An unsigned integer.
:param dialect: (optional) a Python class defining formatting options.
:return: An IEEE EUI-48 (MAC) address string that is equivalent to
unsigned integer formatted according to the dialect settings.
"""
if dialect is None:
dialect = mac_eui48
words = int_to_words(int_val, dialect)
tokens = [dialect.word_fmt % i for i in words]
addr = dialect.word_sep.join(tokens)
return addr
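# Hedged sketch (not part of the original module): one EUI-48 integer rendered
# through a few of the dialect classes defined above.
def _demo_dialects():
    mac = 0x001122AABBCC
    return (int_to_str(mac),             # '00-11-22-AA-BB-CC' (mac_eui48)
            int_to_str(mac, mac_unix),   # '0:11:22:aa:bb:cc'
            int_to_str(mac, mac_cisco))  # '0011.22aa.bbcc'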
def int_to_packed(int_val):
"""
:param int_val: the integer to be packed.
:return: a packed string that is equivalent to value represented by an
unsigned integer.
"""
return _struct.pack(">HI", int_val >> 32, int_val & 0xffffffff)
def packed_to_int(packed_int):
"""
:param packed_int: a packed string containing an unsigned integer.
It is assumed that string is packed in network byte order.
:return: An unsigned integer equivalent to value of network address
represented by packed binary string.
"""
words = list(_struct.unpack('>6B', packed_int))
int_val = 0
for i, num in enumerate(reversed(words)):
word = num
word = word << 8 * i
int_val = int_val | word
return int_val
def valid_words(words, dialect=None):
if dialect is None:
dialect = DEFAULT_DIALECT
return _valid_words(words, dialect.word_size, dialect.num_words)
def int_to_words(int_val, dialect=None):
if dialect is None:
dialect = DEFAULT_DIALECT
return _int_to_words(int_val, dialect.word_size, dialect.num_words)
def words_to_int(words, dialect=None):
if dialect is None:
dialect = DEFAULT_DIALECT
return _words_to_int(words, dialect.word_size, dialect.num_words)
def valid_bits(bits, dialect=None):
if dialect is None:
dialect = DEFAULT_DIALECT
return _valid_bits(bits, width, dialect.word_sep)
def bits_to_int(bits, dialect=None):
if dialect is None:
dialect = DEFAULT_DIALECT
    return _bits_to_int(bits, width, dialect.word_sep)
|
scottpurdy/nupic
|
src/nupic/algorithms/backtracking_tm.py
|
Python
|
agpl-3.0
| 143,241
| 0.007966
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Temporal memory implementation.
This is the Python implementation and is used as the base class for the C++
implementation in :class:`~nupic.algorithms.backtracking_tm.BacktrackingTMCPP`.
"""
import copy
import cPickle as pickle
import itertools
try:
import capnp
except ImportError:
capnp = None
import numpy
if capnp:
from nupic.algorithms.backtracking_tm_capnp import (
SegmentProto, SegmentUpdateProto, BacktrackingTMProto)
from nupic.bindings.math import Random
from nupic.bindings.algorithms import getSegmentActivityLevel, isSegmentActive
from nupic.math import GetNTAReal
from nupic.serializable import Serializable
from nupic.support.console_printer import ConsolePrinterMixin
# Default verbosity while running unit tests
VERBOSITY = 0
# The current TM version used to track the checkpoint state.
TM_VERSION = 1
# The numpy equivalent to the floating point type used by NTA
dtype = GetNTAReal()
class BacktrackingTM(ConsolePrinterMixin, Serializable):
"""
Class implementing the temporal memory algorithm as described in
`BAMI <https://numenta.com/biological-and-machine-intelligence/>`_. The
implementation here attempts to closely match the pseudocode in the
documentation. This implementation does contain several additional bells and
whistles such as a column confidence measure.
  :param numberOfCols: (int) Number of mini-columns in the region. This value
needs to be the same as the number of columns in the SP, if one is
used.
:param cellsPerColumn: (int) The number of cells per mini-column.
:param initialPerm: (float) Initial permanence for newly created synapses.
:param connectedPerm: TODO: document
:param minThreshold: (int) Minimum number of active synapses for a segment to
be considered during search for the best-matching segments.
:param newSynapseCount: (int) The max number of synapses added to a segment
during learning.
:param permanenceInc: (float) Active synapses get their permanence counts
incremented by this value.
:param permanenceDec: (float) All other synapses get their permanence counts
decremented by this value.
:param permanenceMax: TODO: document
:param maxAge: (int) Number of iterations before global decay takes effect.
Also the global decay execution interval. After global decay starts, it
      will run again every ``maxAge`` iterations. If ``maxAge==1``,
global decay is applied to every iteration to every segment.
.. note:: Using ``maxAge > 1`` can significantly speed up the TM when
global decay is used.
:param globalDecay: (float) Value to decrease permanences when the global
decay process runs. Global decay will remove synapses if their
permanence value reaches 0. It will also remove segments when they no
longer have synapses.
.. note:: Global decay is applied after ``maxAge`` iterations, after
which it will run every ``maxAge`` iterations.
  :param activationThreshold: (int) Number of synapses that must be active to
activate a segment.
:param doPooling: (bool) If True, pooling is enabled. False is the default.
:param segUpdateValidDuration: TODO: document
:param burnIn: (int) Used for evaluating the prediction score. Default is 2.
:param collectStats: (bool) If True, collect training / inference stats.
Default is False.
  :param seed: (int) Random number generator seed. The seed affects the random
aspects of initialization like the initial permanence values. A fixed
value ensures a reproducible result.
:param verbosity: (int) Controls the verbosity of the TM diagnostic output:
- verbosity == 0: silent
- verbosity in [1..6]: increasing levels of verbosity
:param pamLength: (int) Number of time steps to remain in "Pay Attention Mode"
after we detect we've reached the end of a learned sequence. Setting
this to 0 disables PAM mode. When we are in PAM mode, we do not burst
unpredicted columns during learning, which in turn prevents us from
falling into a previously learned sequence for a while (until we run
through another 'pamLength' steps).
The advantage of PAM mode is that it requires fewer presentations to
learn a set of sequences which share elements. The disadvantage of PAM
      mode is that if a learned sequence is immediately followed by a set
of elements that should be learned as a 2nd sequence, the first
``pamLength`` elements of that sequence will not be learned as part of
that 2nd sequence.
:param maxInfBacktrack: (int) How many previous inputs to keep in a buffer for
inference backtracking.
:param maxLrnBacktrack: (int) How many previous inputs to keep in a buffer for
learning backtracking.
:param maxSeqLength: (int) If not 0, we will never learn more than
``maxSeqLength`` inputs in a row without starting over at start cells.
This sets an upper bound on the length of learned sequences and thus is
another means (besides ``maxAge`` and ``globalDecay``) by which to
limit how much the TM tries to learn.
:param maxSegmentsPerCell: (int) The maximum number of segments allowed on a
cell. This is used to turn on "fixed size CLA" mode. When in effect,
``globalDecay`` is not applicable and must be set to 0 and ``maxAge``
must be set to 0. When this is used (> 0), ``maxSynapsesPerSegment``
must also be > 0.
:param maxSynapsesPerSegment: (int) The maximum number of synapses allowed in
a segment. This is used to turn on "fixed size CLA" mode. When in
effect, ``globalDecay`` is not applicable and must be set to 0, and
``maxAge`` must be set to 0. When this is used (> 0),
``maxSegmentsPerCell`` must also be > 0.
:param outputType: (string) Can be one of the following (default ``normal``):
- ``normal``: output the OR of the active and predicted state.
- ``activeState``: output only the active state.
- ``activeState1CellPerCol``: output only the active state, and at most
1 cell/column. If more than 1 cell is active in a column, the one
with the highest confidence is sent up.
"""
def __init__(self,
numberOfCols=500,
cellsPerColumn=10,
initialPerm=0.11,
connectedPerm=0.50,
minThreshold=8,
newSynapseCount=15,
permanenceInc=0.10,
permanenceDec=0.10,
permanenceMax=1.0,
globalDecay=0.10,
activationThreshold=12,
doPooling=False,
segUpdateValidDuration=5,
burnIn=2,
collectStats=False,
seed=42,
verbosity=VERBOSITY,
checkSynapseConsistency=False, # for cpp only -- ignored
pamLength=1,
m
|
arraystream/simpleplotly
|
tests/test_figure.py
|
Python
|
mit
| 1,366
| 0.001464
|
import unittest
import plotly.graph_objs as go
import weplot as wp
class FigureHolderTest(unittest.TestCase):
def test_can_update_layout(self):
fh = wp.FigureHolder(go.Figure())
layout = fh.figure.layout
self.assertIsNone(layout.barmode)
self.assertIsNone(layout.title)
fh.update_layout(barmode='group', title='test plot')
# self.assertDictEqual(fh.figure.layout._props, dict(barmode='group', title='test plot'))
self.assertEqual(layout.barmode, 'group')
self.assertEqual(layout.title, 'test plot')
fh.drop_layout_key('barmode')
self.assertIsNone(layout.barmode)
self.assertEqual(layout.title, 'test plot')
def test_update_layout_with_invalid_property_raises(self):
fh = wp.FigureHolder(go.Figure())
layout = fh.figure.layout
self.assertIsNone(layout.barmode)
self.assertIsNone(layout.title)
        self.assertRaises(ValueError, fh.update_layout, invalid_property1='any value')
fh.update_layout(barmode='group', title='test plot')
self.assertEqual(layout.barmode, 'group')
self.assertEqual(layout.title, 'test plot')
self.assertRaises(ValueError, fh.drop_layout_key, 'invalid_property2')
class FigureBuilderTest(unittest.TestCase):
def test_figure_builder_operations(self):
pass
|
mozts2005/OuterSpace
|
client-pygame/lib/pygameui/Fonts.py
|
Python
|
gpl-2.0
| 2,420
| 0.022727
|
#
# Copyright 2001 - 2006 Ludek Smid [http://www.ospace.net/]
#
# This file is part of Pygame.UI.
#
# Pygame.UI is free software; you can redistribute it and/or modify
# it under the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
#
# Pygame.UI is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with Pygame.UI; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
from pygame.font import Font
from types import StringType, UnicodeType
__all__ = ['initFont', 'renderText', 'getTextSize', 'getLineSize']
fontFaces = {}
fontCache = {}
misses = 0
hits = 0
def initFont(name, ttfFile, size, bold = 0, italic = 0, underline = 0):
global fontFaces
if name in fontFaces:
del fontFaces[name]
font = Font(ttfFile, size)
font.set_bold(bold)
font.set_italic(italic)
font.set_underline(underline)
fontFaces[name] = font
def renderText(name, text, antialias, fg, bg = None):
antialias = 1
tType = type(text)
if tType != StringType and tType != UnicodeType:
text = str(text)
if len(text) == 0:
# TODO return very small surface
text = " "
#@print "->", text, "<-", type(text)
global misses, hits, fontCache
surface = fontCache.get((name, text, antialias, fg, bg), None)
if not surface:
misses += 1
if bg:
surface = fontFaces[name].render(text, antialias, fg, bg)
else:
surface = fontFaces[name].render(text, antialias, fg)
fontCache[name, text, antialias, fg, bg] = surface
else:
hits += 1
    # print stats and clear the cache once misses exceed 2000
if misses > 2000:
print 'FONT CACHE STATS:', misses, hits, hits / float(misses + hits)
misses = 0
fontCache.clear()
return surface
def renderSmartText(surface, x, y, name, text, antialias, fg, bg = None):
# TODO
pass
def getTextSize(name, text):
#return renderText(name, text, 1, (0x00, 0x00, 0x00)).get_size()
return fontFaces[name].size(text)
def getLineSize(name):
return fontFaces[name].get_linesize()
|
miraculixx/pyrules
|
pyrules/rules.py
|
Python
|
mit
| 6,453
| 0.003409
|
import json
import yaml
from .conditions import LogicEvaluator
from .dictobj import DictObject
from .language import Translator
class Rule(object):
"""
Base rule
"""
name = None
def should_trigger(self, context):
return True
def perform(self, context):
raise NotImplementedError
    def record(self, context, result):
context._executed.append((self.ruleid, result))
@property
def ruleid(self):
return self.name or self.__class__.__name__.rsplit('.', 1)[-1]
class ConditionalRule(Rule):
"""
    ConditionalRule receives two functions as parameters: condition and
action.
@param condition: lambda context: <some condition returning True or False>
@param action: lambda context: <return a dict to update the context with>
Example:
>>> rule = ConditionalRule(
... condition=lambda context: True,
... action=lambda context: {'result': 5})
"""
def __init__(self, condition=None, action=None):
self._condition = condition
self._action = action
def condition(self, context):
"""
Condition for executing this rule.
Override in subclasses if necessary. Should return boolean value that
determines if rule is used.
"""
return self._condition(self, context)
def action(self, context):
"""
Action for executing this rule.
Override in subclasses if necessary. Should return dictionary with
results that will be added to context.
"""
return self._action(self, context)
def should_trigger(self, context):
return self.condition(context)
def perform(self, context):
result = self.action(context)
context.update(result)
return result
class TableRule(Rule):
"""
A table rule is created from a list of dict objects of the following format:
[
{
'if' : {'logic': '1 | 2', 'conditions': ['foo', {'bar': 10}]},
'then' : ['action1', ...],
'target' : ['target1', ...]
},
...
]
Each rule is only executed if all conditions are met. In actions, use
'context.' to reference variables. Targets and conditions implicitly reference
'context.' (target 'xy' means 'context.xy'). Logic can be omitted, which
would imply "&" operation for all conditions. Condition can be a dictionary or
a single value, so 'value' is equivalent to {'value': True}
The result of the nth 'then' action is stored in the nth 'context.variable'
as defined in target.
"""
def __init__(self, rules, name=None):
self.rules = self._load_data({'rules': rules})
if name:
self.name = name
self._current_ruleid = None
self._evaluators = []
for rule in self.rules:
evaluator = LogicEvaluator(
rule['if'].get('logic'), rule['if']['conditions'])
self._evaluators.append(evaluator)
def perform(self, context):
count = 0
for evaluator, rule in zip(self._evaluators, self.rules):
if evaluator.evaluate(context):
count = count + 1
self._current_ruleid = rule.get('rule') or count
for action, target in zip(rule['then'], rule['target']):
result = \
context[target.replace('context.', '').strip()] = (
eval(action, {'context': context})
if isinstance(action, basestring)
else action)
self.record(context, result)
else:
continue
else:
self._current_ruleid = None
return True
return False
@property
def ruleid(self):
if self._current_ruleid:
return "%s.%s" % (super(TableRule, self).ruleid, self._current_ruleid)
return super(TableRule, self).ruleid
@classmethod
def from_yaml(cls, text):
return cls._from_data(yaml.load(text))
@classmethod
def from_json(cls, text):
return cls._from_data(json.loads(text))
@classmethod
def _from_data(cls, data):
rules = cls._load_data(data)
return cls(rules, name=data.get('ruleset'))
@staticmethod
def _load_data(data):
"""
Rules data is preprocessed here i.e. to convert from brief rule
definitions to detailed format.
"""
# We have to convert non-string data in clauses back to strings,
# because they will be eval-ed
rules = []
for rule in data['rules']:
obj = {
'rule': rule.get('rule'),
'then': rule['then'],
'target': rule['target']}
if_clause = {}
# Convert conditions to dictionaries, i.e. "foo" becomes {"foo": True}
if isinstance(rule['if'], list):
logic = None
conditions = rule['if']
else:
conditions = rule['if'].get('conditions', [])
# Get logic string. If it's not specified, generate string like
# "1 & 2 & .. & N", where N is number of conditions.
logic = rule['if'].get('logic')
obj['if'] = {'logic': logic, 'conditions': conditions}
rules.append(obj)
return rules
class SequencedRuleset(Rule):
"""
A set of Rules, guaranteed to run in sequence
"""
def __init__(self, rules):
self.rules = rules or []
def should_trigger(self, context):
return True
def perform(self, context):
for rule in self.rules:
if rule.should_trigger(context):
result = rule.perform(context)
rule.record(context, result)
return True
class NaturalLanguageRule(TableRule):
"""
A natural language rule given as a text.
TODO implement this
"""
def __init__(self, translations):
if translations:
translator = Translator(translations)
for rule in self.rules:
for key in rule.keys():
rule[key] = [translator.replace(item) for item in rule[key]]
def should_trigger(self, context):
pass
|
antoinecarme/sklearn2sql_heroku
|
tests/classification/BinaryClass_100/ws_BinaryClass_100_SVC_sigmoid_sqlite_code_gen.py
|
Python
|
bsd-3-clause
| 142
| 0.014085
|
from sklearn2sql_heroku.tests.classification import generic as class_gen
class_gen.test_model("SVC_sigmoid", "BinaryClass_100", "sqlite")
|
doozr/euler.py
|
p0020_factorial_digit_sum_test.py
|
Python
|
gpl-3.0
| 438
| 0
|
"""
n! means n x (n - 1) x ... x 3 x 2 x 1
For example, 10! = 10 x 9 x ... x 3 x 2 x 1 = 3628800,
and the sum of the digits in the number 10! is 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.
Find the sum of the digits in the number 100!
Answer:
648
"""
from math import factorial
def factorial_digit_sum(x):
return sum(int(d) for d in str(factorial(x)))
def test_0020_factorial_digit_sum():
assert factorial_digit_sum(100) == 648
|
stackmachine/bearweb
|
core/templatetags/core_extras.py
|
Python
|
mit
| 1,258
| 0
|
import hashlib
from django import template
from django.core import urlresolvers
register = template.Library()
@register.simple_tag(takes_context=True)
def gravatar(context, user):
email_hash = hashlib.md5(user.email.lower()).hexdigest()
return 'https://www.gravatar.com/avatar/{}?s=30&d=retro'.format(email_hash)
@register.simple_tag(takes_context=True)
def active(context, url_name, return_value=' active', **kwargs):
matches = current_url_equals(context, url_name, **kwargs)
return return_value if matches else ''
def current_url_equals(context, url_name, **kwargs):
namespace = ""
if ":" in url_name:
namespace, url_name = url_name.split(":")
resolved = False
if context.get('request') is None:
return False
try:
resolved = urlresolvers.resolve(context.get('request').path)
except urlresolvers.Resolver404:
pass
    matches = resolved and resolved.url_name == url_name \
and resolved.namespace == namespace
if matches and kwargs:
for key in kwargs:
kwarg = kwargs.get(key)
            resolved_kwarg = resolved.kwargs.get(key)
if not resolved_kwarg or kwarg != resolved_kwarg:
return False
return matches
|
bnsantos/python-junk-code
|
tests/graphs/complementGraphTest.py
|
Python
|
gpl-2.0
| 2,970
| 0.001684
|
__author__ = 'bruno'
import unittest
import algorithms.graphs.complementGraph as ComplementGraph
class TestComplementGraph(unittest.TestCase):
def setUp(self):
pass
def test_complement_graph_1(self):
graph = {'a': {'b': 1, 'c': 1},
'b': {'a': 1, 'c': 1, 'd': 1},
'c': {'a': 1, 'b': 1, 'd': 1},
'd': {'b': 1, 'c': 1}}
self.assertEqual({'a': {'d': 1}, 'd': {'a': 1}},
ComplementGraph.make_complement_graph(graph))
def test_complement_graph_2(self):
graph = {'a': {'b': 1, 'd': 1},
'b': {'a': 1, 'c': 1},
'c': {'b': 1, 'd': 1},
'd': {'a': 1, 'c': 1}}
complement = {'a': {'c': 1},
'b': {'d': 1},
'c': {'a': 1},
'd': {'b': 1}}
self.assertEqual(complement, ComplementGraph.make_complement_graph(graph))
def test_complement_graph_3(self):
graph = {'a': {'c': 1, 'd': 1},
'b': {'c': 1, 'd': 1},
'c': {'a': 1, 'b': 1},
'd': {'a': 1, 'b': 1, 'e': 1, 'f': 1},
'e': {'d': 1, 'f': 1},
'f': {'d': 1, 'e': 1}}
complement = {'a': {'b': 1, 'e': 1, 'f': 1},
                      'b': {'a': 1, 'e': 1, 'f': 1},
'c': {'e': 1, 'd': 1, 'f': 1},
'd': {'c': 1},
'e': {'a': 1, 'c': 1, 'b': 1},
'f': {'a': 1, 'c': 1, 'b': 1}}
self.assertEqual(complement, ComplementGraph.make_complement_graph(graph))
    def test_complement_graph_4(self):
graph = {'a': {'b': 1, 'f': 1},
'b': {'a': 1, 'c': 1},
'c': {'b': 1, 'd': 1},
'd': {'c': 1, 'e': 1},
'e': {'d': 1, 'f': 1},
'f': {'a': 1, 'e': 1}}
complement = {'a': {'c': 1, 'e': 1, 'd': 1},
'b': {'e': 1, 'd': 1, 'f': 1},
'c': {'a': 1, 'e': 1, 'f': 1},
'd': {'a': 1, 'b': 1, 'f': 1},
'e': {'a': 1, 'c': 1, 'b': 1},
'f': {'c': 1, 'b': 1, 'd': 1}}
self.assertEqual(complement, ComplementGraph.make_complement_graph(graph))
def test_complement_graph_5(self):
graph = {'a': {'b': 1, 'c': 1, 'd': 1, 'e': 1},
'b': {'a': 1, 'c': 1, 'd': 1, 'e': 1},
'c': {'a': 1, 'b': 1, 'd': 1, 'e': 1},
'd': {'a': 1, 'b': 1, 'c': 1, 'e': 1},
'e': {'a': 1, 'b': 1, 'c': 1, 'd': 1, 'f': 1},
'f': {'e': 1}}
complement = {'a': {'f': 1},
'b': {'f': 1},
'c': {'f': 1},
'd': {'f': 1},
'f': {'a': 1, 'c': 1, 'b': 1, 'd': 1}}
self.assertEqual(complement, ComplementGraph.make_complement_graph(graph))
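# A minimal reference implementation inferred from the expected values above
# (an assumption -- not the actual algorithms.graphs.complementGraph module).
# Vertices whose complement has no edges, such as 'e' in the last test, drop
# out of the result entirely.
def _reference_complement_graph(graph):
    complement = {}
    for u in graph:
        for v in graph:
            # an edge belongs to the complement iff it is absent from the
            # original graph and is not a self-loop
            if u != v and v not in graph[u]:
                complement.setdefault(u, {})[v] = 1
    return complement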
|
IGITUGraz/scoop
|
examples/dependency/sortingnetwork.py
|
Python
|
lgpl-3.0
| 4,724
| 0.004234
|
# This file is part of DEAP.
#
# DEAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# DEAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with DEAP. If not, see <http://www.gnu.org/licenses/>.
try:
from itertools import product
except ImportError:
def product(*args, **kwds):
# product('ABCD', 'xy') --> Ax Ay Bx By Cx Cy Dx Dy
# product(range(2), repeat=3) --> 000 001 010 011 100 101 110 111
pools = map(tuple, args) * kwds.get('repeat', 1)
result = [[]]
for pool in pools:
result = [x+[y] for x in result for y in pool]
for prod in result:
yield tuple(prod)
class SortingNetwork(list):
"""Sorting network class.
From Wikipedia : A sorting network is an abstract mathematical model
    of a network of wires and comparator modules that is used to sort a
    sequence of numbers. Each comparator connects two wires and sorts the
    values by outputting the smaller value to one wire, and the larger
    value to the other.
"""
def __init__(self, dimension, connectors = []):
self.dimension = dimension
for wire1, wire2 in connectors:
self.addConnector(wire1, wire2)
    def addConnector(self, wire1, wire2):
"""Add a connector between wire1 and wire2 in the network."""
if wire1 == wire2:
return
if wire1 > wire2:
wire1, wire2 = wire2, wire1
try:
last_level = self[-1]
except IndexError:
# Empty network, create new level and connector
self.append([(wire1, wire2)])
return
for wires in last_level:
if wires[1] >= wire1 and wires[0] <= wire2:
self.append([(wire1, wire2)])
return
last_level.append((wire1, wire2))
def sort(self, values):
"""Sort the values in-place based on the connectors in the network."""
for level in self:
for wire1, wire2 in level:
if values[wire1] > values[wire2]:
values[wire1], values[wire2] = values[wire2], values[wire1]
def assess(self, cases=None):
"""Try to sort the **cases** using the network, return the number of
misses. If **cases** is None, test all possible cases according to
the network dimensionality.
"""
if cases is None:
cases = product(range(2), repeat=self.dimension)
misses = 0
ordered = [[0]*(self.dimension-i) + [1]*i for i in range(self.dimension+1)]
for sequence in cases:
sequence = list(sequence)
self.sort(sequence)
misses += (sequence != ordered[sum(sequence)])
return misses
def draw(self):
"""Return an ASCII representation of the network."""
str_wires = [["-"]*7 * self.depth]
str_wires[0][0] = "0"
str_wires[0][1] = " o"
str_spaces = []
for i in range(1, self.dimension):
str_wires.append(["-"]*7 * self.depth)
str_spaces.append([" "]*7 * self.depth)
str_wires[i][0] = str(i)
str_wires[i][1] = " o"
for index, level in enumerate(self):
for wire1, wire2 in level:
str_wires[wire1][(index+1)*6] = "x"
str_wires[wire2][(index+1)*6] = "x"
for i in range(wire1, wire2):
str_spaces[i][(index+1)*6+1] = "|"
for i in range(wire1+1, wire2):
str_wires[i][(index+1)*6] = "|"
network_draw = "".join(str_wires[0])
for line, space in zip(str_wires[1:], str_spaces):
network_draw += "\n"
network_draw += "".join(space)
network_draw += "\n"
network_draw += "".join(line)
return network_draw
@property
def depth(self):
"""Return the number of parallel steps that it takes to sort any input.
"""
return len(self)
@property
def length(self):
"""Return the number of comparison-swap used."""
return sum(len(level) for level in self)
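# A small usage sketch (my addition, not part of the original file): the
# classic three-wire bubble network sorts every 0/1 input, so assess()
# reports zero misses.
if __name__ == "__main__":
    net = SortingNetwork(3, connectors=[(0, 1), (1, 2), (0, 1)])
    print(net.assess())  # -> 0 for a correct sorting network
    print(net.draw())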
|
ViktorZharina/BestBankExchange
|
src/config/settings.py
|
Python
|
gpl-2.0
| 427
| 0.017544
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# @author Viktor Zharina <viktorz1986@gmail.com>
# CC-BY-SA License
settings = {
'currencys' : ['usd', 'eur'],
'operations' : ['buy', 'sell'],
}
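# (Translation note: the Russian labels below read "USD buy", "USD sell",
# "EUR buy", "EUR sell".)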
lang = {
'ru': {
'usd_buy' : u'USD покупка',
'usd_sell' : u'USD продажа',
'eur_buy' : u'EUR покупка',
        'eur_sell' : u'EUR продажа'
}
}
|
liamw9534/mopidy
|
mopidy/http/handlers.py
|
Python
|
apache-2.0
| 5,682
| 0
|
from __future__ import unicode_literals
import logging
import os
import socket
import tornado.escape
import tornado.web
import tornado.websocket
import mopidy
from mopidy import core, models
from mopidy.utils import jsonrpc
logger = logging.getLogger(__name__)
def make_mopidy_app_factory(apps, statics):
def mopidy_app_factory(config, core):
return [
(r'/ws/?', WebSocketHandler, {
'core': core,
}),
(r'/rpc', JsonRpcHandler, {
'core': core,
}),
(r'/(.+)', StaticFileHandler, {
'path': os.path.join(os.path.dirname(__file__), 'data'),
}),
(r'/', ClientListHandler, {
'apps': apps,
'statics': statics,
}),
]
return mopidy_app_factory
def make_jsonrpc_wrapper(core_actor):
inspector = jsonrpc.JsonRpcInspector(
objects={
'core.get_uri_schemes': core.Core.get_uri_schemes,
'core.get_version': core.Core.get_version,
'core.library': core.LibraryController,
'core.playback': core.PlaybackController,
'core.playlists': core.PlaylistsController,
'core.tracklist': core.TracklistController,
})
return jsonrpc.JsonRpcWrapper(
objects={
'core.describe': inspector.describe,
'core.get_uri_schemes': core_actor.get_uri_schemes,
'core.get_version': core_actor.get_version,
'core.library': core_actor.library,
'core.playback': core_actor.playback,
'core.playlists': core_actor.playlists,
'core.tracklist': core_actor.tracklist,
},
decoders=[models.model_json_decoder],
encoders=[models.ModelJSONEncoder]
)
class WebSocketHandler(tornado.websocket.WebSocketHandler):
# XXX This set is shared by all WebSocketHandler objects. This isn't
# optimal, but there's currently no use case for having more than one of
# these anyway.
clients = set()
@classmethod
def broadcast(cls, msg):
for client in cls.clients:
client.write_message(msg)
    def initialize(self, core):
self.jsonrpc = make_jsonrpc_wrapper(core)
def open(self):
if hasattr(self, 'set_nodelay'):
# New in Tornado 3.1
self.set_nodelay(True)
else:
self.stream.socket.setsockopt(
socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
self.clients.add(self)
logger.debug(
'New WebSocket connection from %s', self.request.remote_ip)
def on_close(self):
self.clients.discard(self)
logger.debug(
'Closed WebSocket connection from %s',
self.request.remote_ip)
def on_message(self, message):
if not message:
return
logger.debug(
'Received WebSocket message from %s: %r',
self.request.remote_ip, message)
try:
response = self.jsonrpc.handle_json(
tornado.escape.native_str(message))
            if response:
                self.write_message(response)
                logger.debug(
                    'Sent WebSocket message to %s: %r',
                    self.request.remote_ip, response)
except Exception as e:
logger.error('WebSocket request error: %s', e)
self.close()
def check_origin(self, origin):
# Allow cross-origin WebSocket connections, like Tornado before 4.0
# defaulted to.
return True
def set_mopidy_headers(request_handler):
request_handler.set_header('Cache-Control', 'no-cache')
request_handler.set_header(
'X-Mopidy-Version', mopidy.__version__.encode('utf-8'))
class JsonRpcHandler(tornado.web.RequestHandler):
def initialize(self, core):
self.jsonrpc = make_jsonrpc_wrapper(core)
def head(self):
self.set_extra_headers()
self.finish()
def post(self):
data = self.request.body
if not data:
return
logger.debug(
'Received RPC message from %s: %r', self.request.remote_ip, data)
try:
self.set_extra_headers()
response = self.jsonrpc.handle_json(
tornado.escape.native_str(data))
            if response:
                self.write(response)
                logger.debug(
                    'Sent RPC message to %s: %r',
                    self.request.remote_ip, response)
except Exception as e:
logger.error('HTTP JSON-RPC request error: %s', e)
self.write_error(500)
def set_extra_headers(self):
set_mopidy_headers(self)
self.set_header('Accept', 'application/json')
        self.set_header('Content-Type', 'application/json; charset=utf-8')
class ClientListHandler(tornado.web.RequestHandler):
def initialize(self, apps, statics):
self.apps = apps
self.statics = statics
def get_template_path(self):
return os.path.dirname(__file__)
def get(self):
set_mopidy_headers(self)
names = set()
for app in self.apps:
names.add(app['name'])
for static in self.statics:
names.add(static['name'])
names.discard('mopidy')
self.render('data/clients.html', apps=sorted(list(names)))
class StaticFileHandler(tornado.web.StaticFileHandler):
def set_extra_headers(self, path):
set_mopidy_headers(self)
class AddSlashHandler(tornado.web.RequestHandler):
@tornado.web.addslash
def prepare(self):
return super(AddSlashHandler, self).prepare()
|
ProfessorX/Config
|
.PyCharm30/system/python_stubs/-1247972723/samba/dcerpc/samr/RidWithAttribute.py
|
Python
|
gpl-2.0
| 1,272
| 0.007075
|
# encoding: utf-8
# module samba.dcerpc.samr
# from /usr/lib/python2.7/dist-packages/samba/dcerpc/samr.so
# by generator 1.135
""" samr DCE/RPC """
# imports
import dcerpc as __dcerpc
import talloc as __talloc
class RidWithAttribute(__talloc.Object):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
def __ndr_pack__(self, *args, **kwargs): # real signature unknown
"""
S.ndr_pack(object) -> blob
NDR pack
"""
pass
def __ndr_print__(self, *args, **kwargs): # real signature unknown
"""
S.ndr_print(object) -> None
NDR print
"""
pass
def __ndr_unpack__(self, *args, **kwargs): # real signature unknown
"""
S.ndr_unpack(class, blob, allow_remaining=False) -> None
NDR unpack
"""
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) ->
|
a new object with type S, a subtype of T """
pass
    attributes = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
rid = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
|
souravbadami/oppia
|
core/domain/topic_domain.py
|
Python
|
apache-2.0
| 38,486
| 0.000208
|
# coding: utf-8
#
# Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Domain objects for topics, and related models."""
import copy
from constants import constants
from core.domain import change_domain
from core.domain import skill_services
from core.domain import user_services
from core.platform import models
import feconf
import utils
(topic_models,) = models.Registry.import_models([models.NAMES.topic])
CMD_CREATE_NEW = 'create_new'
CMD_CHANGE_ROLE = 'change_role'
CMD_REMOVE_MANAGER_ROLE = 'remove_manager_role'
CMD_PUBLISH_TOPIC = 'publish_topic'
CMD_UNPUBLISH_TOPIC = 'unpublish_topic'
ROLE_MANAGER = 'manager'
ROLE_NONE = 'none'
# Do not modify the values of these constants. This is to preserve backwards
# compatibility with previous change dicts.
TOPIC_PROPERTY_NAME = 'name'
TOPIC_PROPERTY_DESCRIPTION = 'description'
TOPIC_PROPERTY_CANONICAL_STORY_IDS = 'canonical_story_ids'
TOPIC_PROPERTY_ADDITIONAL_STORY_IDS = 'additional_story_ids'
TOPIC_PROPERTY_LANGUAGE_CODE = 'language_code'
SUBTOPIC_PROPERTY_TITLE = 'title'
CMD_ADD_SUBTOPIC = 'add_subtopic'
CMD_DELETE_SUBTOPIC = 'delete_subtopic'
CMD_ADD_UNCATEGORIZED_SKILL_ID = 'add_uncategorized_skill_id'
CMD_REMOVE_UNCATEGORIZED_SKILL_ID = 'remove_uncategorized_skill_id'
CMD_MOVE_SKILL_ID_TO_SUBTOPIC = 'move_skill_id_to_subtopic'
CMD_REMOVE_SKILL_ID_FROM_SUBTOPIC = 'remove_skill_id_from_subtopic'
# These take additional 'property_name' and 'new_value' parameters and,
# optionally, 'old_value'.
CMD_UPDATE_TOPIC_PROPERTY = 'update_topic_property'
CMD_UPDATE_SUBTOPIC_PROPERTY = 'update_subtopic_property'
CMD_MIGRATE_SUBTOPIC_SCHEMA_TO_LATEST_VERSION = 'migrate_subtopic_schema_to_latest_version' # pylint: disable=line-too-long
class TopicChange(change_domain.BaseChange):
"""Domain object for changes made to topic object.
The allowed commands, together with the attributes:
- 'add_subtopic' (with title, subtopic_id)
- 'delete_subtopic' (with subtopic_id)
- 'add_uncategorized_skill_id' (with
new_uncategorized_skill_id)
- 'remove_uncategorized_skill_id' (with uncategorized_skill_id)
- 'move_skill_id_to_subtopic' (with old_subtopic_id,
new_subtopic_id and skill_id)
- 'remove_skill_id_from_subtopic' (with subtopic_id and
skill_id)
- 'update_topic_property' (with property_name, new_value
and old_value)
- 'update_subtopic_property' (with subtopic_id, property_name,
new_value and old_value)
- 'migrate_subtopic_schema_to_latest_version' (with
from_version and to_version)
- 'create_new' (with name)
"""
# The allowed list of topic properties which can be used in
# update_topic_property command.
TOPIC_PROPERTIES = (
TOPIC_PROPERTY_NAME, TOPIC_PROPERTY_DESCRIPTION,
TOPIC_PROPERTY_CANONICAL_STORY_IDS, TOPIC_PROPERTY_ADDITIONAL_STORY_IDS,
TOPIC_PROPERTY_LANGUAGE_CODE)
# The allowed list of subtopic properties which can be used in
# update_subtopic_property command.
SUBTOPIC_PROPERTIES = (SUBTOPIC_PROPERTY_TITLE,)
ALLOWED_COMMANDS = [{
'name': CMD_CREATE_NEW,
'required_attribute_names': ['name'],
'optional_attribute_names': []
}, {
'name': CMD_ADD_SUBTOPIC,
'required_attribute_names': ['title', 'subtopic_id'],
'optional_attribute_names': []
}, {
'name': CMD_DELETE_SUBTOPIC,
'required_attribute_names': ['subtopic_id'],
'optional_attribute_names': []
}, {
'name': CMD_ADD_UNCATEGORIZED_SKILL_ID,
'required_attribute_names': ['new_uncategorized_skill_id'],
'optional_attribute_names': []
}, {
'name': CMD_REMOVE_UNCATEGORIZED_SKILL_ID,
'required_attribute_names': ['uncategorized_skill_id'],
'optional_attribute_names': [],
}, {
'name': CMD_MOVE_SKILL_ID_TO_SUBTOPIC,
'required_attribute_names': [
'old_subtopic_id', 'new_subtopic_id', 'skill_id'],
'optional_attribute_names': [],
}, {
'name': CMD_REMOVE_SKILL_ID_FROM_SUBTOPIC,
'required_attribute_names': ['subtopic_id', 'skill_id'],
'optional_attribute_names': [],
}, {
'name': CMD_UPDATE_SUBTOPIC_PROPERTY,
'required_attribute_names': [
'subtopic_id', 'property_name', 'new_value', 'old_value'],
'optional_attribute_names': [],
'allowed_values': {'property_name': SUBTOPIC_PROPERTIES}
}, {
'name': CMD_UPDATE_TOPIC_PROPERTY,
        'required_attribute_names': ['property_name', 'new_value', 'old_value'],
'optional_attribute_names': [],
'allowed_values': {'property_name': TOPIC_PROPERTIES}
}, {
        'name': CMD_MIGRATE_SUBTOPIC_SCHEMA_TO_LATEST_VERSION,
'required_attribute_names': ['from_version', 'to_version'],
'optional_attribute_names': []
}]
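    # An illustrative change dict for this class (my example, not from the
    # original file; the payload keys are assumed to follow BaseChange):
    # {'cmd': 'update_topic_property', 'property_name': 'description',
    #  'new_value': 'All about fractions', 'old_value': ''}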
class TopicRightsChange(change_domain.BaseChange):
"""Domain object for changes made to a topic rights object.
The allowed commands, together with the attributes:
- 'change_role' (with assignee_id, new_role and old_role)
- 'create_new'
        - 'publish_topic'
        - 'unpublish_topic'.
"""
# The allowed list of roles which can be used in change_role command.
ALLOWED_ROLES = [ROLE_NONE, ROLE_MANAGER]
ALLOWED_COMMANDS = [{
'name': CMD_CREATE_NEW,
'required_attribute_names': [],
'optional_attribute_names': []
}, {
'name': CMD_CHANGE_ROLE,
'required_attribute_names': ['assignee_id', 'new_role', 'old_role'],
'optional_attribute_names': [],
'allowed_values': {'new_role': ALLOWED_ROLES, 'old_role': ALLOWED_ROLES}
}, {
'name': CMD_REMOVE_MANAGER_ROLE,
'required_attribute_names': ['removed_user_id'],
'optional_attribute_names': []
}, {
'name': CMD_PUBLISH_TOPIC,
'required_attribute_names': [],
'optional_attribute_names': []
}, {
'name': CMD_UNPUBLISH_TOPIC,
'required_attribute_names': [],
'optional_attribute_names': []
}]
class Subtopic(object):
"""Domain object for a Subtopic."""
def __init__(self, subtopic_id, title, skill_ids):
"""Constructs a Subtopic domain object.
Args:
subtopic_id: int. The number of the subtopic.
title: str. The title of the subtopic.
skill_ids: list(str). The list of skill ids that are part of this
subtopic.
"""
self.id = subtopic_id
self.title = title
self.skill_ids = skill_ids
def to_dict(self):
"""Returns a dict representing this Subtopic domain object.
Returns:
A dict, mapping all fields of Subtopic instance.
"""
return {
'id': self.id,
'title': self.title,
'skill_ids': self.skill_ids
}
@classmethod
def from_dict(cls, subtopic_dict):
"""Returns a Subtopic domain object from a dict.
Args:
subtopic_dict: dict. The dict representation of Subtopic object.
Returns:
Subtopic. The corresponding Subtopic domain object.
"""
subtopic = cls(
subtopic_dict['id'], subtopic_dict['title'],
subtopic_dict['skill_ids'])
return subtopic
@classmethod
def create_default_subtopic(cls, subtopic_id, title):
"""Creates a Subtopic object with default values.
Args:
subtopic_id: str. ID of the new subtopic.
title: str. The title for
|
sourcelair/castor
|
castor/docker_servers/models.py
|
Python
|
mit
| 1,063
| 0
|
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
import docker
@python_2_unicode_compatible
class DockerServer(models.Model):
name = models.CharField(max_length=255, unique=True)
version = models.CharField(max_length=255, default='auto')
docker_host = models.CharField(max_length=255)
docker_tls_verify = models.BooleanField(default=True)
docker_cert_path = models.CharField(max_length=255, null=True, blank=True)
def get_env(self):
env = {
'DOCKER_HOST': self.docker_host
}
        if self.docker_tls_verify:
env['DOCKER_TLS_VERIFY'] = self.docker_tls_verify
if self.docker_cert_path:
env['DOCKER_CERT_PATH'] = self.docker_cert_path
return env
def get_client(self):
client = docker.from_env(
version=self.version,
            environment=self.get_env()
)
return client
def __str__(self):
return 'Docker Server: %s' % self.name
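# A hypothetical usage sketch (not part of the original module): look up a
# configured server and ping its daemon through the docker SDK client.
# server = DockerServer.objects.get(name='staging')
# server.get_client().ping()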
|
leejz/meta-omics-scripts
|
query_ncbi_lineage_from_mgrast_md5.py
|
Python
|
mit
| 8,172
| 0.009055
|
#!/usr/bin/env python
"""
--------------------------------------------------------------------------------
Created: Jackson Lee 11/4/14
This script reads in a tab delimited file of annotations and queries
asynchronously the MGRAST REST API to parse back the original ncbi tax_id entry.
This script then uses the ncbi tax_id to query the taxonomic lineage.
Input file format:
query sequence id hit m5nr id (md5sum) percentage identity alignment length, number of mismatches number of gap openings query start query end hit start hit end e-value bit score semicolon separated list of annotations
mgm4581121.3|contig-1350000035_45_1_2592_+ 0000679ceb3fc9c950779468e06329a7 61.03 136 53 654 789 366 501 6.80E-44 175 hyalin repeat protein
mgm4581121.3|contig-18000183_1_1_2226_+ 0000679ceb3fc9c950779468e06329a7 64.44 45 16 525 569 457 501 1.70E-08 57 hyalin repeat protein
['Download complete. 78538 rows retrieved']
MGRAST REST API:
http://api.metagenomics.anl.gov/m5nr/md5/<M5nr MD5 hash>?source=GenBank
e.g. http://api.metagenomics.anl.gov/m5nr/md5/000821a2e2f63df1a3873e4b280002a8?source=GenBank
resources:
http://api.metagenomics.anl.gov/api.html#m5nr
http://angus.readthedocs.org/en/2014/howe-mgrast.html
Returns:
{"next":null,"prev":null,"version":"10","url":"http://api.metagenomics.anl.gov//m5nr/md5/000821a2e2f63df1a3873e4b280002a8?source=GenBank&offset=0","data":[{"source":"GenBank","function":"sulfatase","ncbi_tax_id":399741,"accession":"ABV39241.1","type":"protein","organism":"Serratia proteamaculans 568","md5":"000821a2e2f63df1a3873e4b280002a8","alias":["GI:157320144"]}],"limit":10,"total_count":1,"offset":0}
This output will then be stored in a buffer and queried for the exact id and grab the xml based lineage
http://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=taxonomy&id=399741
<TaxaSet><Taxon><TaxId>399741</TaxId><ScientificName>Serratia proteamaculans 568</ScientificName><OtherNames><EquivalentName>Serratia proteamaculans str. 568</EquivalentName><EquivalentName>Serratia proteamaculans strain 568</EquivalentName></OtherNames><ParentTaxId>28151</ParentTaxId><Rank>no rank</Rank><Division>Bacteria</Division><GeneticCode><GCId>11</GCId><GCName>Bacterial, Archaeal and Plant Plastid</GCName></GeneticCode><MitoGeneticCode><MGCId>0</MGCId><MGCName>Unspecified</MGCName></MitoGeneticCode><Lineage>cellular organisms; Bacteria; Proteobacteria; Gammaproteobacteria; Enterobacteriales; Enterobacteriaceae; Serratia; Serratia proteamaculans</Lineage>
Output file format:
A tab delimited file of
contig-faa-name\tlineage
an error.log of mismatches from both MGRAST and NCBI is also generated
--------------------------------------------------------------------------------
usage: query_ncbi_lineage_from_mgrast_md5.py -i mgrast_organism.txt -o output.file
"""
#-------------------------------------------------------------------------------
#
##http thread pool from: http://stackoverflow.com/questions/2632520/what-is-the-fastest-way-to-send-100-000-http-requests-in-python
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
#Header - Linkers, Libs, Constants
from string import strip
from argparse import ArgumentParser, RawDescriptionHelpFormatter
from threading import Thread
import sys
import time
import requests
import json
from Queue import Queue
#-------------------------------------------------------------------------------
#function declarations
def doWork():
while not exitapp:
id, name, mgrast_urlstring = q.get()
if id % 100 == 0:
print 'Query: HTTP Thread: ' + str(id) + ' started.'
try:
mgrast_response = requests.get(url=mgrast_urlstring, timeout=10)
if mgrast_response.status_code == 200:
json_data = json.loads(mgrast_response.text)
                if json_data['data'] != []:
if 'ncbi_tax_id' in json_data['data'][0]:
eutils_urlstring = 'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=taxonomy&id=' + str(json_data['data'][0]['ncbi_tax_id'])
eutils_response = requests.get(url=eutils_urlstring, timeout=10)
if eutils_response.status_code == 200:
if '<Lineage>' in eutils_response.text:
output_dict[name] = eutils_response.text.split('Lineage>')[1][0:-2]
else:
output_dict[name] = 'No NCBI'
else:
print 'HTTP error, Thread: ' + str(id) + ' in eutils worker with error: ' + eutils_response.reason
                            logfile.write(str(id) + '\t' + eutils_urlstring + '\t' + eutils_response.reason + '\n')
raise
else:
output_dict[name] = 'No MGRAST tax ID'
else:
output_dict[name] = 'No MGRAST source data'
else:
print 'HTTP error, Thread: ' + str(id) + ' in MG-RAST worker with error: ' + mgrast_response.reason
                logfile.write(str(id) + '\t' + mgrast_urlstring + '\t' + mgrast_response.reason + '\n')
raise
except:
print 'Thread: ' + str(id) + '. Error. '
print sys.exc_info()[0]
q.task_done()
#-------------------------------------------------------------------------------
#Body
print "Running..."
if __name__ == '__main__':
parser = ArgumentParser(usage = "query_ncbi_lineage_from_mgrast_md5.py -i \
mgrast_organism.txt -o output.file",
description=__doc__,
formatter_class=RawDescriptionHelpFormatter)
parser.add_argument("-i", "--input_file", action="store",
dest="inputfilename",
help="tab-delimited MGRAST organism file")
parser.add_argument("-o", "--output_filename", action="store",
dest="outputfilename",
help="tab-delimited output file")
options = parser.parse_args()
mandatories = ["outputfilename","inputfilename"]
for m in mandatories:
if not options.__dict__[m]:
print "\nError: Missing Arguments\n"
parser.print_help()
exit(-1)
inputfilename = options.inputfilename
outputfilename = options.outputfilename
infile_list = []
with open(inputfilename,'U') as infile:
infile_list = [line.strip().split('\t') for line in infile]
infile.close()
urlpool = []
name_list = []
for entry in infile_list[1:-1]:
contig_name = entry[0]
md5_hash = entry[1]
urlpool.append([contig_name, 'http://api.metagenomics.anl.gov/m5nr/md5/' + md5_hash + '?source=RefSeq'])
name_list.append(contig_name)
concurrent = 10
exitapp = False
output_dict = {}
print "Querying MGRAST REST API Service..."
with open('./' + outputfilename + '.errorlog.txt','w') as logfile:
q = Queue(concurrent * 2)
for i in range(concurrent):
t = Thread(target=doWork)
t.daemon = True
time.sleep(1)
t.start()
try:
for id, url_load in enumerate(urlpool):
q.put([id] + url_load)
q.join()
except KeyboardInterrupt:
exitapp = True
sys.exit(1)
    logfile.close()
print "Matching taxonomies and writing..."
with open(outputfilename, 'w') as outfile:
for name in name_list:
if name in output_dict:
outfile.write(name + '\t' + output_dict[name] + '\n')
else:
outfile.write(name + '\t' + 'None\n')
outfile.close()
print "Done!"
|
sassoftware/catalog-service
|
catalogService/rest/models/jobs.py
|
Python
|
apache-2.0
| 120
| 0
|
from rpath_job.models import job
Job = job.Job
Jobs = job.Jobs
ResultResource = job.ResultResource
System = job.System
|
keras-team/reservoir_nn
|
reservoir_nn/models/nlp_rnn_test.py
|
Python
|
apache-2.0
| 2,126
| 0.002352
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for nlp_rnn."""
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from reservoir_nn.models import nlp_rnn
LAYER_NAMES = frozenset(["DenseReservoirRNNTemporal", "RNN", "LSTM"])
class NlpRnnTest(parameterized.TestCase):
@parameterized.parameters(LAYER_NAMES)
def test_lstm_language_model_outputs_correct_shape(self, model_name):
weights = np.ones((42, 42))
batch_size = 5
vocab_size = 50
maxlen = 20
embed_dim = 10
model = nlp_rnn.recurrent_reservoir_language_model(
layer_name=model_name,
        reservoir_weight=weights,
embed_dim=embed_dim,
vocab_size=vocab_size)
x_data = np.ones((batch_size, maxlen))
model.fit(x_data, x_data)
result = model.predict(x_data)
self.assertEqual(result.shape, (batch_size, maxlen, vocab_size))
@parameterized.parameters(LAYER_NAMES)
def test_lstm_classifier_outputs_correct_shape(self, model_name):
weights = np.ones((42, 42))
num_classes = 2
batch_size = 5
vocab_size = 50
maxlen = 20
embed_dim = 10
model = nlp_rnn.recurrent_reservoir_nlp_classifier(
layer_name=model_name,
reservoir_weight=weights,
num_classes=num_classes,
embed_dim=embed_dim,
vocab_size=vocab_size)
x_data = np.ones((batch_size, maxlen))
y_data = np.ones(batch_size)
model.fit(x_data, y_data)
result = model(x_data)
self.assertEqual(result.shape, (batch_size, num_classes))
if __name__ == "__main__":
absltest.main()
|
skosukhin/spack
|
var/spack/repos/builtin.mock/packages/a/package.py
|
Python
|
lgpl-2.1
| 2,262
| 0.000442
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class A(AutotoolsPackage):
"""Simple package with one optional dependency"""
homepage = "http://www.example.com"
url = "http://www.example.com/a-1.0.tar.gz"
version('1.0', '0123456789abcdef0123456789abcdef')
version('2.0', '2.0_a_hash')
variant(
'foo',
values=('bar', 'baz', 'fee'),
default='bar',
description='',
multi=True
)
variant(
'foobar',
values=('bar', 'baz', 'fee'),
default='bar',
description='',
multi=False
)
variant('bvv', default=True, description='The good old BV variant')
depends_on('b', when='foobar=bar')
def with_or_without_fee(self, activated):
if not activated:
return '--no-fee'
return '--fee-all-the-time'
def autoreconf(self, spec, prefix):
pass
def configure(self, spec, prefix):
pass
def build(self, spec, prefix):
pass
def install(self, spec, prefix):
pass
|
Josef-Friedrich/audiorename
|
_generate-readme.py
|
Python
|
mit
| 1,114
| 0
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import subprocess
import os
def path(*path_segments):
return os.path.join(os.getcwd(), *path_segments)
def open_file(*path_segments):
file_path = path(*path_segments)
open(file_path, 'w').close()
return open(file_path, 'a')
header = open(path('README_header.rst'), 'r')
readme = open_file('README.rst')
sphinx = open_file('doc', 'source', 'cli.rst')
sphinx_header = (
    'Command line interface\n',
    '======================\n',
'\n',
'.. code-block:: text\n',
'\n',
)
for line in sphinx_header:
sphinx.write(str(line))
footer = open(path('README_footer.rst'), 'r')
for line in header:
readme.write(line)
audiorenamer = subprocess.Popen('audiorenamer --help', shell=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
readme.write('\n')
for line in audiorenamer.stdout:
indented_line = ' ' + line.decode('utf-8')
readme.write(indented_line)
    sphinx.write(indented_line)
audiorenamer.wait()
for line in footer:
readme.write(line)
readme.close()
sphinx.close()
|
smartstudy/midauth
|
midauth/models/base.py
|
Python
|
mit
| 1,286
| 0.001555
|
# -*- coding: utf-8 -*-
import sqlalchemy.ext.declarative
from sqlalchemy import types
from sqlalchemy.dialects import postgresql
import uuid
class Base(object):
pass
#:
Base = sqlalchemy.ext.declarative.declarative_base(cls=Base)
class GUID(types.TypeDecorator):
"""Platform-independent GUID type.
Uses Postgresql's UUID type, otherwise uses
CHAR(32), storing as stringified hex values.
.. seealso:: http://docs.sqlalchemy.org/en/rel_0_8/core/types.html#backend-agnostic-guid-type
"""
impl = types.CHAR
def load_dialect_impl(self, dialect):
if dialect.name == 'postgresql':
return dialect.type_descriptor(postgresql.UUID())
else:
return dialect.type_descriptor(types.CHAR(32))
def process_bind_param(self, value, dialect):
if value is None:
return value
elif dialect.name == 'postgresql':
return str(value)
else:
            if not isinstance(value, uuid.UUID):
                return "%.32x" % uuid.UUID(value).int
            else:
                # value is already a UUID; store it as a 32-char hex string
                return "%.32x" % value.int
def process_result_value(self, value, dialect):
if value is None:
return value
else:
return uuid.UUID(value)
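# A usage sketch (my assumption, not from the original module): declaring a
# column with this type stores a native UUID on PostgreSQL and a 32-char hex
# string on every other backend.
# import sqlalchemy
# class Example(Base):
#     __tablename__ = 'example'
#     id = sqlalchemy.Column(GUID, primary_key=True, default=uuid.uuid4)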
|
AOSC-Dev/acbs
|
acbs/deps.py
|
Python
|
lgpl-2.1
| 4,219
| 0.001896
|
from collections import OrderedDict, defaultdict, deque
from typing import List, Dict, Deque
from acbs.find import find_package
from acbs.parser import ACBSPackageInfo, check_buildability
# package information cache
pool: Dict[str, ACBSPackageInfo] = {}
def tarjan_search(packages: 'OrderedDict[str, ACBSPackageInfo]', search_path: str) -> List[List[ACBSPackageInfo]]:
"""This function describes a Tarjan's strongly connected components algorithm.
The resulting list of ACBSPackageInfo are sorted topologically as a byproduct of the algorithm
"""
# Initialize state trackers
lowlink: Dict[str, int] = defaultdict(lambda: -1)
index: Dict[str, int] = defaultdict(lambda: -1)
stackstate: Dict[str, bool] = defaultdict(bool)
stack: Deque[str] = deque()
results: List[List[ACBSPackageInfo]] = []
packages_list: List[str] = [i for i in packages]
pool.update(packages)
for i in packages_list:
if index[i] == -1: # recurse on each package that is not yet visited
strongly_connected(search_path, packages_list, results, packages,
i, lowlink, index, stackstate, stack)
return results
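# Illustration (my assumption of typical output, not from the original file):
# for packages a -> b -> c with no cycles, recursing from a yields
# [[c], [b], [a]] -- dependencies surface before their dependents -- while a
# dependency loop a <-> b comes back as a single group such as [[a, b], ...].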
def prepare_for_reorder(package: ACBSPackageInfo, packages_list: List[str]) -> ACBSPackageInfo:
"""This function prepares the package for reordering.
The idea is to move the installable dependencies which are in the build list to the "uninstallable" list.
"""
new_installables = []
for d in package.installables:
# skip self-dependency
if d == package.name:
            new_installables.append(d)
continue
try:
packages_list.index(d)
package.deps.append(d)
except ValueError:
new_installables.append(d)
package.installables = new_installables
return package
def strongly_connected(search_path: str, packages_list: List[str], results: list, packages: 'OrderedDict[str, ACBSPackageInfo]', vert: str, lowlink: Dict[str, int], index: Dict[str, int], stackstate: Dict[str, bool], stack: Deque[str], depth=0):
# update depth indices
index[vert] = depth
lowlink[vert] = depth
depth += 1
stackstate[vert] = True
stack.append(vert)
# search package begin
print(f'[{len(results) + 1}/{len(pool)}] {vert}\t\t\r', end='', flush=True)
current_package = packages.get(vert)
if current_package is None:
package = pool.get(vert) or find_package(vert, search_path)
if not package:
raise ValueError(
f'Package {vert} not found')
if isinstance(package, list):
for s in package:
if vert == s.name:
current_package = s
pool[s.name] = s
continue
pool[s.name] = s
packages_list.append(s.name)
else:
current_package = package
pool[vert] = current_package
assert current_package is not None
# first check if this dependency is buildable
# when `required_by` argument is present, it will raise an exception when the dependency is unbuildable.
check_buildability(current_package, stack[-2] if len(stack) > 1 else '<unknown>')
# search package end
# Look for adjacent packages (dependencies)
for p in current_package.deps:
if index[p] == -1:
# recurse on unvisited packages
strongly_connected(search_path, packages_list, results, packages,
p, lowlink, index, stackstate, stack, depth)
lowlink[vert] = min(lowlink[p], lowlink[vert])
# adjacent package is in the stack which means it is part of a loop
elif stackstate[p] is True:
lowlink[vert] = min(lowlink[p], index[vert])
w = ''
result = []
# if this is a root vertex
if lowlink[vert] == index[vert]:
# the current stack contains the vertices that belong to the same loop
# if the stack only contains one vertex, then there is no loop there
while w != vert:
w = stack.pop()
result.append(pool[w])
stackstate[w] = False
results.append(result)
|
masci/oauthlib
|
tests/oauth1/rfc5849/endpoints/test_authorization.py
|
Python
|
bsd-3-clause
| 2,250
| 0.002667
|
from __future__ import unicode_literals, absolute_import
from mock import MagicMock
from ....unittest import TestCase
from oauthlib.oauth1 import RequestValidator
from oauthlib.oauth1.rfc5849 import errors
from oauthlib.oauth1.rfc5849.endpoints import AuthorizationEndpoint
class ResourceEndpointTest(TestCase):
def setUp(self):
self.validator = MagicMock(wraps=RequestValidator())
self.validator.verify_request_token.return_value = True
self.validator.verify_realms.return_value = True
self.validator.get_realms.return_value = ['test']
self.validator.save_verifier = MagicMock()
self.endpoint = AuthorizationEndpoint(self.validator)
        self.uri = 'https://i.b/authorize?oauth_token=foo'
def test_get_realms_and_credentials(self):
realms, credentials = self.endpoint.get_realms_and_credentials(self.uri)
self.assertEqual(realms, ['test'])
def test_verify_token(self):
self.validator.verify_request_token.return_value = False
self.assertRaises(errors.InvalidClientError,
self.endpoint.get_realms_and_credentials, self.uri)
self.assertRaises(errors.InvalidClientError,
self.endpoint.create_authorization_response, self.uri)
def test_verify_realms(self):
self.validator.verify_realms.return_value = False
self.assertRaises(errors.InvalidRequestError,
self.endpoint.create_authorization_response,
self.uri,
realms=['bar'])
    def test_create_authorization_response(self):
        self.validator.get_redirect_uri.return_value = 'https://c.b/cb'
        h, b, s = self.endpoint.create_authorization_response(self.uri)
        self.assertEqual(s, 302)
        self.assertIn('Location', h)
        location = h['Location']
        self.assertTrue(location.startswith('https://c.b/cb'))
        self.assertIn('oauth_verifier', location)
    def test_create_authorization_response_oob(self):
self.validator.get_redirect_uri.return_value = 'oob'
h, b, s = self.endpoint.create_authorization_response(self.uri)
self.assertEqual(s, 200)
self.assertNotIn('Location', h)
self.assertIn('oauth_verifier', b)
self.assertIn('oauth_token', b)
|
danpalmer/open-data-quality-dashboard
|
tools/csvkit/csvkit/utilities/csvjson.py
|
Python
|
mit
| 5,216
| 0.006135
|
#!/usr/bin/env python
import json
import codecs
from csvkit import CSVKitReader
from csvkit.cli import CSVKitUtility, match_column_identifier
from csvkit.exceptions import NonUniqueKeyColumnException
class CSVJSON(CSVKitUtility):
description = 'Convert a CSV file into JSON (or GeoJSON).'
override_flags = ['H']
def add_arguments(self):
self.argparser.add_argument('-i', '--indent', dest='indent', type=int, default=None,
help='Indent the output JSON this many spaces. Disabled by default.')
self.argparser.add_argument('-k', '--key', dest='key', type=str, default=None,
help='Output JSON as an array of objects keyed by a given column, KEY, rather than as a list. All values in the column must be unique. If --lat and --lon are also specified, this column will be used as GeoJSON Feature ID.')
self.argparser.add_argument('--lat', dest='lat', type=str, default=None,
help='A column index or name containing a latitude. Output will be GeoJSON instead of JSON. Only valid if --lon is also specified.')
self.argparser.add_argument('--lon', dest='lon', type=str, default=None,
help='A column index or name containing a longitude. Output will be GeoJSON instead of JSON. Only valid if --lat is also specified.')
self.argparser.add_argument('--crs', dest='crs', type=str, default=None,
help='A coordinate reference system string to be included with GeoJSON output. Only valid if --lat and --lon are also specified.')
def main(self):
"""
Convert CSV to JSON.
"""
if self.args.lat and not self.args.lon:
self.argparser.error('--lon is required whenever --lat is specified.')
if self.args.lon and not self.args.lat:
self.argparser.error('--lat is required whenever --lon is specified.')
if self.args.crs and not self.args.lat:
self.argparser.error('--crs is only allowed when --lat and --lon are also specified.')
rows = CSVKitReader(self.args.file, **self.reader_kwargs)
column_names = rows.next()
stream = codecs.getwriter('utf-8')(self.output_file)
# GeoJSON
if self.args.lat and self.args.lon:
features = []
min_lon = None
min_lat = None
max_lon = None
max_lat = None
lat_column = match_column_identifier(column_names, self.args.lat, self.args.zero_based)
lon_column = match_column_identifier(column_names, self.args.lon, self.args.zero_based)
if self.args.key:
id_column = match_column_identifier(column_names, self.args.key, self.args.zero_based)
else:
id_column = None
for row in rows:
feature = { 'type': 'Feature' }
properties = {}
geoid = None
lat = None
lon = None
for i, c in enumerate(row):
if i == lat_column:
lat = float(c)
if min_lat is None or lat < min_lat:
min_lat = lat
if max_lat is None or lat > max_lat:
max_lat = lat
elif i == lon_column:
lon = float(c)
if min_lon is None or lon < min_lon:
min_lon = lon
if max_lon is None or lon > max_lon:
max_lon = lon
elif id_column is not None and i == id_column:
geoid = c
else:
properties[column_names[i]] = c
if id_column is not None:
feature['id'] = geoid
feature['geometry'] = {
'type': 'Point',
'coordinates': [lon, lat]
}
feature['properties'] = properties
features.append(feature)
output = {
'type': 'FeatureCollection',
'bbox': [min_lon, min_lat, max_lon, max_lat],
'features': features
}
if self.args.crs:
output['crs'] = {
'type': 'name',
'properties': {
                        'name': self.args.crs
                    }
                }
# Keyed JSON
elif self.args.key:
output = {}
for row in rows:
row_dict = dict(zip(column_names, row))
k = row_dict[self.args.key]
if k in output:
raise NonUniqueKeyColumnException('Value %s is not unique in the key column.' % unicode(k))
output[k] = row_dict
# Boring JSON
else:
output = [dict(zip(column_names, row)) for row in rows]
json.dump(output, stream, ensure_ascii=False, indent=self.args.indent, encoding='utf-8')
def launch_new_instance():
utility = CSVJSON()
utility.main()
if __name__ == "__main__":
launch_new_instance()
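# Example invocations, assumed from the arguments defined above:
#   csvjson --indent 4 data.csv
#   csvjson --key id data.csv
#   csvjson --lat latitude --lon longitude --crs EPSG:4269 data.csv  # GeoJSON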
|
ivan-fedorov/intellij-community
|
python/testData/psi/ExecPy2.py
|
Python
|
apache-2.0
| 25
| 0
|
exec 'print 1' in {}, {}
|
jbaiter/plugin.video.brmediathek
|
resources/lib/xbmcswift2/mockxbmc/xbmcaddon.py
|
Python
|
gpl-3.0
| 1,905
| 0.0021
|
import os
from xbmcswift2.logger import log
from xbmcswift2.mockxbmc import utils
def _get_env_setting(name):
return os.getenv('XBMCSWIFT2_%s' % name.upper())
class Addon(object):
def __init__(self, id=None):
# In CLI mode, xbmcswift2 must be run from the root of the addon
# directory, so we can rely on getcwd() being correct.
addonxml = os.path.join(os.getcwd(), 'addon.xml')
id = id or utils.get_addon_id(addonxml)
self._info = {
'id': id,
'name': utils.get_addon_name(addonxml),
'profile': 'special://profile/addon_data/%s/' % id,
'path': 'special://home/addons/%s' % id
}
self._strings = {}
self._settings = {}
def getAddonInfo(self, id):
        properties = ['author', 'changelog', 'description', 'disclaimer',
'fanart', 'icon', 'id', 'name', 'path', 'profile', 'stars', 'summary',
'type', 'version']
assert id in properties, '%s is not a valid property.' % id
return self._info.get(id, 'Unavailable')
def getLocalizedString(self, id):
key = str(id)
assert key in self._strings, 'id not found in English/strings.po or strings.xml.'
return self._strings[key]
def getSetting(self, id):
log.warning('xbmcaddon.Addon.getSetting() has not been implemented in '
'CLI mode.')
try:
value = self._settings[id]
except KeyError:
# see if we have an env var
value = _get_env_setting(id)
if _get_env_setting(id) is None:
value = raw_input('* Please enter a temporary value for %s: ' %
id)
self._settings[id] = value
return value
def setSetting(self, id, value):
self._settings[id] = value
def openSettings(self):
pass
|
tmr232/Sark
|
tests/dumpers/data_dumper.py
|
Python
|
mit
| 1,365
| 0
|
import sark
from dumper_helper import dump_attrs
import itertools
def main():
print('Bytes')
    print(list(itertools.islice(sark.data.Bytes(), 10)))
print()
print('Bytes Until 0')
print(list(sark.data.bytes_until()))
print()
print('Words')
print(list(itertools.islice(sark.data.Words(), 10)))
print()
print('Words Until 0')
print(list(sark.data.words_until()))
print()
print('DWords')
print(list(itertools.islice(sark.data.Dwords(), 10)))
print()
print('DWords Until 0')
print(list(sark.data.dwords_until()))
print()
print('QWords')
    print(list(itertools.islice(sark.data.Qwords(), 10)))
print()
print('QWords Until 0')
print(list(sark.data.qwords_until()))
print()
print('Native Words')
print(list(itertools.islice(sark.data.NativeWords(), 10)))
print()
print('Native Words Until 0')
print(list(sark.data.native_words_until()))
print()
print('Chars')
print(list(itertools.islice(sark.data.Chars(), 10)))
print()
print('Chars Until \\0')
print(list(sark.data.chars_until()))
print()
print('Read ascii string')
print(repr(sark.data.read_ascii_string(0x004005A4)))
print()
print('Get String')
print(repr(sark.data.get_string(0x004005A4)))
if __name__ == '__main__':
main()
|
firemark/grazyna
|
grazyna/plugins/weekend.py
|
Python
|
gpl-2.0
| 562
| 0.005357
|
from datetime import datetime
from grazyna.utils import register
@register(cmd='weekend')
def weekend(bot):
"""
    Answer to the timeless question - are we at .weekend yet?
"""
    current_date = datetime.now()
day = current_date.weekday()
nick = bot.user.nick
    if day in (5, 6):
        # "Of course %s - it's the weekend. We only discuss light topics, ok?"
        answer = "Oczywiście %s - jest weekend. Omawiamy tylko lajtowe tematy, ok?" % nick
    else:
        str_day = datetime.strftime(current_date, "%A")
        # "%s - it's only %s, you still have to wait..."
        answer = "%s - dopiero %s, musisz jeszcze poczekać..." % (nick, str_day)
bot.reply(answer)
|
mjschultz/django-tastefulpy
|
tests/core/tests/fields.py
|
Python
|
bsd-3-clause
| 56,271
| 0.001528
|
import datetime
from dateutil.tz import *
from django.db import models
from django.contrib.auth.models import User
from django.test import TestCase
from django.http import HttpRequest
from tastefulpy.bundle import Bundle
from tastefulpy.exceptions import ApiFieldError, NotFound
from tastefulpy.fields import *
from tastefulpy.resources import ModelResource
from core.models import Note, Subject, MediaBit
from core.tests.mocks import MockRequest
from tastefulpy.utils import aware_datetime, aware_date
class ApiFieldTestCase(TestCase):
fixtures = ['note_testdata.json']
def test_init(self):
field_1 = ApiField()
self.assertEqual(field_1.instance_name, None)
self.assertEqual(field_1.attribute, None)
self.assertEqual(field_1._default, NOT_PROVIDED)
self.assertEqual(field_1.null, False)
self.assertEqual(field_1.value, None)
self.assertEqual(field_1.help_text, '')
self.assertEqual(field_1.use_in, 'all')
field_2 = ApiField(attribute='foo', default=True, null=True, readonly=True, help_text='Foo.', use_in="foo")
self.assertEqual(field_2.instance_name, None)
self.assertEqual(field_2.attribute, 'foo')
self.assertEqual(field_2._default, True)
self.assertEqual(field_2.null, True)
self.assertEqual(field_2.value, None)
self.assertEqual(field_2.readonly, True)
self.assertEqual(field_2.help_text, 'Foo.')
        self.assertEqual(field_2.use_in, 'all')
field_3 = ApiField(use_in="list")
self.assertEqual(field_3.use_in, 'list')
field_4 = ApiField(use_in="detail")
self.assertEqual(field_4.use_in, 'detail')
use_in_callable = lambda x: True
field_5 = ApiField(use_in=use_in_callable)
self.assertTrue(field_5.use_in is use_in_callable)
def test_dehydrated_type(self):
field_1 = ApiField()
self.assertEqual(field_1.dehydrated_type, 'string')
def test_has_default(self):
field_1 = ApiField()
self.assertEqual(field_1.has_default(), False)
field_2 = ApiField(default=True)
self.assertEqual(field_2.has_default(), True)
def test_default(self):
field_1 = ApiField()
self.assertEqual(isinstance(field_1.default, NOT_PROVIDED), True)
field_2 = ApiField(default=True)
self.assertEqual(field_2.default, True)
def test_dehydrate(self):
note = Note.objects.get(pk=1)
bundle = Bundle(obj=note)
# With no attribute or default, we should get ``None``.
field_1 = ApiField()
self.assertEqual(field_1.dehydrate(bundle), None)
# Still no attribute, so we should pick up the default
field_2 = ApiField(default=True)
self.assertEqual(field_2.dehydrate(bundle), True)
# Wrong attribute should yield default.
field_3 = ApiField(attribute='foo', default=True)
self.assertEqual(field_3.dehydrate(bundle), True)
# Wrong attribute should yield null.
field_4 = ApiField(attribute='foo', null=True)
self.assertEqual(field_4.dehydrate(bundle), None)
# Correct attribute.
field_5 = ApiField(attribute='title', default=True)
self.assertEqual(field_5.dehydrate(bundle), u'First Post!')
# Correct callable attribute.
field_6 = ApiField(attribute='what_time_is_it', default=True)
self.assertEqual(field_6.dehydrate(bundle), aware_datetime(2010, 4, 1, 0, 48))
def test_convert(self):
field_1 = ApiField()
self.assertEqual(field_1.convert('foo'), 'foo')
self.assertEqual(field_1.convert(True), True)
def test_hydrate(self):
note = Note.objects.get(pk=1)
bundle = Bundle(obj=note)
# With no value, default or nullable, we should get an ``ApiFieldError``.
field_1 = ApiField()
field_1.instance_name = 'api'
self.assertRaises(ApiFieldError, field_1.hydrate, bundle)
# The default.
field_2 = ApiField(default='foo')
field_2.instance_name = 'api'
self.assertEqual(field_2.hydrate(bundle), 'foo')
# The callable default.
def foo():
return 'bar'
field_3 = ApiField(default=foo)
field_3.instance_name = 'api'
self.assertEqual(field_3.hydrate(bundle), 'bar')
# The nullable case.
field_4 = ApiField(null=True)
field_4.instance_name = 'api'
self.assertEqual(field_4.hydrate(bundle), None)
# The readonly case.
field_5 = ApiField(readonly=True)
field_5.instance_name = 'api'
bundle.data['api'] = 'abcdef'
self.assertEqual(field_5.hydrate(bundle), None)
# A real, live attribute!
field_6 = ApiField(attribute='title')
field_6.instance_name = 'api'
bundle.data['api'] = note.title
        self.assertEqual(field_6.hydrate(bundle), u'First Post!')
# Make sure it uses attribute when there's no data
field_7 = ApiField(attribute='title')
field_7.instance_name = 'notinbundle'
self.assertEqual(field_7.hydrate(bundle), u'First Post!')
        # Make sure it falls back to instance name if there is no attribute
field_8 = ApiField()
field_8.instance_name = 'title'
self.assertEqual(field_8.hydrate(bundle), u'First Post!')
# Attribute & null regression test.
# First, simulate data missing from the bundle & ``null=True``.
field_9 = ApiField(attribute='notinbundle', null=True)
field_9.instance_name = 'notinbundle'
self.assertEqual(field_9.hydrate(bundle), None)
        # Then, with data present in the bundle and ``null=True``.
field_10 = ApiField(attribute='title', null=True)
field_10.instance_name = 'title'
self.assertEqual(field_10.hydrate(bundle), u'First Post!')
# The blank case.
field_11 = ApiField(attribute='notinbundle', blank=True)
field_11.instance_name = 'notinbundle'
self.assertEqual(field_11.hydrate(bundle), None)
bundle.data['title'] = note.title
field_12 = ApiField(attribute='title', blank=True)
field_12.instance_name = 'title'
self.assertEqual(field_12.hydrate(bundle), u'First Post!')
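# Note (summarizing the hydrate behaviour exercised above): a value is taken
# from bundle.data[instance_name] first, then from the object attribute, then
# from the instance name itself, before falling back to default/null handling;
# readonly fields always hydrate to None.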
class CharFieldTestCase(TestCase):
fixtures = ['note_testdata.json']
def test_init(self):
field_1 = CharField()
self.assertEqual(field_1.help_text, 'Unicode string data. Ex: "Hello World"')
field_2 = CharField(help_text="Custom.")
self.assertEqual(field_2.help_text, 'Custom.')
def test_dehydrated_type(self):
field_1 = CharField()
self.assertEqual(field_1.dehydrated_type, 'string')
def test_dehydrate(self):
note = Note.objects.get(pk=1)
bundle = Bundle(obj=note)
field_1 = CharField(attribute='title', default=True)
self.assertEqual(field_1.dehydrate(bundle), u'First Post!')
field_2 = CharField(default=20)
self.assertEqual(field_2.dehydrate(bundle), u'20')
class FileFieldTestCase(TestCase):
fixtures = ['note_testdata.json']
def test_init(self):
field_1 = FileField()
self.assertEqual(field_1.help_text, 'A file URL as a string. Ex: "http://media.example.com/media/photos/my_photo.jpg"')
field_2 = FileField(help_text="Custom.")
self.assertEqual(field_2.help_text, 'Custom.')
def test_dehydrated_type(self):
field_1 = FileField()
self.assertEqual(field_1.dehydrated_type, 'string')
def test_dehydrate(self):
bit = MediaBit.objects.get(pk=1)
bundle = Bundle(obj=bit)
field_1 = FileField(attribute='image', default=True)
self.assertEqual(field_1.dehydrate(bundle), u'http://localhost:8080/media/lulz/catz.gif')
field_2 = FileField(default='http://media.example.com/img/default_avatar.jpg')
self.assertEqual(field_2.dehydrate(bundle), u'http://media.example.com/img/default_avatar.jpg')
bit = MediaBit.objects.get(pk=1)
bit.image = ''
bundle = Bundle(obj=bit)
field_3 = FileField(attribute='image', default=True)
|
tibor0991/OBM-BOB
|
bob-main/test_sender.py
|
Python
|
gpl-3.0
| 455
| 0.024176
|
import socket
import sys
import time
server_add = './bob_system_socket'
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
message = sys.argv[1]+" "+sys.argv[2]
if sys.argv[1] == 'set':
message+= " "+sys.argv[3]
else:
message+= " null"
try:
sock.connect(server_add)
except socket.error, msg:
print >>sys.stderr, msg
sys.exit(1)
sock.send(message)
data = sock.recv(1024)
if data: print 'reply from server:', data
time.sleep(1)
sock.close()
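# A minimal usage sketch (assumes a server listening on ./bob_system_socket):
#   python test_sender.py get temperature      # sends "get temperature null"
#   python test_sender.py set threshold 42     # sends "set threshold 42"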
|
jmacmahon/invenio
|
modules/oairepository/lib/oai_repository_config.py
|
Python
|
gpl-2.0
| 1,219
| 0
|
# This file is part of Invenio.
# Copyright (C) 2011 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""OAI Repository Configuration."""
# Maximum number of records to put in a single bibupload
CFG_OAI_REPOSITORY_MARCXML_SIZE = 1000
# A magic value used to specify the global set (e.g. when the admin
# specify a set configuration without putting any setSpec)
# NOTE: if you change this value, please update accordingly the root
# Makefile.am and tabcreate.sql defaults for setSpec column in
# oaiREPOSITORY MySQL table.
CFG_OAI_REPOSITORY_GLOBAL_SET_SPEC = "GLOBAL_SET"
|
rbuffat/pyidf
|
tests/test_zonehvacoutdoorairunit.py
|
Python
|
apache-2.0
| 5,734
| 0.004709
|
import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.zone_hvac_forced_air_units import ZoneHvacOutdoorAirUnit
log = logging.getLogger(__name__)
class TestZoneHvacOutdoorAirUnit(unittest.TestCase):
def setUp(self):
self.fd, self.path = tempfile.mkstemp()
def tearDown(self):
os.remove(self.path)
def test_create_zonehvacoutdoorairunit(self):
pyidf.validation_level = ValidationLevel.error
obj = ZoneHvacOutdoorAirUnit()
# alpha
var_name = "Name"
obj.name = var_name
# object-list
var_availability_schedule_name = "object-list|Availability Schedule Name"
obj.availability_schedule_name = var_availability_schedule_name
# object-list
var_zone_name = "object-list|Zone Name"
obj.zone_name = var_zone_name
# real
var_outdoor_air_flow_rate = 0.0001
obj.outdoor_air_flow_rate = var_outdoor_air_flow_rate
# object-list
var_outdoor_air_schedule_name = "object-list|Outdoor Air Schedule Name"
obj.outdoor_air_schedule_name = var_outdoor_air_schedule_name
# object-list
var_supply_fan_name = "object-list|Supply Fan Name"
obj.supply_fan_name = var_supply_fan_name
# alpha
var_supply_fan_placement = "BlowThrough"
obj.supply_fan_placement = var_supply_fan_placement
# object-list
var_exhaust_fan_name = "object-list|Exhaust Fan Name"
obj.exhaust_fan_name = var_exhaust_fan_name
# real
var_exhaust_air_flow_rate = 9.9
obj.exhaust_air_flow_rate = var_exhaust_air_flow_rate
# object-list
var_exhaust_air_schedule_name = "object-list|Exhaust Air Schedule Name"
obj.exhaust_air_schedule_name = var_exhaust_air_schedule_name
# alpha
var_unit_control_type = "NeutralControl"
obj.unit_control_type = var_unit_control_type
# object-list
var_high_air_control_temperature_schedule_name = "object-list|High Air Control Temperature Schedule Name"
obj.high_air_control_temperature_schedule_name = var_high_air_control_temperature_schedule_name
# object-list
var_low_air_control_temperature_schedule_name = "object-list|Low Air Control Temperature Schedule Name"
obj.low_air_control_temperature_schedule_name = var_low_air_control_temperature_schedule_name
# node
var_outdoor_air_node_name = "node|Outdoor Air Node Name"
obj.outdoor_air_node_name = var_outdoor_air_node_name
# node
var_airoutlet_node_name = "node|AirOutlet Node Name"
obj.airoutlet_node_name = var_airoutlet_node_name
# node
var_airinlet_node_name = "node|AirInlet Node Name"
obj.airinlet_node_name = var_airinlet_node_name
# node
var_supply_fanoutlet_node_name = "node|Supply FanOutlet Node Name"
obj.supply_fanoutlet_node_name = var_supply_fanoutlet_node_name
# object-list
var_outdoor_air_unit_list_name = "object-list|Outdoor Air Unit List Name"
obj.outdoor_air_unit_list_name = var_outdoor_air_unit_list_name
# object-list
var_availability_manager_list_name = "object-list|Availability Manager List Name"
obj.availability_manager_list_name = var_availability_manager_list_name
idf = IDF()
idf.add(obj)
idf.save(self.path, check=False)
with open(self.path, mode='r') as f:
for line in f:
log.debug(line.strip())
idf2 = IDF(self.path)
self.assertEqual(idf2.zonehvacoutdoorairunits[0].name, var_name)
self.assertEqual(idf2.zonehvacoutdoorairunits[0].availability_schedule_name, var_availability_schedule_name)
self.assertEqual(idf2.zonehvacoutdoorairunits[0].zone_name, var_zone_name)
self.assertAlmostEqual(idf2.zonehvacoutdoorairunits[0].outdoor_air_flow_rate, var_outdoor_air_flow_rate)
self.assertEqual(idf2.zonehvacoutdoorairunits[0].outdoor_air_schedule_name, var_outdoor_air_schedule_name)
self.assertEqual(idf2.zonehvacoutdoorairunits[0].supply_fan_name, var_supply_fan_name)
self.assertEqual(idf2.zonehvacoutdoorairunits[0].supply_fan_placement, var_supply_fan_placement)
self.assertEqual(idf2.zonehvacoutdoorairunits[0].exhaust_fan_name, var_exhaust_fan_name)
self.assertAlmostEqual(idf2.zonehvacoutdoorairunits[0].exhaust_air_flow_rate, var_exhaust_air_flow_rate)
self.assertEqual(idf2.zonehvacoutdoorairunits[0].exhaust_air_schedule_name, var_exhaust_air_schedule_name)
self.assertEqual(idf2.zonehvacoutdoorairunits[0].unit_control_type, var_unit_control_type)
self.assertEqual(idf2.zonehvacoutdoorairunits[0].high_air_control_temperature_schedule_name, var_high_air_control_temperature_schedule_name)
        self.assertEqual(idf2.zonehvacoutdoorairunits[0].low_air_control_temperature_schedule_name, var_low_air_control_temperature_schedule_name)
self.assertEqual(idf2.zonehvacoutdoorairunits[0].outdoor_air_node_name, var_outdoor_air_node_name)
        self.assertEqual(idf2.zonehvacoutdoorairunits[0].airoutlet_node_name, var_airoutlet_node_name)
self.assertEqual(idf2.zonehvacoutdoorairunits[0].airinlet_node_name, var_airinlet_node_name)
self.assertEqual(idf2.zonehvacoutdoorairunits[0].supply_fanoutlet_node_name, var_supply_fanoutlet_node_name)
self.assertEqual(idf2.zonehvacoutdoorairunits[0].outdoor_air_unit_list_name, var_outdoor_air_unit_list_name)
self.assertEqual(idf2.zonehvacoutdoorairunits[0].availability_manager_list_name, var_availability_manager_list_name)
|
ramusus/django-vkontakte-groups
|
vkontakte_groups/models.py
|
Python
|
bsd-3-clause
| 6,504
| 0.003455
|
# -*- coding: utf-8 -*-
import logging
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext as _
from vkontakte_api.models import VkontakteManager, VkontaktePKModel
from .mixins import ParseGroupsMixin, PhotableModelMixin, UserableModelMixin, VideoableModelMixin
log = logging.getLogger('vkontakte_groups')
GROUP_TYPE_CHOICES = (
('group', u'Группа'),
('page', u'Страница'),
('event', u'Событие'),
)
class CheckMembersCountFailed(Exception):
pass
class GroupRemoteManager(VkontakteManager):
def api_call(self, *args, **kwargs):
if 'ids' in kwargs:
kwargs['group_ids'] = ','.join(map(lambda i: str(i), kwargs.pop('ids')))
return super(GroupRemoteManager, self).api_call(*args, **kwargs)
def search(self, q, offset=None, count=None):
kwargs = {'q': q}
if offset:
kwargs.update(offset=offset)
if count:
kwargs.update(count=count)
return self.get(method='search', **kwargs)
def fetch(self, *args, **kwargs):
"""
Add additional fields to parent fetch request
"""
if 'fields' not in kwargs:
kwargs['fields'] = 'members_count'
return super(GroupRemoteManager, self).fetch(*args, **kwargs)
def get_members_ids(self, group, check_count=True, **kwargs):
ids = set()
attempts = 0
kwargs['offset'] = 0
kwargs['group_id'] = group.remote_id
while True:
response = self.api_call('get_members', **kwargs)
ids_iteration = response.get('items', [])
for user_id in ids_iteration:
ids.add(int(user_id))
ids_iteration_count = len(ids_iteration)
ids_count = len(ids)
            log.debug('Get members of group %s. Got %s, total %s, actual amount %s, offset %s' % (
                group, ids_iteration_count, ids_count, group.members_count, kwargs['offset']))
if ids_iteration_count != 0:
attempts = 0
kwargs['offset'] += ids_iteration_count
else:
try:
if check_count:
self.check_members_count(group, ids_count)
break
except CheckMembersCountFailed as e:
attempts += 1
if attempts <= 5:
log.warning('%s, offset %s, attempts %s' % (e, kwargs['offset'], attempts))
continue
else:
log.error(e)
raise
return list(ids)
def check_members_count(self, group, count):
if group.members_count and count > 0:
division = float(group.members_count) / count
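            # Illustrative numbers for the tolerance check below: with
            # members_count=1000, fetched counts from ~991 to ~1010 pass
            # (0.99 <= division <= 1.01); count=980 gives division ~1.02
            # and raises CheckMembersCountFailed.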
if 0.99 > division or 1.01 < division:
raise CheckMembersCountFailed("Suspicious ammount of members fetched for group %s. "
"Actual ammount is %d, fetched %d, division is %s" % (
group, group.members_count, count, division))
@python_2_unicode_compatible
class Group(PhotableModelMixin, VideoableModelMixin, UserableModelMixin, VkontaktePKModel):
resolve_screen_name_types = ['group', 'page', 'event']
slug_prefix = 'club'
name = models.CharField(max_length=800)
screen_name = models.CharField(u'Короткое имя группы', max_length=50, db_index=True)
is_closed = models.NullBooleanField(u'Флаг закрытой группы')
is_admin = models.NullBooleanField(u'Пользователь является администратором')
members_count = models.IntegerField(u'Всего участников', null=True)
verified = models.NullBooleanField(u'Флаг официальной группы')
type = models.CharField(u'Тип объекта', max_length=10, choices=GROUP_TYPE_CHOICES)
photo = models.URLField()
photo_big = models.URLField()
photo_medium = models.URLField()
remote = GroupRemoteManager(remote_pk=('remote_id',), methods_namespace='groups', version=5.28, methods={
'get': 'getById',
'search': 'search',
'get_members': 'getMembers',
})
class Meta:
verbose_name = _('Vkontakte group')
verbose_name_plural = _('Vkontakte groups')
def __str__(self):
return self.name
@property
def refresh_kwargs(self):
return {'ids': [self.remote_id]}
@property
def wall_comments(self):
if 'vkontakte_wall' not in settings.INSTALLED_APPS:
raise ImproperlyConfigured("Application 'vkontakte_wall' not in INSTALLED_APPS")
        from vkontakte_wall.models import Comment
        # TODO: improve schema and queries with using owner_id field
        return Comment.objects.filter(remote_id__startswith='-%s_' % self.remote_id)
@property
def topics_comments(self):
if 'vkontakte_board' not in settings.INSTALLED_APPS:
raise ImproperlyConfigured("Application 'vkontakte_board' not in INSTALLED_APPS")
from vkontakte_board.models import Comment
# TODO: improve schema and queries with using owner_id field
return Comment.objects.filter(remote_id__startswith='-%s_' % self.remote_id)
def fetch_posts(self, *args, **kwargs):
if 'vkontakte_wall' not in settings.INSTALLED_APPS:
raise ImproperlyConfigured("Application 'vkontakte_wall' not in INSTALLED_APPS")
from vkontakte_wall.models import Post
return Post.remote.fetch_wall(owner=self, *args, **kwargs)
def fetch_topics(self, *args, **kwargs):
if 'vkontakte_board' not in settings.INSTALLED_APPS:
raise ImproperlyConfigured("Application 'vkontakte_board' not in INSTALLED_APPS")
from vkontakte_board.models import Topic
return Topic.remote.fetch(group=self, *args, **kwargs)
def fetch_statistic(self, *args, **kwargs):
if 'vkontakte_groups_statistic' not in settings.INSTALLED_APPS:
raise ImproperlyConfigured("Application 'vkontakte_groups_statistic' not in INSTALLED_APPS")
from vkontakte_groups_statistic.models import fetch_statistic_for_group
return fetch_statistic_for_group(group=self, *args, **kwargs)
from . import signals
|
arjclark/rose
|
etc/rose-meta/rose-demo-baked-alaska-sponge/vn1.0/lib/python/macros/desoggy.py
|
Python
|
gpl-3.0
| 1,653
| 0
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2012-7 Met Office.
#
# This file is part of Rose, a framework for meteorological suites.
#
# Rose is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Rose is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Rose. If not, see <http://www.gnu.org/licenses/>.
# -----------------------------------------------------------------------------
"""This module contains:
SpongeDeSoggifier, a rose transform macro.
"""
import re
import subprocess
import rose.macro
class SpongeDeSoggifier(rose.macro.MacroBase):
"""De-soggifies the sponge."""
SOGGY_FIX_TEXT = "de-soggified"
def transform(self, config, meta_config=None):
"""Reduce the density of the sponge."""
sponge_density = config.get_value(["env", "SPONGE_DENSITY"])
if sponge_density is not None and float(sponge_density) > 0.5:
# 1 g cm^-3 is pure water, so this is pretty soggy.
config.set(["env", "SPONGE_DENSITY"], "0.3")
self.add_report(
"env", "SPONGE_DENSITY", "0.3", self.SOGGY_FIX_TEXT)
return config, self.reports
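# A minimal usage sketch (assumes a rose.config.ConfigNode loaded elsewhere;
# the density value is illustrative):
#   import rose.config
#   config = rose.config.ConfigNode()
#   config.set(["env", "SPONGE_DENSITY"], "0.8")
#   config, reports = SpongeDeSoggifier().transform(config)
#   # reports now holds one "de-soggified" change for env=SPONGE_DENSITY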
|
artizirk/digilib
|
store.py
|
Python
|
bsd-3-clause
| 1,148
| 0.000871
|
#!/usr/bin/env python3
#####################
# code for mirroring/storing copy of digi.ee forum
from pymongo import MongoClient
import digilib
client = MongoClient()
db = client.digi_clone
forums = db.forums
for forum in digilib.get_forums():
    if not forums.find_one({"id": forum["id"]}):
print("inserting forum", forum["title"])
forums.insert(forum)
threads = db.threads
for forum in forums.find():
forum_id = forum["id"]
print("geting threads from forum id", forum_id)
for thread in digilib.get_all_threads_in_forum(forum_id):
thread["forum_id"] = forum_id
if not threads.find_one({"id": thread["id"]}):
print("inserting thread", thread["id"], thread["title"])
threads.insert(thread)
posts = db.posts
for thread in threads.find():
thread_id = thread["id"]
print("getting posts from thread id:", thread_id)
for post in digilib.get_all_posts_in_thread(thread_id):
post["thread_id"] = thread_id
if not posts.find_one({"post_id": post["post_id"]}):
print("inserting post", post["post_id"], post["user"])
posts.insert(post)
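# Note: every insert above is guarded by a find_one() lookup on the remote id,
# so the mirror script is safe to re-run - it only inserts forums, threads and
# posts that are not already in the digi_clone database.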
|
3dfxsoftware/cbss-addons
|
invoice_report_per_journal/wizard/invoice_report_per_journal.py
|
Python
|
gpl-2.0
| 5,756
| 0.004343
|
# -*- encoding: utf-8 -*- #
############################################################################
# Module Writen to OpenERP, Open Source Management Solution #
# Copyright (C) Vauxoo (<http://vauxoo.com>). #
# All Rights Reserved #
###############Credits######################################################
#    Coded by: Sabrina Romero (sabrina@vauxoo.com)                         #
#    Planified by: Nhomar Hernandez (nhomar@vauxoo.com)                    #
# Finance by: COMPANY NAME <EMAIL-COMPANY> #
# Audited by: author NAME LASTNAME <email@vauxoo.com> #
############################################################################
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
import base64
import openerp.netsvc as netsvc
import logging
_logger = logging.getLogger(__name__)
class invoice_report_per_journal(osv.TransientModel):
"""
OpenERP Wizard: invoice.report.per.journal
"""
_name = "invoice.report.per.journal"
def get_journal_object(self, cr, uid, context=None):
record_brw = self.pool.get(context['active_model']).browse(
cr, uid, context['active_ids'][0])
if not record_brw.journal_id:
            raise osv.except_osv(_('ERROR !'), _(
                'There is no journal configured for this invoice.'))
return record_brw.journal_id
def _get_journal(self, cr, uid, context=None):
return self.get_journal_object(cr, uid, context=context).name
def _prepare_service(self, cr, uid, report, context=None):
service = netsvc.LocalService('report.' + report.report_name)
(result, format) = service.create(cr, uid, context[
'active_ids'], {'model': context['active_model']}, {})
return (result, format)
def _get_report(self, cr, uid, context=None):
report = self.get_journal_object(
cr, uid, context=context).invoice_report_id
try:
(result, format) = self._prepare_service(cr, uid, report, context=context)
except:
if report:
_logger.warning("Error occurred in the report, the report set to the journal will be ignored.")
rep_id = self.pool.get("ir.actions.report.xml").search(
cr, uid, [('model', '=', 'account.invoice'),], order="id",
context=context)[0]
report_ = self.pool.get(
"ir.actions.report.xml").browse(cr, uid, rep_id, context=context)
(result, format) = self._prepare_service(cr, uid, report_, context=context)
try:
act_id = self.pool.get('ir.actions.act_window').search(cr, uid, [('name','=', report.name + ' txt')], context=context)[0]
if act_id:
act_brw = self.pool.get('ir.actions.act_window').browse(cr, uid, act_id, context=context)
wiz_obj = self.pool.get(act_brw.res_model)
wiz_id = wiz_obj.create(cr, uid, {}, context=context)
wiz_brw = wiz_obj.browse(cr, uid, wiz_id, context=context)
result = base64.decodestring(wiz_brw.fname_txt)
except:
if report:
_logger.info("txt report not defined for the report assigned to journal.")
return base64.encodestring(result)
def _get_report_name(self, cr, uid, context=None):
report = self.get_journal_object(cr, uid,
context=context).invoice_report_id
try:
(result, format) = self._prepare_service(cr, uid, report, context=context)
except:
if report:
_logger.warning("Error occurred in the report, the report set to the journal will be ignored.")
rep_id = self.pool.get("ir.actions.report.xml").search(
cr, uid, [('model', '=', 'account.invoice'),], order="id",
context=context)[0]
report = self.pool.get(
"ir.actions.report.xml").browse(cr, uid, rep_id, context=context)
return report.report_name
def print_invoice(self, cr, uid, ids, context=None):
return {'type': 'ir.actions.report.xml',
'report_name': self._get_report_name(cr, uid, context=context),
'datas': {'ids': context['active_ids']}}
_columns = {
        'journal': fields.char('Journal', 64, readonly=True, required=True),
'report_format': fields.binary("Report", readonly=True, required=True)
}
_defaults = {
'journal': _get_journal,
'report_format': _get_report,
}
|
NSLS-II-CHX/ipython_ophyd
|
startup/97_HDM.py
|
Python
|
bsd-2-clause
| 1,766
| 0.045866
|
CHA_Vol_PV = 'XF:11IDB-BI{XBPM:02}CtrlDAC:ALevel-SP'
HDM_Encoder_PV = 'XF:11IDA-OP{Mir:HDM-Ax:P}Pos-I'
E=np.arange(9.,11.,.05)
SI_STRIPE = -9
RH_STRIPE = 9
def take_Rdata( voltage, E):
caput(CHA_Vol_PV, voltage)
#yield from bp.abs_set(hdm.y, RH_STRIPE)
hdm.y.user_setpoint.value = RH_STRIPE
sleep( 3.0 )
E_scan(list(E))
hrh=db[-1]
    #yield from bp.abs_set(hdm.y, SI_STRIPE)
hdm.y.user_setpoint.value = SI_STRIPE
sleep( 3.0 )
E_scan(list(E))
hsi=db[-1]
return get_R( hsi, hrh )
def get_R(header_si, header_rh):
datsi=get_table(header_si)
datrh=get_table(header_rh)
th_B=-datsi.dcm_b
En=xf.get_EBragg('Si111cryo',th_B)
Rsi=datsi.elm_sum_all
Rrh=datrh.elm_sum_all
plt.close(99)
plt.figure(99)
plt.semilogy(En,Rsi/Rrh,'ro-')
plt.xlabel('E [keV]');plt.ylabel('R_si / R_rh')
plt.grid()
return Rsi/Rrh
voltage_CHA = [ 3.5, 4.0, 4.5, 5.0, 5.5]
voltage_CHA = [ 3.0,3.2,3.4,3.6,3.8,4.0,4.2,4.4,4.6,4.8,5.0,5.2,5.4]
r_eng=np.array(np.loadtxt("/home/xf11id/Downloads/R_Rh_0p180.txt"))[:,0]/1e3
rsi_0p18=np.array(np.loadtxt("/home/xf11id/Downloads/R_Si_0p180.txt"))[:,1]
rrh_0p18=np.array(np.loadtxt("/home/xf11id/Downloads/R_Rh_0p180.txt"))[:,1]
def get_Rdata( voltage_CHA, E ):
    R = np.zeros((len(voltage_CHA), len(E)))
fig, ax = plt.subplots()
ax.plot(r_eng,rsi_0p18/rrh_0p18,label="calc 0.18 deg")
ax.set_xlabel("E [keV]")
ax.set_ylabel("R_Si/R_Rh")
    for i, voltage in enumerate(voltage_CHA):
        R_SiRh = take_Rdata( voltage, E)
        R[i] = R_SiRh
        HDM_Encoder = caget( HDM_Encoder_PV )
        ax.plot(E, R_SiRh/R_SiRh[1:5].mean(), label="%s V, %s urad" % (voltage, HDM_Encoder))
ax.legend()
return R
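# A minimal usage sketch (assumes the beamline session provides caput/caget,
# E_scan, db and get_table as above; the output path is hypothetical):
#   R = get_Rdata(voltage_CHA, E)  # one Si/Rh reflectivity curve per voltage
#   np.save('/tmp/hdm_R_scan.npy', R)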
|