| text (string, lengths 4 to 1.02M) | meta (dict) |
|---|---|
from tests.wot import WotTestCase
class WOTGlobalWarTestCase(WotTestCase):
def setUp(self):
super(WOTGlobalWarTestCase, self).setUp()
self.map_id = 'globalmap'
self.clan_id = 1000000001
self.account_id = 1000000000
self.province_id = 'US_01'
def test_clans(self):
response = self.api.globalwar.clans(map_id=self.map_id)
self.assertGreater(len(response), 0)
def test_fame_points(self):
response = self.api.globalwar.fame_points(map_id=self.map_id,
account_id=self.account_id)
self.assertIn(str(self.account_id), response)
def test_maps(self):
response = self.api.globalwar.maps()
self.assertGreater(len(response), 0)
def test_provinces(self):
response = self.api.globalwar.provinces(map_id=self.map_id)
self.assertGreater(len(response), 0)
def test_tournaments(self):
self.assertValidResponse(self.api.globalwar.tournaments,
map_id=self.map_id, province_id=self.province_id)
def test_fame_points_history(self):
# Just skipping since we need a valid access token,
# which is unavailable at this time
pass
def test_alley_of_fame(self):
self.api.globalwar.alley_of_fame(map_id=self.map_id)
def test_battles(self):
self.api.globalwar.battles(map_id=self.map_id, clan_id=self.clan_id)
|
{
"content_hash": "8cb57ba4e3ea6699c29eee12c1434167",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 82,
"avg_line_length": 33.95348837209303,
"alnum_prop": 0.6267123287671232,
"repo_name": "therocode/python-wargaming",
"id": "8b3bcca8d908f0d4efa08c0661caed01649a1108",
"size": "1485",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/wot/globalwar.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "44123"
}
],
"symlink_target": ""
}
|
from six.moves import xrange
from django.conf import settings
from django.test import TestCase
from django_sharding_library.routing_read_strategies import RoundRobinRoutingStrategy, PrimaryOnlyRoutingStrategy
class RoundRobinBucketingStrategyTestCase(TestCase):
databases = '__all__'
def test_is_cyclic(self):
sut = RoundRobinRoutingStrategy(settings.DATABASES)
expected_cycled_shards = ['app_shard_001_replica_001', 'app_shard_001_replica_002', 'app_shard_001']
expected_cycled_shards.sort()
resulting_shards = [sut.pick_read_db('app_shard_001') for i in xrange(150)]
self.assertEqual(len(set([resulting_shards[i] for i in xrange(0, 150, 3)])), 1)
self.assertEqual(len(set([resulting_shards[i] for i in xrange(1, 150, 3)])), 1)
self.assertEqual(len(set([resulting_shards[i] for i in xrange(2, 150, 3)])), 1)
resulting_cycled_shard = resulting_shards[:3]
resulting_cycled_shard.sort()
self.assertEqual(expected_cycled_shards, resulting_cycled_shard)
class MasterOnlyRoutingStrategyTestCase(TestCase):
databases = '__all__'
def test_is_always_primary(self):
sut = PrimaryOnlyRoutingStrategy(settings.DATABASES)
expected_shards = ['app_shard_001'] * 150
resulting_shards = [sut.pick_read_db('app_shard_001') for i in xrange(150)]
self.assertEqual(expected_shards, resulting_shards)
class RandomRoutingStrategyTestCase(TestCase):
databases = '__all__'
def test_no_exception_raised(self):
sut = RoundRobinRoutingStrategy(settings.DATABASES)
[sut.pick_read_db('app_shard_001') for i in xrange(150)]
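# Illustration (added here, not part of the original tests): the cyclic
# behaviour asserted above can be reproduced with a minimal round-robin picker.
# TinyRoundRobin is a made-up sketch, not django_sharding_library's class.
import itertools

class TinyRoundRobin(object):
    def __init__(self, replicas):
        self._cycle = itertools.cycle(replicas)

    def pick_read_db(self, shard_name):
        # Each call advances the cycle, so picks repeat with period len(replicas).
        return next(self._cycle)

if __name__ == '__main__':
    picker = TinyRoundRobin(
        ['app_shard_001', 'app_shard_001_replica_001', 'app_shard_001_replica_002'])
    picks = [picker.pick_read_db('app_shard_001') for _ in range(6)]
    assert picks[:3] == picks[3:]  # the pattern repeats every three picks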
|
{
"content_hash": "abbc8fb4a58e7478b11a62773006d3ac",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 113,
"avg_line_length": 36.93333333333333,
"alnum_prop": 0.6979542719614922,
"repo_name": "JBKahn/django-sharding",
"id": "ac43d551c406ce79b05ed98dcc0a1039e7dd9523",
"size": "1662",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_routing_read_strategies.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "130928"
}
],
"symlink_target": ""
}
|
"""Uniform replay buffer in Python with compressed storage.
PyHashedReplayBuffer is a flavor of the base class which
compresses the observations when the observations have some partial overlap
(e.g. when using frame stacking).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pickle
import threading
from absl import logging
import numpy as np
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from tf_agents.replay_buffers import py_uniform_replay_buffer
from tf_agents.specs import array_spec
from tf_agents.trajectories import trajectory
class FrameBuffer(tf.train.experimental.PythonState):
"""Saves some frames in a memory efficient way.
Thread safety: cannot add multiple frames in parallel.
"""
def __init__(self):
self._frames = {}
def add_frame(self, frame):
"""Add a frame to the buffer.
Args:
frame: Numpy array.
Returns:
The hash identifying the deduplicated frame.
"""
h = hash(frame.tobytes())
if h in self._frames:
_, refcount = self._frames[h]
self._frames[h] = (frame, refcount + 1)
return h
self._frames[h] = (frame, 1)
return h
def __len__(self):
return len(self._frames)
def serialize(self):
"""Callback for `PythonStateWrapper` to serialize the dictionary."""
return pickle.dumps(self._frames)
def deserialize(self, string_value):
"""Callback for `PythonStateWrapper` to deserialize the array."""
self._frames = pickle.loads(string_value)
def compress(self, observation, split_axis=-1):
# e.g. When split_axis is -1, turns an array of size 84x84x4
# into a list of arrays of size 84x84x1.
frame_list = np.split(observation, observation.shape[split_axis],
split_axis)
return np.array([self.add_frame(f) for f in frame_list])
def decompress(self, observation, split_axis=-1):
frames = [self._frames[h][0] for h in observation]
return np.concatenate(frames, axis=split_axis)
def on_delete(self, observation, split_axis=-1):
for h in observation:
frame, refcount = self._frames[h]
if refcount > 1:
self._frames[h] = (frame, refcount - 1)
else:
del self._frames[h]
def clear(self):
self._frames = {}
class PyHashedReplayBuffer(py_uniform_replay_buffer.PyUniformReplayBuffer):
"""A Python-based replay buffer with optimized underlying storage.
This replay buffer deduplicates data in the stored trajectories along the
last axis of the observation, which is useful, e.g., if you are performing
something like frame stacking. For example, if each observation is 4 stacked
84x84 grayscale images forming a shape [84, 84, 4], then the replay buffer
will separate out each of the images and deduplicate across each trajectory
in case an image is repeated.
Note: This replay buffer assumes that the items being stored are
trajectory.Trajectory instances.
"""
def __init__(self, data_spec, capacity, log_interval=None):
if not isinstance(data_spec, trajectory.Trajectory):
raise ValueError(
'data_spec must be the spec of a trajectory: {}'.format(data_spec))
super(PyHashedReplayBuffer, self).__init__(
data_spec, capacity)
self._frame_buffer = FrameBuffer()
self._lock_frame_buffer = threading.Lock()
self._log_interval = log_interval
def _encoded_data_spec(self):
observation = self._data_spec.observation
observation = array_spec.ArraySpec(
shape=(observation.shape[-1],), dtype=np.int64)
return self._data_spec._replace(observation=observation)
def _encode(self, traj):
"""Encodes a trajectory for efficient storage.
The observations in this trajectory are replaced by a compressed
version of the observations: each frame is only stored exactly once.
Args:
traj: The original trajectory.
Returns:
The same trajectory where frames in the observation have been
de-duplicated.
"""
with self._lock_frame_buffer:
observation = self._frame_buffer.compress(traj.observation)
if (self._log_interval and
self._np_state.item_count % self._log_interval == 0):
logging.info('%s', 'Effective Replay buffer frame count: {}'.format(
len(self._frame_buffer)))
return traj._replace(observation=observation)
def _decode(self, encoded_trajectory):
"""Decodes a trajectory.
The observation in the trajectory has been compressed so that no frame
is present more than once in the replay buffer. Uncompress the observations
in this trajectory.
Args:
encoded_trajectory: The compressed version of the trajectory.
Returns:
The original trajectory (uncompressed).
"""
observation = self._frame_buffer.decompress(encoded_trajectory.observation)
return encoded_trajectory._replace(observation=observation)
def _on_delete(self, encoded_trajectory):
with self._lock_frame_buffer:
self._frame_buffer.on_delete(encoded_trajectory.observation)
def _clear(self):
super(PyHashedReplayBuffer, self)._clear()
self._frame_buffer.clear()
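# Usage sketch (added for illustration; not part of the original module):
# stacked frames that share planes are stored once and reconstructed exactly.
if __name__ == '__main__':
    demo_buffer = FrameBuffer()
    # Three identical 4x4 planes stacked along the last axis.
    demo_obs = np.repeat(np.arange(16, dtype=np.uint8).reshape(4, 4, 1), 3, axis=-1)
    encoded = demo_buffer.compress(demo_obs)
    assert len(demo_buffer) == 1  # only one unique frame is stored
    np.testing.assert_array_equal(demo_buffer.decompress(encoded), demo_obs)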
|
{
"content_hash": "fecdc8fa171ca27b4c2e0e7fb79d4e4f",
"timestamp": "",
"source": "github",
"line_count": 159,
"max_line_length": 79,
"avg_line_length": 32.540880503144656,
"alnum_prop": 0.6990722844994202,
"repo_name": "tensorflow/agents",
"id": "cdaf603e7f7fae22ba6a9477595f552d85a6e041",
"size": "5777",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tf_agents/replay_buffers/py_hashed_replay_buffer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4930266"
},
{
"name": "Shell",
"bytes": "10950"
}
],
"symlink_target": ""
}
|
from django.db import models
from django.contrib.auth.models import AbstractUser
class User(AbstractUser, models.Model):
AbstractUser._meta.get_field('email')._unique = True
confirmation_code = models.CharField(max_length=34, null=True, blank=True)
REQUIRED_FIELDS = ('email', 'first_name')
USERNAME_FIELD = 'username'
def __str__(self):
return self.username
def name(self):
return '%s %s' % (self.first_name, self.last_name)
class Meta:
db_table = 'auth_user'
|
{
"content_hash": "fbd8479ca36c4d74faeb6ab219bdbbf9",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 80,
"avg_line_length": 28.31578947368421,
"alnum_prop": 0.6394052044609665,
"repo_name": "28harishkumar/django-diary",
"id": "1f06c66c7e574fe2587c93beb10d081f37c82951",
"size": "538",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "user/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "504344"
},
{
"name": "HTML",
"bytes": "1345812"
},
{
"name": "JavaScript",
"bytes": "219499"
},
{
"name": "Python",
"bytes": "64753"
},
{
"name": "Ruby",
"bytes": "123"
}
],
"symlink_target": ""
}
|
"""
クロージャのサンプルです。
"""
from typing import Callable
from trypython.common.commoncls import SampleBase
from trypython.common.commonfunc import pr
class Sample(SampleBase):
def exec(self):
#
# A closure is a function that is dynamically generated by another function.
# It remembers the values of variables created outside that function and can modify them.
#
closure1 = Sample.make_closure('hello world')
closure2 = Sample.make_closure('this is message')
pr('closure1', closure1())
pr('closure2', closure2())
closure3 = Sample.make_closure_with_param('hello world')
pr('closure3', closure3('closure parameter'))
@staticmethod
def make_closure_with_param(message: str) -> Callable[[str], str]:
"""
引数を一つ受け取るクロージャを生成します。
:param message: メッセージ
:return: クロージャ
"""
def new_function(option_message: str):
return f'{message} with {option_message}'
return new_function
@staticmethod
def make_closure(message: str) -> Callable[[], str]:
"""
クロージャを生成します。
:param message: メッセージ
:return: クロージャ
"""
def new_function():
return message.upper()
return new_function
def go():
obj = Sample()
obj.exec()
|
{
"content_hash": "c9573aee217b9465bcd468ecb9f95950",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 70,
"avg_line_length": 22.107142857142858,
"alnum_prop": 0.5920840064620355,
"repo_name": "devlights/try-python",
"id": "63b68413d1a13cad44f64b4414902263c607831b",
"size": "1503",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "trypython/advanced/closure/closure01.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1262"
},
{
"name": "Python",
"bytes": "316370"
}
],
"symlink_target": ""
}
|
"""
CorpusReader for reviews corpora (syntax based on Customer Review Corpus).
- Customer Review Corpus information -
Annotated by: Minqing Hu and Bing Liu, 2004.
Department of Computer Science
University of Illinois at Chicago
Contact: Bing Liu, liub@cs.uic.edu
http://www.cs.uic.edu/~liub
Distributed with permission.
The "product_reviews_1" and "product_reviews_2" datasets respectively contain
annotated customer reviews of 5 and 9 products from amazon.com.
Related papers:
- Minqing Hu and Bing Liu. "Mining and summarizing customer reviews".
Proceedings of the ACM SIGKDD International Conference on Knowledge
Discovery & Data Mining (KDD-04), 2004.
- Minqing Hu and Bing Liu. "Mining Opinion Features in Customer Reviews".
Proceedings of the Nineteenth National Conference on Artificial Intelligence
(AAAI-2004), 2004.
- Xiaowen Ding, Bing Liu and Philip S. Yu. "A Holistic Lexicon-Based Approach to
Opinion Mining." Proceedings of First ACM International Conference on Web
Search and Data Mining (WSDM-2008), Feb 11-12, 2008, Stanford University,
Stanford, California, USA.
Symbols used in the annotated reviews:
[t] : the title of the review: Each [t] tag starts a review.
xxxx[+|-n]: xxxx is a product feature.
[+n]: Positive opinion, n is the opinion strength: 3 strongest, and 1 weakest.
Note that the strength is quite subjective.
You may want to ignore it and consider only + and -.
[-n]: Negative opinion
## : start of each sentence. Each line is a sentence.
[u] : feature did not appear in the sentence.
[p] : feature did not appear in the sentence. Pronoun resolution is needed.
[s] : suggestion or recommendation.
[cc]: comparison with a competing product from a different brand.
[cs]: comparison with a competing product from the same brand.
Note: Some of the files (e.g. "ipod.txt", "Canon PowerShot SD500.txt") do not
provide separation between different reviews. This is due to the fact that
the dataset was specifically designed for aspect/feature-based sentiment
analysis, for which sentence-level annotation is sufficient. For document-
level classification and analysis, this peculiarity should be taken into
consideration.
"""
from __future__ import division
import re
from nltk.corpus.reader.api import *
from nltk.tokenize import *
TITLE = re.compile(r'^\[t\](.*)$') # [t] Title
FEATURES = re.compile(r'((?:(?:\w+\s)+)?\w+)\[((?:\+|\-)\d)\]') # find 'feature' in feature[+3]
NOTES = re.compile(r'\[(?!t)(p|u|s|cc|cs)\]') # find 'p' in camera[+2][p]
SENT = re.compile(r'##(.*)$') # find tokenized sentence
@compat.python_2_unicode_compatible
class Review(object):
"""
A Review is the main block of a ReviewsCorpusReader.
"""
def __init__(self, title=None, review_lines=None):
"""
:param title: the title of the review.
:param review_lines: the list of the ReviewLines that belong to the Review.
"""
self.title = title
if review_lines is None:
self.review_lines = []
else:
self.review_lines = review_lines
def add_line(self, review_line):
"""
Add a line (ReviewLine) to the review.
:param review_line: a ReviewLine instance that belongs to the Review.
"""
assert isinstance(review_line, ReviewLine)
self.review_lines.append(review_line)
def features(self):
"""
Return a list of features in the review. Each feature is a tuple made of
the specific item feature and the opinion strength about that feature.
:return: all features of the review as a list of tuples (feat, score).
:rtype: list(tuple)
"""
features = []
for review_line in self.review_lines:
features.extend(review_line.features)
return features
def sents(self):
"""
Return all tokenized sentences in the review.
:return: all sentences of the review as lists of tokens.
:rtype: list(list(str))
"""
return [review_line.sent for review_line in self.review_lines]
def __repr__(self):
return 'Review(title=\"{}\", review_lines={})'.format(self.title, self.review_lines)
@compat.python_2_unicode_compatible
class ReviewLine(object):
"""
A ReviewLine represents a sentence of the review, together with (optional)
annotations of its features and notes about the reviewed item.
"""
def __init__(self, sent, features=None, notes=None):
self.sent = sent
if features is None:
self.features = []
else:
self.features = features
if notes is None:
self.notes = []
else:
self.notes = notes
def __repr__(self):
return ('ReviewLine(features={}, notes={}, sent={})'.format(
self.features, self.notes, self.sent))
class ReviewsCorpusReader(CorpusReader):
"""
Reader for the Customer Review Data dataset by Hu, Liu (2004).
Note: we are not applying any sentence tokenization at the moment, just word
tokenization.
>>> from nltk.corpus import product_reviews_1
>>> camera_reviews = product_reviews_1.reviews('Canon_G3.txt')
>>> review = camera_reviews[0]
>>> review.sents()[0]
['i', 'recently', 'purchased', 'the', 'canon', 'powershot', 'g3', 'and', 'am',
'extremely', 'satisfied', 'with', 'the', 'purchase', '.']
>>> review.features()
[('canon powershot g3', '+3'), ('use', '+2'), ('picture', '+2'),
('picture quality', '+1'), ('picture quality', '+1'), ('camera', '+2'),
('use', '+2'), ('feature', '+1'), ('picture quality', '+3'), ('use', '+1'),
('option', '+1')]
We can also reach the same information directly from the stream:
>>> product_reviews_1.features('Canon_G3.txt')
[('canon powershot g3', '+3'), ('use', '+2'), ...]
We can compute stats for specific product features:
>>> from __future__ import division
>>> n_reviews = len([(feat,score) for (feat,score) in product_reviews_1.features('Canon_G3.txt') if feat=='picture'])
>>> tot = sum([int(score) for (feat,score) in product_reviews_1.features('Canon_G3.txt') if feat=='picture'])
>>> # With true division (imported above), 24 / 15 gives 1.6 rather than 1 on Python 2.7
>>> mean = tot / n_reviews
>>> print(n_reviews, tot, mean)
15 24 1.6
"""
CorpusView = StreamBackedCorpusView
def __init__(self, root, fileids, word_tokenizer=WordPunctTokenizer(),
encoding='utf8'):
"""
:param root: The root directory for the corpus.
:param fileids: a list or regexp specifying the fileids in the corpus.
:param word_tokenizer: a tokenizer for breaking sentences or paragraphs
into words. Default: `WordPunctTokenizer`
:param encoding: the encoding that should be used to read the corpus.
"""
CorpusReader.__init__(self, root, fileids, encoding)
self._word_tokenizer = word_tokenizer
def features(self, fileids=None):
"""
Return a list of features. Each feature is a tuple made of the specific
item feature and the opinion strength about that feature.
:param fileids: a list or regexp specifying the ids of the files whose
features have to be returned.
:return: all features for the item(s) in the given file(s).
:rtype: list(tuple)
"""
if fileids is None:
fileids = self._fileids
elif isinstance(fileids, string_types):
fileids = [fileids]
return concat([self.CorpusView(fileid, self._read_features, encoding=enc)
for (fileid, enc) in self.abspaths(fileids, True)])
def raw(self, fileids=None):
"""
:param fileids: a list or regexp specifying the fileids of the files that
have to be returned as a raw string.
:return: the given file(s) as a single string.
:rtype: str
"""
if fileids is None:
fileids = self._fileids
elif isinstance(fileids, string_types):
fileids = [fileids]
return concat([self.open(f).read() for f in fileids])
def readme(self):
"""
Return the contents of the corpus README.txt file.
"""
return self.open("README.txt").read()
def reviews(self, fileids=None):
"""
Return all the reviews as a list of Review objects. If `fileids` is
specified, return all the reviews from each of the specified files.
:param fileids: a list or regexp specifying the ids of the files whose
reviews have to be returned.
:return: the given file(s) as a list of reviews.
"""
if fileids is None:
fileids = self._fileids
return concat([self.CorpusView(fileid, self._read_review_block, encoding=enc)
for (fileid, enc) in self.abspaths(fileids, True)])
def sents(self, fileids=None):
"""
Return all sentences in the corpus or in the specified files.
:param fileids: a list or regexp specifying the ids of the files whose
sentences have to be returned.
:return: the given file(s) as a list of sentences, each encoded as a
list of word strings.
:rtype: list(list(str))
"""
return concat([self.CorpusView(path, self._read_sent_block, encoding=enc)
for (path, enc, fileid)
in self.abspaths(fileids, True, True)])
def words(self, fileids=None):
"""
Return all words and punctuation symbols in the corpus or in the specified
files.
:param fileids: a list or regexp specifying the ids of the files whose
words have to be returned.
:return: the given file(s) as a list of words and punctuation symbols.
:rtype: list(str)
"""
return concat([self.CorpusView(path, self._read_word_block, encoding=enc)
for (path, enc, fileid)
in self.abspaths(fileids, True, True)])
def _read_features(self, stream):
features = []
for i in range(20):
line = stream.readline()
if not line:
return features
features.extend(re.findall(FEATURES, line))
return features
def _read_review_block(self, stream):
while True:
line = stream.readline()
if not line:
return [] # end of file.
title_match = re.match(TITLE, line)
if title_match:
review = Review(title=title_match.group(1).strip()) # We create a new review
break
# Scan until we find another line matching the regexp, or EOF.
while True:
oldpos = stream.tell()
line = stream.readline()
# End of file:
if not line:
return [review]
# Start of a new review: backup to just before it starts, and
# return the review we've already collected.
if re.match(TITLE, line):
stream.seek(oldpos)
return [review]
# Anything else is part of the review line.
feats = re.findall(FEATURES, line)
notes = re.findall(NOTES, line)
sent = re.findall(SENT, line)
if sent:
sent = self._word_tokenizer.tokenize(sent[0])
review_line = ReviewLine(sent=sent, features=feats, notes=notes)
review.add_line(review_line)
def _read_sent_block(self, stream):
sents = []
for review in self._read_review_block(stream):
sents.extend([sent for sent in review.sents()])
return sents
def _read_word_block(self, stream):
words = []
for i in range(20): # Read 20 lines at a time.
line = stream.readline()
sent = re.findall(SENT, line)
if sent:
words.extend(self._word_tokenizer.tokenize(sent[0]))
return words
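# Worked example (added for illustration; not part of the original module):
# the FEATURES, NOTES and SENT patterns pull the annotations apart from a raw
# review line of the kind described in the module docstring.
if __name__ == '__main__':
    line = 'picture quality[+2][p]##the picture quality is amazing .'
    print(re.findall(FEATURES, line))  # [('picture quality', '+2')]
    print(re.findall(NOTES, line))     # ['p']
    print(re.findall(SENT, line))      # ['the picture quality is amazing .']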
|
{
"content_hash": "c4f5b62d40be030ed5c3643ceb3fff7d",
"timestamp": "",
"source": "github",
"line_count": 321,
"max_line_length": 125,
"avg_line_length": 38.26479750778816,
"alnum_prop": 0.6064479361719449,
"repo_name": "hollabaq86/haikuna-matata",
"id": "d8a32d4c269d6d61ba1a9243fa1e62173f6f07b0",
"size": "12504",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "env/lib/python2.7/site-packages/nltk/corpus/reader/reviews.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2628"
},
{
"name": "HTML",
"bytes": "6169"
},
{
"name": "JavaScript",
"bytes": "2841"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "19501"
}
],
"symlink_target": ""
}
|
try:
# Python 2.7 and later ship OrderedDict in the collections module
from collections import OrderedDict
except Exception as e:
try:
# In Python 2.2 to 2.6, users need to install ordereddict via pip
from ordereddict import OrderedDict
except Exception as e:
print(e, "No module found to import OrderedDict. So using normal dict itself")
OrderedDict = dict
# end of try:
# end of try:
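# Usage sketch (added for illustration; not part of the original module):
# whichever class the imports above resolved, callers rely only on dict-style
# access; a real OrderedDict additionally preserves insertion order.
if __name__ == '__main__':
    mapping = OrderedDict()
    mapping['one'] = 1
    mapping['two'] = 2
    print(list(mapping.keys()))  # ['one', 'two'] when OrderedDict is available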
|
{
"content_hash": "e25f85bb543f78f675a3e76ca6e29bc2",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 86,
"avg_line_length": 35.333333333333336,
"alnum_prop": 0.6792452830188679,
"repo_name": "atvKumar/open-tamil",
"id": "1535dfef2dcebc79483054a99cfa9a2d54e74c54",
"size": "424",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tamil/txt2unicode/orddic.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "14505"
},
{
"name": "HTML",
"bytes": "2558"
},
{
"name": "Java",
"bytes": "9842"
},
{
"name": "JavaScript",
"bytes": "9250"
},
{
"name": "Makefile",
"bytes": "146"
},
{
"name": "Python",
"bytes": "551116"
},
{
"name": "Ruby",
"bytes": "26442"
},
{
"name": "Shell",
"bytes": "3928"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('myresume', '0002_auto_20160802_2217'),
]
operations = [
migrations.RenameModel(
old_name='Skills',
new_name='Skill',
),
]
|
{
"content_hash": "789d7cb78c02f6bb07c722ce2e11b6dc",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 48,
"avg_line_length": 18.941176470588236,
"alnum_prop": 0.5838509316770186,
"repo_name": "italomandara/mysite",
"id": "c6e55b4c373aac5eae1b81a03c776c7488e49258",
"size": "393",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "myresume/migrations/0003_auto_20160802_2219.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "261372"
},
{
"name": "HTML",
"bytes": "75306"
},
{
"name": "JavaScript",
"bytes": "47944"
},
{
"name": "Python",
"bytes": "64240"
}
],
"symlink_target": ""
}
|
class Dimension:
def __init__(self, line, coln, width, height):
self.line = line
self.coln = coln
self.width = width
self.height = height
|
{
"content_hash": "2533ed14a8a40fa0350c10359db41be4",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 50,
"avg_line_length": 28.833333333333332,
"alnum_prop": 0.5664739884393064,
"repo_name": "pencilcheck/pttbbs-py",
"id": "511e5a49a3fa135c51075d4c42358bccc17173bb",
"size": "173",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/pttbbs/utility.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "65410"
}
],
"symlink_target": ""
}
|
from transfert import Resource
from transfert.resources import FileResource
def test_resource():
r = Resource('file:///foo/bar')
assert r.url.scheme == 'file'
assert r.url.path == '/foo/bar'
assert isinstance(r, FileResource)
|
{
"content_hash": "9ec2ee525a4535363fe0a5bc92bf5ced",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 44,
"avg_line_length": 27.11111111111111,
"alnum_prop": 0.6967213114754098,
"repo_name": "rbernand/transfert",
"id": "ede62f2b9ad5a98f80f8737f82649d599ee47027",
"size": "244",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/test_resource.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "46795"
},
{
"name": "Shell",
"bytes": "1384"
}
],
"symlink_target": ""
}
|
import os
import time
import socket
import shutil
import subprocess
from distutils.spawn import find_executable
import testtools
import click.testing as clicktest
from serv import utils
import serv.serv as serv
from serv import init
def _invoke_click(func, args=None, opts=None):
args = args or []
opts = opts or {}
opts_and_args = []
opts_and_args.extend(args)
for opt, value in opts.items():
if value:
opts_and_args.append(opt + value)
else:
opts_and_args.append(opt)
return clicktest.CliRunner().invoke(getattr(serv, func), opts_and_args)
class TestGenerate(testtools.TestCase):
def setUp(self):
super(TestGenerate, self).setUp()
self.service = 'testservice'
self.nssm = self.service + '.bat'
self.systemd = self.service + '.service'
self.upstart = self.service + '.conf'
self.sysv = self.service
def tearDown(self):
super(TestGenerate, self).tearDown()
# TODO: ignore_errors?
try:
shutil.rmtree(os.path.dirname(self.init_script))
except:
pass
def _get_file_for_system(self, system):
return os.path.join(
utils.get_tmp_dir(system, self.service), getattr(self, system))
def _test_generate(self, sys):
if sys == 'nssm':
self.cmd = find_executable('python') or 'c:\\python27\\python'
else:
self.cmd = find_executable('python2') or '/usr/bin/python2'
self.args = '-m SimpleHTTPServer'
opts = {
'-n': self.service,
'-a': self.args,
'-v': None,
'--overwrite': None,
'--init-system=': sys
}
additional_opts = {
'--nice=': '5',
'--limit-coredump=': '10',
'--limit-physical-memory=': '20',
'--var=': 'KEY1=VALUE1'
}
opts.update(additional_opts)
self.init_script = self._get_file_for_system(sys)
_invoke_click('generate', [self.cmd], opts)
self.assertTrue(self.init_script)
with open(self.init_script) as generated_file:
self.content = generated_file.read()
def test_systemd(self):
self._test_generate('systemd')
self.assertIn(self.cmd + ' ' + self.args, self.content)
self.assertIn('LimitNICE=5', self.content)
self.assertIn('LimitCORE=10', self.content)
self.assertIn('LimitRSS=20', self.content)
env_vars_file = os.path.join(
utils.get_tmp_dir('systemd', self.service), self.service)
with open(env_vars_file) as vars_file:
content = vars_file.read()
self.assertIn('KEY1=VALUE1', content)
def test_upstart(self):
self._test_generate('upstart')
self.assertIn(self.cmd + ' ' + self.args, self.content)
self.assertIn('nice 5', self.content)
self.assertIn('limit core 10 10', self.content)
self.assertIn('limit rss 20 20', self.content)
self.assertIn('env KEY1=VALUE1', self.content)
def test_sysv(self):
self._test_generate('sysv')
self.assertIn('program={0}'.format(self.cmd), self.content)
self.assertIn('args="{0}"'.format(self.args), self.content)
self.assertIn('nice -n "$nice"', self.content)
self.assertIn('ulimit -d 10 -m 20', self.content)
env_vars_file = os.path.join(
utils.get_tmp_dir('sysv', self.service),
self.service + '.defaults')
with open(env_vars_file) as vars_file:
content = vars_file.read()
self.assertIn('nice="5"', content)
def test_nssm(self):
self._test_generate('nssm')
self.assertIn(
'"{0}" "{1}" "{2}"'.format(self.service, self.cmd, self.args),
self.content)
self.assertIn('KEY1=VALUE1 ^', self.content)
def test_generate_no_overwrite(self):
sys = 'systemd'
cmd = find_executable('python2') or '/usr/bin/python2'
opts = {
'-n': self.service,
'--init-system=': sys
}
f = self._get_file_for_system(sys)
try:
_invoke_click('generate', [cmd], opts)
r = _invoke_click('generate', [cmd], opts)
self.assertEqual(r.exit_code, 1)
self.assertIn('File already exists: {0}'.format(f), r.output)
finally:
# f is resolved before the try block so cleanup works even if generate fails
shutil.rmtree(os.path.dirname(f))
def test_bad_string_limit_value(self):
sys = 'systemd'
cmd = '/usr/bin/python2'
opts = {
'-n': self.service,
'-v': None,
'--overwrite': None,
'--init-system=': sys,
'--limit-coredump=': 'asd'
}
r = _invoke_click('generate', [cmd], opts)
self.assertIn('All limits must be integers', r.output)
def test_bad_negative_int_limit_value(self):
sys = 'systemd'
cmd = find_executable('python2') or '/usr/bin/python2'
opts = {
'-n': self.service,
'-v': None,
'--overwrite': None,
'--init-system=': sys,
'--limit-stack-size=': '-10'
}
r = _invoke_click('generate', [cmd], opts)
self.assertIn('All limits must be integers', r.output)
class TestDeploy(testtools.TestCase):
@classmethod
def setUpClass(cls):
cls.service_name = 'testservice'
cls.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def setUp(self):
super(TestDeploy, self).setUp()
def tearDown(self):
super(TestDeploy, self).tearDown()
def _verify_port_open(self):
# for some reason, socket does a bad job identifying opened
# and closed ports here. weird.
time.sleep(1)
if utils.IS_WIN:
self.assertEqual(self.sock.connect_ex(('127.0.0.1', 8000)), 0)
else:
subprocess.check_call(
'ss -lnpt | grep 8000', shell=True, stdout=subprocess.PIPE)
def _verify_port_closed(self):
time.sleep(1)
if utils.IS_WIN:
self.assertEqual(self.sock.connect_ex(('127.0.0.1', 8000)), 10056)
else:
try:
subprocess.check_call(
'ss -lnpt | grep 8000', shell=True, stdout=subprocess.PIPE)
except subprocess.CalledProcessError as ex:
self.assertIn('returned non-zero exit status 1', str(ex))
def _test_deploy_remove(self, system):
if system == 'nssm':
args = find_executable('python') or 'c:\\python27\\python'
else:
args = find_executable('python2') or '/usr/bin/python2'
init_system = {'--init-system=': system}
opts = {
'-n': self.service_name,
'-a': '-m SimpleHTTPServer',
'-d': None,
'-s': None,
'-v': None,
'--overwrite': None,
}
opts.update(init_system)
_invoke_click('generate', [args], opts)
self._verify_port_open()
if not utils.IS_WIN:
_invoke_click('stop', [self.service_name], init_system)
self._verify_port_closed()
_invoke_click('start', [self.service_name], init_system)
self._verify_port_open()
_invoke_click('restart', [self.service_name], init_system)
self._verify_port_open()
_invoke_click('remove', [self.service_name], init_system)
self._verify_port_closed()
def test_systemd(self):
if utils.IS_WIN:
self.skipTest('Irrelevant on Windows.')
if not init.systemd.is_system_exists():
self.skipTest('Systemd not found on this system.')
self._test_deploy_remove('systemd')
def test_upstart(self):
if utils.IS_WIN:
self.skipTest('Irrelevant on Windows.')
if not init.upstart.is_system_exists():
self.skipTest('Upstart not found on this system.')
self._test_deploy_remove('upstart')
def test_sysv(self):
if utils.IS_WIN:
self.skipTest('Irrelevant on Windows.')
if not init.sysv.is_system_exists():
self.skipTest('SysVinit not found on this system.')
self._test_deploy_remove('sysv')
def test_nssm(self):
if utils.IS_LINUX:
self.skipTest('Irrelevant on Linux.')
self._test_deploy_remove('nssm')
|
{
"content_hash": "7241e7b694c90a632021760db4fa711f",
"timestamp": "",
"source": "github",
"line_count": 250,
"max_line_length": 79,
"avg_line_length": 33.728,
"alnum_prop": 0.5583491461100569,
"repo_name": "nir0s/logrotated",
"id": "70c60be5d3dc70d957eb884a51fdc7821fe14759",
"size": "8432",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "logrotated/tests/test_logrotated.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "598"
},
{
"name": "Python",
"bytes": "64782"
}
],
"symlink_target": ""
}
|
"""
The GA4GH data model. Defines all the methods required to translate
data in existing formats into GA4GH protocol types.
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import atexit
import base64
import collections
import glob
import json
import os
import shutil
import tempfile
import ga4gh.exceptions as exceptions
def _cleanupHtslibsMess(indexDir):
"""
Cleanup the mess that htslib has left behind with the index files.
This is a temporary measure until we get a good interface for
dealing with indexes for remote files.
"""
if os.path.exists(indexDir):
shutil.rmtree(indexDir)
class PysamFileHandleCache(object):
"""
Cache for opened file handles. We use a deque, which has the
advantage of O(1) push/pop operations. We always add
elements on the left of the deque and pop elements from the right.
When a file is accessed via getFileHandle, its priority gets
updated, it is put at the "top" of the deque.
"""
def __init__(self):
self._cache = collections.deque()
self._memoTable = dict()
# Initialize the value even if it will be set up by the config
self._maxCacheSize = 50
def setMaxCacheSize(self, size):
"""
Sets the maximum size of the cache
"""
if size <= 0:
raise ValueError(
"The size of the cache must be a strictly positive value")
self._maxCacheSize = size
def _add(self, dataFile, handle):
"""
Add a file handle to the left of the deque
"""
self._cache.appendleft((dataFile, handle))
def _update(self, dataFile, handle):
"""
Update the priority of the file handle. The element is first
removed and then added to the left of the deque.
"""
self._cache.remove((dataFile, handle))
self._add(dataFile, handle)
def _removeLru(self):
"""
Remove the least recently used file handle from the cache.
The pop method removes an element from the right of the deque.
Returns the name of the file that has been removed.
"""
(dataFile, handle) = self._cache.pop()
handle.close()
return dataFile
def getCachedFiles(self):
"""
Returns all file names stored in the cache.
"""
return self._memoTable.keys()
def getFileHandle(self, dataFile, openMethod):
"""
Returns handle associated to the filename. If the file is
already opened, update its priority in the cache and return
its handle. Otherwise, open the file using openMethod, store
it in the cache and return the corresponding handle.
"""
if dataFile in self._memoTable:
handle = self._memoTable[dataFile]
self._update(dataFile, handle)
return handle
else:
try:
handle = openMethod(dataFile)
except ValueError:
raise exceptions.FileOpenFailedException(dataFile)
self._memoTable[dataFile] = handle
self._add(dataFile, handle)
if len(self._memoTable) > self._maxCacheSize:
dataFile = self._removeLru()
del self._memoTable[dataFile]
return handle
# LRU cache of open file handles
fileHandleCache = PysamFileHandleCache()
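# Usage sketch (added for illustration; not part of the original module;
# 'example.bam' is a placeholder name): callers supply the open method, so the
# cache is agnostic to file type and reuses an existing handle for a known path.
if __name__ == '__main__':
    class _FakeHandle(object):
        def close(self):
            pass
    first = fileHandleCache.getFileHandle('example.bam', lambda name: _FakeHandle())
    again = fileHandleCache.getFileHandle('example.bam', lambda name: _FakeHandle())
    assert first is again  # the second lookup hits the cache instead of reopening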
class CompoundId(object):
"""
Base class for an id composed of several different parts, separated
by a separator. Each compound ID consists of a set of fields, each
of which corresponds to a local ID in the data hierarchy. For example,
we might have fields like ["dataset", "variantSet"] for a variantSet.
These are available as cid.dataset, and cid.variantSet. The actual IDs
of the containing objects can be obtained using the corresponding attributes,
like cid.datasetId and cid.variantSetId.
"""
separator = ':'
fields = []
"""
The fields that the compound ID is composed of. These are parsed and
made available as attributes on the object.
"""
containerIds = []
"""
The fields of the ID form a breadcrumb trail through the data
hierarchy, and successive prefixes provide the IDs for objects
further up the tree. This list is a set of tuples giving the
name and length of a given prefix forming an identifier.
"""
def __init__(self, parentCompoundId, *localIds):
"""
Allocates a new CompoundId for the specified parentCompoundId and
local identifiers. This compoundId inherits all of the fields and
values from the parent compound ID, and must have localIds
corresponding to its fields. If no parent id is present,
parentCompoundId should be set to None.
"""
index = 0
if parentCompoundId is not None:
for field in parentCompoundId.fields:
setattr(self, field, getattr(parentCompoundId, field))
index += 1
for field, localId in zip(self.fields[index:], localIds):
setattr(self, field, str(localId))
if len(localIds) != len(self.fields) - index:
raise ValueError(
"Incorrect number of fields provided to instantiate ID")
for idFieldName, prefix in self.containerIds:
values = [getattr(self, f) for f in self.fields[:prefix + 1]]
containerId = self.separator.join(values)
obfuscated = self.obfuscate(containerId)
setattr(self, idFieldName, obfuscated)
def __str__(self):
values = [getattr(self, f) for f in self.fields]
compoundIdStr = self.separator.join(values)
return self.obfuscate(compoundIdStr)
@classmethod
def parse(cls, compoundIdStr):
"""
Parses the specified compoundId string and returns an instance
of this CompoundId class.
:raises: An ObjectWithIdNotFoundException if parsing fails. This is
because this method is a client-facing method, and if a malformed
identifier (under our internal rules) is provided, the response should
be that the identifier does not exist.
"""
if not isinstance(compoundIdStr, basestring):
raise exceptions.BadIdentifierException(compoundIdStr)
try:
deobfuscated = cls.deobfuscate(compoundIdStr)
except TypeError:
# When a string that cannot be converted to base64 is passed
# as an argument, b64decode raises a TypeError. We must treat
# this as an ID not found error.
raise exceptions.ObjectWithIdNotFoundException(compoundIdStr)
try:
splits = deobfuscated.split(cls.separator)
except UnicodeDecodeError:
# Sometimes base64 decoding succeeds but we're left with
# unicode gibberish. This is also an IdNotFound.
raise exceptions.ObjectWithIdNotFoundException(compoundIdStr)
if len(splits) != len(cls.fields):
raise exceptions.ObjectWithIdNotFoundException(compoundIdStr)
return cls(None, *splits)
@classmethod
def obfuscate(cls, idStr):
"""
Mildly obfuscates the specified ID string in an easily reversible
fashion. This is not intended for security purposes, but rather to
dissuade users from depending on our internal ID structures.
"""
return base64.b64encode(idStr)
@classmethod
def deobfuscate(cls, idStr):
"""
Reverses the obfuscation done by the :meth:`obfuscate` method.
"""
return base64.b64decode(idStr)
class ReferenceSetCompoundId(CompoundId):
"""
The compound ID for reference sets.
"""
fields = ['referenceSet']
containerIds = [('referenceSetId', 0)]
class ReferenceCompoundId(ReferenceSetCompoundId):
"""
The compound id for a reference
"""
fields = ReferenceSetCompoundId.fields + ['reference']
class DatasetCompoundId(CompoundId):
"""
The compound id for a data set
"""
fields = ['dataset']
containerIds = [('datasetId', 0)]
class VariantSetCompoundId(DatasetCompoundId):
"""
The compound id for a variant set
"""
fields = DatasetCompoundId.fields + ['variantSet']
containerIds = DatasetCompoundId.containerIds + [('variantSetId', 1)]
class VariantCompoundId(VariantSetCompoundId):
"""
The compound id for a variant
"""
fields = VariantSetCompoundId.fields + ['referenceName', 'start', 'md5']
class CallSetCompoundId(VariantSetCompoundId):
"""
The compound id for a callset
"""
fields = VariantSetCompoundId.fields + ['name']
class ReadGroupSetCompoundId(DatasetCompoundId):
"""
The compound id for a read group set
"""
fields = DatasetCompoundId.fields + ['readGroupSet']
containerIds = DatasetCompoundId.containerIds + [('readGroupSetId', 1)]
class ReadGroupCompoundId(ReadGroupSetCompoundId):
"""
The compound id for a read group
"""
fields = ReadGroupSetCompoundId.fields + ['readGroup']
containerIds = ReadGroupSetCompoundId.containerIds + [('readGroupId', 2)]
class ReadAlignmentCompoundId(ReadGroupCompoundId):
"""
The compound id for a read alignment
"""
fields = ReadGroupCompoundId.fields + ['readAlignment']
class DatamodelObject(object):
"""
Superclass of all datamodel types. A datamodel object is a concrete
representation of some data, either a single observation (such as a
read) or an aggregated set of related observations (such as a dataset).
Every datamodel object has an ID and a localId. The ID is an identifier
which uniquely identifies the object within a server instance. The
localId is a name that identifies the object within its
parent container.
"""
compoundIdClass = None
""" The class for compoundIds. Must be set in concrete subclasses. """
def __init__(self, parentContainer, localId):
self._parentContainer = parentContainer
self._localId = localId
parentId = None
if parentContainer is not None:
parentId = parentContainer.getCompoundId()
self._compoundId = self.compoundIdClass(parentId, localId)
def getId(self):
"""
Returns the string identifying this DatamodelObject within the
server.
"""
return str(self._compoundId)
def getCompoundId(self):
"""
Returns the CompoundId instance that identifies this object
within the server.
"""
return self._compoundId
def getLocalId(self):
"""
Returns the localId of this DatamodelObject. The localId of a
DatamodelObject is a name that identifies it within its parent
container.
"""
return self._localId
def getParentContainer(self):
"""
Returns the parent container for this DatamodelObject. This is the
object that is one level above this object in the data hierarchy.
For example, for a Variant this is the VariantSet that it belongs
to.
"""
return self._parentContainer
class PysamDatamodelMixin(object):
"""
A mixin class to simplify working with DatamodelObjects based on
directories of files interpreted using pysam. This mixin is designed
to work within the DatamodelObject hierarchy.
"""
samMin = 0
samMaxStart = 2**30 - 1
samMaxEnd = 2**30
vcfMin = -2**31
vcfMax = 2**31 - 1
fastaMin = 0
fastaMax = 2**30 - 1
rNameMin = 0
rNameMax = 85
maxStringLength = 2**10 # arbitrary
@classmethod
def sanitizeVariantFileFetch(cls, contig=None, start=None, stop=None):
if contig is not None:
contig = cls.sanitizeString(contig, 'contig')
if start is not None:
start = cls.sanitizeInt(start, cls.vcfMin, cls.vcfMax, 'start')
if stop is not None:
stop = cls.sanitizeInt(stop, cls.vcfMin, cls.vcfMax, 'stop')
if start is not None and stop is not None:
cls.assertValidRange(start, stop, 'start', 'stop')
return contig, start, stop
@classmethod
def sanitizeAlignmentFileFetch(
cls, referenceName=None, start=None, end=None):
if referenceName is not None:
referenceName = cls.sanitizeString(referenceName, 'referenceName')
if start is not None:
start = cls.sanitizeInt(
start, cls.samMin, cls.samMaxStart, 'start')
if end is not None:
end = cls.sanitizeInt(end, cls.samMin, cls.samMaxEnd, 'end')
if start is not None and end is not None:
cls.assertValidRange(start, end, 'start', 'end')
return referenceName, start, end
@classmethod
def sanitizeGetRName(cls, referenceId):
cls.assertInt(referenceId, 'referenceId')
cls.assertInRange(
referenceId, cls.rNameMin, cls.rNameMax, 'referenceId')
@classmethod
def assertValidRange(cls, start, end, startName, endName):
if start > end:
message = "invalid coordinates: {} ({}) " \
"greater than {} ({})".format(startName, start, endName, end)
raise exceptions.DatamodelValidationException(message)
@classmethod
def assertInRange(cls, attr, minVal, maxVal, attrName):
message = "invalid {} '{}' outside of range [{}, {}]"
if attr < minVal:
raise exceptions.DatamodelValidationException(message.format(
attrName, attr, minVal, maxVal))
if attr > maxVal:
raise exceptions.DatamodelValidationException(message.format(
attrName, attr, minVal, maxVal))
@classmethod
def assertInt(cls, attr, attrName):
if not isinstance(attr, int):
message = "invalid {} '{}' not an int".format(attrName, attr)
raise exceptions.DatamodelValidationException(message)
@classmethod
def sanitizeInt(cls, attr, minVal, maxVal, attrName):
cls.assertInt(attr, attrName)
if attr < minVal:
attr = minVal
if attr > maxVal:
attr = maxVal
return attr
@classmethod
def sanitizeString(cls, attr, attrName):
if not isinstance(attr, basestring):
message = "invalid {} '{}' not a string".format(
attrName, attr)
raise exceptions.DatamodelValidationException(message)
if isinstance(attr, unicode):
attr = attr.encode('utf8')
if len(attr) > cls.maxStringLength:
attr = attr[:cls.maxStringLength]
return attr
def _setAccessTimes(self, directoryPath):
"""
Sets the creationTime and accessTime for this file system based
DatamodelObject. This is derived from the ctime of the specified
directoryPath.
"""
ctimeInMillis = int(os.path.getctime(directoryPath) * 1000)
self._creationTime = ctimeInMillis
self._updatedTime = ctimeInMillis
def _scanDataFiles(self, dataDir, patterns):
"""
Scans the specified directory for files with the specified globbing
pattern and calls self._addDataFile for each. Raises an
EmptyDirException if no data files are found.
"""
numDataFiles = 0
for pattern in patterns:
scanPath = os.path.join(dataDir, pattern)
for filename in glob.glob(scanPath):
self._addDataFile(filename)
numDataFiles += 1
# This is a temporary workaround to allow us to use htslib's
# facility for working with remote files. The urls.json is
# definitely not a good idea and will be replaced later.
# We make a temporary file for each process so that it
# downloads its own copy and we are sure it's not overwriting
# the copy of another process. We then register a cleanup
# handler to get rid of these files on exit.
urlSource = os.path.join(dataDir, "urls.json")
if os.path.exists(urlSource):
with open(urlSource) as jsonFile:
urls = json.load(jsonFile)["urls"]
indexDir = tempfile.mkdtemp(prefix="htslib_mess.")
cwd = os.getcwd()
os.chdir(indexDir)
for url in urls:
self._addDataFile(url)
numDataFiles += 1
os.chdir(cwd)
atexit.register(_cleanupHtslibsMess, indexDir)
if numDataFiles == 0:
raise exceptions.EmptyDirException(dataDir, patterns)
def getFileHandle(self, dataFile):
return fileHandleCache.getFileHandle(dataFile, self.openFile)
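# Usage sketch (added for illustration; not part of the original module; the
# identifiers 'dataset1' and 'variantSet1' are made up): a compound ID is the
# base64 obfuscation of its colon-joined fields, and parse() reverses it.
if __name__ == '__main__':
    cid = VariantSetCompoundId(None, 'dataset1', 'variantSet1')
    opaque = str(cid)  # base64 of "dataset1:variantSet1"
    parsed = VariantSetCompoundId.parse(opaque)
    assert parsed.dataset == 'dataset1' and parsed.variantSet == 'variantSet1'
    assert cid.datasetId == CompoundId.obfuscate('dataset1')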
|
{
"content_hash": "45c49e8a3b92b7638a714652a339c55b",
"timestamp": "",
"source": "github",
"line_count": 478,
"max_line_length": 78,
"avg_line_length": 35.22384937238494,
"alnum_prop": 0.6401971847716339,
"repo_name": "pansapiens/server",
"id": "cf48744d40d0a07571eaba63c952f1d00e6c4d2e",
"size": "16837",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "ga4gh/datamodel/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "2991"
},
{
"name": "Python",
"bytes": "742528"
},
{
"name": "Shell",
"bytes": "1085"
}
],
"symlink_target": ""
}
|
from test_framework.test_framework import BitsendTestFramework
from test_framework.util import *
from test_framework.mininode import sha256, ripemd160, CTransaction, CTxIn, COutPoint, CTxOut
from test_framework.address import script_to_p2sh, key_to_p2pkh
from test_framework.script import CScript, OP_HASH160, OP_CHECKSIG, OP_0, hash160, OP_EQUAL, OP_DUP, OP_EQUALVERIFY, OP_1, OP_2, OP_CHECKMULTISIG, OP_TRUE
from io import BytesIO
from test_framework.mininode import ToHex, FromHex, COIN
NODE_0 = 0
NODE_1 = 1
NODE_2 = 2
WIT_V0 = 0
WIT_V1 = 1
def witness_script(version, pubkey):
if (version == 0):
pubkeyhash = bytes_to_hex_str(ripemd160(sha256(hex_str_to_bytes(pubkey))))
pkscript = "0014" + pubkeyhash
elif (version == 1):
# 1-of-1 multisig
scripthash = bytes_to_hex_str(sha256(hex_str_to_bytes("5121" + pubkey + "51ae")))
pkscript = "0020" + scripthash
else:
assert("Wrong version" == "0 or 1")
return pkscript
def addlength(script):
scriptlen = format(len(script)//2, 'x')
assert(len(scriptlen) == 2)
return scriptlen + script
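# Worked example (added for illustration; not part of the original test): a
# version-0 witness program is "0014" + 20-byte HASH160(pubkey), i.e. 22 bytes,
# so addlength() prepends the single length byte 0x16:
#   addlength("0014" + "00" * 20) == "16" + "0014" + "00" * 20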
def create_witnessprogram(version, node, utxo, pubkey, encode_p2sh, amount):
pkscript = witness_script(version, pubkey)
if (encode_p2sh):
p2sh_hash = bytes_to_hex_str(ripemd160(sha256(hex_str_to_bytes(pkscript))))
pkscript = "a914"+p2sh_hash+"87"
inputs = []
outputs = {}
inputs.append({ "txid" : utxo["txid"], "vout" : utxo["vout"]} )
DUMMY_P2SH = "2MySexEGVzZpRgNQ1JdjdP5bRETznm3roQ2" # P2SH of "OP_1 OP_DROP"
outputs[DUMMY_P2SH] = amount
tx_to_witness = node.createrawtransaction(inputs,outputs)
#replace dummy output with our own
tx_to_witness = tx_to_witness[0:110] + addlength(pkscript) + tx_to_witness[-8:]
return tx_to_witness
def send_to_witness(version, node, utxo, pubkey, encode_p2sh, amount, sign=True, insert_redeem_script=""):
tx_to_witness = create_witnessprogram(version, node, utxo, pubkey, encode_p2sh, amount)
if (sign):
signed = node.signrawtransaction(tx_to_witness)
assert("errors" not in signed or len(["errors"]) == 0)
return node.sendrawtransaction(signed["hex"])
else:
if (insert_redeem_script):
tx_to_witness = tx_to_witness[0:82] + addlength(insert_redeem_script) + tx_to_witness[84:]
return node.sendrawtransaction(tx_to_witness)
def getutxo(txid):
utxo = {}
utxo["vout"] = 0
utxo["txid"] = txid
return utxo
def find_unspent(node, min_value):
for utxo in node.listunspent():
if utxo['amount'] >= min_value:
return utxo
class SegWitTest(BitsendTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 3
def setup_network(self):
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, ["-logtimemicros", "-debug", "-walletprematurewitness", "-rpcserialversion=0"]))
self.nodes.append(start_node(1, self.options.tmpdir, ["-logtimemicros", "-debug", "-blockversion=4", "-promiscuousmempoolflags=517", "-prematurewitness", "-walletprematurewitness", "-rpcserialversion=1"]))
self.nodes.append(start_node(2, self.options.tmpdir, ["-logtimemicros", "-debug", "-blockversion=536870915", "-promiscuousmempoolflags=517", "-prematurewitness", "-walletprematurewitness"]))
connect_nodes(self.nodes[1], 0)
connect_nodes(self.nodes[2], 1)
connect_nodes(self.nodes[0], 2)
self.is_network_split = False
self.sync_all()
def success_mine(self, node, txid, sign, redeem_script=""):
send_to_witness(1, node, getutxo(txid), self.pubkey[0], False, Decimal("49.998"), sign, redeem_script)
block = node.generate(1)
assert_equal(len(node.getblock(block[0])["tx"]), 2)
sync_blocks(self.nodes)
def skip_mine(self, node, txid, sign, redeem_script=""):
send_to_witness(1, node, getutxo(txid), self.pubkey[0], False, Decimal("49.998"), sign, redeem_script)
block = node.generate(1)
assert_equal(len(node.getblock(block[0])["tx"]), 1)
sync_blocks(self.nodes)
def fail_accept(self, node, txid, sign, redeem_script=""):
try:
send_to_witness(1, node, getutxo(txid), self.pubkey[0], False, Decimal("49.998"), sign, redeem_script)
except JSONRPCException as exp:
assert(exp.error["code"] == -26)
else:
raise AssertionError("Tx should not have been accepted")
def fail_mine(self, node, txid, sign, redeem_script=""):
send_to_witness(1, node, getutxo(txid), self.pubkey[0], False, Decimal("49.998"), sign, redeem_script)
try:
node.generate(1)
except JSONRPCException as exp:
assert(exp.error["code"] == -1)
else:
raise AssertionError("Created valid block when TestBlockValidity should have failed")
sync_blocks(self.nodes)
def run_test(self):
self.nodes[0].generate(161) #block 161
print("Verify sigops are counted in GBT with pre-BIP141 rules before the fork")
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1)
tmpl = self.nodes[0].getblocktemplate({})
assert(tmpl['sizelimit'] == 1000000)
assert('weightlimit' not in tmpl)
assert(tmpl['sigoplimit'] == 20000)
assert(tmpl['transactions'][0]['hash'] == txid)
assert(tmpl['transactions'][0]['sigops'] == 2)
tmpl = self.nodes[0].getblocktemplate({'rules':['segwit']})
assert(tmpl['sizelimit'] == 1000000)
assert('weightlimit' not in tmpl)
assert(tmpl['sigoplimit'] == 20000)
assert(tmpl['transactions'][0]['hash'] == txid)
assert(tmpl['transactions'][0]['sigops'] == 2)
self.nodes[0].generate(1) #block 162
balance_presetup = self.nodes[0].getbalance()
self.pubkey = []
p2sh_ids = [] # p2sh_ids[NODE][VER] is an array of txids that spend to a witness version VER pkscript to an address for NODE embedded in p2sh
wit_ids = [] # wit_ids[NODE][VER] is an array of txids that spend to a witness version VER pkscript to an address for NODE via bare witness
for i in range(3):
newaddress = self.nodes[i].getnewaddress()
self.pubkey.append(self.nodes[i].validateaddress(newaddress)["pubkey"])
multiaddress = self.nodes[i].addmultisigaddress(1, [self.pubkey[-1]])
self.nodes[i].addwitnessaddress(newaddress)
self.nodes[i].addwitnessaddress(multiaddress)
p2sh_ids.append([])
wit_ids.append([])
for v in range(2):
p2sh_ids[i].append([])
wit_ids[i].append([])
for i in range(5):
for n in range(3):
for v in range(2):
wit_ids[n][v].append(send_to_witness(v, self.nodes[0], find_unspent(self.nodes[0], 50), self.pubkey[n], False, Decimal("49.999")))
p2sh_ids[n][v].append(send_to_witness(v, self.nodes[0], find_unspent(self.nodes[0], 50), self.pubkey[n], True, Decimal("49.999")))
self.nodes[0].generate(1) #block 163
sync_blocks(self.nodes)
# Make sure all nodes recognize the transactions as theirs
assert_equal(self.nodes[0].getbalance(), balance_presetup - 60*50 + 20*Decimal("49.999") + 50)
assert_equal(self.nodes[1].getbalance(), 20*Decimal("49.999"))
assert_equal(self.nodes[2].getbalance(), 20*Decimal("49.999"))
self.nodes[0].generate(260) #block 423
sync_blocks(self.nodes)
print("Verify default node can't accept any witness format txs before fork")
# unsigned, no scriptsig
self.fail_accept(self.nodes[0], wit_ids[NODE_0][WIT_V0][0], False)
self.fail_accept(self.nodes[0], wit_ids[NODE_0][WIT_V1][0], False)
self.fail_accept(self.nodes[0], p2sh_ids[NODE_0][WIT_V0][0], False)
self.fail_accept(self.nodes[0], p2sh_ids[NODE_0][WIT_V1][0], False)
# unsigned with redeem script
self.fail_accept(self.nodes[0], p2sh_ids[NODE_0][WIT_V0][0], False, addlength(witness_script(0, self.pubkey[0])))
self.fail_accept(self.nodes[0], p2sh_ids[NODE_0][WIT_V1][0], False, addlength(witness_script(1, self.pubkey[0])))
# signed
self.fail_accept(self.nodes[0], wit_ids[NODE_0][WIT_V0][0], True)
self.fail_accept(self.nodes[0], wit_ids[NODE_0][WIT_V1][0], True)
self.fail_accept(self.nodes[0], p2sh_ids[NODE_0][WIT_V0][0], True)
self.fail_accept(self.nodes[0], p2sh_ids[NODE_0][WIT_V1][0], True)
print("Verify witness txs are skipped for mining before the fork")
self.skip_mine(self.nodes[2], wit_ids[NODE_2][WIT_V0][0], True) #block 424
self.skip_mine(self.nodes[2], wit_ids[NODE_2][WIT_V1][0], True) #block 425
self.skip_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V0][0], True) #block 426
self.skip_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V1][0], True) #block 427
# TODO: An old node would see these txs without witnesses and be able to mine them
print("Verify unsigned bare witness txs in versionbits-setting blocks are valid before the fork")
self.success_mine(self.nodes[2], wit_ids[NODE_2][WIT_V0][1], False) #block 428
self.success_mine(self.nodes[2], wit_ids[NODE_2][WIT_V1][1], False) #block 429
print("Verify unsigned p2sh witness txs without a redeem script are invalid")
self.fail_accept(self.nodes[2], p2sh_ids[NODE_2][WIT_V0][1], False)
self.fail_accept(self.nodes[2], p2sh_ids[NODE_2][WIT_V1][1], False)
print("Verify unsigned p2sh witness txs with a redeem script in versionbits-settings blocks are valid before the fork")
self.success_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V0][1], False, addlength(witness_script(0, self.pubkey[2]))) #block 430
self.success_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V1][1], False, addlength(witness_script(1, self.pubkey[2]))) #block 431
print("Verify previous witness txs skipped for mining can now be mined")
assert_equal(len(self.nodes[2].getrawmempool()), 4)
block = self.nodes[2].generate(1) #block 432 (first block with new rules; 432 = 144 * 3)
sync_blocks(self.nodes)
assert_equal(len(self.nodes[2].getrawmempool()), 0)
segwit_tx_list = self.nodes[2].getblock(block[0])["tx"]
assert_equal(len(segwit_tx_list), 5)
print("Verify block and transaction serialization rpcs return differing serializations depending on rpc serialization flag")
assert(self.nodes[2].getblock(block[0], False) != self.nodes[0].getblock(block[0], False))
assert(self.nodes[1].getblock(block[0], False) == self.nodes[2].getblock(block[0], False))
for i in range(len(segwit_tx_list)):
tx = FromHex(CTransaction(), self.nodes[2].gettransaction(segwit_tx_list[i])["hex"])
assert(self.nodes[2].getrawtransaction(segwit_tx_list[i]) != self.nodes[0].getrawtransaction(segwit_tx_list[i]))
assert(self.nodes[1].getrawtransaction(segwit_tx_list[i], 0) == self.nodes[2].getrawtransaction(segwit_tx_list[i]))
assert(self.nodes[0].getrawtransaction(segwit_tx_list[i]) != self.nodes[2].gettransaction(segwit_tx_list[i])["hex"])
assert(self.nodes[1].getrawtransaction(segwit_tx_list[i]) == self.nodes[2].gettransaction(segwit_tx_list[i])["hex"])
assert(self.nodes[0].getrawtransaction(segwit_tx_list[i]) == bytes_to_hex_str(tx.serialize_without_witness()))
print("Verify witness txs without witness data are invalid after the fork")
self.fail_mine(self.nodes[2], wit_ids[NODE_2][WIT_V0][2], False)
self.fail_mine(self.nodes[2], wit_ids[NODE_2][WIT_V1][2], False)
self.fail_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V0][2], False, addlength(witness_script(0, self.pubkey[2])))
self.fail_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V1][2], False, addlength(witness_script(1, self.pubkey[2])))
print("Verify default node can now use witness txs")
self.success_mine(self.nodes[0], wit_ids[NODE_0][WIT_V0][0], True) #block 432
self.success_mine(self.nodes[0], wit_ids[NODE_0][WIT_V1][0], True) #block 433
self.success_mine(self.nodes[0], p2sh_ids[NODE_0][WIT_V0][0], True) #block 434
self.success_mine(self.nodes[0], p2sh_ids[NODE_0][WIT_V1][0], True) #block 435
print("Verify sigops are counted in GBT with BIP141 rules after the fork")
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1)
tmpl = self.nodes[0].getblocktemplate({'rules':['segwit']})
assert(tmpl['sizelimit'] >= 3999577) # actual maximum size is lower due to minimum mandatory non-witness data
assert(tmpl['weightlimit'] == 4000000)
assert(tmpl['sigoplimit'] == 80000)
assert(tmpl['transactions'][0]['txid'] == txid)
assert(tmpl['transactions'][0]['sigops'] == 8)
self.nodes[0].generate(1) # Mine a block to clear the gbt cache
print("Non-segwit miners are able to use GBT response after activation.")
# Create a 3-tx chain: tx1 (non-segwit input, paying to a segwit output) ->
# tx2 (segwit input, paying to a non-segwit output) ->
# tx3 (non-segwit input, paying to a non-segwit output).
# tx1 is allowed to appear in the block, but no others.
txid1 = send_to_witness(1, self.nodes[0], find_unspent(self.nodes[0], 50), self.pubkey[0], False, Decimal("49.996"))
        hex_tx = self.nodes[0].gettransaction(txid1)['hex']
tx = FromHex(CTransaction(), hex_tx)
assert(tx.wit.is_null()) # This should not be a segwit input
assert(txid1 in self.nodes[0].getrawmempool())
# Now create tx2, which will spend from txid1.
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(int(txid1, 16), 0), b''))
tx.vout.append(CTxOut(int(49.99*COIN), CScript([OP_TRUE])))
tx2_hex = self.nodes[0].signrawtransaction(ToHex(tx))['hex']
txid2 = self.nodes[0].sendrawtransaction(tx2_hex)
tx = FromHex(CTransaction(), tx2_hex)
assert(not tx.wit.is_null())
# Now create tx3, which will spend from txid2
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(int(txid2, 16), 0), b""))
tx.vout.append(CTxOut(int(49.95*COIN), CScript([OP_TRUE]))) # Huge fee
tx.calc_sha256()
txid3 = self.nodes[0].sendrawtransaction(ToHex(tx))
assert(tx.wit.is_null())
assert(txid3 in self.nodes[0].getrawmempool())
# Now try calling getblocktemplate() without segwit support.
template = self.nodes[0].getblocktemplate()
# Check that tx1 is the only transaction of the 3 in the template.
template_txids = [ t['txid'] for t in template['transactions'] ]
assert(txid2 not in template_txids and txid3 not in template_txids)
assert(txid1 in template_txids)
# Check that running with segwit support results in all 3 being included.
template = self.nodes[0].getblocktemplate({"rules": ["segwit"]})
template_txids = [ t['txid'] for t in template['transactions'] ]
assert(txid1 in template_txids)
assert(txid2 in template_txids)
assert(txid3 in template_txids)
# Mine a block to clear the gbt cache again.
self.nodes[0].generate(1)
print("Verify behaviour of importaddress, addwitnessaddress and listunspent")
# Some public keys to be used later
pubkeys = [
"0363D44AABD0F1699138239DF2F042C3282C0671CC7A76826A55C8203D90E39242", # cPiM8Ub4heR9NBYmgVzJQiUH1if44GSBGiqaeJySuL2BKxubvgwb
"02D3E626B3E616FC8662B489C123349FECBFC611E778E5BE739B257EAE4721E5BF", # cPpAdHaD6VoYbW78kveN2bsvb45Q7G5PhaPApVUGwvF8VQ9brD97
"04A47F2CBCEFFA7B9BCDA184E7D5668D3DA6F9079AD41E422FA5FD7B2D458F2538A62F5BD8EC85C2477F39650BD391EA6250207065B2A81DA8B009FC891E898F0E", # 91zqCU5B9sdWxzMt1ca3VzbtVm2YM6Hi5Rxn4UDtxEaN9C9nzXV
"02A47F2CBCEFFA7B9BCDA184E7D5668D3DA6F9079AD41E422FA5FD7B2D458F2538", # cPQFjcVRpAUBG8BA9hzr2yEzHwKoMgLkJZBBtK9vJnvGJgMjzTbd
"036722F784214129FEB9E8129D626324F3F6716555B603FFE8300BBCB882151228", # cQGtcm34xiLjB1v7bkRa4V3aAc9tS2UTuBZ1UnZGeSeNy627fN66
"0266A8396EE936BF6D99D17920DB21C6C7B1AB14C639D5CD72B300297E416FD2EC", # cTW5mR5M45vHxXkeChZdtSPozrFwFgmEvTNnanCW6wrqwaCZ1X7K
"0450A38BD7F0AC212FEBA77354A9B036A32E0F7C81FC4E0C5ADCA7C549C4505D2522458C2D9AE3CEFD684E039194B72C8A10F9CB9D4764AB26FCC2718D421D3B84", # 92h2XPssjBpsJN5CqSP7v9a7cf2kgDunBC6PDFwJHMACM1rrVBJ
]
# Import a compressed key and an uncompressed key, generate some multisig addresses
self.nodes[0].importprivkey("92e6XLo5jVAVwrQKPNTs93oQco8f8sDNBcpv73Dsrs397fQtFQn")
uncompressed_spendable_address = ["mvozP4UwyGD2mGZU4D2eMvMLPB9WkMmMQu"]
self.nodes[0].importprivkey("cNC8eQ5dg3mFAVePDX4ddmPYpPbw41r9bm2jd1nLJT77e6RrzTRR")
compressed_spendable_address = ["mmWQubrDomqpgSYekvsU7HWEVjLFHAakLe"]
assert ((self.nodes[0].validateaddress(uncompressed_spendable_address[0])['iscompressed'] == False))
assert ((self.nodes[0].validateaddress(compressed_spendable_address[0])['iscompressed'] == True))
self.nodes[0].importpubkey(pubkeys[0])
compressed_solvable_address = [key_to_p2pkh(pubkeys[0])]
self.nodes[0].importpubkey(pubkeys[1])
compressed_solvable_address.append(key_to_p2pkh(pubkeys[1]))
self.nodes[0].importpubkey(pubkeys[2])
uncompressed_solvable_address = [key_to_p2pkh(pubkeys[2])]
spendable_anytime = [] # These outputs should be seen anytime after importprivkey and addmultisigaddress
spendable_after_importaddress = [] # These outputs should be seen after importaddress
solvable_after_importaddress = [] # These outputs should be seen after importaddress but not spendable
unsolvable_after_importaddress = [] # These outputs should be unsolvable after importaddress
solvable_anytime = [] # These outputs should be solvable after importpubkey
unseen_anytime = [] # These outputs should never be seen
uncompressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [uncompressed_spendable_address[0], compressed_spendable_address[0]]))
uncompressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [uncompressed_spendable_address[0], uncompressed_spendable_address[0]]))
compressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], compressed_spendable_address[0]]))
uncompressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], uncompressed_solvable_address[0]]))
compressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], compressed_solvable_address[0]]))
compressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_solvable_address[0], compressed_solvable_address[1]]))
unknown_address = ["mtKKyoHabkk6e4ppT7NaM7THqPUt7AzPrT", "2NDP3jLWAFT8NDAiUa9qiE6oBt2awmMq7Dx"]
# Test multisig_without_privkey
# We have 2 public keys without private keys, use addmultisigaddress to add to wallet.
# Money sent to P2SH of multisig of this should only be seen after importaddress with the BASE58 P2SH address.
multisig_without_privkey_address = self.nodes[0].addmultisigaddress(2, [pubkeys[3], pubkeys[4]])
script = CScript([OP_2, hex_str_to_bytes(pubkeys[3]), hex_str_to_bytes(pubkeys[4]), OP_2, OP_CHECKMULTISIG])
solvable_after_importaddress.append(CScript([OP_HASH160, hash160(script), OP_EQUAL]))
for i in compressed_spendable_address:
v = self.nodes[0].validateaddress(i)
if (v['isscript']):
[bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
# bare and p2sh multisig with compressed keys should always be spendable
spendable_anytime.extend([bare, p2sh])
# P2WSH and P2SH(P2WSH) multisig with compressed keys are spendable after direct importaddress
spendable_after_importaddress.extend([p2wsh, p2sh_p2wsh])
else:
[p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
# normal P2PKH and P2PK with compressed keys should always be spendable
spendable_anytime.extend([p2pkh, p2pk])
# P2SH_P2PK, P2SH_P2PKH, and witness with compressed keys are spendable after direct importaddress
spendable_after_importaddress.extend([p2wpkh, p2sh_p2wpkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh])
for i in uncompressed_spendable_address:
v = self.nodes[0].validateaddress(i)
if (v['isscript']):
[bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
# bare and p2sh multisig with uncompressed keys should always be spendable
spendable_anytime.extend([bare, p2sh])
# P2WSH and P2SH(P2WSH) multisig with uncompressed keys are never seen
unseen_anytime.extend([p2wsh, p2sh_p2wsh])
else:
[p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
# normal P2PKH and P2PK with uncompressed keys should always be spendable
spendable_anytime.extend([p2pkh, p2pk])
# P2SH_P2PK and P2SH_P2PKH are spendable after direct importaddress
spendable_after_importaddress.extend([p2sh_p2pk, p2sh_p2pkh])
# witness with uncompressed keys are never seen
unseen_anytime.extend([p2wpkh, p2sh_p2wpkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh])
for i in compressed_solvable_address:
v = self.nodes[0].validateaddress(i)
if (v['isscript']):
# Multisig without private is not seen after addmultisigaddress, but seen after importaddress
[bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
solvable_after_importaddress.extend([bare, p2sh, p2wsh, p2sh_p2wsh])
else:
[p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
# normal P2PKH and P2PK with compressed keys should always be seen
solvable_anytime.extend([p2pkh, p2pk])
# P2SH_P2PK, P2SH_P2PKH, and witness with compressed keys are seen after direct importaddress
solvable_after_importaddress.extend([p2wpkh, p2sh_p2wpkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh])
for i in uncompressed_solvable_address:
v = self.nodes[0].validateaddress(i)
if (v['isscript']):
[bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
# Base uncompressed multisig without private is not seen after addmultisigaddress, but seen after importaddress
solvable_after_importaddress.extend([bare, p2sh])
# P2WSH and P2SH(P2WSH) multisig with uncompressed keys are never seen
unseen_anytime.extend([p2wsh, p2sh_p2wsh])
else:
[p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
# normal P2PKH and P2PK with uncompressed keys should always be seen
solvable_anytime.extend([p2pkh, p2pk])
# P2SH_P2PK, P2SH_P2PKH with uncompressed keys are seen after direct importaddress
solvable_after_importaddress.extend([p2sh_p2pk, p2sh_p2pkh])
# witness with uncompressed keys are never seen
unseen_anytime.extend([p2wpkh, p2sh_p2wpkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh])
op1 = CScript([OP_1])
op0 = CScript([OP_0])
# 2N7MGY19ti4KDMSzRfPAssP6Pxyuxoi6jLe is the P2SH(P2PKH) version of mjoE3sSrb8ByYEvgnC3Aox86u1CHnfJA4V
unsolvable_address = ["mjoE3sSrb8ByYEvgnC3Aox86u1CHnfJA4V", "2N7MGY19ti4KDMSzRfPAssP6Pxyuxoi6jLe", script_to_p2sh(op1), script_to_p2sh(op0)]
unsolvable_address_key = hex_str_to_bytes("02341AEC7587A51CDE5279E0630A531AEA2615A9F80B17E8D9376327BAEAA59E3D")
unsolvablep2pkh = CScript([OP_DUP, OP_HASH160, hash160(unsolvable_address_key), OP_EQUALVERIFY, OP_CHECKSIG])
unsolvablep2wshp2pkh = CScript([OP_0, sha256(unsolvablep2pkh)])
p2shop0 = CScript([OP_HASH160, hash160(op0), OP_EQUAL])
p2wshop1 = CScript([OP_0, sha256(op1)])
unsolvable_after_importaddress.append(unsolvablep2pkh)
unsolvable_after_importaddress.append(unsolvablep2wshp2pkh)
unsolvable_after_importaddress.append(op1) # OP_1 will be imported as script
unsolvable_after_importaddress.append(p2wshop1)
unseen_anytime.append(op0) # OP_0 will be imported as P2SH address with no script provided
unsolvable_after_importaddress.append(p2shop0)
spendable_txid = []
solvable_txid = []
spendable_txid.append(self.mine_and_test_listunspent(spendable_anytime, 2))
solvable_txid.append(self.mine_and_test_listunspent(solvable_anytime, 1))
self.mine_and_test_listunspent(spendable_after_importaddress + solvable_after_importaddress + unseen_anytime + unsolvable_after_importaddress, 0)
importlist = []
for i in compressed_spendable_address + uncompressed_spendable_address + compressed_solvable_address + uncompressed_solvable_address:
v = self.nodes[0].validateaddress(i)
if (v['isscript']):
bare = hex_str_to_bytes(v['hex'])
importlist.append(bytes_to_hex_str(bare))
importlist.append(bytes_to_hex_str(CScript([OP_0, sha256(bare)])))
else:
pubkey = hex_str_to_bytes(v['pubkey'])
p2pk = CScript([pubkey, OP_CHECKSIG])
p2pkh = CScript([OP_DUP, OP_HASH160, hash160(pubkey), OP_EQUALVERIFY, OP_CHECKSIG])
importlist.append(bytes_to_hex_str(p2pk))
importlist.append(bytes_to_hex_str(p2pkh))
importlist.append(bytes_to_hex_str(CScript([OP_0, hash160(pubkey)])))
importlist.append(bytes_to_hex_str(CScript([OP_0, sha256(p2pk)])))
importlist.append(bytes_to_hex_str(CScript([OP_0, sha256(p2pkh)])))
importlist.append(bytes_to_hex_str(unsolvablep2pkh))
importlist.append(bytes_to_hex_str(unsolvablep2wshp2pkh))
importlist.append(bytes_to_hex_str(op1))
importlist.append(bytes_to_hex_str(p2wshop1))
for i in importlist:
try:
self.nodes[0].importaddress(i,"",False,True)
except JSONRPCException as exp:
assert_equal(exp.error["message"], "The wallet already contains the private key for this address or script")
self.nodes[0].importaddress(script_to_p2sh(op0)) # import OP_0 as address only
self.nodes[0].importaddress(multisig_without_privkey_address) # Test multisig_without_privkey
spendable_txid.append(self.mine_and_test_listunspent(spendable_anytime + spendable_after_importaddress, 2))
solvable_txid.append(self.mine_and_test_listunspent(solvable_anytime + solvable_after_importaddress, 1))
self.mine_and_test_listunspent(unsolvable_after_importaddress, 1)
self.mine_and_test_listunspent(unseen_anytime, 0)
# addwitnessaddress should refuse to return a witness address if an uncompressed key is used or the address is
# not in the wallet
# note that no witness address should be returned by unsolvable addresses
# the multisig_without_privkey_address will fail because its keys were not added with importpubkey
for i in uncompressed_spendable_address + uncompressed_solvable_address + unknown_address + unsolvable_address + [multisig_without_privkey_address]:
try:
self.nodes[0].addwitnessaddress(i)
except JSONRPCException as exp:
assert_equal(exp.error["message"], "Public key or redeemscript not known to wallet, or the key is uncompressed")
else:
assert(False)
for i in compressed_spendable_address + compressed_solvable_address:
witaddress = self.nodes[0].addwitnessaddress(i)
# addwitnessaddress should return the same address if it is a known P2SH-witness address
assert_equal(witaddress, self.nodes[0].addwitnessaddress(witaddress))
spendable_txid.append(self.mine_and_test_listunspent(spendable_anytime + spendable_after_importaddress, 2))
solvable_txid.append(self.mine_and_test_listunspent(solvable_anytime + solvable_after_importaddress, 1))
self.mine_and_test_listunspent(unsolvable_after_importaddress, 1)
self.mine_and_test_listunspent(unseen_anytime, 0)
# Repeat some tests. This time we don't add witness scripts with importaddress
# Import a compressed key and an uncompressed key, generate some multisig addresses
self.nodes[0].importprivkey("927pw6RW8ZekycnXqBQ2JS5nPyo1yRfGNN8oq74HeddWSpafDJH")
uncompressed_spendable_address = ["mguN2vNSCEUh6rJaXoAVwY3YZwZvEmf5xi"]
self.nodes[0].importprivkey("cMcrXaaUC48ZKpcyydfFo8PxHAjpsYLhdsp6nmtB3E2ER9UUHWnw")
compressed_spendable_address = ["n1UNmpmbVUJ9ytXYXiurmGPQ3TRrXqPWKL"]
self.nodes[0].importpubkey(pubkeys[5])
compressed_solvable_address = [key_to_p2pkh(pubkeys[5])]
self.nodes[0].importpubkey(pubkeys[6])
uncompressed_solvable_address = [key_to_p2pkh(pubkeys[6])]
        spendable_after_addwitnessaddress = [] # These outputs should be seen after addwitnessaddress
        solvable_after_addwitnessaddress = [] # These outputs should be seen after addwitnessaddress but not spendable
unseen_anytime = [] # These outputs should never be seen
uncompressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [uncompressed_spendable_address[0], compressed_spendable_address[0]]))
uncompressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [uncompressed_spendable_address[0], uncompressed_spendable_address[0]]))
compressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], compressed_spendable_address[0]]))
uncompressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_solvable_address[0], uncompressed_solvable_address[0]]))
compressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], compressed_solvable_address[0]]))
premature_witaddress = []
for i in compressed_spendable_address:
v = self.nodes[0].validateaddress(i)
if (v['isscript']):
[bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
# P2WSH and P2SH(P2WSH) multisig with compressed keys are spendable after addwitnessaddress
spendable_after_addwitnessaddress.extend([p2wsh, p2sh_p2wsh])
premature_witaddress.append(script_to_p2sh(p2wsh))
else:
[p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
# P2WPKH, P2SH_P2WPKH are spendable after addwitnessaddress
spendable_after_addwitnessaddress.extend([p2wpkh, p2sh_p2wpkh])
premature_witaddress.append(script_to_p2sh(p2wpkh))
for i in uncompressed_spendable_address + uncompressed_solvable_address:
v = self.nodes[0].validateaddress(i)
if (v['isscript']):
[bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
# P2WSH and P2SH(P2WSH) multisig with uncompressed keys are never seen
unseen_anytime.extend([p2wsh, p2sh_p2wsh])
else:
[p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
# P2WPKH, P2SH_P2WPKH with uncompressed keys are never seen
unseen_anytime.extend([p2wpkh, p2sh_p2wpkh])
for i in compressed_solvable_address:
v = self.nodes[0].validateaddress(i)
if (v['isscript']):
# P2WSH multisig without private key are seen after addwitnessaddress
[bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
solvable_after_addwitnessaddress.extend([p2wsh, p2sh_p2wsh])
premature_witaddress.append(script_to_p2sh(p2wsh))
else:
[p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
                # P2WPKH and P2SH(P2WPKH) with compressed keys are seen after addwitnessaddress
solvable_after_addwitnessaddress.extend([p2wpkh, p2sh_p2wpkh])
premature_witaddress.append(script_to_p2sh(p2wpkh))
self.mine_and_test_listunspent(spendable_after_addwitnessaddress + solvable_after_addwitnessaddress + unseen_anytime, 0)
# addwitnessaddress should refuse to return a witness address if an uncompressed key is used
# note that a multisig address returned by addmultisigaddress is not solvable until it is added with importaddress
# premature_witaddress are not accepted until the script is added with addwitnessaddress first
for i in uncompressed_spendable_address + uncompressed_solvable_address + premature_witaddress + [compressed_solvable_address[1]]:
try:
self.nodes[0].addwitnessaddress(i)
except JSONRPCException as exp:
assert_equal(exp.error["message"], "Public key or redeemscript not known to wallet, or the key is uncompressed")
else:
assert(False)
# after importaddress it should pass addwitnessaddress
v = self.nodes[0].validateaddress(compressed_solvable_address[1])
self.nodes[0].importaddress(v['hex'],"",False,True)
for i in compressed_spendable_address + compressed_solvable_address + premature_witaddress:
witaddress = self.nodes[0].addwitnessaddress(i)
assert_equal(witaddress, self.nodes[0].addwitnessaddress(witaddress))
spendable_txid.append(self.mine_and_test_listunspent(spendable_after_addwitnessaddress, 2))
solvable_txid.append(self.mine_and_test_listunspent(solvable_after_addwitnessaddress, 1))
self.mine_and_test_listunspent(unseen_anytime, 0)
# Check that spendable outputs are really spendable
self.create_and_mine_tx_from_txids(spendable_txid)
# import all the private keys so solvable addresses become spendable
self.nodes[0].importprivkey("cPiM8Ub4heR9NBYmgVzJQiUH1if44GSBGiqaeJySuL2BKxubvgwb")
self.nodes[0].importprivkey("cPpAdHaD6VoYbW78kveN2bsvb45Q7G5PhaPApVUGwvF8VQ9brD97")
self.nodes[0].importprivkey("91zqCU5B9sdWxzMt1ca3VzbtVm2YM6Hi5Rxn4UDtxEaN9C9nzXV")
self.nodes[0].importprivkey("cPQFjcVRpAUBG8BA9hzr2yEzHwKoMgLkJZBBtK9vJnvGJgMjzTbd")
self.nodes[0].importprivkey("cQGtcm34xiLjB1v7bkRa4V3aAc9tS2UTuBZ1UnZGeSeNy627fN66")
self.nodes[0].importprivkey("cTW5mR5M45vHxXkeChZdtSPozrFwFgmEvTNnanCW6wrqwaCZ1X7K")
self.create_and_mine_tx_from_txids(solvable_txid)
def mine_and_test_listunspent(self, script_list, ismine):
utxo = find_unspent(self.nodes[0], 50)
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(int('0x'+utxo['txid'],0), utxo['vout'])))
for i in script_list:
tx.vout.append(CTxOut(10000000, i))
tx.rehash()
signresults = self.nodes[0].signrawtransaction(bytes_to_hex_str(tx.serialize_without_witness()))['hex']
txid = self.nodes[0].sendrawtransaction(signresults, True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
watchcount = 0
spendcount = 0
for i in self.nodes[0].listunspent():
if (i['txid'] == txid):
watchcount += 1
if (i['spendable'] == True):
spendcount += 1
if (ismine == 2):
assert_equal(spendcount, len(script_list))
elif (ismine == 1):
assert_equal(watchcount, len(script_list))
assert_equal(spendcount, 0)
else:
assert_equal(watchcount, 0)
return txid
def p2sh_address_to_script(self,v):
bare = CScript(hex_str_to_bytes(v['hex']))
p2sh = CScript(hex_str_to_bytes(v['scriptPubKey']))
p2wsh = CScript([OP_0, sha256(bare)])
p2sh_p2wsh = CScript([OP_HASH160, hash160(p2wsh), OP_EQUAL])
return([bare, p2sh, p2wsh, p2sh_p2wsh])
def p2pkh_address_to_script(self,v):
pubkey = hex_str_to_bytes(v['pubkey'])
p2wpkh = CScript([OP_0, hash160(pubkey)])
p2sh_p2wpkh = CScript([OP_HASH160, hash160(p2wpkh), OP_EQUAL])
p2pk = CScript([pubkey, OP_CHECKSIG])
p2pkh = CScript(hex_str_to_bytes(v['scriptPubKey']))
p2sh_p2pk = CScript([OP_HASH160, hash160(p2pk), OP_EQUAL])
p2sh_p2pkh = CScript([OP_HASH160, hash160(p2pkh), OP_EQUAL])
p2wsh_p2pk = CScript([OP_0, sha256(p2pk)])
p2wsh_p2pkh = CScript([OP_0, sha256(p2pkh)])
p2sh_p2wsh_p2pk = CScript([OP_HASH160, hash160(p2wsh_p2pk), OP_EQUAL])
p2sh_p2wsh_p2pkh = CScript([OP_HASH160, hash160(p2wsh_p2pkh), OP_EQUAL])
return [p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh]
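    # Note added for readability (not part of the original test): the ten scripts returned
    # above are, in order, P2WPKH, P2SH(P2WPKH), P2PK, P2PKH, P2SH(P2PK), P2SH(P2PKH),
    # P2WSH(P2PK), P2WSH(P2PKH), P2SH(P2WSH(P2PK)) and P2SH(P2WSH(P2PKH)); the destructuring
    # assignments in run_test unpack them under the same names.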
def create_and_mine_tx_from_txids(self, txids, success = True):
tx = CTransaction()
for i in txids:
txtmp = CTransaction()
txraw = self.nodes[0].getrawtransaction(i)
f = BytesIO(hex_str_to_bytes(txraw))
txtmp.deserialize(f)
for j in range(len(txtmp.vout)):
tx.vin.append(CTxIn(COutPoint(int('0x'+i,0), j)))
tx.vout.append(CTxOut(0, CScript()))
tx.rehash()
signresults = self.nodes[0].signrawtransaction(bytes_to_hex_str(tx.serialize_without_witness()))['hex']
self.nodes[0].sendrawtransaction(signresults, True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
if __name__ == '__main__':
SegWitTest().main()
|
{
"content_hash": "70b3b50228a99ae06addd7224134ffe2",
"timestamp": "",
"source": "github",
"line_count": 650,
"max_line_length": 213,
"avg_line_length": 60.58307692307692,
"alnum_prop": 0.6607582721755251,
"repo_name": "madzebra/BitSend",
"id": "c43e7377ea7e6deab670ac7c8cbcdc785fcf67b0",
"size": "39629",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "qa/rpc-tests/segwit.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "36095"
},
{
"name": "C",
"bytes": "4046688"
},
{
"name": "C++",
"bytes": "5810551"
},
{
"name": "CSS",
"bytes": "68816"
},
{
"name": "HTML",
"bytes": "50622"
},
{
"name": "Java",
"bytes": "30290"
},
{
"name": "M4",
"bytes": "277388"
},
{
"name": "Makefile",
"bytes": "114046"
},
{
"name": "Objective-C",
"bytes": "7725"
},
{
"name": "Objective-C++",
"bytes": "7240"
},
{
"name": "Protocol Buffer",
"bytes": "2328"
},
{
"name": "Python",
"bytes": "1202074"
},
{
"name": "QMake",
"bytes": "13495"
},
{
"name": "Roff",
"bytes": "18118"
},
{
"name": "Shell",
"bytes": "415657"
}
],
"symlink_target": ""
}
|
from operator import itemgetter, attrgetter
import random
import sys
import os
import math
import re
# GLOBAL VARIABLES
genetic_code = {
'0000':'0',
'0001':'1',
'0010':'2',
'0011':'3',
'0100':'4',
'0101':'5',
'0110':'6',
'0111':'7',
'1000':'8',
'1001':'9',
'1010':'+',
'1011':'-',
'1100':'*',
'1101':'/'
}
solution_found = False
popN = 100 # n number of chromos per population
genesPerCh = 75
max_iterations = 1000
target = 1111.0
crossover_rate = 0.7
mutation_rate = 0.05
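# Note added for clarity: each chromosome is genesPerCh * 4 = 300 random bits; translate()
# decodes it four bits at a time through genetic_code, skipping the unused codes 1110/1111
# and enforcing an alternating digit/operator pattern.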
"""Generates random population of chromos"""
def generatePop ():
chromos, chromo = [], []
for eachChromo in range(popN):
chromo = []
for bit in range(genesPerCh * 4):
chromo.append(random.randint(0,1))
chromos.append(chromo)
return chromos
"""Takes a binary list (chromo) and returns a protein (mathematical expression in string)"""
def translate (chromo):
protein, chromo_string = '',''
need_int = True
a, b = 0, 4 # ie from point a to point b (start to stop point in string)
for bit in chromo:
chromo_string += str(bit)
for gene in range(genesPerCh):
if chromo_string[a:b] == '1111' or chromo_string[a:b] == '1110':
continue
elif chromo_string[a:b] != '1010' and chromo_string[a:b] != '1011' and chromo_string[a:b] != '1100' and chromo_string[a:b] != '1101':
if need_int == True:
protein += genetic_code[chromo_string[a:b]]
need_int = False
a += 4
b += 4
continue
else:
a += 4
b += 4
continue
else:
if need_int == False:
protein += genetic_code[chromo_string[a:b]]
need_int = True
a += 4
b += 4
continue
else:
a += 4
b += 4
continue
if len(protein) %2 == 0:
protein = protein[:-1]
return protein
"""Evaluates the mathematical expressions in number + operator blocks of two"""
def evaluate(protein):
a = 3
b = 5
output = -1
lenprotein = len(protein) # i imagine this is quicker than calling len everytime?
if lenprotein == 0:
output = 0
if lenprotein == 1:
output = int(protein)
if lenprotein >= 3:
try :
output = eval(protein[0:3])
except ZeroDivisionError:
output = 0
if lenprotein > 4:
while b != lenprotein+2:
try :
output = eval(str(output)+protein[a:b])
except ZeroDivisionError:
output = 0
a+=2
b+=2
return output
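# Worked example added for illustration (defined here but never called by the recipe): it
# shows the block-of-two, left-to-right evaluation, which deliberately ignores operator
# precedence.
def example_evaluate():
    # the genes 0110 1010 0101 1100 0100 translate to the protein "6+5*4"
    assert evaluate("6+5*4") == 44 # eval("6+5") -> 11, then eval("11*4") -> 44
    assert evaluate("7") == 7 # a single digit is returned as an int
    assert evaluate("") == 0 # an empty protein evaluates to 0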
"""Calulates fitness as a fraction of the total fitness"""
def calcFitness (errors):
fitnessScores = []
totalError = sum(errors)
i = 0
# fitness scores are a fraction of the total error
for error in errors:
fitnessScores.append (float(errors[i])/float(totalError))
i += 1
return fitnessScores
def displayFit (error):
bestFitDisplay = 100
dashesN = int(error * bestFitDisplay)
dashes = ''
for j in range(bestFitDisplay-dashesN):
dashes+=' '
for i in range(dashesN):
dashes+='+'
return dashes
"""Takes a population of chromosomes and returns a list of tuples where each chromo is paired to its fitness scores and ranked accroding to its fitness"""
def rankPop (chromos):
proteins, outputs, errors = [], [], []
i = 1
# translate each chromo into mathematical expression (protein), evaluate the output of the expression,
# calculate the inverse error of the output
print '%s: %s\t=%s \t%s %s' %('n'.rjust(5), 'PROTEIN'.rjust(30), 'OUTPUT'.rjust(10), 'INVERSE ERROR'.rjust(17), 'GRAPHICAL INVERSE ERROR'.rjust(105))
for chromo in chromos:
protein = translate(chromo)
proteins.append(protein)
output = evaluate(protein)
outputs.append(output)
try:
error = 1/math.fabs(target-output)
except ZeroDivisionError:
global solution_found
solution_found = True
error = 0
print '\nSOLUTION FOUND'
print '%s: %s \t=%s %s' %(str(i).rjust(5), protein.rjust(30), str(output).rjust(10), displayFit(1.3).rjust(130))
break
else:
#error = 1/math.fabs(target-output)
errors.append(error)
print '%s: %s \t=%s \t%s %s' %(str(i).rjust(5), protein.rjust(30), str(output).rjust(10), str(error).rjust(17), displayFit(error).rjust(105))
i+=1
    fitnessScores = calcFitness (errors) # calc fitness scores from the errors calculated
    pairedPop = zip ( chromos, proteins, outputs, fitnessScores) # pair each chromo with its protein, output and fitness score
    rankedPop = sorted ( pairedPop,key = itemgetter(-1), reverse = True ) # sort the paired pop by descending fitness score
return rankedPop
""" taking a ranked population selects two of the fittest members using roulette method"""
def selectFittest (fitnessScores, rankedChromos):
    while 1 == 1: # ensure that the chromosomes selected for breeding have different indexes in the population
index1 = roulette (fitnessScores)
index2 = roulette (fitnessScores)
if index1 == index2:
continue
else:
break
ch1 = rankedChromos[index1] # select and return chromosomes for breeding
ch2 = rankedChromos[index2]
return ch1, ch2
"""Fitness scores are fractions, their sum = 1. Fitter chromosomes have a larger fraction. """
def roulette (fitnessScores):
index = 0
cumalativeFitness = 0.0
r = random.random()
for i in range(len(fitnessScores)): # for each chromosome's fitness score
cumalativeFitness += fitnessScores[i] # add each chromosome's fitness score to cumalative fitness
if cumalativeFitness > r: # in the event of cumalative fitness becoming greater than r, return index of that chromo
return i
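# Worked example (illustration only): with fitnessScores = [0.1, 0.2, 0.7] and r = 0.25,
# the cumulative fitness is 0.1 after index 0 and 0.3 after index 1; 0.3 > 0.25, so
# roulette returns 1. Fitter chromosomes occupy a larger slice of [0, 1) and are
# therefore selected more often.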
def crossover (ch1, ch2):
# at a random chiasma
r = random.randint(0,genesPerCh*4)
return ch1[:r]+ch2[r:], ch2[:r]+ch1[r:]
def mutate (ch):
mutatedCh = []
for i in ch:
if random.random() < mutation_rate:
if i == 1:
mutatedCh.append(0)
else:
mutatedCh.append(1)
else:
mutatedCh.append(i)
#assert mutatedCh != ch
return mutatedCh
"""Using breed and mutate it generates two new chromos from the selected pair"""
def breed (ch1, ch2):
newCh1, newCh2 = [], []
if random.random() < crossover_rate: # rate dependent crossover of selected chromosomes
newCh1, newCh2 = crossover(ch1, ch2)
else:
newCh1, newCh2 = ch1, ch2
newnewCh1 = mutate (newCh1) # mutate crossovered chromos
newnewCh2 = mutate (newCh2)
return newnewCh1, newnewCh2
""" Taking a ranked population return a new population by breeding the ranked one"""
def iteratePop (rankedPop):
fitnessScores = [ item[-1] for item in rankedPop ] # extract fitness scores from ranked population
rankedChromos = [ item[0] for item in rankedPop ] # extract chromosomes from ranked population
newpop = []
newpop.extend(rankedChromos[:popN/15]) # known as elitism, conserve the best solutions to new population
while len(newpop) != popN:
ch1, ch2 = [], []
ch1, ch2 = selectFittest (fitnessScores, rankedChromos) # select two of the fittest chromos
ch1, ch2 = breed (ch1, ch2) # breed them to create two new chromosomes
newpop.append(ch1) # and append to new population
newpop.append(ch2)
return newpop
def configureSettings ():
configure = raw_input ('T - Enter Target Number \tD - Default settings: ')
match1 = re.search( 't',configure, re.IGNORECASE )
if match1:
global target
target = input('Target int: ' )
def main():
configureSettings ()
chromos = generatePop() #generate new population of random chromosomes
iterations = 0
while iterations != max_iterations and solution_found != True:
# take the pop of random chromos and rank them based on their fitness score/proximity to target output
rankedPop = rankPop(chromos)
print '\nCurrent iterations:', iterations
if solution_found != True:
# if solution is not found iterate a new population from previous ranked population
chromos = []
chromos = iteratePop(rankedPop)
iterations += 1
else:
break
if __name__ == "__main__":
main()
|
{
"content_hash": "145533966236524bcb6cdbd7e07d6a33",
"timestamp": "",
"source": "github",
"line_count": 273,
"max_line_length": 154,
"avg_line_length": 29.673992673992675,
"alnum_prop": 0.6485619059375386,
"repo_name": "ActiveState/code",
"id": "4e49c91308855d2dce2fc72bf86b80d736eb7fd8",
"size": "8101",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "recipes/Python/578128_Genetic_Algorithm_Pythsource_code__AIJunkie/recipe-578128.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "35894"
},
{
"name": "C",
"bytes": "56048"
},
{
"name": "C++",
"bytes": "90880"
},
{
"name": "HTML",
"bytes": "11656"
},
{
"name": "Java",
"bytes": "57468"
},
{
"name": "JavaScript",
"bytes": "181218"
},
{
"name": "PHP",
"bytes": "250144"
},
{
"name": "Perl",
"bytes": "37296"
},
{
"name": "Perl 6",
"bytes": "9914"
},
{
"name": "Python",
"bytes": "17387779"
},
{
"name": "Ruby",
"bytes": "40233"
},
{
"name": "Shell",
"bytes": "190732"
},
{
"name": "Tcl",
"bytes": "674650"
}
],
"symlink_target": ""
}
|
from setuptools import setup
setup(name='moviemon',
version='1.0.11',
description='Everything about your movies within the command line.',
url='https://github.com/iCHAIT/moviemon',
author='Chaitanya Gupta',
author_email='cgupta319@gmail.com',
license='MIT',
packages=['moviemon'],
entry_points={
'console_scripts': ['moviemon=moviemon:main'],
},
install_requires=[
'guessit<2',
'terminaltables',
'docopt',
'tqdm',
'colorama'
],
keywords=['movies', 'CLI', 'movies-within-CLI', 'python'],
classifiers=[
'Environment :: Console',
'License :: OSI Approved :: MIT License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Unix',
'Operating System :: POSIX',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Topic :: Utilities',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development :: User Interfaces',
'Topic :: Software Development :: Version Control',
],)
|
{
"content_hash": "30bc6b7897ff9193bf870a47fd9f5ea6",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 74,
"avg_line_length": 35.74285714285714,
"alnum_prop": 0.5507593924860112,
"repo_name": "iCHAIT/moviemon",
"id": "eab65aee694cd35e8a4d445d3556c5bf163ecc69",
"size": "1251",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12837"
}
],
"symlink_target": ""
}
|
import cv2
import numpy as np
from object_detector.scanner import ImageScanner
import helpers
def test_build_image_pyramid():
# Given one sample image (100,100) and the following parameters
image = helpers.get_one_sample_image()
parameters = {"scale": 0.5, "min_y": 20, "min_x": 20}
# When building image pyramid
scanner = ImageScanner(image)
pyramid = [layer for layer in scanner.get_next_layer(scale=parameters["scale"], min_x=parameters["min_y"], min_y=parameters["min_x"])]
# Then it requires the following condition
# 1) number of pyramids
n_pyramids = 1
layer_ = image
while True:
h = int(layer_.shape[0] * parameters['scale'])
w = int(layer_.shape[1] * parameters['scale'])
layer_ = cv2.resize(layer_, (w, h))
if layer_.shape[0] < parameters['min_y'] and layer_.shape[1] < parameters['min_x']:
break
n_pyramids += 1
assert len(pyramid) == n_pyramids, "ImageScanner.get_next_layer() unit test failed"
# 2) similarity of image contents
for layer in pyramid:
img_from_layer = cv2.resize(layer, (image.shape[1], image.shape[0]))
rel_error = np.mean(np.absolute(img_from_layer - image) / image)
assert rel_error < np.max(image) * 0.03, "ImageScanner.get_next_layer() unit test failed. \
Relative Error between original image and layer should be less than 3% of maximum intensity"
def test_sliding_window():
# Given one sample image (100,100) and the following parameters
image = helpers.get_one_sample_image()
parameters = {"scale": 0.5, "min_x": 20, "min_y": 20, "step_y": 10, "step_x": 10, "win_y": 30, "win_x": 30}
# When performing sliding window in multi-pyramid
scanner = ImageScanner(image)
for layer in scanner.get_next_layer(parameters['scale'], parameters['min_y'], parameters['min_x']):
test_yx_pairs = []
for y in range(0, layer.shape[0] - parameters['win_y'], parameters['step_y']):
for x in range(0, layer.shape[1] - parameters['win_x'], parameters['step_x']):
test_yx_pairs.append((y,x))
for i, (y, x, patch) in enumerate(scanner.get_next_patch(parameters['step_y'], parameters['step_x'], parameters['win_y'], parameters['win_x'])):
assert patch.all() == layer[y:y+parameters['step_y'], x:x+parameters['step_x']].all()
assert test_yx_pairs[i][0] == y and test_yx_pairs[i][1] == x
#todo : bounding-box in original image (scanner.bounding_box)
if __name__ == "__main__":
import nose
nose.run()
|
{
"content_hash": "fc4987a372c948378db0300de338ae4e",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 152,
"avg_line_length": 36.35064935064935,
"alnum_prop": 0.5794926759556984,
"repo_name": "penny4860/object-detector",
"id": "659b7ce6487b502b4cbb257c657a4449df38fde2",
"size": "2799",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_object_detector/scanner_tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "59819"
}
],
"symlink_target": ""
}
|
"""The version number defined here is read automatically in setup.py."""
__version__ = "0.14.1.dev20220804"
|
{
"content_hash": "797382b335859c1a96b752cf39a392d7",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 72,
"avg_line_length": 36.333333333333336,
"alnum_prop": 0.7064220183486238,
"repo_name": "quantumlib/qsim",
"id": "6bd197070705a05fe787c4fd173298b72b3b0ab1",
"size": "109",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qsimcirq/_version.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "4156"
},
{
"name": "C++",
"bytes": "1221785"
},
{
"name": "CMake",
"bytes": "6358"
},
{
"name": "Cuda",
"bytes": "43196"
},
{
"name": "Dockerfile",
"bytes": "2349"
},
{
"name": "Makefile",
"bytes": "7676"
},
{
"name": "Python",
"bytes": "124050"
},
{
"name": "Shell",
"bytes": "13713"
},
{
"name": "Starlark",
"bytes": "30151"
}
],
"symlink_target": ""
}
|
from argparse import ArgumentParser
import json
import gzip
from collections import defaultdict
from sys import stderr
from morph_seg.preprocessing.token import Token
def parse_args():
p = ArgumentParser()
p.add_argument('input_file', nargs='+', type=str)
return p.parse_args()
def collect_corpus_stats(input_files):
corp = defaultdict(set)
all_words = set()
for i, infile in enumerate(input_files):
stderr.write('{}/{} {}\n'.format(i+1, len(input_files), infile))
with gzip.open(infile, 'rt') as f:
for line in f:
try:
token = Token.from_line(line)
except ValueError:
continue
for typ, pred in Token.predicates.items():
if pred(token) is True:
corp[typ].add(token)
all_words.add(token)
return corp, all_words
def collect_corpus(input_files, keep_word):
corp = set()
for i, infile in enumerate(input_files):
stderr.write('{}/{} {}\n'.format(i+1, len(input_files), infile))
with gzip.open(infile, 'rt') as f:
for line in f:
try:
token = Token.from_line(line)
except ValueError:
continue
if keep_word(token):
corp.add(token)
return corp
def instrumental(word):
return word.analysis == '[/N][Ins]' and \
len(word.word) != len(word.lemma)
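# Illustrative example of what this predicate keeps (added note, not from the original
# script): a Hungarian token with lemma "kéz", surface form "kézzel" and analysis
# "[/N][Ins]" passes, because the analysis matches and the surface form differs in
# length from the lemma; tokens with any other analysis are dropped.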
def main():
args = parse_args()
corp = collect_corpus(args.input_file, instrumental)
for word in corp:
print("{}\t{}".format(word.lemma, word.word))
#for k, v in corp.items():
#print(k, len(v), float(len(v)) / len(all_words))
#print(len(all_words))
#for token in corp['lemma_change'] - corp['low_vowel_lengthening'] - corp['instrumental']:
#print(token)
if __name__ == '__main__':
main()
|
{
"content_hash": "8a36ab8ee80869684a77081b34e12fa7",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 94,
"avg_line_length": 29.62121212121212,
"alnum_prop": 0.5529411764705883,
"repo_name": "juditacs/morph-segmentation-experiments",
"id": "107646fa1de588f5eecf26f4a33c33815d63c1ba",
"size": "2119",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "morph_seg/preprocessing/create_instrumental_corp.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "100878"
},
{
"name": "Python",
"bytes": "21110"
}
],
"symlink_target": ""
}
|
__author__ = 'andreasveit'
__version__ = '1.3'
# Interface for evaluating with the COCO-Text dataset.
# COCO-Text is a large dataset designed for text detection and recognition.
# This is a Python API that assists in evaluating text detection and recognition results
# on COCO-Text. The format of the COCO-Text annotations is described on
# the project website http://vision.cornell.edu/se3/coco-text/. In addition to this evaluation API, please download
# the COCO-Text tool API, both the COCO images and annotations.
# This dataset is based on Microsoft COCO. Please visit http://mscoco.org/
# for more information on COCO, including the image data, object annotations
# and caption annotations.
# The following functions are defined:
# getDetections - Compute TP, FN and FP
# evaluateAttribute - Evaluates accuracy for classifying text attributes
# evaluateEndToEnd - Evaluates detection and transcription accuracy end to end
# area, intersect, iou_score, decode, inter - small helper functions
# printDetailedResults - Prints detailed results as reported in COCO-Text paper
# COCO-Text Evaluation Toolbox. Version 1.3
# Data, Data API and paper available at: http://vision.cornell.edu/se3/coco-text/
# Code written by Andreas Veit, 2016.
# Licensed under the Simplified BSD License [see bsd.txt]
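# Rough usage sketch (illustrative only; `ct` and `ct_res` are assumed to be COCO_Text-style
# handles, loaded with the separate COCO-Text tool API mentioned above, for the ground truth
# and for the detection results respectively):
#   detections = getDetections(ct, ct_res, imgIds=ct.val)
#   transcriptions = evaluateEndToEnd(ct, ct_res, imgIds=ct.val)
#   printDetailedResults(ct, detections, transcriptions, 'my method')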
import editdistance
import copy
import re
# Compute detections
def getDetections(groundtruth, evaluation, imgIds = None, annIds = [], detection_threshold = 0.5):
"""
    A box is a match iff the intersection over union score is >= 0.5.
Params
------
Input dicts have the format of annotation dictionaries
"""
#parameters
detectRes = {}
# results are lists of dicts {gt_id: xxx, eval_id: yyy}
detectRes['true_positives'] = []
detectRes['false_negatives'] = []
detectRes['false_positives'] = []
# the default is set to evaluate on the validation set
if imgIds == None:
imgIds = groundtruth.val
imgIds = imgIds if len(imgIds)>0 else inter(groundtruth.imgToAnns.keys(), evaluation.imgToAnns.keys())
for cocoid in imgIds:
gt_bboxes = groundtruth.imgToAnns[cocoid] if cocoid in groundtruth.imgToAnns else []
eval_bboxes = copy.copy(evaluation.imgToAnns[cocoid]) if cocoid in evaluation.imgToAnns else []
for gt_box_id in gt_bboxes:
gt_box = groundtruth.anns[gt_box_id]['bbox']
max_iou = 0.0
match = None
for eval_box_id in eval_bboxes:
eval_box = evaluation.anns[eval_box_id]['bbox']
iou = iou_score(gt_box,eval_box)
if iou >= detection_threshold and iou > max_iou:
max_iou = iou
match = eval_box_id
if match is not None:
detectRes['true_positives'].append({'gt_id': gt_box_id, 'eval_id': match})
eval_bboxes.remove(match)
else:
detectRes['false_negatives'].append({'gt_id': gt_box_id})
if len(eval_bboxes)>0:
detectRes['false_positives'].extend([{'eval_id': eval_box_id} for eval_box_id in eval_bboxes])
return detectRes
def evaluateAttribute(groundtruth, evaluation, resultDict, attributes):
'''
Input:
groundtruth_Dict: dict, AnnFile format
evalDict: dict, AnnFile format
resultDict: dict, output from getDetections
attributes : list of strings, attribute categories
-----
Output:
'''
assert 'utf8_string' not in attributes, 'there is a separate function for utf8_string'
res = {}
for attribute in attributes:
correct = []
incorrect = []
for detection in resultDict['true_positives']:
gt_val = groundtruth.anns[detection['gt_id']][attribute]
eval_val = evaluation.anns[detection['eval_id']][attribute]
if gt_val==eval_val:
correct.append(detection)
else:
if gt_val!='na':
incorrect.append(detection)
res[attribute] = {'attribute': attribute, 'correct':len(correct), 'incorrect':len(incorrect), 'accuracy':len(correct)*1.0/len(correct+incorrect)}
return res
def evaluateEndToEnd(groundtruth, evaluation, imgIds = None, annIds = [], detection_threshold = 0.5):
"""
    A box is a match iff the intersection over union score is >= 0.5.
Params
------
Input dicts have the format of annotation dictionaries
"""
#parameters
detectRes = {}
# results are lists of dicts {gt_id: xxx, eval_id: yyy}
detectRes['true_positives'] = []
detectRes['false_negatives'] = []
detectRes['false_positives'] = []
# the default is set to evaluate on the validation set
if imgIds == None:
imgIds = groundtruth.val
imgIds = imgIds if len(imgIds)>0 else inter(groundtruth.imgToAnns.keys(), evaluation.imgToAnns.keys())
for cocoid in imgIds:
gt_bboxes = groundtruth.imgToAnns[cocoid] if cocoid in groundtruth.imgToAnns else []
eval_bboxes = copy.copy(evaluation.imgToAnns[cocoid]) if cocoid in evaluation.imgToAnns else []
for gt_box_id in gt_bboxes:
gt_box = groundtruth.anns[gt_box_id]['bbox']
if 'utf8_string' not in groundtruth.anns[gt_box_id]:
continue
gt_val = decode(groundtruth.anns[gt_box_id]['utf8_string'])
max_iou = 0.0
match = None
for eval_box_id in eval_bboxes:
eval_box = evaluation.anns[eval_box_id]['bbox']
iou = iou_score(gt_box,eval_box)
if iou >=detection_threshold and iou > max_iou:
max_iou = iou
match = eval_box_id
if 'utf8_string' in evaluation.anns[eval_box_id]:
eval_val = decode(evaluation.anns[eval_box_id]['utf8_string'])
if editdistance.eval(gt_val, eval_val)==0:
break
if match is not None:
detectRes['true_positives'].append({'gt_id': gt_box_id, 'eval_id': match})
eval_bboxes.remove(match)
else:
detectRes['false_negatives'].append({'gt_id': gt_box_id})
if len(eval_bboxes)>0:
detectRes['false_positives'].extend([{'eval_id': eval_box_id} for eval_box_id in eval_bboxes])
resultDict = detectRes
res = {}
for setting, threshold in zip(['exact', 'distance1'],[0,1]):
correct = []
incorrect = []
ignore = []
for detection in resultDict['true_positives']:
if 'utf8_string' not in groundtruth.anns[detection['gt_id']]:
ignore.append(detection)
continue
gt_val = decode(groundtruth.anns[detection['gt_id']]['utf8_string'])
if len(gt_val)<3:
ignore.append(detection)
continue
if 'utf8_string' not in evaluation.anns[detection['eval_id']]:
incorrect.append(detection)
continue
eval_val = decode(evaluation.anns[detection['eval_id']]['utf8_string'])
detection['gt_string'] = gt_val
detection['eval_string'] = eval_val
if editdistance.eval(gt_val, eval_val)<=threshold:
correct.append(detection)
else:
incorrect.append(detection)
res[setting] = {'setting': setting, 'correct':correct, 'incorrect':incorrect, 'ignore':ignore, 'accuracy':len(correct)*1.0/len(correct+incorrect)}
return res
def area(bbox):
return bbox[2] * 1.0 * bbox[3] # width * height
def intersect(bboxA, bboxB):
"""Return a new bounding box that contains the intersection of
'self' and 'other', or None if there is no intersection
"""
new_top = max(bboxA[1], bboxB[1])
new_left = max(bboxA[0], bboxB[0])
new_right = min(bboxA[0]+bboxA[2], bboxB[0]+bboxB[2])
new_bottom = min(bboxA[1]+bboxA[3], bboxB[1]+bboxB[3])
if new_top < new_bottom and new_left < new_right:
return [new_left, new_top, new_right - new_left, new_bottom - new_top]
return None
def iou_score(bboxA, bboxB):
"""Returns the Intersection-over-Union score, defined as the area of
the intersection divided by the intersection over the union of
the two bounding boxes. This measure is symmetric.
"""
if intersect(bboxA, bboxB):
intersection_area = area(intersect(bboxA, bboxB))
else:
intersection_area = 0
union_area = area(bboxA) + area(bboxB) - intersection_area
if union_area > 0:
return float(intersection_area) / float(union_area)
else:
return 0
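# Small sanity checks for the geometry helpers above, added purely as an example and not
# called by the toolbox. Boxes are [x, y, width, height].
def _iou_score_examples():
    assert iou_score([0, 0, 10, 10], [0, 0, 10, 10]) == 1.0 # identical boxes
    # 5x5 overlap, union 100 + 100 - 25 = 175: far below the 0.5 detection threshold
    assert abs(iou_score([0, 0, 10, 10], [5, 5, 10, 10]) - 25.0 / 175.0) < 1e-9
    assert iou_score([0, 0, 10, 10], [20, 20, 5, 5]) == 0 # disjoint boxes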
def decode(trans):
    trans = trans.encode("ascii", "ignore").decode("ascii") # drop characters that cannot be represented in ASCII
trans = trans.replace('\n', ' ')
trans2 = re.sub('[^a-zA-Z0-9!?@\_\-\+\*\:\&\/ \.]', '', trans)
return trans2.lower()
def inter(list1, list2):
return list(set(list1).intersection(set(list2)))
def printDetailedResults(c_text, detection_results, transcription_results, name):
print(name)
#detected coco-text annids
found = [x['gt_id'] for x in detection_results['true_positives']]
n_found = [x['gt_id'] for x in detection_results['false_negatives']]
fp = [x['eval_id'] for x in detection_results['false_positives']]
leg_eng_mp = c_text.getAnnIds(imgIds=[], catIds=[('legibility','legible'),('language','english'),('class','machine printed')], areaRng=[])
leg_eng_hw = c_text.getAnnIds(imgIds=[], catIds=[('legibility','legible'),('language','english'),('class','handwritten')], areaRng=[])
leg_mp = c_text.getAnnIds(imgIds=[], catIds=[('legibility','legible'),('class','machine printed')], areaRng=[])
ileg_mp = c_text.getAnnIds(imgIds=[], catIds=[('legibility','illegible'),('class','machine printed')], areaRng=[])
leg_hw = c_text.getAnnIds(imgIds=[], catIds=[('legibility','legible'),('class','handwritten')], areaRng=[])
ileg_hw = c_text.getAnnIds(imgIds=[], catIds=[('legibility','illegible'),('class','handwritten')], areaRng=[])
leg_ot = c_text.getAnnIds(imgIds=[], catIds=[('legibility','legible'),('class','others')], areaRng=[])
ileg_ot = c_text.getAnnIds(imgIds=[], catIds=[('legibility','illegible'),('class','others')], areaRng=[])
#Detection
print()
print("Detection")
print("Recall")
if (len(inter(found+n_found, leg_mp)))>0:
lm = "%.2f"%(100*len(inter(found, leg_mp))*1.0/(len(inter(found+n_found, leg_mp))))
else:
lm = 0
print('legible & machine printed: ', lm)
if (len(inter(found+n_found, leg_hw)))>0:
lh = "%.2f"%(100*len(inter(found, leg_hw))*1.0/(len(inter(found+n_found, leg_hw))))
else:
lh = 0
print('legible & handwritten: ', lh)
if (len(inter(found+n_found, leg_ot)))>0:
lo = "%.2f"%(100*len(inter(found, leg_ot))*1.0/(len(inter(found+n_found, leg_ot))))
else:
lo = 0
# print 'legible & others: ', lo
if (len(inter(found+n_found, leg_mp+leg_hw)))>0:
lto = "%.2f"%(100*len(inter(found, leg_mp+leg_hw))*1.0/(len(inter(found+n_found, leg_mp+leg_hw))))
else:
lto = 0
print('legible overall: ', lto)
if (len(inter(found+n_found, ileg_mp)))>0:
ilm = "%.2f"%(100*len(inter(found, ileg_mp))*1.0/(len(inter(found+n_found, ileg_mp))))
else:
ilm = 0
print('illegible & machine printed: ', ilm)
if (len(inter(found+n_found, ileg_hw)))>0:
ilh = "%.2f"%(100*len(inter(found, ileg_hw))*1.0/(len(inter(found+n_found, ileg_hw))))
else:
ilh = 0
print('illegible & handwritten: ', ilh)
if (len(inter(found+n_found, ileg_ot)))>0:
ilo = "%.2f"%(100*len(inter(found, ileg_ot))*1.0/(len(inter(found+n_found, ileg_ot))))
else:
ilo = 0
# print 'illegible & others: ', ilo
if (len(inter(found+n_found, ileg_mp+ileg_hw)))>0:
ilto = "%.2f"%(100*len(inter(found, ileg_mp+ileg_hw))*1.0/(len(inter(found+n_found, ileg_mp+ileg_hw))))
else:
ilto = 0
print('illegible overall: ', ilto)
#total = "%.1f"%(100*len(found)*1.0/(len(found)+len(n_found)))
t_recall = 100*len(found)*1.0/(len(inter(found+n_found, leg_mp+leg_hw+ileg_mp+ileg_hw)))
total = "%.1f"%(t_recall)
print('total recall: ', total)
print("Precision")
t_precision = 100*len(found)*1.0/(len(found+fp))
precision = "%.2f"%(t_precision)
print('total precision: ', precision)
print("f-score")
f_score = "%.2f"%(2 * t_recall * t_precision / (t_recall + t_precision)) if (t_recall + t_precision)>0 else 0
print('f-score localization: ', f_score)
print()
print("Transcription")
transAcc = "%.2f"%(100*transcription_results['exact']['accuracy'])
transAcc1 = "%.2f"%(100*transcription_results['distance1']['accuracy'])
print('accuracy for exact matches: ', transAcc)
print('accuracy for matches with edit distance<=1: ', transAcc1)
print()
print('End-to-end')
TP_new = len(inter(found, leg_eng_mp+leg_eng_hw)) * transcription_results['exact']['accuracy']
FP_new = len(fp) + len(inter(found, leg_eng_mp+leg_eng_hw))*(1-transcription_results['exact']['accuracy'])
FN_new = len(inter(n_found, leg_eng_mp+leg_eng_hw)) + len(inter(found, leg_eng_mp+leg_eng_hw))*(1-transcription_results['exact']['accuracy'])
t_recall_new = 100 * TP_new / (TP_new + FN_new)
t_precision_new = 100 * TP_new / (TP_new + FP_new) if (TP_new + FP_new)>0 else 0
fscore = "%.2f"%(2 * t_recall_new * t_precision_new / (t_recall_new + t_precision_new)) if (t_recall_new + t_precision_new)>0 else 0
recall_new = "%.2f"%(t_recall_new)
precision_new = "%.2f"%(t_precision_new)
    print('recall: ', recall_new)
print('precision: ', precision_new)
print('End-to-end f-score: ', fscore)
print()
#print lm, ' & ', lh, ' & ', lto, ' & ', ilm, ' & ', ilh, ' & ', ilto, '&', total, ' & ', precision, ' & ', transAcc, ' & ', transAcc1, ' & ', fscore
print(lm, ' & ', lh, ' & ', ilm, ' & ', ilh, '&', total, ' & ', precision, ' & ', f_score, ' & ', transAcc, ' & ', recall_new, ' & ', precision_new, ' & ', fscore)
print()
|
{
"content_hash": "83cd36eae511321ed70ad85f2fc58eef",
"timestamp": "",
"source": "github",
"line_count": 352,
"max_line_length": 174,
"avg_line_length": 37.39204545454545,
"alnum_prop": 0.6511168515423188,
"repo_name": "NehaTelhan/CompVisionFinalProj",
"id": "94c4100b96e7e70565c072c2192e90b779a52ede",
"size": "13162",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "coco_evaluation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "124510"
},
{
"name": "Shell",
"bytes": "154"
}
],
"symlink_target": ""
}
|
"""Population controller and base classes for online hparam modification."""
import abc
import hashlib
import os
import pickle
import threading
import time
from typing import Any, MutableMapping, Optional, Sequence, Tuple, MutableSequence
import uuid
from absl import logging
import flax
from learned_optimization import filesystem
MutateState = Any
GenerationID = str
@flax.struct.dataclass
class Checkpoint:
  # Parameters of the checkpoint. This is usually a path to a checkpoint.
params: Any
meta_params: Any # Hparams
generation_id: GenerationID
  value: Any  # evaluation
parent: Optional[Tuple[GenerationID, int]]
step: int
time: float
@flax.struct.dataclass
class ActiveWorker:
  # Parameters of the worker. This is usually a path to a checkpoint.
params: Any
meta_params: Any # Hparams
generation_id: GenerationID
step: int
def make_gen_id() -> GenerationID:
return str(uuid.uuid4())
class Mutate(abc.ABC):
"""Base class for a mutator.
Manages updating workers in the population.
"""
def __init__(self):
pass
def init(self) -> MutateState:
return None
def update(
self, state: MutateState, current_workers: Sequence[ActiveWorker],
cache: MutableMapping[GenerationID, MutableMapping[int, Checkpoint]]
) -> Tuple[MutateState, Sequence[ActiveWorker]]:
raise NotImplementedError()
def get_worker_data(
self,
active_workers: Sequence[ActiveWorker],
cache: MutableMapping[GenerationID, # pylint: disable=unused-argument
MutableMapping[int, Checkpoint]],
worker_id: int, # pylint: disable=unused-argument
generation_id: GenerationID, # pylint: disable=unused-argument
step: int, # pylint: disable=unused-argument
params: Any, # pylint: disable=unused-argument
meta_params: Any # pylint: disable=unused-argument
) -> Sequence[ActiveWorker]: # pylint: disable=unused-argument
"""Get the configuration of the active worker."""
return active_workers
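# A minimal concrete Mutate, sketched here only to illustrate the interface above; it is
# not part of the original module and nothing else references it. It keeps every worker's
# hyperparameters fixed and never swaps checkpoints between workers.
class KeepFixedMutate(Mutate):
  """Illustrative mutator that leaves the population unchanged on every update."""

  def update(self, state, current_workers, cache):
    # No mutation: return the same mutator state and the same set of active workers.
    return state, current_workers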
class IntKeyDict(dict):
"""A dictionary with integer keys which always sorts by this key."""
def values(self):
return [
v for k, v in sorted(
list(super(IntKeyDict, self).items()), key=lambda x: x[0])
]
def keys(self):
return [
k for k, v in sorted(
list(super(IntKeyDict, self).items()), key=lambda x: x[0])
]
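# Example (illustrative): IntKeyDict({3: 'c', 1: 'a', 2: 'b'}).keys() returns [1, 2, 3] and
# .values() returns ['a', 'b', 'c'], regardless of insertion order.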
class PopulationController:
"""Controller that manages a population of workers.
This should either be run locally, or with the courier wrappers
i.e. `start_courier_server`, `get_courier_client`.
"""
def __init__(self,
initial_population: Sequence[Any],
mutate: Mutate,
log_dir: Optional[str] = None):
self._log_dir = log_dir
self._mutate_state = mutate.init()
self.mutate = mutate
self._lock = threading.Lock()
if log_dir:
filesystem.make_dirs(log_dir)
if not self.load_state():
# If no state could be loaded, construct an empty worker from the
# passed in initial population.
step = 0
self._active_workers = [
ActiveWorker(None, i, make_gen_id(), step) for i in initial_population
]
self._cached = {}
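      # Cache layout: generation_id -> IntKeyDict mapping step -> Checkpoint.
      # Seed it with one value-less checkpoint per initial worker.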
for a in self._active_workers:
checkpoint = Checkpoint(
generation_id=a.generation_id,
params=a.params,
meta_params=a.meta_params,
parent=None,
step=step,
value=None,
time=time.time(),
)
self._cached[a.generation_id] = IntKeyDict()
self._cached[a.generation_id][step] = checkpoint
self.save_state()
def maybe_get_worker_data(
self, worker_id: int, generation_id: Optional[GenerationID],
step: Optional[int], params: Optional[Any],
meta_params: Optional[Any]) -> Optional[ActiveWorker]:
"""Get the currently running worker information.
Args:
worker_id: worker requesting id
generation_id: id on the worker requesting
step: the step of the current worker
params: the parameters of the current worker. This could be None.
meta_params: The hparams of the current worker.
Returns:
      An instance of ActiveWorker if something about the worker needs to change
      (i.e. reloading params, or changing hparams) or None indicating all
      parameters the worker is working on are fine and the worker should
      continue to train.
"""
with self._lock:
old_state = self.serialized_state()
# also potentially mutate the cache
self._active_workers = self.mutate.get_worker_data(
self._active_workers, self._cached, worker_id, generation_id, step,
params, meta_params)
new_state = self.serialized_state()
# only save if we have to.
if new_state and new_state != old_state:
self.save_state()
      # If the worker has no generation (e.g. just started)
      # return the worker corresponding to it.
if generation_id is None:
return self._active_workers[worker_id]
      # if the worker is on a generation not in the cache,
      # it is somehow out of sync with this class. In this case, also reset
# to the current worker.
elif generation_id not in self._cached:
logging.error("Potentially out of sync worker? Resetting worker to"
"what the population thinks it should be.")
# worker is out of sync. Return what is in the population.
return self._active_workers[worker_id]
# otherwise Checkpoint the current worker, then return the current worker.
elif self._active_workers[worker_id].generation_id != generation_id:
logging.info("Swaping worker with new configuration worker.")
        # save the current checkpoint, but without a reward.
        # Don't clobber an existing checkpoint if one exists.
if step not in self._cached[generation_id]:
checkpoint = Checkpoint(
generation_id=generation_id,
params=params,
meta_params=meta_params,
parent=(generation_id, step),
step=step,
value=None,
time=time.time(),
)
self._cached[generation_id][step] = checkpoint
self.save_state()
return self._active_workers[worker_id]
# last case is if the generation id matches. In this case, return None
# to signal that the worker is in sync with the population.
elif self._active_workers[worker_id].generation_id == generation_id:
return None
else:
assert False
def set_eval(self, worker_id: int, generation_id: GenerationID, step: int,
params: Any, value: Any):
"""Set some form of result from a worker at a given step."""
with self._lock:
if generation_id not in self._cached:
logging.warning(
"generation_id: %s was not created by this population? "
"Possibly due to premption of the worker or controller?",
generation_id)
return
meta_params = self._cached[generation_id].values()[0].meta_params
logging.info( # pylint: disable=logging-format-interpolation
f"set_eval(worker_id={worker_id}, generation_id={generation_id}, "
f"step={step}, params={params}, meta_params={meta_params}, "
f"value={value}")
checkpoint = Checkpoint(
generation_id=generation_id,
params=params,
meta_params=meta_params,
value=value,
parent=(generation_id, step),
step=step,
time=time.time(),
)
self._cached[generation_id][step] = checkpoint
if self._active_workers[worker_id].generation_id == generation_id:
# update active worker with the new step and params
# "cast" to a mutable sequence here to make pytype happy.
mut_active_workers = list(
self._active_workers) # type: MutableSequence[ActiveWorker]
mut_active_workers[worker_id] = self._active_workers[worker_id].replace(
step=step)
mut_active_workers[worker_id] = mut_active_workers[worker_id].replace(
params=params)
self._active_workers = mut_active_workers
# in light of this new value, run the mutator
self._mutate_state, self._active_workers = self.mutate.update(
self._mutate_state, self._active_workers, self._cached)
self.save_state()
def load_state(self) -> bool:
"""Load the state from disk."""
if self._log_dir:
path = os.path.join(self._log_dir, "population.state")
if filesystem.exists(path):
with filesystem.file_open(path, "rb") as f:
content = f.read()
self._active_workers, self._cached, self._mutate_state = pickle.loads(
content)
return True
return False
def serialized_state(self) -> Optional[bytes]:
"""Serialize state of this object."""
if self._log_dir:
state = (self._active_workers, self._cached, self._mutate_state)
content = pickle.dumps(state)
return content
else:
return None
def save_state(self):
"""Save state to disk."""
if self._log_dir:
content = self.serialized_state()
tmp_path = os.path.join(self._log_dir,
f"population_tmp_{str(uuid.uuid4())}.state")
with filesystem.file_open(tmp_path, "wb") as f:
f.write(content)
target_path = os.path.join(self._log_dir, "population.state")
filesystem.rename(tmp_path, target_path)
def start_courier_server(name: str, population: PopulationController):
"""Start courier server for a given population."""
import courier # pylint: disable=g-import-not-at-top
server = courier.Server(name)
server.Bind("maybe_get_worker_data", population.maybe_get_worker_data)
server.Bind("set_eval", population.set_eval)
server.Start()
return server
def get_courier_client(name: str):
import courier # pylint: disable=g-import-not-at-top
population = courier.Client(name)
return population
def uniquify_server_name(shared_str, name):
hmod = hashlib.sha256()
hmod.update(shared_str.encode("utf-8"))
hval = hmod.hexdigest()[0:20]
return str(hval) + "__" + name
|
{
"content_hash": "43c09097c1f22473f6b1d52623b356a9",
"timestamp": "",
"source": "github",
"line_count": 307,
"max_line_length": 82,
"avg_line_length": 33.60586319218241,
"alnum_prop": 0.6419501793156925,
"repo_name": "google/learned_optimization",
"id": "0329d18b991c24462a49c032cb8f16b860226be2",
"size": "10908",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "learned_optimization/population/population.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "177493"
},
{
"name": "Python",
"bytes": "1290675"
}
],
"symlink_target": ""
}
|
import sys
from dataclasses import dataclass
from getpass import getpass
from typing import Optional, Iterable, Dict
@dataclass
class Field:
name: str
label: Optional[str]
    value: Optional[str]
    sensitive: bool
    required: bool
    default: Optional[str]
@staticmethod
def build(name: str, label: Optional[str] = None, sensitive: Optional[bool] = False, required: Optional[bool] = True, default: Optional[str] = None):
return Field(
name=name,
label=label or name,
value=None,
sensitive=sensitive,
required=required,
default=default,
)
class Form:
def __init__(self, fields: Iterable[Field], max_retry_count: int = 3):
self.__max_retry_count = max_retry_count
self.__fields: Iterable[Field] = fields
def prompt(self):
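        # Walk the fields in order: sensitive fields are read with getpass (no
        # echo), and required fields are retried up to max_retry_count times
        # before an IOError is raised.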
for field in self.__fields:
prompt_user_for = getpass if field.sensitive else input
remaining_retry_count = self.__max_retry_count
value: Optional[str] = None
while not value and field.required:
if remaining_retry_count <= 0:
                    sys.stderr.write('<<< ERROR: You still have not given a valid input. Self-terminating for now '
'but you may rerun this command later.\n')
raise IOError('Invalid input for ' + field.label)
if remaining_retry_count < self.__max_retry_count:
sys.stderr.write('<<< WARNING: The input is invalid. Please try again.\n')
remaining_retry_count -= 1
value = prompt_user_for(f'>>> {field.label}: ')
field.value = value or None
return self
def to_dict(self) -> Dict[str, str]:
return {
field.name: field.value
for field in self.__fields
}
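# Example usage (a sketch): collecting credentials interactively.
#   form = Form([Field.build('username'), Field.build('password', sensitive=True)])
#   credentials = form.prompt().to_dict()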
|
{
"content_hash": "66bc033001780c417f28fe1e113dcebd",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 153,
"avg_line_length": 32.44827586206897,
"alnum_prop": 0.5706695005313497,
"repo_name": "shiroyuki/gallium",
"id": "ce4c06da192218c2532eeab263dcffb366d82dce",
"size": "1882",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "gallium/cli/form.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "152"
},
{
"name": "Makefile",
"bytes": "2417"
},
{
"name": "Python",
"bytes": "42040"
}
],
"symlink_target": ""
}
|
from openerp import tools
from openerp.osv import osv, fields
from openerp.osv.orm import except_orm
from openerp.tools import pickle
EXCLUDED_FIELDS = set((
'report_sxw_content', 'report_rml_content', 'report_sxw', 'report_rml',
'report_sxw_content_data', 'report_rml_content_data', 'search_view', ))
#: Possible slots to bind an action to with :meth:`~.set_action`
ACTION_SLOTS = [
"client_action_multi", # sidebar wizard action
"client_print_multi", # sidebar report printing button
"client_action_relate", # sidebar related link
"tree_but_open", # double-click on item in tree view
"tree_but_action", # deprecated: same as tree_but_open
]
class ir_values(osv.osv):
"""Holds internal model-specific action bindings and user-defined default
    field values. This is a legacy internal model, mixing
two different concepts, and will likely be updated or replaced in a
future version by cleaner, separate models. You should not depend
explicitly on it.
The purpose of each ``ir.values`` entry depends on its type, defined
by the ``key`` column:
* 'default': user-defined default values, used when creating new
records of this model:
* 'action': binding of an action to a particular *action slot* of
this model, making the action easily available in the user
interface for this model.
The ``key2`` column acts as a qualifier, further refining the type
of the entry. The possible values are:
* for 'default' entries: an optional condition restricting the
cases where this particular default value will be applicable,
or ``False`` for no condition
* for 'action' entries: the ``key2`` qualifier is one of the available
action slots, defining how this action can be invoked:
* ``'client_print_multi'`` for report printing actions that will
be available on views displaying items from this model
* ``'client_action_multi'`` for assistants (wizards) actions
that will be available in views displaying objects of this model
* ``'client_action_relate'`` for links towards related documents
that should be available in views displaying objects of this model
* ``'tree_but_open'`` for actions that will be triggered when
double-clicking an item from this model in a hierarchical tree view
Each entry is specific to a model (``model`` column), and for ``'actions'``
type, may even be made specific to a given record of that model when the
``res_id`` column contains a record ID (``False`` means it's global for
all records).
The content of the entry is defined by the ``value`` column, which may either
contain an arbitrary value, or a reference string defining the action that
should be executed.
.. rubric:: Usage: default values
The ``'default'`` entries are usually defined manually by the
users, and set by their UI clients calling :meth:`~.set_default`.
These default values are then automatically used by the
ORM every time a new record is about to be created, i.e. when
:meth:`~openerp.osv.osv.osv.default_get`
or :meth:`~openerp.osv.osv.osv.create` are called.
.. rubric:: Usage: action bindings
Business applications will usually bind their actions during
installation, and OpenERP UI clients will apply them as defined,
based on the list of actions included in the result of
:meth:`~openerp.osv.osv.osv.fields_view_get`,
or directly returned by explicit calls to :meth:`~.get_actions`.
"""
_name = 'ir.values'
def _value_unpickle(self, cursor, user, ids, name, arg, context=None):
res = {}
for record in self.browse(cursor, user, ids, context=context):
value = record[name[:-9]]
if record.key == 'default' and value:
# default values are pickled on the fly
try:
value = str(pickle.loads(value))
except Exception:
pass
res[record.id] = value
return res
def _value_pickle(self, cursor, user, id, name, value, arg, context=None):
if context is None:
context = {}
ctx = context.copy()
if self.CONCURRENCY_CHECK_FIELD in ctx:
del ctx[self.CONCURRENCY_CHECK_FIELD]
record = self.browse(cursor, user, id, context=context)
if record.key == 'default':
# default values are pickled on the fly
value = pickle.dumps(value)
self.write(cursor, user, id, {name[:-9]: value}, context=ctx)
def onchange_object_id(self, cr, uid, ids, object_id, context=None):
if not object_id: return {}
act = self.pool.get('ir.model').browse(cr, uid, object_id, context=context)
return {
'value': {'model': act.model}
}
def onchange_action_id(self, cr, uid, ids, action_id, context=None):
if not action_id: return {}
act = self.pool.get('ir.actions.actions').browse(cr, uid, action_id, context=context)
return {
'value': {'value_unpickle': act.type+','+str(act.id)}
}
_columns = {
'name': fields.char('Name', required=True),
'model': fields.char('Model Name', select=True, required=True,
help="Model to which this entry applies"),
# TODO: model_id and action_id should be read-write function fields
'model_id': fields.many2one('ir.model', 'Model (change only)', size=128,
help="Model to which this entry applies - "
"helper field for setting a model, will "
"automatically set the correct model name"),
'action_id': fields.many2one('ir.actions.actions', 'Action (change only)',
help="Action bound to this entry - "
"helper field for binding an action, will "
"automatically set the correct reference"),
'value': fields.text('Value', help="Default value (pickled) or reference to an action"),
'value_unpickle': fields.function(_value_unpickle, fnct_inv=_value_pickle,
type='text',
string='Default value or action reference'),
'key': fields.selection([('action','Action'),('default','Default')],
'Type', select=True, required=True,
help="- Action: an action attached to one slot of the given model\n"
"- Default: a default value for a model field"),
'key2' : fields.char('Qualifier', select=True,
help="For actions, one of the possible action slots: \n"
" - client_action_multi\n"
" - client_print_multi\n"
" - client_action_relate\n"
" - tree_but_open\n"
"For defaults, an optional condition"
,),
'res_id': fields.integer('Record ID', select=True,
help="Database identifier of the record to which this applies. "
"0 = for all records"),
'user_id': fields.many2one('res.users', 'User', ondelete='cascade', select=True,
help="If set, action binding only applies for this user."),
'company_id': fields.many2one('res.company', 'Company', ondelete='cascade', select=True,
help="If set, action binding only applies for this company")
}
_defaults = {
'key': 'action',
'key2': 'tree_but_open',
}
def _auto_init(self, cr, context=None):
res = super(ir_values, self)._auto_init(cr, context)
cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = \'ir_values_key_model_key2_res_id_user_id_idx\'')
if not cr.fetchone():
cr.execute('CREATE INDEX ir_values_key_model_key2_res_id_user_id_idx ON ir_values (key, model, key2, res_id, user_id)')
return res
def create(self, cr, uid, vals, context=None):
res = super(ir_values, self).create(cr, uid, vals, context=context)
self.get_defaults_dict.clear_cache(self)
return res
def write(self, cr, uid, ids, vals, context=None):
res = super(ir_values, self).write(cr, uid, ids, vals, context=context)
self.get_defaults_dict.clear_cache(self)
return res
def unlink(self, cr, uid, ids, context=None):
res = super(ir_values, self).unlink(cr, uid, ids, context=context)
self.get_defaults_dict.clear_cache(self)
return res
def set_default(self, cr, uid, model, field_name, value, for_all_users=True, company_id=False, condition=False):
"""Defines a default value for the given model and field_name. Any previous
default for the same scope (model, field_name, value, for_all_users, company_id, condition)
will be replaced and lost in the process.
Defaults can be later retrieved via :meth:`~.get_defaults`, which will return
the highest priority default for any given field. Defaults that are more specific
have a higher priority, in the following order (highest to lowest):
* specific to user and company
* specific to user only
* specific to company only
* global to everyone
:param string model: model name
:param string field_name: field name to which the default applies
:param value: the default field value to set
:type value: any serializable Python value
:param bool for_all_users: whether the default should apply to everybody or only
the user calling the method
:param int company_id: optional ID of the company to which the default should
apply. If omitted, the default will be global. If True
is passed, the current user's company will be used.
:param string condition: optional condition specification that can be used to
restrict the applicability of the default values
(e.g. based on another field's value). This is an
opaque string as far as the API is concerned, but client
stacks typically use single-field conditions in the
form ``'key=stringified_value'``.
(Currently, the condition is trimmed to 200 characters,
so values that share the same first 200 characters always
match)
:return: id of the newly created ir.values entry
"""
if isinstance(value, unicode):
value = value.encode('utf8')
if company_id is True:
# should be company-specific, need to get company id
user = self.pool.get('res.users').browse(cr, uid, uid)
company_id = user.company_id.id
# remove existing defaults for the same scope
search_criteria = [
('key', '=', 'default'),
('key2', '=', condition and condition[:200]),
('model', '=', model),
('name', '=', field_name),
('user_id', '=', False if for_all_users else uid),
('company_id','=', company_id)
]
self.unlink(cr, uid, self.search(cr, uid, search_criteria))
return self.create(cr, uid, {
'name': field_name,
'value': pickle.dumps(value),
'model': model,
'key': 'default',
'key2': condition and condition[:200],
'user_id': False if for_all_users else uid,
'company_id': company_id,
})
def get_default(self, cr, uid, model, field_name, for_all_users=True, company_id=False, condition=False):
""" Return the default value defined for model, field_name, users, company and condition.
Return ``None`` if no such default exists.
"""
search_criteria = [
('key', '=', 'default'),
('key2', '=', condition and condition[:200]),
('model', '=', model),
('name', '=', field_name),
('user_id', '=', False if for_all_users else uid),
('company_id','=', company_id)
]
defaults = self.browse(cr, uid, self.search(cr, uid, search_criteria))
return pickle.loads(defaults[0].value.encode('utf-8')) if defaults else None
def get_defaults(self, cr, uid, model, condition=False):
"""Returns any default values that are defined for the current model and user,
(and match ``condition``, if specified), previously registered via
:meth:`~.set_default`.
Defaults are global to a model, not field-specific, but an optional
``condition`` can be provided to restrict matching default values
to those that were defined for the same condition (usually based
on another field's value).
Default values also have priorities depending on whom they apply
to: only the highest priority value will be returned for any
field. See :meth:`~.set_default` for more details.
:param string model: model name
:param string condition: optional condition specification that can be used to
restrict the applicability of the default values
(e.g. based on another field's value). This is an
opaque string as far as the API is concerned, but client
stacks typically use single-field conditions in the
form ``'key=stringified_value'``.
(Currently, the condition is trimmed to 200 characters,
so values that share the same first 200 characters always
match)
:return: list of default values tuples of the form ``(id, field_name, value)``
(``id`` is the ID of the default entry, usually irrelevant)
"""
# use a direct SQL query for performance reasons,
# this is called very often
query = """SELECT v.id, v.name, v.value FROM ir_values v
LEFT JOIN res_users u ON (v.user_id = u.id)
WHERE v.key = %%s AND v.model = %%s
AND (v.user_id = %%s OR v.user_id IS NULL)
AND (v.company_id IS NULL OR
v.company_id =
(SELECT company_id from res_users where id = %%s)
)
%s
ORDER BY v.user_id, u.company_id"""
params = ('default', model, uid, uid)
if condition:
query %= 'AND v.key2 = %s'
params += (condition[:200],)
else:
query %= 'AND v.key2 is NULL'
cr.execute(query, params)
# keep only the highest priority default for each field
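        # (rows arrive user-specific first because NULL user_ids sort last in
        # ascending PostgreSQL order, so setdefault() keeps the most specific
        # value per field name)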
defaults = {}
for row in cr.dictfetchall():
defaults.setdefault(row['name'],
(row['id'], row['name'], pickle.loads(row['value'].encode('utf-8'))))
return defaults.values()
# use ormcache: this is called a lot by BaseModel.default_get()!
@tools.ormcache(skiparg=2)
def get_defaults_dict(self, cr, uid, model, condition=False):
""" Returns a dictionary mapping field names with their corresponding
default value. This method simply improves the returned value of
:meth:`~.get_defaults`.
"""
return dict((f, v) for i, f, v in self.get_defaults(cr, uid, model, condition))
def set_action(self, cr, uid, name, action_slot, model, action, res_id=False):
"""Binds an the given action to the given model's action slot - for later
retrieval via :meth:`~.get_actions`. Any existing binding of the same action
to the same slot is first removed, allowing an update of the action's name.
See the class description for more details about the various action
slots: :class:`~ir_values`.
:param string name: action label, usually displayed by UI client
:param string action_slot: the action slot to which the action should be
bound to - one of ``client_action_multi``,
``client_print_multi``, ``client_action_relate``,
``tree_but_open``.
:param string model: model name
:param string action: action reference, in the form ``'model,id'``
:param int res_id: optional record id - will bind the action only to a
specific record of the model, not all records.
:return: id of the newly created ir.values entry
"""
assert isinstance(action, basestring) and ',' in action, \
'Action definition must be an action reference, e.g. "ir.actions.act_window,42"'
assert action_slot in ACTION_SLOTS, \
'Action slot (%s) must be one of: %r' % (action_slot, ACTION_SLOTS)
# remove existing action definition of same slot and value
search_criteria = [
('key', '=', 'action'),
('key2', '=', action_slot),
('model', '=', model),
('res_id', '=', res_id or 0), # int field -> NULL == 0
('value', '=', action),
]
self.unlink(cr, uid, self.search(cr, uid, search_criteria))
return self.create(cr, uid, {
'key': 'action',
'key2': action_slot,
'model': model,
'res_id': res_id,
'name': name,
'value': action,
})
def get_actions(self, cr, uid, action_slot, model, res_id=False, context=None):
"""Retrieves the list of actions bound to the given model's action slot.
See the class description for more details about the various action
slots: :class:`~.ir_values`.
:param string action_slot: the action slot to which the actions should be
bound to - one of ``client_action_multi``,
``client_print_multi``, ``client_action_relate``,
``tree_but_open``.
:param string model: model name
:param int res_id: optional record id - will bind the action only to a
specific record of the model, not all records.
:return: list of action tuples of the form ``(id, name, action_def)``,
where ``id`` is the ID of the default entry, ``name`` is the
action label, and ``action_def`` is a dict containing the
action definition as obtained by calling
:meth:`~openerp.osv.osv.osv.read` on the action record.
"""
assert action_slot in ACTION_SLOTS, 'Illegal action slot value: %s' % action_slot
# use a direct SQL query for performance reasons,
# this is called very often
query = """SELECT v.id, v.name, v.value FROM ir_values v
WHERE v.key = %s AND v.key2 = %s
AND v.model = %s
AND (v.res_id = %s
OR v.res_id IS NULL
OR v.res_id = 0)
ORDER BY v.id"""
cr.execute(query, ('action', action_slot, model, res_id or None))
results = {}
for action in cr.dictfetchall():
if not action['value']:
continue # skip if undefined
action_model_name, action_id = action['value'].split(',')
if action_model_name not in self.pool:
                continue # unknown model? skip it
action_model = self.pool[action_model_name]
fields = [field for field in action_model._fields if field not in EXCLUDED_FIELDS]
# FIXME: needs cleanup
try:
action_def = action_model.read(cr, uid, int(action_id), fields, context)
if action_def:
if action_model_name in ('ir.actions.report.xml', 'ir.actions.act_window'):
groups = action_def.get('groups_id')
if groups:
cr.execute('SELECT 1 FROM res_groups_users_rel WHERE gid IN %s AND uid=%s',
(tuple(groups), uid))
if not cr.fetchone():
if action['name'] == 'Menuitem':
raise osv.except_osv('Error!',
'You do not have the permission to perform this operation!!!')
continue
# keep only the first action registered for each action name
results[action['name']] = (action['id'], action['name'], action_def)
except except_orm:
continue
return sorted(results.values())
def _map_legacy_model_list(self, model_list, map_fn, merge_results=False):
"""Apply map_fn to the various models passed, according to
legacy way to specify models/records.
"""
assert isinstance(model_list, (list, tuple)), \
"model_list should be in the form [model,..] or [(model,res_id), ..]"
results = []
for model in model_list:
res_id = False
if isinstance(model, (list, tuple)):
model, res_id = model
result = map_fn(model, res_id)
# some of the functions return one result at a time (tuple or id)
# and some return a list of many of them - care for both
if merge_results:
results.extend(result)
else:
results.append(result)
return results
    # Backwards-compatibility adapter layer to retrofit into split API
def set(self, cr, uid, key, key2, name, models, value, replace=True, isobject=False, meta=False, preserve_user=False, company=False):
"""Deprecated legacy method to set default values and bind actions to models' action slots.
Now dispatches to the newer API methods according to the value of ``key``: :meth:`~.set_default`
(``key=='default'``) or :meth:`~.set_action` (``key == 'action'``).
:deprecated: As of v6.1, ``set_default()`` or ``set_action()`` should be used directly.
"""
assert key in ['default', 'action'], "ir.values entry keys must be in ['default','action']"
if key == 'default':
def do_set(model,res_id):
return self.set_default(cr, uid, model, field_name=name, value=value,
for_all_users=(not preserve_user), company_id=company,
condition=key2)
elif key == 'action':
def do_set(model,res_id):
return self.set_action(cr, uid, name, action_slot=key2, model=model, action=value, res_id=res_id)
return self._map_legacy_model_list(models, do_set)
def get(self, cr, uid, key, key2, models, meta=False, context=None, res_id_req=False, without_user=True, key2_req=True):
"""Deprecated legacy method to get the list of default values or actions bound to models' action slots.
Now dispatches to the newer API methods according to the value of ``key``: :meth:`~.get_defaults`
(``key=='default'``) or :meth:`~.get_actions` (``key == 'action'``)
:deprecated: As of v6.1, ``get_defaults()`` or ``get_actions()`` should be used directly.
"""
assert key in ['default', 'action'], "ir.values entry keys must be in ['default','action']"
if key == 'default':
def do_get(model,res_id):
return self.get_defaults(cr, uid, model, condition=key2)
elif key == 'action':
def do_get(model,res_id):
return self.get_actions(cr, uid, action_slot=key2, model=model, res_id=res_id, context=context)
return self._map_legacy_model_list(models, do_get, merge_results=True)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
{
"content_hash": "fda4738efd27da6b0e18ba5701819eb3",
"timestamp": "",
"source": "github",
"line_count": 483,
"max_line_length": 137,
"avg_line_length": 52.387163561076605,
"alnum_prop": 0.5546377899853773,
"repo_name": "lbk0116/NTDP",
"id": "5204e691fd6c69247226b3b5de648aceb70880c0",
"size": "26282",
"binary": false,
"copies": "35",
"ref": "refs/heads/master",
"path": "openerp/addons/base/ir/ir_values.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "9611"
},
{
"name": "CSS",
"bytes": "539622"
},
{
"name": "HTML",
"bytes": "46766"
},
{
"name": "JavaScript",
"bytes": "5052395"
},
{
"name": "Makefile",
"bytes": "12757"
},
{
"name": "NSIS",
"bytes": "18524"
},
{
"name": "Python",
"bytes": "3776867"
},
{
"name": "Ruby",
"bytes": "220"
},
{
"name": "Shell",
"bytes": "4318"
},
{
"name": "XSLT",
"bytes": "27334"
}
],
"symlink_target": ""
}
|
from ..core import WesternCalendar
from ..registry_tools import iso_register
@iso_register('FR')
class France(WesternCalendar):
    "France"
# Christian holidays
include_easter_monday = True
include_ascension = True
include_whit_monday = True
include_all_saints = True
include_assumption = True
# Civil holidays
include_labour_day = True
FIXED_HOLIDAYS = WesternCalendar.FIXED_HOLIDAYS + (
(5, 8, "Victory in Europe Day"),
(7, 14, "Bastille Day"),
(11, 11, "Armistice Day"),
)
class FranceAlsaceMoselle(France):
"France Alsace/Moselle"
include_good_friday = True
include_boxing_day = True
|
{
"content_hash": "2915d212a33a72c71a7d8b3cc4abf1ca",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 55,
"avg_line_length": 23.964285714285715,
"alnum_prop": 0.6616989567809239,
"repo_name": "novafloss/workalendar",
"id": "b596d67066efa36e48f98d5c47567938579684db",
"size": "671",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "workalendar/europe/france.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1164"
},
{
"name": "Python",
"bytes": "383844"
}
],
"symlink_target": ""
}
|
import bpy
import bmesh
from mathutils import Vector
import numpy as np
import json
import sys
from pathlib import Path
UTILS_PATH = Path.home() / "Documents/python_workspace/data-science-learning"
sys.path.append(str(UTILS_PATH))
import importlib
import ds_utils.blender_utils
importlib.reload(ds_utils.blender_utils)
from ds_utils.blender_utils import init_grease_pencil, delete_all, draw_segment, create_object
def _update_obj(obj_name: str, vertices):
obj = bpy.context.scene.objects[obj_name]
mesh = obj.data
bm = bmesh.new()
# convert the current mesh to a bmesh (must be in edit mode)
bpy.ops.object.mode_set(mode='EDIT')
bm.from_mesh(mesh)
bpy.ops.object.mode_set(mode='OBJECT') # return to object mode
for v in bm.verts:
bm.verts.remove(v)
for v in vertices:
bm.verts.new(v)
# make the bmesh the object's mesh
bm.to_mesh(mesh)
bm.free() # always do this when finished
def _init_shape_keys(obj):
sk_basis = obj.shape_key_add(name='Basis')
sk_basis.interpolation = 'KEY_LINEAR'
obj.data.shape_keys.use_relative = False
def _update_shape_keys(obj, vertices, frame, frames_spacing=1):
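    # Add one absolute shape key per frame holding the new vertex positions,
    # then keyframe the shape_keys eval_time so the mesh morphs through the
    # blocks over the animation.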
# Create new shape-key block
block = obj.shape_key_add(name=str(frame), from_mix=False) # returns a key_blocks member
block.interpolation = 'KEY_LINEAR'
block.value = 0
# Update vertices position
for (vert, co) in zip(block.data, vertices):
if np.all(co == 0.):
continue
else:
vert.co = co
# Keyframe evaluation time
obj.data.shape_keys.eval_time = frame * 10
obj.data.shape_keys.keyframe_insert(data_path='eval_time', frame=frame*frames_spacing)
def main(points_filepath: str, scale_factor=10, translate_vector=(0, 0, 0), frames_spacing=1):
# load pose points
pose_points = np.load(points_filepath)
# scale
pose_points /= scale_factor
    # TODO: needed like this because the number of entries is variable
# if necessary expand pose coordinates to 3D and translate
for i in range(len(pose_points)):
if pose_points[i].shape[-1] == 2:
z = np.zeros(pose_points.shape[:-1] + (1,), dtype=pose_points.dtype)
pose_points = np.concatenate((pose_points, z), axis=-1)
# translate
pose_points += translate_vector
NUM_FRAMES = len(pose_points)
bpy.context.scene.frame_start = 0
bpy.context.scene.frame_end = NUM_FRAMES * frames_spacing
#delete_all()
    # Create initial object (or update its vertices if one already exists)
obj_name = 'pose'
if obj_name not in bpy.context.scene.objects:
create_object(pose_points[0], edges=[], faces=[], obj_name=obj_name)
else:
_update_obj(obj_name, pose_points[0])
obj = bpy.context.scene.objects[obj_name]
_init_shape_keys(obj)
# Run animation across frames
#gp_layer = init_grease_pencil(clear_layer=True)
for frame in range(1, NUM_FRAMES): # start from 1 as first frame points have been used to instantiate object
if frame % 100 == 0:
print("Updating frame {}".format(frame))
# Shape keys
_update_shape_keys(obj, pose_points[frame], frame, frames_spacing)
# GP
#gp_frame = gp_layer.frames.new(frame * frames_spacing)
#for segment in pose_points[frame]:
# draw_segment(gp_frame, segment)
main(points_filepath=str(Path.home() / "all_keypoints.npy"),
frames_spacing=10)
|
{
"content_hash": "f4bc2589a4b851f93626e04143c2c9e7",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 113,
"avg_line_length": 30.973214285714285,
"alnum_prop": 0.6584029979821274,
"repo_name": "5agado/data-science-learning",
"id": "069a1bf9a7fe28ee148c4de2cbbe9b244edcfe4e",
"size": "3501",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "graphics/blender/pose_anim.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "565"
},
{
"name": "Jupyter Notebook",
"bytes": "2011939"
},
{
"name": "Python",
"bytes": "550056"
}
],
"symlink_target": ""
}
|
""" Lazymq is UDP-semantic over TCP """
import asyncio
import socket
import time
from . import const
from .struct import Connection, Message
from .protocol import Protocol
from .log import l
from .crypt import LinkEncryption
from .tasks import Cleanup
# TODO: test reuse
# TODO: bandwidth limit
# TODO: does call_soon log exceptions?
# TODO: LazyMQ attrs to read-only props
# TODO: documentation, more tests, refactoring, review
class LazyMQ(Protocol, LinkEncryption, Cleanup):
""" Sending and receiving message TCP without sockets. LazyMQ will handle
connection for you. It will keep a connection for a while for reuse and
then clean up the connection. """
def __init__(
self,
port = const.Config.PORT,
encoding = const.Config.ENCODING,
ip_protocols = const.Config.PROTOS,
bind_v6 = "",
bind_v4 = "",
cert_chain_pem = None,
loop = None,
):
self.port = port
self.encoding = encoding
self.ip_protocols = ip_protocols
self.bind_v6 = bind_v6
self.bind_v4 = bind_v4
self._loop = loop
self._servers = []
self._socks = []
self._connections = {}
self._future = asyncio.Future(loop=self.loop)
self._future_lock = asyncio.Lock(loop=self.loop)
self._waiters = 0
self._received = asyncio.Event(loop=self.loop)
self._queue = asyncio.Queue(loop=self.loop)
self._closed = asyncio.Event(loop=self.loop)
self.setup_tls(cert_chain_pem)
if not self._loop:
self._loop = asyncio.get_event_loop()
if ip_protocols & const.Protocols.IPV6:
self._start_server(socket.AF_INET6, bind_v6)
if ip_protocols & const.Protocols.IPV4:
self._start_server(socket.AF_INET, bind_v4)
asyncio.async(
self.run_cleanup(),
loop=self._loop,
)
l.debug("LazyMQ set up")
@property
def queue(self):
""" Return the queue.
:rtype: asyncio.Queue """
return self._queue
def _start_server(
self,
family,
bind,
):
""" Starts a server """
# Detecting dual_stack sockets seems not to work on some OSs
# so we always use two sockets
sock = socket.socket(family)
if family is socket.AF_INET6:
sock.setsockopt(
socket.IPPROTO_IPV6,
socket.IPV6_V6ONLY,
True,
)
sock.setsockopt(
socket.SOL_SOCKET,
socket.SO_REUSEADDR,
True,
)
sock.bind((bind, self.port))
sock.listen(const.Config.BACKLOG)
server = asyncio.start_server(
self._handle_connection,
loop = self._loop,
sock = sock,
backlog = const.Config.BACKLOG,
ssl = self._ssl_context,
)
l.debug("Server created: %s", server)
self._servers.append(server)
self._socks.append(sock)
@asyncio.coroutine
def close(self):
""" Closing everything """
self._closed.set()
for server in self._servers:
server.close()
yield from server.wait_closed()
# Give active reads a chance to die
yield from asyncio.sleep(0.1)
for sock in self._socks:
sock.close()
for conn in self._connections.values():
conn.close()
self._servers.clear()
self._socks.clear()
self._connections.clear()
if not self.loop.is_running():
self.loop.run_until_complete(asyncio.sleep(0))
@asyncio.coroutine
def _do_open(self, port, address):
""" Open a connection with a defined timeout """
reader, writer = yield from asyncio.wait_for(
asyncio.open_connection(
host = str(address),
port = port,
loop = self.loop,
ssl=self._ssl_context
),
const.Config.TIMEOUT,
)
conn = Connection(
reader,
writer,
)
handler = self._handle_connection(reader, writer, conn)
asyncio.async(
handler,
loop = self.loop,
)
peer = writer.get_extra_info('peername')
peer = self._make_connection_key(peer[0], peer[1])
# for consistency get the peername from the socket!
self._connections[peer] = conn
return conn
@asyncio.coroutine
def get_connection(
self,
port,
active_port = None,
address_v6 = None,
address_v4 = None,
):
""" Get an active connection to a host. Please provide a ipv4- and
ipv6-address, you can leave one address None, but its not
recommended. """
assert address_v4 or address_v6
port = int(port)
# We cannot use the same port we listen on as with UDP. So we need to
# handle that random port the message was sent from.
#
        # 1. If we answer a message we try first to answer it through the
        #    incoming connection, which is identified by the active port.
        # 2. If we cannot find that connection it is probably closed, so we
        #    check if we already have a connection to the remote-listening port.
        # 3. If there is no connection in the cache we connect to the
        #    remote-listening port and cache it.
        #
        # This is needed because we simulate UDP semantics over TCP, where you
        # can just point at a port and shoot.
if active_port:
active_port = int(active_port)
try:
if address_v6:
return self._connections[(address_v6.packed, active_port)]
except KeyError:
pass
try:
if address_v4:
return self._connections[(address_v4.packed, active_port)]
except KeyError:
pass
try:
if address_v6:
return self._connections[(address_v6.packed, port)]
except KeyError:
pass
try:
if address_v4:
return self._connections[(address_v4.packed, port)]
except KeyError:
pass
try:
if address_v6:
return (yield from self._do_open(port, address_v6))
except OSError:
if not address_v4:
raise
if address_v4:
return (yield from self._do_open(port, address_v4))
raise Exception("I am a bug, please report me on github")
@property
def loop(self):
""" Returns the eventloop used by lazymq.
:rtype: asyncio.AbstractEventLoop """
return self._loop
@asyncio.coroutine
def start(self):
""" Start everything """
newservers = []
for server in self._servers:
newservers.append((yield from server))
self._servers = newservers
def send(self, message):
""" Send message """
self._fill_defaults(message)
@asyncio.coroutine
def receive(self):
""" Receive a message from the queue """
return (yield from self._queue.get())
@asyncio.coroutine
def communicate(
self,
message,
timeout=const.Config.TIMEOUT,
):
""" Deliver a message and wait for an answer. The identity of the
answer has to be same as the request. IMPORTANT: The message will still
be delivered to the queue, so please consume the message.
If you use this method so submit long-running tasks to remotes consider
setting a longer timeout. You should never set no timeout since the
remote can die during execution.
This method is a coroutine. """
self._waiters += 1
# l.debug("Starting to communicate")
yield from self.deliver(message)
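        # Wait on the shared future until an answer whose identity matches the
        # request arrives; the future is re-armed once no communicate() call is
        # left waiting on it.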
while True:
try:
result = yield from asyncio.wait_for(
self._future,
timeout=timeout,
loop=self._loop,
)
# l.debug("Got a message")
if result.identity == message.identity:
return result
finally:
self._waiters -= 1
if not self._waiters:
self._future = asyncio.Future(loop=self.loop)
self._received.set()
self._waiters += 1
|
{
"content_hash": "833c7f1659099aa09d373ada091aa0ac",
"timestamp": "",
"source": "github",
"line_count": 268,
"max_line_length": 79,
"avg_line_length": 32.82835820895522,
"alnum_prop": 0.5402364173675835,
"repo_name": "ganwell/dht3k",
"id": "44055babc2b5543dc7099c05f8b3b0712834586b",
"size": "8836",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "lazymq/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "102559"
},
{
"name": "Shell",
"bytes": "6466"
}
],
"symlink_target": ""
}
|
from ddapp.consoleapp import ConsoleApp
from ddapp.debugVis import DebugData
import ddapp.visualization as vis
# initialize application components
app = ConsoleApp()
view = app.createView()
view.showMaximized()
# create a sphere primitive
d = DebugData()
d.addSphere(center=(0,0,0), radius=0.5)
# show the sphere in the visualization window
sphere = vis.showPolyData(d.getPolyData(), 'sphere')
sphere.setProperty('Color', [0,1,0])
# start the application
app.start()
|
{
"content_hash": "a29df36d10a30cbc177304325c17fc0d",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 52,
"avg_line_length": 24.789473684210527,
"alnum_prop": 0.7664543524416136,
"repo_name": "manuelli/director",
"id": "8cfe9f7cdf0edbc7a23eb75be9f3cf9d941511a3",
"size": "471",
"binary": false,
"copies": "4",
"ref": "refs/heads/corl-master",
"path": "docs/sphinx/tutorials/visualization/drawShapes.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "119719"
},
{
"name": "C++",
"bytes": "386385"
},
{
"name": "CMake",
"bytes": "47439"
},
{
"name": "GLSL",
"bytes": "15443"
},
{
"name": "MATLAB",
"bytes": "122694"
},
{
"name": "Makefile",
"bytes": "4876"
},
{
"name": "Python",
"bytes": "1993109"
},
{
"name": "Shell",
"bytes": "1337"
}
],
"symlink_target": ""
}
|
import sys
import gdb
import os
import os.path
pythondir = '/home/build/work/GCC-4-8-build/install-native/share/gcc-arm-none-eabi'
libdir = '/home/build/work/GCC-4-8-build/install-native/arm-none-eabi/lib/armv7-ar/thumb/softfp'
# This file might be loaded when there is no current objfile. This
# can happen if the user loads it manually. In this case we don't
# update sys.path; instead we just hope the user managed to do that
# beforehand.
if gdb.current_objfile () is not None:
# Update module path. We want to find the relative path from libdir
# to pythondir, and then we want to apply that relative path to the
# directory holding the objfile with which this file is associated.
# This preserves relocatability of the gcc tree.
# Do a simple normalization that removes duplicate separators.
pythondir = os.path.normpath (pythondir)
libdir = os.path.normpath (libdir)
prefix = os.path.commonprefix ([libdir, pythondir])
# In some bizarre configuration we might have found a match in the
# middle of a directory name.
if prefix[-1] != '/':
prefix = os.path.dirname (prefix) + '/'
# Strip off the prefix.
pythondir = pythondir[len (prefix):]
libdir = libdir[len (prefix):]
# Compute the ".."s needed to get from libdir to the prefix.
dotdots = ('..' + os.sep) * len (libdir.split (os.sep))
objfile = gdb.current_objfile ().filename
dir_ = os.path.join (os.path.dirname (objfile), dotdots, pythondir)
if not dir_ in sys.path:
sys.path.insert(0, dir_)
# Load the pretty-printers.
from libstdcxx.v6.printers import register_libstdcxx_printers
register_libstdcxx_printers (gdb.current_objfile ())
|
{
"content_hash": "a319fee7a0756629a2506240517fddd5",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 96,
"avg_line_length": 38.65909090909091,
"alnum_prop": 0.702527924750147,
"repo_name": "marduino/stm32Proj",
"id": "274c522fb798389f55a0430d6437ff34ae4e1044",
"size": "2411",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tools/armgcc/arm-none-eabi/lib/armv7-ar/thumb/softfp/libstdc++.a-gdb.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "23333"
},
{
"name": "C",
"bytes": "4470437"
},
{
"name": "C++",
"bytes": "655791"
},
{
"name": "HTML",
"bytes": "109"
},
{
"name": "Makefile",
"bytes": "10291"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import os
import subprocess
import sys
import lit.Test
import lit.TestRunner
import lit.util
from lit.formats.base import TestFormat
kIsWindows = sys.platform in ['win32', 'cygwin']
class GoogleBenchmark(TestFormat):
def __init__(self, test_sub_dirs, test_suffix, benchmark_args=[]):
self.benchmark_args = list(benchmark_args)
self.test_sub_dirs = os.path.normcase(str(test_sub_dirs)).split(';')
# On Windows, assume tests will also end in '.exe'.
exe_suffix = str(test_suffix)
if kIsWindows:
exe_suffix += '.exe'
# Also check for .py files for testing purposes.
self.test_suffixes = {exe_suffix, test_suffix + '.py'}
def getBenchmarkTests(self, path, litConfig, localConfig):
"""getBenchmarkTests(path) - [name]
        Return the tests available in a benchmark executable.
Args:
          path: String path to a benchmark executable
litConfig: LitConfig instance
localConfig: TestingConfig instance"""
# TODO: allow splitting tests according to the "benchmark family" so
# the output for a single family of tests all belongs to the same test
# target.
list_test_cmd = [path, '--benchmark_list_tests']
try:
output = subprocess.check_output(list_test_cmd,
env=localConfig.environment)
except subprocess.CalledProcessError as exc:
litConfig.warning(
"unable to discover google-benchmarks in %r: %s. Process output: %s"
% (path, sys.exc_info()[1], exc.output))
raise StopIteration
nested_tests = []
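        # Parse the listing like gtest's indented tree output: two spaces per
        # nesting level, prefixes ending in '.'; join the nested prefixes to
        # rebuild each full benchmark name.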
for ln in output.splitlines(False): # Don't keep newlines.
ln = lit.util.to_string(ln)
if not ln.strip():
continue
index = 0
while ln[index*2:index*2+2] == ' ':
index += 1
while len(nested_tests) > index:
nested_tests.pop()
ln = ln[index*2:]
if ln.endswith('.'):
nested_tests.append(ln)
elif any([name.startswith('DISABLED_')
for name in nested_tests + [ln]]):
# Gtest will internally skip these tests. No need to launch a
# child process for it.
continue
else:
yield ''.join(nested_tests) + ln
def getTestsInDirectory(self, testSuite, path_in_suite,
litConfig, localConfig):
source_path = testSuite.getSourcePath(path_in_suite)
for subdir in self.test_sub_dirs:
dir_path = os.path.join(source_path, subdir)
if not os.path.isdir(dir_path):
continue
for fn in lit.util.listdir_files(dir_path,
suffixes=self.test_suffixes):
# Discover the tests in this executable.
execpath = os.path.join(source_path, subdir, fn)
testnames = self.getBenchmarkTests(execpath, litConfig, localConfig)
for testname in testnames:
testPath = path_in_suite + (subdir, fn, testname)
yield lit.Test.Test(testSuite, testPath, localConfig,
file_path=execpath)
def execute(self, test, litConfig):
testPath,testName = os.path.split(test.getSourcePath())
while not os.path.exists(testPath):
# Handle GTest parametrized and typed tests, whose name includes
# some '/'s.
testPath, namePrefix = os.path.split(testPath)
testName = namePrefix + '/' + testName
cmd = [testPath, '--benchmark_filter=%s$' % testName ] + self.benchmark_args
if litConfig.noExecute:
return lit.Test.PASS, ''
try:
out, err, exitCode = lit.util.executeCommand(
cmd, env=test.config.environment,
timeout=litConfig.maxIndividualTestTime)
except lit.util.ExecuteCommandTimeoutException:
return (lit.Test.TIMEOUT,
'Reached timeout of {} seconds'.format(
litConfig.maxIndividualTestTime)
)
if exitCode:
return lit.Test.FAIL, ('exit code: %d\n' % exitCode) + out + err
passing_test_line = testName
if passing_test_line not in out:
msg = ('Unable to find %r in google benchmark output:\n\n%s%s' %
(passing_test_line, out, err))
return lit.Test.UNRESOLVED, msg
return lit.Test.PASS, err + out
|
{
"content_hash": "22db6cc0f4a4b05a6e67e0729cac22b9",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 84,
"avg_line_length": 38.75409836065574,
"alnum_prop": 0.5617597292724196,
"repo_name": "endlessm/chromium-browser",
"id": "e531cba0b36818e3745d01b94411992d6def086f",
"size": "4728",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "buildtools/third_party/libc++/trunk/utils/libcxx/test/googlebenchmark.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
import cPickle as pckl
import codecs
import argparse
VOCAB_PATH = "../rsc/vocab.pickle"
def main():
""" Main method. """
argument_parser = init_argument_parser()
args = argument_parser.parse_args()
print args
# Save vocabulary to a pickle file
if args.write:
args_dict = vars(args)
viable_options = {"min_length", "max_length", "mwes"}
options = {
option: args_dict[option] for option in args_dict
            if option in viable_options and args_dict[option] is not None
}
total_vocab = merge_vocabularies(args.input)
print len(total_vocab)
save_vocab(total_vocab, options=options)
# Load vocabulary from a pickle file
elif args.read:
total_vocab = load_vocab()
print total_vocab, len(total_vocab)
def merge_vocabularies(paths):
"""
Merges multiple files containing vocabulary.
Args:
paths (list): List of path to input files.
Returns:
set: Set of all words in vocabulary.
"""
assert len(paths) > 0
total_vocab = set()
for path in paths:
total_vocab = total_vocab.union(read_vocabulary(path))
return total_vocab
def save_vocab(vocab, options={}):
"""
Saves vocabulary to a pickle file.
Args:
vocab (set): Set of all words in vocabulary.
options (dict): Filtering options.
"""
global VOCAB_PATH
encoded_vocab = set()
for entry in vocab:
try:
if False not in check_constraints(entry, options):
print entry
encoded_vocab.add(entry.decode('latin-1'))
except UnicodeEncodeError:
continue
with open(VOCAB_PATH, 'wb') as vocab_file:
pckl.dump(encoded_vocab, vocab_file)
def check_constraints(word, options):
"""
Enforce filtering constraints on the vocabulary.
Args:
word (str): Current vocabulary to be checked.
options (dict): Filtering options.
Returns:
list: List of filtering results with booleans for each check.
"""
# Defining checks
def _min_length_check(_word, min_length):
if len(_word) < min_length:
return False
return True
def _max_length_check(_word, max_length):
if len(_word) > max_length:
return False
return True
def _multi_word_check(_word, mwes):
return True if mwes else (' ' not in _word)
# Enforcing constraints
checks = {
"min_length": _min_length_check,
"max_length": _max_length_check,
"mwes": _multi_word_check
}
results = []
for option in options:
arg = options[option]
results.append(checks[option](word, arg))
return results
def load_vocab():
"""
Load vocabulary from pickle file.
Returns:
set: Set of all words in vocabulary.
"""
global VOCAB_PATH
with open(VOCAB_PATH, 'rb') as vocab_file:
vocab = pckl.load(vocab_file)
print vocab
decoded_vocab = set()
for entry in vocab:
decoded_vocab.add(entry.encode('latin-1'))
return decoded_vocab
def read_vocabulary(vocab_inpath):
"""
Read a vocabulary file with one word per line.
Args:
vocab_inpath (str): Path to vocabulary file.
Returns:
set: Set of all words in vocabulary.
"""
vocab = set()
with codecs.open(vocab_inpath, 'rb', 'utf-8') as vocab_infile:
line = vocab_infile.readline()
while line:
vocab.add(line.strip())
line = vocab_infile.readline()
return vocab
def init_argument_parser():
"""
Initialize the argument parser for this script.
Returns:
argparse.ArgumentParser: ArguementParser object
"""
argument_parser = argparse.ArgumentParser()
# Basic arguments
argument_parser.add_argument(
'--input',
nargs='+',
help='Paths to vocabulary files.'
)
argument_parser.add_argument(
'-r',
'--read',
action='store_true',
help='Enable reading mode.'
)
argument_parser.add_argument(
'-w',
'--write',
action='store_true',
help='Enable writing mode.'
)
# Filtering options
    argument_parser.add_argument(
        '--min',
        dest='min_length',
        type=int,
        help='Minimum length of a word.'
    )
    argument_parser.add_argument(
        '--max',
        dest='max_length',
        type=int,
        help='Maximum length of a word.'
    )
argument_parser.add_argument(
'--mwes',
action='store_true',
default=False,
help="Are multi-word entries allowed or not?"
)
return argument_parser
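# Example invocations (a sketch; the file names are placeholders):
#   python create_vocab.py -w --input words_a.txt words_b.txt --min 2 --mwes
#   python create_vocab.py -r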
if __name__ == "__main__":
main()
|
{
"content_hash": "e76401ae6a34097ce17cd138aaec4ba5",
"timestamp": "",
"source": "github",
"line_count": 211,
"max_line_length": 69,
"avg_line_length": 22.436018957345972,
"alnum_prop": 0.5817490494296578,
"repo_name": "Kaleidophon/doppelmoppelbot",
"id": "bd60cf9e4e4bcc117a9f735147e4cc566df7e95b",
"size": "4734",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "misc/create_vocab.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10234"
}
],
"symlink_target": ""
}
|
from direct.distributed import DistributedObject
from direct.directnotify import DirectNotifyGlobal
from toontown.toonbase import ToontownGlobals
from toontown.toonbase import TTLocalizer
import FishGlobals
from toontown.fishing import DistributedPondBingoManager
from pandac.PandaModules import Vec3
from direct.task import Task
class DistributedFishingPond(DistributedObject.DistributedObject):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedFishingPond')
pollInterval = 0.5
def __init__(self, cr):
DistributedObject.DistributedObject.__init__(self, cr)
self.notify.debug('init')
self.targets = {}
self.area = None
self.localToonBobPos = None
self.localToonSpot = None
self.pondBingoMgr = None
self.visitedSpots = {}
return
def disable(self):
self.visitedSpots.clear()
self.stopCheckingTargets()
DistributedObject.DistributedObject.disable(self)
def setArea(self, area):
self.area = area
def getArea(self):
return self.area
def addTarget(self, target):
self.notify.debug('addTarget: %s' % target)
self.targets[target.getDoId()] = target
def removeTarget(self, target):
self.notify.debug('removeTarget: %s' % target)
del self.targets[target.getDoId()]
def startCheckingTargets(self, spot, bobPos):
self.notify.debug('startCheckingTargets')
if base.wantBingo:
pass
self.localToonSpot = spot
self.localToonBobPos = bobPos
taskMgr.doMethodLater(self.pollInterval * 2, self.checkTargets, self.taskName('checkTargets'))
def stopCheckingTargets(self):
self.notify.debug('stopCheckingTargets')
taskMgr.remove(self.taskName('checkTargets'))
if not base.wantBingo:
self.localToonSpot = None
self.localToonBobPos = None
return
def checkTargets(self, task = None):
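        # Poll each fishing target's distance from the local toon's bob; when
        # the bob falls inside a target's radius, report the hit and stop polling.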
self.notify.debug('checkTargets')
if self.localToonSpot != None:
for target in self.targets.values():
targetPos = target.getPos(render)
distVec = Vec3(targetPos - self.localToonBobPos)
dist = distVec.length()
if dist < target.getRadius():
self.notify.debug('checkTargets: hit target: %s' % target.getDoId())
self.d_hitTarget(target)
return Task.done
taskMgr.doMethodLater(self.pollInterval, self.checkTargets, self.taskName('checkTargets'))
else:
self.notify.warning('localToonSpot became None while checking targets')
return Task.done
def d_hitTarget(self, target):
self.localToonSpot.hitTarget()
self.sendUpdate('hitTarget', [target.getDoId()])
def setPondBingoManager(self, pondBingoMgr):
self.pondBingoMgr = pondBingoMgr
def removePondBingoManager(self):
del self.pondBingoMgr
self.pondBingoMgr = None
return
def getPondBingoManager(self):
return self.pondBingoMgr
def hasPondBingoManager(self):
return (self.pondBingoMgr and [1] or [0])[0]
def handleBingoCatch(self, catch):
if self.pondBingoMgr:
self.pondBingoMgr.setLastCatch(catch)
def handleBingoBoot(self):
if self.pondBingoMgr:
self.pondBingoMgr.handleBoot()
def cleanupBingoMgr(self):
if self.pondBingoMgr:
self.pondBingoMgr.cleanup()
def setLocalToonSpot(self, spot = None):
self.localToonSpot = spot
if spot is not None and spot.getDoId() not in self.visitedSpots:
self.visitedSpots[spot.getDoId()] = spot
return
def showBingoGui(self):
if self.pondBingoMgr:
self.pondBingoMgr.showCard()
def getLocalToonSpot(self):
return self.localToonSpot
def resetSpotGui(self):
for spot in self.visitedSpots.values():
spot.resetCastGui()
def setSpotGui(self):
for spot in self.visitedSpots.values():
spot.setCastGui()
|
{
"content_hash": "ba66a10b9096508e76d173f763b3affb",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 102,
"avg_line_length": 32.785714285714285,
"alnum_prop": 0.6509319777293634,
"repo_name": "ToontownUprising/src",
"id": "dd2db7bfb69a22ef5884d4aa02f4d076179cd0c7",
"size": "4131",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "toontown/fishing/DistributedFishingPond.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Perl",
"bytes": "36"
},
{
"name": "Python",
"bytes": "16244807"
},
{
"name": "Shell",
"bytes": "11615"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals

from gevent.server import StreamServer

from http_server import parse_and_respond, make_ERROR_bytestring,\
    make_OK_bytestring
from http_server import HttpServer


def generate_response(conn, addr):
    # Mostly identical to HttpServer.listen_and_respond,
    # but has different arguments and doesn't listen
    received_message = conn.recv(HttpServer._buffsize)
    print "Parsing message from", addr
    print received_message
    try:
        asset, asset_type, asset_length = \
            parse_and_respond(received_message)
    except IOError as err:
        if "Bad request" in err.message:
            response = make_ERROR_bytestring(400)
        elif "Bad method" in err.message:
            response = make_ERROR_bytestring(405)
        elif "Bad protocol" in err.message:
            response = make_ERROR_bytestring(505)
        elif "File not found" in err.message:
            response = make_ERROR_bytestring(404)
        else:
            response = make_ERROR_bytestring(500)
            print err
            # Internal server error: WE SHOULD NEVER GET THIS
    else:
        response = make_OK_bytestring(asset_type, asset_length) + asset

    conn.sendall(response)
    conn.close()


class ConcurrentServer(StreamServer):
    def __init__(self):
        StreamServer.__init__(self, ('127.0.0.1', 8888), generate_response)

    def runnicely(self):
        try:
            self.serve_forever()
        except KeyboardInterrupt:
            self.stop()
            print "Exiting Gracefully"


if __name__ == '__main__':
    server = ConcurrentServer()
    server.runnicely()
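
# Illustrative smoke test (not part of this module; run it from a separate
# process while the server above is listening). The request path below is an
# arbitrary example -- anything http_server.parse_and_respond can handle works.
#
#   import socket
#   s = socket.create_connection(('127.0.0.1', 8888))
#   s.sendall(b"GET /index.html HTTP/1.1\r\nHost: localhost\r\n\r\n")
#   print s.recv(4096)
#   s.close()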
|
{
"content_hash": "c739613b84a2db1465eb0b47dba149a5",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 75,
"avg_line_length": 31.653846153846153,
"alnum_prop": 0.6373025516403402,
"repo_name": "jbbrokaw/network-tools",
"id": "8536f7a8708c90257f76fb91f5b45dfd3925bd22",
"size": "1668",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "concurrent_servers.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14746"
}
],
"symlink_target": ""
}
|
from pkg_resources import get_distribution
__version__ = get_distribution("cloud-pso-hive-bigquery").version
|
{
"content_hash": "4602984d34751761f1a9e5671ee0eb89",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 65,
"avg_line_length": 27.75,
"alnum_prop": 0.7747747747747747,
"repo_name": "GoogleCloudPlatform/professional-services",
"id": "30105056d22f6fa7d9b98c2fd20f0e6e20930669",
"size": "687",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "tools/hive-bigquery/hive_to_bigquery/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C#",
"bytes": "117994"
},
{
"name": "C++",
"bytes": "174"
},
{
"name": "CSS",
"bytes": "13405"
},
{
"name": "Component Pascal",
"bytes": "798"
},
{
"name": "Dockerfile",
"bytes": "15093"
},
{
"name": "Go",
"bytes": "352968"
},
{
"name": "HCL",
"bytes": "204776"
},
{
"name": "HTML",
"bytes": "1229668"
},
{
"name": "Java",
"bytes": "338810"
},
{
"name": "JavaScript",
"bytes": "59905"
},
{
"name": "Jinja",
"bytes": "60083"
},
{
"name": "Makefile",
"bytes": "14129"
},
{
"name": "Python",
"bytes": "2250081"
},
{
"name": "Scala",
"bytes": "978327"
},
{
"name": "Shell",
"bytes": "109299"
},
{
"name": "Smarty",
"bytes": "19839"
},
{
"name": "TypeScript",
"bytes": "147194"
}
],
"symlink_target": ""
}
|
class CertificatesManager(object):
    """Manager for certificates."""

    def __init__(self, client):
        self._client = client

    def get_ca_certificate(self):
        """Get CA certificate.

        :returns: PEM-formatted string.
        :rtype: str

        """
        resp, body = self._client.get('/certificates/ca', authenticated=False)
        return resp.text

    def get_signing_certificate(self):
        """Get signing certificate.

        :returns: PEM-formatted string.
        :rtype: str

        """
        resp, body = self._client.get('/certificates/signing',
                                      authenticated=False)
        return resp.text
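

# Illustrative usage (not part of keystoneclient): the manager only needs an
# object whose get(url, authenticated=...) returns a (response, body) pair in
# which the response has a ``text`` attribute, so a minimal stand-in client is
# enough to exercise it. Everything below is a sketch, not production code.
if __name__ == '__main__':
    class _FakeResponse(object):
        text = '-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----'

    class _StubClient(object):
        def get(self, url, authenticated=True):
            return _FakeResponse(), None

    certs = CertificatesManager(_StubClient())
    print(certs.get_ca_certificate())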
|
{
"content_hash": "c8b9ff3367176637e7d578587fe0e2d5",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 78,
"avg_line_length": 24.178571428571427,
"alnum_prop": 0.5583456425406204,
"repo_name": "magic0704/python-keystoneclient",
"id": "b8d2573f0e206ad3a40d1e0787037da082944a40",
"size": "1246",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "keystoneclient/v2_0/certificates.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1536497"
},
{
"name": "Shell",
"bytes": "7107"
}
],
"symlink_target": ""
}
|
'''
New Integration Test for VM cirrus video mode.

@author: quarkonics
'''
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.operations.config_operations as conf_ops
import zstackwoodpecker.test_util as test_util

test_stub = test_lib.lib_get_test_stub()

_config_ = {
    'timeout' : 360,
    'noparallel' : False
}

vm = None
default_mode = None


def test():
    global vm
    global default_mode
    # default_mode = conf_ops.get_global_config_value('kvm', 'videoType')
    default_mode = conf_ops.change_global_config('vm', 'videoType', 'cirrus')
    vm = test_stub.create_vm()
    vm.check()

    vm_mode = test_lib.lib_get_vm_video_type(vm.get_vm())
    if vm_mode != 'cirrus':
        test_util.test_fail('VM is expected to work in cirrus mode instead of %s' % (vm_mode))

    vm.destroy()
    vm.check()
    conf_ops.change_global_config('vm', 'videoType', default_mode)
    test_util.test_pass('Create VM Test Success')


#Will be called only if exception happens in test().
def error_cleanup():
    conf_ops.change_global_config('vm', 'videoType', default_mode)
    global vm
    if vm:
        vm.destroy()
|
{
"content_hash": "5788d6b975ae89d9ed17968b8522cd95",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 91,
"avg_line_length": 28.78048780487805,
"alnum_prop": 0.638135593220339,
"repo_name": "zstackorg/zstack-woodpecker",
"id": "3d592562d46effbb212ca75f3e40746656073ea6",
"size": "1180",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "integrationtest/vm/basic/vga/test_vm_cirrus.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Go",
"bytes": "46522"
},
{
"name": "Makefile",
"bytes": "692"
},
{
"name": "Puppet",
"bytes": "875"
},
{
"name": "Python",
"bytes": "2891030"
},
{
"name": "Shell",
"bytes": "54266"
}
],
"symlink_target": ""
}
|
import argparse
import asyncio
import os
import time
import csv
import functools
from collections import namedtuple
from random import randint
from jsonpickle import json
from stp_core.loop.looper import Looper
from stp_core.common.log import getlogger
from plenum.common.types import HA
from plenum.common.util import randomString
from stp_core.network.port_dispenser import genHa
from plenum.common.signer_did import DidSigner
from plenum.common.constants import \
TARGET_NYM, TXN_TYPE, NYM, \
ROLE, RAW, NODE,\
DATA, ALIAS, CLIENT_IP, \
CLIENT_PORT
from plenum.test.helper import eventually
from plenum.test.test_client import \
getAcksFromInbox, getNacksFromInbox, getRepliesFromInbox
from indy_common.constants import ATTRIB, GET_ATTR
from indy_client.client.wallet.attribute import Attribute, LedgerStore
from indy_client.client.wallet.wallet import Wallet
from indy_client.client.client import Client
from indy_common.identity import Identity
from indy_common.constants import GET_NYM
logger = getlogger()
TTL = 120.0 # 60.0
CONNECTION_TTL = 30.0
RETRY_WAIT = 0.25
def parseArgs():
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--num_clients",
action="store",
type=int,
default=1,
dest="numberOfClients",
help="number of clients to use (set to -1 for all)")
parser.add_argument("-r", "--num_requests",
action="store",
type=int,
default=1,
dest="numberOfRequests",
help="number of clients to use")
parser.add_argument(
"-t",
"--request_type",
action="store",
type=str,
default="NYM",
dest="requestType",
help="type of requests to send, supported = NYM, GET_NYM, ATTRIB")
parser.add_argument("--at-once",
action='store_true',
dest="atOnce",
help="if set client send all request at once")
parser.add_argument("--timeout",
action="store",
type=int,
default=1,
dest="timeoutBetweenRequests",
help="number of seconds to sleep after each request")
parser.add_argument("--clients-list",
action="store",
default="{}/load_test_clients.list".format(
os.getcwd()),
dest="clientsListFilePath",
help="path to file with list of client names and keys")
parser.add_argument("--results-path",
action="store",
default=os.getcwd(),
dest="resultsPath",
help="output directory")
parser.add_argument("--skip-clients",
action="store",
type=int,
default=0,
dest="numberOfClientsToSkip",
help="number of clients to skip from clients list")
return parser.parse_args()
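# Illustrative invocations of this script (the clients-list path is only an
# example; see the argparse definitions above for the full option set):
#   python load_test.py -c 10 -r 100 -t NYM --clients-list ./load_test_clients.list
#   python load_test.py -t GET_NYM --at-once --results-path /tmp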
def createClientAndWalletWithSeed(name, seed, ha=None):
if isinstance(seed, str):
seed = seed.encode()
if not ha:
port = genHa()[1]
ha = HA('0.0.0.0', port)
wallet = Wallet(name)
wallet.addIdentifier(signer=DidSigner(seed=seed))
client = Client(name, ha=ha)
return client, wallet
class Rotator:
def __init__(self, collection):
self._collection = collection
self._index = 0
def __iter__(self):
return self
def __next__(self):
if len(self._collection) == 0:
raise StopIteration()
if self._index >= len(self._collection):
self._index = 0
x = self._collection[self._index]
self._index += 1
return x
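# Illustrative behaviour of Rotator (not executed anywhere in this script): it
# cycles through the wrapped sequence forever instead of raising StopIteration
# at the end, e.g.
#   ids = Rotator(['a', 'b', 'c'])
#   [next(ids) for _ in range(5)]  # -> ['a', 'b', 'c', 'a', 'b']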
class ClientPoll:
def __init__(self, filePath, limit=-1, skip=0):
self.__startPort = 5679
self.__filePath = filePath
self.__limit = limit
self.__skip = skip
self._clientsWallets = [self._spawnClient(name, seed)
for name, seed in self._readCredentials()]
@property
def clients(self):
for cli, _ in self._clientsWallets:
yield cli
@staticmethod
def randomRawAttr():
d = {"{}_{}".format(randomString(20), randint(100, 1000000)): "{}_{}".
format(randint(1000000, 1000000000000), randomString(50))}
return json.dumps(d)
def submitNym(self, reqsPerClient=1):
usedIdentifiers = set()
def newSigner():
while True:
signer = DidSigner()
idr = signer.identifier
if idr not in usedIdentifiers:
usedIdentifiers.add(idr)
return signer
def makeRequest(cli, wallet):
signer = newSigner()
idy = Identity(identifier=signer.identifier,
verkey=signer.verkey)
wallet.addTrustAnchoredIdentity(idy)
return self.submitGeneric(makeRequest, reqsPerClient)
def submitGetNym(self, reqsPerClient=1):
ids = Rotator([wallet.defaultId
for _, wallet in self._clientsWallets])
def makeRequest(cli, wallet):
op = {
TARGET_NYM: next(ids),
TXN_TYPE: GET_NYM,
}
req = wallet.signOp(op)
wallet.pendRequest(req)
return self.submitGeneric(makeRequest, reqsPerClient)
def submitSetAttr(self, reqsPerClient=1):
def makeRequest(cli, wallet):
attrib = Attribute(name=cli.name,
origin=wallet.defaultId,
value=self.randomRawAttr(),
ledgerStore=LedgerStore.RAW)
wallet.addAttribute(attrib)
return self.submitGeneric(makeRequest, reqsPerClient)
def submitGeneric(self, makeRequest, reqsPerClient):
corosArgs = []
for cli, wallet in self._clientsWallets:
for _ in range(reqsPerClient):
makeRequest(cli, wallet)
reqs = wallet.preparePending()
sentAt = time.time()
cli.submitReqs(*reqs)
for req in reqs:
corosArgs.append([cli, wallet, req, sentAt])
return corosArgs
def _readCredentials(self):
with open(self.__filePath, "r") as file:
creds = [line.strip().split(":") for line in file]
# A negative limit (the "-1 for all" case from the CLI) must not be used
# directly as a slice end, so translate it into an open-ended slice.
end = None if self.__limit < 0 else self.__skip + self.__limit
return map(lambda x: (x[0], str.encode(x[1])),
creds[self.__skip:end])
def _spawnClient(self, name, seed, host='0.0.0.0'):
self.__startPort += randint(100, 1000)
address = HA(host, self.__startPort)
logger.info("Seed for client {} is {}, "
"its len is {}".format(name, seed, len(seed)))
return createClientAndWalletWithSeed(name, seed, address)
resultsRowFieldNames = [
'signerName',
'signerId',
'dest',
'reqId',
'transactionType',
'sentAt',
'quorumAt',
'latency',
'ackNodes',
'nackNodes',
'replyNodes']
ResultRow = namedtuple('ResultRow', resultsRowFieldNames)
async def eventuallyAny(coroFunc, *args, retryWait: float = 0.01,
timeout: float = 5):
start = time.perf_counter()
def remaining():
return start + timeout - time.perf_counter()
remain = remaining()
data = None
while remain >= 0:
res = await coroFunc(*args)
(complete, data) = res
if complete:
return data
remain = remaining()
if remain > 0:
await asyncio.sleep(retryWait)
remain = remaining()
return data
async def checkReply(client, requestId, identifier):
hasConsensus = False
acks, nacks, replies = [], [], []
try:
# acks = client.reqRepStore.getAcks(requestId)
# nacks = client.reqRepStore.getNacks(requestId)
# replies = client.reqRepStore.getReplies(requestId)
acks = getAcksFromInbox(client, requestId)
nacks = getNacksFromInbox(client, requestId)
replies = getRepliesFromInbox(client, requestId)
hasConsensus = client.hasConsensus(identifier, requestId)
except KeyError:
logger.info("No replies for {}:{} yet".format(identifier, requestId))
except Exception as e:
logger.warn(
"Error occured during checking replies: {}".format(
repr(e)))
finally:
return hasConsensus, (hasConsensus, acks, nacks, replies)
async def checkReplyAndLogStat(client, wallet, request, sentAt, writeResultsRow, stats):
hasConsensus, ackNodes, nackNodes, replyNodes = \
await eventuallyAny(checkReply, client,
request.reqId, wallet.defaultId,
retryWait=RETRY_WAIT, timeout=TTL
)
endTime = time.time()
# TODO: only the first hasConsensus=True makes sense
quorumAt = endTime if hasConsensus else ""
latency = endTime - sentAt
row = ResultRow(signerName=wallet.name,
signerId=wallet.defaultId,
dest=request.operation.get('dest'),
reqId=request.reqId,
transactionType=request.operation['type'],
sentAt=sentAt,
quorumAt=quorumAt,
latency=latency,
ackNodes=",".join(ackNodes),
nackNodes=",".join(nackNodes.keys()),
replyNodes=",".join(replyNodes.keys()))
stats.append((latency, hasConsensus))
writeResultsRow(row._asdict())
def checkIfConnectedToAll(client):
connectedNodes = client.nodestack.connecteds
connectedNodesNum = len(connectedNodes)
totalNodes = len(client.nodeReg)
logger.info("Connected {} / {} nodes".
format(connectedNodesNum, totalNodes))
if connectedNodesNum == 0:
raise Exception("Not connected to any")
elif connectedNodesNum < totalNodes * 0.8:
raise Exception("Not connected fully")
else:
return True
def printCurrentTestResults(stats, testStartedAt):
totalNum = len(stats)
totalLatency = 0
successNum = 0
for lat, hasConsensus in stats:
totalLatency += lat
successNum += int(bool(hasConsensus))
avgLatency = totalLatency / totalNum if totalNum else 0.0
secSinceTestStart = time.time() - testStartedAt
failNum = totalNum - successNum
throughput = successNum / secSinceTestStart
errRate = failNum / secSinceTestStart
logger.info(
"""
================================
Test time: {}
Average latency: {}
Throughput: {}
Error rate: {}
Succeeded: {}
Failed: {}
================================
""".format(secSinceTestStart, avgLatency, throughput,
errRate, successNum, failNum)
)
def main(args):
resultsFileName = \
"perf_results_{x.numberOfClients}_" \
"{x.numberOfRequests}_{0}.csv".format(int(time.time()), x=args)
resultFilePath = os.path.join(args.resultsPath, resultsFileName)
logger.info("Results file: {}".format(resultFilePath))
def writeResultsRow(row):
if not os.path.exists(resultFilePath):
resultsFd = open(resultFilePath, "w")
resultsWriter = csv.DictWriter(
resultsFd, fieldnames=resultsRowFieldNames)
resultsWriter.writeheader()
resultsFd.close()
resultsFd = open(resultFilePath, "a")
resultsWriter = csv.DictWriter(
resultsFd, fieldnames=resultsRowFieldNames)
resultsWriter.writerow(row)
resultsFd.close()
stats = []
def buildCoros(coroFunc, corosArgs):
coros = []
for args in corosArgs:
argsExt = args + [writeResultsRow, stats]
coros.append(functools.partial(coroFunc, *argsExt))
return coros
clientPoll = ClientPoll(args.clientsListFilePath,
args.numberOfClients, args.numberOfClientsToSkip)
with Looper() as looper:
# connect
connectionCoros = []
for cli in clientPoll.clients:
looper.add(cli)
connectionCoros.append(
functools.partial(checkIfConnectedToAll, cli))
for coro in connectionCoros:
looper.run(eventually(coro,
timeout=CONNECTION_TTL,
retryWait=RETRY_WAIT,
verbose=False))
testStartedAt = time.time()
stats.clear()
requestType = args.requestType
sendRequests = {
"NYM": clientPoll.submitNym,
"GET_NYM": clientPoll.submitGetNym,
"ATTRIB": clientPoll.submitSetAttr,
"ATTR": clientPoll.submitSetAttr
}.get(requestType)
if sendRequests is None:
raise ValueError("Unsupported request type, "
"only NYM and ATTRIB/ATTR are supported")
def sendAndWaitReplies(numRequests):
corosArgs = sendRequests(numRequests)
coros = buildCoros(checkReplyAndLogStat, corosArgs)
for coro in coros:
task = eventually(coro,
retryWait=RETRY_WAIT,
timeout=TTL,
verbose=False)
looper.run(task)
printCurrentTestResults(stats, testStartedAt)
logger.info("Sent and waited for {} {} requests"
.format(len(coros), requestType))
if args.atOnce:
sendAndWaitReplies(numRequests=args.numberOfRequests)
else:
for i in range(args.numberOfRequests):
sendAndWaitReplies(numRequests=1)
if __name__ == '__main__':
commandLineArgs = parseArgs()
main(commandLineArgs)
|
{
"content_hash": "fe2a5fcdab9f40abde14c50e15fe2db1",
"timestamp": "",
"source": "github",
"line_count": 442,
"max_line_length": 88,
"avg_line_length": 32.3710407239819,
"alnum_prop": 0.5646491473301649,
"repo_name": "spivachuk/sovrin-node",
"id": "eb2d16eb1331408b166d92d4fc283d946ba9b76b",
"size": "14333",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scripts/load_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3329"
},
{
"name": "Dockerfile",
"bytes": "7269"
},
{
"name": "Groovy",
"bytes": "8984"
},
{
"name": "Makefile",
"bytes": "11151"
},
{
"name": "Python",
"bytes": "1681637"
},
{
"name": "Ruby",
"bytes": "65393"
},
{
"name": "Rust",
"bytes": "25532"
},
{
"name": "Shell",
"bytes": "132633"
}
],
"symlink_target": ""
}
|
''' LICENSE
Copyright (c) 2010--2015, Deep Learning Tutorials Development Team
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Theano nor the names of its contributors may be
used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
import cPickle
import gzip
import os
import numpy
from readcifar10 import ClassLabelToClassVector
def load_data(dataset):
''' Loads the dataset
:type dataset: string
:param dataset: the path to the dataset (here MNIST)
'''
# Download the MNIST dataset if it is not present
data_dir, data_file = os.path.split(dataset)
if data_dir == "" and not os.path.isfile(dataset):
# Check if dataset is in the data directory.
new_path = os.path.join(os.path.split(__file__)[0],
"data", data_file)
if os.path.isfile(new_path) or data_file == 'mnist.pkl.gz':
dataset = new_path
if (not os.path.isfile(dataset)) and data_file == 'mnist.pkl.gz':
import urllib
origin = ('http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz')
print 'Downloading data from %s' % origin
urllib.urlretrieve(origin, dataset)
print '... loading data'
# Load the dataset
f = gzip.open(dataset, 'rb')
train_set, validsetdontuse, test_set = cPickle.load(f)
f.close()
#train_set, validsetdontuse, test_set format: tuple(input, target)
#input is a numpy.ndarray of 2 dimensions (a matrix)
#whose rows correspond to examples. target is a
#numpy.ndarray of 1 dimension (a vector) that has the same length as
#the number of rows in the input. It gives the target
#for the example with the same index in the input.
def VectorizeLabelsY(theset):
thesetX, thesetY = theset
return (thesetX, ClassLabelToClassVector(thesetY,10)) #10 classes for MNIST
return (VectorizeLabelsY(train_set), VectorizeLabelsY(test_set))
#return (train_set, test_set)
#
# This is here because it also comes from Theano and is covered by the above license.
#
def scale_to_unit_interval(ndar, eps=1e-8):
""" Scales all values in the ndarray ndar to be between 0 and 1 """
ndar = ndar.copy()
ndar -= ndar.min()
ndar *= 1.0 / (ndar.max() + eps)
return ndar
def tile_raster_images(X, img_shape, tile_shape, tile_spacing=(1,1),
scale_rows_to_unit_interval=True,
output_pixel_vals=True):
"""
Transform an array with one flattened image per row, into an array in
which images are reshaped and laid out like tiles on a floor.
This function is useful for visualizing datasets whose rows are images,
and also columns of matrices for transforming those rows
(such as the first layer of a neural net).
:type X: a 2-D ndarray or a tuple of 4 channels, elements of which can
be 2-D ndarrays or None;
:param X: a 2-D array in which every row is a flattened image.
:type img_shape: tuple; (height, width)
:param img_shape: the original shape of each image
:type tile_shape: tuple; (rows, cols)
:param tile_shape: the number of images to tile (rows, cols)
:param output_pixel_vals: if output should be pixel values (i.e. uint8
values) or floats
:param scale_rows_to_unit_interval: if the values need to be scaled before
being plotted to [0,1] or not
:returns: array suitable for viewing as an image.
(See:`Image.fromarray`.)
:rtype: a 2-d array with same dtype as X.
"""
assert len(img_shape) == 2
assert len(tile_shape) == 2
assert len(tile_spacing) == 2
# The expression below can be re-written in a more C style as
# follows :
#
# out_shape = [0,0]
# out_shape[0] = (img_shape[0]+tile_spacing[0])*tile_shape[0] -
# tile_spacing[0]
# out_shape[1] = (img_shape[1]+tile_spacing[1])*tile_shape[1] -
# tile_spacing[1]
out_shape = [
(ishp + tsp) * tshp - tsp
for ishp, tshp, tsp in zip(img_shape, tile_shape, tile_spacing)
]
if isinstance(X, tuple):
#assert len(X) == 4
# Create an output numpy ndarray to store the image
if output_pixel_vals:
out_array = numpy.zeros((out_shape[0], out_shape[1], len(X)), dtype='uint8')
else:
out_array = numpy.zeros((out_shape[0], out_shape[1], len(X)), dtype=X[0].dtype)
#colors default to 0, alpha defaults to 1 (opaque)
if len(X) == 4:
if output_pixel_vals:
channel_defaults = [0, 0, 0, 255]
else:
channel_defaults = [0., 0., 0., 1.]
else:
if output_pixel_vals:
channel_defaults = [0 for i in range(len(X))]
else:
channel_defaults = [0. for i in range(len(X))]
for i in xrange(len(X)):
if X[i] is None:
# if channel is None, fill it with zeros of the correct
# dtype
dt = out_array.dtype
if output_pixel_vals:
dt = 'uint8'
out_array[:, :, i] = numpy.zeros(
out_shape,
dtype=dt
) + channel_defaults[i]
else:
# use a recurrent call to compute the channel and store it
# in the output
out_array[:, :, i] = tile_raster_images(
X[i], img_shape, tile_shape, tile_spacing,
scale_rows_to_unit_interval, output_pixel_vals)
return out_array
else:
# if we are dealing with only one channel
H, W = img_shape
Hs, Ws = tile_spacing
# generate a matrix to store the output
dt = X.dtype
if output_pixel_vals:
dt = 'uint8'
out_array = numpy.zeros(out_shape, dtype=dt)
###min_of_input = numpy.amin(X)
###max_of_input = numpy.amax(X)
for tile_row in xrange(tile_shape[0]):
for tile_col in xrange(tile_shape[1]):
if tile_row * tile_shape[1] + tile_col < X.shape[0]:
this_x = X[tile_row * tile_shape[1] + tile_col]
if scale_rows_to_unit_interval:
# if we should scale values to be between 0 and 1
# do this by calling the `scale_to_unit_interval`
# function
this_img = scale_to_unit_interval(this_x.reshape(img_shape))
###this_img = (this_x.reshape(img_shape) - min_of_input) / (max_of_input - min_of_input)
else:
this_img = this_x.reshape(img_shape)
# add the slice to the corresponding position in the
# output array
c = 1
if output_pixel_vals:
c = 255
out_array[
tile_row * (H + Hs): tile_row * (H + Hs) + H,
tile_col * (W + Ws): tile_col * (W + Ws) + W
] = this_img * c
return out_array
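# Illustrative use of tile_raster_images (not part of the original tutorial
# helpers): tile the first 100 MNIST digits into a single 10x10 mosaic. The
# dataset path and the PIL dependency are assumptions of this sketch.
#
#   (train_x, train_y), (test_x, test_y) = load_data('mnist.pkl.gz')
#   mosaic = tile_raster_images(train_x[:100], img_shape=(28, 28),
#                               tile_shape=(10, 10), tile_spacing=(1, 1))
#   import PIL.Image
#   PIL.Image.fromarray(mosaic).save('mnist_tiles.png')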
|
{
"content_hash": "d41a6f4817b0e7b5dfbf82ba8185f204",
"timestamp": "",
"source": "github",
"line_count": 211,
"max_line_length": 112,
"avg_line_length": 40.09004739336493,
"alnum_prop": 0.6011348859203216,
"repo_name": "jasonbunk/BunkNeuralNets",
"id": "99c64f37a36cd7521e395703ecbb9f8fb0727f49",
"size": "8459",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "theanos_MNIST_loader.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "62083"
},
{
"name": "Shell",
"bytes": "144"
}
],
"symlink_target": ""
}
|
from collections import deque
import random
import time


def play(buttons=3, speed=5, itr=10):
    gen_Order = deque(maxlen = itr)
    for i in xrange(itr):
        nextIn = random.randint(1,buttons)
        gen_Order.append(nextIn)
        print nextIn
        time.sleep(0.75/speed)
    while True:
        userIn = int(raw_input())
        if(not userIn == gen_Order.popleft()):
            lose()
            return
        elif(not gen_Order):
            win()
            return


def lose():
    print "You Lose"


def win():
    print "You Win"


play(5, 5, 5)
|
{
"content_hash": "50d6aaffa8b4aedf3c3ca32a358b96dc",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 40,
"avg_line_length": 15.03125,
"alnum_prop": 0.6548856548856549,
"repo_name": "SeismicPi/SeismicPi",
"id": "ca36851f3e7f7163e2e8be5177f27b272e2215ff",
"size": "481",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Lessons/PiTapMemory/game.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "10749998"
},
{
"name": "C++",
"bytes": "3478736"
},
{
"name": "Eagle",
"bytes": "1249368"
},
{
"name": "Makefile",
"bytes": "371965"
},
{
"name": "Python",
"bytes": "41746"
}
],
"symlink_target": ""
}
|
"""Serve pre-compressed static content from GridFS with aiohttp.
Requires Python 3.5 or later and aiohttp 3.0 or later.
Start a MongoDB server on its default port, run this script, and visit:
http://localhost:8080/fs/my_file
"""
# -- include-start --
import asyncio
import gzip
import tempfile
import aiohttp.web
from motor.aiohttp import AIOHTTPGridFS
from motor.motor_asyncio import AsyncIOMotorClient, AsyncIOMotorGridFSBucket
client = AsyncIOMotorClient()
# Use Motor to put compressed data in GridFS, with filename "my_file".
async def put_gridfile():
with tempfile.TemporaryFile() as tmp:
with gzip.GzipFile(mode="wb", fileobj=tmp) as gzfile:
for _ in range(10):
gzfile.write(b"Nonesuch nonsense\n")
gfs = AsyncIOMotorGridFSBucket(client.my_database)
tmp.seek(0)
await gfs.upload_from_stream(
filename="my_file", source=tmp, metadata={"contentType": "text", "compressed": True}
)
asyncio.run(put_gridfile())
# Add "Content-Encoding: gzip" header for compressed data.
def gzip_header(response, gridout):
if gridout.metadata.get("compressed"):
response.headers["Content-Encoding"] = "gzip"
gridfs_handler = AIOHTTPGridFS(client.my_database, set_extra_headers=gzip_header)
app = aiohttp.web.Application()
# The GridFS URL pattern must have a "{filename}" variable.
resource = app.router.add_resource("/fs/{filename}")
resource.add_route("GET", gridfs_handler)
resource.add_route("HEAD", gridfs_handler)
aiohttp.web.run_app(app)
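
# Illustrative client-side check (not part of the original example; run it from
# another process while the app above is serving). auto_decompress=False keeps
# aiohttp from transparently un-gzipping the body, so the Content-Encoding
# header added by gzip_header stays visible.
#
#   import aiohttp, asyncio
#
#   async def check():
#       async with aiohttp.ClientSession(auto_decompress=False) as session:
#           async with session.get("http://localhost:8080/fs/my_file") as resp:
#               print(resp.headers.get("Content-Encoding"))  # expected: gzip
#
#   asyncio.run(check())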
|
{
"content_hash": "bd6ce4aaa9b774742e5962d1ec73ac95",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 96,
"avg_line_length": 28.61111111111111,
"alnum_prop": 0.7126213592233009,
"repo_name": "mongodb/motor",
"id": "b0dddc6557d5dcc9ccd076d8f284cdf2cd352e1b",
"size": "1545",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "doc/examples/aiohttp_gridfs_example.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "607021"
},
{
"name": "Shell",
"bytes": "3406"
}
],
"symlink_target": ""
}
|
THEMES = ["classic", "vim", "dark vim", "midnight", "solarized", "agr-256"]
from pudb.py3compat import execfile, raw_input
import urwid
def get_palette(may_use_fancy_formats, theme="classic"):
if may_use_fancy_formats:
def add_setting(color, setting):
return color+","+setting
else:
def add_setting(color, setting):
return color
palette_dict = { # {{{ ui
"header": ("black", "light gray", "standout"),
"selectable": ("black", "dark cyan"),
"focused selectable": ("black", "dark green"),
"button": (add_setting("white", "bold"), "dark blue"),
"focused button": ("light cyan", "black"),
"dialog title": (add_setting("white", "bold"), "dark cyan"),
"background": ("black", "light gray"),
"hotkey": (add_setting("black", "underline"), "light gray", "underline"),
"focused sidebar": (add_setting("yellow", "bold"), "light gray", "standout"),
"warning": (add_setting("white", "bold"), "dark red", "standout"),
"label": ("black", "light gray"),
"value": (add_setting("yellow", "bold"), "dark blue"),
"fixed value": ("light gray", "dark blue"),
"group head": (add_setting("dark blue", "bold"), "light gray"),
"search box": ("black", "dark cyan"),
"search not found": ("white", "dark red"),
# }}}
# {{{ shell
"command line edit": (add_setting("yellow", "bold"), "dark blue"),
"command line prompt": (add_setting("white", "bold"), "dark blue"),
"command line output": ("light cyan", "dark blue"),
"command line input": (add_setting("light cyan", "bold"), "dark blue"),
"command line error": (add_setting("light red", "bold"), "dark blue"),
"focused command line output": ("black", "dark green"),
"focused command line input": (add_setting("light cyan", "bold"), "dark green"),
"focused command line error": ("black", "dark green"),
"command line clear button": (add_setting("white", "bold"), "dark blue"),
"command line focused button": ("light cyan", "black"),
# }}}
# {{{ source
"breakpoint": ("black", "dark cyan"),
"focused breakpoint": ("black", "dark green"),
"current breakpoint": (add_setting("white", "bold"), "dark cyan"),
"focused current breakpoint": (add_setting("white", "bold"), "dark green", "bold"),
"source": (add_setting("yellow", "bold"), "dark blue"),
"focused source": ("black", "dark green"),
"highlighted source": ("black", "dark magenta"),
"current source": ("black", "dark cyan"),
"current focused source": (add_setting("white", "bold"), "dark cyan"),
"current highlighted source": ("white", "dark cyan"),
# {{{ highlighting
"line number": ("light gray", "dark blue"),
"keyword": (add_setting("white", "bold"), "dark blue"),
"name": ("light cyan", "dark blue"),
"literal": ("light magenta, bold", "dark blue"),
"string": (add_setting("light magenta", "bold"), "dark blue"),
"doublestring": (add_setting("light magenta", "bold"), "dark blue"),
"singlestring": (add_setting("light magenta", "bold"), "dark blue"),
"docstring": (add_setting("light magenta", "bold"), "dark blue"),
"punctuation": ("light gray", "dark blue"),
"comment": ("light gray", "dark blue"),
# }}}
# }}}
# {{{ breakpoints
"breakpoint marker": ("dark red", "dark blue"),
"breakpoint source": (add_setting("yellow", "bold"), "dark red"),
"breakpoint focused source": ("black", "dark red"),
"current breakpoint source": ("black", "dark red"),
"current breakpoint focused source": ("white", "dark red"),
# }}}
# {{{ variables view
"variables": ("black", "dark cyan"),
"variable separator": ("dark cyan", "light gray"),
"var label": ("dark blue", "dark cyan"),
"var value": ("black", "dark cyan"),
"focused var label": ("dark blue", "dark green"),
"focused var value": ("black", "dark green"),
"highlighted var label": ("white", "dark cyan"),
"highlighted var value": ("black", "dark cyan"),
"focused highlighted var label": ("white", "dark green"),
"focused highlighted var value": ("black", "dark green"),
"return label": ("white", "dark blue"),
"return value": ("black", "dark cyan"),
"focused return label": ("light gray", "dark blue"),
"focused return value": ("black", "dark green"),
# }}}
# {{{ stack
"stack": ("black", "dark cyan"),
"frame name": ("black", "dark cyan"),
"focused frame name": ("black", "dark green"),
"frame class": ("dark blue", "dark cyan"),
"focused frame class": ("dark blue", "dark green"),
"frame location": ("light cyan", "dark cyan"),
"focused frame location": ("light cyan", "dark green"),
"current frame name": (add_setting("white", "bold"),
"dark cyan"),
"focused current frame name": (add_setting("white", "bold"),
"dark green", "bold"),
"current frame class": ("dark blue", "dark cyan"),
"focused current frame class": ("dark blue", "dark green"),
"current frame location": ("light cyan", "dark cyan"),
"focused current frame location": ("light cyan", "dark green"),
# }}}
}
if theme == "classic":
pass
elif theme == "vim":
# {{{ vim theme
palette_dict.update({
"source": ("black", "default"),
"keyword": ("brown", "default"),
"kw_namespace": ("dark magenta", "default"),
"literal": ("black", "default"),
"string": ("dark red", "default"),
"doublestring": ("dark red", "default"),
"singlestring": ("dark red", "default"),
"docstring": ("dark red", "default"),
"punctuation": ("black", "default"),
"comment": ("dark blue", "default"),
"classname": ("dark cyan", "default"),
"name": ("dark cyan", "default"),
"line number": ("dark gray", "default"),
"breakpoint marker": ("dark red", "default"),
# {{{ shell
"command line edit":
("black", "default"),
"command line prompt":
(add_setting("black", "bold"), "default"),
"command line output":
(add_setting("black", "bold"), "default"),
"command line input":
("black", "default"),
"command line error":
(add_setting("light red", "bold"), "default"),
"focused command line output":
("black", "dark green"),
"focused command line input":
(add_setting("light cyan", "bold"), "dark green"),
"focused command line error":
("black", "dark green"),
# }}}
})
# }}}
elif theme == "dark vim":
# {{{ dark vim
palette_dict.update({
"header": ("black", "light gray", "standout"),
# {{{ variables view
"variables": ("black", "dark gray"),
"variable separator": ("dark cyan", "light gray"),
"var label": ("light gray", "dark gray"),
"var value": ("white", "dark gray"),
"focused var label": ("light gray", "light blue"),
"focused var value": ("white", "light blue"),
"highlighted var label": ("light gray", "dark green"),
"highlighted var value": ("white", "dark green"),
"focused highlighted var label": ("light gray", "light blue"),
"focused highlighted var value": ("white", "light blue"),
"return label": ("light gray", "dark gray"),
"return value": ("light cyan", "dark gray"),
"focused return label": ("yellow", "light blue"),
"focused return value": ("white", "light blue"),
# }}}
# {{{ stack view
"stack": ("black", "dark gray"),
"frame name": ("light gray", "dark gray"),
"focused frame name": ("light gray", "light blue"),
"frame class": ("dark blue", "dark gray"),
"focused frame class": ("dark blue", "light blue"),
"frame location": ("white", "dark gray"),
"focused frame location": ("white", "light blue"),
"current frame name": (add_setting("white", "bold"),
"dark gray"),
"focused current frame name": (add_setting("white", "bold"),
"light blue", "bold"),
"current frame class": ("dark blue", "dark gray"),
"focused current frame class": ("dark blue", "dark green"),
"current frame location": ("light cyan", "dark gray"),
"focused current frame location": ("light cyan", "light blue"),
# }}}
# {{{ breakpoint view
"breakpoint": ("light gray", "dark gray"),
"focused breakpoint": ("light gray", "light blue"),
"current breakpoint": (add_setting("white", "bold"), "dark gray"),
"focused current breakpoint":
(add_setting("white", "bold"), "light blue"),
# }}}
# {{{ ui widgets
"selectable": ("light gray", "dark gray"),
"focused selectable": ("white", "light blue"),
"button": ("light gray", "dark gray"),
"focused button": ("white", "light blue"),
"background": ("black", "light gray"),
"hotkey": (add_setting("black", "underline"), "light gray", "underline"),
"focused sidebar": ("light blue", "light gray", "standout"),
"warning": (add_setting("white", "bold"), "dark red", "standout"),
"label": ("black", "light gray"),
"value": ("white", "dark gray"),
"fixed value": ("light gray", "dark gray"),
"search box": ("white", "dark gray"),
"search not found": ("white", "dark red"),
"dialog title": (add_setting("white", "bold"), "dark gray"),
# }}}
# {{{ source view
"breakpoint marker": ("dark red", "black"),
"breakpoint source": ("light gray", "dark red"),
"breakpoint focused source": ("black", "dark red"),
"current breakpoint source": ("black", "dark red"),
"current breakpoint focused source": ("white", "dark red"),
# }}}
# {{{ highlighting
"source": ("white", "black"),
"focused source": ("white", "light blue"),
"highlighted source": ("black", "dark magenta"),
"current source": ("black", "light gray"),
"current focused source": ("white", "dark cyan"),
"current highlighted source": ("white", "dark cyan"),
"line number": ("dark gray", "black"),
"keyword": ("yellow", "black"),
"literal": ("dark magenta", "black"),
"string": ("dark magenta", "black"),
"doublestring": ("dark magenta", "black"),
"singlestring": ("dark magenta", "black"),
"docstring": ("dark magenta", "black"),
"name": ("light cyan", "black"),
"punctuation": ("yellow", "black"),
"comment": ("light blue", "black"),
# }}}
# {{{ shell
"command line edit":
("white", "black"),
"command line prompt":
(add_setting("yellow", "bold"), "black"),
"command line output":
(add_setting("yellow", "bold"), "black"),
"command line input":
("white", "black"),
"command line error":
(add_setting("light red", "bold"), "black"),
"focused command line output":
("black", "light blue"),
"focused command line input":
(add_setting("light cyan", "bold"), "light blue"),
"focused command line error":
("black", "light blue"),
# }}}
})
# }}}
elif theme == "midnight":
# {{{ midnight
# Based on XCode's midnight theme
# Looks best in a console with green text against black background
palette_dict.update({
"variables": ("white", "default"),
"var label": ("light blue", "default"),
"var value": ("white", "default"),
"stack": ("white", "default"),
"frame name": ("white", "default"),
"frame class": ("dark blue", "default"),
"frame location": ("light cyan", "default"),
"current frame name": (add_setting("white", "bold"), "default"),
"current frame class": ("dark blue", "default"),
"current frame location": ("light cyan", "default"),
"focused frame name": ("black", "dark green"),
"focused frame class": (add_setting("white", "bold"), "dark green"),
"focused frame location": ("dark blue", "dark green"),
"focused current frame name": ("black", "dark green"),
"focused current frame class": (add_setting("white", "bold"), "dark green"),
"focused current frame location": ("dark blue", "dark green"),
"breakpoint": ("default", "default"),
"search box": ("default", "default"),
"source": ("white", "default"),
"highlighted source": ("white", "light cyan"),
"current source": ("white", "light gray"),
"current focused source": ("white", "brown"),
"line number": ("light gray", "default"),
"keyword": ("dark magenta", "default"),
"name": ("white", "default"),
"literal": ("dark cyan", "default"),
"string": ("dark red", "default"),
"doublestring": ("dark red", "default"),
"singlestring": ("light blue", "default"),
"docstring": ("light red", "default"),
"backtick": ("light green", "default"),
"punctuation": ("white", "default"),
"comment": ("dark green", "default"),
"classname": ("dark cyan", "default"),
"funcname": ("white", "default"),
"breakpoint marker": ("dark red", "default"),
# {{{ shell
"command line edit": ("white", "default"),
"command line prompt": (add_setting("white", "bold"), "default"),
"command line output": (add_setting("white", "bold"), "default"),
"command line input": (add_setting("white", "bold"), "default"),
"command line error": (add_setting("light red", "bold"), "default"),
"focused command line output": ("black", "dark green"),
"focused command line input": (add_setting("white", "bold"), "dark green"),
"focused command line error": ("black", "dark green"),
"command line clear button": (add_setting("white", "bold"), "default"),
"command line focused button": ("black", "light gray"), # White
# doesn't work in curses mode
# }}}
})
# }}}
elif theme == "solarized":
# {{{ solarized
palette_dict.update({
# UI
"header": ("black", "light blue", "standout"),
"focused sidebar": ("yellow", "light blue", "standout"),
"group head": ("black", "light blue"),
"background": ("black", "light blue"),
"label": ("black", "light blue"),
"value": ("white", "dark blue"),
"fixed value": ("black", "light blue"),
"variables": ("light blue", "default"),
"var label": ("dark blue", "default"),
"var value": ("light blue", "default"),
"focused var label": ("white", "dark blue"),
"focused var value": ("black", "dark blue"),
"highlighted var label": ("white", "light green"),
"highlighted var value": ("white", "light green"),
"focused highlighted var label": ("white", "light green"),
"focused highlighted var value": ("white", "light green"),
"stack": ("light blue", "default"),
"frame name": ("dark blue", "default"),
"frame class": ("light blue", "default"),
"frame location": ("light green", "default"),
"focused frame name": ("white", "dark blue"),
"focused frame class": ("black", "dark blue"),
"focused frame location": ("dark gray", "dark blue"),
"focused current frame name": ("white", "light green"),
"focused current frame class": ("black", "light green"),
"focused current frame location": ("dark gray", "light green"),
"current frame name": ("white", "light green"),
"current frame class": ("black", "light green"),
"current frame location": ("dark gray", "light green"),
# breakpoints
"breakpoint": ("light blue", "default"),
"focused breakpoint": ("white", "light green"),
"current breakpoint": ("white", "dark blue"),
"focused current breakpoint": ("white", "dark blue"),
# source
"breakpoint source": ("light blue", "black"),
"current breakpoint source": ("black", "light green"),
"breakpoint focused source": ("dark gray", "dark blue"),
"current breakpoint focused source": ("black", "light green"),
"breakpoint marker": ("dark red", "default"),
"search box": ("default", "default"),
"source": ("light blue", "default"),
"current source": ("light gray", "light blue"),
"current focused source": ("light gray", "light blue"),
"focused source": ("dark gray", "dark blue"),
"current highlighted source": ("black", "dark cyan"),
"highlighted source": ("light blue", "black"),
"line number": ("light blue", "default"),
"keyword": ("dark green", "default"),
"name": ("light blue", "default"),
"literal": ("dark cyan", "default"),
"string": ("dark cyan", "default"),
"doublestring": ("dark cyan", "default"),
"singlestring": ("light blue", "default"),
"docstring": ("dark cyan", "default"),
"backtick": ("light green", "default"),
"punctuation": ("light blue", "default"),
"comment": ("light green", "default"),
"classname": ("dark blue", "default"),
"funcname": ("dark blue", "default"),
# shell
"command line edit": ("light blue", "default"),
"command line prompt": ("light blue", "default"),
"command line output": ("light blue", "default"),
"command line input": ("light blue", "default"),
"command line error": ("dark red", "default"),
"focused command line output": ("black", "light green"),
"focused command line input": ("black", "light green"),
"focused command line error": ("dark red", "light blue"),
"command line clear button": ("light blue", "default"),
"command line focused button": ("black", "light blue"),
})
# }}}
elif theme == "agr-256":
# {{{ agr-256
palette_dict.update({
"header": ("h235", "h252", "standout"),
# {{{ variables view
"variables": ("h235", "h233"),
"variable separator": ("h23", "h252"),
"var label": ("h111", "h233"),
"var value": ("h255", "h233"),
"focused var label": ("h192", "h24"),
"focused var value": ("h192", "h24"),
"highlighted var label": ("h252", "h22"),
"highlighted var value": ("h255", "h22"),
"focused highlighted var label": ("h252", "h64"),
"focused highlighted var value": ("h255", "h64"),
"return label": ("h113", "h233"),
"return value": ("h113", "h233"),
"focused return label": (add_setting("h192", "bold"), "h24"),
"focused return value": ("h192", "h24"),
# }}}
# {{{ stack view
"stack": ("h235", "h233"),
"frame name": ("h192", "h233"),
"focused frame name": ("h192", "h24"),
"frame class": ("h111", "h233"),
"focused frame class": ("h192", "h24"),
"frame location": ("h252", "h233"),
"focused frame location": ("h192", "h24"),
"current frame name": ("h255", "h22"),
"focused current frame name": ("h255", "h64"),
"current frame class": ("h111", "h22"),
"focused current frame class": ("h255", "h64"),
"current frame location": ("h252", "h22"),
"focused current frame location": ("h255", "h64"),
# }}}
# {{{ breakpoint view
"breakpoint": ("h80", "h233"),
"focused breakpoint": ("h192", "h24"),
"current breakpoint": (add_setting("h255", "bold"), "h22"),
"focused current breakpoint": (add_setting("h255", "bold"), "h64"),
# }}}
# {{{ ui widgets
"selectable": ("h252", "h235"),
"focused selectable": ("h255", "h24"),
"button": ("h252", "h235"),
"focused button": ("h255", "h24"),
"background": ("h235", "h252"),
"hotkey": (add_setting("h235", "underline"), "h252", "underline"),
"focused sidebar": ("h23", "h252", "standout"),
"warning": (add_setting("h255", "bold"), "h124", "standout"),
"label": ("h235", "h252"),
"value": ("h255", "h17"),
"fixed value": ("h252", "h17"),
"group head": (add_setting("h25", "bold"), "h252"),
"search box": ("h255", "h235"),
"search not found": ("h255", "h124"),
"dialog title": (add_setting("h255", "bold"), "h235"),
# }}}
# {{{ source view
"breakpoint marker": ("h160", "h235"),
"breakpoint source": ("h252", "h124"),
"breakpoint focused source": ("h192", "h124"),
"current breakpoint source": ("h192", "h124"),
"current breakpoint focused source": (add_setting("h192", "bold"), "h124"),
# }}}
# {{{ highlighting
"source": ("h255", "h235"),
"focused source": ("h192", "h24"),
"highlighted source": ("h252", "h22"),
"current source": (add_setting("h252", "bold"), "h23"),
"current focused source": (add_setting("h192", "bold"), "h23"),
"current highlighted source": ("h255", "h22"),
"line number": ("h241", "h235"),
"keyword": ("h111", "h235"),
"literal": ("h173", "h235"),
"string": ("h113", "h235"),
"doublestring": ("h113", "h235"),
"singlestring": ("h113", "h235"),
"docstring": ("h113", "h235"),
"name": ("h192", "h235"),
"punctuation": ("h223", "h235"),
"comment": ("h246", "h235"),
# }}}
# {{{ shell
"command line edit": ("h255", "h233"),
"command line prompt": (add_setting("h192", "bold"), "h233"),
"command line output": ("h80", "h233"),
"command line input": ("h255", "h233"),
"command line error": ("h160", "h233"),
"focused command line output": (add_setting("h192", "bold"), "h24"),
"focused command line input": ("h255", "h24"),
"focused command line error": ("h235", "h24"),
"command line clear button": (add_setting("h255", "bold"), "h233"),
"command line focused button": ("h255", "h24"),
# }}}
})
# }}}
else:
try:
symbols = {
"palette": palette_dict,
"add_setting": add_setting,
}
from os.path import expanduser, expandvars
execfile(expanduser(expandvars(theme)), symbols)
except:
print("Error when importing theme:")
from traceback import print_exc
print_exc()
raw_input("Hit enter:")
palette_list = []
for setting_name, color_values in palette_dict.items():
fg_color = color_values[0].lower().strip()
bg_color = color_values[1].lower().strip()
# Convert hNNN syntax to equivalent #RGB value
# (https://github.com/wardi/urwid/issues/24)
if fg_color.startswith('h') or bg_color.startswith('h'):
attr = urwid.AttrSpec(fg_color, bg_color, colors=256)
palette_list.append((setting_name, 'default', 'default', 'default',
attr.foreground,
attr.background))
else:
palette_list.append((setting_name,) + color_values)
return palette_list
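# Illustrative consumption of the palette (not how pudb wires it up
# internally): urwid screens accept the returned list directly via
# register_palette. The raw_display screen below is just one possible choice.
#
#   import urwid.raw_display
#   screen = urwid.raw_display.Screen()
#   screen.register_palette(get_palette(may_use_fancy_formats=True, theme="classic"))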
# vim: foldmethod=marker
|
{
"content_hash": "375ae4758719f6e53f8776caf5c1c00f",
"timestamp": "",
"source": "github",
"line_count": 675,
"max_line_length": 91,
"avg_line_length": 37.69037037037037,
"alnum_prop": 0.49353405919578636,
"repo_name": "amigrave/pudb",
"id": "6eca6caff9e3a16a36db958017624b7d1283db82",
"size": "25441",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pudb/theme.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "187883"
},
{
"name": "Shell",
"bytes": "115"
}
],
"symlink_target": ""
}
|
import logging
import os
from gensim.corpora import Dictionary
from gensim.models import TfidfModel, LsiModel
from clustering_system.model.ModelABC import ModelABC
class Lsa(ModelABC):
"""Represent news articles as vectors using Latent Semantic Indexing."""
def __init__(self, dictionary: Dictionary, corpus=None, size: int = 200, decay: float = 1.0,
lsa_filename: str = None, tfidf_filename: str = None):
"""
:param dictionary: A dictionary
:param corpus: A corpus for training
:param size: The length of feature vector
:param decay: The decay parameter
:param lsa_filename: File name of a previously trained model
:param tfidf_filename: File name of a previously trained TF-IDF model
"""
super().__init__(size)
# Check if we have already trained the Tfidf model
if tfidf_filename is not None and os.path.exists(tfidf_filename):
self.tfidf = TfidfModel.load(tfidf_filename)
else:
self.tfidf = TfidfModel(dictionary=dictionary)
# Check if we have already trained the Lsa model
if lsa_filename is not None and os.path.exists(lsa_filename):
self.lsa = LsiModel.load(lsa_filename)
logging.info("LSA model loaded")
else:
if corpus is None:
raise ValueError("Corpus must be provided to train LSI")
# Process the corpus
corpus_tfidf = self.tfidf[corpus]
self.lsa = LsiModel(corpus=corpus_tfidf, id2word=dictionary, num_topics=size, onepass=True, decay=decay)
def update(self, documents):
"""
Update model using documents.
:param documents: The new documents used for update
"""
self.lsa.add_documents(documents)
def save(self, filename: str):
"""
Save model to a file.
:param filename: A model file name
"""
self.lsa.save(filename)
self.tfidf.save(filename + '.tfidf')
def _get_vector_representation(self, items):
"""
Represent documents as vectors.
:param items: A list of documents
:return: A list of feature vectors.
"""
return self.lsa[self.tfidf[items]]
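# Illustrative usage sketch (not part of the module): the corpus is expected in
# gensim bag-of-words form; the toy texts and file name below are made up.
#
#   from gensim.corpora import Dictionary
#   texts = [["stock", "market", "falls"], ["new", "stock", "listing"]]
#   dictionary = Dictionary(texts)
#   corpus = [dictionary.doc2bow(t) for t in texts]
#   lsa = Lsa(dictionary, corpus=corpus, size=2)
#   lsa.save("/tmp/news.lsa")  # also writes /tmp/news.lsa.tfidf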
|
{
"content_hash": "13b04b2be910c15e096bf546c1a1c038",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 116,
"avg_line_length": 33.4264705882353,
"alnum_prop": 0.6234051913770348,
"repo_name": "vanam/clustering",
"id": "5a9b06af03bb0bfd6bd329ba2ead1380d65fd098",
"size": "2273",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "clustering_system/model/Lsa.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "171258"
},
{
"name": "Shell",
"bytes": "7833"
}
],
"symlink_target": ""
}
|
"""
Copyright 2016 ElasticBox All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
import logging
import os
import sys
import unittest2
import xmlrunner
def get_parser():
parser = argparse.ArgumentParser(description='Run the ElasticKube test suite.')
parser.add_argument(
'-v',
'--verbose',
dest='verbose',
action='store_true',
help='Set verbosity value to 2.')
parser.add_argument(
'-d',
'--debug',
dest='debug',
action='store_true',
help='Set logging value to DEBUG.')
parser.add_argument(
'--junitxml',
dest='junitxml',
default='',
help='Output result in file')
parser.add_argument("files", nargs="*")
return parser
def run_tests(test_files, verbose=False, debug=False, output=None):
suite = unittest2.TestSuite()
# Load tests
discovery_folder = os.path.dirname(os.path.realpath(__file__))
if test_files:
for test_file in test_files:
search_folder = os.path.join(discovery_folder, test_file)
pattern = '*test*.py'
if os.path.isfile(search_folder):
pattern = os.path.basename(test_file)
search_folder = os.path.dirname(search_folder)
tests = unittest2.loader.defaultTestLoader.discover(
search_folder,
pattern=pattern)
suite.addTest(tests)
else:
tests = unittest2.loader.defaultTestLoader.discover(
discovery_folder,
pattern='*test*.py')
suite.addTest(tests)
# Set output verbosity
if verbose:
verbosity = 2
else:
verbosity = 1
# Set logging level
if debug:
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
else:
logging.basicConfig(stream=sys.stdout, level=logging.ERROR)
# Run tests
if output:
with open(output, 'wb') as output_file:
test_result = xmlrunner.XMLTestRunner(verbosity=verbosity, output=output_file).run(suite)
else:
test_result = unittest2.TextTestRunner(verbosity=verbosity).run(suite)
if not test_result.wasSuccessful():
sys.exit(1)
def main():
parser = get_parser()
args = parser.parse_args()
run_tests(args.files, args.verbose, args.debug, args.junitxml)
if __name__ == '__main__':
main()
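# Illustrative invocations (module paths are examples only; discovery matches
# any *test*.py under this directory when no files are given):
#   python run_tests.py -v
#   python run_tests.py --junitxml results.xml
#   python run_tests.py api/some_test.py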
|
{
"content_hash": "3546a7e229e12b86306a19d0e4cf05a1",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 101,
"avg_line_length": 27.186915887850468,
"alnum_prop": 0.63733241663802,
"repo_name": "ElasticBox/elastickube",
"id": "497e7fd35ec4244c657e073595253285fbdc89e6",
"size": "2931",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/tests/run_tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "523708"
},
{
"name": "HTML",
"bytes": "105664"
},
{
"name": "JavaScript",
"bytes": "403206"
},
{
"name": "Nginx",
"bytes": "2992"
},
{
"name": "Python",
"bytes": "266887"
},
{
"name": "Ruby",
"bytes": "1485"
},
{
"name": "Shell",
"bytes": "39391"
}
],
"symlink_target": ""
}
|
from django.shortcuts import render, redirect
from django.views import generic

from .models import CmisServer


class ServerList(generic.ListView):
    model = CmisServer


class ServerCreate(generic.edit.CreateView):
    model = CmisServer
    fields = ['server_name', 'server_address']
    success_url = '/'

    def get_context_data(self, **kwargs):
        context = super(ServerCreate, self).get_context_data(**kwargs)
        context['servers'] = CmisServer.objects.all()
        return context


class ServerUpdate(generic.edit.UpdateView):
    model = CmisServer
    fields = ['server_name', 'server_address']
    success_url = '/'

    def get_context_data(self, **kwargs):
        context = super(ServerUpdate, self).get_context_data(**kwargs)
        context['servers'] = CmisServer.objects.all()
        return context


class ServerDelete(generic.edit.DeleteView):
    model = CmisServer
    success_url = '/'
    template_name = 'servers/confirm_delete.html'

    def post(self, request, **kwargs):
        if 'cancel' in request.POST:
            return redirect('/')
        return super(ServerDelete, self).post(request, **kwargs)
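
# Illustrative URL wiring (belongs in a urls.py, not in this module; the names
# and prefixes are hypothetical and assume Django >= 2.0 for django.urls.path):
#
#   from django.urls import path
#   from servers import views
#
#   urlpatterns = [
#       path('', views.ServerList.as_view(), name='server-list'),
#       path('add/', views.ServerCreate.as_view(), name='server-add'),
#       path('<int:pk>/edit/', views.ServerUpdate.as_view(), name='server-edit'),
#       path('<int:pk>/delete/', views.ServerDelete.as_view(), name='server-delete'),
#   ]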
|
{
"content_hash": "0fb70fe000eeeae88206a7ea762b37de",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 62,
"avg_line_length": 30.68421052631579,
"alnum_prop": 0.6483704974271012,
"repo_name": "wandss/ExportingTool",
"id": "508b5d6e5cd34f07812ba74a6f127ddd2ffb6c93",
"size": "1184",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fnetET/servers/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "912"
},
{
"name": "HTML",
"bytes": "28274"
},
{
"name": "JavaScript",
"bytes": "5570"
},
{
"name": "Python",
"bytes": "29050"
}
],
"symlink_target": ""
}
|
import asyncio

import asyncio_redis

from elude import config
from elude.servers import BaseServer

REDIS_REQUEST_WIP_KEY = '_elude:request_wip'


class RedisServer(BaseServer):
    def __init__(self, proxy_gatherer, serialize_func, deserialize_func):
        super().__init__(proxy_gatherer)
        self.serialize = serialize_func
        self.deserialize = deserialize_func
        self._request_cache = {}
        self._conn = None

    @asyncio.coroutine
    def connect(self):
        if self._conn is None:
            self._conn = yield from asyncio_redis.Pool.create(host=config.SERVER_REDIS_HOST, port=config.SERVER_REDIS_PORT, password=config.SERVER_REDIS_PASSWORD, db=config.SERVER_REDIS_DB, poolsize=3)
        return self._conn

    @asyncio.coroutine
    def serve(self):
        conn = yield from self.connect()
        while True:
            request_obj_raw = yield from conn.brpoplpush(config.SERVER_REDIS_REQUEST_KEY, REDIS_REQUEST_WIP_KEY)
            try:
                request_obj = self.deserialize(request_obj_raw)
                self.put_request(request_obj)
            except ValueError:
                self.process_response({'id': None, 'error': {'code': -32700, 'message': 'Parse error'}})

        conn.close()

    def process_response(self, result):
        @asyncio.coroutine
        def really_process():
            conn = yield from self.connect()
            yield from conn.lpush(config.SERVER_REDIS_RESPONSE_KEY_PREFIX + str(result['id']), [self.serialize(result)])
            #yield from self.conn.lrem(REDIS_REQUEST_WIP_KEY, , -1)

        asyncio.async(really_process())
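
# Illustrative producer side (not part of this module): a client pushes a
# serialized request onto the request list and then blocks on the per-request
# response list. The key names come from elude.config; the JSON payload shape
# is an assumption based on the JSON-RPC style error emitted above.
#
#   import json, redis
#   from elude import config
#   r = redis.StrictRedis()
#   r.lpush(config.SERVER_REDIS_REQUEST_KEY,
#           json.dumps({'id': 1, 'method': 'fetch', 'params': ['http://example.com']}))
#   _, raw = r.brpop(config.SERVER_REDIS_RESPONSE_KEY_PREFIX + '1')
#   print(json.loads(raw))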
|
{
"content_hash": "cd3cebf023b126aa4835b87d6abae5f3",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 201,
"avg_line_length": 38.476190476190474,
"alnum_prop": 0.6386138613861386,
"repo_name": "leonth/elude",
"id": "265b9cba52e81f4f871ca6176fcfa13e57dde3b2",
"size": "1616",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "elude/servers/redis.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16443"
}
],
"symlink_target": ""
}
|
"""All core functions functionalities tests"""
# =============================================================================
# IMPORTS
# =============================================================================
from corral import core, VERSION
import mock
from .base import BaseTest
# =============================================================================
# BASE CLASS
# =============================================================================
class TestCore(BaseTest):
def test_get_version(self):
actual = core.get_version()
expected = VERSION
self.assertEqual(actual, expected)
def test_setup_environment(self):
with mock.patch("corral.db.setup") as setup:
with mock.patch("corral.db.load_models_module") as load_mm:
core.setup_environment()
self.assertTrue(setup.called)
self.assertTrue(load_mm.called)
|
{
"content_hash": "41c0be42ba8d32eab12d67bb9bc847a2",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 79,
"avg_line_length": 30.129032258064516,
"alnum_prop": 0.4207708779443255,
"repo_name": "toros-astro/corral",
"id": "7a80ecc593d53ee3f44a38022cb4ff9fe2622c00",
"size": "2715",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_core.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Mako",
"bytes": "988"
},
{
"name": "Python",
"bytes": "279543"
},
{
"name": "TeX",
"bytes": "228160"
}
],
"symlink_target": ""
}
|
{
'name': 'Warning Messages and Alerts',
'version': '1.0',
'category': 'Tools',
'description': """
Module to trigger warnings in OpenERP objects.
==============================================
Warning messages can be displayed for objects like sale order, purchase order,
picking and invoice. The message is triggered by the form's onchange event.
""",
'author': 'OpenERP SA',
'depends': ['base', 'sale_stock', 'purchase'],
'data': ['warning_view.xml'],
'demo': [],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
{
"content_hash": "70a2ee9db152922765f12edb2498206d",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 78,
"avg_line_length": 32.78947368421053,
"alnum_prop": 0.6035313001605136,
"repo_name": "cristianquaglio/odoo",
"id": "b1b4165d4d98147050eaf2a94bc6c4e768de8fb8",
"size": "1603",
"binary": false,
"copies": "255",
"ref": "refs/heads/master",
"path": "addons/warning/__openerp__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "9611"
},
{
"name": "C++",
"bytes": "108790"
},
{
"name": "CSS",
"bytes": "671328"
},
{
"name": "HTML",
"bytes": "212829"
},
{
"name": "JavaScript",
"bytes": "5984109"
},
{
"name": "Makefile",
"bytes": "12332"
},
{
"name": "Mako",
"bytes": "561"
},
{
"name": "PHP",
"bytes": "14033"
},
{
"name": "Python",
"bytes": "8366254"
},
{
"name": "Ruby",
"bytes": "220"
},
{
"name": "Shell",
"bytes": "19163"
},
{
"name": "Vim script",
"bytes": "406"
},
{
"name": "XSLT",
"bytes": "92945"
}
],
"symlink_target": ""
}
|
"""Tests for ceilometer/storage/impl_log.py
"""
from ceilometer.openstack.common import test
from ceilometer.storage import impl_log
class ConnectionTest(test.BaseTestCase):
def test_get_connection(self):
conn = impl_log.Connection(None)
conn.record_metering_data({'counter_name': 'test',
'resource_id': __name__,
'counter_volume': 1,
})
|
{
"content_hash": "9eefde83c5e17b1b26b6d92187192950",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 59,
"avg_line_length": 33.285714285714285,
"alnum_prop": 0.5493562231759657,
"repo_name": "nttdata-osscloud/ceilometer",
"id": "10062c87825f17b66cb53ff23920142e8a1071dc",
"size": "1152",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ceilometer/tests/storage/test_impl_log.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
import unittest
from robot.utils.asserts import *
from robot.utils.text import cut_long_message, _count_line_lengths, \
_MAX_ERROR_LINES, _MAX_ERROR_LINE_LENGTH, _ERROR_CUT_EXPLN,\
get_console_length, pad_console_length
class NoCutting(unittest.TestCase):
def test_empty_string(self):
self._assert_no_cutting('')
def test_short_message(self):
self._assert_no_cutting('bar')
def test_few_short_lines(self):
self._assert_no_cutting('foo\nbar\zap\hello World!')
def test_max_number_of_short_lines(self):
self._assert_no_cutting('short line\n' * _MAX_ERROR_LINES)
def _assert_no_cutting(self, msg):
assert_equal(cut_long_message(msg), msg)
class TestCutting(unittest.TestCase):
def setUp(self):
self.lines = [ 'my error message %d' % i for i in range(_MAX_ERROR_LINES+1) ]
self.result = cut_long_message('\n'.join(self.lines)).splitlines()
self.limit = _MAX_ERROR_LINES/2
def test_more_than_max_number_of_lines(self):
assert_equal(len(self.result), _MAX_ERROR_LINES+1)
def test_cut_message_is_present(self):
assert_true(_ERROR_CUT_EXPLN in self.result)
def test_cut_message_starts_with_original_lines(self):
expected = self.lines[:self.limit]
actual = self.result[:self.limit]
assert_equal(actual, expected)
def test_cut_message_ends_with_original_lines(self):
expected = self.lines[-self.limit:]
actual = self.result[-self.limit:]
assert_equal(actual, expected)
class TestCuttingWithLinesLongerThanMax(unittest.TestCase):
def setUp(self):
self.lines = ['line %d' % i for i in range(_MAX_ERROR_LINES-1)]
self.lines.append('x' * (_MAX_ERROR_LINE_LENGTH+1) )
self.result = cut_long_message('\n'.join(self.lines)).splitlines()
def test_cut_message_present(self):
assert_true(_ERROR_CUT_EXPLN in self.result)
def test_correct_number_of_lines(self):
assert_equal(sum(_count_line_lengths(self.result)), _MAX_ERROR_LINES+1)
def test_correct_lines(self):
        expected = self.lines[:_MAX_ERROR_LINES/2] + [_ERROR_CUT_EXPLN] \
                   + self.lines[-_MAX_ERROR_LINES/2+1:]
        assert_equal(self.result, expected)
def test_every_line_longer_than_limit(self):
# sanity check
lines = [('line %d' % i) * _MAX_ERROR_LINE_LENGTH for i in range(_MAX_ERROR_LINES+2)]
result = cut_long_message('\n'.join(lines)).splitlines()
assert_true(_ERROR_CUT_EXPLN in result)
assert_equal(result[0], lines[0])
assert_equal(result[-1], lines[-1])
assert_true(sum(_count_line_lengths(result)) <= _MAX_ERROR_LINES+1)
class TestCutHappensInsideLine(unittest.TestCase):
def test_long_line_cut_before_cut_message(self):
lines = ['line %d' % i for i in range(_MAX_ERROR_LINES)]
index = _MAX_ERROR_LINES/2-1
lines[index] = 'abcdefgh' * _MAX_ERROR_LINE_LENGTH
result = cut_long_message('\n'.join(lines)).splitlines()
self._assert_basics(result, lines)
expected = lines[index][:_MAX_ERROR_LINE_LENGTH-3] + '...'
assert_equal(result[index], expected)
def test_long_line_cut_after_cut_message(self):
lines = ['line %d' % i for i in range(_MAX_ERROR_LINES)]
index = _MAX_ERROR_LINES/2
lines[index] = 'abcdefgh' * _MAX_ERROR_LINE_LENGTH
result = cut_long_message('\n'.join(lines)).splitlines()
self._assert_basics(result, lines)
expected = '...' + lines[index][-_MAX_ERROR_LINE_LENGTH+3:]
assert_equal(result[index+1], expected)
def test_one_huge_line(self):
result = cut_long_message('0123456789' * _MAX_ERROR_LINES * _MAX_ERROR_LINE_LENGTH)
self._assert_basics(result.splitlines())
assert_true(result.startswith('0123456789'))
assert_true(result.endswith('0123456789'))
assert_true('...\n'+_ERROR_CUT_EXPLN+'\n...' in result)
def _assert_basics(self, result, input=None):
assert_equal(sum(_count_line_lengths(result)), _MAX_ERROR_LINES+1)
assert_true(_ERROR_CUT_EXPLN in result)
if input:
assert_equal(result[0], input[0])
assert_equal(result[-1], input[-1])
class TestCountLines(unittest.TestCase):
def test_no_lines(self):
assert_equal(_count_line_lengths([]), [])
def test_empty_line(self):
assert_equal(_count_line_lengths(['']), [1])
def test_shorter_than_max_lines(self):
lines = ['', '1', 'foo', 'barz and fooz', '', 'a bit longer line', '',
'This is a somewhat longer (but not long enough) error message']
assert_equal(_count_line_lengths(lines), [1] * len(lines))
def test_longer_than_max_lines(self):
lines = [ '1' * i * (_MAX_ERROR_LINE_LENGTH+3) for i in range(4) ]
assert_equal(_count_line_lengths(lines), [1,2,3,4])
def test_boundary(self):
b = _MAX_ERROR_LINE_LENGTH
lengths = [b-1, b, b+1, 2*b-1, 2*b, 2*b+1, 7*b-1, 7*b, 7*b+1]
lines = [ 'e'*length for length in lengths ]
assert_equal(_count_line_lengths(lines), [1, 1, 2, 2, 2, 3, 7, 7, 8])
class TestConsoleWidth(unittest.TestCase):
len16_asian = u'\u6c49\u5b57\u5e94\u8be5\u6b63\u786e\u5bf9\u9f50'
ten_normal = u'1234567890'
mixed_26 = u'012345\u6c49\u5b57\u5e94\u8be5\u6b63\u786e\u5bf9\u9f567890'
nfd = u'A\u030Abo'
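    # len16_asian: eight double-width CJK characters (console width 16);
    # nfd: 'A' plus a zero-width combining ring, then 'bo' (console width 3).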
def test_console_width(self):
assert_equal(get_console_length(self.ten_normal), 10)
def test_east_asian_width(self):
assert_equal(get_console_length(self.len16_asian), 16)
def test_combining_width(self):
assert_equal(get_console_length(self.nfd), 3)
def test_cut_right(self):
assert_equal(pad_console_length(self.ten_normal, 5), '12...')
assert_equal(pad_console_length(self.ten_normal, 15), self.ten_normal+' '*5)
assert_equal(pad_console_length(self.ten_normal, 10), self.ten_normal)
def test_cut_east_asian(self):
assert_equal(pad_console_length(self.len16_asian, 10), u'\u6c49\u5b57\u5e94... ')
assert_equal(pad_console_length(self.mixed_26, 11), u'012345\u6c49...')
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "96e383904c77ae231f4c9234b580f318",
"timestamp": "",
"source": "github",
"line_count": 165,
"max_line_length": 93,
"avg_line_length": 37.85454545454545,
"alnum_prop": 0.6288824847902658,
"repo_name": "kyle1986/robortframe",
"id": "74ef2249b7c414e5f45dabc91ab6fdc1c9006cc9",
"size": "6246",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "utest/utils/test_text.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "210"
},
{
"name": "CSS",
"bytes": "22850"
},
{
"name": "HTML",
"bytes": "137580"
},
{
"name": "Java",
"bytes": "59216"
},
{
"name": "JavaScript",
"bytes": "160117"
},
{
"name": "Python",
"bytes": "2072305"
},
{
"name": "RobotFramework",
"bytes": "1929991"
},
{
"name": "Shell",
"bytes": "281"
}
],
"symlink_target": ""
}
|
import atexit
import collections
import contextlib
import ctypes
import logging
import os
import platform
import re
import socket
import struct
import subprocess
import sys
import time
import zipfile
from telemetry.core import exceptions
from telemetry.core.platform import desktop_platform_backend
from telemetry.core.platform import platform_backend
from telemetry.core.platform.power_monitor import msr_power_monitor
from telemetry.core import util
from telemetry import decorators
from telemetry.util import cloud_storage
from telemetry.util import path
try:
import pywintypes # pylint: disable=F0401
import win32api # pylint: disable=F0401
from win32com.shell import shell # pylint: disable=F0401,E0611
from win32com.shell import shellcon # pylint: disable=F0401,E0611
import win32con # pylint: disable=F0401
import win32gui # pylint: disable=F0401
import win32process # pylint: disable=F0401
import win32security # pylint: disable=F0401
except ImportError:
pywintypes = None
shell = None
shellcon = None
win32api = None
win32con = None
win32gui = None
win32process = None
win32security = None
def _InstallWinRing0():
"""WinRing0 is used for reading MSRs."""
executable_dir = os.path.dirname(sys.executable)
python_is_64_bit = sys.maxsize > 2 ** 32
dll_file_name = 'WinRing0x64.dll' if python_is_64_bit else 'WinRing0.dll'
dll_path = os.path.join(executable_dir, dll_file_name)
os_is_64_bit = platform.machine().endswith('64')
driver_file_name = 'WinRing0x64.sys' if os_is_64_bit else 'WinRing0.sys'
driver_path = os.path.join(executable_dir, driver_file_name)
# Check for WinRing0 and download if needed.
if not (os.path.exists(dll_path) and os.path.exists(driver_path)):
win_binary_dir = os.path.join(
path.GetTelemetryDir(), 'bin', 'win', 'AMD64')
zip_path = os.path.join(win_binary_dir, 'winring0.zip')
cloud_storage.GetIfChanged(zip_path, bucket=cloud_storage.PUBLIC_BUCKET)
try:
with zipfile.ZipFile(zip_path, 'r') as zip_file:
error_message = (
'Failed to extract %s into %s. If python claims that '
'the zip file is locked, this may be a lie. The problem may be '
'that python does not have write permissions to the destination '
'directory.'
)
# Install DLL.
if not os.path.exists(dll_path):
try:
zip_file.extract(dll_file_name, executable_dir)
except:
logging.error(error_message % (dll_file_name, executable_dir))
raise
# Install kernel driver.
if not os.path.exists(driver_path):
try:
zip_file.extract(driver_file_name, executable_dir)
except:
logging.error(error_message % (driver_file_name, executable_dir))
raise
finally:
os.remove(zip_path)
def TerminateProcess(process_handle):
if not process_handle:
return
if win32process.GetExitCodeProcess(process_handle) == win32con.STILL_ACTIVE:
win32process.TerminateProcess(process_handle, 0)
process_handle.close()
class WinPlatformBackend(desktop_platform_backend.DesktopPlatformBackend):
def __init__(self):
super(WinPlatformBackend, self).__init__()
self._msr_server_handle = None
self._msr_server_port = None
self._power_monitor = msr_power_monitor.MsrPowerMonitor(self)
@classmethod
def IsPlatformBackendForHost(cls):
return sys.platform == 'win32'
def __del__(self):
self.close()
def close(self):
self.CloseMsrServer()
def CloseMsrServer(self):
if not self._msr_server_handle:
return
TerminateProcess(self._msr_server_handle)
self._msr_server_handle = None
self._msr_server_port = None
def IsThermallyThrottled(self):
raise NotImplementedError()
def HasBeenThermallyThrottled(self):
raise NotImplementedError()
def GetSystemCommitCharge(self):
performance_info = self._GetPerformanceInfo()
return performance_info.CommitTotal * performance_info.PageSize / 1024
@decorators.Cache
def GetSystemTotalPhysicalMemory(self):
performance_info = self._GetPerformanceInfo()
return performance_info.PhysicalTotal * performance_info.PageSize / 1024
def GetCpuStats(self, pid):
cpu_info = self._GetWin32ProcessInfo(win32process.GetProcessTimes, pid)
# Convert 100 nanosecond units to seconds
cpu_time = (cpu_info['UserTime'] / 1e7 +
cpu_info['KernelTime'] / 1e7)
return {'CpuProcessTime': cpu_time}
def GetCpuTimestamp(self):
"""Return current timestamp in seconds."""
return {'TotalTime': time.time()}
def GetMemoryStats(self, pid):
memory_info = self._GetWin32ProcessInfo(
win32process.GetProcessMemoryInfo, pid)
return {'VM': memory_info['PagefileUsage'],
'VMPeak': memory_info['PeakPagefileUsage'],
'WorkingSetSize': memory_info['WorkingSetSize'],
'WorkingSetSizePeak': memory_info['PeakWorkingSetSize']}
def KillProcess(self, pid, kill_process_tree=False):
# os.kill for Windows is Python 2.7.
cmd = ['taskkill', '/F', '/PID', str(pid)]
if kill_process_tree:
cmd.append('/T')
subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT).communicate()
def GetSystemProcessInfo(self):
# [3:] To skip 2 blank lines and header.
lines = subprocess.Popen(
['wmic', 'process', 'get',
'CommandLine,CreationDate,Name,ParentProcessId,ProcessId',
'/format:csv'],
stdout=subprocess.PIPE).communicate()[0].splitlines()[3:]
process_info = []
for line in lines:
if not line:
continue
parts = line.split(',')
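      # The CommandLine column may itself contain commas, so read the fixed
      # fields from the end of the row and rejoin the remainder below.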
pi = {}
pi['ProcessId'] = int(parts[-1])
pi['ParentProcessId'] = int(parts[-2])
pi['Name'] = parts[-3]
creation_date = None
if parts[-4]:
creation_date = float(re.split('[+-]', parts[-4])[0])
pi['CreationDate'] = creation_date
pi['CommandLine'] = ','.join(parts[1:-4])
process_info.append(pi)
return process_info
def GetChildPids(self, pid):
"""Retunds a list of child pids of |pid|."""
ppid_map = collections.defaultdict(list)
creation_map = {}
for pi in self.GetSystemProcessInfo():
ppid_map[pi['ParentProcessId']].append(pi['ProcessId'])
if pi['CreationDate']:
creation_map[pi['ProcessId']] = pi['CreationDate']
def _InnerGetChildPids(pid):
if not pid or pid not in ppid_map:
return []
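      # Only count processes created at or after the parent; this guards
      # against unrelated processes that reuse a recycled parent PID.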
ret = [p for p in ppid_map[pid] if creation_map[p] >= creation_map[pid]]
for child in ret:
if child == pid:
continue
ret.extend(_InnerGetChildPids(child))
return ret
return _InnerGetChildPids(pid)
def GetCommandLine(self, pid):
for pi in self.GetSystemProcessInfo():
if pid == pi['ProcessId']:
return pi['CommandLine']
raise exceptions.ProcessGoneException()
@decorators.Cache
def GetArchName(self):
return platform.machine()
def GetOSName(self):
return 'win'
@decorators.Cache
def GetOSVersionName(self):
os_version = platform.uname()[3]
if os_version.startswith('5.1.'):
return platform_backend.XP
if os_version.startswith('6.0.'):
return platform_backend.VISTA
if os_version.startswith('6.1.'):
return platform_backend.WIN7
if os_version.startswith('6.2.'):
return platform_backend.WIN8
raise NotImplementedError('Unknown win version %s.' % os_version)
def CanFlushIndividualFilesFromSystemCache(self):
return True
def _GetWin32ProcessInfo(self, func, pid):
mask = (win32con.PROCESS_QUERY_INFORMATION |
win32con.PROCESS_VM_READ)
handle = None
try:
handle = win32api.OpenProcess(mask, False, pid)
return func(handle)
except pywintypes.error, e:
errcode = e[0]
if errcode == 87:
raise exceptions.ProcessGoneException()
raise
finally:
if handle:
win32api.CloseHandle(handle)
def _GetPerformanceInfo(self):
class PerformanceInfo(ctypes.Structure):
"""Struct for GetPerformanceInfo() call
http://msdn.microsoft.com/en-us/library/ms683210
"""
_fields_ = [('size', ctypes.c_ulong),
('CommitTotal', ctypes.c_size_t),
('CommitLimit', ctypes.c_size_t),
('CommitPeak', ctypes.c_size_t),
('PhysicalTotal', ctypes.c_size_t),
('PhysicalAvailable', ctypes.c_size_t),
('SystemCache', ctypes.c_size_t),
('KernelTotal', ctypes.c_size_t),
('KernelPaged', ctypes.c_size_t),
('KernelNonpaged', ctypes.c_size_t),
('PageSize', ctypes.c_size_t),
('HandleCount', ctypes.c_ulong),
('ProcessCount', ctypes.c_ulong),
('ThreadCount', ctypes.c_ulong)]
def __init__(self):
self.size = ctypes.sizeof(self)
# pylint: disable=bad-super-call
super(PerformanceInfo, self).__init__()
performance_info = PerformanceInfo()
ctypes.windll.psapi.GetPerformanceInfo(
ctypes.byref(performance_info), performance_info.size)
return performance_info
def IsCurrentProcessElevated(self):
if self.GetOSVersionName() < platform_backend.VISTA:
# TOKEN_QUERY is not defined before Vista. All processes are elevated.
return True
handle = win32process.GetCurrentProcess()
with contextlib.closing(
win32security.OpenProcessToken(handle, win32con.TOKEN_QUERY)) as token:
return bool(win32security.GetTokenInformation(
token, win32security.TokenElevation))
def LaunchApplication(
self, application, parameters=None, elevate_privilege=False):
"""Launch an application. Returns a PyHANDLE object."""
parameters = ' '.join(parameters) if parameters else ''
if elevate_privilege and not self.IsCurrentProcessElevated():
# Use ShellExecuteEx() instead of subprocess.Popen()/CreateProcess() to
# elevate privileges. A new console will be created if the new process has
# different permissions than this process.
proc_info = shell.ShellExecuteEx(
fMask=shellcon.SEE_MASK_NOCLOSEPROCESS | shellcon.SEE_MASK_NO_CONSOLE,
lpVerb='runas' if elevate_privilege else '',
lpFile=application,
lpParameters=parameters,
nShow=win32con.SW_HIDE)
if proc_info['hInstApp'] <= 32:
raise Exception('Unable to launch %s' % application)
return proc_info['hProcess']
else:
handle, _, _, _ = win32process.CreateProcess(
None, application + ' ' + parameters, None, None, False,
win32process.CREATE_NO_WINDOW, None, None, win32process.STARTUPINFO())
return handle
def CanMonitorPower(self):
return self._power_monitor.CanMonitorPower()
def CanMeasurePerApplicationPower(self):
return self._power_monitor.CanMeasurePerApplicationPower()
def StartMonitoringPower(self, browser):
self._power_monitor.StartMonitoringPower(browser)
def StopMonitoringPower(self):
return self._power_monitor.StopMonitoringPower()
def _StartMsrServerIfNeeded(self):
if self._msr_server_handle:
return
_InstallWinRing0()
self._msr_server_port = util.GetUnreservedAvailableLocalPort()
# It might be flaky to get a port number without reserving it atomically,
# but if the server process chooses a port, we have no way of getting it.
# The stdout of the elevated process isn't accessible.
parameters = (
os.path.join(os.path.dirname(__file__), 'msr_server_win.py'),
str(self._msr_server_port),
)
self._msr_server_handle = self.LaunchApplication(
sys.executable, parameters, elevate_privilege=True)
# Wait for server to start.
try:
socket.create_connection(('127.0.0.1', self._msr_server_port), 5).close()
except socket.error:
self.CloseMsrServer()
atexit.register(TerminateProcess, self._msr_server_handle)
def ReadMsr(self, msr_number, start=0, length=64):
self._StartMsrServerIfNeeded()
if not self._msr_server_handle:
raise OSError('Unable to start MSR server.')
sock = socket.create_connection(('127.0.0.1', self._msr_server_port), 0.1)
try:
sock.sendall(struct.pack('I', msr_number))
response = sock.recv(8)
finally:
sock.close()
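    # The reply is the raw 64-bit MSR value; keep `length` bits starting at
    # bit `start`.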
return struct.unpack('Q', response)[0] >> start & ((1 << length) - 1)
def IsCooperativeShutdownSupported(self):
return True
def CooperativelyShutdown(self, proc, app_name):
pid = proc.pid
# http://timgolden.me.uk/python/win32_how_do_i/
# find-the-window-for-my-subprocess.html
#
# It seems that intermittently this code manages to find windows
# that don't belong to Chrome -- for example, the cmd.exe window
# running slave.bat on the tryservers. Try to be careful about
# finding only Chrome's windows. This works for both the browser
# and content_shell.
#
# It seems safest to send the WM_CLOSE messages after discovering
# all of the sub-process's windows.
def find_chrome_windows(hwnd, hwnds):
_, win_pid = win32process.GetWindowThreadProcessId(hwnd)
if (pid == win_pid and
win32gui.IsWindowVisible(hwnd) and
win32gui.IsWindowEnabled(hwnd) and
win32gui.GetClassName(hwnd).lower().startswith(app_name)):
hwnds.append(hwnd)
return True
hwnds = []
win32gui.EnumWindows(find_chrome_windows, hwnds)
if hwnds:
for hwnd in hwnds:
win32gui.SendMessage(hwnd, win32con.WM_CLOSE, 0, 0)
return True
else:
logging.info('Did not find any windows owned by target process')
return False
|
{
"content_hash": "57e28de508abb14a77223479e6f174b6",
"timestamp": "",
"source": "github",
"line_count": 402,
"max_line_length": 80,
"avg_line_length": 34.43532338308458,
"alnum_prop": 0.6626453803366322,
"repo_name": "Fireblend/chromium-crosswalk",
"id": "938414e87d3a1a52238cae6f992d01393fd3491c",
"size": "14006",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tools/telemetry/telemetry/core/platform/win_platform_backend.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "34367"
},
{
"name": "Batchfile",
"bytes": "8451"
},
{
"name": "C",
"bytes": "9397825"
},
{
"name": "C++",
"bytes": "235052525"
},
{
"name": "CSS",
"bytes": "951745"
},
{
"name": "DM",
"bytes": "60"
},
{
"name": "Emacs Lisp",
"bytes": "988"
},
{
"name": "Groff",
"bytes": "2494"
},
{
"name": "HTML",
"bytes": "29070071"
},
{
"name": "Java",
"bytes": "10089056"
},
{
"name": "JavaScript",
"bytes": "20170506"
},
{
"name": "Makefile",
"bytes": "68234"
},
{
"name": "Objective-C",
"bytes": "1639405"
},
{
"name": "Objective-C++",
"bytes": "9478782"
},
{
"name": "PHP",
"bytes": "97817"
},
{
"name": "PLpgSQL",
"bytes": "178732"
},
{
"name": "Perl",
"bytes": "63937"
},
{
"name": "Protocol Buffer",
"bytes": "465313"
},
{
"name": "Python",
"bytes": "8146950"
},
{
"name": "Shell",
"bytes": "473684"
},
{
"name": "Standard ML",
"bytes": "5034"
},
{
"name": "XSLT",
"bytes": "418"
},
{
"name": "nesC",
"bytes": "18347"
}
],
"symlink_target": ""
}
|
import json
class AttrProp(object):
def __init__(self, name):
self.name = name
def __get__(self, obj, objtype):
return getattr(obj, self.name)
def __set__(self, obj, value):
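        # Notify the attached storage backend (if any) whenever an identity
        # attribute changes, passing the public (underscore-stripped) name.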
attr_name = self.name.lstrip('_')
old = getattr(obj, self.name)
setattr(obj, self.name, value)
if obj.storage is not None:
obj.storage.on_identity_update(identity=obj,
attr=attr_name,
old=old,
value=value)
class Identity(object):
"""Representation of an AWS identity
Stores identity name and credentials
"""
__slots__ = ['_name', '_access_key_id', '_secret_access_key', 'storage']
def __init__(self, **kwargs):
self._name = kwargs.get('name')
self._access_key_id = kwargs.get('access_key_id')
self._secret_access_key = kwargs.get('secret_access_key')
self.storage = kwargs.get('storage')
@property
def id(self):
return self.access_key_id
name = AttrProp('_name')
access_key_id = AttrProp('_access_key_id')
secret_access_key = AttrProp('_secret_access_key')
def __eq__(self, other):
if not isinstance(other, Identity):
return False
for attr in self.__slots__:
if attr == 'storage':
continue
if getattr(self, attr) != getattr(other, attr):
return False
return True
def __ne__(self, other):
if not isinstance(other, Identity):
return True
for attr in self.__slots__:
if getattr(self, attr) != getattr(other, attr):
return True
return False
def __repr__(self):
return '<Identity: {0} ({1})>'.format(self, self.id)
def __str__(self):
return self.name
class IdentityEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, Identity):
keys = (k.lstrip('_') for k in obj.__slots__ if k.startswith('_'))
return {k:getattr(obj, k) for k in keys}
return super(IdentityEncoder, self).default(obj)
|
{
"content_hash": "9403ee15148d0a076fca351811ed1150",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 78,
"avg_line_length": 35.885245901639344,
"alnum_prop": 0.5367747830059388,
"repo_name": "nocarryr/AWS-Identity-Manager",
"id": "7b4e3986188003decace18860d6df2203e55a0da",
"size": "2189",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "awsident/identity.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "30850"
}
],
"symlink_target": ""
}
|
from django import forms
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, ButtonHolder, Submit
from plugin_manager.hosts import models
from django.contrib.auth.models import Group
class HostCreateForm(forms.ModelForm):
jenkins_password = forms.CharField(widget=forms.PasswordInput(), required=True)
jenkins_username = forms.CharField( required=True)
ssh_username = forms.CharField(required=True)
ssh_password = forms.CharField(widget=forms.PasswordInput(), required=True)
class Meta:
model = models.Host
fields = ['name', 'alias','ssh_username',
'ssh_password','jenkins_username','jenkins_password']
widgets = {'ssh_password': forms.PasswordInput,
'jenkins_password': forms.PasswordInput}
helper = FormHelper()
helper.form_class = 'form-horizontal'
helper.label_class = 'col-lg-2'
helper.field_class = 'col-lg-8'
helper.layout = Layout(
'name',
'alias',
'ssh_username',
'ssh_password',
'jenkins_username',
'jenkins_password',
ButtonHolder(
Submit('submit', 'Create Host', css_class='button')
)
)
def __init__(self, *args, **kwargs):
self.helper = FormHelper()
self.helper.layout = Layout(
'name',
'alias',
'ssh_username',
'ssh_password',
'jenkins_username',
'jenkins_password',
ButtonHolder(
Submit('submit', 'Create Host', css_class='button')
)
)
super(HostCreateForm, self).__init__(*args, **kwargs)
class HostUpdateForm(HostCreateForm):
helper = FormHelper()
helper.form_class = 'form-horizontal'
helper.label_class = 'col-lg-2'
helper.field_class = 'col-lg-8'
helper.layout = Layout(
'name',
'alias',
'ssh_username',
'ssh_password',
'jenkins_username',
'jenkins_password',
ButtonHolder(
Submit('submit', 'Update Host', css_class='button')
)
)
class HostPluginUpdateForm(forms.Form):
#name = forms.CharField(widget=forms.TextInput(attrs={'readonly':'readonly'}),required=True, label='Host Name')
choices = []
versions = forms.ChoiceField(required=True, label='Plugin Version to Install', initial=True,choices=choices)
class Meta:
#model = models.Host
fields = ['versions']
widgets = {}
def __init__(self, *args, **kwargs):
initial = kwargs.get('initial', {})
instance = kwargs.get('instance', {})
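        # Available versions are supplied by the caller through the custom
        # `versions` kwarg and converted into ChoiceField choices below.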
versions_kwarg = kwargs.pop('versions', None)
self.helper = FormHelper()
self.helper.layout = Layout(
'versions',
)
self.helper.add_input(Submit('submit', 'Update', css_class='btn-default btn-sm pull-right'))
self.helper.form_method = 'post'
#kwargs.pop('instance', None)
super(HostPluginUpdateForm, self).__init__(*args, **kwargs)
choices= []
self.choices = []
if versions_kwarg:
for choice in versions_kwarg:
self.choices.append(
(choice, choice)
)
self.fields['versions'].choices = self.choices
class HostPluginInstallForm(forms.Form):
#name = forms.CharField(widget=forms.TextInput(attrs={'readonly':'readonly'}),required=True, label='Host Name')
choices = []
versionchoices = []
plugins = forms.ChoiceField(required=True, label='Plugin to Install', initial=True,choices=choices)
versions = forms.ChoiceField(required=True, label='Version to Install', initial=True,choices=choices)
class Meta:
#model = models.Host
fields = ['plugins','versions']
widgets = {}
def __init__(self, *args, **kwargs):
initial = kwargs.get('initial', {})
instance = kwargs.get('instance', {})
plugins_kwarg = kwargs.pop('plugins', None)
self.helper = FormHelper()
self.helper.layout = Layout(
'plugins',
'versions',
)
self.helper.add_input(Submit('submit', 'Update', css_class='btn-default btn-sm pull-right'))
self.helper.form_method = 'post'
#kwargs.pop('instance', None)
super(HostPluginInstallForm, self).__init__(*args, **kwargs)
choices= []
self.choices = []
if plugins_kwarg:
for choice in plugins_kwarg:
self.choices.append(
(choice, choice)
)
self.fields['plugins'].choices = self.choices
class MembersAddForm(forms.Form):
first_name = forms.CharField(label="First Name", required=False)
last_name = forms.CharField(label="Last Name", required=False)
email = forms.CharField(label="Email", required=False)
user_level = forms.ModelChoiceField(Group.objects.all(),
label="User Level",
empty_label="Select...",
required=False)
def __init__(self, *args, **kwargs):
self.helper = FormHelper()
self.helper.layout = Layout(
'first_name',
'last_name',
'email',
'user_level',
)
self.helper.add_input(Submit('submit', 'Search', css_class='btn-default btn-sm pull-right'))
self.helper.form_method = 'get'
kwargs.pop('instance', None)
super(MembersAddForm, self).__init__(*args, **kwargs)
class UploadFileForm(forms.Form):
plugin_name = forms.CharField(max_length=50,required=False, label="Plugin Name ( if not defined we will use the filename )")
file = forms.FileField(label="Plugin file",required=True)
def __init__(self, *args, **kwargs):
self.helper = FormHelper()
self.helper.layout = Layout(
'plugin_name',
'file',
)
self.helper.add_input(Submit('submit', 'Install', css_class='btn-default btn-sm pull-right'))
super(UploadFileForm, self).__init__(*args, **kwargs)
class UploadFileFormWithName(forms.Form):
file = forms.FileField(label="Plugin file",required=True)
def __init__(self, *args, **kwargs):
self.helper = FormHelper()
        self.helper.layout = Layout(
            'file',
        )
self.helper.add_input(Submit('submit', 'Install', css_class='btn-default btn-sm pull-right'))
super(UploadFileFormWithName, self).__init__(*args, **kwargs)
|
{
"content_hash": "c4453eceebaf1e92905407c359679d25",
"timestamp": "",
"source": "github",
"line_count": 203,
"max_line_length": 128,
"avg_line_length": 32.81773399014779,
"alnum_prop": 0.5780546382467727,
"repo_name": "ahharu/plugin-manager",
"id": "147c882cda1e6cfff18b78ff5b6bf1338288a7ac",
"size": "6662",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plugin_manager/hosts/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "68971"
},
{
"name": "HTML",
"bytes": "72414"
},
{
"name": "JavaScript",
"bytes": "313284"
},
{
"name": "Python",
"bytes": "138428"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from ..dcmstack import GroupAndStack
def test_GroupAndStack_inputs():
input_map = dict(dicom_files=dict(mandatory=True,
),
embed_meta=dict(),
exclude_regexes=dict(),
force_read=dict(usedefault=True,
),
include_regexes=dict(),
out_ext=dict(usedefault=True,
),
out_format=dict(),
out_path=dict(),
)
inputs = GroupAndStack.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
def test_GroupAndStack_outputs():
output_map = dict(out_list=dict(),
)
outputs = GroupAndStack.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(outputs.traits()[key], metakey) == value
|
{
"content_hash": "8981e7f8e308bcd4728af35d818a69cb",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 67,
"avg_line_length": 28,
"alnum_prop": 0.6428571428571429,
"repo_name": "mick-d/nipype",
"id": "208869f6676eafac682f1e7c4a3cbeb969a71b85",
"size": "950",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "nipype/interfaces/tests/test_auto_GroupAndStack.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "9823"
},
{
"name": "KiCad",
"bytes": "3797"
},
{
"name": "Makefile",
"bytes": "1854"
},
{
"name": "Matlab",
"bytes": "1999"
},
{
"name": "Python",
"bytes": "4607773"
},
{
"name": "Shell",
"bytes": "380"
},
{
"name": "Tcl",
"bytes": "43408"
}
],
"symlink_target": ""
}
|
import tkinter as tk
from tkinter import ttk
from tkinter.filedialog import askopenfilename
from PIL import Image, ImageTk
import os
import Detachment as dt
import Application
TITLE_FONT_STYLE = 'Helvetica 18 bold underline'
HEADER_FONT_STYLE = 'Helvetica 14 bold'
HEADER_DATA_FONT_STYLE = 'Helvetica 12 bold'
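# Maps each timeblock menu label to the groups of hours (0-23) that are
# aggregated together when that option is selected.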
timeblock_hour_selections = {
'1-hour timeblocks' : [ (0,), (1,), (2,), (3,), (4,), (5,), (6,), (7,), \
(8,), (9,), (10,), (11,), (12,), (13,), (14,), (15,), \
(16,), (17,), (18,), (19,), (20,), (21,), (22,), (23,) ], \
'3-hour timeblocks (starting at 0000)' : [ (0, 1, 2), (3, 4, 5), (6, 7, 8), (9, 10, 11), \
(12, 13, 14), (15, 16, 17), (18, 19, 20), (21, 22, 23) ], \
'3-hour timeblocks (starting at 0100)' : [ (1, 2, 3), (4, 5, 6), (7, 8, 9), (10, 11, 12), \
(13, 14, 15), (16, 17, 18), (19, 20, 21), (22, 23, 0) ], \
'4-hour timeblocks' : [ (0, 1, 2, 3), (4, 5, 6, 7), (8, 9, 10, 11), \
(12, 13, 14, 15), (16, 17, 18, 19), (20, 21, 22, 23) ], \
'8-hour timeblocks' : [ (0, 1, 2, 3, 4, 5, 6, 7), (8, 9, 10, 11, 12, 13, 14, 15), (16, 17, 18, 19, 20, 21, 22, 23) ],
'full-day timeblocks' : [ (0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23) ]
}
# Title: Understanding Busy Cops
class Title(tk.Frame):
def __init__(self, mainapp, *args, **kwargs):
tk.Frame.__init__(self, mainapp, *args, **kwargs)
self.text = tk.Label(self, text = "Understanding Busy Cops", font = TITLE_FONT_STYLE)
self.text.pack(side = 'left')
class Logo(tk.Frame):
def __init__(self, uploader, *args, **kwargs):
tk.Frame.__init__(self, uploader, *args, **kwargs)
self.img = Image.open("logo.png").resize((300, 200), Image.ANTIALIAS)
self.logo = ImageTk.PhotoImage(self.img) # PIL solution
self.label_logo = tk.Label(self, image=self.logo, anchor = "w")
self.label_logo.pack()
class DetachmentNameEntry(tk.Entry):
def __init__(self, inputframe, *args, **kwargs):
tk.Entry.__init__(self, inputframe, *args, **kwargs)
self.insert(0, "Name")
class FileUpload(tk.Frame):
def __init__(self, inputframe, *args, **kwargs):
tk.Frame.__init__(self, inputframe, *args, **kwargs)
self.parent = inputframe
self.file_name_entry = tk.Entry(self, justify = 'left', width=30)
self.browsebutton = tk.Button(self, text = 'Browse...', command = self.openfile, padx = 5)
self.file_name_entry.pack(side='left')
self.browsebutton.pack(side='left')
def openfile(self):
name = askopenfilename(title = "Select 911.csv file", \
initialdir = os.getcwd(), \
filetypes = [ ('Text files', '*.csv') ])
ext = os.path.splitext(name)[1] # TODO: csv check
if( name == "" and self.file_name_entry.get() == ""):
self.parent.errormsg['text'] = 'Please select a file.'
elif (name != "" ):
self.file_name_entry.delete(0, tk.END)
self.file_name_entry.insert(0, name)
self.file_name_entry.xview_moveto(1)
self.parent.errormsg['text'] = ''
class Input(tk.Frame):
def __init__(self, uploader, *args, **kwargs):
tk.Frame.__init__(self, uploader, *args, **kwargs)
self.parent = uploader
self.detachment_name_entry = DetachmentNameEntry(self, justify = 'left', width=40)
self.timeblock_hour_selection = timeblock_hour_selections['3-hour timeblocks (starting at 0000)']
# TODO: Put this in its own class?... somehow
self.timeblock_hours = tk.StringVar(self)
self.timeblock_hours.set('3-hour timeblocks (starting at 0000)')
self.timeblock_hours_choices = [ '1-hour timeblocks','3-hour timeblocks (starting at 0000)','3-hour timeblocks (starting at 0100)','4-hour timeblocks','8-hour timeblocks', 'full-day timeblocks']
self.hourblocksMenu = tk.OptionMenu(self, self.timeblock_hours, *self.timeblock_hours_choices)
self.hourblocksMenu.config(width = 34, anchor='w')
self.timeblock_hours.trace('w', self.change_hourblocks)
self.fileupload = FileUpload(self)
self.errormsg = tk.Label(self)
self.submitbutton = tk.Button(self, text = 'Submit', command = uploader.submit, padx = 5)
self.detachment_name_entry.pack(anchor='w')
self.hourblocksMenu.pack(anchor='w')
self.fileupload.pack(anchor='w')
self.errormsg.pack(anchor='w')
self.submitbutton.pack(anchor='w')
def change_hourblocks(self, *args):
timeblock_option = self.timeblock_hours.get()
self.timeblock_hour_selection = timeblock_hour_selections[timeblock_option]
class Uploader(tk.Frame):
def __init__(self, mainapp, *args, **kwargs):
tk.Frame.__init__(self, mainapp, *args, **kwargs)
self.mainapp = mainapp
self.logo = Logo(self)
self.input = Input(self)
self.logo.pack(side='left', expand=True)
self.input.pack(side='left', expand=True)
self.detachment_name = ""
self.timeblock_hour_selection = self.input.timeblock_hour_selection
self.filename = ""
self.done = 0
def submit(self):
self.detachment_name = self.input.detachment_name_entry.get()
self.timeblock_hour_selection = self.input.timeblock_hour_selection
self.filename = self.input.fileupload.file_name_entry.get()
self.detachment = dt.Detachment(self.filename, self.detachment_name, self.timeblock_hour_selection)
if (self.detachment.errorInInput is None):
self.pack_forget()
Application.Application(self.mainapp, self).pack(side="top", fill="both", expand=True)
else:
self.input.errormsg['text'] = self.detachment.error
def main():
root = tk.Tk()
root.title('Understanding Busy Cops')
root.geometry('1100x700')
title = Title(root)
title.pack(fill = 'x', expand = False, padx = 10)
Uploader(root).pack(side="top", fill="both", expand=True)
root.mainloop()
if __name__ == "__main__":
main()
|
{
"content_hash": "f89675c6a3b2c18ee05408ae9ced7711",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 202,
"avg_line_length": 42.80952380952381,
"alnum_prop": 0.5790560940727792,
"repo_name": "jphaupt/understanding-busy-cops",
"id": "3c98ec222978e75836e9a43d1a5edc354a7c1933",
"size": "6293",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Main.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "75911"
}
],
"symlink_target": ""
}
|
"""
Unit tests for references
"""
import unittest
from morbo import *
connection.setup('morbotests')
class TestRelationships(unittest.TestCase):
def setUp(self):
for c in connection.database.collection_names():
try:
connection.database.drop_collection(c)
except:
pass
registry.clear()
def test_one(self):
class Foo(Model):
bar = One("Bar")
class Bar(Model):
pass
foo = Foo()
bar = Bar()
with self.assertRaises(AssertionError):
foo.bar = bar
bar.save()
foo.bar = bar
self.assertEquals(foo.bar, bar)
foo.save()
foo = Foo.find_one(foo._id)
self.assertEquals(foo.bar, bar)
def test_one_no_inverse(self):
with self.assertRaises(TypeError):
class Foo(Model):
bar = One("Bar", inverse="foo")
def test_one_to_one(self):
class Foo(Model):
bar = OneToOne("Bar", inverse="foo")
class Bar(Model):
pass
bar = Bar()
bar.save()
foo = Foo()
foo.save()
foo.bar = bar
self.assertEquals(bar.foo, foo)
def test_one_to_one_overdefined(self):
class Foo(Model):
bar = OneToOne("Bar", inverse="foo")
class Bar(Model):
foo = OneToOne(Foo, inverse="bar")
bar = Bar()
bar.save()
foo = Foo()
foo.save()
foo.bar = bar
self.assertEquals(bar.foo, foo)
def test_one_to_one_cascade(self):
class Foo(Model):
bar = OneToOne("Bar", inverse="foo", cascade=True)
class Bar(Model):
pass
bar = Bar()
bar.save()
bar2 = Bar()
bar2.save()
foo = Foo()
foo.save()
foo.bar = bar
self.assertEquals(Bar.count(), 2)
foo.remove()
self.assertEquals(Bar.count(), 1)
b = Bar.find_one()
self.assertEquals(b, bar2)
def test_one_to_many(self):
data = {
'San Francisco': [
'Alchemist', 'Bar Tartine', 'Lounge 3411', 'Tempest'
],
'New York': [
'Dead Rabbit', 'Death & Co.', 'Donna', 'Proletariat'
]
}
class City(Model):
name = Text()
bars = OneToMany("Bar", inverse="city")
class Bar(Model):
name = Text()
for city, bars in data.items():
c = City(name=city)
c.save()
for bar in bars:
b = Bar(name=bar)
b.save()
c.bars.add(b)
city = City.find_one({'name': 'San Francisco'})
self.assertEquals(city.bars.count(), 4)
self.assertEquals([b.name for b in city.bars.find().sort('name')], data['San Francisco'])
city = City.find_one({'name': 'New York'})
self.assertEquals(city.bars.count(), 4)
self.assertEquals([b.name for b in city.bars.find().sort('name')], data['New York'])
bar = Bar.find_one({'name':'Donna'})
self.assertEquals(bar.city.name, 'New York')
self.assertEquals(bar.city, city)
bar.remove()
self.assertEquals(city.bars.count(), 3)
b = city.bars.find_one()
name = b.name
self.assertIn(name, data['New York'])
city.bars.remove(b)
self.assertEquals(city.bars.count(), 2)
new_b = Bar.find_one({'name':name})
self.assertEquals(b, new_b)
def test_one_to_many_overdefined(self):
data = {
'San Francisco': [
'Alchemist', 'Bar Tartine', 'Lounge 3411', 'Tempest'
],
'New York': [
'Dead Rabbit', 'Death & Co.', 'Donna', 'Proletariat'
]
}
class City(Model):
name = Text()
bars = OneToMany("Bar", inverse="city")
class Bar(Model):
name = Text()
city = ManyToOne(City, inverse="bars")
for city, bars in data.items():
c = City(name=city)
c.save()
for bar in bars:
b = Bar(name=bar)
b.save()
c.bars.add(b)
city = City.find_one({'name': 'San Francisco'})
self.assertEquals(city.bars.count(), 4)
self.assertEquals([b.name for b in city.bars.find().sort('name')], data['San Francisco'])
city = City.find_one({'name': 'New York'})
self.assertEquals(city.bars.count(), 4)
self.assertEquals([b.name for b in city.bars.find().sort('name')], data['New York'])
bar = Bar.find_one({'name':'Donna'})
self.assertEquals(bar.city.name, 'New York')
self.assertEquals(bar.city, city)
bar.remove()
self.assertEquals(city.bars.count(), 3)
b = city.bars.find_one()
name = b.name
self.assertIn(name, data['New York'])
city.bars.remove(b)
self.assertEquals(city.bars.count(), 2)
new_b = Bar.find_one({'name':name})
self.assertEquals(b, new_b)
def test_one_to_many_cascade(self):
class City(Model):
name = Text()
bars = OneToMany("Bar", inverse="city", cascade=True)
class Bar(Model):
name = Text()
city_a = City(name="Foo City")
city_a.save()
city_b = City(name="Qux City")
city_b.save()
for i in range(0,10):
b = Bar(name="Bar a#%s" % str(i+1))
b.save()
city_a.bars.add(b)
b = Bar(name="Bar b#%s" % str(i+1))
b.save()
city_b.bars.add(b)
self.assertEquals(Bar.count(), 20)
city_a.remove()
self.assertEquals(Bar.count(), 10)
b = Bar.find().sort('name').next()
self.assertEquals(b.name, "Bar b#1")
def test_one_to_many_query(self):
class Foo(Model):
bars = OneToMany("Bar", inverse="foo")
class Bar(Model):
number = TypeOf(int)
foo = Foo()
foo.save()
self.assertEquals(foo.bars.count(), 0)
self.assertEquals(foo.bars.count({'number':{'$gt':5}}), 0)
for i in range(0, 20):
b = Bar(number=i)
b.save()
b.foo = foo
for i in range(0,10):
f = Foo()
f.save()
self.assertEquals(Foo.count(), 11)
self.assertEquals(Bar.count(), 20)
self.assertEquals(foo.bars.count(), 20)
self.assertEquals(foo.bars.count({'number':{'$gt':5}}), 14)
for i in range(0,10):
b = Bar(number=100 + i)
b.save()
self.assertEquals(Bar.count(), 30)
self.assertEquals(foo.bars.count(), 20)
all_bars = list(Bar.find({'number':{'$lt':5}}))
foo_bars = list(foo.bars.find({'number':{'$lt':5}}))
self.assertEquals(foo_bars, all_bars)
def _many_to_many_paces(self, storage_policy):
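        # Shared document/tag scenario exercised once per ManyToMany storage
        # policy by the test_many_to_many_* methods below.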
tags_by_doc = {
"foo": ["tofu", "seitan"],
"bar": ["seitan"],
"baz": ["tempeh", "tofu"],
"qux": ["seitan", "tofu"],
"goo": ["seitan"]
}
docs_by_tag = {}
class Document(Model):
content = Text()
tags = ManyToMany("Tag", inverse="documents", storage_policy=storage_policy)
class Tag(Model):
name = Text()
tags = {}
docs = {}
for content, tag_names in tags_by_doc.items():
doc = Document(content=content)
doc.save()
docs[content] = doc
for n in tag_names:
if n not in tags:
tag = Tag(name=n)
tag.save()
tags[n] = tag
doc.tags.add(tags[n])
if n not in docs_by_tag:
docs_by_tag[n] = []
docs_by_tag[n].append(content)
for d in [tags_by_doc, docs_by_tag]:
for k in d:
d[k].sort()
for k,v in tags_by_doc.items():
doc = Document.find_one({'content':k})
self.assertEquals([tag.name for tag in doc.tags.find().sort('name')], v)
for k,v in docs_by_tag.items():
tag = Tag.find_one({'name':k})
self.assertEquals([doc.content for doc in tag.documents.find().sort('content')], v)
tag = Tag.find_one({'name':'seitan'})
self.assertEquals(tag.documents.count(), 4)
doc = tag.documents.find_one()
self.assertIn('seitan', [tag.name for tag in doc.tags])
num_doc_tags = doc.tags.count()
tag.documents.remove(doc)
self.assertEquals(tag.documents.count(), 3)
self.assertEquals(doc.tags.count(), num_doc_tags-1)
doc = Document.find_one({'content':'baz'})
self.assertEquals(doc.tags.count({'name':'tofu'}), 1)
def test_many_to_many_locallist(self):
self._many_to_many_paces(ManyToMany.LocalList)
def test_many_to_many_remotelist(self):
self._many_to_many_paces(ManyToMany.RemoteList)
def test_many_to_many_localandremotelist(self):
self._many_to_many_paces(ManyToMany.LocalAndRemoteList)
def test_many_to_many_join(self):
self._many_to_many_paces(ManyToMany.Join('document_tag'))
def test_many_to_many_cascade(self):
class Foo(Model):
bars = ManyToMany("Bar", inverse="foos", cascade=True)
class Bar(Model):
pass
foos = []
for i in range(0,3):
f = Foo()
f.save()
for i in range(0,10):
b = Bar()
b.save()
f.bars.add(b)
foos.append(f)
self.assertEquals(Bar.count(), 30)
for foo in foos:
self.assertEquals(foo.bars.count(), 10)
foos[0].remove()
self.assertEquals(Bar.count(), 20)
for foo in foos[1:]:
self.assertEquals(foo.bars.count(), 10)
def test_class_remove_cascade(self):
class Foo(Model):
number = TypeOf(int)
bars = OneToMany("Bar", inverse="foo", cascade=True)
class Bar(Model):
number = TypeOf(int)
for i in range(0,5):
f = Foo()
f.number = i
f.save()
for j in range(0,10):
b = Bar()
b.number = j
b.save()
f.bars.add(b)
self.assertEquals(Foo.count(), 5)
self.assertEquals(Bar.count(), 50)
Foo.remove({'number': 4})
self.assertEquals(Foo.count(), 4)
self.assertEquals(Bar.count(), 40)
def test_redefine_backreference_fails(self):
class Foo(Model):
pass
class Bar(Model):
foos = ManyToOne(Foo, inverse="bars")
with self.assertRaises(AssertionError):
class Baz(Model):
foos = ManyToOne(Foo, inverse="bars")
class Qux(Model):
qugs = OneToMany("Qug", inverse="zzz")
with self.assertRaises(AssertionError):
class Quv(Model):
qugs = OneToMany("Qug", inverse="zzz")
def test_invalid_inverse_fails(self):
class Foo(Model):
bar = OneToOne("Bar", inverse="foo")
with self.assertRaises(TypeError):
class Bar(Model):
foo = OneToMany(Foo)
def test_remove_with_spec(self):
class Foo(Model):
bars = OneToMany("Bar")
class Bar(Model):
number = TypeOf(int)
foo = Foo()
foo.save()
for i in range(0,10):
b = Bar(number=i)
b.save()
foo.bars.add(b)
self.assertEquals(foo.bars.count(), 10)
foo.bars.remove({'number':{'$lt':5}})
self.assertEquals(foo.bars.count(), 5)
if __name__ == "__main__":
unittest.main()
|
{
"content_hash": "ff339433b51a51a416b39d25e42a46be",
"timestamp": "",
"source": "github",
"line_count": 453,
"max_line_length": 97,
"avg_line_length": 28.858719646799116,
"alnum_prop": 0.45926719192228255,
"repo_name": "elishacook/morbo",
"id": "db166b47a6c623daa2cbb387baaeb8f69b7e67b0",
"size": "13073",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_relationships.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "94708"
},
{
"name": "Shell",
"bytes": "4509"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from reparse.config import pattern_max_recursion_depth
from reparse.expression import Group, AlternatesGroup, Expression
from reparse.util import separate_string
class ExpressionGroupNotFound(Exception):
pass
class Function_Builder(object):
"""
Function Builder is an on-the-fly builder of functions for expressions
>>> def t(_):
... return _
>>> fb = Function_Builder({"hey":t})
>>> fb.get_function("hey", "") is t
True
"""
def __init__(self, functions_dict):
self._functions = functions_dict
def get_function(self, name, function_type, group_names=None):
if name in self._functions:
if function_type is "type":
def func(_):
if not any(_):
return None
return self._functions[name](_)
elif function_type is "group":
def func(_):
return self._functions[name](_[0])
elif function_type is "expression" and group_names is not None:
def func(_):
groups = dict(zip(group_names, _))
return self._functions[name](**groups)
elif function_type is "pattern":
def func(_):
return self._functions[name](*_)
else:
func = self._functions[name]
# DEFAULT FUNCTIONS
elif function_type is "group":
def func(_):
if _:
return _[0]
elif function_type is "type":
def func(_):
for i in _:
if i is not None:
return i
else:
def func(_):
if any(_):
return _
func.__name__ = str(name)
return func
def add_function(self, name, function):
self._functions[name] = function
class Expression_Builder(object):
""" Expression builder is useful for building
regex bits with Groups that cascade down::
from GROUP (group).?,?(group)|(group) (group)
to EXPRESSION (group) | (group)
to TYPE (group)
>>> dummy = lambda input: input
>>> get_function = lambda *_, **__: dummy
>>> function_builder = lambda: None
>>> function_builder.get_function = get_function
>>> expression = {'greeting':{'greeting':{'Expression': '(hey)|(cool)', 'Groups' : ['greeting', 'cooly']}}}
>>> eb = Expression_Builder(expression, function_builder)
>>> eb.get_type("greeting").findall("hey, cool!") # doctest: +IGNORE_UNICODE
[[('hey',), ('',)], [('',), ('cool',)]]
"""
def __init__(self, expressions_dict, function_builder):
self.type_db = {}
for expression_type, expressions in expressions_dict.items():
type_expressions = []
for name, expression in expressions.items():
groups = expression['Groups']
regex = expression['Expression']
lengths = [1] * len(groups)
group_functions = [function_builder.get_function(g, "group") for g in groups]
expression_final_function = \
function_builder.get_function(name, function_type="expression", group_names=groups)
e = Expression(regex, group_functions, lengths, expression_final_function)
type_expressions.append(e)
type_final_function = function_builder.get_function(expression_type, function_type="type")
self.type_db[expression_type] = AlternatesGroup(type_expressions, type_final_function)
def get_type(self, type_string):
if type_string in self.type_db:
return self.type_db[type_string]
def add_type(self, expression, type_string):
self.type_db[type_string] = expression
def build_pattern(pattern_name, pattern_regex, expression_builder, function_builder):
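    # separate_string splits the pattern into the literal text between group
    # references and the expression-group names it refers to; an unknown name
    # raises ExpressionGroupNotFound (handled by the recursive pass in _build).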
final_function = function_builder.get_function(pattern_name, "pattern")
inbetweens, expression_names = separate_string(pattern_regex)
expressions = []
for name in expression_names:
expression = expression_builder.get_type(name)
if expression is None:
raise ExpressionGroupNotFound("Expression Group ({}) not Found!".format(name))
expressions.append(expression)
return Group(expressions, final_function, inbetweens, pattern_name)
def _build(output_patterns, patterns_dict, expression_builder, function_builder, depth=0):
extra = {}
for name, pattern in patterns_dict.items():
try:
pat = build_pattern(name, pattern['Pattern'], expression_builder, function_builder)
pat.order = int(pattern.get('Order', 0))
output_patterns.append(pat)
expression_builder.add_type(pat, name)
except ExpressionGroupNotFound:
extra[name] = pattern
if len(extra) > 0 and depth < pattern_max_recursion_depth:
# Recursive building for patterns inside of patterns
return _build(output_patterns, extra, expression_builder, function_builder, depth + 1)
elif depth >= pattern_max_recursion_depth:
raise ExpressionGroupNotFound()
return output_patterns
def build_all(patterns, expressions, functions):
function_builder = Function_Builder(functions)
return _build([], patterns, Expression_Builder(expressions, function_builder), function_builder)
|
{
"content_hash": "2dd74952c7d7f0a64ba0ed44ad8cf44b",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 111,
"avg_line_length": 39.40425531914894,
"alnum_prop": 0.5860331173506119,
"repo_name": "andychase/reparse",
"id": "8b6fec64934dfc01c2d5f3d126d065887b62af10",
"size": "5556",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reparse/builders.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "23917"
}
],
"symlink_target": ""
}
|
from tempest.api.volume import base
from tempest import config
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
CONF = config.CONF
class VolumeSnapshotQuotasNegativeTestJSON(base.BaseVolumeAdminTest):
force_tenant_isolation = True
@classmethod
def skip_checks(cls):
super(VolumeSnapshotQuotasNegativeTestJSON, cls).skip_checks()
if not CONF.volume_feature_enabled.snapshot:
raise cls.skipException('Cinder volume snapshots are disabled')
@classmethod
def setup_credentials(cls):
super(VolumeSnapshotQuotasNegativeTestJSON, cls).setup_credentials()
cls.demo_tenant_id = cls.os_primary.credentials.tenant_id
@classmethod
def resource_setup(cls):
super(VolumeSnapshotQuotasNegativeTestJSON, cls).resource_setup()
cls.default_volume_size = CONF.volume.volume_size
cls.shared_quota_set = {'gigabytes': 3 * cls.default_volume_size,
'volumes': 1, 'snapshots': 1}
# NOTE(gfidente): no need to restore original quota set
# after the tests as they only work with tenant isolation.
cls.admin_quotas_client.update_quota_set(
cls.demo_tenant_id,
**cls.shared_quota_set)
# NOTE(gfidente): no need to delete in tearDown as
# they are created using utility wrapper methods.
cls.volume = cls.create_volume()
cls.snapshot = cls.create_snapshot(volume_id=cls.volume['id'])
@decorators.attr(type='negative')
@decorators.idempotent_id('02bbf63f-6c05-4357-9d98-2926a94064ff')
def test_quota_volume_snapshots(self):
self.assertRaises(lib_exc.OverLimit,
self.snapshots_client.create_snapshot,
volume_id=self.volume['id'])
@decorators.attr(type='negative')
@decorators.idempotent_id('c99a1ca9-6cdf-498d-9fdf-25832babef27')
def test_quota_volume_gigabytes_snapshots(self):
self.addCleanup(self.admin_quotas_client.update_quota_set,
self.demo_tenant_id,
**self.shared_quota_set)
new_quota_set = {'gigabytes': 2 * self.default_volume_size,
'volumes': 1, 'snapshots': 2}
self.admin_quotas_client.update_quota_set(
self.demo_tenant_id,
**new_quota_set)
self.assertRaises(lib_exc.OverLimit,
self.snapshots_client.create_snapshot,
volume_id=self.volume['id'])
|
{
"content_hash": "37dc57c3b15a0598aab2b7ceee9353b2",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 76,
"avg_line_length": 41.68852459016394,
"alnum_prop": 0.6421549351160047,
"repo_name": "Juniper/tempest",
"id": "0f4e90fd3f2df85a142908f4f23abfe9308a2498",
"size": "3179",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tempest/api/volume/admin/test_volume_snapshot_quotas_negative.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4194970"
},
{
"name": "Shell",
"bytes": "19343"
}
],
"symlink_target": ""
}
|
import json
from boxsdk.object.terms_of_service_user_status import TermsOfServiceUserStatus
from boxsdk.config import API
def test_get(test_terms_of_service_user_status, mock_box_session):
created_at = '2016-05-18T17:38:03-07:00'
expected_url = f'{API.BASE_API_URL}/terms_of_service_user_statuses/{test_terms_of_service_user_status.object_id}'
mock_box_session.get.return_value.json.return_value = {
'type': 'terms_of_service_user_status',
'id': test_terms_of_service_user_status.object_id,
'created_at': created_at
}
terms_of_service = test_terms_of_service_user_status.get()
mock_box_session.get.assert_called_once_with(expected_url, headers=None, params=None)
assert isinstance(terms_of_service, TermsOfServiceUserStatus)
assert terms_of_service.type == test_terms_of_service_user_status.object_type
assert terms_of_service.id == test_terms_of_service_user_status.object_id
assert terms_of_service.created_at == created_at
def test_accept(test_terms_of_service_user_status, mock_box_session):
expected_url = f'{API.BASE_API_URL}/terms_of_service_user_statuses/{test_terms_of_service_user_status.object_id}'
mock_box_session.put.return_value.json.return_value = {
'type': test_terms_of_service_user_status.object_type,
'id': test_terms_of_service_user_status.object_id,
'is_accepted': True
}
data = {'is_accepted': True}
terms_of_service_user_status = test_terms_of_service_user_status.accept()
mock_box_session.put.assert_called_once_with(expected_url, data=json.dumps(data), headers=None, params=None)
assert isinstance(terms_of_service_user_status, TermsOfServiceUserStatus)
assert terms_of_service_user_status.type == test_terms_of_service_user_status.object_type
assert terms_of_service_user_status.id == test_terms_of_service_user_status.object_id
assert terms_of_service_user_status.is_accepted is True
def test_reject(test_terms_of_service_user_status, mock_box_session):
expected_url = f'{API.BASE_API_URL}/terms_of_service_user_statuses/{test_terms_of_service_user_status.object_id}'
mock_box_session.put.return_value.json.return_value = {
'type': 'terms_of_service_user_status',
'id': test_terms_of_service_user_status.object_id,
'is_accepted': False
}
data = {'is_accepted': False}
terms_of_service_user_status = test_terms_of_service_user_status.reject()
mock_box_session.put.assert_called_once_with(expected_url, data=json.dumps(data), headers=None, params=None)
assert isinstance(terms_of_service_user_status, TermsOfServiceUserStatus)
assert terms_of_service_user_status.type == test_terms_of_service_user_status.object_type
assert terms_of_service_user_status.id == test_terms_of_service_user_status.object_id
assert terms_of_service_user_status.is_accepted is False
|
{
"content_hash": "e491ad0761a62a6e4cd4ee02982931bd",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 117,
"avg_line_length": 55.48076923076923,
"alnum_prop": 0.7265164644714038,
"repo_name": "box/box-python-sdk",
"id": "0c4362dd2b34f9640df3bc2ebf65d4f8caa6f7c4",
"size": "2885",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "test/unit/object/test_terms_of_service_user_status.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1036959"
},
{
"name": "Smarty",
"bytes": "527"
}
],
"symlink_target": ""
}
|
from setuptools import setup
from mycroft.util.setup_base import (
find_all_packages,
required,
get_version,
place_manifest
)
place_manifest('mycroft-base-MANIFEST.in')
setup(
name="mycroft-core",
version=get_version(),
install_requires=[required('requirements.txt')],
packages=find_all_packages("mycroft"),
include_package_data=True,
entry_points={
'console_scripts': [
'mycroft-speech-client=mycroft.client.speech.main:main',
'mycroft-messagebus=mycroft.messagebus.service.main:main',
'mycroft-skills=mycroft.skills.main:main',
'mycroft-audio=mycroft.audio.main:main',
'mycroft-echo-observer=mycroft.messagebus.client.ws:echo',
'mycroft-audio-test=mycroft.util.audio_test:main',
'mycroft-enclosure-client=mycroft.client.enclosure.main:main',
'mycroft-skill-container=mycroft.skills.container:main',
'mycroft-cli-client=mycroft.client.text.main:main'
]
}
)
|
{
"content_hash": "2af9bfee9e59022074e557845701ec95",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 74,
"avg_line_length": 32.1875,
"alnum_prop": 0.6563106796116505,
"repo_name": "aatchison/mycroft-core",
"id": "dc716ef5e8173edff246b71c23e08f54abf73d2a",
"size": "1610",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mycroft-base-setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "549667"
},
{
"name": "Roff",
"bytes": "912"
},
{
"name": "Shell",
"bytes": "60054"
}
],
"symlink_target": ""
}
|
import logging
import numpy as np
import nibabel as nib
import scipy.ndimage as ndimage
from six import string_types
from .check import check_img
from nilearn._utils import check_niimg
from nilearn.image.image import new_img_like, _fast_smooth_array
log = logging.getLogger(__name__)
# def smooth_volume(nifti_file, smoothmm):
# """
#
# @param nifti_file: string
# @param smoothmm: int
# @return:
# """
# from nipy.algorithms.kernel_smooth import LinearFilter
# from nipy import load_image
# try:
# img = load_image(nifti_file)
# except Exception:
# log.exception('Error reading file {0}.'.format(nifti_file))
# raise
#
# if smoothmm <= 0:
# return img
#
# filter = LinearFilter(img.coordmap, img.shape)
# return filter.smooth(img)
#
def fwhm2sigma(fwhm):
"""Convert a FWHM value to sigma in a Gaussian kernel.
Parameters
----------
fwhm: float or numpy.array
fwhm value or values
Returns
-------
    sigma: float or numpy.array
        sigma values corresponding to `fwhm` values
"""
fwhm = np.asarray(fwhm)
return fwhm / np.sqrt(8 * np.log(2))
def sigma2fwhm(sigma):
"""Convert a sigma in a Gaussian kernel to a FWHM value.
Parameters
----------
sigma: float or numpy.array
sigma value or values
Returns
-------
fwhm: float or numpy.array
fwhm values corresponding to `sigma` values
"""
sigma = np.asarray(sigma)
return np.sqrt(8 * np.log(2)) * sigma
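# Illustrative sketch (not part of the upstream module): a tiny self-check of
# the FWHM <-> sigma round trip defined above. The values are arbitrary and the
# helper is never called by the library code.
def _fwhm_sigma_roundtrip_example():
    fwhm = np.array([4.0, 6.0, 8.0])               # FWHM in millimeters
    sigma = fwhm2sigma(fwhm)                       # ~= fwhm / 2.3548
    assert np.allclose(sigma2fwhm(sigma), fwhm)    # inverse recovers the input
    return sigma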
def smooth_volume(image, smoothmm):
"""See smooth_img."""
return smooth_imgs(image, smoothmm)
def _smooth_data_array(arr, affine, fwhm, copy=True):
"""Smooth images with a a Gaussian filter.
Apply a Gaussian filter along the three first dimensions of arr.
Parameters
----------
arr: numpy.ndarray
3D or 4D array, with image number as last dimension.
affine: numpy.ndarray
Image affine transformation matrix for image.
fwhm: scalar, numpy.ndarray
Smoothing kernel size, as Full-Width at Half Maximum (FWHM) in millimeters.
If a scalar is given, kernel width is identical on all three directions.
A numpy.ndarray must have 3 elements, giving the FWHM along each axis.
copy: bool
if True, will make a copy of the input array. Otherwise will directly smooth the input array.
Returns
-------
smooth_arr: numpy.ndarray
"""
if arr.dtype.kind == 'i':
if arr.dtype == np.int64:
arr = arr.astype(np.float64)
else:
arr = arr.astype(np.float32)
if copy:
arr = arr.copy()
    # Zero out possible NaNs and Infs in the image.
arr[np.logical_not(np.isfinite(arr))] = 0
try:
# Keep the 3D part of the affine.
affine = affine[:3, :3]
# Convert from FWHM in mm to a sigma.
fwhm_sigma_ratio = np.sqrt(8 * np.log(2))
vox_size = np.sqrt(np.sum(affine ** 2, axis=0))
sigma = fwhm / (fwhm_sigma_ratio * vox_size)
for n, s in enumerate(sigma):
ndimage.gaussian_filter1d(arr, s, output=arr, axis=n)
except:
raise ValueError('Error smoothing the array.')
else:
return arr
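# Illustrative sketch (not part of the upstream module): how _smooth_data_array
# can be exercised on a plain numpy volume with an identity affine, i.e.
# 1 mm isotropic voxels; the data values are arbitrary.
def _smooth_data_array_example():
    data = np.random.rand(16, 16, 16).astype(np.float32)
    affine = np.eye(4)                                    # 1 mm isotropic voxels
    smoothed = _smooth_data_array(data, affine, fwhm=6.0, copy=True)
    return smoothed.shape                                 # stays (16, 16, 16)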
def smooth_imgs(images, fwhm):
"""Smooth images using a Gaussian filter.
Apply a Gaussian filter along the three first dimensions of each image in images.
In all cases, non-finite values in input are zeroed.
Parameters
----------
    images: str or img-like object or iterable of img-like objects
See boyle.nifti.read.read_img
Image(s) to smooth.
fwhm: scalar or numpy.ndarray
Smoothing kernel size, as Full-Width at Half Maximum (FWHM) in millimeters.
If a scalar is given, kernel width is identical on all three directions.
A numpy.ndarray must have 3 elements, giving the FWHM along each axis.
Returns
-------
    smooth_imgs: nibabel.Nifti1Image or list of nibabel.Nifti1Image
        Smoothed input image(s).
"""
if fwhm <= 0:
return images
if not isinstance(images, string_types) and hasattr(images, '__iter__'):
only_one = False
else:
only_one = True
images = [images]
result = []
for img in images:
img = check_img(img)
affine = img.get_affine()
smooth = _smooth_data_array(img.get_data(), affine, fwhm=fwhm, copy=True)
result.append(nib.Nifti1Image(smooth, affine))
if only_one:
return result[0]
else:
return result
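# Illustrative sketch (not part of the upstream module): the single-image call
# pattern for smooth_imgs, assuming check_img accepts an in-memory Nifti1Image
# as the docstring above suggests. A single impulse is spread out by the
# kernel, so its maximum drops well below the original value of 1.0.
def _smooth_imgs_example():
    data = np.zeros((8, 8, 8), dtype=np.float32)
    data[4, 4, 4] = 1.0                           # single impulse
    img = nib.Nifti1Image(data, np.eye(4))
    smoothed = smooth_imgs(img, fwhm=5)           # single image in, single image out
    return smoothed.get_data().max() < 1.0        # True: energy spread by smoothing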
def _smooth_array(arr, affine, fwhm=None, ensure_finite=True, copy=True, **kwargs):
"""Smooth images by applying a Gaussian filter.
Apply a Gaussian filter along the three first dimensions of arr.
This is copied and slightly modified from nilearn:
https://github.com/nilearn/nilearn/blob/master/nilearn/image/image.py
Added the **kwargs argument.
Parameters
==========
arr: numpy.ndarray
4D array, with image number as last dimension. 3D arrays are also
accepted.
affine: numpy.ndarray
(4, 4) matrix, giving affine transformation for image. (3, 3) matrices
are also accepted (only these coefficients are used).
If fwhm='fast', the affine is not used and can be None
fwhm: scalar, numpy.ndarray, 'fast' or None
Smoothing strength, as a full-width at half maximum, in millimeters.
If a scalar is given, width is identical on all three directions.
A numpy.ndarray must have 3 elements, giving the FWHM along each axis.
If fwhm == 'fast', a fast smoothing will be performed with
a filter [0.2, 1, 0.2] in each direction and a normalisation
to preserve the local average value.
If fwhm is None, no filtering is performed (useful when just removal
of non-finite values is needed).
ensure_finite: bool
if True, replace every non-finite values (like NaNs) by zero before
filtering.
copy: bool
if True, input array is not modified. False by default: the filtering
is performed in-place.
kwargs: keyword-arguments
Arguments for the ndimage.gaussian_filter1d function.
Returns
=======
filtered_arr: numpy.ndarray
arr, filtered.
Notes
=====
This function is most efficient with arr in C order.
"""
if arr.dtype.kind == 'i':
if arr.dtype == np.int64:
arr = arr.astype(np.float64)
else:
# We don't need crazy precision
arr = arr.astype(np.float32)
if copy:
arr = arr.copy()
if ensure_finite:
# SPM tends to put NaNs in the data outside the brain
arr[np.logical_not(np.isfinite(arr))] = 0
if fwhm == 'fast':
arr = _fast_smooth_array(arr)
elif fwhm is not None:
# Keep only the scale part.
affine = affine[:3, :3]
# Convert from a FWHM to a sigma:
fwhm_over_sigma_ratio = np.sqrt(8 * np.log(2))
vox_size = np.sqrt(np.sum(affine ** 2, axis=0))
sigma = fwhm / (fwhm_over_sigma_ratio * vox_size)
for n, s in enumerate(sigma):
ndimage.gaussian_filter1d(arr, s, output=arr, axis=n, **kwargs)
return arr
def smooth_img(imgs, fwhm, **kwargs):
"""Smooth images by applying a Gaussian filter.
Apply a Gaussian filter along the three first dimensions of arr.
In all cases, non-finite values in input image are replaced by zeros.
This is copied and slightly modified from nilearn:
https://github.com/nilearn/nilearn/blob/master/nilearn/image/image.py
Added the **kwargs argument.
Parameters
==========
imgs: Niimg-like object or iterable of Niimg-like objects
See http://nilearn.github.io/manipulating_images/manipulating_images.html#niimg.
Image(s) to smooth.
fwhm: scalar, numpy.ndarray, 'fast' or None
Smoothing strength, as a Full-Width at Half Maximum, in millimeters.
If a scalar is given, width is identical on all three directions.
A numpy.ndarray must have 3 elements, giving the FWHM along each axis.
If fwhm == 'fast', a fast smoothing will be performed with
a filter [0.2, 1, 0.2] in each direction and a normalisation
to preserve the scale.
If fwhm is None, no filtering is performed (useful when just removal
of non-finite values is needed)
Returns
=======
filtered_img: nibabel.Nifti1Image or list of.
Input image, filtered. If imgs is an iterable, then filtered_img is a
list.
"""
# Use hasattr() instead of isinstance to workaround a Python 2.6/2.7 bug
# See http://bugs.python.org/issue7624
if hasattr(imgs, "__iter__") \
and not isinstance(imgs, string_types):
single_img = False
else:
single_img = True
imgs = [imgs]
ret = []
for img in imgs:
img = check_niimg(img)
affine = img.get_affine()
filtered = _smooth_array(img.get_data(), affine, fwhm=fwhm,
ensure_finite=True, copy=True, **kwargs)
ret.append(new_img_like(img, filtered, affine, copy_header=True))
if single_img:
return ret[0]
else:
return ret
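# Illustrative sketch (not part of the upstream module): the 'fast' mode that
# the docstring above describes skips the affine entirely and applies the fixed
# [0.2, 1, 0.2] filter via nilearn's _fast_smooth_array.
def _smooth_img_fast_example():
    img = nib.Nifti1Image(np.random.rand(8, 8, 8).astype(np.float32), np.eye(4))
    return smooth_img(img, fwhm='fast')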
|
{
"content_hash": "031eb22ac1b40621c73b446892b58e24",
"timestamp": "",
"source": "github",
"line_count": 296,
"max_line_length": 101,
"avg_line_length": 31.45945945945946,
"alnum_prop": 0.6212414089347079,
"repo_name": "Neurita/boyle",
"id": "aa2ccc987c8d6e6a30249b8af51cd79fc2b7b4d0",
"size": "9313",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "boyle/nifti/smooth.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1687"
},
{
"name": "Python",
"bytes": "391188"
}
],
"symlink_target": ""
}
|
import os
class Processes():
PIDFILE = "git-events.pid"
def register_process(self):
pidfile = open(self.PIDFILE, 'w')
pidfile.write(str(os.getpid()))
pidfile.close()
def unregister_process(self):
try:
os.remove(self.PIDFILE)
except Exception as fileException:
pass
def is_running(self):
return os.path.isfile(self.PIDFILE)
def get_pid(self):
if not self.is_running():
return None
else:
handle = open(self.PIDFILE, 'r')
pid = int(handle.read())
handle.close()
return pid
class ProcessesProvider():
def __init__(self):
self.instance = None
def get(self):
if self.instance is None:
self.instance = Processes()
return self.instance
processes_provider = ProcessesProvider()
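# Illustrative sketch (not part of the upstream module): a typical daemon-style
# lifecycle for the singleton above. run_the_daemon() is hypothetical and only
# stands in for whatever long-running work git-events actually performs.
def _pidfile_lifecycle_example():
    processes = processes_provider.get()
    if processes.is_running():
        return processes.get_pid()        # another instance already owns the PID file
    processes.register_process()
    try:
        pass                              # run_the_daemon() would go here
    finally:
        processes.unregister_process()
    return None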
|
{
"content_hash": "b7b621a6ddf3e23df0e0550b8713d75a",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 44,
"avg_line_length": 21.73170731707317,
"alnum_prop": 0.5622895622895623,
"repo_name": "iddl/git-events",
"id": "23983b0e486d8b41df3e08da8448e9a1480aebc6",
"size": "891",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "processes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "19335"
}
],
"symlink_target": ""
}
|
"""Test for the TF-IDF example."""
import logging
import os
import re
import tempfile
import unittest
import apache_beam as beam
from apache_beam.examples.complete import tfidf
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
from apache_beam.testing.util import open_shards
EXPECTED_RESULTS = set([
('ghi', '1.txt', 0.3662040962227032),
('abc', '1.txt', 0.0),
('abc', '3.txt', 0.0),
('abc', '2.txt', 0.0),
('def', '1.txt', 0.13515503603605478),
('def', '2.txt', 0.2027325540540822)])
EXPECTED_LINE_RE = r'\(u\'([a-z]*)\', \(\'.*([0-9]\.txt)\', (.*)\)\)'
class TfIdfTest(unittest.TestCase):
def create_file(self, path, contents):
logging.info('Creating temp file: %s', path)
with open(path, 'w') as f:
f.write(contents)
def test_tfidf_transform(self):
with TestPipeline() as p:
uri_to_line = p | 'create sample' >> beam.Create(
[('1.txt', 'abc def ghi'),
('2.txt', 'abc def'),
('3.txt', 'abc')])
result = (
uri_to_line
| tfidf.TfIdf()
| beam.Map(lambda (word, (uri, tfidf)): (word, uri, tfidf)))
assert_that(result, equal_to(EXPECTED_RESULTS))
# Run the pipeline. Note that the assert_that above adds to the pipeline
# a check that the result PCollection contains expected values.
# To actually trigger the check the pipeline must be run (e.g. by
# exiting the with context).
def test_basics(self):
# Setup the files with expected content.
temp_folder = tempfile.mkdtemp()
self.create_file(os.path.join(temp_folder, '1.txt'), 'abc def ghi')
self.create_file(os.path.join(temp_folder, '2.txt'), 'abc def')
self.create_file(os.path.join(temp_folder, '3.txt'), 'abc')
tfidf.run([
'--uris=%s/*' % temp_folder,
'--output', os.path.join(temp_folder, 'result')])
# Parse result file and compare.
results = []
with open_shards(os.path.join(
temp_folder, 'result-*-of-*')) as result_file:
for line in result_file:
match = re.search(EXPECTED_LINE_RE, line)
logging.info('Result line: %s', line)
if match is not None:
results.append(
(match.group(1), match.group(2), float(match.group(3))))
logging.info('Computed results: %s', set(results))
self.assertEqual(set(results), EXPECTED_RESULTS)
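# Illustrative sketch (not part of the upstream test module): what
# EXPECTED_LINE_RE captures from one repr-style output line. The sample string
# is made up to match the pattern; real result shards are produced by tfidf.run.
def _expected_line_re_demo():
    sample = "(u'ghi', ('1.txt', 0.3662040962227032))"
    match = re.search(EXPECTED_LINE_RE, sample)
    return match.group(1), match.group(2), float(match.group(3))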
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
unittest.main()
|
{
"content_hash": "758a5894628d7b8a04127ce0797f4e70",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 78,
"avg_line_length": 33.27272727272727,
"alnum_prop": 0.6182669789227166,
"repo_name": "peihe/incubator-beam",
"id": "b6f88255887cb432d74eb0880a341004fef91a16",
"size": "3347",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "sdks/python/apache_beam/examples/complete/tfidf_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Groovy",
"bytes": "47640"
},
{
"name": "Java",
"bytes": "11659452"
},
{
"name": "Protocol Buffer",
"bytes": "55082"
},
{
"name": "Python",
"bytes": "2859669"
},
{
"name": "Shell",
"bytes": "44966"
}
],
"symlink_target": ""
}
|
import heapq
def funnel(k, numbers):
if k == 0:
return []
heap = numbers[:k]
# heapify: Transform list into a heap, in-place, in linear time.
heapq.heapify(heap)
for num in numbers[k:]:
# heappushpop: Push item on the heap, then pop and return the smallest item from the heap.
# ref: https://gengwg.blogspot.com/2018/04/heapq-heap-queue-algorithm.html
heapq.heappushpop(heap, num)
#if heap[0] < num:
#heapq.heapreplace(heap, num)
return heap
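# Illustrative note (not part of the original file): heapq.nlargest maintains an
# equivalent bounded heap internally, so the same "top k" elements can be
# obtained with it directly; it returns them sorted in descending order,
# whereas funnel() returns them in heap order.
def funnel_nlargest(k, numbers):
    return heapq.nlargest(k, numbers)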
if __name__ == '__main__':
print funnel(4, [3,1,4,1,5,9,2,6,5,3,5,8])
import random
# nums = [i for i in range(100)]
nums = [random.randint(0,100) for i in range(100)]
print funnel(10, nums)
|
{
"content_hash": "e33a1dbeef143845e0c6cbb9ddf46234",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 98,
"avg_line_length": 31.82608695652174,
"alnum_prop": 0.6024590163934426,
"repo_name": "gengwg/leetcode",
"id": "95332a4d8d001a9a9b4b51654545c6b354f8dffc",
"size": "997",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "top100numbers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "779"
},
{
"name": "Python",
"bytes": "627348"
},
{
"name": "SQLPL",
"bytes": "779"
},
{
"name": "Shell",
"bytes": "4149"
}
],
"symlink_target": ""
}
|
from horizon.test import helpers as test
class FivecircleTests(test.TestCase):
# Unit tests for AppCatalog.
def test_me(self):
self.assertTrue(1 + 1 == 2)
|
{
"content_hash": "1f9065e7ca1230e74bd9c7411a5e2250",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 40,
"avg_line_length": 24.714285714285715,
"alnum_prop": 0.6820809248554913,
"repo_name": "kfox1111/apps-catalog-ui",
"id": "5736aaee519abe344e1f4f6ee6ff14d34187edfb",
"size": "173",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "component_catalog/tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "4365"
},
{
"name": "JavaScript",
"bytes": "3968"
},
{
"name": "Python",
"bytes": "8148"
}
],
"symlink_target": ""
}
|
import json
import mock
from oslotest import mockpatch
from tempest.cmd import verify_tempest_config
from tempest import config
from tempest.tests import base
from tempest.tests import fake_config
class TestGetAPIVersions(base.TestCase):
def test_url_grab_versioned_nova_nossl(self):
base_url = 'http://127.0.0.1:8774/v2/'
endpoint = verify_tempest_config._get_unversioned_endpoint(base_url)
self.assertEqual('http://127.0.0.1:8774', endpoint)
def test_url_grab_versioned_nova_ssl(self):
base_url = 'https://127.0.0.1:8774/v3/'
endpoint = verify_tempest_config._get_unversioned_endpoint(base_url)
self.assertEqual('https://127.0.0.1:8774', endpoint)
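# Illustrative sketch (not part of the upstream test module): the two cases
# above only pin down the observable behaviour of _get_unversioned_endpoint,
# i.e. the trailing version segment ('/v2/', '/v3/') is dropped while scheme,
# host and port are kept. One way to get that behaviour, not necessarily how
# tempest implements it, is to rebuild the URL from its parsed parts (this
# assumes six is importable, as it is elsewhere in this test tree).
def _strip_version_sketch(base_url):
    from six.moves.urllib import parse
    parts = parse.urlparse(base_url)
    return '%s://%s' % (parts.scheme, parts.netloc)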
class TestDiscovery(base.TestCase):
def setUp(self):
super(TestDiscovery, self).setUp()
self.useFixture(fake_config.ConfigFixture())
self.stubs.Set(config, 'TempestConfigPrivate', fake_config.FakePrivate)
def test_get_keystone_api_versions(self):
self.useFixture(mockpatch.PatchObject(
verify_tempest_config, '_get_unversioned_endpoint',
return_value='http://fake_endpoint:5000'))
fake_resp = {'versions': {'values': [{'id': 'v2.0'}, {'id': 'v3.0'}]}}
fake_resp = json.dumps(fake_resp)
self.useFixture(mockpatch.Patch('httplib2.Http.request',
return_value=(None, fake_resp)))
fake_os = mock.MagicMock()
versions = verify_tempest_config._get_api_versions(fake_os, 'keystone')
self.assertIn('v2.0', versions)
self.assertIn('v3.0', versions)
def test_get_cinder_api_versions(self):
self.useFixture(mockpatch.PatchObject(
verify_tempest_config, '_get_unversioned_endpoint',
return_value='http://fake_endpoint:5000'))
fake_resp = {'versions': [{'id': 'v1.0'}, {'id': 'v2.0'}]}
fake_resp = json.dumps(fake_resp)
self.useFixture(mockpatch.Patch('httplib2.Http.request',
return_value=(None, fake_resp)))
fake_os = mock.MagicMock()
versions = verify_tempest_config._get_api_versions(fake_os, 'cinder')
self.assertIn('v1.0', versions)
self.assertIn('v2.0', versions)
def test_get_nova_versions(self):
self.useFixture(mockpatch.PatchObject(
verify_tempest_config, '_get_unversioned_endpoint',
return_value='http://fake_endpoint:5000'))
fake_resp = {'versions': [{'id': 'v2.0'}, {'id': 'v3.0'}]}
fake_resp = json.dumps(fake_resp)
self.useFixture(mockpatch.Patch('httplib2.Http.request',
return_value=(None, fake_resp)))
fake_os = mock.MagicMock()
versions = verify_tempest_config._get_api_versions(fake_os, 'nova')
self.assertIn('v2.0', versions)
self.assertIn('v3.0', versions)
def test_verify_api_versions(self):
api_services = ['cinder', 'glance', 'keystone']
fake_os = mock.MagicMock()
for svc in api_services:
m = 'verify_%s_api_versions' % svc
with mock.patch.object(verify_tempest_config, m) as verify_mock:
verify_tempest_config.verify_api_versions(fake_os, svc, True)
verify_mock.assert_called_once_with(fake_os, True)
def test_verify_api_versions_not_implemented(self):
api_services = ['cinder', 'glance', 'keystone']
fake_os = mock.MagicMock()
for svc in api_services:
m = 'verify_%s_api_versions' % svc
with mock.patch.object(verify_tempest_config, m) as verify_mock:
verify_tempest_config.verify_api_versions(fake_os, 'foo', True)
self.assertFalse(verify_mock.called)
def test_verify_keystone_api_versions_no_v3(self):
self.useFixture(mockpatch.PatchObject(
verify_tempest_config, '_get_unversioned_endpoint',
return_value='http://fake_endpoint:5000'))
fake_resp = {'versions': {'values': [{'id': 'v2.0'}]}}
fake_resp = json.dumps(fake_resp)
self.useFixture(mockpatch.Patch('httplib2.Http.request',
return_value=(None, fake_resp)))
fake_os = mock.MagicMock()
with mock.patch.object(verify_tempest_config,
'print_and_or_update') as print_mock:
verify_tempest_config.verify_keystone_api_versions(fake_os, True)
print_mock.assert_called_once_with('api_v3',
'identity_feature_enabled',
False, True)
def test_verify_keystone_api_versions_no_v2(self):
self.useFixture(mockpatch.PatchObject(
verify_tempest_config, '_get_unversioned_endpoint',
return_value='http://fake_endpoint:5000'))
fake_resp = {'versions': {'values': [{'id': 'v3.0'}]}}
fake_resp = json.dumps(fake_resp)
self.useFixture(mockpatch.Patch('httplib2.Http.request',
return_value=(None, fake_resp)))
fake_os = mock.MagicMock()
with mock.patch.object(verify_tempest_config,
'print_and_or_update') as print_mock:
verify_tempest_config.verify_keystone_api_versions(fake_os, True)
print_mock.assert_called_once_with('api_v2',
'identity_feature_enabled',
False, True)
def test_verify_cinder_api_versions_no_v2(self):
self.useFixture(mockpatch.PatchObject(
verify_tempest_config, '_get_unversioned_endpoint',
return_value='http://fake_endpoint:5000'))
fake_resp = {'versions': [{'id': 'v1.0'}]}
fake_resp = json.dumps(fake_resp)
self.useFixture(mockpatch.Patch('httplib2.Http.request',
return_value=(None, fake_resp)))
fake_os = mock.MagicMock()
with mock.patch.object(verify_tempest_config,
'print_and_or_update') as print_mock:
verify_tempest_config.verify_cinder_api_versions(fake_os, True)
print_mock.assert_called_once_with('api_v2', 'volume_feature_enabled',
False, True)
def test_verify_cinder_api_versions_no_v1(self):
self.useFixture(mockpatch.PatchObject(
verify_tempest_config, '_get_unversioned_endpoint',
return_value='http://fake_endpoint:5000'))
fake_resp = {'versions': [{'id': 'v2.0'}]}
fake_resp = json.dumps(fake_resp)
self.useFixture(mockpatch.Patch('httplib2.Http.request',
return_value=(None, fake_resp)))
fake_os = mock.MagicMock()
with mock.patch.object(verify_tempest_config,
'print_and_or_update') as print_mock:
verify_tempest_config.verify_cinder_api_versions(fake_os, True)
print_mock.assert_called_once_with('api_v1', 'volume_feature_enabled',
False, True)
def test_verify_glance_version_no_v2_with_v1_1(self):
def fake_get_versions():
return (['v1.1'])
fake_os = mock.MagicMock()
fake_os.image_client.get_versions = fake_get_versions
with mock.patch.object(verify_tempest_config,
'print_and_or_update') as print_mock:
verify_tempest_config.verify_glance_api_versions(fake_os, True)
print_mock.assert_called_once_with('api_v2', 'image_feature_enabled',
False, True)
def test_verify_glance_version_no_v2_with_v1_0(self):
def fake_get_versions():
return (['v1.0'])
fake_os = mock.MagicMock()
fake_os.image_client.get_versions = fake_get_versions
with mock.patch.object(verify_tempest_config,
'print_and_or_update') as print_mock:
verify_tempest_config.verify_glance_api_versions(fake_os, True)
print_mock.assert_called_once_with('api_v2', 'image_feature_enabled',
False, True)
def test_verify_glance_version_no_v1(self):
def fake_get_versions():
return (['v2.0'])
fake_os = mock.MagicMock()
fake_os.image_client.get_versions = fake_get_versions
with mock.patch.object(verify_tempest_config,
'print_and_or_update') as print_mock:
verify_tempest_config.verify_glance_api_versions(fake_os, True)
print_mock.assert_called_once_with('api_v1', 'image_feature_enabled',
False, True)
def test_verify_extensions_neutron(self):
def fake_list_extensions():
return {'extensions': [{'alias': 'fake1'},
{'alias': 'fake2'},
{'alias': 'not_fake'}]}
fake_os = mock.MagicMock()
fake_os.network_client.list_extensions = fake_list_extensions
self.useFixture(mockpatch.PatchObject(
verify_tempest_config, 'get_enabled_extensions',
return_value=(['fake1', 'fake2', 'fake3'])))
results = verify_tempest_config.verify_extensions(fake_os,
'neutron', {})
self.assertIn('neutron', results)
self.assertIn('fake1', results['neutron'])
self.assertTrue(results['neutron']['fake1'])
self.assertIn('fake2', results['neutron'])
self.assertTrue(results['neutron']['fake2'])
self.assertIn('fake3', results['neutron'])
self.assertFalse(results['neutron']['fake3'])
self.assertIn('not_fake', results['neutron'])
self.assertFalse(results['neutron']['not_fake'])
def test_verify_extensions_neutron_all(self):
def fake_list_extensions():
return {'extensions': [{'alias': 'fake1'},
{'alias': 'fake2'},
{'alias': 'not_fake'}]}
fake_os = mock.MagicMock()
fake_os.network_client.list_extensions = fake_list_extensions
self.useFixture(mockpatch.PatchObject(
verify_tempest_config, 'get_enabled_extensions',
return_value=(['all'])))
results = verify_tempest_config.verify_extensions(fake_os,
'neutron', {})
self.assertIn('neutron', results)
self.assertIn('extensions', results['neutron'])
self.assertEqual(sorted(['fake1', 'fake2', 'not_fake']),
sorted(results['neutron']['extensions']))
def test_verify_extensions_cinder(self):
def fake_list_extensions():
return {'extensions': [{'alias': 'fake1'},
{'alias': 'fake2'},
{'alias': 'not_fake'}]}
fake_os = mock.MagicMock()
fake_os.volumes_extension_client.list_extensions = fake_list_extensions
self.useFixture(mockpatch.PatchObject(
verify_tempest_config, 'get_enabled_extensions',
return_value=(['fake1', 'fake2', 'fake3'])))
results = verify_tempest_config.verify_extensions(fake_os,
'cinder', {})
self.assertIn('cinder', results)
self.assertIn('fake1', results['cinder'])
self.assertTrue(results['cinder']['fake1'])
self.assertIn('fake2', results['cinder'])
self.assertTrue(results['cinder']['fake2'])
self.assertIn('fake3', results['cinder'])
self.assertFalse(results['cinder']['fake3'])
self.assertIn('not_fake', results['cinder'])
self.assertFalse(results['cinder']['not_fake'])
def test_verify_extensions_cinder_all(self):
def fake_list_extensions():
return {'extensions': [{'alias': 'fake1'},
{'alias': 'fake2'},
{'alias': 'not_fake'}]}
fake_os = mock.MagicMock()
fake_os.volumes_extension_client.list_extensions = fake_list_extensions
self.useFixture(mockpatch.PatchObject(
verify_tempest_config, 'get_enabled_extensions',
return_value=(['all'])))
results = verify_tempest_config.verify_extensions(fake_os,
'cinder', {})
self.assertIn('cinder', results)
self.assertIn('extensions', results['cinder'])
self.assertEqual(sorted(['fake1', 'fake2', 'not_fake']),
sorted(results['cinder']['extensions']))
def test_verify_extensions_nova(self):
def fake_list_extensions():
return ([{'alias': 'fake1'}, {'alias': 'fake2'},
{'alias': 'not_fake'}])
fake_os = mock.MagicMock()
fake_os.extensions_client.list_extensions = fake_list_extensions
self.useFixture(mockpatch.PatchObject(
verify_tempest_config, 'get_enabled_extensions',
return_value=(['fake1', 'fake2', 'fake3'])))
results = verify_tempest_config.verify_extensions(fake_os,
'nova', {})
self.assertIn('nova', results)
self.assertIn('fake1', results['nova'])
self.assertTrue(results['nova']['fake1'])
self.assertIn('fake2', results['nova'])
self.assertTrue(results['nova']['fake2'])
self.assertIn('fake3', results['nova'])
self.assertFalse(results['nova']['fake3'])
self.assertIn('not_fake', results['nova'])
self.assertFalse(results['nova']['not_fake'])
def test_verify_extensions_nova_all(self):
def fake_list_extensions():
return ({'extensions': [{'alias': 'fake1'},
{'alias': 'fake2'},
{'alias': 'not_fake'}]})
fake_os = mock.MagicMock()
fake_os.extensions_client.list_extensions = fake_list_extensions
self.useFixture(mockpatch.PatchObject(
verify_tempest_config, 'get_enabled_extensions',
return_value=(['all'])))
results = verify_tempest_config.verify_extensions(fake_os,
'nova', {})
self.assertIn('nova', results)
self.assertIn('extensions', results['nova'])
self.assertEqual(sorted(['fake1', 'fake2', 'not_fake']),
sorted(results['nova']['extensions']))
def test_verify_extensions_swift(self):
def fake_list_extensions():
return (None, {'fake1': 'metadata',
'fake2': 'metadata',
'not_fake': 'metadata',
'swift': 'metadata'})
fake_os = mock.MagicMock()
fake_os.account_client.list_extensions = fake_list_extensions
self.useFixture(mockpatch.PatchObject(
verify_tempest_config, 'get_enabled_extensions',
return_value=(['fake1', 'fake2', 'fake3'])))
results = verify_tempest_config.verify_extensions(fake_os, 'swift', {})
self.assertIn('swift', results)
self.assertIn('fake1', results['swift'])
self.assertTrue(results['swift']['fake1'])
self.assertIn('fake2', results['swift'])
self.assertTrue(results['swift']['fake2'])
self.assertIn('fake3', results['swift'])
self.assertFalse(results['swift']['fake3'])
self.assertIn('not_fake', results['swift'])
self.assertFalse(results['swift']['not_fake'])
def test_verify_extensions_swift_all(self):
def fake_list_extensions():
return (None, {'fake1': 'metadata',
'fake2': 'metadata',
'not_fake': 'metadata',
'swift': 'metadata'})
fake_os = mock.MagicMock()
fake_os.account_client.list_extensions = fake_list_extensions
self.useFixture(mockpatch.PatchObject(
verify_tempest_config, 'get_enabled_extensions',
return_value=(['all'])))
results = verify_tempest_config.verify_extensions(fake_os,
'swift', {})
self.assertIn('swift', results)
self.assertIn('extensions', results['swift'])
self.assertEqual(sorted(['not_fake', 'fake1', 'fake2']),
sorted(results['swift']['extensions']))
|
{
"content_hash": "354a9500e81bf866b4970c637efecf6e",
"timestamp": "",
"source": "github",
"line_count": 338,
"max_line_length": 79,
"avg_line_length": 49.44378698224852,
"alnum_prop": 0.5569052178075634,
"repo_name": "eggmaster/tempest",
"id": "b9afd5e97cb21996817d67cf127fcbfd4f64e2ad",
"size": "17314",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tempest/tests/cmd/test_verify_tempest_config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2724691"
},
{
"name": "Shell",
"bytes": "8560"
}
],
"symlink_target": ""
}
|
from AccessControl import ClassSecurityInfo
from Products.ATContentTypes.lib.historyaware import HistoryAwareMixin
from Products.Archetypes.public import *
from Products.Archetypes.references import HoldingReference
from Products.CMFCore.permissions import View, ModifyPortalContent
from Products.CMFCore.utils import getToolByName
from Products.CMFPlone.utils import safe_unicode
from bika.lims.browser import BrowserView
from bika.lims import bikaMessageFactory as _
from bika.lims.utils import t
from bika.lims.config import PROJECTNAME
from bika.lims.browser.widgets import DurationWidget
from bika.lims.browser.fields import DurationField
from bika.lims.content.bikaschema import BikaSchema
from bika.lims.interfaces import ISampleType
from magnitude import mg, MagnitudeError
from zope.interface import implements
import json
import plone
import sys
schema = BikaSchema.copy() + Schema((
DurationField('RetentionPeriod',
required = 1,
default_method = 'getDefaultLifetime',
widget = DurationWidget(
label=_("Retention Period"),
description =_(
"The period for which un-preserved samples of this type can be kept before "
"they expire and cannot be analysed any further"),
)
),
BooleanField('Hazardous',
default = False,
widget = BooleanWidget(
label=_("Hazardous"),
description=_("Samples of this type should be treated as hazardous"),
),
),
ReferenceField('SampleMatrix',
required = 0,
allowed_types = ('SampleMatrix',),
vocabulary = 'SampleMatricesVocabulary',
relationship = 'SampleTypeSampleMatrix',
referenceClass = HoldingReference,
widget = ReferenceWidget(
checkbox_bound = 0,
label=_("Sample Matrix"),
),
),
StringField('Prefix',
required = True,
widget = StringWidget(
label=_("Sample Type Prefix"),
),
),
StringField('MinimumVolume',
required = 1,
widget = StringWidget(
label=_("Minimum Volume"),
description=_("The minimum sample volume required for analysis eg. '10 ml' or '1 kg'."),
),
),
ReferenceField('ContainerType',
required = 0,
allowed_types = ('ContainerType',),
vocabulary = 'ContainerTypesVocabulary',
relationship = 'SampleTypeContainerType',
widget = ReferenceWidget(
checkbox_bound = 0,
label=_("Default Container Type"),
description =_(
"The default container type. New sample partitions "
"are automatically assigned a container of this "
"type, unless it has been specified in more details "
"per analysis service"),
),
),
ReferenceField('SamplePoints',
required = 0,
multiValued = 1,
allowed_types = ('SamplePoint',),
vocabulary = 'SamplePointsVocabulary',
relationship = 'SampleTypeSamplePoint',
widget = ReferenceWidget(
checkbox_bound = 0,
label=_("Sample Points"),
description =_("The list of sample points from which this sample "
"type can be collected. If no sample points are "
"selected, then all sample points are available."),
),
),
ComputedField(
'SamplePointTitle',
expression="[o.Title() for o in context.getSamplePoints()]",
widget = ComputedWidget(
            visible=False,
)
),
))
schema['description'].schemata = 'default'
schema['description'].widget.visible = True
class SampleType(BaseContent, HistoryAwareMixin):
implements(ISampleType)
security = ClassSecurityInfo()
displayContentsTab = False
schema = schema
_at_rename_after_creation = True
def _renameAfterCreation(self, check_auto_id=False):
from bika.lims.idserver import renameAfterCreation
renameAfterCreation(self)
def Title(self):
return safe_unicode(self.getField('title').get(self)).encode('utf-8')
def getJSMinimumVolume(self, **kw):
"""Try convert the MinimumVolume to 'ml' or 'g' so that JS has an
easier time working with it. If conversion fails, return raw value.
"""
default = self.Schema()['MinimumVolume'].get(self)
try:
mgdefault = default.split(' ', 1)
mgdefault = mg(float(mgdefault[0]), mgdefault[1])
except:
mgdefault = mg(0, 'ml')
try:
return str(mgdefault.ounit('ml'))
except:
pass
try:
return str(mgdefault.ounit('g'))
except:
pass
return str(default)
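    # Illustrative sketch (not part of the upstream class): the magnitude round
    # trip that getJSMinimumVolume relies on, kept as a separate helper so the
    # original logic stays untouched. Whether ounit() raises MagnitudeError for
    # incompatible units is an assumption based on the import at the top of
    # this module.
    def _minimum_volume_in_ml_sketch(self, raw_value):
        try:
            number, unit = raw_value.split(' ', 1)
            return str(mg(float(number), unit).ounit('ml'))
        except (ValueError, MagnitudeError):
            return str(raw_value)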
def getDefaultLifetime(self):
""" get the default retention period """
settings = getToolByName(self, 'bika_setup')
return settings.getDefaultSampleLifetime()
def SamplePointsVocabulary(self):
from bika.lims.content.samplepoint import SamplePoints
return SamplePoints(self, allow_blank=False)
def setSamplePoints(self, value, **kw):
""" For the moment, we're manually trimming the sampletype<>samplepoint
relation to be equal on both sides, here.
It's done strangely, because it may be required to behave strangely.
"""
bsc = getToolByName(self, 'bika_setup_catalog')
## convert value to objects
if value and type(value) == str:
value = [bsc(UID=value)[0].getObject(),]
elif value and type(value) in (list, tuple) and type(value[0]) == str:
value = [bsc(UID=uid)[0].getObject() for uid in value if uid]
## Find all SamplePoints that were removed
existing = self.Schema()['SamplePoints'].get(self)
removed = existing and [s for s in existing if s not in value] or []
added = value and [s for s in value if s not in existing] or []
ret = self.Schema()['SamplePoints'].set(self, value)
for sp in removed:
sampletypes = sp.getSampleTypes()
if self in sampletypes:
sampletypes.remove(self)
sp.setSampleTypes(sampletypes)
for sp in added:
sp.setSampleTypes(list(sp.getSampleTypes()) + [self,])
return ret
def getSamplePoints(self, **kw):
return self.Schema()['SamplePoints'].get(self)
def SampleMatricesVocabulary(self):
from bika.lims.content.samplematrix import SampleMatrices
return SampleMatrices(self, allow_blank=True)
def ContainerTypesVocabulary(self):
from bika.lims.content.containertype import ContainerTypes
return ContainerTypes(self, allow_blank=True)
registerType(SampleType, PROJECTNAME)
def SampleTypes(self, instance=None, allow_blank=False):
instance = instance or self
bsc = getToolByName(instance, 'bika_setup_catalog')
items = []
for st in bsc(portal_type='SampleType',
inactive_state='active',
sort_on = 'sortable_title'):
items.append((st.UID, st.Title))
items = allow_blank and [['','']] + list(items) or list(items)
return DisplayList(items)
|
{
"content_hash": "b537ba3976efb399930ec70e72abf6b7",
"timestamp": "",
"source": "github",
"line_count": 199,
"max_line_length": 100,
"avg_line_length": 36.64824120603015,
"alnum_prop": 0.6266282736870972,
"repo_name": "hocinebendou/bika.gsoc",
"id": "835d67b379fb71973c47c66ec8b7990b3914319b",
"size": "7293",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "bika/lims/content/sampletype.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "403"
},
{
"name": "COBOL",
"bytes": "5987"
},
{
"name": "CSS",
"bytes": "29758"
},
{
"name": "JavaScript",
"bytes": "411425"
},
{
"name": "Python",
"bytes": "4330980"
},
{
"name": "RobotFramework",
"bytes": "239735"
},
{
"name": "Shell",
"bytes": "11201"
}
],
"symlink_target": ""
}
|
from pyxl.codec.register import pyxl_decode
from pyxl.codec.tokenizer import PyxlParseError
from pyxl.codec.parser import ParseError
#import pyxl.codec.register
import os
import subprocess
passing_cases_path = os.path.dirname(os.path.abspath(__file__))
error_cases_path = os.path.join(passing_cases_path, 'error_cases')
def _expect_failure(file_name):
path = os.path.join(error_cases_path, file_name)
try:
with open(path) as f:
print(pyxl_decode(f.read()))
assert False, "successfully decoded file %r" % file_name
except (PyxlParseError, ParseError) as e:
print('.', end='')
def test_error_cases():
cases = os.listdir(error_cases_path)
for file_name in cases:
if file_name.endswith(".py"):
_expect_failure(file_name)
def test_passing_cases():
cases = os.listdir(passing_cases_path)
for file_name in cases:
if file_name in ('test_basic.py', 'test_rss.py'):
subprocess.Popen(['python', file_name]).wait()
elif file_name.startswith('test_'):
module = __import__(file_name[:-3])
if not module.test(): print('.', end='')
else: print('F (%s)' % file_name, end='')
if __name__ == '__main__':
test_error_cases()
print()
test_passing_cases()
print()
|
{
"content_hash": "2c488853cb17e736f447b66dce23992a",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 66,
"avg_line_length": 31.975609756097562,
"alnum_prop": 0.6231884057971014,
"repo_name": "lez/pyxl3",
"id": "e76be65e7ab1b383c95e98c60968d84c1d4d4f19",
"size": "1311",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Emacs Lisp",
"bytes": "4107"
},
{
"name": "Python",
"bytes": "109136"
},
{
"name": "VimL",
"bytes": "38879"
}
],
"symlink_target": ""
}
|
""" BVT tests for Snapshots
"""
#Import Local Modules
import marvin
from marvin.cloudstackTestCase import *
from marvin.cloudstackAPI import *
from integration.lib.utils import *
from integration.lib.base import *
from integration.lib.common import *
from marvin import remoteSSHClient
class Services:
"""Test Snapshots Services
"""
def __init__(self):
self.services = {
"account": {
"email": "test@test.com",
"firstname": "Test",
"lastname": "User",
"username": "test",
# Random characters are appended for unique
# username
"password": "fr3sca",
},
"service_offering": {
"name": "Tiny Instance",
"displaytext": "Tiny Instance",
"cpunumber": 1,
"cpuspeed": 100, # in MHz
"memory": 64, # In MBs
},
"disk_offering": {
"displaytext": "Small",
"name": "Small",
"disksize": 1
},
"server_with_disk":
{
"displayname": "Test VM -With Disk",
"username": "root",
"password": "password",
"ssh_port": 22,
"hypervisor": 'XenServer',
"privateport": 22,
"publicport": 22,
"protocol": 'TCP',
},
"server_without_disk":
{
"displayname": "Test VM-No Disk",
"username": "root",
"password": "password",
"ssh_port": 22,
"hypervisor": 'XenServer',
"privateport": 22,
# For NAT rule creation
"publicport": 22,
"protocol": 'TCP',
},
"recurring_snapshot":
{
"intervaltype": 'HOURLY',
# Frequency of snapshots
"maxsnaps": 1, # Should be min 2
"schedule": 1,
"timezone": 'US/Arizona',
# Timezone Formats - http://cloud.mindtouch.us/CloudStack_Documentation/Developer's_Guide%3A_CloudStack
},
"templates":
{
"displaytext": 'Template from snapshot',
"name": 'Template from snapshot',
"ostypeid": '5776c0d2-f331-42db-ba3a-29f1f8319bc9',
"templatefilter": 'self',
},
"ostypeid": '5776c0d2-f331-42db-ba3a-29f1f8319bc9',
# Cent OS 5.3 (64 bit)
"diskdevice": "/dev/xvdb", # Data Disk
"rootdisk": "/dev/xvda", # Root Disk
"diskname": "Test Disk",
"size": 1, # GBs
"mount_dir": "/mnt/tmp",
"sub_dir": "test",
"sub_lvl_dir1": "test1",
"sub_lvl_dir2": "test2",
"random_data": "random.data",
"username": "root",
"password": "password",
"ssh_port": 22,
"sleep": 60,
"timeout": 10,
"mode": 'advanced',
# Networking mode, Advanced, Basic
}
class TestSnapshotRootDisk(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.api_client = super(TestSnapshotRootDisk, cls).getClsTestClient().getApiClient()
cls.services = Services().services
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client, cls.services)
cls.zone = get_zone(cls.api_client, cls.services)
template = get_template(
cls.api_client,
cls.zone.id,
cls.services["ostypeid"]
)
cls.services["domainid"] = cls.domain.id
cls.services["server_without_disk"]["zoneid"] = cls.zone.id
cls.services["template"] = template.id
cls.services["zoneid"] = cls.zone.id
# Create VMs, NAT Rules etc
cls.account = Account.create(
cls.api_client,
cls.services["account"],
domainid=cls.domain.id
)
cls.services["account"] = cls.account.account.name
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offering"]
)
cls.virtual_machine = cls.virtual_machine_with_disk = \
VirtualMachine.create(
cls.api_client,
cls.services["server_without_disk"],
templateid=template.id,
accountid=cls.account.account.name,
domainid=cls.account.account.domainid,
serviceofferingid=cls.service_offering.id,
mode=cls.services["mode"]
)
cls._cleanup = [
cls.service_offering,
cls.account,
]
return
@classmethod
def tearDownClass(cls):
try:
#Cleanup resources used
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
return
def tearDown(self):
try:
#Clean up, terminate the created instance, volumes and snapshots
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def test_01_snapshot_root_disk(self):
"""Test Snapshot Root Disk
"""
# Validate the following
# 1. listSnapshots should list the snapshot that was created.
# 2. verify that secondary storage NFS share contains
# the reqd volume under
# /secondary/snapshots//$account_id/$volumeid/$snapshot_uuid
# 3. verify backup_snap_id was non null in the `snapshots` table
volumes = list_volumes(
self.apiclient,
virtualmachineid=self.virtual_machine_with_disk.id,
type='ROOT',
listall=True
)
snapshot = Snapshot.create(
self.apiclient,
volumes[0].id,
account=self.account.account.name,
domainid=self.account.account.domainid
)
self.debug("Snapshot created: ID - %s" % snapshot.id)
snapshots = list_snapshots(
self.apiclient,
id=snapshot.id
)
self.assertEqual(
isinstance(snapshots, list),
True,
"Check list response returns a valid list"
)
self.assertNotEqual(
snapshots,
None,
"Check if result exists in list item call"
)
self.assertEqual(
snapshots[0].id,
snapshot.id,
"Check resource id in list resources call"
)
self.debug(
"select backup_snap_id, account_id, volume_id from snapshots where uuid = '%s';" \
% str(snapshot.id)
)
qresultset = self.dbclient.execute(
"select backup_snap_id, account_id, volume_id from snapshots where uuid = '%s';" \
% str(snapshot.id)
)
self.assertNotEqual(
len(qresultset),
0,
"Check DB Query result set"
)
qresult = qresultset[0]
snapshot_uuid = qresult[0] # backup_snap_id = snapshot UUID
account_id = qresult[1]
volume_id = qresult[2]
self.assertNotEqual(
str(snapshot_uuid),
'NULL',
"Check if backup_snap_id is not null"
)
# Get the Secondary Storage details from list Hosts
hosts = list_hosts(
self.apiclient,
type='SecondaryStorage',
zoneid=self.zone.id
)
self.assertEqual(
isinstance(hosts, list),
True,
"Check list response returns a valid list"
)
uuids = []
for host in hosts:
# hosts[0].name = "nfs://192.168.100.21/export/test"
parse_url = (host.name).split('/')
# parse_url = ['nfs:', '', '192.168.100.21', 'export', 'test']
# Split IP address and export path from name
sec_storage_ip = parse_url[2]
# Sec Storage IP: 192.168.100.21
export_path = '/'.join(parse_url[3:])
# Export path: export/test
try:
# Login to VM to check snapshot present on sec disk
ssh_client = self.virtual_machine_with_disk.get_ssh_client()
cmds = [
"mkdir -p %s" % self.services["mount_dir"],
"mount %s/%s %s" % (
sec_storage_ip,
export_path,
self.services["mount_dir"]
),
"ls %s/snapshots/%s/%s" % (
self.services["mount_dir"],
account_id,
volume_id
),
]
for c in cmds:
self.debug(c)
result = ssh_client.execute(c)
self.debug(result)
except Exception:
self.fail("SSH failed for Virtual machine: %s" %
self.virtual_machine_with_disk.ipaddress)
uuids.append(result)
# Unmount the Sec Storage
cmds = [
"umount %s" % (self.services["mount_dir"]),
]
try:
for c in cmds:
self.debug(c)
result = ssh_client.execute(c)
self.debug(result)
except Exception as e:
self.fail("SSH failed for Virtual machine: %s" %
self.virtual_machine_with_disk.ipaddress)
res = str(uuids)
# Check snapshot UUID in secondary storage and database
self.assertEqual(
res.count(snapshot_uuid),
1,
"Check snapshot UUID in secondary storage and database"
)
return
class TestSnapshots(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.api_client = super(TestSnapshots, cls).getClsTestClient().getApiClient()
cls.services = Services().services
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client, cls.services)
cls.zone = get_zone(cls.api_client, cls.services)
cls.disk_offering = DiskOffering.create(
cls.api_client,
cls.services["disk_offering"]
)
template = get_template(
cls.api_client,
cls.zone.id,
cls.services["ostypeid"]
)
cls.services["domainid"] = cls.domain.id
cls.services["server_with_disk"]["zoneid"] = cls.zone.id
cls.services["server_with_disk"]["diskoffering"] = cls.disk_offering.id
cls.services["server_without_disk"]["zoneid"] = cls.zone.id
cls.services["template"] = template.id
cls.services["zoneid"] = cls.zone.id
cls.services["diskoffering"] = cls.disk_offering.id
# Create VMs, NAT Rules etc
cls.account = Account.create(
cls.api_client,
cls.services["account"],
domainid=cls.domain.id
)
cls.services["account"] = cls.account.account.name
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offering"]
)
cls.virtual_machine = cls.virtual_machine_with_disk = \
VirtualMachine.create(
cls.api_client,
cls.services["server_with_disk"],
templateid=template.id,
accountid=cls.account.account.name,
domainid=cls.account.account.domainid,
serviceofferingid=cls.service_offering.id,
mode=cls.services["mode"]
)
cls.virtual_machine_without_disk = \
VirtualMachine.create(
cls.api_client,
cls.services["server_without_disk"],
templateid=template.id,
accountid=cls.account.account.name,
domainid=cls.account.account.domainid,
serviceofferingid=cls.service_offering.id,
mode=cls.services["mode"]
)
cls._cleanup = [
cls.service_offering,
cls.disk_offering,
cls.account,
]
return
@classmethod
def tearDownClass(cls):
try:
#Cleanup resources used
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
return
def tearDown(self):
try:
#Clean up, terminate the created instance, volumes and snapshots
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def test_02_snapshot_data_disk(self):
"""Test Snapshot Data Disk
"""
volume = list_volumes(
self.apiclient,
virtualmachineid=self.virtual_machine_with_disk.id,
type='DATADISK',
listall=True
)
self.assertEqual(
isinstance(volume, list),
True,
"Check list response returns a valid list"
)
self.debug("Creating a Snapshot from data volume: %s" % volume[0].id)
snapshot = Snapshot.create(
self.apiclient,
volume[0].id,
account=self.account.account.name,
domainid=self.account.account.domainid
)
snapshots = list_snapshots(
self.apiclient,
id=snapshot.id
)
self.assertEqual(
isinstance(snapshots, list),
True,
"Check list response returns a valid list"
)
self.assertNotEqual(
snapshots,
None,
"Check if result exists in list item call"
)
self.assertEqual(
snapshots[0].id,
snapshot.id,
"Check resource id in list resources call"
)
self.debug(
"select backup_snap_id, account_id, volume_id from snapshots where uuid = '%s';" \
% str(snapshot.id)
)
qresultset = self.dbclient.execute(
"select backup_snap_id, account_id, volume_id from snapshots where uuid = '%s';" \
% str(snapshot.id)
)
self.assertNotEqual(
len(qresultset),
0,
"Check DB Query result set"
)
qresult = qresultset[0]
snapshot_uuid = qresult[0] # backup_snap_id = snapshot UUID
account_id = qresult[1]
volume_id = qresult[2]
self.assertNotEqual(
str(snapshot_uuid),
'NULL',
"Check if backup_snap_id is not null"
)
# Get the Secondary Storage details from list Hosts
hosts = list_hosts(
self.apiclient,
type='SecondaryStorage',
zoneid=self.zone.id
)
self.assertEqual(
isinstance(hosts, list),
True,
"Check list response returns a valid list"
)
uuids = []
for host in hosts:
# hosts[0].name = "nfs://192.168.100.21/export"
parse_url = (host.name).split('/')
# parse_url = ['nfs:', '', '192.168.100.21', 'export']
# Split IP address and export path from name
sec_storage_ip = parse_url[2]
# Sec Storage IP: 192.168.100.21
export_path = '/'.join(parse_url[3:])
# Export path: export
try:
# Login to VM to check snapshot present on sec disk
ssh_client = self.virtual_machine_with_disk.get_ssh_client()
cmds = [
"mkdir -p %s" % self.services["mount_dir"],
"mount %s/%s %s" % (
sec_storage_ip,
export_path,
self.services["mount_dir"]
),
"ls %s/snapshots/%s/%s" % (
self.services["mount_dir"],
account_id,
volume_id
),
]
for c in cmds:
self.debug(c)
result = ssh_client.execute(c)
self.debug(result)
except Exception as e:
self.fail("SSH failed for VM with IP: %s" %
self.virtual_machine_with_disk.ipaddress)
uuids.append(result)
# Unmount the Sec Storage
cmds = [
"umount %s" % (self.services["mount_dir"]),
]
try:
for c in cmds:
self.debug(c)
ssh_client.execute(c)
except Exception as e:
self.fail("SSH failed for VM with IP: %s" %
self.virtual_machine_with_disk.ipaddress)
res = str(uuids)
# Check snapshot UUID in secondary storage and database
self.assertEqual(
res.count(snapshot_uuid),
1,
"Check snapshot UUID in secondary storage and database"
)
return
def test_03_volume_from_snapshot(self):
"""Create volumes from snapshots
"""
#1. Login to machine; create temp/test directories on data volume
#2. Snapshot the Volume
#3. Create another Volume from snapshot
#4. Mount/Attach volume to another server
#5. Compare data
random_data_0 = random_gen(100)
random_data_1 = random_gen(100)
try:
ssh_client = self.virtual_machine.get_ssh_client()
#Format partition using ext3
format_volume_to_ext3(
ssh_client,
self.services["diskdevice"]
)
cmds = [
"mkdir -p %s" % self.services["mount_dir"],
"mount %s1 %s" % (
self.services["diskdevice"],
self.services["mount_dir"]
),
"mkdir -p %s/%s/{%s,%s} " % (
self.services["mount_dir"],
self.services["sub_dir"],
self.services["sub_lvl_dir1"],
self.services["sub_lvl_dir2"]
),
"echo %s > %s/%s/%s/%s" % (
random_data_0,
self.services["mount_dir"],
self.services["sub_dir"],
self.services["sub_lvl_dir1"],
self.services["random_data"]
),
"echo %s > %s/%s/%s/%s" % (
random_data_1,
self.services["mount_dir"],
self.services["sub_dir"],
self.services["sub_lvl_dir2"],
self.services["random_data"]
),
]
for c in cmds:
self.debug(c)
ssh_client.execute(c)
except Exception as e:
self.fail("SSH failed for VM with IP: %s" %
self.virtual_machine.ipaddress)
# Unmount the Sec Storage
cmds = [
"umount %s" % (self.services["mount_dir"]),
]
try:
for c in cmds:
self.debug(c)
ssh_client.execute(c)
except Exception as e:
self.fail("SSH failed for VM with IP: %s" %
self.virtual_machine.ipaddress)
list_volume_response = list_volumes(
self.apiclient,
virtualmachineid=self.virtual_machine.id,
type='DATADISK',
listall=True
)
volume_response = list_volume_response[0]
#Create snapshot from attached volume
snapshot = Snapshot.create(
self.apiclient,
volume_response.id,
account=self.account.account.name,
domainid=self.account.account.domainid
)
self.debug("Created Snapshot from volume: %s" % volume_response.id)
#Create volume from snapshot
self.debug("Creating volume from snapshot: %s" % snapshot.id)
volume = Volume.create_from_snapshot(
self.apiclient,
snapshot.id,
self.services,
account=self.account.account.name,
domainid=self.account.account.domainid
)
volumes = list_volumes(
self.apiclient,
id=volume.id
)
self.assertEqual(
isinstance(volumes, list),
True,
"Check list response returns a valid list"
)
self.assertNotEqual(
len(volumes),
None,
"Check Volume list Length"
)
self.assertEqual(
volumes[0].id,
volume.id,
"Check Volume in the List Volumes"
)
#Attaching volume to new VM
new_virtual_machine = self.virtual_machine_without_disk
self.cleanup.append(new_virtual_machine)
cmd = attachVolume.attachVolumeCmd()
cmd.id = volume.id
cmd.virtualmachineid = new_virtual_machine.id
self.apiclient.attachVolume(cmd)
try:
#Login to VM to verify test directories and files
ssh = new_virtual_machine.get_ssh_client()
cmds = [
"mkdir -p %s" % self.services["mount_dir"],
"mount %s1 %s" % (
self.services["diskdevice"],
self.services["mount_dir"]
),
]
for c in cmds:
self.debug(c)
result = ssh.execute(c)
self.debug(result)
returned_data_0 = ssh.execute("cat %s/%s/%s/%s" % (
self.services["mount_dir"],
self.services["sub_dir"],
self.services["sub_lvl_dir1"],
self.services["random_data"]
))
returned_data_1 = ssh.execute("cat %s/%s/%s/%s" % (
self.services["mount_dir"],
self.services["sub_dir"],
self.services["sub_lvl_dir2"],
self.services["random_data"]
))
except Exception as e:
self.fail("SSH failed for VM with IP: %s" %
                      new_virtual_machine.ipaddress)
#Verify returned data
self.assertEqual(
random_data_0,
returned_data_0[0],
"Verify newly attached volume contents with existing one"
)
self.assertEqual(
random_data_1,
returned_data_1[0],
"Verify newly attached volume contents with existing one"
)
# Unmount the Sec Storage
cmds = [
"umount %s" % (self.services["mount_dir"]),
]
try:
for c in cmds:
ssh_client.execute(c)
except Exception as e:
self.fail("SSH failed for VM with IP: %s" %
                      new_virtual_machine.ipaddress)
return
def test_04_delete_snapshot(self):
"""Test Delete Snapshot
"""
#1. Snapshot the Volume
#2. Delete the snapshot
#3. Verify snapshot is removed by calling List Snapshots API
volumes = list_volumes(
self.apiclient,
virtualmachineid=self.virtual_machine.id,
type='DATADISK',
listall=True
)
self.assertEqual(
isinstance(volumes, list),
True,
"Check list response returns a valid list"
)
snapshot = Snapshot.create(
self.apiclient,
volumes[0].id,
account=self.account.account.name,
domainid=self.account.account.domainid
)
snapshot.delete(self.apiclient)
snapshots = list_snapshots(
self.apiclient,
id=snapshot.id
)
self.assertEqual(
snapshots,
None,
"Check if result exists in list item call"
)
return
def test_05_recurring_snapshot_root_disk(self):
"""Test Recurring Snapshot Root Disk
"""
#1. Create snapshot policy for root disk
#2. ListSnapshot policy should return newly created policy
        #3. Verify only the most recent (maxsnaps) snapshots are retained
volume = list_volumes(
self.apiclient,
virtualmachineid=self.virtual_machine_with_disk.id,
type='ROOT',
listall=True
)
self.assertEqual(
isinstance(volume, list),
True,
"Check list response returns a valid list"
)
recurring_snapshot = SnapshotPolicy.create(
self.apiclient,
volume[0].id,
self.services["recurring_snapshot"]
)
self.cleanup.append(recurring_snapshot)
#ListSnapshotPolicy should return newly created policy
list_snapshots_policy = list_snapshot_policy(
self.apiclient,
id=recurring_snapshot.id,
volumeid=volume[0].id
)
self.assertEqual(
isinstance(list_snapshots_policy, list),
True,
"Check list response returns a valid list"
)
self.assertNotEqual(
list_snapshots_policy,
None,
"Check if result exists in list item call"
)
snapshots_policy = list_snapshots_policy[0]
self.assertEqual(
snapshots_policy.id,
recurring_snapshot.id,
"Check recurring snapshot id in list resources call"
)
self.assertEqual(
snapshots_policy.maxsnaps,
self.services["recurring_snapshot"]["maxsnaps"],
"Check interval type in list resources call"
)
        # Sleep for (maxsnaps) hours to verify
        # only maxsnaps snapshots are retained
time.sleep(
(self.services["recurring_snapshot"]["maxsnaps"]) * 3600
)
timeout = self.services["timeout"]
while True:
snapshots = list_snapshots(
self.apiclient,
volumeid=volume[0].id,
intervaltype=\
self.services["recurring_snapshot"]["intervaltype"],
snapshottype='RECURRING',
listall=True
)
if isinstance(snapshots, list):
break
elif timeout == 0:
raise Exception("List snapshots API call failed.")
time.sleep(1)
timeout = timeout - 1
self.assertEqual(
isinstance(snapshots, list),
True,
"Check list response returns a valid list"
)
self.assertEqual(
len(snapshots),
self.services["recurring_snapshot"]["maxsnaps"],
"Check maximum number of recurring snapshots retained"
)
return
def test_06_recurring_snapshot_data_disk(self):
"""Test Recurring Snapshot data Disk
"""
#1. Create snapshot policy for data disk
#2. ListSnapshot policy should return newly created policy
        #3. Verify only the most recent (maxsnaps) snapshots are retained
volume = list_volumes(
self.apiclient,
virtualmachineid=self.virtual_machine_with_disk.id,
type='DATADISK',
listall=True
)
self.assertEqual(
isinstance(volume, list),
True,
"Check list response returns a valid list"
)
recurring_snapshot = SnapshotPolicy.create(
self.apiclient,
volume[0].id,
self.services["recurring_snapshot"]
)
self.cleanup.append(recurring_snapshot)
#ListSnapshotPolicy should return newly created policy
list_snapshots_policy = list_snapshot_policy(
self.apiclient,
id=recurring_snapshot.id,
volumeid=volume[0].id
)
self.assertEqual(
isinstance(list_snapshots_policy, list),
True,
"Check list response returns a valid list"
)
self.assertNotEqual(
list_snapshots_policy,
None,
"Check if result exists in list item call"
)
snapshots_policy = list_snapshots_policy[0]
self.assertEqual(
snapshots_policy.id,
recurring_snapshot.id,
"Check recurring snapshot id in list resources call"
)
self.assertEqual(
snapshots_policy.maxsnaps,
self.services["recurring_snapshot"]["maxsnaps"],
"Check interval type in list resources call"
)
# Sleep for (maxsnaps) hours to verify only maxsnaps snapshots are
# retained
time.sleep(
(self.services["recurring_snapshot"]["maxsnaps"]) * 3600
)
timeout = self.services["timeout"]
while True:
snapshots = list_snapshots(
self.apiclient,
volumeid=volume[0].id,
intervaltype=\
self.services["recurring_snapshot"]["intervaltype"],
snapshottype='RECURRING',
listall=True
)
if isinstance(snapshots, list):
break
elif timeout == 0:
raise Exception("List snapshots API call failed.")
time.sleep(1)
timeout = timeout - 1
self.assertEqual(
isinstance(snapshots, list),
True,
"Check list response returns a valid list"
)
self.assertEqual(
len(snapshots),
self.services["recurring_snapshot"]["maxsnaps"],
"Check maximum number of recurring snapshots retained"
)
return
def test_07_template_from_snapshot(self):
"""Create Template from snapshot
"""
#1. Login to machine; create temp/test directories on data volume
#2. Snapshot the Volume
#3. Create Template from snapshot
#4. Deploy Virtual machine using this template
#5. Login to newly created virtual machine
#6. Compare data
random_data_0 = random_gen(100)
random_data_1 = random_gen(100)
try:
#Login to virtual machine
ssh_client = self.virtual_machine.get_ssh_client()
cmds = [
"mkdir -p %s" % self.services["mount_dir"],
"mount %s1 %s" % (
self.services["rootdisk"],
self.services["mount_dir"]
),
"mkdir -p %s/%s/{%s,%s} " % (
self.services["mount_dir"],
self.services["sub_dir"],
self.services["sub_lvl_dir1"],
self.services["sub_lvl_dir2"]
),
"echo %s > %s/%s/%s/%s" % (
random_data_0,
self.services["mount_dir"],
self.services["sub_dir"],
self.services["sub_lvl_dir1"],
self.services["random_data"]
),
"echo %s > %s/%s/%s/%s" % (
random_data_1,
self.services["mount_dir"],
self.services["sub_dir"],
self.services["sub_lvl_dir2"],
self.services["random_data"]
),
"sync",
]
for c in cmds:
self.debug(c)
result = ssh_client.execute(c)
self.debug(result)
except Exception as e:
self.fail("SSH failed for VM with IP address: %s" %
self.virtual_machine.ipaddress)
# Unmount the Volume
cmds = [
"umount %s" % (self.services["mount_dir"]),
]
for c in cmds:
self.debug(c)
ssh_client.execute(c)
volumes = list_volumes(
self.apiclient,
virtualmachineid=self.virtual_machine.id,
type='ROOT',
listall=True
)
self.assertEqual(
isinstance(volumes, list),
True,
"Check list response returns a valid list"
)
volume = volumes[0]
#Create a snapshot of volume
snapshot = Snapshot.create(
self.apiclient,
volume.id,
account=self.account.account.name,
domainid=self.account.account.domainid
)
self.debug("Snapshot created from volume ID: %s" % volume.id)
# Generate template from the snapshot
template = Template.create_from_snapshot(
self.apiclient,
snapshot,
self.services["templates"]
)
self.cleanup.append(template)
self.debug("Template created from snapshot ID: %s" % snapshot.id)
# Verify created template
templates = list_templates(
self.apiclient,
templatefilter=\
self.services["templates"]["templatefilter"],
id=template.id
)
self.assertNotEqual(
templates,
None,
"Check if result exists in list item call"
)
self.assertEqual(
templates[0].id,
template.id,
"Check new template id in list resources call"
)
self.debug("Deploying new VM from template: %s" % template.id)
# Deploy new virtual machine using template
new_virtual_machine = VirtualMachine.create(
self.apiclient,
self.services["server_without_disk"],
templateid=template.id,
accountid=self.account.account.name,
domainid=self.account.account.domainid,
serviceofferingid=self.service_offering.id,
mode=self.services["mode"]
)
self.cleanup.append(new_virtual_machine)
try:
#Login to VM & mount directory
ssh = new_virtual_machine.get_ssh_client()
cmds = [
"mkdir -p %s" % self.services["mount_dir"],
"mount %s1 %s" % (
self.services["rootdisk"],
self.services["mount_dir"]
)
]
for c in cmds:
ssh.execute(c)
returned_data_0 = ssh.execute("cat %s/%s/%s/%s" % (
self.services["mount_dir"],
self.services["sub_dir"],
self.services["sub_lvl_dir1"],
self.services["random_data"]
))
self.debug(returned_data_0)
returned_data_1 = ssh.execute("cat %s/%s/%s/%s" % (
self.services["mount_dir"],
self.services["sub_dir"],
self.services["sub_lvl_dir2"],
self.services["random_data"]
))
self.debug(returned_data_1)
except Exception as e:
self.fail("SSH failed for VM with IP address: %s" %
new_virtual_machine.ipaddress)
#Verify returned data
self.assertEqual(
random_data_0,
returned_data_0[0],
"Verify newly attached volume contents with existing one"
)
self.assertEqual(
random_data_1,
returned_data_1[0],
"Verify newly attached volume contents with existing one"
)
# Unmount the volume
cmds = [
"umount %s" % (self.services["mount_dir"]),
]
try:
for c in cmds:
self.debug(c)
ssh_client.execute(c)
except Exception as e:
self.fail("SSH failed for VM with IP address: %s" %
new_virtual_machine.ipaddress)
return
|
{
"content_hash": "b833fa34c51ea3d04f4abfa845c640d9",
"timestamp": "",
"source": "github",
"line_count": 1146,
"max_line_length": 141,
"avg_line_length": 41.54275741710297,
"alnum_prop": 0.3910897328180138,
"repo_name": "argv0/cloudstack",
"id": "510db0cfd359644461e8fbf1789da57dd6fa131f",
"size": "48395",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/integration/smoke/test_snapshots.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "52484672"
},
{
"name": "JavaScript",
"bytes": "1913801"
},
{
"name": "Perl",
"bytes": "824212"
},
{
"name": "Python",
"bytes": "2076246"
},
{
"name": "Ruby",
"bytes": "2166"
},
{
"name": "Shell",
"bytes": "445096"
}
],
"symlink_target": ""
}
|
import sys, os
try:
from djangotribune import __version__ as djangotribune_version
except ImportError:
djangotribune_version = "last repository version"
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.todo']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'djangotribune'
copyright = u'2013, David THENON'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = djangotribune_version
# The full version, including alpha/beta/rc tags.
release = djangotribune_version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'djangotribunedoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'djangotribune.tex', u'djangotribune Documentation',
u'David THENON', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'djangotribune', u'djangotribune Documentation',
[u'David THENON'], 1)
]
|
{
"content_hash": "7192e57f2af39792e372199a9b862967",
"timestamp": "",
"source": "github",
"line_count": 208,
"max_line_length": 80,
"avg_line_length": 32.62980769230769,
"alnum_prop": 0.7101812288198026,
"repo_name": "cible/djangotribune",
"id": "75363dea76ec28aae59ca37b043a0e10ad459daf",
"size": "7211",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "37690"
},
{
"name": "HTML",
"bytes": "8647"
},
{
"name": "JavaScript",
"bytes": "85711"
},
{
"name": "Python",
"bytes": "127178"
},
{
"name": "Ruby",
"bytes": "966"
}
],
"symlink_target": ""
}
|
"""
Created on Thu Jul 27 01:08:03 2017
@author: jacob
"""
import numpy as np
from generate_dataset import Song
import matplotlib.pyplot as plt
max_tempo = 1500000
max_beats = 1500
num_notes = 128
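# Return the dense piano-roll array stored (sparse) as the first field of a dataset item.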
def load_item(value):
return value[0].toarray()
def unique_notes(value):
notes = np.transpose(load_item(value) != 0)
return np.vstack({tuple(row) for row in notes}).shape[1]
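# Length of a song in beats: total ticks divided by ticks per beat.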
def num_beats(song):
num_ticks = song.piano_roll.shape[1]
return num_ticks/song.ticks_per_beat
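# Keep only songs whose tempo and length (in beats) fall below the configured limits.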
def validate(song):
return max_tempo > song.tempo and num_beats(song) < max_beats
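# Resample a song's piano roll to a fixed number of ticks per beat by taking
# evenly spaced tick indices. With sample_many=True several slice offsets are
# tried and the one preserving the most note changes is kept; with errors=True
# the change counts after and before downsampling are returned as well.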
def downsample_to(song, ticks_per_beat, errors=False, sample_many=False):
num_ticks = song.piano_roll.shape[1]
    num_beats = num_ticks // song.ticks_per_beat  # integer beat count for index slicing
old_roll = song.piano_roll.toarray()
if(sample_many):
downsample_slice_options = map(lambda start: np.round(np.linspace(start, num_ticks - 1 , num_beats * ticks_per_beat)).astype(int), range(0, ticks_per_beat))
def score_slice(slices):
new_roll = old_roll[:,slices]
return np.count_nonzero(np.sum(new_roll[:,:-1] != new_roll[:,1:], axis=0))
optimal_slice = max(downsample_slice_options, key=score_slice)
else:
optimal_slice = np.round(np.linspace(0, num_ticks - 1 , num_beats * ticks_per_beat)).astype(int)
if errors:
new_roll = old_roll[:, optimal_slice]
num_difference_new = np.count_nonzero(np.sum(new_roll[:,:-1] != new_roll[:,1:], axis=0))
num_difference_old = np.count_nonzero(np.sum(old_roll[:,:-1] != old_roll[:,1:], axis=0))
return Song(old_roll[:,optimal_slice], ticks_per_beat, song.filepath, song.tempo), num_difference_new, num_difference_old
else:
return Song(old_roll[:,optimal_slice], ticks_per_beat, song.filepath, song.tempo)
dataset = list(map(lambda song: downsample_to(song, 16, errors=True), filter(validate, np.load('dataset.npy'))))
num_beats_in_songs = list(map(lambda song: num_beats(song[0]), dataset))
plt.hist(num_beats_in_songs, alpha=0.75, label=['Beats'])
plt.title('beats in songs')
plt.show()
#See how much information we lost
normal_changes = np.fromiter(map(lambda x: x[1], dataset), float)
old_changes = np.fromiter(map(lambda x: x[2], dataset), float)
data = np.divide(normal_changes, old_changes)
plt.hist(data, alpha=0.75, label=['Accuracy'])
plt.legend(loc='upper right')
plt.title('naive start')
plt.show()
print('Average Accuracy:', np.mean(data))
dataset = list(map(lambda song: downsample_to(song, 16, errors=True, sample_many=True), filter(validate, np.load('dataset.npy'))))
#See how much information we lost
normal_changes = np.fromiter(map(lambda x: x[1], dataset), float)
old_changes = np.fromiter(map(lambda x: x[2], dataset), float)
data = np.divide(normal_changes, old_changes)
plt.hist(data, alpha=0.75, label=['Accuracy'])
plt.legend(loc='upper right')
plt.title('smarter start')
plt.show()
print('Average Accuracy:', np.mean(data))
|
{
"content_hash": "4dc2c6ec615d93069fd9158098ff0623",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 164,
"avg_line_length": 39.77333333333333,
"alnum_prop": 0.6765001676164935,
"repo_name": "jacob-ruth/RuthNote",
"id": "6ae3bfab234f8abeb0e750f0eded9b41dea6a576",
"size": "3007",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "load_dataset.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "19110"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function
import os
import invoke
import fabric.api
import fabric.contrib.files
from .utils import cd, ssh_host
SALT_MASTER = "192.168.5.1"
@invoke.task(name="sync-changes")
def sync_changes():
# Push our changes to GitHub
# TODO: Determine what origin to use?
invoke.run("git push origin master", echo=True)
if os.path.isdir("pillar/prod/secrets"):
with cd("pillar/prod/secrets"):
# Push our changes into the secret repository
invoke.run("git push origin master", echo=True)
# SSH into the salt master and pull our changes
with ssh_host("salt.iad1.psf.io"):
with fabric.api.cd("/srv/salt"):
fabric.api.sudo("git pull --ff-only origin master")
with fabric.api.cd("/srv/pillar/prod/secrets"):
fabric.api.sudo("git pull --ff-only origin master")
@invoke.task
def bootstrap(host, codename="trusty", pre=[sync_changes]):
# If the host does not contain '.', we'll assume it's of the form
# [host].iad1.psf.io.
if "." not in host:
host += ".iad1.psf.io"
# SSH into the root user of this server and bootstrap the server.
with ssh_host("root@" + host):
# Make sure this host hasn't already been bootstrapped.
if fabric.contrib.files.exists("/etc/salt/minion.d/local.conf"):
raise RuntimeError("{} is already bootstrapped.".format(host))
# Ok, we're going to bootstrap, first we need to add our packages
fabric.api.run(
("echo 'deb [arch=amd64] https://s3.amazonaws.com/apt.psf.io/psf/ "
"{} main' > /etc/apt/sources.list.d/psf.list").format(codename)
)
fabric.api.put(
os.path.join(
os.path.dirname(os.path.abspath(__file__)), "..",
"salt", "base", "config", "APT-GPG-KEY-PSF",
),
"/tmp/APT-GPG-KEY-PSF",
)
fabric.api.run("apt-key add - < /tmp/APT-GPG-KEY-PSF")
# If we're running precise we need to add a PPA
if codename == "precise":
fabric.api.run("add-apt-repository ppa:chris-lea/zeromq -y")
# Then we need to update our local apt
fabric.api.run("apt-get update -qy")
# Then, upgrade all of the packages that are currently on this
# machine.
fabric.api.run("apt-get upgrade -qy")
fabric.api.run("apt-get dist-upgrade -qy")
# Reboot the server to make sure any upgrades have been loaded.
fabric.api.reboot()
# Install salt-minion and python-apt so we can manage things with
# salt.
fabric.api.run("apt-get install -qy salt")
# Drop the /etc/salt/minion.d/local.conf onto the server so that it
# can connect with our salt master.
fabric.contrib.files.upload_template(
"conf/minion.conf",
"/etc/salt/minion.d/local.conf",
context={
"master": SALT_MASTER,
},
use_jinja=True,
mode=0o0644,
)
# Run salt-call state.highstate, this will fail the first time because
# the Master hasn't accepted our key yet.
fabric.api.run("salt-call state.highstate", warn_only=True)
# Get the minion ID of this server
minion_id = fabric.api.run("cat /etc/salt/minion_id")
# SSH into our salt master and accept the key for this server.
with ssh_host("salt.iad1.psf.io"):
fabric.api.sudo("salt-key -ya {}".format(minion_id))
# Finally SSH into our server one more time to run salt-call
# state.highstate for real this time.
with ssh_host("root@" + host):
fabric.api.run("salt-call state.highstate")
@invoke.task(default=True, pre=[sync_changes])
def highstate(hosts, dc="iad1"):
# Until invoke supports *args we need to hack around the lack of support
# for now.
hosts = [h.strip() for h in hosts.split(",") if h.strip()]
# Ensure we have some hosts
if not hosts:
raise ValueError("Must specify hosts for highstate")
# Loop over all the hosts and if they do not have a ., then we'll add
# .psf.io to them.
hosts = [h if "." in h else h + "." + dc + ".psf.io" for h in hosts]
# Loop over all the hosts and call salt-call state.highstate on them.
for host in hosts:
with ssh_host(host):
fabric.api.sudo("salt-call state.highstate")
|
{
"content_hash": "0c352f84d57e2bd75db422912a1205d3",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 79,
"avg_line_length": 35.10236220472441,
"alnum_prop": 0.6078959174517721,
"repo_name": "dstufft/psf-salt",
"id": "b68bfd98e536d417d2d9765cb44ee75e8feab959",
"size": "4458",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tasks/salt.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "71839"
},
{
"name": "Nginx",
"bytes": "410"
},
{
"name": "Python",
"bytes": "45396"
},
{
"name": "SaltStack",
"bytes": "87349"
},
{
"name": "Scheme",
"bytes": "43279"
},
{
"name": "Shell",
"bytes": "1200"
}
],
"symlink_target": ""
}
|
import wx
import armid
from DimensionListCtrl import DimensionListCtrl
from Environment import Environment
class EnvironmentPropertiesPanel(wx.Panel):
def __init__(self,parent,dp):
wx.Panel.__init__(self,parent,armid.ENVIRONMENT_PANELENVIRONMENTPROPERTIES_ID)
self.theEnvironmentPanel= parent
mainSizer = wx.BoxSizer(wx.VERTICAL)
environmentBox = wx.StaticBox(self,-1,)
environmentFrameSizer = wx.StaticBoxSizer(environmentBox,wx.VERTICAL)
mainSizer.Add(environmentFrameSizer,1,wx.EXPAND)
self.environmentList = DimensionListCtrl(self,armid.ENVIRONMENT_LISTENVIRONMENTS_ID,wx.DefaultSize,'Environment','environment',dp,'Adding one or more environments indicates that this is a composite environment')
environmentFrameSizer.Add(self.environmentList,1,wx.EXPAND)
propertiesBox = wx.StaticBox(self,-1,'Duplication properties')
propertiesFrameSizer = wx.StaticBoxSizer(propertiesBox,wx.VERTICAL)
mainSizer.Add(propertiesFrameSizer,1,wx.EXPAND)
propertiesSizer = wx.FlexGridSizer(rows = 2, cols = 3)
propertiesFrameSizer.Add(propertiesSizer,1,wx.EXPAND)
propertiesSizer.Add(wx.StaticText(self,-1,'Override'))
self.overrideRadio = wx.RadioButton(self,armid.ENVIRONMENT_RADIOOVERRIDE_ID,style=wx.RB_GROUP)
self.overrideRadio.SetToolTip(wx.ToolTip('If an artifact exists in multiple environments, choose the artifact\'s value for the overriding environment.'))
propertiesSizer.Add(self.overrideRadio)
self.overrideCombo = wx.ComboBox(self,armid.ENVIRONMENT_COMBOOVERRIDE_ID,'',choices=[],style=wx.CB_READONLY | wx.CB_DROPDOWN)
propertiesSizer.Add(self.overrideCombo,0,wx.EXPAND)
propertiesSizer.Add(wx.StaticText(self,-1,'Maximise'))
self.maxRadio = wx.RadioButton(self,armid.ENVIRONMENT_RADIOMAXIMISE_ID)
self.maxRadio.SetToolTip(wx.ToolTip('If an artifact exists in multiple environments, choose the artifact\'s maximal values.'))
propertiesSizer.Add(self.maxRadio)
propertiesSizer.AddGrowableCol(2)
self.SetSizer(mainSizer)
self.environmentList.Bind(wx.EVT_LIST_INSERT_ITEM,self.onEnvironmentAdded)
self.environmentList.Bind(wx.EVT_LIST_DELETE_ITEM,self.onEnvironmentDeleted)
self.overrideRadio.Bind(wx.EVT_RADIOBUTTON,self.onOverrideClick)
self.maxRadio.Bind(wx.EVT_RADIOBUTTON,self.onMaximiseClick)
self.overrideRadio.Disable()
self.overrideCombo.Disable()
self.maxRadio.Disable()
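  # Populate the panel from an existing environment: list its composite
  # environments and restore its duplication property (Maximise or Override).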
def load(self,environment):
environments = environment.environments()
if (len(environments) > 0):
self.environmentList.load(environments)
if (environment.duplicateProperty() == 'Maximise'):
self.maxRadio.Enable()
self.maxRadio.SetValue(True)
self.overrideCombo.Disable()
else:
self.overrideRadio.Enable()
self.overrideCombo.Enable()
self.overrideRadio.SetValue(True)
self.overrideCombo.SetStringSelection(environment.overridingEnvironment())
def onOverrideClick(self,evt):
if (self.overrideRadio.GetValue() == True):
self.overrideCombo.Enable()
else:
self.overrideCombo.Disable()
def onMaximiseClick(self,evt):
if (self.maxRadio.GetValue() == True):
self.overrideCombo.Disable()
else:
self.overrideCombo.Enable()
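  # Keep the override environment combo box in sync as environments are added
  # to or removed from the list, enabling the duplication controls only when
  # at least one environment is present.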
def onEnvironmentAdded(self,evt):
currentEnvironmentSelection = ''
if (self.overrideCombo.GetCount() > 0):
currentEnvironmentSelection = self.overrideCombo.GetStringSelection()
newItem = self.environmentList.GetItemText(evt.GetIndex())
if (self.overrideCombo.FindString(newItem) == wx.NOT_FOUND):
self.overrideCombo.Append(newItem)
if (len(currentEnvironmentSelection) > 0):
self.overrideCombo.SetStringSelection(currentEnvironmentSelection)
self.overrideRadio.Enable()
self.overrideCombo.Enable()
self.maxRadio.Enable()
evt.Skip()
def onEnvironmentDeleted(self,evt):
currentEnvironmentSelection = ''
if (self.overrideCombo.GetCount() > 0):
currentEnvironmentSelection = self.overrideCombo.GetStringSelection()
deletedItem = self.environmentList.GetItemText(evt.GetIndex())
self.overrideCombo.Delete(self.overrideCombo.FindString(deletedItem))
environmentCount = self.overrideCombo.GetCount()
if ((deletedItem == currentEnvironmentSelection) or (environmentCount == 0)):
self.overrideCombo.SetValue('')
else:
self.overrideCombo.SetStringSelection(currentEnvironmentSelection)
if (environmentCount == 0):
self.overrideRadio.Disable()
self.overrideCombo.Disable()
self.maxRadio.Disable()
evt.Skip()
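  # Accessors for the current selections: environments, duplication property,
  # and (when overriding) the chosen environment.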
def environments(self):
return self.environmentList.dimensions()
def duplicateProperty(self):
if (self.maxRadio.GetValue() == True):
return 'Maximise'
else:
return 'Override'
def overridingEnvironment(self):
if (self.maxRadio.GetValue() == True):
return ''
else:
return self.overrideCombo.GetValue()
|
{
"content_hash": "2b57d8a48990bb3fb96dee43537e5fea",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 215,
"avg_line_length": 42.30769230769231,
"alnum_prop": 0.7412121212121212,
"repo_name": "RobinQuetin/CAIRIS-web",
"id": "b91b50cacf0837685616cc26b047d06f67c6d307",
"size": "5749",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "cairis/cairis/EnvironmentPropertiesPanel.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "11265"
},
{
"name": "Mako",
"bytes": "13226"
},
{
"name": "Python",
"bytes": "3313365"
},
{
"name": "Shell",
"bytes": "19461"
},
{
"name": "XSLT",
"bytes": "35522"
}
],
"symlink_target": ""
}
|
from .export import *
from .html import HTMLExporter
from .slides import SlidesExporter
from .exporter import Exporter
from .latex import LatexExporter
from .markdown import MarkdownExporter
from .python import PythonExporter
from .rst import RSTExporter
|
{
"content_hash": "486cfcec93382f4a9e82e6e7d31363de",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 38,
"avg_line_length": 31.875,
"alnum_prop": 0.8392156862745098,
"repo_name": "marcoantoniooliveira/labweb",
"id": "c4eb2cb163f3639a8efc9956e580c05632d5a78f",
"size": "255",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "oscar/lib/python2.7/site-packages/IPython/nbconvert/exporters/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "5939"
},
{
"name": "CSS",
"bytes": "1534157"
},
{
"name": "CoffeeScript",
"bytes": "21"
},
{
"name": "JavaScript",
"bytes": "2968822"
},
{
"name": "LiveScript",
"bytes": "6103"
},
{
"name": "Puppet",
"bytes": "3507"
},
{
"name": "Python",
"bytes": "30402832"
},
{
"name": "Shell",
"bytes": "10782"
},
{
"name": "TeX",
"bytes": "56626"
},
{
"name": "XSLT",
"bytes": "49764"
}
],
"symlink_target": ""
}
|
"""
Objects for dealing with Laguerre series.
This module provides a number of objects (mostly functions) useful for
dealing with Laguerre series, including a `Laguerre` class that
encapsulates the usual arithmetic operations. (General information
on how this module represents and works with such polynomials is in the
docstring for its "parent" sub-package, `numpy.polynomial`).
Constants
---------
- `lagdomain` -- Laguerre series default domain, [0,1].
- `lagzero` -- Laguerre series that evaluates identically to 0.
- `lagone` -- Laguerre series that evaluates identically to 1.
- `lagx` -- Laguerre series for the identity map, ``f(x) = x``.
Arithmetic
----------
- `lagmulx` -- multiply a Laguerre series in ``P_i(x)`` by ``x``.
- `lagadd` -- add two Laguerre series.
- `lagsub` -- subtract one Laguerre series from another.
- `lagmul` -- multiply two Laguerre series.
- `lagdiv` -- divide one Laguerre series by another.
- `lagval` -- evaluate a Laguerre series at given points.
Calculus
--------
- `lagder` -- differentiate a Laguerre series.
- `lagint` -- integrate a Laguerre series.
Misc Functions
--------------
- `lagfromroots` -- create a Laguerre series with specified roots.
- `lagroots` -- find the roots of a Laguerre series.
- `lagvander` -- Vandermonde-like matrix for Laguerre polynomials.
- `lagfit` -- least-squares fit returning a Laguerre series.
- `lagtrim` -- trim leading coefficients from a Laguerre series.
- `lagline` -- Laguerre series of given straight line.
- `lag2poly` -- convert a Laguerre series to a polynomial.
- `poly2lag` -- convert a polynomial to a Laguerre series.
Classes
-------
- `Laguerre` -- A Laguerre series class.
See also
--------
`numpy.polynomial`
"""
from __future__ import division
__all__ = ['lagzero', 'lagone', 'lagx', 'lagdomain', 'lagline',
'lagadd', 'lagsub', 'lagmulx', 'lagmul', 'lagdiv', 'lagval',
'lagder', 'lagint', 'lag2poly', 'poly2lag', 'lagfromroots',
'lagvander', 'lagfit', 'lagtrim', 'lagroots', 'Laguerre']
import numpy as np
import numpy.linalg as la
import polyutils as pu
import warnings
from polytemplate import polytemplate
lagtrim = pu.trimcoef
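# lagtrim reuses the generic helper that removes small trailing coefficients.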
def poly2lag(pol) :
"""
poly2lag(pol)
Convert a polynomial to a Laguerre series.
Convert an array representing the coefficients of a polynomial (relative
to the "standard" basis) ordered from lowest degree to highest, to an
array of the coefficients of the equivalent Laguerre series, ordered
from lowest to highest degree.
Parameters
----------
pol : array_like
1-d array containing the polynomial coefficients
Returns
-------
cs : ndarray
1-d array containing the coefficients of the equivalent Laguerre
series.
See Also
--------
lag2poly
Notes
-----
The easy way to do conversions between polynomial basis sets
is to use the convert method of a class instance.
Examples
--------
>>> from numpy.polynomial.laguerre import poly2lag
>>> poly2lag(np.arange(4))
array([ 23., -63., 58., -18.])
"""
[pol] = pu.as_series([pol])
deg = len(pol) - 1
res = 0
for i in range(deg, -1, -1) :
res = lagadd(lagmulx(res), pol[i])
return res
def lag2poly(cs) :
"""
Convert a Laguerre series to a polynomial.
Convert an array representing the coefficients of a Laguerre series,
ordered from lowest degree to highest, to an array of the coefficients
of the equivalent polynomial (relative to the "standard" basis) ordered
from lowest to highest degree.
Parameters
----------
cs : array_like
1-d array containing the Laguerre series coefficients, ordered
from lowest order term to highest.
Returns
-------
pol : ndarray
1-d array containing the coefficients of the equivalent polynomial
(relative to the "standard" basis) ordered from lowest order term
to highest.
See Also
--------
poly2lag
Notes
-----
The easy way to do conversions between polynomial basis sets
is to use the convert method of a class instance.
Examples
--------
>>> from numpy.polynomial.laguerre import lag2poly
>>> lag2poly([ 23., -63., 58., -18.])
array([ 0., 1., 2., 3.])
"""
from polynomial import polyadd, polysub, polymulx
[cs] = pu.as_series([cs])
n = len(cs)
if n == 1:
return cs
else:
c0 = cs[-2]
c1 = cs[-1]
# i is the current degree of c1
for i in range(n - 1, 1, -1):
tmp = c0
c0 = polysub(cs[i - 2], (c1*(i - 1))/i)
c1 = polyadd(tmp, polysub((2*i - 1)*c1, polymulx(c1))/i)
return polyadd(c0, polysub(c1, polymulx(c1)))
#
# These constant arrays are of integer type so as to be compatible
# with the widest range of other types, such as Decimal.
#
# Laguerre
lagdomain = np.array([0,1])
# Laguerre coefficients representing zero.
lagzero = np.array([0])
# Laguerre coefficients representing one.
lagone = np.array([1])
# Laguerre coefficients representing the identity x.
lagx = np.array([1, -1])
def lagline(off, scl) :
"""
Laguerre series whose graph is a straight line.
Parameters
----------
off, scl : scalars
The specified line is given by ``off + scl*x``.
Returns
-------
y : ndarray
This module's representation of the Laguerre series for
``off + scl*x``.
See Also
--------
polyline, chebline
Examples
--------
>>> from numpy.polynomial.laguerre import lagline, lagval
>>> lagval(0,lagline(3, 2))
3.0
>>> lagval(1,lagline(3, 2))
5.0
"""
if scl != 0 :
return np.array([off + scl, -scl])
else :
return np.array([off])
def lagfromroots(roots) :
"""
Generate a Laguerre series with the given roots.
Return the array of coefficients for the P-series whose roots (a.k.a.
"zeros") are given by *roots*. The returned array of coefficients is
ordered from lowest order "term" to highest, and zeros of multiplicity
greater than one must be included in *roots* a number of times equal
to their multiplicity (e.g., if `2` is a root of multiplicity three,
then [2,2,2] must be in *roots*).
Parameters
----------
roots : array_like
Sequence containing the roots.
Returns
-------
out : ndarray
1-d array of the Laguerre series coefficients, ordered from low to
high. If all roots are real, ``out.dtype`` is a float type;
otherwise, ``out.dtype`` is a complex type, even if all the
coefficients in the result are real (see Examples below).
See Also
--------
polyfromroots, chebfromroots
Notes
-----
What is returned are the :math:`c_i` such that:
.. math::
\\sum_{i=0}^{n} c_i*P_i(x) = \\prod_{i=0}^{n} (x - roots[i])
where ``n == len(roots)`` and :math:`P_i(x)` is the `i`-th Laguerre
    (basis) polynomial over the domain `[0,1]`. Note that, unlike
`polyfromroots`, due to the nature of the Laguerre basis set, the
above identity *does not* imply :math:`c_n = 1` identically (see
Examples).
Examples
--------
>>> from numpy.polynomial.laguerre import lagfromroots, lagval
>>> coef = lagfromroots((-1, 0, 1))
>>> lagval((-1, 0, 1), coef)
array([ 0., 0., 0.])
>>> coef = lagfromroots((-1j, 1j))
>>> lagval((-1j, 1j), coef)
array([ 0.+0.j, 0.+0.j])
"""
if len(roots) == 0 :
return np.ones(1)
else :
[roots] = pu.as_series([roots], trim=False)
prd = np.array([1], dtype=roots.dtype)
for r in roots:
prd = lagsub(lagmulx(prd), r*prd)
return prd
def lagadd(c1, c2):
"""
Add one Laguerre series to another.
Returns the sum of two Laguerre series `c1` + `c2`. The arguments
are sequences of coefficients ordered from lowest order term to
highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-d arrays of Laguerre series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Array representing the Laguerre series of their sum.
See Also
--------
lagsub, lagmul, lagdiv, lagpow
Notes
-----
Unlike multiplication, division, etc., the sum of two Laguerre series
is a Laguerre series (without having to "reproject" the result onto
the basis set) so addition, just like that of "standard" polynomials,
is simply "component-wise."
Examples
--------
>>> from numpy.polynomial.laguerre import lagadd
>>> lagadd([1, 2, 3], [1, 2, 3, 4])
array([ 2., 4., 6., 4.])
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2) :
c1[:c2.size] += c2
ret = c1
else :
c2[:c1.size] += c1
ret = c2
return pu.trimseq(ret)
def lagsub(c1, c2):
"""
Subtract one Laguerre series from another.
Returns the difference of two Laguerre series `c1` - `c2`. The
sequences of coefficients are from lowest order term to highest, i.e.,
[1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-d arrays of Laguerre series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Of Laguerre series coefficients representing their difference.
See Also
--------
lagadd, lagmul, lagdiv, lagpow
Notes
-----
Unlike multiplication, division, etc., the difference of two Laguerre
series is a Laguerre series (without having to "reproject" the result
onto the basis set) so subtraction, just like that of "standard"
polynomials, is simply "component-wise."
Examples
--------
>>> from numpy.polynomial.laguerre import lagsub
>>> lagsub([1, 2, 3, 4], [1, 2, 3])
array([ 0., 0., 0., 4.])
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2) :
c1[:c2.size] -= c2
ret = c1
else :
c2 = -c2
c2[:c1.size] += c1
ret = c2
return pu.trimseq(ret)
def lagmulx(cs):
"""Multiply a Laguerre series by x.
Multiply the Laguerre series `cs` by x, where x is the independent
variable.
Parameters
----------
cs : array_like
1-d array of Laguerre series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Array representing the result of the multiplication.
Notes
-----
The multiplication uses the recursion relationship for Laguerre
polynomials in the form
.. math::
xP_i(x) = (-(i + 1)*P_{i + 1}(x) + (2i + 1)P_{i}(x) - iP_{i - 1}(x))
Examples
--------
>>> from numpy.polynomial.laguerre import lagmulx
>>> lagmulx([1, 2, 3])
array([ -1., -1., 11., -9.])
"""
# cs is a trimmed copy
[cs] = pu.as_series([cs])
# The zero series needs special treatment
if len(cs) == 1 and cs[0] == 0:
return cs
prd = np.empty(len(cs) + 1, dtype=cs.dtype)
prd[0] = cs[0]
prd[1] = -cs[0]
for i in range(1, len(cs)):
prd[i + 1] = -cs[i]*(i + 1)
prd[i] += cs[i]*(2*i + 1)
prd[i - 1] -= cs[i]*i
return prd
def lagmul(c1, c2):
"""
Multiply one Laguerre series by another.
Returns the product of two Laguerre series `c1` * `c2`. The arguments
are sequences of coefficients, from lowest order "term" to highest,
e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-d arrays of Laguerre series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Of Laguerre series coefficients representing their product.
See Also
--------
lagadd, lagsub, lagdiv, lagpow
Notes
-----
In general, the (polynomial) product of two C-series results in terms
that are not in the Laguerre polynomial basis set. Thus, to express
the product as a Laguerre series, it is necessary to "re-project" the
product onto said basis set, which may produce "un-intuitive" (but
correct) results; see Examples section below.
Examples
--------
>>> from numpy.polynomial.laguerre import lagmul
>>> lagmul([1, 2, 3], [0, 1, 2])
array([ 8., -13., 38., -51., 36.])
"""
# s1, s2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2):
cs = c2
xs = c1
else:
cs = c1
xs = c2
if len(cs) == 1:
c0 = cs[0]*xs
c1 = 0
elif len(cs) == 2:
c0 = cs[0]*xs
c1 = cs[1]*xs
else :
nd = len(cs)
c0 = cs[-2]*xs
c1 = cs[-1]*xs
for i in range(3, len(cs) + 1) :
tmp = c0
nd = nd - 1
c0 = lagsub(cs[-i]*xs, (c1*(nd - 1))/nd)
c1 = lagadd(tmp, lagsub((2*nd - 1)*c1, lagmulx(c1))/nd)
return lagadd(c0, lagsub(c1, lagmulx(c1)))
def lagdiv(c1, c2):
"""
Divide one Laguerre series by another.
Returns the quotient-with-remainder of two Laguerre series
`c1` / `c2`. The arguments are sequences of coefficients from lowest
order "term" to highest, e.g., [1,2,3] represents the series
``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-d arrays of Laguerre series coefficients ordered from low to
high.
Returns
-------
[quo, rem] : ndarrays
Of Laguerre series coefficients representing the quotient and
remainder.
See Also
--------
lagadd, lagsub, lagmul, lagpow
Notes
-----
In general, the (polynomial) division of one Laguerre series by another
results in quotient and remainder terms that are not in the Laguerre
polynomial basis set. Thus, to express these results as a Laguerre
series, it is necessary to "re-project" the results onto the Laguerre
basis set, which may produce "un-intuitive" (but correct) results; see
Examples section below.
Examples
--------
>>> from numpy.polynomial.laguerre import lagdiv
>>> lagdiv([ 8., -13., 38., -51., 36.], [0, 1, 2])
(array([ 1., 2., 3.]), array([ 0.]))
>>> lagdiv([ 9., -12., 38., -51., 36.], [0, 1, 2])
(array([ 1., 2., 3.]), array([ 1., 1.]))
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if c2[-1] == 0 :
raise ZeroDivisionError()
lc1 = len(c1)
lc2 = len(c2)
if lc1 < lc2 :
return c1[:1]*0, c1
elif lc2 == 1 :
return c1/c2[-1], c1[:1]*0
else :
quo = np.empty(lc1 - lc2 + 1, dtype=c1.dtype)
rem = c1
for i in range(lc1 - lc2, - 1, -1):
p = lagmul([0]*i + [1], c2)
q = rem[-1]/p[-1]
rem = rem[:-1] - q*p[:-1]
quo[i] = q
return quo, pu.trimseq(rem)
def lagpow(cs, pow, maxpower=16) :
"""Raise a Laguerre series to a power.
Returns the Laguerre series `cs` raised to the power `pow`. The
    argument `cs` is a sequence of coefficients ordered from low to high.
i.e., [1,2,3] is the series ``P_0 + 2*P_1 + 3*P_2.``
Parameters
----------
cs : array_like
1d array of Laguerre series coefficients ordered from low to
high.
pow : integer
Power to which the series will be raised
maxpower : integer, optional
Maximum power allowed. This is mainly to limit growth of the series
        to unmanageable size. Default is 16
Returns
-------
coef : ndarray
Laguerre series of power.
See Also
--------
lagadd, lagsub, lagmul, lagdiv
Examples
--------
>>> from numpy.polynomial.laguerre import lagpow
>>> lagpow([1, 2, 3], 2)
array([ 14., -16., 56., -72., 54.])
"""
# cs is a trimmed copy
[cs] = pu.as_series([cs])
power = int(pow)
if power != pow or power < 0 :
raise ValueError("Power must be a non-negative integer.")
elif maxpower is not None and power > maxpower :
raise ValueError("Power is too large")
elif power == 0 :
return np.array([1], dtype=cs.dtype)
elif power == 1 :
return cs
else :
# This can be made more efficient by using powers of two
# in the usual way.
prd = cs
for i in range(2, power + 1) :
prd = lagmul(prd, cs)
return prd
def lagder(cs, m=1, scl=1) :
"""
Differentiate a Laguerre series.
Returns the series `cs` differentiated `m` times. At each iteration the
result is multiplied by `scl` (the scaling factor is for use in a linear
change of variable). The argument `cs` is the sequence of coefficients
from lowest order "term" to highest, e.g., [1,2,3] represents the series
``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
cs: array_like
1-d array of Laguerre series coefficients ordered from low to high.
m : int, optional
Number of derivatives taken, must be non-negative. (Default: 1)
scl : scalar, optional
Each differentiation is multiplied by `scl`. The end result is
multiplication by ``scl**m``. This is for use in a linear change of
variable. (Default: 1)
Returns
-------
der : ndarray
Laguerre series of the derivative.
See Also
--------
lagint
Notes
-----
In general, the result of differentiating a Laguerre series does not
resemble the same operation on a power series. Thus the result of this
function may be "un-intuitive," albeit correct; see Examples section
below.
Examples
--------
>>> from numpy.polynomial.laguerre import lagder
>>> lagder([ 1., 1., 1., -3.])
array([ 1., 2., 3.])
>>> lagder([ 1., 0., 0., -4., 3.], m=2)
array([ 1., 2., 3.])
"""
cnt = int(m)
if cnt != m:
raise ValueError("The order of derivation must be integer")
if cnt < 0 :
raise ValueError("The order of derivation must be non-negative")
# cs is a trimmed copy
[cs] = pu.as_series([cs])
if cnt == 0:
return cs
elif cnt >= len(cs):
return cs[:1]*0
else :
for i in range(cnt):
n = len(cs) - 1
cs *= scl
der = np.empty(n, dtype=cs.dtype)
for j in range(n, 0, -1):
der[j - 1] = -cs[j]
cs[j - 1] += cs[j]
cs = der
return cs
def lagint(cs, m=1, k=[], lbnd=0, scl=1):
"""
Integrate a Laguerre series.
Returns a Laguerre series that is the Laguerre series `cs`, integrated
`m` times from `lbnd` to `x`. At each iteration the resulting series
is **multiplied** by `scl` and an integration constant, `k`, is added.
The scaling factor is for use in a linear change of variable. ("Buyer
beware": note that, depending on what one is doing, one may want `scl`
to be the reciprocal of what one might expect; for more information,
see the Notes section below.) The argument `cs` is a sequence of
coefficients, from lowest order Laguerre series "term" to highest,
e.g., [1,2,3] represents the series :math:`P_0(x) + 2P_1(x) + 3P_2(x)`.
Parameters
----------
cs : array_like
1-d array of Laguerre series coefficients, ordered from low to high.
m : int, optional
Order of integration, must be positive. (Default: 1)
k : {[], list, scalar}, optional
Integration constant(s). The value of the first integral at
``lbnd`` is the first value in the list, the value of the second
integral at ``lbnd`` is the second value, etc. If ``k == []`` (the
default), all constants are set to zero. If ``m == 1``, a single
scalar can be given instead of a list.
lbnd : scalar, optional
The lower bound of the integral. (Default: 0)
scl : scalar, optional
Following each integration the result is *multiplied* by `scl`
before the integration constant is added. (Default: 1)
Returns
-------
S : ndarray
Laguerre series coefficients of the integral.
Raises
------
ValueError
If ``m < 0``, ``len(k) > m``, ``np.isscalar(lbnd) == False``, or
``np.isscalar(scl) == False``.
See Also
--------
lagder
Notes
-----
Note that the result of each integration is *multiplied* by `scl`.
Why is this important to note? Say one is making a linear change of
variable :math:`u = ax + b` in an integral relative to `x`. Then
:math:`dx = du/a`, so one will need to set `scl` equal to :math:`1/a`
- perhaps not what one would have first thought.
Also note that, in general, the result of integrating a C-series needs
to be "re-projected" onto the C-series basis set. Thus, typically,
the result of this function is "un-intuitive," albeit correct; see
Examples section below.
Examples
--------
>>> from numpy.polynomial.laguerre import lagint
>>> lagint([1,2,3])
array([ 1., 1., 1., -3.])
>>> lagint([1,2,3], m=2)
array([ 1., 0., 0., -4., 3.])
>>> lagint([1,2,3], k=1)
array([ 2., 1., 1., -3.])
>>> lagint([1,2,3], lbnd=-1)
array([ 11.5, 1. , 1. , -3. ])
>>> lagint([1,2], m=2, k=[1,2], lbnd=-1)
array([ 11.16666667, -5. , -3. , 2. ])
"""
cnt = int(m)
if np.isscalar(k) :
k = [k]
if cnt != m:
raise ValueError("The order of integration must be integer")
if cnt < 0 :
raise ValueError("The order of integration must be non-negative")
if len(k) > cnt :
raise ValueError("Too many integration constants")
# cs is a trimmed copy
[cs] = pu.as_series([cs])
if cnt == 0:
return cs
k = list(k) + [0]*(cnt - len(k))
for i in range(cnt) :
n = len(cs)
cs *= scl
if n == 1 and cs[0] == 0:
cs[0] += k[i]
else:
tmp = np.empty(n + 1, dtype=cs.dtype)
tmp[0] = cs[0]
tmp[1] = -cs[0]
for j in range(1, n):
tmp[j] += cs[j]
tmp[j + 1] = -cs[j]
tmp[0] += k[i] - lagval(lbnd, tmp)
cs = tmp
return cs
def lagval(x, cs):
"""Evaluate a Laguerre series.
If `cs` is of length `n`, this function returns :
``p(x) = cs[0]*P_0(x) + cs[1]*P_1(x) + ... + cs[n-1]*P_{n-1}(x)``
If x is a sequence or array then p(x) will have the same shape as x.
If r is a ring_like object that supports multiplication and addition
by the values in `cs`, then an object of the same type is returned.
Parameters
----------
x : array_like, ring_like
Array of numbers or objects that support multiplication and
addition with themselves and with the elements of `cs`.
cs : array_like
1-d array of Laguerre coefficients ordered from low to high.
Returns
-------
values : ndarray, ring_like
If the return is an ndarray then it has the same shape as `x`.
See Also
--------
lagfit
Notes
-----
The evaluation uses Clenshaw recursion, aka synthetic division.
Examples
--------
>>> from numpy.polynomial.laguerre import lagval
>>> coef = [1,2,3]
>>> lagval(1, coef)
-0.5
>>> lagval([[1,2],[3,4]], coef)
array([[-0.5, -4. ],
[-4.5, -2. ]])
"""
# cs is a trimmed copy
[cs] = pu.as_series([cs])
if isinstance(x, tuple) or isinstance(x, list) :
x = np.asarray(x)
if len(cs) == 1 :
c0 = cs[0]
c1 = 0
elif len(cs) == 2 :
c0 = cs[0]
c1 = cs[1]
else :
nd = len(cs)
c0 = cs[-2]
c1 = cs[-1]
for i in range(3, len(cs) + 1) :
tmp = c0
nd = nd - 1
c0 = cs[-i] - (c1*(nd - 1))/nd
c1 = tmp + (c1*((2*nd - 1) - x))/nd
return c0 + c1*(1 - x)
def lagvander(x, deg) :
"""Vandermonde matrix of given degree.
Returns the Vandermonde matrix of degree `deg` and sample points `x`.
This isn't a true Vandermonde matrix because `x` can be an arbitrary
ndarray and the Laguerre polynomials aren't powers. If ``V`` is the
returned matrix and `x` is a 2d array, then the elements of ``V`` are
``V[i,j,k] = P_k(x[i,j])``, where ``P_k`` is the Laguerre polynomial
of degree ``k``.
Parameters
----------
x : array_like
Array of points. The values are converted to double or complex
doubles. If x is scalar it is converted to a 1D array.
deg : integer
Degree of the resulting matrix.
Returns
-------
vander : Vandermonde matrix.
The shape of the returned matrix is ``x.shape + (deg+1,)``. The last
index is the degree.
Examples
--------
>>> from numpy.polynomial.laguerre import lagvander
>>> x = np.array([0, 1, 2])
>>> lagvander(x, 3)
array([[ 1. , 1. , 1. , 1. ],
[ 1. , 0. , -0.5 , -0.66666667],
[ 1. , -1. , -1. , -0.33333333]])
"""
ideg = int(deg)
if ideg != deg:
raise ValueError("deg must be integer")
if ideg < 0:
raise ValueError("deg must be non-negative")
x = np.array(x, copy=0, ndmin=1) + 0.0
v = np.empty((ideg + 1,) + x.shape, dtype=x.dtype)
v[0] = x*0 + 1
if ideg > 0 :
v[1] = 1 - x
for i in range(2, ideg + 1) :
v[i] = (v[i-1]*(2*i - 1 - x) - v[i-2]*(i - 1))/i
return np.rollaxis(v, 0, v.ndim)
def lagfit(x, y, deg, rcond=None, full=False, w=None):
"""
Least squares fit of Laguerre series to data.
Fit a Laguerre series ``p(x) = p[0] * P_{0}(x) + ... + p[deg] *
P_{deg}(x)`` of degree `deg` to points `(x, y)`. Returns a vector of
coefficients `p` that minimises the squared error.
Parameters
----------
x : array_like, shape (M,)
x-coordinates of the M sample points ``(x[i], y[i])``.
y : array_like, shape (M,) or (M, K)
y-coordinates of the sample points. Several data sets of sample
points sharing the same x-coordinates can be fitted at once by
passing in a 2D-array that contains one dataset per column.
deg : int
Degree of the fitting polynomial
rcond : float, optional
Relative condition number of the fit. Singular values smaller than
this relative to the largest singular value will be ignored. The
default value is len(x)*eps, where eps is the relative precision of
the float type, about 2e-16 in most cases.
full : bool, optional
Switch determining nature of return value. When it is False (the
default) just the coefficients are returned, when True diagnostic
information from the singular value decomposition is also returned.
w : array_like, shape (`M`,), optional
Weights. If not None, the contribution of each point
``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the
weights are chosen so that the errors of the products ``w[i]*y[i]``
all have the same variance. The default value is None.
Returns
-------
coef : ndarray, shape (M,) or (M, K)
Laguerre coefficients ordered from low to high. If `y` was 2-D,
the coefficients for the data in column k of `y` are in column
`k`.
[residuals, rank, singular_values, rcond] : present when `full` = True
Residuals of the least-squares fit, the effective rank of the
scaled Vandermonde matrix and its singular values, and the
specified value of `rcond`. For more details, see `linalg.lstsq`.
Warns
-----
RankWarning
The rank of the coefficient matrix in the least-squares fit is
deficient. The warning is only raised if `full` = False. The
warnings can be turned off by
>>> import warnings
>>> warnings.simplefilter('ignore', RankWarning)
See Also
--------
lagval : Evaluates a Laguerre series.
lagvander : Vandermonde matrix of Laguerre series.
polyfit : least squares fit using polynomials.
chebfit : least squares fit using Chebyshev series.
linalg.lstsq : Computes a least-squares fit from the matrix.
scipy.interpolate.UnivariateSpline : Computes spline fits.
Notes
-----
The solution are the coefficients ``c[i]`` of the Laguerre series
``P(x)`` that minimizes the squared error
``E = \\sum_j |y_j - P(x_j)|^2``.
This problem is solved by setting up as the overdetermined matrix
equation
``V(x)*c = y``,
where ``V`` is the Vandermonde matrix of `x`, the elements of ``c`` are
the coefficients to be solved for, and the elements of `y` are the
observed values. This equation is then solved using the singular value
decomposition of ``V``.
If some of the singular values of ``V`` are so small that they are
neglected, then a `RankWarning` will be issued. This means that the
    coefficient values may be poorly determined. Using a lower order fit
will usually get rid of the warning. The `rcond` parameter can also be
set to a value smaller than its default, but the resulting fit may be
spurious and have large contributions from roundoff error.
Fits using Laguerre series are usually better conditioned than fits
using power series, but much can depend on the distribution of the
sample points and the smoothness of the data. If the quality of the fit
is inadequate splines may be a good alternative.
References
----------
.. [1] Wikipedia, "Curve fitting",
http://en.wikipedia.org/wiki/Curve_fitting
Examples
--------
>>> from numpy.polynomial.laguerre import lagfit, lagval
>>> x = np.linspace(0, 10)
>>> err = np.random.randn(len(x))/10
>>> y = lagval(x, [1, 2, 3]) + err
>>> lagfit(x, y, 2)
array([ 0.96971004, 2.00193749, 3.00288744])
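A weighted fit uses the same call; the uniform weights below are shown purely
as an illustration, since they leave the problem unchanged and the result
matches the unweighted fit above:
>>> w = np.ones_like(x)
>>> lagfit(x, y, 2, w=w)
array([ 0.96971004, 2.00193749, 3.00288744])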
"""
order = int(deg) + 1
x = np.asarray(x) + 0.0
y = np.asarray(y) + 0.0
# check arguments.
if deg < 0 :
raise ValueError("expected deg >= 0")
if x.ndim != 1:
raise TypeError("expected 1D vector for x")
if x.size == 0:
raise TypeError("expected non-empty vector for x")
if y.ndim < 1 or y.ndim > 2 :
raise TypeError("expected 1D or 2D array for y")
if len(x) != len(y):
raise TypeError("expected x and y to have same length")
# set up the least squares matrices
lhs = lagvander(x, deg)
rhs = y
if w is not None:
w = np.asarray(w) + 0.0
if w.ndim != 1:
raise TypeError("expected 1D vector for w")
if len(x) != len(w):
raise TypeError("expected x and w to have same length")
# apply weights
if rhs.ndim == 2:
lhs *= w[:, np.newaxis]
rhs *= w[:, np.newaxis]
else:
lhs *= w[:, np.newaxis]
rhs *= w
# set rcond
if rcond is None :
rcond = len(x)*np.finfo(x.dtype).eps
# scale the design matrix and solve the least squares equation
scl = np.sqrt((lhs*lhs).sum(0))
c, resids, rank, s = la.lstsq(lhs/scl, rhs, rcond)
c = (c.T/scl).T
# warn on rank reduction
if rank != order and not full:
msg = "The fit may be poorly conditioned"
warnings.warn(msg, pu.RankWarning)
if full :
return c, [resids, rank, s, rcond]
else :
return c
def lagroots(cs):
"""
Compute the roots of a Laguerre series.
Return the roots (a.k.a "zeros") of the Laguerre series represented by
`cs`, which is the sequence of coefficients from lowest order "term"
to highest, e.g., [1,2,3] is the series ``L_0 + 2*L_1 + 3*L_2``.
Parameters
----------
cs : array_like
1-d array of Laguerre series coefficients ordered from low to high.
Returns
-------
out : ndarray
Array of the roots. If all the roots are real, then so is the
dtype of ``out``; otherwise, ``out``'s dtype is complex.
See Also
--------
polyroots
chebroots
Notes
-----
Algorithm(s) used: the roots are computed as the eigenvalues of a
companion matrix built from the trimmed series coefficients.
Remember: because the Laguerre series basis set is different from the
"standard" basis set, the results of this function *may* not be what
one is expecting.
Examples
--------
>>> from numpy.polynomial.laguerre import lagroots, lagfromroots
>>> coef = lagfromroots([0, 1, 2])
>>> coef
array([ 2., -8., 12., -6.])
>>> lagroots(coef)
array([ -4.44089210e-16, 1.00000000e+00, 2.00000000e+00])
"""
# cs is a trimmed copy
[cs] = pu.as_series([cs])
if len(cs) <= 1 :
return np.array([], dtype=cs.dtype)
if len(cs) == 2 :
return np.array([1 + cs[0]/cs[1]])
n = len(cs) - 1
cs /= cs[-1]
cmat = np.zeros((n,n), dtype=cs.dtype)
cmat[0, 0] = 1
cmat[1, 0] = -1
for i in range(1, n):
cmat[i - 1, i] = -i
cmat[i, i] = 2*i + 1
if i != n - 1:
cmat[i + 1, i] = -(i + 1)
else:
cmat[:, i] += cs[:-1]*(i + 1)
roots = la.eigvals(cmat)
roots.sort()
return roots
#
# Laguerre series class
#
exec polytemplate.substitute(name='Laguerre', nick='lag', domain='[-1,1]')
|
{
"content_hash": "8f86a8ea0a78a7746c2b9603e05934aa",
"timestamp": "",
"source": "github",
"line_count": 1146,
"max_line_length": 76,
"avg_line_length": 29.397033158813265,
"alnum_prop": 0.5770132684258957,
"repo_name": "stefanv/numpy",
"id": "1a75d4af48210c3a7eebb61af88829313dde06c3",
"size": "33689",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "numpy/polynomial/laguerre.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "6184633"
},
{
"name": "C++",
"bytes": "297087"
},
{
"name": "CSS",
"bytes": "8887"
},
{
"name": "Fortran",
"bytes": "14157"
},
{
"name": "Objective-C",
"bytes": "135"
},
{
"name": "Perl",
"bytes": "458"
},
{
"name": "Python",
"bytes": "5338917"
},
{
"name": "Shell",
"bytes": "3545"
}
],
"symlink_target": ""
}
|
"""
Useful expressions common to many neural network applications.
"""
__authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2010-2013, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"
import numpy as np
from theano import tensor as T
from pylearn2.expr.nnet import pseudoinverse_softmax_numpy
from pylearn2.expr.nnet import softmax_numpy
from pylearn2.expr.nnet import softmax_ratio
from pylearn2.expr.nnet import compute_recall
from pylearn2.utils import sharedX
def test_softmax_ratio():
# Tests that the numerically stabilized version of the softmax ratio
# matches the naive implementation, for small input values
n = 3
m = 4
rng = np.random.RandomState([2013, 3, 23])
Z_numer = sharedX(rng.randn(m, n))
Z_denom = sharedX(rng.randn(m, n))
numer = T.nnet.softmax(Z_numer)
denom = T.nnet.softmax(Z_denom)
naive = numer / denom
stable = softmax_ratio(numer, denom)
naive = naive.eval()
stable = stable.eval()
assert np.allclose(naive, stable)
def test_pseudoinverse_softmax_numpy():
rng = np.random.RandomState([2013, 3, 28])
p = np.abs(rng.randn(5))
p /= p.sum()
z = pseudoinverse_softmax_numpy(p)
zbroad = z.reshape(1, z.size)
p2 = softmax_numpy(zbroad)
p2 = p2[0, :]
assert np.allclose(p, p2)
def test_compute_recall():
"""
Tests whether compute_recall function works as
expected.
"""
tp_pyval = 4
ys_pyval = np.asarray([0, 1, 1, 0, 1, 1, 0])
tp = sharedX(tp_pyval, name="tp")
ys = sharedX(ys_pyval, name="ys_pyval")
recall_py = tp_pyval / ys_pyval.sum()
recall = compute_recall(ys, tp)
assert np.allclose(recall.eval(),
recall_py)
|
{
"content_hash": "bd432e6cb87840d866754a48aad8aa69",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 72,
"avg_line_length": 25.26388888888889,
"alnum_prop": 0.6531061022539857,
"repo_name": "shiquanwang/pylearn2",
"id": "c4eae80138611d9916da34d51dfbd468448f692b",
"size": "1819",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "pylearn2/expr/tests/test_nnet.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "53316"
},
{
"name": "C++",
"bytes": "46935"
},
{
"name": "CSS",
"bytes": "10655"
},
{
"name": "Cuda",
"bytes": "1267472"
},
{
"name": "Objective-C",
"bytes": "953"
},
{
"name": "Python",
"bytes": "3452538"
},
{
"name": "Shell",
"bytes": "4195"
}
],
"symlink_target": ""
}
|
import logging
import unittest
import apache_beam as beam
from apache_beam.runners.portability import fn_api_runner
from apache_beam.runners.portability import maptask_executor_runner_test
class FnApiRunnerTest(
maptask_executor_runner_test.MapTaskExecutorRunnerTest):
def create_pipeline(self):
return beam.Pipeline(
runner=fn_api_runner.FnApiRunner())
def test_combine_per_key(self):
# TODO(robertwb): Implement PGBKCV operation.
pass
# Inherits all tests from maptask_executor_runner.MapTaskExecutorRunner
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
unittest.main()
|
{
"content_hash": "ac0d8281a866d8357a1097793787f38b",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 73,
"avg_line_length": 25.48,
"alnum_prop": 0.7551020408163265,
"repo_name": "wtanaka/beam",
"id": "91590351e99ef7aaaffbaa7ead6c65e1194ec8ce",
"size": "1422",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sdks/python/apache_beam/runners/portability/fn_api_runner_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Groovy",
"bytes": "50057"
},
{
"name": "Java",
"bytes": "11703716"
},
{
"name": "Protocol Buffer",
"bytes": "55082"
},
{
"name": "Python",
"bytes": "2856764"
},
{
"name": "Shell",
"bytes": "44966"
}
],
"symlink_target": ""
}
|
import abc
from selenium.webdriver.support.event_firing_webdriver import EventFiringWebElement
from robot.api import logger
class Event:
@abc.abstractmethod
def trigger(self, *args, **kwargs):
pass
def _unwrap_eventfiring_element(element):
"""Workaround for Selenium 3 bug.
References:
https://github.com/SeleniumHQ/selenium/issues/7877
https://github.com/SeleniumHQ/selenium/pull/8348
https://github.com/SeleniumHQ/selenium/issues/7467
https://github.com/SeleniumHQ/selenium/issues/6604
"""
logger.debug("Workaround for Selenium 3 bug.")
if not isinstance(element, EventFiringWebElement) or selenium_major_version() >= 4:
return element
return element.wrapped_element
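# Illustrative usage sketch (hypothetical element variable): callers route a
# possibly wrapped element through the workaround before interacting with it.
#
#   element = _unwrap_eventfiring_element(maybe_wrapped_element)
#   element.click()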
def selenium_major_version():
import selenium
selenium_version = selenium.__version__
(major, *sub_versions) = selenium_version.split(".")
return int(major)
|
{
"content_hash": "b550f300e8b8c0bcee9f85b7324b1dd5",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 87,
"avg_line_length": 27.727272727272727,
"alnum_prop": 0.7180327868852459,
"repo_name": "rtomac/robotframework-selenium2library",
"id": "a190c702a64d4f9b0de03d84cdfc6f708af044ab",
"size": "1609",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/SeleniumLibrary/utils/events/event.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1739"
},
{
"name": "HTML",
"bytes": "381713"
},
{
"name": "JavaScript",
"bytes": "9665"
},
{
"name": "Python",
"bytes": "283075"
},
{
"name": "RobotFramework",
"bytes": "100940"
}
],
"symlink_target": ""
}
|
"""Support for Spider thermostats."""
import logging
from homeassistant.components.climate import ClimateDevice
from homeassistant.components.climate.const import (
STATE_COOL, STATE_HEAT, STATE_IDLE, SUPPORT_FAN_MODE,
SUPPORT_OPERATION_MODE, SUPPORT_TARGET_TEMPERATURE)
from homeassistant.const import ATTR_TEMPERATURE, TEMP_CELSIUS
from . import DOMAIN as SPIDER_DOMAIN
DEPENDENCIES = ['spider']
FAN_LIST = [
'Auto',
'Low',
'Medium',
'High',
'Boost 10',
'Boost 20',
'Boost 30',
]
OPERATION_LIST = [
STATE_HEAT,
STATE_COOL,
]
HA_STATE_TO_SPIDER = {
STATE_COOL: 'Cool',
STATE_HEAT: 'Heat',
STATE_IDLE: 'Idle',
}
SPIDER_STATE_TO_HA = {value: key for key, value in HA_STATE_TO_SPIDER.items()}
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Spider thermostat."""
if discovery_info is None:
return
devices = [SpiderThermostat(hass.data[SPIDER_DOMAIN]['controller'], device)
for device in hass.data[SPIDER_DOMAIN]['thermostats']]
add_entities(devices, True)
class SpiderThermostat(ClimateDevice):
"""Representation of a thermostat."""
def __init__(self, api, thermostat):
"""Initialize the thermostat."""
self.api = api
self.thermostat = thermostat
@property
def supported_features(self):
"""Return the list of supported features."""
supports = SUPPORT_TARGET_TEMPERATURE
if self.thermostat.has_operation_mode:
supports |= SUPPORT_OPERATION_MODE
if self.thermostat.has_fan_mode:
supports |= SUPPORT_FAN_MODE
return supports
@property
def unique_id(self):
"""Return the id of the thermostat, if any."""
return self.thermostat.id
@property
def name(self):
"""Return the name of the thermostat, if any."""
return self.thermostat.name
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return TEMP_CELSIUS
@property
def current_temperature(self):
"""Return the current temperature."""
return self.thermostat.current_temperature
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
return self.thermostat.target_temperature
@property
def target_temperature_step(self):
"""Return the supported step of target temperature."""
return self.thermostat.temperature_steps
@property
def min_temp(self):
"""Return the minimum temperature."""
return self.thermostat.minimum_temperature
@property
def max_temp(self):
"""Return the maximum temperature."""
return self.thermostat.maximum_temperature
@property
def current_operation(self):
"""Return current operation ie. heat, cool, idle."""
return SPIDER_STATE_TO_HA[self.thermostat.operation_mode]
@property
def operation_list(self):
"""Return the list of available operation modes."""
return OPERATION_LIST
def set_temperature(self, **kwargs):
"""Set new target temperature."""
temperature = kwargs.get(ATTR_TEMPERATURE)
if temperature is None:
return
self.thermostat.set_temperature(temperature)
def set_operation_mode(self, operation_mode):
"""Set new target operation mode."""
self.thermostat.set_operation_mode(
HA_STATE_TO_SPIDER.get(operation_mode))
@property
def current_fan_mode(self):
"""Return the fan setting."""
return self.thermostat.current_fan_speed
def set_fan_mode(self, fan_mode):
"""Set fan mode."""
self.thermostat.set_fan_speed(fan_mode)
@property
def fan_list(self):
"""List of available fan modes."""
return FAN_LIST
def update(self):
"""Get the latest data."""
self.thermostat = self.api.get_thermostat(self.unique_id)
|
{
"content_hash": "bfac8317b781138b33c46bbc6619b49d",
"timestamp": "",
"source": "github",
"line_count": 151,
"max_line_length": 79,
"avg_line_length": 26.807947019867548,
"alnum_prop": 0.6395750988142292,
"repo_name": "jamespcole/home-assistant",
"id": "3b612441a8846633d9dbc82b3343bb221d48bdd0",
"size": "4048",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "homeassistant/components/spider/climate.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1081"
},
{
"name": "HCL",
"bytes": "826"
},
{
"name": "Python",
"bytes": "14822074"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17609"
}
],
"symlink_target": ""
}
|
"""Util for modifying the GRR server configuration."""
import argparse
import ConfigParser
import getpass
import json
import os
import re
# importing readline enables the raw_input calls to have history etc.
import readline # pylint: disable=unused-import
import sys
import urlparse
# pylint: disable=unused-import,g-bad-import-order
from grr.lib import server_plugins
# pylint: enable=g-bad-import-order,unused-import
from grr.lib import aff4
from grr.lib import artifact
from grr.lib import artifact_lib
from grr.lib import config_lib
from grr.lib import flags
# pylint: disable=g-import-not-at-top,no-name-in-module
try:
# FIXME(dbilby): Temporary hack until key_utils is deprecated.
from grr.lib import key_utils
except ImportError:
pass
from grr.lib import maintenance_utils
from grr.lib import rdfvalue
from grr.lib import startup
from grr.lib import utils
from grr.lib.aff4_objects import users
# pylint: enable=g-import-not-at-top,no-name-in-module
parser = flags.PARSER
parser.description = ("Set configuration parameters for the GRR Server."
"\nThis script has numerous subcommands to perform "
"various actions. When you are first setting up, you "
"probably only care about 'initialize'.")
# Generic arguments.
parser.add_argument(
"--share_dir", default="/usr/share/grr",
help="Path to the directory containing grr data.")
subparsers = parser.add_subparsers(
title="subcommands", dest="subparser_name", description="valid subcommands")
# Subparsers.
parser_memory = subparsers.add_parser(
"load_memory_drivers", help="Load memory drivers from disk to database.")
parser_generate_keys = subparsers.add_parser(
"generate_keys", help="Generate crypto keys in the configuration.")
parser_repack_clients = subparsers.add_parser(
"repack_clients",
help="Repack the clients binaries with the current configuration.")
parser_initialize = subparsers.add_parser(
"initialize",
help="Interactively run all the required steps to setup a new GRR install.")
# Update an existing user.
parser_update_user = subparsers.add_parser(
"update_user", help="Update user settings.")
parser_update_user.add_argument("username", help="Username to update.")
parser_update_user.add_argument(
"--password", default=None, help="Reset the password for this user..")
parser_update_user.add_argument(
"--label", default=[], action="append",
help=("Labels to set the user object. These are used to control access."
"Note that previous labels are cleared."))
parser_add_user = subparsers.add_parser(
"add_user", help="Add a new user.")
parser_add_user.add_argument("username", help="Username to update.")
parser_add_user.add_argument(
"--noadmin", default=False, action="store_true",
help="Don't create the user as an administrator.")
def UpdateUser(username, password, labels):
"""Implementation of the update_user command."""
with aff4.FACTORY.Create("aff4:/users/%s" % username,
"GRRUser", mode="rw") as fd:
# Note this accepts blank passwords as valid.
fd.SetPassword(password)
if labels:
# Allow labels to be comma separated list of labels.
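# e.g. ["admin,label2", "label3"] expands to ["admin", "label2", "label3"].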
expanded_labels = []
for label in labels:
if "," in label:
expanded_labels.extend(label.split(","))
else:
expanded_labels.append(label)
fd.SetLabels(*expanded_labels)
print "Updating user %s" % username
ShowUser(username)
# Show user account.
parser_show_user = subparsers.add_parser(
"show_user", help="Display user settings or list all users.")
parser_show_user.add_argument(
"username", default=None, nargs="?",
help="Username to display. If not specified, list all users.")
def ShowUser(username):
"""Implementation o the show_user command."""
if username is None:
fd = aff4.FACTORY.Open("aff4:/users")
for user in fd.OpenChildren():
if isinstance(user, users.GRRUser):
print user.Describe()
else:
user = aff4.FACTORY.Open("aff4:/users/%s" % username)
if isinstance(user, users.GRRUser):
print user.Describe()
else:
print "User %s not found" % username
# Generate Keys Arguments
parser_generate_keys.add_argument(
"--overwrite", default=False, action="store_true",
help="Required to overwrite existing keys.")
# Repack arguments.
parser_repack_clients.add_argument(
"--upload", default=True, action="store_false",
help="Upload the client binaries to the datastore.")
# Parent parser used in other upload based parsers.
parser_upload_args = argparse.ArgumentParser(add_help=False)
parser_upload_signed_args = argparse.ArgumentParser(add_help=False)
# Upload arguments.
parser_upload_args.add_argument(
"--file", help="The file to upload", required=True)
parser_upload_args.add_argument(
"--dest_path", required=False, default=None,
help="The destination path to upload the file to, specified in aff4: form,"
"e.g. aff4:/config/test.raw")
parser_upload_args.add_argument(
"--overwrite", default=False, action="store_true",
help="Required to overwrite existing files.")
parser_upload_signed_args.add_argument(
"--platform", required=True, choices=maintenance_utils.SUPPORTED_PLATFORMS,
default="windows",
help="The platform the file will be used on. This determines which signing"
" keys to use, and the path on the server the file will be uploaded to.")
parser_upload_signed_args.add_argument(
"--arch", required=True, choices=maintenance_utils.SUPPORTED_ARCHICTECTURES,
default="amd64",
help="The architecture the file will be used on. This determines "
" the path on the server the file will be uploaded to.")
# Upload parsers.
parser_upload_raw = subparsers.add_parser(
"upload_raw", parents=[parser_upload_args],
help="Upload a raw file to an aff4 path.")
parser_upload_artifact = subparsers.add_parser(
"upload_artifact", parents=[parser_upload_args],
help="Upload a raw json artifact file.")
parser_upload_python = subparsers.add_parser(
"upload_python", parents=[parser_upload_args, parser_upload_signed_args],
help="Sign and upload a 'python hack' which can be used to execute code on "
"a client.")
parser_upload_exe = subparsers.add_parser(
"upload_exe", parents=[parser_upload_args, parser_upload_signed_args],
help="Sign and upload an executable which can be used to execute code on "
"a client.")
parser_upload_memory_driver = subparsers.add_parser(
"upload_memory_driver",
parents=[parser_upload_args, parser_upload_signed_args],
help="Sign and upload a memory driver for a specific platform.")
def LoadMemoryDrivers(grr_dir):
"""Load memory drivers from disk to database."""
for client_context in [["Platform:Darwin", "Arch:amd64"],
["Platform:Windows", "Arch:i386"],
["Platform:Windows", "Arch:amd64"]]:
file_paths = config_lib.CONFIG.Get(
"MemoryDriver.driver_files", context=client_context)
aff4_paths = config_lib.CONFIG.Get(
"MemoryDriver.aff4_paths", context=client_context)
if len(file_paths) != len(aff4_paths):
print "Length mismatch:"
print "%s.", file_paths
print "%s.", aff4_paths
raise RuntimeError("Could not find all files/aff4 paths.")
for file_path, aff4_path in zip(file_paths, aff4_paths):
f_path = os.path.join(grr_dir, file_path)
print "Signing and uploading %s to %s" % (f_path, aff4_path)
up_path = maintenance_utils.UploadSignedDriverBlob(
open(f_path).read(), aff4_path=aff4_path,
client_context=client_context)
print "uploaded %s" % up_path
def ImportConfig(filename, config):
"""Reads an old config file and imports keys and user accounts."""
sections_to_import = ["PrivateKeys"]
entries_to_import = ["Client.driver_signing_public_key",
"Client.executable_signing_public_key",
"CA.certificate",
"Frontend.certificate"]
options_imported = 0
old_config = config_lib.CONFIG.MakeNewConfig()
old_config.Initialize(filename)
for entry in old_config.raw_data.keys():
try:
section = entry.split(".")[0]
if section in sections_to_import or entry in entries_to_import:
config.Set(entry, old_config.Get(entry))
print "Imported %s." % entry
options_imported += 1
except Exception as e: # pylint: disable=broad-except
print "Exception during import of %s: %s" % (entry, e)
return options_imported
def GenerateDjangoKey(config):
"""Update a config with a random django key."""
try:
secret_key = config["AdminUI.django_secret_key"]
except ConfigParser.NoOptionError:
secret_key = "CHANGE_ME" # This is the config file default.
if not secret_key or secret_key.strip().upper() == "CHANGE_ME":
key = utils.GeneratePassphrase(length=100)
config.Set("AdminUI.django_secret_key", key)
else:
print "Not updating django_secret_key as it is already set."
def GenerateKeys(config):
"""Generate the keys we need for a GRR server."""
if not hasattr(key_utils, "MakeCACert"):
parser.error("Generate keys can only run with open source key_utils.")
if (config.Get("PrivateKeys.server_key", default=None) and
not flags.FLAGS.overwrite):
raise RuntimeError("Config %s already has keys, use --overwrite to "
"override." % config.parser)
print "Generating executable signing key"
priv_key, pub_key = key_utils.GenerateRSAKey()
config.Set("PrivateKeys.executable_signing_private_key", priv_key)
config.Set("Client.executable_signing_public_key", pub_key)
print "Generating driver signing key"
priv_key, pub_key = key_utils.GenerateRSAKey()
config.Set("PrivateKeys.driver_signing_private_key", priv_key)
config.Set("Client.driver_signing_public_key", pub_key)
print "Generating CA keys"
ca_cert, ca_pk, _ = key_utils.MakeCACert()
cipher = None
config.Set("CA.certificate", ca_cert.as_pem())
config.Set("PrivateKeys.ca_key", ca_pk.as_pem(cipher))
print "Generating Server keys"
server_cert, server_key = key_utils.MakeCASignedCert("grr", ca_pk, bits=2048)
config.Set("Frontend.certificate", server_cert.as_pem())
config.Set("PrivateKeys.server_key", server_key.as_pem(cipher))
print "Generating Django Secret key (used for xsrf protection etc)"
GenerateDjangoKey(config)
def RetryQuestion(question_text, output_re="", default_val=""):
"""Continually ask a question until the output_re is matched."""
while True:
if default_val:
new_text = "%s [%s]: " % (question_text, default_val)
else:
new_text = "%s: " % question_text
output = raw_input(new_text) or default_val
output = output.strip()
if not output_re or re.match(output_re, output):
break
else:
print "Invalid input, must match %s" % output_re
return output
def ConfigureBaseOptions(config):
"""Configure the basic options required to run the server."""
print "We are now going to configure the server using a bunch of questions.\n"
print """\nFor GRR to work each client has to be able to communicate with the
server. To do this we normally need a public dns name or IP address to
communicate with. In the standard configuration this will be used to host both
the client facing server and the admin user interface.\n"""
print "Guessing public hostname of your server..."
try:
hostname = maintenance_utils.GuessPublicHostname()
print "Using %s as public hostname" % hostname
except (OSError, IOError):
print "Sorry, we couldn't guess your public hostname"
hostname = RetryQuestion("Please enter your public hostname e.g. "
"grr.example.com", "^([\\.A-Za-z0-9-]+)*$")
print """\n\nServer URL
The Server URL specifies the URL that the clients will connect to in order to
communicate with the server. This needs to be publicly accessible. By default
this will be port 8080 with the URL ending in /control.
"""
location = RetryQuestion("Server URL", "^http://.*/control$",
"http://%s:8080/control" % hostname)
config.Set("Client.control_urls", [location])
frontend_port = urlparse.urlparse(location).port or 80
if frontend_port != config_lib.CONFIG.Get("Frontend.bind_port"):
config.Set("Frontend.bind_port", frontend_port)
print "\nSetting the frontend listening port to %d.\n" % frontend_port
print "Please make sure that this matches your client settings.\n"
print """\nUI URL:
The UI URL specifies where the Administrative Web Interface can be found.
"""
ui_url = RetryQuestion("AdminUI URL", "^http://.*$",
"http://%s:8000" % hostname)
config.Set("AdminUI.url", ui_url)
print """\nMonitoring/Email domain name:
Emails concerning alerts or updates must be sent to this domain.
"""
domain = RetryQuestion("Email domain", "^([\\.A-Za-z0-9-]+)*$",
"example.com")
config.Set("Logging.domain", domain)
print """\nMonitoring email address
Address where monitoring events get sent, e.g. crashed clients, broken server
etc.
"""
email = RetryQuestion("Monitoring email", "",
"grr-monitoring@%s" % domain)
config.Set("Monitoring.alert_email", email)
print """\nEmergency email address
Address where high priority events such as an emergency ACL bypass are sent.
"""
emergency_email = RetryQuestion("Monitoring emergency email", "",
"grr-emergency@%s" % domain)
config.Set("Monitoring.emergency_access_email", emergency_email)
config.Write()
print ("Configuration parameters set. You can edit these in %s" %
config.parser)
def Initialize(config=None):
"""Initialize or update a GRR configuration."""
print "Checking write access on config %s" % config.parser
if not os.access(config.parser.filename, os.W_OK):
raise IOError("Config not writeable (need sudo?)")
print "\nStep 0: Importing Configuration from previous installation."
options_imported = 0
prev_config_file = config.Get("ConfigUpdater.old_config", default=None)
if prev_config_file and os.access(prev_config_file, os.R_OK):
print "Found config file %s." % prev_config_file
if raw_input("Do you want to import this configuration?"
" [yN]: ").upper() == "Y":
options_imported = ImportConfig(prev_config_file, config)
else:
print "No old config file found."
print "\nStep 1: Key Generation"
if config.Get("PrivateKeys.server_key", default=None):
if options_imported > 0:
print ("Since you have imported keys from another installation in the "
"last step,\nyou probably do not want to generate new keys now.")
if ((raw_input("You already have keys in your config, do you want to"
" overwrite them? [yN]: ").upper() or "N") == "Y"):
flags.FLAGS.overwrite = True
GenerateKeys(config)
else:
GenerateKeys(config)
print "\nStep 2: Setting Basic Configuration Parameters"
ConfigureBaseOptions(config)
# Now load our modified config.
startup.ConfigInit()
print "\nStep 3: Adding Admin User"
password = getpass.getpass(prompt="Please enter password for user 'admin': ")
UpdateUser("admin", password, ["admin"])
print "User admin added."
print "\nStep 4: Uploading Memory Drivers to the Database"
LoadMemoryDrivers(flags.FLAGS.share_dir)
print "\nStep 5: Repackaging clients with new configuration."
# We need to update the config to point to the installed templates now.
config.Set("ClientBuilder.executables_path", os.path.join(
flags.FLAGS.share_dir, "executables"))
# Build debug binaries, then build release binaries.
maintenance_utils.RepackAllBinaries(upload=True, debug_build=True)
maintenance_utils.RepackAllBinaries(upload=True)
print "\nInitialization complete, writing configuration."
config.Write()
print "Please restart the service for it to take effect.\n\n"
def UploadRaw(file_path, aff4_path):
"""Upload a file to the datastore."""
full_path = rdfvalue.RDFURN(aff4_path).Add(os.path.basename(file_path))
fd = aff4.FACTORY.Create(full_path, "AFF4Image", mode="w")
fd.Write(open(file_path).read(1024*1024*30))
fd.Close()
return str(fd.urn)
def main(unused_argv):
"""Main."""
config_lib.CONFIG.AddContext("Commandline Context")
config_lib.CONFIG.AddContext("ConfigUpdater Context")
startup.Init()
try:
print "Using configuration %s" % config_lib.CONFIG.parser
except AttributeError:
raise RuntimeError("No valid config specified.")
if flags.FLAGS.subparser_name == "load_memory_drivers":
LoadMemoryDrivers(flags.FLAGS.share_dir)
elif flags.FLAGS.subparser_name == "generate_keys":
try:
GenerateKeys(config_lib.CONFIG)
except RuntimeError, e:
# GenerateKeys will raise if keys exist and --overwrite is not set.
print "ERROR: %s" % e
sys.exit(1)
config_lib.CONFIG.Write()
elif flags.FLAGS.subparser_name == "repack_clients":
maintenance_utils.RepackAllBinaries(upload=flags.FLAGS.upload)
maintenance_utils.RepackAllBinaries(upload=flags.FLAGS.upload,
debug_build=True)
elif flags.FLAGS.subparser_name == "initialize":
Initialize(config_lib.CONFIG)
elif flags.FLAGS.subparser_name == "show_user":
ShowUser(flags.FLAGS.username)
elif flags.FLAGS.subparser_name == "update_user":
UpdateUser(flags.FLAGS.username, flags.FLAGS.password, flags.FLAGS.label)
elif flags.FLAGS.subparser_name == "add_user":
password = getpass.getpass(prompt="Please enter password for user '%s': " %
flags.FLAGS.username)
labels = []
if not flags.FLAGS.noadmin:
labels.append("admin")
UpdateUser(flags.FLAGS.username, password, labels)
elif flags.FLAGS.subparser_name == "upload_python":
content = open(flags.FLAGS.file).read(1024*1024*30)
aff4_path = flags.FLAGS.dest_path
if not aff4_path:
python_hack_root_urn = config_lib.CONFIG.Get("Config.python_hack_root")
aff4_path = python_hack_root_urn.Add(os.path.basename(flags.FLAGS.file))
context = ["Platform:%s" % flags.FLAGS.platform.title(),
"Client"]
maintenance_utils.UploadSignedConfigBlob(content, aff4_path=aff4_path,
client_context=context)
elif flags.FLAGS.subparser_name == "upload_exe":
content = open(flags.FLAGS.file).read(1024*1024*30)
context = ["Platform:%s" % flags.FLAGS.platform.title(),
"Client"]
if flags.FLAGS.dest_path:
dest_path = rdfvalue.RDFURN(flags.FLAGS.dest_path)
else:
dest_path = config_lib.CONFIG.Get(
"Executables.aff4_path", context=context).Add(
os.path.basename(flags.FLAGS.file))
# Now upload to the destination.
uploaded = maintenance_utils.UploadSignedConfigBlob(
content, aff4_path=dest_path, client_context=context)
print "Uploaded to %s" % dest_path
elif flags.FLAGS.subparser_name == "upload_memory_driver":
client_context = ["Platform:%s" % flags.FLAGS.platform.title(),
"Arch:%s" % flags.FLAGS.arch]
content = open(flags.FLAGS.file).read(1024*1024*30)
if flags.FLAGS.dest_path:
uploaded = maintenance_utils.UploadSignedDriverBlob(
content, aff4_path=flags.FLAGS.dest_path,
client_context=client_context)
else:
uploaded = maintenance_utils.UploadSignedDriverBlob(
content, client_context=client_context)
print "Uploaded to %s" % uploaded
elif flags.FLAGS.subparser_name == "upload_raw":
if not flags.FLAGS.dest_path:
flags.FLAGS.dest_path = aff4.ROOT_URN.Add("config").Add("raw")
uploaded = UploadRaw(flags.FLAGS.file, flags.FLAGS.dest_path)
print "Uploaded to %s" % uploaded
elif flags.FLAGS.subparser_name == "upload_artifact":
json.load(open(flags.FLAGS.file)) # Check it will parse.
base_urn = aff4.ROOT_URN.Add("artifact_store")
try:
artifact.UploadArtifactYamlFile(
open(flags.FLAGS.file).read(1000000), base_urn=base_urn, token=None,
overwrite=flags.FLAGS.overwrite)
except artifact_lib.ArtifactDefinitionError as e:
print "Error %s. You may need to set --overwrite." % e
if __name__ == "__main__":
flags.StartMain(main)
|
{
"content_hash": "605b6c0e513c2426c8be84d2448f8dce",
"timestamp": "",
"source": "github",
"line_count": 562,
"max_line_length": 80,
"avg_line_length": 36.46797153024911,
"alnum_prop": 0.6843620395218346,
"repo_name": "simsong/grr-insider",
"id": "fb8650fa28f9c0de1d1b556731102d1692df8ce0",
"size": "20517",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/config_updater.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "227"
},
{
"name": "C++",
"bytes": "55149"
},
{
"name": "CSS",
"bytes": "36308"
},
{
"name": "JavaScript",
"bytes": "679269"
},
{
"name": "Python",
"bytes": "3553249"
},
{
"name": "Shell",
"bytes": "30813"
}
],
"symlink_target": ""
}
|
import sys
from fetch_data import FetchData
from markov_python.cc_markov import MarkovChain
"""
Goofy first attempt at a Python application that uses the Codecademy
markov_python module to create fun/dumb/whatever responses based on
data pulled from various web locations.
Pretty lame, but I didn't want to spend much time on it...
Expects at least one URL on the command line for a source of text to
pull and search.
Example: run.py http://www.textfiles.com/sf/adams.txt http://www.textfiles.com/sf/alt3.txt
"""
def main(args):
mc = MarkovChain()
for a in args[1::]:
fd = FetchData(a)
mc.add_string(fd.fetch_data())
chain = mc.generate_text()
out = (" ").join(chain)
print out
if __name__ == "__main__":
main(sys.argv)
|
{
"content_hash": "111d26a9eee2363e4b595405c7c1eee3",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 90,
"avg_line_length": 23.181818181818183,
"alnum_prop": 0.6967320261437908,
"repo_name": "omnifice/markov_chain",
"id": "fdaa6e19eca87fadb0f5b0cda80f52d1043772f7",
"size": "788",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "run.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1881"
}
],
"symlink_target": ""
}
|
import os
from google.cloud import storage
import pytest
import requests
import generate_signed_urls
BUCKET = os.environ['CLOUD_STORAGE_BUCKET']
GOOGLE_APPLICATION_CREDENTIALS = os.environ['GOOGLE_APPLICATION_CREDENTIALS']
@pytest.fixture
def test_blob():
"""Provides a pre-existing blob in the test bucket."""
bucket = storage.Client().bucket(BUCKET)
blob = bucket.blob('storage_snippets_test_sigil')
blob.upload_from_string('Hello, is it me you\'re looking for?')
return blob
def test_generate_get_signed_url(test_blob, capsys):
get_signed_url = generate_signed_urls.generate_signed_url(
service_account_file=GOOGLE_APPLICATION_CREDENTIALS,
bucket_name=BUCKET, object_name=test_blob.name,
expiration=60)
response = requests.get(get_signed_url)
assert response.ok
|
{
"content_hash": "2ee5a477c3e66bf12d097fb5dce7f26c",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 77,
"avg_line_length": 29.571428571428573,
"alnum_prop": 0.7282608695652174,
"repo_name": "GoogleCloudPlatform/python-docs-samples",
"id": "2f3c426a6f6c7c889fc5bc1d56bbb0dd381b08c1",
"size": "1404",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "storage/signed_urls/generate_signed_urls_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "8008"
},
{
"name": "Dockerfile",
"bytes": "62031"
},
{
"name": "HTML",
"bytes": "69878"
},
{
"name": "JavaScript",
"bytes": "26494"
},
{
"name": "Jinja",
"bytes": "1892"
},
{
"name": "Jupyter Notebook",
"bytes": "47951698"
},
{
"name": "Makefile",
"bytes": "932"
},
{
"name": "Procfile",
"bytes": "138"
},
{
"name": "PureBasic",
"bytes": "11115"
},
{
"name": "Python",
"bytes": "5323502"
},
{
"name": "Shell",
"bytes": "78261"
}
],
"symlink_target": ""
}
|
__author__ = 'gjp'
"""File with Constants used in fiware-facts project.
"""
# HTTP CONSTANTS
CONTENT_HEADER = u'content-type'
JSON_TYPE = u'application/json'
REMOTE_ADDR = u'REMOTE_ADDR'
REMOTE_PORT = u'REMOTE_PORT'
# ORION CONSTANTS
CONTEXT_RESPONSES = u'contextResponses'
CONTEXT_ELEMENT = u'contextElement'
CONTEXT_ATTRIBUTES = u'attributes'
CONTEXT_ATTRIBUTES_NAME = u'name'
CONTEXT_ATTRIBUTES_VALUE = u'value'
|
{
"content_hash": "ef24edd13d1b887f7e6aa2638fc3aa07",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 52,
"avg_line_length": 24.58823529411765,
"alnum_prop": 0.7464114832535885,
"repo_name": "Fiware/cloud.Facts",
"id": "258cb438b0ad4aff50c79289700c05cb0f17495c",
"size": "1237",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "facts/constants.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "API Blueprint",
"bytes": "12835"
},
{
"name": "Cucumber",
"bytes": "33954"
},
{
"name": "Python",
"bytes": "167098"
},
{
"name": "Ruby",
"bytes": "1891"
},
{
"name": "Shell",
"bytes": "6076"
}
],
"symlink_target": ""
}
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('passive_data_kit', '0038_auto_20180821_1414'),
]
operations = [
migrations.AddField(
model_name='dataservermetadatum',
name='last_updated',
field=models.DateTimeField(blank=True, null=True),
),
]
|
{
"content_hash": "fc388700752caace5727f43a5d367e8f",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 62,
"avg_line_length": 23.4375,
"alnum_prop": 0.6,
"repo_name": "audaciouscode/PassiveDataKit-Django",
"id": "7093d628f7e1098541a8fbd43dcb62719c417d80",
"size": "472",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "migrations/0039_dataservermetadatum_last_updated.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1703"
},
{
"name": "HTML",
"bytes": "98679"
},
{
"name": "JavaScript",
"bytes": "161391"
},
{
"name": "Python",
"bytes": "426910"
}
],
"symlink_target": ""
}
|
import collections
import logging
import sys
from telemetry.core.backends.chrome_inspector import inspector_backend
from telemetry.core import exceptions
def DebuggerUrlToId(debugger_url):
return debugger_url.split('/')[-1]
class InspectorBackendList(collections.Sequence):
"""A dynamic sequence of active InspectorBackends."""
def __init__(self, browser_backend):
"""Constructor.
Args:
browser_backend: The BrowserBackend instance to query for
InspectorBackends.
"""
self._browser_backend = browser_backend
self._devtools_context_map_backend = None
# A list of filtered contexts.
self._filtered_context_ids = []
# A cache of inspector backends, by context ID.
self._wrapper_dict = {}
@property
def _devtools_client(self):
return self._browser_backend.devtools_client
@property
def app(self):
return self._browser_backend.app
def GetContextInfo(self, context_id):
return self._devtools_context_map_backend.GetContextInfo(context_id)
def ShouldIncludeContext(self, _context):
"""Override this method to control which contexts are included."""
return True
def CreateWrapper(self, inspector_backend_instance):
"""Override to return the wrapper API over InspectorBackend.
The wrapper API is the public interface for InspectorBackend. It
may expose whatever methods are desired on top of that backend.
"""
raise NotImplementedError
# TODO(nednguyen): Remove this method and turn inspector_backend_list API to
# dictionary-like API (crbug.com/398467)
def __getitem__(self, index):
self._Update()
if index >= len(self._filtered_context_ids):
logging.error('About to explode: _filtered_context_ids = %s',
repr({
"index": index,
"context_ids": self._filtered_context_ids
}))
context_id = self._filtered_context_ids[index]
return self.GetBackendFromContextId(context_id)
def GetTabById(self, identifier):
self._Update()
return self.GetBackendFromContextId(identifier)
def GetBackendFromContextId(self, context_id):
self._Update()
if context_id not in self._wrapper_dict:
try:
backend = self._devtools_context_map_backend.GetInspectorBackend(
context_id)
except exceptions.Error as e:
self._HandleDevToolsConnectionError(e)
raise e
# Propagate KeyError from GetInspectorBackend call.
wrapper = self.CreateWrapper(backend)
self._wrapper_dict[context_id] = wrapper
return self._wrapper_dict[context_id]
def __iter__(self):
self._Update()
return iter(self._filtered_context_ids)
def __len__(self):
self._Update()
return len(self._filtered_context_ids)
def _Update(self):
backends_map = self._devtools_client.GetUpdatedInspectableContexts()
self._devtools_context_map_backend = backends_map
# Clear context ids that do not appear in the inspectable contexts.
context_ids = [context['id'] for context in backends_map.contexts]
self._filtered_context_ids = [context_id
for context_id in self._filtered_context_ids
if context_id in context_ids]
# Add new context ids.
for context in backends_map.contexts:
if (context['id'] not in self._filtered_context_ids and
self.ShouldIncludeContext(context)):
self._filtered_context_ids.append(context['id'])
# Clean up any backends for contexts that have gone away.
for context_id in self._wrapper_dict.keys():
if context_id not in self._filtered_context_ids:
del self._wrapper_dict[context_id]
def _HandleDevToolsConnectionError(self, error):
"""Called when handling errors in connecting to the DevTools websocket.
This can be overwritten by sub-classes to add more debugging information to
errors.
"""
pass
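# Illustrative subclass sketch (hypothetical names): a concrete list overrides
# CreateWrapper to return its public wrapper type over the raw backend.
#
#   class ExampleTabList(InspectorBackendList):
#     def CreateWrapper(self, inspector_backend_instance):
#       return ExampleTab(inspector_backend_instance)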
|
{
"content_hash": "608c95fd1eb07de624e8d06537c68ff9",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 79,
"avg_line_length": 33.23529411764706,
"alnum_prop": 0.6788874841972187,
"repo_name": "fujunwei/chromium-crosswalk",
"id": "89a7ada251cb34d720d6e15958438ca41c37e353",
"size": "4118",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tools/telemetry/telemetry/core/backends/chrome_inspector/inspector_backend_list.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "23829"
},
{
"name": "Batchfile",
"bytes": "8451"
},
{
"name": "C",
"bytes": "4116349"
},
{
"name": "C++",
"bytes": "233601977"
},
{
"name": "CSS",
"bytes": "931025"
},
{
"name": "Emacs Lisp",
"bytes": "988"
},
{
"name": "HTML",
"bytes": "28881204"
},
{
"name": "Java",
"bytes": "9824090"
},
{
"name": "JavaScript",
"bytes": "19683742"
},
{
"name": "Makefile",
"bytes": "68017"
},
{
"name": "Objective-C",
"bytes": "1478432"
},
{
"name": "Objective-C++",
"bytes": "8653645"
},
{
"name": "PHP",
"bytes": "97817"
},
{
"name": "PLpgSQL",
"bytes": "171186"
},
{
"name": "Perl",
"bytes": "63937"
},
{
"name": "Protocol Buffer",
"bytes": "456460"
},
{
"name": "Python",
"bytes": "7963013"
},
{
"name": "Shell",
"bytes": "468673"
},
{
"name": "Standard ML",
"bytes": "4965"
},
{
"name": "XSLT",
"bytes": "418"
},
{
"name": "nesC",
"bytes": "18347"
}
],
"symlink_target": ""
}
|
"""Schemas for user profiles and preferences."""
from flask_babelex import lazy_gettext as _
from marshmallow import Schema, ValidationError, fields
def validate_visibility(value):
"""Check if the value is a valid visibility setting."""
if value not in ["public", "restricted"]:
raise ValidationError(
message=str(_("Value must be either 'public' or 'restricted'."))
)
class UserProfileSchema(Schema):
"""The default user profile schema."""
full_name = fields.String()
affiliations = fields.String()
class UserPreferencesSchema(Schema):
"""The default schema for user preferences."""
visibility = fields.String(validate=validate_visibility)
email_visibility = fields.String(validate=validate_visibility)
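# Illustrative use (hypothetical payload values): the validator accepts only
# the two supported settings for each preference field.
#
#   UserPreferencesSchema().load({"visibility": "public"})    # accepted
#   UserPreferencesSchema().load({"visibility": "hidden"})    # fails validation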
|
{
"content_hash": "7d880a4a9c97c166efd9c3d788ed32ad",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 76,
"avg_line_length": 28.666666666666668,
"alnum_prop": 0.7028423772609819,
"repo_name": "inveniosoftware/invenio-accounts",
"id": "05654acb3ff4772c116cbe9d15e553fd0af6a47d",
"size": "1007",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "invenio_accounts/profiles/schemas.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "32362"
},
{
"name": "Python",
"bytes": "225061"
},
{
"name": "Shell",
"bytes": "837"
}
],
"symlink_target": ""
}
|
import abc
import fixtures
import netaddr
import six
from neutron.agent.linux import bridge_lib
from neutron.agent.linux import ip_lib
from neutron.agent.linux import ovs_lib
from neutron.common import constants as n_const
from neutron.openstack.common import uuidutils
from neutron.tests import base as tests_base
from neutron.tests.common import base as common_base
from neutron.tests import tools
NS_PREFIX = 'func-'
BR_PREFIX = 'test-br'
PORT_PREFIX = 'test-port'
VETH0_PREFIX = 'test-veth0'
VETH1_PREFIX = 'test-veth1'
def get_rand_port_name():
return tests_base.get_rand_name(max_length=n_const.DEVICE_NAME_MAX_LEN,
prefix=PORT_PREFIX)
def increment_ip_cidr(ip_cidr, offset=1):
"""Increment ip_cidr offset times.
example: increment_ip_cidr("1.2.3.4/24", 2) ==> "1.2.3.6/24"
"""
net0 = netaddr.IPNetwork(ip_cidr)
net = netaddr.IPNetwork(ip_cidr)
net.value += offset
if not net0.network < net.ip < net0.broadcast:
tools.fail(
'Incorrect ip_cidr,offset tuple (%s,%s): "incremented" ip_cidr is '
'outside ip_cidr' % (ip_cidr, offset))
return str(net)
def set_namespace_gateway(port_dev, gateway_ip):
"""Set gateway for the namespace associated to the port."""
if not port_dev.namespace:
tools.fail('tests should not change test machine gateway')
port_dev.route.add_gateway(gateway_ip)
class NamespaceFixture(fixtures.Fixture):
"""Create a namespace.
:ivar ip_wrapper: created namespace
:type ip_wrapper: IPWrapper
:ivar name: created namespace name
:type name: str
"""
def __init__(self, prefix=NS_PREFIX):
super(NamespaceFixture, self).__init__()
self.prefix = prefix
def setUp(self):
super(NamespaceFixture, self).setUp()
ip = ip_lib.IPWrapper()
self.name = self.prefix + uuidutils.generate_uuid()
self.ip_wrapper = ip.ensure_namespace(self.name)
self.addCleanup(self.destroy)
def destroy(self):
if self.ip_wrapper.netns.exists(self.name):
self.ip_wrapper.netns.delete(self.name)
class VethFixture(fixtures.Fixture):
"""Create a veth.
:ivar ports: created veth ports
:type ports: IPDevice 2-uplet
"""
def setUp(self):
super(VethFixture, self).setUp()
ip_wrapper = ip_lib.IPWrapper()
def _create_veth(name0):
name1 = name0.replace(VETH0_PREFIX, VETH1_PREFIX)
return ip_wrapper.add_veth(name0, name1)
self.ports = common_base.create_resource(VETH0_PREFIX, _create_veth)
self.addCleanup(self.destroy)
def destroy(self):
for port in self.ports:
ip_wrapper = ip_lib.IPWrapper(port.namespace)
try:
ip_wrapper.del_veth(port.name)
break
except RuntimeError:
# NOTE(cbrandily): It seems a veth is automagically deleted
# when a namespace owning a veth endpoint is deleted.
pass
@six.add_metaclass(abc.ABCMeta)
class PortFixture(fixtures.Fixture):
"""Create a port.
:ivar port: created port
:type port: IPDevice
:ivar bridge: port bridge
"""
def __init__(self, bridge=None, namespace=None):
self.bridge = bridge
self.namespace = namespace
@abc.abstractmethod
def _create_bridge_fixture(self):
pass
@abc.abstractmethod
def setUp(self):
super(PortFixture, self).setUp()
if not self.bridge:
self.bridge = self.useFixture(self._create_bridge_fixture()).bridge
class OVSBridgeFixture(fixtures.Fixture):
"""Create an OVS bridge.
:ivar bridge: created bridge
:type bridge: OVSBridge
"""
def setUp(self):
super(OVSBridgeFixture, self).setUp()
ovs = ovs_lib.BaseOVS()
self.bridge = common_base.create_resource(BR_PREFIX, ovs.add_bridge)
self.addCleanup(self.bridge.destroy)
class OVSPortFixture(PortFixture):
def _create_bridge_fixture(self):
return OVSBridgeFixture()
def setUp(self):
super(OVSPortFixture, self).setUp()
port_name = common_base.create_resource(PORT_PREFIX, self.create_port)
self.addCleanup(self.bridge.delete_port, port_name)
self.port = ip_lib.IPDevice(port_name)
ns_ip_wrapper = ip_lib.IPWrapper(self.namespace)
ns_ip_wrapper.add_device_to_namespace(self.port)
self.port.link.set_up()
def create_port(self, name):
self.bridge.add_port(name, ('type', 'internal'))
return name
class LinuxBridgeFixture(fixtures.Fixture):
"""Create a linux bridge.
:ivar bridge: created bridge
:type bridge: BridgeDevice
:ivar namespace: created bridge namespace
:type namespace: str
"""
def setUp(self):
super(LinuxBridgeFixture, self).setUp()
self.namespace = self.useFixture(NamespaceFixture()).name
self.bridge = common_base.create_resource(
BR_PREFIX,
bridge_lib.BridgeDevice.addbr,
namespace=self.namespace)
self.addCleanup(self.bridge.delbr)
self.bridge.link.set_up()
self.addCleanup(self.bridge.link.set_down)
class LinuxBridgePortFixture(PortFixture):
"""Create a linux bridge port.
:ivar port: created port
:type port: IPDevice
:ivar br_port: bridge side veth peer port
:type br_port: IPDevice
"""
def _create_bridge_fixture(self):
return LinuxBridgeFixture()
def setUp(self):
super(LinuxBridgePortFixture, self).setUp()
self.port, self.br_port = self.useFixture(VethFixture()).ports
# bridge side
br_ip_wrapper = ip_lib.IPWrapper(self.bridge.namespace)
br_ip_wrapper.add_device_to_namespace(self.br_port)
self.bridge.addif(self.br_port)
self.br_port.link.set_up()
# port side
ns_ip_wrapper = ip_lib.IPWrapper(self.namespace)
ns_ip_wrapper.add_device_to_namespace(self.port)
self.port.link.set_up()
class VethBridge(object):
def __init__(self, ports):
self.ports = ports
self.unallocated_ports = set(self.ports)
def allocate_port(self):
try:
return self.unallocated_ports.pop()
except KeyError:
tools.fail('All FakeBridge ports (%s) are already allocated.' %
len(self.ports))
class VethBridgeFixture(fixtures.Fixture):
"""Simulate a bridge with a veth.
:ivar bridge: created bridge
:type bridge: FakeBridge
"""
def setUp(self):
super(VethBridgeFixture, self).setUp()
ports = self.useFixture(VethFixture()).ports
self.bridge = VethBridge(ports)
class VethPortFixture(PortFixture):
"""Create a veth bridge port.
:ivar port: created port
:type port: IPDevice
"""
def _create_bridge_fixture(self):
return VethBridgeFixture()
def setUp(self):
super(VethPortFixture, self).setUp()
self.port = self.bridge.allocate_port()
ns_ip_wrapper = ip_lib.IPWrapper(self.namespace)
ns_ip_wrapper.add_device_to_namespace(self.port)
self.port.link.set_up()
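# Illustrative sketch (hypothetical test code): port fixtures are consumed via
# useFixture() inside a functional test, e.g.
#
#   port_fixture = self.useFixture(OVSPortFixture(namespace=ns_name))
#   device_name = port_fixture.port.name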
|
{
"content_hash": "bdc4875e0dd290b70b94662c232072fa",
"timestamp": "",
"source": "github",
"line_count": 257,
"max_line_length": 79,
"avg_line_length": 28.116731517509727,
"alnum_prop": 0.6397730417935233,
"repo_name": "waltBB/neutron_read",
"id": "4ccf06d2007ffddc54ebb45c31080a5fa763ae77",
"size": "7844",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "neutron/tests/common/net_helpers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "980"
},
{
"name": "Python",
"bytes": "7051018"
},
{
"name": "Shell",
"bytes": "12287"
}
],
"symlink_target": ""
}
|
from datetime import datetime
from flask import Blueprint, render_template, current_app
<% if ((package.services.mongodb && package.flask.mongoengine) || package.flask.sqlalchemy) { %>
from <%= package.pythonName %>.models.user import User
<% } %>
mod = Blueprint('index', __name__)
@mod.route("/")
def index():
<% if (package.services.mongodb && package.flask.mongoengine) { %>
users = list(User.objects.all())
<% } else if (package.flask.sqlalchemy) { %>
users = list(current_app.db.session.query(User).all())
<% } else { %>
users = []
<% } %>
return render_template('index.html', dt=datetime.now().strftime("%d %m %Y - %H:%M:%S"), users=users)
|
{
"content_hash": "5365177d3a546e8dbb093f7a2d3e645d",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 104,
"avg_line_length": 31.136363636363637,
"alnum_prop": 0.6335766423357664,
"repo_name": "heynemann/generator-flask-app",
"id": "7cda31a1d909a8c1ab5415a105ea465c3a58ac61",
"size": "1021",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/templates/_handlers_index.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2468"
},
{
"name": "CoffeeScript",
"bytes": "288"
},
{
"name": "HTML",
"bytes": "4198"
},
{
"name": "JavaScript",
"bytes": "15136"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "42467"
}
],
"symlink_target": ""
}
|
import os.path
import shutil
import fixtures
import stubout
from glance.api.middleware import context
from glance.common import config
from glance.tests import utils as test_utils
class TestPasteApp(test_utils.BaseTestCase):
def setUp(self):
super(TestPasteApp, self).setUp()
self.stubs = stubout.StubOutForTesting()
self.addCleanup(self.stubs.UnsetAll)
def _do_test_load_paste_app(self,
expected_app_type,
make_paste_file=True,
paste_flavor=None,
paste_config_file=None,
paste_append=None):
def _writeto(path, str):
with open(path, 'wb') as f:
f.write(str or '')
f.flush()
def _appendto(orig, copy, str):
shutil.copy(orig, copy)
with open(copy, 'ab') as f:
f.write(str or '')
f.flush()
self.config(flavor=paste_flavor,
config_file=paste_config_file,
group='paste_deploy')
temp_dir = self.useFixture(fixtures.TempDir()).path
temp_file = os.path.join(temp_dir, 'testcfg.conf')
_writeto(temp_file, '[DEFAULT]\n')
config.parse_args(['--config-file', temp_file])
paste_to = temp_file.replace('.conf', '-paste.ini')
if not paste_config_file and make_paste_file:
paste_from = os.path.join(os.getcwd(),
'etc/glance-registry-paste.ini')
_appendto(paste_from, paste_to, paste_append)
app = config.load_paste_app('glance-registry')
self.assertIsInstance(app, expected_app_type)
def test_load_paste_app(self):
expected_middleware = context.UnauthenticatedContextMiddleware
self._do_test_load_paste_app(expected_middleware)
def test_load_paste_app_paste_config_not_found(self):
expected_middleware = context.UnauthenticatedContextMiddleware
self.assertRaises(RuntimeError, self._do_test_load_paste_app,
expected_middleware, make_paste_file=False)
def test_load_paste_app_with_paste_flavor(self):
pipeline = ('[pipeline:glance-registry-incomplete]\n'
'pipeline = context registryapp')
expected_middleware = context.ContextMiddleware
self._do_test_load_paste_app(expected_middleware,
paste_flavor='incomplete',
paste_append=pipeline)
def test_load_paste_app_with_paste_config_file(self):
paste_config_file = os.path.join(os.getcwd(),
'etc/glance-registry-paste.ini')
expected_middleware = context.UnauthenticatedContextMiddleware
self._do_test_load_paste_app(expected_middleware,
paste_config_file=paste_config_file)
def test_get_path_non_exist(self):
self.assertRaises(RuntimeError, config._get_deployment_config_file)
|
{
"content_hash": "fb0fc41579d23f5179d9344a66d68a8e",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 75,
"avg_line_length": 37.51807228915663,
"alnum_prop": 0.5764290301862556,
"repo_name": "tanglei528/glance",
"id": "e1c772253b55564e7635543c67d8bce37259691b",
"size": "3750",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "glance/tests/unit/common/test_config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3193082"
},
{
"name": "Shell",
"bytes": "7168"
}
],
"symlink_target": ""
}
|
from Quartz import *
from PyObjCTools.TestSupport import *
try:
unicode
except NameError:
unicode = str
class TestCAEmitterLayer (TestCase):
@min_os_level('10.6')
def testMethods10_6(self):
self.assertResultHasType(CAEmitterLayer.emitterPosition, CGPoint.__typestr__)
self.assertArgHasType(CAEmitterLayer.setEmitterPosition_, 0, CGPoint.__typestr__)
self.assertResultHasType(CAEmitterLayer.emitterSize, CGSize.__typestr__)
self.assertArgHasType(CAEmitterLayer.setEmitterSize_, 0, CGSize.__typestr__)
self.assertResultIsBOOL(CAEmitterLayer.preservesDepth)
self.assertArgIsBOOL(CAEmitterLayer.setPreservesDepth_, 0)
@min_os_level('10.6')
def testConstants10_6(self):
self.assertIsInstance(kCAEmitterLayerPoint, unicode)
self.assertIsInstance(kCAEmitterLayerLine, unicode)
self.assertIsInstance(kCAEmitterLayerRectangle, unicode)
self.assertIsInstance(kCAEmitterLayerCuboid, unicode)
self.assertIsInstance(kCAEmitterLayerCircle, unicode)
self.assertIsInstance(kCAEmitterLayerSphere, unicode)
self.assertIsInstance(kCAEmitterLayerPoints, unicode)
self.assertIsInstance(kCAEmitterLayerOutline, unicode)
self.assertIsInstance(kCAEmitterLayerSurface, unicode)
self.assertIsInstance(kCAEmitterLayerVolume, unicode)
self.assertIsInstance(kCAEmitterLayerUnordered, unicode)
self.assertIsInstance(kCAEmitterLayerOldestFirst, unicode)
self.assertIsInstance(kCAEmitterLayerOldestLast, unicode)
self.assertIsInstance(kCAEmitterLayerBackToFront, unicode)
self.assertIsInstance(kCAEmitterLayerAdditive, unicode)
if __name__ == "__main__":
main()
|
{
"content_hash": "9e53fe14da51642779e99d7660a2ea90",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 89,
"avg_line_length": 43.3,
"alnum_prop": 0.7482678983833718,
"repo_name": "albertz/music-player",
"id": "89e619434d860df4b15a2fe33d6445eccc79b493",
"size": "1732",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mac/pyobjc-framework-Quartz/PyObjCTest/test_caemitterlayer.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Assembly",
"bytes": "47481"
},
{
"name": "C",
"bytes": "435926"
},
{
"name": "C++",
"bytes": "149133"
},
{
"name": "CSS",
"bytes": "16435"
},
{
"name": "HTML",
"bytes": "914432"
},
{
"name": "JavaScript",
"bytes": "52869"
},
{
"name": "M",
"bytes": "10808"
},
{
"name": "Makefile",
"bytes": "13304"
},
{
"name": "Mathematica",
"bytes": "61418"
},
{
"name": "Objective-C",
"bytes": "2082720"
},
{
"name": "Objective-C++",
"bytes": "62427"
},
{
"name": "PostScript",
"bytes": "2783"
},
{
"name": "Prolog",
"bytes": "217"
},
{
"name": "Python",
"bytes": "7789845"
},
{
"name": "QMake",
"bytes": "9667"
},
{
"name": "Roff",
"bytes": "8329"
},
{
"name": "Shell",
"bytes": "3521"
}
],
"symlink_target": ""
}
|
class EventListener:
    def __init__(self, events=None):
        # use None as the default to avoid sharing one mutable list between instances
        self._events = events if events is not None else []
def append(self, event):
self._events.append(event)
def fire(self, e):
for _event in self._events:
_event(e)
class IndexedDB:
def __init__(self):
if not __BRYTHON__.has_indexedDB:
            raise NotImplementedError("Your browser doesn't support indexedDB")
self._indexedDB=__BRYTHON__.indexedDB()
self._db=None
self._version=None
def _onsuccess(self, event):
self._db=event.target.result
def open(self, name, onsuccess, version=1.0, onerror=None,
onupgradeneeded=None):
self._version=version
_result=self._indexedDB.open(name, version)
_success=EventListener([self._onsuccess, onsuccess])
_result.onsuccess=_success.fire
_result.onupgradeneeded=onupgradeneeded
        if onerror is None:
            # fall back to a simple logger when the caller supplies no handler
            def onerror(e):
                print("onerror: %s:%s" % (e.type, e.target.result))
def onblocked(e):
print("blocked: %s:%s" % (e.type, e.result))
_result.onerror=onerror
_result.onblocked=onblocked
def transaction(self, entities, mode='read'):
return Transaction(self._db.transaction(entities, mode))
class Transaction:
def __init__(self, transaction):
self._transaction=transaction
def objectStore(self, name):
return ObjectStore(self._transaction.objectStore(name))
class ObjectStore:
def __init__(self, objectStore):
self._objectStore=objectStore
self._data=[]
def clear(self, onsuccess=None, onerror=None):
_result=self._objectStore.clear()
if onsuccess is not None:
_result.onsuccess=onsuccess
if onerror is not None:
_result.onerror=onerror
def _helper(self, func, object, onsuccess=None, onerror=None):
_result=func(object)
if onsuccess is not None:
_result.onsuccess=onsuccess
if onerror is not None:
_result.onerror=onerror
def put(self, object, key=None, onsuccess=None, onerror=None):
_r=self._objectStore.put(object, key)
_r.onsuccess=onsuccess
_r.onerror=onerror
def add(self, object, onsuccess=None, onerror=None):
self._helper(self._objectStore.add, object, onsuccess, onerror)
def delete(self, index, onsuccess=None, onerror=None):
self._helper(self._objectStore.delete, index, onsuccess, onerror)
def query(self, *args):
self._data=[]
def onsuccess(event):
cursor=event.target.result
if cursor is not None:
                self._data.append(cursor.value)
                # "continue" is a reserved word in Python, so invoke the JS
                # cursor method through getattr
                getattr(cursor, "continue")()
self._objectStore.openCursor(args).onsuccess=onsuccess
def fetchall(self):
yield self._data
def get(self, key, onsuccess=None, onerror=None):
self._helper(self._objectStore.get, key, onsuccess, onerror)
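# A minimal usage sketch, assuming the module is imported inside a Brython page
# whose browser supports indexedDB; the "notes" store, the example record and
# the callbacks below are hypothetical and not part of the original module.
def _example_usage():
    db = IndexedDB()
    def on_upgrade(event):
        # First open: create the store through the underlying JS API.
        event.target.result.createObjectStore("notes", {"autoIncrement": True})
    def on_open(event):
        store = db.transaction("notes", mode="readwrite").objectStore("notes")
        store.add({"text": "hello"}, onsuccess=lambda e: print("record stored"))
    db.open("example-db", on_open, onupgradeneeded=on_upgrade)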
|
{
"content_hash": "2b2d09c728bb260cf638b0e7bd788050",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 76,
"avg_line_length": 28.316831683168317,
"alnum_prop": 0.6447552447552447,
"repo_name": "coursemdetw/reeborg_tw",
"id": "c580e98b3b1fce1991da1a7e1ba2eb93312ec2de",
"size": "2860",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "src/libraries/brython/Lib/browser/indexed_db.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "195060"
},
{
"name": "JavaScript",
"bytes": "1308242"
},
{
"name": "Python",
"bytes": "2842856"
},
{
"name": "Shell",
"bytes": "26128"
}
],
"symlink_target": ""
}
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding M2M table for field numerator_options on 'Formula'
db.create_table(u'survey_formula_numerator_options', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('formula', models.ForeignKey(orm['survey.formula'], null=False)),
('questionoption', models.ForeignKey(orm['survey.questionoption'], null=False))
))
db.create_unique(u'survey_formula_numerator_options', ['formula_id', 'questionoption_id'])
# Adding M2M table for field denominator_options on 'Formula'
db.create_table(u'survey_formula_denominator_options', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('formula', models.ForeignKey(orm['survey.formula'], null=False)),
('questionoption', models.ForeignKey(orm['survey.questionoption'], null=False))
))
db.create_unique(u'survey_formula_denominator_options', ['formula_id', 'questionoption_id'])
def backwards(self, orm):
# Removing M2M table for field numerator_options on 'Formula'
db.delete_table('survey_formula_numerator_options')
# Removing M2M table for field denominator_options on 'Formula'
db.delete_table('survey_formula_denominator_options')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'locations.location': {
'Meta': {'object_name': 'Location'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'parent_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'parent_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
'point': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['locations.Point']", 'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': u"orm['locations.Location']"}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locations'", 'null': 'True', 'to': u"orm['locations.LocationType']"})
},
u'locations.locationtype': {
'Meta': {'object_name': 'LocationType'},
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'primary_key': 'True'})
},
u'locations.point': {
'Meta': {'object_name': 'Point'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'max_digits': '13', 'decimal_places': '10'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'max_digits': '13', 'decimal_places': '10'})
},
'survey.answerrule': {
'Meta': {'object_name': 'AnswerRule'},
'action': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'batch': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'batch_rule'", 'null': 'True', 'to': "orm['survey.Batch']"}),
'condition': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'next_question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'parent_question_rules'", 'null': 'True', 'to': "orm['survey.Question']"}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'rule'", 'null': 'True', 'to': "orm['survey.Question']"}),
'validate_with_max_value': ('django.db.models.fields.PositiveIntegerField', [], {'max_length': '2', 'null': 'True'}),
'validate_with_min_value': ('django.db.models.fields.PositiveIntegerField', [], {'max_length': '2', 'null': 'True'}),
'validate_with_option': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answer_rule'", 'null': 'True', 'to': "orm['survey.QuestionOption']"}),
'validate_with_question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Question']", 'null': 'True'}),
'validate_with_value': ('django.db.models.fields.PositiveIntegerField', [], {'max_length': '2', 'null': 'True'})
},
'survey.backend': {
'Meta': {'object_name': 'Backend'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20'})
},
'survey.batch': {
'Meta': {'unique_together': "(('survey', 'name'),)", 'object_name': 'Batch'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'max_length': '2', 'null': 'True'}),
'survey': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'batch'", 'null': 'True', 'to': "orm['survey.Survey']"})
},
'survey.batchlocationstatus': {
'Meta': {'object_name': 'BatchLocationStatus'},
'batch': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'open_locations'", 'null': 'True', 'to': "orm['survey.Batch']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'open_batches'", 'null': 'True', 'to': u"orm['locations.Location']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'})
},
'survey.batchquestionorder': {
'Meta': {'object_name': 'BatchQuestionOrder'},
'batch': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'batch_question_order'", 'to': "orm['survey.Batch']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'question_batch_order'", 'to': "orm['survey.Question']"})
},
'survey.formula': {
'Meta': {'object_name': 'Formula'},
'count': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'as_count'", 'null': 'True', 'to': "orm['survey.Question']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'denominator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'as_denominator'", 'null': 'True', 'to': "orm['survey.Question']"}),
'denominator_options': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'denominator_options'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['survey.QuestionOption']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'formula'", 'null': 'True', 'to': "orm['survey.Indicator']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'numerator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'as_numerator'", 'null': 'True', 'to': "orm['survey.Question']"}),
'numerator_options': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'numerator_options'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['survey.QuestionOption']"})
},
'survey.groupcondition': {
'Meta': {'unique_together': "(('value', 'attribute', 'condition'),)", 'object_name': 'GroupCondition'},
'attribute': ('django.db.models.fields.CharField', [], {'default': "'AGE'", 'max_length': '20'}),
'condition': ('django.db.models.fields.CharField', [], {'default': "'EQUALS'", 'max_length': '20'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'conditions'", 'symmetrical': 'False', 'to': "orm['survey.HouseholdMemberGroup']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'survey.household': {
'Meta': {'object_name': 'Household'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'household_code': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'investigator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'households'", 'null': 'True', 'to': "orm['survey.Investigator']"}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['locations.Location']", 'null': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'random_sample_number': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'survey': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'survey_household'", 'null': 'True', 'to': "orm['survey.Survey']"}),
'uid': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'})
},
'survey.householdbatchcompletion': {
'Meta': {'object_name': 'HouseholdBatchCompletion'},
'batch': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'batch_completion_households'", 'null': 'True', 'to': "orm['survey.Batch']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'household': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'batch_completion_batches'", 'null': 'True', 'to': "orm['survey.Household']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'investigator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Investigator']", 'null': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'})
},
'survey.householdhead': {
'Meta': {'object_name': 'HouseholdHead', '_ormbases': ['survey.HouseholdMember']},
u'householdmember_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['survey.HouseholdMember']", 'unique': 'True', 'primary_key': 'True'}),
'level_of_education': ('django.db.models.fields.CharField', [], {'default': "'Primary'", 'max_length': '100', 'null': 'True'}),
'occupation': ('django.db.models.fields.CharField', [], {'default': "'16'", 'max_length': '100'}),
'resident_since_month': ('django.db.models.fields.PositiveIntegerField', [], {'default': '5'}),
'resident_since_year': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1984'})
},
'survey.householdmember': {
'Meta': {'object_name': 'HouseholdMember'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '25', 'null': 'True', 'blank': 'True'}),
'household': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'household_member'", 'to': "orm['survey.Household']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'male': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'surname': ('django.db.models.fields.CharField', [], {'max_length': '25'})
},
'survey.householdmemberbatchcompletion': {
'Meta': {'object_name': 'HouseholdMemberBatchCompletion'},
'batch': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'completed_households'", 'null': 'True', 'to': "orm['survey.Batch']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'household': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'completed_batches'", 'null': 'True', 'to': "orm['survey.Household']"}),
'householdmember': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'completed_member_batches'", 'null': 'True', 'to': "orm['survey.HouseholdMember']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'investigator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'completed_batches'", 'null': 'True', 'to': "orm['survey.Investigator']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'})
},
'survey.householdmembergroup': {
'Meta': {'object_name': 'HouseholdMemberGroup'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'unique': 'True', 'max_length': '5'})
},
'survey.indicator': {
'Meta': {'object_name': 'Indicator'},
'batch': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Batch']", 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'measure': ('django.db.models.fields.CharField', [], {'default': "'Percentage'", 'max_length': '255'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'module': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'indicator'", 'to': "orm['survey.QuestionModule']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'survey.investigator': {
'Meta': {'object_name': 'Investigator'},
'age': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'backend': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Backend']", 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_blocked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'language': ('django.db.models.fields.CharField', [], {'default': "'English'", 'max_length': '100', 'null': 'True'}),
'level_of_education': ('django.db.models.fields.CharField', [], {'default': "'Primary'", 'max_length': '100', 'null': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['locations.Location']", 'null': 'True'}),
'male': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'mobile_number': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '10'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'weights': ('django.db.models.fields.FloatField', [], {'default': '0'})
},
'survey.locationautocomplete': {
'Meta': {'object_name': 'LocationAutoComplete'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['locations.Location']", 'null': 'True'}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
'survey.locationcode': {
'Meta': {'object_name': 'LocationCode'},
'code': ('django.db.models.fields.CharField', [], {'default': '0', 'max_length': '10'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'code'", 'to': u"orm['locations.Location']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'})
},
'survey.locationtypedetails': {
'Meta': {'object_name': 'LocationTypeDetails'},
'country': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'details'", 'null': 'True', 'to': u"orm['locations.Location']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'has_code': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'length_of_code': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'location_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'details'", 'to': u"orm['locations.LocationType']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'unique': 'True', 'null': 'True', 'blank': 'True'}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'survey.locationweight': {
'Meta': {'object_name': 'LocationWeight'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'weight'", 'to': u"orm['locations.Location']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'selection_probability': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
'survey': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'location_weight'", 'to': "orm['survey.Survey']"})
},
'survey.multichoiceanswer': {
'Meta': {'object_name': 'MultiChoiceAnswer'},
'answer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.QuestionOption']", 'null': 'True'}),
'batch': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Batch']", 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'household': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'multichoiceanswer'", 'null': 'True', 'to': "orm['survey.Household']"}),
'householdmember': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'multichoiceanswer'", 'null': 'True', 'to': "orm['survey.HouseholdMember']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'investigator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'multichoiceanswer'", 'null': 'True', 'to': "orm['survey.Investigator']"}),
'is_old': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Question']", 'null': 'True'}),
'rule_applied': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.AnswerRule']", 'null': 'True'})
},
'survey.numericalanswer': {
'Meta': {'object_name': 'NumericalAnswer'},
'answer': ('django.db.models.fields.PositiveIntegerField', [], {'max_length': '5', 'null': 'True'}),
'batch': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Batch']", 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'household': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'numericalanswer'", 'null': 'True', 'to': "orm['survey.Household']"}),
'householdmember': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'numericalanswer'", 'null': 'True', 'to': "orm['survey.HouseholdMember']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'investigator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'numericalanswer'", 'null': 'True', 'to': "orm['survey.Investigator']"}),
'is_old': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Question']", 'null': 'True'}),
'rule_applied': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.AnswerRule']", 'null': 'True'})
},
'survey.question': {
'Meta': {'object_name': 'Question'},
'answer_type': ('django.db.models.fields.CharField', [], {'max_length': '15'}),
'batches': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'questions'", 'null': 'True', 'to': "orm['survey.Batch']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'question_group'", 'null': 'True', 'to': "orm['survey.HouseholdMemberGroup']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'module': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'module_question'", 'null': 'True', 'to': "orm['survey.QuestionModule']"}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'max_length': '2', 'null': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'children'", 'null': 'True', 'to': "orm['survey.Question']"}),
'subquestion': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '150'})
},
'survey.questionmodule': {
'Meta': {'object_name': 'QuestionModule'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'survey.questionoption': {
'Meta': {'object_name': 'QuestionOption'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'max_length': '2', 'null': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'options'", 'null': 'True', 'to': "orm['survey.Question']"}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '150'})
},
'survey.randomhouseholdselection': {
'Meta': {'object_name': 'RandomHouseHoldSelection'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mobile_number': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'no_of_households': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'selected_households': ('django.db.models.fields.CharField', [], {'max_length': '510'}),
'survey': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'random_household'", 'null': 'True', 'to': "orm['survey.Survey']"})
},
'survey.survey': {
'Meta': {'object_name': 'Survey'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
'has_sampling': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'sample_size': ('django.db.models.fields.PositiveIntegerField', [], {'default': '10', 'max_length': '2'}),
'type': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'survey.textanswer': {
'Meta': {'object_name': 'TextAnswer'},
'answer': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'batch': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Batch']", 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'household': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'textanswer'", 'null': 'True', 'to': "orm['survey.Household']"}),
'householdmember': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'textanswer'", 'null': 'True', 'to': "orm['survey.HouseholdMember']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'investigator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'textanswer'", 'null': 'True', 'to': "orm['survey.Investigator']"}),
'is_old': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Question']", 'null': 'True'}),
'rule_applied': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.AnswerRule']", 'null': 'True'})
},
'survey.uploaderrorlog': {
'Meta': {'object_name': 'UploadErrorLog'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'error': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'filename': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'row_number': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'})
},
'survey.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mobile_number': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '10'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'userprofile'", 'unique': 'True', 'to': u"orm['auth.User']"})
}
}
complete_apps = ['survey']
|
{
"content_hash": "16ca5f4367b913fa268e3fd45c5d78f7",
"timestamp": "",
"source": "github",
"line_count": 402,
"max_line_length": 229,
"avg_line_length": 90.31343283582089,
"alnum_prop": 0.5716410510659395,
"repo_name": "unicefuganda/mics",
"id": "76136f69d0dff758d80ac8912ed4f32923cc0f85",
"size": "36330",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "survey/migrations/0114_auto.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "37725"
},
{
"name": "JavaScript",
"bytes": "390607"
},
{
"name": "Python",
"bytes": "5209696"
},
{
"name": "Shell",
"bytes": "1277"
}
],
"symlink_target": ""
}
|
import sys
import os
import os.path
import subprocess
import sphinx_rtd_theme
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.coverage', 'sphinx.ext.viewcode', 'sphinx.ext.autodoc', 'sphinx.ext.napoleon']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'lantern'
copyright = '2018, Tim Paine'
author = 'Tim Paine'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = 'v0.1.6'
# The full version, including alpha/beta/rc tags.
release = 'v0.1.6'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'relations.html', # needs 'show_related': True theme option to display
'searchbox.html',
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'lanterndoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'lantern.tex', 'lantern Documentation',
'Tim Paine', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'lantern', 'lantern Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'lantern', 'lantern Documentation',
author, 'lantern', 'One line description of project.',
'Miscellaneous'),
]
def run_apidoc(_):
out_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), 'api'))
proj_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'lantern'))
cmd_path = 'sphinx-apidoc'
if hasattr(sys, 'real_prefix'): # Check to see if we are in a virtualenv
# If we are, assemble the path manually
cmd_path = os.path.abspath(os.path.join(sys.prefix, 'bin', 'sphinx-apidoc'))
subprocess.check_call([cmd_path,
'-E',
'-M',
'-o',
out_dir,
proj_dir,
'--force'])
def setup(app):
app.connect('builder-inited', run_apidoc)
|
{
"content_hash": "fb8f3cf7548be8177e198c5d24f53793",
"timestamp": "",
"source": "github",
"line_count": 172,
"max_line_length": 104,
"avg_line_length": 31.563953488372093,
"alnum_prop": 0.6422913980475226,
"repo_name": "timkpaine/lantern",
"id": "0e574b3461361ae4d2c6d52484caa58fc9b397ae",
"size": "6112",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "docs/conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "2428"
},
{
"name": "JavaScript",
"bytes": "1323"
},
{
"name": "Jupyter Notebook",
"bytes": "22181"
},
{
"name": "Makefile",
"bytes": "1829"
},
{
"name": "Python",
"bytes": "183613"
},
{
"name": "Shell",
"bytes": "131"
}
],
"symlink_target": ""
}
|
import json
import logging
from threading import RLock, Thread
from typing import Any, Dict, List, Optional, Tuple, Union
from urllib.parse import unquote_plus, urlparse
from websocket import ABNF, STATUS_NORMAL, WebSocketApp, enableTrace
from streamlink.logger import TRACE, root as rootlogger
from streamlink.session import Streamlink
log = logging.getLogger(__name__)
class WebsocketClient(Thread):
_id: int = 0
ws: WebSocketApp
def __init__(
self,
session: Streamlink,
url: str,
subprotocols: Optional[List[str]] = None,
header: Optional[Union[List, Dict]] = None,
cookie: Optional[str] = None,
sockopt: Optional[Tuple] = None,
sslopt: Optional[Dict] = None,
host: Optional[str] = None,
origin: Optional[str] = None,
suppress_origin: bool = False,
ping_interval: Union[int, float] = 0,
ping_timeout: Optional[Union[int, float]] = None,
ping_payload: str = ""
):
if rootlogger.level <= TRACE:
enableTrace(True, log)
if not header:
header = []
if not any(True for h in header if h.startswith("User-Agent: ")):
header.append(f"User-Agent: {session.http.headers['User-Agent']}")
proxy_options = {}
http_proxy: Optional[str] = session.get_option("http-proxy")
if http_proxy:
p = urlparse(http_proxy)
proxy_options["proxy_type"] = p.scheme
proxy_options["http_proxy_host"] = p.hostname
if p.port: # pragma: no branch
proxy_options["http_proxy_port"] = p.port
if p.username: # pragma: no branch
proxy_options["http_proxy_auth"] = unquote_plus(p.username), unquote_plus(p.password or "")
self._reconnect = False
self._reconnect_lock = RLock()
self.session = session
self._ws_init(url, subprotocols, header, cookie)
self._ws_rundata = dict(
sockopt=sockopt,
sslopt=sslopt,
host=host,
origin=origin,
suppress_origin=suppress_origin,
ping_interval=ping_interval,
ping_timeout=ping_timeout,
ping_payload=ping_payload,
**proxy_options
)
self._id += 1
super().__init__(
name=f"Thread-{self.__class__.__name__}-{self._id}",
daemon=True
)
def _ws_init(self, url, subprotocols, header, cookie):
self.ws = WebSocketApp(
url=url,
subprotocols=subprotocols,
header=header,
cookie=cookie,
on_open=self.on_open,
on_error=self.on_error,
on_close=self.on_close,
on_ping=self.on_ping,
on_pong=self.on_pong,
on_message=self.on_message,
on_cont_message=self.on_cont_message,
on_data=self.on_data
)
def run(self) -> None:
while True:
log.debug(f"Connecting to: {self.ws.url}")
self.ws.run_forever(**self._ws_rundata)
# check if closed via a reconnect() call
with self._reconnect_lock:
if not self._reconnect:
return
self._reconnect = False
# ----
def reconnect(
self,
        url: Optional[str] = None,
subprotocols: Optional[List[str]] = None,
header: Optional[Union[List, Dict]] = None,
cookie: Optional[str] = None,
closeopts: Optional[Dict] = None
) -> None:
with self._reconnect_lock:
# ws connection is not active (anymore)
if not self.ws.keep_running:
return
log.debug("Reconnecting...")
self._reconnect = True
self.ws.close(**(closeopts or {}))
self._ws_init(
url=self.ws.url if url is None else url,
subprotocols=self.ws.subprotocols if subprotocols is None else subprotocols,
header=self.ws.header if header is None else header,
cookie=self.ws.cookie if cookie is None else cookie
)
def close(self, status: int = STATUS_NORMAL, reason: Union[str, bytes] = "", timeout: int = 3) -> None:
self.ws.close(status=status, reason=bytes(reason, encoding="utf-8"), timeout=timeout)
if self.is_alive(): # pragma: no branch
self.join()
def send(self, data: Union[str, bytes], opcode: int = ABNF.OPCODE_TEXT) -> None:
return self.ws.send(data, opcode)
def send_json(self, data: Any) -> None:
return self.send(json.dumps(data, indent=None, separators=(",", ":")))
# ----
# noinspection PyMethodMayBeStatic
def on_open(self, wsapp: WebSocketApp) -> None:
log.debug(f"Connected: {wsapp.url}") # pragma: no cover
# noinspection PyMethodMayBeStatic
# noinspection PyUnusedLocal
def on_error(self, wsapp: WebSocketApp, error: Exception) -> None:
log.error(error) # pragma: no cover
# noinspection PyMethodMayBeStatic
# noinspection PyUnusedLocal
def on_close(self, wsapp: WebSocketApp, status: int, message: str) -> None:
log.debug(f"Closed: {wsapp.url}") # pragma: no cover
def on_ping(self, wsapp: WebSocketApp, data: str) -> None:
pass # pragma: no cover
def on_pong(self, wsapp: WebSocketApp, data: str) -> None:
pass # pragma: no cover
def on_message(self, wsapp: WebSocketApp, data: str) -> None:
pass # pragma: no cover
def on_cont_message(self, wsapp: WebSocketApp, data: str, cont: Any) -> None:
pass # pragma: no cover
def on_data(self, wsapp: WebSocketApp, data: str, data_type: int, cont: Any) -> None:
pass # pragma: no cover
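# A minimal subclass sketch showing how the callback hooks above are meant to be
# overridden; the EchoClient name and the URL are illustrative, not part of
# streamlink itself.
class EchoClient(WebsocketClient):
    def on_open(self, wsapp: WebSocketApp) -> None:
        self.send_json({"type": "hello"})
    def on_message(self, wsapp: WebSocketApp, data: str) -> None:
        log.debug(f"Received: {data}")
# Typical lifecycle (given an existing Streamlink session):
#   client = EchoClient(session, "wss://example.invalid/socket")
#   client.start()   # runs the websocket loop in its own daemon thread
#   ...
#   client.close()   # closes the socket and joins the thread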
|
{
"content_hash": "3bbd59f097afe847a00d79a5b1890426",
"timestamp": "",
"source": "github",
"line_count": 169,
"max_line_length": 107,
"avg_line_length": 34.44378698224852,
"alnum_prop": 0.5753306991925786,
"repo_name": "gravyboat/streamlink",
"id": "515fc6e46b1c1c3323d5eeb4b8137cdd76791f76",
"size": "5821",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/streamlink/plugin/api/websocket.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "1392475"
},
{
"name": "Shell",
"bytes": "6280"
}
],
"symlink_target": ""
}
|
from os import listdir, remove
from os.path import exists, join
from .output import generate_output, generate_output_name
def process_queue(source_directory, destination_directory, queue_directory, encoding_profile):
queue = listdir(queue_directory)
for queued_name in queue:
source_file = join(source_directory, queued_name)
if exists(source_file):
destination_file = generate_output_name(source_file, destination_directory)
generate_output(source_file, destination_file, encoding_profile)
queued_file = join(queue_directory, queued_name)
remove(queued_file)
else:
print("Warning: {0} does not exist".format(source_file))
|
{
"content_hash": "131b3114cc07e865c4367b876b91ac17",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 94,
"avg_line_length": 43.23529411764706,
"alnum_prop": 0.6761904761904762,
"repo_name": "kbarnes3/MKVtoMP4",
"id": "3bf40963e53ed4145778a9d40ba42777be0fd0bb",
"size": "735",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/MKVtoMP4/queue.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "PowerShell",
"bytes": "3938"
},
{
"name": "Python",
"bytes": "21289"
}
],
"symlink_target": ""
}
|
import unittest
from Change import *
class ChangeTest(unittest.TestCase):
def setUp(self):
self.change = Change()
def test_zero_cents(self):
self.assertEqual(self.change.makeChange(0), {})
def test_one_cent(self):
self.assertEqual(self.change.makeChange(1), {'p':1})
def test_four_cents(self):
self.assertEqual(self.change.makeChange(4), {'p':4})
def test_five_cents(self):
self.assertEqual(self.change.makeChange(5), {'n':1})
def test_eight_cents(self):
self.assertEqual(self.change.makeChange(8), {'n':1, 'p':3})
def test_ten_cents(self):
self.assertEqual(self.change.makeChange(10), {'d':1})
def test_twenty_five_cents(self):
self.assertEqual(self.change.makeChange(25), {'q':1})
def test_forty_eight_cents(self):
self.assertEqual(self.change.makeChange(48), {'q':1, 'd':2, 'p':3})
def test_fifty_cents(self):
self.assertEqual(self.change.makeChange(50), {'h':1})
def test_seventy_four_cents(self):
self.assertEqual(self.change.makeChange(74), {'h':1, 'd':2, 'p':4})
def test_ninety_one_cents(self):
self.assertEqual(self.change.makeChange(91), {'h':1, 'q':1, 'd':1, 'n':1, 'p':1})
if __name__ == '__main__':
unittest.main()
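# A minimal greedy implementation consistent with the tests above, shown only as
# an illustrative sketch of what Change.makeChange is expected to do; the real
# Change module may be written differently.
class _GreedyChange:
    DENOMINATIONS = (('h', 50), ('q', 25), ('d', 10), ('n', 5), ('p', 1))
    def makeChange(self, cents):
        change = {}
        for symbol, value in self.DENOMINATIONS:
            count, cents = divmod(cents, value)
            if count:
                change[symbol] = count
        return change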
|
{
"content_hash": "d70abd984dc92555f860fdb0f3b17634",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 85,
"avg_line_length": 28.3953488372093,
"alnum_prop": 0.6502866502866503,
"repo_name": "Bjornkjohnson/makeChangePython",
"id": "72e1a02a5b8e77f23a9b756be2951466409cf819",
"size": "1221",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ChangeTest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1712"
}
],
"symlink_target": ""
}
|
import frappe, subprocess, os
from six.moves import input
def setup_database(force, source_sql=None, verbose=False):
root_conn = get_root_connection()
root_conn.commit()
root_conn.sql("DROP DATABASE IF EXISTS `{0}`".format(frappe.conf.db_name))
root_conn.sql("DROP USER IF EXISTS {0}".format(frappe.conf.db_name))
root_conn.sql("CREATE DATABASE `{0}`".format(frappe.conf.db_name))
root_conn.sql("CREATE user {0} password '{1}'".format(frappe.conf.db_name,
frappe.conf.db_password))
root_conn.sql("GRANT ALL PRIVILEGES ON DATABASE `{0}` TO {0}".format(frappe.conf.db_name))
# we can't pass psql password in arguments in postgresql as mysql. So
# set password connection parameter in environment variable
subprocess_env = os.environ.copy()
subprocess_env['PGPASSWORD'] = str(frappe.conf.db_password)
# bootstrap db
if not source_sql:
source_sql = os.path.join(os.path.dirname(__file__), 'framework_postgres.sql')
subprocess.check_output([
'psql', frappe.conf.db_name, '-h', frappe.conf.db_host or 'localhost', '-U',
frappe.conf.db_name, '-f', source_sql
], env=subprocess_env)
frappe.connect()
def setup_help_database(help_db_name):
root_conn = get_root_connection()
root_conn.sql("DROP DATABASE IF EXISTS `{0}`".format(help_db_name))
root_conn.sql("DROP USER IF EXISTS {0}".format(help_db_name))
root_conn.sql("CREATE DATABASE `{0}`".format(help_db_name))
root_conn.sql("CREATE user {0} password '{1}'".format(help_db_name, help_db_name))
root_conn.sql("GRANT ALL PRIVILEGES ON DATABASE `{0}` TO {0}".format(help_db_name))
def get_root_connection(root_login=None, root_password=None):
import getpass
if not frappe.local.flags.root_connection:
if not root_login:
root_login = frappe.conf.get("root_login") or None
if not root_login:
root_login = input("Enter postgres super user: ")
if not root_password:
root_password = frappe.conf.get("root_password") or None
if not root_password:
root_password = getpass.getpass("Postgres super user password: ")
frappe.local.flags.root_connection = frappe.database.get_db(user=root_login, password=root_password)
return frappe.local.flags.root_connection
|
{
"content_hash": "aaf6caae6c7a4baabe3d5c2cf83eae49",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 102,
"avg_line_length": 39.94444444444444,
"alnum_prop": 0.7204450625869263,
"repo_name": "vjFaLk/frappe",
"id": "1dc1ea4c97b1c4e90550beda73af1922e4244502",
"size": "2157",
"binary": false,
"copies": "1",
"ref": "refs/heads/parsimony-production",
"path": "frappe/database/postgres/setup_db.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "290337"
},
{
"name": "HTML",
"bytes": "179507"
},
{
"name": "JavaScript",
"bytes": "2179734"
},
{
"name": "Less",
"bytes": "146135"
},
{
"name": "Makefile",
"bytes": "99"
},
{
"name": "Python",
"bytes": "2774237"
},
{
"name": "SCSS",
"bytes": "15721"
},
{
"name": "Shell",
"bytes": "3875"
},
{
"name": "Vue",
"bytes": "95109"
}
],
"symlink_target": ""
}
|
''' Generate a changelog for JupyterLab from the GitHub releases '''
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import re
import requests
import dateutil.parser
# Get the list of releases.
r = requests.get('https://api.github.com/repos/jupyterlab/jupyterlab/releases')
if r.status_code != 200:
    raise RuntimeError(f'GitHub API request failed with status {r.status_code}')
releases = r.json()
with open('CHANGELOG.md', 'w') as f:
f.write('# JupyterLab Changelog\n\n')
for release in releases:
name = release['name']
tag_name = release['tag_name']
tag_url = release['html_url']
tag_date = dateutil.parser.parse(release['published_at'])
notes = release['body'].replace('\r\n', '\n')
notes = re.sub(r'#([0-9]+)',
r'[#\1](https://github.com/jupyterlab/jupyterlab/issues/\1)',
notes)
title = f'{name} ({tag_name})' if name != tag_name else name
f.write(f'## [{title}]({tag_url})\n')
f.write(f'#### {tag_date.strftime("%b %d, %Y")}\n')
f.write(notes)
f.write('\n\n')
|
{
"content_hash": "01a0d223ba740c829b50519586640420",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 88,
"avg_line_length": 37.25806451612903,
"alnum_prop": 0.5567099567099567,
"repo_name": "jupyter/jupyterlab",
"id": "1259e289c4436ba89f3e2040d6203da96b2d8c59",
"size": "1155",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "scripts/generate_changelog.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "7475"
},
{
"name": "CSS",
"bytes": "94068"
},
{
"name": "HTML",
"bytes": "1493"
},
{
"name": "JavaScript",
"bytes": "9240"
},
{
"name": "Makefile",
"bytes": "7654"
},
{
"name": "Python",
"bytes": "74649"
},
{
"name": "Shell",
"bytes": "2344"
},
{
"name": "TypeScript",
"bytes": "1090669"
}
],
"symlink_target": ""
}
|
import inspect
import pydoc
import re
from django.utils.datastructures import SortedDict
def get_arguments(method):
args, varargs, keywords, defaults = inspect.getargspec(method)
if args[0] in ('self', 'cls'):
args = args[1:]
else:
args = list(args)
kwargs = {}
defaults = list(defaults or [])
while defaults:
        # defaults correspond to the trailing args, so consume both from the end
        kwargs[args.pop()] = defaults.pop()
return tuple(args), kwargs
def get_method_info(method):
info = {}
help_text = pydoc.getdoc(method)
args = SortedDict()
    desc_re = re.compile(r':(?P<desc>param|parameter|arg|argument|key|keyword)\s+(?P<name>.+):\s+(?P<value>.+)')
    type_re = re.compile(r':(?P<type>type)\s+(?P<name>.+):\s+(?P<value>.+)')
for expression in (desc_re, type_re):
for match in expression.finditer(help_text):
data = match.groupdict()
if 'desc' in data:
key = 'desc'
else:
key = 'type'
name = data['name']
value = data['value']
args.setdefault(name, {})
args[name][key] = value
help_text = expression.sub('', help_text)
if args:
info['args'] = args
    desc_re = re.compile(r':(?P<desc>returns?):\s+(?P<value>.+)')
    type_re = re.compile(r':(?P<type>rtype):\s+(?P<value>.+)')
for expression in (desc_re, type_re):
match = expression.search(help_text)
if match:
data = match.groupdict()
if 'desc' in data:
key = 'desc'
else:
key = 'type'
value = data['value']
info.setdefault('returns', {})
info['returns'][key] = value
help_text = expression.sub('', help_text)
info['help_text'] = help_text.strip()
info['signature'] = get_signature(method)
return info
def get_signature(method):
args, varargs, varkw, defaults = inspect.getargspec(method)
if args[0] in ('self', 'cls'):
args = args[1:]
return inspect.formatargspec(args, varargs, varkw, defaults).strip('()')
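# An illustrative sketch of the docstring format these helpers parse; the sample
# function below is hypothetical and not part of the original module.
def _sample_method(name, count=1):
    """Return a greeting.
    :param name: who to greet
    :type name: str
    :keyword count: how many times to repeat the greeting
    :type count: int
    :returns: the formatted greeting
    :rtype: str
    """
    return ' '.join(['Hello %s' % name] * count)
# get_method_info(_sample_method) returns the stripped help text, the signature
# "name, count=1", per-argument 'desc'/'type' entries under 'args', and a
# 'returns' entry with its own 'desc' and 'type'.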
|
{
"content_hash": "26e368adf233c40ca94829e73eac1915",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 111,
"avg_line_length": 28.47945205479452,
"alnum_prop": 0.5473785473785474,
"repo_name": "apn-online/django-xmlrpc-wp",
"id": "d26c5794c4dbd47930c9958c6be2bae90b662a6d",
"size": "2079",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "xmlrpc_wp/utils.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "448"
},
{
"name": "JavaScript",
"bytes": "10084"
},
{
"name": "Python",
"bytes": "39511"
}
],
"symlink_target": ""
}
|
import asyncio
import json
import signal
import sys
import aiohttp
loop = asyncio.get_event_loop()
client = aiohttp.ClientSession(loop=loop)
async def get_json(client, url):
async with client.get(url) as response:
assert response.status == 200
return await response.read()
async def get_reddit_top(subreddit, client):
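    # Fetch the day's top five posts for a subreddit and print score, title and URL.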
data1 = await get_json(client, 'https://www.reddit.com/r/' + subreddit + '/top.json?sort=top&t=day&limit=5')
j = json.loads(data1.decode('utf-8'))
for i in j['data']['children']:
score = i['data']['score']
title = i['data']['title']
link = i['data']['url']
print(str(score) + ': ' + title + ' (' + link + ')')
print('DONE:', subreddit + '\n')
def signal_handler(signal, frame):
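    # Shut down cleanly on Ctrl-C: stop the loop, close the HTTP session and exit.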
loop.stop()
client.close()
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
asyncio.ensure_future(get_reddit_top('python', client))
asyncio.ensure_future(get_reddit_top('programming', client))
asyncio.ensure_future(get_reddit_top('compsci', client))
loop.run_forever()
|
{
"content_hash": "e0b7b0b9f2666b03719e15d43531e2e6",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 112,
"avg_line_length": 25.38095238095238,
"alnum_prop": 0.648217636022514,
"repo_name": "sparky952/Dashie",
"id": "6af24956ba3a24dac2029d48c1d593099e869364",
"size": "1066",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dashie/reddit.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "259492"
}
],
"symlink_target": ""
}
|
from tempest.api.volume import base
from tempest import config
from tempest.lib import decorators
CONF = config.CONF
class VolumesSnapshotListTestJSON(base.BaseVolumeTest):
@classmethod
def skip_checks(cls):
super(VolumesSnapshotListTestJSON, cls).skip_checks()
if not CONF.volume_feature_enabled.snapshot:
raise cls.skipException("Cinder volume snapshots are disabled")
@classmethod
def resource_setup(cls):
super(VolumesSnapshotListTestJSON, cls).resource_setup()
volume_origin = cls.create_volume()
# Create snapshots with params
for _ in range(3):
snapshot = cls.create_snapshot(volume_origin['id'])
cls.snapshot = snapshot
def _list_by_param_values_and_assert(self, with_detail=False, **params):
"""list or list_details with given params and validates result."""
fetched_snap_list = self.snapshots_client.list_snapshots(
detail=with_detail, **params)['snapshots']
# Validating params of fetched snapshots
for snap in fetched_snap_list:
for key in params:
msg = "Failed to list snapshots %s by %s" % \
('details' if with_detail else '', key)
self.assertEqual(params[key], snap[key], msg)
def _list_snapshots_by_param_limit(self, limit, expected_elements):
"""list snapshots by limit param"""
# Get snapshots list using limit parameter
fetched_snap_list = self.snapshots_client.list_snapshots(
limit=limit)['snapshots']
# Validating filtered snapshots length equals to expected_elements
self.assertEqual(expected_elements, len(fetched_snap_list))
@decorators.idempotent_id('59f41f43-aebf-48a9-ab5d-d76340fab32b')
def test_snapshots_list_with_params(self):
"""list snapshots with params."""
# Verify list snapshots by display_name filter
params = {'name': self.snapshot['name']}
self._list_by_param_values_and_assert(**params)
# Verify list snapshots by status filter
params = {'status': 'available'}
self._list_by_param_values_and_assert(**params)
# Verify list snapshots by status and display name filter
params = {'status': 'available',
'name': self.snapshot['name']}
self._list_by_param_values_and_assert(**params)
@decorators.idempotent_id('220a1022-1fcd-4a74-a7bd-6b859156cda2')
def test_snapshots_list_details_with_params(self):
"""list snapshot details with params."""
# Verify list snapshot details by display_name filter
params = {'name': self.snapshot['name']}
self._list_by_param_values_and_assert(with_detail=True, **params)
# Verify list snapshot details by status filter
params = {'status': 'available'}
self._list_by_param_values_and_assert(with_detail=True, **params)
# Verify list snapshot details by status and display name filter
params = {'status': 'available',
'name': self.snapshot['name']}
self._list_by_param_values_and_assert(with_detail=True, **params)
@decorators.idempotent_id('db4d8e0a-7a2e-41cc-a712-961f6844e896')
def test_snapshot_list_param_limit(self):
# List returns limited elements
self._list_snapshots_by_param_limit(limit=1, expected_elements=1)
@decorators.idempotent_id('a1427f61-420e-48a5-b6e3-0b394fa95400')
def test_snapshot_list_param_limit_equals_infinite(self):
        # List returns all elements when the requested limit exceeds
        # the number of existing snapshots
snap_list = self.snapshots_client.list_snapshots()['snapshots']
self._list_snapshots_by_param_limit(limit=100000,
expected_elements=len(snap_list))
@decorators.idempotent_id('e3b44b7f-ae87-45b5-8a8c-66110eb24d0a')
def test_snapshot_list_param_limit_equals_zero(self):
# List returns zero elements
self._list_snapshots_by_param_limit(limit=0, expected_elements=0)
def _list_snapshots_param_sort(self, sort_key, sort_dir):
"""list snapshots by sort param"""
snap_list = self.snapshots_client.list_snapshots(
sort_key=sort_key, sort_dir=sort_dir)['snapshots']
self.assertNotEmpty(snap_list)
if sort_key == 'display_name':
sort_key = 'name'
            # Note: the Cinder API accepts 'display_name' as a sort key in the
            # request, but the snapshot name comes back as 'name' in the
            # response, so Tempest has to rename the key here to cope with the
            # inconsistency.
sorted_list = [snapshot[sort_key] for snapshot in snap_list]
msg = 'The list of snapshots was not sorted correctly.'
self.assertEqual(sorted(sorted_list, reverse=(sort_dir == 'desc')),
sorted_list, msg)
@decorators.idempotent_id('c5513ada-64c1-4d28-83b9-af3307ec1388')
def test_snapshot_list_param_sort_id_asc(self):
self._list_snapshots_param_sort(sort_key='id', sort_dir='asc')
@decorators.idempotent_id('8a7fe058-0b41-402a-8afd-2dbc5a4a718b')
def test_snapshot_list_param_sort_id_desc(self):
self._list_snapshots_param_sort(sort_key='id', sort_dir='desc')
@decorators.idempotent_id('4052c3a0-2415-440a-a8cc-305a875331b0')
def test_snapshot_list_param_sort_created_at_asc(self):
self._list_snapshots_param_sort(sort_key='created_at', sort_dir='asc')
@decorators.idempotent_id('dcbbe24a-f3c0-4ec8-9274-55d48db8d1cf')
def test_snapshot_list_param_sort_created_at_desc(self):
self._list_snapshots_param_sort(sort_key='created_at', sort_dir='desc')
@decorators.idempotent_id('d58b5fed-0c37-42d3-8c5d-39014ac13c00')
def test_snapshot_list_param_sort_name_asc(self):
self._list_snapshots_param_sort(sort_key='display_name',
sort_dir='asc')
@decorators.idempotent_id('96ba6f4d-1f18-47e1-b4bc-76edc6c21250')
def test_snapshot_list_param_sort_name_desc(self):
self._list_snapshots_param_sort(sort_key='display_name',
sort_dir='desc')
@decorators.idempotent_id('05489dde-44bc-4961-a1f5-3ce7ee7824f7')
def test_snapshot_list_param_marker(self):
# The list of snapshots should end before the provided marker
snap_list = self.snapshots_client.list_snapshots()['snapshots']
        # list_snapshots returns snapshots in reverse creation order, so
        # reverse the ids back into creation order.
snapshot_id_list = [snap['id'] for snap in snap_list][::-1]
params = {'marker': snapshot_id_list[1]}
snap_list = self.snapshots_client.list_snapshots(**params)['snapshots']
fetched_list_id = [snap['id'] for snap in snap_list]
        # Verify that the list of snapshots ends before the provided
        # marker (the second snapshot), so only the first snapshot
        # should be displayed.
self.assertEqual(snapshot_id_list[:1], fetched_list_id)
@decorators.idempotent_id('ca96d551-17c6-4e11-b0e8-52d3bb8a63c7')
def test_snapshot_list_param_offset(self):
params = {'offset': 2, 'limit': 3}
snap_list = self.snapshots_client.list_snapshots(**params)['snapshots']
        # Verify that the list skips the first two snapshots (offset=2) of the
        # three created, so only one snapshot should be returned.
self.assertEqual(1, len(snap_list))
|
{
"content_hash": "be568d7495b2cce120403b3135a39f92",
"timestamp": "",
"source": "github",
"line_count": 158,
"max_line_length": 79,
"avg_line_length": 47.22784810126582,
"alnum_prop": 0.6554543017957652,
"repo_name": "cisco-openstack/tempest",
"id": "f4f039c3cc188fa699ff82f728894f6fe9f4898c",
"size": "8035",
"binary": false,
"copies": "1",
"ref": "refs/heads/proposed",
"path": "tempest/api/volume/test_volumes_snapshots_list.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4431271"
},
{
"name": "Shell",
"bytes": "7435"
}
],
"symlink_target": ""
}
|
import random
from django.utils.encoding import smart_unicode
import jinja2
from jingo import register, env
from tower import ugettext as _
import amo
@register.function
def emaillink(email, title=None, klass=None):
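    # Obfuscate the address so it is not machine-readable in the page source:
    # reverse it, inject junk markup, and swap @ and . for HTML entities.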
if not email:
return ""
fallback = email[::-1] # reverse
# inject junk somewhere
i = random.randint(0, len(email) - 1)
fallback = u"%s%s%s" % (jinja2.escape(fallback[:i]),
u'<span class="i">null</span>',
jinja2.escape(fallback[i:]))
    # replace @ and . with their HTML entity equivalents so the address stays
    # unreadable to scrapers
    fallback = fallback.replace('@', '&#x0040;').replace('.', '&#x002E;')
if title:
title = jinja2.escape(title)
else:
title = '<span class="emaillink">%s</span>' % fallback
node = (u'<a%s href="#">%s</a><span class="emaillink js-hidden">%s</span>'
% ((' class="%s"' % klass) if klass else '', title, fallback))
return jinja2.Markup(node)
@register.filter
def user_link(user):
if not user:
return ''
return jinja2.Markup(_user_link(user))
@register.function
def users_list(users, size=None, max_text_length=None):
if not users:
return ''
tail = []
if size and size < len(users):
users = users[:size]
tail = [_('others', 'user_list_others')]
if max_text_length:
user_list = [_user_link(user, max_text_length) for user in users]
else:
user_list = map(_user_link, users)
return jinja2.Markup(', '.join(user_list + tail))
@register.inclusion_tag('users/helpers/addon_users_list.html')
@jinja2.contextfunction
def addon_users_list(context, addon):
ctx = dict(context.items())
ctx.update(addon=addon, amo=amo)
return ctx
def _user_link(user, max_text_length=None):
if isinstance(user, basestring):
return user
username = user.name
if max_text_length and len(user.name) > max_text_length:
username = user.name[:max_text_length].strip() + '...'
return u'<a href="%s" title="%s">%s</a>' % (
user.get_url_path(), jinja2.escape(user.name),
jinja2.escape(smart_unicode(username)))
@register.filter
@jinja2.contextfilter
def user_vcard(context, user, table_class='person-info', is_profile=False):
c = dict(context.items())
c.update({
'profile': user,
'table_class': table_class,
'is_profile': is_profile
})
t = env.get_template('users/vcard.html').render(c)
return jinja2.Markup(t)
@register.inclusion_tag('users/report_abuse.html')
@jinja2.contextfunction
def user_report_abuse(context, hide, profile):
new = dict(context.items())
new.update({'hide': hide, 'profile': profile,
'abuse_form': context['abuse_form']})
return new
@register.filter
def contribution_type(type):
return amo.CONTRIB_TYPES[type]
@register.function
def user_data(amo_user):
anonymous, currency, pre_auth, email = True, 'USD', False, ''
if hasattr(amo_user, 'is_anonymous'):
anonymous = amo_user.is_anonymous()
if not anonymous:
email = amo_user.email
return {'anonymous': anonymous, 'currency': currency, 'email': email}
|
{
"content_hash": "ed7250c14380ed123f94d8f4522ac74e",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 78,
"avg_line_length": 27.00854700854701,
"alnum_prop": 0.6186708860759493,
"repo_name": "anaran/olympia",
"id": "25949e56f2e4653b469054947715edb16cde31e0",
"size": "3160",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/users/helpers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4145"
},
{
"name": "CSS",
"bytes": "655390"
},
{
"name": "JavaScript",
"bytes": "1274629"
},
{
"name": "Puppet",
"bytes": "13790"
},
{
"name": "Python",
"bytes": "3648656"
},
{
"name": "Shell",
"bytes": "18646"
}
],
"symlink_target": ""
}
|
from django import forms
from django.http import HttpResponse
from django.template import Context, Template
from django.views.generic.edit import UpdateView
from .models import Article
class ArticleForm(forms.ModelForm):
content = forms.CharField(strip=False, widget=forms.Textarea)
class Meta:
model = Article
fields = "__all__"
class ArticleFormView(UpdateView):
model = Article
success_url = "/"
form_class = ArticleForm
def form_view(request):
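    # Render a bare page containing an unbound form with a single FloatField.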
class Form(forms.Form):
number = forms.FloatField()
template = Template("<html>{{ form }}</html>")
context = Context({"form": Form()})
return HttpResponse(template.render(context))
|
{
"content_hash": "ee3862efe27cce0bc7895ad5f6ada630",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 65,
"avg_line_length": 24.06896551724138,
"alnum_prop": 0.6948424068767909,
"repo_name": "takis/django",
"id": "b03472824d1a7bc2d8b03b219e44ce9461d1359b",
"size": "698",
"binary": false,
"copies": "17",
"ref": "refs/heads/master",
"path": "tests/forms_tests/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "91756"
},
{
"name": "HTML",
"bytes": "238967"
},
{
"name": "JavaScript",
"bytes": "157514"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Procfile",
"bytes": "47"
},
{
"name": "Python",
"bytes": "16141182"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "392"
}
],
"symlink_target": ""
}
|