text stringlengths 4 1.02M | meta dict |
|---|---|
import logging
import server.util.tags as tag_utl
from server.cache import cache
from server.auth import user_admin_mediacloud_client
import server.views.sources.apicache as apicache
from server.views.stories import QUERY_LAST_MONTH, QUERY_ENGLISH_LANGUAGE
logger = logging.getLogger(__name__)
@cache.cache_on_arguments()
def cached_geotag_count(query):
    """Return GEO-tagged story counts for *query* over the last month.

    Each result row gains:
      * ``pct``   - fraction of all last-month stories carrying the tag
      * ``value`` - the raw tag count as a float
    Results are cached on the query argument.
    """
    user_mc = user_admin_mediacloud_client()
    res = user_mc.storyTagCount(query, [QUERY_LAST_MONTH, QUERY_ENGLISH_LANGUAGE],
                                tag_sets_id=tag_utl.GEO_TAG_SET)
    full_count = apicache.timeperiod_story_count(query, QUERY_LAST_MONTH)['count']
    for r in res:
        # Guard against ZeroDivisionError when the query matches no stories.
        r['pct'] = (float(r['count']) / float(full_count)) if full_count else 0.0
        r['value'] = float(r['count'])
    return res
| {
"content_hash": "4fb5c1cca7a0652877b557da96fc4624",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 115,
"avg_line_length": 36.65,
"alnum_prop": 0.7216916780354706,
"repo_name": "mitmedialab/MediaCloud-Web-Tools",
"id": "6a67085b1372805e56d3dd6a6541cde42fbd2fdd",
"size": "733",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/views/sources/geocount.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "88991"
},
{
"name": "HTML",
"bytes": "1288"
},
{
"name": "JavaScript",
"bytes": "684272181"
},
{
"name": "Python",
"bytes": "700197"
},
{
"name": "Shell",
"bytes": "268"
}
],
"symlink_target": ""
} |
"""
=======================================
Receiver Operating Characteristic (ROC)
=======================================
Example of Receiver Operating Characteristic (ROC) metric to evaluate
classifier output quality.
ROC curves typically feature true positive rate on the Y axis, and false
positive rate on the X axis. This means that the top left corner of the plot is
the "ideal" point - a false positive rate of zero, and a true positive rate of
one. This is not very realistic, but it does mean that a larger area under the
curve (AUC) is usually better.
The "steepness" of ROC curves is also important, since it is ideal to maximize
the true positive rate while minimizing the false positive rate.
Multiclass settings
-------------------
ROC curves are typically used in binary classification to study the output of
a classifier. In order to extend ROC curve and ROC area to multi-class
or multi-label classification, it is necessary to binarize the output. One ROC
curve can be drawn per label, but one can also draw a ROC curve by considering
each element of the label indicator matrix as a binary prediction
(micro-averaging).
Another evaluation measure for multi-class classification is
macro-averaging, which gives equal weight to the classification of each
label.
.. note::
See also :func:`sklearn.metrics.roc_auc_score`,
:ref:`sphx_glr_auto_examples_model_selection_plot_roc_crossval.py`.
"""
import numpy as np
import matplotlib.pyplot as plt
from itertools import cycle
from sklearn import svm
from sklearn.metrics import roc_curve, auc
from sklearn.multiclass import OneVsRestClassifier
from scipy import interp
def _load_split(path):
    """Load a CSV of [label, features...]; return (X, one-hot Y).

    Column 0 holds the class label; label 1 maps to one-hot column 0,
    anything else to column 1 (binary paraphrase / non-paraphrase task).
    """
    data = np.loadtxt(path, delimiter=',')
    features = data[:, 1:]
    labels = data[:, 0]
    onehot = np.zeros((data.shape[0], 2))
    for row in range(data.shape[0]):
        if labels[row] == 1:
            onehot[row, 0] = 1
        else:
            onehot[row, 1] = 1
    return features, onehot


def ROC_plot(ddir, stp=None, num_feat=None, nhlayer=None, v_size=None):
    """Train a linear SVM on vectors under *ddir* and save <ddir>/ROC.png.

    ddir must contain 'train_vector_dataset.csv' and 'test_vector_dataset.csv'
    (label in column 0, features after). stp/num_feat/nhlayer/v_size only
    label the plot title, so they now default to None — this also fixes the
    __main__ block, which called ROC_plot(ddir) with a single argument.
    """
    X_train, y_train = _load_split(ddir + 'train_vector_dataset.csv')
    X_test, y_test = _load_split(ddir + 'test_vector_dataset.csv')
    n_classes = 2
    classifier = OneVsRestClassifier(svm.LinearSVC(
        penalty='l2', tol=0.001, C=1.0, loss='hinge', fit_intercept=True,
        intercept_scaling=1.0, dual=True, verbose=0, random_state=None,
        max_iter=100000))
    y_score = classifier.fit(X_train, y_train).decision_function(X_test)
    # Per-class ROC curve and AUC.
    fpr = dict()
    tpr = dict()
    roc_auc = dict()
    for i in range(n_classes):
        fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])
        roc_auc[i] = auc(fpr[i], tpr[i])
    # Micro-average: treat every (sample, class) cell as one binary decision.
    fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), y_score.ravel())
    roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
    lw = 2
    # Macro-average: aggregate all FPR points, interpolate each curve there.
    all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))
    mean_tpr = np.zeros_like(all_fpr)
    for i in range(n_classes):
        # np.interp replaces scipy.interp, which is deprecated/removed in
        # recent SciPy; behavior is identical for increasing x-coordinates.
        mean_tpr += np.interp(all_fpr, fpr[i], tpr[i])
    mean_tpr /= n_classes
    fpr["macro"] = all_fpr
    tpr["macro"] = mean_tpr
    roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
    # Plot both per-class curves plus the chance diagonal and save to disk.
    plt.figure()
    plt.plot(fpr[0], tpr[0], color='aqua', lw=lw,
             label='ROC curve of paraphrase {0} (area = {1:0.2f})'
                   ''.format(0, roc_auc[0]))
    plt.plot(fpr[1], tpr[1], color='darkorange', lw=lw,
             label='ROC curve of nonparaphrase {0} (area = {1:0.2f})'
                   ''.format(1, roc_auc[1]))
    plt.plot([0, 1], [0, 1], 'k--', lw=lw)
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('ROC for (hidden layer=' + str(nhlayer) + '; stopword=' + str(stp)
              + '; number_feature=' + str(num_feat) + '; vect_size=' + str(v_size) + ')')
    plt.legend(loc="lower right")
    plt.savefig(ddir + 'ROC.png', dpi=1000)
    plt.close()
if __name__ == '__main__':
    ddir = '/media/zero/41FF48D81730BD9B/Final_Thesies/results/SVM_results/deep21_v_200__0__15_1_min/'
    # The original call passed only ddir, but ROC_plot requires five
    # arguments, so the script crashed with a TypeError. The values below
    # are read off the results directory name ("v_200__0__15_1") —
    # TODO confirm this parameter mapping.
    ROC_plot(ddir, stp=0, num_feat=15, nhlayer=1, v_size=200)
| {
"content_hash": "4da432e2276cc591500173a9abb1085b",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 140,
"avg_line_length": 36,
"alnum_prop": 0.6255144032921811,
"repo_name": "deepakrana47/DT-RAE",
"id": "0707c79ab9da39e7f81d4f0b8b7b82c42b0cb9cd",
"size": "4860",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "roc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "140951"
}
],
"symlink_target": ""
} |
from qingcloud.cli.misc.utils import explode_array
from qingcloud.cli.iaas_client.actions.base import BaseAction
class JoinInstanceGroupAction(BaseAction):
    """CLI action that adds one or more instances to an instance group."""

    action = 'JoinInstanceGroup'
    command = 'join-instance-group'
    usage = '%(prog)s -i "instance_id,..." -g <group_id> [-f <conf_file>]'

    @classmethod
    def add_ext_arguments(cls, parser):
        """Register the -i/--instances and -g/--instance_group options."""
        parser.add_argument(
            '-i', '--instances', dest='instances', action='store',
            type=str, default=None,
            help='the comma separated IDs of instances you want to join into group.')
        parser.add_argument(
            '-g', '--instance_group', dest='instance_group', action='store',
            type=str,
            help='the group id of destination group.')
        return parser

    @classmethod
    def build_directive(cls, options):
        """Validate options and return the API directive dict, or None."""
        directive = {
            'instances': explode_array(options.instances),
            'instance_group': options.instance_group,
        }
        # Both fields are mandatory; report the first missing one and bail.
        for name, value in directive.items():
            if value is None or value == '':
                print('error: [%s] should be specified' % name)
                return None
        return directive
| {
"content_hash": "05c39a223bf67a9c73e0bb63051b903a",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 101,
"avg_line_length": 33.88095238095238,
"alnum_prop": 0.5790583274771609,
"repo_name": "yunify/qingcloud-cli",
"id": "c31b917488e3bba484496c51347b579e5c1db6be",
"size": "2256",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qingcloud/cli/iaas_client/actions/instance_groups/join_instance_group.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "852"
},
{
"name": "Python",
"bytes": "607642"
}
],
"symlink_target": ""
} |
import unittest
import json
import msgpack
from cellardoor.views import MinimalView
class TestMinimalView(unittest.TestCase):
    """MinimalView should render lists and single objects as JSON or msgpack."""

    def test_list_response(self):
        """
        Should return a simple list for list get methods
        """
        view = MinimalView()
        objs = [{'foo': 123}, {'foo': 456}]
        # assertEquals is a deprecated alias of assertEqual; use the
        # canonical name throughout.
        content_type, result = view.get_list_response('application/json', objs)
        self.assertEqual(content_type, 'application/json')
        self.assertEqual(result, json.dumps(objs))
        content_type, result = view.get_list_response('application/x-msgpack', objs)
        self.assertEqual(content_type, 'application/x-msgpack')
        self.assertEqual(result, msgpack.packb(objs))

    def test_individual_response(self):
        """
        Should return a single object for individual get methods
        """
        view = MinimalView()
        obj = {'foo': 123}
        content_type, result = view.get_individual_response('application/json', obj)
        self.assertEqual(content_type, 'application/json')
        self.assertEqual(result, json.dumps(obj))
        content_type, result = view.get_individual_response('application/x-msgpack', obj)
        self.assertEqual(content_type, 'application/x-msgpack')
        self.assertEqual(result, msgpack.packb(obj))
"content_hash": "8424d9be585027a1d4029e64e1987a52",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 83,
"avg_line_length": 31.07894736842105,
"alnum_prop": 0.7298899237933955,
"repo_name": "cooper-software/cellardoor",
"id": "b609000e0610a8ba02da01c869c380e602cc9854",
"size": "1181",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_view_minimal.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "239308"
}
],
"symlink_target": ""
} |
import sys, os
import tempfile
from datetime import datetime

try:
    from Queue import Queue, Empty
except ImportError:
    from queue import Queue, Empty
class MemorySubscriber:
    """In-memory FIFO message queue belonging to a single subscriber."""

    def __init__(self):
        """Initializes the empty queue for a particular subscriber."""
        self.messages = Queue()

    def getNext(self):
        """Return the next message in the queue, or None if it is empty."""
        try:
            return self.messages.get(block=False, timeout=None)
        except Empty:
            return None

    def getAll(self):
        """Drain the queue and return all currently-available messages."""
        items = []
        # Only take what is available right now; `Empty` guards a race where
        # the queue drains under us. The original `except Empty, e` was both
        # Python-2-only syntax and referenced an unimported name, so any
        # empty-queue race raised NameError instead of stopping cleanly.
        for _ in range(self.messages.qsize()):
            try:
                items.append(self.messages.get_nowait())
            except Empty:
                break
        return items

    def getCount(self):
        """Return the number of messages available in the queue."""
        return self.messages.qsize()

    def put(self, message):
        """Put *message* in the queue without blocking."""
        self.messages.put_nowait(message)
class MemoryPubSub:
    """Pure in-memory pub/sub: maps topic names to subscriber queues."""

    def __init__(self):
        """Start with no subscribers registered."""
        self.subscribers = {}

    def publish(self, topic, message):
        """Push *message* to every subscriber of *topic*.

        Returns a human-readable fan-out summary string.
        """
        receivers = self.subscribers.get(topic, [])
        for receiver in receivers:
            receiver.put(message)
        return "Published to " + str(len(receivers)) + " subscribers"

    def subscribe(self, *topics):
        """Create one MemorySubscriber, register it under every topic, return it."""
        subscriber = MemorySubscriber()
        for topic in topics:
            self.subscribers.setdefault(topic, []).append(subscriber)
        return subscriber
class SQLiteSubscriber:
    """Subscriber view over the shared SQLite ``mps_messages`` table.

    Only rows stored with a timestamp newer than this subscriber's
    watermark (set at subscription time, advanced on every read) are seen.
    """

    def __init__(self, cursor, topics, timestamp):
        """Remember the shared cursor, subscribed topics and start watermark."""
        self.messages = Queue()
        self.timestamp = timestamp
        self.topics = list(topics)
        self.cursor = cursor

    def _fetch_new(self):
        """Buffer every row newer than the watermark, then advance it."""
        for topic in self.topics:
            self.cursor.execute(
                "SELECT message, timestamp from mps_messages WHERE topic=:topic and timestamp>:timestamp",
                {"topic": topic, "timestamp": self.timestamp})
            for record in self.cursor.fetchall():
                self.messages.put_nowait(record[0])
        self.timestamp = datetime.now()

    def getNext(self):
        """Return the next message for our topics, or None if none is available."""
        self._fetch_new()
        try:
            return self.messages.get(block=False, timeout=None)
        except Empty:
            return None

    def getAll(self):
        """Drain and return every currently-available message."""
        self._fetch_new()
        items = []
        # The original `except Empty, e` was Python-2-only syntax and named
        # an unimported symbol; `Empty` is now imported at module level.
        for _ in range(self.messages.qsize()):
            try:
                items.append(self.messages.get_nowait())
            except Empty:
                break
        return items

    def getCount(self):
        """Return the number of messages already buffered locally."""
        return self.messages.qsize()

    def closeTopic(self, topic):
        """Stop listening to *topic* by its name."""
        self.topics.remove(topic)
class SQLitePubSub:
    """Pub/sub backed by a SQLite database file with one shared messages table."""

    def __init__(self, directory=None, db_name='minpubsub_sqlite.db'):
        """Open (or create) the database under *directory*; ensure the table exists.

        *directory* defaults to the system temp directory, now resolved at
        call time instead of once at import time.
        """
        try:
            import sqlite3
        except ImportError:
            print("sqlite3 package could not be imported. Exiting.")
            sys.exit(0)
        if directory is None:
            directory = tempfile.gettempdir()
        if not os.path.isdir(directory):
            raise ValueError('the given Path "', directory, '" is not a valid directory.')
        if not db_name:
            db_name = 'minpubsub_sqlite.db'
        self.connection = sqlite3.connect(directory + '/' + db_name)
        self.cursor = self.connection.cursor()
        self.cursor.execute("CREATE TABLE IF NOT EXISTS mps_messages(topic VARCHAR(100), message VARCHAR(1000), timestamp VARCHAR(100))")

    def publish(self, topic, message):
        """Store one message row; commit immediately so readers can see it."""
        self.cursor.execute("INSERT INTO mps_messages VALUES(:topic, :message, :timestamp)",
                            {"topic": topic, "message": message, "timestamp": datetime.now()})
        self.connection.commit()

    def subscribe(self, *topics):
        """Return a SQLiteSubscriber that sees messages published from now on."""
        return SQLiteSubscriber(self.cursor, topics, datetime.now())
class MySQLSubscriber:
    """Subscriber view over the shared MySQL ``mps_messages`` table."""

    def __init__(self, cursor, topics, timestamp):
        """Remember the shared cursor, subscribed topics and start watermark."""
        self.messages = Queue()
        self.timestamp = timestamp
        self.topics = list(topics)
        self.cursor = cursor

    def _fetch_new(self):
        """Buffer every row newer than the watermark, then advance it."""
        for topic in self.topics:
            self.cursor.execute(
                "SELECT message, timestamp from mps_messages WHERE topic=%s and timestamp>%s",
                (topic, self.timestamp))
            for record in self.cursor.fetchall():
                self.messages.put_nowait(record[0])
        self.timestamp = datetime.now()

    def getNext(self):
        """Return the next message for our topics, or None if none is available."""
        self._fetch_new()
        try:
            return self.messages.get(block=False, timeout=None)
        except Empty:
            return None

    def getAll(self):
        """Drain and return every currently-available message."""
        self._fetch_new()
        items = []
        # The original `except Empty, e` was Python-2-only syntax and named
        # an unimported symbol; `Empty` is now imported at module level.
        for _ in range(self.messages.qsize()):
            try:
                items.append(self.messages.get_nowait())
            except Empty:
                break
        return items

    def getCount(self):
        """Return the number of messages already buffered locally."""
        return self.messages.qsize()

    def closeTopic(self, topic):
        """Stop listening to *topic* by its name."""
        self.topics.remove(topic)
class MySQLPubSub:
    """Pub/sub backed by a MySQL table; argv = (host, user, passwd, db)."""

    def __init__(self, *argv):
        """Connect via MySQLdb and create the messages table if missing."""
        try:
            import MySQLdb
        except ImportError:
            print("MySQLdb package could not be imported. Exiting.")
            sys.exit(0)
        try:
            self.connection = MySQLdb.connect(argv[0], argv[1], argv[2], argv[3])
            self.cursor = self.connection.cursor()
            self.cursor.execute("SHOW TABLES LIKE 'mps_messages'")
            existing = [row[0] for row in self.cursor.fetchall()]
            if "mps_messages" not in existing:
                self.cursor.execute("CREATE TABLE IF NOT EXISTS mps_messages(topic VARCHAR(100), message VARCHAR(1000), timestamp VARCHAR(100))")
        except Exception:
            # NOTE(review): narrowed from a bare `except:` so SystemExit /
            # KeyboardInterrupt are no longer swallowed.
            print("Error connecting to MySQL database")
            sys.exit(0)

    def publish(self, topic, message):
        """Insert one message row and commit it.

        The commit was missing: without it MySQL (InnoDB) keeps the row
        invisible to subscribers on other connections, whereas
        SQLitePubSub.publish already commits.
        """
        self.cursor.execute("INSERT INTO mps_messages VALUES(%s, %s, %s)",
                            (topic, message, datetime.now()))
        self.connection.commit()

    def subscribe(self, *topics):
        """Return a MySQLSubscriber sharing this connection's cursor."""
        return MySQLSubscriber(self.cursor, topics, datetime.now())
class MongoDBSubscriber:
    """Subscriber view over the shared Mongo collection, filtered by topics."""

    def __init__(self, collection, topics, timestamp):
        """Remember the shared collection, subscribed topics and watermark."""
        self.messages = Queue()
        self.collection = collection
        self.topics = list(topics)
        self.timestamp = timestamp

    def _fetch_new(self):
        """Buffer every document newer than the watermark, then advance it."""
        # self.cursor is kept as an attribute to match the original class.
        self.cursor = self.collection.find(
            {'topic': {'$in': self.topics}, 'timestamp': {'$gte': self.timestamp}})
        for doc in self.cursor:
            self.messages.put_nowait(doc['message'])
        self.timestamp = datetime.now()

    def getNext(self):
        """Return the next message for our topics, or None if none is available."""
        self._fetch_new()
        try:
            return self.messages.get(block=False, timeout=None)
        except Empty:
            return None

    def getAll(self):
        """Drain and return every currently-available message."""
        self._fetch_new()
        items = []
        # The original `except Empty, e` was Python-2-only syntax and named
        # an unimported symbol; `Empty` is now imported at module level.
        for _ in range(self.messages.qsize()):
            try:
                items.append(self.messages.get_nowait())
            except Empty:
                break
        return items

    def getCount(self):
        """Return the number of messages already buffered locally."""
        return self.messages.qsize()

    def closeTopic(self, topic):
        """Stop listening to *topic* by its name."""
        self.topics.remove(topic)
class MongoDBPubSub:
    """Pub/sub backed by MongoDB; argv = (host, port)."""

    def __init__(self, *argv):
        """Connect via pymongo and select the shared messages collection."""
        try:
            import pymongo
        except ImportError:
            print("Pymongo package could not be imported. Exiting.")
            sys.exit(0)
        try:
            self.connection = pymongo.Connection(argv[0], int(argv[1]))
            self.db = self.connection.minpubsub
            self.collection = self.db['mps_messages']
        except Exception:
            # NOTE(review): narrowed from a bare `except:` so SystemExit /
            # KeyboardInterrupt are no longer swallowed.
            print("Failed to connect to Mongo database. Exiting.")
            sys.exit(0)

    def publish(self, topic, message):
        """Insert one message document with the current timestamp."""
        self.collection.insert({'topic': topic, 'message': message, 'timestamp': datetime.now()})

    def subscribe(self, *topics):
        """Return a MongoDBSubscriber over the shared collection."""
        return MongoDBSubscriber(self.collection, topics, datetime.now())
def create(name, *argv):
    """Factory: return a pub/sub backend selected by *name*.

    Supported names: 'memory', 'sqlite', 'mysql', 'mongo'/'mongodb'.
    Exits the process for unknown names (preserves original behavior).
    """
    if name == 'memory':
        return MemoryPubSub()
    if name == 'sqlite':
        return SQLitePubSub(*argv)
    if name == 'mysql':
        return MySQLPubSub(*argv)
    if name in ('mongo', 'mongodb'):
        return MongoDBPubSub(*argv)
    # Python-2 print statements converted to the function form, which is
    # valid in both Python 2 and 3 for a single argument.
    print("Option not found! Exiting.")
    sys.exit(0)
| {
"content_hash": "6c4e4c9a21a389cb57b6734cc88c67d9",
"timestamp": "",
"source": "github",
"line_count": 311,
"max_line_length": 225,
"avg_line_length": 38.29903536977492,
"alnum_prop": 0.5922256737469566,
"repo_name": "jyotiska/minpubsub",
"id": "8e8e6d4e2d2b99fbb5c8c339674869716ab55d31",
"size": "11911",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "minpubsub.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16733"
}
],
"symlink_target": ""
} |
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from pytest import raises
from tambo import dispatcher
class MySubCommand(object):
    # Test double: dispatch() is expected to call parse_args() and return
    # its value, so parse_args simply echoes the argv it was built with.
    def __init__(self, argv):
        self.argv = argv

    def parse_args(self):
        return self.argv
class MyNewSubCommand(object):
    # Test double exposing main(): the dispatch tests below assert that a
    # subcommand with a main() has that called instead of parse_args().
    def __init__(self, argv):
        self.argv = argv

    def main(self):
        # Echoes argv so tests can assert dispatch() returned it.
        return self.argv

    def parse_args(self):
        # Guard: dispatch() must not fall back to parse_args when main exists.
        raise RuntimeError('this method should not be called')
class Test_dispatching_mapped_classes(object):
    """Behavior of dispatcher.Transport.dispatch() against a subcommand mapper."""

    def test_does_nothing_if_the_mapper_is_empty(self):
        t = dispatcher.Transport([])
        assert t.dispatch() is None

    def test_does_nothing_if_the_mapper_cannot_match_the_subcommand(self):
        t = dispatcher.Transport(['bin/foo', 'bar', 'boo'])
        assert t.dispatch() is None

    def test_returns_parse_args_called_with_the_instance(self):
        t = dispatcher.Transport(['/usr/bin/foo', 'foo'])
        t.mapper = {'foo': MySubCommand}
        assert t.dispatch() == ['foo']

    def test_returns_parse_args_called_with_the_instance_with_exit(self):
        t = dispatcher.Transport(['/usr/bin/foo', 'foo'])
        t.mapper = {'foo': MySubCommand}
        with raises(SystemExit):
            t.dispatch(with_exit=True)

    def test_returns_parse_args_called_with_the_instance_with_main(self):
        t = dispatcher.Transport(['/usr/bin/foo', 'foo'])
        t.mapper = {'foo': MyNewSubCommand}
        assert t.dispatch() == ['foo']

    def test_complains_about_unknown_commands(self):
        fake_out = StringIO()
        t = dispatcher.Transport(['bin/foo', 'bar', 'boo'], writer=fake_out)
        t.dispatch()
        assert fake_out.getvalue() == 'Unknown command(s): bar boo\n'
| {
"content_hash": "0f65b5022a035b8fcbd94cf717b8349e",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 74,
"avg_line_length": 31.693548387096776,
"alnum_prop": 0.6371501272264631,
"repo_name": "alfredodeza/tambo",
"id": "6ba3f955de5d6fafe5b6e7f17cdd7df8e9465b53",
"size": "1965",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tambo/tests/test_dispatcher.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17699"
}
],
"symlink_target": ""
} |
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import assert_raises_message, eq_
class _BooleanProcessorTest(fixtures.TestBase):
    """Shared int_to_boolean checks; subclasses supply ``cls.module``."""

    def test_int_to_bool_none(self):
        eq_(self.module.int_to_boolean(None), None)

    def test_int_to_bool_zero(self):
        eq_(self.module.int_to_boolean(0), False)

    def test_int_to_bool_one(self):
        eq_(self.module.int_to_boolean(1), True)

    def test_int_to_bool_positive_int(self):
        eq_(self.module.int_to_boolean(12), True)

    def test_int_to_bool_negative_int(self):
        eq_(self.module.int_to_boolean(-4), True)
class PyBooleanProcessorTest(_BooleanProcessorTest):
    """Runs the shared checks against the pure-Python fallback processors."""

    @classmethod
    def setup_class(cls):
        from sqlalchemy import processors
        # Wrap the fallback functions as staticmethods on a throwaway type
        # so they can be called unbound via cls.module.<name>(...).
        cls.module = type("util", (object,), {
            name: staticmethod(fn)
            for name, fn in processors.py_fallback().items()
        })

    def test_bool_to_int_false(self):
        from sqlalchemy import processors
        eq_(processors.boolean_to_int(False), 0)

    def test_bool_to_int_true(self):
        from sqlalchemy import processors
        eq_(processors.boolean_to_int(True), 1)

    def test_bool_to_int_positive_int(self):
        from sqlalchemy import processors
        eq_(processors.boolean_to_int(5), 1)

    def test_bool_to_int_negative_int(self):
        from sqlalchemy import processors
        eq_(processors.boolean_to_int(-10), 1)

    def test_bool_to_int_zero(self):
        from sqlalchemy import processors
        eq_(processors.boolean_to_int(0), 0)

    def test_bool_to_int_one(self):
        from sqlalchemy import processors
        eq_(processors.boolean_to_int(1), 1)
class CBooleanProcessorTest(_BooleanProcessorTest):
    # Same shared checks, but against the compiled C extension module.
    __requires__ = ('cextensions',)

    @classmethod
    def setup_class(cls):
        from sqlalchemy import cprocessors
        cls.module = cprocessors
class _DateProcessorTest(fixtures.TestBase):
    """str_to_date/str_to_datetime/str_to_time must raise informative ValueErrors."""

    def _expect_value_error(self, fn, value, message):
        # Every parse failure surfaces as a ValueError with a precise message.
        assert_raises_message(ValueError, message, fn, value)

    def test_date_no_string(self):
        self._expect_value_error(
            self.module.str_to_date, 2012,
            "Couldn't parse date string '2012' - value is not a string")

    def test_datetime_no_string(self):
        self._expect_value_error(
            self.module.str_to_datetime, 2012,
            "Couldn't parse datetime string '2012' - value is not a string")

    def test_time_no_string(self):
        self._expect_value_error(
            self.module.str_to_time, 2012,
            "Couldn't parse time string '2012' - value is not a string")

    def test_date_invalid_string(self):
        self._expect_value_error(
            self.module.str_to_date, "5:a",
            "Couldn't parse date string: '5:a'")

    def test_datetime_invalid_string(self):
        self._expect_value_error(
            self.module.str_to_datetime, "5:a",
            "Couldn't parse datetime string: '5:a'")

    def test_time_invalid_string(self):
        self._expect_value_error(
            self.module.str_to_time, "5:a",
            "Couldn't parse time string: '5:a'")
class PyDateProcessorTest(_DateProcessorTest):
    """Date-parsing checks against the pure-Python fallback implementations."""

    @classmethod
    def setup_class(cls):
        from sqlalchemy import processors
        # Expose the fallback functions as staticmethods on a throwaway type.
        cls.module = type("util", (object,), {
            name: staticmethod(fn)
            for name, fn in processors.py_fallback().items()
        })
class CDateProcessorTest(_DateProcessorTest):
    # Same shared checks, but against the compiled C extension module.
    __requires__ = ('cextensions',)

    @classmethod
    def setup_class(cls):
        from sqlalchemy import cprocessors
        cls.module = cprocessors
class _DistillArgsTest(fixtures.TestBase):
    """_distill_params must normalize every calling convention to a list."""

    def test_distill_none(self):
        eq_(self.module._distill_params(None, None), [])

    def test_distill_no_multi_no_param(self):
        eq_(self.module._distill_params((), {}), [])

    def test_distill_dict_multi_none_param(self):
        eq_(self.module._distill_params(None, {"foo": "bar"}), [{"foo": "bar"}])

    def test_distill_dict_multi_empty_param(self):
        eq_(self.module._distill_params((), {"foo": "bar"}), [{"foo": "bar"}])

    def test_distill_single_dict(self):
        eq_(self.module._distill_params(({"foo": "bar"},), {}), [{"foo": "bar"}])

    def test_distill_single_list_strings(self):
        eq_(self.module._distill_params((["foo", "bar"],), {}), [["foo", "bar"]])

    def test_distill_single_list_tuples(self):
        eq_(
            self.module._distill_params(([("foo", "bar"), ("bat", "hoho")],), {}),
            [("foo", "bar"), ("bat", "hoho")])

    def test_distill_single_list_tuple(self):
        eq_(self.module._distill_params(([("foo", "bar")],), {}), [("foo", "bar")])

    def test_distill_multi_list_tuple(self):
        eq_(
            self.module._distill_params(([("foo", "bar")], [("bar", "bat")]), {}),
            ([("foo", "bar")], [("bar", "bat")]))

    def test_distill_multi_strings(self):
        eq_(self.module._distill_params(("foo", "bar"), {}), [("foo", "bar")])

    def test_distill_single_list_dicts(self):
        eq_(
            self.module._distill_params(([{"foo": "bar"}, {"foo": "hoho"}],), {}),
            [{"foo": "bar"}, {"foo": "hoho"}])

    def test_distill_single_string(self):
        eq_(self.module._distill_params(("arg",), {}), [["arg"]])

    def test_distill_multi_string_tuple(self):
        eq_(self.module._distill_params((("arg", "arg"),), {}), [("arg", "arg")])
class PyDistillArgsTest(_DistillArgsTest):
    """Distill-params checks against the pure-Python engine.util fallback."""

    @classmethod
    def setup_class(cls):
        from sqlalchemy.engine import util
        # Expose the fallback functions as staticmethods on a throwaway type.
        cls.module = type("util", (object,), {
            name: staticmethod(fn)
            for name, fn in util.py_fallback().items()
        })
class CDistillArgsTest(_DistillArgsTest):
    # Same shared checks, but against the compiled C utilities module.
    __requires__ = ('cextensions', )

    @classmethod
    def setup_class(cls):
        from sqlalchemy import cutils as util
        cls.module = util
| {
"content_hash": "e5eab634efdefe8de0000bc2c8e9dec4",
"timestamp": "",
"source": "github",
"line_count": 250,
"max_line_length": 76,
"avg_line_length": 26.944,
"alnum_prop": 0.5216745843230404,
"repo_name": "robin900/sqlalchemy",
"id": "47302af979fafa12eccc513f4fcffbdc4cecf7d9",
"size": "6736",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/engine/test_processors.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "46256"
},
{
"name": "Python",
"bytes": "9080563"
}
],
"symlink_target": ""
} |
import os
import shutil
import sys
import datetime
from invoke import task
from invoke.util import cd
from pelican.server import ComplexHTTPRequestHandler, RootedHTTPServer
# Site-level settings shared by all tasks below.
CONFIG = {
    # Local path configuration (can be absolute or relative to tasks.py)
    'deploy_path': 'output',
    # Remote server configuration used by `publish` — its rsync command
    # formats '{production}:{dest_path}', so both keys must exist here
    # (they were missing, making publish() raise KeyError).
    # TODO: set the real ssh host/user and remote path before publishing.
    'production': 'user@localhost',
    'dest_path': '/var/www',
    # Port for `serve`
    'port': 8000,
}
@task
def clean(c):
    """Remove generated files"""
    # Wipe and recreate the deploy directory so stale output never lingers.
    deploy_path = CONFIG['deploy_path']
    if os.path.isdir(deploy_path):
        shutil.rmtree(deploy_path)
        os.makedirs(deploy_path)
@task
def build(c):
    """Build local version of site"""
    # Generate the site with the development settings file.
    c.run('pelican -s pelicanconf.py')
@task
def rebuild(c):
    """`build` with the delete switch"""
    # -d removes the output directory before generating.
    c.run('pelican -d -s pelicanconf.py')
@task
def regenerate(c):
    """Automatically regenerate site upon file modification"""
    # -r watches the content tree and rebuilds on change (blocks until killed).
    c.run('pelican -r -s pelicanconf.py')
@task
def serve(c):
    """Serve site at http://localhost:8000/"""

    class _ReuseAddrServer(RootedHTTPServer):
        # Allow quick restarts without 'Address already in use' errors.
        allow_reuse_address = True

    httpd = _ReuseAddrServer(
        CONFIG['deploy_path'], ('', CONFIG['port']), ComplexHTTPRequestHandler)
    sys.stderr.write('Serving on port {port} ...\n'.format(**CONFIG))
    httpd.serve_forever()
@task
def reserve(c):
    """`build`, then `serve`"""
    # Convenience wrapper: regenerate the site, then serve it locally.
    build(c)
    serve(c)
@task
def preview(c):
    """Build production version of site"""
    # Uses the production settings file (absolute URLs, analytics, etc.).
    c.run('pelican -s publishconf.py')
@task
def publish(c):
    """Publish to production via rsync"""
    # Build with production settings, then rsync the output directory.
    # NOTE(review): the format string requires CONFIG to define
    # 'production' (ssh target) and 'dest_path' (remote dir); confirm
    # those keys exist, otherwise .format(**CONFIG) raises KeyError.
    c.run('pelican -s publishconf.py')
    c.run(
        'rsync --delete --exclude ".DS_Store" -pthrvz -c '
        '{} {production}:{dest_path}'.format(
            CONFIG['deploy_path'].rstrip('/') + '/',
            **CONFIG))
| {
"content_hash": "ff0f9943dc1ba6921b9c49f9e925f641",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 72,
"avg_line_length": 22.973333333333333,
"alnum_prop": 0.6302959953569356,
"repo_name": "minhhh/pelican_git",
"id": "6a6ca239a3289cd665e1054ea85cb27da42cdd75",
"size": "1748",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "blog/tasks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "165571"
},
{
"name": "HTML",
"bytes": "16079"
},
{
"name": "JavaScript",
"bytes": "5083"
},
{
"name": "Makefile",
"bytes": "3374"
},
{
"name": "Python",
"bytes": "18337"
},
{
"name": "Shell",
"bytes": "2178"
}
],
"symlink_target": ""
} |
"""Wordcount exercise
Google's Python class
The main() below is already defined and complete. It calls print_words()
and print_top() functions which you write.
1. For the --count flag, implement a print_words(filename) function that counts
how often each word appears in the text and prints:
word1 count1
word2 count2
...
Print the above list in order sorted by word (python will sort punctuation to
come before letters -- that's fine). Store all the words as lowercase,
so 'The' and 'the' count as the same word.
2. For the --topcount flag, implement a print_top(filename) which is similar
to print_words() but which prints just the top 20 most common words sorted
so the most common word is first, then the next most common, and so on.
Use str.split() (no arguments) to split on all whitespace.
Workflow: don't build the whole program at once. Get it to an intermediate
milestone and print your data structure and sys.exit(0).
When that's working, try for the next milestone.
Optional: define a helper function to avoid code duplication inside
print_words() and print_top().
"""
import sys
# +++your code here+++
# Define print_words(filename) and print_top(filename) functions.
# You could write a helper utility function that reads a file
# and builds and returns a word/count dict for it.
# Then print_words() and print_top() can just call the utility function.
###
def file_reader(filename):
    """Read *filename* and return a dict mapping lowercased word -> count.

    Words are whitespace-separated (str.split with no arguments) and
    lowercased so 'The' and 'the' count as the same word, per the
    exercise statement.
    """
    counts = {}
    # Context manager guarantees the file is closed even on error; plain
    # 'r' mode replaces 'rU', which was removed in Python 3.11.
    with open(filename, 'r') as f:
        for word in f.read().split():
            # Bug fix: the original tested `word.lower() in dic` but stored
            # the original-case word, so mixed-case words were miscounted
            # (and could raise KeyError-adjacent double-count anomalies).
            key = word.lower()
            counts[key] = counts.get(key, 0) + 1
    return counts
def print_words(filename):
    """Print 'word count' lines sorted alphabetically by word.

    The exercise statement requires sorted-by-word output; the original
    iterated dict keys in arbitrary order. print(...) with a single
    argument is valid in both Python 2 and 3.
    """
    counts = file_reader(filename)
    for word in sorted(counts):
        print(word + " " + str(counts[word]))
def get_key_value(dic_pair):
    # Sort key for dict.items(): element 1 is the count in a (word, count) pair.
    return dic_pair[1]
def print_top(filename):
    """Print the 20 most common words, most frequent first.

    The exercise statement asks for the top 20; the original printed every
    word. print(...) with a single argument is valid in Python 2 and 3.
    """
    counts = file_reader(filename)
    top = sorted(counts.items(), key=get_key_value, reverse=True)[:20]
    for word, count in top:
        print(word + " " + str(count))
# This basic command line argument parsing code is provided and
# calls the print_words() and print_top() functions which you must define.
def main():
    """Parse '{--count | --topcount} file' from sys.argv and dispatch.

    Exits with status 1 on a usage error or an unknown option.
    Fix: the original used Python-2-only print statements; the
    single-argument parenthesized form below prints identical output on
    both Python 2 and Python 3.
    """
    if len(sys.argv) != 3:
        print('usage: ./wordcount.py {--count | --topcount} file')
        sys.exit(1)
    option = sys.argv[1]
    filename = sys.argv[2]
    if option == '--count':
        print_words(filename)
    elif option == '--topcount':
        print_top(filename)
    else:
        print('unknown option: ' + option)
        sys.exit(1)

if __name__ == '__main__':
    main()
| {
"content_hash": "ad31d23f9fdbfc92983e4c05e438af6d",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 79,
"avg_line_length": 29.094117647058823,
"alnum_prop": 0.7027901334411646,
"repo_name": "nikraina/google-python-exercises",
"id": "9b6bcdfae29fe24f6acce47a06339affcd378ef0",
"size": "2704",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "basic/wordcount.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "DIGITAL Command Language",
"bytes": "191608"
},
{
"name": "HTML",
"bytes": "648137"
},
{
"name": "Python",
"bytes": "79357"
}
],
"symlink_target": ""
} |
import argparse
import glob
import json
import os
import sys
from typing import List
import inspect_compute_results
def exact_match(reference_result: str, variant_result: str) -> bool:
    """Return whether exact-diffing the two SSBO result files reports a match.

    The second element of the exactdiff_ssbos() return value is ignored.
    """
    matched, _details = inspect_compute_results.exactdiff_ssbos(
        reference_result, variant_result)
    return matched
def fuzzy_match(reference_result: str, variant_result: str, args: argparse.Namespace) -> bool:
    """Return whether fuzzy-diffing the two SSBO result files reports a match.

    Tolerances come from the command-line args when given (truthy), and
    fall back to the module defaults otherwise. The second element of the
    fuzzydiff_ssbos() return value is ignored.
    """
    rel_tol = float(args.rel_tol if args.rel_tol
                    else inspect_compute_results.DEFAULT_REL_TOL)
    abs_tol = float(args.abs_tol if args.abs_tol
                    else inspect_compute_results.DEFAULT_ABS_TOL)
    matched, _details = inspect_compute_results.fuzzydiff_ssbos(
        reference_result, variant_result, rel_tol=rel_tol, abs_tol=abs_tol)
    return matched
def _parse_args(args: List[str]) -> argparse.Namespace:
    """Build the command-line parser for the report tool and parse args."""
    description = (
        'Report results for a compute shader family.')
    parser = argparse.ArgumentParser(description=description)
    # Required arguments
    parser.add_argument(
        'results_directory',
        help='A directory containing results for a compute shader family')
    parser.add_argument(
        '--rel_tol',
        help=(
            'Relative tolerance parameter for fuzzy diffing, default: '
            + inspect_compute_results.DEFAULT_REL_TOL))
    parser.add_argument(
        '--abs_tol',
        help=(
            'Absolute tolerance parameter for fuzzy diffing, default: '
            + inspect_compute_results.DEFAULT_ABS_TOL))
    return parser.parse_args(args)


def _load_status(result_file: str) -> str:
    """Return the 'status' field of one .info.json result file."""
    with open(result_file, 'r') as handle:
        return json.load(handle)['status']


def main_helper(args: List[str]) -> None:
    """Report reference and variant results for a compute shader family.

    Prints one line per result file. Successful variants are additionally
    classified as EXACT_MATCH, FUZZY_MATCH or DIFFERENT against the
    reference result.

    Raises FileNotFoundError if the results directory or the reference
    result file is missing.
    """
    # Fix: the original rebound the 'args' parameter to the parsed
    # Namespace; use a distinct name to avoid shadowing.
    parsed_args = _parse_args(args)
    results_directory = parsed_args.results_directory
    if not os.path.isdir(results_directory):
        raise FileNotFoundError('Specified results directory "' + results_directory + '" not found')
    reference_result = results_directory + os.sep + 'reference.info.json'
    if not os.path.isfile(reference_result):
        raise FileNotFoundError(
            'No results found for reference shader; expected file "reference.info.json" missing')
    reference_status = _load_status(reference_result)
    sys.stdout.write(reference_result + ': ' + reference_status + '\n')
    if reference_status != 'SUCCESS':
        # Bug fix: the original message had no trailing newline.
        sys.stdout.write('Cannot compare variant results as reference failed\n')
        return
    for variant_result in glob.glob(results_directory + os.sep + 'variant*.info.json'):
        variant_status = _load_status(variant_result)
        sys.stdout.write(variant_result + ': ' + variant_status)
        if variant_status == 'SUCCESS':
            if exact_match(reference_result, variant_result):
                sys.stdout.write(', EXACT_MATCH')
            elif fuzzy_match(reference_result, variant_result, parsed_args):
                sys.stdout.write(', FUZZY_MATCH')
            else:
                sys.stdout.write(', DIFFERENT')
        sys.stdout.write('\n')


if __name__ == '__main__':
    main_helper(sys.argv[1:])
| {
"content_hash": "9eae791007fefdd2fe0942b921fdc318",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 100,
"avg_line_length": 35.764044943820224,
"alnum_prop": 0.6415331448319196,
"repo_name": "google/graphicsfuzz",
"id": "eb181c9adf8361e51b1a51b63d5d55dbc951444d",
"size": "3805",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/src/main/python/drivers/report-compute-shader-family-results.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "21057"
},
{
"name": "Batchfile",
"bytes": "18712"
},
{
"name": "C",
"bytes": "1261"
},
{
"name": "C++",
"bytes": "112737"
},
{
"name": "CMake",
"bytes": "3664"
},
{
"name": "CSS",
"bytes": "6774"
},
{
"name": "Dockerfile",
"bytes": "4035"
},
{
"name": "GLSL",
"bytes": "570713"
},
{
"name": "HTML",
"bytes": "9966"
},
{
"name": "Java",
"bytes": "3314649"
},
{
"name": "JavaScript",
"bytes": "75538"
},
{
"name": "Python",
"bytes": "709540"
},
{
"name": "Shell",
"bytes": "62877"
},
{
"name": "Thrift",
"bytes": "7878"
}
],
"symlink_target": ""
} |
import dataclasses
from typing import ClassVar, List, Set, Tuple, Type
# Test fixture for the PyDataclassInspection: each inline <error> tag marks a
# span where the inspection must report a disallowed mutable default.
# ClassVar fields and Type[...] defaults (a2/b2/c2) must NOT be flagged.
# NOTE(review): this is inspection test data, not runnable Python — do not
# reformat or renumber the fields.
@dataclasses.dataclass
class A:
    a: List[int] = <error descr="mutable default 'list' is not allowed">[]</error>
    b: List[int] = <error descr="mutable default 'list' is not allowed">list()</error>
    c: Set[int] = <error descr="mutable default 'set' is not allowed">{1}</error>
    d: Set[int] = <error descr="mutable default 'set' is not allowed">set()</error>
    e: Tuple[int, ...] = <error descr="mutable default 'tuple' is not allowed">()</error>
    f: Tuple[int, ...] = <error descr="mutable default 'tuple' is not allowed">tuple()</error>
    g: ClassVar[List[int]] = []
    h: ClassVar = []
    a2: Type[List[int]] = list
    b2: Type[Set[int]] = set
    c2: Type[Tuple[int, ...]] = tuple
"content_hash": "323ef84cf6fd2750de2fb0dbcccc4961",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 94,
"avg_line_length": 45.64705882352941,
"alnum_prop": 0.6314432989690721,
"repo_name": "mglukhikh/intellij-community",
"id": "447d948ebbb304e02c538036f4da132930af0dc7",
"size": "776",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/testData/inspections/PyDataclassInspection/defaultFieldValue.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "AMPL",
"bytes": "20665"
},
{
"name": "AspectJ",
"bytes": "182"
},
{
"name": "Batchfile",
"bytes": "60827"
},
{
"name": "C",
"bytes": "211435"
},
{
"name": "C#",
"bytes": "1264"
},
{
"name": "C++",
"bytes": "197674"
},
{
"name": "CMake",
"bytes": "1675"
},
{
"name": "CSS",
"bytes": "201445"
},
{
"name": "CoffeeScript",
"bytes": "1759"
},
{
"name": "Erlang",
"bytes": "10"
},
{
"name": "Groovy",
"bytes": "3243028"
},
{
"name": "HLSL",
"bytes": "57"
},
{
"name": "HTML",
"bytes": "1899088"
},
{
"name": "J",
"bytes": "5050"
},
{
"name": "Java",
"bytes": "165554704"
},
{
"name": "JavaScript",
"bytes": "570364"
},
{
"name": "Jupyter Notebook",
"bytes": "93222"
},
{
"name": "Kotlin",
"bytes": "4611299"
},
{
"name": "Lex",
"bytes": "147047"
},
{
"name": "Makefile",
"bytes": "2352"
},
{
"name": "NSIS",
"bytes": "51276"
},
{
"name": "Objective-C",
"bytes": "27861"
},
{
"name": "Perl",
"bytes": "903"
},
{
"name": "Perl 6",
"bytes": "26"
},
{
"name": "Protocol Buffer",
"bytes": "6680"
},
{
"name": "Python",
"bytes": "25439881"
},
{
"name": "Roff",
"bytes": "37534"
},
{
"name": "Ruby",
"bytes": "1217"
},
{
"name": "Scala",
"bytes": "11698"
},
{
"name": "Shell",
"bytes": "66341"
},
{
"name": "Smalltalk",
"bytes": "338"
},
{
"name": "TeX",
"bytes": "25473"
},
{
"name": "Thrift",
"bytes": "1846"
},
{
"name": "TypeScript",
"bytes": "9469"
},
{
"name": "Visual Basic",
"bytes": "77"
},
{
"name": "XSLT",
"bytes": "113040"
}
],
"symlink_target": ""
} |
'''
Created on Mar 18, 2014
Copyright (c) 2014-2015 Dario Bonino
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License
@author: bonino
'''
def prime(number):
    '''
    Check whether the given number is a prime.
    Returns True if the number is a prime, False otherwise.
    '''
    # Bug fix: the original returned True for 1 (and any number < 2),
    # but by definition numbers below 2 are not prime.
    if number < 2:
        return False
    # 2 is the only even prime
    if number == 2:
        return True
    # every other even number is composite
    if number % 2 == 0:
        return False
    # iterate over odd candidate divisors only; '//' keeps the bound an
    # integer on Python 3 as well ('/' on ints already floors on Python 2)
    for i in range(3, number // 2, 2):
        # a zero remainder means we found a divisor: not a prime
        if number % i == 0:
            return False
    # no divisor found: the number is a prime
    return True
if __name__ == '__main__':
    # Interactive entry point: prompt for a number and report primality.
    # NOTE(review): raw_input and the bare print statements below are
    # Python 2 only; this script will not run unmodified on Python 3.
    number_as_string = raw_input("Insert a number:\n>");
    # convert to an integer number
    number = int(number_as_string)
    # only strictly positive numbers are meaningful here
    if(number > 0):
        # check if prime
        if(prime(number)):
            print "The number %d is a prime." % (number)
        else:
            print "The number %d is not a prime" % (number)
    else:
        print "Zero or negative numbers cannot be used..."
| {
"content_hash": "25e46cc1739cb14f76c73f88c0d1b150",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 72,
"avg_line_length": 29.70689655172414,
"alnum_prop": 0.6198491004062682,
"repo_name": "AmI-2015/python-lab1",
"id": "c0dac302d6e67b44af267f99971be4e520f8ce24",
"size": "1723",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "primes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "11959"
}
],
"symlink_target": ""
} |
import re
import os
import glob
import argparse
import sys
def create_parser():
    """Build and return the command-line argument parser for this tool."""
    arg_parser = argparse.ArgumentParser()
    # -i/--input is mandatory and lands on the 'input_path' attribute.
    arg_parser.add_argument(
        '-i', '--input', dest='input_path', required=True,
        help='''The input folder path, containing the data. ''')
    return arg_parser
def tidy_htseq(my_file, path):
    """Split one htseq-count output file into per-sample count and stats files.

    The sample name is taken from the 'Input for .../<sample>.<ext>' header
    line, which must appear before any count lines. After that, lines that
    look like 'name<TAB>digits' are appended to <path>/<sample>_stats.txt
    when they start with '__' (htseq summary lines), and to
    <path>/<sample>_htseq-counts.txt otherwise.

    Raises ValueError if a count line appears before the header (the
    original code crashed with a NameError here).
    """
    sample = None
    # 'with' closes the input file even if an exception is raised
    # (the original leaked the handle on error).
    with open(my_file, "r") as f:
        for line in f:
            if line.startswith('Input for'):
                # header looks like 'Input for .../<sample>.<extension>'
                _fluff, file_name = line.rsplit("/", 1)
                sample, _extension = file_name.split(".", 1)
            if re.search(r'\w+\t\d*', line):
                if sample is None:
                    raise ValueError(
                        "count line before 'Input for' header in %s" % my_file)
                # '__'-prefixed lines are htseq summary statistics
                if re.search(r'^__\w+\t\d*', line):
                    with open(os.path.join(path + '/' + sample + "_stats.txt"), "a") as stats_file:
                        stats_file.write(line)
                else:
                    with open(os.path.join(path + '/' + sample + "_htseq-counts.txt"), "a") as counts:
                        counts.write(line)
    print ("All done with %s" % sample)
def find_moi(input_path):
    """Locate every '*.sh.*' count file under input_path and tidy each one."""
    matches = glob.glob(os.path.join(input_path + "/*.sh.*"))
    print ("Found %d count files" % len(matches))
    for counts_path in matches:
        tidy_htseq(counts_path, input_path)
def main(argv=None):
    """Program wrapper.

    :param argv: argument list to parse; defaults to sys.argv[1:].
    """
    if argv is None:
        argv = sys.argv[1:]
    options = create_parser().parse_args(argv)
    find_moi(options.input_path)
    return 0
if __name__ == '__main__':
    import doctest
    doctest.testmod()
    sys.exit(main())
| {
"content_hash": "a38f9b25c78267d1165eca3b6b9a43dc",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 91,
"avg_line_length": 26.333333333333332,
"alnum_prop": 0.5509342977697408,
"repo_name": "Joannacodes/RNA-Seq-pipeline-SGE-cluster",
"id": "169fa208b53fcea53289416028db6f6aa8dc8f46",
"size": "1880",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "htseq-2-R.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "12431"
}
],
"symlink_target": ""
} |
from azure.identity import DefaultAzureCredential
from azure.mgmt.sql import SqlManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-sql
# USAGE
python get_a_hub_database_schema..py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
    """List the hub database schemas of a sync group and print each one."""
    sql_client = SqlManagementClient(
        credential=DefaultAzureCredential(),
        subscription_id="00000000-1111-2222-3333-444444444444",
    )
    hub_schemas = sql_client.sync_groups.list_hub_schemas(
        resource_group_name="syncgroupcrud-65440",
        server_name="syncgroupcrud-8475",
        database_name="syncgroupcrud-4328",
        sync_group_name="syncgroupcrud-3187",
    )
    for hub_schema in hub_schemas:
        print(hub_schema)
# x-ms-original-file: specification/sql/resource-manager/Microsoft.Sql/preview/2020-11-01-preview/examples/SyncGroupGetHubSchema.json
if __name__ == "__main__":
    main()
| {
"content_hash": "63bdc85ef40060a5b0ca86cd52c675f4",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 133,
"avg_line_length": 33.888888888888886,
"alnum_prop": 0.719672131147541,
"repo_name": "Azure/azure-sdk-for-python",
"id": "85716fad4a252ef5090ab4739c06314b83f5b809",
"size": "1688",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/sql/azure-mgmt-sql/generated_samples/get_a_hub_database_schema..py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
import sys
from typing import Any, Callable, Dict, IO, Optional, TypeVar, Union, overload
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._management_policies_operations import (
build_create_or_update_request,
build_delete_request,
build_get_request,
)
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ManagementPoliciesOperations:
    """
    .. warning::
        **DO NOT** instantiate this class directly.
        Instead, you should access the following operations through
        :class:`~azure.mgmt.storage.v2021_04_01.aio.StorageManagementClient`'s
        :attr:`management_policies` attribute.
    """
    models = _models
    def __init__(self, *args, **kwargs) -> None:
        # Client plumbing (pipeline, config, serializers) is injected
        # positionally or by keyword by the generated service client.
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
    @distributed_trace_async
    async def get(
        self,
        resource_group_name: str,
        account_name: str,
        management_policy_name: Union[str, _models.ManagementPolicyName],
        **kwargs: Any
    ) -> _models.ManagementPolicy:
        """Gets the managementpolicy associated with the specified storage account.

        :param resource_group_name: The name of the resource group within the user's subscription. The
         name is case insensitive. Required.
        :type resource_group_name: str
        :param account_name: The name of the storage account within the specified resource group.
         Storage account names must be between 3 and 24 characters in length and use numbers and
         lower-case letters only. Required.
        :type account_name: str
        :param management_policy_name: The name of the Storage Account Management Policy. It should
         always be 'default'. "default" Required.
        :type management_policy_name: str or
         ~azure.mgmt.storage.v2021_04_01.models.ManagementPolicyName
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ManagementPolicy or the result of cls(response)
        :rtype: ~azure.mgmt.storage.v2021_04_01.models.ManagementPolicy
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # HTTP statuses mapped to azure-core exception types; callers can
        # extend/override the mapping via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-04-01")) # type: Literal["2021-04-01"]
        cls = kwargs.pop("cls", None) # type: ClsType[_models.ManagementPolicy]
        request = build_get_request(
            resource_group_name=resource_group_name,
            account_name=account_name,
            management_policy_name=management_policy_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            template_url=self.get.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url) # type: ignore
        pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize("ManagementPolicy", pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/managementPolicies/{managementPolicyName}"} # type: ignore
    @overload
    async def create_or_update(
        self,
        resource_group_name: str,
        account_name: str,
        management_policy_name: Union[str, _models.ManagementPolicyName],
        properties: _models.ManagementPolicy,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> _models.ManagementPolicy:
        """Sets the managementpolicy to the specified storage account.

        :param resource_group_name: The name of the resource group within the user's subscription. The
         name is case insensitive. Required.
        :type resource_group_name: str
        :param account_name: The name of the storage account within the specified resource group.
         Storage account names must be between 3 and 24 characters in length and use numbers and
         lower-case letters only. Required.
        :type account_name: str
        :param management_policy_name: The name of the Storage Account Management Policy. It should
         always be 'default'. "default" Required.
        :type management_policy_name: str or
         ~azure.mgmt.storage.v2021_04_01.models.ManagementPolicyName
        :param properties: The ManagementPolicy set to a storage account. Required.
        :type properties: ~azure.mgmt.storage.v2021_04_01.models.ManagementPolicy
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ManagementPolicy or the result of cls(response)
        :rtype: ~azure.mgmt.storage.v2021_04_01.models.ManagementPolicy
        :raises ~azure.core.exceptions.HttpResponseError:
        """
    @overload
    async def create_or_update(
        self,
        resource_group_name: str,
        account_name: str,
        management_policy_name: Union[str, _models.ManagementPolicyName],
        properties: IO,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> _models.ManagementPolicy:
        """Sets the managementpolicy to the specified storage account.

        :param resource_group_name: The name of the resource group within the user's subscription. The
         name is case insensitive. Required.
        :type resource_group_name: str
        :param account_name: The name of the storage account within the specified resource group.
         Storage account names must be between 3 and 24 characters in length and use numbers and
         lower-case letters only. Required.
        :type account_name: str
        :param management_policy_name: The name of the Storage Account Management Policy. It should
         always be 'default'. "default" Required.
        :type management_policy_name: str or
         ~azure.mgmt.storage.v2021_04_01.models.ManagementPolicyName
        :param properties: The ManagementPolicy set to a storage account. Required.
        :type properties: IO
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ManagementPolicy or the result of cls(response)
        :rtype: ~azure.mgmt.storage.v2021_04_01.models.ManagementPolicy
        :raises ~azure.core.exceptions.HttpResponseError:
        """
    @distributed_trace_async
    async def create_or_update(
        self,
        resource_group_name: str,
        account_name: str,
        management_policy_name: Union[str, _models.ManagementPolicyName],
        properties: Union[_models.ManagementPolicy, IO],
        **kwargs: Any
    ) -> _models.ManagementPolicy:
        """Sets the managementpolicy to the specified storage account.

        :param resource_group_name: The name of the resource group within the user's subscription. The
         name is case insensitive. Required.
        :type resource_group_name: str
        :param account_name: The name of the storage account within the specified resource group.
         Storage account names must be between 3 and 24 characters in length and use numbers and
         lower-case letters only. Required.
        :type account_name: str
        :param management_policy_name: The name of the Storage Account Management Policy. It should
         always be 'default'. "default" Required.
        :type management_policy_name: str or
         ~azure.mgmt.storage.v2021_04_01.models.ManagementPolicyName
        :param properties: The ManagementPolicy set to a storage account. Is either a model type or a
         IO type. Required.
        :type properties: ~azure.mgmt.storage.v2021_04_01.models.ManagementPolicy or IO
        :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
         Default value is None.
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ManagementPolicy or the result of cls(response)
        :rtype: ~azure.mgmt.storage.v2021_04_01.models.ManagementPolicy
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-04-01")) # type: Literal["2021-04-01"]
        content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
        cls = kwargs.pop("cls", None) # type: ClsType[_models.ManagementPolicy]
        content_type = content_type or "application/json"
        # The body is sent raw when given a stream/bytes, otherwise it is
        # serialized from the ManagementPolicy model.
        _json = None
        _content = None
        if isinstance(properties, (IO, bytes)):
            _content = properties
        else:
            _json = self._serialize.body(properties, "ManagementPolicy")
        request = build_create_or_update_request(
            resource_group_name=resource_group_name,
            account_name=account_name,
            management_policy_name=management_policy_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            content_type=content_type,
            json=_json,
            content=_content,
            template_url=self.create_or_update.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url) # type: ignore
        pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize("ManagementPolicy", pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    create_or_update.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/managementPolicies/{managementPolicyName}"} # type: ignore
    @distributed_trace_async
    async def delete( # pylint: disable=inconsistent-return-statements
        self,
        resource_group_name: str,
        account_name: str,
        management_policy_name: Union[str, _models.ManagementPolicyName],
        **kwargs: Any
    ) -> None:
        """Deletes the managementpolicy associated with the specified storage account.

        :param resource_group_name: The name of the resource group within the user's subscription. The
         name is case insensitive. Required.
        :type resource_group_name: str
        :param account_name: The name of the storage account within the specified resource group.
         Storage account names must be between 3 and 24 characters in length and use numbers and
         lower-case letters only. Required.
        :type account_name: str
        :param management_policy_name: The name of the Storage Account Management Policy. It should
         always be 'default'. "default" Required.
        :type management_policy_name: str or
         ~azure.mgmt.storage.v2021_04_01.models.ManagementPolicyName
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None or the result of cls(response)
        :rtype: None
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-04-01")) # type: Literal["2021-04-01"]
        cls = kwargs.pop("cls", None) # type: ClsType[None]
        request = build_delete_request(
            resource_group_name=resource_group_name,
            account_name=account_name,
            management_policy_name=management_policy_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            template_url=self.delete.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url) # type: ignore
        pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response
        # 204 means the policy was already absent; both are success here.
        if response.status_code not in [200, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    delete.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/managementPolicies/{managementPolicyName}"} # type: ignore
| {
"content_hash": "1a1e487b4c7d9bd98ce6d84c268e1a72",
"timestamp": "",
"source": "github",
"line_count": 353,
"max_line_length": 225,
"avg_line_length": 46.04532577903683,
"alnum_prop": 0.6651285837332349,
"repo_name": "Azure/azure-sdk-for-python",
"id": "286ef9889935cb8bf5a753e1e3cd24bcc78e9d9a",
"size": "16754",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/storage/azure-mgmt-storage/azure/mgmt/storage/v2021_04_01/aio/operations/_management_policies_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
"""Implementation of Nash Conv metric for a policy.
In the context of mean field games, the Nash Conv is the difference between:
- the value of a policy against the distribution of that policy,
- and the best response against the distribution of the policy.
"""
from open_spiel.python import policy as policy_std
from open_spiel.python.mfg import value
from open_spiel.python.mfg.algorithms import best_response_value
from open_spiel.python.mfg.algorithms import distribution
from open_spiel.python.mfg.algorithms import policy_value
class NashConv(object):
  """Computes the Nash Conv of a policy."""

  def __init__(self, game, policy: policy_std.Policy, root_state=None):
    """Initializes the nash conv.

    Args:
      game: The game to analyze.
      policy: A `policy.Policy` object.
      root_state: The state of the game at which to start. If `None`, the game
        root state is used.
    """
    self._game = game
    self._policy = policy
    self._root_states = (game.new_initial_states()
                         if root_state is None else [root_state])
    self._distrib = distribution.DistributionPolicy(
        self._game, self._policy, root_state=root_state)
    self._pi_value = policy_value.PolicyValue(
        self._game,
        self._distrib,
        self._policy,
        value.TabularValueFunction(self._game),
        root_state=root_state)
    self._br_value = best_response_value.BestResponse(
        self._game,
        self._distrib,
        value.TabularValueFunction(self._game),
        root_state=root_state)

  def nash_conv(self):
    """Returns the nash conv.

    Returns:
      A float representing the nash conv for the policy.
    """
    return sum(
        self._br_value.eval_state(state) - self._pi_value.eval_state(state)
        for state in self._root_states)

  def br_values(self):
    """Returns the best response values to the policy distribution.

    Returns:
      A List[float] representing the best response values for a policy
      distribution.
    """
    return [self._br_value.eval_state(state) for state in self._root_states]

  @property
  def distribution(self) -> distribution.DistributionPolicy:
    """The policy-induced distribution used by both value computations."""
    return self._distrib
| {
"content_hash": "e9048d5cab9f9ade65a02b2839056c47",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 78,
"avg_line_length": 32.18840579710145,
"alnum_prop": 0.6771724448446645,
"repo_name": "deepmind/open_spiel",
"id": "d1eed724828419aa911d7727ee42277094cf5ddc",
"size": "2815",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "open_spiel/python/mfg/algorithms/nash_conv.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "6640"
},
{
"name": "C++",
"bytes": "4649139"
},
{
"name": "CMake",
"bytes": "78467"
},
{
"name": "Go",
"bytes": "18010"
},
{
"name": "Julia",
"bytes": "16727"
},
{
"name": "Jupyter Notebook",
"bytes": "148663"
},
{
"name": "Python",
"bytes": "2823600"
},
{
"name": "Rust",
"bytes": "18562"
},
{
"name": "Shell",
"bytes": "51087"
}
],
"symlink_target": ""
} |
import mparser
import environment
import coredata
import dependencies
import mlog
import build
import optinterpreter
import wrap
import mesonlib
import os, sys, platform, subprocess, shutil, uuid, re
from functools import wraps
import importlib
class InterpreterException(coredata.MesonException):
    """Base class for errors raised while interpreting a Meson build file."""
    pass
class InvalidCode(InterpreterException):
    """Raised for Meson code the interpreter cannot handle (e.g. an unknown method)."""
    pass
class InvalidArguments(InterpreterException):
    """Raised when a function or method receives arguments of the wrong kind or type."""
    pass
# Decorators for method calls.
def check_stringlist(a, msg='Arguments must be strings.'):
    """Raise InvalidArguments unless a is a list whose items are all strings."""
    if not isinstance(a, list):
        raise InvalidArguments('Argument not a list.')
    for item in a:
        if not isinstance(item, str):
            raise InvalidArguments(msg)
def noPosargs(f):
    """Decorator that rejects any positional arguments to the wrapped function."""
    @wraps(f)
    def wrapped(self, node, args, kwargs):
        if args:
            raise InvalidArguments('Function does not take positional arguments.')
        return f(self, node, args, kwargs)
    return wrapped
def noKwargs(f):
    """Decorator that rejects any keyword arguments to the wrapped function."""
    @wraps(f)
    def wrapped(self, node, args, kwargs):
        if kwargs:
            raise InvalidArguments('Function does not take keyword arguments.')
        return f(self, node, args, kwargs)
    return wrapped
def stringArgs(f):
    """Decorator requiring all positional arguments to be strings."""
    @wraps(f)
    def wrapped(self, node, args, kwargs):
        assert isinstance(args, list)
        check_stringlist(args)
        return f(self, node, args, kwargs)
    return wrapped
def stringifyUserArguments(args):
    """Render a user value (string, int, or nested list) as DSL source text.

    Strings are single-quoted, ints are printed bare, lists recurse.
    Any other type raises InvalidArguments.
    """
    if isinstance(args, str):
        return "'%s'" % args
    if isinstance(args, int):
        return str(args)
    if isinstance(args, list):
        rendered = ', '.join(stringifyUserArguments(item) for item in args)
        return '[%s]' % rendered
    raise InvalidArguments('Function accepts only strings, integers, lists and lists thereof.')
class InterpreterObject:
    """Base for every object exposed to the build DSL.

    Subclasses register callables in self.methods; method_call dispatches
    DSL method invocations through that table.
    """

    def __init__(self):
        self.methods = {}

    def method_call(self, method_name, args, kwargs):
        handler = self.methods.get(method_name)
        if handler is None:
            raise InvalidCode('Unknown method "%s" in object.' % method_name)
        return handler(args, kwargs)
class TryRunResultHolder(InterpreterObject):
    """Exposes the result of compiler.run() (returncode/compiled/stdout/stderr)."""

    def __init__(self, res):
        super().__init__()
        self.res = res
        self.methods.update({'returncode' : self.returncode_method,
                             'compiled' : self.compiled_method,
                             'stdout' : self.stdout_method,
                             'stderr' : self.stderr_method,
                             })

    def returncode_method(self, args, kwargs):
        return self.res.returncode

    def compiled_method(self, args, kwargs):
        return self.res.compiled

    def stdout_method(self, args, kwargs):
        return self.res.stdout

    def stderr_method(self, args, kwargs):
        return self.res.stderr
class RunProcess(InterpreterObject):
    """Runs an external command for run_command() and captures its output.

    Resolution order for the command (see run_command below): the literal
    argv, then a PATH lookup of the first element, then a script relative
    to the current source directory.
    """
    def __init__(self, command_array, source_dir, build_dir, subdir, in_builddir=False):
        super().__init__()
        pc = self.run_command(command_array, source_dir, build_dir, subdir, in_builddir)
        (stdout, stderr) = pc.communicate()
        self.returncode = pc.returncode
        # Normalize Windows line endings so build files see uniform text.
        self.stdout = stdout.decode().replace('\r\n', '\n')
        self.stderr = stderr.decode().replace('\r\n', '\n')
        self.methods.update({'returncode' : self.returncode_method,
                             'stdout' : self.stdout_method,
                             'stderr' : self.stderr_method,
                             })
    def run_command(self, command_array, source_dir, build_dir, subdir, in_builddir):
        """Start the subprocess, trying three interpretations of the command.

        The child inherits the parent environment plus MESON_SOURCE_ROOT,
        MESON_BUILD_ROOT and MESON_SUBDIR; cwd is the current subdir inside
        either the build tree or the source tree depending on in_builddir.
        """
        cmd_name = command_array[0]
        env = {'MESON_SOURCE_ROOT' : source_dir,
               'MESON_BUILD_ROOT' : build_dir,
               'MESON_SUBDIR' : subdir}
        if in_builddir:
            cwd = os.path.join(build_dir, subdir)
        else:
            cwd = os.path.join(source_dir, subdir)
        child_env = os.environ.copy()
        child_env.update(env)
        # Attempt 1: run the argv exactly as given.
        try:
            return subprocess.Popen(command_array, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                                    env=child_env, cwd=cwd)
        except FileNotFoundError:
            pass
        # Was not a command, is a program in path?
        exe = shutil.which(cmd_name)
        if exe is not None:
            command_array = [exe] + command_array[1:]
            return subprocess.Popen(command_array, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                                    env=child_env, cwd=cwd)
        # No? Maybe it is a script in the source tree.
        fullpath = os.path.join(source_dir, subdir, cmd_name)
        command_array = [fullpath] + command_array[1:]
        try:
            return subprocess.Popen(command_array, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                                    env=child_env, cwd=cwd)
        except FileNotFoundError:
            raise InterpreterException('Could not execute command "%s".' % cmd_name)
    def returncode_method(self, args, kwargs):
        return self.returncode
    def stdout_method(self, args, kwargs):
        return self.stdout
    def stderr_method(self, args, kwargs):
        return self.stderr
class ConfigureFileHolder(InterpreterObject):
    """Holds a build.ConfigureFile created by configure_file()."""

    def __init__(self, subdir, sourcename, targetname, configuration_data):
        super().__init__()
        self.held_object = build.ConfigureFile(subdir, sourcename, targetname, configuration_data)
class ConfigurationDataHolder(InterpreterObject):
    """Mutable key/value store for configure_file().

    The object becomes immutable after it has been consumed by
    configure_file(); further set calls then raise.
    """

    def __init__(self):
        super().__init__()
        self.used = False  # Set once consumed by configure_file().
        self.held_object = build.ConfigurationData()
        self.methods.update({'set': self.set_method,
                             'set10': self.set10_method,
                             })

    def is_used(self):
        return self.used

    def mark_used(self):
        self.used = True

    def validate_args(self, args):
        """Check and unpack a (name, value) argument pair."""
        if len(args) != 2:
            raise InterpreterException("Configuration set requires 2 arguments.")
        if self.used:
            raise InterpreterException("Can not set values on configuration object that has been used.")
        (name, val) = args
        if not isinstance(name, str):
            raise InterpreterException("First argument to set must be a string.")
        return (name, val)

    def set_method(self, args, kwargs):
        (name, val) = self.validate_args(args)
        self.held_object.values[name] = val

    def set10_method(self, args, kwargs):
        # Booleans become the integers 1/0 for C-style config headers.
        (name, val) = self.validate_args(args)
        self.held_object.values[name] = 1 if val else 0

    def get(self, name):
        return self.held_object.values[name]

    def keys(self):
        return self.held_object.values.keys()
# Interpreter objects can not be pickled so we must have
# these wrappers.
class DependencyHolder(InterpreterObject):
    """Wraps an external dependency; found() reflects the actual lookup result."""

    def __init__(self, dep):
        super().__init__()
        self.held_object = dep
        self.methods.update({'found' : self.found_method})

    def found_method(self, args, kwargs):
        return self.held_object.found()
class InternalDependencyHolder(InterpreterObject):
    """Wraps a declare_dependency() result."""

    def __init__(self, dep):
        super().__init__()
        self.held_object = dep
        self.methods.update({'found' : self.found_method})

    def found_method(self, args, kwargs):
        # Internal dependencies are defined by this project, so always found.
        return True
class ExternalProgramHolder(InterpreterObject):
    """Wraps an external program located (or not) by find_program()."""

    def __init__(self, ep):
        super().__init__()
        self.held_object = ep
        self.methods.update({'found': self.found_method})

    def found_method(self, args, kwargs):
        return self.found()

    def found(self):
        return self.held_object.found()

    def get_command(self):
        return self.held_object.fullpath

    def get_name(self):
        return self.held_object.name
class ExternalLibraryHolder(InterpreterObject):
    """Wraps an external library located (or not) by find_library()."""

    def __init__(self, el):
        super().__init__()
        self.held_object = el
        self.methods.update({'found': self.found_method})

    def found(self):
        return self.held_object.found()

    def found_method(self, args, kwargs):
        return self.found()

    def get_filename(self):
        return self.held_object.fullpath

    def get_name(self):
        return self.held_object.name

    def get_compile_args(self):
        return self.held_object.get_compile_args()

    def get_link_args(self):
        return self.held_object.get_link_args()

    def get_exe_args(self):
        return self.held_object.get_exe_args()
class GeneratorHolder(InterpreterObject):
    """Wraps a build.Generator created by generator()."""

    def __init__(self, interpreter, args, kwargs):
        super().__init__()
        self.interpreter = interpreter
        self.held_object = build.Generator(args, kwargs)
        self.methods.update({'process' : self.process_method})

    def process_method(self, args, kwargs):
        """Implements generator.process(files...); returns a GeneratedListHolder."""
        if len(kwargs) > 0:
            raise InvalidArguments('Process does not take keyword arguments.')
        check_stringlist(args)
        gl = GeneratedListHolder(self)
        # Fix: the original built and discarded a list comprehension purely
        # for its side effects; use a plain loop instead.
        for a in args:
            gl.add_file(os.path.join(self.interpreter.subdir, a))
        return gl
class GeneratedListHolder(InterpreterObject):
    """Holds a build.GeneratedList, constructed either from a GeneratorHolder
    or from an already-built GeneratedList."""

    def __init__(self, arg1):
        super().__init__()
        if isinstance(arg1, GeneratorHolder):
            wrapped = build.GeneratedList(arg1.held_object)
        else:
            wrapped = arg1
        self.held_object = wrapped

    def add_file(self, a):
        self.held_object.add_file(a)
class BuildMachine(InterpreterObject):
    """Implements the build_machine object: properties of the machine
    running Meson itself."""

    def __init__(self):
        super().__init__()
        self.methods.update({'system' : self.system_method,
                             'cpu' : self.cpu_method,
                             'endian' : self.endian_method,
                             })

    def cpu_method(self, args, kwargs):
        # Python's platform module is inconsistent: it may report the same
        # x86 CPU as 'x86', 'i686' or similar, so canonicalize the value.
        raw = platform.machine().lower()
        if raw.startswith('i') and raw.endswith('86'):
            return 'x86'
        # This might be wrong; perhaps the more specific string such as
        # 'armv7l' should be returned. Awaiting user feedback.
        if raw.startswith('arm'):
            return 'arm'
        # Add fixes here as bugs are reported.
        return raw

    def system_method(self, args, kwargs):
        return platform.system().lower()

    def endian_method(self, args, kwargs):
        return sys.byteorder
# This class will provide both host_machine and
# target_machine
class CrossMachineInfo(InterpreterObject):
    """Backs host_machine and target_machine with data from a cross file."""

    def __init__(self, cross_info):
        super().__init__()
        self.info = cross_info
        self.methods.update({'system' : self.system_method,
                             'cpu' : self.cpu_method,
                             'endian' : self.endian_method,
                             })

    def system_method(self, args, kwargs):
        return self.info['system']

    def cpu_method(self, args, kwargs):
        return self.info['cpu']

    def endian_method(self, args, kwargs):
        return self.info['endian']
class IncludeDirsHolder(InterpreterObject):
    """Holds a build.IncludeDirs created by include_directories()."""

    def __init__(self, curdir, dirs):
        super().__init__()
        self.held_object = build.IncludeDirs(curdir, dirs)
class Headers(InterpreterObject):
    """Represents an install_headers() invocation."""

    def __init__(self, src_subdir, sources, kwargs):
        super().__init__()
        self.sources = sources
        self.source_subdir = src_subdir
        self.install_subdir = kwargs.get('subdir', '')
        self.custom_install_dir = kwargs.get('install_dir', None)
        # install_dir is optional, but when given it must be a string.
        if self.custom_install_dir is not None and not isinstance(self.custom_install_dir, str):
            raise InterpreterException('Custom_install_dir must be a string.')

    def set_install_subdir(self, subdir):
        self.install_subdir = subdir

    def get_install_subdir(self):
        return self.install_subdir

    def get_source_subdir(self):
        return self.source_subdir

    def get_sources(self):
        return self.sources

    def get_custom_install_dir(self):
        return self.custom_install_dir
class Data(InterpreterObject):
    """Represents an install_data() invocation: plain files copied at install time."""
    def __init__(self, in_sourcetree, source_subdir, sources, kwargs):
        InterpreterObject.__init__(self)
        self.in_sourcetree = in_sourcetree
        self.source_subdir = source_subdir
        self.sources = sources
        # The 'sources' keyword argument is merged with the positional ones.
        kwsource = kwargs.get('sources', [])
        if not isinstance(kwsource, list):
            kwsource = [kwsource]
        self.sources += kwsource
        check_stringlist(self.sources)
        self.install_dir = kwargs.get('install_dir', None)
        # NOTE(review): unlike Headers/Man, a missing install_dir (None) also
        # fails this isinstance check, making install_dir effectively mandatory
        # here while the message names "Custom_install_dir" — confirm intended.
        if not isinstance(self.install_dir, str):
            raise InterpreterException('Custom_install_dir must be a string.')
    def get_source_subdir(self):
        return self.source_subdir
    def get_sources(self):
        return self.sources
    def get_install_dir(self):
        return self.install_dir
class InstallDir(InterpreterObject):
    """Records an install_subdir() request: a source subtree to copy on install."""

    def __init__(self, source_subdir, installable_subdir, install_dir):
        super().__init__()
        self.source_subdir = source_subdir
        self.installable_subdir = installable_subdir
        self.install_dir = install_dir
class Man(InterpreterObject):
    """Represents an install_man() invocation.

    Bug fixed: a man file whose suffix is not numeric (e.g. 'foo.txt')
    previously crashed with a bare ValueError from int(); it now raises
    the intended InvalidArguments.
    """

    def __init__(self, source_subdir, sources, kwargs):
        InterpreterObject.__init__(self)
        self.source_subdir = source_subdir
        self.sources = sources
        self.validate_sources()
        if len(kwargs) > 1:
            raise InvalidArguments('Man function takes at most one keyword arguments.')
        self.custom_install_dir = kwargs.get('install_dir', None)
        if self.custom_install_dir is not None and not isinstance(self.custom_install_dir, str):
            raise InterpreterException('Custom_install_dir must be a string.')

    def validate_sources(self):
        """Require every source to end in a man-section suffix .1 through .8."""
        for s in self.sources:
            try:
                num = int(s.split('.')[-1])
            except ValueError:
                num = 0  # Non-numeric suffix; rejected below with the real message.
            if num < 1 or num > 8:
                raise InvalidArguments('Man file must have a file extension of a number between 1 and 8')

    def get_custom_install_dir(self):
        return self.custom_install_dir

    def get_sources(self):
        return self.sources

    def get_source_subdir(self):
        return self.source_subdir
class GeneratedObjectsHolder(InterpreterObject):
    """Holds object files extracted from a build target via extract_objects()."""
    def __init__(self, held_object):
        super().__init__()
        self.held_object = held_object
class BuildTargetHolder(InterpreterObject):
    """Common base holder for executables, libraries and jars."""

    def __init__(self, target, interp):
        super().__init__()
        self.held_object = target
        self.interpreter = interp
        self.methods.update({'extract_objects' : self.extract_objects_method,
                             'extract_all_objects' : self.extract_all_objects_method,
                             'get_id': self.get_id_method,
                             'outdir' : self.outdir_method,
                             })

    def is_cross(self):
        return self.held_object.is_cross()

    def outdir_method(self, args, kwargs):
        # The backend decides where the target's outputs end up.
        return self.interpreter.backend.get_target_dir(self.held_object)

    def extract_objects_method(self, args, kwargs):
        objs = self.held_object.extract_objects(args)
        return GeneratedObjectsHolder(objs)

    def extract_all_objects_method(self, args, kwargs):
        objs = self.held_object.extract_all_objects()
        return GeneratedObjectsHolder(objs)

    def get_id_method(self, args, kwargs):
        return self.held_object.get_id()
class ExecutableHolder(BuildTargetHolder):
    """Holder for executable build targets."""
    def __init__(self, target, interp):
        super().__init__(target, interp)
class StaticLibraryHolder(BuildTargetHolder):
    """Holder for static library build targets."""
    def __init__(self, target, interp):
        super().__init__(target, interp)
class SharedLibraryHolder(BuildTargetHolder):
    """Holder for shared library build targets."""
    def __init__(self, target, interp):
        super().__init__(target, interp)
class JarHolder(BuildTargetHolder):
    """Holder for Java jar build targets."""
    def __init__(self, target, interp):
        super().__init__(target, interp)
class CustomTargetHolder(InterpreterObject):
    """Holds a custom_target() result.

    Bug fixed: __init__ previously skipped InterpreterObject.__init__, so
    self.methods was never created and any DSL method_call on this holder
    crashed with AttributeError instead of raising InvalidCode.
    """

    def __init__(self, object_to_hold):
        super().__init__()
        self.held_object = object_to_hold

    def is_cross(self):
        return self.held_object.is_cross()

    def extract_objects_method(self, args, kwargs):
        gobjs = self.held_object.extract_objects(args)
        return GeneratedObjectsHolder(gobjs)
class RunTargetHolder(InterpreterObject):
    """Holds a build.RunTarget created by run_target().

    Bug fixed: __init__ previously skipped InterpreterObject.__init__, so
    self.methods was never created and method_call would crash with
    AttributeError instead of raising InvalidCode.
    """

    def __init__(self, name, command, args, subdir):
        super().__init__()
        self.held_object = build.RunTarget(name, command, args, subdir)
class Test(InterpreterObject):
    """Describes one test() registration: the executable to run plus options
    (parallelism, arguments, environment, expected failure, valgrind args,
    timeout)."""
    def __init__(self, name, exe, is_parallel, cmd_args, env, should_fail, valgrind_args, timeout):
        InterpreterObject.__init__(self)
        self.name = name
        self.exe = exe
        self.is_parallel = is_parallel
        self.cmd_args = cmd_args
        self.env = env
        self.should_fail = should_fail
        self.valgrind_args = valgrind_args
        self.timeout = timeout
    def get_exe(self):
        return self.exe
    def get_name(self):
        return self.name
class SubprojectHolder(InterpreterObject):
    """Exposes a finished subproject; get_variable reads its variables."""

    def __init__(self, subinterpreter):
        super().__init__()
        self.subinterpreter = subinterpreter
        self.methods.update({'get_variable' : self.get_variable_method,
                             })

    def get_variable_method(self, args, kwargs):
        if len(args) != 1:
            raise InterpreterException('Get_variable takes one argument.')
        [varname] = args
        if not isinstance(varname, str):
            raise InterpreterException('Get_variable takes a string argument.')
        return self.subinterpreter.variables[varname]
class CompilerHolder(InterpreterObject):
    """Exposes per-language compiler checks (sizeof, has_header, run, ...) to
    build definition files.

    Bug fixed: has_member_method's prefix-type error previously said
    "Prefix argument of has_function must be a string.", naming the wrong
    method. The repeated YES/NO logging marker is also factored into a
    private helper.
    """

    def __init__(self, compiler, env):
        InterpreterObject.__init__(self)
        self.compiler = compiler
        self.environment = env
        self.methods.update({'compiles': self.compiles_method,
                             'get_id': self.get_id_method,
                             'sizeof': self.sizeof_method,
                             'has_header': self.has_header_method,
                             'run' : self.run_method,
                             'has_function' : self.has_function_method,
                             'has_member' : self.has_member_method,
                             'has_type' : self.has_type_method,
                             'alignment' : self.alignment_method,
                             'version' : self.version_method,
                             'cmd_array' : self.cmd_array_method,
                             })

    def _yes_no(self, flag):
        # Colourized marker shared by the boolean check loggers.
        return mlog.green('YES') if flag else mlog.red('NO')

    def version_method(self, args, kwargs):
        """Return the compiler's version string."""
        return self.compiler.version

    def cmd_array_method(self, args, kwargs):
        """Return the argv used to invoke this compiler."""
        return self.compiler.exelist

    def alignment_method(self, args, kwargs):
        """Return (and log) the alignment of the named type."""
        if len(args) != 1:
            raise InterpreterException('Alignment method takes exactly one positional argument.')
        check_stringlist(args)
        typename = args[0]
        result = self.compiler.alignment(typename, self.environment)
        mlog.log('Checking for alignment of "', mlog.bold(typename), '": ', result, sep='')
        return result

    def run_method(self, args, kwargs):
        """Compile and run a snippet; return a TryRunResultHolder."""
        if len(args) != 1:
            raise InterpreterException('Run method takes exactly one positional argument.')
        check_stringlist(args)
        code = args[0]
        testname = kwargs.get('name', '')
        if not isinstance(testname, str):
            raise InterpreterException('Testname argument must be a string.')
        result = self.compiler.run(code)
        if len(testname) > 0:
            if not result.compiled:
                h = mlog.red('DID NOT COMPILE')
            elif result.returncode == 0:
                h = mlog.green('YES')
            else:
                h = mlog.red('NO (%d)' % result.returncode)
            mlog.log('Checking if "', mlog.bold(testname), '" runs : ', h, sep='')
        return TryRunResultHolder(result)

    def get_id_method(self, args, kwargs):
        return self.compiler.get_id()

    def has_member_method(self, args, kwargs):
        """Check whether a struct/type has the named member."""
        if len(args) != 2:
            raise InterpreterException('Has_member takes exactly two arguments.')
        check_stringlist(args)
        typename = args[0]
        membername = args[1]
        prefix = kwargs.get('prefix', '')
        if not isinstance(prefix, str):
            # Fixed: this message previously said "has_function".
            raise InterpreterException('Prefix argument of has_member must be a string.')
        had = self.compiler.has_member(typename, membername, prefix)
        hadtxt = self._yes_no(had)
        mlog.log('Checking whether type "', mlog.bold(typename),
                 '" has member "', mlog.bold(membername), '": ', hadtxt, sep='')
        return had

    def has_function_method(self, args, kwargs):
        """Check whether the named function is available."""
        if len(args) != 1:
            raise InterpreterException('Has_function takes exactly one argument.')
        check_stringlist(args)
        funcname = args[0]
        prefix = kwargs.get('prefix', '')
        if not isinstance(prefix, str):
            raise InterpreterException('Prefix argument of has_function must be a string.')
        had = self.compiler.has_function(funcname, prefix, self.environment)
        hadtxt = self._yes_no(had)
        mlog.log('Checking for function "', mlog.bold(funcname), '": ', hadtxt, sep='')
        return had

    def has_type_method(self, args, kwargs):
        """Check whether the named type exists."""
        if len(args) != 1:
            raise InterpreterException('Has_type takes exactly one argument.')
        check_stringlist(args)
        typename = args[0]
        prefix = kwargs.get('prefix', '')
        if not isinstance(prefix, str):
            raise InterpreterException('Prefix argument of has_type must be a string.')
        had = self.compiler.has_type(typename, prefix)
        hadtxt = self._yes_no(had)
        mlog.log('Checking for type "', mlog.bold(typename), '": ', hadtxt, sep='')
        return had

    def sizeof_method(self, args, kwargs):
        """Return (and log) sizeof the named element."""
        if len(args) != 1:
            raise InterpreterException('Sizeof takes exactly one argument.')
        check_stringlist(args)
        element = args[0]
        prefix = kwargs.get('prefix', '')
        if not isinstance(prefix, str):
            raise InterpreterException('Prefix argument of sizeof must be a string.')
        esize = self.compiler.sizeof(element, prefix, self.environment)
        mlog.log('Checking for size of "%s": %d' % (element, esize))
        return esize

    def compiles_method(self, args, kwargs):
        """Check whether a code snippet compiles."""
        if len(args) != 1:
            raise InterpreterException('compiles method takes exactly one argument.')
        check_stringlist(args)
        string = args[0]
        testname = kwargs.get('name', '')
        if not isinstance(testname, str):
            raise InterpreterException('Testname argument must be a string.')
        result = self.compiler.compiles(string)
        if len(testname) > 0:
            h = self._yes_no(result)
            mlog.log('Checking if "', mlog.bold(testname), '" compiles : ', h, sep='')
        return result

    def has_header_method(self, args, kwargs):
        """Check whether the named header is usable."""
        if len(args) != 1:
            raise InterpreterException('has_header method takes exactly one argument.')
        check_stringlist(args)
        string = args[0]
        haz = self.compiler.has_header(string)
        mlog.log('Has header "%s":' % string, self._yes_no(haz))
        return haz
class ModuleState:
    """Plain attribute namespace handed to extension-module methods; the
    attributes are assigned ad hoc before each module call."""
    pass
class ModuleHolder(InterpreterObject):
    """Bridges an imported extension module into the DSL; method calls are
    forwarded to the module with a snapshot of interpreter state."""
    def __init__(self, modname, module, interpreter):
        InterpreterObject.__init__(self)
        self.modname = modname
        self.held_object = module
        self.interpreter = interpreter
    def method_call(self, method_name, args, kwargs):
        """Look up method_name on the module and invoke it with a ModuleState."""
        try:
            fn = getattr(self.held_object, method_name)
        except AttributeError:
            raise InvalidArguments('Module %s does not have method %s.' % (self.modname, method_name))
        # Snapshot the interpreter state the module is allowed to see.
        state = ModuleState()
        state.build_to_src = os.path.relpath(self.interpreter.environment.get_source_dir(),
                                             self.interpreter.environment.get_build_dir())
        state.subdir = self.interpreter.subdir
        state.environment = self.interpreter.environment
        state.project_name = self.interpreter.build.project_name
        state.project_version = self.interpreter.build.dep_manifest[self.interpreter.active_projectname]
        state.compilers = self.interpreter.build.compilers
        state.targets = self.interpreter.build.targets
        state.headers = self.interpreter.build.get_headers()
        state.man = self.interpreter.build.get_man()
        state.pkgconfig_gens = self.interpreter.build.pkgconfig_gens
        state.global_args = self.interpreter.build.global_args
        value = fn(state, args, kwargs)
        # Module return values are wrapped back into interpreter holders.
        return self.interpreter.module_method_callback(value)
class MesonMain(InterpreterObject):
    """Implements the 'meson' builtin object exposed to build files.

    Bug fixed: add_install_script's argument-count error previously said
    "Set_install_script", naming a method that does not exist.
    """

    def __init__(self, build, interpreter):
        InterpreterObject.__init__(self)
        self.build = build
        self.interpreter = interpreter
        self.methods.update({'get_compiler': self.get_compiler_method,
                             'is_cross_build' : self.is_cross_build_method,
                             'has_exe_wrapper' : self.has_exe_wrapper_method,
                             'is_unity' : self.is_unity_method,
                             'is_subproject' : self.is_subproject_method,
                             'current_source_dir' : self.current_source_dir_method,
                             'current_build_dir' : self.current_build_dir_method,
                             'source_root' : self.source_root_method,
                             'build_root' : self.build_root_method,
                             'add_install_script' : self.add_install_script_method,
                             'install_dependency_manifest': self.install_dependency_manifest_method,
                             'project_version': self.project_version_method,
                             })

    def add_install_script_method(self, args, kwargs):
        """Register a script (relative to the current source dir) to run at install time."""
        if len(args) != 1:
            # Fixed: this message previously named "Set_install_script".
            raise InterpreterException('Add_install_script takes exactly one argument.')
        check_stringlist(args)
        scriptbase = args[0]
        scriptfile = os.path.join(self.interpreter.environment.source_dir,
                                  self.interpreter.subdir, scriptbase)
        if not os.path.isfile(scriptfile):
            raise InterpreterException('Can not find install script %s.' % scriptbase)
        self.build.install_scripts.append(build.InstallScript([scriptfile]))

    def current_source_dir_method(self, args, kwargs):
        src = self.interpreter.environment.source_dir
        sub = self.interpreter.subdir
        if sub == '':
            return src
        return os.path.join(src, sub)

    def current_build_dir_method(self, args, kwargs):
        src = self.interpreter.environment.build_dir
        sub = self.interpreter.subdir
        if sub == '':
            return src
        return os.path.join(src, sub)

    def source_root_method(self, args, kwargs):
        return self.interpreter.environment.source_dir

    def build_root_method(self, args, kwargs):
        return self.interpreter.environment.build_dir

    def has_exe_wrapper_method(self, args, kwargs):
        if self.is_cross_build_method(None, None) and 'binaries' in self.build.environment.cross_info.config:
            return 'exe_wrap' in self.build.environment.cross_info.config['binaries']
        # Not cross-compiling (or no binaries section), so executables run
        # natively and no wrapper is needed.
        return True # This is semantically confusing.

    def is_cross_build_method(self, args, kwargs):
        return self.build.environment.is_cross_build()

    def get_compiler_method(self, args, kwargs):
        """Return the CompilerHolder for a language; 'native' picks host vs cross."""
        if len(args) != 1:
            raise InterpreterException('get_compiler_method must have one and only one argument.')
        cname = args[0]
        native = kwargs.get('native', None)
        if native is None:
            # Default to the compiler set that builds for the target machine.
            if self.build.environment.is_cross_build():
                native = False
            else:
                native = True
        if not isinstance(native, bool):
            raise InterpreterException('Type of "native" must be a boolean.')
        if native:
            clist = self.build.compilers
        else:
            clist = self.build.cross_compilers
        for c in clist:
            if c.get_language() == cname:
                return CompilerHolder(c, self.build.environment)
        raise InterpreterException('Tried to access compiler for unspecified language "%s".' % cname)

    def is_unity_method(self, args, kwargs):
        return self.build.environment.coredata.unity

    def is_subproject_method(self, args, kwargs):
        return self.interpreter.is_subproject()

    def install_dependency_manifest_method(self, args, kwargs):
        if len(args) != 1:
            raise InterpreterException('Must specify manifest install file name')
        if not isinstance(args[0], str):
            raise InterpreterException('Argument must be a string.')
        self.build.dep_manifest_name = args[0]

    def project_version_method(self, args, kwargs):
        return self.build.dep_manifest[self.interpreter.active_projectname]
class Interpreter():
    def __init__(self, build, backend, subproject='', subdir='', subproject_dir='subprojects'):
        """Parse the build file for (sub)project *subproject* in *subdir* and
        prepare builtins; raises InvalidArguments/InvalidCode on bad input."""
        self.build = build
        self.backend = backend
        self.subproject = subproject
        self.subdir = subdir
        self.source_root = build.environment.get_source_dir()
        self.subproject_dir = subproject_dir
        # Merge per-project options from meson_options.txt, if present.
        option_file = os.path.join(self.source_root, self.subdir, 'meson_options.txt')
        if os.path.exists(option_file):
            oi = optinterpreter.OptionInterpreter(self.subproject, \
                                                  self.build.environment.cmd_line_options)
            oi.process(option_file)
            self.build.environment.merge_options(oi.options)
        mesonfile = os.path.join(self.source_root, self.subdir, environment.build_filename)
        if not os.path.isfile(mesonfile):
            raise InvalidArguments('Missing Meson file in %s' % mesonfile)
        code = open(mesonfile).read()
        if len(code.strip()) == 0:
            raise InvalidCode('Builder file is empty.')
        assert(isinstance(code, str))
        try:
            self.ast = mparser.Parser(code).parse()
        except coredata.MesonException as me:
            # Attach the file name so errors point at the build file.
            me.file = environment.build_filename
            raise me
        self.sanity_check_ast()
        self.variables = {}
        self.builtin = {}
        self.builtin['build_machine'] = BuildMachine()
        # Native build: host and target machines are the build machine.
        if not self.build.environment.is_cross_build():
            self.builtin['host_machine'] = self.builtin['build_machine']
            self.builtin['target_machine'] = self.builtin['build_machine']
        else:
            # Cross build: use cross-file data, falling back sensibly.
            cross_info = self.build.environment.cross_info
            if cross_info.has_host():
                self.builtin['host_machine'] = CrossMachineInfo(cross_info.config['host_machine'])
            else:
                self.builtin['host_machine'] = self.builtin['build_machine']
            if cross_info.has_target():
                self.builtin['target_machine'] = CrossMachineInfo(cross_info.config['target_machine'])
            else:
                self.builtin['target_machine'] = self.builtin['host_machine']
        self.builtin['meson'] = MesonMain(build, self)
        self.environment = build.environment
        self.build_func_dict()
        # Files that, when changed, require reconfiguration.
        self.build_def_files = [os.path.join(self.subdir, environment.build_filename)]
        self.coredata = self.environment.get_coredata()
        self.generators = []
        self.visited_subdirs = {}
        self.global_args_frozen = False
        self.subprojects = {}
        self.subproject_stack = []
    def build_func_dict(self):
        """Populate self.funcs: the dispatch table mapping every DSL function
        name to its implementation method."""
        self.funcs = {'project' : self.func_project,
                      'message' : self.func_message,
                      'error' : self.func_error,
                      'executable': self.func_executable,
                      'dependency' : self.func_dependency,
                      'static_library' : self.func_static_lib,
                      'shared_library' : self.func_shared_lib,
                      'jar' : self.func_jar,
                      'build_target': self.func_build_target,
                      'custom_target' : self.func_custom_target,
                      'run_target' : self.func_run_target,
                      'generator' : self.func_generator,
                      'test' : self.func_test,
                      'install_headers' : self.func_install_headers,
                      'install_man' : self.func_install_man,
                      'subdir' : self.func_subdir,
                      'install_data' : self.func_install_data,
                      'install_subdir' : self.func_install_subdir,
                      'configure_file' : self.func_configure_file,
                      'include_directories' : self.func_include_directories,
                      'add_global_arguments' : self.func_add_global_arguments,
                      'add_languages' : self.func_add_languages,
                      'find_program' : self.func_find_program,
                      'find_library' : self.func_find_library,
                      'configuration_data' : self.func_configuration_data,
                      'run_command' : self.func_run_command,
                      'gettext' : self.func_gettext,
                      'option' : self.func_option,
                      'get_option' : self.func_get_option,
                      'subproject' : self.func_subproject,
                      'pkgconfig_gen' : self.func_pkgconfig_gen,
                      'vcs_tag' : self.func_vcs_tag,
                      'set_variable' : self.func_set_variable,
                      'import' : self.func_import,
                      'files' : self.func_files,
                      'declare_dependency': self.func_declare_dependency,
                      }
def module_method_callback(self, invalues):
unwrap_single = False
if invalues is None:
return
if not isinstance(invalues, list):
unwrap_single = True
invalues = [invalues]
outvalues = []
for v in invalues:
if isinstance(v, build.CustomTarget):
if v.name in self.build.targets:
raise InterpreterException('Tried to create target %s which already exists.' % v.name)
self.build.targets[v.name] = v
outvalues.append(CustomTargetHolder(v))
elif isinstance(v, int) or isinstance(v, str):
outvalues.append(v)
elif isinstance(v, build.Executable):
if v.name in self.build.targets:
raise InterpreterException('Tried to create target %s which already exists.' % v.name)
self.build.targets[v.name] = v
outvalues.append(ExecutableHolder(v))
elif isinstance(v, list):
outvalues.append(self.module_method_callback(v))
elif isinstance(v, build.GeneratedList):
outvalues.append(GeneratedListHolder(v))
elif isinstance(v, build.RunTarget):
if v.name in self.build.targets:
raise InterpreterException('Tried to create target %s which already exists.' % v.name)
self.build.targets[v.name] = v
elif isinstance(v, build.InstallScript):
self.build.install_scripts.append(v)
else:
print(v)
raise InterpreterException('Module returned a value of unknown type.')
if len(outvalues) == 1 and unwrap_single:
return outvalues[0]
return outvalues
    def get_build_def_files(self):
        """Return build-definition files whose change requires reconfiguration."""
        return self.build_def_files
    def get_variables(self):
        """Return the dict of user-defined build-file variables."""
        return self.variables
def sanity_check_ast(self):
if not isinstance(self.ast, mparser.CodeBlockNode):
raise InvalidCode('AST is of invalid type. Possibly a bug in the parser.')
if len(self.ast.lines) == 0:
raise InvalidCode('No statements in code.')
first = self.ast.lines[0]
if not isinstance(first, mparser.FunctionNode) or first.func_name != 'project':
raise InvalidCode('First statement must be a call to project')
    def run(self):
        """Interpret the entire AST, then log the number of targets defined."""
        self.evaluate_codeblock(self.ast)
        mlog.log('Build targets in project:', mlog.bold(str(len(self.build.targets))))
def evaluate_codeblock(self, node):
if node is None:
return
if not isinstance(node, mparser.CodeBlockNode):
e = InvalidCode('Tried to execute a non-codeblock. Possibly a bug in the parser.')
e.lineno = node.lineno
e.colno = node.colno
raise e
statements = node.lines
i = 0
while i < len(statements):
cur = statements[i]
try:
self.evaluate_statement(cur)
except Exception as e:
if not(hasattr(e, 'lineno')):
e.lineno = cur.lineno
e.colno = cur.colno
e.file = os.path.join(self.subdir, 'meson.build')
raise e
i += 1 # In THE FUTURE jump over blocks and stuff.
def get_variable(self, varname):
if varname in self.builtin:
return self.builtin[varname]
if varname in self.variables:
return self.variables[varname]
raise InvalidCode('Unknown variable "%s".' % varname)
    def func_set_variable(self, node, args, kwargs):
        """Implements set_variable(name, value)."""
        if len(args) != 2:
            raise InvalidCode('Set_variable takes two arguments.')
        varname = args[0]
        # to_native is defined elsewhere in this class; presumably it unwraps
        # parsed/holder values into plain ones — confirm before relying on it.
        value = self.to_native(args[1])
        self.set_variable(varname, value)
@stringArgs
@noKwargs
def func_import(self, node, args, kwargs):
if len(args) != 1:
raise InvalidCode('Import takes one argument.')
modname = args[0]
if not modname in self.environment.coredata.modules:
module = importlib.import_module('modules.' + modname).initialize()
self.environment.coredata.modules[modname] = module
return ModuleHolder(modname, self.environment.coredata.modules[modname], self)
@stringArgs
@noKwargs
def func_files(self, node, args, kwargs):
return [mesonlib.File.from_source_file(self.environment.source_dir, self.subdir, fname) for fname in args]
@noPosargs
def func_declare_dependency(self, node, args, kwargs):
incs = kwargs.get('include_directories', [])
if not isinstance(incs, list):
incs = [incs]
libs = kwargs.get('link_with', [])
if not isinstance(libs, list):
libs = [libs]
sources = kwargs.get('sources', [])
if not isinstance(sources, list):
sources = [sources]
sources = self.source_strings_to_files(self.flatten(sources))
dep = dependencies.InternalDependency(incs, libs, sources)
return InternalDependencyHolder(dep)
def set_variable(self, varname, variable):
if variable is None:
raise InvalidCode('Can not assign None to variable.')
if not isinstance(varname, str):
raise InvalidCode('First argument to set_variable must be a string.')
if not self.is_assignable(variable):
raise InvalidCode('Assigned value not of assignable type.')
if re.fullmatch('[_a-zA-Z][_0-9a-zA-Z]*', varname) is None:
raise InvalidCode('Invalid variable name: ' + varname)
if varname in self.builtin:
raise InvalidCode('Tried to overwrite internal variable "%s"' % varname)
self.variables[varname] = variable
    def evaluate_statement(self, cur):
        """Evaluate a single AST node and return its value.

        Dispatches on the node's type; the ordering of the isinstance checks
        is significant only in that every node type must be matched before
        the final elementary-type fallback.
        """
        if isinstance(cur, mparser.FunctionNode):
            return self.function_call(cur)
        elif isinstance(cur, mparser.AssignmentNode):
            return self.assignment(cur)
        elif isinstance(cur, mparser.MethodNode):
            return self.method_call(cur)
        elif isinstance(cur, mparser.StringNode):
            return cur.value
        elif isinstance(cur, mparser.BooleanNode):
            return cur.value
        elif isinstance(cur, mparser.IfClauseNode):
            return self.evaluate_if(cur)
        elif isinstance(cur, mparser.IdNode):
            # Bare identifier: resolve against builtins and user variables.
            return self.get_variable(cur.value)
        elif isinstance(cur, mparser.ComparisonNode):
            return self.evaluate_comparison(cur)
        elif isinstance(cur, mparser.ArrayNode):
            return self.evaluate_arraystatement(cur)
        elif isinstance(cur, mparser.NumberNode):
            return cur.value
        elif isinstance(cur, mparser.AndNode):
            return self.evaluate_andstatement(cur)
        elif isinstance(cur, mparser.OrNode):
            return self.evaluate_orstatement(cur)
        elif isinstance(cur, mparser.NotNode):
            return self.evaluate_notstatement(cur)
        elif isinstance(cur, mparser.UMinusNode):
            return self.evaluate_uminusstatement(cur)
        elif isinstance(cur, mparser.ArithmeticNode):
            return self.evaluate_arithmeticstatement(cur)
        elif isinstance(cur, mparser.ForeachClauseNode):
            return self.evaluate_foreach(cur)
        elif isinstance(cur, mparser.PlusAssignmentNode):
            return self.evaluate_plusassign(cur)
        elif isinstance(cur, mparser.IndexNode):
            return self.evaluate_indexing(cur)
        elif self.is_elementary_type(cur):
            # Already a plain value (e.g. produced by a prior evaluation).
            return cur
        else:
            raise InvalidCode("Unknown statement.")
def validate_arguments(self, args, argcount, arg_types):
    """Check that *args* has the expected length and element types.

    argcount of None skips the length check entirely. Each entry in
    arg_types constrains the argument at the same position; a None
    entry accepts any type. Extra positions (in either list) are not
    checked, matching the original min-length pairing.

    Raises InvalidArguments on any mismatch; returns None on success.
    """
    if argcount is not None:
        if argcount != len(args):
            raise InvalidArguments('Expected %d arguments, got %d.' %
                                   (argcount, len(args)))
    # zip pairs only up to the shorter of the two lists, which is
    # exactly the original range(min(len(args), len(arg_types))).
    for actual, wanted in zip(args, arg_types):
        if wanted is not None:  # was "!= None"; identity test per PEP 8
            if not isinstance(actual, wanted):
                raise InvalidArguments('Incorrect argument type.')
def func_run_command(self, node, args, kwargs):
    """Implement run_command(): run an external command at configure time.

    First argument is an external program object or a command-name
    string; remaining arguments must be strings. Returns a RunProcess
    wrapper holding the result.
    """
    if len(args) < 1:
        raise InterpreterException('Not enough arguments')
    cmd = args[0]
    cargs = args[1:]
    if isinstance(cmd, ExternalProgramHolder):
        cmd = cmd.get_command()
    elif isinstance(cmd, str):
        cmd = [cmd]
    else:
        raise InterpreterException('First argument is of incorrect type.')
    check_stringlist(cargs, 'Run_command arguments must be strings.')
    args = cmd + cargs
    # in_builddir chooses whether the command executes in the build tree
    # instead of the source tree.
    in_builddir = kwargs.get('in_builddir', False)
    if not isinstance(in_builddir, bool):
        raise InterpreterException('in_builddir must be boolean.')
    return RunProcess(args, self.environment.source_dir, self.environment.build_dir,
                      self.subdir, in_builddir)
@stringArgs
def func_gettext(self, nodes, args, kwargs):
    """Implement gettext(): register a translation (.pot) definition.

    Only one gettext definition per build is currently supported.
    """
    if len(args) != 1:
        raise InterpreterException('Gettext requires one positional argument (package name).')
    packagename = args[0]
    # NOTE(review): languages defaults to None when the keyword is absent;
    # presumably check_stringlist rejects that — confirm it gives a clear error.
    languages = kwargs.get('languages', None)
    check_stringlist(languages, 'Argument languages must be a list of strings.')
    # TODO: check that elements are strings
    if len(self.build.pot) > 0:
        raise InterpreterException('More than one gettext definition currently not supported.')
    self.build.pot.append((packagename, languages, self.subdir))
def func_option(self, nodes, args, kwargs):
    """option() is only legal in the options file, never in a build file."""
    raise InterpreterException('Tried to call option() in build description file. All options must be in the option file.')
def func_pkgconfig_gen(self, nodes, args, kwargs):
    """Implement pkgconfig_gen(): register a pkg-config file generator.

    Keyword arguments: libraries (library objects), subdirs (header
    subdirectories, default ['.']), version, name (required),
    filebase (defaults to name), description (required).
    """
    if len(args) > 0:
        raise InterpreterException('Pkgconfig_gen takes no positional arguments.')
    libs = kwargs.get('libraries', [])
    if not isinstance(libs, list):
        libs = [libs]
    for l in libs:
        # Single isinstance with a tuple replaces the chained "or" checks.
        if not isinstance(l, (SharedLibraryHolder, StaticLibraryHolder)):
            raise InterpreterException('Library argument not a library object.')
    subdirs = kwargs.get('subdirs', ['.'])
    if not isinstance(subdirs, list):
        subdirs = [subdirs]
    for h in subdirs:
        if not isinstance(h, str):
            raise InterpreterException('Header argument not string.')
    version = kwargs.get('version', '')
    if not isinstance(version, str):
        raise InterpreterException('Version must be a string.')
    name = kwargs.get('name', None)
    if not isinstance(name, str):
        raise InterpreterException('Name not specified.')
    filebase = kwargs.get('filebase', name)
    if not isinstance(filebase, str):
        raise InterpreterException('Filebase must be a string.')
    description = kwargs.get('description', None)
    if not isinstance(description, str):
        raise InterpreterException('Description is not a string.')
    p = build.PkgConfigGenerator(libs, subdirs, name, description, version, filebase)
    self.build.pkgconfig_gens.append(p)
@stringArgs
@noKwargs
def func_subproject(self, nodes, args, kwargs):
    """Implement subproject(): resolve, download if needed, and evaluate a
    subproject, returning its SubprojectHolder.

    Recursion and re-entry are detected via subproject_stack; an already
    evaluated subproject is returned from the cache.
    """
    if len(args) != 1:
        raise InterpreterException('Subproject takes exactly one argument')
    dirname = args[0]
    if self.subdir != '':
        segs = os.path.split(self.subdir)
        if len(segs) != 2 or segs[0] != self.subproject_dir:
            raise InterpreterException('Subprojects must be defined at the root directory.')
    if dirname in self.subproject_stack:
        fullstack = self.subproject_stack + [dirname]
        incpath = ' => '.join(fullstack)
        raise InterpreterException('Recursive include of subprojects: %s.' % incpath)
    if dirname in self.subprojects:
        return self.subprojects[dirname]
    r = wrap.Resolver(os.path.join(self.build.environment.get_source_dir(), self.subproject_dir))
    resolved = r.resolve(dirname)
    if resolved is None:
        raise InterpreterException('Subproject directory does not exist and can not be downloaded.')
    subdir = os.path.join(self.subproject_dir, resolved)
    os.makedirs(os.path.join(self.build.environment.get_build_dir(), subdir), exist_ok=True)
    # Once a subproject runs, global arguments may no longer change.
    self.global_args_frozen = True
    mlog.log('\nExecuting subproject ', mlog.bold(dirname), '.\n', sep='')
    subi = Interpreter(self.build, self.backend, dirname, subdir, self.subproject_dir)
    subi.subprojects = self.subprojects
    subi.subproject_stack = self.subproject_stack + [dirname]
    # Preserve the current project name around the nested run().
    current_active = self.active_projectname
    subi.run()
    self.active_projectname = current_active
    mlog.log('\nSubproject', mlog.bold(dirname), 'finished.')
    self.build.subprojects[dirname] = True
    self.subprojects.update(subi.subprojects)
    self.subprojects[dirname] = SubprojectHolder(subi)
    self.build_def_files += subi.build_def_files
    return self.subprojects[dirname]
@stringArgs
@noKwargs
def func_get_option(self, nodes, args, kwargs):
    """Implement get_option(): look up a builtin or user option value.

    In a subproject, non-builtin option names are namespaced with the
    subproject prefix before lookup.
    """
    if len(args) != 1:
        raise InterpreterException('Argument required for get_option.')
    optname = args[0]
    if optname not in coredata.builtin_options and self.is_subproject():
        optname = self.subproject + ':' + optname
    try:
        return self.environment.get_coredata().get_builtin_option(optname)
    except RuntimeError:
        # Not a builtin option; fall through to user options.
        pass
    if optname not in self.environment.coredata.user_options:
        raise InterpreterException('Tried to access unknown option "%s".' % optname)
    return self.environment.coredata.user_options[optname].value
@noKwargs
def func_configuration_data(self, node, args, kwargs):
    """Implement configuration_data(): create an empty, mutable
    configuration object for use with configure_file()."""
    if args:
        raise InterpreterException('configuration_data takes no arguments')
    return ConfigurationDataHolder()
@stringArgs
def func_project(self, node, args, kwargs):
    """Implement project(): declare the project name and its languages.

    args[0] is the project name; the remaining arguments are language
    names passed to add_languages(). May be called at most once per
    (sub)project. Keyword arguments: version, subproject_dir (top-level
    project only).
    """
    if len(args) < 2:
        raise InvalidArguments('Not enough arguments to project(). Needs at least the project name and one language')
    if not self.is_subproject():
        self.build.project_name = args[0]
    self.active_projectname = args[0]
    self.build.dep_manifest[args[0]] = kwargs.get('version', 'undefined')
    if self.subproject in self.build.projects:
        raise InvalidCode('Second call to project().')
    if not self.is_subproject() and 'subproject_dir' in kwargs:
        self.subproject_dir = kwargs['subproject_dir']
    self.build.projects[self.subproject] = args[0]
    mlog.log('Project name: ', mlog.bold(args[0]), sep='')
    self.add_languages(node, args[1:])
    langs = self.coredata.compilers.keys()
    # Vala compiles through C, so a C compiler must also be present.
    if 'vala' in langs:
        if 'c' not in langs:  # was "not 'c' in langs"; PEP 8 operator form
            raise InterpreterException('Compiling Vala requires a C compiler')
@noKwargs
@stringArgs
def func_add_languages(self, node, args, kwargs):
    """Implement add_languages(): thin wrapper over add_languages()."""
    self.add_languages(node, args)
@noKwargs
def func_message(self, node, args, kwargs):
    """Implement message(): print a user-visible message to the log.

    Accepts exactly one argument: a string, an integer, or a list of
    such values.
    """
    # reduce arguments again to avoid flattening posargs
    (posargs, kwargs) = self.reduce_arguments(node.args)
    if len(posargs) != 1:
        raise InvalidArguments('Expected 1 argument, got %d' % len(posargs))
    arg = posargs[0]
    if isinstance(arg, list):
        argstr = stringifyUserArguments(arg)
    elif isinstance(arg, str):
        argstr = arg
    elif isinstance(arg, int):
        argstr = str(arg)
    else:
        raise InvalidArguments('Function accepts only strings, integers, lists and lists thereof.')
    mlog.log(mlog.bold('Message:'), argstr)
    return
@noKwargs
def func_error(self, node, args, kwargs):
    """Implement error(): abort configuration with the given message."""
    self.validate_arguments(args, 1, [str])
    raise InterpreterException('Error encountered: ' + args[0])
def add_languages(self, node, args):
    """Detect, sanity-check and register compilers for each language.

    Previously detected compilers are reused from coredata. For cross
    builds a cross compiler is detected as well; languages that compile
    to platform-independent output (Java, C#, Vala) reuse the native
    compiler as the cross compiler.
    """
    need_cross_compiler = self.environment.is_cross_build() and self.environment.cross_info.need_cross_compiler()
    for lang in args:
        lang = lang.lower()
        if lang in self.coredata.compilers:
            # Already detected in an earlier call; reuse the cached objects.
            comp = self.coredata.compilers[lang]
            cross_comp = self.coredata.cross_compilers.get(lang, None)
        else:
            cross_comp = None
            if lang == 'c':
                comp = self.environment.detect_c_compiler(False)
                if need_cross_compiler:
                    cross_comp = self.environment.detect_c_compiler(True)
            elif lang == 'cpp':
                comp = self.environment.detect_cpp_compiler(False)
                if need_cross_compiler:
                    cross_comp = self.environment.detect_cpp_compiler(True)
            elif lang == 'objc':
                comp = self.environment.detect_objc_compiler(False)
                if need_cross_compiler:
                    cross_comp = self.environment.detect_objc_compiler(True)
            elif lang == 'objcpp':
                comp = self.environment.detect_objcpp_compiler(False)
                if need_cross_compiler:
                    cross_comp = self.environment.detect_objcpp_compiler(True)
            elif lang == 'java':
                comp = self.environment.detect_java_compiler()
                if need_cross_compiler:
                    cross_comp = comp # Java is platform independent.
            elif lang == 'cs':
                comp = self.environment.detect_cs_compiler()
                if need_cross_compiler:
                    cross_comp = comp # C# is platform independent.
            elif lang == 'vala':
                comp = self.environment.detect_vala_compiler()
                if need_cross_compiler:
                    cross_comp = comp # Vala is too (I think).
            elif lang == 'rust':
                comp = self.environment.detect_rust_compiler()
                if need_cross_compiler:
                    cross_comp = comp # FIXME, probably not correct.
            elif lang == 'fortran':
                comp = self.environment.detect_fortran_compiler(False)
                if need_cross_compiler:
                    cross_comp = self.environment.detect_fortran_compiler(True)
            else:
                raise InvalidCode('Tried to use unknown language "%s".' % lang)
            # Only newly detected compilers are sanity-checked and cached.
            comp.sanity_check(self.environment.get_scratch_dir())
            self.coredata.compilers[lang] = comp
            if cross_comp is not None:
                cross_comp.sanity_check(self.environment.get_scratch_dir())
                self.coredata.cross_compilers[lang] = cross_comp
        mlog.log('Native %s compiler: ' % lang, mlog.bold(' '.join(comp.get_exelist())), ' (%s %s)' % (comp.id, comp.version), sep='')
        if not comp.get_language() in self.coredata.external_args:
            # Pick up CFLAGS/LDFLAGS-style environment variables once.
            (ext_compile_args, ext_link_args) = environment.get_args_from_envvars(comp.get_language())
            self.coredata.external_args[comp.get_language()] = ext_compile_args
            self.coredata.external_link_args[comp.get_language()] = ext_link_args
        self.build.add_compiler(comp)
        if need_cross_compiler:
            mlog.log('Cross %s compiler: ' % lang, mlog.bold(' '.join(cross_comp.get_exelist())), ' (%s %s)' % (cross_comp.id, cross_comp.version), sep='')
            self.build.add_cross_compiler(cross_comp)
        if self.environment.is_cross_build() and not need_cross_compiler:
            self.build.add_cross_compiler(comp)
def func_find_program(self, node, args, kwargs):
    """Implement find_program(): locate an executable by name.

    Results are cached in coredata; scripts are also searched relative
    to the current subdirectory. With required=True (the default) a
    missing program raises.
    """
    self.validate_arguments(args, 1, [str])
    required = kwargs.get('required', True)
    if not isinstance(required, bool):
        raise InvalidArguments('"required" argument must be a boolean.')
    exename = args[0]
    if exename in self.coredata.ext_progs and\
       self.coredata.ext_progs[exename].found():
        return ExternalProgramHolder(self.coredata.ext_progs[exename])
    # Search for scripts relative to current subdir.
    search_dir = os.path.join(self.environment.get_source_dir(), self.subdir)
    extprog = dependencies.ExternalProgram(exename, search_dir=search_dir)
    progobj = ExternalProgramHolder(extprog)
    # Cache even a not-found result; re-checked via found() above.
    self.coredata.ext_progs[exename] = extprog
    if required and not progobj.found():
        raise InvalidArguments('Program "%s" not found.' % exename)
    return progobj
def func_find_library(self, node, args, kwargs):
    """Implement find_library(): locate an external library by name.

    Keyword arguments: required (bool, default True) and dirs (absolute
    search paths). Found libraries are cached in coredata.
    """
    self.validate_arguments(args, 1, [str])
    required = kwargs.get('required', True)
    if not isinstance(required, bool):
        raise InvalidArguments('"required" argument must be a boolean.')
    libname = args[0]
    if libname in self.coredata.ext_libs and\
       self.coredata.ext_libs[libname].found():
        return ExternalLibraryHolder(self.coredata.ext_libs[libname])
    if 'dirs' in kwargs:
        search_dirs = kwargs['dirs']
        if not isinstance(search_dirs, list):
            search_dirs = [search_dirs]
        for i in search_dirs:
            if not isinstance(i, str):
                raise InvalidCode('Directory entry is not a string.')
            if not os.path.isabs(i):
                raise InvalidCode('Search directory %s is not an absolute path.' % i)
    else:
        search_dirs = None
    result = self.environment.find_library(libname, search_dirs)
    extlib = dependencies.ExternalLibrary(libname, result)
    libobj = ExternalLibraryHolder(extlib)
    # Cache even a not-found result; re-checked via found() above.
    self.coredata.ext_libs[libname] = extlib
    if required and not libobj.found():
        raise InvalidArguments('External library "%s" not found.' % libname)
    return libobj
def func_dependency(self, node, args, kwargs):
    """Implement dependency(): find an external dependency by name.

    Results are cached per (name, kwargs) identifier; a cached
    not-found entry triggers a fresh lookup.
    """
    self.validate_arguments(args, 1, [str])
    name = args[0]
    identifier = dependencies.get_dep_identifier(name, kwargs)
    if identifier in self.coredata.deps:
        dep = self.coredata.deps[identifier]
    else:
        dep = dependencies.Dependency() # Returns always false for dep.found()
    if not dep.found():
        dep = dependencies.find_external_dependency(name, self.environment, kwargs)
        self.coredata.deps[identifier] = dep
    return DependencyHolder(dep)
def func_executable(self, node, args, kwargs):
    """Implement executable(): create an executable build target."""
    return self.build_target(node, args, kwargs, ExecutableHolder)
def func_static_lib(self, node, args, kwargs):
    """Implement static_library(): create a static library target."""
    return self.build_target(node, args, kwargs, StaticLibraryHolder)
def func_shared_lib(self, node, args, kwargs):
    """Implement shared_library(): create a shared library target."""
    return self.build_target(node, args, kwargs, SharedLibraryHolder)
def func_jar(self, node, args, kwargs):
    """Implement jar(): create a Java jar target."""
    return self.build_target(node, args, kwargs, JarHolder)
def func_build_target(self, node, args, kwargs):
    """Implement build_target(): dispatch to the concrete target creator
    selected by the required target_type keyword argument."""
    if 'target_type' not in kwargs:
        raise InterpreterException('Missing target_type keyword argument')
    target_type = kwargs.pop('target_type')
    creators = {'executable': self.func_executable,
                'shared_library': self.func_shared_lib,
                'static_library': self.func_static_lib,
                'jar': self.func_jar,
                }
    if target_type in creators:
        return creators[target_type](node, args, kwargs)
    raise InterpreterException('Unknown target_type.')
def func_vcs_tag(self, node, args, kwargs):
    """Implement vcs_tag(): generate a file with the VCS revision substituted.

    Builds a custom target that runs the vcstagger.py helper, either
    with a user-supplied command or one auto-detected from the VCS in
    the source tree; the fallback string is used if the command fails.
    """
    fallback = kwargs.pop('fallback', None)
    if not isinstance(fallback, str):
        raise InterpreterException('Keyword argument must exist and be a string.')
    replace_string = kwargs.pop('replace_string', '@VCS_TAG@')
    regex_selector = '(.*)' # default regex selector for custom command: use complete output
    vcs_cmd = kwargs.get('command', None)
    if vcs_cmd and not isinstance(vcs_cmd, list):
        vcs_cmd = [vcs_cmd]
    source_dir = os.path.normpath(os.path.join(self.environment.get_source_dir(), self.subdir))
    if vcs_cmd:
        # Is the command an executable in path or maybe a script in the source tree?
        vcs_cmd[0] = shutil.which(vcs_cmd[0]) or os.path.join(source_dir, vcs_cmd[0])
    else:
        vcs = mesonlib.detect_vcs(source_dir)
        if vcs:
            mlog.log('Found %s repository at %s' % (vcs['name'], vcs['wc_dir']))
            vcs_cmd = vcs['get_rev'].split()
            regex_selector = vcs['rev_regex']
        else:
            vcs_cmd = [' '] # executing this cmd will fail in vcstagger.py and force to use the fallback string
    scriptfile = os.path.join(self.environment.get_script_dir(), 'vcstagger.py')
    # vcstagger.py parameters: infile, outfile, fallback, source_dir, replace_string, regex_selector, command...
    kwargs['command'] = [sys.executable, scriptfile, '@INPUT0@', '@OUTPUT0@', fallback, source_dir, replace_string, regex_selector] + vcs_cmd
    kwargs.setdefault('build_always', True)
    return self.func_custom_target(node, [kwargs['output']], kwargs)
@stringArgs
def func_custom_target(self, node, args, kwargs):
    """Implement custom_target(): register a user-defined build rule.

    All behavior is driven by kwargs, validated inside build.CustomTarget.
    """
    if len(args) != 1:
        raise InterpreterException('Incorrect number of arguments')
    name = args[0]
    tg = CustomTargetHolder(build.CustomTarget(name, self.subdir, kwargs))
    self.add_target(name, tg.held_object)
    return tg
@noKwargs
def func_run_target(self, node, args, kwargs):
    """Implement run_target(): a target that runs a command on demand.

    args = [name, command, command_args...]; each element must be a
    string or a build target (possibly wrapped in a holder).
    """
    if len(args) < 2:
        raise InterpreterException('Incorrect number of arguments')
    for i in args:
        try:
            # Unwrap holder objects for the type check only; the raw args
            # are still what gets passed to RunTargetHolder below.
            i = i.held_object
        except AttributeError:
            pass
        if not isinstance(i, (str, build.BuildTarget)):
            mlog.debug('Wrong type:', str(i))
            raise InterpreterException('Invalid argument to run_target.')
    name = args[0]
    command = args[1]
    cmd_args = args[2:]
    tg = RunTargetHolder(name, command, cmd_args, self.subdir)
    self.add_target(name, tg.held_object)
    return tg
def func_generator(self, node, args, kwargs):
    """Implement generator(): register a source-transforming generator."""
    gen = GeneratorHolder(self, args, kwargs)
    self.generators.append(gen)
    return gen
def func_test(self, node, args, kwargs):
    """Implement test(): register a test case.

    args = [name, executable]. Keyword arguments: is_parallel, args,
    env (list of KEY=VALUE strings), valgrind_args, should_fail,
    timeout (seconds, default 30).
    """
    if len(args) != 2:
        raise InterpreterException('Incorrect number of arguments')
    if not isinstance(args[0], str):
        raise InterpreterException('First argument of test must be a string.')
    if not isinstance(args[1], (ExecutableHolder, JarHolder, ExternalProgramHolder)):
        raise InterpreterException('Second argument must be executable.')
    par = kwargs.get('is_parallel', True)
    if not isinstance(par, bool):
        raise InterpreterException('Keyword argument is_parallel must be a boolean.')
    cmd_args = kwargs.get('args', [])
    if not isinstance(cmd_args, list):
        cmd_args = [cmd_args]
    for i in cmd_args:
        if not isinstance(i, (str, mesonlib.File)):
            raise InterpreterException('Command line arguments must be strings')
    envlist = kwargs.get('env', [])
    if not isinstance(envlist, list):
        envlist = [envlist]
    env = {}
    for e in envlist:
        if '=' not in e:
            raise InterpreterException('Env var definition must be of type key=val.')
        # Split on the first '=' only so values may contain '='.
        (k, val) = e.split('=', 1)
        k = k.strip()
        val = val.strip()
        if ' ' in k:
            raise InterpreterException('Env var key must not have spaces in it.')
        env[k] = val
    valgrind_args = kwargs.get('valgrind_args', [])
    if not isinstance(valgrind_args, list):
        valgrind_args = [valgrind_args]
    for a in valgrind_args:
        if not isinstance(a, str):
            raise InterpreterException('Valgrind_arg not a string.')
    should_fail = kwargs.get('should_fail', False)
    if not isinstance(should_fail, bool):
        raise InterpreterException('Keyword argument should_fail must be a boolean.')
    timeout = kwargs.get('timeout', 30)
    if not isinstance(timeout, int):
        raise InterpreterException('Timeout must be an integer.')
    t = Test(args[0], args[1].held_object, par, cmd_args, env, should_fail, valgrind_args, timeout)
    self.build.tests.append(t)
    mlog.debug('Adding test "', mlog.bold(args[0]), '".', sep='')
@stringArgs
def func_install_headers(self, node, args, kwargs):
    """Implement install_headers(): register headers for installation."""
    h = Headers(self.subdir, args, kwargs)
    self.build.headers.append(h)
    return h
@stringArgs
def func_install_man(self, node, args, kwargs):
    """Implement install_man(): register man pages for installation."""
    m = Man(self.subdir, args, kwargs)
    self.build.man.append(m)
    return m
@noKwargs
def func_subdir(self, node, args, kwargs):
    """Implement subdir(): descend into a subdirectory and evaluate its
    build definition file, restoring self.subdir afterwards.

    Each directory may be entered only once; '..' components and
    entering the subprojects directory are rejected.
    """
    self.validate_arguments(args, 1, [str])
    if '..' in args[0]:
        raise InvalidArguments('Subdir contains ..')
    if self.subdir == '' and args[0] == self.subproject_dir:
        raise InvalidArguments('Must not go into subprojects dir with subdir(), use subproject() instead.')
    prev_subdir = self.subdir
    subdir = os.path.join(prev_subdir, args[0])
    if subdir in self.visited_subdirs:
        raise InvalidArguments('Tried to enter directory "%s", which has already been visited.'\
                               % subdir)
    self.visited_subdirs[subdir] = True
    self.subdir = subdir
    # exist_ok replaces the old try/except FileExistsError, matching the
    # makedirs style used elsewhere in this file.
    os.makedirs(os.path.join(self.environment.build_dir, subdir), exist_ok=True)
    buildfilename = os.path.join(self.subdir, environment.build_filename)
    self.build_def_files.append(buildfilename)
    absname = os.path.join(self.environment.get_source_dir(), buildfilename)
    if not os.path.isfile(absname):
        raise InterpreterException('Nonexistant build def file %s.' % buildfilename)
    # Close the file handle promptly instead of leaking it until GC.
    with open(absname) as bdfile:
        code = bdfile.read()
    assert(isinstance(code, str))
    try:
        codeblock = mparser.Parser(code).parse()
    except coredata.MesonException as me:
        # Attach the file name so the error message points at the right file.
        me.file = buildfilename
        raise me
    self.evaluate_codeblock(codeblock)
    self.subdir = prev_subdir
@stringArgs
def func_install_data(self, node, args, kwargs):
    """Implement install_data(): register source-tree data files for install."""
    data = Data(True, self.subdir, args, kwargs)
    self.build.data.append(data)
    return data
@stringArgs
def func_install_subdir(self, node, args, kwargs):
    """Implement install_subdir(): install a whole directory tree.

    Requires the install_dir keyword argument (a string).
    """
    if len(args) != 1:
        raise InvalidArguments('Install_subdir requires exactly one argument.')
    if not 'install_dir' in kwargs:
        raise InvalidArguments('Missing keyword argument install_dir')
    install_dir = kwargs['install_dir']
    if not isinstance(install_dir, str):
        raise InvalidArguments('Keyword argument install_dir not a string.')
    idir = InstallDir(self.subdir, args[0], install_dir)
    self.build.install_dirs.append(idir)
    return idir
def func_configure_file(self, node, args, kwargs):
    """Implement configure_file(): produce an output file at configure time.

    Requires input and output keyword arguments, plus exactly one of
    'configuration' (variable substitution) or 'command' (external
    command). Optionally installs the result via install_dir.
    """
    if len(args) > 0:
        raise InterpreterException("configure_file takes only keyword arguments.")
    if not 'input' in kwargs:
        raise InterpreterException('Required keyword argument "input" not defined.')
    if not 'output' in kwargs:
        raise InterpreterException('Required keyword argument "output" not defined.')
    inputfile = kwargs['input']
    output = kwargs['output']
    if not isinstance(inputfile, str):
        raise InterpreterException('Input must be a string.')
    if not isinstance(output, str):
        raise InterpreterException('Output must be a string.')
    if 'configuration' in kwargs:
        conf = kwargs['configuration']
        if not isinstance(conf, ConfigurationDataHolder):
            raise InterpreterException('Argument "configuration" is not of type configuration_data')
        # Changing the input file must trigger a reconfigure.
        conffile = os.path.join(self.subdir, inputfile)
        if conffile not in self.build_def_files:
            self.build_def_files.append(conffile)
        os.makedirs(os.path.join(self.environment.build_dir, self.subdir), exist_ok=True)
        ifile_abs = os.path.join(self.environment.source_dir, self.subdir, inputfile)
        ofile_abs = os.path.join(self.environment.build_dir, self.subdir, output)
        mesonlib.do_conf_file(ifile_abs, ofile_abs, conf.held_object)
        conf.mark_used()
    elif 'command' in kwargs:
        res = self.func_run_command(node, kwargs['command'], {})
        if res.returncode != 0:
            raise InterpreterException('Running configure command failed.\n%s\n%s' %
                                       (res.stdout, res.stderr))
    else:
        raise InterpreterException('Configure_file must have either "configuration" or "command".')
    if isinstance(kwargs.get('install_dir', None), str):
        self.build.data.append(Data(False, self.subdir, [output], kwargs))
    return mesonlib.File.from_built_file(self.subdir, output)
@stringArgs
@noKwargs
def func_include_directories(self, node, args, kwargs):
    """Implement include_directories(): validate that every directory
    exists relative to the current subdir and wrap them in a holder."""
    absbase = os.path.join(self.environment.get_source_dir(), self.subdir)
    for a in args:
        absdir = os.path.join(absbase, a)
        if not os.path.isdir(absdir):
            raise InvalidArguments('Include dir %s does not exist.' % a)
    i = IncludeDirsHolder(self.subdir, args)
    return i
@stringArgs
def func_add_global_arguments(self, node, args, kwargs):
    """Implement add_global_arguments(): append compiler flags for one
    language, project-wide.

    Disallowed in subprojects and after any build target exists, so the
    flags apply uniformly to every target.
    """
    if self.subproject != '':
        raise InvalidCode('Global arguments can not be set in subprojects because there is no way to make that reliable.')
    if self.global_args_frozen:
        raise InvalidCode('Tried to set global arguments after a build target has been declared.\nThis is not permitted. Please declare all global arguments before your targets.')
    if not 'language' in kwargs:
        raise InvalidCode('Missing language definition in add_global_arguments')
    lang = kwargs['language'].lower()
    if lang in self.build.global_args:
        self.build.global_args[lang] += args
    else:
        self.build.global_args[lang] = args
def flatten(self, args):
    """Recursively flatten nested lists, unwrapping StringNodes to their
    values. Scalar inputs (string, node, interpreter object, int) are
    returned as-is rather than wrapped in a list."""
    if isinstance(args, mparser.StringNode):
        return args.value
    if isinstance(args, (str, InterpreterObject, int)):
        return args
    flat = []
    for item in args:
        if isinstance(item, list):
            # A nested list always flattens to a list; splice it in.
            flat += self.flatten(item)
        elif isinstance(item, mparser.StringNode):
            flat.append(item.value)
        else:
            flat.append(item)
    return flat
def source_strings_to_files(self, sources):
    """Convert plain string sources to mesonlib.File objects rooted at the
    current subdir; File, generated-list and custom-target entries pass
    through unchanged."""
    results = []
    for s in sources:
        if isinstance(s, mesonlib.File) or isinstance(s, GeneratedListHolder) or \
           isinstance(s, CustomTargetHolder):
            pass
        elif isinstance(s, str):
            s = mesonlib.File.from_source_file(self.environment.source_dir, self.subdir, s)
        else:
            raise InterpreterException("Source item is not string or File-type object.")
        results.append(s)
    return results
def add_target(self, name, tobj):
    """Register a build target, rejecting reserved and duplicate names,
    and assign it a stable GUID for project-file backends."""
    if name in coredata.forbidden_target_names:
        raise InvalidArguments('Target name "%s" is reserved for Meson\'s internal use. Please rename.'\
                               % name)
    # To permit an executable and a shared library to have the
    # same name, such as "foo.exe" and "libfoo.a".
    idname = tobj.get_id()
    if idname in self.build.targets:
        raise InvalidCode('Tried to create target "%s", but a target of that name already exists.' % name)
    self.build.targets[idname] = tobj
    if idname not in self.coredata.target_guids:
        self.coredata.target_guids[idname] = str(uuid.uuid4()).upper()
def build_target(self, node, args, kwargs, targetholder):
    """Shared implementation behind executable(), shared_library(),
    static_library() and jar().

    args = [name, sources...]; sources may also come via the 'sources'
    keyword argument. Flattens sources/objects/dependencies, converts
    source strings to File objects, checks they exist, then constructs
    and registers the concrete target class for *targetholder*.
    """
    name = args[0]
    sources = args[1:]
    if self.environment.is_cross_build():
        # native: true forces a host-machine build even when cross compiling.
        if kwargs.get('native', False):
            is_cross = False
        else:
            is_cross = True
    else:
        is_cross = False
    try:
        kw_src = self.flatten(kwargs['sources'])
        if not isinstance(kw_src, list):
            kw_src = [kw_src]
    except KeyError:
        kw_src = []
    sources += kw_src
    sources = self.source_strings_to_files(sources)
    objs = self.flatten(kwargs.get('objects', []))
    kwargs['dependencies'] = self.flatten(kwargs.get('dependencies', []))
    if not isinstance(objs, list):
        objs = [objs]
    self.check_sources_exist(os.path.join(self.source_root, self.subdir), sources)
    if targetholder is ExecutableHolder:
        targetclass = build.Executable
    elif targetholder is SharedLibraryHolder:
        targetclass = build.SharedLibrary
    elif targetholder is StaticLibraryHolder:
        targetclass = build.StaticLibrary
    elif targetholder is JarHolder:
        targetclass = build.Jar
    else:
        mlog.debug('Unknown target type:', str(targetholder))
        raise RuntimeError('Unreachable code')
    target = targetclass(name, self.subdir, self.subproject, is_cross, sources, objs, self.environment, kwargs)
    l = targetholder(target, self)
    self.add_target(name, l.held_object)
    # Once a target exists, global arguments must not change any more.
    self.global_args_frozen = True
    return l
def check_sources_exist(self, subdir, sources):
    """Raise InterpreterException if any string source in *sources* is
    missing on disk under *subdir*. Non-string entries are generated
    sources and always considered present."""
    missing = [src for src in sources
               if isinstance(src, str) and not os.path.isfile(os.path.join(subdir, src))]
    if missing:
        raise InterpreterException('Tried to add non-existing source %s.' % missing[0])
def function_call(self, node):
    """Evaluate a FunctionNode: reduce its arguments and dispatch to the
    registered implementation in self.funcs (positional args flattened)."""
    func_name = node.func_name
    (posargs, kwargs) = self.reduce_arguments(node.args)
    if func_name in self.funcs:
        return self.funcs[func_name](node, self.flatten(posargs), kwargs)
    else:
        raise InvalidCode('Unknown function "%s".' % func_name)
def is_assignable(self, value):
    """Return True if *value* may be stored in an interpreter variable.

    A single isinstance with a tuple replaces the original chain of
    or-ed isinstance calls; the accepted types are unchanged.
    """
    return isinstance(value, (InterpreterObject,
                              dependencies.Dependency,
                              str,
                              int,
                              list,
                              mesonlib.File))
def assignment(self, node):
    """Evaluate an AssignmentNode: compute the right-hand side, unwrap
    literal nodes to native values, validate, and bind the variable.
    Returns the assigned value."""
    assert(isinstance(node, mparser.AssignmentNode))
    var_name = node.var_name
    if not isinstance(var_name, str):
        raise InvalidArguments('Tried to assign value to a non-variable.')
    value = self.evaluate_statement(node.value)
    value = self.to_native(value)
    if not self.is_assignable(value):
        raise InvalidCode('Tried to assign an invalid value to variable.')
    self.set_variable(var_name, value)
    return value
def reduce_arguments(self, args):
    """Evaluate an ArgumentNode into (positional_list, keyword_dict).

    Positional arguments must precede keyword arguments; keyword names
    must be strings. Each argument expression is evaluated in order.
    """
    assert(isinstance(args, mparser.ArgumentNode))
    if args.incorrect_order():
        raise InvalidArguments('All keyword arguments must be after positional arguments.')
    reduced_pos = [self.evaluate_statement(arg) for arg in args.arguments]
    reduced_kw = {}
    # items() avoids the keys()-then-index double lookup of the original.
    for key, a in args.kwargs.items():
        if not isinstance(key, str):
            raise InvalidArguments('Keyword argument name is not a string.')
        reduced_kw[key] = self.evaluate_statement(a)
    # The original re-checked isinstance(reduced_pos, list) here, but a
    # list comprehension always yields a list, so that branch was dead.
    return (reduced_pos, reduced_kw)
def string_method_call(self, obj, method_name, args):
    """Dispatch a method call on a string value: strip(), format() or
    split() with an optional separator argument."""
    obj = self.to_native(obj)
    if method_name == 'strip':
        return obj.strip()
    elif method_name == 'format':
        return self.format_string(obj, args)
    elif method_name == 'split':
        (posargs, _) = self.reduce_arguments(args)
        if len(posargs) > 1:
            raise InterpreterException('Split() must have at most one argument.')
        elif len(posargs) == 1:
            s = posargs[0]
            if not isinstance(s, str):
                raise InterpreterException('Split() argument must be a string')
            return obj.split(s)
        else:
            # No separator: split on any whitespace, like Python.
            return obj.split()
    raise InterpreterException('Unknown method "%s" for a string.' % method_name)
def to_native(self, arg):
    """Unwrap a literal parser node (string, number or boolean) to its
    plain Python value; any other argument is returned unchanged.

    Uses one isinstance call with a tuple instead of the original
    three or-ed checks; the accepted node types are identical.
    """
    if isinstance(arg, (mparser.StringNode,
                        mparser.NumberNode,
                        mparser.BooleanNode)):
        return arg.value
    return arg
def format_string(self, templ, args):
    """Implement the string format() method: substitute @0@, @1@, ...
    placeholders with the evaluated arguments."""
    templ = self.to_native(templ)
    if isinstance(args, mparser.ArgumentNode):
        args = args.arguments
    for (i, arg) in enumerate(args):
        arg = self.to_native(self.evaluate_statement(arg))
        if isinstance(arg, bool): # Python boolean is upper case.
            arg = str(arg).lower()
        templ = templ.replace('@{}@'.format(i), str(arg))
    return templ
def method_call(self, node):
    """Evaluate a MethodNode: resolve the receiver, then dispatch to the
    string, array or InterpreterObject method machinery.

    extract_objects() is special-cased: it is forbidden in Unity builds
    and its target is validated for cross-subproject extraction.
    """
    invokable = node.source_object
    if isinstance(invokable, mparser.IdNode):
        object_name = invokable.value
        obj = self.get_variable(object_name)
    else:
        # Fix: object_name was previously unbound on this path, so the
        # not-callable error below raised UnboundLocalError instead.
        object_name = None
        obj = self.evaluate_statement(invokable)
    method_name = node.name
    if method_name == 'extract_objects' and self.environment.coredata.unity:
        raise InterpreterException('Single object files can not be extracted in Unity builds.')
    args = node.args
    if isinstance(obj, mparser.StringNode):
        obj = obj.get_value()
    if isinstance(obj, str):
        return self.string_method_call(obj, method_name, args)
    if isinstance(obj, list):
        return self.array_method_call(obj, method_name, self.reduce_arguments(args)[0])
    if not isinstance(obj, InterpreterObject):
        raise InvalidArguments('Variable "%s" is not callable.' % object_name)
    (args, kwargs) = self.reduce_arguments(args)
    if method_name == 'extract_objects':
        self.validate_extraction(obj.held_object)
    return obj.method_call(method_name, args, kwargs)
# Only permit object extraction from the same subproject
def validate_extraction(self, buildtarget):
    """Reject extract_objects() calls that cross the main-project /
    subproject boundary or span two different subprojects."""
    if not self.subdir.startswith(self.subproject_dir):
        # Caller is in the main project: the target must be too.
        if buildtarget.subdir.startswith(self.subproject_dir):
            raise InterpreterException('Tried to extract objects from a subproject target.')
    else:
        if not buildtarget.subdir.startswith(self.subproject_dir):
            raise InterpreterException('Tried to extract objects from the main project from a subproject.')
        # Second path component identifies the subproject name.
        if self.subdir.split('/')[1] != buildtarget.subdir.split('/')[1]:
            raise InterpreterException('Tried to extract objects from a different subproject.')
def array_method_call(self, obj, method_name, args):
    """Dispatch a method call on an array value.

    Supported methods: contains(item), length(), get(index). get()
    accepts negative indices counting from the end, like Python.
    """
    if method_name == 'contains':
        return self.check_contains(obj, args)
    if method_name == 'length':
        return len(obj)
    if method_name == 'get':
        idx = args[0]
        if not isinstance(idx, int):
            raise InvalidArguments('Array index must be a number.')
        if idx < -len(obj) or idx >= len(obj):
            raise InvalidArguments('Array index %s is out of bounds for array of size %d.' % (idx, len(obj)))
        return obj[idx]
    raise InterpreterException('Arrays do not have a method called "%s".' % method_name)
def check_contains(self, obj, args):
    """Implement the array contains() method: deep membership test that
    descends into nested arrays. args must hold exactly one item."""
    if len(args) != 1:
        raise InterpreterException('Contains method takes exactly one argument.')
    needle = args[0]
    for entry in obj:
        if isinstance(entry, list):
            # Search nested arrays recursively.
            if self.check_contains(entry, args):
                return True
        try:
            if entry == needle:
                return True
        except Exception:
            # Incomparable types simply don't match (deliberate best-effort).
            pass
    return False
def evaluate_if(self, node):
    """Evaluate an if/elif/else clause: run the first branch whose
    condition is true, otherwise the else block (if present)."""
    assert(isinstance(node, mparser.IfClauseNode))
    for i in node.ifs:
        result = self.evaluate_statement(i.condition)
        if not(isinstance(result, bool)):
            # NOTE(review): debug print leaks the raw value to stdout before
            # raising — consider folding it into the exception message.
            print(result)
            raise InvalidCode('If clause does not evaluate to true or false.')
        if result:
            self.evaluate_codeblock(i.block)
            return
    if not isinstance(node.elseblock, mparser.EmptyNode):
        self.evaluate_codeblock(node.elseblock)
def evaluate_foreach(self, node):
    """Evaluate a foreach clause: bind the loop variable to each array
    element in turn and execute the body block."""
    assert(isinstance(node, mparser.ForeachClauseNode))
    varname = node.varname.value
    items = self.evaluate_statement(node.items)
    if not isinstance(items, list):
        raise InvalidArguments('Items of foreach loop is not an array')
    for item in items:
        self.set_variable(varname, item)
        self.evaluate_codeblock(node.block)
def evaluate_plusassign(self, node):
    """Evaluate '+=': append a value (or extend with a list) onto an
    existing array variable, rebinding it to a new list."""
    assert(isinstance(node, mparser.PlusAssignmentNode))
    varname = node.var_name
    addition = self.evaluate_statement(node.value)
    # Remember that all variables are immutable. We must always create a
    # full new variable and then assign it.
    old_variable = self.get_variable(varname)
    if not isinstance(old_variable, list):
        raise InvalidArguments('The += operator currently only works with arrays.')
    # Add other data types here.
    else:
        if isinstance(addition, list):
            new_value = old_variable + addition
        else:
            new_value = old_variable + [addition]
    self.set_variable(varname, new_value)
def evaluate_indexing(self, node):
    """Evaluate an index expression a[i] on an array; negative indices
    count from the end, as in Python."""
    assert(isinstance(node, mparser.IndexNode))
    iobject = self.evaluate_statement(node.iobject)
    if not isinstance(iobject, list):
        raise InterpreterException('Tried to index a non-array object.')
    index = self.evaluate_statement(node.index)
    if not isinstance(index, int):
        raise InterpreterException('Index value is not an integer.')
    if index < -len(iobject) or index >= len(iobject):
        raise InterpreterException('Index %d out of bounds of array of size %d.' % (index, len(iobject)))
    return iobject[index]
def is_elementary_type(self, v):
    """Return True if *v* is a plain Python value (int, float, str, bool
    or list) rather than a parser node or wrapped interpreter object.

    Returns the isinstance result directly instead of the original
    if-True-else-False pattern; the accepted types are unchanged.
    """
    return isinstance(v, (int, float, str, bool, list))
def evaluate_comparison(self, node):
v1 = self.evaluate_statement(node.left)
v2 = self.evaluate_statement(node.right)
if self.is_elementary_type(v1):
val1 = v1
else:
val1 = v1.value
if self.is_elementary_type(v2):
val2 = v2
else:
val2 = v2.value
if node.ctype == '==':
return val1 == val2
elif node.ctype == '!=':
return val1 != val2
else:
raise InvalidCode('You broke me.')
def evaluate_andstatement(self, cur):
l = self.evaluate_statement(cur.left)
if isinstance(l, mparser.BooleanNode):
l = l.value
if not isinstance(l, bool):
raise InterpreterException('First argument to "and" is not a boolean.')
if not l:
return False
r = self.evaluate_statement(cur.right)
if isinstance(r, mparser.BooleanNode):
r = r.value
if not isinstance(r, bool):
raise InterpreterException('Second argument to "and" is not a boolean.')
return r
def evaluate_orstatement(self, cur):
l = self.evaluate_statement(cur.left)
if isinstance(l, mparser.BooleanNode):
l = l.get_value()
if not isinstance(l, bool):
raise InterpreterException('First argument to "or" is not a boolean.')
if l:
return True
r = self.evaluate_statement(cur.right)
if isinstance(r, mparser.BooleanNode):
r = r.get_value()
if not isinstance(r, bool):
raise InterpreterException('Second argument to "or" is not a boolean.')
return r
def evaluate_notstatement(self, cur):
v = self.evaluate_statement(cur.value)
if isinstance(v, mparser.BooleanNode):
v = v.value
if not isinstance(v, bool):
raise InterpreterException('Argument to "not" is not a boolean.')
return not v
def evaluate_uminusstatement(self, cur):
v = self.evaluate_statement(cur.value)
if isinstance(v, mparser.NumberNode):
v = v.value
if not isinstance(v, int):
raise InterpreterException('Argument to negation is not an integer.')
return -v
def evaluate_arithmeticstatement(self, cur):
l = self.to_native(self.evaluate_statement(cur.left))
r = self.to_native(self.evaluate_statement(cur.right))
if cur.operation == 'add':
try:
return l + r
except Exception as e:
raise InvalidCode('Invalid use of addition: ' + str(e))
elif cur.operation == 'sub':
if not isinstance(l, int) or not isinstance(r, int):
raise InvalidCode('Subtraction works only with integers.')
return l - r
elif cur.operation == 'mul':
if not isinstance(l, int) or not isinstance(r, int):
raise InvalidCode('Multiplication works only with integers.')
return l * r
elif cur.operation == 'div':
if not isinstance(l, int) or not isinstance(r, int):
raise InvalidCode('Division works only with integers.')
return l // r
else:
raise InvalidCode('You broke me.')
def evaluate_arraystatement(self, cur):
(arguments, kwargs) = self.reduce_arguments(cur.args)
if len(kwargs) > 0:
raise InvalidCode('Keyword arguments are invalid in array construction.')
return arguments
    def is_subproject(self):
        """Return True when interpreting a subproject rather than the
        top-level project (the top level uses the empty-string name)."""
        return self.subproject != ''
| {
"content_hash": "63e49f0e669bf91d8b08b6cc50514151",
"timestamp": "",
"source": "github",
"line_count": 2039,
"max_line_length": 183,
"avg_line_length": 42.41441883276116,
"alnum_prop": 0.5987766381832268,
"repo_name": "yuhangwang/meson",
"id": "5da6d8b352adf6c1640da15f236a6b1aea5a90cb",
"size": "87076",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "interpreter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "131"
},
{
"name": "C",
"bytes": "29538"
},
{
"name": "C#",
"bytes": "631"
},
{
"name": "C++",
"bytes": "10198"
},
{
"name": "Emacs Lisp",
"bytes": "1226"
},
{
"name": "FORTRAN",
"bytes": "1359"
},
{
"name": "Groff",
"bytes": "175"
},
{
"name": "Inno Setup",
"bytes": "372"
},
{
"name": "Java",
"bytes": "302"
},
{
"name": "Lex",
"bytes": "110"
},
{
"name": "Objective-C",
"bytes": "462"
},
{
"name": "Objective-C++",
"bytes": "87"
},
{
"name": "Protocol Buffer",
"bytes": "46"
},
{
"name": "Python",
"bytes": "590002"
},
{
"name": "Rust",
"bytes": "376"
},
{
"name": "Shell",
"bytes": "2144"
},
{
"name": "Vala",
"bytes": "2730"
},
{
"name": "Yacc",
"bytes": "50"
}
],
"symlink_target": ""
} |
import quex.engine.state_machine.algorithm.beautifier as beautifier
import quex.engine.state_machine.algorithm.acceptance_pruning as acceptance_pruning
from quex.blackboard import E_PreContextIDs, setup as Setup
def do(the_state_machine, pre_context_sm, BeginOfLinePreContextF):
    """Attach a pre-context condition to 'the_state_machine'.

    The pre-context automaton is NOT webbed into the main state machine.
    Instead it is inverted (so it can be walked backwards at run time) and
    the acceptance states of the main machine are tagged with the id of
    that inverted machine.

    Returns the inverted pre-context state machine, or None when only the
    trivial begin-of-line pre-context (or nothing) applies.
    """
    # Consistency checks: empty state machines are senseless here.
    assert not the_state_machine.is_empty()
    assert pre_context_sm is None or not pre_context_sm.is_empty()

    if pre_context_sm is None:
        if BeginOfLinePreContextF:
            # Only the trivial 'begin of line' pre-context: flag every
            # acceptance state accordingly.
            for state in the_state_machine.get_acceptance_state_list():
                state.set_pre_context_id(E_PreContextIDs.BEGIN_OF_LINE)
        return None

    # Invert the pre-context machine; matching happens backwards from the
    # current input position.
    backward_sm = pre_context_sm.get_inverse()
    if BeginOfLinePreContextF:
        # Extend the existing pre-context with a preceding 'begin-of-line'.
        backward_sm.mount_newline_to_acceptance_states(Setup.dos_carriage_return_newline_f, InverseF=True)

    # Clean up what inversion (and the optional newline mount) produced.
    backward_sm = beautifier.do(backward_sm)
    # Once an acceptance state is reached no further analysis is necessary.
    acceptance_pruning.do(backward_sm)

    # Tag all acceptance states of the main machine with the id of the
    # inverted pre-context machine.
    # [Is this necessary? Is it not enough that the acceptance origins
    #  point to it? <fschaef>]
    backward_id = backward_sm.get_id()
    for state in the_state_machine.states.values():
        if state.is_acceptance():
            state.set_pre_context_id(backward_id)
    return backward_sm
| {
"content_hash": "a228595b4821fd208cb11398c5dd43cd",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 114,
"avg_line_length": 50.25,
"alnum_prop": 0.6324626865671642,
"repo_name": "coderjames/pascal",
"id": "3fa1bcd532f046a386bf78cc0f9d6cc4c9a927a7",
"size": "3271",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "quex-0.63.1/quex/engine/state_machine/setup_pre_context.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "194851"
},
{
"name": "C++",
"bytes": "78624"
},
{
"name": "Delphi",
"bytes": "5659"
},
{
"name": "Python",
"bytes": "1350210"
}
],
"symlink_target": ""
} |
"""Find out how to 'clear the board' in Pyramid Solitaire.
The design is meant to be simple to understand so it is less likely to have
bugs, but to make Pyramid Solitaire solvable for the worst case scenarios, we
must do a bit of optimization work on the state representation.
This implementation skips all of the precalculations of the Java/Lisp versions
to keep things as simple as possible while still using the optimization of
cramming the entire state into an integer value.
This still needs more than 8GB RAM in the worst case because this algorithm
skips some of the features like unwinnable state detection."""
import collections
import solvers.deck
def card_value(card):
    """Return the card's numeric value according to Pyramid Solitaire rules.

    Aces are always 1, Jacks are 11, Queens are 12, and Kings are 13."""
    rank_order = "A23456789TJQK"
    return rank_order.index(solvers.deck.card_rank(card)) + 1
def cards_are_removable(card1, card2=None):
    """Return true if the card or cards can be removed together.

    Kings can be removed by themselves, and pairs of cards that add to 13."""
    # A missing/None card contributes zero to the total.
    total = card_value(card1) if card1 else 0
    if card2:
        total += card_value(card2)
    return total == 13
class State:
    """A state in Pyramid Solitaire, represented by a 60-bit integer value.
    This class only has static methods, meant to be called on integer values.
    The reason is to save as much memory as possible (we'll be creating tens
    of millions of these).
    It's tempting to represent the state as lists of cards in the tableau,
    stock, and waste piles, but it's too slow and memory usage is too high.
    The trick to this state representation is that it holds data that refers
    to the deck of cards, without containing a reference to the deck. So we
    need the deck of cards to understand the state of the game.
    Bits 0-51: "deck_flags" - 52 bits representing whether or not each card
    in the deck remains in the game.
    Bits 52-57: "stock_index" - 6 bits containing a number from 28 to 52,
    an index into the deck for the card at the top of the stock
    pile. Cards with index higher than this are the remainder of
    the stock pile. Cards with index below this (and above 27) are
    the cards in the waste pile. Hint for understanding how it
    works: incrementing this stock index moves the top card of the
    stock pile to the top of the waste pile.
    Bits 58-59: 2 bits to indicate how many times the waste pile has been
    recycled.

    Layout summary: [cycle:2][stock_index:6][deck_flags:52]."""
    # Sentinel indexes: 52 means "no stock card", 27 means "no waste card"
    # (deck indexes 0-27 are the tableau, 28-51 the stock/waste).
    EMPTY_STOCK = 52
    EMPTY_WASTE = 27
    # All 52 cards present, stock index at 28 (first stock card), cycle 0.
    INITIAL_STATE = (28 << 52) | ((2**52) - 1)
    # bits set on the Nth tableau card and the cards covering it from below
    UNCOVERED_MASKS = [
        0b1111111111111111111111111111,
        0b0111111011111011110111011010,
        0b1111110111110111101110110100,
        0b0011111001111001110011001000,
        0b0111110011110011100110010000,
        0b1111100111100111001100100000,
        0b0001111000111000110001000000,
        0b0011110001110001100010000000,
        0b0111100011100011000100000000,
        0b1111000111000110001000000000,
        0b0000111000011000010000000000,
        0b0001110000110000100000000000,
        0b0011100001100001000000000000,
        0b0111000011000010000000000000,
        0b1110000110000100000000000000,
        0b0000011000001000000000000000,
        0b0000110000010000000000000000,
        0b0001100000100000000000000000,
        0b0011000001000000000000000000,
        0b0110000010000000000000000000,
        0b1100000100000000000000000000,
        0b0000001000000000000000000000,
        0b0000010000000000000000000000,
        0b0000100000000000000000000000,
        0b0001000000000000000000000000,
        0b0010000000000000000000000000,
        0b0100000000000000000000000000,
        0b1000000000000000000000000000,
    ]
    @staticmethod
    def deck_flags(state):
        """Return the state's deck flags."""
        # Low 52 bits: one flag per deck card that remains in the game.
        return state & 0xFFFFFFFFFFFFF
    @staticmethod
    def is_tableau_empty(state):
        # The 28 tableau cards occupy the low 28 bits of the deck flags.
        return (state & 0xFFFFFFF) == 0
    @staticmethod
    def stock_index(state):
        """Return the state's stock index, the top card of the stock pile.
        If the stock index is 52, it means the stock pile is empty."""
        return (state >> 52) & 0b111111
    @staticmethod
    def cycle(state):
        """Return the state's cycle, the times the waste pile was recycled."""
        return (state >> 58) & 0b11
    @staticmethod
    def waste_index(state):
        """Return the state's waste index, the top card of the waste pile.
        If the waste index is 27, it means the waste pile is empty."""
        # Scan downward from just below the stock index, skipping cards
        # that have been removed, until a remaining card (or 27) is found.
        index = State.stock_index(state) - 1
        mask = 1 << index
        while index > State.EMPTY_WASTE:
            if (state & mask) != 0:
                break
            mask >>= 1
            index -= 1
        return index
    @staticmethod
    def _adjust_stock_index(state):
        """Return the state with its stock index adjusted correctly.
        Basically the stock index must point to a card that remains in the
        game or else be 52 to indicate the stock pile is empty. This makes sure
        every state has a single unique representation - you can't have two
        states that are effectively the same but have different stock indexes
        because one points to the actual top card and the other points to
        some card that no longer remains in the game."""
        index = State.stock_index(state)
        state = state & 0xC0FFFFFFFFFFFFF # remove the stock index
        mask = 1 << index
        # Walk upward until a card that is still in the game is found,
        # or the index reaches EMPTY_STOCK (52).
        while index < State.EMPTY_STOCK:
            if (state & mask) != 0:
                break
            mask <<= 1
            index += 1
        return state | (index << 52)
    @staticmethod
    def _uncovered_indexes(deck_flags):
        """Return deck indexes of uncovered tableau cards."""
        flags = deck_flags & 0xFFFFFFF
        def is_uncovered(index):
            # A card is uncovered when it is the only remaining card in
            # its own cover mask (itself plus everything below it).
            return (1 << index) == (flags & State.UNCOVERED_MASKS[index])
        return [i for i in range(28) if is_uncovered(i)]
    @staticmethod
    def successors(state, deck):
        """Return a list of successor states to this state.
        Actions that can be performed (if applicable):
        1. Recycle the waste pile.
        2. Draw a card from the stock pile to the waste pile.
        3. Remove a King from the tableau.
        4. Remove a King from the stock pile.
        5. Remove a King from the waste pile.
        6. Remove a pair of cards from the tableau.
        7. Remove a pair of cards, one each from the tableau and stock pile.
        8. Remove a pair of cards, one each from the tableau and waste pile.
        9. Remove a pair of cards, one each from the stock and waste piles."""
        def remove(deck_flags, *indexes):
            """Remove the cards at the indexes from the deck_flags value."""
            for index in indexes:
                deck_flags ^= (1 << index)
            return deck_flags
        results = []
        deck_flags = State.deck_flags(state)
        uncovered = State._uncovered_indexes(deck_flags)
        stock_index = State.stock_index(state)
        waste_index = State.waste_index(state)
        cycle = State.cycle(state)
        def create(deck_flags=deck_flags, stock_index=stock_index, cycle=cycle):
            """Create a new state given the individual parts of the state."""
            # Defaults bind the current values; callers override only the
            # part of the state their action changed.
            new_state = (cycle << 58) | (stock_index << 52) | deck_flags
            return State._adjust_stock_index(new_state)
        is_stock_empty = stock_index == State.EMPTY_STOCK
        is_waste_empty = waste_index == State.EMPTY_WASTE
        stock_card = deck[stock_index] if not is_stock_empty else None
        waste_card = deck[waste_index] if not is_waste_empty else None
        has_both = stock_card and waste_card
        if not stock_card and cycle < 2:
            # 1. recycle the waste pile
            results.append(create(stock_index=28, cycle=cycle+1))
        if stock_card:
            # 2. draw a card from stock to waste
            results.append(create(stock_index=stock_index+1))
        if stock_card and cards_are_removable(stock_card):
            # 4. remove a King from the stock pile
            results.append(create(deck_flags=remove(deck_flags, stock_index)))
        if waste_card and cards_are_removable(waste_card):
            # 5. remove a King from the waste pile
            results.append(create(remove(deck_flags, waste_index)))
        if has_both and cards_are_removable(stock_card, waste_card):
            # 9. remove the cards on the stock and waste piles
            results.append(create(remove(deck_flags, stock_index, waste_index)))
        for i in uncovered:
            if cards_are_removable(deck[i]):
                # 3. remove a King from the tableau
                results.append(create(remove(deck_flags, i)))
            else:
                if stock_card and cards_are_removable(deck[i], stock_card):
                    # 7. remove the cards from the tableau/stock pile
                    results.append(create(remove(deck_flags, i, stock_index)))
                if waste_card and cards_are_removable(deck[i], waste_card):
                    # 8. remove the cards from the tableau/waste pile
                    results.append(create(remove(deck_flags, i, waste_index)))
                for j in uncovered:
                    if cards_are_removable(deck[i], deck[j]):
                        # 6. remove two cards from the tableau
                        results.append(create(remove(deck_flags, i, j)))
        return results
def path(state, seen_states, deck):
    """Return the actions to take to get to this state from the start."""
    def describe_transition(prev, curr):
        """Name the move that transformed prev into curr."""
        changed = prev ^ curr  # XOR highlights the bits that flipped
        if State.cycle(changed):
            return 'Recycle'
        removed = State.deck_flags(changed)
        if removed:
            cards = [deck[i] for i in range(52) if removed & (1 << i)]
            return f"Remove {' and '.join(cards)}"
        # Only the stock index moved: a card was drawn to the waste pile.
        return 'Draw'
    actions = []
    # Walk the predecessor chain back to the initial state (which has no
    # entry in seen_states), collecting moves in reverse order.
    while state in seen_states:
        prev = seen_states[state]
        actions.append(describe_transition(prev, state))
        state = prev
    actions.reverse()
    return actions
def solve(deck):
    """Return a solution to removing all tableau cards in Pyramid Solitaire."""
    # Breadth-first search over game states; returns [] when no solution
    # exists within the search space.
    frontier = collections.deque([State.INITIAL_STATE])
    came_from = dict()
    while frontier:
        current = frontier.popleft()
        if State.is_tableau_empty(current):
            return path(current, came_from, deck)
        for succ in State.successors(current, deck):
            if succ in came_from:
                continue
            came_from[succ] = current
            frontier.append(succ)
    return []
| {
"content_hash": "33c8771188a6f1e0a26800271c1e0efb",
"timestamp": "",
"source": "github",
"line_count": 273,
"max_line_length": 80,
"avg_line_length": 40.92673992673993,
"alnum_prop": 0.6359974939586504,
"repo_name": "mchung94/solitaire-player",
"id": "5457bf2716e1fb28051408674f4fcfb33fdb558f",
"size": "11173",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pysolvers/solvers/pyramid.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "245310"
},
{
"name": "Python",
"bytes": "32105"
}
],
"symlink_target": ""
} |
"""Sphinx build configuration for the UrPas documentation."""

import sys
import os

# Make the project root importable so autodoc can find the sources.
sys.path.insert(0, os.path.abspath('..'))

# -- General configuration ------------------------------------------------

# Sphinx extensions used by this project.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.coverage',
    'sphinx.ext.viewcode',
]

# Paths (relative to this directory) containing page templates.
templates_path = ['_templates']

# Source files are reStructuredText.
source_suffix = '.rst'

# The document holding the root of the toctree.
master_doc = 'main'

# Project metadata.
project = u'UrPas'
copyright = u'2015, Ivo Valchev and Borislav Rusinov'

# The short X.Y version and the full release string.
version = '0.1'
release = '1'

# Patterns (relative to the source dir) skipped when looking for sources.
exclude_patterns = ['_build']

# Syntax-highlighting style for code samples.
pygments_style = 'sphinx'

# -- Options for HTML output ----------------------------------------------

# Built-in theme for HTML and HTML Help pages.
html_theme = 'default'

# Static files (style sheets, images, ...) copied into the build output.
html_static_path = ['_static']

# Base name for the HTML help builder's output file.
htmlhelp_basename = 'UrPasdoc'

# -- Options for LaTeX output ---------------------------------------------

# No LaTeX customization (paper size, point size, preamble) is needed.
latex_elements = {
}

# (source start file, target name, title, author, documentclass).
latex_documents = [
    ('main', 'UrPas.tex', u'UrPas Documentation',
     u'Ivo Valchev and Borislav Rusinov', 'manual'),
]

# -- Options for manual page output ---------------------------------------

# (source start file, name, description, authors, manual section).
man_pages = [
    ('main', 'urpas', u'UrPas Documentation',
     [u'Ivo Valchev and Borislav Rusinov'], 1)
]

# -- Options for Texinfo output -------------------------------------------

# (source start file, target name, title, author, dir menu entry,
#  description, category).
texinfo_documents = [
    ('main', 'UrPas', u'UrPas Documentation',
     u'Ivo Valchev and Borislav Rusinov', 'UrPas', 'One line description of project.',
     'Miscellaneous'),
]
| {
"content_hash": "74e1496e7e987ac946730341a4a3adcb",
"timestamp": "",
"source": "github",
"line_count": 248,
"max_line_length": 84,
"avg_line_length": 31.677419354838708,
"alnum_prop": 0.7048116089613035,
"repo_name": "I-Valchev/UrPas",
"id": "8d8ce45c7510fb690c61ca5b087630066f0afa8d",
"size": "8274",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "doc/conf.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "23118"
},
{
"name": "CSS",
"bytes": "19984"
},
{
"name": "HTML",
"bytes": "201505"
},
{
"name": "JavaScript",
"bytes": "75039"
},
{
"name": "Makefile",
"bytes": "122"
},
{
"name": "Python",
"bytes": "556532"
}
],
"symlink_target": ""
} |
from telemetry import multi_page_benchmark
from telemetry import util
class Dromaeo(multi_page_benchmark.MultiPageBenchmark):
  """Benchmark measurement for Dromaeo pages.

  Waits for the in-page harness to set its completion cookie, then reports
  every suite's runs/s score, marking the suite named in the page URL's
  query string as the default chart value.
  """

  def MeasurePage(self, page, tab, results):
    # The Dromaeo harness sets the '__done' cookie when all tests finish.
    js_is_done = 'window.document.cookie.indexOf("__done=1") >= 0'
    def _IsDone():
      return bool(tab.EvaluateJavaScript(js_is_done))
    util.WaitFor(_IsDone, 600, poll_interval=5)

    js_get_results = 'JSON.stringify(window.automation.GetResults())'
    # Parse the JSON payload with json.loads instead of eval(): evaluating
    # page-controlled output is unsafe, and eval() cannot handle JSON
    # literals such as true/false/null. (Also drops a leftover debug print
    # of the JS snippet.)
    import json
    score = json.loads(tab.EvaluateJavaScript(js_get_results))

    def Escape(k):
      # Chart names must not contain these separator/marker characters.
      chars = [' ', '-', '/', '(', ')', '*']
      for c in chars:
        k = k.replace(c, '_')
      return k

    # The first URL query parameter names the suite being run, e.g.
    # 'dromaeo.html?dom-attr&automated' -> 'dom-attr'.
    suffix = page.url[page.url.index('?') + 1 : page.url.index('&')]
    for k, v in score.items():
      data_type = 'unimportant'
      if k == suffix:
        data_type = 'default'
      results.Add('score', 'runs/s', v, chart_name=Escape(k),
                  data_type=data_type)
| {
"content_hash": "39803f2f24caf83b0736382ef71156d1",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 69,
"avg_line_length": 34.333333333333336,
"alnum_prop": 0.6148867313915858,
"repo_name": "nacl-webkit/chrome_deps",
"id": "d10f429cbf79a212c1b063b97f124ef10e945c8d",
"size": "1094",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/perf/perf_tools/dromaeo.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "853"
},
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "1173441"
},
{
"name": "Awk",
"bytes": "9519"
},
{
"name": "C",
"bytes": "74568368"
},
{
"name": "C#",
"bytes": "1132"
},
{
"name": "C++",
"bytes": "156174457"
},
{
"name": "DOT",
"bytes": "1559"
},
{
"name": "F#",
"bytes": "381"
},
{
"name": "Java",
"bytes": "3088381"
},
{
"name": "JavaScript",
"bytes": "18179048"
},
{
"name": "Logos",
"bytes": "4517"
},
{
"name": "M",
"bytes": "2190"
},
{
"name": "Matlab",
"bytes": "3044"
},
{
"name": "Objective-C",
"bytes": "6965520"
},
{
"name": "PHP",
"bytes": "97817"
},
{
"name": "Perl",
"bytes": "932725"
},
{
"name": "Python",
"bytes": "8458718"
},
{
"name": "R",
"bytes": "262"
},
{
"name": "Ragel in Ruby Host",
"bytes": "3621"
},
{
"name": "Shell",
"bytes": "1526176"
},
{
"name": "Tcl",
"bytes": "277077"
},
{
"name": "XSLT",
"bytes": "13493"
}
],
"symlink_target": ""
} |
import os
import re
import sys
def get_list_includes():
return "src/cpu/kernels/assembly " \
"src/core/NEON/kernels/assembly " \
"src/core/NEON/kernels/convolution/winograd " \
"include/linux include " \
". ".split()
def get_list_flags( filename, arch):
assert arch in ["armv7", "aarch64"]
flags = ["-std=c++14"]
flags.append("-DARM_COMPUTE_CPP_SCHEDULER=1")
flags.append("-DARM_COMPUTE_CL")
flags.append("-DENABLE_EXPERIMENTAL_DYNAMIC_FUSION")
if arch == "aarch64":
flags.append("-DARM_COMPUTE_AARCH64_V8_2")
return flags
def filter_files( list_files ):
to_check = []
for f in list_files:
if os.path.splitext(f)[1] != ".cpp":
continue
# Skip OMPScheduler as it causes problems in clang
if (("OMPScheduler.cpp" in f) or
("CLTracePoint.cpp" in f) or
("NETracePoint.cpp" in f) or
("TracePoint.cpp" in f)):
continue
to_check.append(f)
return to_check
def filter_clang_tidy_lines( lines ):
    """Filter a raw clang-tidy log down to actionable diagnostics.

    Drops diagnostics from imported sub-trees and a long allow-list of
    known-noisy errors/warnings.  Lines that are neither errors nor
    warnings are context (notes, code snippets); they are kept only while
    the most recent diagnostic was kept, tracked via ``print_context``.
    Returns the list of kept lines.
    """
    out = []
    print_context=False
    for i in range(0, len(lines)):
        line = lines[i]
        # Imported / third-party sub-trees are ignored wholesale.
        if "/arm_conv/" in line:
            continue
        if "/arm_gemm/" in line:
            continue
        if "/convolution/" in line:
            continue
        if "/validate_examples/" in line:
            continue
        if "error:" in line:
            # Allow-list of known-benign errors (missing generated files,
            # intrinsics headers, pointer-size casts on 32-bit, ...).
            if (("Version.cpp" in line and "arm_compute_version.embed" in line and "file not found" in line) or
                ("arm_fp16.h" in line) or
                ("omp.h" in line) or
                ("cast from pointer to smaller type 'uintptr_t' (aka 'unsigned int') loses information" in line) or
                ("cast from pointer to smaller type 'cl_context_properties' (aka 'int') loses information" in line) or
                ("cast from pointer to smaller type 'std::uintptr_t' (aka 'unsigned int') loses information" in line) or
                ("NEMath.inl" in line and "statement expression not allowed at file scope" in line) or
                ("Utils.h" in line and "no member named 'unmap' in 'arm_compute::Tensor'" in line) or
                ("Utils.h" in line and "no member named 'map' in 'arm_compute::Tensor'" in line) or
                ("CPUUtils.cpp" in line and "'asm/hwcap.h' file not found" in line) or
                ("CPUUtils.cpp" in line and "use of undeclared identifier 'HWCAP_SVE'" in line) or
                ("sve" in line) or
                ("'arm_compute_version.embed' file not found" in line) ):
                print_context=False
                continue
            out.append(line)
            print_context=True
        elif "warning:" in line:
            # Allow-list of known-noisy warnings, mostly file-specific.
            if ("uninitialized record type: '__ret'" in line or
                "local variable '__bound_functor' is still referred to by the global variable '__once_callable'" in line or
                "assigning newly created 'gsl::owner<>'" in line or
                "calling legacy resource function without passing a 'gsl::owner<>'" in line or
                "deleting a pointer through a type that is not marked 'gsl::owner<>'" in line or
                (any(f in line for f in ["Error.cpp","Error.h"]) and "thrown exception type is not nothrow copy constructible" in line) or
                (any(f in line for f in ["Error.cpp","Error.h"]) and "uninitialized record type: 'args'" in line) or
                (any(f in line for f in ["Error.cpp","Error.h"]) and "do not call c-style vararg functions" in line) or
                (any(f in line for f in ["Error.cpp","Error.h"]) and "do not define a C-style variadic function" in line) or
                ("TensorAllocator.cpp" in line and "warning: pointer parameter 'ptr' can be pointer to const" in line) or
                ("TensorAllocator.cpp" in line and "warning: do not declare C-style arrays" in line) or
                ("RawTensor.cpp" in line and "warning: pointer parameter 'ptr' can be pointer to const" in line) or
                ("RawTensor.cpp" in line and "warning: do not declare C-style arrays" in line) or
                ("NEMinMaxLocationKernel.cpp" in line and "move constructors should be marked noexcept" in line) or
                ("NEMinMaxLocationKernel.cpp" in line and "move assignment operators should be marked noexcept" in line) or
                ("CLMinMaxLocationKernel.cpp" in line and "Forming reference to null pointer" in line) or
                ("PMUCounter.cpp" in line and "consider replacing 'long long' with 'int64'" in line) or
                ("Validation.cpp" in line and "parameter 'classified_labels' is unused" in line) or
                ("Validation.cpp" in line and "parameter 'expected_labels' is unused" in line) or
                ("Reference.cpp" in line and "parameter 'rois' is unused" in line) or
                ("Reference.cpp" in line and "parameter 'shapes' is unused" in line) or
                ("Reference.cpp" in line and re.search(r"parameter '[^']+' is unused", line)) or
                ("ReferenceCPP.cpp" in line and "parameter 'rois' is unused" in line) or
                ("ReferenceCPP.cpp" in line and "parameter 'srcs' is unused" in line) or
                ("ReferenceCPP.cpp" in line and re.search(r"parameter '[^']+' is unused", line)) or
                ("NEGEMMMatrixMultiplyKernel.cpp" in line and "do not use C-style cast to convert between unrelated types" in line) or
                ("NEPoolingLayerKernel.cpp" in line and "do not use C-style cast to convert between unrelated types" in line) or
                ("NESoftmaxLayerKernel.cpp" in line and "macro argument should be enclosed in parentheses" in line) or
                ("GraphUtils.cpp" in line and "consider replacing 'unsigned long' with 'uint32'" in line) or
                ("GraphUtils.cpp" in line and "consider replacing 'unsigned long' with 'uint64'" in line) or
                ("ConvolutionLayer.cpp" in line and "move assignment operators should be marked noexcept" in line) or
                ("ConvolutionLayer.cpp" in line and "move constructors should be marked noexcept" in line) or
                ("parameter 'memory_manager' is unused" in line) or
                ("parameter 'memory_manager' is copied for each invocation but only used as a const reference" in line) or
                ("DeconvolutionLayer.cpp" in line and "casting (double + 0.5) to integer leads to incorrect rounding; consider using lround" in line) or
                ("NEWinogradLayerKernel.cpp" in line and "use '= default' to define a trivial destructor" in line) or
                ("NEGEMMLowpMatrixMultiplyCore.cpp" in line and "constructor does not initialize these fields" in line) or
                ("NEGEMMLowpAssemblyMatrixMultiplyCore" in line and "constructor does not initialize these fields" in line) or
                ("CpuDepthwiseConv2dNativeKernel" in line and re.search(r"parameter '[^']+' is unused", line)) or
                ("CpuDepthwiseConv2dAssemblyDispatch" in line and re.search(r"parameter '[^']+' is unused", line)) or
                ("CpuDepthwiseConv2dAssemblyDispatch" in line and "modernize-use-equals-default" in line) or
                ("CPUUtils.cpp" in line and "consider replacing 'unsigned long' with 'uint64'" in line) or
                ("CPUUtils.cpp" in line and "parameter 'cpusv' is unused" in line) or
                ("CPUUtils.cpp" in line and "warning: uninitialized record type" in line) or
                ("Utils.h" in line and "warning: Use of zero-allocated memory" in line) or
                ("sve" in line) or
                ("CpuDepthwiseConv2dNativeKernel.cpp" in line and "misc-non-private-member-variables-in-classes" in line)): # This is to prevent false positive, should be reassessed with the newer clang-tidy
                print_context=False
                continue
            # Some suppressions need the *next* log line (the offending
            # source snippet) to decide; peek ahead when it exists.
            if "do not use C-style cast to convert between unrelated types" in line:
                if i + 1 < len(lines) and "vgetq_lane_f16" in lines[i + 1]:
                    print_context=False
                    continue
            if "use 'using' instead of 'typedef'" in line:
                if i + 1 < len(lines) and "BOOST_FIXTURE_TEST_SUITE" in lines[i + 1]:
                    print_context=False
                    continue
            if "do not call c-style vararg functions" in line:
                if (i + 1 < len(lines) and
                    ("BOOST_TEST" in lines[i + 1] or
                     "BOOST_FAIL" in lines[i + 1] or
                     "BOOST_CHECK_THROW" in lines[i + 1] or
                     "ARM_COMPUTE_ERROR_VAR" in lines[i + 1] or
                     "ARM_COMPUTE_RETURN_ON" in lines[i + 1] or
                     "syscall" in lines[i + 1])):
                    print_context=False
                    continue
            out.append(line)
            print_context=True
        elif (("CLMinMaxLocationKernel.cpp" in line and "'?' condition is false" in line) or
              ("CLMinMaxLocationKernel.cpp" in line and "Assuming the condition is false" in line) or
              ("CLMinMaxLocationKernel.cpp" in line and "Assuming pointer value is null" in line) or
              ("CLMinMaxLocationKernel.cpp" in line and "Forming reference to null pointer" in line)):
            # Analyzer notes from a known false positive: drop and stop
            # echoing context.
            print_context=False
            continue
        elif print_context:
            # Context line belonging to the last *kept* diagnostic.
            out.append(line)
    return out
if __name__ == "__main__":
    # Entry point: read a clang-tidy log, strip known noise, print what
    # remains, and exit non-zero when any real diagnostics survive.
    if len(sys.argv) != 2:
        print("usage: {} CLANG-TIDY_OUTPUT_FILE".format(sys.argv[0]))
        sys.exit(1)
    with open(sys.argv[1], mode="r") as clang_tidy_file:
        errors = filter_clang_tidy_lines(clang_tidy_file.readlines())
    print("\n".join(errors))
    sys.exit(0 if len(errors) == 0 else 1)
| {
"content_hash": "50dc4098afaa15b3be8e59303cea7395",
"timestamp": "",
"source": "github",
"line_count": 169,
"max_line_length": 206,
"avg_line_length": 57.917159763313606,
"alnum_prop": 0.5959337964854925,
"repo_name": "ARM-software/ComputeLibrary",
"id": "0a0de84bab15b7b63ba5cbfb5d9ffa840331ddbc",
"size": "9811",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "scripts/clang_tidy_rules.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "3062248"
},
{
"name": "C++",
"bytes": "34872664"
},
{
"name": "Go",
"bytes": "4183"
},
{
"name": "Python",
"bytes": "122193"
},
{
"name": "Shell",
"bytes": "3515"
}
],
"symlink_target": ""
} |
import argparse
from azure.mgmt.signalr.models import UpstreamTemplate
from knack.log import get_logger
from knack.util import CLIError
logger = get_logger(__name__)
# pylint: disable=protected-access, too-few-public-methods
class UpstreamTemplateAddAction(argparse._AppendAction):
    """Argparse append-action that parses repeated ``KEY=VALUE`` tokens
    into a single ``UpstreamTemplate`` and appends it to the destination."""

    def __call__(self, parser, namespace, values, option_string=None):
        template_kwargs = {}
        for token in values:
            # Split on the first '='; dashes in keys become underscores so
            # they match the model's keyword-argument names.
            key, sep, raw_value = token.partition('=')
            if not sep:
                raise CLIError('usage error: {} KEY=VALUE [KEY=VALUE ...]'.format(option_string))
            template_kwargs[key.replace('-', '_')] = raw_value
        parsed = UpstreamTemplate(**template_kwargs)
        super().__call__(parser, namespace, parsed, option_string)
| {
"content_hash": "8c89e5123f2b40000290054efcc85259",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 97,
"avg_line_length": 38.05,
"alnum_prop": 0.6360052562417872,
"repo_name": "yugangw-msft/azure-cli",
"id": "9208a7dd254ed1a702d61bf88e1f2f381379679e",
"size": "1139",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "src/azure-cli/azure/cli/command_modules/signalr/_actions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ANTLR",
"bytes": "5355"
},
{
"name": "Batchfile",
"bytes": "14110"
},
{
"name": "Bicep",
"bytes": "1679"
},
{
"name": "C#",
"bytes": "1971"
},
{
"name": "C++",
"bytes": "275"
},
{
"name": "Dockerfile",
"bytes": "8427"
},
{
"name": "HTML",
"bytes": "794"
},
{
"name": "JavaScript",
"bytes": "1404"
},
{
"name": "Jupyter Notebook",
"bytes": "389"
},
{
"name": "PowerShell",
"bytes": "1781"
},
{
"name": "Python",
"bytes": "24270340"
},
{
"name": "Rich Text Format",
"bytes": "12032"
},
{
"name": "Roff",
"bytes": "1036959"
},
{
"name": "Shell",
"bytes": "56023"
},
{
"name": "TSQL",
"bytes": "1145"
}
],
"symlink_target": ""
} |
import math
import numpy as np
from numba import cuda, float32
from numba.cuda.testing import unittest
import numba.cuda.random
from numba.cuda.testing import skip_on_cudasim, CUDATestCase
from numba.cuda.random import \
xoroshiro128p_uniform_float32, xoroshiro128p_normal_float32, \
xoroshiro128p_uniform_float64, xoroshiro128p_normal_float64
from numba.core import config
# Distributions
UNIFORM = 1
NORMAL = 2
@cuda.jit
def rng_kernel_float32(states, out, count, distribution):
    """CUDA kernel: each thread draws `count` float32 samples from `states`.

    Samples for thread t are written to out[t*count : (t+1)*count].
    `distribution` selects UNIFORM or NORMAL; any other value writes nothing.
    """
    thread_id = cuda.grid(1)
    for i in range(count):
        if distribution == UNIFORM:
            out[thread_id * count + i] = xoroshiro128p_uniform_float32(states, thread_id)
        elif distribution == NORMAL:
            out[thread_id * count + i] = xoroshiro128p_normal_float32(states, thread_id)
@cuda.jit
def rng_kernel_float64(states, out, count, distribution):
    """CUDA kernel: float64 counterpart of rng_kernel_float32.

    Each thread draws `count` samples into out[t*count : (t+1)*count];
    `distribution` selects UNIFORM or NORMAL.
    """
    thread_id = cuda.grid(1)
    for i in range(count):
        if distribution == UNIFORM:
            out[thread_id * count + i] = xoroshiro128p_uniform_float64(states, thread_id)
        elif distribution == NORMAL:
            out[thread_id * count + i] = xoroshiro128p_normal_float64(states, thread_id)
class TestCudaRandomXoroshiro128p(CUDATestCase):
    """Sanity checks for xoroshiro128p RNG state creation and the
    uniform/normal sample distributions on the GPU."""

    def test_create(self):
        # All 10 initial states must be distinct.
        states = cuda.random.create_xoroshiro128p_states(10, seed=1)
        s = states.copy_to_host()
        self.assertEqual(len(np.unique(s)), 10)

    def test_create_subsequence_start(self):
        states = cuda.random.create_xoroshiro128p_states(10, seed=1)
        s1 = states.copy_to_host()
        states = cuda.random.create_xoroshiro128p_states(10, seed=1,
                                                         subsequence_start=3)
        s2 = states.copy_to_host()

        # Starting seeds should match up with offset of 3
        np.testing.assert_array_equal(s1[3:], s2[:-3])

    def test_create_stream(self):
        stream = cuda.stream()
        states = cuda.random.create_xoroshiro128p_states(10, seed=1, stream=stream)
        s = states.copy_to_host()
        self.assertEqual(len(np.unique(s)), 10)

    def check_uniform(self, kernel_func, dtype):
        """Run `kernel_func` with 2 blocks x 32 threads, 32 samples each,
        and check the output matches Uniform(0, 1) moments."""
        states = cuda.random.create_xoroshiro128p_states(32 * 2, seed=1)
        # Fix: honour the requested dtype (was hardcoded to np.float32,
        # inconsistent with check_normal below).
        out = np.zeros(2 * 32 * 32, dtype=dtype)

        kernel_func[2, 32](states, out, 32, UNIFORM)
        # Uniform(0, 1): mean = 1/2, std = 1/(2*sqrt(3)).
        self.assertAlmostEqual(out.min(), 0.0, delta=1e-3)
        self.assertAlmostEqual(out.max(), 1.0, delta=1e-3)
        self.assertAlmostEqual(out.mean(), 0.5, delta=1.5e-2)
        self.assertAlmostEqual(out.std(), 1.0/(2*math.sqrt(3)), delta=6e-3)

    def test_uniform_float32(self):
        self.check_uniform(rng_kernel_float32, np.float32)

    @skip_on_cudasim('skip test for speed under cudasim')
    def test_uniform_float64(self):
        self.check_uniform(rng_kernel_float64, np.float64)

    def check_normal(self, kernel_func, dtype):
        """Same setup as check_uniform, but against Normal(0, 1) moments."""
        states = cuda.random.create_xoroshiro128p_states(32 * 2, seed=1)
        out = np.zeros(2 * 32 * 32, dtype=dtype)

        kernel_func[2, 32](states, out, 32, NORMAL)
        self.assertAlmostEqual(out.mean(), 0.0, delta=4e-3)
        self.assertAlmostEqual(out.std(), 1.0, delta=2e-3)

    def test_normal_float32(self):
        self.check_normal(rng_kernel_float32, np.float32)

    @skip_on_cudasim('skip test for speed under cudasim')
    def test_normal_float64(self):
        self.check_normal(rng_kernel_float64, np.float64)
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| {
"content_hash": "f019e0ba68f01f2e6fcfd54646e8d4ce",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 89,
"avg_line_length": 34.22,
"alnum_prop": 0.6598480420806546,
"repo_name": "sklam/numba",
"id": "7673a924d37892716bfea072dd29ec7597bd35da",
"size": "3422",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "numba/cuda/tests/cudapy/test_random.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "6783"
},
{
"name": "C",
"bytes": "638283"
},
{
"name": "C++",
"bytes": "52741"
},
{
"name": "Cuda",
"bytes": "214"
},
{
"name": "GDB",
"bytes": "101"
},
{
"name": "HTML",
"bytes": "3464"
},
{
"name": "Python",
"bytes": "7918676"
},
{
"name": "Shell",
"bytes": "7823"
}
],
"symlink_target": ""
} |
import setuptools
# Packaging definition for the avro_codec distribution.
# Runtime dependencies are pinned to the avro/fastavro versions the codec
# was developed against.
setuptools.setup(
    name="avro_codec",
    version="2.0.0",
    author="Data and Analytics",
    author_email="data@gc.com",
    description="An avro codec which exposes an API similar to the standard library's marshal, pickle and json modules",
    license="MIT",
    keywords="avro encode decode codec",
    url="http://github.com/gamechanger/avro_codec",
    packages=["avro_codec"],
    long_description="",
    install_requires=['avro==1.7.7', 'fastavro==0.22.5'],
    tests_require=['nose']
)
| {
"content_hash": "c7edf0ca78b18637cc26b33c19c06691",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 120,
"avg_line_length": 30.647058823529413,
"alnum_prop": 0.6660268714011516,
"repo_name": "gamechanger/avro_codec",
"id": "fab7241afea17c632de14768a7cd77b5d9b0e751",
"size": "521",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4801"
}
],
"symlink_target": ""
} |
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "Lag1Trend", cycle_length = 30, transform = "RelativeDifference", sigma = 0.0, exog_count = 20, ar_order = 0); | {
"content_hash": "cb77ad2b6a53ba3a2f639490b5f956dd",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 175,
"avg_line_length": 39.285714285714285,
"alnum_prop": 0.7163636363636363,
"repo_name": "antoinecarme/pyaf",
"id": "fc7a0be87214f18a6389e88e6e3d714c1a2d00be",
"size": "275",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/artificial/transf_RelativeDifference/trend_Lag1Trend/cycle_30/ar_/test_artificial_1024_RelativeDifference_Lag1Trend_30__20.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
} |
"""
Given a list of words, we may encode it by writing a reference string S and a
list of indexes A.
For example, if the list of words is ["time", "me", "bell"], we can write it as
S = "time#bell#" and indexes = [0, 2, 5].
Then for each index, we will recover the word by reading from the reference
string from that index until we reach a "#" character.
What is the length of the shortest reference string S possible that encodes the
given words?
Example:
Input: words = ["time", "me", "bell"]
Output: 10
Explanation: S = "time#bell#" and indexes = [0, 2, 5].
Note:
1 <= words.length <= 2000.
1 <= words[i].length <= 7.
Each word has only lowercase letters.
"""
from typing import List
class Solution:
    def minimumLengthEncoding(self, words: List[str]) -> int:
        """Length of the shortest reference string encoding the words.

        Build a trie over the *reversed* words (only suffixes matter).
        A word must appear in the encoding exactly when no other word has
        it as a proper suffix, i.e. when its trie node is a leaf; each
        kept word costs len(word) + 1 for the trailing '#'.
        """
        trie = {}
        leaves = []
        for word in set(words):  # de-duplicate: equal words share an entry
            node = trie
            for ch in reversed(word):
                node = node.setdefault(ch, {})
            leaves.append((node, len(word)))
        # A leaf node (no children) marks a word kept in the encoding.
        return sum(length + 1 for node, length in leaves if not node)
if __name__ == "__main__":
    # Smoke test using the example from the problem statement.
    assert Solution().minimumLengthEncoding(["time", "me", "bell"]) == 10
| {
"content_hash": "8c96cc4c013b6cad18c17740686fcb07",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 79,
"avg_line_length": 24.267857142857142,
"alnum_prop": 0.5614422369389257,
"repo_name": "algorhythms/LeetCode",
"id": "3f9affa6610b425235ebaff302de5b1c4a757b97",
"size": "1378",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "820 Short Encoding of Words.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1444167"
}
],
"symlink_target": ""
} |
from .base_test import BaseTest
| {
"content_hash": "bdd332d64e136c31e483f3ce1b0daea4",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 31,
"avg_line_length": 32,
"alnum_prop": 0.8125,
"repo_name": "VirgilSecurity/virgil-sdk-python",
"id": "df77b31f7b89f6755ae580fdcf0557ddcd36e36c",
"size": "1642",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "virgil_sdk/tests/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "977"
},
{
"name": "Python",
"bytes": "297316"
}
],
"symlink_target": ""
} |
import urllib2
import httplib
import socket
import json
import re
import sys
from telemetry.core import util
from telemetry.core import exceptions
from telemetry.core import user_agent
from telemetry.core import wpr_modes
from telemetry.core import wpr_server
from telemetry.core.chrome import extension_dict_backend
from telemetry.core.chrome import tab_list_backend
from telemetry.core.chrome import tracing_backend
from telemetry.test import options_for_unittests
class ExtensionsNotSupportedException(Exception):
  """Raised when extension functionality is requested from a browser
  backend that was created without extension support."""
  pass
class BrowserBackend(object):
  """A base class for browser backends. Provides basic functionality
  once a remote-debugger port has been established."""

  # Host on which the Web Page Replay server is reachable.
  WEBPAGEREPLAY_HOST = '127.0.0.1'

  def __init__(self, is_content_shell, supports_extensions, options):
    self.browser_type = options.browser_type
    self.is_content_shell = is_content_shell
    self._supports_extensions = supports_extensions
    self.options = options
    self._browser = None
    # Remote-debugger port; presumably set by the concrete subclass when
    # it launches the browser — confirm against subclasses.
    self._port = None

    # Version info, filled in by _PostBrowserStartupInitialization().
    self._inspector_protocol_version = 0
    self._chrome_branch_number = 0
    self._webkit_base_revision = 0
    self._tracing_backend = None

    # Local and remote WPR ports start out identical; a subclass doing
    # port forwarding may remap the remote ones.
    self.webpagereplay_local_http_port = util.GetAvailableLocalPort()
    self.webpagereplay_local_https_port = util.GetAvailableLocalPort()
    self.webpagereplay_remote_http_port = self.webpagereplay_local_http_port
    self.webpagereplay_remote_https_port = self.webpagereplay_local_https_port

    if options.dont_override_profile and not options_for_unittests.AreSet():
      sys.stderr.write('Warning: Not overriding profile. This can cause '
                       'unexpected effects due to profile-specific settings, '
                       'such as about:flags settings, cookies, and '
                       'extensions.\n')
    self._tab_list_backend = tab_list_backend.TabListBackend(self)
    self._extension_dict_backend = None
    if supports_extensions:
      self._extension_dict_backend = \
          extension_dict_backend.ExtensionDictBackend(self)

  def SetBrowser(self, browser):
    """Attach the owning Browser object and initialize the tab list."""
    self._browser = browser
    self._tab_list_backend.Init()

  @property
  def browser(self):
    return self._browser

  @property
  def supports_extensions(self):
    """True if this browser backend supports extensions."""
    return self._supports_extensions

  @property
  def tab_list_backend(self):
    return self._tab_list_backend

  @property
  def extension_dict_backend(self):
    # None unless the backend was created with extension support.
    return self._extension_dict_backend

  def GetBrowserStartupArgs(self):
    """Build the browser command-line flags: user-supplied extras, test
    hygiene flags, WPR redirection, user agent, and extension loading."""
    args = []
    args.extend(self.options.extra_browser_args)
    args.append('--disable-background-networking')
    args.append('--metrics-recording-only')
    args.append('--no-first-run')
    if self.options.wpr_mode != wpr_modes.WPR_OFF:
      args.extend(wpr_server.GetChromeFlags(
          self.WEBPAGEREPLAY_HOST,
          self.webpagereplay_remote_http_port,
          self.webpagereplay_remote_https_port))
    args.extend(user_agent.GetChromeUserAgentArgumentFromType(
        self.options.browser_user_agent_type))

    # Regular and component extensions are passed via separate flags.
    extensions = [extension.local_path for extension in
                  self.options.extensions_to_load if not extension.is_component]
    extension_str = ','.join(extensions)
    if len(extensions) > 0:
      args.append('--load-extension=%s' % extension_str)
    component_extensions = [extension.local_path for extension in
                            self.options.extensions_to_load
                            if extension.is_component]
    component_extension_str = ','.join(component_extensions)
    if len(component_extensions) > 0:
      args.append('--load-component-extension=%s' % component_extension_str)
    return args

  @property
  def wpr_mode(self):
    return self.options.wpr_mode

  def _WaitForBrowserToComeUp(self, timeout=None):
    """Block until the remote debugger answers and (if applicable) all
    requested extensions have loaded; 30s limit on each phase.

    Raises exceptions.BrowserGoneException if the debugger never answers.
    """
    def IsBrowserUp():
      try:
        self.Request('', timeout=timeout)
      except (socket.error, httplib.BadStatusLine, urllib2.URLError):
        return False
      else:
        return True
    try:
      util.WaitFor(IsBrowserUp, timeout=30)
    except util.TimeoutException:
      raise exceptions.BrowserGoneException()

    def AllExtensionsLoaded():
      for e in self.options.extensions_to_load:
        if not e.extension_id in self._extension_dict_backend:
          return False
        extension_object = self._extension_dict_backend[e.extension_id]
        extension_object.WaitForDocumentReadyStateToBeInteractiveOrBetter()
      return True
    if self._supports_extensions:
      util.WaitFor(AllExtensionsLoaded, timeout=30)

  def _PostBrowserStartupInitialization(self):
    """Parse the debugger's /json/version reply to learn the inspector
    protocol version, Chrome branch number and WebKit base revision."""
    # Detect version information.
    data = self.Request('version')
    resp = json.loads(data)
    if 'Protocol-Version' in resp:
      self._inspector_protocol_version = resp['Protocol-Version']
      # Branch number comes from 'Browser' when present, else from the
      # user agent string (e.g. on Android).
      if 'Browser' in resp:
        branch_number_match = re.search('Chrome/\d+\.\d+\.(\d+)\.\d+',
                                        resp['Browser'])
      else:
        branch_number_match = re.search(
            'Chrome/\d+\.\d+\.(\d+)\.\d+ (Mobile )?Safari',
            resp['User-Agent'])
      webkit_version_match = re.search('\((trunk)?\@(\d+)\)',
                                       resp['WebKit-Version'])
      if branch_number_match:
        self._chrome_branch_number = int(branch_number_match.group(1))
      else:
        # Content Shell returns '' for Browser, for now we have to
        # fall-back and assume branch 1025.
        self._chrome_branch_number = 1025
      if webkit_version_match:
        self._webkit_base_revision = int(webkit_version_match.group(2))
      return

    # Detection has failed: assume 18.0.1025.168 ~= Chrome Android.
    self._inspector_protocol_version = 1.0
    self._chrome_branch_number = 1025
    self._webkit_base_revision = 106313

  def Request(self, path, timeout=None):
    """Issue a GET to the remote debugger's /json endpoint (optionally
    /json/<path>) and return the raw response body."""
    url = 'http://localhost:%i/json' % self._port
    if path:
      url += '/' + path
    req = urllib2.urlopen(url, timeout=timeout)
    return req.read()

  @property
  def chrome_branch_number(self):
    return self._chrome_branch_number

  @property
  def supports_tab_control(self):
    # Branch 1303 is the cutoff used here for tab-control support.
    return self._chrome_branch_number >= 1303

  @property
  def supports_tracing(self):
    return self.is_content_shell or self._chrome_branch_number >= 1385

  def StartTracing(self):
    # Tracing backend is created lazily on first use.
    if self._tracing_backend is None:
      self._tracing_backend = tracing_backend.TracingBackend(self._port)
    self._tracing_backend.BeginTracing()

  def StopTracing(self):
    self._tracing_backend.EndTracing()

  def GetTraceResultAndReset(self):
    return self._tracing_backend.GetTraceResultAndReset()

  def GetRemotePort(self, _):
    # Base implementation ignores the requested port and hands back any
    # free local port; subclasses with real remote devices may override.
    return util.GetAvailableLocalPort()

  def Close(self):
    if self._tracing_backend:
      self._tracing_backend.Close()
      self._tracing_backend = None

  # The following must be provided by concrete subclasses.
  def CreateForwarder(self, *port_pairs):
    raise NotImplementedError()

  def IsBrowserRunning(self):
    raise NotImplementedError()

  def GetStandardOutput(self):
    raise NotImplementedError()
| {
"content_hash": "2174225affb8d5f9eeb4bdc2be3e6b3b",
"timestamp": "",
"source": "github",
"line_count": 208,
"max_line_length": 80,
"avg_line_length": 33.60576923076923,
"alnum_prop": 0.6835479256080115,
"repo_name": "timopulkkinen/BubbleFish",
"id": "dce70e69423bfa50b1e6ef7b85bf4775e79becf8",
"size": "7157",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/telemetry/telemetry/core/chrome/browser_backend.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "853"
},
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "1174304"
},
{
"name": "Awk",
"bytes": "9519"
},
{
"name": "C",
"bytes": "75801820"
},
{
"name": "C#",
"bytes": "1132"
},
{
"name": "C++",
"bytes": "161884021"
},
{
"name": "DOT",
"bytes": "1559"
},
{
"name": "F#",
"bytes": "381"
},
{
"name": "Java",
"bytes": "3531849"
},
{
"name": "JavaScript",
"bytes": "18556005"
},
{
"name": "Logos",
"bytes": "4517"
},
{
"name": "Matlab",
"bytes": "5234"
},
{
"name": "Objective-C",
"bytes": "7254742"
},
{
"name": "PHP",
"bytes": "97817"
},
{
"name": "Perl",
"bytes": "933011"
},
{
"name": "Python",
"bytes": "8808682"
},
{
"name": "R",
"bytes": "262"
},
{
"name": "Ragel in Ruby Host",
"bytes": "3621"
},
{
"name": "Shell",
"bytes": "1537764"
},
{
"name": "Tcl",
"bytes": "277077"
},
{
"name": "XML",
"bytes": "13493"
}
],
"symlink_target": ""
} |
from testtools import skipIf
from heat.engine import clients
from heat.engine import environment
from heat.tests.v1_1 import fakes
from heat.common import exception
from heat.common import template_format
from heat.engine import resources
from heat.engine.resources import instance as instances
from heat.engine import service
from heat.openstack.common.importutils import try_import
from heat.engine import parser
from heat.tests.common import HeatTestCase
from heat.tests import utils
test_template_volumeattach = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "test.",
"Resources" : {
"WikiDatabase": {
"Type": "AWS::EC2::Instance",
"DeletionPolicy": "Delete",
"Properties": {
"ImageId": "image_name",
"InstanceType": "m1.large",
"KeyName": "test_KeyName"
}
},
"DataVolume" : {
"Type" : "AWS::EC2::Volume",
"Properties" : {
"Size" : "6",
"AvailabilityZone" : "nova"
}
},
"MountPoint" : {
"Type" : "AWS::EC2::VolumeAttachment",
"Properties" : {
"InstanceId" : { "Ref" : "WikiDatabase" },
"VolumeId" : { "Ref" : "DataVolume" },
"Device" : "/dev/%s"
}
}
}
}
'''
test_template_ref = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "test.",
"Parameters" : {
"KeyName" : {
''' + \
'"Description" : "Name of an existing EC2' + \
'KeyPair to enable SSH access to the instances",' + \
'''
"Type" : "String"
}
},
"Resources" : {
"WikiDatabase": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId": "image_name",
"InstanceType": "m1.large",
"KeyName": { "Ref" : "KeyName" }
}
},
"DataVolume" : {
"Type" : "AWS::EC2::Volume",
"Properties" : {
"Size" : "6",
"AvailabilityZone" : "nova"
}
},
"MountPoint" : {
"Type" : "AWS::EC2::VolumeAttachment",
"Properties" : {
"InstanceId" : { "Ref" : "%s" },
"VolumeId" : { "Ref" : "DataVolume" },
"Device" : "/dev/vdb"
}
}
}
}
'''
test_template_findinmap_valid = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "test.",
"Parameters" : {
"KeyName" : {
''' + \
'"Description" : "Name of an existing EC2 KeyPair to' + \
'enable SSH access to the instances",' + \
'''
"Type" : "String"
}
},
"Resources" : {
"WikiDatabase": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId": "image_name",
"InstanceType": "m1.large",
"KeyName": { "Ref" : "KeyName" }
}
},
"DataVolume" : {
"Type" : "AWS::EC2::Volume",
"Properties" : {
"Size" : "6",
"AvailabilityZone" : "nova"
}
},
"MountPoint" : {
"Type" : "AWS::EC2::VolumeAttachment",
"Properties" : {
"InstanceId" : { "Ref" : "WikiDatabase" },
"VolumeId" : { "Ref" : "DataVolume" },
"Device" : "/dev/vdb"
}
}
}
}
'''
test_template_findinmap_invalid = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "test.",
"Parameters" : {
"KeyName" : {
''' + \
'"Description" : "Name of an existing EC2 KeyPair to enable SSH ' + \
'access to the instances",' + \
''' "Type" : "String"
}
},
"Mappings" : {
"AWSInstanceType2Arch" : {
"t1.micro" : { "Arch" : "64" },
"m1.small" : { "Arch" : "64" },
"m1.medium" : { "Arch" : "64" },
"m1.large" : { "Arch" : "64" },
"m1.xlarge" : { "Arch" : "64" },
"m2.xlarge" : { "Arch" : "64" },
"m2.2xlarge" : { "Arch" : "64" },
"m2.4xlarge" : { "Arch" : "64" },
"c1.medium" : { "Arch" : "64" },
"c1.xlarge" : { "Arch" : "64" },
"cc1.4xlarge" : { "Arch" : "64HVM" },
"cc2.8xlarge" : { "Arch" : "64HVM" },
"cg1.4xlarge" : { "Arch" : "64HVM" }
}
},
"Resources" : {
"WikiDatabase": {
"Type": "AWS::EC2::Instance",
"Properties": {
''' + \
'"ImageId" : { "Fn::FindInMap" : [ "DistroArch2AMI", { "Ref" : ' + \
'"LinuxDistribution" },' + \
'{ "Fn::FindInMap" : [ "AWSInstanceType2Arch", { "Ref" : ' + \
'"InstanceType" }, "Arch" ] } ] },' + \
'''
"InstanceType": "m1.large",
"KeyName": { "Ref" : "KeyName"}
}
},
"DataVolume" : {
"Type" : "AWS::EC2::Volume",
"Properties" : {
"Size" : "6",
"AvailabilityZone" : "nova"
}
},
"MountPoint" : {
"Type" : "AWS::EC2::VolumeAttachment",
"Properties" : {
"InstanceId" : { "Ref" : "WikiDatabase" },
"VolumeId" : { "Ref" : "DataVolume" },
"Device" : "/dev/vdb"
}
}
}
}
'''
test_template_invalid_resources = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "AWS CloudFormation Sample Template for xyz.",
"Parameters" : {
"InstanceType" : {
"Description" : "Defined instance type",
"Type" : "String",
"Default" : "node.ee",
"AllowedValues" : ["node.ee", "node.apache", "node.api"],
"ConstraintDescription" : "must be a valid instance type."
}
},
"Resources" : {
"Type" : "AWS::EC2::Instance",
"Metadata" : {
},
"Properties" : {
"ImageId" : { "Ref" : "centos-6.4-20130701-0" },
"InstanceType" : { "Ref" : "InstanceType" }
}
}
}
'''
test_template_invalid_property = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "test.",
"Parameters" : {
"KeyName" : {
''' + \
'"Description" : "Name of an existing EC2' + \
'KeyPair to enable SSH access to the instances",' + \
'''
"Type" : "String"
}
},
"Resources" : {
"WikiDatabase": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId": "image_name",
"InstanceType": "m1.large",
"KeyName": { "Ref" : "KeyName" },
"UnknownProperty": "unknown"
}
}
}
}
'''
test_template_unimplemented_property = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "test.",
"Parameters" : {
"KeyName" : {
''' + \
'"Description" : "Name of an existing EC2' + \
'KeyPair to enable SSH access to the instances",' + \
'''
"Type" : "String"
}
},
"Resources" : {
"WikiDatabase": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId": "image_name",
"InstanceType": "m1.large",
"KeyName": { "Ref" : "KeyName" },
"SourceDestCheck": "false"
}
}
}
}
'''
test_template_invalid_deletion_policy = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "test.",
"Parameters" : {
"KeyName" : {
''' + \
'"Description" : "Name of an existing EC2' + \
'KeyPair to enable SSH access to the instances",' + \
'''
"Type" : "String"
}
},
"Resources" : {
"WikiDatabase": {
"Type": "AWS::EC2::Instance",
"DeletionPolicy": "Destroy",
"Properties": {
"ImageId": "image_name",
"InstanceType": "m1.large",
"KeyName": { "Ref" : "KeyName" }
}
}
}
}
'''
test_template_snapshot_deletion_policy = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "test.",
"Parameters" : {
"KeyName" : {
''' + \
'"Description" : "Name of an existing EC2' + \
'KeyPair to enable SSH access to the instances",' + \
'''
"Type" : "String"
}
},
"Resources" : {
"WikiDatabase": {
"Type": "AWS::EC2::Instance",
"DeletionPolicy": "Snapshot",
"Properties": {
"ImageId": "image_name",
"InstanceType": "m1.large",
"KeyName": { "Ref" : "KeyName" }
}
}
}
}
'''
test_template_volume_snapshot = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "test.",
"Resources" : {
"DataVolume" : {
"Type" : "AWS::EC2::Volume",
"DeletionPolicy": "Snapshot",
"Properties" : {
"Size" : "6",
"AvailabilityZone" : "nova"
}
}
}
}
'''
test_unregistered_key = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "test.",
"Parameters" : {
"KeyName" : {
''' + \
'"Description" : "Name of an existing EC2' + \
'KeyPair to enable SSH access to the instances",' + \
'''
"Type" : "String"
}
},
"Resources" : {
"Instance": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId": "image_name",
"InstanceType": "m1.large",
"KeyName": { "Ref" : "KeyName" }
}
}
}
}
'''
test_template_image = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "test.",
"Parameters" : {
"KeyName" : {
''' + \
'"Description" : "Name of an existing EC2' + \
'KeyPair to enable SSH access to the instances",' + \
'''
"Type" : "String"
}
},
"Resources" : {
"Instance": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId": "image_name",
"InstanceType": "m1.large",
"KeyName": { "Ref" : "KeyName" }
}
}
}
}
'''
test_template_invalid_secgroups = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "test.",
"Parameters" : {
"KeyName" : {
''' + \
'"Description" : "Name of an existing EC2' + \
'KeyPair to enable SSH access to the instances",' + \
'''
"Type" : "String"
}
},
"Resources" : {
"Instance": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId": "image_name",
"InstanceType": "m1.large",
"KeyName": { "Ref" : "KeyName" },
"SecurityGroups": [ "default" ],
"NetworkInterfaces": [ "mgmt", "data" ]
}
}
}
}
'''
test_template_invalid_secgroupids = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "test.",
"Parameters" : {
"KeyName" : {
''' + \
'"Description" : "Name of an existing EC2' + \
'KeyPair to enable SSH access to the instances",' + \
'''
"Type" : "String"
}
},
"Resources" : {
"Instance": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId": "image_name",
"InstanceType": "m1.large",
"KeyName": { "Ref" : "KeyName" },
"SecurityGroupIds": [ "default" ],
"NetworkInterfaces": [ "mgmt", "data" ]
}
}
}
}
'''
test_template_nova_client_exception = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "test.",
"Resources" : {
"Instance": {
"Type": "AWS::EC2::Instance",
"DeletionPolicy": "Delete",
"Properties": {
"ImageId": "image_name",
"InstanceType": "m1.large"
}
}
}
}
'''
test_template_unique_logical_name = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "test.",
"Parameters" : {
"KeyName" : {
''' + \
'"Description" : "Name of an existing EC2' + \
'KeyPair to enable SSH access to the instances",' + \
'''
"Type" : "String"
},
"AName" : {
''' + \
'"Description" : "Name of an existing EC2' + \
'KeyPair to enable SSH access to the instances",' + \
'''
"Type" : "String"
}
},
"Resources" : {
"AName": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId": "image_name",
"InstanceType": "m1.large",
"KeyName": { "Ref" : "KeyName" },
"NetworkInterfaces": [ "mgmt", "data" ]
}
}
}
}
'''
class validateTest(HeatTestCase):
    """Template-validation tests.

    Exercises resource-level ``validate()`` and the engine's
    ``validate_template`` RPC against a set of valid and deliberately
    broken CFN/HOT templates.  The nova client is stubbed with mox, so
    the exact number of ``Instance.nova()`` expectations per test
    matters.
    """
    def setUp(self):
        super(validateTest, self).setUp()
        resources.initialise()
        self.fc = fakes.FakeClient()
        # NOTE(review): initialise() is called twice here -- looks
        # redundant; confirm whether the second call is intentional.
        resources.initialise()
        utils.setup_dummy_db()
        self.ctx = utils.dummy_context()
    # --- resource-level validation ---------------------------------------
    def test_validate_volumeattach_valid(self):
        t = template_format.parse(test_template_volumeattach % 'vdq')
        stack = parser.Stack(self.ctx, 'test_stack', parser.Template(t))
        volumeattach = stack['MountPoint']
        self.assertTrue(volumeattach.validate() is None)
    def test_validate_volumeattach_invalid(self):
        # 'sda' is not an acceptable device name for the attachment.
        t = template_format.parse(test_template_volumeattach % 'sda')
        stack = parser.Stack(self.ctx, 'test_stack', parser.Template(t))
        volumeattach = stack['MountPoint']
        self.assertRaises(exception.StackValidationFailed,
                          volumeattach.validate)
    # --- engine validate_template ----------------------------------------
    def test_validate_ref_valid(self):
        t = template_format.parse(test_template_ref % 'WikiDatabase')
        self.m.StubOutWithMock(instances.Instance, 'nova')
        instances.Instance.nova().AndReturn(self.fc)
        self.m.ReplayAll()
        engine = service.EngineService('a', 't')
        res = dict(engine.validate_template(None, t))
        self.assertEqual(res['Description'], 'test.')
    def test_validate_hot_valid(self):
        t = template_format.parse(
            """
            heat_template_version: 2013-05-23
            description: test.
            resources:
              my_instance:
                type: AWS::EC2::Instance
            """)
        self.m.StubOutWithMock(instances.Instance, 'nova')
        instances.Instance.nova().AndReturn(self.fc)
        self.m.ReplayAll()
        engine = service.EngineService('a', 't')
        res = dict(engine.validate_template(None, t))
        self.assertEqual(res['Description'], 'test.')
    def test_validate_ref_invalid(self):
        # Reference to a non-existent resource name must not validate.
        t = template_format.parse(test_template_ref % 'WikiDatabasez')
        self.m.StubOutWithMock(instances.Instance, 'nova')
        instances.Instance.nova().AndReturn(self.fc)
        self.m.ReplayAll()
        engine = service.EngineService('a', 't')
        res = dict(engine.validate_template(None, t))
        self.assertNotEqual(res['Description'], 'Successfully validated')
    def test_validate_findinmap_valid(self):
        t = template_format.parse(test_template_findinmap_valid)
        self.m.StubOutWithMock(instances.Instance, 'nova')
        instances.Instance.nova().AndReturn(self.fc)
        self.m.ReplayAll()
        engine = service.EngineService('a', 't')
        res = dict(engine.validate_template(None, t))
        self.assertEqual(res['Description'], 'test.')
    def test_validate_findinmap_invalid(self):
        t = template_format.parse(test_template_findinmap_invalid)
        self.m.StubOutWithMock(instances.Instance, 'nova')
        instances.Instance.nova().AndReturn(self.fc)
        self.m.ReplayAll()
        engine = service.EngineService('a', 't')
        res = dict(engine.validate_template(None, t))
        self.assertNotEqual(res['Description'], 'Successfully validated')
    def test_validate_parameters(self):
        t = template_format.parse(test_template_ref % 'WikiDatabase')
        self.m.StubOutWithMock(instances.Instance, 'nova')
        instances.Instance.nova().AndReturn(self.fc)
        self.m.ReplayAll()
        engine = service.EngineService('a', 't')
        res = dict(engine.validate_template(None, t))
        self.assertEqual(res['Parameters'], {'KeyName': {
            'Type': 'String',
            'Description': 'Name of an existing EC2KeyPair to enable SSH '
                           'access to the instances'}})
    def test_validate_properties(self):
        t = template_format.parse(test_template_invalid_property)
        self.m.StubOutWithMock(instances.Instance, 'nova')
        instances.Instance.nova().AndReturn(self.fc)
        self.m.ReplayAll()
        engine = service.EngineService('a', 't')
        res = dict(engine.validate_template(None, t))
        self.assertEqual(res, {'Error': 'Unknown Property UnknownProperty'})
    def test_invalid_resources(self):
        t = template_format.parse(test_template_invalid_resources)
        self.m.StubOutWithMock(instances.Instance, 'nova')
        instances.Instance.nova().AndReturn(self.fc)
        self.m.ReplayAll()
        engine = service.EngineService('a', 't')
        res = dict(engine.validate_template(None, t))
        self.assertEqual({'Error': 'Resources must contain Resource. '
                          'Found a [string] instead'},
                         res)
    def test_unimplemented_property(self):
        t = template_format.parse(test_template_unimplemented_property)
        self.m.StubOutWithMock(instances.Instance, 'nova')
        instances.Instance.nova().AndReturn(self.fc)
        self.m.ReplayAll()
        engine = service.EngineService('a', 't')
        res = dict(engine.validate_template(None, t))
        self.assertEqual(
            res,
            {'Error': 'Property SourceDestCheck not implemented yet'})
    def test_invalid_deletion_policy(self):
        t = template_format.parse(test_template_invalid_deletion_policy)
        self.m.StubOutWithMock(instances.Instance, 'nova')
        instances.Instance.nova().AndReturn(self.fc)
        self.m.ReplayAll()
        engine = service.EngineService('a', 't')
        res = dict(engine.validate_template(None, t))
        self.assertEqual(res, {'Error': 'Invalid DeletionPolicy Destroy'})
    def test_snapshot_deletion_policy(self):
        t = template_format.parse(test_template_snapshot_deletion_policy)
        self.m.StubOutWithMock(instances.Instance, 'nova')
        instances.Instance.nova().AndReturn(self.fc)
        self.m.ReplayAll()
        engine = service.EngineService('a', 't')
        res = dict(engine.validate_template(None, t))
        self.assertEqual(
            res, {'Error': 'Snapshot DeletionPolicy not supported'})
    # Skipped when the optional cinderclient backup support is unavailable.
    @skipIf(try_import('cinderclient.v1.volume_backups') is None,
            'unable to import volume_backups')
    def test_volume_snapshot_deletion_policy(self):
        t = template_format.parse(test_template_volume_snapshot)
        self.m.StubOutWithMock(instances.Instance, 'nova')
        instances.Instance.nova().AndReturn(self.fc)
        self.m.ReplayAll()
        engine = service.EngineService('a', 't')
        res = dict(engine.validate_template(None, t))
        self.assertEqual(res, {'Description': u'test.', 'Parameters': {}})
    # --- instance-resource validation against nova ------------------------
    def test_unregistered_key(self):
        t = template_format.parse(test_unregistered_key)
        template = parser.Template(t)
        params = {'KeyName': 'not_registered'}
        stack = parser.Stack(self.ctx, 'test_stack', template,
                             environment.Environment(params))
        self.m.StubOutWithMock(instances.Instance, 'nova')
        instances.Instance.nova().AndReturn(self.fc)
        instances.Instance.nova().AndReturn(self.fc)
        self.m.ReplayAll()
        resource = stack['Instance']
        self.assertRaises(exception.UserKeyPairMissing, resource.validate)
    def test_unregistered_image(self):
        t = template_format.parse(test_template_image)
        template = parser.Template(t)
        stack = parser.Stack(self.ctx, 'test_stack', template,
                             environment.Environment({'KeyName': 'test'}))
        self.m.StubOutWithMock(instances.Instance, 'nova')
        instances.Instance.nova().AndReturn(self.fc)
        instances.Instance.nova().AndReturn(self.fc)
        self.m.ReplayAll()
        resource = stack['Instance']
        self.assertRaises(exception.ImageNotFound, resource.validate)
        self.m.VerifyAll()
    def test_duplicated_image(self):
        t = template_format.parse(test_template_image)
        template = parser.Template(t)
        stack = parser.Stack(self.ctx, 'test_stack', template,
                             environment.Environment({'KeyName': 'test'}))
        # Two images with the same name but different ids: lookup by name
        # must raise NoUniqueImageFound.
        class image_type(object):
            def __init__(self, id, name):
                self.id = id
                self.name = name
        image_list = [image_type(id='768b5464-3df5-4abf-be33-63b60f8b99d0',
                                 name='image_name'),
                      image_type(id='a57384f5-690f-48e1-bf46-c4291e6c887e',
                                 name='image_name')]
        self.m.StubOutWithMock(self.fc.images, 'list')
        self.fc.images.list().AndReturn(image_list)
        self.m.StubOutWithMock(instances.Instance, 'nova')
        instances.Instance.nova().AndReturn(self.fc)
        instances.Instance.nova().AndReturn(self.fc)
        self.m.ReplayAll()
        resource = stack['Instance']
        self.assertRaises(exception.NoUniqueImageFound, resource.validate)
        self.m.VerifyAll()
    def test_invalid_security_groups_with_nics(self):
        t = template_format.parse(test_template_invalid_secgroups)
        template = parser.Template(t)
        stack = parser.Stack(self.ctx, 'test_stack', template,
                             environment.Environment({'KeyName': 'test'}))
        self.m.StubOutWithMock(instances.Instance, 'nova')
        instances.Instance.nova().AndReturn(self.fc)
        self.m.ReplayAll()
        resource = stack['Instance']
        self.assertRaises(exception.ResourcePropertyConflict,
                          resource.validate)
    def test_invalid_security_group_ids_with_nics(self):
        t = template_format.parse(test_template_invalid_secgroupids)
        template = parser.Template(t)
        stack = parser.Stack(self.ctx, 'test_stack', template,
                             environment.Environment({'KeyName': 'test'}))
        self.m.StubOutWithMock(instances.Instance, 'nova')
        instances.Instance.nova().AndReturn(self.fc)
        self.m.ReplayAll()
        resource = stack['Instance']
        self.assertRaises(exception.ResourcePropertyConflict,
                          resource.validate)
    def test_client_exception_from_nova_client(self):
        t = template_format.parse(test_template_nova_client_exception)
        template = parser.Template(t)
        stack = parser.Stack(self.ctx, 'test_stack', template)
        self.m.StubOutWithMock(self.fc.images, 'list')
        self.fc.images.list()\
            .AndRaise(clients.novaclient.exceptions.ClientException(500))
        self.m.StubOutWithMock(instances.Instance, 'nova')
        instances.Instance.nova().AndReturn(self.fc)
        self.m.ReplayAll()
        self.assertRaises(exception.Error, stack.validate)
        self.m.VerifyAll()
    def test_validate_unique_logical_name(self):
        t = template_format.parse(test_template_unique_logical_name)
        template = parser.Template(t)
        stack = parser.Stack(self.ctx, 'test_stack', template,
                             environment.Environment({'AName': 'test',
                                                      'KeyName': 'test'}))
        self.assertRaises(exception.StackValidationFailed, stack.validate)
| {
"content_hash": "575118b1e573b35e75cb1b309572afae",
"timestamp": "",
"source": "github",
"line_count": 805,
"max_line_length": 76,
"avg_line_length": 29.40248447204969,
"alnum_prop": 0.5369048122016139,
"repo_name": "savi-dev/heat",
"id": "edf8d43b1b24157b405800ae039df60caed9395e",
"size": "24288",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "heat/tests/test_validate.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
import io
import os
import socket
import requests
from loguru import logger
from flexget import plugin
from flexget.event import event
try:
import mechanicalsoup
except ImportError:
mechanicalsoup = None
logger = logger.bind(name='formlogin')
class FormLogin:
    """Log in to a site by submitting an HTML login form.

    Runs at task start: fetches the configured URL, finds the first form
    containing both the user and password fields, fills it in and submits
    it, so that subsequent plugins share the authenticated session.
    """
    schema = {
        'type': 'object',
        'properties': {
            'url': {'type': 'string', 'format': 'url'},
            'username': {'type': 'string'},
            'password': {'type': 'string'},
            'userfield': {'type': 'string'},
            'passfield': {'type': 'string'},
        },
        'required': ['url', 'username', 'password'],
        'additionalProperties': False,
    }
    def on_task_start(self, task, config):
        """Fetch the login page and submit credentials.

        Raises PluginError when mechanicalsoup is missing, the page cannot
        be fetched, no form with the expected fields exists, or submission
        fails.  On a field mismatch the page is saved under
        ``<config_base>/received`` to help debugging.
        """
        if not mechanicalsoup:
            raise plugin.PluginError(
                'mechanicalsoup required (python module), please install it.', logger
            )
        # Field names default to 'username'/'password' unless overridden.
        userfield = config.get('userfield', 'username')
        passfield = config.get('passfield', 'password')
        url = config['url']
        username = config['username']
        password = config['password']
        # Mechanicalsoup will override our session user agent header unless we explicitly pass it in
        user_agent = task.requests.headers.get('User-Agent')
        br = mechanicalsoup.StatefulBrowser(session=task.requests, user_agent=user_agent)
        try:
            response = br.open(url)
        except requests.RequestException:
            # TODO: improve error handling
            logger.opt(exception=True).debug('Exception getting login page.')
            raise plugin.PluginError('Unable to get login page', logger)
        # br.set_debug(True)
        num_forms = len(br.get_current_page().find_all('form'))
        if not num_forms:
            raise plugin.PluginError('Unable to find any forms on {}'.format(url), logger)
        try:
            # Try each form in turn; the first one exposing both fields wins.
            for form_num in range(num_forms):
                br.select_form(nr=form_num)
                try:
                    br[userfield] = username
                    br[passfield] = password
                    break
                except mechanicalsoup.LinkNotFoundError:
                    pass
            else:
                # No form matched: dump the page for the user to inspect.
                received = os.path.join(task.manager.config_base, 'received')
                if not os.path.isdir(received):
                    os.mkdir(received)
                filename = os.path.join(received, '%s.formlogin.html' % task.name)
                with io.open(filename, 'wb') as f:
                    f.write(response.content)
                logger.critical(
                    'I have saved the login page content to {} for you to view', filename
                )
                raise plugin.PluginError('Unable to find login fields', logger)
        except socket.timeout:
            raise plugin.PluginError('Timed out on url %s' % url)
        try:
            br.submit_selected()
        except requests.RequestException:
            logger.opt(exception=True).debug('Exception submitting login form.')
            raise plugin.PluginError('Unable to post login form', logger)
# Register FormLogin under the plugin name 'form' when plugins are collected.
@event('plugin.register')
def register_plugin():
    plugin.register(FormLogin, 'form', api_ver=2)
| {
"content_hash": "627b71eda4148cad857d270fe0362687",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 100,
"avg_line_length": 32.80808080808081,
"alnum_prop": 0.5689655172413793,
"repo_name": "malkavi/Flexget",
"id": "2fb88063480916238987e0e6af8a420a472f599c",
"size": "3248",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "flexget/plugins/operate/formlogin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11875"
},
{
"name": "Dockerfile",
"bytes": "623"
},
{
"name": "HTML",
"bytes": "84425"
},
{
"name": "JavaScript",
"bytes": "263723"
},
{
"name": "Python",
"bytes": "3514392"
},
{
"name": "SRecode Template",
"bytes": "3"
},
{
"name": "Shell",
"bytes": "1530"
}
],
"symlink_target": ""
} |
import os
import sys
import subprocess
# Locate the cython executable on $PATH; cached in the module-level name.
cython = None
def resolve_cython():
    """Find 'cython' (preferred) or 'cython-2.7' on $PATH and cache its path.

    Scans every PATH entry for each candidate name in order and stores the
    first match in the module-level ``cython`` variable.
    """
    global cython
    candidates = ('cython', 'cython-2.7')
    directories = os.environ['PATH'].split(':')
    for name in candidates:
        for directory in directories:
            if not os.path.exists(directory):
                continue
            if name in os.listdir(directory):
                cython = os.path.join(directory, name)
                return
def do(fn):
    """Cythonize one .pyx file and namespace its generated init symbols.

    Runs the resolved ``cython`` executable on *fn* (a '/'-separated path
    ending in '.pyx'), then, if the file lives inside a package, rewrites
    the generated .c so the module init symbols are prefixed with the
    package path joined by underscores -- avoiding clashes when many
    modules are statically linked into one binary.
    """
    print('cythonize:', fn)
    assert(fn.endswith('.pyx'))
    parts = fn.split('/')
    if parts[0] == '.':
        parts.pop(0)
    # Strip the '.pyx' suffix to get the bare module name.
    modname = parts[-1][:-4]
    package = '_'.join(parts[:-1])
    # cythonize
    subprocess.Popen([cython, fn], env=os.environ).communicate()
    if not package:
        print('no need to rewrite', fn)
    else:
        # get the .c, and change the initXXX
        fn_c = fn[:-3] + 'c'
        with open(fn_c) as fd:
            data = fd.read()
        modname = modname.split('.')[-1]
        pac_mod = '{}_{}'.format(package, modname)
        # Py2 init, Py3 PyInit, and the two name-string forms emitted by Cython.
        fmts = ('init{}(void)', 'PyInit_{}(void)', 'Pyx_NAMESTR("{}")', '"{}"')
        for i, fmt in enumerate(fmts):
            pat = fmt.format(modname)
            sub = fmt.format(pac_mod)
            print('{}: {} -> {}'.format(i + 1, pat, sub))
            data = data.replace(pat, sub)
        print('rewrite', fn_c)
        with open(fn_c, 'w') as fd:
            fd.write(data)
# CLI entry point: resolve the cython binary once, then process each
# .pyx path given on the command line.
if __name__ == '__main__':
    print('-- cythonize', sys.argv)
    resolve_cython()
    for fn in sys.argv[1:]:
        do(fn)
| {
"content_hash": "af1dfc11a7f0b26786a75d8c4e9b891a",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 79,
"avg_line_length": 28.641509433962263,
"alnum_prop": 0.5118577075098815,
"repo_name": "rnixx/kivy-ios",
"id": "c73dd6416a71aa99d0c21e8e0e94d8304745d3fc",
"size": "1541",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/cythonize.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "350"
},
{
"name": "Objective-C",
"bytes": "4442"
},
{
"name": "Python",
"bytes": "200855"
}
],
"symlink_target": ""
} |
import requests
import os
# For this to work, the digitalocean DO_API_KEY, DO_CLIENT_KEY and DO_DEFAULT_SSH_KEY
# should be set as environmental variables
# linux examples in ~./bashrc
# export DO_API_KEY=###########################
# export DO_CLIENT_KEY=#########################
# export DO_DEFAULT_SSH_KEY=#########################
# Client and API codes are both provided on the DigitalOcean control panel.
# Base URL for the (v1) DigitalOcean API.
baseDOurl = 'https://api.digitalocean.com'
def get_env_variable(var_name):
    """Return the value of the environment variable *var_name*.

    Prints a reminder and returns None when the variable is unset
    (the original best-effort behaviour is kept; callers are not
    expected to handle an exception).
    """
    try:
        return os.environ[var_name]
    except KeyError:
        # Narrowed from a bare except: only a missing key is expected here.
        error_msg = "Set environmental variable %s" % var_name
        print(error_msg)
# Credentials are read once at import time; any of these may be None if
# the corresponding environment variable is missing (see get_env_variable).
clientIdVar = get_env_variable('DO_CLIENT_KEY')
apiVar = get_env_variable('DO_API_KEY')
defaultSSHkey = get_env_variable('DO_DEFAULT_SSH_KEY')
# Common query-string payload sent with every API call.
doPayload = {'client_id': clientIdVar,
             'api_key': apiVar
             }
# Information about the currently running droplets
# GET
# https://api.digitalocean.com/droplets/?client_id=[client_id]&api_key=[api_key]
def dropletInfo():
    """Print the account's currently running droplets.

    Calls ``GET /droplets/`` and prints the raw JSON response followed by
    an id/name/IP/status summary for every droplet.  Failures are reported
    on stdout (the original best-effort behaviour is kept), but the
    previous bare ``except:`` clauses are narrowed so that programming
    errors are no longer silently misreported as connection errors.
    """
    infoURL = baseDOurl + '/droplets/'
    # Pre-initialise so the summary section below never hits a NameError
    # when the request itself fails (the old code relied on bare excepts).
    activeDroplets = None
    try:
        get_response = requests.get(url=infoURL, params=doPayload)
        try:
            activeDroplets = get_response.json()
        except ValueError:
            # requests raises ValueError when the body is not valid JSON.
            print('Unable to convert activeDroplets to json()')
        print(activeDroplets)
    except requests.RequestException:
        print('Connection Error / Status Code')
    try:
        # .get() returns None for a missing 'droplets' key; len()/iteration
        # then raise TypeError, reported below like any other lookup problem.
        dDroplets = activeDroplets.get('droplets')
        print('\n' + str(len(dDroplets)) + ' Droplet(s) Currently Running\n')
        for d in dDroplets:
            print('Droplet ID: ' + str(d['id'])
                  + '\nName: ' + str(d['name'])
                  + '\nIP: ' + str(d['ip_address'])
                  + '\nStatus: ' + str(d['status']) + ' \n')
    except (AttributeError, TypeError, KeyError):
        # AttributeError: activeDroplets is still None (request/JSON failed).
        print('error getting droplets')
    print(infoURL)
# Create A Droplet
# First determine what your image will be
# GET
# https://api.digitalocean.com/images/[image_id_or_slug]/?client_id=[client_id]&api_key=[api_key]
def getImageInfo(image_id):
    """Print the JSON description of one image (GET /images/<id>/)."""
    # 3101045 = 'Ubuntu 12.04.4 x64'
    show_url = '{0}/images/{1}/'.format(baseDOurl, str(image_id))
    response = requests.get(show_url, params=doPayload)
    print(response.json())
# Get the available SSH Key numbers
# GET
# https://api.digitalocean.com/ssh_keys/?client_id=[client_id]&api_key=[api_key]
def getSSHKeys():
    """Print every SSH key registered on the account (GET /ssh_keys/)."""
    response = requests.get(baseDOurl + '/ssh_keys/', params=doPayload)
    print(response.json())
# Actually create the droplet
# GET
# https://api.digitalocean.com/droplets/new?client_id=[client_id]&api_key=[api_key]&name=[droplet_name]&size_id=[size_id]&image_id=[image_id]®ion_id=[region_id]&ssh_key_ids=[ssh_key_id1],[ssh_key_id2]
def createDroplet(new_droplet_name):
    """Create a droplet via GET /droplets/new and print the API response.

    Hard-coded defaults: size 66, region 4, image 3101045
    ('Ubuntu 12.04.4 x64') and the account's default SSH key.
    """
    request_params = {
        'client_id': clientIdVar,
        'api_key': apiVar,
        'name': new_droplet_name,
        'size_id': 66,
        'image_id': 3101045,  # 'Ubuntu 12.04.4 x64'
        'region_id': 4,
        'ssh_key_ids': defaultSSHkey,
    }
    response = requests.get(baseDOurl + '/droplets/new', params=request_params)
    print(response.json())
# Destroy A Droplet
# GET
# https://api.digitalocean.com/droplets/[droplet_id]/destroy/?client_id=[client_id]&api_key=[api_key]
def destroyDroplet(droplet_to_destroy):
    """Destroy a droplet by id (GET /droplets/<id>/destroy/); print response."""
    destroy_url = '{0}/droplets/{1}/destroy/'.format(
        baseDOurl, str(droplet_to_destroy))
    response = requests.get(destroy_url, params=doPayload)
    print(response.json())
# Rebuild the droplet
# GET
# https://api.digitalocean.com/droplets/[droplet_id]/rebuild/?image_id=[image_id]&client_id=[client_id]&api_key=[api_key]
def rebuildDroplet(droplet_to_rebuild):
    """Rebuild a droplet from image 3101045 ('Ubuntu 12.04.4 x64'); print response."""
    rebuild_params = {
        'image_id': 3101045,  # 'Ubuntu 12.04.4 x64'
        'client_id': clientIdVar,
        'api_key': apiVar,
    }
    # NOTE: the original spells out the full URL here instead of baseDOurl;
    # kept identical.
    rebuild_url = 'https://api.digitalocean.com/droplets/{0}/rebuild/'.format(
        str(droplet_to_rebuild))
    response = requests.get(rebuild_url, params=rebuild_params)
    print(response.json())
| {
"content_hash": "63fafc19026d08dce9d570ea77730ab4",
"timestamp": "",
"source": "github",
"line_count": 142,
"max_line_length": 203,
"avg_line_length": 33.401408450704224,
"alnum_prop": 0.6316677208517816,
"repo_name": "alexkyllo/school-manager",
"id": "8ba26064a39a53f9097ef391a91420989d5a3214",
"size": "4770",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "digital_ocean_scripts/do_scripts.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "98116"
},
{
"name": "HTML",
"bytes": "47624"
},
{
"name": "JavaScript",
"bytes": "408548"
},
{
"name": "Python",
"bytes": "98421"
}
],
"symlink_target": ""
} |
import can
# Drive one ODrive axis over SocketCAN: run full calibration, wait for the
# axis to return to idle, check the heartbeat error code, then request
# closed-loop control and confirm the state change.
bus = can.Bus("can0", bustype="socketcan")
axisID = 0x1
print("Requesting AXIS_STATE_FULL_CALIBRATION_SEQUENCE (0x03) on axisID: " + str(axisID))
# Arbitration id is axisID << 5 | command; 0x07 is the set-state command here.
msg = can.Message(arbitration_id=axisID << 5 | 0x07, data=[3, 0, 0, 0, 0, 0, 0, 0], dlc=8, is_extended_id=False)
print(msg)
try:
    bus.send(msg)
    print("Message sent on {}".format(bus.channel_info))
except can.CanError:
    print("Message NOT sent! Please verify can0 is working first")
print("Waiting for calibration to finish...")
# Read messages infinitely and wait for the right ID to show up
while True:
    msg = bus.recv()
    # 0x01 is the heartbeat message for this axis.
    if msg.arbitration_id == (axisID << 5 | 0x01):
        # Bytes 4-7: current axis state, little-endian.
        current_state = msg.data[4] | msg.data[5] << 8 | msg.data[6] << 16 | msg.data[7] << 24
        if current_state == 0x1:
            print("\nAxis has returned to Idle state.")
            break
# Check the next heartbeat's error code (bytes 0-3, little-endian).
for msg in bus:
    if(msg.arbitration_id == (axisID << 5 | 0x01)):
        errorCode = msg.data[0] | msg.data[1] << 8 | msg.data[2] << 16 | msg.data[3] << 24
        print("\nReceived Axis heartbeat message:")
        if errorCode == 0x0:
            print("No errors")
        else:
            print("Axis error! Error code: "+str(hex(errorCode)))
        break
print("\nPutting axis",axisID,"into AXIS_STATE_CLOSED_LOOP_CONTROL (0x08)...")
msg = can.Message(arbitration_id=axisID << 5 | 0x07, data=[8, 0, 0, 0, 0, 0, 0, 0], dlc=8, is_extended_id=False)
print(msg)
try:
    bus.send(msg)
    print("Message sent on {}".format(bus.channel_info))
except can.CanError:
    print("Message NOT sent!")
# Confirm via the next heartbeat that byte 4 reports state 0x8.
for msg in bus:
    if msg.arbitration_id == (axisID << 5 | 0x01):
        print("\nReceived Axis heartbeat message:")
        if msg.data[4] == 0x8:
            print("Axis has entered closed loop")
        else:
            print("Axis failed to enter closed loop")
        break
"content_hash": "8098cb16ca4423f5b700dea4e45ae365",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 112,
"avg_line_length": 34.64150943396226,
"alnum_prop": 0.6089324618736384,
"repo_name": "madcowswe/ODrive",
"id": "f6ce94f275b84c062fd6812cbfce6027d1af3998",
"size": "1836",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/can_example.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "23976"
},
{
"name": "Batchfile",
"bytes": "43"
},
{
"name": "C",
"bytes": "5321437"
},
{
"name": "C++",
"bytes": "389065"
},
{
"name": "GDB",
"bytes": "147"
},
{
"name": "Lua",
"bytes": "22719"
},
{
"name": "MATLAB",
"bytes": "3265"
},
{
"name": "Makefile",
"bytes": "6674"
},
{
"name": "Python",
"bytes": "198203"
},
{
"name": "Shell",
"bytes": "949"
}
],
"symlink_target": ""
} |
import os
import io
import sys
import time
import glob
import threading
from Queue import Queue
import cv2
from PIL import Image
import numpy as np
from stir.exception import *
from stir.cmd import run
from magnolia.utility import *
from magnolia.utility import LOG as L
class PatternMatchObject(object):
    """Value object pairing a template-image path with a search region."""
    def __init__(self, _target, _box):
        # Public attribute names are part of the interface: callers read
        # .target (image path) and .box (search region) directly.
        self.target = _target
        self.box = _box
    def __repr__(self):
        return "PatternMatchObject()"
    def __str__(self):
        basename = os.path.basename(self.target)
        return "Target, Box : %s, %s" % (basename, self.box)
class MinicapProc(object):
    """Background consumer of the minicap screen stream.

    Pulls JPEG frames from the device, answers pattern-match and capture
    requests posted by other threads via Queues, saves every 10th frame
    as evidence, and can assemble the evidence into a video with ffmpeg.
    """
    def __init__(self, parent, debug=False):
        self.base = parent                  # owner exposing .minicap/.adb/.pic
        self._loop_flag = True              # cleared by finish() to stop the loop
        self._debug = debug                 # when True, show frames in a cv2 window
        self._pattern_match = None          # pending PatternMatchObject request
        self.patternmatch_result = Queue()
        self._capture = None                # pending capture filename
        self.capture_result = Queue()
        self.counter = 0                    # frame counter for evidence sampling
    def start(self):
        self.base.minicap.start()
        # NOTE(review): Thread.start() returns None, so self.loop is always
        # None -- the thread handle is not retained.
        self.loop = threading.Thread(target=self.main_loop).start()
    def finish(self):
        # Give main_loop time to notice the flag before tearing minicap down.
        self._loop_flag = False; time.sleep(2)
        self.base.minicap.finish()
    def __save(self, filename, data):
        # Write raw bytes to disk.
        with open(filename, "wb") as f:
            f.write(data)
            f.flush()
    def __save_cv(self, filename, img_cv):
        # Returns cv2.imwrite's success flag.
        return cv2.imwrite(filename, img_cv)
    def __save_evidence(self, number, data):
        # Zero-pad the frame number to 5 digits for the ffmpeg %05d pattern.
        if number < 10: number = "0000%s" % str(number)
        elif number < 100: number = "000%s" % str(number)
        elif number < 1000: number = "00%s" % str(number)
        elif number < 10000: number = "0%s" % str(number)
        else: number = str(number)
        self.__save_cv(os.path.join(TMP_EVIDENCE_DIR, "image_%s.png" % number), data)
    def search_pattern(self, target, box=None, timeout=5):
        # Post a request for main_loop, then block on up to `timeout`
        # queue reads until a non-None result arrives.
        self._pattern_match = PatternMatchObject(target, box)
        L.info(self._pattern_match)
        for _ in xrange(timeout):
            result = self.patternmatch_result.get()
            if result != None: break;
        self._pattern_match = None
        return result
    def capture_image(self, filename, timeout=1):
        # Ask main_loop to save the next frame under TMP_DIR/filename and
        # return the absolute path it will be written to.
        self._capture = filename
        for _ in xrange(timeout):
            result = self.capture_result.get()
            if result: break
        abspath = os.path.join(TMP_DIR, filename)
        self._capture = None
        return abspath
    def create_video(self, src, dst, filename="output.avi"):
        # Stitch the numbered evidence frames into an mjpeg video at 3 fps,
        # replacing any previous output file.
        output = os.path.join(dst, filename)
        if os.path.exists(output):
            os.remove(output)
        cmd = r'%s -r 3 -i %s -vcodec mjpeg %s' % (
            FFMPEG_BIN, os.path.join(src, "image_%05d.png"), os.path.join(dst, filename))
        L.debug(run(cmd)[0])
    def main_loop(self):
        # Consumer loop: decode each JPEG frame, service any pending capture
        # or pattern-match request, sample evidence, optionally show a
        # half-size preview window.
        if self._debug: cv2.namedWindow("debug")
        while self._loop_flag:
            data = self.base.minicap.picture.get()
            image_pil = Image.open(io.BytesIO(data))
            image_cv = cv2.cvtColor(np.asarray(image_pil), cv2.COLOR_RGB2BGR)
            if self._capture != None:
                outputfile = os.path.join(TMP_DIR, self._capture)
                result = self.__save_cv(outputfile, image_cv)
                self.capture_result.put(result)
            if self._pattern_match != None:
                result, image_cv = self.base.pic.search_pattern(
                    image_cv, self._pattern_match.target, self._pattern_match.box, TMP_DIR)
                self.patternmatch_result.put(result)
            # Keep one frame in ten as evidence.
            if self.counter % 10 == 0:
                self.__save_evidence(self.counter / 10, image_cv)
            if self._debug:
                w = int(self.base.adb.get().MINICAP_WIDTH) / 2
                h = int(self.base.adb.get().MINICAP_HEIGHT) / 2
                # Swap preview dimensions depending on device rotation.
                if int(self.base.adb.get().ROTATE) == 0:
                    resize_image_cv = cv2.resize(image_cv, (h, w))
                else:
                    resize_image_cv = cv2.resize(image_cv, (w, h))
                cv2.imshow('debug', resize_image_cv)
                key = cv2.waitKey(5)
                if key == 27: break
            self.counter += 1
        if self._debug: cv2.destroyAllWindows()
| {
"content_hash": "439cc57eb68245974b009d5a2f8263b4",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 91,
"avg_line_length": 32.77165354330709,
"alnum_prop": 0.5679961556943777,
"repo_name": "setsulla/stir",
"id": "b09dab7bf9eb5c907e04b011987e291cef240cd7",
"size": "4162",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project/magnolia/minicap.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "176459"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds a nullable 'order' integer field to Category for manual sorting.
    dependencies = [
        ('articles', '0002_auto_20171113_1527'),
    ]
    operations = [
        migrations.AddField(
            model_name='category',
            name='order',
            field=models.IntegerField(blank=True, null=True, verbose_name='Order'),
        ),
    ]
| {
"content_hash": "145272e75fa202a45914a3dbdf727108",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 83,
"avg_line_length": 22.833333333333332,
"alnum_prop": 0.6034063260340633,
"repo_name": "sanoma/django-arctic",
"id": "26fcce6c303ab9762d88568ca8dd97ced4ce837d",
"size": "482",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "example/articles/migrations/0003_category_order.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "82255"
},
{
"name": "Dockerfile",
"bytes": "364"
},
{
"name": "HTML",
"bytes": "73412"
},
{
"name": "JavaScript",
"bytes": "33592"
},
{
"name": "Python",
"bytes": "224568"
}
],
"symlink_target": ""
} |
import unittest
from streamlink import Streamlink
from streamlink.plugins.stream import StreamURL
from streamlink.plugin.plugin import stream_weight
from streamlink.stream import *
class TestPluginStream(unittest.TestCase):
    """Tests for the 'stream' plugin URL schemes (rtmp://, hls://,
    akamaihd://, httpstream://), its parameter parsing, and the
    stream_weight ordering helper."""
    def setUp(self):
        self.session = Streamlink()
    def assertDictHas(self, a, b):
        # Assert every key/value in a is present (and equal) in b.
        for key, value in a.items():
            self.assertEqual(b[key], value)
    # --- helpers: resolve a scheme URL and check the produced stream ------
    def _test_akamaihd(self, surl, url):
        channel = self.session.resolve_url(surl)
        streams = channel.get_streams()
        self.assertTrue("live" in streams)
        stream = streams["live"]
        self.assertTrue(isinstance(stream, AkamaiHDStream))
        self.assertEqual(stream.url, url)
    def _test_hls(self, surl, url):
        channel = self.session.resolve_url(surl)
        streams = channel.get_streams()
        self.assertTrue("live" in streams)
        stream = streams["live"]
        self.assertTrue(isinstance(stream, HLSStream))
        self.assertEqual(stream.url, url)
    def _test_rtmp(self, surl, url, params):
        channel = self.session.resolve_url(surl)
        streams = channel.get_streams()
        self.assertTrue("live" in streams)
        stream = streams["live"]
        self.assertTrue(isinstance(stream, RTMPStream))
        self.assertEqual(stream.params["rtmp"], url)
        self.assertDictHas(params, stream.params)
    def _test_http(self, surl, url, params):
        channel = self.session.resolve_url(surl)
        streams = channel.get_streams()
        self.assertTrue("live" in streams)
        stream = streams["live"]
        self.assertTrue(isinstance(stream, HTTPStream))
        self.assertEqual(stream.url, url)
        self.assertDictHas(params, stream.args)
    def test_plugin_rtmp(self):
        self._test_rtmp("rtmp://hostname.se/stream",
                        "rtmp://hostname.se/stream", dict())
        self._test_rtmp("rtmp://hostname.se/stream live=1 qarg='a \\'string' noq=test",
                        "rtmp://hostname.se/stream", dict(live=True, qarg='a \'string', noq="test"))
        self._test_rtmp("rtmp://hostname.se/stream live=1 num=47",
                        "rtmp://hostname.se/stream", dict(live=True, num=47))
        self._test_rtmp("rtmp://hostname.se/stream conn=['B:1','S:authMe','O:1','NN:code:1.23','NS:flag:ok','O:0']",
                        "rtmp://hostname.se/stream",
                        dict(conn=['B:1', 'S:authMe', 'O:1', 'NN:code:1.23', 'NS:flag:ok', 'O:0']))
    def test_plugin_hls(self):
        # A scheme-less inner URL defaults to http://.
        self._test_hls("hls://https://hostname.se/playlist.m3u8",
                       "https://hostname.se/playlist.m3u8")
        self._test_hls("hls://hostname.se/playlist.m3u8",
                       "http://hostname.se/playlist.m3u8")
    def test_plugin_akamaihd(self):
        self._test_akamaihd("akamaihd://http://hostname.se/stream",
                            "http://hostname.se/stream")
        self._test_akamaihd("akamaihd://hostname.se/stream",
                            "http://hostname.se/stream")
    def test_plugin_http(self):
        self._test_http("httpstream://http://hostname.se/auth.php auth=('test','test2')",
                        "http://hostname.se/auth.php", dict(auth=("test", "test2")))
        self._test_http("httpstream://hostname.se/auth.php auth=('test','test2')",
                        "http://hostname.se/auth.php", dict(auth=("test", "test2")))
        self._test_http("httpstream://https://hostname.se/auth.php verify=False params={'key': 'a value'}",
                        "https://hostname.se/auth.php?key=a+value", dict(verify=False, params=dict(key='a value')))
    def test_parse_params(self):
        self.assertEqual(
            dict(verify=False, params=dict(key="a value")),
            StreamURL._parse_params("""verify=False params={'key': 'a value'}""")
        )
        self.assertEqual(
            dict(verify=False),
            StreamURL._parse_params("""verify=False""")
        )
        # NOTE(review): the literal below opens with FOUR quote characters,
        # so the parsed string begins with a stray '"' before 'conn=' --
        # confirm whether that leading quote is intentional.
        self.assertEqual(
            dict(conn=['B:1', 'S:authMe', 'O:1', 'NN:code:1.23', 'NS:flag:ok', 'O:0']),
            StreamURL._parse_params(""""conn=['B:1', 'S:authMe', 'O:1', 'NN:code:1.23', 'NS:flag:ok', 'O:0']""")
        )
    def test_stream_weight(self):
        # Weight ordering: '+' variants outrank the base, fps suffix adds
        # weight, and bitrate breaks ties.
        self.assertEqual(
            (720, "pixels"),
            stream_weight("720p"))
        self.assertEqual(
            (721, "pixels"),
            stream_weight("720p+"))
        self.assertEqual(
            (780, "pixels"),
            stream_weight("720p60"))
        self.assertTrue(
            stream_weight("720p+") > stream_weight("720p"))
        self.assertTrue(
            stream_weight("720p") == stream_weight("720p"))
        self.assertTrue(
            stream_weight("720p_3000k") > stream_weight("720p_2500k"))
        self.assertTrue(
            stream_weight("720p60_3000k") > stream_weight("720p_3000k"))
        self.assertTrue(
            stream_weight("720p_3000k") < stream_weight("720p+_3000k"))
        self.assertTrue(
            stream_weight("3000k") > stream_weight("2500k"))
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| {
"content_hash": "f65d02ad75fa8ef696669207b7ece3ef",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 116,
"avg_line_length": 37.21739130434783,
"alnum_prop": 0.5697040498442367,
"repo_name": "mmetak/streamlink",
"id": "a2e3770584564c2bb19e03a56f0029b1285ab7d6",
"size": "5136",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_plugin_stream.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "838"
},
{
"name": "Python",
"bytes": "932019"
},
{
"name": "Shell",
"bytes": "16668"
}
],
"symlink_target": ""
} |
"""Batch Fetch Claims Messages.
Synopsis
========
:samp:`fetch.py {description.csv}...`
Description
===========
Reads the CSV-format descriptions, either from stdin or files supplied as command-line
arguments. Each file contains rows which provide the claim identifiers. Other
attributes are silently ignored.
This program calls the claim fetch service, and writes a log of success
and failures.
Options
=======
:file:`description.csv` — one or more CSV-format files which identify the claims to fetch.
If omitted, :file:`stdin` is read.
Environment Variables
=====================
:envvar:`DJANGO_SETTINGS_MODULE` is the Django Settings Module that defines the
database and other runtime environment parameters.
CSV FILE FORMAT
===============
The claim description file has the following format. Either the columns MUST be in
the following order, or the first row MUST have these column titles.
- :samp:`CLAIM-ID`. This is the unique claim ID which will be assigned.
Other columns are permitted in this file. They are ignored. For example, the following
additional column is often used.
- :samp:`GWID` This is the HIPAA Gateway Transaction ID for the claim, used to retrieve it
from FACETS.
"""
from __future__ import print_function
import X12.file
import logging, sys
import xmlrpclib
import csv
# XML-RPC proxies for the claim-fetch and automod services.
# NOTE(review): credentials ("slott:slott") and host/port are hard-coded in
# these URLs — move them to configuration before deployment.
wsClaims= xmlrpclib.ServerProxy( "http://slott:slott@localhost:18000/RPC2/claim", allow_none=True )
wsAutomod= xmlrpclib.ServerProxy( "http://slott:slott@localhost:18000/RPC2/automod", allow_none=True )
def fetchClaims( csvFile ):
    """Fetch each claim listed in *csvFile* via the claim XML-RPC service.

    Reads CSV rows (must contain a ``CLAIM-ID`` column; other columns are
    ignored), requests each claim through the module-level ``wsClaims``
    proxy, and prints the claim on success or an error marker on failure.

    :param csvFile: an open file-like object containing CSV data.
    """
    # (The unused logging.getLogger() call from the original was removed.)
    csvReader = csv.DictReader( csvFile )
    for row in csvReader:
        claimId = row["CLAIM-ID"]
        try:
            status, claim = wsClaims.fetch( claimId )
            if status == "OK":
                print( claim )
            else:
                # Service answered but refused: show the error payload.
                print( "***", claim )
        except xmlrpclib.ProtocolError as e:
            # Transport-level failure; report and continue with the next row.
            print( e )
            print( "*** Could not fetch", claimId )
def getCounts( csvFile ):
    """Query automod property counts for each row of *csvFile*.

    Each CSV row must provide LOCATION, TYPE, BENEFIT and TYPE-OF-SERVICE
    columns.  For every property returned by ``wsAutomod.getCounts`` a
    follow-up ``wsAutomod.mod`` call is made and its result printed.

    :param csvFile: an open file-like object containing CSV data.
    """
    # (The unused logging.getLogger() call from the original was removed.)
    csvReader = csv.DictReader( csvFile )
    for row in csvReader:
        status, counts = wsAutomod.getCounts(
            row["LOCATION"], row["TYPE"],
            row["BENEFIT"], row["TYPE-OF-SERVICE"], "Base" )
        if status != "OK":
            print( "***", counts )
            continue
        for prop, count in counts:
            status, claims = wsAutomod.mod( prop, "" )
            print( status, map( str, claims ) )
if __name__ == "__main__":
    # NOTE(review): Windows-style relative path — assumes the script is run
    # from its own directory; confirm against the deployment layout.
    with open(r"..\..\test\test_description.csv","rb") as claims:
        #fetchClaims( claims )
        getCounts( claims )
| {
"content_hash": "fba28a494843efd42a6ca45cb93fbefa",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 102,
"avg_line_length": 32.023529411764706,
"alnum_prop": 0.6429096252755327,
"repo_name": "jdavisp3/TigerShark",
"id": "74e4425ae52859a17b20a202727e702a1a7de60c",
"size": "2744",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web/claims_837/fetch.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "36926"
},
{
"name": "HTML",
"bytes": "120474"
},
{
"name": "JavaScript",
"bytes": "46605"
},
{
"name": "PLpgSQL",
"bytes": "113"
},
{
"name": "Perl",
"bytes": "23425"
},
{
"name": "Python",
"bytes": "4426497"
}
],
"symlink_target": ""
} |
from utils import Utils
from controller import homeController, imgController, grafoController, feedController
# Frameworks
from flask import Flask, render_template, request, redirect, url_for, make_response, jsonify, Response
from functools import wraps
import facebook
# Bibliotecas padrão
#
app = Flask(__name__)
# URL's
@app.route("/refresh_facebook_data", methods=["GET", "POST"])
def refresh():
    # Re-pulls the logged-in user's Facebook data and persists it locally.
    if not Utils.api:
        # No Graph API session yet: start the OAuth flow.
        # NOTE(review): app id and redirect URL are hard-coded — move to config.
        url = facebook.auth_url("573216906148994", "http://localhost:5000/token", perms=["user_friends", "email", "read_stream", "read_mailbox", "user_status", "publish_actions"])
        return redirect(url)
    # Retrieve the objects from the Graph API
    perfil_publico = Utils.api.get_object("me")
    perfil_publico["picture"] = Utils.api.get_object("me/picture")["data"]
    amigos = Utils.api.get_connections("me", "taggable_friends")
    feed = Utils.api.get_object("me/posts")
    conversations = Utils.api.get_object("me?fields=conversations")
    posts = feed["data"]
    # print conversations["conversations"]["data"][0]
    # Persist into the database
    homeController.salvaAmigo(amigos["data"], perfil_publico["id"])
    homeController.salvaPerfil(perfil_publico)
    feedController.salvaPosts(posts)
    if request.method == "GET":
        return redirect(url_for("login"))
    else:
        return redirect(url_for("home"))
@app.route("/token")
def token():
    '''
    Converts the 'code' returned by "auth_url" into an 'access_token'.
    '''
    code = request.args.get("code", '')
    if not Utils.token:
        # NOTE(review): the Facebook app secret is hard-coded here — it must
        # be moved out of source control (env var / config) before deployment.
        Utils.token = facebook.get_access_token_from_code(code, "http://localhost:5000/token", "573216906148994", "1e7c79732df000c0bb045327e1da5379")
        Utils.api = facebook.GraphAPI(Utils.token["access_token"])
    print Utils.token
    return redirect(url_for("homeLoad"))
@app.route("/login", methods=["GET", "POST"])
def login():
    # On the very first login, ask the user to set a local-access password.
    set_senha = False
    if Utils.api:
        set_senha = True
    if request.method == "GET":
        return render_template("login.html", set_senha=set_senha)
    elif request.method == "POST":
        email = request.form["email"]
        senha = request.form["senha"]
        perfil = homeController.obterPerfilPorEmail(email)
        if perfil and set_senha:
            # Store the new local-access password and go to the home screen.
            homeController.setSenha(perfil[0], senha)
            Utils.pid = perfil[0].id
            return redirect(url_for("home"))
        elif perfil:
            # Existing user: verify credentials.
            # NOTE(review): passwords are compared in plain text — they should
            # be hashed before storage and comparison.
            if perfil[0].password == senha:
                Utils.pid = perfil[0].id
                print Utils.pid
                return redirect(url_for("home"))
            else:
                return redirect(url_for("login"))
        else:
            # Unknown user: kick off the Facebook data-collection flow.
            return redirect(url_for("refresh"))
@app.route("/home")
def home():
    '''
    GET /home
    Renders the landing page with the stored public profile and friend list.
    '''
    profile = homeController.obterPerfil(Utils.pid)
    friends = homeController.obterAmigos(Utils.pid)
    return render_template(
        "home.html",
        perfil_publico=profile,
        logout=url_for("login"),
        amigos=friends,
    )
@app.route("/feed")
def feed():
    '''
    GET /feed
    Renders the user's recent feed updates from the local database.
    '''
    usuario = feedController.obtemPerfilUsuario(Utils.pid)
    posts = feedController.obterPosts(Utils.pid)
    for p in posts:
        # NOTE(review): debugging leftover — prints every post's comments.
        print p.comments
    return render_template("feed.html", perfil_publico=usuario, posts=posts, len_posts=len(posts))
@app.route("/postar", methods=["POST"])
def postar():
    # Publishes a new post to the user's Facebook feed, then re-syncs data.
    print request.form["post"]
    if Utils.api:
        post = request.form["post"]
        Utils.api.put_object("me", "feed", message=post)
    return redirect(url_for("refresh"))
@app.route("/home/load")
def homeLoad():
    '''
    GET /home/load
    Shows a loading screen before the application starts.
    '''
    return render_template("carregando.html")
# Serves pictures stored in the database as /picture/<id>.jpg links.
@app.route("/picture/<pid>.jpg")
def getImage(pid):
    '''
    GET /picture/*.jpg
    Streams stored image bytes with JPEG download headers.
    '''
    payload = imgController.getImg(pid)
    resp = make_response(payload)
    resp.headers['Content-Type'] = 'image/jpeg'
    resp.headers['Content-Disposition'] = 'attachment; filename=img.jpg'
    return resp
@app.route("/grafo.json")
def grafoJson():
    '''
    GET /grafo.json
    Serializes the connection-graph structure for the current profile.
    '''
    return jsonify(**grafoController.getGrafo(Utils.pid))
| {
"content_hash": "eb8bce89ca313c0c87161a4a21c6e0da",
"timestamp": "",
"source": "github",
"line_count": 142,
"max_line_length": 173,
"avg_line_length": 29.781690140845072,
"alnum_prop": 0.7143532750059116,
"repo_name": "diogocs1/facebookexplorer",
"id": "cff6d276669aa8536af8fd2bee8826fa40e7f559",
"size": "4270",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flaskapp.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1282"
},
{
"name": "HTML",
"bytes": "7999"
},
{
"name": "JavaScript",
"bytes": "1672"
},
{
"name": "Python",
"bytes": "15612"
}
],
"symlink_target": ""
} |
import settings
import shutil
import os.path
import os
import sys
from tqdm import tqdm
import common.database as db
from sqlalchemy_continuum_vendored.utils import version_table
class Spinner(object):
    """Console activity indicator with per-event glyph sets.

    Tracks how many events were seen (``outInt``), an aggregate size
    (``dlen``), and how many glyphs were emitted (``prints``).  Every 80
    emitted glyphs a counter header is rewritten at the start of the line.
    """

    def __init__(self):
        # Glyph alphabets for the different event kinds.
        self.outStr = "|-"
        self.outStar = "*x"
        self.outMatch = r"\/"
        self.outClean = "Dd"
        self.itemLen = len(self.outStr)
        # Running counters.
        self.outInt = 0
        self.dlen = 0
        self.x = 0
        self.prints = 0

    def next(self, star=False, clean=False, hashmatch=False, vlen=1, output=True):
        """Record one event; optionally draw a glyph to stdout.

        Flag precedence when several are set: star > clean > hashmatch.
        """
        self.outInt += 1
        self.dlen += vlen
        if not output:
            return
        if self.prints % 80 == 0:
            # Periodically refresh the numeric header at the line start.
            sys.stdout.write("\r %9d %9d " % (self.outInt, self.dlen))
        self.x = (self.x + 1) % self.itemLen
        if star:
            glyphs = self.outStar
        elif clean:
            glyphs = self.outClean
        elif hashmatch:
            glyphs = self.outMatch
        else:
            glyphs = self.outStr
        sys.stdout.write(glyphs[self.x])
        sys.stdout.flush()
        self.prints += 1
def sync_raw_with_filesystem():
    """Cross-check raw page files on disk against the raw_web_pages table.

    Files on disk that the DB does not reference are reported (actual
    deletion is disabled — ``os.unlink`` is commented out).  DB rows whose
    file is missing on disk are reset to state 'new' with a NULL fspath so
    the fetcher re-acquires them.
    """
    with db.session_context() as sess:
        print("Loading files from database...")
        # Cheap planner estimate (avoids a COUNT(*) scan); only used to
        # size the progress bar.
        est = sess.execute("SELECT reltuples::BIGINT AS estimate FROM pg_class WHERE relname='raw_web_pages';")
        res = est.scalar()
        print("Estimated row-count: %s" % res)
        in_db = []
        with tqdm(total=res) as pbar:
            for fspath, in sess.query(db.RawWebPages.fspath).yield_per(5000):
                if fspath:
                    in_db.append(fspath)
                pbar.update(n=1)
        in_db = set(in_db)
        tgtpath = settings.RAW_RESOURCE_DIR
        print("")
        print("Enumerating files from disk...")
        agg_files = []
        have_files = []
        with tqdm(total=len(in_db)) as pbar:
            for root, dirs, files in os.walk(tgtpath):
                for filen in files:
                    fqpath = os.path.join(root, filen)
                    # Path relative to the resource root, as stored in the DB.
                    fpath = fqpath[len(tgtpath)+1:]
                    if fpath in in_db:
                        have_files.append(fpath)
                    else:
                        agg_files.append(fpath)
                        # Deletion intentionally disabled; report only.
                        # (The original also recomputed fqpath here with an
                        # identical value — removed as redundant.)
                        # os.unlink(fqpath)
                        print("\rDeleting: %s " % fqpath)
                    pbar.update(n=1)
        print()
        print("Found %s files (%s unique)" % (len(agg_files), len(set(agg_files))))
        missing_files = set(in_db) - set(have_files)
        for filen in agg_files:
            print("Should delete: '%s'" % filen)
        for filen in missing_files:
            print("Missing: '%s'" % filen)
            # Reset the row so the page is fetched again.
            sess.query(db.RawWebPages).filter(db.RawWebPages.fspath == filen).update({"state" : "new", "fspath" : None})
        sess.commit()
def sync_filtered_with_filesystem():
    """Cross-check processed files on disk against DB references.

    Collects every fspath referenced by the WebFiles table and by the
    raw_web_pages history (version) table, then walks RESOURCE_DIR and
    reports files that neither table references.  Deletion is disabled
    (``os.unlink`` is commented out), so this only prints what *would*
    be removed.
    """
    with db.session_context() as sess:
        tgtpath = settings.RESOURCE_DIR
        ctbl = version_table(db.RawWebPages.__table__)
        print("Loading files from database...")
        # Planner estimates used only to size the progress bars.
        est = sess.execute("SELECT reltuples::BIGINT AS estimate FROM pg_class WHERE relname='raw_web_pages';")
        res = est.scalar()
        vest = sess.execute("SELECT reltuples::BIGINT AS estimate FROM pg_class WHERE relname='raw_web_pages_version';")
        vres = vest.scalar()
        print("Estimated row-count: %s, version table: %s" % (res, vres))
        in_main_db = []
        with tqdm(total=res) as pbar:
            # NOTE(review): iterates WebFiles while the bar total is the
            # raw_web_pages estimate — confirm which table is intended.
            for row in sess.query(db.WebFiles).yield_per(10000):
                if row.fspath:
                    in_main_db.append(row.fspath)
                pbar.update(n=1)
        in_history_db = []
        with tqdm(total=vres) as pbar:
            for rfspath, in sess.query(ctbl.c.fspath).yield_per(1000):
                if rfspath:
                    in_history_db.append(rfspath)
                pbar.update(n=1)
        origl_main = len(in_main_db)
        origl_hist = len(in_history_db)
        in_db_main = set(in_main_db)
        in_db_hist = set(in_history_db)
        # BUGFIX: Python sets do not support "+" — the original
        # "in_db_main + in_db_hist" raised TypeError.  Union is intended.
        in_db = in_db_main | in_db_hist
        print("")
        print("%s files, %s unique" % ((origl_main, origl_hist), (len(in_db_main), len(in_db_hist))))
        print("Enumerating files from disk...")
        agg_files = []
        have_files = []
        with tqdm(total=len(in_db)) as pbar:
            for root, _, files in os.walk(tgtpath):
                for filen in files:
                    fqpath = os.path.join(root, filen)
                    # Path relative to the resource root, as stored in the DB.
                    fpath = fqpath[len(tgtpath)+1:]
                    if fpath in in_db:
                        pbar.update(n=1)
                        have_files.append(fpath)
                    else:
                        pbar.update(n=1)
                        agg_files.append(fpath)
                        # Deletion intentionally disabled; report only.
                        # os.unlink(fqpath)
                        print("\rDeleting: %s " % fqpath)
| {
"content_hash": "2f4c825c5c1b00eaad83f858ba934d79",
"timestamp": "",
"source": "github",
"line_count": 174,
"max_line_length": 114,
"avg_line_length": 24.649425287356323,
"alnum_prop": 0.6374446257868968,
"repo_name": "fake-name/ReadableWebProxy",
"id": "c8108cc7d05e7f1f6c11bf3588848d69b8602cc6",
"size": "4291",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "common/management/file_cleanup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "105811"
},
{
"name": "Dockerfile",
"bytes": "1178"
},
{
"name": "HTML",
"bytes": "119737"
},
{
"name": "JavaScript",
"bytes": "3006524"
},
{
"name": "Jupyter Notebook",
"bytes": "148075"
},
{
"name": "Mako",
"bytes": "1454"
},
{
"name": "Python",
"bytes": "5264346"
},
{
"name": "Shell",
"bytes": "1059"
}
],
"symlink_target": ""
} |
class TreeNode:
    """A binary-tree node holding a value and two child links."""

    def __init__(self, x):
        self.val = x
        # Children start unlinked; callers wire nodes together afterwards.
        self.left = self.right = None
class Solution:
    def binaryTreePaths(self, root: "TreeNode") -> "List[str]":
        """Return every root-to-leaf path in *root* as 'a->b->c' strings.

        The annotations are quoted (PEP 484 forward references) because the
        original unquoted ``List[str]`` was evaluated at definition time and
        raised NameError: this file never imports ``typing.List``.

        DFS keeps one shared ``path`` stack and snapshots it at each leaf.
        """
        result = []
        path = []
        def dfs(node):
            if node is None:
                return
            path.append(str(node.val))
            if node.left is None and node.right is None:
                # Leaf reached: record the accumulated path.
                result.append('->'.join(path))
            else:
                dfs(node.left)
                dfs(node.right)
            path.pop()  # backtrack before returning to the parent
        dfs(root)
        return result
"content_hash": "cff5c7d6a3bf4efcbcfe1d5ba4b6b1fe",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 59,
"avg_line_length": 26.26086956521739,
"alnum_prop": 0.4586092715231788,
"repo_name": "jiadaizhao/LeetCode",
"id": "a5a819a2965192f5249b2e39943176db8ff1d904",
"size": "641",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "0201-0300/0257-Binary Tree Paths/0257-Binary Tree Paths.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "1140864"
},
{
"name": "Java",
"bytes": "34062"
},
{
"name": "Python",
"bytes": "758800"
},
{
"name": "Shell",
"bytes": "698"
},
{
"name": "TSQL",
"bytes": "774"
}
],
"symlink_target": ""
} |
import numpy as np
from scipy import sparse as sp
import tensorflow as tf
from .session_management import get_session
def create_tensorrec_iterator(name):
    """
    Creates a TensorFlow Iterator that is ready for the standard TensorRec data format.
    :param name: str
    The name for this Iterator.
    :return: tf.data.Iterator
    """
    # The five components are: row indices, column indices, values (all
    # variable-length 1-D), and the two scalar dense dimensions of the
    # sparse matrix being streamed.
    return tf.data.Iterator.from_structure(
        output_types=(tf.int64, tf.int64, tf.float32, tf.int64, tf.int64),
        output_shapes=([None], [None], [None], [], []),
        shared_name=name
    )
def create_tensorrec_dataset_from_sparse_matrix(sparse_matrix):
    """Build a tf.data.Dataset holding the given scipy.sparse matrix.

    The matrix is decomposed into parallel row/col/value arrays plus its two
    dimensions, each wrapped in a leading axis of size 1 so the Dataset
    yields exactly one element in the standard TensorRec format.

    :param sparse_matrix: scipy.sparse matrix
        The data to be contained in this Dataset.
    :return: tf.data.Dataset
    """
    # COO exposes explicit row/col/data triplets, which is the layout needed.
    coo = sparse_matrix if isinstance(sparse_matrix, sp.coo_matrix) else sp.coo_matrix(sparse_matrix)
    slices = (
        np.array([coo.row], dtype=np.int64),
        np.array([coo.col], dtype=np.int64),
        np.array([coo.data], dtype=np.float32),
        np.array([coo.shape[0]], dtype=np.int64),
        np.array([coo.shape[1]], dtype=np.int64),
    )
    return tf.data.Dataset.from_tensor_slices(slices)
def write_tfrecord_from_sparse_matrix(tfrecord_path, sparse_matrix):
    """Serialize a scipy.sparse matrix to a TFRecord file.

    Convenience wrapper: converts the matrix to a TensorRec Dataset and
    delegates to write_tfrecord_from_tensorrec_dataset.

    :param tfrecord_path: str
    :param sparse_matrix: scipy.sparse matrix
    :return: str
        The tfrecord path
    """
    return write_tfrecord_from_tensorrec_dataset(
        tfrecord_path=tfrecord_path,
        dataset=create_tensorrec_dataset_from_sparse_matrix(sparse_matrix=sparse_matrix))
def get_dimensions_from_tensorrec_dataset(dataset):
    """
    Given a TensorFlow Dataset in the standard TensorRec format, returns the dimensions of the SparseTensor to be
    populated by the Dataset.
    :param dataset: tf.data.Dataset
    :return: (int, int)
    """
    session = get_session()
    # Dedicated iterator over the dataset; only the two dimension
    # components of the 5-tuple are actually evaluated.
    iterator = create_tensorrec_iterator('dims_iterator')
    initializer = iterator.make_initializer(dataset)
    _, _, _, tf_d0, tf_d1 = iterator.get_next()
    session.run(initializer)
    d0, d1 = session.run([tf_d0, tf_d1])
    return d0, d1
def write_tfrecord_from_tensorrec_dataset(tfrecord_path, dataset):
    """Write the contents of a TensorRec Dataset to a TFRecord file.

    Pulls the single (row_index, col_index, values, d0, d1) element out of
    the dataset via a session run, packs it into one tf.train.Example, and
    writes it.

    :param tfrecord_path: str
    :param dataset: tf.data.Dataset
    :return: str
        The tfrecord path
    """
    session = get_session()
    iterator = create_tensorrec_iterator('dataset_writing_iterator')
    initializer = iterator.make_initializer(dataset)
    tf_row_index, tf_col_index, tf_values, tf_d0, tf_d1 = iterator.get_next()
    session.run(initializer)
    row_index, col_index, values, d0, d1 = session.run(
        [tf_row_index, tf_col_index, tf_values, tf_d0, tf_d1])

    def _int64_feature(int_values):
        return tf.train.Feature(int64_list=tf.train.Int64List(value=int_values))

    def _float_feature(float_values):
        return tf.train.Feature(float_list=tf.train.FloatList(value=float_values))

    feature = {
        'row_index': _int64_feature(row_index),
        'col_index': _int64_feature(col_index),
        'values': _float_feature(values),
        'd0': _int64_feature([d0]),
        'd1': _int64_feature([d1]),
    }
    example = tf.train.Example(features=tf.train.Features(feature=feature))
    # BUGFIX: the writer was previously left open if write()/SerializeToString()
    # raised; the context manager guarantees flush + close on all paths.
    with tf.python_io.TFRecordWriter(tfrecord_path) as writer:
        writer.write(example.SerializeToString())
    return tfrecord_path
def create_tensorrec_dataset_from_tfrecord(tfrecord_path):
    """
    Loads a TFRecord file and creates a Dataset with the contents.
    :param tfrecord_path: str
    :return: tf.data.Dataset
    """
    def parse_tensorrec_tfrecord(example_proto):
        # Mirror the layout written by write_tfrecord_from_tensorrec_dataset:
        # index/value components as variable-length lists, dimensions as
        # scalar features.
        features = {
            'row_index': tf.FixedLenSequenceFeature((), tf.int64, allow_missing=True),
            'col_index': tf.FixedLenSequenceFeature((), tf.int64, allow_missing=True),
            'values': tf.FixedLenSequenceFeature((), tf.float32, allow_missing=True),
            'd0': tf.FixedLenFeature((), tf.int64),
            'd1': tf.FixedLenFeature((), tf.int64),
        }
        parsed_features = tf.parse_single_example(example_proto, features)
        return (parsed_features['row_index'], parsed_features['col_index'], parsed_features['values'],
                parsed_features['d0'], parsed_features['d1'])
    dataset = tf.data.TFRecordDataset(tfrecord_path).map(parse_tensorrec_tfrecord)
    return dataset
| {
"content_hash": "565e7693eb1b18e60353c9f009c33071",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 113,
"avg_line_length": 37.47244094488189,
"alnum_prop": 0.6656860685017861,
"repo_name": "jfkirk/tensorrec",
"id": "405a648d06ff97c705768fd2939aecb13f9aa9a9",
"size": "4759",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorrec/input_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "161191"
}
],
"symlink_target": ""
} |
"""Base class for sparse matrix formats using compressed storage
"""
__all__ = []
from warnings import warn
import numpy as np
from base import spmatrix, isspmatrix, SparseEfficiencyWarning
from data import _data_matrix
import sparsetools
from sputils import upcast, upcast_char, to_native, isdense, isshape, \
getdtype, isscalarlike, isintlike
class _cs_matrix(_data_matrix):
"""base matrix class for compressed row and column oriented matrices"""
def __init__(self, arg1, shape=None, dtype=None, copy=False):
_data_matrix.__init__(self)
if isspmatrix(arg1):
if arg1.format == self.format and copy:
arg1 = arg1.copy()
else:
arg1 = arg1.asformat(self.format)
self._set_self( arg1 )
elif isinstance(arg1, tuple):
if isshape(arg1):
# It's a tuple of matrix dimensions (M, N)
# create empty matrix
self.shape = arg1 #spmatrix checks for errors here
M, N = self.shape
self.data = np.zeros(0, getdtype(dtype, default=float))
self.indices = np.zeros(0, np.intc)
self.indptr = np.zeros(self._swap((M,N))[0] + 1, dtype=np.intc)
else:
if len(arg1) == 2:
# (data, ij) format
from coo import coo_matrix
other = self.__class__( coo_matrix(arg1, shape=shape) )
self._set_self( other )
elif len(arg1) == 3:
# (data, indices, indptr) format
(data, indices, indptr) = arg1
self.indices = np.array(indices, copy=copy)
self.indptr = np.array(indptr, copy=copy)
self.data = np.array(data, copy=copy, dtype=getdtype(dtype, data))
else:
raise ValueError("unrecognized %s_matrix constructor usage" %
self.format)
else:
#must be dense
try:
arg1 = np.asarray(arg1)
except:
raise ValueError("unrecognized %s_matrix constructor usage" %
self.format)
from coo import coo_matrix
self._set_self( self.__class__(coo_matrix(arg1, dtype=dtype)) )
# Read matrix dimensions given, if any
if shape is not None:
self.shape = shape # spmatrix will check for errors
else:
if self.shape is None:
# shape not already set, try to infer dimensions
try:
major_dim = len(self.indptr) - 1
minor_dim = self.indices.max() + 1
except:
raise ValueError('unable to infer matrix dimensions')
else:
self.shape = self._swap((major_dim,minor_dim))
if dtype is not None:
self.data = self.data.astype(dtype)
self.check_format(full_check=False)
    def getnnz(self):
        # The last index-pointer entry is the cumulative count of stored
        # values, i.e. the number of nonzeros.
        return self.indptr[-1]
    # Expose the stored-entry count as a read-only attribute.
    nnz = property(fget=getnnz)
    def _set_self(self, other, copy=False):
        """Adopt other's data/indices/indptr/shape arrays as this matrix's own."""
        if copy:
            other = other.copy()
        self.data = other.data
        self.indices = other.indices
        self.indptr = other.indptr
        self.shape = other.shape
    def check_format(self, full_check=True):
        """Check whether the matrix respects the CSR/CSC storage invariants.

        Parameters
        ----------
        full_check : bool
            True  - rigorous check, O(N) operations (default)
            False - basic check, O(1) operations
        """
        # use _swap to determine proper bounds: the major axis is rows for
        # CSR and columns for CSC.
        major_name,minor_name = self._swap(('row','column'))
        major_dim,minor_dim = self._swap(self.shape)
        # index arrays should have integer data types
        if self.indptr.dtype.kind != 'i':
            warn("indptr array has non-integer dtype (%s)" \
                    % self.indptr.dtype.name )
        if self.indices.dtype.kind != 'i':
            warn("indices array has non-integer dtype (%s)" \
                    % self.indices.dtype.name )
        # only support 32-bit ints for now
        self.indptr = np.asarray(self.indptr, dtype=np.intc)
        self.indices = np.asarray(self.indices, dtype=np.intc)
        self.data = to_native(self.data)
        # check array shapes
        if np.rank(self.data) != 1 or np.rank(self.indices) != 1 or np.rank(self.indptr) != 1:
            raise ValueError('data, indices, and indptr should be rank 1')
        # check index pointer
        if (len(self.indptr) != major_dim + 1 ):
            raise ValueError("index pointer size (%d) should be (%d)" %
                                (len(self.indptr), major_dim + 1))
        if (self.indptr[0] != 0):
            raise ValueError("index pointer should start with 0")
        # check index and data arrays
        if (len(self.indices) != len(self.data)):
            raise ValueError("indices and data should have the same size")
        if (self.indptr[-1] > len(self.indices)):
            raise ValueError("Last value of index pointer should be less than "
                                "the size of index and data arrays")
        # Drop any slack capacity beyond nnz before the detailed checks.
        self.prune()
        if full_check:
            #check format validity (more expensive)
            if self.nnz > 0:
                if self.indices.max() >= minor_dim:
                    raise ValueError("%s index values must be < %d" %
                            (minor_name,minor_dim))
                if self.indices.min() < 0:
                    raise ValueError("%s index values must be >= 0" %
                            minor_name)
                if np.diff(self.indptr).min() < 0:
                    raise ValueError("index pointer values must form a "
                                        "non-decreasing sequence")
        #if not self.has_sorted_indices():
        #    warn('Indices were not in sorted order.  Sorting indices.')
        #    self.sort_indices()
        #    assert(self.has_sorted_indices())
        #TODO check for duplicates?
    # Elementwise addition; only scalar zero is supported as a scalar operand.
    def __add__(self,other):
        # First check if argument is a scalar
        if isscalarlike(other):
            if other == 0:
                return self.copy()
            else: # Now we would add this scalar to every element.
                raise NotImplementedError('adding a nonzero scalar to a '
                                          'sparse matrix is not supported')
        elif isspmatrix(other):
            if (other.shape != self.shape):
                raise ValueError("inconsistent shapes")
            # Delegate to the C-level binary op on matching formats.
            return self._binopt(other,'_plus_')
        elif isdense(other):
            # Convert this matrix to a dense matrix and add them
            return self.todense() + other
        else:
            raise NotImplementedError
    def __radd__(self,other):
        # Addition is commutative here; reuse __add__.
        return self.__add__(other)
    def __sub__(self,other):
        # First check if argument is a scalar
        if isscalarlike(other):
            if other == 0:
                return self.copy()
            else: # Now we would add this scalar to every element.
                raise NotImplementedError('adding a nonzero scalar to a '
                                          'sparse matrix is not supported')
        elif isspmatrix(other):
            if (other.shape != self.shape):
                raise ValueError("inconsistent shapes")
            return self._binopt(other,'_minus_')
        elif isdense(other):
            # Convert this matrix to a dense matrix and subtract them
            return self.todense() - other
        else:
            raise NotImplementedError
    def __rsub__(self,other): # other - self
        #note: this can't be replaced by other + (-self) for unsigned types
        if isscalarlike(other):
            if other == 0:
                return -self.copy()
            else: # Now we would add this scalar to every element.
                raise NotImplementedError('adding a nonzero scalar to a '
                                          'sparse matrix is not supported')
        elif isdense(other):
            # Convert this matrix to a dense matrix and subtract them
            return other - self.todense()
        else:
            raise NotImplementedError
    # Elementwise (true) division by a scalar or another sparse matrix.
    def __truediv__(self,other):
        if isscalarlike(other):
            # Scalar division becomes multiplication by the reciprocal.
            return self * (1./other)
        elif isspmatrix(other):
            if other.shape != self.shape:
                raise ValueError('inconsistent shapes')
            return self._binopt(other,'_eldiv_')
        else:
            raise NotImplementedError
    def multiply(self, other):
        """Point-wise multiplication by another matrix
        """
        if other.shape != self.shape:
            raise ValueError('inconsistent shapes')
        if isdense(other):
            return np.multiply(self.todense(),other)
        else:
            # Convert the operand to this compressed format first.
            other = self.__class__(other)
            return self._binopt(other,'_elmul_')
    ###########################
    # Multiplication handlers #
    ###########################
    def _mul_vector(self, other):
        # Sparse matrix times dense 1-d vector.
        M,N = self.shape
        # output array
        result = np.zeros(M, dtype=upcast_char(self.dtype.char,
                                                other.dtype.char))
        # csr_matvec or csc_matvec
        fn = getattr(sparsetools,self.format + '_matvec')
        fn(M, N, self.indptr, self.indices, self.data, other, result)
        return result
    def _mul_multivector(self, other):
        # Sparse matrix times dense 2-d array (a batch of column vectors).
        M,N = self.shape
        n_vecs = other.shape[1] #number of column vectors
        result = np.zeros((M,n_vecs), dtype=upcast_char(self.dtype.char,
                                                        other.dtype.char))
        # csr_matvecs or csc_matvecs
        fn = getattr(sparsetools,self.format + '_matvecs')
        fn(M, N, n_vecs, self.indptr, self.indices, self.data, other.ravel(), result.ravel())
        return result
    def _mul_sparse_matrix(self, other):
        # Sparse-sparse product via the two-pass C routines: pass 1 sizes
        # the result's index pointer, pass 2 fills indices and data.
        M, K1 = self.shape
        K2, N = other.shape
        major_axis = self._swap((M,N))[0]
        indptr = np.empty(major_axis + 1, dtype=np.intc)
        other = self.__class__(other) #convert to this format
        fn = getattr(sparsetools, self.format + '_matmat_pass1')
        fn( M, N, self.indptr, self.indices, \
                other.indptr, other.indices, \
                indptr)
        # Total output nonzeros, known only after pass 1.
        nnz = indptr[-1]
        indices = np.empty(nnz, dtype=np.intc)
        data = np.empty(nnz, dtype=upcast(self.dtype,other.dtype))
        fn = getattr(sparsetools, self.format + '_matmat_pass2')
        fn( M, N, self.indptr, self.indices, self.data, \
                other.indptr, other.indices, other.data, \
                indptr, indices, data)
        return self.__class__((data,indices,indptr),shape=(M,N))
    def diagonal(self):
        """Returns the main diagonal of the matrix
        """
        #TODO support k-th diagonal
        fn = getattr(sparsetools, self.format + "_diagonal")
        y = np.empty( min(self.shape), dtype=upcast(self.dtype) )
        fn(self.shape[0], self.shape[1], self.indptr, self.indices, self.data, y)
        return y
def sum(self, axis=None):
"""Sum the matrix over the given axis. If the axis is None, sum
over both rows and columns, returning a scalar.
"""
# The spmatrix base class already does axis=0 and axis=1 efficiently
# so we only do the case axis=None here
if axis is None:
return self.data.sum()
else:
return spmatrix.sum(self,axis)
raise ValueError("axis out of bounds")
    #######################
    # Getting and Setting #
    #######################
    def __getitem__(self, key):
        # Supports A[i,j] scalar access, single-major-axis slices, and
        # general A[slice, slice] submatrix extraction.
        if isinstance(key, tuple):
            row = key[0]
            col = key[1]
            #TODO implement CSR[ [1,2,3], X ] with sparse matmat
            #TODO make use of sorted indices
            if isintlike(row) and isintlike(col):
                return self._get_single_element(row,col)
            else:
                major,minor = self._swap((row,col))
                if isintlike(major) and isinstance(minor,slice):
                    minor_shape = self._swap(self.shape)[1]
                    # Normalize the slice against the minor dimension.
                    start, stop, stride = minor.indices(minor_shape)
                    out_shape = self._swap( (1, stop-start) )
                    return self._get_slice( major, start, stop, stride, out_shape)
                elif isinstance( row, slice) or isinstance(col, slice):
                    return self._get_submatrix( row, col )
                else:
                    raise NotImplementedError
        elif isintlike(key):
            # A single integer selects an entire row.
            return self[key, :]
        else:
            raise IndexError("invalid index")
    def _get_single_element(self,row,col):
        # Scalar lookup A[row, col] with negative-index wrap-around.
        M, N = self.shape
        if (row < 0):
            row += M
        if (col < 0):
            col += N
        if not (0<=row<M) or not (0<=col<N):
            raise IndexError("index out of bounds")
        major_index, minor_index = self._swap((row,col))
        # Scan only the stored entries of the selected major-axis line.
        start = self.indptr[major_index]
        end = self.indptr[major_index+1]
        indxs = np.where(minor_index == self.indices[start:end])[0]
        num_matches = len(indxs)
        if num_matches == 0:
            # entry does not appear in the matrix
            return 0
        elif num_matches == 1:
            return self.data[start:end][indxs[0]]
        else:
            raise ValueError('nonzero entry (%d,%d) occurs more than once' % (row,col))
    def _get_slice(self, i, start, stop, stride, shape):
        """Returns a copy of the elements
            [i, start:stop:string] for row-oriented matrices
            [start:stop:string, i] for column-oriented matrices
        """
        if stride != 1:
            raise ValueError("slicing with step != 1 not supported")
        if stop <= start:
            raise ValueError("slice width must be >= 1")
        #TODO make [i,:] faster
        #TODO implement [i,x:y:z]
        # Collect positions of stored entries that fall inside the window.
        indices = []
        for ind in xrange(self.indptr[i], self.indptr[i+1]):
            if self.indices[ind] >= start and self.indices[ind] < stop:
                indices.append(ind)
        # Re-base the kept minor indices so the result starts at zero.
        index = self.indices[indices] - start
        data = self.data[indices]
        indptr = np.array([0, len(indices)])
        return self.__class__((data, index, indptr), shape=shape, \
                              dtype=self.dtype)
    def _get_submatrix( self, slice0, slice1 ):
        """Return a submatrix of this matrix (new matrix is created)."""
        slice0, slice1 = self._swap((slice0,slice1))
        shape0, shape1 = self._swap(self.shape)
        # Normalize a slice, scalar, or 2-sequence into a half-open range.
        def _process_slice( sl, num ):
            if isinstance( sl, slice ):
                i0, i1 = sl.start, sl.stop
                if i0 is None:
                    i0 = 0
                elif i0 < 0:
                    i0 = num + i0
                if i1 is None:
                    i1 = num
                elif i1 < 0:
                    i1 = num + i1
                return i0, i1
            elif np.isscalar( sl ):
                if sl < 0:
                    sl += num
                return sl, sl + 1
            else:
                return sl[0], sl[1]
        # Validate a half-open range against a dimension size.
        def _in_bounds( i0, i1, num ):
            if not (0<=i0<num) or not (0<i1<=num) or not (i0<i1):
                raise IndexError("index out of bounds: 0<=%d<%d, 0<=%d<%d, %d<%d" %
                                    (i0, num, i1, num, i0, i1))
        i0, i1 = _process_slice( slice0, shape0 )
        j0, j1 = _process_slice( slice1, shape1 )
        _in_bounds( i0, i1, shape0 )
        _in_bounds( j0, j1, shape1 )
        # Extraction is done by the CSR routine for both orientations;
        # _swap above put the arguments in the right order for it.
        aux = sparsetools.get_csr_submatrix( shape0, shape1,
                                             self.indptr, self.indices,
                                             self.data,
                                             i0, i1, j0, j1 )
        data, indices, indptr = aux[2], aux[1], aux[0]
        shape = self._swap( (i1 - i0, j1 - j0) )
        return self.__class__( (data,indices,indptr), shape=shape )
    def __setitem__(self, key, val):
        # Only scalar assignment A[i, j] = v is supported.
        if isinstance(key, tuple):
            row,col = key
            if not (isscalarlike(row) and isscalarlike(col)):
                raise NotImplementedError("Fancy indexing in assignment not "
                                          "supported for csr matrices.")
            M, N = self.shape
            if (row < 0):
                row += M
            if (col < 0):
                col += N
            if not (0<=row<M) or not (0<=col<N):
                raise IndexError("index out of bounds")
            major_index, minor_index = self._swap((row,col))
            # Locate the target entry among the stored values of its line.
            start = self.indptr[major_index]
            end = self.indptr[major_index+1]
            indxs = np.where(minor_index == self.indices[start:end])[0]
            num_matches = len(indxs)
            if not np.isscalar(val):
                raise ValueError('setting an array element with a sequence')
            val = self.dtype.type(val)
            if num_matches == 0:
                #entry not already present
                warn('changing the sparsity structure of a %s_matrix is expensive. ' \
                        'lil_matrix is more efficient.' % self.format, \
                        SparseEfficiencyWarning)
                if self.has_sorted_indices:
                    # preserve sorted order
                    newindx = start + self.indices[start:end].searchsorted(minor_index)
                else:
                    newindx = start
                val = np.array([val], dtype=self.data.dtype)
                minor_index = np.array([minor_index], dtype=self.indices.dtype)
                # Splice the new entry into data/indices and shift indptr.
                self.data = np.concatenate((self.data[:newindx], val, self.data[newindx:]))
                self.indices = np.concatenate((self.indices[:newindx], minor_index, self.indices[newindx:]))
                self.indptr = self.indptr.copy()
                self.indptr[major_index+1:] += 1
            elif num_matches == 1:
                #entry appears exactly once
                self.data[start:end][indxs[0]] = val
            else:
                #entry appears more than once
                raise ValueError('nonzero entry (%d,%d) occurs more than once'
                                    % (row,col))
            self.check_format(full_check=True)
        else:
            # We should allow slices here!
            raise IndexError("invalid index")
    ######################
    # Conversion methods #
    ######################
    def todia(self):
        # Convert via an intermediate (non-copying) COO representation.
        return self.tocoo(copy=False).todia()
    def todok(self):
        # Convert via an intermediate (non-copying) COO representation.
        return self.tocoo(copy=False).todok()
    def tocoo(self,copy=True):
        """Return a COOrdinate representation of this matrix
        When copy=False the index and data arrays are not copied.
        """
        major_dim,minor_dim = self._swap(self.shape)
        data = self.data
        minor_indices = self.indices
        if copy:
            data = data.copy()
            minor_indices = minor_indices.copy()
        major_indices = np.empty(len(minor_indices), dtype=np.intc)
        # expandptr decompresses indptr into one explicit major index
        # per stored entry.
        sparsetools.expandptr(major_dim,self.indptr,major_indices)
        row,col = self._swap((major_indices,minor_indices))
        from coo import coo_matrix
        return coo_matrix((data,(row,col)), self.shape)
    def toarray(self, order=None, out=None):
        """See the docstring for `spmatrix.toarray`."""
        return self.tocoo(copy=False).toarray(order=order, out=out)
##############################################################
# methods that examine or modify the internal data structure #
##############################################################
    def eliminate_zeros(self):
        """Remove zero entries from the matrix

        This is an *in place* operation.
        """
        # the same C routine serves both CSR and CSC; _swap picks the
        # (major, minor) dimension order appropriate for this format
        fn = sparsetools.csr_eliminate_zeros
        M,N = self._swap(self.shape)
        fn( M, N, self.indptr, self.indices, self.data)
        self.prune() #nnz may have changed
    def sum_duplicates(self):
        """Eliminate duplicate matrix entries by adding them together

        This is an *in place* operation.
        """
        # the C routine requires sorted indices so duplicates are adjacent
        self.sort_indices()
        fn = sparsetools.csr_sum_duplicates
        M,N = self._swap(self.shape)
        fn( M, N, self.indptr, self.indices, self.data)
        self.prune() #nnz may have changed
def __get_sorted(self):
"""Determine whether the matrix has sorted indices
Returns
- True: if the indices of the matrix are in sorted order
- False: otherwise
"""
#first check to see if result was cached
if not hasattr(self,'__has_sorted_indices'):
fn = sparsetools.csr_has_sorted_indices
self.__has_sorted_indices = \
fn( len(self.indptr) - 1, self.indptr, self.indices)
return self.__has_sorted_indices
def __set_sorted(self, val):
self.__has_sorted_indices = bool(val)
has_sorted_indices = property(fget=__get_sorted, fset=__set_sorted)
def sorted_indices(self):
"""Return a copy of this matrix with sorted indices
"""
A = self.copy()
A.sort_indices()
return A
# an alternative that has linear complexity is the following
# although the previous option is typically faster
#return self.toother().toother()
    def sort_indices(self):
        """Sort the indices of this matrix *in place*

        No-op when the indices are already known to be sorted; afterwards
        the `has_sorted_indices` flag is set so repeated calls are cheap.
        """
        if not self.has_sorted_indices:
            fn = sparsetools.csr_sort_indices
            fn( len(self.indptr) - 1, self.indptr, self.indices, self.data)
            self.has_sorted_indices = True
    def prune(self):
        """Remove empty space after all non-zero elements.

        Truncates `data` and `indices` to exactly `nnz` entries, validating
        the internal structure first.  Raises ValueError when the arrays are
        inconsistent with `indptr`.
        """
        major_dim = self._swap(self.shape)[0]
        if len(self.indptr) != major_dim + 1:
            raise ValueError('index pointer has invalid length')
        if len(self.indices) < self.nnz:
            raise ValueError('indices array has fewer than nnz elements')
        if len(self.data) < self.nnz:
            raise ValueError('data array has fewer than nnz elements')
        # arrays may be over-allocated; keep only the live prefix
        self.data = self.data[:self.nnz]
        self.indices = self.indices[:self.nnz]
###################
# utility methods #
###################
    # needed by _data_matrix
    def _with_data(self,data,copy=True):
        """Returns a matrix with the same sparsity structure as self,
        but with different data.  By default the structure arrays
        (i.e. .indptr and .indices) are copied.
        """
        if copy:
            return self.__class__((data,self.indices.copy(),self.indptr.copy()), \
                                   shape=self.shape,dtype=data.dtype)
        else:
            # shares .indices/.indptr with self -- mutating one affects both
            return self.__class__((data,self.indices,self.indptr), \
                                   shape=self.shape,dtype=data.dtype)
    def _binopt(self, other, op):
        """apply the binary operation fn to two sparse matrices

        `op` is a name fragment such as '_plus_' or '_minus_'; the matching
        C routine (e.g. csr_plus_csr) is looked up on sparsetools.
        Returns a new matrix of the same format.
        """
        other = self.__class__(other)

        # e.g. csr_plus_csr, csr_minus_csr, etc.
        fn = getattr(sparsetools, self.format + op + self.format)

        # worst-case output size: no cancellation/overlap between operands
        maxnnz = self.nnz + other.nnz
        indptr = np.empty_like(self.indptr)
        indices = np.empty(maxnnz, dtype=np.intc)
        data = np.empty(maxnnz, dtype=upcast(self.dtype,other.dtype))

        fn(self.shape[0], self.shape[1], \
                self.indptr, self.indices, self.data,
                other.indptr, other.indices, other.data,
                indptr, indices, data)

        actual_nnz = indptr[-1]
        indices = indices[:actual_nnz]
        data = data[:actual_nnz]
        if actual_nnz < maxnnz // 2:
            #too much waste, trim arrays
            indices = indices.copy()
            data = data.copy()

        A = self.__class__((data, indices, indptr), shape=self.shape)

        return A
| {
"content_hash": "69d9f94a8192aca5f42902953dcc25c7",
"timestamp": "",
"source": "github",
"line_count": 691,
"max_line_length": 108,
"avg_line_length": 35.26049204052099,
"alnum_prop": 0.5228401395444284,
"repo_name": "teoliphant/scipy",
"id": "fd891654962e614cecf00d8e122c52de3452dfb6",
"size": "24365",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scipy/sparse/compressed.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "11530901"
},
{
"name": "C++",
"bytes": "7695320"
},
{
"name": "FORTRAN",
"bytes": "5898903"
},
{
"name": "Matlab",
"bytes": "1861"
},
{
"name": "Objective-C",
"bytes": "137083"
},
{
"name": "Python",
"bytes": "5863600"
},
{
"name": "Shell",
"bytes": "1793"
}
],
"symlink_target": ""
} |
"""Some generic utilities for dealing with classes, urls, and serialization."""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import logging
import os
import re
import stat
import socket
import sys
import warnings
from signal import signal, SIGINT, SIGABRT, SIGTERM
try:
from signal import SIGKILL
except ImportError:
SIGKILL=None
from types import FunctionType
try:
import cPickle
pickle = cPickle
except:
cPickle = None
import pickle
import zmq
from zmq.log import handlers
from traitlets.log import get_logger
from decorator import decorator
from traitlets.config.application import Application
from jupyter_client.localinterfaces import localhost, is_public_ip, public_ips
from ipython_genutils.py3compat import string_types, iteritems, itervalues
#-----------------------------------------------------------------------------
# Classes
#-----------------------------------------------------------------------------
class Namespace(dict):
    """dict subclass whose keys can also be read and written as attributes."""

    def __getattr__(self, key):
        """Attribute read falls through to item lookup; NameError if absent."""
        try:
            return self[key]
        except KeyError:
            raise NameError(key)

    def __setattr__(self, key, value):
        """Attribute write stores an item, refusing to shadow dict's own API."""
        if hasattr(dict, key):
            raise KeyError("Cannot override dict keys %r"%key)
        self[key] = value
class ReverseDict(dict):
    """simple double-keyed subset of dict methods.

    Every forward mapping ``key -> value`` is mirrored as ``value -> key``
    in a private reverse table, so lookups work in either direction.
    """

    def __init__(self, *args, **kwargs):
        dict.__init__(self, *args, **kwargs)
        self._reverse = dict()
        for key, value in iteritems(self):
            self._reverse[value] = key

    def __getitem__(self, key):
        # forward lookup first, then fall back to the reverse table
        try:
            return dict.__getitem__(self, key)
        except KeyError:
            return self._reverse[key]

    def __setitem__(self, key, value):
        if key in self._reverse:
            raise KeyError("Can't have key %r on both sides!"%key)
        dict.__setitem__(self, key, value)
        self._reverse[value] = key

    def pop(self, key):
        """Remove `key` from both directions; return its forward value."""
        value = dict.pop(self, key)
        self._reverse.pop(value)
        return value

    def get(self, key, default=None):
        """Like dict.get, but consults both directions."""
        try:
            return self[key]
        except KeyError:
            return default
#-----------------------------------------------------------------------------
# Functions
#-----------------------------------------------------------------------------
@decorator
def log_errors(f, self, *args, **kwargs):
    """decorator to log unhandled exceptions raised in a method.

    For use wrapping on_recv callbacks, so that exceptions
    do not cause the stream to be closed.

    NOTE: the exception is swallowed after logging; the wrapped call
    then returns None to the caller.
    """
    try:
        return f(self, *args, **kwargs)
    except Exception:
        # requires the wrapped object to expose a .log attribute
        self.log.error("Uncaught exception in %r" % f, exc_info=True)
def is_url(url):
    """boolean check for whether a string is a zmq url"""
    if '://' not in url:
        return False
    scheme = url.split('://', 1)[0]
    return scheme.lower() in ('tcp', 'pgm', 'epgm', 'ipc', 'inproc')
def validate_url(url):
    """validate a url for zeromq

    Raises TypeError if `url` is not a string and AssertionError if it is
    malformed; returns True otherwise.  Only tcp urls are fully validated.
    """
    if not isinstance(url, string_types):
        raise TypeError("url must be a string, not %r"%type(url))
    url = url.lower()

    proto_addr = url.split('://')
    assert len(proto_addr) == 2, 'Invalid url: %r'%url
    proto, addr = proto_addr
    assert proto in ['tcp','pgm','epgm','ipc','inproc'], "Invalid protocol: %r"%proto

    # domain pattern adapted from http://www.regexlib.com/REDetails.aspx?regexp_id=391
    # author: Remi Sabourin
    pat = re.compile(r'^([\w\d]([\w\d\-]{0,61}[\w\d])?\.)*[\w\d]([\w\d\-]{0,61}[\w\d])?$')

    if proto == 'tcp':
        lis = addr.split(':')
        assert len(lis) == 2, 'Invalid url: %r'%url
        addr,s_port = lis
        try:
            port = int(s_port)
        except ValueError:
            # bugfix: previously formatted the message with the unbound name
            # `port`, which raised UnboundLocalError instead of the intended
            # AssertionError; report the offending string s_port instead
            raise AssertionError("Invalid port %r in url: %r"%(s_port, url))

        assert addr == '*' or pat.match(addr) is not None, 'Invalid url: %r'%url
    else:
        # only validate tcp urls currently
        pass

    return True
def validate_url_container(container):
    """validate a potentially nested collection of urls.

    Accepts a single url string, a dict (values are validated), or any
    iterable of urls / nested containers.  Raises on the first invalid url.
    """
    if isinstance(container, string_types):
        url = container
        return validate_url(url)
    elif isinstance(container, dict):
        # only the values of a dict are urls; keys are ignored
        container = itervalues(container)

    for element in container:
        validate_url_container(element)
def split_url(url):
    """split a zmq url (tcp://ip:port) into ('tcp','ip','port')."""
    pieces = url.split('://')
    assert len(pieces) == 2, 'Invalid url: %r'%url
    proto, rest = pieces
    host_port = rest.split(':')
    assert len(host_port) == 2, 'Invalid url: %r'%url
    host, port = host_port
    return proto, host, port
def disambiguate_ip_address(ip, location=None):
    """turn multi-ip interfaces '0.0.0.0' and '*' into a connectable address

    Explicit IP addresses are returned unmodified.

    Parameters
    ----------

    ip : IP address
        An IP address, or the special values 0.0.0.0, or *
    location: IP address, optional
        A public IP of the target machine.
        If location is an IP of the current machine,
        localhost will be returned,
        otherwise location will be returned.

    Returns
    -------
    str : a concrete IP address that a remote peer can connect to.
    """
    if ip in {'0.0.0.0', '*'}:
        if not location:
            # unspecified location, localhost is the only choice
            ip = localhost()
        elif is_public_ip(location):
            # location is a public IP on this machine, use localhost
            ip = localhost()
        elif not public_ips():
            # this machine's public IPs cannot be determined,
            # assume `location` is not this machine
            warnings.warn("IPython could not determine public IPs", RuntimeWarning)
            ip = location
        else:
            # location is not this machine, do not use loopback
            ip = location
    return ip
def disambiguate_url(url, location=None):
    """turn multi-ip interfaces '0.0.0.0' and '*' into connectable
    ones, based on the location (default interpretation is localhost).

    This is for zeromq urls, such as ``tcp://*:10101``.
    """
    try:
        proto, ip, port = split_url(url)
    except AssertionError:
        # probably not tcp url; could be ipc, etc.
        return url

    return "%s://%s:%s" % (proto, disambiguate_ip_address(ip, location), port)
#--------------------------------------------------------------------------
# helpers for implementing old MEC API via view.apply
#--------------------------------------------------------------------------
def interactive(f):
    """decorator for making functions appear as interactively defined.
    This results in the function being linked to the user_ns as globals()
    instead of the module globals().
    """

    # build new FunctionType, so it can have the right globals
    # interactive functions never have closures, that's kind of the point
    if isinstance(f, FunctionType):
        mainmod = __import__('__main__')
        f = FunctionType(f.__code__, mainmod.__dict__,
            f.__name__, f.__defaults__,
        )
    # associate with __main__ for uncanning
    f.__module__ = '__main__'
    return f
@interactive
def _push(**ns):
    """helper method for implementing `client.push` via `client.apply`

    Each keyword is assigned into the interactive namespace via a
    temporary name + exec, so the assignment happens in user_ns itself.
    """
    user_ns = globals()
    tmp = '_IP_PUSH_TMP_'
    # pick a temporary name that is not already taken in user_ns
    while tmp in user_ns:
        tmp = tmp + '_'
    try:
        for name, value in ns.items():
            user_ns[tmp] = value
            exec("%s = %s" % (name, tmp), user_ns)
    finally:
        # always drop the temporary, even if an assignment failed
        user_ns.pop(tmp, None)
@interactive
def _pull(keys):
    """helper method for implementing `client.pull` via `client.apply`

    `keys` may be a single expression string or a list/tuple/set of them;
    each is evaluated in the interactive namespace.
    """
    if isinstance(keys, (list,tuple, set)):
        return [eval(key, globals()) for key in keys]
    else:
        return eval(keys, globals())
@interactive
def _execute(code):
    """helper method for implementing `client.execute` via `client.apply`"""
    # runs in the interactive namespace because of the @interactive wrapper
    exec(code, globals())
#--------------------------------------------------------------------------
# extra process management utilities
#--------------------------------------------------------------------------
_random_ports = set()

def select_random_ports(n):
    """Bind `n` ephemeral ports and return their (distinct) port numbers.

    Ports previously handed out by this process (tracked in the module-level
    `_random_ports` set) are skipped, so repeated calls never repeat a port.
    All sockets stay bound until the full batch is chosen, then are closed.
    """
    bound = []
    for _ in range(n):
        sock = socket.socket()
        sock.bind(('', 0))
        # re-roll if the OS handed us a port we already gave out earlier
        while sock.getsockname()[1] in _random_ports:
            sock.close()
            sock = socket.socket()
            sock.bind(('', 0))
        bound.append(sock)
    ports = []
    for sock in bound:
        port = sock.getsockname()[1]
        sock.close()
        ports.append(port)
        _random_ports.add(port)
    return ports
def signal_children(children):
    """Relay interrupt/term signals to children, for more solid process cleanup.

    Installs a handler for SIGINT/SIGABRT/SIGTERM that terminates every
    child process and then exits.
    """
    def terminate_children(sig, frame):
        log = get_logger()
        log.critical("Got signal %i, terminating children..."%sig)
        for child in children:
            child.terminate()

        # exit status 0 for a plain Ctrl-C (SIGINT), 1 for other signals
        sys.exit(sig != SIGINT)
        # sys.exit(sig)
    for sig in (SIGINT, SIGABRT, SIGTERM):
        signal(sig, terminate_children)
def generate_exec_key(keyfile):
    """Write a freshly generated UUID4 execution key to `keyfile`.

    The file is created with user-only read/write permissions (0600).
    """
    import uuid
    key = "%s\n" % uuid.uuid4()
    with open(keyfile, 'w') as f:
        f.write(key)
    # restrict to user-only RW (0600); this has no effect on Windows
    os.chmod(keyfile, stat.S_IRUSR|stat.S_IWUSR)
def integer_loglevel(loglevel):
    """Map a log level given as an int, numeric string, or level name
    (e.g. 'DEBUG') to its integer value."""
    try:
        return int(loglevel)
    except ValueError:
        # not numeric: a string is treated as a logging level name
        if isinstance(loglevel, str):
            loglevel = getattr(logging, loglevel)
        return loglevel
def connect_logger(logname, context, iface, root="ip", loglevel=logging.DEBUG):
    """Attach a zmq PUBHandler (publishing to `iface`) to logger `logname`.

    Returns the configured logger, or None if it already had a PUBHandler.
    """
    logger = logging.getLogger(logname)
    if any([isinstance(h, handlers.PUBHandler) for h in logger.handlers]):
        # don't add a second PUBHandler
        return
    loglevel = integer_loglevel(loglevel)
    lsock = context.socket(zmq.PUB)
    lsock.connect(iface)
    handler = handlers.PUBHandler(lsock)
    handler.setLevel(loglevel)
    handler.root_topic = root
    logger.addHandler(handler)
    logger.setLevel(loglevel)
    return logger
def connect_engine_logger(context, iface, engine, loglevel=logging.DEBUG):
    """Attach an EnginePUBHandler for `engine` to the root logger.

    Returns the root logger, or None if it already had a PUBHandler.
    """
    from ipyparallel.engine.log import EnginePUBHandler
    logger = logging.getLogger()
    if any([isinstance(h, handlers.PUBHandler) for h in logger.handlers]):
        # don't add a second PUBHandler
        return
    loglevel = integer_loglevel(loglevel)
    lsock = context.socket(zmq.PUB)
    lsock.connect(iface)
    handler = EnginePUBHandler(engine, lsock)
    handler.setLevel(loglevel)
    logger.addHandler(handler)
    logger.setLevel(loglevel)
    return logger
def local_logger(logname, loglevel=logging.DEBUG):
    """Attach a timestamped StreamHandler to the logger named `logname`.

    Returns the configured logger, or None if it already had a
    StreamHandler (nothing is ever added twice).
    """
    loglevel = integer_loglevel(loglevel)
    logger = logging.getLogger(logname)
    if any(isinstance(h, logging.StreamHandler) for h in logger.handlers):
        # already configured; don't add a second StreamHandler
        return
    handler = logging.StreamHandler()
    handler.setLevel(loglevel)
    fmt = logging.Formatter("%(asctime)s.%(msecs).03d [%(name)s] %(message)s",
                datefmt="%Y-%m-%d %H:%M:%S")
    handler.setFormatter(fmt)
    logger.addHandler(handler)
    logger.setLevel(loglevel)
    return logger
def set_hwm(sock, hwm=0):
    """set zmq High Water Mark on a socket

    in a way that always works for various pyzmq / libzmq versions.

    Tries the legacy combined HWM option as well as the split
    send/receive options; missing options and ZMQErrors are ignored.
    """
    import zmq

    for key in ('HWM', 'SNDHWM', 'RCVHWM'):
        opt = getattr(zmq, key, None)
        if opt is None:
            # this pyzmq/libzmq version doesn't define the option
            continue
        try:
            sock.setsockopt(opt, hwm)
        except zmq.ZMQError:
            # option exists but isn't settable on this socket/libzmq
            pass
def int_keys(dikt):
    """Rekey a dict that has been forced to cast number keys to str for JSON
    where there should be ints.

    Mutates `dikt` in place and also returns it.  String keys that parse as
    int (or, failing that, float) are replaced by their numeric form; a
    KeyError is raised if the numeric key already exists.
    """
    # iterate over a snapshot since we mutate the dict while looping
    for k in list(dikt):
        if isinstance(k, string_types):
            nk = None
            try:
                nk = int(k)
            except ValueError:
                try:
                    nk = float(k)
                except ValueError:
                    # not numeric at all; leave the key as a string
                    continue
            if nk in dikt:
                raise KeyError("already have key %r" % nk)
            dikt[nk] = dikt.pop(k)
    return dikt
| {
"content_hash": "d76cecd3757e98557a0f7855a87370ce",
"timestamp": "",
"source": "github",
"line_count": 410,
"max_line_length": 90,
"avg_line_length": 30.75609756097561,
"alnum_prop": 0.5762093576526566,
"repo_name": "fzheng/codejam",
"id": "5dfb213ebfbb311c572232ad837024ed2c1ef5bd",
"size": "12610",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/python2.7/site-packages/ipyparallel/util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "26929"
},
{
"name": "CSS",
"bytes": "70961"
},
{
"name": "HTML",
"bytes": "80615"
},
{
"name": "Java",
"bytes": "376384"
},
{
"name": "JavaScript",
"bytes": "5201764"
},
{
"name": "Jupyter Notebook",
"bytes": "13408"
},
{
"name": "Makefile",
"bytes": "2379"
},
{
"name": "Python",
"bytes": "16542061"
},
{
"name": "Smarty",
"bytes": "22430"
},
{
"name": "TeX",
"bytes": "85477"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tempfile
import numpy as np
import tensorflow as tf
from tensorflow import flags
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.lite.experimental.examples.lstm.tflite_rnn import TfLiteRNNCell
from tensorflow.lite.python.op_hint import convert_op_hints_to_stubs
from tensorflow.lite.python.op_hint import find_all_hinted_output_nodes
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
from tensorflow.python.tools import optimize_for_inference_lib
FLAGS = flags.FLAGS

# Number of steps to train model.
TRAIN_STEPS = 1

# Run everything on CPU: zero GPU devices are made visible to the session.
CONFIG = tf.ConfigProto(device_count={"GPU": 0})
class BidirectionalSequenceRnnTest(test_util.TensorFlowTestCase):
  """End-to-end test: train a small bidirectional RNN on MNIST, freeze it,
  convert it to TFLite, and check TFLite inference matches TF inference."""

  def __init__(self, *args, **kwargs):
    super(BidirectionalSequenceRnnTest, self).__init__(*args, **kwargs)
    # Define constants
    # Unrolled through 28 time steps
    self.time_steps = 28
    # Rows of 28 pixels
    self.n_input = 28
    # Learning rate for Adam optimizer
    self.learning_rate = 0.001
    # MNIST is meant to be classified in 10 classes(0-9).
    self.n_classes = 10
    # Batch size
    self.batch_size = 16
    # Rnn Units.
    self.num_units = 16

  def setUp(self):
    """Download/load the MNIST dataset into a temporary directory."""
    super(BidirectionalSequenceRnnTest, self).setUp()
    # Import MNIST dataset
    data_dir = tempfile.mkdtemp(dir=FLAGS.test_tmpdir)
    self.mnist = input_data.read_data_sets(data_dir, one_hot=True)

  def buildRnnLayer(self):
    """Build a 2-layer stack of TfLiteRNNCells (one direction of the BiRNN)."""
    return tf.nn.rnn_cell.MultiRNNCell([
        TfLiteRNNCell(self.num_units, name="rnn1"),
        TfLiteRNNCell(self.num_units, name="rnn2")
    ])

  def buildModel(self, fw_rnn_layer, bw_rnn_layer):
    """Build the BiRNN + softmax graph; return (input, logits, probabilities)."""
    # Weights and biases for output softmax layer.
    out_weights = tf.Variable(
        tf.random_normal([self.num_units * 2, self.n_classes]))
    out_bias = tf.Variable(tf.random_normal([self.n_classes]))

    # input image placeholder
    x = tf.placeholder(
        "float", [None, self.time_steps, self.n_input], name="INPUT_IMAGE")

    # unstack into a length-time_steps list for the static RNN API
    rnn_input = tf.unstack(x, self.time_steps, 1)

    outputs, _, _ = tf.nn.static_bidirectional_rnn(
        fw_rnn_layer, bw_rnn_layer, rnn_input, dtype="float32")

    # Compute logits by multiplying outputs[-1] of shape [batch_size,num_units]
    # by the softmax layer's out_weight of shape [num_units,n_classes]
    # plus out_bias
    prediction = tf.matmul(outputs[-1], out_weights) + out_bias
    output_class = tf.nn.softmax(prediction, name="OUTPUT_CLASS")

    return x, prediction, output_class

  def trainModel(self, x, prediction, output_class, sess):
    """Run TRAIN_STEPS Adam steps of softmax cross-entropy training."""
    # input label placeholder
    y = tf.placeholder("float", [None, self.n_classes])
    # Loss function
    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))
    # Optimization
    opt = tf.train.AdamOptimizer(
        learning_rate=self.learning_rate).minimize(loss)

    # Initialize variables
    init = tf.global_variables_initializer()
    sess.run(init)
    for _ in range(TRAIN_STEPS):
      batch_x, batch_y = self.mnist.train.next_batch(
          batch_size=self.batch_size, shuffle=False)
      batch_x = batch_x.reshape((self.batch_size, self.time_steps,
                                 self.n_input))
      sess.run(opt, feed_dict={x: batch_x, y: batch_y})

  def saveAndRestoreModel(self, fw_rnn_layer, bw_rnn_layer, sess, saver):
    """Checkpoint the trained graph, rebuild it fresh, and restore weights.

    Returns the rebuilt (input, logits, probabilities) plus the new session.
    """
    model_dir = tempfile.mkdtemp(dir=FLAGS.test_tmpdir)
    saver.save(sess, model_dir)

    # Reset the graph.
    tf.reset_default_graph()
    x, prediction, output_class = self.buildModel(fw_rnn_layer, bw_rnn_layer)

    new_sess = tf.Session(config=CONFIG)
    saver = tf.train.Saver()
    saver.restore(new_sess, model_dir)
    return x, prediction, output_class, new_sess

  def getInferenceResult(self, x, output_class, sess):
    """Run TF inference on one sample; return (input, output, frozen graph)."""
    b1, _ = self.mnist.train.next_batch(batch_size=1)
    sample_input = np.reshape(b1, (1, self.time_steps, self.n_input))

    expected_output = sess.run(output_class, feed_dict={x: sample_input})

    # It is important to keep all the ophint output nodes.
    hinted_outputs_nodes = find_all_hinted_output_nodes(sess)
    hinted_outputs_nodes.append(output_class.op.name)
    frozen_graph = tf.graph_util.convert_variables_to_constants(
        sess, sess.graph_def, hinted_outputs_nodes)
    return sample_input, expected_output, frozen_graph

  def tfliteInvoke(self, graph, test_inputs, outputs):
    """Convert the frozen graph to TFLite and run it on `test_inputs`."""
    tf.reset_default_graph()
    # Turn the input into placeholder of shape 1
    tflite_input = tf.placeholder(
        "float", [1, self.time_steps, self.n_input], name="INPUT_IMAGE_LITE")
    tf.import_graph_def(graph, name="", input_map={"INPUT_IMAGE": tflite_input})
    with tf.Session() as sess:
      curr = sess.graph_def
      # replace the op-hinted RNN subgraphs by fused TFLite stub ops
      curr = convert_op_hints_to_stubs(graph_def=curr)

    curr = optimize_for_inference_lib.optimize_for_inference(
        curr, ["INPUT_IMAGE_LITE"], ["OUTPUT_CLASS"],
        [tf.float32.as_datatype_enum])

    converter = tf.lite.TFLiteConverter(curr, [tflite_input], [outputs])
    tflite = converter.convert()

    interpreter = tf.lite.Interpreter(model_content=tflite)

    interpreter.allocate_tensors()

    input_index = interpreter.get_input_details()[0]["index"]
    interpreter.set_tensor(input_index, test_inputs)

    interpreter.invoke()
    output_index = interpreter.get_output_details()[0]["index"]
    result = interpreter.get_tensor(output_index)

    # Reset all variables so it will not pollute other inferences.
    interpreter.reset_all_variables()
    return result

  def testStaticRnnMultiRnnCell(self):
    """Train, round-trip through save/restore + TFLite, compare outputs."""
    sess = tf.Session(config=CONFIG)

    x, prediction, output_class = self.buildModel(self.buildRnnLayer(),
                                                  self.buildRnnLayer())
    self.trainModel(x, prediction, output_class, sess)

    saver = tf.train.Saver()
    x, prediction, output_class, new_sess = self.saveAndRestoreModel(
        self.buildRnnLayer(), self.buildRnnLayer(), sess, saver)

    test_inputs, expected_output, frozen_graph = self.getInferenceResult(
        x, output_class, new_sess)

    result = self.tfliteInvoke(frozen_graph, test_inputs, output_class)
    self.assertTrue(np.allclose(expected_output, result, rtol=1e-6, atol=1e-2))
# Run the TensorFlow test runner when executed as a script.
if __name__ == "__main__":
  test.main()
| {
"content_hash": "46eec977fdad84e4d2bb6ee8f432af3f",
"timestamp": "",
"source": "github",
"line_count": 173,
"max_line_length": 80,
"avg_line_length": 36.85549132947977,
"alnum_prop": 0.6863237139272271,
"repo_name": "jendap/tensorflow",
"id": "7a937ce47f7fb049f50307c049f33f8e0060b986",
"size": "7065",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/lite/experimental/examples/lstm/bidirectional_sequence_rnn_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "2867"
},
{
"name": "Batchfile",
"bytes": "14734"
},
{
"name": "C",
"bytes": "606044"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "55619540"
},
{
"name": "CMake",
"bytes": "207169"
},
{
"name": "Dockerfile",
"bytes": "78675"
},
{
"name": "Go",
"bytes": "1383418"
},
{
"name": "HTML",
"bytes": "4680118"
},
{
"name": "Java",
"bytes": "900190"
},
{
"name": "Jupyter Notebook",
"bytes": "2510235"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "77367"
},
{
"name": "Objective-C",
"bytes": "16140"
},
{
"name": "Objective-C++",
"bytes": "102889"
},
{
"name": "PHP",
"bytes": "14644"
},
{
"name": "Pascal",
"bytes": "399"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "45358371"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "838"
},
{
"name": "Shell",
"bytes": "530065"
},
{
"name": "Smarty",
"bytes": "25609"
}
],
"symlink_target": ""
} |
import json

# Load the usa.gov/bit.ly click log: one JSON object per line.
# NOTE(review): the file handle returned by open() is never closed explicitly.
path = 'data/usagov_bitly_data2012-05-21-1337634399.txt'
records = [json.loads(line) for line in open(path)]

# <codecell>

# Display a couple of records
records[0:2]

# <codecell>

# Display the user agent (the 'a' field) from the first record
records[0]["a"]

# <codecell>
# Utility function: get counts for each element from a collection
from collections import defaultdict

def get_counts(sequence):
    """Return a mapping from each element of `sequence` to its occurrence count."""
    counts = defaultdict(int) # values will initialize to 0
    for x in sequence:
        # bugfix: was `count[x] += 1`, which raised NameError (undefined name)
        counts[x] += 1
    return counts
# <codecell>

# Example of a time zone value (the 'tz' field) from the first record
records[0]['tz']

# <codecell>
# Check if a time zone is listed for the record
def time_zone_listed(record):
    """Return True when the record carries a 'tz' field."""
    return 'tz' in record
# Sanity check on the first record
time_zone_listed(records[0])

# <codecell>

# See which records have no timezone listed at all
[item for item in records if not time_zone_listed(item)]

# <codecell>

# Collect the timezone of every record that has one, and peek at a few
time_zones = [item['tz'] for item in records if time_zone_listed(item)]
time_zones[0:3]

# <codecell>

# Top 10 most common timezones by record count
from collections import Counter
Counter(time_zones).most_common(10)
| {
"content_hash": "16ac33da4aaeffcdf2990f1cbc75cb56",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 71,
"avg_line_length": 19,
"alnum_prop": 0.7014354066985646,
"repo_name": "traims/nasa-mars-curiosity",
"id": "0373c7e15b7e88eaba70b5e1f9a90b244aebe584",
"size": "1111",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "explore_a_single_data_file.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1111"
}
],
"symlink_target": ""
} |
from datetime import date
from time import strptime
from django.db import models
from ..magic import MisencodedCharField, MisencodedTextField
# Sections for Market listings (Czech labels): offering / seeking /
# trading / giving away.
MARKET_SECTION_CHOICES = (
    ("nabizim", "Nabízím"),
    ("shanim", "Sháním"),
    ("vymenim", "Vyměním"),
    ("daruji", "Daruji"),
)
class Dating(models.Model):
    """A player-seeking-players personal ad (legacy ``seznamka`` table).

    Column names are kept in Czech to match the legacy database schema.
    """

    name = MisencodedCharField(
        max_length=40, blank=True, null=True, db_column="jmeno", verbose_name="Jméno"
    )
    email = MisencodedCharField(max_length=40, blank=True, null=True)
    phone = MisencodedCharField(
        max_length=20,
        blank=True,
        null=True,
        db_column="telefon",
        verbose_name="Telefon",
    )
    mobile = MisencodedCharField(
        max_length=20, blank=True, null=True, db_column="mobil", verbose_name="Mobil"
    )
    age = models.IntegerField(
        blank=True, null=True, db_column="vek", verbose_name="Věk"
    )
    area = MisencodedCharField(
        max_length=40, blank=True, null=True, db_column="okres", verbose_name="Okres"
    )
    # how long the author has been playing DrD ("Doba hraní DrD")
    experience = MisencodedCharField(
        max_length=20,
        blank=True,
        null=True,
        db_column="doba",
        verbose_name="Doba hraní DrD",
    )
    published = models.DateTimeField(
        blank=True, null=True, db_column="datum", verbose_name="Datum"
    )
    text = MisencodedTextField(blank=True, null=True, db_column="text")
    # section/category of the ad
    group = MisencodedCharField(
        max_length=20, blank=True, null=True, db_column="sekce", verbose_name="Sekce"
    )

    class Meta:
        db_table = "seznamka"
        verbose_name = "Seznamka"
        verbose_name_plural = "Seznamky"

    def __str__(self):
        return f"{self.name} ve skupině {self.group}"
class Market(models.Model):
    """A classified ad in one of the MARKET_SECTION_CHOICES sections
    (legacy ``inzerce`` table)."""

    group = MisencodedCharField(
        max_length=20,
        choices=MARKET_SECTION_CHOICES,
        db_column="sekce",
        verbose_name="Sekce",
    )
    name = MisencodedCharField(
        max_length=30, blank=True, null=True, db_column="jmeno", verbose_name="Jméno"
    )
    mail = MisencodedCharField(
        max_length=30, blank=True, null=True, db_column="mail", verbose_name="E-mail"
    )
    phone = MisencodedCharField(
        max_length=15,
        blank=True,
        null=True,
        db_column="telefon",
        verbose_name="Telefon",
    )
    mobile = MisencodedCharField(
        max_length=15, blank=True, null=True, db_column="mobil", verbose_name="Mobil"
    )
    area = MisencodedCharField(
        max_length=20, blank=True, null=True, db_column="okres", verbose_name="Okres"
    )
    text = MisencodedTextField()

    # WARNING WARNING WARNING, not a Date, but a varchar instead!
    # Old version stores in the Czech format: dd. mm. YYYY (where d/m is without leading 0)
    # See https://github.com/dracidoupe/graveyard/issues/195
    published_varchar = MisencodedCharField(
        max_length=12, db_column="datum", verbose_name="Přidáno"
    )

    @property
    def published(self):
        """Parse `published_varchar` into a datetime.date.

        Raises ValueError when the stored string does not match either form.
        """
        # Windows workaround as `%-d` is platform-specific
        # (the `%-d`/`%-m` attempt typically fails; the `%d. %m. %Y`
        # fallback also accepts day/month numbers without a leading zero)
        try:
            return date(*(strptime(self.published_varchar, "%-d. %-m. %Y")[0:3]))
        except ValueError:
            return date(*(strptime(self.published_varchar, "%d. %m. %Y")[0:3]))

    class Meta:
        db_table = "inzerce"
        verbose_name = "Inzerce"
        verbose_name_plural = "Inzerce"
| {
"content_hash": "0227cf0bd4e0eec0406eb53b29f03ef3",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 91,
"avg_line_length": 31.528301886792452,
"alnum_prop": 0.6205864751645721,
"repo_name": "dracidoupe/graveyard",
"id": "d82c46a0eb5abae3c6a6691c873a0844d3a504a7",
"size": "3355",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ddcz/models/used/social.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "API Blueprint",
"bytes": "4273"
},
{
"name": "CSS",
"bytes": "37578"
},
{
"name": "Dockerfile",
"bytes": "208"
},
{
"name": "HTML",
"bytes": "101149"
},
{
"name": "JavaScript",
"bytes": "2417"
},
{
"name": "Python",
"bytes": "766548"
},
{
"name": "Shell",
"bytes": "5103"
}
],
"symlink_target": ""
} |
import pickle
import os
import numpy as np
from binary import *
from sklearn import svm
from sklearn.calibration import CalibratedClassifierCV
class BinaryComputerVisionPredictor(object):
  """Predict binary-class probabilities for images via cross-validated SVMs.

  Features are extracted once (and cached on disk); the image set is then
  split into `num_splits` folds, and for each fold a LinearSVC trained on
  the other folds (sigmoid-calibrated on the fold itself) produces the
  positive-class probability for that fold's images.

  NOTE: Python 2 source (print statements, integer division of indices).
  """
  def __init__(self, feature_extractor, num_splits=4, computer_vision_cache=None):
    # feature_extractor must expose extract_features(list_of_paths) -> array
    self.feature_extractor = feature_extractor
    self.num_splits = num_splits
    # optional fixed cache path, overrides the per-call cache_name
    self.computer_vision_cache = computer_vision_cache

  def predict_probs(self, images, labels, valid_train=None, cache_name=None, cv_worker=None, naive=False):
    """Return one CrowdLabelBinaryClassification per image, with .prob set.

    images:       objects with an .fname attribute (image paths)
    labels:       objects with a .label attribute (0/1), aligned with images
    valid_train:  optional boolean mask of images usable for training
    cv_worker:    worker object attached to each returned crowd label
    naive:        accepted for interface compatibility; unused here
    """
    # Compute image features
    image_names = [im.fname for im in images]
    if self.computer_vision_cache: cache_name = self.computer_vision_cache
    if cache_name is None or not os.path.isfile(cache_name):
      print "Extracting features for " + str(len(image_names)) + " images to " + cache_name + "..."
      features = self.feature_extractor.extract_features([os.path.abspath(f) for f in image_names])
      '''
      features_d = {}
      for i in range(len(image_names)): features_d[image_names[i]] = features[i]
      with open(cache_name, 'wb') as f:
        pickle.dump(features_d, f)
      '''
      # cache layout: <cache_name> pickles the name list, <cache_name>.npz the features
      np.savez(cache_name+'.npz', features=features)
      with open(cache_name, 'wb') as f:
        pickle.dump(image_names, f)
    else:
      features = np.load(cache_name+'.npz')['features']
      with open(cache_name, 'rb') as f:
        image_names_cached = pickle.load(f)
      image_names_d = {image_names_cached[i]:i for i in range(len(image_names_cached))}
      features_d = {image_names_cached[i]:features[i,:] for i in range(len(image_names_cached))}
      image_names_new = []
      for i in image_names:
        if not i in image_names_d:
          image_names_new.append(i)
      if len(image_names_new): # Handle the case where new images have been added since features were cached
        features = self.feature_extractor.extract_features(image_names_new)
        for i in range(len(image_names_new)): features_d[image_names_new[i]] = features[i]
        with open(cache_name, 'wb') as f:
          pickle.dump(image_names, f)
        features = np.asarray([features_d[image_names[i]] for i in range(len(image_names))])
        np.savez(cache_name+'.npz', features=features)
      else:
        # reorder cached features to match the requested image order
        features = np.asarray([features_d[image_names[i]] for i in range(len(image_names))])

    # random permutation defines the cross-validation folds
    perm_inds = np.random.permutation(len(image_names))
    probs = {}
    v = np.asarray(valid_train) if valid_train else np.asarray([True for i in range(len(image_names))])
    for n in range(self.num_splits):
      # this fold is the test set; everything else (valid) is the train set
      start_ind = (n*len(image_names))/self.num_splits
      end_ind = ((n+1)*len(image_names))/self.num_splits
      sp, ep, test_inds = perm_inds[:start_ind], perm_inds[end_ind:], perm_inds[start_ind:end_ind]
      train_inds = np.concatenate((sp[v[sp]], ep[v[ep]]))
      val_inds = test_inds[v[test_inds]]
      Y_train = np.asarray([labels[i].label for i in train_inds])
      # default to an uninformative 0.5 probability when training is impossible
      p = [(.5,.5) for i in range(len(test_inds))]
      if len(train_inds)>0 and (Y_train==0).sum()>0 and (Y_train==1).sum()>0 and len(val_inds)>0:
        Y_val = np.asarray([labels[i].label for i in val_inds])
        if (Y_val==0).sum()>0 and (Y_val==1).sum()>0:
          X_val = np.asarray([features[i] for i in val_inds])
          X_train = np.asarray([features[i] for i in train_inds])
          X_test = np.asarray([features[i] for i in test_inds])
          Y_test = np.asarray([labels[i].label for i in test_inds])
          print "Train: " + str(len(train_inds)) + "," + str((Y_train==0).sum()) + " neg," + str((Y_train==1).sum()) + " pos"
          clf = svm.LinearSVC()
          clf.fit(X_train, Y_train)
          # calibrate the SVM margin into probabilities on the held-out fold
          clf_prob = CalibratedClassifierCV(clf, cv="prefit", method='sigmoid')
          clf_prob.fit(X_val, Y_val)
          p = clf_prob.predict_proba(X_test)
          print "Val: " + str(len(val_inds)) + "," + str((Y_val==0).sum()) + " neg," + str((Y_val==1).sum()) + " pos, min_p=" + str(np.asarray([p[i][1] for i in range(len(val_inds))]).min()) + " max_p=" + str(np.asarray([p[i][1] for i in range(len(val_inds))]).max())
      for i in range(len(test_inds)):
        probs[image_names[test_inds[i]]] = p[i][1]

    retval = []
    for i in range(len(images)):
      # threshold at 0.5 for the hard label; keep the soft probability too
      cv_pred = CrowdLabelBinaryClassification(images[i], cv_worker, label=(1.0 if probs[image_names[i]]>.5 else 0.0))
      cv_pred.prob = probs[image_names[i]]
      retval.append(cv_pred)
    return retval
| {
"content_hash": "f0b8a5a54f51700033ab6ffa77739261",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 267,
"avg_line_length": 52.845238095238095,
"alnum_prop": 0.6186077945483217,
"repo_name": "sbranson/online_crowdsourcing",
"id": "3b21d00da04dc38e8d876eb37473713d20bbf7b7",
"size": "4439",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "crowdsourcing/annotation_types/classification/binary_cv_predictor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "15706"
},
{
"name": "Python",
"bytes": "444456"
}
],
"symlink_target": ""
} |
from lasagne.layers import Conv2DLayer, MaxPool2DLayer
from lasagne.layers import InputLayer, DenseLayer, batch_norm
from lasagne.layers import SpatialPyramidPoolingLayer
import lasagne
def build_architecture(input_shape, trained_weights=None):
    """Build the SigNet-SPP convolutional network as a dict of named layers.

    NOTE(review): ``input_shape`` is not used -- the input layer is fixed at
    (None, 1, None, None); confirm whether callers rely on passing it.

    :param input_shape: unused (kept for interface compatibility).
    :param trained_weights: optional list of parameter values; when given,
        they are loaded into the whole network via set_all_param_values.
    :return: dict mapping layer names to Lasagne layer objects; 'fc2' is the
        topmost layer.
    """
    def _bn_conv(incoming, filters, size, **kwargs):
        # All conv layers share batch-norm and flip_filters=False.
        return batch_norm(Conv2DLayer(incoming, num_filters=filters,
                                      filter_size=size, flip_filters=False,
                                      **kwargs))

    net = {}
    net['input'] = InputLayer((None, 1, None, None))
    net['large_conv1'] = _bn_conv(net['input'], 32, 11, stride=4, pad=5)
    net['large_pool1'] = MaxPool2DLayer(net['large_conv1'], pool_size=3)
    net['large_conv2'] = _bn_conv(net['large_pool1'], 64, 5, pad=2)
    net['large_pool2'] = MaxPool2DLayer(net['large_conv2'], pool_size=3, stride=2)
    net['large_conv3'] = _bn_conv(net['large_pool2'], 128, 3, pad=1)
    net['large_conv4'] = _bn_conv(net['large_conv3'], 128, 3, pad=1)
    net['large_pool4'] = MaxPool2DLayer(net['large_conv4'], pool_size=2)
    net['large_conv5'] = _bn_conv(net['large_pool4'], 128, 3, pad=1)
    # Spatial pyramid pooling makes the net accept variable input sizes.
    net['large_pool5'] = SpatialPyramidPoolingLayer(net['large_conv5'], implementation='kaiming')
    net['fc1'] = batch_norm(DenseLayer(net['large_pool5'], num_units=2048))
    net['fc2'] = batch_norm(DenseLayer(net['fc1'], num_units=2048))
    if trained_weights:
        lasagne.layers.set_all_param_values(net['fc2'], trained_weights)
    return net
| {
"content_hash": "8dda1683e8d52877928105506373b5e4",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 131,
"avg_line_length": 51.43333333333333,
"alnum_prop": 0.7064160725858717,
"repo_name": "luizgh/sigver_wiwd",
"id": "66ad9ec1ba5cc4d036becb7e40f10fab3f2a6762",
"size": "1543",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "signet_spp_600dpi.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "264725"
},
{
"name": "Python",
"bytes": "29399"
}
],
"symlink_target": ""
} |
"""
Reads a list of intervals and a maf. Produces a new maf containing the
blocks or parts of blocks in the original that overlapped the intervals.
It is assumed that each file `maf_fname` has a corresponding `maf_fname`.index
file.
NOTE: If two intervals overlap the same block it will be written twice. With
non-overlapping intervals and --chop this is never a problem.
NOTE: Intervals are origin-zero, half-open. For example, the interval 100,150
is 50 bases long, and there are 100 bases to its left in the sequence.
NOTE: Intervals are relative to the + strand, regardless of the strands in
the alignments.
WARNING: bz2/bz2t support and file cache support are new and not as well
tested.
usage: %prog maf_fname1 maf_fname2 ... [options] < interval_file
-m, --mincols=0: Minimum length (columns) required for alignment to be output
-c, --chop: Should blocks be chopped to only portion overlapping (no by default)
-s, --src=s: Use this src for all intervals
-p, --prefix=p: Prepend this to each src before lookup
-d, --dir=d: Write each interval as a separate file in this directory
-S, --strand: Strand is included as an additional column, and the blocks are reverse complemented (if necessary) so that they are always on that strand w/r/t the src species.
-C, --usecache: Use a cache that keeps blocks of the MAF files in memory (requires ~20MB per MAF)
"""
import psyco_full
from bx.cookbook import doc_optparse
import bx.align.maf
from bx import misc
import os
import sys
def main():
    """Read intervals from stdin and write overlapping MAF blocks.

    Each stdin line is a whitespace-separated interval (see the module
    docstring for the exact column layout, which depends on --src and
    --strand). Overlapping blocks are looked up through the MAF indexes and
    written to stdout, or to one file per interval when --dir is given.
    """
    # Parse command line
    options, args = doc_optparse.parse(__doc__)
    try:
        maf_files = args
        # Minimum number of alignment columns a (sliced) block must keep.
        mincols = int(options.mincols) if options.mincols else 0
        # Optional fixed src for every interval, and optional src prefix.
        fixed_src = options.src if options.src else None
        prefix = options.prefix if options.prefix else None
        # When set, each interval goes to its own file in this directory.
        # (Renamed from `dir`, which shadowed the builtin.)
        out_dir = options.dir if options.dir else None
        chop = bool(options.chop)
        do_strand = bool(options.strand)
        use_cache = bool(options.usecache)
    except Exception:
        doc_optparse.exit()
    # Open indexed access to mafs
    index = bx.align.maf.MultiIndexed(maf_files, keep_open=True,
                                      parse_e_rows=True,
                                      use_cache=use_cache)
    # Start MAF on stdout unless writing one file per interval
    if out_dir is None:
        out = bx.align.maf.Writer(sys.stdout)
    # Iterate over input ranges
    for line in sys.stdin:
        strand = None
        fields = line.split()
        if fixed_src:
            src, start, end = fixed_src, int(fields[0]), int(fields[1])
            if do_strand:
                strand = fields[2]
        else:
            src, start, end = fields[0], int(fields[1]), int(fields[2])
            if do_strand:
                strand = fields[3]
        if prefix:
            src = prefix + src
        # Find overlap with reference component
        blocks = index.get(src, start, end)
        # Open a per-interval output file if needed
        if out_dir:
            out = bx.align.maf.Writer(
                open(os.path.join(out_dir, "%s:%09d-%09d.maf" % (src, start, end)), 'w'))
        # Write each intersecting block
        if chop:
            for block in blocks:
                # A block may contain several components from `src`; slice
                # around each of them independently.
                for ref in block.get_components_by_src(src):
                    slice_start = max(start, ref.get_forward_strand_start())
                    slice_end = min(end, ref.get_forward_strand_end())
                    if slice_end <= slice_start:
                        continue
                    sliced = block.slice_by_component(ref, slice_start, slice_end)
                    # If the block is shorter than the minimum allowed size, skip it
                    if mincols and (sliced.text_size < mincols):
                        continue
                    # If the reference component is empty, don't write the block
                    if sliced.get_component_by_src(src).size < 1:
                        continue
                    # Keep only components that are not empty
                    sliced.components = [c for c in sliced.components if c.size > 0]
                    # Reverse complement if needed so output matches the
                    # requested strand w/r/t the src species.
                    if (strand is not None) and (ref.strand != strand):
                        sliced = sliced.reverse_complement()
                    # Write the block
                    out.write(sliced)
        else:
            for block in blocks:
                out.write(block)
        if out_dir:
            out.close()
    # Close the shared stdout writer. Bug fix: this close previously ran
    # unconditionally, raising NameError when --dir was given and stdin was
    # empty (no writer ever created) and double-closing the last per-interval
    # writer otherwise.
    if out_dir is None:
        out.close()
    index.close()
# Script entry point: run only when executed directly, not on import.
if __name__ == "__main__":
    main()
| {
"content_hash": "c765d13839c23ca4646bc89086fc0a67",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 181,
"avg_line_length": 41.5,
"alnum_prop": 0.5772375215146299,
"repo_name": "bxlab/HiFive_Paper",
"id": "fcfbe98ffb4ae54b545c710abec2247233a0e3bc",
"size": "4690",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Scripts/HiCLib/bx-python-0.7.1/build/scripts-2.7/maf_extract_ranges_indexed.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5096"
},
{
"name": "C",
"bytes": "107381"
},
{
"name": "C++",
"bytes": "182835"
},
{
"name": "CMake",
"bytes": "3353"
},
{
"name": "Forth",
"bytes": "152"
},
{
"name": "Makefile",
"bytes": "22978"
},
{
"name": "Perl",
"bytes": "25453"
},
{
"name": "Python",
"bytes": "4229513"
},
{
"name": "R",
"bytes": "43022"
},
{
"name": "Shell",
"bytes": "10798"
}
],
"symlink_target": ""
} |
import json
import re
import unittest
from collections import namedtuple
from unittest import mock
import sqlalchemy
from cryptography.fernet import Fernet
from parameterized import parameterized
from airflow import AirflowException
from airflow.hooks.base import BaseHook
from airflow.models import Connection, crypto
from airflow.providers.sqlite.hooks.sqlite import SqliteHook
from tests.test_utils.config import conf_vars
ConnectionParts = namedtuple("ConnectionParts", ["conn_type", "login", "password", "host", "port", "schema"])
class UriTestCaseConfig:
    """Bundle of a connection URI, the attributes a Connection built from it
    should expose, and a human-readable label used in parameterized test names.
    """

    def __init__(
        self,
        test_conn_uri: str,
        test_conn_attributes: dict,
        description: str,
    ):
        """
        :param test_conn_uri: URI that we use to create connection
        :param test_conn_attributes: we expect a connection object created
            with `test_uri` to have these attributes
        :param description: human-friendly name appended to parameterized test
        """
        # URI used to construct the Connection under test.
        self.test_uri = test_conn_uri
        # Expected attribute values on the resulting Connection object.
        self.test_conn_attributes = test_conn_attributes
        # Suffix appended to the parameterized test name.
        self.description = description

    @staticmethod
    def uri_test_name(func, num, param):
        # parameterized passes the test function, the case index, and the
        # param wrapper; args[0] is the UriTestCaseConfig instance.
        safe_description = param.args[0].description.replace(' ', '_')
        return f"{func.__name__}_{num}_{safe_description}"
class TestConnection(unittest.TestCase):
    """Tests for airflow.models.Connection: extra-field encryption, URI
    parsing/round-tripping, environment-variable lookup, and hook wiring.
    """

    def setUp(self):
        # Reset the cached Fernet object so each test picks up its own
        # conf_vars-provided fernet key.
        crypto._fernet = None

    def tearDown(self):
        crypto._fernet = None

    @conf_vars({('core', 'fernet_key'): ''})
    def test_connection_extra_no_encryption(self):
        """
        Tests extras on a new connection without encryption. The fernet key
        is set to a non-base64-encoded string and the extra is stored without
        encryption.
        """
        test_connection = Connection(extra='testextra')
        self.assertFalse(test_connection.is_extra_encrypted)
        self.assertEqual(test_connection.extra, 'testextra')

    @conf_vars({('core', 'fernet_key'): Fernet.generate_key().decode()})
    def test_connection_extra_with_encryption(self):
        """
        Tests extras on a new connection with encryption.
        """
        test_connection = Connection(extra='testextra')
        self.assertTrue(test_connection.is_extra_encrypted)
        self.assertEqual(test_connection.extra, 'testextra')

    def test_connection_extra_with_encryption_rotate_fernet_key(self):
        """
        Tests rotating encrypted extras.
        """
        key1 = Fernet.generate_key()
        key2 = Fernet.generate_key()

        with conf_vars({('core', 'fernet_key'): key1.decode()}):
            test_connection = Connection(extra='testextra')
            self.assertTrue(test_connection.is_extra_encrypted)
            self.assertEqual(test_connection.extra, 'testextra')
            self.assertEqual(Fernet(key1).decrypt(test_connection._extra.encode()), b'testextra')

        # Test decrypt of old value with new key
        with conf_vars({('core', 'fernet_key'): ','.join([key2.decode(), key1.decode()])}):
            crypto._fernet = None
            self.assertEqual(test_connection.extra, 'testextra')

            # Test decrypt of new value with new key
            test_connection.rotate_fernet_key()
            self.assertTrue(test_connection.is_extra_encrypted)
            self.assertEqual(test_connection.extra, 'testextra')
            self.assertEqual(Fernet(key2).decrypt(test_connection._extra.encode()), b'testextra')

    # Shared URI <-> attribute fixtures driving the three round-trip tests
    # below via @parameterized.expand.
    test_from_uri_params = [
        UriTestCaseConfig(
            test_conn_uri='scheme://user:password@host%2Flocation:1234/schema',
            test_conn_attributes=dict(
                conn_type='scheme',
                host='host/location',
                schema='schema',
                login='user',
                password='password',
                port=1234,
                extra=None,
            ),
            description='without extras',
        ),
        UriTestCaseConfig(
            test_conn_uri='scheme://user:password@host%2Flocation:1234/schema?'
            'extra1=a%20value&extra2=%2Fpath%2F',
            test_conn_attributes=dict(
                conn_type='scheme',
                host='host/location',
                schema='schema',
                login='user',
                password='password',
                port=1234,
                extra_dejson={'extra1': 'a value', 'extra2': '/path/'},
            ),
            description='with extras',
        ),
        UriTestCaseConfig(
            test_conn_uri='scheme://user:password@host%2Flocation:1234/schema?extra1=a%20value&extra2=',
            test_conn_attributes=dict(
                conn_type='scheme',
                host='host/location',
                schema='schema',
                login='user',
                password='password',
                port=1234,
                extra_dejson={'extra1': 'a value', 'extra2': ''},
            ),
            description='with empty extras',
        ),
        UriTestCaseConfig(
            test_conn_uri='scheme://user:password@host%2Flocation%3Ax%3Ay:1234/schema?'
            'extra1=a%20value&extra2=%2Fpath%2F',
            test_conn_attributes=dict(
                conn_type='scheme',
                host='host/location:x:y',
                schema='schema',
                login='user',
                password='password',
                port=1234,
                extra_dejson={'extra1': 'a value', 'extra2': '/path/'},
            ),
            description='with colon in hostname',
        ),
        UriTestCaseConfig(
            test_conn_uri='scheme://user:password%20with%20space@host%2Flocation%3Ax%3Ay:1234/schema',
            test_conn_attributes=dict(
                conn_type='scheme',
                host='host/location:x:y',
                schema='schema',
                login='user',
                password='password with space',
                port=1234,
            ),
            description='with encoded password',
        ),
        UriTestCaseConfig(
            test_conn_uri='scheme://domain%2Fuser:password@host%2Flocation%3Ax%3Ay:1234/schema',
            test_conn_attributes=dict(
                conn_type='scheme',
                host='host/location:x:y',
                schema='schema',
                login='domain/user',
                password='password',
                port=1234,
            ),
            description='with encoded user',
        ),
        UriTestCaseConfig(
            test_conn_uri='scheme://user:password%20with%20space@host:1234/schema%2Ftest',
            test_conn_attributes=dict(
                conn_type='scheme',
                host='host',
                schema='schema/test',
                login='user',
                password='password with space',
                port=1234,
            ),
            description='with encoded schema',
        ),
        UriTestCaseConfig(
            test_conn_uri='scheme://user:password%20with%20space@host:1234',
            test_conn_attributes=dict(
                conn_type='scheme',
                host='host',
                schema='',
                login='user',
                password='password with space',
                port=1234,
            ),
            description='no schema',
        ),
        UriTestCaseConfig(
            test_conn_uri='google-cloud-platform://?extra__google_cloud_platform__key_'
            'path=%2Fkeys%2Fkey.json&extra__google_cloud_platform__scope='
            'https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fcloud-platform&extra'
            '__google_cloud_platform__project=airflow',
            test_conn_attributes=dict(
                conn_type='google_cloud_platform',
                host='',
                schema='',
                login=None,
                password=None,
                port=None,
                extra_dejson=dict(
                    extra__google_cloud_platform__key_path='/keys/key.json',
                    extra__google_cloud_platform__scope='https://www.googleapis.com/auth/cloud-platform',
                    extra__google_cloud_platform__project='airflow',
                ),
            ),
            description='with underscore',
        ),
        UriTestCaseConfig(
            test_conn_uri='scheme://host:1234',
            test_conn_attributes=dict(
                conn_type='scheme',
                host='host',
                schema='',
                login=None,
                password=None,
                port=1234,
            ),
            description='without auth info',
        ),
        UriTestCaseConfig(
            test_conn_uri='scheme://%2FTmP%2F:1234',
            test_conn_attributes=dict(
                conn_type='scheme',
                host='/TmP/',
                schema='',
                login=None,
                password=None,
                port=1234,
            ),
            description='with path',
        ),
        UriTestCaseConfig(
            test_conn_uri='scheme:///airflow',
            test_conn_attributes=dict(
                conn_type='scheme',
                schema='airflow',
            ),
            description='schema only',
        ),
        UriTestCaseConfig(
            test_conn_uri='scheme://@:1234',
            test_conn_attributes=dict(
                conn_type='scheme',
                port=1234,
            ),
            description='port only',
        ),
        UriTestCaseConfig(
            test_conn_uri='scheme://:password%2F%21%40%23%24%25%5E%26%2A%28%29%7B%7D@',
            test_conn_attributes=dict(
                conn_type='scheme',
                password='password/!@#$%^&*(){}',
            ),
            description='password only',
        ),
        UriTestCaseConfig(
            test_conn_uri='scheme://login%2F%21%40%23%24%25%5E%26%2A%28%29%7B%7D@',
            test_conn_attributes=dict(
                conn_type='scheme',
                login='login/!@#$%^&*(){}',
            ),
            description='login only',
        ),
    ]

    # pylint: disable=undefined-variable
    @parameterized.expand([(x,) for x in test_from_uri_params], UriTestCaseConfig.uri_test_name)
    def test_connection_from_uri(self, test_config: UriTestCaseConfig):

        connection = Connection(uri=test_config.test_uri)
        for conn_attr, expected_val in test_config.test_conn_attributes.items():
            actual_val = getattr(connection, conn_attr)
            if expected_val is None:
                # Bug fix: this previously asserted on expected_val, which is
                # trivially true; the parsed attribute is what must be None.
                self.assertIsNone(actual_val)
            if isinstance(expected_val, dict):
                self.assertDictEqual(expected_val, actual_val)
            else:
                self.assertEqual(expected_val, actual_val)

    # pylint: disable=undefined-variable
    @parameterized.expand([(x,) for x in test_from_uri_params], UriTestCaseConfig.uri_test_name)
    def test_connection_get_uri_from_uri(self, test_config: UriTestCaseConfig):
        """
        This test verifies that when we create a conn_1 from URI, and we generate a URI from that conn, that
        when we create a conn_2 from the generated URI, we get an equivalent conn.
        1. Parse URI to create `Connection` object, `connection`.
        2. Using this connection, generate URI `generated_uri`..
        3. Using this`generated_uri`, parse and create new Connection `new_conn`.
        4. Verify that `new_conn` has same attributes as `connection`.
        """
        connection = Connection(uri=test_config.test_uri)
        generated_uri = connection.get_uri()
        new_conn = Connection(uri=generated_uri)
        self.assertEqual(connection.conn_type, new_conn.conn_type)
        self.assertEqual(connection.login, new_conn.login)
        self.assertEqual(connection.password, new_conn.password)
        self.assertEqual(connection.host, new_conn.host)
        self.assertEqual(connection.port, new_conn.port)
        self.assertEqual(connection.schema, new_conn.schema)
        self.assertDictEqual(connection.extra_dejson, new_conn.extra_dejson)

    # pylint: disable=undefined-variable
    @parameterized.expand([(x,) for x in test_from_uri_params], UriTestCaseConfig.uri_test_name)
    def test_connection_get_uri_from_conn(self, test_config: UriTestCaseConfig):
        """
        This test verifies that if we create conn_1 from attributes (rather than from URI), and we generate a
        URI, that when we create conn_2 from this URI, we get an equivalent conn.
        1. Build conn init params using `test_conn_attributes` and store in `conn_kwargs`
        2. Instantiate conn `connection` from `conn_kwargs`.
        3. Generate uri `get_uri` from this conn.
        4. Create conn `new_conn` from this uri.
        5. Verify `new_conn` has same attributes as `connection`.
        """
        conn_kwargs = {}
        for k, v in test_config.test_conn_attributes.items():
            if k == 'extra_dejson':
                conn_kwargs.update({'extra': json.dumps(v)})
            else:
                conn_kwargs.update({k: v})

        connection = Connection(conn_id='test_conn', **conn_kwargs)  # type: ignore
        gen_uri = connection.get_uri()
        new_conn = Connection(conn_id='test_conn', uri=gen_uri)
        for conn_attr, expected_val in test_config.test_conn_attributes.items():
            actual_val = getattr(new_conn, conn_attr)
            if expected_val is None:
                # Bug fix: same as test_connection_from_uri -- assert on the
                # round-tripped value, not the expected one.
                self.assertIsNone(actual_val)
            if isinstance(expected_val, dict):
                self.assertDictEqual(expected_val, actual_val)
            else:
                self.assertEqual(expected_val, actual_val)

    @parameterized.expand(
        [
            (
                "http://:password@host:80/database",
                ConnectionParts(
                    conn_type="http", login='', password="password", host="host", port=80, schema="database"
                ),
            ),
            (
                "http://user:@host:80/database",
                ConnectionParts(
                    conn_type="http", login="user", password=None, host="host", port=80, schema="database"
                ),
            ),
            (
                "http://user:password@/database",
                ConnectionParts(
                    conn_type="http", login="user", password="password", host="", port=None, schema="database"
                ),
            ),
            (
                "http://user:password@host:80/",
                ConnectionParts(
                    conn_type="http", login="user", password="password", host="host", port=80, schema=""
                ),
            ),
            (
                "http://user:password@/",
                ConnectionParts(
                    conn_type="http", login="user", password="password", host="", port=None, schema=""
                ),
            ),
            (
                "postgresql://user:password@%2Ftmp%2Fz6rqdzqh%2Fexample%3Awest1%3Atestdb/testdb",
                ConnectionParts(
                    conn_type="postgres",
                    login="user",
                    password="password",
                    host="/tmp/z6rqdzqh/example:west1:testdb",
                    port=None,
                    schema="testdb",
                ),
            ),
            (
                "postgresql://user@%2Ftmp%2Fz6rqdzqh%2Fexample%3Aeurope-west1%3Atestdb/testdb",
                ConnectionParts(
                    conn_type="postgres",
                    login="user",
                    password=None,
                    host="/tmp/z6rqdzqh/example:europe-west1:testdb",
                    port=None,
                    schema="testdb",
                ),
            ),
            (
                "postgresql://%2Ftmp%2Fz6rqdzqh%2Fexample%3Aeurope-west1%3Atestdb",
                ConnectionParts(
                    conn_type="postgres",
                    login=None,
                    password=None,
                    host="/tmp/z6rqdzqh/example:europe-west1:testdb",
                    port=None,
                    schema="",
                ),
            ),
        ]
    )
    def test_connection_from_with_auth_info(self, uri, uri_parts):
        connection = Connection(uri=uri)

        self.assertEqual(connection.conn_type, uri_parts.conn_type)
        self.assertEqual(connection.login, uri_parts.login)
        self.assertEqual(connection.password, uri_parts.password)
        self.assertEqual(connection.host, uri_parts.host)
        self.assertEqual(connection.port, uri_parts.port)
        self.assertEqual(connection.schema, uri_parts.schema)

    @mock.patch.dict(
        'os.environ',
        {
            'AIRFLOW_CONN_TEST_URI': 'postgres://username:password@ec2.compute.com:5432/the_database',
        },
    )
    def test_using_env_var(self):
        conn = SqliteHook.get_connection(conn_id='test_uri')
        self.assertEqual('ec2.compute.com', conn.host)
        self.assertEqual('the_database', conn.schema)
        self.assertEqual('username', conn.login)
        self.assertEqual('password', conn.password)
        self.assertEqual(5432, conn.port)

    @mock.patch.dict(
        'os.environ',
        {
            'AIRFLOW_CONN_TEST_URI_NO_CREDS': 'postgres://ec2.compute.com/the_database',
        },
    )
    def test_using_unix_socket_env_var(self):
        conn = SqliteHook.get_connection(conn_id='test_uri_no_creds')
        self.assertEqual('ec2.compute.com', conn.host)
        self.assertEqual('the_database', conn.schema)
        self.assertIsNone(conn.login)
        self.assertIsNone(conn.password)
        self.assertIsNone(conn.port)

    def test_param_setup(self):
        conn = Connection(
            conn_id='local_mysql',
            conn_type='mysql',
            host='localhost',
            login='airflow',
            password='airflow',
            schema='airflow',
        )
        self.assertEqual('localhost', conn.host)
        self.assertEqual('airflow', conn.schema)
        self.assertEqual('airflow', conn.login)
        self.assertEqual('airflow', conn.password)
        self.assertIsNone(conn.port)

    def test_env_var_priority(self):
        # Without the env var, the stored 'airflow_db' connection is used.
        conn = SqliteHook.get_connection(conn_id='airflow_db')
        self.assertNotEqual('ec2.compute.com', conn.host)

        # The AIRFLOW_CONN_* env var must take precedence over the DB entry.
        with mock.patch.dict(
            'os.environ',
            {
                'AIRFLOW_CONN_AIRFLOW_DB': 'postgres://username:password@ec2.compute.com:5432/the_database',
            },
        ):
            conn = SqliteHook.get_connection(conn_id='airflow_db')
            self.assertEqual('ec2.compute.com', conn.host)
            self.assertEqual('the_database', conn.schema)
            self.assertEqual('username', conn.login)
            self.assertEqual('password', conn.password)
            self.assertEqual(5432, conn.port)

    @mock.patch.dict(
        'os.environ',
        {
            'AIRFLOW_CONN_TEST_URI': 'postgres://username:password@ec2.compute.com:5432/the_database',
            'AIRFLOW_CONN_TEST_URI_NO_CREDS': 'postgres://ec2.compute.com/the_database',
        },
    )
    def test_dbapi_get_uri(self):
        conn = BaseHook.get_connection(conn_id='test_uri')
        hook = conn.get_hook()
        self.assertEqual('postgres://username:password@ec2.compute.com:5432/the_database', hook.get_uri())
        conn2 = BaseHook.get_connection(conn_id='test_uri_no_creds')
        hook2 = conn2.get_hook()
        self.assertEqual('postgres://ec2.compute.com/the_database', hook2.get_uri())

    @mock.patch.dict(
        'os.environ',
        {
            'AIRFLOW_CONN_TEST_URI': 'postgres://username:password@ec2.compute.com:5432/the_database',
            'AIRFLOW_CONN_TEST_URI_NO_CREDS': 'postgres://ec2.compute.com/the_database',
        },
    )
    def test_dbapi_get_sqlalchemy_engine(self):
        conn = BaseHook.get_connection(conn_id='test_uri')
        hook = conn.get_hook()
        engine = hook.get_sqlalchemy_engine()
        self.assertIsInstance(engine, sqlalchemy.engine.Engine)
        self.assertEqual('postgres://username:password@ec2.compute.com:5432/the_database', str(engine.url))

    @mock.patch.dict(
        'os.environ',
        {
            'AIRFLOW_CONN_TEST_URI': 'postgres://username:password@ec2.compute.com:5432/the_database',
            'AIRFLOW_CONN_TEST_URI_NO_CREDS': 'postgres://ec2.compute.com/the_database',
        },
    )
    def test_get_connections_env_var(self):
        conns = SqliteHook.get_connections(conn_id='test_uri')
        assert len(conns) == 1
        assert conns[0].host == 'ec2.compute.com'
        assert conns[0].schema == 'the_database'
        assert conns[0].login == 'username'
        assert conns[0].password == 'password'
        assert conns[0].port == 5432

    def test_connection_mixed(self):
        # Supplying both a URI and individual fields must be rejected.
        with self.assertRaisesRegex(
            AirflowException,
            re.escape(
                "You must create an object using the URI or individual values (conn_type, host, login, "
                "password, schema, port or extra).You can't mix these two ways to create this object."
            ),
        ):
            Connection(conn_id="TEST_ID", uri="mysql://", schema="AAA")
| {
"content_hash": "b6106e63a44d73ec34917ccdb7ce3369",
"timestamp": "",
"source": "github",
"line_count": 532,
"max_line_length": 110,
"avg_line_length": 39.721804511278194,
"alnum_prop": 0.5549403747870528,
"repo_name": "DinoCow/airflow",
"id": "2723c3fc0846b2cf749ecba413eb674d8141bafd",
"size": "21919",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/models/test_connection.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "56963"
},
{
"name": "HTML",
"bytes": "140781"
},
{
"name": "JavaScript",
"bytes": "1370838"
},
{
"name": "Mako",
"bytes": "1037"
},
{
"name": "Python",
"bytes": "1473771"
},
{
"name": "Shell",
"bytes": "18638"
}
],
"symlink_target": ""
} |
from mnist import *
import deepstacks
from deepstacks.macros import *
from deepstacks.lasagne import curr_layer,curr_stacks,curr_flags,curr_model
def dropout(p):
    """Return a one-entry deepstacks spec tuple that wraps the current layer
    in a lasagne DropoutLayer with drop probability ``p``."""
    layer_spec = {'layer': (lasagne.layers.DropoutLayer, curr_layer, {'p': p})}
    return ((0, 0, 0, 0, 0, 0, layer_spec),)
def build_cnn(input_var=None):
    """Build the demo network: a shared-weight conv autoencoder ('ae')
    applied to the input and to a rolled copy of it, followed by a
    dropout/dense softmax classifier on the bottleneck ('source') features.

    :param input_var: Theano tensor4 used as the network input.
    :return: the 5-tuple produced by deepstacks.lasagne.build_network:
        (network, stacks, layers, errors, watchpoints).
    """
    # NOTE(review): batch size is hard-coded to 500 here, matching the
    # minibatch size used in main() below -- keep them in sync.
    network = lasagne.layers.InputLayer(shape=(500, 1, 28, 28),
                                        input_var=input_var)
    network,stacks,layers,errors,watchpoints=deepstacks.lasagne.build_network(network,(
        # Mark this block as a reusable block named 'ae'
        (share,'ae',(
            ('input',32,5,1,0,0,{}),
            (0,0,2,2,0,0,{'maxpool'}),
            (0,32,5,1,0,0,{}),
            (0,0,2,2,'source',0,{'maxpool'}),
            (0,0,2,0,0,0,{'upscale'}),
            (0,32,5,1,0,0,{}),
            (0,0,2,0,0,0,{'upscale'}),
            (0,1,5,1,0,0,{'equal':['input','recon',lasagne.objectives.squared_error]}),
        )),
        # Roll orig image by 4 pixels at axis 2, save as 'input2'
        ('input',0,0,0,'input2',0,{'nonlinearity':lambda x:T.roll(x,4,2)}),
        # Reuse block 'ae': just like calling a function. Before enter 'ae',
        # 'input' will be replaced with 'input2'; when leaving 'ae' store 'source' to
        # 'source2', and restore value of 'input' and 'source'.
        #
        # Params of 'ae' are shared, and gradients backpropagate along both
        # paths. If you want to prevent gradients backpropagating along this
        # path, you can replace 'ae' with ['ae']
        (call,'ae',{'input':'input2','source':None},{'source':'source2'}),
        # Use 'source' as current layer, you can prevent gradients
        # backpropagating through 'source' too if you want, just replace
        # 'source' with ['source']
        ('source',),
        (dropout,0.5),
        (0,256,0,0,0,0,{'dense'}),
        (dropout,0.5),
        (0,10,0,0,0,0,{'dense':True,'nonlinearity':lasagne.nonlinearities.softmax}),
    ))
    return network,stacks,layers,errors,watchpoints
# Following is copied from mnist.py with small changes, search 'MODIFY' to see what changed.
# ############################## Main program ################################
# Everything else will be handled in our main program now. We could pull out
# more functions to better separate the code, but it wouldn't make it any
# easier to read.
def main(model='mlp', num_epochs=500):
    """Train and evaluate a model on MNIST (adapted from the Lasagne mnist.py
    example; only the 'cnn' path is supported in this deepstacks variant).

    :param model: model selector; this script asserts model == 'cnn'.
    :param num_epochs: number of training epochs.
    """
    # Load the dataset
    print("Loading data...")
    X_train, y_train, X_val, y_val, X_test, y_test = load_dataset()

    # Prepare Theano variables for inputs and targets
    input_var = T.tensor4('inputs')
    target_var = T.ivector('targets')

    # Create neural network model (depending on first command line parameter)
    print("Building model and compiling functions...")
    # MODIFY BEGIN
    # deepstacks' build_cnn also returns error/watchpoint dicts; default them
    # so the loss computation below works even before the model is built.
    errors,watchpoints = {},{}
    assert model=='cnn'
    # MODIFY END
    if model == 'mlp':
        network = build_mlp(input_var)
    elif model.startswith('custom_mlp:'):
        depth, width, drop_in, drop_hid = model.split(':', 1)[1].split(',')
        network = build_custom_mlp(input_var, int(depth), int(width),
                                   float(drop_in), float(drop_hid))
    elif model == 'cnn':
        # MODIFY BEGIN
        #network = build_cnn(input_var)
        network,stacks,paramlayers,errors,watchpoints = build_cnn(input_var)
        # MODIFY END
    else:
        print("Unrecognized model type %r." % model)
        return

    # Create a loss expression for training, i.e., a scalar objective we want
    # to minimize (for our multi-class problem, it is the cross-entropy loss):
    prediction = lasagne.layers.get_output(network)
    loss = lasagne.objectives.categorical_crossentropy(prediction, target_var)
    loss = loss.mean()
    # MODIFY BEGIN
    # Fold the autoencoder reconstruction errors into the classification loss.
    loss = deepstacks.lasagne.get_loss(errors,watchpoints,loss)[0]
    # MODIFY END
    # We could add some weight decay as well here, see lasagne.regularization.

    # Create update expressions for training, i.e., how to modify the
    # parameters at each training step. Here, we'll use Stochastic Gradient
    # Descent (SGD) with Nesterov momentum, but Lasagne offers plenty more.
    params = lasagne.layers.get_all_params(network, trainable=True)
    updates = lasagne.updates.nesterov_momentum(
        loss, params, learning_rate=0.01, momentum=0.9)

    # Create a loss expression for validation/testing. The crucial difference
    # here is that we do a deterministic forward pass through the network,
    # disabling dropout layers.
    test_prediction = lasagne.layers.get_output(network, deterministic=True)
    test_loss = lasagne.objectives.categorical_crossentropy(test_prediction,
                                                            target_var)
    test_loss = test_loss.mean()
    # As a bonus, also create an expression for the classification accuracy:
    test_acc = T.mean(T.eq(T.argmax(test_prediction, axis=1), target_var),
                      dtype=theano.config.floatX)

    # Compile a function performing a training step on a mini-batch (by giving
    # the updates dictionary) and returning the corresponding training loss:
    train_fn = theano.function([input_var, target_var], loss, updates=updates)

    # Compile a second function computing the validation loss and accuracy:
    val_fn = theano.function([input_var, target_var], [test_loss, test_acc])

    # Finally, launch the training loop.
    print("Starting training...")
    # We iterate over epochs:
    for epoch in range(num_epochs):
        # In each epoch, we do a full pass over the training data:
        train_err = 0
        train_batches = 0
        start_time = time.time()
        for batch in iterate_minibatches(X_train, y_train, 500, shuffle=True):
            inputs, targets = batch
            train_err += train_fn(inputs, targets)
            train_batches += 1

        # And a full pass over the validation data:
        val_err = 0
        val_acc = 0
        val_batches = 0
        for batch in iterate_minibatches(X_val, y_val, 500, shuffle=False):
            inputs, targets = batch
            err, acc = val_fn(inputs, targets)
            val_err += err
            val_acc += acc
            val_batches += 1

        # Then we print the results for this epoch:
        print("Epoch {} of {} took {:.3f}s".format(
            epoch + 1, num_epochs, time.time() - start_time))
        print("  training loss:\t\t{:.6f}".format(train_err / train_batches))
        print("  validation loss:\t\t{:.6f}".format(val_err / val_batches))
        print("  validation accuracy:\t\t{:.2f} %".format(
            val_acc / val_batches * 100))

    # After training, we compute and print the test error:
    test_err = 0
    test_acc = 0
    test_batches = 0
    for batch in iterate_minibatches(X_test, y_test, 500, shuffle=False):
        inputs, targets = batch
        err, acc = val_fn(inputs, targets)
        test_err += err
        test_acc += acc
        test_batches += 1
    print("Final results:")
    print("  test loss:\t\t\t{:.6f}".format(test_err / test_batches))
    print("  test accuracy:\t\t{:.2f} %".format(
        test_acc / test_batches * 100))

    # Optionally, you could now dump the network weights to a file like this:
    # np.savez('model.npz', *lasagne.layers.get_all_param_values(network))
    #
    # And load them again later on like this:
    # with np.load('model.npz') as f:
    #     param_values = [f['arr_%d' % i] for i in range(len(f.files))]
    #     lasagne.layers.set_all_param_values(network, param_values)
# CLI entry point: optional positional args are MODEL and EPOCHS.
if __name__ == '__main__':
    if ('--help' in sys.argv) or ('-h' in sys.argv):
        print("Trains a neural network on MNIST using Lasagne.")
        print("Usage: %s [MODEL [EPOCHS]]" % sys.argv[0])
        print()
        print("MODEL: 'mlp' for a simple Multi-Layer Perceptron (MLP),")
        print("       'custom_mlp:DEPTH,WIDTH,DROP_IN,DROP_HID' for an MLP")
        print("       with DEPTH hidden layers of WIDTH units, DROP_IN")
        print("       input dropout and DROP_HID hidden dropout,")
        print("       'cnn' for a simple Convolutional Neural Network (CNN).")
        print("EPOCHS: number of training epochs to perform (default: 500)")
    else:
        kwargs = {}
        if len(sys.argv) > 1:
            kwargs['model'] = sys.argv[1]
        if len(sys.argv) > 2:
            kwargs['num_epochs'] = int(sys.argv[2])
        main(**kwargs)
| {
"content_hash": "409c85225bb5b32821aadf11008154d0",
"timestamp": "",
"source": "github",
"line_count": 196,
"max_line_length": 92,
"avg_line_length": 43.08163265306123,
"alnum_prop": 0.6049265750828992,
"repo_name": "guoxuesong/deepstacks",
"id": "6008e622455fdc6bf550ecb1f89e8ab40b3da1cf",
"size": "8526",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/lasagne/4.reuse.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "291724"
},
{
"name": "Shell",
"bytes": "2213"
}
],
"symlink_target": ""
} |
from __future__ import print_function
# the following is a hack to get the baseclient to import whether we're in a
# package or not. This makes pep8 unhappy hence the annotations.
try:
# baseclient and this client are in a package
from .baseclient import BaseClient as _BaseClient # @UnusedImport
except ImportError:
# no they aren't
from baseclient import BaseClient as _BaseClient # @Reimport
class kb_gblocks(object):
    """Thin RPC client for the kb_gblocks KBase service.

    All calls are delegated to a BaseClient configured against the given
    service URL.
    """

    def __init__(
            self, url=None, timeout=30 * 60, user_id=None,
            password=None, token=None, ignore_authrc=False,
            trust_all_ssl_certificates=False,
            auth_svc='https://ci.kbase.us/services/auth/api/legacy/KBase/Sessions/Login'):
        # The service endpoint is mandatory; fail fast before building the
        # underlying client.
        if url is None:
            raise ValueError('A url is required')
        self._service_ver = None
        self._client = _BaseClient(
            url, timeout=timeout, user_id=user_id, password=password,
            token=token, ignore_authrc=ignore_authrc,
            trust_all_ssl_certificates=trust_all_ssl_certificates,
            auth_svc=auth_svc)

    def run_Gblocks(self, params, context=None):
        """Trim an MSA of DNA or protein sequences with Gblocks.

        :param params: dict of Gblocks_Params -- workspace_name, desc,
            input_ref, output_name, trim_level, min_seqs_for_conserved,
            min_seqs_for_flank, max_pos_contig_nonconserved, min_block_len,
            remove_mask_positions_flag.
        :param context: optional call context forwarded to the service.
        :return: Gblocks_Output dict with report_name and report_ref.
        """
        return self._client.call_method(
            'kb_gblocks.run_Gblocks', [params], self._service_ver, context)

    def status(self, context=None):
        """Return the service status structure."""
        return self._client.call_method(
            'kb_gblocks.status', [], self._service_ver, context)
| {
"content_hash": "571b00b0a1d2e9608b1c306598cb6fb3",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 90,
"avg_line_length": 49.93103448275862,
"alnum_prop": 0.6153314917127072,
"repo_name": "dcchivian/kb_gblocks",
"id": "af1686669a5d2c2cf27eed0ab7e755237d88f253",
"size": "3136",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/kb_gblocks/kb_gblocksClient.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "930"
},
{
"name": "Java",
"bytes": "16012"
},
{
"name": "JavaScript",
"bytes": "4602"
},
{
"name": "Makefile",
"bytes": "2979"
},
{
"name": "Perl",
"bytes": "13903"
},
{
"name": "Python",
"bytes": "509023"
},
{
"name": "Ruby",
"bytes": "1844"
},
{
"name": "Shell",
"bytes": "1669"
}
],
"symlink_target": ""
} |
from setuptools import find_packages, setup
# Package metadata for the Hail CI/CD service.
package_info = dict(
    name='ci',
    version='0.0.1',
    url='https://github.com/hail-is/hail.git',
    author='Hail Team',
    author_email='hail@broadinstitute.org',
    description='Hail CI/CD System',
    packages=find_packages(),
    include_package_data=True,
)

setup(**package_info)
| {
"content_hash": "35955e81a9b99893c2bfc9ec8c13b8cc",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 46,
"avg_line_length": 25.25,
"alnum_prop": 0.6633663366336634,
"repo_name": "hail-is/hail",
"id": "721339c91bd155af5503ff4c2937a8857ca80e7c",
"size": "303",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "ci/setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "7729"
},
{
"name": "C",
"bytes": "779"
},
{
"name": "C++",
"bytes": "171899"
},
{
"name": "CMake",
"bytes": "3045"
},
{
"name": "CSS",
"bytes": "666"
},
{
"name": "Dockerfile",
"bytes": "10056"
},
{
"name": "Emacs Lisp",
"bytes": "377"
},
{
"name": "HCL",
"bytes": "54923"
},
{
"name": "HTML",
"bytes": "155946"
},
{
"name": "Java",
"bytes": "38401"
},
{
"name": "JavaScript",
"bytes": "877"
},
{
"name": "Jupyter Notebook",
"bytes": "305748"
},
{
"name": "MLIR",
"bytes": "20"
},
{
"name": "Makefile",
"bytes": "61284"
},
{
"name": "Python",
"bytes": "5635857"
},
{
"name": "R",
"bytes": "3038"
},
{
"name": "SCSS",
"bytes": "33487"
},
{
"name": "Scala",
"bytes": "5050997"
},
{
"name": "Shell",
"bytes": "75539"
},
{
"name": "XSLT",
"bytes": "5748"
}
],
"symlink_target": ""
} |
'''
Test metadata functions
'''
from nose.tools import assert_equals, with_setup
from tests.util import setup, teardown, session_scope, EMPTY_RASTER, FakeTask
from tasks.meta import (OBSColumnTable, OBSColumn, OBSTable, OBSColumnTableTile,
OBSTag, OBSColumnTag)
from tasks.targets import TagTarget
def populate():
    """Seed the metadata store with a small census-style fixture.

    Creates: two tags (catalog + source), four numeric data columns, two
    geoid columns, one geometry column, two extract tables, and a geometry
    table carrying a single raster tile.  Used by every test below.
    """
    with session_scope() as session:
        population_tag = OBSTag(id='population', name='population', type='catalog')
        source_tag = OBSTag(id='us_census', name='US Census', type='source')
        session.add(population_tag)
        session.add(source_tag)
        datacols = {
            'median_rent': OBSColumn(id='"us.census.acs".median_rent', type='numeric'),
            'total_pop': OBSColumn(id='"us.census.acs".total_pop', type='numeric'),
            'male_pop': OBSColumn(id='"us.census.acs".male_pop', type='numeric'),
            'female_pop': OBSColumn(id='"us.census.acs".female_pop', type='numeric'),
        }
        # male_pop/female_pop are numerators whose denominator is total_pop;
        # test_column_to_column_target relies on this relation.
        for numerator_col in ('male_pop', 'female_pop', ):
            datacol = datacols[numerator_col]
            datacol.targets[datacols['total_pop']] = 'denominator'
            session.add(datacol)
        tract_geoid = OBSColumn(id='"us.census.acs".tract_2013_geoid', type='text')
        puma_geoid = OBSColumn(id='"us.census.acs".puma_2013_geoid', type='text')
        tract_geom = OBSColumn(id='"us.census.tiger".tract', type='geometry')
        tables = {
            'tract': OBSTable(id='"us.census.acs".extract_2013_5yr_tract',
                              tablename='us_census_acs2013_5yr_tract'),
            'puma': OBSTable(id='"us.census.acs".extract_2013_5yr_puma',
                             tablename='us_census_acs2013_5yr_puma')
        }
        geom_table = OBSTable(id='"us.census.acs".tract_geoms',
                              tablename='tract_geoms')
        # Link each extract table (and the geometry table) to its geoid column.
        session.add(OBSColumnTable(table=tables['tract'],
                                   column=tract_geoid,
                                   colname='geoid'))
        session.add(OBSColumnTable(table=tables['puma'],
                                   column=puma_geoid,
                                   colname='geoid'))
        session.add(OBSColumnTable(table=geom_table,
                                   column=tract_geoid,
                                   colname='geoid'))
        geom_coltable = OBSColumnTable(table=geom_table,
                                       column=tract_geom,
                                       colname='the_geom')
        session.add(geom_coltable)
        # One raster tile hangs off the geometry column/table link
        # (exercised by test_delete_columntable_removes_tiles).
        session.add(OBSColumnTableTile(column_id=geom_coltable.column.id,
                                       table_id=geom_coltable.table.id,
                                       tile_id=1,
                                       tile=EMPTY_RASTER
                                       ))
        # Every *_pop column gets both tags; every data column appears in
        # both extract tables.
        for colname, datacol in datacols.items():
            if colname.endswith('pop'):
                datacol.tags.append(TagTarget(population_tag, FakeTask()))
                datacol.tags.append(TagTarget(source_tag, FakeTask()))
            for table in list(tables.values()):
                coltable = OBSColumnTable(column=datacol,
                                          table=table,
                                          colname=colname)
                session.add(coltable)
            session.add(datacol)
        for table in list(tables.values()):
            session.add(table)
@with_setup(setup, teardown)
def test_columns_in_tables():
    """A table exposes every column linked to it."""
    populate()
    with session_scope() as session:
        puma_table = session.query(OBSTable).get(
            '"us.census.acs".extract_2013_5yr_puma')
        assert_equals(5, len(puma_table.columns))
@with_setup(setup, teardown)
def test_tables_in_columns():
    """A column exposes every table it appears in."""
    populate()
    with session_scope() as session:
        rent_col = session.query(OBSColumn).get(
            '"us.census.acs".median_rent')
        assert_equals(2, len(rent_col.tables))
@with_setup(setup, teardown)
def test_tags_in_columns():
    """A column exposes the tags attached to it."""
    populate()
    with session_scope() as session:
        total_pop = session.query(OBSColumn).get('"us.census.acs".total_pop')
        tag_names = sorted(tag.name for tag in total_pop.tags)
        assert_equals(['US Census', 'population'], tag_names)
@with_setup(setup, teardown)
def test_columns_in_tags():
    """A tag exposes the columns carrying it, and keeps its type."""
    populate()
    with session_scope() as session:
        catalog_tag = session.query(OBSTag).get('population')
        source_tag = session.query(OBSTag).get('us_census')
        assert_equals(3, len(catalog_tag.columns))
        assert_equals(3, len(source_tag.columns))
        assert_equals(catalog_tag.type, 'catalog')
        assert_equals(source_tag.type, 'source')
@with_setup(setup, teardown)
def test_column_to_column_target():
    """A column can point at another column through a typed target relation."""
    populate()
    with session_scope() as session:
        female = session.query(OBSColumn).get('"us.census.acs".female_pop')
        assert_equals(0, len(female.sources))
        assert_equals(1, len(female.targets))
        target_col, relation = list(female.targets.items())[0]
        assert_equals(target_col.id, '"us.census.acs".total_pop')
        assert_equals(relation, 'denominator')
@with_setup(setup, teardown)
def test_delete_column_deletes_relevant_related_objects():
    """Deleting a column removes its column-table links but not the tables."""
    populate()
    with session_scope() as session:
        assert_equals(7, session.query(OBSColumn).count())
        assert_equals(3, session.query(OBSTable).count())
        assert_equals(12, session.query(OBSColumnTable).count())
        doomed = session.query(OBSColumn).get('"us.census.acs".median_rent')
        session.delete(doomed)
        assert_equals(6, session.query(OBSColumn).count())
        assert_equals(3, session.query(OBSTable).count())
        assert_equals(10, session.query(OBSColumnTable).count())
@with_setup(setup, teardown)
def test_delete_table_deletes_relevant_related_objects():
    """Deleting a table removes its column-table links but not the columns."""
    populate()
    with session_scope() as session:
        assert_equals(7, session.query(OBSColumn).count())
        assert_equals(3, session.query(OBSTable).count())
        assert_equals(12, session.query(OBSColumnTable).count())
        doomed = session.query(OBSTable).get(
            '"us.census.acs".extract_2013_5yr_tract')
        session.delete(doomed)
        assert_equals(7, session.query(OBSColumn).count())
        assert_equals(2, session.query(OBSTable).count())
        assert_equals(7, session.query(OBSColumnTable).count())
@with_setup(setup, teardown)
def test_delete_tag_deletes_relevant_related_objects():
    """Deleting a tag removes its column-tag links but not the columns."""
    populate()
    with session_scope() as session:
        assert_equals(7, session.query(OBSColumn).count())
        assert_equals(6, session.query(OBSColumnTag).count())
        assert_equals(2, session.query(OBSTag).count())
        session.delete(session.query(OBSTag).get('population'))
        assert_equals(7, session.query(OBSColumn).count())
        assert_equals(3, session.query(OBSColumnTag).count())
        assert_equals(1, session.query(OBSTag).count())
@with_setup(setup, teardown)
def test_delete_columntable_removes_tiles():
    """A tile lives and dies with its (table, column) link."""
    populate()
    with session_scope() as session:
        assert_equals(1, session.query(OBSColumnTableTile).count())
        tile_key = ('"us.census.acs".tract_geoms', '"us.census.tiger".tract', 1, )
        session.delete(session.query(OBSColumnTableTile).get(tile_key))
        assert_equals(0, session.query(OBSColumnTableTile).count())
| {
"content_hash": "39d3c3dc3047d49f5c1941b755b2a15d",
"timestamp": "",
"source": "github",
"line_count": 183,
"max_line_length": 93,
"avg_line_length": 40.84699453551912,
"alnum_prop": 0.5961204013377926,
"repo_name": "CartoDB/bigmetadata",
"id": "ba366b55352a973dfa75816d344c6b5e4b9de4f0",
"size": "7475",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_meta.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "143"
},
{
"name": "Dockerfile",
"bytes": "2305"
},
{
"name": "HTML",
"bytes": "19058"
},
{
"name": "JavaScript",
"bytes": "5864"
},
{
"name": "Makefile",
"bytes": "27552"
},
{
"name": "PLpgSQL",
"bytes": "32699"
},
{
"name": "Python",
"bytes": "2967442"
},
{
"name": "Shell",
"bytes": "11590"
}
],
"symlink_target": ""
} |
"""Standard text cleaning for pandas, used by many other functions, for more granularity use the composite
functions separately"""
from usherwood_ds.nlp.processing.stopwords import stopword_removal
from usherwood_ds.nlp.preprocessing.cleaning import clean_text
from usherwood_ds.nlp.preprocessing.stemming import Stemmer
from usherwood_ds.nlp.preprocessing.social_feature_extraction import extract_hashtags, \
extract_mentioned_users, extract_urls
__author__ = "Peter J Usherwood"
__python_version__ = "3.5"
def preprocess_df(data,
                  text_field_key='Snippet',
                  language='english',
                  additional_list=None,
                  adhoc_stopwords=None,
                  remove_hashtag_words=False,
                  remove_mentioned_authors=True,
                  remove_urls=True,
                  stopped_not_stemmed=False,
                  pos_tuples=False):
    """
    Basic wrapper for cleaning text data in a pandas dataframe column

    :param data: Pandas dataframe
    :param text_field_key: The field name of the text to be cleaned
    :param language: Primary language (see stopwords/stemming)
    :param additional_list: List of additional pre set stopwords (see stopwords)
    :param adhoc_stopwords: List of adhoc stopwords (see stopwords)
    :param remove_hashtag_words: Bool, remove the words that appear as hashtags and replace with token
    :param remove_mentioned_authors: Bool, remove the at mentioned authors and replace with token
    :param remove_urls: Bool, remove urls and replace with token
    :param stopped_not_stemmed: Return a field of cleaned and stopword removed text, useful for the categorizer
    :param pos_tuples: Bool, if tokens are a list of pos_tuples set this to true

    :return: data with additional text/pos_tuple columns showing the cleaning process
    """
    # FIX: mutable default arguments ([]) replaced with None sentinels so
    # repeated calls cannot share the same list objects.
    if additional_list is None:
        additional_list = []
    if adhoc_stopwords is None:
        adhoc_stopwords = []

    stemmer = Stemmer(language=language)

    if not pos_tuples:
        # FIX: DataFrame.ix was deprecated and removed from pandas; .loc is
        # the label-based equivalent and is identical here because all keys
        # are string column labels.
        data['Cleaned'] = data.loc[:, text_field_key]
        print('Loaded')

        # Extract social features into their own columns first, then
        # optionally strip them (token replacement) from the working text.
        data['Hashtags'] = data.loc[:, 'Cleaned'].apply(
            lambda e: extract_hashtags(text_string=e,
                                       remove_hashtags=False,
                                       replace_with_token=False)[1])
        data['Cleaned'] = data.loc[:, 'Cleaned'].apply(
            lambda e: extract_hashtags(text_string=e,
                                       remove_hashtags=False,
                                       replace_with_token=remove_hashtag_words)[0])
        data['At Mentions'] = data.loc[:, 'Cleaned'].apply(
            lambda e: extract_mentioned_users(text_string=e,
                                              remove_users=False,
                                              replace_with_token=False)[1])
        data['Cleaned'] = data.loc[:, 'Cleaned'].apply(
            lambda e: extract_mentioned_users(text_string=e,
                                              remove_users=False,
                                              replace_with_token=remove_mentioned_authors)[0])
        data['Extracted URLs'] = data.loc[:, 'Cleaned'].apply(
            lambda e: extract_urls(text_string=e,
                                   remove_urls=False,
                                   replace_with_token=False)[1])
        data['Cleaned'] = data.loc[:, 'Cleaned'].apply(
            lambda e: extract_urls(text_string=e,
                                   remove_urls=False,
                                   replace_with_token=remove_urls)[0])
        print('Removed social features. Hashtags:', str(remove_hashtag_words),
              'At Mentions:', str(remove_mentioned_authors),
              'URLs:', str(remove_urls))

        data['Cleaned'] = data.loc[:, 'Cleaned'].apply(lambda e: clean_text(text_string=e))
        print('Cleaned Text')

        data['Stemmed'] = data.loc[:, 'Cleaned'].apply(lambda e: stemmer.stem_text(text_string=e))
        print('Stemmed Text')

        data['Preprocessed'] = data.loc[:, 'Stemmed'].apply(
            lambda e: stopword_removal(text_string=e,
                                       language=language,
                                       additional_language_list=additional_list,
                                       adhoc_list=adhoc_stopwords))
        print('Removed Stopwords')

        if stopped_not_stemmed:
            # Cleaned + stopped (but not stemmed) variant for the categorizer.
            data['Stopped'] = data.loc[:, 'Cleaned'].apply(
                lambda e: stopword_removal(text_string=e,
                                           language=language,
                                           additional_language_list=additional_list,
                                           adhoc_list=adhoc_stopwords))
            print('Stopped not Stemmed')
    else:
        # Token/POS-tuple pipeline: social feature extraction is skipped;
        # every stage operates on token lists instead of raw strings.
        print('Loaded')

        data['Cleaned'] = data.loc[:, text_field_key].apply(lambda e: clean_text(tokens=e, pos_tuples=True))
        print('Cleaned Text')

        data['Stemmed'] = data.loc[:, 'Cleaned'].apply(lambda e: stemmer.stem_text(tokens=e,
                                                                                   pos_tuples=True))
        print('Stemmed Text')

        data['Preprocessed'] = data.loc[:, 'Stemmed'].apply(
            lambda e: stopword_removal(tokens=e,
                                       pos_tuples=True,
                                       language=language,
                                       additional_language_list=additional_list,
                                       adhoc_list=adhoc_stopwords))
        print('Removed Stopwords')

        if stopped_not_stemmed:
            data['Stopped'] = data.loc[:, 'Cleaned'].apply(
                lambda e: stopword_removal(tokens=e,
                                           pos_tuples=True,
                                           language=language,
                                           additional_language_list=additional_list,
                                           adhoc_list=adhoc_stopwords))

    return data
def preprocess_string(text_string=None,
                      tokens=None,
                      pos_tuples=False,
                      language='english',
                      additional_list=None,
                      adhoc_stopwords=None):
    """
    Function that carries out all standard preprocessing on a string or list of tokens (normal or pos)

    :param text_string: text string to be preprocessed (only give one of this and tokens)
    :param tokens: list of tokens (normal or pos) to be preprocessed (only give one of this and text_string)
    :param pos_tuples: Bool, if tokens are a list of pos_tuples set this to true
    :param language: Primary language (see stopwords/stemming)
    :param additional_list: List of additional pre set stopwords (see stopwords)
    :param adhoc_stopwords: List of adhoc stopwords (see stopwords)

    :return: preprocessed text in either string or list depending on (and matching) input
    """
    # FIX: mutable default arguments ([]) replaced with None sentinels.
    if additional_list is None:
        additional_list = []
    if adhoc_stopwords is None:
        adhoc_stopwords = []

    stemmer = Stemmer(language=language)

    if text_string:
        text = clean_text(text_string=text_string)
        # The Stemmer already carries the language; stem_text is called
        # without a language kwarg, matching every call in preprocess_df.
        text = stemmer.stem_text(text_string=text)
        # FIX: stopword_removal takes additional_language_list/adhoc_list
        # (see all four calls in preprocess_df); the old keyword names
        # additional_list/adhoc_stopwords would raise a TypeError.
        text = stopword_removal(text_string=text,
                                language=language,
                                additional_language_list=additional_list,
                                adhoc_list=adhoc_stopwords)
        preped = text
    else:
        tokens = clean_text(tokens=tokens, pos_tuples=pos_tuples)
        tokens = stemmer.stem_text(tokens=tokens, pos_tuples=pos_tuples)
        tokens = stopword_removal(tokens=tokens,
                                  pos_tuples=pos_tuples,
                                  language=language,
                                  additional_language_list=additional_list,
                                  adhoc_list=adhoc_stopwords)
        preped = tokens

    return preped
| {
"content_hash": "62af4e0672e4f4ec77c0f3184e21b15f",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 114,
"avg_line_length": 51.89820359281437,
"alnum_prop": 0.4878273912541825,
"repo_name": "Usherwood/usherwood_ds",
"id": "295878c30e234f202818f0b0d7b23efac130308f",
"size": "8690",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "usherwood_ds/nlp/preprocessing/preprocess.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "76416"
},
{
"name": "Python",
"bytes": "245786"
}
],
"symlink_target": ""
} |
import unittest
from wikipedia_searcher.wikipedia_searcher import WikipediaSearcher
class TestWikipediaSearcher(unittest.TestCase):
    """Integration tests for WikipediaSearcher.simple_entry_search."""

    def setUp(self):
        self.searcher = WikipediaSearcher()

    def test_full_text_english(self):
        hits = self.searcher.simple_entry_search('kusareru', action='full')
        self.assertEqual(len(hits), 1)
        self.assertEqual(hits[0][0], u'http://en.wikipedia.org/wiki/List_of_jōyō_kanji')

    def test_full_text_unicode(self):
        hits = self.searcher.simple_entry_search(u'アスクライブ', action='full', language='ja')
        self.assertEqual(len(hits), 3)

    def test_full_text_unicode_query_continue(self):
        # Large result set: exercises the query-continue paging path.
        hits = self.searcher.simple_entry_search(u'生きていたい', action='full', language='ja')
        self.assertEqual(len(hits), 19)

    def test_exact(self):
        hits = self.searcher.simple_entry_search(u'Toyotomi Hideyoshi', language='en')
        self.assertEqual(len(hits), 1)
        entry = hits[0]
        self.assertEqual(entry[0], u'<http://en.wikipedia.org/wiki/Toyotomi_Hideyoshi>')
        self.assertEqual(entry[1], u'"Toyotomi Hideyoshi"@en')
        self.assertEqual(entry[3], u'')

    def test_exact_redirect(self):
        hits = self.searcher.simple_entry_search(u'Masuko', language='en')
        self.assertEqual(len(hits), 1)
        entry = hits[0]
        self.assertEqual(entry[0], u'<http://en.wikipedia.org/wiki/Masuko>')
        self.assertEqual(entry[1], u'"Masuko"@en')
        self.assertEqual(entry[2], u'')
        self.assertEqual(entry[3], u'"Aitarō Masuko"@en')

    def test_exact_unicode(self):
        hits = self.searcher.simple_entry_search(u'明治神宮', language='ja', action='exact')
        self.assertEqual(len(hits), 1)
        entry = hits[0]
        self.assertEqual(entry[0], u'<http://ja.wikipedia.org/wiki/明治神宮>')
        self.assertEqual(entry[1], u'"明治神宮"@ja')
        self.assertEqual(entry[3], u'')

    def test_forward(self):
        hits = self.searcher.simple_entry_search(u'Hideyoshi', language='en', action='forward')
        self.assertEqual(len(hits), 14)

    def test_forward_unicode(self):
        hits = self.searcher.simple_entry_search(u'プログラミング', language='ja', action='forward')
        self.assertGreater(len(hits), 1)

    def test_invalid_action_name(self):
        # An unknown action must be rejected with ValueError.
        self.assertRaises(ValueError, self.searcher.simple_entry_search,
                          word=u'Hideyoshi', language='en', action='invalid_action')
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "3afa1e8c9ff8e7a2ce6d0da51764adb3",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 97,
"avg_line_length": 42.45,
"alnum_prop": 0.6450726344719278,
"repo_name": "yustoris/wikipedia_searcher",
"id": "82e23247c245c9f80951ba5a675c0b67d5fab4d9",
"size": "2636",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_wikipedia_searcher.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9854"
}
],
"symlink_target": ""
} |
"""
Write a program to check whether a given number is an ugly number.
Ugly numbers are positive numbers whose prime factors only include 2, 3, 5. For example, 6, 8 are ugly while 14 is not ugly since it includes another prime factor 7.
Note that 1 is typically treated as an ugly number.
"""
class Solution(object):
    def isUgly(self, num):
        """Return True if num is an ugly number.

        Ugly numbers are positive numbers whose only prime factors are
        2, 3 and 5; by convention 1 is ugly.

        :type num: int
        :rtype: bool
        """
        # FIX: the original started with `if num < = 5: return True`, which
        # is a syntax error, and the intended `num <= 5` shortcut would have
        # wrongly accepted 0 and negatives (making the `num <= 0` check
        # unreachable for them).  Reject non-positive input first.
        if num <= 0:
            return False
        # Strip out every factor of 2, 3 and 5; only an ugly number is
        # reduced all the way down to 1.
        for prime in (2, 3, 5):
            while num % prime == 0:
                num //= prime
        return num == 1
| {
"content_hash": "923bb8b4bdcbe2e503c7392ee1aaf96f",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 165,
"avg_line_length": 25.655172413793103,
"alnum_prop": 0.5255376344086021,
"repo_name": "yingcuhk/LeetCode",
"id": "80941aea8356e48d9380606fc82581eedcbcfdac",
"size": "744",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Algorithms/#263 Ugly Number/PythonCode.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "126729"
}
],
"symlink_target": ""
} |
"""Generates a sequence of operations in Reverse Polish Notation.
Runs the verilog through yosys and nexpnr to get the delay estimate.
"""
import argparse
import csv
import enum
import multiprocessing as mp
import os
import random
import re
import subprocess
import tempfile
import time
from typing import Tuple, Type
import numpy as np
# Number of distinct opcodes (see the Op enum below, NOP included).
NUMOPS = 8
# Maximum length of a generated RPN op sequence (shorter ones are NOP-padded).
MAXLEN = 8
# Worker process count used by main() for parallel sample generation.
NUM_PROCESSES = 16
class Op(enum.Enum):
    """Opcodes of the RPN expression language.

    The integer values are written into the generated CSV rows (see
    gen_csv), so they must remain stable.
    """
    NOP = 0
    PARAM1 = 1  # push input x1
    PARAM2 = 2  # push input x2
    ADD2 = 3
    NEG = 4
    XOR2 = 5
    MUL2 = 6
    AND2 = 7
    OR2 = 8
consumed_stack_slots = {
Op.NOP: 0,
Op.PARAM1: 0,
Op.PARAM2: 0,
Op.ADD2: 2,
Op.NEG: 1,
Op.XOR2: 2,
Op.MUL2: 2,
Op.AND2: 2,
Op.OR2: 2
}
to_str = {
Op.PARAM1: 'x1',
Op.PARAM2: 'x2',
Op.ADD2: '+',
Op.NEG: '~',
Op.XOR2: '^',
Op.MUL2: '*',
Op.AND2: '&',
Op.OR2: '|'
}
VERILOG_TEMPLATE = """
module my_module(
input wire [3:0] x1,
input wire [3:0] x2,
output [3:0] out
);
assign out = {};
endmodule
"""
binary_ops = [Op.ADD2, Op.XOR2, Op.MUL2, Op.AND2, Op.OR2]
def gen() -> Tuple[Type[Op], Type[str]]:
    """Returns random sequence of ops in RPN & corresponding Verilog.

    Retries until a sequence of at most MAXLEN ops reduces the stack to a
    single expression, then NOP-pads the op list to exactly MAXLEN.
    NOTE(review): the annotation looks wrong -- the function actually
    returns (list of Op, expression string); confirm and fix upstream.
    """
    oplist = list(Op)
    while True:
        # Both inputs are always pushed first, so every candidate
        # expression starts with x1 and x2 on the stack.
        ops = [Op.PARAM1, Op.PARAM2]
        stack = ['x1', 'x2']
        # Pick a target length in [1, MAXLEN]; P(length=k) is proportional
        # to 2**(k-1), so longer sequences are exponentially more likely.
        length = np.random.choice(np.arange(1, MAXLEN+1),
                                  p=[(1 << i) / ((1 << MAXLEN) - 1) \
                                     for i in range(MAXLEN)])
        while len(ops) < length:
            # oplist[3:] are the operator opcodes (ADD2..OR2).
            op = random.choice([op for op in oplist[3:]])
            # Append random operands (oplist[1:3] = PARAM1/PARAM2) until the
            # stack is deep enough for this operator.
            while consumed_stack_slots[op] > len(stack):
                new_op = random.choice([op for op in oplist[1:3]])
                stack.append(to_str[new_op])
                ops.append(new_op)
            ops.append(op)
            # Fold the operator into the growing Verilog expression.
            if op in binary_ops:
                arg1 = stack.pop()
                arg2 = stack.pop()
                stack.append('({}{}{})'.format(arg1, to_str[op], arg2))
            elif op == Op.NEG:
                arg = stack.pop()
                stack.append('~{}'.format(arg))
        # Only use if all operands are used up.
        if len(stack) == 1:
            # Pad to MAXLEN; if operand insertion overshot, retry.
            while len(ops) < MAXLEN:
                ops.append(Op.NOP)
            if len(ops) > MAXLEN:
                continue
            return ops, stack[0]
def parse_log(filename: str) -> str:
    """Return the final max-delay figure reported in a nextpnr log file.

    Finds the last 'Max delay' line and returns the number that follows it,
    as a string.  When no delay line exists the design was optimized down
    to a constant, so '0.0' is returned.
    """
    marker = 'Max delay <async> -> <async>: '
    with open(filename, 'r') as handle:
        contents = handle.read()
    # Only the final occurrence matters -- rfind gives it directly.
    last = contents.rfind(marker)
    if last == -1:
        # No delay statement: the expression collapsed to a constant.
        return '0.0'
    start = last + len(marker)
    stop = start
    # The figure runs up to the first space (e.g. '5.32 ns').
    while contents[stop] != ' ':
        stop += 1
    return contents[start:stop]
def yosys_and_nextpnr(expr: str) -> str:
    """Runs Yosys and nextpnr tools to get delay estimate.

    :param expr: Verilog expression to assign to the module output.
    :return: the delay reported by nextpnr, as a string (parse_log returns
        '0.0' when the design optimized away).  FIX: the previous ``float``
        return annotation was wrong.
    """
    # NOTE(review): the yosys binary path is hard-coded to a developer's
    # home directory -- parameterize or put yosys on PATH before reuse.
    with tempfile.TemporaryDirectory() as tempdir:
        with open('{}/sample.v'.format(tempdir), 'w+') as verilog_file, \
             open('{}/sample.json'.format(tempdir), 'w+') as json_file, \
             open('{}/sample.log'.format(tempdir), 'w+') as log_file:
            verilog_file.write(VERILOG_TEMPLATE.format(expr))
            # FIX: flush so the subprocess sees the full source; the write
            # above may otherwise still sit in Python's file buffer.
            verilog_file.flush()
            subprocess.run([
                '/usr/local/google/home/brjiang/Documents/yosys/yosys', '-p',
                'read_verilog {}; synth_ecp5 -top my_module -json {}'.format(
                    verilog_file.name, json_file.name)
            ],
                           stdout=subprocess.DEVNULL,
                           check=True)
            subprocess.run([
                'nextpnr-ecp5', '--json', json_file.name, '--package', 'CABGA381',
                '--log', log_file.name
            ],
                           stderr=subprocess.DEVNULL,
                           check=True)
            delay = parse_log(log_file.name)
            return delay
def gen_csv(num_samples: int, name: str):
    """Generate num_samples random op sequences, measure each through the
    synthesis toolchain, and append one CSV row per sample to *name*.

    Each row holds the op code values followed by the measured delay.
    """
    with open(name, 'w+') as out_file:
        rows = csv.writer(out_file, delimiter=',')
        for _ in range(num_samples):
            ops, expr = gen()
            row = [str(op.value) for op in ops]
            row.append(str(yosys_and_nextpnr(expr)))
            rows.writerow(row)
def main(num_samples):
    """Fan sample generation out over NUM_PROCESSES workers, then stitch
    the per-worker CSV shards into a single data file."""
    start = time.time()
    workers = []
    for worker_id in range(NUM_PROCESSES):
        shard_name = './data/data_{}_{}_{}.csv'.format(NUMOPS, MAXLEN, worker_id)
        proc = mp.Process(target=gen_csv,
                          args=[num_samples // NUM_PROCESSES, shard_name])
        proc.start()
        workers.append(proc)
    for proc in workers:
        proc.join()
    end = time.time()
    print('Time elapsed: {} s'.format(end - start))

    # Append individual files back together, deleting each shard afterwards.
    with open('./data/data_{}_{}.csv'.format(NUMOPS, MAXLEN), 'w+') as outfile:
        for worker_id in range(NUM_PROCESSES):
            shard_name = './data/data_{}_{}_{}.csv'.format(NUMOPS, MAXLEN, worker_id)
            with open(shard_name, 'r') as shard:
                for line in shard:
                    outfile.write(line)
            os.remove(shard_name)
# Command-line entry point: takes the total number of samples to generate.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('num_samples', type=int)
    args = parser.parse_args()
    main(args.num_samples)
| {
"content_hash": "8b7a6e05ef72e7d505d365e2b4e74274",
"timestamp": "",
"source": "github",
"line_count": 189,
"max_line_length": 77,
"avg_line_length": 26.941798941798943,
"alnum_prop": 0.5760015710919089,
"repo_name": "google/xls",
"id": "bd8eea84333db416cbb40d36bfd8bf08d4f31cf4",
"size": "5674",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "xls/experimental/ml_delay_model/expr_generator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "9907"
},
{
"name": "C++",
"bytes": "11361427"
},
{
"name": "CSS",
"bytes": "1953"
},
{
"name": "JavaScript",
"bytes": "59480"
},
{
"name": "NASL",
"bytes": "9203"
},
{
"name": "Pawn",
"bytes": "28321"
},
{
"name": "Python",
"bytes": "697569"
},
{
"name": "Rust",
"bytes": "1839912"
},
{
"name": "Shell",
"bytes": "13738"
},
{
"name": "Standard ML",
"bytes": "590"
},
{
"name": "Starlark",
"bytes": "773086"
},
{
"name": "Verilog",
"bytes": "42214"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Drop Benchmark.name and add a database index to benchmark_type."""

    dependencies = [
        ('benchmarks', '0009_auto_20150724_1438'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='benchmark',
            name='name',
        ),
        migrations.AlterField(
            model_name='benchmark',
            name='benchmark_type',
            # db_index=True creates the index when this migration is applied.
            field=models.SmallIntegerField(db_index=True),
        ),
    ]
| {
"content_hash": "8a45e26e7c7807b9291c887e93094456",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 58,
"avg_line_length": 22.90909090909091,
"alnum_prop": 0.5773809523809523,
"repo_name": "jayfk/cloudbench.io",
"id": "b07cb303442a654cb54be2185c9e423ca3f1fa6e",
"size": "528",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cloudbench/benchmarks/migrations/0010_auto_20150724_1505.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "202803"
},
{
"name": "HTML",
"bytes": "25974"
},
{
"name": "JavaScript",
"bytes": "2394"
},
{
"name": "Nginx",
"bytes": "948"
},
{
"name": "Python",
"bytes": "125168"
},
{
"name": "Shell",
"bytes": "248"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Re-point Rating from Project to Task.

    NOTE: RemoveField discards existing rating->project data; the new
    task foreign key starts out null.
    """

    dependencies = [
        ('crowdsourcing', '0119_auto_20160714_0423'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='rating',
            name='project',
        ),
        migrations.AddField(
            model_name='rating',
            name='task',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='crowdsourcing.Task'),
        ),
    ]
| {
"content_hash": "a1838cf198a59f4018f9cd60a03c7080",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 117,
"avg_line_length": 25.391304347826086,
"alnum_prop": 0.6061643835616438,
"repo_name": "shirishgoyal/crowdsource-platform",
"id": "c51d165ae32a05100365ef83d1a5e384bd1d62a7",
"size": "654",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop2",
"path": "crowdsourcing/migrations/0120_auto_20160719_1953.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "63075"
},
{
"name": "HTML",
"bytes": "229504"
},
{
"name": "JavaScript",
"bytes": "312581"
},
{
"name": "Python",
"bytes": "748797"
},
{
"name": "Shell",
"bytes": "838"
}
],
"symlink_target": ""
} |
import unittest, os, json
from subprocess import PIPE, Popen, STDOUT
from system_test import TestCase, Qdrouterd, main_module, DIR, TIMEOUT, Process
from qpid_dispatch.management.client import Node
from proton import SASL
class RouterTestPlainSaslCommon(TestCase):
    """Shared helpers for SASL PLAIN router tests: router spawning plus
    Cyrus SASL credential/config file creation."""

    @classmethod
    def router(cls, name, connection):
        # wait=False: callers decide when (or whether) to wait for the
        # router to come up / connect.
        config = Qdrouterd.Config(connection)

        cls.routers.append(cls.tester.qdrouterd(name, config, wait=False))

    @classmethod
    def createSaslFiles(cls):
        # Create a sasl database.
        p = Popen(['saslpasswd2', '-c', '-p', '-f', 'qdrouterd.sasldb', '-u', 'domain.com', 'test'],
                  stdin=PIPE, stdout=PIPE, stderr=PIPE)
        # NOTE(review): communicate() is handed a str; with pipes opened in
        # default (binary) mode Python 3 expects bytes -- confirm the
        # intended interpreter version.
        result = p.communicate('password')
        assert p.returncode == 0, \
            "saslpasswd2 exit status %s, output:\n%s" % (p.returncode, result)

        # Create a SASL configuration file.
        with open('tests-mech-PLAIN.conf', 'w') as sasl_conf:
            sasl_conf.write("""
pwcheck_method: auxprop
auxprop_plugin: sasldb
sasldb_path: qdrouterd.sasldb
mech_list: ANONYMOUS DIGEST-MD5 EXTERNAL PLAIN
# The following line stops spurious 'sql_select option missing' errors when cyrus-sql-sasl plugin is installed
sql_select: dummy select
""")
class RouterTestPlainSasl(RouterTestPlainSaslCommon):
    """SASL PLAIN authentication tests over a plain (non-TLS) transport."""

    @classmethod
    def setUpClass(cls):
        """
        Tests the sasl_username, sasl_password property of the dispatch router.

        Creates two routers (QDR.X and QDR.Y) and sets up PLAIN authentication on QDR.X.
        QDR.Y connects to QDR.X by providing a sasl_username and a sasl_password.
        """
        super(RouterTestPlainSasl, cls).setUpClass()

        if not SASL.extended():
            # Without the Cyrus SASL library each test skips itself.
            return

        super(RouterTestPlainSasl, cls).createSaslFiles()

        cls.routers = []

        x_listener_port = cls.tester.get_port()
        y_listener_port = cls.tester.get_port()

        # QDR.X: inter-router listener requiring PLAIN, plus two client
        # listeners (one open, one requiring PLAIN) used by qdstat below.
        super(RouterTestPlainSasl, cls).router('X', [
                     ('listener', {'host': '0.0.0.0', 'role': 'inter-router', 'port': x_listener_port,
                                   'saslMechanisms':'PLAIN', 'authenticatePeer': 'yes'}),
                     # This unauthenticated listener is for qdstat to connect to it.
                     ('listener', {'host': '0.0.0.0', 'role': 'normal', 'port': cls.tester.get_port(),
                                   'authenticatePeer': 'no'}),
                     ('listener', {'host': '0.0.0.0', 'role': 'normal', 'port': cls.tester.get_port(),
                                   'saslMechanisms':'PLAIN', 'authenticatePeer': 'yes'}),
                     ('router', {'workerThreads': 1,
                                 'id': 'QDR.X',
                                 'mode': 'interior',
                                 'saslConfigName': 'tests-mech-PLAIN',
                                 'saslConfigPath': os.getcwd()}),
        ])

        # QDR.Y: dials QDR.X's inter-router listener with PLAIN credentials.
        super(RouterTestPlainSasl, cls).router('Y', [
                     ('connector', {'host': '0.0.0.0', 'role': 'inter-router', 'port': x_listener_port,
                                    # Provide a sasl user name and password to connect to QDR.X
                                    'saslMechanisms': 'PLAIN',
                                    'saslUsername': 'test@domain.com',
                                    'saslPassword': 'password'}),
                     ('router', {'workerThreads': 1,
                                 'mode': 'interior',
                                 'id': 'QDR.Y'}),
                     ('listener', {'host': '0.0.0.0', 'role': 'normal', 'port': y_listener_port}),
        ])

        cls.routers[1].wait_router_connected('QDR.X')

    def test_inter_router_plain_exists(self):
        """
        Check authentication of inter-router link is PLAIN.

        This test makes executes a qdstat -c via an unauthenticated listener to
        QDR.X and makes sure that the output has an "inter-router" connection to
        QDR.Y whose authentication is PLAIN. This ensures that QDR.Y did not
        somehow use SASL ANONYMOUS to connect to QDR.X
        """
        if not SASL.extended():
            self.skipTest("Cyrus library not available. skipping test")

        # addresses[1] is the unauthenticated client listener configured above.
        p = self.popen(
            ['qdstat', '-b', str(self.routers[0].addresses[1]), '-c'],
            name='qdstat-'+self.id(), stdout=PIPE, expect=None)
        out = p.communicate()[0]
        assert p.returncode == 0, \
            "qdstat exit status %s, output:\n%s" % (p.returncode, out)

        self.assertIn("inter-router", out)
        self.assertIn("test@domain.com(PLAIN)", out)

    def test_qdstat_connect_sasl(self):
        """
        Make qdstat use sasl plain authentication.
        """
        if not SASL.extended():
            self.skipTest("Cyrus library not available. skipping test")

        # addresses[2] is the PLAIN-authenticated client listener.
        p = self.popen(
            ['qdstat', '-b', str(self.routers[0].addresses[2]), '-c', '--sasl-mechanisms=PLAIN',
             '--sasl-username=test@domain.com', '--sasl-password=password'],
            name='qdstat-'+self.id(), stdout=PIPE, expect=None)

        out = p.communicate()[0]
        assert p.returncode == 0, \
            "qdstat exit status %s, output:\n%s" % (p.returncode, out)

        split_list = out.split()

        # There will be 2 connections that have authenticated using SASL PLAIN. One inter-router connection
        # and the other connection that this qdstat client is making
        self.assertEqual(2, split_list.count("test@domain.com(PLAIN)"))
        self.assertEqual(1, split_list.count("inter-router"))
        self.assertEqual(1, split_list.count("normal"))

    def test_qdstat_connect_sasl_password_file(self):
        """
        Make qdstat use sasl plain authentication with client password specified in a file.
        """
        if not SASL.extended():
            self.skipTest("Cyrus library not available. skipping test")

        password_file = os.getcwd() + '/sasl-client-password-file.txt'
        # Create a SASL configuration file.
        with open(password_file, 'w') as sasl_client_password_file:
            sasl_client_password_file.write("password")

        sasl_client_password_file.close()

        p = self.popen(
            ['qdstat', '-b', str(self.routers[0].addresses[2]), '-c', '--sasl-mechanisms=PLAIN',
             '--sasl-username=test@domain.com', '--sasl-password-file=' + password_file],
            name='qdstat-'+self.id(), stdout=PIPE, expect=None)

        out = p.communicate()[0]
        assert p.returncode == 0, \
            "qdstat exit status %s, output:\n%s" % (p.returncode, out)

        split_list = out.split()

        # There will be 2 connections that have authenticated using SASL PLAIN. One inter-router connection
        # and the other connection that this qdstat client is making
        self.assertEqual(2, split_list.count("test@domain.com(PLAIN)"))
        self.assertEqual(1, split_list.count("inter-router"))
        self.assertEqual(1, split_list.count("normal"))
class RouterTestPlainSaslOverSsl(RouterTestPlainSaslCommon):
    # Exercises SASL PLAIN authentication layered over a TLS connection
    # between two routers: QDR.Y dials QDR.X with username/password.
    @staticmethod
    def ssl_file(name):
        # Resolve a certificate/key file name inside the test ssl_certs dir.
        return os.path.join(DIR, 'ssl_certs', name)
    @classmethod
    def setUpClass(cls):
        """
        Tests the sasl_username, sasl_password property of the dispatch router.
        Creates two routers (QDR.X and QDR.Y) and sets up PLAIN authentication on QDR.X.
        QDR.Y connects to QDR.X by providing a sasl_username and a sasl_password.
        This PLAIN authentication is done over a TLS connection.
        """
        super(RouterTestPlainSaslOverSsl, cls).setUpClass()
        if not SASL.extended():
            # Without the Cyrus SASL library the individual tests skip themselves.
            return
        super(RouterTestPlainSaslOverSsl, cls).createSaslFiles()
        cls.routers = []
        x_listener_port = cls.tester.get_port()
        y_listener_port = cls.tester.get_port()
        # QDR.X: TLS + SASL PLAIN inter-router listener, an unauthenticated
        # 'normal' listener (for management queries), and a TLS+PLAIN
        # 'normal' listener (for authenticated qdstat clients).
        super(RouterTestPlainSaslOverSsl, cls).router('X', [
            ('listener', {'host': '0.0.0.0', 'role': 'inter-router', 'port': x_listener_port,
                          'sslProfile':'server-ssl-profile',
                          'saslMechanisms':'PLAIN', 'authenticatePeer': 'yes'}),
            ('listener', {'host': '0.0.0.0', 'role': 'normal', 'port': cls.tester.get_port(),
                          'authenticatePeer': 'no'}),
            ('listener', {'host': '0.0.0.0', 'role': 'normal', 'port': cls.tester.get_port(),
                          'sslProfile':'server-ssl-profile',
                          'saslMechanisms':'PLAIN', 'authenticatePeer': 'yes'}),
            ('sslProfile', {'name': 'server-ssl-profile',
                            'certDb': cls.ssl_file('ca-certificate.pem'),
                            'certFile': cls.ssl_file('server-certificate.pem'),
                            'keyFile': cls.ssl_file('server-private-key.pem'),
                            'ciphers': 'ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:RSA+AESGCM:RSA+AES:!aNULL:!MD5:!DSS',
                            'password': 'server-password'}),
            ('router', {'workerThreads': 1,
                        'id': 'QDR.X',
                        'mode': 'interior',
                        'saslConfigName': 'tests-mech-PLAIN',
                        'saslConfigPath': os.getcwd()}),
        ])
        super(RouterTestPlainSaslOverSsl, cls).router('Y', [
            # This router will act like a client. First an SSL connection will be established and then
            # we will have SASL plain authentication over SSL.
            ('connector', {'host': '0.0.0.0', 'role': 'inter-router', 'port': x_listener_port,
                           'sslProfile': 'client-ssl-profile',
                           'verifyHostName': 'no',
                           # Provide a sasl user name and password to connect to QDR.X
                           'saslMechanisms': 'PLAIN',
                           'saslUsername': 'test@domain.com',
                           'saslPassword': 'password'}),
            ('router', {'workerThreads': 1,
                        'mode': 'interior',
                        'id': 'QDR.Y'}),
            ('listener', {'host': '0.0.0.0', 'role': 'normal', 'port': y_listener_port}),
            ('sslProfile', {'name': 'client-ssl-profile',
                            'certDb': cls.ssl_file('ca-certificate.pem'),
                            'certFile': cls.ssl_file('client-certificate.pem'),
                            'keyFile': cls.ssl_file('client-private-key.pem'),
                            'password': 'client-password'}),
        ])
        cls.routers[1].wait_router_connected('QDR.X')
    def test_aaa_qdstat_connect_sasl_over_ssl(self):
        """
        Make qdstat use sasl plain authentication over ssl.
        """
        if not SASL.extended():
            self.skipTest("Cyrus library not available. skipping test")
        # addresses[2] is the TLS + SASL PLAIN 'normal' listener on QDR.X.
        p = self.popen(
            ['qdstat', '-b', str(self.routers[0].addresses[2]), '-c',
             # The following are SASL args
             '--sasl-mechanisms=PLAIN',
             '--sasl-username=test@domain.com',
             '--sasl-password=password',
             # The following are SSL args
             '--ssl-disable-peer-name-verify',
             '--ssl-trustfile=' + self.ssl_file('ca-certificate.pem'),
             '--ssl-certificate=' + self.ssl_file('client-certificate.pem'),
             '--ssl-key=' + self.ssl_file('client-private-key.pem'),
             '--ssl-password=client-password'],
            name='qdstat-'+self.id(), stdout=PIPE, expect=None)
        out = p.communicate()[0]
        assert p.returncode == 0, \
            "qdstat exit status %s, output:\n%s" % (p.returncode, out)
        split_list = out.split()
        # There will be 2 connections that have authenticated using SASL PLAIN. One inter-router connection
        # and the other connection that this qdstat client is making
        self.assertEqual(2, split_list.count("test@domain.com(PLAIN)"))
        self.assertEqual(1, split_list.count("inter-router"))
        self.assertEqual(1, split_list.count("normal"))
    def test_inter_router_plain_over_ssl_exists(self):
        """The setUpClass sets up two routers with SASL PLAIN enabled over TLS.
        This test makes executes a query for type='org.apache.qpid.dispatch.connection' over
        an unauthenticated listener to
        QDR.X and makes sure that the output has an "inter-router" connection to
        QDR.Y whose authentication is PLAIN. This ensures that QDR.Y did not
        somehow use SASL ANONYMOUS to connect to QDR.X
        Also makes sure that TLSv1.x was used as sslProto
        """
        if not SASL.extended():
            self.skipTest("Cyrus library not available. skipping test")
        local_node = Node.connect(self.routers[0].addresses[1], timeout=TIMEOUT)
        results = local_node.query(type='org.apache.qpid.dispatch.connection').results
        # Query result columns are positional; indices below follow the
        # connection entity's attribute order. TODO confirm against schema.
        # sslProto should be TLSv1.x
        self.assertTrue(u'TLSv1' in results[0][10])
        # role should be inter-router
        self.assertEqual(u'inter-router', results[0][3])
        # sasl must be plain
        self.assertEqual(u'PLAIN', results[0][6])
        # user must be test@domain.com
        self.assertEqual(u'test@domain.com', results[0][8])
class RouterTestVerifyHostNameYes(RouterTestPlainSaslCommon):
    # Negative test: with verifyHostName=yes and a non-matching certificate
    # hostname, the inter-router TLS connection must fail.
    @staticmethod
    def ssl_file(name):
        # Resolve a certificate/key file name inside the test ssl_certs dir.
        return os.path.join(DIR, 'ssl_certs', name)
    @classmethod
    def setUpClass(cls):
        """
        Tests the verifyHostName property of the connector. The hostname on the server certificate we use is
        A1.Good.Server.domain.com and the host is 0.0.0.0 on the client router initiating the SSL connection.
        Since the host names do not match and the verifyHostName is set to true, the client router
        will NOT be able make a successful SSL connection the server router.
        """
        super(RouterTestVerifyHostNameYes, cls).setUpClass()
        if not SASL.extended():
            # Without the Cyrus SASL library the individual tests skip themselves.
            return
        super(RouterTestVerifyHostNameYes, cls).createSaslFiles()
        cls.routers = []
        x_listener_port = cls.tester.get_port()
        y_listener_port = cls.tester.get_port()
        super(RouterTestVerifyHostNameYes, cls).router('X', [
            ('listener', {'host': '0.0.0.0', 'role': 'inter-router', 'port': x_listener_port,
                          'sslProfile':'server-ssl-profile',
                          'saslMechanisms':'PLAIN', 'authenticatePeer': 'yes'}),
            # This unauthenticated listener is for qdstat to connect to it.
            ('listener', {'host': '0.0.0.0', 'role': 'normal', 'port': cls.tester.get_port(),
                          'authenticatePeer': 'no'}),
            ('sslProfile', {'name': 'server-ssl-profile',
                            'certDb': cls.ssl_file('ca-certificate.pem'),
                            'certFile': cls.ssl_file('server-certificate.pem'),
                            'keyFile': cls.ssl_file('server-private-key.pem'),
                            'password': 'server-password'}),
            ('router', {'workerThreads': 1,
                        'id': 'QDR.X',
                        'mode': 'interior',
                        'saslConfigName': 'tests-mech-PLAIN',
                        'saslConfigPath': os.getcwd()}),
        ])
        super(RouterTestVerifyHostNameYes, cls).router('Y', [
            # verifyHostName=yes: the certificate CN does not match
            # 127.0.0.1, so this connector is expected to fail.
            ('connector', {'host': '127.0.0.1', 'role': 'inter-router', 'port': x_listener_port,
                           'sslProfile': 'client-ssl-profile',
                           'verifyHostName': 'yes',
                           'saslMechanisms': 'PLAIN',
                           'saslUsername': 'test@domain.com',
                           'saslPassword': 'password'}),
            ('router', {'workerThreads': 1,
                        'mode': 'interior',
                        'id': 'QDR.Y'}),
            ('listener', {'host': '0.0.0.0', 'role': 'normal', 'port': y_listener_port}),
            ('sslProfile', {'name': 'client-ssl-profile',
                            'certDb': cls.ssl_file('ca-certificate.pem'),
                            'certFile': cls.ssl_file('client-certificate.pem'),
                            'keyFile': cls.ssl_file('client-private-key.pem'),
                            'password': 'client-password'}),
        ])
        cls.routers[0].wait_ports()
        cls.routers[1].wait_ports()
        try:
            # This will time out because there is no inter-router connection
            cls.routers[1].wait_connectors(timeout=3)
        except Exception:
            # The timeout is expected here; swallow only ordinary exceptions
            # (a bare 'except:' would also trap SystemExit/KeyboardInterrupt).
            pass
    def test_no_inter_router_connection(self):
        """
        Tests to make sure that there are no 'inter-router' connections.
        The connection to the other router will not happen because the connection failed
        due to setting 'verifyHostName': 'yes'
        """
        if not SASL.extended():
            self.skipTest("Cyrus library not available. skipping test")
        local_node = Node.connect(self.routers[1].addresses[0], timeout=TIMEOUT)
        results = local_node.query(type='org.apache.qpid.dispatch.connection').results
        # There should be only two connections.
        # There will be no inter-router connection
        self.assertEqual(2, len(results))
        self.assertEqual('in', results[0][4])
        self.assertEqual('normal', results[0][3])
        self.assertEqual('anonymous', results[0][8])
        self.assertEqual('normal', results[1][3])
        self.assertEqual('anonymous', results[1][8])
class RouterTestVerifyHostNameNo(RouterTestPlainSaslCommon):
    # Positive counterpart to RouterTestVerifyHostNameYes: with
    # verifyHostName=no the non-matching certificate hostname is tolerated
    # and the inter-router connection succeeds.
    #
    # Note: the original class defined this identical ssl_file staticmethod
    # twice (once here and once after setUpClass); the redundant second
    # definition has been removed.
    @staticmethod
    def ssl_file(name):
        # Resolve a certificate/key file name inside the test ssl_certs dir.
        return os.path.join(DIR, 'ssl_certs', name)
    # Saved so test_zzz_delete_create_ssl_profile can recreate the connector.
    x_listener_port = None
    @classmethod
    def setUpClass(cls):
        """
        Tests the verifyHostName property of the connector. The hostname on the server certificate we use is
        A1.Good.Server.domain.com and the host is 0.0.0.0 on the client router initiating the SSL connection.
        Since the host names do not match but verifyHostName is set to false, the client router
        will be successfully able to make an SSL connection the server router.
        """
        super(RouterTestVerifyHostNameNo, cls).setUpClass()
        if not SASL.extended():
            # Without the Cyrus SASL library the individual tests skip themselves.
            return
        super(RouterTestVerifyHostNameNo, cls).createSaslFiles()
        cls.routers = []
        x_listener_port = cls.tester.get_port()
        RouterTestVerifyHostNameNo.x_listener_port = x_listener_port
        y_listener_port = cls.tester.get_port()
        super(RouterTestVerifyHostNameNo, cls).router('X', [
            ('listener', {'host': '0.0.0.0', 'role': 'inter-router', 'port': x_listener_port,
                          'sslProfile':'server-ssl-profile',
                          'saslMechanisms':'PLAIN', 'authenticatePeer': 'yes'}),
            # This unauthenticated listener is for qdstat to connect to it.
            ('listener', {'host': '0.0.0.0', 'role': 'normal', 'port': cls.tester.get_port(),
                          'authenticatePeer': 'no'}),
            ('sslProfile', {'name': 'server-ssl-profile',
                            'certDb': cls.ssl_file('ca-certificate.pem'),
                            'certFile': cls.ssl_file('server-certificate.pem'),
                            'keyFile': cls.ssl_file('server-private-key.pem'),
                            'password': 'server-password'}),
            ('router', {'workerThreads': 1,
                        'id': 'QDR.X',
                        'mode': 'interior',
                        'saslConfigName': 'tests-mech-PLAIN',
                        'saslConfigPath': os.getcwd()}),
        ])
        super(RouterTestVerifyHostNameNo, cls).router('Y', [
            # This router will act like a client. First an SSL connection will be established and then
            # we will have SASL plain authentication over SSL.
            ('connector', {'name': 'connectorToX',
                           'host': '127.0.0.1', 'role': 'inter-router',
                           'port': x_listener_port,
                           'sslProfile': 'client-ssl-profile',
                           # Provide a sasl user name and password to connect to QDR.X
                           'saslMechanisms': 'PLAIN',
                           'verifyHostName': 'no',
                           'saslUsername': 'test@domain.com', 'saslPassword': 'password'}),
            ('router', {'workerThreads': 1,
                        'mode': 'interior',
                        'id': 'QDR.Y'}),
            ('listener', {'host': '0.0.0.0', 'role': 'normal', 'port': y_listener_port}),
            ('sslProfile', {'name': 'client-ssl-profile',
                            'certDb': cls.ssl_file('ca-certificate.pem'),
                            'certFile': cls.ssl_file('client-certificate.pem'),
                            'keyFile': cls.ssl_file('client-private-key.pem'),
                            'password': 'client-password'}),
        ])
        cls.routers[0].wait_ports()
        cls.routers[1].wait_ports()
        cls.routers[1].wait_router_connected('QDR.X')
    def common_asserts(self, results):
        """Assert that the QDR.X connection in a connection-query result set
        is an inter-router link authenticated as test@domain.com via PLAIN
        over TLSv1.x."""
        search = "QDR.X"
        found = False
        for N in range(0, len(results)):
            if results[N][5] == search:
                found = True
                break
        self.assertTrue(found, "Connection to %s not found" % search)
        # Query result columns are positional; indices below follow the
        # connection entity's attribute order. TODO confirm against schema.
        # sslProto should be TLSv1.x
        self.assertTrue(u'TLSv1' in results[N][10])
        # role should be inter-router
        self.assertEqual(u'inter-router', results[N][3])
        # sasl must be plain
        self.assertEqual(u'PLAIN', results[N][6])
        # user must be test@domain.com
        self.assertEqual(u'test@domain.com', results[N][8])
    def test_inter_router_plain_over_ssl_exists(self):
        """
        Tests to make sure that an inter-router connection exists between the routers since verifyHostName is 'no'.
        """
        if not SASL.extended():
            self.skipTest("Cyrus library not available. skipping test")
        local_node = Node.connect(self.routers[1].addresses[0], timeout=TIMEOUT)
        results = local_node.query(type='org.apache.qpid.dispatch.connection').results
        self.common_asserts(results)
    def test_zzz_delete_create_ssl_profile(self):
        """
        Deletes a connector and its corresponding ssl profile and recreates both
        """
        if not SASL.extended():
            self.skipTest("Cyrus library not available. skipping test")
        local_node = self.routers[1].management
        connections = local_node.query(type='org.apache.qpid.dispatch.connection').get_entities()
        self.assertIn("QDR.X", [c.container for c in connections])  # We can find the connection before
        local_node.delete(type='connector', name='connectorToX')
        local_node.delete(type='sslProfile', name='client-ssl-profile')
        connections = local_node.query(type='org.apache.qpid.dispatch.connection').get_entities()
        self.assertNotIn("QDR.X", [c.container for c in connections])  # Should not be present now
        # re-create the ssl profile
        local_node.create({'type': 'sslProfile',
                           'name': 'client-ssl-profile',
                           'certFile': self.ssl_file('client-certificate.pem'),
                           'keyFile': self.ssl_file('client-private-key.pem'),
                           'password': 'client-password',
                           'certDb': self.ssl_file('ca-certificate.pem')})
        # re-create connector
        local_node.create({'type': 'connector',
                           'name': 'connectorToX',
                           'host': '127.0.0.1',
                           'port': self.x_listener_port,
                           'saslMechanisms': 'PLAIN',
                           'sslProfile': 'client-ssl-profile',
                           'role': 'inter-router',
                           'verifyHostName': False,
                           'saslUsername': 'test@domain.com',
                           'saslPassword': 'password'})
        self.routers[1].wait_connectors()
        results = local_node.query(type='org.apache.qpid.dispatch.connection').results
        self.common_asserts(results)
if __name__ == '__main__':
    # Run this module's tests via the shared system-test entry point.
    unittest.main(main_module())
| {
"content_hash": "5e5162ecf906bb266d3fba2099cab64c",
"timestamp": "",
"source": "github",
"line_count": 552,
"max_line_length": 149,
"avg_line_length": 45.93297101449275,
"alnum_prop": 0.5357128771445474,
"repo_name": "lulf/qpid-dispatch",
"id": "05e591f18f6c4a411d6264c590b77d7ebefed0ae",
"size": "26145",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/system_tests_sasl_plain.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1237842"
},
{
"name": "C++",
"bytes": "33945"
},
{
"name": "CMake",
"bytes": "26159"
},
{
"name": "CSS",
"bytes": "151847"
},
{
"name": "HTML",
"bytes": "105127"
},
{
"name": "Java",
"bytes": "1940"
},
{
"name": "JavaScript",
"bytes": "1948607"
},
{
"name": "Objective-C",
"bytes": "8049"
},
{
"name": "Python",
"bytes": "935562"
},
{
"name": "Shell",
"bytes": "22812"
}
],
"symlink_target": ""
} |
import sys
def formatFloat(flt):
    """Format a number as a minimal decimal string.

    Trailing zeros after the decimal point are removed, and a then-dangling
    period is removed too, so 1.50 -> "1.5" and 0.0 -> "0". We can't strip
    both characters at once, otherwise "0.000" would become "" instead of "0".
    """
    # Normalise to float first: the original str(flt).rstrip('0') corrupted
    # integer input (str(100).rstrip('0') -> "1").
    text = str(float(flt))
    # Only strip when there is a plain decimal point; stripping a
    # scientific-notation string like "1e-10" would mangle the exponent.
    if '.' in text and 'e' not in text and 'E' not in text:
        text = text.rstrip('0').rstrip('.')
    return text
class Node(object):
    """Base class for every fuzzy-brain node.

    Holds the attributes common to all node types (id, name, editor
    translate position, input connections, parent) plus a matching
    ``_<attr>Set`` flag for each, so that dump() only writes fields that
    were explicitly present when the node was loaded or set.
    """
    def __init__(self):
        self._id = 0
        self._idSet = False
        self._name = ""
        self._nameSet = False
        self._translate = []
        self._translateSet = False
        self._inputs = []
        self._inputsSet = False
        self._altInputs = []
        self._altInputsSet = False
        self._parent = 0
        self._parentSet = False
        # Incremented by connect, decremented by disconnect
        # Initialized by calling Brain.countOutputConnections
        self._numOutputs = 0
    # Each property setter also records that the field was explicitly set,
    # which controls whether dump() emits it.
    def _setId(self, val):
        self._id = val
        self._idSet = True
    def _getId(self): return self._id
    id = property(_getId, _setId)
    def _setName(self, val):
        self._name = val
        self._nameSet = True
    def _getName(self): return self._name
    name = property(_getName, _setName)
    def _setTranslate(self, val):
        self._translate = val
        self._translateSet = True
    def _getTranslate(self): return self._translate
    translate = property(_getTranslate, _setTranslate)
    def _setInputs(self, val):
        self._inputs = val
        self._inputsSet = True
    def _getInputs(self): return self._inputs
    inputs = property(_getInputs, _setInputs)
    def _setAltInputs(self, val):
        self._altInputs = val
        self._altInputsSet = True
    def _getAltInputs(self): return self._altInputs
    altInputs = property(_getAltInputs, _setAltInputs)
    def _setParent(self, val):
        self._parent = val
        self._parentSet = True
    def _getParent(self): return self._parent
    parent = property(_getParent, _setParent)
    def _setNumOutputs(self, val): self._numOutputs = val
    def _getNumOutputs(self): return self._numOutputs
    numOutputs = property(_getNumOutputs, _setNumOutputs)
    def connect(self, node):
        # 'node' is accepted for interface symmetry; only the count is kept.
        self._numOutputs += 1
    def disconnect(self, node):
        self._numOutputs -= 1
    def load(self, fileHandle):
        """Parse this node's indented attribute lines from fileHandle.

        Reads until a non-indented line (the start of the next node) is
        found; that line is returned so the caller can continue parsing.
        Common keys are handled here; anything else is delegated to the
        subclass's _parseTokens().
        """
        line = ""
        for line in fileHandle:
            tokens = line.strip().split()
            # A line that does not start with whitespace belongs to the
            # next node definition: stop without consuming it further.
            if not line[0].isspace():
                break
            if tokens:
                if tokens[0] == "id":
                    self.id = int(tokens[1])
                elif tokens[0] == "name":
                    # Names may contain spaces; rejoin the remaining tokens.
                    self.name = " ".join(tokens[1:])
                elif tokens[0] == "translate":
                    self.translate = [ int(tokens[1]), int(tokens[2]) ]
                elif tokens[0] == "parent":
                    self.parent = int(tokens[1])
                elif len(tokens) > 2 and (tokens[1] == "inputs" or tokens[1] == "input"):
                    # Format: "<count> input(s) <id> <id> ..."; the count
                    # token is ignored and the ids are taken as given.
                    self.inputs = [ int(input) for input in tokens[2:] ]
                elif len(tokens) > 3 and tokens[1] == "alt" and (tokens[2] == "inputs" or tokens[2] == "input"):
                    # Format: "<count> alt input(s) <id> <id> ..."
                    self.altInputs = [ int(input) for input in tokens[3:] ]
                else:
                    self._parseTokens(tokens)
        return line
    def _dumpHeader(self, fileHandle):
        # Write the common leading fields (only those explicitly set).
        if self._idSet:
            fileHandle.write(" id %d\n" % self.id)
        if self._nameSet:
            fileHandle.write(" name %s\n" % self.name)
        if self._translateSet:
            fileHandle.write(" translate %d %d\n" % (self.translate[0], self.translate[1]))
    def _dumpFooter(self, fileHandle):
        # Write the common trailing fields: input lists (with singular/plural
        # keyword matching the count) and the parent id.
        if self._inputsSet:
            if len(self.inputs) == 1:
                fileHandle.write(" %d input" % len(self.inputs))
            else:
                fileHandle.write(" %d inputs" % len(self.inputs))
            for input in self.inputs:
                fileHandle.write(" %d" % input)
            fileHandle.write("\n")
        if self._altInputsSet:
            if len(self.altInputs) == 1:
                fileHandle.write(" %d alt input" % len(self.altInputs))
            else:
                fileHandle.write(" %d alt inputs" % len(self.altInputs))
            for input in self.altInputs:
                fileHandle.write(" %d" % input)
            fileHandle.write("\n")
        if self._parentSet:
            fileHandle.write(" parent %d\n" % self.parent)
class Input(Node):
    """Fuzzy input node: binds an agent channel to the network.

    Stores the channel name, an optional integration mode, a value range
    and a manual output value; each field is only written out by dump()
    when it was explicitly set.
    """
    kIntegrateValues = ["position", "speed"]
    def __init__(self):
        super(Input, self).__init__()
        self._channel = ""
        self._channelSet = False
        self._integrate = ""
        self._integrateSet = False
        self._range = []
        self._rangeSet = False
        self._output = 0
        self._outputSet = False
    def _setChannel(self, value):
        self._channel = value
        self._channelSet = True
    def _getChannel(self):
        return self._channel
    channel = property(_getChannel, _setChannel)
    def _setIntegrate(self, value):
        self._integrate = value
        self._integrateSet = True
    def _getIntegrate(self):
        return self._integrate
    integrate = property(_getIntegrate, _setIntegrate)
    def _setRange(self, value):
        self._range = value
        self._rangeSet = True
    def _getRange(self):
        return self._range
    range = property(_getRange, _setRange)
    def _setOutput(self, value):
        self._output = value
        self._outputSet = True
    def _getOutput(self):
        return self._output
    output = property(_getOutput, _setOutput)
    def _parseTokens(self, tokens):
        # Input-specific keys; everything common is handled by Node.load().
        key = tokens[0]
        if key == "channel":
            # "channel" may legitimately appear with no value; skip it then.
            if len(tokens) > 1:
                self.channel = tokens[1]
        elif key == "integrate":
            self.integrate = tokens[1]
        elif key == "range":
            self.range = [float(tokens[1]), float(tokens[2])]
        elif key == "output":
            self.output = float(tokens[1])
    def dump(self, fileHandle):
        # Emit this node in the same keyword format load() consumes.
        write = fileHandle.write
        write("fuzzy input\n")
        self._dumpHeader(fileHandle)
        if self._channelSet:
            write(" channel %s\n" % self.channel)
        if self._outputSet:
            write(" output %f\n" % self.output)
        if self._integrateSet:
            write(" integrate %s\n" % self.integrate)
        if self._rangeSet:
            write(" range %f %f\n" % (self.range[0], self.range[1]))
        self._dumpFooter(fileHandle)
class Output(Node):
    """Fuzzy output node: drives an agent channel from the network.

    Carries channel binding, defuzzification method, integration mode,
    range, delay/rate parameters and an optional manual output value.
    Each field has a companion ``_<attr>Set`` flag so dump() only writes
    fields that were explicitly set.
    """
    kIntegrateValues = ["position", "speed"]
    kDefuzzValues = ["COM", "MOM", "BLEND"]
    def __init__(self):
        super(Output, self).__init__()
        self._channel = ""
        self._channelSet = False
        self._defuzz = ""
        self._defuzzSet = False
        self._integrate = ""
        self._integrateSet = False
        self._range = []
        self._rangeSet = False
        self._delay = 0.0
        self._delaySet = False
        self._rate = 0.0
        self._rateSet = False
        self._output = 0.0
        self._outputSet = False
        self._manual = False
    # Setters record that the field was explicitly set (controls dump()).
    def _setChannel(self, val):
        self._channel = val
        self._channelSet = True
    def _getChannel(self): return self._channel
    channel = property(_getChannel, _setChannel)
    def _setDefuzz(self, val):
        self._defuzz = val
        self._defuzzSet = True
    def _getDefuzz(self): return self._defuzz
    defuzz = property(_getDefuzz, _setDefuzz)
    def _setIntegrate(self, val):
        self._integrate = val
        self._integrateSet = True
    def _getIntegrate(self): return self._integrate
    integrate = property(_getIntegrate, _setIntegrate)
    def _setRange(self, val):
        self._range = val
        self._rangeSet = True
    def _getRange(self): return self._range
    range = property(_getRange, _setRange)
    def _setDelay(self, val):
        self._delay = val
        self._delaySet = True
    def _getDelay(self): return self._delay
    delay = property(_getDelay, _setDelay)
    def _setRate(self, val):
        self._rate = val
        self._rateSet = True
    def _getRate(self): return self._rate
    rate = property(_getRate, _setRate)
    def _setOutput(self, val):
        self._output = val
        self._outputSet = True
    def _getOutput(self): return self._output
    output = property(_getOutput, _setOutput)
    # 'manual' is a plain flag: no companion Set flag, dump() writes the
    # bare keyword when it is True.
    def _setManual(self, val):
        self._manual = val
    def _getManual(self): return self._manual
    manual = property(_getManual, _setManual)
    def _parseTokens(self, tokens):
        ''' If Manual is enabled the output value is stored in the 'output'
        token.'''
        if tokens[0] == "channel":
            # "channel" may appear with no value; skip it then.
            if len(tokens) > 1:
                self.channel = tokens[1]
        elif tokens[0] == "defuzz":
            self.defuzz = tokens[1]
        elif tokens[0] == "integrate":
            self.integrate = tokens[1]
        elif tokens[0] == "range":
            self.range = [ float(tokens[1]), float(tokens[2]) ]
        elif tokens[0] == "delay":
            self.delay = float(tokens[1])
        elif tokens[0] == "rate":
            self.rate = float(tokens[1])
        elif tokens[0] == "output":
            self.output = float(tokens[1])
        elif tokens[0] == "manual":
            self.manual = True
    def dump(self, fileHandle):
        # Emit this node in the same keyword format _parseTokens consumes.
        fileHandle.write("fuzzy output\n")
        self._dumpHeader(fileHandle)
        if self._channelSet:
            fileHandle.write(" channel %s\n" % self.channel)
        if self._outputSet:
            fileHandle.write(" output %f\n" % self.output)
        if self._manual:
            fileHandle.write(" manual\n")
        if self._defuzzSet:
            fileHandle.write(" defuzz %s\n" % self.defuzz)
        if self._integrateSet:
            fileHandle.write(" integrate %s\n" % self.integrate)
        if self._rangeSet:
            fileHandle.write(" range %f %f\n" % (self.range[0], self.range[1]))
        if self._delaySet:
            fileHandle.write(" delay %f\n" % self.delay)
        if self._rateSet:
            fileHandle.write(" rate %f\n" % self.rate)
        self._dumpFooter(fileHandle)
class Fuzz(Node):
    """Fuzzification node: maps a crisp input to a membership value.

    The inference shape (z, lamda, pi, s or singleton) determines how many
    control points are expected (see kInferenceNum); interpolation selects
    how values between the points are blended, and 'wrap' marks the input
    domain as circular.
    """
    kInterpolationValues = ["cosine", "linear"]
    kInferenceValues = ["z", "lamda", "pi", "s", "singleton"]
    # Number of control points each inference shape requires.
    kInferenceNum = {'z':2,
                     'lamda':3,
                     'pi':4,
                     's':2,
                     'singleton':1}
    def __init__(self):
        super(Fuzz, self).__init__()
        self._inference = ""
        self._inferenceSet = False
        self._inferencePoints = []
        self._interpolation = ""
        self._interpolationSet = False
        self._wrap = False
    def _setInference(self, val):
        self._inference = val
        self._inferenceSet = True
    def _getInference(self): return self._inference
    inference = property(_getInference, _setInference)
    def _setInferencePoints(self, val):
        self._inferencePoints = val
    def _getInferencePoints(self): return self._inferencePoints
    inferencePoints = property(_getInferencePoints, _setInferencePoints)
    def _setInterpolation(self, val):
        self._interpolation = val
        self._interpolationSet = True
    def _getInterpolation(self): return self._interpolation
    interpolation = property(_getInterpolation, _setInterpolation)
    def _setWrap(self, val):
        self._wrap = val
    def _getWrap(self): return self._wrap
    wrap = property(_getWrap, _setWrap)
    def _parseTokens(self, tokens):
        # Fuzz-specific lines put the keyword SECOND ("<shape> inference
        # <points...>", "<mode> interpolation"), unlike the other node types.
        if tokens[0] == "wrap":
            self.wrap = True
        elif len(tokens) > 1 and tokens[1] == "interpolation":
            self.interpolation = tokens[0]
        elif len(tokens) > 2 and tokens[1] == "inference":
            self.inference = tokens[0]
            self.inferencePoints = [ float(pt) for pt in tokens[2:] ]
    def dump(self, fileHandle):
        # Emit this node in the same keyword format _parseTokens consumes.
        fileHandle.write("fuzzy fuzz\n")
        self._dumpHeader(fileHandle)
        if self._inferenceSet:
            fileHandle.write(" %s inference" % self.inference)
            for point in self.inferencePoints:
                fileHandle.write(" %f" % point)
            fileHandle.write("\n")
        if self._interpolationSet:
            fileHandle.write(" %s interpolation\n" % self.interpolation)
        if self._wrap:
            fileHandle.write(" wrap\n")
        self._dumpFooter(fileHandle)
class Rule(Node):
    """Fuzzy rule (AND) node with a weight and a combination type
    ("min" or "prod")."""
    kTypeValues = ["min", "prod"]
    def __init__(self):
        super(Rule, self).__init__()
        self._weight = 0.0
        self._weightSet = False
        self._type = "min"
        self._typeSet = False
    def _setWeight(self, value):
        self._weight = value
        self._weightSet = True
    def _getWeight(self):
        return self._weight
    weight = property(_getWeight, _setWeight)
    def _setType(self, value):
        self._type = value
        self._typeSet = True
    def _getType(self):
        return self._type
    type = property(_getType, _setType)
    def _parseTokens(self, tokens):
        # Rule-specific keys: "weight <w>" and "and <min|prod>".
        key = tokens[0]
        if key == "weight":
            self.weight = float(tokens[1])
        elif key == "and":
            self.type = tokens[1]
    def dump(self, fileHandle):
        # Emit this node in the same keyword format _parseTokens consumes.
        write = fileHandle.write
        write("fuzzy rule\n")
        self._dumpHeader(fileHandle)
        if self._weightSet:
            write(" weight %f\n" % self.weight)
        if self._typeSet:
            write(" and %s\n" % self.type)
        self._dumpFooter(fileHandle)
class Or(Node):
    """Fuzzy OR node with a weight and a combination type
    ("max" or "sum"); mirrors Rule but combines with "or"."""
    kTypeValues = ["max", "sum"]
    def __init__(self):
        super(Or, self).__init__()
        self._weight = 0.0
        self._weightSet = False
        self._type = "max"
        self._typeSet = False
    def _setWeight(self, value):
        self._weight = value
        self._weightSet = True
    def _getWeight(self):
        return self._weight
    weight = property(_getWeight, _setWeight)
    def _setType(self, value):
        self._type = value
        self._typeSet = True
    def _getType(self):
        return self._type
    type = property(_getType, _setType)
    def _parseTokens(self, tokens):
        # Or-specific keys: "weight <w>" and "or <max|sum>".
        key = tokens[0]
        if key == "weight":
            self.weight = float(tokens[1])
        elif key == "or":
            self.type = tokens[1]
    def dump(self, fileHandle):
        # Emit this node in the same keyword format _parseTokens consumes.
        write = fileHandle.write
        write("fuzzy or\n")
        self._dumpHeader(fileHandle)
        if self._weightSet:
            write(" weight %f\n" % self.weight)
        if self._typeSet:
            write(" or %s\n" % self.type)
        self._dumpFooter(fileHandle)
class Defuzz(Node):
    """Defuzzification node: carries a crisp output value and an optional
    'else' flag (fires when no other defuzz in the group does)."""
    def __init__(self):
        super(Defuzz, self).__init__()
        self._defuzz = 0.0
        self._defuzzSet = False
        self._isElse = False
    def _setDefuzz(self, value):
        self._defuzz = value
        self._defuzzSet = True
    def _getDefuzz(self):
        return self._defuzz
    defuzz = property(_getDefuzz, _setDefuzz)
    def _setIsElse(self, value):
        self._isElse = value
    def _getIsElse(self):
        return self._isElse
    isElse = property(_getIsElse, _setIsElse)
    def _parseTokens(self, tokens):
        # Defuzz-specific keys: "defuzz <value>" and the bare "else" flag.
        key = tokens[0]
        if key == "defuzz":
            self.defuzz = float(tokens[1])
        elif key == "else":
            self.isElse = True
    def dump(self, fileHandle):
        # Emit this node in the same keyword format _parseTokens consumes.
        write = fileHandle.write
        write("fuzzy defuzz\n")
        self._dumpHeader(fileHandle)
        if self._defuzzSet:
            write(" defuzz %f\n" % self.defuzz)
        if self._isElse:
            write(" else\n")
        self._dumpFooter(fileHandle)
class Noise(Node):
    """Noise generator node: emits a pseudo-random signal controlled by a
    rate and a seed; an optional manual 'output' value overrides it."""
    def __init__(self):
        super(Noise, self).__init__()
        self._rate = 0
        self._rateSet = False
        self._seed = 0
        self._seedSet = False
        self._output = 0.0
        self._outputSet = False
    def _setRate(self, val):
        self._rate = val
        self._rateSet = True
    def _getRate(self): return self._rate
    rate = property(_getRate, _setRate)
    def _setSeed(self, val):
        self._seed = val
        self._seedSet = True
    def _getSeed(self): return self._seed
    seed = property(_getSeed, _setSeed)
    def _setOutput(self, val):
        self._output = val
        self._outputSet = True
    def _getOutput(self): return self._output
    output = property(_getOutput, _setOutput)
    def _parseTokens(self, tokens):
        '''If Manual is checked, the noise value is written as 'output'.'''
        if tokens[0] == "rate":
            self.rate = float(tokens[1])
        elif tokens[0] == "seed":
            self.seed = int(tokens[1])
        elif tokens[0] == "output":
            self.output = float(tokens[1])
    def dump(self, fileHandle):
        # Emit this node in the same keyword format _parseTokens consumes.
        # rate/output use formatFloat so trailing zeros are trimmed.
        fileHandle.write("fuzzy noise\n")
        self._dumpHeader(fileHandle)
        if self._rateSet:
            fileHandle.write(" rate %s\n" % formatFloat(self.rate))
        if self._seedSet:
            fileHandle.write(" seed %d\n" % self.seed)
        if self._outputSet:
            fileHandle.write(" output %s\n" % formatFloat(self.output))
        self._dumpFooter(fileHandle)
class Timer(Node):
    """Timer node: produces a time-driven signal with a rate, a trigger
    condition, an output range, and an optional endless flag."""
    kTriggerValues = ["if_stopped", "always"]
    def __init__(self):
        super(Timer, self).__init__()
        self._rate = 0
        self._rateSet = False
        self._trigger = ""
        self._triggerSet= False
        self._range = [0.0, 1.0]
        self._rangeSet = False
        self._endless = False
    def _setRate(self, val):
        self._rate = val
        self._rateSet = True
    def _getRate(self): return self._rate
    rate = property(_getRate, _setRate)
    def _setTrigger(self, val):
        self._trigger = val
        self._triggerSet = True
    def _getTrigger(self): return self._trigger
    trigger = property(_getTrigger, _setTrigger)
    def _setRange(self, val):
        self._range = val
        self._rangeSet = True
    def _getRange(self): return self._range
    range = property(_getRange, _setRange)
    def _setEndless(self, val):
        self._endless = val
    def _getEndless(self): return self._endless
    endless = property(_getEndless, _setEndless)
    def _parseTokens(self, tokens):
        # Timer-specific keys: "rate", "trigger", "range" and the bare
        # "endless" flag.
        if tokens[0] == "rate":
            self.rate = float(tokens[1])
        elif tokens[0] == "trigger":
            self.trigger = tokens[1]
        elif tokens[0] == "range":
            self.range = [float(tokens[1]), float(tokens[2])]
        elif tokens[0] == "endless":
            self.endless = True
    def dump(self, fileHandle):
        # Emit this node in the same keyword format _parseTokens consumes.
        # rate/range use formatFloat so trailing zeros are trimmed.
        fileHandle.write("fuzzy timer\n")
        self._dumpHeader(fileHandle)
        if self._rateSet:
            fileHandle.write(" rate %s\n" % formatFloat(self.rate))
        if self._triggerSet:
            fileHandle.write(" trigger %s\n" % self.trigger)
        if self._rangeSet:
            fileHandle.write(" range %s %s\n" % (formatFloat(self.range[0]), formatFloat(self.range[1])))
        if self._endless:
            fileHandle.write(" endless\n")
        self._dumpFooter(fileHandle)
class Macro(Node):
    """Macro node: stores the id of its child node."""
    def __init__(self):
        super(Macro, self).__init__()
        self._child = 0
        self._childSet = False
    def _setChild(self, value):
        self._child = value
        self._childSet = True
    def _getChild(self):
        return self._child
    child = property(_getChild, _setChild)
    def _parseTokens(self, tokens):
        # Only macro-specific key: "child <id>".
        if tokens[0] == "child":
            self.child = int(tokens[1])
    def dump(self, fileHandle):
        # Emit this node in the same keyword format _parseTokens consumes.
        write = fileHandle.write
        write("fuzzy macro\n")
        self._dumpHeader(fileHandle)
        if self._childSet:
            write(" child %d\n" % self.child)
        self._dumpFooter(fileHandle)
class Comment(Node):
    """Comment node: carries no data beyond the common Node header."""
    def __init__(self):
        super(Comment, self).__init__()
    def dump(self, fileHandle):
        # BUG FIX: this previously wrote "fuzzy commend", a keyword that
        # Brain.loadNode does not recognise, so a dumped brain could not be
        # re-imported. The correct keyword is "comment".
        fileHandle.write("fuzzy comment\n")
        self._dumpHeader(fileHandle)
        self._dumpFooter(fileHandle)
class Brain:
    """Registry of fuzzy nodes, indexed by id and kept in load order."""

    def __init__(self):
        # id -> node table; theoretically sparse, with 0 as the filler
        # value for unused slots (matches the original representation).
        self._nodes = []
        # Nodes in the order they were loaded, so that exports reproduce
        # the import order exactly.
        self._ordered = []

    def addNode(self, node):
        """Register *node* under node.id, growing the id table as needed."""
        shortfall = node.id - len(self._nodes) + 1
        if shortfall > 0:
            self._nodes.extend([0] * shortfall)
        self._nodes[node.id] = node
        self._ordered.append(node)

    def nodes(self):
        """All nodes, in the order they were loaded."""
        return self._ordered

    def getNode(self, name):
        """Return the first node whose name matches *name*.

        Raises Error (declared elsewhere in this module) when no node
        matches.
        """
        for candidate in self._ordered:
            if candidate.name == name:
                return candidate
        raise Error("No node named %s" % name)

    def loadNode(self, fileHandle, tokens):
        """Create the node type named by tokens[1], load it, register it.

        Returns whatever node.load() returns (the line it stopped at).
        """
        # Built at call time (not class scope) so the node classes only
        # need to be resolvable when loadNode is actually invoked.
        factories = {
            "input": Input,
            "output": Output,
            "fuzz": Fuzz,
            "defuzz": Defuzz,
            "rule": Rule,
            "or": Or,
            "timer": Timer,
            "noise": Noise,
            "macro": Macro,
            "comment": Comment,
        }
        factory = factories.get(tokens[1])
        if factory is None:
            raise Exception("Unknown fuzzy node type '%s'." % tokens[1])
        node = factory()
        line = node.load(fileHandle)
        self.addNode(node)
        return line

    def countOutputConnections(self):
        """Increment numOutputs on every node referenced as an input."""
        for node in self._ordered:
            for source in node.inputs:
                self._nodes[source].numOutputs += 1
            for source in node.altInputs:
                self._nodes[source].numOutputs += 1

    def dump(self, fileHandle):
        """Write every node to fileHandle in original load order."""
        for node in self._ordered:
            node.dump(fileHandle)
| {
"content_hash": "9df4d89fcd20543de63d64bce8747092",
"timestamp": "",
"source": "github",
"line_count": 675,
"max_line_length": 100,
"avg_line_length": 27.554074074074073,
"alnum_prop": 0.6534759933329749,
"repo_name": "redpawfx/massiveImporter",
"id": "88ebef2acebd73212e13185c5210cd00c5f63c73",
"size": "19715",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/ns/bridge/data/Brain.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "324293"
}
],
"symlink_target": ""
} |
import numpy as np
from scipy import sparse as sp
# Package metadata for the one-hot encoder module.
__author__ = "Irshad Ahmad Bhat"
__version__ = "1.0"
__email__ = "irshad.bhat@research.iiit.ac.in"
class OneHotEncoder():
    """Transforms categorical features to continuous numeric features.

    Each distinct value of each input column gets its own indicator
    column.  BUG FIX: values are now assigned columns in sorted order,
    so the encoding is deterministic across interpreter runs (iterating
    a raw ``set`` is not, under string-hash randomization).
    """

    def __init__(self, sparse=True):
        # When True, transform() returns a scipy CSR matrix; otherwise
        # a dense boolean numpy array.
        self.sparse = sparse

    def fit(self, X):
        """Learn the value -> column mapping for every feature column.

        Returns self, so fit().transform() chains.
        """
        # atleast_2d mirrors transform(), so 1-D inputs behave the same
        # way in both methods.
        data = np.atleast_2d(np.asarray(X))
        unique_feats = []
        offset = 0
        for i in range(data.shape[1]):
            # Sorted for a stable, reproducible column layout.
            feat_values = sorted(set(data[:, i]))
            unique_feats.append({val: j + offset for j, val in enumerate(feat_values)})
            offset += len(feat_values)
        self.unique_feats = unique_feats
        return self

    def transform(self, X):
        """Encode X; values unseen during fit() map to all-zero columns."""
        X = np.atleast_2d(X)
        n_cols = sum(len(d) for d in self.unique_feats)
        if self.sparse:
            one_hot_matrix = sp.lil_matrix((len(X), n_cols))
        else:
            one_hot_matrix = np.zeros((len(X), n_cols), bool)
        for i, vec in enumerate(X):
            for j, val in enumerate(vec):
                # Silently skip unseen values (original behavior).
                if val in self.unique_feats[j]:
                    one_hot_matrix[i, self.unique_feats[j][val]] = 1.0
        return sp.csr_matrix(one_hot_matrix) if self.sparse else one_hot_matrix
| {
"content_hash": "64ec0f57a05a16a51b5019b7b5075f51",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 93,
"avg_line_length": 34.054054054054056,
"alnum_prop": 0.5634920634920635,
"repo_name": "irshadbhat/python-irtrans",
"id": "d65e3638a797f9128f0c783e20b490cc0b1ee54a",
"size": "1308",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "irtrans/_utils/one_hot_repr.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "31579"
}
],
"symlink_target": ""
} |
import sys
import os.path
# Ensure that we can import the "steve" package.
# run_stv.py lives one level below the repo root, so the parent of this
# directory must be prepended to sys.path before "import steve" can work.
THIS_DIR = os.path.realpath(os.path.dirname(__file__))
sys.path.insert(0, os.path.dirname(THIS_DIR))
import steve.vtypes.stv
# The stv module loads the stv_tool module. Tweak it.
# Enable verbose tracing in the shared stv_tool module for this run.
stv_tool = steve.vtypes.stv.stv_tool
stv_tool.VERBOSE = True
def main(mtgdir):
    """Tally the board-election STV votes stored under *mtgdir*.

    Expects two files inside the meeting directory:
      raw_board_votes.txt   -- the raw vote records
      board_nominations.ini -- label -> nominee-name mapping

    Raises FileNotFoundError if either file is missing.  (The original
    used bare asserts, which are stripped under ``python -O``.)
    """
    rawfile = os.path.join(mtgdir, 'raw_board_votes.txt')
    labelfile = os.path.join(mtgdir, 'board_nominations.ini')
    for required in (rawfile, labelfile):
        if not os.path.exists(required):
            raise FileNotFoundError('missing input file: %s' % required)

    labelmap = stv_tool.read_labelmap(labelfile)
    votes = stv_tool.read_votefile(rawfile).values()

    # Construct a label-sorted list of names from the labelmap.
    names = [name for _, name in sorted(labelmap.items())]

    kv = {
        'labelmap': labelmap,
        'seats': 9,
    }

    # NOTE: for backwards-compat, the tally() function accepts a
    # list of names with caller-defined sorting.
    human, _ = steve.vtypes.stv.tally(votes, kv, names)

    # For the comparison purposes:
    print(human)

    print('Done!')
if __name__ == '__main__':
    # Usage: run_stv.py <meeting-directory>
    main(sys.argv[1])
| {
"content_hash": "2ec6810430921da95a5043db367c8a8c",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 64,
"avg_line_length": 26.738095238095237,
"alnum_prop": 0.6642920747996438,
"repo_name": "apache/steve",
"id": "c9b2b282e8ee2937b8e9609623883fa692012511",
"size": "2006",
"binary": false,
"copies": "2",
"ref": "refs/heads/trunk",
"path": "v3/test/run_stv.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "21341"
},
{
"name": "Dockerfile",
"bytes": "1066"
},
{
"name": "HTML",
"bytes": "55475"
},
{
"name": "JavaScript",
"bytes": "126554"
},
{
"name": "Pascal",
"bytes": "15944"
},
{
"name": "Perl",
"bytes": "22798"
},
{
"name": "Python",
"bytes": "212264"
},
{
"name": "Ruby",
"bytes": "5898"
},
{
"name": "Shell",
"bytes": "2936"
}
],
"symlink_target": ""
} |
import json
import logging
import datetime
from octopus import constant
from octopus import err
# Module-wide logger shared across the octopus package.
log = logging.getLogger(constant.LOGGER_NAME)
# str() template for Service instances: "<name>: <addr>".
_SERVICE_STR_FORMATTER = '{name}: {addr}'
class Service(object):
    """One service instance as registered in etcd."""

    def __init__(self, service_name, name, service_info):
        """
        :param service_name: logical name of the service, e.g. "cache"
        :param name: this instance's key in etcd (assigned by etcd)
        :param service_info: JSON string describing the instance
        :type service_name: str
        :type name: str
        :type service_info: str
        :raises err.OctpServiceInfoError: when service_info cannot be parsed
        :return:
        """
        self.addr = None  # Address of this service, e.g. {"addr": "1.2.3.4", "port": 8888}
        """:type: dict"""
        self.timeout = None  # Timeout (seconds) for connecting to this service, e.g. 0.3
        """:type: float"""
        self.service_name = service_name  # Logical service name, e.g. "cache"
        """:type: str"""
        self.name = name  # This instance's key in etcd, randomly assigned by etcd
        self._add_time = None
        """:type: datetime.datetime"""
        self._update_time = None
        """:type: datetime.datetime"""
        self._result = None
        """:type: etcd.EtcdResult"""

        self._parse_service_info(service_info)
        self._add_time = datetime.datetime.now()

    def update(self, service_info):
        """Re-parse a fresh payload for this instance and record the time.

        :param service_info: JSON string describing the instance
        :type service_info: str
        :return:
        """
        self._parse_service_info(service_info)
        self._update_time = datetime.datetime.now()

    def _parse_service_info(self, service_info):
        """Populate self.addr / self.timeout from the JSON payload.

        :param service_info: JSON string; must contain "addr", may contain "timeout"
        :type service_info: str
        :raises err.OctpServiceInfoError: on malformed JSON or missing "addr"
        :return:
        """
        try:
            info = json.loads(service_info)
            log.debug('service_info: %s', info)

            self.addr = info['addr']
            self.timeout = info.get('timeout')
        except Exception as e:
            # BUG FIX: logging's warn() is a deprecated alias; warning() is
            # the supported spelling.
            log.warning('parse service_info error: %s', e)
            # NOTE(review): message and service_info are passed as separate
            # constructor args; this relies on OctpServiceInfoError doing
            # %-interpolation itself — confirm against octopus.err.
            raise err.OctpServiceInfoError('Got invalid service_info(%s) from etcd. Should be ignore.', service_info)

    def __str__(self):
        return _SERVICE_STR_FORMATTER.format(name=self.name, addr=self.addr)
| {
"content_hash": "ab0c10f64fe85ec955ee943bc01fb7c8",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 117,
"avg_line_length": 25.883116883116884,
"alnum_prop": 0.5584545910687406,
"repo_name": "ideascf/octopus",
"id": "1b32775615ddd0f2c768affa02ba8a5828d3b971",
"size": "2098",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "service/service.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "41703"
}
],
"symlink_target": ""
} |
import requests
# CryptoCompare endpoint roots (www host for static data, min-api for prices).
URL_COIN = 'https://www.cryptocompare.com/api/data/coinlist/'
URL_EXCHANGES = 'https://min-api.cryptocompare.com/data/all/exchanges'
URL_PRICE = 'https://min-api.cryptocompare.com/data/'
URL_SNAPSHOT = 'https://www.cryptocompare.com/api/data/'
class CryptoCompareAPI:
    """Thin wrapper around the public CryptoCompare HTTP endpoints."""

    # Query-string keys forwarded to the min-api price endpoints.
    _PRICE_PARAMS = ('fsym', 'fsyms', 'tsyms', 'e')

    def __init__(self):
        pass

    def api_query(self, method, **params):
        """Issue a GET request for *method*; returns a requests.Response.

        Known methods: coinlist, coinsnapshot, exchanges, price,
        pricemulti, pricemultifull.  Unknown methods return None.
        """
        url_params = []

        if method in ['coinlist']:
            return requests.get(URL_COIN)

        if method in ['coinsnapshot']:
            for k, v in params.items():
                url_params.append(k + "=" + v)
            return requests.get(URL_SNAPSHOT + method + '/?' + "&".join(url_params))

        # BUG FIX: 'price' and 'pricemulti' previously had no branch, so
        # api_query() returned None and get_price()/get_price_multi()
        # crashed with AttributeError on .json().  All three price
        # endpoints share the same URL scheme on the min-api host.
        if method in ['price', 'pricemulti', 'pricemultifull']:
            for k, v in params.items():
                if k in self._PRICE_PARAMS:
                    url_params.append(k + "=" + v)
            return requests.get(URL_PRICE + method + '?' + "&".join(url_params))

        if method in ['exchanges']:
            return requests.get(URL_EXCHANGES)

    def get_coinlist(self):
        """Full coin list, as parsed JSON."""
        return self.api_query('coinlist').json()

    def get_exchange_pairs(self):
        """All exchanges and their trading pairs, as parsed JSON."""
        return self.api_query('exchanges').json()

    def get_price(self, params):
        """Single-symbol price; params supply fsym/tsyms (and optional e)."""
        return self.api_query('price', **params).json()

    def get_price_multi(self, params):
        """Multi-symbol price; params supply fsyms/tsyms (and optional e)."""
        return self.api_query('pricemulti', **params).json()

    def get_price_multifull(self, params):
        """Full price data; params supply fsyms/tsyms (and optional e)."""
        return self.api_query('pricemultifull', **params).json()

    def get_coin_snapshot(self, params):
        """Coin snapshot for the pair described by params."""
        return self.api_query('coinsnapshot', **params).json()
| {
"content_hash": "e6c042ced597a8331b7c549d7e3e06e1",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 84,
"avg_line_length": 31.352941176470587,
"alnum_prop": 0.5884928080050031,
"repo_name": "meister245/CryptoLunch",
"id": "f8b07243c2f7bbeea122b63bf54ce486907c7160",
"size": "1599",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modules/api/cryptocompare_api.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "18237"
}
],
"symlink_target": ""
} |
from django import forms
from django.forms import CharField
from mybitbank.libs.connections import connector
from mybitbank.apps.transfer.forms import CoinAddress, CoinProviderId
from mybitbank.libs import misc
class CoinAccountName(CharField):
    """Form field for an account name; applies the standard CharField checks."""

    def validate(self, value):
        # BUG FIX: the original called super(CharField, self).validate(value),
        # which skips CharField itself in the MRO; bind super() to the
        # defining class so the direct parent's validate() runs.
        super(CoinAccountName, self).validate(value)
class CoinCurrency(CharField):
    """Form field that accepts only a currency enabled in the connector."""

    def validate(self, value):
        # BUG FIX: bind super() to the defining class (was CharField, which
        # skips the direct parent in the MRO).
        super(CoinCurrency, self).validate(value)

        supported_currencies = connector.services.keys()
        if value not in supported_currencies:
            raise forms.ValidationError("This currency is not supported: %s" % value)
class AddAddressBookForm(forms.Form):
    """Form for adding an address-book entry (name, coin address, provider)."""
    # Field declaration order determines the rendering order in Django.
    name = forms.CharField(required=True, initial="")
    address = CoinAddress(required=True, initial="")
    provider_id = CoinProviderId(required=True, initial=misc.getInitialProviderId(connector))
    comment = forms.CharField(required=False, initial="")
    def clean_address(self):
        # Strip surrounding whitespace before the address is stored/validated.
        address = self.cleaned_data['address']
        return address.strip()
    def clean(self):
        # No cross-field validation yet; defer entirely to the base class.
        cleaned_data = super(AddAddressBookForm, self).clean()
        return cleaned_data
| {
"content_hash": "b1ad5e61cbc58f1ecb20172f7a42718f",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 93,
"avg_line_length": 36,
"alnum_prop": 0.71875,
"repo_name": "ychaim/mybitbank",
"id": "ac4c69f53ba6c324cffcef497cbf354357a911a2",
"size": "1152",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mybitbank/apps/addressbook/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "105105"
},
{
"name": "HTML",
"bytes": "74263"
},
{
"name": "JavaScript",
"bytes": "182459"
},
{
"name": "Python",
"bytes": "227251"
}
],
"symlink_target": ""
} |
"""This module is deprecated. Please use :mod:`airflow.providers.google.suite.transfers.gcs_to_gdrive`."""
import warnings
# pylint: disable=unused-import
from airflow.providers.google.suite.transfers.gcs_to_gdrive import GCSToGoogleDriveOperator # noqa
# Emit the standard Airflow deprecation notice for this shim module.
# BUG FIX: the message previously ended "...gcs_to_gdrive." with no closing
# backtick, producing malformed output; the module path is now properly
# backtick-quoted.
warnings.warn(
    "This module is deprecated. "
    "Please use `airflow.providers.google.suite.transfers.gcs_to_gdrive`.",
    DeprecationWarning,
    stacklevel=2,
)
| {
"content_hash": "2a0aba76cfe5ac050feb4df762f7f188",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 106,
"avg_line_length": 34.916666666666664,
"alnum_prop": 0.7732696897374701,
"repo_name": "sekikn/incubator-airflow",
"id": "72627ce4b57e6d57687f7002d2a5f4cdee3be572",
"size": "1206",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "airflow/contrib/operators/gcs_to_gdrive_operator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "13715"
},
{
"name": "Dockerfile",
"bytes": "15900"
},
{
"name": "HTML",
"bytes": "151266"
},
{
"name": "JavaScript",
"bytes": "25486"
},
{
"name": "Jupyter Notebook",
"bytes": "2933"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "10792443"
},
{
"name": "Shell",
"bytes": "243458"
},
{
"name": "TSQL",
"bytes": "879"
}
],
"symlink_target": ""
} |
import sys
from types import GeneratorType
from libcloud.test import MockHttp, unittest
from libcloud.utils.py3 import ET, httplib
from libcloud.utils.xml import findall, findtext, fixxpath
from libcloud.common.types import InvalidCredsError
from libcloud.compute.base import Node, NodeLocation, NodeAuthPassword
from libcloud.test.compute import TestCaseMixin
from libcloud.test.secrets import DIMENSIONDATA_PARAMS
from libcloud.test.file_fixtures import ComputeFileFixtures
from libcloud.common.dimensiondata import (
TYPES_URN,
DimensionDataTag,
DimensionDataPort,
DimensionDataTagKey,
DimensionDataPortList,
DimensionDataIpAddress,
DimensionDataServerDisk,
NetworkDomainServicePlan,
DimensionDataAPIException,
DimensionDataChildPortList,
DimensionDataIpAddressList,
DimensionDataServerVMWareTools,
DimensionDataChildIpAddressList,
DimensionDataServerCpuSpecification,
)
from libcloud.compute.drivers.dimensiondata import DimensionDataNic
from libcloud.compute.drivers.dimensiondata import DimensionDataNodeDriver as DimensionData
class DimensionData_v2_3_Tests(unittest.TestCase, TestCaseMixin):
def setUp(self):
DimensionData.connectionCls.active_api_version = "2.3"
DimensionData.connectionCls.conn_class = DimensionDataMockHttp
DimensionDataMockHttp.type = None
self.driver = DimensionData(*DIMENSIONDATA_PARAMS)
def test_invalid_region(self):
with self.assertRaises(ValueError):
DimensionData(*DIMENSIONDATA_PARAMS, region="blah")
def test_invalid_creds(self):
DimensionDataMockHttp.type = "UNAUTHORIZED"
with self.assertRaises(InvalidCredsError):
self.driver.list_nodes()
def test_get_account_details(self):
DimensionDataMockHttp.type = None
ret = self.driver.connection.get_account_details()
self.assertEqual(ret.full_name, "Test User")
self.assertEqual(ret.first_name, "Test")
self.assertEqual(ret.email, "test@example.com")
def test_list_locations_response(self):
DimensionDataMockHttp.type = None
ret = self.driver.list_locations()
self.assertEqual(len(ret), 5)
first_loc = ret[0]
self.assertEqual(first_loc.id, "NA3")
self.assertEqual(first_loc.name, "US - West")
self.assertEqual(first_loc.country, "US")
def test_list_nodes_response(self):
DimensionDataMockHttp.type = None
ret = self.driver.list_nodes()
self.assertEqual(len(ret), 7)
def test_node_extras(self):
DimensionDataMockHttp.type = None
ret = self.driver.list_nodes()
self.assertTrue(isinstance(ret[0].extra["vmWareTools"], DimensionDataServerVMWareTools))
self.assertTrue(isinstance(ret[0].extra["cpu"], DimensionDataServerCpuSpecification))
self.assertTrue(isinstance(ret[0].extra["disks"], list))
self.assertTrue(isinstance(ret[0].extra["disks"][0], DimensionDataServerDisk))
self.assertEqual(ret[0].extra["disks"][0].size_gb, 10)
self.assertTrue(isinstance(ret[1].extra["disks"], list))
self.assertTrue(isinstance(ret[1].extra["disks"][0], DimensionDataServerDisk))
self.assertEqual(ret[1].extra["disks"][0].size_gb, 10)
def test_server_states(self):
DimensionDataMockHttp.type = None
ret = self.driver.list_nodes()
self.assertTrue(ret[0].state == "running")
self.assertTrue(ret[1].state == "starting")
self.assertTrue(ret[2].state == "stopping")
self.assertTrue(ret[3].state == "reconfiguring")
self.assertTrue(ret[4].state == "running")
self.assertTrue(ret[5].state == "terminated")
self.assertTrue(ret[6].state == "stopped")
self.assertEqual(len(ret), 7)
def test_list_nodes_response_PAGINATED(self):
DimensionDataMockHttp.type = "PAGINATED"
ret = self.driver.list_nodes()
self.assertEqual(len(ret), 9)
def test_paginated_mcp2_call_EMPTY(self):
# cache org
self.driver.connection._get_orgId()
DimensionDataMockHttp.type = "EMPTY"
node_list_generator = self.driver.connection.paginated_request_with_orgId_api_2(
"server/server"
)
empty_node_list = []
for node_list in node_list_generator:
empty_node_list.extend(node_list)
self.assertTrue(len(empty_node_list) == 0)
def test_paginated_mcp2_call_PAGED_THEN_EMPTY(self):
# cache org
self.driver.connection._get_orgId()
DimensionDataMockHttp.type = "PAGED_THEN_EMPTY"
node_list_generator = self.driver.connection.paginated_request_with_orgId_api_2(
"server/server"
)
final_node_list = []
for node_list in node_list_generator:
final_node_list.extend(node_list)
self.assertTrue(len(final_node_list) == 2)
def test_paginated_mcp2_call_with_page_size(self):
# cache org
self.driver.connection._get_orgId()
DimensionDataMockHttp.type = "PAGESIZE50"
node_list_generator = self.driver.connection.paginated_request_with_orgId_api_2(
"server/server", page_size=50
)
self.assertTrue(isinstance(node_list_generator, GeneratorType))
# We're making sure here the filters make it to the URL
# See _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_ALLFILTERS for asserts
def test_list_nodes_response_strings_ALLFILTERS(self):
DimensionDataMockHttp.type = "ALLFILTERS"
ret = self.driver.list_nodes(
ex_location="fake_loc",
ex_name="fake_name",
ex_ipv6="fake_ipv6",
ex_ipv4="fake_ipv4",
ex_vlan="fake_vlan",
ex_image="fake_image",
ex_deployed=True,
ex_started=True,
ex_state="fake_state",
ex_network="fake_network",
ex_network_domain="fake_network_domain",
)
self.assertTrue(isinstance(ret, list))
self.assertEqual(len(ret), 7)
node = ret[3]
self.assertTrue(isinstance(node.extra["disks"], list))
self.assertTrue(isinstance(node.extra["disks"][0], DimensionDataServerDisk))
self.assertEqual(node.size.id, "1")
self.assertEqual(node.image.id, "3ebf3c0f-90fe-4a8b-8585-6e65b316592c")
self.assertEqual(node.image.name, "WIN2008S/32")
disk = node.extra["disks"][0]
self.assertEqual(disk.id, "c2e1f199-116e-4dbc-9960-68720b832b0a")
self.assertEqual(disk.scsi_id, 0)
self.assertEqual(disk.size_gb, 50)
self.assertEqual(disk.speed, "STANDARD")
self.assertEqual(disk.state, "NORMAL")
def test_list_nodes_response_LOCATION(self):
DimensionDataMockHttp.type = None
ret = self.driver.list_locations()
first_loc = ret[0]
ret = self.driver.list_nodes(ex_location=first_loc)
for node in ret:
self.assertEqual(node.extra["datacenterId"], "NA3")
def test_list_nodes_response_LOCATION_STR(self):
DimensionDataMockHttp.type = None
ret = self.driver.list_nodes(ex_location="NA3")
for node in ret:
self.assertEqual(node.extra["datacenterId"], "NA3")
def test_list_sizes_response(self):
DimensionDataMockHttp.type = None
ret = self.driver.list_sizes()
self.assertEqual(len(ret), 1)
size = ret[0]
self.assertEqual(size.name, "default")
def test_reboot_node_response(self):
node = Node(
id="11",
name=None,
state=None,
public_ips=None,
private_ips=None,
driver=self.driver,
)
ret = node.reboot()
self.assertTrue(ret is True)
def test_reboot_node_response_INPROGRESS(self):
DimensionDataMockHttp.type = "INPROGRESS"
node = Node(
id="11",
name=None,
state=None,
public_ips=None,
private_ips=None,
driver=self.driver,
)
with self.assertRaises(DimensionDataAPIException):
node.reboot()
def test_destroy_node_response(self):
node = Node(
id="11",
name=None,
state=None,
public_ips=None,
private_ips=None,
driver=self.driver,
)
ret = node.destroy()
self.assertTrue(ret is True)
def test_destroy_node_response_RESOURCE_BUSY(self):
DimensionDataMockHttp.type = "INPROGRESS"
node = Node(
id="11",
name=None,
state=None,
public_ips=None,
private_ips=None,
driver=self.driver,
)
with self.assertRaises(DimensionDataAPIException):
node.destroy()
def test_list_images(self):
images = self.driver.list_images()
self.assertEqual(len(images), 3)
self.assertEqual(images[0].name, "RedHat 6 64-bit 2 CPU")
self.assertEqual(images[0].id, "c14b1a46-2428-44c1-9c1a-b20e6418d08c")
self.assertEqual(images[0].extra["location"].id, "NA9")
self.assertEqual(images[0].extra["cpu"].cpu_count, 2)
self.assertEqual(images[0].extra["OS_displayName"], "REDHAT6/64")
def test_clean_failed_deployment_response_with_node(self):
node = Node(
id="11",
name=None,
state=None,
public_ips=None,
private_ips=None,
driver=self.driver,
)
ret = self.driver.ex_clean_failed_deployment(node)
self.assertTrue(ret is True)
def test_clean_failed_deployment_response_with_node_id(self):
node = "e75ead52-692f-4314-8725-c8a4f4d13a87"
ret = self.driver.ex_clean_failed_deployment(node)
self.assertTrue(ret is True)
def test_ex_list_customer_images(self):
images = self.driver.ex_list_customer_images()
self.assertEqual(len(images), 3)
self.assertEqual(images[0].name, "ImportedCustomerImage")
self.assertEqual(images[0].id, "5234e5c7-01de-4411-8b6e-baeb8d91cf5d")
self.assertEqual(images[0].extra["location"].id, "NA9")
self.assertEqual(images[0].extra["cpu"].cpu_count, 4)
self.assertEqual(images[0].extra["OS_displayName"], "REDHAT6/64")
def test_create_mcp1_node_optional_param(self):
root_pw = NodeAuthPassword("pass123")
image = self.driver.list_images()[0]
network = self.driver.ex_list_networks()[0]
cpu_spec = DimensionDataServerCpuSpecification(
cpu_count="4", cores_per_socket="2", performance="STANDARD"
)
disks = [DimensionDataServerDisk(scsi_id="0", speed="HIGHPERFORMANCE")]
node = self.driver.create_node(
name="test2",
image=image,
auth=root_pw,
ex_description="test2 node",
ex_network=network,
ex_is_started=False,
ex_memory_gb=8,
ex_disks=disks,
ex_cpu_specification=cpu_spec,
ex_primary_dns="10.0.0.5",
ex_secondary_dns="10.0.0.6",
)
self.assertEqual(node.id, "e75ead52-692f-4314-8725-c8a4f4d13a87")
self.assertEqual(node.extra["status"].action, "DEPLOY_SERVER")
def test_create_mcp1_node_response_no_pass_random_gen(self):
image = self.driver.list_images()[0]
network = self.driver.ex_list_networks()[0]
node = self.driver.create_node(
name="test2",
image=image,
auth=None,
ex_description="test2 node",
ex_network=network,
ex_is_started=False,
)
self.assertEqual(node.id, "e75ead52-692f-4314-8725-c8a4f4d13a87")
self.assertEqual(node.extra["status"].action, "DEPLOY_SERVER")
self.assertTrue("password" in node.extra)
def test_create_mcp1_node_response_no_pass_customer_windows(self):
image = self.driver.ex_list_customer_images()[1]
network = self.driver.ex_list_networks()[0]
node = self.driver.create_node(
name="test2",
image=image,
auth=None,
ex_description="test2 node",
ex_network=network,
ex_is_started=False,
)
self.assertEqual(node.id, "e75ead52-692f-4314-8725-c8a4f4d13a87")
self.assertEqual(node.extra["status"].action, "DEPLOY_SERVER")
self.assertTrue("password" in node.extra)
def test_create_mcp1_node_response_no_pass_customer_windows_STR(self):
image = self.driver.ex_list_customer_images()[1].id
network = self.driver.ex_list_networks()[0]
node = self.driver.create_node(
name="test2",
image=image,
auth=None,
ex_description="test2 node",
ex_network=network,
ex_is_started=False,
)
self.assertEqual(node.id, "e75ead52-692f-4314-8725-c8a4f4d13a87")
self.assertEqual(node.extra["status"].action, "DEPLOY_SERVER")
self.assertTrue("password" in node.extra)
def test_create_mcp1_node_response_no_pass_customer_linux(self):
image = self.driver.ex_list_customer_images()[0]
network = self.driver.ex_list_networks()[0]
node = self.driver.create_node(
name="test2",
image=image,
auth=None,
ex_description="test2 node",
ex_network=network,
ex_is_started=False,
)
self.assertEqual(node.id, "e75ead52-692f-4314-8725-c8a4f4d13a87")
self.assertEqual(node.extra["status"].action, "DEPLOY_SERVER")
self.assertTrue("password" not in node.extra)
def test_create_mcp1_node_response_no_pass_customer_linux_STR(self):
image = self.driver.ex_list_customer_images()[0].id
network = self.driver.ex_list_networks()[0]
node = self.driver.create_node(
name="test2",
image=image,
auth=None,
ex_description="test2 node",
ex_network=network,
ex_is_started=False,
)
self.assertEqual(node.id, "e75ead52-692f-4314-8725-c8a4f4d13a87")
self.assertEqual(node.extra["status"].action, "DEPLOY_SERVER")
self.assertTrue("password" not in node.extra)
def test_create_mcp1_node_response_STR(self):
rootPw = "pass123"
image = self.driver.list_images()[0].id
network = self.driver.ex_list_networks()[0].id
node = self.driver.create_node(
name="test2",
image=image,
auth=rootPw,
ex_description="test2 node",
ex_network=network,
ex_is_started=False,
)
self.assertEqual(node.id, "e75ead52-692f-4314-8725-c8a4f4d13a87")
self.assertEqual(node.extra["status"].action, "DEPLOY_SERVER")
def test_create_node_response_network_domain(self):
rootPw = NodeAuthPassword("pass123")
location = self.driver.ex_get_location_by_id("NA9")
image = self.driver.list_images(location=location)[0]
network_domain = self.driver.ex_list_network_domains(location=location)[0]
vlan = self.driver.ex_list_vlans(location=location)[0]
cpu = DimensionDataServerCpuSpecification(
cpu_count=4, cores_per_socket=1, performance="HIGHPERFORMANCE"
)
node = self.driver.create_node(
name="test2",
image=image,
auth=rootPw,
ex_description="test2 node",
ex_network_domain=network_domain,
ex_vlan=vlan,
ex_is_started=False,
ex_cpu_specification=cpu,
ex_memory_gb=4,
)
self.assertEqual(node.id, "e75ead52-692f-4314-8725-c8a4f4d13a87")
self.assertEqual(node.extra["status"].action, "DEPLOY_SERVER")
def test_create_node_response_network_domain_STR(self):
rootPw = NodeAuthPassword("pass123")
location = self.driver.ex_get_location_by_id("NA9")
image = self.driver.list_images(location=location)[0]
network_domain = self.driver.ex_list_network_domains(location=location)[0].id
vlan = self.driver.ex_list_vlans(location=location)[0].id
cpu = DimensionDataServerCpuSpecification(
cpu_count=4, cores_per_socket=1, performance="HIGHPERFORMANCE"
)
node = self.driver.create_node(
name="test2",
image=image,
auth=rootPw,
ex_description="test2 node",
ex_network_domain=network_domain,
ex_vlan=vlan,
ex_is_started=False,
ex_cpu_specification=cpu,
ex_memory_gb=4,
)
self.assertEqual(node.id, "e75ead52-692f-4314-8725-c8a4f4d13a87")
self.assertEqual(node.extra["status"].action, "DEPLOY_SERVER")
def test_create_mcp1_node_no_network(self):
rootPw = NodeAuthPassword("pass123")
image = self.driver.list_images()[0]
with self.assertRaises(InvalidRequestError):
self.driver.create_node(
name="test2",
image=image,
auth=rootPw,
ex_description="test2 node",
ex_network=None,
ex_is_started=False,
)
def test_create_node_mcp1_ipv4(self):
rootPw = NodeAuthPassword("pass123")
image = self.driver.list_images()[0]
node = self.driver.create_node(
name="test2",
image=image,
auth=rootPw,
ex_description="test2 node",
ex_network="fakenetwork",
ex_primary_ipv4="10.0.0.1",
ex_is_started=False,
)
self.assertEqual(node.id, "e75ead52-692f-4314-8725-c8a4f4d13a87")
self.assertEqual(node.extra["status"].action, "DEPLOY_SERVER")
def test_create_node_mcp1_network(self):
rootPw = NodeAuthPassword("pass123")
image = self.driver.list_images()[0]
node = self.driver.create_node(
name="test2",
image=image,
auth=rootPw,
ex_description="test2 node",
ex_network="fakenetwork",
ex_is_started=False,
)
self.assertEqual(node.id, "e75ead52-692f-4314-8725-c8a4f4d13a87")
self.assertEqual(node.extra["status"].action, "DEPLOY_SERVER")
def test_create_node_mcp2_vlan(self):
rootPw = NodeAuthPassword("pass123")
image = self.driver.list_images()[0]
node = self.driver.create_node(
name="test2",
image=image,
auth=rootPw,
ex_description="test2 node",
ex_network_domain="fakenetworkdomain",
ex_vlan="fakevlan",
ex_is_started=False,
)
self.assertEqual(node.id, "e75ead52-692f-4314-8725-c8a4f4d13a87")
self.assertEqual(node.extra["status"].action, "DEPLOY_SERVER")
def test_create_node_mcp2_ipv4(self):
rootPw = NodeAuthPassword("pass123")
image = self.driver.list_images()[0]
node = self.driver.create_node(
name="test2",
image=image,
auth=rootPw,
ex_description="test2 node",
ex_network_domain="fakenetworkdomain",
ex_primary_ipv4="10.0.0.1",
ex_is_started=False,
)
self.assertEqual(node.id, "e75ead52-692f-4314-8725-c8a4f4d13a87")
self.assertEqual(node.extra["status"].action, "DEPLOY_SERVER")
def test_create_node_network_domain_no_vlan_or_ipv4(self):
rootPw = NodeAuthPassword("pass123")
image = self.driver.list_images()[0]
with self.assertRaises(ValueError):
self.driver.create_node(
name="test2",
image=image,
auth=rootPw,
ex_description="test2 node",
ex_network_domain="fake_network_domain",
ex_is_started=False,
)
def test_create_node_response(self):
rootPw = NodeAuthPassword("pass123")
image = self.driver.list_images()[0]
node = self.driver.create_node(
name="test3",
image=image,
auth=rootPw,
ex_network_domain="fakenetworkdomain",
ex_primary_nic_vlan="fakevlan",
)
self.assertEqual(node.id, "e75ead52-692f-4314-8725-c8a4f4d13a87")
self.assertEqual(node.extra["status"].action, "DEPLOY_SERVER")
def test_create_node_ms_time_zone(self):
rootPw = NodeAuthPassword("pass123")
image = self.driver.list_images()[0]
node = self.driver.create_node(
name="test3",
image=image,
auth=rootPw,
ex_network_domain="fakenetworkdomain",
ex_primary_nic_vlan="fakevlan",
ex_microsoft_time_zone="040",
)
self.assertEqual(node.id, "e75ead52-692f-4314-8725-c8a4f4d13a87")
self.assertEqual(node.extra["status"].action, "DEPLOY_SERVER")
def test_create_node_ambigious_mcps_fail(self):
rootPw = NodeAuthPassword("pass123")
image = self.driver.list_images()[0]
with self.assertRaises(ValueError):
self.driver.create_node(
name="test3",
image=image,
auth=rootPw,
ex_network_domain="fakenetworkdomain",
ex_network="fakenetwork",
ex_primary_nic_vlan="fakevlan",
)
def test_create_node_no_network_domain_fail(self):
rootPw = NodeAuthPassword("pass123")
image = self.driver.list_images()[0]
with self.assertRaises(ValueError):
self.driver.create_node(
name="test3", image=image, auth=rootPw, ex_primary_nic_vlan="fakevlan"
)
def test_create_node_no_primary_nic_fail(self):
rootPw = NodeAuthPassword("pass123")
image = self.driver.list_images()[0]
with self.assertRaises(ValueError):
self.driver.create_node(
name="test3",
image=image,
auth=rootPw,
ex_network_domain="fakenetworkdomain",
)
def test_create_node_primary_vlan_nic(self):
rootPw = NodeAuthPassword("pass123")
image = self.driver.list_images()[0]
node = self.driver.create_node(
name="test3",
image=image,
auth=rootPw,
ex_network_domain="fakenetworkdomain",
ex_primary_nic_vlan="fakevlan",
ex_primary_nic_network_adapter="v1000",
)
self.assertEqual(node.id, "e75ead52-692f-4314-8725-c8a4f4d13a87")
self.assertEqual(node.extra["status"].action, "DEPLOY_SERVER")
def test_create_node_primary_ipv4(self):
rootPw = "pass123"
image = self.driver.list_images()[0]
node = self.driver.create_node(
name="test3",
image=image,
auth=rootPw,
ex_network_domain="fakenetworkdomain",
ex_primary_nic_private_ipv4="10.0.0.1",
)
self.assertEqual(node.id, "e75ead52-692f-4314-8725-c8a4f4d13a87")
self.assertEqual(node.extra["status"].action, "DEPLOY_SERVER")
def test_create_node_both_primary_nic_and_vlan_fail(self):
rootPw = NodeAuthPassword("pass123")
image = self.driver.list_images()[0]
with self.assertRaises(ValueError):
self.driver.create_node(
name="test3",
image=image,
auth=rootPw,
ex_network_domain="fakenetworkdomain",
ex_primary_nic_private_ipv4="10.0.0.1",
ex_primary_nic_vlan="fakevlan",
)
def test_create_node_cpu_specification(self):
rootPw = NodeAuthPassword("pass123")
image = self.driver.list_images()[0]
cpu_spec = DimensionDataServerCpuSpecification(
cpu_count="4", cores_per_socket="2", performance="STANDARD"
)
node = self.driver.create_node(
name="test2",
image=image,
auth=rootPw,
ex_description="test2 node",
ex_network_domain="fakenetworkdomain",
ex_primary_nic_private_ipv4="10.0.0.1",
ex_is_started=False,
ex_cpu_specification=cpu_spec,
)
self.assertEqual(node.id, "e75ead52-692f-4314-8725-c8a4f4d13a87")
self.assertEqual(node.extra["status"].action, "DEPLOY_SERVER")
def test_create_node_memory(self):
rootPw = NodeAuthPassword("pass123")
image = self.driver.list_images()[0]
node = self.driver.create_node(
name="test2",
image=image,
auth=rootPw,
ex_description="test2 node",
ex_network_domain="fakenetworkdomain",
ex_primary_nic_private_ipv4="10.0.0.1",
ex_is_started=False,
ex_memory_gb=8,
)
self.assertEqual(node.id, "e75ead52-692f-4314-8725-c8a4f4d13a87")
self.assertEqual(node.extra["status"].action, "DEPLOY_SERVER")
def test_create_node_disks(self):
rootPw = NodeAuthPassword("pass123")
image = self.driver.list_images()[0]
disks = [DimensionDataServerDisk(scsi_id="0", speed="HIGHPERFORMANCE")]
node = self.driver.create_node(
name="test2",
image=image,
auth=rootPw,
ex_description="test2 node",
ex_network_domain="fakenetworkdomain",
ex_primary_nic_private_ipv4="10.0.0.1",
ex_is_started=False,
ex_disks=disks,
)
self.assertEqual(node.id, "e75ead52-692f-4314-8725-c8a4f4d13a87")
self.assertEqual(node.extra["status"].action, "DEPLOY_SERVER")
def test_create_node_disks_fail(self):
root_pw = NodeAuthPassword("pass123")
image = self.driver.list_images()[0]
disks = "blah"
with self.assertRaises(TypeError):
self.driver.create_node(
name="test2",
image=image,
auth=root_pw,
ex_description="test2 node",
ex_network_domain="fakenetworkdomain",
ex_primary_nic_private_ipv4="10.0.0.1",
ex_is_started=False,
ex_disks=disks,
)
def test_create_node_ipv4_gateway(self):
rootPw = NodeAuthPassword("pass123")
image = self.driver.list_images()[0]
node = self.driver.create_node(
name="test2",
image=image,
auth=rootPw,
ex_description="test2 node",
ex_network_domain="fakenetworkdomain",
ex_primary_nic_private_ipv4="10.0.0.1",
ex_is_started=False,
ex_ipv4_gateway="10.2.2.2",
)
self.assertEqual(node.id, "e75ead52-692f-4314-8725-c8a4f4d13a87")
self.assertEqual(node.extra["status"].action, "DEPLOY_SERVER")
    def test_create_node_network_domain_no_vlan_no_ipv4_fail(self):
        # A network-domain deploy without either a VLAN or a primary IPv4
        # address is ambiguous and must raise ValueError client-side.
        rootPw = NodeAuthPassword("pass123")
        image = self.driver.list_images()[0]
        with self.assertRaises(ValueError):
            self.driver.create_node(
                name="test2",
                image=image,
                auth=rootPw,
                ex_description="test2 node",
                ex_network_domain="fake_network_domain",
                ex_is_started=False,
            )
    def test_create_node_mcp2_additional_nics_legacy(self):
        # Legacy-style additional NICs: parallel lists of VLAN names and
        # IPv4 addresses (ex_additional_nics_vlan / ex_additional_nics_ipv4)
        # rather than DimensionDataNic objects.
        rootPw = NodeAuthPassword("pass123")
        image = self.driver.list_images()[0]
        additional_vlans = ["fakevlan1", "fakevlan2"]
        additional_ipv4 = ["10.0.0.2", "10.0.0.3"]
        node = self.driver.create_node(
            name="test2",
            image=image,
            auth=rootPw,
            ex_description="test2 node",
            ex_network_domain="fakenetworkdomain",
            ex_primary_ipv4="10.0.0.1",
            ex_additional_nics_vlan=additional_vlans,
            ex_additional_nics_ipv4=additional_ipv4,
            ex_is_started=False,
        )
        self.assertEqual(node.id, "e75ead52-692f-4314-8725-c8a4f4d13a87")
        self.assertEqual(node.extra["status"].action, "DEPLOY_SERVER")
def test_create_node_bad_additional_nics_ipv4(self):
rootPw = NodeAuthPassword("pass123")
image = self.driver.list_images()[0]
with self.assertRaises(TypeError):
self.driver.create_node(
name="test2",
image=image,
auth=rootPw,
ex_description="test2 node",
ex_network_domain="fake_network_domain",
ex_vlan="fake_vlan",
ex_additional_nics_ipv4="badstring",
ex_is_started=False,
)
def test_create_node_additional_nics(self):
root_pw = NodeAuthPassword("pass123")
image = self.driver.list_images()[0]
nic1 = DimensionDataNic(vlan="fake_vlan", network_adapter_name="v1000")
nic2 = DimensionDataNic(private_ip_v4="10.1.1.2", network_adapter_name="v1000")
additional_nics = [nic1, nic2]
node = self.driver.create_node(
name="test2",
image=image,
auth=root_pw,
ex_description="test2 node",
ex_network_domain="fakenetworkdomain",
ex_primary_nic_private_ipv4="10.0.0.1",
ex_additional_nics=additional_nics,
ex_is_started=False,
)
self.assertEqual(node.id, "e75ead52-692f-4314-8725-c8a4f4d13a87")
self.assertEqual(node.extra["status"].action, "DEPLOY_SERVER")
def test_create_node_additional_nics_vlan_ipv4_coexist_fail(self):
root_pw = NodeAuthPassword("pass123")
image = self.driver.list_images()[0]
nic1 = DimensionDataNic(
private_ip_v4="10.1.1.1", vlan="fake_vlan", network_adapter_name="v1000"
)
nic2 = DimensionDataNic(
private_ip_v4="10.1.1.2", vlan="fake_vlan2", network_adapter_name="v1000"
)
additional_nics = [nic1, nic2]
with self.assertRaises(ValueError):
self.driver.create_node(
name="test2",
image=image,
auth=root_pw,
ex_description="test2 node",
ex_network_domain="fakenetworkdomain",
ex_primary_nic_private_ipv4="10.0.0.1",
ex_additional_nics=additional_nics,
ex_is_started=False,
)
def test_create_node_additional_nics_invalid_input_fail(self):
root_pw = NodeAuthPassword("pass123")
image = self.driver.list_images()[0]
additional_nics = "blah"
with self.assertRaises(TypeError):
self.driver.create_node(
name="test2",
image=image,
auth=root_pw,
ex_description="test2 node",
ex_network_domain="fakenetworkdomain",
ex_primary_nic_private_ipv4="10.0.0.1",
ex_additional_nics=additional_nics,
ex_is_started=False,
)
def test_create_node_additional_nics_vlan_ipv4_not_exist_fail(self):
root_pw = NodeAuthPassword("pass123")
image = self.driver.list_images()[0]
nic1 = DimensionDataNic(network_adapter_name="v1000")
nic2 = DimensionDataNic(network_adapter_name="v1000")
additional_nics = [nic1, nic2]
with self.assertRaises(ValueError):
self.driver.create_node(
name="test2",
image=image,
auth=root_pw,
ex_description="test2 node",
ex_network_domain="fakenetworkdomain",
ex_primary_nic_private_ipv4="10.0.0.1",
ex_additional_nics=additional_nics,
ex_is_started=False,
)
def test_create_node_bad_additional_nics_vlan(self):
rootPw = NodeAuthPassword("pass123")
image = self.driver.list_images()[0]
with self.assertRaises(TypeError):
self.driver.create_node(
name="test2",
image=image,
auth=rootPw,
ex_description="test2 node",
ex_network_domain="fake_network_domain",
ex_vlan="fake_vlan",
ex_additional_nics_vlan="badstring",
ex_is_started=False,
)
    def test_create_node_mcp2_indicate_dns(self):
        # Deploy a node passing explicit primary/secondary DNS servers;
        # the mocked endpoint still returns the canonical fixture id.
        rootPw = NodeAuthPassword("pass123")
        image = self.driver.list_images()[0]
        node = self.driver.create_node(
            name="test2",
            image=image,
            auth=rootPw,
            ex_description="test node dns",
            ex_network_domain="fakenetworkdomain",
            ex_primary_ipv4="10.0.0.1",
            ex_primary_dns="8.8.8.8",
            ex_secondary_dns="8.8.4.4",
            ex_is_started=False,
        )
        self.assertEqual(node.id, "e75ead52-692f-4314-8725-c8a4f4d13a87")
        self.assertEqual(node.extra["status"].action, "DEPLOY_SERVER")
def test_ex_shutdown_graceful(self):
node = Node(
id="11",
name=None,
state=None,
public_ips=None,
private_ips=None,
driver=self.driver,
)
ret = self.driver.ex_shutdown_graceful(node)
self.assertTrue(ret is True)
    def test_ex_shutdown_graceful_INPROGRESS(self):
        # Select the "operation in progress" fixture via the mock's
        # class-level type attribute; the driver must surface the API
        # error as DimensionDataAPIException.
        # NOTE(review): the attribute is set on the class and not reset
        # here — presumably setUp restores it between tests; confirm.
        DimensionDataMockHttp.type = "INPROGRESS"
        node = Node(
            id="11",
            name=None,
            state=None,
            public_ips=None,
            private_ips=None,
            driver=self.driver,
        )
        with self.assertRaises(DimensionDataAPIException):
            self.driver.ex_shutdown_graceful(node)
def test_ex_start_node(self):
node = Node(
id="11",
name=None,
state=None,
public_ips=None,
private_ips=None,
driver=self.driver,
)
ret = self.driver.ex_start_node(node)
self.assertTrue(ret is True)
def test_ex_start_node_INPROGRESS(self):
DimensionDataMockHttp.type = "INPROGRESS"
node = Node(
id="11",
name=None,
state=None,
public_ips=None,
private_ips=None,
driver=self.driver,
)
with self.assertRaises(DimensionDataAPIException):
self.driver.ex_start_node(node)
def test_ex_power_off(self):
node = Node(
id="11",
name=None,
state=None,
public_ips=None,
private_ips=None,
driver=self.driver,
)
ret = self.driver.ex_power_off(node)
self.assertTrue(ret is True)
def test_ex_update_vm_tools(self):
node = Node(
id="11",
name=None,
state=None,
public_ips=None,
private_ips=None,
driver=self.driver,
)
ret = self.driver.ex_update_vm_tools(node)
self.assertTrue(ret is True)
def test_ex_power_off_INPROGRESS(self):
DimensionDataMockHttp.type = "INPROGRESS"
node = Node(
id="11",
name=None,
state="STOPPING",
public_ips=None,
private_ips=None,
driver=self.driver,
)
with self.assertRaises(DimensionDataAPIException):
self.driver.ex_power_off(node)
def test_ex_reset(self):
node = Node(
id="11",
name=None,
state=None,
public_ips=None,
private_ips=None,
driver=self.driver,
)
ret = self.driver.ex_reset(node)
self.assertTrue(ret is True)
def test_ex_attach_node_to_vlan(self):
node = self.driver.ex_get_node_by_id("e75ead52-692f-4314-8725-c8a4f4d13a87")
vlan = self.driver.ex_get_vlan("0e56433f-d808-4669-821d-812769517ff8")
ret = self.driver.ex_attach_node_to_vlan(node, vlan)
self.assertTrue(ret is True)
def test_ex_destroy_nic(self):
node = self.driver.ex_destroy_nic("a202e51b-41c0-4cfc-add0-b1c62fc0ecf6")
self.assertTrue(node)
def test_list_networks(self):
nets = self.driver.list_networks()
self.assertEqual(nets[0].name, "test-net1")
self.assertTrue(isinstance(nets[0].location, NodeLocation))
def test_ex_create_network(self):
location = self.driver.ex_get_location_by_id("NA9")
net = self.driver.ex_create_network(location, "Test Network", "test")
self.assertEqual(net.id, "208e3a8e-9d2f-11e2-b29c-001517c4643e")
self.assertEqual(net.name, "Test Network")
def test_ex_create_network_NO_DESCRIPTION(self):
location = self.driver.ex_get_location_by_id("NA9")
net = self.driver.ex_create_network(location, "Test Network")
self.assertEqual(net.id, "208e3a8e-9d2f-11e2-b29c-001517c4643e")
self.assertEqual(net.name, "Test Network")
def test_ex_delete_network(self):
net = self.driver.ex_list_networks()[0]
result = self.driver.ex_delete_network(net)
self.assertTrue(result)
def test_ex_rename_network(self):
net = self.driver.ex_list_networks()[0]
result = self.driver.ex_rename_network(net, "barry")
self.assertTrue(result)
def test_ex_create_network_domain(self):
location = self.driver.ex_get_location_by_id("NA9")
plan = NetworkDomainServicePlan.ADVANCED
net = self.driver.ex_create_network_domain(
location=location, name="test", description="test", service_plan=plan
)
self.assertEqual(net.name, "test")
self.assertTrue(net.id, "f14a871f-9a25-470c-aef8-51e13202e1aa")
def test_ex_create_network_domain_NO_DESCRIPTION(self):
location = self.driver.ex_get_location_by_id("NA9")
plan = NetworkDomainServicePlan.ADVANCED
net = self.driver.ex_create_network_domain(
location=location, name="test", service_plan=plan
)
self.assertEqual(net.name, "test")
self.assertTrue(net.id, "f14a871f-9a25-470c-aef8-51e13202e1aa")
def test_ex_get_network_domain(self):
net = self.driver.ex_get_network_domain("8cdfd607-f429-4df6-9352-162cfc0891be")
self.assertEqual(net.id, "8cdfd607-f429-4df6-9352-162cfc0891be")
self.assertEqual(net.description, "test2")
self.assertEqual(net.name, "test")
def test_ex_update_network_domain(self):
net = self.driver.ex_get_network_domain("8cdfd607-f429-4df6-9352-162cfc0891be")
net.name = "new name"
net2 = self.driver.ex_update_network_domain(net)
self.assertEqual(net2.name, "new name")
def test_ex_delete_network_domain(self):
net = self.driver.ex_get_network_domain("8cdfd607-f429-4df6-9352-162cfc0891be")
result = self.driver.ex_delete_network_domain(net)
self.assertTrue(result)
def test_ex_list_networks(self):
nets = self.driver.ex_list_networks()
self.assertEqual(nets[0].name, "test-net1")
self.assertTrue(isinstance(nets[0].location, NodeLocation))
def test_ex_list_network_domains(self):
nets = self.driver.ex_list_network_domains()
self.assertEqual(nets[0].name, "Aurora")
self.assertTrue(isinstance(nets[0].location, NodeLocation))
def test_ex_list_network_domains_ALLFILTERS(self):
DimensionDataMockHttp.type = "ALLFILTERS"
nets = self.driver.ex_list_network_domains(
location="fake_location",
name="fake_name",
service_plan="fake_plan",
state="fake_state",
)
self.assertEqual(nets[0].name, "Aurora")
self.assertTrue(isinstance(nets[0].location, NodeLocation))
def test_ex_list_vlans(self):
vlans = self.driver.ex_list_vlans()
self.assertEqual(vlans[0].name, "Primary")
def test_ex_list_vlans_ALLFILTERS(self):
DimensionDataMockHttp.type = "ALLFILTERS"
vlans = self.driver.ex_list_vlans(
location="fake_location",
network_domain="fake_network_domain",
name="fake_name",
ipv4_address="fake_ipv4",
ipv6_address="fake_ipv6",
state="fake_state",
)
self.assertEqual(vlans[0].name, "Primary")
def test_ex_create_vlan(
self,
):
net = self.driver.ex_get_network_domain("8cdfd607-f429-4df6-9352-162cfc0891be")
vlan = self.driver.ex_create_vlan(
network_domain=net,
name="test",
private_ipv4_base_address="10.3.4.0",
private_ipv4_prefix_size="24",
description="test vlan",
)
self.assertEqual(vlan.id, "0e56433f-d808-4669-821d-812769517ff8")
def test_ex_create_vlan_NO_DESCRIPTION(
self,
):
net = self.driver.ex_get_network_domain("8cdfd607-f429-4df6-9352-162cfc0891be")
vlan = self.driver.ex_create_vlan(
network_domain=net,
name="test",
private_ipv4_base_address="10.3.4.0",
private_ipv4_prefix_size="24",
)
self.assertEqual(vlan.id, "0e56433f-d808-4669-821d-812769517ff8")
    def test_ex_get_vlan(self):
        # Fetch a single VLAN by id and verify every parsed field of the
        # fixture response: status, name, IPv4/IPv6 ranges and gateways.
        vlan = self.driver.ex_get_vlan("0e56433f-d808-4669-821d-812769517ff8")
        self.assertEqual(vlan.id, "0e56433f-d808-4669-821d-812769517ff8")
        self.assertEqual(vlan.description, "test2")
        self.assertEqual(vlan.status, "NORMAL")
        self.assertEqual(vlan.name, "Production VLAN")
        self.assertEqual(vlan.private_ipv4_range_address, "10.0.3.0")
        self.assertEqual(vlan.private_ipv4_range_size, 24)
        self.assertEqual(vlan.ipv6_range_size, 64)
        self.assertEqual(vlan.ipv6_range_address, "2607:f480:1111:1153:0:0:0:0")
        self.assertEqual(vlan.ipv4_gateway, "10.0.3.1")
        self.assertEqual(vlan.ipv6_gateway, "2607:f480:1111:1153:0:0:0:1")
def test_ex_wait_for_state(self):
self.driver.ex_wait_for_state(
"NORMAL",
self.driver.ex_get_vlan,
vlan_id="0e56433f-d808-4669-821d-812769517ff8",
poll_interval=0.1,
)
def test_ex_wait_for_state_NODE(self):
self.driver.ex_wait_for_state(
"running",
self.driver.ex_get_node_by_id,
id="e75ead52-692f-4314-8725-c8a4f4d13a87",
poll_interval=0.1,
)
    def test_ex_wait_for_state_FAIL(self):
        # Waiting for a state the fixture never reaches ("starting" while
        # the node stays "running") must time out; the raised exception
        # carries the last observed state as its code and a "timed out"
        # message.
        with self.assertRaises(DimensionDataAPIException) as context:
            self.driver.ex_wait_for_state(
                "starting",
                self.driver.ex_get_node_by_id,
                id="e75ead52-692f-4314-8725-c8a4f4d13a87",
                poll_interval=0.1,
                timeout=0.1,
            )
        self.assertEqual(context.exception.code, "running")
        self.assertTrue("timed out" in context.exception.msg)
def test_ex_update_vlan(self):
vlan = self.driver.ex_get_vlan("0e56433f-d808-4669-821d-812769517ff8")
vlan.name = "new name"
vlan2 = self.driver.ex_update_vlan(vlan)
self.assertEqual(vlan2.name, "new name")
def test_ex_delete_vlan(self):
vlan = self.driver.ex_get_vlan("0e56433f-d808-4669-821d-812769517ff8")
result = self.driver.ex_delete_vlan(vlan)
self.assertTrue(result)
def test_ex_expand_vlan(self):
vlan = self.driver.ex_get_vlan("0e56433f-d808-4669-821d-812769517ff8")
vlan.private_ipv4_range_size = "23"
vlan = self.driver.ex_expand_vlan(vlan)
self.assertEqual(vlan.private_ipv4_range_size, "23")
def test_ex_add_public_ip_block_to_network_domain(self):
net = self.driver.ex_get_network_domain("8cdfd607-f429-4df6-9352-162cfc0891be")
block = self.driver.ex_add_public_ip_block_to_network_domain(net)
self.assertEqual(block.id, "9945dc4a-bdce-11e4-8c14-b8ca3a5d9ef8")
def test_ex_list_public_ip_blocks(self):
net = self.driver.ex_get_network_domain("8cdfd607-f429-4df6-9352-162cfc0891be")
blocks = self.driver.ex_list_public_ip_blocks(net)
self.assertEqual(blocks[0].base_ip, "168.128.4.18")
self.assertEqual(blocks[0].size, "2")
self.assertEqual(blocks[0].id, "9945dc4a-bdce-11e4-8c14-b8ca3a5d9ef8")
self.assertEqual(blocks[0].location.id, "NA9")
self.assertEqual(blocks[0].network_domain.id, net.id)
def test_ex_get_public_ip_block(self):
net = self.driver.ex_get_network_domain("8cdfd607-f429-4df6-9352-162cfc0891be")
block = self.driver.ex_get_public_ip_block("9945dc4a-bdce-11e4-8c14-b8ca3a5d9ef8")
self.assertEqual(block.base_ip, "168.128.4.18")
self.assertEqual(block.size, "2")
self.assertEqual(block.id, "9945dc4a-bdce-11e4-8c14-b8ca3a5d9ef8")
self.assertEqual(block.location.id, "NA9")
self.assertEqual(block.network_domain.id, net.id)
def test_ex_delete_public_ip_block(self):
block = self.driver.ex_get_public_ip_block("9945dc4a-bdce-11e4-8c14-b8ca3a5d9ef8")
result = self.driver.ex_delete_public_ip_block(block)
self.assertTrue(result)
    def test_ex_list_firewall_rules(self):
        # List firewall rules for a network domain and verify the first
        # fixture rule is fully parsed: id, owning domain, name, action,
        # IP version, protocol, and ANY source/destination flags.
        net = self.driver.ex_get_network_domain("8cdfd607-f429-4df6-9352-162cfc0891be")
        rules = self.driver.ex_list_firewall_rules(net)
        self.assertEqual(rules[0].id, "756cba02-b0bc-48f4-aea5-9445870b6148")
        self.assertEqual(rules[0].network_domain.id, "8cdfd607-f429-4df6-9352-162cfc0891be")
        self.assertEqual(rules[0].name, "CCDEFAULT.BlockOutboundMailIPv4")
        self.assertEqual(rules[0].action, "DROP")
        self.assertEqual(rules[0].ip_version, "IPV4")
        self.assertEqual(rules[0].protocol, "TCP")
        self.assertEqual(rules[0].source.ip_address, "ANY")
        self.assertTrue(rules[0].source.any_ip)
        self.assertTrue(rules[0].destination.any_ip)
def test_ex_create_firewall_rule(self):
net = self.driver.ex_get_network_domain("8cdfd607-f429-4df6-9352-162cfc0891be")
rules = self.driver.ex_list_firewall_rules(net)
rule = self.driver.ex_create_firewall_rule(net, rules[0], "FIRST")
self.assertEqual(rule.id, "d0a20f59-77b9-4f28-a63b-e58496b73a6c")
def test_ex_create_firewall_rule_with_specific_source_ip(self):
net = self.driver.ex_get_network_domain("8cdfd607-f429-4df6-9352-162cfc0891be")
rules = self.driver.ex_list_firewall_rules(net)
specific_source_ip_rule = list(filter(lambda x: x.name == "SpecificSourceIP", rules))[0]
rule = self.driver.ex_create_firewall_rule(net, specific_source_ip_rule, "FIRST")
self.assertEqual(rule.id, "d0a20f59-77b9-4f28-a63b-e58496b73a6c")
def test_ex_create_firewall_rule_with_source_ip(self):
net = self.driver.ex_get_network_domain("8cdfd607-f429-4df6-9352-162cfc0891be")
rules = self.driver.ex_list_firewall_rules(net)
specific_source_ip_rule = list(filter(lambda x: x.name == "SpecificSourceIP", rules))[0]
specific_source_ip_rule.source.any_ip = False
specific_source_ip_rule.source.ip_address = "10.0.0.1"
specific_source_ip_rule.source.ip_prefix_size = "15"
rule = self.driver.ex_create_firewall_rule(net, specific_source_ip_rule, "FIRST")
self.assertEqual(rule.id, "d0a20f59-77b9-4f28-a63b-e58496b73a6c")
def test_ex_create_firewall_rule_with_any_ip(self):
net = self.driver.ex_get_network_domain("8cdfd607-f429-4df6-9352-162cfc0891be")
rules = self.driver.ex_list_firewall_rules(net)
specific_source_ip_rule = list(filter(lambda x: x.name == "SpecificSourceIP", rules))[0]
specific_source_ip_rule.source.any_ip = True
rule = self.driver.ex_create_firewall_rule(net, specific_source_ip_rule, "FIRST")
self.assertEqual(rule.id, "d0a20f59-77b9-4f28-a63b-e58496b73a6c")
    def test_ex_create_firewall_rule_ip_prefix_size(self):
        # Build a rule with explicit CIDR-style source/destination
        # (ip_address + ip_prefix_size) instead of address lists or ANY,
        # then create it; only the request path is exercised here, so no
        # assertion on the result is made.
        net = self.driver.ex_get_network_domain("8cdfd607-f429-4df6-9352-162cfc0891be")
        rule = self.driver.ex_list_firewall_rules(net)[0]
        rule.source.address_list_id = None
        rule.source.any_ip = False
        rule.source.ip_address = "10.2.1.1"
        rule.source.ip_prefix_size = "10"
        rule.destination.address_list_id = None
        rule.destination.any_ip = False
        rule.destination.ip_address = "10.0.0.1"
        rule.destination.ip_prefix_size = "20"
        self.driver.ex_create_firewall_rule(net, rule, "LAST")
def test_ex_create_firewall_rule_address_list(self):
net = self.driver.ex_get_network_domain("8cdfd607-f429-4df6-9352-162cfc0891be")
rule = self.driver.ex_list_firewall_rules(net)[0]
rule.source.address_list_id = "12345"
rule.destination.address_list_id = "12345"
self.driver.ex_create_firewall_rule(net, rule, "LAST")
def test_ex_create_firewall_rule_port_list(self):
net = self.driver.ex_get_network_domain("8cdfd607-f429-4df6-9352-162cfc0891be")
rule = self.driver.ex_list_firewall_rules(net)[0]
rule.source.port_list_id = "12345"
rule.destination.port_list_id = "12345"
self.driver.ex_create_firewall_rule(net, rule, "LAST")
def test_ex_create_firewall_rule_port(self):
net = self.driver.ex_get_network_domain("8cdfd607-f429-4df6-9352-162cfc0891be")
rule = self.driver.ex_list_firewall_rules(net)[0]
rule.source.port_list_id = None
rule.source.port_begin = "8000"
rule.source.port_end = "8005"
rule.destination.port_list_id = None
rule.destination.port_begin = "7000"
rule.destination.port_end = "7005"
self.driver.ex_create_firewall_rule(net, rule, "LAST")
    def test_ex_create_firewall_rule_ALL_VALUES(self):
        # Smoke test: re-create every fixture rule in turn, exercising all
        # source/destination permutations the fixture contains.
        net = self.driver.ex_get_network_domain("8cdfd607-f429-4df6-9352-162cfc0891be")
        rules = self.driver.ex_list_firewall_rules(net)
        for rule in rules:
            self.driver.ex_create_firewall_rule(net, rule, "LAST")
def test_ex_create_firewall_rule_WITH_POSITION_RULE(self):
net = self.driver.ex_get_network_domain("8cdfd607-f429-4df6-9352-162cfc0891be")
rules = self.driver.ex_list_firewall_rules(net)
rule = self.driver.ex_create_firewall_rule(net, rules[-2], "BEFORE", rules[-1])
self.assertEqual(rule.id, "d0a20f59-77b9-4f28-a63b-e58496b73a6c")
def test_ex_create_firewall_rule_WITH_POSITION_RULE_STR(self):
net = self.driver.ex_get_network_domain("8cdfd607-f429-4df6-9352-162cfc0891be")
rules = self.driver.ex_list_firewall_rules(net)
rule = self.driver.ex_create_firewall_rule(
net, rules[-2], "BEFORE", "RULE_WITH_SOURCE_AND_DEST"
)
self.assertEqual(rule.id, "d0a20f59-77b9-4f28-a63b-e58496b73a6c")
def test_ex_create_firewall_rule_FAIL_POSITION(self):
net = self.driver.ex_get_network_domain("8cdfd607-f429-4df6-9352-162cfc0891be")
rules = self.driver.ex_list_firewall_rules(net)
with self.assertRaises(ValueError):
self.driver.ex_create_firewall_rule(net, rules[0], "BEFORE")
def test_ex_create_firewall_rule_FAIL_POSITION_WITH_RULE(self):
net = self.driver.ex_get_network_domain("8cdfd607-f429-4df6-9352-162cfc0891be")
rules = self.driver.ex_list_firewall_rules(net)
with self.assertRaises(ValueError):
self.driver.ex_create_firewall_rule(net, rules[0], "LAST", "RULE_WITH_SOURCE_AND_DEST")
def test_ex_get_firewall_rule(self):
net = self.driver.ex_get_network_domain("8cdfd607-f429-4df6-9352-162cfc0891be")
rule = self.driver.ex_get_firewall_rule(net, "d0a20f59-77b9-4f28-a63b-e58496b73a6c")
self.assertEqual(rule.id, "d0a20f59-77b9-4f28-a63b-e58496b73a6c")
def test_ex_set_firewall_rule_state(self):
net = self.driver.ex_get_network_domain("8cdfd607-f429-4df6-9352-162cfc0891be")
rule = self.driver.ex_get_firewall_rule(net, "d0a20f59-77b9-4f28-a63b-e58496b73a6c")
result = self.driver.ex_set_firewall_rule_state(rule, False)
self.assertTrue(result)
def test_ex_delete_firewall_rule(self):
net = self.driver.ex_get_network_domain("8cdfd607-f429-4df6-9352-162cfc0891be")
rule = self.driver.ex_get_firewall_rule(net, "d0a20f59-77b9-4f28-a63b-e58496b73a6c")
result = self.driver.ex_delete_firewall_rule(rule)
self.assertTrue(result)
def test_ex_edit_firewall_rule(self):
net = self.driver.ex_get_network_domain("8cdfd607-f429-4df6-9352-162cfc0891be")
rule = self.driver.ex_get_firewall_rule(net, "d0a20f59-77b9-4f28-a63b-e58496b73a6c")
rule.source.any_ip = True
result = self.driver.ex_edit_firewall_rule(rule=rule, position="LAST")
self.assertTrue(result)
def test_ex_edit_firewall_rule_source_ipaddresslist(self):
net = self.driver.ex_get_network_domain("8cdfd607-f429-4df6-9352-162cfc0891be")
rule = self.driver.ex_get_firewall_rule(net, "d0a20f59-77b9-4f28-a63b-e58496b73a6c")
rule.source.address_list_id = "802abc9f-45a7-4efb-9d5a-810082368222"
rule.source.any_ip = False
rule.source.ip_address = "10.0.0.1"
rule.source.ip_prefix_size = 10
result = self.driver.ex_edit_firewall_rule(rule=rule, position="LAST")
self.assertTrue(result)
def test_ex_edit_firewall_rule_destination_ipaddresslist(self):
net = self.driver.ex_get_network_domain("8cdfd607-f429-4df6-9352-162cfc0891be")
rule = self.driver.ex_get_firewall_rule(net, "d0a20f59-77b9-4f28-a63b-e58496b73a6c")
rule.destination.address_list_id = "802abc9f-45a7-4efb-9d5a-810082368222"
rule.destination.any_ip = False
rule.destination.ip_address = "10.0.0.1"
rule.destination.ip_prefix_size = 10
result = self.driver.ex_edit_firewall_rule(rule=rule, position="LAST")
self.assertTrue(result)
    def test_ex_edit_firewall_rule_destination_ipaddress(self):
        # NOTE(review): despite the "destination" name, this body mutates
        # rule.source.* — the name looks swapped with the sibling
        # "source_ipaddress" test (which mutates the destination). The
        # edit call itself is still exercised either way; confirm intent
        # before renaming, as test names are part of the suite's interface.
        net = self.driver.ex_get_network_domain("8cdfd607-f429-4df6-9352-162cfc0891be")
        rule = self.driver.ex_get_firewall_rule(net, "d0a20f59-77b9-4f28-a63b-e58496b73a6c")
        rule.source.address_list_id = None
        rule.source.any_ip = False
        rule.source.ip_address = "10.0.0.1"
        rule.source.ip_prefix_size = "10"
        result = self.driver.ex_edit_firewall_rule(rule=rule, position="LAST")
        self.assertTrue(result)
    def test_ex_edit_firewall_rule_source_ipaddress(self):
        # NOTE(review): despite the "source" name, this body mutates
        # rule.destination.* — the name looks swapped with the sibling
        # "destination_ipaddress" test. Confirm intent before renaming.
        net = self.driver.ex_get_network_domain("8cdfd607-f429-4df6-9352-162cfc0891be")
        rule = self.driver.ex_get_firewall_rule(net, "d0a20f59-77b9-4f28-a63b-e58496b73a6c")
        rule.destination.address_list_id = None
        rule.destination.any_ip = False
        rule.destination.ip_address = "10.0.0.1"
        rule.destination.ip_prefix_size = "10"
        result = self.driver.ex_edit_firewall_rule(rule=rule, position="LAST")
        self.assertTrue(result)
def test_ex_edit_firewall_rule_with_relative_rule(self):
net = self.driver.ex_get_network_domain("8cdfd607-f429-4df6-9352-162cfc0891be")
rule = self.driver.ex_get_firewall_rule(net, "d0a20f59-77b9-4f28-a63b-e58496b73a6c")
placement_rule = self.driver.ex_list_firewall_rules(network_domain=net)[-1]
result = self.driver.ex_edit_firewall_rule(
rule=rule, position="BEFORE", relative_rule_for_position=placement_rule
)
self.assertTrue(result)
def test_ex_edit_firewall_rule_with_relative_rule_by_name(self):
net = self.driver.ex_get_network_domain("8cdfd607-f429-4df6-9352-162cfc0891be")
rule = self.driver.ex_get_firewall_rule(net, "d0a20f59-77b9-4f28-a63b-e58496b73a6c")
placement_rule = self.driver.ex_list_firewall_rules(network_domain=net)[-1]
result = self.driver.ex_edit_firewall_rule(
rule=rule, position="BEFORE", relative_rule_for_position=placement_rule.name
)
self.assertTrue(result)
def test_ex_edit_firewall_rule_source_portlist(self):
net = self.driver.ex_get_network_domain("8cdfd607-f429-4df6-9352-162cfc0891be")
rule = self.driver.ex_get_firewall_rule(net, "d0a20f59-77b9-4f28-a63b-e58496b73a6c")
rule.source.port_list_id = "802abc9f-45a7-4efb-9d5a-810082368222"
result = self.driver.ex_edit_firewall_rule(rule=rule, position="LAST")
self.assertTrue(result)
def test_ex_edit_firewall_rule_source_port(self):
net = self.driver.ex_get_network_domain("8cdfd607-f429-4df6-9352-162cfc0891be")
rule = self.driver.ex_get_firewall_rule(net, "d0a20f59-77b9-4f28-a63b-e58496b73a6c")
rule.source.port_list_id = None
rule.source.port_begin = "3"
rule.source.port_end = "10"
result = self.driver.ex_edit_firewall_rule(rule=rule, position="LAST")
self.assertTrue(result)
def test_ex_edit_firewall_rule_destination_portlist(self):
net = self.driver.ex_get_network_domain("8cdfd607-f429-4df6-9352-162cfc0891be")
rule = self.driver.ex_get_firewall_rule(net, "d0a20f59-77b9-4f28-a63b-e58496b73a6c")
rule.destination.port_list_id = "802abc9f-45a7-4efb-9d5a-810082368222"
result = self.driver.ex_edit_firewall_rule(rule=rule, position="LAST")
self.assertTrue(result)
def test_ex_edit_firewall_rule_destination_port(self):
net = self.driver.ex_get_network_domain("8cdfd607-f429-4df6-9352-162cfc0891be")
rule = self.driver.ex_get_firewall_rule(net, "d0a20f59-77b9-4f28-a63b-e58496b73a6c")
rule.destination.port_list_id = None
rule.destination.port_begin = "3"
rule.destination.port_end = "10"
result = self.driver.ex_edit_firewall_rule(rule=rule, position="LAST")
self.assertTrue(result)
def test_ex_edit_firewall_rule_invalid_position_fail(self):
net = self.driver.ex_get_network_domain("8cdfd607-f429-4df6-9352-162cfc0891be")
rule = self.driver.ex_get_firewall_rule(net, "d0a20f59-77b9-4f28-a63b-e58496b73a6c")
with self.assertRaises(ValueError):
self.driver.ex_edit_firewall_rule(rule=rule, position="BEFORE")
def test_ex_edit_firewall_rule_invalid_position_relative_rule_fail(self):
net = self.driver.ex_get_network_domain("8cdfd607-f429-4df6-9352-162cfc0891be")
rule = self.driver.ex_get_firewall_rule(net, "d0a20f59-77b9-4f28-a63b-e58496b73a6c")
relative_rule = self.driver.ex_list_firewall_rules(network_domain=net)[-1]
with self.assertRaises(ValueError):
self.driver.ex_edit_firewall_rule(
rule=rule, position="FIRST", relative_rule_for_position=relative_rule
)
def test_ex_create_nat_rule(self):
net = self.driver.ex_get_network_domain("8cdfd607-f429-4df6-9352-162cfc0891be")
rule = self.driver.ex_create_nat_rule(net, "1.2.3.4", "4.3.2.1")
self.assertEqual(rule.id, "d31c2db0-be6b-4d50-8744-9a7a534b5fba")
def test_ex_list_nat_rules(self):
net = self.driver.ex_get_network_domain("8cdfd607-f429-4df6-9352-162cfc0891be")
rules = self.driver.ex_list_nat_rules(net)
self.assertEqual(rules[0].id, "2187a636-7ebb-49a1-a2ff-5d617f496dce")
self.assertEqual(rules[0].internal_ip, "10.0.0.15")
self.assertEqual(rules[0].external_ip, "165.180.12.18")
def test_ex_get_nat_rule(self):
net = self.driver.ex_get_network_domain("8cdfd607-f429-4df6-9352-162cfc0891be")
rule = self.driver.ex_get_nat_rule(net, "2187a636-7ebb-49a1-a2ff-5d617f496dce")
self.assertEqual(rule.id, "2187a636-7ebb-49a1-a2ff-5d617f496dce")
self.assertEqual(rule.internal_ip, "10.0.0.16")
self.assertEqual(rule.external_ip, "165.180.12.19")
def test_ex_delete_nat_rule(self):
net = self.driver.ex_get_network_domain("8cdfd607-f429-4df6-9352-162cfc0891be")
rule = self.driver.ex_get_nat_rule(net, "2187a636-7ebb-49a1-a2ff-5d617f496dce")
result = self.driver.ex_delete_nat_rule(rule)
self.assertTrue(result)
def test_ex_enable_monitoring(self):
node = self.driver.list_nodes()[0]
result = self.driver.ex_enable_monitoring(node, "ADVANCED")
self.assertTrue(result)
def test_ex_disable_monitoring(self):
node = self.driver.list_nodes()[0]
result = self.driver.ex_disable_monitoring(node)
self.assertTrue(result)
def test_ex_change_monitoring_plan(self):
node = self.driver.list_nodes()[0]
result = self.driver.ex_update_monitoring_plan(node, "ESSENTIALS")
self.assertTrue(result)
def test_ex_add_storage_to_node(self):
node = self.driver.list_nodes()[0]
result = self.driver.ex_add_storage_to_node(node, 30, "PERFORMANCE")
self.assertTrue(result)
def test_ex_remove_storage_from_node(self):
node = self.driver.list_nodes()[0]
result = self.driver.ex_remove_storage_from_node(node, 0)
self.assertTrue(result)
def test_ex_change_storage_speed(self):
node = self.driver.list_nodes()[0]
result = self.driver.ex_change_storage_speed(node, 1, "PERFORMANCE")
self.assertTrue(result)
def test_ex_change_storage_size(self):
node = self.driver.list_nodes()[0]
result = self.driver.ex_change_storage_size(node, 1, 100)
self.assertTrue(result)
def test_ex_clone_node_to_image(self):
node = self.driver.list_nodes()[0]
result = self.driver.ex_clone_node_to_image(node, "my image", "a description")
self.assertTrue(result)
    def test_ex_update_node(self):
        # Update a node's name, description, CPU count and RAM (MB) in one
        # call; the mock acknowledges with a truthy result.
        node = self.driver.list_nodes()[0]
        result = self.driver.ex_update_node(node, "my new name", "a description", 2, 4048)
        self.assertTrue(result)
def test_ex_reconfigure_node(self):
node = self.driver.list_nodes()[0]
result = self.driver.ex_reconfigure_node(node, 4, 4, 1, "HIGHPERFORMANCE")
self.assertTrue(result)
def test_ex_get_location_by_id(self):
location = self.driver.ex_get_location_by_id("NA9")
self.assertTrue(location.id, "NA9")
def test_ex_get_location_by_id_NO_LOCATION(self):
location = self.driver.ex_get_location_by_id(None)
self.assertIsNone(location)
def test_ex_get_base_image_by_id(self):
image_id = self.driver.list_images()[0].id
image = self.driver.ex_get_base_image_by_id(image_id)
self.assertEqual(image.extra["OS_type"], "UNIX")
def test_ex_get_customer_image_by_id(self):
image_id = self.driver.ex_list_customer_images()[1].id
image = self.driver.ex_get_customer_image_by_id(image_id)
self.assertEqual(image.extra["OS_type"], "WINDOWS")
def test_ex_get_image_by_id_base_img(self):
image_id = self.driver.list_images()[1].id
image = self.driver.ex_get_base_image_by_id(image_id)
self.assertEqual(image.extra["OS_type"], "WINDOWS")
def test_ex_get_image_by_id_customer_img(self):
image_id = self.driver.ex_list_customer_images()[0].id
image = self.driver.ex_get_customer_image_by_id(image_id)
self.assertEqual(image.extra["OS_type"], "UNIX")
def test_ex_get_image_by_id_customer_FAIL(self):
image_id = "FAKE_IMAGE_ID"
with self.assertRaises(DimensionDataAPIException):
self.driver.ex_get_base_image_by_id(image_id)
def test_ex_create_anti_affinity_rule(self):
node_list = self.driver.list_nodes()
success = self.driver.ex_create_anti_affinity_rule([node_list[0], node_list[1]])
self.assertTrue(success)
def test_ex_create_anti_affinity_rule_TUPLE(self):
node_list = self.driver.list_nodes()
success = self.driver.ex_create_anti_affinity_rule((node_list[0], node_list[1]))
self.assertTrue(success)
def test_ex_create_anti_affinity_rule_TUPLE_STR(self):
node_list = self.driver.list_nodes()
success = self.driver.ex_create_anti_affinity_rule((node_list[0].id, node_list[1].id))
self.assertTrue(success)
def test_ex_create_anti_affinity_rule_FAIL_STR(self):
node_list = "string"
with self.assertRaises(TypeError):
self.driver.ex_create_anti_affinity_rule(node_list)
    def test_ex_create_anti_affinity_rule_FAIL_EXISTING(self):
        # With the "FAIL_EXISTING" fixture selected (rule already exists),
        # the API error must surface as DimensionDataAPIException.
        # NOTE(review): DimensionDataMockHttp.type is class-level state —
        # presumably reset by setUp between tests; confirm.
        node_list = self.driver.list_nodes()
        DimensionDataMockHttp.type = "FAIL_EXISTING"
        with self.assertRaises(DimensionDataAPIException):
            self.driver.ex_create_anti_affinity_rule((node_list[0], node_list[1]))
def test_ex_delete_anti_affinity_rule(self):
net_domain = self.driver.ex_list_network_domains()[0]
rule = self.driver.ex_list_anti_affinity_rules(network_domain=net_domain)[0]
success = self.driver.ex_delete_anti_affinity_rule(rule)
self.assertTrue(success)
def test_ex_delete_anti_affinity_rule_STR(self):
net_domain = self.driver.ex_list_network_domains()[0]
rule = self.driver.ex_list_anti_affinity_rules(network_domain=net_domain)[0]
success = self.driver.ex_delete_anti_affinity_rule(rule.id)
self.assertTrue(success)
def test_ex_delete_anti_affinity_rule_FAIL(self):
net_domain = self.driver.ex_list_network_domains()[0]
rule = self.driver.ex_list_anti_affinity_rules(network_domain=net_domain)[0]
DimensionDataMockHttp.type = "FAIL"
with self.assertRaises(DimensionDataAPIException):
self.driver.ex_delete_anti_affinity_rule(rule)
def test_ex_list_anti_affinity_rules_NETWORK_DOMAIN(self):
net_domain = self.driver.ex_list_network_domains()[0]
rules = self.driver.ex_list_anti_affinity_rules(network_domain=net_domain)
self.assertTrue(isinstance(rules, list))
self.assertEqual(len(rules), 2)
self.assertTrue(isinstance(rules[0].id, str))
self.assertTrue(isinstance(rules[0].node_list, list))
def test_ex_list_anti_affinity_rules_NETWORK(self):
network = self.driver.list_networks()[0]
rules = self.driver.ex_list_anti_affinity_rules(network=network)
self.assertTrue(isinstance(rules, list))
self.assertEqual(len(rules), 2)
self.assertTrue(isinstance(rules[0].id, str))
self.assertTrue(isinstance(rules[0].node_list, list))
def test_ex_list_anti_affinity_rules_NODE(self):
node = self.driver.list_nodes()[0]
rules = self.driver.ex_list_anti_affinity_rules(node=node)
self.assertTrue(isinstance(rules, list))
self.assertEqual(len(rules), 2)
self.assertTrue(isinstance(rules[0].id, str))
self.assertTrue(isinstance(rules[0].node_list, list))
def test_ex_list_anti_affinity_rules_PAGINATED(self):
net_domain = self.driver.ex_list_network_domains()[0]
DimensionDataMockHttp.type = "PAGINATED"
rules = self.driver.ex_list_anti_affinity_rules(network_domain=net_domain)
self.assertTrue(isinstance(rules, list))
self.assertEqual(len(rules), 4)
self.assertTrue(isinstance(rules[0].id, str))
self.assertTrue(isinstance(rules[0].node_list, list))
def test_ex_list_anti_affinity_rules_ALLFILTERS(self):
net_domain = self.driver.ex_list_network_domains()[0]
DimensionDataMockHttp.type = "ALLFILTERS"
rules = self.driver.ex_list_anti_affinity_rules(
network_domain=net_domain, filter_id="FAKE_ID", filter_state="FAKE_STATE"
)
self.assertTrue(isinstance(rules, list))
self.assertEqual(len(rules), 2)
self.assertTrue(isinstance(rules[0].id, str))
self.assertTrue(isinstance(rules[0].node_list, list))
def test_ex_list_anti_affinity_rules_BAD_ARGS(self):
with self.assertRaises(ValueError):
self.driver.ex_list_anti_affinity_rules(
network="fake_network", network_domain="fake_network_domain"
)
    # --- Tag-key CRUD tests. DimensionDataMockHttp.type selects which mock
    # fixture variant the request is routed to. ---
    def test_ex_create_tag_key(self):
        success = self.driver.ex_create_tag_key("MyTestKey")
        self.assertTrue(success)
    # Creation with every optional parameter set.
    def test_ex_create_tag_key_ALLPARAMS(self):
        self.driver.connection._get_orgId()
        DimensionDataMockHttp.type = "ALLPARAMS"
        success = self.driver.ex_create_tag_key(
            "MyTestKey",
            description="Test Key Desc.",
            value_required=False,
            display_on_report=False,
        )
        self.assertTrue(success)
    # A 400 from the API must surface as DimensionDataAPIException.
    def test_ex_create_tag_key_BADREQUEST(self):
        self.driver.connection._get_orgId()
        DimensionDataMockHttp.type = "BADREQUEST"
        with self.assertRaises(DimensionDataAPIException):
            self.driver.ex_create_tag_key("MyTestKey")
    def test_ex_list_tag_keys(self):
        tag_keys = self.driver.ex_list_tag_keys()
        self.assertTrue(isinstance(tag_keys, list))
        self.assertTrue(isinstance(tag_keys[0], DimensionDataTagKey))
        self.assertTrue(isinstance(tag_keys[0].id, str))
    # Exercises every supported list filter; assertions live in the mock.
    def test_ex_list_tag_keys_ALLFILTERS(self):
        self.driver.connection._get_orgId()
        DimensionDataMockHttp.type = "ALLFILTERS"
        self.driver.ex_list_tag_keys(
            id="fake_id",
            name="fake_name",
            value_required=False,
            display_on_report=False,
        )
    def test_ex_get_tag_by_id(self):
        tag = self.driver.ex_get_tag_key_by_id("d047c609-93d7-4bc5-8fc9-732c85840075")
        self.assertTrue(isinstance(tag, DimensionDataTagKey))
    def test_ex_get_tag_by_id_NOEXIST(self):
        self.driver.connection._get_orgId()
        DimensionDataMockHttp.type = "NOEXIST"
        with self.assertRaises(DimensionDataAPIException):
            self.driver.ex_get_tag_key_by_id("d047c609-93d7-4bc5-8fc9-732c85840075")
    # Lookup by name succeeds only when exactly one key matches (SINGLE mock).
    def test_ex_get_tag_by_name(self):
        self.driver.connection._get_orgId()
        DimensionDataMockHttp.type = "SINGLE"
        tag = self.driver.ex_get_tag_key_by_name("LibcloudTest")
        self.assertTrue(isinstance(tag, DimensionDataTagKey))
    # With the default mock the name is ambiguous/absent, so ValueError.
    def test_ex_get_tag_by_name_NOEXIST(self):
        with self.assertRaises(ValueError):
            self.driver.ex_get_tag_key_by_name("LibcloudTest")
    # Modifying only the name...
    def test_ex_modify_tag_key_NAME(self):
        tag_key = self.driver.ex_list_tag_keys()[0]
        DimensionDataMockHttp.type = "NAME"
        success = self.driver.ex_modify_tag_key(tag_key, name="NewName")
        self.assertTrue(success)
    # ...and modifying everything except the name.
    def test_ex_modify_tag_key_NOTNAME(self):
        tag_key = self.driver.ex_list_tag_keys()[0]
        DimensionDataMockHttp.type = "NOTNAME"
        success = self.driver.ex_modify_tag_key(
            tag_key, description="NewDesc", value_required=False, display_on_report=True
        )
        self.assertTrue(success)
    # A no-op modify is rejected by the API.
    def test_ex_modify_tag_key_NOCHANGE(self):
        tag_key = self.driver.ex_list_tag_keys()[0]
        DimensionDataMockHttp.type = "NOCHANGE"
        with self.assertRaises(DimensionDataAPIException):
            self.driver.ex_modify_tag_key(tag_key)
    def test_ex_remove_tag_key(self):
        tag_key = self.driver.ex_list_tag_keys()[0]
        success = self.driver.ex_remove_tag_key(tag_key)
        self.assertTrue(success)
    def test_ex_remove_tag_key_NOEXIST(self):
        tag_key = self.driver.ex_list_tag_keys()[0]
        DimensionDataMockHttp.type = "NOEXIST"
        with self.assertRaises(DimensionDataAPIException):
            self.driver.ex_remove_tag_key(tag_key)
    # --- Applying/removing tags on assets, and listing tags. ---
    def test_ex_apply_tag_to_asset(self):
        node = self.driver.list_nodes()[0]
        success = self.driver.ex_apply_tag_to_asset(node, "TagKeyName", "FakeValue")
        self.assertTrue(success)
    # The tag value is optional.
    def test_ex_apply_tag_to_asset_NOVALUE(self):
        node = self.driver.list_nodes()[0]
        DimensionDataMockHttp.type = "NOVALUE"
        success = self.driver.ex_apply_tag_to_asset(node, "TagKeyName")
        self.assertTrue(success)
    # Unknown tag-key name ("TagKeyNam") is rejected by the API.
    def test_ex_apply_tag_to_asset_NOTAGKEY(self):
        node = self.driver.list_nodes()[0]
        DimensionDataMockHttp.type = "NOTAGKEY"
        with self.assertRaises(DimensionDataAPIException):
            self.driver.ex_apply_tag_to_asset(node, "TagKeyNam")
    # Networks are not taggable assets, so a TypeError is raised client-side.
    def test_ex_apply_tag_to_asset_BADASSETTYPE(self):
        network = self.driver.list_networks()[0]
        DimensionDataMockHttp.type = "NOTAGKEY"
        with self.assertRaises(TypeError):
            self.driver.ex_apply_tag_to_asset(network, "TagKeyNam")
    def test_ex_remove_tag_from_asset(self):
        node = self.driver.list_nodes()[0]
        success = self.driver.ex_remove_tag_from_asset(node, "TagKeyName")
        self.assertTrue(success)
    # Removing a tag that is not applied fails server-side.
    def test_ex_remove_tag_from_asset_NOTAG(self):
        node = self.driver.list_nodes()[0]
        DimensionDataMockHttp.type = "NOTAG"
        with self.assertRaises(DimensionDataAPIException):
            self.driver.ex_remove_tag_from_asset(node, "TagKeyNam")
    def test_ex_list_tags(self):
        tags = self.driver.ex_list_tags()
        self.assertTrue(isinstance(tags, list))
        self.assertTrue(isinstance(tags[0], DimensionDataTag))
        self.assertTrue(len(tags) == 3)
    # Exercises every supported tag-list filter; the mock checks the params.
    def test_ex_list_tags_ALLPARAMS(self):
        self.driver.connection._get_orgId()
        DimensionDataMockHttp.type = "ALLPARAMS"
        tags = self.driver.ex_list_tags(
            asset_id="fake_asset_id",
            asset_type="fake_asset_type",
            location="fake_location",
            tag_key_name="fake_tag_key_name",
            tag_key_id="fake_tag_key_id",
            value="fake_value",
            value_required=False,
            display_on_report=False,
        )
        self.assertTrue(isinstance(tags, list))
        self.assertTrue(isinstance(tags[0], DimensionDataTag))
        self.assertTrue(len(tags) == 3)
    # --- Private driver helpers: _location_to_location_id accepts a
    # NodeLocation object or a plain string and rejects anything else. ---
    def test_priv_location_to_location_id(self):
        location = self.driver.ex_get_location_by_id("NA9")
        self.assertEqual(self.driver._location_to_location_id(location), "NA9")
    def test_priv_location_to_location_id_STR(self):
        self.assertEqual(self.driver._location_to_location_id("NA9"), "NA9")
    def test_priv_location_to_location_id_TYPEERROR(self):
        with self.assertRaises(TypeError):
            self.driver._location_to_location_id([1, 2, 3])
    # _image_needs_auth: OS images and Windows customer images need an admin
    # password; Linux customer images do not. Each case is checked with the
    # image object and with its plain ID string.
    def test_priv_image_needs_auth_os_img(self):
        image = self.driver.list_images()[1]
        self.assertTrue(self.driver._image_needs_auth(image))
    def test_priv_image_needs_auth_os_img_STR(self):
        image = self.driver.list_images()[1].id
        self.assertTrue(self.driver._image_needs_auth(image))
    def test_priv_image_needs_auth_cust_img_windows(self):
        image = self.driver.ex_list_customer_images()[1]
        self.assertTrue(self.driver._image_needs_auth(image))
    def test_priv_image_needs_auth_cust_img_windows_STR(self):
        image = self.driver.ex_list_customer_images()[1].id
        self.assertTrue(self.driver._image_needs_auth(image))
    def test_priv_image_needs_auth_cust_img_linux(self):
        image = self.driver.ex_list_customer_images()[0]
        self.assertTrue(not self.driver._image_needs_auth(image))
    def test_priv_image_needs_auth_cust_img_linux_STR(self):
        image = self.driver.ex_list_customer_images()[0].id
        self.assertTrue(not self.driver._image_needs_auth(image))
    # --- Usage/audit reports: returned as parsed rows; the expected row and
    # column counts come from the CSV fixtures. ---
    def test_summary_usage_report(self):
        report = self.driver.ex_summary_usage_report("2016-06-01", "2016-06-30")
        report_content = report
        self.assertEqual(len(report_content), 13)
        self.assertEqual(len(report_content[0]), 6)
    def test_detailed_usage_report(self):
        report = self.driver.ex_detailed_usage_report("2016-06-01", "2016-06-30")
        report_content = report
        self.assertEqual(len(report_content), 42)
        self.assertEqual(len(report_content[0]), 4)
    def test_audit_log_report(self):
        report = self.driver.ex_audit_log_report("2016-06-01", "2016-06-30")
        report_content = report
        self.assertEqual(len(report_content), 25)
        self.assertEqual(report_content[2][2], "OEC_SYSTEM")
    # --- IP address list read operations. ---
    def test_ex_list_ip_address_list(self):
        net_domain = self.driver.ex_list_network_domains()[0]
        ip_list = self.driver.ex_list_ip_address_list(ex_network_domain=net_domain)
        self.assertTrue(isinstance(ip_list, list))
        self.assertEqual(len(ip_list), 4)
        self.assertTrue(isinstance(ip_list[0].name, str))
        self.assertTrue(isinstance(ip_list[0].description, str))
        self.assertTrue(isinstance(ip_list[0].ip_version, str))
        self.assertTrue(isinstance(ip_list[0].state, str))
        self.assertTrue(isinstance(ip_list[0].create_time, str))
        self.assertTrue(isinstance(ip_list[0].child_ip_address_lists, list))
        # The second fixture entry nests one child list.
        self.assertEqual(len(ip_list[1].child_ip_address_lists), 1)
        self.assertTrue(isinstance(ip_list[1].child_ip_address_lists[0].name, str))
    # Fetch a single list by name; the FILTERBYNAME mock returns one match.
    def test_ex_get_ip_address_list(self):
        net_domain = self.driver.ex_list_network_domains()[0]
        DimensionDataMockHttp.type = "FILTERBYNAME"
        ip_list = self.driver.ex_get_ip_address_list(
            ex_network_domain=net_domain.id,
            ex_ip_address_list_name="Test_IP_Address_List_3",
        )
        self.assertTrue(isinstance(ip_list, list))
        self.assertEqual(len(ip_list), 1)
        self.assertTrue(isinstance(ip_list[0].name, str))
        self.assertTrue(isinstance(ip_list[0].description, str))
        self.assertTrue(isinstance(ip_list[0].ip_version, str))
        self.assertTrue(isinstance(ip_list[0].state, str))
        self.assertTrue(isinstance(ip_list[0].create_time, str))
        # The fixture list contains a single address, a range, and a prefix.
        ips = ip_list[0].ip_address_collection
        self.assertEqual(len(ips), 3)
        self.assertTrue(isinstance(ips[0].begin, str))
        self.assertTrue(isinstance(ips[0].prefix_size, str))
        self.assertTrue(isinstance(ips[2].end, str))
    # Creating without the mandatory keyword arguments must raise TypeError.
    def test_ex_create_ip_address_list_FAIL(self):
        net_domain = self.driver.ex_list_network_domains()[0]
        with self.assertRaises(TypeError):
            self.driver.ex_create_ip_address_list(ex_network_domain=net_domain.id)
    # Create an IP address list passing model objects for the network domain
    # and the child list...
    def test_ex_create_ip_address_list(self):
        name = "Test_IP_Address_List_3"
        description = "Test Description"
        ip_version = "IPV4"
        child_ip_address_list_id = "0291ef78-4059-4bc1-b433-3f6ad698dc41"
        child_ip_address_list = DimensionDataChildIpAddressList(
            id=child_ip_address_list_id, name="test_child_ip_addr_list"
        )
        net_domain = self.driver.ex_list_network_domains()[0]
        # Cover all three address forms: single IP, range, and CIDR prefix.
        ip_address_1 = DimensionDataIpAddress(begin="190.2.2.100")
        ip_address_2 = DimensionDataIpAddress(begin="190.2.2.106", end="190.2.2.108")
        ip_address_3 = DimensionDataIpAddress(begin="190.2.2.0", prefix_size="24")
        ip_address_collection = [ip_address_1, ip_address_2, ip_address_3]
        # Create IP Address List
        success = self.driver.ex_create_ip_address_list(
            ex_network_domain=net_domain,
            name=name,
            ip_version=ip_version,
            description=description,
            ip_address_collection=ip_address_collection,
            child_ip_address_list=child_ip_address_list,
        )
        self.assertTrue(success)
    # ...and the same operation passing plain ID strings instead of objects.
    def test_ex_create_ip_address_list_STR(self):
        name = "Test_IP_Address_List_3"
        description = "Test Description"
        ip_version = "IPV4"
        child_ip_address_list_id = "0291ef78-4059-4bc1-b433-3f6ad698dc41"
        net_domain = self.driver.ex_list_network_domains()[0]
        ip_address_1 = DimensionDataIpAddress(begin="190.2.2.100")
        ip_address_2 = DimensionDataIpAddress(begin="190.2.2.106", end="190.2.2.108")
        ip_address_3 = DimensionDataIpAddress(begin="190.2.2.0", prefix_size="24")
        ip_address_collection = [ip_address_1, ip_address_2, ip_address_3]
        # Create IP Address List
        success = self.driver.ex_create_ip_address_list(
            ex_network_domain=net_domain.id,
            name=name,
            ip_version=ip_version,
            description=description,
            ip_address_collection=ip_address_collection,
            child_ip_address_list=child_ip_address_list_id,
        )
        self.assertTrue(success)
    # Edit an IP address list identified by a full model object...
    def test_ex_edit_ip_address_list(self):
        ip_address_1 = DimensionDataIpAddress(begin="190.2.2.111")
        ip_address_collection = [ip_address_1]
        child_ip_address_list = DimensionDataChildIpAddressList(
            id="2221ef78-4059-4bc1-b433-3f6ad698dc41",
            name="test_child_ip_address_list edited",
        )
        ip_address_list = DimensionDataIpAddressList(
            id="1111ef78-4059-4bc1-b433-3f6ad698d111",
            name="test ip address list edited",
            ip_version="IPv4",
            description="test",
            ip_address_collection=ip_address_collection,
            child_ip_address_lists=child_ip_address_list,
            state="NORMAL",
            create_time="2015-09-29T02:49:45",
        )
        success = self.driver.ex_edit_ip_address_list(
            ex_ip_address_list=ip_address_list,
            description="test ip address list",
            ip_address_collection=ip_address_collection,
            child_ip_address_lists=child_ip_address_list,
        )
        self.assertTrue(success)
    # ...and identified by a plain ID string.
    # NOTE(review): the ID literal below contains an embedded space
    # ("595d- 436e"); presumably harmless to the mock, but worth confirming.
    def test_ex_edit_ip_address_list_STR(self):
        ip_address_1 = DimensionDataIpAddress(begin="190.2.2.111")
        ip_address_collection = [ip_address_1]
        child_ip_address_list = DimensionDataChildIpAddressList(
            id="2221ef78-4059-4bc1-b433-3f6ad698dc41",
            name="test_child_ip_address_list edited",
        )
        success = self.driver.ex_edit_ip_address_list(
            ex_ip_address_list="84e34850-595d- 436e-a885-7cd37edb24a4",
            description="test ip address list",
            ip_address_collection=ip_address_collection,
            child_ip_address_lists=child_ip_address_list,
        )
        self.assertTrue(success)
    # Delete by model object; only the ID field should matter to the driver.
    def test_ex_delete_ip_address_list(self):
        child_ip_address_list = DimensionDataChildIpAddressList(
            id="2221ef78-4059-4bc1-b433-3f6ad698dc41",
            name="test_child_ip_address_list edited",
        )
        ip_address_list = DimensionDataIpAddressList(
            id="1111ef78-4059-4bc1-b433-3f6ad698d111",
            name="test ip address list edited",
            ip_version="IPv4",
            description="test",
            ip_address_collection=None,
            child_ip_address_lists=child_ip_address_list,
            state="NORMAL",
            create_time="2015-09-29T02:49:45",
        )
        success = self.driver.ex_delete_ip_address_list(ex_ip_address_list=ip_address_list)
        self.assertTrue(success)
def test_ex_delete_ip_address_list_STR(self):
success = self.driver.ex_delete_ip_address_list(
ex_ip_address_list="111ef78-4059-4bc1-b433-3f6ad698d111"
)
self.assertTrue(success)
    # --- Port list read operations; shape expectations come from fixtures. ---
    def test_ex_list_portlist(self):
        net_domain = self.driver.ex_list_network_domains()[0]
        portlist = self.driver.ex_list_portlist(ex_network_domain=net_domain)
        self.assertTrue(isinstance(portlist, list))
        self.assertEqual(len(portlist), 3)
        self.assertTrue(isinstance(portlist[0].name, str))
        self.assertTrue(isinstance(portlist[0].description, str))
        self.assertTrue(isinstance(portlist[0].state, str))
        self.assertTrue(isinstance(portlist[0].port_collection, list))
        self.assertTrue(isinstance(portlist[0].port_collection[0].begin, str))
        self.assertTrue(isinstance(portlist[0].port_collection[0].end, str))
        self.assertTrue(isinstance(portlist[0].child_portlist_list, list))
        self.assertTrue(isinstance(portlist[0].child_portlist_list[0].id, str))
        self.assertTrue(isinstance(portlist[0].child_portlist_list[0].name, str))
        self.assertTrue(isinstance(portlist[0].create_time, str))
    # Fetch a single port list via an ID obtained from the listing.
    def test_ex_get_port_list(self):
        net_domain = self.driver.ex_list_network_domains()[0]
        portlist_id = self.driver.ex_list_portlist(ex_network_domain=net_domain)[0].id
        portlist = self.driver.ex_get_portlist(ex_portlist_id=portlist_id)
        self.assertTrue(isinstance(portlist, DimensionDataPortList))
        self.assertTrue(isinstance(portlist.name, str))
        self.assertTrue(isinstance(portlist.description, str))
        self.assertTrue(isinstance(portlist.state, str))
        self.assertTrue(isinstance(portlist.port_collection, list))
        self.assertTrue(isinstance(portlist.port_collection[0].begin, str))
        self.assertTrue(isinstance(portlist.port_collection[0].end, str))
        self.assertTrue(isinstance(portlist.child_portlist_list, list))
        self.assertTrue(isinstance(portlist.child_portlist_list[0].id, str))
        self.assertTrue(isinstance(portlist.child_portlist_list[0].name, str))
        self.assertTrue(isinstance(portlist.create_time, str))
    # Same fetch, taking the ID from a full portlist object first.
    def test_ex_get_portlist_STR(self):
        net_domain = self.driver.ex_list_network_domains()[0]
        portlist = self.driver.ex_list_portlist(ex_network_domain=net_domain)[0]
        port_list = self.driver.ex_get_portlist(ex_portlist_id=portlist.id)
        self.assertTrue(isinstance(port_list, DimensionDataPortList))
        self.assertTrue(isinstance(port_list.name, str))
        self.assertTrue(isinstance(port_list.description, str))
        self.assertTrue(isinstance(port_list.state, str))
        self.assertTrue(isinstance(port_list.port_collection, list))
        self.assertTrue(isinstance(port_list.port_collection[0].begin, str))
        self.assertTrue(isinstance(port_list.port_collection[0].end, str))
        self.assertTrue(isinstance(port_list.child_portlist_list, list))
        self.assertTrue(isinstance(port_list.child_portlist_list[0].id, str))
        self.assertTrue(isinstance(port_list.child_portlist_list[0].name, str))
        self.assertTrue(isinstance(port_list.create_time, str))
def test_ex_create_portlist_NOCHILDPORTLIST(self):
name = "Test_Port_List"
description = "Test Description"
net_domain = self.driver.ex_list_network_domains()[0]
port_1 = DimensionDataPort(begin="8080")
port_2 = DimensionDataIpAddress(begin="8899", end="9023")
port_collection = [port_1, port_2]
# Create IP Address List
success = self.driver.ex_create_portlist(
ex_network_domain=net_domain,
name=name,
description=description,
port_collection=port_collection,
)
self.assertTrue(success)
def test_ex_create_portlist(self):
name = "Test_Port_List"
description = "Test Description"
net_domain = self.driver.ex_list_network_domains()[0]
port_1 = DimensionDataPort(begin="8080")
port_2 = DimensionDataIpAddress(begin="8899", end="9023")
port_collection = [port_1, port_2]
child_port_1 = DimensionDataChildPortList(
id="333174a2-ae74-4658-9e56-50fc90e086cf", name="test port 1"
)
child_port_2 = DimensionDataChildPortList(
id="311174a2-ae74-4658-9e56-50fc90e04444", name="test port 2"
)
child_ports = [child_port_1, child_port_2]
# Create IP Address List
success = self.driver.ex_create_portlist(
ex_network_domain=net_domain,
name=name,
description=description,
port_collection=port_collection,
child_portlist_list=child_ports,
)
self.assertTrue(success)
def test_ex_create_portlist_STR(self):
name = "Test_Port_List"
description = "Test Description"
net_domain = self.driver.ex_list_network_domains()[0]
port_1 = DimensionDataPort(begin="8080")
port_2 = DimensionDataIpAddress(begin="8899", end="9023")
port_collection = [port_1, port_2]
child_port_1 = DimensionDataChildPortList(
id="333174a2-ae74-4658-9e56-50fc90e086cf", name="test port 1"
)
child_port_2 = DimensionDataChildPortList(
id="311174a2-ae74-4658-9e56-50fc90e04444", name="test port 2"
)
child_ports_ids = [child_port_1.id, child_port_2.id]
# Create IP Address List
success = self.driver.ex_create_portlist(
ex_network_domain=net_domain.id,
name=name,
description=description,
port_collection=port_collection,
child_portlist_list=child_ports_ids,
)
self.assertTrue(success)
def test_ex_edit_portlist(self):
net_domain = self.driver.ex_list_network_domains()[0]
portlist = self.driver.ex_list_portlist(net_domain)[0]
description = "Test Description"
port_1 = DimensionDataPort(begin="8080")
port_2 = DimensionDataIpAddress(begin="8899", end="9023")
port_collection = [port_1, port_2]
child_port_1 = DimensionDataChildPortList(
id="333174a2-ae74-4658-9e56-50fc90e086cf", name="test port 1"
)
child_port_2 = DimensionDataChildPortList(
id="311174a2-ae74-4658-9e56-50fc90e04444", name="test port 2"
)
child_ports = [child_port_1.id, child_port_2.id]
# Create IP Address List
success = self.driver.ex_edit_portlist(
ex_portlist=portlist,
description=description,
port_collection=port_collection,
child_portlist_list=child_ports,
)
self.assertTrue(success)
def test_ex_edit_portlist_STR(self):
portlist_id = "484174a2-ae74-4658-9e56-50fc90e086cf"
description = "Test Description"
port_1 = DimensionDataPort(begin="8080")
port_2 = DimensionDataIpAddress(begin="8899", end="9023")
port_collection = [port_1, port_2]
child_port_1 = DimensionDataChildPortList(
id="333174a2-ae74-4658-9e56-50fc90e086cf", name="test port 1"
)
child_port_2 = DimensionDataChildPortList(
id="311174a2-ae74-4658-9e56-50fc90e04444", name="test port 2"
)
child_ports_ids = [child_port_1.id, child_port_2.id]
# Create IP Address List
success = self.driver.ex_edit_portlist(
ex_portlist=portlist_id,
description=description,
port_collection=port_collection,
child_portlist_list=child_ports_ids,
)
self.assertTrue(success)
    # Delete a port list by object...
    def test_ex_delete_portlist(self):
        net_domain = self.driver.ex_list_network_domains()[0]
        portlist = self.driver.ex_list_portlist(net_domain)[0]
        success = self.driver.ex_delete_portlist(ex_portlist=portlist)
        self.assertTrue(success)
    # ...and by plain ID string.
    def test_ex_delete_portlist_STR(self):
        net_domain = self.driver.ex_list_network_domains()[0]
        portlist = self.driver.ex_list_portlist(net_domain)[0]
        success = self.driver.ex_delete_portlist(ex_portlist=portlist.id)
        self.assertTrue(success)
class InvalidRequestError(Exception):
    """Raised by the mock HTTP layer when a request body's root XML tag is
    not the one the handler expected."""

    def __init__(self, tag):
        # Embed the offending tag in the message to make failures debuggable.
        super().__init__(f"Invalid Request - {tag}")
class DimensionDataMockHttp(MockHttp):
    # Canned-response fixtures live under test/compute/fixtures/dimensiondata.
    fixtures = ComputeFileFixtures("dimensiondata")
    # NOTE(review): the three report handlers below return httplib.BAD_REQUEST
    # paired with an OK reason phrase, yet the report tests pass — presumably
    # the driver's report path does not reject this status; confirm intended.
    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_report_usage(
        self, method, url, body, headers
    ):
        body = self.fixtures.load("summary_usage_report.csv")
        return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])
    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_report_usageDetailed(
        self, method, url, body, headers
    ):
        body = self.fixtures.load("detailed_usage_report.csv")
        return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])
    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_auditlog(self, method, url, body, headers):
        body = self.fixtures.load("audit_log.csv")
        return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])
    # myaccount endpoint: the UNAUTHORIZED variant simulates bad credentials;
    # all other variants serve the same account fixture.
    def _oec_0_9_myaccount_UNAUTHORIZED(self, method, url, body, headers):
        return (httplib.UNAUTHORIZED, "", {}, httplib.responses[httplib.UNAUTHORIZED])
    def _oec_0_9_myaccount(self, method, url, body, headers):
        body = self.fixtures.load("oec_0_9_myaccount.xml")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _oec_0_9_myaccount_INPROGRESS(self, method, url, body, headers):
        body = self.fixtures.load("oec_0_9_myaccount.xml")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _oec_0_9_myaccount_PAGINATED(self, method, url, body, headers):
        body = self.fixtures.load("oec_0_9_myaccount.xml")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _oec_0_9_myaccount_ALLFILTERS(self, method, url, body, headers):
        body = self.fixtures.load("oec_0_9_myaccount.xml")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    # Base (OS) image catalogue, with and without disk-speed details.
    def _oec_0_9_base_image(self, method, url, body, headers):
        body = self.fixtures.load("oec_0_9_base_image.xml")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _oec_0_9_base_imageWithDiskSpeed(self, method, url, body, headers):
        body = self.fixtures.load("oec_0_9_base_imageWithDiskSpeed.xml")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    # Legacy (MCP 1.0) server listings: deployed and pending-deploy sets.
    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_deployed(
        self, method, url, body, headers
    ):
        body = self.fixtures.load(
            "oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_deployed.xml"
        )
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_pendingDeploy(
        self, method, url, body, headers
    ):
        body = self.fixtures.load(
            "oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_pendingDeploy.xml"
        )
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_datacenter(self, method, url, body, headers):
        body = self.fixtures.load("oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_datacenter.xml")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    # Server 11 action endpoint: the action is encoded as the query string
    # (e.g. ".../server/11?restart"), so dispatch on the part after "?".
    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11(self, method, url, body, headers):
        body = None
        action = url.split("?")[-1]
        if action == "restart":
            body = self.fixtures.load(
                "oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_restart.xml"
            )
        elif action == "shutdown":
            body = self.fixtures.load(
                "oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_shutdown.xml"
            )
        elif action == "delete":
            body = self.fixtures.load(
                "oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_delete.xml"
            )
        elif action == "start":
            body = self.fixtures.load(
                "oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_start.xml"
            )
        elif action == "poweroff":
            body = self.fixtures.load(
                "oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_poweroff.xml"
            )
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    # INPROGRESS variant of the server-11 action endpoint: serves the
    # "operation in progress" fixtures with a 400 status so the driver's
    # busy/retry handling is exercised.
    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_INPROGRESS(
        self, method, url, body, headers
    ):
        body = None
        action = url.split("?")[-1]
        if action == "restart":
            body = self.fixtures.load(
                "oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_restart_INPROGRESS.xml"
            )
        elif action == "shutdown":
            body = self.fixtures.load(
                "oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_shutdown_INPROGRESS.xml"
            )
        elif action == "delete":
            body = self.fixtures.load(
                "oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_delete_INPROGRESS.xml"
            )
        elif action == "start":
            body = self.fixtures.load(
                "oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_start_INPROGRESS.xml"
            )
        elif action == "poweroff":
            body = self.fixtures.load(
                "oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_11_poweroff_INPROGRESS.xml"
            )
        return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])
    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server(self, method, url, body, headers):
        body = self.fixtures.load("_oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server.xml")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    # Network-with-location endpoint: for POSTs, validate that the request
    # body's root tag is the expected NewNetworkWithLocation element before
    # serving the fixture; anything else is a malformed client request.
    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkWithLocation(
        self, method, url, body, headers
    ):
        if method == "POST":
            request = ET.fromstring(body)
            if request.tag != "{http://oec.api.opsource.net/schemas/network}NewNetworkWithLocation":
                raise InvalidRequestError(request.tag)
        body = self.fixtures.load(
            "oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkWithLocation.xml"
        )
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkWithLocation_NA9(
        self, method, url, body, headers
    ):
        body = self.fixtures.load(
            "oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_networkWithLocation.xml"
        )
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_4bba37be_506f_11e3_b29c_001517c4643e(
        self, method, url, body, headers
    ):
        body = self.fixtures.load(
            "oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_4bba37be_506f_11e3_b29c_001517c4643e.xml"
        )
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    # Disk resize / speed-change endpoints for server e75ead52.
    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_disk_1_changeSize(
        self, method, url, body, headers
    ):
        body = self.fixtures.load(
            "oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_disk_1_changeSize.xml"
        )
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_disk_1_changeSpeed(
        self, method, url, body, headers
    ):
        body = self.fixtures.load(
            "oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_disk_1_changeSpeed.xml"
        )
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    # Disk endpoint for server e75ead52: only the "delete" action (passed as
    # the query string) returns a fixture; other actions fall through with
    # body unchanged.
    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_disk_1(
        self, method, url, body, headers
    ):
        action = url.split("?")[-1]
        if action == "delete":
            body = self.fixtures.load(
                "oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_disk_1.xml"
            )
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    # Server e75ead52 endpoint: GET serves the server detail fixture, POST
    # serves the modification-response fixture.
    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87(
        self, method, url, body, headers
    ):
        if method == "GET":
            body = self.fixtures.load(
                "oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87.xml"
            )
            return (httplib.OK, body, {}, httplib.responses[httplib.OK])
        if method == "POST":
            body = self.fixtures.load(
                "oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_e75ead52_692f_4314_8725_c8a4f4d13a87_POST.xml"
            )
            return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    # Anti-affinity rule creation: default variant succeeds, FAIL_EXISTING
    # serves the failure fixture with a 400 status.
    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_antiAffinityRule(
        self, method, url, body, headers
    ):
        body = self.fixtures.load(
            "oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_antiAffinityRule_create.xml"
        )
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_antiAffinityRule_FAIL_EXISTING(
        self, method, url, body, headers
    ):
        body = self.fixtures.load(
            "oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_antiAffinityRule_create_FAIL.xml"
        )
        return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])
    # Anti-affinity rule deletion: success and FAIL (400) variants.
    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_antiAffinityRule_07e3621a_a920_4a9a_943c_d8021f27f418(
        self, method, url, body, headers
    ):
        body = self.fixtures.load(
            "oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_antiAffinityRule_delete.xml"
        )
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_antiAffinityRule_07e3621a_a920_4a9a_943c_d8021f27f418_FAIL(
        self, method, url, body, headers
    ):
        body = self.fixtures.load(
            "oec_0_9_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_antiAffinityRule_delete_FAIL.xml"
        )
        return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])
    # --- MCP 2.x (caas 2.3) endpoints below. POST handlers validate the
    # request body's root XML tag before serving a fixture. ---
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server(self, method, url, body, headers):
        body = self.fixtures.load("server.xml")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_deleteServer(
        self, method, url, body, headers
    ):
        request = ET.fromstring(body)
        if request.tag != "{urn:didata.com:api:cloud:types}deleteServer":
            raise InvalidRequestError(request.tag)
        body = self.fixtures.load("server_deleteServer.xml")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    # RESOURCE_BUSY variant: a 400 with the busy fixture.
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_deleteServer_INPROGRESS(
        self, method, url, body, headers
    ):
        request = ET.fromstring(body)
        if request.tag != "{urn:didata.com:api:cloud:types}deleteServer":
            raise InvalidRequestError(request.tag)
        body = self.fixtures.load("server_deleteServer_RESOURCEBUSY.xml")
        return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_rebootServer(
        self, method, url, body, headers
    ):
        request = ET.fromstring(body)
        if request.tag != "{urn:didata.com:api:cloud:types}rebootServer":
            raise InvalidRequestError(request.tag)
        body = self.fixtures.load("server_rebootServer.xml")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_rebootServer_INPROGRESS(
        self, method, url, body, headers
    ):
        request = ET.fromstring(body)
        if request.tag != "{urn:didata.com:api:cloud:types}rebootServer":
            raise InvalidRequestError(request.tag)
        body = self.fixtures.load("server_rebootServer_RESOURCEBUSY.xml")
        return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])
    # Server listing endpoint: a datacenterId=NA3 filter gets its own
    # fixture, otherwise the default listing is returned.
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server(
        self, method, url, body, headers
    ):
        if url.endswith("datacenterId=NA3"):
            body = self.fixtures.load("server_server_NA3.xml")
            return (httplib.OK, body, {}, httplib.responses[httplib.OK])
        body = self.fixtures.load("server_server.xml")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    # PAGESIZE50 asserts that the driver sent pageSize=50.
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_PAGESIZE50(
        self, method, url, body, headers
    ):
        if not url.endswith("pageSize=50"):
            raise ValueError("pageSize is not set as expected")
        body = self.fixtures.load("server_server.xml")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_EMPTY(
        self, method, url, body, headers
    ):
        body = self.fixtures.load("server_server_paginated_empty.xml")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    # Pagination variants: page 1 serves the paginated fixture; page 2 serves
    # either an empty page (PAGED_THEN_EMPTY) or the final listing (PAGINATED).
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_PAGED_THEN_EMPTY(
        self, method, url, body, headers
    ):
        if "pageNumber=2" in url:
            body = self.fixtures.load("server_server_paginated_empty.xml")
            return (httplib.OK, body, {}, httplib.responses[httplib.OK])
        else:
            body = self.fixtures.load("server_server_paginated.xml")
            return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_PAGINATED(
        self, method, url, body, headers
    ):
        if "pageNumber=2" in url:
            body = self.fixtures.load("server_server.xml")
            return (httplib.OK, body, {}, httplib.responses[httplib.OK])
        else:
            body = self.fixtures.load("server_server_paginated.xml")
            return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_PAGINATEDEMPTY(
        self, method, url, body, headers
    ):
        body = self.fixtures.load("server_server_paginated_empty.xml")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_ALLFILTERS(
self, method, url, body, headers
):
(_, params) = url.split("?")
parameters = params.split("&")
for parameter in parameters:
(key, value) = parameter.split("=")
if key == "datacenterId":
assert value == "fake_loc"
elif key == "networkId":
assert value == "fake_network"
elif key == "networkDomainId":
assert value == "fake_network_domain"
elif key == "vlanId":
assert value == "fake_vlan"
elif key == "ipv6":
assert value == "fake_ipv6"
elif key == "privateIpv4":
assert value == "fake_ipv4"
elif key == "name":
assert value == "fake_name"
elif key == "state":
assert value == "fake_state"
elif key == "started":
assert value == "True"
elif key == "deployed":
assert value == "True"
elif key == "sourceImageId":
assert value == "fake_image"
else:
raise ValueError("Could not find in url parameters {}:{}".format(key, value))
body = self.fixtures.load("server_server.xml")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_antiAffinityRule(
        self, method, url, body, headers
    ):
        """Mock GET /server/antiAffinityRule: return the list fixture."""
        body = self.fixtures.load("server_antiAffinityRule_list.xml")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_antiAffinityRule_ALLFILTERS(
        self, method, url, body, headers
    ):
        """Mock anti-affinity list validating the id/state/pageSize filters."""
        (_, params) = url.split("?")
        parameters = params.split("&")
        for parameter in parameters:
            (key, value) = parameter.split("=")
            if key == "id":
                assert value == "FAKE_ID"
            elif key == "state":
                assert value == "FAKE_STATE"
            elif key == "pageSize":
                assert value == "250"
            elif key == "networkDomainId":
                # Accepted but its value is deliberately not validated.
                pass
            else:
                raise ValueError("Could not find in url parameters {}:{}".format(key, value))
        body = self.fixtures.load("server_antiAffinityRule_list.xml")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_antiAffinityRule_PAGINATED(
        self, method, url, body, headers
    ):
        """Mock paginated anti-affinity list: page 1 paginated, page 2 final."""
        if "pageNumber=2" in url:
            body = self.fixtures.load("server_antiAffinityRule_list.xml")
            return (httplib.OK, body, {}, httplib.responses[httplib.OK])
        else:
            body = self.fixtures.load("server_antiAffinityRule_list_PAGINATED.xml")
            return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_infrastructure_datacenter(
        self, method, url, body, headers
    ):
        """Mock GET /infrastructure/datacenter: NA9-filtered or full fixture."""
        if url.endswith("id=NA9"):
            body = self.fixtures.load("infrastructure_datacenter_NA9.xml")
            return (httplib.OK, body, {}, httplib.responses[httplib.OK])
        body = self.fixtures.load("infrastructure_datacenter.xml")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_infrastructure_datacenter_ALLFILTERS(
        self, method, url, body, headers
    ):
        """Mock datacenter list used for the all-filters test; same behavior as above."""
        if url.endswith("id=NA9"):
            body = self.fixtures.load("infrastructure_datacenter_NA9.xml")
            return (httplib.OK, body, {}, httplib.responses[httplib.OK])
        body = self.fixtures.load("infrastructure_datacenter.xml")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_updateVmwareTools(
        self, method, url, body, headers
    ):
        """Mock POST /server/updateVmwareTools: check the XML tag, return success."""
        request = ET.fromstring(body)
        if request.tag != "{urn:didata.com:api:cloud:types}updateVmwareTools":
            raise InvalidRequestError(request.tag)
        body = self.fixtures.load("server_updateVmwareTools.xml")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_startServer(
        self, method, url, body, headers
    ):
        """Mock POST /server/startServer: check the XML tag, return success."""
        request = ET.fromstring(body)
        if request.tag != "{urn:didata.com:api:cloud:types}startServer":
            raise InvalidRequestError(request.tag)
        body = self.fixtures.load("server_startServer.xml")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_startServer_INPROGRESS(
        self, method, url, body, headers
    ):
        """Mock startServer while busy: return HTTP 400 with the in-progress body."""
        request = ET.fromstring(body)
        if request.tag != "{urn:didata.com:api:cloud:types}startServer":
            raise InvalidRequestError(request.tag)
        body = self.fixtures.load("server_startServer_INPROGRESS.xml")
        return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_shutdownServer(
        self, method, url, body, headers
    ):
        """Mock POST /server/shutdownServer: check the XML tag, return success."""
        request = ET.fromstring(body)
        if request.tag != "{urn:didata.com:api:cloud:types}shutdownServer":
            raise InvalidRequestError(request.tag)
        body = self.fixtures.load("server_shutdownServer.xml")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_shutdownServer_INPROGRESS(
        self, method, url, body, headers
    ):
        """Mock shutdownServer while busy: return HTTP 400 with the in-progress body."""
        request = ET.fromstring(body)
        if request.tag != "{urn:didata.com:api:cloud:types}shutdownServer":
            raise InvalidRequestError(request.tag)
        body = self.fixtures.load("server_shutdownServer_INPROGRESS.xml")
        return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_resetServer(
        self, method, url, body, headers
    ):
        """Mock POST /server/resetServer: check the XML tag, return success."""
        request = ET.fromstring(body)
        if request.tag != "{urn:didata.com:api:cloud:types}resetServer":
            raise InvalidRequestError(request.tag)
        body = self.fixtures.load("server_resetServer.xml")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_powerOffServer(
        self, method, url, body, headers
    ):
        """Mock POST /server/powerOffServer: check the XML tag, return success."""
        request = ET.fromstring(body)
        if request.tag != "{urn:didata.com:api:cloud:types}powerOffServer":
            raise InvalidRequestError(request.tag)
        body = self.fixtures.load("server_powerOffServer.xml")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_powerOffServer_INPROGRESS(
        self, method, url, body, headers
    ):
        """Mock powerOffServer while busy: return HTTP 400 with the in-progress body."""
        request = ET.fromstring(body)
        if request.tag != "{urn:didata.com:api:cloud:types}powerOffServer":
            raise InvalidRequestError(request.tag)
        body = self.fixtures.load("server_powerOffServer_INPROGRESS.xml")
        return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_11_INPROGRESS(
        self, method, url, body, headers
    ):
        """Mock GET of server id 11 that fails with HTTP 400 while in progress."""
        body = self.fixtures.load("server_GetServer.xml")
        return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_networkDomain(
        self, method, url, body, headers
    ):
        """Mock GET /network/networkDomain: return the list fixture."""
        body = self.fixtures.load("network_networkDomain.xml")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_networkDomain_ALLFILTERS(
        self, method, url, body, headers
    ):
        """Mock network-domain list validating every supported query filter."""
        (_, params) = url.split("?")
        parameters = params.split("&")
        for parameter in parameters:
            (key, value) = parameter.split("=")
            if key == "datacenterId":
                assert value == "fake_location"
            elif key == "type":
                assert value == "fake_plan"
            elif key == "name":
                assert value == "fake_name"
            elif key == "state":
                assert value == "fake_state"
            else:
                raise ValueError("Could not find in url parameters {}:{}".format(key, value))
        body = self.fixtures.load("network_networkDomain.xml")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_vlan(
        self, method, url, body, headers
    ):
        """Mock GET /network/vlan: return the VLAN list fixture."""
        body = self.fixtures.load("network_vlan.xml")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_vlan_ALLFILTERS(
self, method, url, body, headers
):
(_, params) = url.split("?")
parameters = params.split("&")
for parameter in parameters:
(key, value) = parameter.split("=")
if key == "datacenterId":
assert value == "fake_location"
elif key == "networkDomainId":
assert value == "fake_network_domain"
elif key == "ipv6Address":
assert value == "fake_ipv6"
elif key == "privateIpv4Address":
assert value == "fake_ipv4"
elif key == "name":
assert value == "fake_name"
elif key == "state":
assert value == "fake_state"
else:
raise ValueError("Could not find in url parameters {}:{}".format(key, value))
body = self.fixtures.load("network_vlan.xml")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_deployServer(
self, method, url, body, headers
):
request = ET.fromstring(body)
if request.tag != "{urn:didata.com:api:cloud:types}deployServer":
raise InvalidRequestError(request.tag)
# Make sure the we either have a network tag with an IP or networkId
# Or Network info with a primary nic that has privateip or vlanid
network = request.find(fixxpath("network", TYPES_URN))
network_info = request.find(fixxpath("networkInfo", TYPES_URN))
if network is not None:
if network_info is not None:
raise InvalidRequestError("Request has both MCP1 and MCP2 values")
ipv4 = findtext(network, "privateIpv4", TYPES_URN)
networkId = findtext(network, "networkId", TYPES_URN)
if ipv4 is None and networkId is None:
raise InvalidRequestError(
"Invalid request MCP1 requests need privateIpv4 or networkId"
)
elif network_info is not None:
if network is not None:
raise InvalidRequestError("Request has both MCP1 and MCP2 values")
primary_nic = network_info.find(fixxpath("primaryNic", TYPES_URN))
ipv4 = findtext(primary_nic, "privateIpv4", TYPES_URN)
vlanId = findtext(primary_nic, "vlanId", TYPES_URN)
if ipv4 is None and vlanId is None:
raise InvalidRequestError(
"Invalid request MCP2 requests need privateIpv4 or vlanId"
)
else:
raise InvalidRequestError(
"Invalid request, does not have network or network_info in XML"
)
body = self.fixtures.load("server_deployServer.xml")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_server_e75ead52_692f_4314_8725_c8a4f4d13a87(
        self, method, url, body, headers
    ):
        """Mock GET of a single server by id: return its fixture."""
        body = self.fixtures.load("server_server_e75ead52_692f_4314_8725_c8a4f4d13a87.xml")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_deployNetworkDomain(
        self, method, url, body, headers
    ):
        """Mock POST /network/deployNetworkDomain: check the XML tag, return success."""
        request = ET.fromstring(body)
        if request.tag != "{urn:didata.com:api:cloud:types}deployNetworkDomain":
            raise InvalidRequestError(request.tag)
        body = self.fixtures.load("network_deployNetworkDomain.xml")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_networkDomain_8cdfd607_f429_4df6_9352_162cfc0891be(
        self, method, url, body, headers
    ):
        """Mock GET of a single network domain by id: return its fixture."""
        body = self.fixtures.load("network_networkDomain_8cdfd607_f429_4df6_9352_162cfc0891be.xml")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_networkDomain_8cdfd607_f429_4df6_9352_162cfc0891be_ALLFILTERS(
        self, method, url, body, headers
    ):
        """Mock single network-domain fetch for the all-filters test; same fixture."""
        body = self.fixtures.load("network_networkDomain_8cdfd607_f429_4df6_9352_162cfc0891be.xml")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_editNetworkDomain(
        self, method, url, body, headers
    ):
        """Mock POST /network/editNetworkDomain: check the XML tag, return success."""
        request = ET.fromstring(body)
        if request.tag != "{urn:didata.com:api:cloud:types}editNetworkDomain":
            raise InvalidRequestError(request.tag)
        body = self.fixtures.load("network_editNetworkDomain.xml")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_deleteNetworkDomain(
        self, method, url, body, headers
    ):
        """Mock POST /network/deleteNetworkDomain: check the XML tag, return success."""
        request = ET.fromstring(body)
        if request.tag != "{urn:didata.com:api:cloud:types}deleteNetworkDomain":
            raise InvalidRequestError(request.tag)
        body = self.fixtures.load("network_deleteNetworkDomain.xml")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_deployVlan(
        self, method, url, body, headers
    ):
        """Mock POST /network/deployVlan: check the XML tag, return success."""
        request = ET.fromstring(body)
        if request.tag != "{urn:didata.com:api:cloud:types}deployVlan":
            raise InvalidRequestError(request.tag)
        body = self.fixtures.load("network_deployVlan.xml")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_vlan_0e56433f_d808_4669_821d_812769517ff8(
        self, method, url, body, headers
    ):
        """Mock GET of a single VLAN by id: return its fixture."""
        body = self.fixtures.load("network_vlan_0e56433f_d808_4669_821d_812769517ff8.xml")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_editVlan(
        self, method, url, body, headers
    ):
        """Mock POST /network/editVlan: check the XML tag, return success."""
        request = ET.fromstring(body)
        if request.tag != "{urn:didata.com:api:cloud:types}editVlan":
            raise InvalidRequestError(request.tag)
        body = self.fixtures.load("network_editVlan.xml")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_deleteVlan(
        self, method, url, body, headers
    ):
        """Mock POST /network/deleteVlan: check the XML tag, return success."""
        request = ET.fromstring(body)
        if request.tag != "{urn:didata.com:api:cloud:types}deleteVlan":
            raise InvalidRequestError(request.tag)
        body = self.fixtures.load("network_deleteVlan.xml")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_expandVlan(
        self, method, url, body, headers
    ):
        """Mock POST /network/expandVlan: check the XML tag, return success."""
        request = ET.fromstring(body)
        if request.tag != "{urn:didata.com:api:cloud:types}expandVlan":
            raise InvalidRequestError(request.tag)
        body = self.fixtures.load("network_expandVlan.xml")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_addPublicIpBlock(
        self, method, url, body, headers
    ):
        """Mock POST /network/addPublicIpBlock: check the XML tag, return success."""
        request = ET.fromstring(body)
        if request.tag != "{urn:didata.com:api:cloud:types}addPublicIpBlock":
            raise InvalidRequestError(request.tag)
        body = self.fixtures.load("network_addPublicIpBlock.xml")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_publicIpBlock_4487241a_f0ca_11e3_9315_d4bed9b167ba(
        self, method, url, body, headers
    ):
        """Mock GET of a single public IP block by id: return its fixture."""
        body = self.fixtures.load("network_publicIpBlock_4487241a_f0ca_11e3_9315_d4bed9b167ba.xml")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_publicIpBlock(
        self, method, url, body, headers
    ):
        """Mock GET /network/publicIpBlock: return the list fixture."""
        body = self.fixtures.load("network_publicIpBlock.xml")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_publicIpBlock_9945dc4a_bdce_11e4_8c14_b8ca3a5d9ef8(
        self, method, url, body, headers
    ):
        """Mock GET of a single public IP block by id: return its fixture."""
        body = self.fixtures.load("network_publicIpBlock_9945dc4a_bdce_11e4_8c14_b8ca3a5d9ef8.xml")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_removePublicIpBlock(
        self, method, url, body, headers
    ):
        """Mock POST /network/removePublicIpBlock: check the XML tag, return success."""
        request = ET.fromstring(body)
        if request.tag != "{urn:didata.com:api:cloud:types}removePublicIpBlock":
            raise InvalidRequestError(request.tag)
        body = self.fixtures.load("network_removePublicIpBlock.xml")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_firewallRule(
        self, method, url, body, headers
    ):
        """Mock GET /network/firewallRule: return the list fixture."""
        body = self.fixtures.load("network_firewallRule.xml")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_createFirewallRule(
        self, method, url, body, headers
    ):
        """Mock POST /network/createFirewallRule: check the XML tag, return success."""
        request = ET.fromstring(body)
        if request.tag != "{urn:didata.com:api:cloud:types}createFirewallRule":
            raise InvalidRequestError(request.tag)
        body = self.fixtures.load("network_createFirewallRule.xml")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_firewallRule_d0a20f59_77b9_4f28_a63b_e58496b73a6c(
        self, method, url, body, headers
    ):
        """Mock GET of a single firewall rule by id: return its fixture."""
        body = self.fixtures.load("network_firewallRule_d0a20f59_77b9_4f28_a63b_e58496b73a6c.xml")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_editFirewallRule(
        self, method, url, body, headers
    ):
        """Mock POST /network/editFirewallRule: check the XML tag, return success."""
        request = ET.fromstring(body)
        if request.tag != "{urn:didata.com:api:cloud:types}editFirewallRule":
            raise InvalidRequestError(request.tag)
        body = self.fixtures.load("network_editFirewallRule.xml")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_deleteFirewallRule(
        self, method, url, body, headers
    ):
        """Mock POST /network/deleteFirewallRule: check the XML tag, return success."""
        request = ET.fromstring(body)
        if request.tag != "{urn:didata.com:api:cloud:types}deleteFirewallRule":
            raise InvalidRequestError(request.tag)
        body = self.fixtures.load("network_deleteFirewallRule.xml")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_createNatRule(
        self, method, url, body, headers
    ):
        """Mock POST /network/createNatRule: check the XML tag, return success."""
        request = ET.fromstring(body)
        if request.tag != "{urn:didata.com:api:cloud:types}createNatRule":
            raise InvalidRequestError(request.tag)
        body = self.fixtures.load("network_createNatRule.xml")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_natRule(
        self, method, url, body, headers
    ):
        """Mock GET /network/natRule: return the list fixture."""
        body = self.fixtures.load("network_natRule.xml")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_natRule_2187a636_7ebb_49a1_a2ff_5d617f496dce(
        self, method, url, body, headers
    ):
        """Mock GET of a single NAT rule by id: return its fixture."""
        body = self.fixtures.load("network_natRule_2187a636_7ebb_49a1_a2ff_5d617f496dce.xml")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_deleteNatRule(
        self, method, url, body, headers
    ):
        """Mock POST /network/deleteNatRule: check the XML tag, return success."""
        request = ET.fromstring(body)
        if request.tag != "{urn:didata.com:api:cloud:types}deleteNatRule":
            raise InvalidRequestError(request.tag)
        body = self.fixtures.load("network_deleteNatRule.xml")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_addNic(
        self, method, url, body, headers
    ):
        """Mock POST /server/addNic: check the XML tag, return success."""
        request = ET.fromstring(body)
        if request.tag != "{urn:didata.com:api:cloud:types}addNic":
            raise InvalidRequestError(request.tag)
        body = self.fixtures.load("server_addNic.xml")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_removeNic(
        self, method, url, body, headers
    ):
        """Mock POST /server/removeNic: check the XML tag, return success."""
        request = ET.fromstring(body)
        if request.tag != "{urn:didata.com:api:cloud:types}removeNic":
            raise InvalidRequestError(request.tag)
        body = self.fixtures.load("server_removeNic.xml")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_disableServerMonitoring(
        self, method, url, body, headers
    ):
        """Mock POST /server/disableServerMonitoring: check the XML tag, return success."""
        request = ET.fromstring(body)
        if request.tag != "{urn:didata.com:api:cloud:types}disableServerMonitoring":
            raise InvalidRequestError(request.tag)
        body = self.fixtures.load("server_disableServerMonitoring.xml")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_enableServerMonitoring(
        self, method, url, body, headers
    ):
        """Mock POST /server/enableServerMonitoring: check the XML tag, return success."""
        request = ET.fromstring(body)
        if request.tag != "{urn:didata.com:api:cloud:types}enableServerMonitoring":
            raise InvalidRequestError(request.tag)
        body = self.fixtures.load("server_enableServerMonitoring.xml")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_changeServerMonitoringPlan(
        self, method, url, body, headers
    ):
        """Mock POST /server/changeServerMonitoringPlan: check the XML tag, return success."""
        request = ET.fromstring(body)
        if request.tag != "{urn:didata.com:api:cloud:types}changeServerMonitoringPlan":
            raise InvalidRequestError(request.tag)
        body = self.fixtures.load("server_changeServerMonitoringPlan.xml")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_image_osImage(
        self, method, url, body, headers
    ):
        """Mock GET /image/osImage: return the OS image list fixture."""
        body = self.fixtures.load("image_osImage.xml")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_image_osImage_c14b1a46_2428_44c1_9c1a_b20e6418d08c(
        self, method, url, body, headers
    ):
        """Mock GET of a single OS image by id: return its fixture."""
        body = self.fixtures.load("image_osImage_c14b1a46_2428_44c1_9c1a_b20e6418d08c.xml")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_image_osImage_6b4fb0c7_a57b_4f58_b59c_9958f94f971a(
        self, method, url, body, headers
    ):
        """Mock GET of a single OS image by id: return its fixture."""
        body = self.fixtures.load("image_osImage_6b4fb0c7_a57b_4f58_b59c_9958f94f971a.xml")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_image_osImage_5234e5c7_01de_4411_8b6e_baeb8d91cf5d(
        self, method, url, body, headers
    ):
        """Mock OS image lookup that fails: the id is a customer image, not an OS image."""
        body = self.fixtures.load("image_osImage_BAD_REQUEST.xml")
        return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_image_osImage_2ffa36c8_1848_49eb_b4fa_9d908775f68c(
        self, method, url, body, headers
    ):
        """Mock OS image lookup that fails with HTTP 400 (id not an OS image)."""
        body = self.fixtures.load("image_osImage_BAD_REQUEST.xml")
        return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_image_osImage_FAKE_IMAGE_ID(
        self, method, url, body, headers
    ):
        """Mock OS image lookup for a nonexistent id: HTTP 400."""
        body = self.fixtures.load("image_osImage_BAD_REQUEST.xml")
        return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_image_customerImage(
        self, method, url, body, headers
    ):
        """Mock GET /image/customerImage: return the customer image list fixture."""
        body = self.fixtures.load("image_customerImage.xml")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_image_customerImage_5234e5c7_01de_4411_8b6e_baeb8d91cf5d(
        self, method, url, body, headers
    ):
        """Mock GET of a single customer image by id: return its fixture."""
        body = self.fixtures.load("image_customerImage_5234e5c7_01de_4411_8b6e_baeb8d91cf5d.xml")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_image_customerImage_2ffa36c8_1848_49eb_b4fa_9d908775f68c(
        self, method, url, body, headers
    ):
        """Mock GET of a single customer image by id: return its fixture."""
        body = self.fixtures.load("image_customerImage_2ffa36c8_1848_49eb_b4fa_9d908775f68c.xml")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_image_customerImage_FAKE_IMAGE_ID(
        self, method, url, body, headers
    ):
        """Mock customer image lookup for a nonexistent id: HTTP 400."""
        body = self.fixtures.load("image_customerImage_BAD_REQUEST.xml")
        return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_reconfigureServer(
        self, method, url, body, headers
    ):
        """Mock POST /server/reconfigureServer: check the XML tag, return success."""
        request = ET.fromstring(body)
        if request.tag != "{urn:didata.com:api:cloud:types}reconfigureServer":
            raise InvalidRequestError(request.tag)
        body = self.fixtures.load("server_reconfigureServer.xml")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_cleanServer(
        self, method, url, body, headers
    ):
        """Mock POST /server/cleanServer: return the success fixture."""
        body = self.fixtures.load("server_cleanServer.xml")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_addDisk(
        self, method, url, body, headers
    ):
        """Mock POST /server/addDisk: return the success fixture."""
        body = self.fixtures.load("server_addDisk.xml")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_server_removeDisk(
        self, method, url, body, headers
    ):
        """Mock POST /server/removeDisk: return the success fixture."""
        body = self.fixtures.load("server_removeDisk.xml")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_tag_createTagKey(
self, method, url, body, headers
):
request = ET.fromstring(body)
if request.tag != "{urn:didata.com:api:cloud:types}createTagKey":
raise InvalidRequestError(request.tag)
name = findtext(request, "name", TYPES_URN)
description = findtext(request, "description", TYPES_URN)
value_required = findtext(request, "valueRequired", TYPES_URN)
display_on_report = findtext(request, "displayOnReport", TYPES_URN)
if name is None:
raise ValueError("Name must have a value in the request")
if description is not None:
raise ValueError("Default description for a tag should be blank")
if value_required is None or value_required != "true":
raise ValueError("Default valueRequired should be true")
if display_on_report is None or display_on_report != "true":
raise ValueError("Default displayOnReport should be true")
body = self.fixtures.load("tag_createTagKey.xml")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_tag_createTagKey_ALLPARAMS(
        self, method, url, body, headers
    ):
        """Mock createTagKey where every parameter was explicitly supplied."""
        request = ET.fromstring(body)
        if request.tag != "{urn:didata.com:api:cloud:types}createTagKey":
            raise InvalidRequestError(request.tag)
        name = findtext(request, "name", TYPES_URN)
        description = findtext(request, "description", TYPES_URN)
        value_required = findtext(request, "valueRequired", TYPES_URN)
        display_on_report = findtext(request, "displayOnReport", TYPES_URN)
        if name is None:
            raise ValueError("Name must have a value in the request")
        if description is None:
            raise ValueError("Description should have a value")
        if value_required is None or value_required != "false":
            raise ValueError("valueRequired should be false")
        if display_on_report is None or display_on_report != "false":
            raise ValueError("displayOnReport should be false")
        body = self.fixtures.load("tag_createTagKey.xml")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_tag_createTagKey_BADREQUEST(
        self, method, url, body, headers
    ):
        """Mock createTagKey failure: return HTTP 400 with the error fixture."""
        body = self.fixtures.load("tag_createTagKey_BADREQUEST.xml")
        return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_tag_tagKey(self, method, url, body, headers):
        """Mock GET /tag/tagKey: return the tag key list fixture."""
        body = self.fixtures.load("tag_tagKey_list.xml")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_tag_tagKey_SINGLE(
        self, method, url, body, headers
    ):
        """Mock tag key list containing a single entry."""
        body = self.fixtures.load("tag_tagKey_list_SINGLE.xml")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_tag_tagKey_ALLFILTERS(
        self, method, url, body, headers
    ):
        """Mock tag key list validating every supported query filter."""
        (_, params) = url.split("?")
        parameters = params.split("&")
        for parameter in parameters:
            (key, value) = parameter.split("=")
            if key == "id":
                assert value == "fake_id"
            elif key == "name":
                assert value == "fake_name"
            elif key == "valueRequired":
                assert value == "false"
            elif key == "displayOnReport":
                assert value == "false"
            elif key == "pageSize":
                assert value == "250"
            else:
                raise ValueError("Could not find in url parameters {}:{}".format(key, value))
        body = self.fixtures.load("tag_tagKey_list.xml")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_tag_tagKey_d047c609_93d7_4bc5_8fc9_732c85840075(
        self, method, url, body, headers
    ):
        """Mock GET of a single tag key by id: return its fixture."""
        body = self.fixtures.load("tag_tagKey_5ab77f5f_5aa9_426f_8459_4eab34e03d54.xml")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_tag_tagKey_d047c609_93d7_4bc5_8fc9_732c85840075_NOEXIST(
        self, method, url, body, headers
    ):
        """Mock tag key fetch for a nonexistent id: HTTP 400."""
        body = self.fixtures.load("tag_tagKey_5ab77f5f_5aa9_426f_8459_4eab34e03d54_BADREQUEST.xml")
        return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_tag_editTagKey_NAME(
        self, method, url, body, headers
    ):
        """Mock editTagKey where ONLY the name is being changed."""
        request = ET.fromstring(body)
        if request.tag != "{urn:didata.com:api:cloud:types}editTagKey":
            raise InvalidRequestError(request.tag)
        name = findtext(request, "name", TYPES_URN)
        description = findtext(request, "description", TYPES_URN)
        value_required = findtext(request, "valueRequired", TYPES_URN)
        display_on_report = findtext(request, "displayOnReport", TYPES_URN)
        if name is None:
            raise ValueError("Name must have a value in the request")
        if description is not None:
            raise ValueError("Description should be empty")
        if value_required is not None:
            raise ValueError("valueRequired should be empty")
        if display_on_report is not None:
            raise ValueError("displayOnReport should be empty")
        body = self.fixtures.load("tag_editTagKey.xml")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_tag_editTagKey_NOTNAME(
        self, method, url, body, headers
    ):
        """Mock editTagKey where everything EXCEPT the name is being changed."""
        request = ET.fromstring(body)
        if request.tag != "{urn:didata.com:api:cloud:types}editTagKey":
            raise InvalidRequestError(request.tag)
        name = findtext(request, "name", TYPES_URN)
        description = findtext(request, "description", TYPES_URN)
        value_required = findtext(request, "valueRequired", TYPES_URN)
        display_on_report = findtext(request, "displayOnReport", TYPES_URN)
        if name is not None:
            raise ValueError("Name should be empty")
        if description is None:
            raise ValueError("Description should not be empty")
        if value_required is None:
            raise ValueError("valueRequired should not be empty")
        if display_on_report is None:
            raise ValueError("displayOnReport should not be empty")
        body = self.fixtures.load("tag_editTagKey.xml")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_tag_editTagKey_NOCHANGE(
        self, method, url, body, headers
    ):
        """Mock editTagKey with no changes requested: HTTP 400."""
        body = self.fixtures.load("tag_editTagKey_BADREQUEST.xml")
        return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])
def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_tag_deleteTagKey(
self, method, url, body, headers
):
request = ET.fromstring(body)
if request.tag != "{urn:didata.com:api:cloud:types}deleteTagKey":
raise InvalidRequestError(request.tag)
body = self.fixtures.load("tag_deleteTagKey.xml")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_tag_deleteTagKey_NOEXIST(
self, method, url, body, headers
):
body = self.fixtures.load("tag_deleteTagKey_BADREQUEST.xml")
return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])
def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_tag_applyTags(
self, method, url, body, headers
):
request = ET.fromstring(body)
if request.tag != "{urn:didata.com:api:cloud:types}applyTags":
raise InvalidRequestError(request.tag)
asset_type = findtext(request, "assetType", TYPES_URN)
asset_id = findtext(request, "assetId", TYPES_URN)
tag = request.find(fixxpath("tag", TYPES_URN))
tag_key_name = findtext(tag, "tagKeyName", TYPES_URN)
value = findtext(tag, "value", TYPES_URN)
if asset_type is None:
raise ValueError("assetType should not be empty")
if asset_id is None:
raise ValueError("assetId should not be empty")
if tag_key_name is None:
raise ValueError("tagKeyName should not be empty")
if value is None:
raise ValueError("value should not be empty")
body = self.fixtures.load("tag_applyTags.xml")
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_tag_applyTags_NOVALUE(
        self, method, url, body, headers
    ):
        # Variant of the applyTags mock asserting that no tag value was sent
        # (used to test applying a tag without a value).
        request = ET.fromstring(body)
        if request.tag != "{urn:didata.com:api:cloud:types}applyTags":
            raise InvalidRequestError(request.tag)
        asset_type = findtext(request, "assetType", TYPES_URN)
        asset_id = findtext(request, "assetId", TYPES_URN)
        tag = request.find(fixxpath("tag", TYPES_URN))
        tag_key_name = findtext(tag, "tagKeyName", TYPES_URN)
        value = findtext(tag, "value", TYPES_URN)
        if asset_type is None:
            raise ValueError("assetType should not be empty")
        if asset_id is None:
            raise ValueError("assetId should not be empty")
        if tag_key_name is None:
            raise ValueError("tagKeyName should not be empty")
        # Unlike the plain applyTags mock, a value here is an error.
        if value is not None:
            raise ValueError("value should be empty")
        body = self.fixtures.load("tag_applyTags.xml")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_tag_applyTags_NOTAGKEY(
        self, method, url, body, headers
    ):
        # Negative fixture: applyTags with a missing tag key yields 400.
        body = self.fixtures.load("tag_applyTags_BADREQUEST.xml")
        return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_tag_removeTags(
        self, method, url, body, headers
    ):
        # Mock removeTags: only the request element name is validated.
        request = ET.fromstring(body)
        if request.tag != "{urn:didata.com:api:cloud:types}removeTags":
            raise InvalidRequestError(request.tag)
        body = self.fixtures.load("tag_removeTag.xml")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_tag_removeTags_NOTAG(
        self, method, url, body, headers
    ):
        # Negative fixture: removing a tag that is not applied yields 400.
        body = self.fixtures.load("tag_removeTag_BADREQUEST.xml")
        return (httplib.BAD_REQUEST, body, {}, httplib.responses[httplib.OK])
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_tag_tag(self, method, url, body, headers):
        # Mock tag listing: return the canned tag list unconditionally.
        body = self.fixtures.load("tag_tag_list.xml")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_tag_tag_ALLPARAMS(
        self, method, url, body, headers
    ):
        # Verify that every supported query parameter was forwarded with the
        # expected fake value before returning the tag list fixture.
        (_, params) = url.split("?")
        parameters = params.split("&")
        for parameter in parameters:
            (key, value) = parameter.split("=")
            if key == "assetId":
                assert value == "fake_asset_id"
            elif key == "assetType":
                assert value == "fake_asset_type"
            elif key == "valueRequired":
                assert value == "false"
            elif key == "displayOnReport":
                assert value == "false"
            elif key == "pageSize":
                assert value == "250"
            elif key == "datacenterId":
                assert value == "fake_location"
            elif key == "value":
                assert value == "fake_value"
            elif key == "tagKeyName":
                assert value == "fake_tag_key_name"
            elif key == "tagKeyId":
                assert value == "fake_tag_key_id"
            else:
                # Any parameter outside the known set is a test failure.
                raise ValueError("Could not find in url parameters {}:{}".format(key, value))
        body = self.fixtures.load("tag_tag_list.xml")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_ipAddressList(
        self, method, url, body, headers
    ):
        # Mock IP address list endpoint: return the full canned listing.
        body = self.fixtures.load("ip_address_lists.xml")
        return httplib.OK, body, {}, httplib.responses[httplib.OK]
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_ipAddressList_FILTERBYNAME(
        self, method, url, body, headers
    ):
        # Mock IP address list endpoint filtered by name: canned subset.
        body = self.fixtures.load("ip_address_lists_FILTERBYNAME.xml")
        return httplib.OK, body, {}, httplib.responses[httplib.OK]
def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_createIpAddressList(
self, method, url, body, headers
):
request = ET.fromstring(body)
if request.tag != "{urn:didata.com:api:cloud:types}" "createIpAddressList":
raise InvalidRequestError(request.tag)
net_domain = findtext(request, "networkDomainId", TYPES_URN)
if net_domain is None:
raise ValueError("Network Domain should not be empty")
name = findtext(request, "name", TYPES_URN)
if name is None:
raise ValueError("Name should not be empty")
ip_version = findtext(request, "ipVersion", TYPES_URN)
if ip_version is None:
raise ValueError("IP Version should not be empty")
ip_address_col_required = findall(request, "ipAddress", TYPES_URN)
child_ip_address_required = findall(request, "childIpAddressListId", TYPES_URN)
if 0 == len(ip_address_col_required) and 0 == len(child_ip_address_required):
raise ValueError(
"At least one ipAddress element or "
"one childIpAddressListId element must be "
"provided."
)
if ip_address_col_required[0].get("begin") is None:
raise ValueError("IP Address should not be empty")
body = self.fixtures.load("ip_address_list_create.xml")
return httplib.OK, body, {}, httplib.responses[httplib.OK]
def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_editIpAddressList(
self, method, url, body, headers
):
request = ET.fromstring(body)
if request.tag != "{urn:didata.com:api:cloud:types}" "editIpAddressList":
raise InvalidRequestError(request.tag)
ip_address_list = request.get("id")
if ip_address_list is None:
raise ValueError("IpAddressList ID should not be empty")
name = findtext(request, "name", TYPES_URN)
if name is not None:
raise ValueError("Name should not exists in request")
ip_version = findtext(request, "ipVersion", TYPES_URN)
if ip_version is not None:
raise ValueError("IP Version should not exists in request")
ip_address_col_required = findall(request, "ipAddress", TYPES_URN)
child_ip_address_required = findall(request, "childIpAddressListId", TYPES_URN)
if 0 == len(ip_address_col_required) and 0 == len(child_ip_address_required):
raise ValueError(
"At least one ipAddress element or "
"one childIpAddressListId element must be "
"provided."
)
if ip_address_col_required[0].get("begin") is None:
raise ValueError("IP Address should not be empty")
body = self.fixtures.load("ip_address_list_edit.xml")
return httplib.OK, body, {}, httplib.responses[httplib.OK]
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_deleteIpAddressList(
        self, method, url, body, headers
    ):
        # Mock deleteIpAddressList: validate element name and required id.
        request = ET.fromstring(body)
        if request.tag != "{urn:didata.com:api:cloud:types}" "deleteIpAddressList":
            raise InvalidRequestError(request.tag)
        ip_address_list = request.get("id")
        if ip_address_list is None:
            raise ValueError("IpAddressList ID should not be empty")
        body = self.fixtures.load("ip_address_list_delete.xml")
        return httplib.OK, body, {}, httplib.responses[httplib.OK]
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_portList(
        self, method, url, body, headers
    ):
        # Mock port list endpoint: return the canned listing.
        body = self.fixtures.load("port_list_lists.xml")
        return httplib.OK, body, {}, httplib.responses[httplib.OK]
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_portList_c8c92ea3_2da8_4d51_8153_f39bec794d69(
        self, method, url, body, headers
    ):
        # Mock fetch of a single port list by its id (encoded in the URL).
        body = self.fixtures.load("port_list_get.xml")
        return httplib.OK, body, {}, httplib.responses[httplib.OK]
def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_createPortList(
self, method, url, body, headers
):
request = ET.fromstring(body)
if request.tag != "{urn:didata.com:api:cloud:types}" "createPortList":
raise InvalidRequestError(request.tag)
net_domain = findtext(request, "networkDomainId", TYPES_URN)
if net_domain is None:
raise ValueError("Network Domain should not be empty")
ports_required = findall(request, "port", TYPES_URN)
child_port_list_required = findall(request, "childPortListId", TYPES_URN)
if 0 == len(ports_required) and 0 == len(child_port_list_required):
raise ValueError(
"At least one port element or one " "childPortListId element must be provided"
)
if ports_required[0].get("begin") is None:
raise ValueError("PORT begin value should not be empty")
body = self.fixtures.load("port_list_create.xml")
return httplib.OK, body, {}, httplib.responses[httplib.OK]
def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_editPortList(
self, method, url, body, headers
):
request = ET.fromstring(body)
if request.tag != "{urn:didata.com:api:cloud:types}" "editPortList":
raise InvalidRequestError(request.tag)
ports_required = findall(request, "port", TYPES_URN)
child_port_list_required = findall(request, "childPortListId", TYPES_URN)
if 0 == len(ports_required) and 0 == len(child_port_list_required):
raise ValueError(
"At least one port element or one " "childPortListId element must be provided"
)
if ports_required[0].get("begin") is None:
raise ValueError("PORT begin value should not be empty")
body = self.fixtures.load("port_list_edit.xml")
return httplib.OK, body, {}, httplib.responses[httplib.OK]
    def _caas_2_3_8a8f6abc_2745_4d8a_9cbc_8dabe5a7d0e4_network_deletePortList(
        self, method, url, body, headers
    ):
        # Mock deletePortList: validate element name and required id.
        request = ET.fromstring(body)
        if request.tag != "{urn:didata.com:api:cloud:types}" "deletePortList":
            raise InvalidRequestError(request.tag)
        port_list = request.get("id")
        if port_list is None:
            raise ValueError("Port List ID should not be empty")
        # NOTE(review): this reuses the IP-address-list delete fixture rather
        # than a port-list one — looks intentional but confirm.
        body = self.fixtures.load("ip_address_list_delete.xml")
        return httplib.OK, body, {}, httplib.responses[httplib.OK]
if __name__ == "__main__":
    # Propagate the unittest result as the process exit status.
    sys.exit(unittest.main())
| {
"content_hash": "af6527facc7a4e9a45c9c61ce55acfeb",
"timestamp": "",
"source": "github",
"line_count": 3524,
"max_line_length": 125,
"avg_line_length": 43.4199772985244,
"alnum_prop": 0.6300486236373618,
"repo_name": "apache/libcloud",
"id": "c0befbeb1603b0176405eb16296a56c29e3c0160",
"size": "153794",
"binary": false,
"copies": "2",
"ref": "refs/heads/trunk",
"path": "libcloud/test/compute/test_dimensiondata_v2_3.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2155"
},
{
"name": "HTML",
"bytes": "2545"
},
{
"name": "PowerShell",
"bytes": "410"
},
{
"name": "Python",
"bytes": "9105547"
},
{
"name": "Shell",
"bytes": "12994"
}
],
"symlink_target": ""
} |
import sys
import os
import codecs
# Usage: convertToUtf8.py infile [incode] [outfile] [outcode]
# Re-encodes `infile` (default utf-8) into `outfile` (default utf-8); when no
# outfile is given, lines are printed to stdout instead.
incode = "utf-8"
outcode = "utf-8"
outfile = None
if len(sys.argv) < 2:
    raise Exception("usage: convertToUtf8.py infile [incode] [outfile] [outcode]")
if len(sys.argv) > 2:
    incode = sys.argv[2]
if len(sys.argv) > 3:
    outfile = sys.argv[3]
if len(sys.argv) > 4:
    outcode = sys.argv[4]

# Context managers guarantee the streams are flushed and closed even on
# error (the original leaked both file handles).
with codecs.open(sys.argv[1], encoding=incode) as f:
    if outfile is not None:
        with codecs.open(outfile, "w", encoding=outcode) as fout:
            for line in f:
                fout.write(line)
    else:
        for line in f:
            # NOTE(review): `line` keeps its newline, so print() emits an
            # extra blank line after each one — preserved from the original.
            print(line)
| {
"content_hash": "8994231cddb020128343bdace758755c",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 80,
"avg_line_length": 19.51851851851852,
"alnum_prop": 0.6907020872865275,
"repo_name": "wellenvogel/avnav",
"id": "cb583335406a62fe780620d8c4137743ae554053",
"size": "551",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/convertToUtf8.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1132"
},
{
"name": "C",
"bytes": "110"
},
{
"name": "C#",
"bytes": "42879"
},
{
"name": "CSS",
"bytes": "2568"
},
{
"name": "Groovy",
"bytes": "33083"
},
{
"name": "HTML",
"bytes": "51569"
},
{
"name": "Inno Setup",
"bytes": "2303"
},
{
"name": "Java",
"bytes": "1074366"
},
{
"name": "JavaScript",
"bytes": "1324088"
},
{
"name": "Less",
"bytes": "57801"
},
{
"name": "PHP",
"bytes": "4423"
},
{
"name": "Perl",
"bytes": "1707"
},
{
"name": "PowerShell",
"bytes": "7995"
},
{
"name": "Python",
"bytes": "1193211"
},
{
"name": "Rich Text Format",
"bytes": "105"
},
{
"name": "Shell",
"bytes": "45976"
}
],
"symlink_target": ""
} |
import threading
import os
from enum import Enum
from PyQt5.Qt import QApplication, QSettings
from .fslogger import FSLogger
class FSExtensionType(Enum):
    """Classification categories for filesystem entries by extension."""
    TYPE_EXT = 0
    TYPE_FILE = 1
    TYPE_MOVIE = 2
    TYPE_MUSIC = 3
    TYPE_IMAGE = 4
class FSApp(object):
    """Thread-safe application singleton providing QSettings-backed settings."""
    _INST_LOCK = threading.Lock()
    _INSTANCE = None
    # Maps each extension category to the settings key that stores it.
    ExtensionSettings = {FSExtensionType.TYPE_MOVIE: "Movie_extensions",
                         FSExtensionType.TYPE_MUSIC: "Music_extensions",
                         FSExtensionType.TYPE_IMAGE: "Image_extensions"}
    @classmethod
    def get_instance(cls):
        """ Method for getting the only instance """
        # Double-checked locking: take the lock only while no instance exists.
        if cls._INSTANCE is None:
            with cls._INST_LOCK:
                if cls._INSTANCE is None:
                    cls._INSTANCE = FSApp()
        assert cls._INSTANCE is not None
        return cls._INSTANCE
    def __new__(cls, *args, **kwargs):
        """ To make sure there will be only one instance """
        # Second singleton guard: direct FSApp() calls reuse the instance too.
        if not isinstance(cls._INSTANCE, cls):
            cls._INSTANCE = object.__new__(cls, *args, **kwargs)
        return cls._INSTANCE
    def __init__(self):
        self._logger = FSLogger.get_instance()
        # The settings file is resolved relative to the current working dir.
        self._settings_file = os.path.join("./", "config.ini")
        self._logger.info("Settings file: %s", self._settings_file)
    def save_setting(self, name, value):
        """Persist one named setting value to the settings file."""
        self._logger.info("Save setting %s -> %s", name, str(value))
        # NOTE(review): NativeFormat with an explicit .ini path — IniFormat
        # may have been intended; confirm before changing.
        settings = QSettings(self._settings_file, QSettings.NativeFormat)
        settings.setValue(name, value)
    def load_setting(self, name):
        """Load a named setting; returns "" when the key is absent."""
        settings = QSettings(self._settings_file, QSettings.NativeFormat)
        value = settings.value(name, "")
        self._logger.info("Load setting %s -> %s", name, str(value))
        return value
    def extension_types(self):
        """Return the extension categories (keys of ExtensionSettings)."""
        return self.ExtensionSettings.keys()
    def extension_setting_keys(self):
        """Return the settings keys (values of ExtensionSettings)."""
        return self.ExtensionSettings.values()
| {
"content_hash": "7c19489a15208e46bb672d367c3f6f04",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 73,
"avg_line_length": 31.80327868852459,
"alnum_prop": 0.6118556701030928,
"repo_name": "gnyiri/filesystem-analyzer",
"id": "2b91eb0ae5d5119359a1b8cbcdacddfcc9aa0b64",
"size": "1940",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "util/fsapp.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "135"
},
{
"name": "Python",
"bytes": "29130"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function, unicode_literals
from collections import OrderedDict
from grako.util import simplify_list, eval_escapes, warning
from grako.util import re, RE_FLAGS
from grako import grammars
from grako.exceptions import FailedSemantics
from grako.model import ModelBuilderSemantics
class GrakoASTSemantics(object):
    """Semantic actions that flatten trivial AST nodes from the bootstrap parse."""

    def group(self, ast, *args):
        """Collapse a grouped expression to its simplified contents."""
        return simplify_list(ast)

    def element(self, ast, *args):
        """Collapse an element node to its simplified contents."""
        return simplify_list(ast)

    def sequence(self, ast, *args):
        """Collapse a sequence node to its simplified contents."""
        return simplify_list(ast)

    def choice(self, ast, *args):
        """Return the lone alternative simplified, or the list of options as-is."""
        if len(ast) != 1:
            return ast
        return simplify_list(ast[0])
class GrakoSemantics(ModelBuilderSemantics):
    """Semantic actions that build a grammars.Grammar model while parsing."""
    def __init__(self, grammar_name):
        super(GrakoSemantics, self).__init__(
            baseType=grammars.Model,
            types=grammars.Model.classes()
        )
        self.grammar_name = grammar_name
        # Rules are kept in definition order, keyed by rule name.
        self.rules = OrderedDict()
    def token(self, ast, *args):
        # Resolve escape sequences; an empty token is a grammar error.
        token = eval_escapes(ast)
        if not token:
            raise FailedSemantics('empty token')
        return grammars.Token(token)
    def pattern(self, ast, *args):
        # Validate the regular expression before building the model node.
        pattern = ast
        try:
            re.compile(pattern, RE_FLAGS)
        except (TypeError, re.error) as e:
            raise FailedSemantics('regexp error: ' + str(e))
        return grammars.Pattern(pattern)
    def hext(self, ast):
        # Hexadecimal integer literal.
        return int(ast, 16)
    def float(self, ast):
        return float(ast)
    def int(self, ast):
        return int(ast)
    def cut_deprecated(self, ast, *args):
        warning('The use of >> for cut is deprecated. Use the ~ symbol instead.')
        return grammars.Cut()
    def override_single_deprecated(self, ast, *args):
        warning('The use of @ for override is deprecated. Use @: instead')
        return grammars.Override(ast)
    def sequence(self, ast, *args):
        seq = ast.sequence
        assert isinstance(seq, list), str(seq)
        # A one-element sequence collapses to the element itself.
        if len(seq) == 1:
            return seq[0]
        return grammars.Sequence(ast)
    def choice(self, ast, *args):
        # A one-alternative choice collapses to the alternative itself.
        if len(ast) == 1:
            return ast[0]
        return grammars.Choice(ast)
    def new_name(self, name):
        # Raises FailedSemantics when the rule name is already defined.
        if name in self.rules:
            raise FailedSemantics('rule "%s" already defined' % str(name))
        return name
    def known_name(self, name):
        # Raises FailedSemantics when the rule name is not defined yet.
        if name not in self.rules:
            raise FailedSemantics('rule "%s" not yet defined' % str(name))
        return name
    def boolean(self, ast):
        # NOTE(review): eval() on grammar text — appears to expect
        # True/False literals only; confirm the grammar restricts this.
        return eval(ast)
    def rule(self, ast, *args):
        decorators = ast.decorators
        name = ast.name
        exp = ast.exp
        base = ast.base
        params = ast.params
        kwparams = OrderedDict(ast.kwparams) if ast.kwparams else None
        # Redefining without @override raises (new_name is called only when
        # the name is already known); @override requires an existing rule.
        if 'override' not in decorators and name in self.rules:
            self.new_name(name)
        elif 'override' in decorators:
            self.known_name(name)
        if not base:
            rule = grammars.Rule(ast, name, exp, params, kwparams, decorators=decorators)
        else:
            # Based rules must reference a previously defined rule.
            self.known_name(base)
            base_rule = self.rules[base]
            rule = grammars.BasedRule(ast, name, exp, base_rule, params, kwparams, decorators=decorators)
        self.rules[name] = rule
        return rule
    def rule_include(self, ast, *args):
        name = str(ast)
        self.known_name(name)
        rule = self.rules[name]
        return grammars.RuleInclude(rule)
    def grammar(self, ast, *args):
        # Assemble the final Grammar model from the accumulated rules.
        directives = OrderedDict((d.name, d.value) for d in ast.directives)
        keywords = set(ast.keywords or [])
        return grammars.Grammar(
            self.grammar_name,
            list(self.rules.values()),
            directives=directives,
            keywords=keywords
        )
| {
"content_hash": "ab6756e1d49ba4fc9e0a8b4e927ec475",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 105,
"avg_line_length": 29.386363636363637,
"alnum_prop": 0.6004124774426398,
"repo_name": "vmuriart/grako",
"id": "2fec3a280cbd0b9d0615276e92971bec27809be4",
"size": "3879",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "grako/semantics.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "20828"
},
{
"name": "Makefile",
"bytes": "1336"
},
{
"name": "Python",
"bytes": "199771"
},
{
"name": "VimL",
"bytes": "2954"
}
],
"symlink_target": ""
} |
from models import *
from django.contrib import admin
# Make the Package model manageable through the Django admin site.
admin.site.register(Package)
| {
"content_hash": "601df521ad9cc1adbb3bccab60ebf7e7",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 32,
"avg_line_length": 21,
"alnum_prop": 0.8095238095238095,
"repo_name": "proffalken/edison",
"id": "e238ffa7789150f10faac4e571440d91505cd14e",
"size": "314",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "auditorium/admin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "75313"
},
{
"name": "Python",
"bytes": "165001"
},
{
"name": "Ruby",
"bytes": "950"
}
],
"symlink_target": ""
} |
# Read a positive integer and print the list of all of its divisors.
num = int(input("Give me a number: "))
# A candidate divides num exactly when the remainder is zero.
divisorNum = [number for number in range(1, num + 1) if num % number == 0]
print(divisorNum)
"content_hash": "0994f7cd05d3e41c34fce49c13ba549f",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 38,
"avg_line_length": 20.555555555555557,
"alnum_prop": 0.6378378378378379,
"repo_name": "marcmaceira/Python-Exercises",
"id": "c1e57266ca9c3170b1f12307856e7d7a546f3389",
"size": "185",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "04-Divisors.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6057"
}
],
"symlink_target": ""
} |
import unittest
import sys
import os
# Make the package under test importable from the parent directory.
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))

from git import GitTestCase

suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(GitTestCase))

# Consume the pipe so we block until repo creation finishes; a bare
# os.popen() returns immediately and would race setup against the tests.
os.popen(os.path.join(os.path.dirname(__file__), "createrepo.sh")).read()
os.chdir(os.path.join(os.path.dirname(__file__), "repo"))
unittest.TextTestRunner().run(suite)
# Likewise wait for the cleanup script before the interpreter exits.
os.popen(os.path.join(os.path.dirname(__file__), "destroyrepo.sh")).read()
| {
"content_hash": "14bc9d6f31bf37afb455eae30e9771d8",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 67,
"avg_line_length": 31.214285714285715,
"alnum_prop": 0.7276887871853547,
"repo_name": "georgebrock/git-browse",
"id": "2f657d46a41c600d5f7201d098ef4b0ceeafa7b4",
"size": "437",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/__main__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "35264"
},
{
"name": "Shell",
"bytes": "700"
}
],
"symlink_target": ""
} |
from django.conf.urls import url
from djview import views
# URL routes for the djview app; category slugs match word chars and hyphens.
urlpatterns = [
    url(r'^$', views.djview_index, name='djview_index'),
    url(r'^about/', views.djview_about, name='djview_about'),
    url(r'^add_category/', views.add_category, name='add_category'),
    url(r'^category/(?P<category_name_slug>[\w\-]+)/$', views.category,
        name='category'),
    url(r'^category/(?P<category_name_slug>[\w\-]+)/add_page/$',
        views.add_page,
        name='add_page'),
    url(r'^restricted/$', views.restricted, name='restricted'),
    # url(r'^register/$', views.register, name='register'),
    # url(r'^login/$', views.user_login, name='user_login'),
    # url(r'^logout/$', views.user_logout, name="user_logout")
]
| {
"content_hash": "88b352c30f685bc6d1d86923416f5912",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 71,
"avg_line_length": 42.705882352941174,
"alnum_prop": 0.6198347107438017,
"repo_name": "rklabs/djaaks",
"id": "f74c9c9983e5d07467273f00e60a4689ab4f99c6",
"size": "726",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "djview/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "11366"
},
{
"name": "Python",
"bytes": "18966"
}
],
"symlink_target": ""
} |
import six
import io
import unittest
import tempfile
import os
import sys
import fileinput
from six import StringIO
import tse.main
class _TestBase(unittest.TestCase):
    """Shared fixture: writes input to a temp file and runs tse over it."""
    def _getParser(self):
        return tse.main.getargparser()
    def setUp(self):
        self.testfile = None
        self.testfilename = None
    def tearDown(self):
        if self.testfile:
            self.testfile.close()
        if self.testfilename:
            os.unlink(self.testfilename)
        # hack to run test multiple times
        fileinput._state = None
        sys.stdin = sys.__stdin__
        sys.stdout = sys.__stdout__
    def _run(self, args, input, enc=None):
        # Parse args, write `input` to a temp file in encoding `enc`
        # (default: filesystem encoding), run tse, return the globals dict.
        if not enc:
            enc = sys.getfilesystemencoding()
        args = self._getParser().parse_args(args)
        fd, self.testfilename = tempfile.mkstemp()
        self.testfile = io.open(fd, 'w', encoding=enc)
        self.testfile.write(input)
        self.testfile.flush()
        env = tse.main.Env(args.execute, args.statement, args.begin, args.end,
                           args.input_encoding, args.output_encoding, args.module,
                           args.module_star, args.script_file, args.inplace, args.ignore_case,
                           args.field_separator, [self.testfilename])
        return tse.main.run(env)
class TestArgs(_TestBase):
    """Argument-parser behaviour for --statement/--pattern/--action."""
    def testStatement(self):
        result = self._getParser().parse_args(
            ["--statement", "arg1", "arg2",
             "--statement", "arg3", "arg4", "arg5"])
        self.assertEqual(result.statement,
                         [('statement', ["arg1", "arg2"]),
                          ('statement', ["arg3", "arg4", "arg5"])])
    def testStatementError(self):
        # --statement with no arguments must exit via argparse.
        self.assertRaises(SystemExit, self._getParser().parse_args,
                          ["--statement"])
    def testPatternAction(self):
        result = self._getParser().parse_args(
            ["--pattern", "arg1",
             "--action", "arg2", "--action", "arg3"])
        self.assertEqual(result.statement,
                         [('pattern', "arg1"),
                          ('action', "arg2"),
                          ('action', "arg3")])
    def testActionError(self):
        # Both invocations are invalid and should exit via argparse.
        self.assertRaises(SystemExit, self._getParser().parse_args,
                          ["--action", "arg1", ])
        self.assertRaises(SystemExit, self._getParser().parse_args,
                          ["--action", "arg1", "--action", "arg2", ])
class TestExec(_TestBase):
    """End-to-end tests for statement execution options (-b/-e/-x/-s/-p/-a/-m/-f)."""
    def testBegin(self):
        globals = self._run(["-b", "a=100", "b=200"], u"")
        self.assertEqual(globals['a'], 100)
        self.assertEqual(globals['b'], 200)
    def testEnd(self):
        globals = self._run(["-e", "a=100", "b=200"], u"")
        self.assertEqual(globals['a'], 100)
        self.assertEqual(globals['b'], 200)
    def testExecute(self):
        sys.stdout = out = StringIO()
        globals = self._run(["-x", "a=100", "if a:", "{{print(a)"], u"")
        self.assertEqual(sys.stdout.getvalue(), "100\n")
    def testStatement(self):
        globals = self._run(
            ["-b", "lines=[]", "-s", "\\w+", "lines.append(L)"], u"abc\n----\ndef\n")
        self.assertEqual(globals['lines'], ["abc", "def"])
    def testAction(self):
        globals = self._run(
            ["-b", "lines=[]", "-p", "\\w+", "-a", "lines.append(L)"], u"abc\n----\ndef\n")
        self.assertEqual(globals['lines'], ["abc", "def"])
    def testIgnorecase(self):
        # NOTE(review): "-b" "lines=[]" concatenates to "-blines=[]" — looks
        # like a missing comma; confirm argparse handles it as intended.
        globals = self._run(
            ["-i", "-b" "lines=[]", "-p", "a", "-a", "lines.append(L)"], u"abc\nAbc\n123\n")
        self.assertEqual(globals['lines'], ["abc", "Abc"])
    def testModule(self):
        globals = self._run(
            ["-m", "unicodedata, bisect", "-m", "datetime as ddd", "-s", "\\w+"], u"abc\n----\ndef\n")
        import unicodedata
        import datetime
        import bisect
        self.assertEqual(globals['unicodedata'], unicodedata)
        self.assertEqual(globals['ddd'], datetime)
        self.assertEqual(globals['bisect'], bisect)
    def testModuleError(self):
        # A module spec containing a ';' must be rejected.
        self.assertRaises(SystemExit, self._getParser().parse_args,
                          ["-m", "unicodedata;", "-s", "\\w+"])
    def testModuleStar(self):
        globals = self._run(
            ["-ms", "unicodedata, bisect", "-s", "\\w+"], u"abc\n----\ndef\n")
        import unicodedata
        import datetime
        import bisect
        self.assertEqual(globals['name'], unicodedata.name)
        self.assertEqual(globals['bisect'], bisect.bisect)
    def testModuleStarError(self):
        self.assertRaises(SystemExit, self._getParser().parse_args,
                          ["-ms", "unicodedata;", "-s", "\\w+"])
    def testScriptFile(self):
        # -f loads and executes statements from an external script file.
        fd, filename = tempfile.mkstemp()
        testfile = os.fdopen(fd, "w")
        try:
            testfile.write("script_a=100")
            testfile.close()
            globals = self._run(["-s", "\\w+", "-f", filename], u"")
            self.assertEqual(globals['script_a'], 100)
        finally:
            os.unlink(filename)
    def testShell(self):
        # E() runs a shell command and returns its output.
        globals = self._run(["-e", "a=E('echo abc')"], u"")
        self.assertEqual(globals['a'], 'abc\n')
class TestEncoding(_TestBase):
    """Input (-ie) and output (-o) encoding handling."""
    def testInput(self):
        globals = self._run(
            ["-s", ".*", "a=L", "-ie", "euc-jp"], u"\N{HIRAGANA LETTER A}",
            enc='euc-jp')
        self.assertEqual(globals['a'], u"\N{HIRAGANA LETTER A}")
    def testOutput(self):
        sys.stdout = out = StringIO()
        globals = self._run(
            ["-s", ".*", "print(u'\\N{HIRAGANA LETTER I}')",
             "-ie", "euc-jp", "-o", "euc-jp"],
            u"\N{HIRAGANA LETTER I}", enc='euc-jp')
        ret = out.getvalue()[:-1]
        # On Python 3 the captured text is encoded before comparing bytes.
        if six.PY3:
            ret = ret.encode('euc_jp')
        self.assertEqual(ret, u"\N{HIRAGANA LETTER I}".encode('euc-jp'))
class TestInplace(_TestBase):
    """--inplace editing with a backup-file suffix."""
    def testInplace(self):
        self._run(
            ["-s", ".*", "print(u'\N{HIRAGANA LETTER I}')",
             "--inplace", ".bak"],
            u"\N{HIRAGANA LETTER A}")
        # The file is rewritten in place and the original kept as .bak.
        self.assertEqual(open(self.testfilename, 'rb').read(),
                         u"\N{HIRAGANA LETTER I}\n".encode('utf-8'))
        self.assertEqual(open(self.testfilename + '.bak', 'rb').read(),
                         u"\N{HIRAGANA LETTER A}".encode('utf-8'))
        os.unlink(self.testfilename + '.bak')
class TestSeparator(_TestBase):
    """Field separator (-F) behaviour; default splits on whitespace."""
    def testSeparator(self):
        globals = self._run(
            ["-s", ".*", "a=L0", "-F", "\\t"], u" A B C\tD\tE\tF ")
        self.assertEqual(globals['a'], [u" A B C", u"D", u"E", u"F "])
        globals = self._run(
            ["-s", ".*", "a=L0"], u" A B C\tD\tE\t F ")
        self.assertEqual(globals['a'], [u"A", u"B", u"C", u"D", u"E", u"F"])
class TestIndent(_TestBase):
    """The {{ }} block/indent syntax used in one-line statements."""
    def testIndent(self):
        sys.stdout = out = StringIO()
        self._run(["-s", ".*",
                   "for c in L:{{if ord(c) % 2:{{print(c)}}else:{{print(c*2)}}}}"],
                  u"abcdefg")
        ret = out.getvalue()[:-1]
        self.assertEqual(ret, 'a\nbb\nc\ndd\ne\nff\ng')
    def testNewline(self):
        sys.stdout = out = StringIO()
        self._run(["-s", ".*",
                   "if 1:{{print(1){{}}print(2)}}"],
                  u"abcdefg")
        ret = out.getvalue()[:-1]
        self.assertEqual(ret, '1\n2')
    def testMultiline(self):
        # Multi-line blocks must work in -s, -b and -e alike.
        for opt in [["-s", ".*"], ["-b"], ["-e"]]:
            sys.stdout = out = StringIO()
            self._run(opt + ["for i in range(1):{{", "print(i)", "}}"],
                      u"abcdefg")
            ret = out.getvalue()
            self.assertEqual(ret, '0\n')
class TestString(_TestBase):
    """String-literal escaping and backtick command substitution."""
    def testString(self):
        sys.stdout = out = StringIO()
        self._run(["-s", ".*",
                   'print("abcdefg\\\"")'],
                  u"abcdefg")
        ret = out.getvalue()[:-1]
        self.assertEqual(ret, 'abcdefg"')
    def testCommand(self):
        # `cmd` runs a shell command and substitutes its output.
        sys.stdout = out = StringIO()
        self._run(["-s", ".*",
                   'print(`echo 12345`)'],
                  u"abcdefg")
        ret = out.getvalue()[:-1]
        self.assertEqual(ret, '12345\n')
    def testfCommand(self):
        # f`cmd` interpolates variables (here L, the current line).
        sys.stdout = out = StringIO()
        self._run(["-s", ".*",
                   'print(f`echo {L}`)'],
                  u"abcdefg")
        ret = out.getvalue()[:-1]
        self.assertEqual(ret, 'abcdefg\n')
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| {
"content_hash": "7605d4c40be005c3e94eae9371a2fd0d",
"timestamp": "",
"source": "github",
"line_count": 261,
"max_line_length": 102,
"avg_line_length": 32.888888888888886,
"alnum_prop": 0.5010484622553588,
"repo_name": "atsuoishimoto/tse",
"id": "5e4422ac1214825b4dff5d35eb19fadb4200311a",
"size": "8608",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_tse.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "25492"
}
],
"symlink_target": ""
} |
from ..Helpers.types import Types
from ..Helpers.base import dbstore, dbload
from ..Helpers.loop import Loop
from ..Helpers.commands import Dup, Store, Push, Mul, DMalloc, Load, Compare, DBStore, Add, DBLoad, Jnz, Label, Jump, Jz, Sub
class StringCompiler:
    @staticmethod
    def store(commands, data):
        """ Generate instructions that write a string from the stack into heap memory. """
        str_start_pointer = data.var(Types.INT)
        end_str_pointer = data.var(Types.INT)
        # Add 1 to the required memory size for the escape zero (end-of-string marker)
        commands.add(Push, 1)
        commands.add(Add)
        commands.add(Dup)
        # Allocate memory of size = the number on the stack (the string length written there earlier)
        commands.add(DMalloc, 0)
        commands.add(Dup)
        commands.add(Store, str_start_pointer)
        # Hoist the loop invariant - the pointer to the end of the string - into a variable
        commands.add(Add)
        commands.add(Store, end_str_pointer)
        def cycle_body(_counter, b, c):
            # Store all the characters in the allocated memory one by one, in reverse
            # order (they are taken off the stack)
            dbstore(end_str_pointer, _counter, commands, invert=True, value=-2)
        counter = Loop.stack(commands, data, cycle_body, load_counter=False, return_counter=True)
        # Write 0 into the last memory cell - the end-of-string marker
        commands.add(Push, 0)
        dbstore(str_start_pointer, counter, commands)
        # Leave the pointer to the start of the string on the stack for later use
        commands.add(Load, str_start_pointer)
    @staticmethod
    def strlen(commands, data, type):
        """ Generate instructions that compute the length of the string on the stack. """
        str_start_pointer = data.var(Types.INT)
        # Dereference the pointer lying on the stack and store it in a variable
        commands.add(Store, str_start_pointer)
        # Read the string from memory to its end (until a 0 is met), counting characters (the count is left on the stack)
        Loop.data(commands, data, str_start_pointer, memory_type='heap')
    @staticmethod
    def strget(commands, data, type):
        """ Generate instructions that fetch a given character of a string """
        # Add the requested character's offset to the cell number of the string start
        commands.add(Add)
        # Load onto the stack the character addressed by its heap-memory cell number
        commands.add(DBLoad, 0)
    @staticmethod
    def strset(commands, data, type):
        """ Generate instructions that replace a given character of a string """
        # Compute the heap-memory cell that holds the character being replaced
        commands.add(Add)
        # Perform the character replacement
        commands.add(DBStore, 0)
    @staticmethod
    def strsub(commands, data, type):
        """ Generate instructions that extract a substring of a string """
        substr_length = data.var(Types.INT)
        substr_start_pointer = data.var(Types.INT)
        finish_label = data.label()
        # Save the substring length
        commands.add(Store, substr_length)
        commands.add(Add)
        commands.add(Store, substr_start_pointer)
        # Push 0 onto the stack - the end-of-string marker
        commands.add(Push, 0)
        def cycle_body(_counter, a, b):
            commands.add(Load, _counter)
            commands.add(Load, substr_length)
            commands.add(Compare, 5)
            # If a substring of the required length has been read and written - leave the loop
            commands.add(Jnz, finish_label)
            # Load the next substring character from heap memory
            dbload(substr_start_pointer, _counter, commands)
        Loop.data(commands, data, substr_start_pointer, cycle_body, load_counter=False, memory_type='heap')
        commands.add(Label, finish_label)
        # Push the substring length onto the stack, + 1 (for the end-of-string marker - zero)
        commands.add(Load, substr_length)
        StringCompiler.store(commands, data)
    @staticmethod
    def strdup(commands, data, type):
        """ Generate instructions that duplicate a string """
        str_start_pointer = data.var(Types.INT)
        # Dereference the pointer lying on the stack and store it in a variable
        commands.add(Store, str_start_pointer)
        # Push 0 onto the stack - the end-of-string marker
        commands.add(Push, 0)
        def cycle_body(_counter, a, b):
            dbload(str_start_pointer, _counter, commands)
        # Read the string and push it onto the stack
        Loop.data(commands, data, str_start_pointer, cycle_body, memory_type='heap')
        StringCompiler.store(commands, data)
    @staticmethod
    def strcat_first(commands, data, type):
        """ Generate instructions that duplicate the first of the strings being concatenated """
        str_start_pointer = data.var(Types.INT)
        commands.add(Store, str_start_pointer)
        # Push 0 onto the stack - the end-of-string marker
        commands.add(Push, 0)
        def cycle_body(_counter, a, b):
            dbload(str_start_pointer, _counter, commands)
        # Read the string and push it onto the stack
        Loop.data(commands, data, str_start_pointer, cycle_body, memory_type='heap')
    @staticmethod
    def strcat_second(commands, data, type):
        """ Generate instructions that duplicate the second of the two strings being
        concatenated and write it in memory right after the first one.
        """
        str_start_pointer = data.var(Types.INT)
        str_length = data.var(Types.INT)
        commands.add(Store, str_start_pointer)
        commands.add(Store, str_length)
        def cycle_body(_counter, a, b):
            dbload(str_start_pointer, _counter, commands)
        # Read the string and push it onto the stack
        Loop.data(commands, data, str_start_pointer, cycle_body, memory_type='heap')
        commands.add(Load, str_length)
        commands.add(Add)
        StringCompiler.store(commands, data)
    @staticmethod
    def strmake(commands, data):
        """ Generate instructions that create a string of a given length filled
        with a single repeated character.

        Expects the length (and the fill character) on the stack; leaves a
        pointer to the start of the created string on the stack.
        """
        str_start_pointer = data.var(Types.INT)
        str_length = data.var(Types.INT)
        basis_symbol = data.var(Types.CHAR)
        finish_label = data.label()
        commands.add(Dup)
        # Save the string length into a variable
        commands.add(Store, str_length)
        # Allocate memory equal to the given string length +1 (plus the end-of-string marker - 0)
        commands.add(DMalloc, 1)
        commands.add(Store, str_start_pointer)
        commands.add(Store, basis_symbol)
        def cycle_body(_counter, b, c):
            commands.add(Load, _counter)
            commands.add(Load, str_length)
            commands.add(Compare, 5)
            commands.add(Jnz, finish_label)
            commands.add(Load, basis_symbol)
            dbstore(str_start_pointer, _counter, commands)
        counter = Loop.simple(commands, data, cycle_body, return_counter=True)
        # We jump here after writing the required number of characters into the string being created
        commands.add(Label, finish_label)
        # Write a 0 into the last memory cell - this is the end-of-string marker
        commands.add(Push, 0)
        dbstore(str_start_pointer, counter, commands)
        # Push the pointer to the beginning of the created string onto the stack for further use
        commands.add(Load, str_start_pointer)
    @staticmethod
    def strcmp(commands, data, type1, type2):
        """ Generate instructions that compare two strings character by character.

        Leaves 0 on the stack when the strings are equal, otherwise a
        normalized -1|1 comparison result.
        """
        str1_start_pointer = data.var(Types.INT)
        str2_start_pointer = data.var(Types.INT)
        eq_label = data.label()
        not_eq_label = data.label()
        finish_label = data.label()
        commands.add(Store, str1_start_pointer)
        commands.add(Store, str2_start_pointer)
        def cycle_body(_counter, a, continue_label):
            # Load the n-th character of the 1st string
            dbload(str1_start_pointer, _counter, commands)
            # Duplicate it on the stack for a later check (to avoid loading it again)
            commands.add(Dup)
            # Load the n-th character of the 2nd string
            dbload(str2_start_pointer, _counter, commands)
            commands.add(Compare, 1)
            # If the characters differ, jump straight to the not_eq_label section and determine there which one is greater
            # This also works when we have reached the end of one of the strings (some character vs. 0)
            commands.add(Jnz, not_eq_label)
            commands.add(Push, 0)
            # Compare the previously duplicated character (of the 1st string) with 0 - if it is zero, so is the second one,
            # because we only reach this section when both characters are equal
            commands.add(Compare, 0)
            # 0 indicates the end of the string - if it is not 0, continue the loop
            commands.add(Jz, continue_label)
            # We get here when both strings end at the same time - i.e. they are fully equal
            commands.add(Jump, eq_label)
        counter = Loop.simple(commands, data, cycle_body, return_counter=True)
        # Section for fully equal strings: push 0 onto the stack
        commands.add(Label, eq_label)
        commands.add(Push, 0)
        commands.add(Jump, finish_label)
        # Section for unequal strings
        commands.add(Label, not_eq_label)
        # Load only the second character - the first one is already on the stack (see the loop body)
        dbload(str2_start_pointer, counter, commands)
        # Compare the characters with the < operator
        commands.add(Compare, 2)
        # Normalize the comparison result: 0|1 -> -1|1
        commands.add(Push, 2)
        commands.add(Mul)
        commands.add(Push, 1)
        commands.add(Sub)
        commands.add(Label, finish_label)
| {
"content_hash": "212c1ae5de2a9a1ca8dd990bb60ba557",
"timestamp": "",
"source": "github",
"line_count": 242,
"max_line_length": 125,
"avg_line_length": 40.421487603305785,
"alnum_prop": 0.6476180740134941,
"repo_name": "PetukhovVictor/compiler",
"id": "650dee845c8675974a97cb9055511b1c720a0a43",
"size": "12404",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/Compiler/VM/Deep/strings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "16410"
},
{
"name": "Python",
"bytes": "239647"
},
{
"name": "Shell",
"bytes": "109"
}
],
"symlink_target": ""
} |
import urllib2
import xml.etree.ElementTree
from optparse import OptionParser
from subprocess import Popen, PIPE
from sys import stdout, exit
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-H", "--host", action="store", type="string", dest="host", default="xp1app", help="The address of the host (default: xp1app)", metavar="HOST")
parser.add_option("-P", "--port", action="store", type="int", dest="port", default="53500", help="The port to connect to (default: 53500)", metavar="PORT")
(opt, args) = parser.parse_args()
URL = "http://%s:%d/AdapterFramework/ChannelAdminServlet?party=*&service=*&channel=*&action=status" % (opt.host, opt.port)
passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
passman.add_password(None, uri=URL, user="pi_monitor", passwd="Lotos321")
auth_handler = urllib2.HTTPBasicAuthHandler(passman)
opener = urllib2.build_opener(auth_handler)
urllib2.install_opener(opener)
xml_file = urllib2.urlopen(URL)
tree = xml.etree.ElementTree.parse(xml_file)
errors = [elem for elem in tree.getiterator("Channel") if elem.find("ChannelState") != None and elem.find("ChannelState").text == "ERROR"]
print_errors = ["P: %s, S: %s, CN: %s" % (elem.find("Party").text, elem.find("Service").text, elem.find("ChannelName").text) for elem in errors if elem.find("ChannelName").text != "RcvSoapOilTasOILORD"]
num_errors = len(print_errors)
if num_errors == 0:
print "Channels OK"
else:
print "%d channel errors!" % (num_errors)
for x in print_errors:
print x
exit(1)
| {
"content_hash": "e66ff0db7040a66a3c69f39c47f30f5d",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 202,
"avg_line_length": 42.22222222222222,
"alnum_prop": 0.7243421052631579,
"repo_name": "herself/Hscripts",
"id": "eadcb67cbd6190297f49d26d18c928f402069573",
"size": "1729",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nagios/check_sap_chan.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1096"
},
{
"name": "Python",
"bytes": "12782"
},
{
"name": "Shell",
"bytes": "12183"
}
],
"symlink_target": ""
} |
def verbing(s):
    """Return s + 'ing' (or + 'ly' when s already ends in 'ing').

    Strings shorter than 3 characters are returned unchanged.
    """
    if len(s) < 3:
        return s
    return s + ('ly' if s.endswith('ing') else 'ing')
# E. not_bad
# Given a string, find the first appearance of the
# substring 'not' and 'bad'. If the 'bad' follows
# the 'not', replace the whole 'not'...'bad' substring
# with 'good'.
# Return the resulting string.
# So 'This dinner is not that bad!' yields:
# This dinner is good!
def not_bad(s):
    """Replace the span from the first 'not' through the first 'bad' with 'good'.

    Fix: str.find returns -1 when the substring is absent, so the original
    truthiness test misread a match at index 0 (falsy) as "not found" and a
    miss (-1, truthy) as "found". Compare against -1 explicitly.
    """
    n = s.find('not')
    b = s.find('bad')
    if n != -1 and b != -1 and n < b:
        return s[:n] + 'good' + s[b + 3:]
    return s
# F. front_back
# Consider dividing a string into two halves.
# If the length is even, the front and back halves are the same length.
# If the length is odd, we'll say that the extra char goes in the front half.
# e.g. 'abcde', the front half is 'abc', the back half 'de'.
# Given 2 strings, a and b, return a string of the form
# a-front + b-front + a-back + b-back
def front_back(a, b):
    """Interleave the front and back halves of a and b (extra char goes in front).

    Fixes over the original:
    - uses floor division (//) so slicing keeps working under Python 3,
      where '/' yields a float;
    - avoids the odd-length 'a[-0:]' slice, which returned the *whole*
      string for length-1 inputs instead of an empty back half.
    """
    def split_half(s):
        # One split point: front gets the extra character when len(s) is odd.
        cut = (len(s) + 1) // 2
        return s[:cut], s[cut:]
    a_front, a_back = split_half(a)
    b_front, b_back = split_half(b)
    return a_front + b_front + a_back + b_back
# Simple provided test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
if got == expected:
prefix = ' OK '
else:
prefix = ' X '
print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# main() calls the above functions with interesting inputs,
# using the above test() to check if the result is correct or not.
def main():
print 'verbing'
test(verbing('hail'), 'hailing')
test(verbing('swiming'), 'swimingly')
test(verbing('do'), 'do')
print
print 'not_bad'
test(not_bad('This movie is not so bad'), 'This movie is good')
test(not_bad('This dinner is not that bad!'), 'This dinner is good!')
test(not_bad('This tea is not hot'), 'This tea is not hot')
test(not_bad("It's bad yet not"), "It's bad yet not")
print
print 'front_back'
test(front_back('abcd', 'xy'), 'abxcdy')
test(front_back('abcde', 'xyz'), 'abcxydez')
test(front_back('Kitten', 'Donut'), 'KitDontenut')
if __name__ == '__main__':
main()
| {
"content_hash": "ce8b5b3945dbb76c106c3595101489cc",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 77,
"avg_line_length": 28.158536585365855,
"alnum_prop": 0.5941966219142486,
"repo_name": "agcalero/pyquick",
"id": "dba3d1f01cdaaedf7f215eada7c4cc66c5c659f0",
"size": "2820",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "basic/string2.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "DIGITAL Command Language",
"bytes": "191608"
},
{
"name": "HTML",
"bytes": "647778"
},
{
"name": "Python",
"bytes": "51226"
}
],
"symlink_target": ""
} |
from django.urls import re_path
from openstack_dashboard.dashboards.project.containers import views
# Both routes resolve to the same AngularJS-backed index view and share the
# name 'index'; the first pattern optionally captures the container name and
# an arbitrary sub-folder path from the URL.
urlpatterns = [
    re_path(r'^container/((?P<container_name>.+?)/)?'
            '(?P<subfolder_path>.+)?$',
            views.NgIndexView.as_view(), name='index'),
    re_path(r'^$',
            views.NgIndexView.as_view(), name='index')
]
| {
"content_hash": "a70830a29000fa249703e4da505180bf",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 67,
"avg_line_length": 31.272727272727273,
"alnum_prop": 0.6104651162790697,
"repo_name": "openstack/horizon",
"id": "260f7e3b229249b92a58fe25149409e57fea334f",
"size": "1108",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openstack_dashboard/dashboards/project/containers/urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "583449"
},
{
"name": "JavaScript",
"bytes": "2585531"
},
{
"name": "Python",
"bytes": "5370605"
},
{
"name": "SCSS",
"bytes": "133237"
},
{
"name": "Shell",
"bytes": "6526"
}
],
"symlink_target": ""
} |
"""rptk module.format.yamlf module."""
from __future__ import print_function
from __future__ import unicode_literals
import logging
from rptk.format import BaseFormat
import yaml
try:
from yaml import CDumper as Dumper
except ImportError as e: # pragma: no cover
logging.getLogger(__name__).warning("%s: falling back to python dumper", e)
from yaml import Dumper
class YamlFormat(BaseFormat):
"""Renders result object as a YAML document."""
description = "YAML object representation"
content_type = "application/x-yaml"
def format(self, result=None):
"""Render output as YAML."""
self.log_method_enter(method=self.current_method)
super(self.__class__, self).format(result=result)
self.log.debug(msg="creating json output")
try:
output = yaml.dump(result, Dumper=Dumper, indent=4,
explicit_start=True, explicit_end=True,
default_flow_style=False)
except Exception as e:
self.log.error(msg="{}".format(e))
raise e
self.log_method_exit(method=self.current_method)
return output
| {
"content_hash": "fa9ebc352d800c96ae98a50dea8ec934",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 79,
"avg_line_length": 30.92105263157895,
"alnum_prop": 0.6331914893617021,
"repo_name": "wolcomm/rptk",
"id": "d01200b5e37f3f3aa27dc7353eab5c3b6b4c398b",
"size": "1738",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rptk/format/yamlf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "75630"
},
{
"name": "Shell",
"bytes": "768"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: drops Profile.second_candidate and
    # narrows Profile.preferred_candidate to a nullable Clinton/Johnson choice.
    dependencies = [
        ('users', '0007_auto_20160920_0147'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='profile',
            name='second_candidate',
        ),
        migrations.AlterField(
            model_name='profile',
            name='preferred_candidate',
            field=models.CharField(choices=[('Clinton', 'Clinton'), ('Johnson', 'Johnson')], max_length=255, null=True),
        ),
    ]
| {
"content_hash": "1a0736fe38ab674b7dddcc6a23a757ed",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 120,
"avg_line_length": 26.09090909090909,
"alnum_prop": 0.5818815331010453,
"repo_name": "sbuss/voteswap",
"id": "f93f79864ffdc4be64b90d9fdf287d481b2b4e97",
"size": "646",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "users/migrations/0008_auto_20160923_0332.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "200347"
},
{
"name": "HTML",
"bytes": "159385"
},
{
"name": "JavaScript",
"bytes": "120612"
},
{
"name": "Makefile",
"bytes": "1227"
},
{
"name": "Python",
"bytes": "11135152"
},
{
"name": "Shell",
"bytes": "931"
}
],
"symlink_target": ""
} |
from django.core.management.base import BaseCommand
from django.core.management.color import no_style
from optparse import make_option
import sys
import os
# Compatibility shim: ensure the built-in set type exists (added in Python 2.4).
try:
    set
except NameError:
    from sets import Set as set # Python 2.3 fallback
class Command(BaseCommand):
    # Legacy (Django 1.0-era, Python 2) implementation of the ``loaddata``
    # management command: locates the named fixture files and installs them
    # all inside a single database transaction.
    option_list = BaseCommand.option_list + (
        make_option('--verbosity', action='store', dest='verbosity', default='1',
            type='choice', choices=['0', '1', '2'],
            help='Verbosity level; 0=minimal output, 1=normal output, 2=all output'),
    )
    help = 'Installs the named fixture(s) in the database.'
    args = "fixture [fixture ...]"
    def handle(self, *fixture_labels, **options):
        """Load each named fixture; roll back the whole batch on any error."""
        from django.db.models import get_apps
        from django.core import serializers
        from django.db import connection, transaction
        from django.conf import settings
        self.style = no_style()
        verbosity = int(options.get('verbosity', 1))
        show_traceback = options.get('traceback', False)
        # Keep a count of the installed objects and fixtures
        fixture_count = 0
        object_count = 0
        objects_per_fixture = []
        models = set()
        humanize = lambda dirname: dirname and "'%s'" % dirname or 'absolute path'
        # Get a cursor (even though we don't need one yet). This has
        # the side effect of initializing the test database (if
        # it isn't already initialized).
        cursor = connection.cursor()
        # Start transaction management. All fixtures are installed in a
        # single transaction to ensure that all references are resolved.
        transaction.commit_unless_managed()
        transaction.enter_transaction_management()
        transaction.managed(True)
        app_fixtures = [os.path.join(os.path.dirname(app.__file__), 'fixtures') for app in get_apps()]
        for fixture_label in fixture_labels:
            # A label may be "name" (try every format) or "name.format".
            parts = fixture_label.split('.')
            if len(parts) == 1:
                fixture_name = fixture_label
                formats = serializers.get_public_serializer_formats()
            else:
                fixture_name, format = '.'.join(parts[:-1]), parts[-1]
                if format in serializers.get_public_serializer_formats():
                    formats = [format]
                else:
                    formats = []
            if formats:
                if verbosity > 1:
                    print "Loading '%s' fixtures..." % fixture_name
            else:
                sys.stderr.write(
                    self.style.ERROR("Problem installing fixture '%s': %s is not a known serialization format." %
                        (fixture_name, format)))
                transaction.rollback()
                transaction.leave_transaction_management()
                return
            # Search app fixture dirs, then FIXTURE_DIRS, then the raw name.
            if os.path.isabs(fixture_name):
                fixture_dirs = [fixture_name]
            else:
                fixture_dirs = app_fixtures + list(settings.FIXTURE_DIRS) + ['']
            for fixture_dir in fixture_dirs:
                if verbosity > 1:
                    print "Checking %s for fixtures..." % humanize(fixture_dir)
                label_found = False
                for format in formats:
                    serializer = serializers.get_serializer(format)
                    if verbosity > 1:
                        print "Trying %s for %s fixture '%s'..." % \
                            (humanize(fixture_dir), format, fixture_name)
                    try:
                        full_path = os.path.join(fixture_dir, '.'.join([fixture_name, format]))
                        fixture = open(full_path, 'r')
                        if label_found:
                            # Two files match the same label in one dir: ambiguous.
                            fixture.close()
                            print self.style.ERROR("Multiple fixtures named '%s' in %s. Aborting." %
                                (fixture_name, humanize(fixture_dir)))
                            transaction.rollback()
                            transaction.leave_transaction_management()
                            return
                        else:
                            fixture_count += 1
                            objects_per_fixture.append(0)
                            if verbosity > 0:
                                print "Installing %s fixture '%s' from %s." % \
                                    (format, fixture_name, humanize(fixture_dir))
                            try:
                                objects = serializers.deserialize(format, fixture)
                                for obj in objects:
                                    object_count += 1
                                    objects_per_fixture[-1] += 1
                                    models.add(obj.object.__class__)
                                    obj.save()
                                label_found = True
                            except (SystemExit, KeyboardInterrupt):
                                raise
                            except Exception:
                                import traceback
                                fixture.close()
                                transaction.rollback()
                                transaction.leave_transaction_management()
                                if show_traceback:
                                    import traceback
                                    traceback.print_exc()
                                else:
                                    sys.stderr.write(
                                        self.style.ERROR("Problem installing fixture '%s': %s\n" %
                                            (full_path, traceback.format_exc())))
                                return
                            fixture.close()
                    # NOTE(review): bare except - any open() failure is treated
                    # as "fixture not present here", but it also hides real
                    # errors (e.g. permission problems).
                    except:
                        if verbosity > 1:
                            print "No %s fixture '%s' in %s." % \
                                (format, fixture_name, humanize(fixture_dir))
        # If any of the fixtures we loaded contain 0 objects, assume that an
        # error was encountered during fixture loading.
        if 0 in objects_per_fixture:
            sys.stderr.write(
                self.style.ERROR("No fixture data found for '%s'. (File format may be invalid.)" %
                    (fixture_name)))
            transaction.rollback()
            transaction.leave_transaction_management()
            return
        # If we found even one object in a fixture, we need to reset the
        # database sequences.
        if object_count > 0:
            sequence_sql = connection.ops.sequence_reset_sql(self.style, models)
            if sequence_sql:
                if verbosity > 1:
                    print "Resetting sequences"
                for line in sequence_sql:
                    cursor.execute(line)
        transaction.commit()
        transaction.leave_transaction_management()
        if object_count == 0:
            if verbosity > 1:
                print "No fixtures found."
        else:
            if verbosity > 0:
                print "Installed %d object(s) from %d fixture(s)" % (object_count, fixture_count)
| {
"content_hash": "a000b6a071efbbd1fab0e8dd12a0b69a",
"timestamp": "",
"source": "github",
"line_count": 164,
"max_line_length": 113,
"avg_line_length": 43.91463414634146,
"alnum_prop": 0.4850041655095807,
"repo_name": "paulsmith/geodjango",
"id": "688cd58e2c21d2dbfbad2a14f713f7d5b91102bc",
"size": "7202",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django/core/management/commands/loaddata.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "71605"
},
{
"name": "Python",
"bytes": "3433375"
},
{
"name": "Shell",
"bytes": "804"
}
],
"symlink_target": ""
} |
import commands
from os import remove
from shutil import move
# This program must be called from the folder that contains the files to be
# updated. THE FOLDER SHOULD ONLY CONTAIN DHDL FILES TO BE UPDATED!
# Get all filenames for files to be updated
filenames = commands.getoutput('ls' %vars()).split()
n_files = len(filenames)
print "The number of files read in for processing is: ",n_files
# for loop to open and edit each file in the folder
for nf in range(n_files):
#open file with read write access
infile = open(filenames[nf], 'r')
outfile = open("tempfile", 'w')
#read all of the lines of the file
lines = infile.readlines()
#for each file read
for line in lines:
#if the first element of the line is @ then it MAY BE a line that should be changed
if (line[0] == '@'):
#if the third element of the line is an s then it IS a line to be changed
if (line[2] == 's'):
#changed line with replacements to the line adding word 'to' and a space
cline = line.replace("{} (", "{} to (").replace(",", ", ")
#write changed line over original line
outfile.write(cline)
else:
outfile.write(line)
else:
outfile.write(line)
#close infile and outfile
infile.close
outfile.close
#remove original file
remove(filenames[nf])
#move new file to original file location
move("tempfile", filenames[nf])
print "Updated file: ", filenames[nf]
| {
"content_hash": "4a5114f8523b42d6626b19ca4fa1aaab",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 85,
"avg_line_length": 32.16279069767442,
"alnum_prop": 0.7006507592190889,
"repo_name": "MobleyLab/alchemical-analysis",
"id": "987d7e4b13ed06dce156ad2bf6383e717d58fa30",
"size": "1590",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "samples/gromacs/data/convertdata.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "127413"
}
],
"symlink_target": ""
} |
import json
import logging
import re
import time
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext as _
from desktop.lib.i18n import smart_str
from desktop.lib.view_util import format_duration_in_millis
from jobbrowser.views import job_single_logs
from jobbrowser.models import LinkJobLogs
from liboozie.oozie_api import get_oozie
from oozie.models import Workflow, Pig
from oozie.views.editor import _submit_workflow
LOG = logging.getLogger(__name__)
def get(fs, jt, user):
  """Factory returning an OozieApi bound to the given filesystem, jobtracker and user."""
  return OozieApi(fs, jt, user)
class OozieApi(object):
  """
  Oozie submission.

  Wraps a per-user Oozie client to submit Pig scripts as one-action Oozie
  workflows, watch their logs and shape the results for the dashboard JSON.
  """
  WORKFLOW_NAME = 'pig-app-hue-script'
  RE_LOG_END = re.compile('(<<< Invocation of Pig command completed <<<|<<< Invocation of Main class completed <<<)')
  RE_LOG_START_RUNNING = re.compile('>>> Invoking Pig command line now >>>(.+?)(<<< Invocation of Pig command completed <<<|<<< Invocation of Main class completed)', re.M | re.DOTALL)
  RE_LOG_START_FINISHED = re.compile('(>>> Invoking Pig command line now >>>)', re.M | re.DOTALL)
  MAX_DASHBOARD_JOBS = 100
  def __init__(self, fs, jt, user):
    # fs/jt are the HDFS and jobtracker facades; the Oozie client is per-user.
    self.oozie_api = get_oozie(user)
    self.fs = fs
    self.jt = jt
    self.user = user
  def submit(self, pig_script, params):
    """Build a temporary workflow for the script, submit it and return the Oozie job.

    The temporary Workflow model row is always deleted, even on failure.
    """
    workflow = None
    try:
      workflow = self._create_workflow(pig_script, params)
      mapping = dict([(param['name'], param['value']) for param in workflow.get_parameters()])
      oozie_wf = _submit_workflow(self.user, self.fs, self.jt, workflow, mapping)
    finally:
      if workflow:
        workflow.delete(skip_trash=True)
    return oozie_wf
  def _create_workflow(self, pig_script, params):
    """Materialize a one-Pig-action workflow for *pig_script* with the popup *params* (JSON string)."""
    workflow = Workflow.objects.new_workflow(self.user)
    workflow.schema_version = 'uri:oozie:workflow:0.5'
    workflow.name = OozieApi.WORKFLOW_NAME
    workflow.is_history = True
    if pig_script.use_hcatalog:
      workflow.add_parameter("oozie.action.sharelib.for.pig", "pig,hcatalog")
    workflow.save()
    Workflow.objects.initialize(workflow, self.fs)
    # Upload the script text into the workflow's deployment directory.
    script_path = workflow.deployment_dir + '/script.pig'
    if self.fs: # For testing, difficult to mock
      self.fs.do_as_user(self.user.username, self.fs.create, script_path, data=smart_str(pig_script.dict['script']))
    files = []
    archives = []
    popup_params = json.loads(params)
    popup_params_names = [param['name'] for param in popup_params]
    pig_params = self._build_parameters(popup_params)
    # The v2 script model stores parameters/properties as plain strings;
    # the v1 model stores them as {'name': ..., 'value': ...} dicts.
    if pig_script.isV2:
      pig_params += [{"type": "argument", "value": param} for param in pig_script.dict['parameters']]
      job_properties = [{"name": prop.split('=', 1)[0], "value": prop.split('=', 1)[1]} for prop in pig_script.dict['hadoopProperties']]
      for resource in pig_script.dict['resources']:
        if resource.endswith('.zip') or resource.endswith('.tgz') or resource.endswith('.tar') or resource.endswith('.gz'):
          archives.append({"dummy": "", "name": resource})
        else:
          files.append(resource)
    else:
      # Popup params override script params of the same name.
      script_params = [param for param in pig_script.dict['parameters'] if param['name'] not in popup_params_names]
      pig_params += self._build_parameters(script_params)
      job_properties = [{"name": prop['name'], "value": prop['value']} for prop in pig_script.dict['hadoopProperties']]
      for resource in pig_script.dict['resources']:
        if resource['type'] == 'file':
          files.append(resource['value'])
        if resource['type'] == 'archive':
          archives.append({"dummy": "", "name": resource['value']})
    action = Pig.objects.create(
        name='pig-5760',
        script_path=script_path,
        workflow=workflow,
        node_type='pig',
        params=json.dumps(pig_params),
        files=json.dumps(files),
        archives=json.dumps(archives),
        job_properties=json.dumps(job_properties)
    )
    # On secure clusters, request the delegation tokens the script needs.
    credentials = []
    if pig_script.use_hcatalog and self.oozie_api.security_enabled:
      credentials.append({'name': 'hcat', 'value': True})
    if pig_script.use_hbase and self.oozie_api.security_enabled:
      credentials.append({'name': 'hbase', 'value': True})
    if credentials:
      action.credentials = credentials # Note, action.credentials is a @setter here
      action.save()
    # Wire start -> pig action -> end.
    action.add_node(workflow.end)
    start_link = workflow.start.get_link()
    start_link.child = action
    start_link.save()
    return workflow
  def _build_parameters(self, params):
    """Turn popup/script parameter dicts into Pig command-line argument descriptors."""
    pig_params = []
    for param in params:
      if param['name'].startswith('-'):
        # Already a flag: pass name (and optional value) through as-is.
        pig_params.append({"type": "argument", "value": "%(name)s" % param})
        if param['value']:
          pig_params.append({"type": "argument", "value": "%(value)s" % param})
      else:
        # Simpler way and backward compatibility for parameters
        pig_params.append({"type": "argument", "value": "-param"})
        pig_params.append({"type": "argument", "value": "%(name)s=%(value)s" % param})
    return pig_params
  def stop(self, job_id):
    """Kill the given Oozie job."""
    return self.oozie_api.job_control(job_id, 'kill')
  def get_jobs(self):
    """Return the current user's most recent Pig workflows (capped at MAX_DASHBOARD_JOBS)."""
    kwargs = {'cnt': OozieApi.MAX_DASHBOARD_JOBS,}
    kwargs['filters'] = [
        ('user', self.user.username),
        ('name', OozieApi.WORKFLOW_NAME)
    ]
    return self.oozie_api.get_workflows(**kwargs).jobs
  def get_log(self, request, oozie_workflow):
    """Collect per-action Pig logs and progress for a running workflow.

    Returns (logs-by-action-name, workflow_actions, is_really_done).
    """
    logs = {}
    is_really_done = False
    for action in oozie_workflow.get_working_actions():
      try:
        if action.externalId:
          data = job_single_logs(request, **{'job': action.externalId})
          if data:
            matched_logs = self._match_logs(data)
            logs[action.name] = LinkJobLogs._make_links(matched_logs)
            # The log end marker tells us the Pig invocation itself finished.
            is_really_done = OozieApi.RE_LOG_END.search(data['logs'][1]) is not None
      except Exception, e:
        LOG.error('An error happen while watching the job running: %(error)s' % {'error': e})
        is_really_done = True
    workflow_actions = []
    # Only one Pig action
    for action in oozie_workflow.get_working_actions():
      progress = get_progress(oozie_workflow, logs.get(action.name, ''))
      appendable = {
        'name': action.name,
        'status': action.status,
        'logs': logs.get(action.name, ''),
        'isReallyDone': is_really_done,
        'progress': progress,
        'progressPercent': '%d%%' % progress,
        'absoluteUrl': oozie_workflow.get_absolute_url(),
      }
      workflow_actions.append(appendable)
    return logs, workflow_actions, is_really_done
  def _match_logs(self, data):
    """Difficult to match multi lines of text"""
    logs = data['logs'][1]
    if OozieApi.RE_LOG_END.search(logs):
      # Finished run: the start/end markers bracket the interesting part.
      return re.search(OozieApi.RE_LOG_START_RUNNING, logs).group(1).strip()
    else:
      # Still running: take everything after the start marker.
      group = re.search(OozieApi.RE_LOG_START_FINISHED, logs)
      i = logs.index(group.group(1)) + len(group.group(1))
      return logs[i:].strip()
  def massaged_jobs_for_json(self, request, oozie_jobs, hue_jobs):
    """Shape Oozie workflow objects into the flat dicts the dashboard JSON expects."""
    jobs = []
    # Index saved Hue scripts by their last submitted Oozie job id.
    hue_jobs = dict([(script.dict.get('job_id'), script) for script in hue_jobs if script.dict.get('job_id')])
    for job in oozie_jobs:
      if job.is_running():
        job = self.oozie_api.get_job(job.id)
        get_copy = request.GET.copy() # Hacky, would need to refactor JobBrowser get logs
        get_copy['format'] = 'python'
        request.GET = get_copy
        try:
          logs, workflow_action, is_really_done = self.get_log(request, job)
          progress = workflow_action[0]['progress']
        except:
          LOG.exception('failed to get progress')
          progress = 0
      else:
        progress = 100
      hue_pig = hue_jobs.get(job.id) and hue_jobs.get(job.id) or None
      massaged_job = {
        'id': job.id,
        'lastModTime': hasattr(job, 'lastModTime') and job.lastModTime and format_time(job.lastModTime) or None,
        'kickoffTime': hasattr(job, 'kickoffTime') and job.kickoffTime or None,
        'timeOut': hasattr(job, 'timeOut') and job.timeOut or None,
        'endTime': job.endTime and format_time(job.endTime) or None,
        'status': job.status,
        'isRunning': job.is_running(),
        'duration': job.endTime and job.startTime and format_duration_in_millis(( time.mktime(job.endTime) - time.mktime(job.startTime) ) * 1000) or None,
        'appName': hue_pig and hue_pig.dict['name'] or _('Unsaved script'),
        'scriptId': hue_pig and hue_pig.id or -1,
        'scriptContent': hue_pig and hue_pig.dict['script'] or '',
        'progress': progress,
        'progressPercent': '%d%%' % progress,
        'user': job.user,
        'absoluteUrl': job.get_absolute_url(),
        'canEdit': has_job_edition_permission(job, self.user),
        'killUrl': reverse('oozie:manage_oozie_jobs', kwargs={'job_id':job.id, 'action':'kill'}),
        'watchUrl': reverse('pig:watch', kwargs={'job_id': job.id}) + '?format=python',
        'created': hasattr(job, 'createdTime') and job.createdTime and job.createdTime and ((job.type == 'Bundle' and job.createdTime) or format_time(job.createdTime)),
        'startTime': hasattr(job, 'startTime') and format_time(job.startTime) or None,
        'run': hasattr(job, 'run') and job.run or 0,
        'frequency': hasattr(job, 'frequency') and job.frequency or None,
        'timeUnit': hasattr(job, 'timeUnit') and job.timeUnit or None,
      }
      jobs.append(massaged_job)
    return jobs
def get_progress(job, log):
  """Return the job's completion percentage (0-100).

  Finished jobs report 100; otherwise the last
  'MapReduceLauncher - N% complete' line found in the Pig log is used,
  defaulting to 0 when the log has no progress line (or is not a string).
  """
  if job.status in ('SUCCEEDED', 'KILLED', 'FAILED'):
    return 100
  else:
    # Fix: the original bare except also swallowed SystemExit and
    # KeyboardInterrupt; only the expected failures are caught now
    # (no match -> IndexError, non-string log -> TypeError).
    try:
      return int(re.findall(r"MapReduceLauncher - (1?\d?\d)% complete", log)[-1])
    except (IndexError, TypeError):
      return 0
def format_time(st_time):
  """Format a time.struct_time like 'Mon, 01 Jan 2000 12:00:00'; '-' when missing."""
  return '-' if st_time is None else time.strftime("%a, %d %b %Y %H:%M:%S", st_time)
def has_job_edition_permission(oozie_job, user):
  """A superuser may manage any job; other users only jobs they own."""
  if user.is_superuser:
    return True
  return oozie_job.user == user.username
| {
"content_hash": "6b0643ea667814fa320f0960b9f7cf5d",
"timestamp": "",
"source": "github",
"line_count": 269,
"max_line_length": 183,
"avg_line_length": 36.639405204460964,
"alnum_prop": 0.6336241883116883,
"repo_name": "MobinRanjbar/hue",
"id": "6440d30a51bad64ede0cca16081a27e48324dd04",
"size": "10648",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/pig/src/pig/api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "13685"
},
{
"name": "C",
"bytes": "2397157"
},
{
"name": "C++",
"bytes": "177090"
},
{
"name": "CSS",
"bytes": "453436"
},
{
"name": "Emacs Lisp",
"bytes": "12145"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Groff",
"bytes": "14877"
},
{
"name": "HTML",
"bytes": "24042046"
},
{
"name": "Java",
"bytes": "575404"
},
{
"name": "JavaScript",
"bytes": "3220761"
},
{
"name": "KiCad",
"bytes": "475"
},
{
"name": "Makefile",
"bytes": "114862"
},
{
"name": "Mako",
"bytes": "2450286"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "PLSQL",
"bytes": "13774"
},
{
"name": "Perl",
"bytes": "138710"
},
{
"name": "PigLatin",
"bytes": "328"
},
{
"name": "Python",
"bytes": "38423121"
},
{
"name": "Scala",
"bytes": "215057"
},
{
"name": "Shell",
"bytes": "54810"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "TeX",
"bytes": "126420"
},
{
"name": "Thrift",
"bytes": "259222"
},
{
"name": "XSLT",
"bytes": "516845"
}
],
"symlink_target": ""
} |
"""
Django settings for sample project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '1($y2mnc1l4d7(57^(cu2hrggi0of&e-v%+jur&21)^cf02$f7'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'bootstrapform',
'ums',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'sample.urls'
WSGI_APPLICATION = 'sample.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'ja_JP'
TIME_ZONE = 'Asia/Tokyo'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
)
STATIC_URL = '/static/'
AUTH_USER_MODEL = 'ums.DemoUser'
| {
"content_hash": "5bf63317cdf99696a62579f848cfb109",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 71,
"avg_line_length": 23.662921348314608,
"alnum_prop": 0.7160493827160493,
"repo_name": "ntk1000/S.W.A.P.D.",
"id": "6872f95174086544be6cf3322d8dd3de58e409c3",
"size": "2106",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sample/sample/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9894"
}
],
"symlink_target": ""
} |
"""Legacy serialization logic for Keras models."""
import threading
import warnings
import weakref
import tensorflow.compat.v2 as tf
from keras.utils import tf_contextlib
from keras.utils import tf_inspect
# isort: off
from tensorflow.python.util.tf_export import keras_export
# Flag that determines whether to skip the NotImplementedError when calling
# get_config in custom models and layers. This is only enabled when saving to
# SavedModel, when the config isn't required.
_SKIP_FAILED_SERIALIZATION = False
# If a layer does not have a defined config, then the returned config will be a
# dictionary with the below key.
_LAYER_UNDEFINED_CONFIG_KEY = "layer was saved without config"
# Store a unique, per-object ID for shared objects.
#
# We store a unique ID for each object so that we may, at loading time,
# re-create the network properly. Without this ID, we would have no way of
# determining whether a config is a description of a new object that
# should be created or is merely a reference to an already-created object.
SHARED_OBJECT_KEY = "shared_object_id"
# Thread-local containers holding the disable flag and the active
# loading/saving scopes for the current thread.
SHARED_OBJECT_DISABLED = threading.local()
SHARED_OBJECT_LOADING = threading.local()
SHARED_OBJECT_SAVING = threading.local()
# Attributes on the threadlocal variable must be set per-thread, thus we
# cannot initialize these globally. Instead, we have accessor functions with
# default values.
def _shared_object_disabled():
    """Return True when shared object handling is disabled for this thread."""
    try:
        return SHARED_OBJECT_DISABLED.disabled
    except AttributeError:
        # The attribute is set lazily per-thread; missing means "enabled".
        return False
def _shared_object_loading_scope():
    """Return this thread's shared-object loading scope (noop by default)."""
    try:
        return SHARED_OBJECT_LOADING.scope
    except AttributeError:
        # No scope has been opened on this thread yet.
        return NoopLoadingScope()
def _shared_object_saving_scope():
    """Return this thread's shared-object saving scope, or None."""
    try:
        return SHARED_OBJECT_SAVING.scope
    except AttributeError:
        # No saving scope has been opened on this thread yet.
        return None
class DisableSharedObjectScope:
    """A context manager for disabling handling of shared objects.
    Disables shared object handling for both saving and loading.
    Created primarily for use with `clone_model`, which does extra surgery that
    is incompatible with shared objects.
    """
    def __enter__(self):
        # Flip the thread-local disable flag, then remember the scopes that
        # were active so they can be restored on exit.
        SHARED_OBJECT_DISABLED.disabled = True
        self._orig_loading_scope = _shared_object_loading_scope()
        self._orig_saving_scope = _shared_object_saving_scope()
    def __exit__(self, *args, **kwargs):
        # Re-enable handling and restore the previously active scopes.
        SHARED_OBJECT_DISABLED.disabled = False
        SHARED_OBJECT_LOADING.scope = self._orig_loading_scope
        SHARED_OBJECT_SAVING.scope = self._orig_saving_scope
class NoopLoadingScope:
    """Default loading scope; it tracks nothing.

    Exists so serialization code that does not care about shared objects
    (e.g. serializing a single object) can use one uniform scope API.
    """

    def get(self, unused_object_id):
        # Nothing is ever stored, so every lookup misses.
        return None

    def set(self, object_id, obj):
        # Intentionally a no-op.
        pass
class SharedObjectLoadingScope:
    """Context manager tracking objects created during deserialization.

    A config may reference the same object from several layers. Re-using
    the already-built instance (instead of cloning one per reference)
    restores the original network structure exactly.
    """

    def __enter__(self):
        # When shared-object handling is globally disabled, hand back a
        # scope that tracks nothing.
        if _shared_object_disabled():
            return NoopLoadingScope()
        global SHARED_OBJECT_LOADING
        SHARED_OBJECT_LOADING.scope = self
        self._obj_ids_to_obj = {}
        return self

    def get(self, object_id):
        """Return the object previously stored under `object_id`.

        Args:
            object_id: shared object ID to use when attempting to find
                already-loaded object.

        Returns:
            The object, if we've seen this ID before. Else, `None`.
        """
        # Callers pass None when no shared-object ID was recorded; treat
        # that as a miss so external code stays simple.
        if object_id is None:
            return None
        return self._obj_ids_to_obj.get(object_id)

    def set(self, object_id, obj):
        """Remember `obj` so later references to `object_id` reuse it."""
        if object_id is not None:
            self._obj_ids_to_obj[object_id] = obj

    def __exit__(self, *args, **kwargs):
        global SHARED_OBJECT_LOADING
        SHARED_OBJECT_LOADING.scope = NoopLoadingScope()
class SharedObjectConfig(dict):
    """Config dict that counts how often its object is referenced.

    Once a config is referenced more than once, a shared object ID is
    attached so the shared relationship can be rebuilt at load time.

    Subclassing `dict` directly (rather than `collections.UserDict` or
    `collections.Mapping`) is deliberate: python's json encoder does not
    support `Mapping`s, and serialization support is essential here. This
    is safe because no core dict method is overridden -- only a new
    reference-counting method is added.
    """

    def __init__(self, base_config, object_id, **kwargs):
        self.ref_count = 1
        self.object_id = object_id
        super().__init__(base_config, **kwargs)

    def increment_ref_count(self):
        # Attach the shared object ID only once the object has been seen a
        # second time; one-off objects keep their configs free of the extra
        # key, which minimizes backwards-compatibility breakage.
        first_share = self.ref_count == 1
        if first_share:
            self[SHARED_OBJECT_KEY] = self.object_id
        self.ref_count += 1
class SharedObjectSavingScope:
    """Keeps track of shared object configs when serializing."""
    def __enter__(self):
        # With shared-object handling globally disabled, expose no scope.
        if _shared_object_disabled():
            return None
        global SHARED_OBJECT_SAVING
        # Serialization can happen at a number of layers for a number of
        # reasons. We may end up with a case where we're opening a saving scope
        # within another saving scope. In that case, we'd like to use the
        # outermost scope available and ignore inner scopes, since there is not
        # (yet) a reasonable use case for having these nested and distinct.
        if _shared_object_saving_scope() is not None:
            self._passthrough = True
            return _shared_object_saving_scope()
        else:
            self._passthrough = False
        SHARED_OBJECT_SAVING.scope = self
        # Weak keys: being tracked here must not keep objects alive.
        self._shared_objects_config = weakref.WeakKeyDictionary()
        self._next_id = 0
        return self
    def get_config(self, obj):
        """Gets a `SharedObjectConfig` if one has already been seen for `obj`.
        Args:
            obj: The object for which to retrieve the `SharedObjectConfig`.
        Returns:
            The SharedObjectConfig for a given object, if already seen. Else,
            `None`.
        """
        try:
            shared_object_config = self._shared_objects_config[obj]
        except (TypeError, KeyError):
            # If the object is unhashable (e.g. a subclass of
            # `AbstractBaseClass` that has not overridden `__hash__`), a
            # `TypeError` will be thrown. We'll just continue on without shared
            # object support.
            return None
        # Seen before: bump the reference count so the shared object ID is
        # attached to the returned config.
        shared_object_config.increment_ref_count()
        return shared_object_config
    def create_config(self, base_config, obj):
        """Create a new SharedObjectConfig for a given object."""
        shared_object_config = SharedObjectConfig(base_config, self._next_id)
        self._next_id += 1
        try:
            self._shared_objects_config[obj] = shared_object_config
        except TypeError:
            # If the object is unhashable (e.g. a subclass of
            # `AbstractBaseClass` that has not overridden `__hash__`), a
            # `TypeError` will be thrown. We'll just continue on without shared
            # object support.
            pass
        return shared_object_config
    def __exit__(self, *args, **kwargs):
        # Only the outermost (non-passthrough) scope clears the thread-local.
        if not getattr(self, "_passthrough", False):
            global SHARED_OBJECT_SAVING
            SHARED_OBJECT_SAVING.scope = None
def serialize_keras_class_and_config(
    cls_name, cls_config, obj=None, shared_object_id=None
):
    """Return the serialization of the class `cls_name` with `cls_config`."""
    config = {"class_name": cls_name, "config": cls_config}

    # Some load-path branches call this helper with a shared object ID
    # already in hand; keep it in the output.
    if shared_object_id is not None:
        config[SHARED_OBJECT_KEY] = shared_object_id

    # Inside an active saving scope, reuse (and reference-count) any config
    # already produced for `obj`. The scope stores an extra ID field in the
    # config, which lets the shared-object relationship be re-created at
    # load time.
    scope = _shared_object_saving_scope()
    if scope is not None and obj is not None:
        existing = scope.get_config(obj)
        if existing is not None:
            return existing
        return scope.create_config(config, obj)
    return config
@tf_contextlib.contextmanager
def skip_failed_serialization():
    # Within this context, a layer whose get_config raises
    # NotImplementedError is serialized with a placeholder config instead of
    # failing (the flag is read in serialize_keras_object). The previous
    # flag value is restored even if the body raises.
    global _SKIP_FAILED_SERIALIZATION
    prev = _SKIP_FAILED_SERIALIZATION
    try:
        _SKIP_FAILED_SERIALIZATION = True
        yield
    finally:
        _SKIP_FAILED_SERIALIZATION = prev
class CustomMaskWarning(Warning):
    # Warning category emitted when a mask-supporting layer does not
    # override get_config (see serialize_keras_object).
    pass
@keras_export("keras.utils.serialize_keras_object")
def serialize_keras_object(instance):
    """Serialize a Keras object into a JSON-compatible representation.

    Calls to `serialize_keras_object` while underneath the
    `SharedObjectSavingScope` context manager will cause any objects re-used
    across multiple layers to be saved with a special shared object ID. This
    allows the network to be re-created properly during deserialization.

    Args:
      instance: The object to serialize.

    Returns:
      A dict-like, JSON-compatible representation of the object's config.

    Raises:
      ValueError: if `instance` has neither `get_config` nor `__name__`.
    """
    from keras.saving import object_registration

    _, instance = tf.__internal__.decorator.unwrap(instance)
    if instance is None:
        return None
    # For v1 layers, checking supports_masking is not enough. We have to also
    # check whether compute_mask has been overridden.
    supports_masking = getattr(instance, "supports_masking", False) or (
        hasattr(instance, "compute_mask")
        and not is_default(instance.compute_mask)
    )
    if supports_masking and is_default(instance.get_config):
        warnings.warn(
            "Custom mask layers require a config and must override "
            "get_config. When loading, the custom mask layer must be "
            "passed to the custom_objects argument.",
            category=CustomMaskWarning,
            stacklevel=2,
        )
    if hasattr(instance, "get_config"):
        name = object_registration.get_registered_name(instance.__class__)
        try:
            config = instance.get_config()
        except NotImplementedError as e:
            # Under skip_failed_serialization(), emit a placeholder config
            # instead of propagating the error.
            if _SKIP_FAILED_SERIALIZATION:
                return serialize_keras_class_and_config(
                    name, {_LAYER_UNDEFINED_CONFIG_KEY: True}
                )
            raise e
        serialization_config = {}
        for key, item in config.items():
            if isinstance(item, str):
                serialization_config[key] = item
                continue
            # Any object of a different type needs to be converted to string or
            # dict for serialization (e.g. custom functions, custom classes)
            try:
                serialized_item = serialize_keras_object(item)
                if isinstance(serialized_item, dict) and not isinstance(
                    item, dict
                ):
                    serialized_item["__passive_serialization__"] = True
                serialization_config[key] = serialized_item
            except ValueError:
                serialization_config[key] = item
        name = object_registration.get_registered_name(instance.__class__)
        return serialize_keras_class_and_config(
            name, serialization_config, instance
        )
    if hasattr(instance, "__name__"):
        return object_registration.get_registered_name(instance)
    # BUG FIX: the second string previously contained "doesn\t", where "\t"
    # is a tab escape; the message intended the word "doesn't".
    raise ValueError(
        f"Cannot serialize {instance} since it doesn't implement "
        "`get_config()`, and also doesn't have `__name__`"
    )
def class_and_config_for_serialized_keras_object(
    config,
    module_objects=None,
    custom_objects=None,
    printable_module_name="object",
):
    """Returns the class name and config for a serialized keras object.

    Args:
      config: dict containing `class_name` and `config` keys.
      module_objects: dict of built-in objects to look the class name up in.
      custom_objects: dict of user-provided custom objects.
      printable_module_name: human-readable object type, used in errors.

    Returns:
      A `(cls, cls_config)` tuple with the resolved class and its
      (partially deserialized) config.

    Raises:
      ValueError: if `config` is malformed or the class cannot be resolved.
    """
    from keras.saving import object_registration
    if (
        not isinstance(config, dict)
        or "class_name" not in config
        or "config" not in config
    ):
        raise ValueError(
            f"Improper config format for {config}. "
            "Expecting python dict contains `class_name` and `config` as keys"
        )
    class_name = config["class_name"]
    cls = object_registration.get_registered_object(
        class_name, custom_objects, module_objects
    )
    if cls is None:
        raise ValueError(
            f"Unknown {printable_module_name}: '{class_name}'. "
            "Please ensure you are using a `keras.utils.custom_object_scope` "
            "and that this object is included in the scope. See "
            "https://www.tensorflow.org/guide/keras/save_and_serialize"
            "#registering_the_custom_object for details."
        )
    cls_config = config["config"]
    # Check if `cls_config` is a list. If it is a list, return the class and the
    # associated class configs for recursively deserialization. This case will
    # happen on the old version of sequential model (e.g. `keras_version` ==
    # "2.0.6"), which is serialized in a different structure, for example
    # "{'class_name': 'Sequential',
    #  'config': [{'class_name': 'Embedding', 'config': ...}, {}, ...]}".
    if isinstance(cls_config, list):
        return (cls, cls_config)
    deserialized_objects = {}
    for key, item in cls_config.items():
        if key == "name":
            # Assume that the value of 'name' is a string that should not be
            # deserialized as a function. This avoids the corner case where
            # cls_config['name'] has an identical name to a custom function and
            # gets converted into that function.
            deserialized_objects[key] = item
        elif isinstance(item, dict) and "__passive_serialization__" in item:
            # Nested config produced by serialize_keras_object: recurse.
            deserialized_objects[key] = deserialize_keras_object(
                item,
                module_objects=module_objects,
                custom_objects=custom_objects,
                printable_module_name="config_item",
            )
        # TODO(momernick): Should this also have 'module_objects'?
        elif isinstance(item, str) and tf_inspect.isfunction(
            object_registration.get_registered_object(item, custom_objects)
        ):
            # Handle custom functions here. When saving functions, we only save
            # the function's name as a string. If we find a matching string in
            # the custom objects during deserialization, we convert the string
            # back to the original function.
            # Note that a potential issue is that a string field could have a
            # naming conflict with a custom function name, but this should be a
            # rare case. This issue does not occur if a string field has a
            # naming conflict with a custom object, since the config of an
            # object will always be a dict.
            deserialized_objects[
                key
            ] = object_registration.get_registered_object(item, custom_objects)
    # Merge the eagerly deserialized values back into the config, in place.
    for key, item in deserialized_objects.items():
        cls_config[key] = deserialized_objects[key]
    return (cls, cls_config)
@keras_export("keras.utils.deserialize_keras_object")
def deserialize_keras_object(
    identifier,
    module_objects=None,
    custom_objects=None,
    printable_module_name="object",
):
    """Turns the serialized form of a Keras object back into an actual object.

    This function is for mid-level library implementers rather than end users.
    Importantly, this utility requires you to provide the dict of
    `module_objects` to use for looking up the object config; this is not
    populated by default. If you need a deserialization utility that has
    preexisting knowledge of built-in Keras objects, use e.g.
    `keras.layers.deserialize(config)`, `keras.metrics.deserialize(config)`,
    etc.

    Calling `deserialize_keras_object` while underneath the
    `SharedObjectLoadingScope` context manager will cause any already-seen
    shared objects to be returned as-is rather than creating a new object.

    Args:
      identifier: the serialized form of the object.
      module_objects: A dictionary of built-in objects to look the name up in.
        Generally, `module_objects` is provided by midlevel library
        implementers.
      custom_objects: A dictionary of custom objects to look the name up in.
        Generally, `custom_objects` is provided by the end user.
      printable_module_name: A human-readable string representing the type of
        the object. Printed in case of exception.

    Returns:
      The deserialized object.

    Example:

    A mid-level library implementer might want to implement a utility for
    retrieving an object from its config, as such:

    ```python
    def deserialize(config, custom_objects=None):
        return deserialize_keras_object(
            config,
            module_objects=globals(),
            custom_objects=custom_objects,
            printable_module_name="MyObjectType",
        )
    ```

    This is how e.g. `keras.layers.deserialize()` is implemented.
    """
    from keras.saving import object_registration
    if identifier is None:
        return None
    if isinstance(identifier, dict):
        # In this case we are dealing with a Keras config dictionary.
        config = identifier
        (cls, cls_config) = class_and_config_for_serialized_keras_object(
            config, module_objects, custom_objects, printable_module_name
        )
        # If this object has already been loaded (i.e. it's shared between
        # multiple objects), return the already-loaded object.
        shared_object_id = config.get(SHARED_OBJECT_KEY)
        shared_object = _shared_object_loading_scope().get(shared_object_id)
        if shared_object is not None:
            return shared_object
        if hasattr(cls, "from_config"):
            arg_spec = tf_inspect.getfullargspec(cls.from_config)
            custom_objects = custom_objects or {}
            # If `from_config` accepts custom objects, merge the global,
            # thread-local, and caller-provided registries and pass them
            # explicitly; otherwise expose them through a scope.
            if "custom_objects" in arg_spec.args:
                tlco = object_registration._THREAD_LOCAL_CUSTOM_OBJECTS.__dict__
                deserialized_obj = cls.from_config(
                    cls_config,
                    custom_objects={
                        **object_registration._GLOBAL_CUSTOM_OBJECTS,
                        **tlco,
                        **custom_objects,
                    },
                )
            else:
                with object_registration.CustomObjectScope(custom_objects):
                    deserialized_obj = cls.from_config(cls_config)
        else:
            # Then `cls` may be a function returning a class.
            # in this case by convention `config` holds
            # the kwargs of the function.
            custom_objects = custom_objects or {}
            with object_registration.CustomObjectScope(custom_objects):
                deserialized_obj = cls(**cls_config)
        # Add object to shared objects, in case we find it referenced again.
        _shared_object_loading_scope().set(shared_object_id, deserialized_obj)
        return deserialized_obj
    elif isinstance(identifier, str):
        object_name = identifier
        # Lookup order: caller-provided custom objects, thread-local
        # registry, global registry, then the provided module objects.
        if custom_objects and object_name in custom_objects:
            obj = custom_objects.get(object_name)
        elif (
            object_name
            in object_registration._THREAD_LOCAL_CUSTOM_OBJECTS.__dict__
        ):
            obj = object_registration._THREAD_LOCAL_CUSTOM_OBJECTS.__dict__[
                object_name
            ]
        elif object_name in object_registration._GLOBAL_CUSTOM_OBJECTS:
            obj = object_registration._GLOBAL_CUSTOM_OBJECTS[object_name]
        else:
            obj = module_objects.get(object_name)
        if obj is None:
            raise ValueError(
                f"Unknown {printable_module_name}: '{object_name}'. "
                "Please ensure you are using a "
                "`keras.utils.custom_object_scope` "
                "and that this object is included in the scope. See "
                "https://www.tensorflow.org/guide/keras/save_and_serialize"
                "#registering_the_custom_object for details."
            )
        # Classes passed by name are instantiated with no args, functions are
        # returned as-is.
        if tf_inspect.isclass(obj):
            return obj()
        return obj
    elif tf_inspect.isfunction(identifier):
        # If a function has already been deserialized, return as is.
        return identifier
    else:
        raise ValueError(
            "Could not interpret serialized "
            f"{printable_module_name}: {identifier}"
        )
def validate_config(config):
    """Determine whether `config` appears to be a valid layer config."""
    if not isinstance(config, dict):
        return False
    # Configs saved via skip_failed_serialization() carry the placeholder
    # key and are not usable as real layer configs.
    return _LAYER_UNDEFINED_CONFIG_KEY not in config
def is_default(method):
    """Check whether `method` is decorated with the `default` wrapper."""
    marker = getattr(method, "_is_default", False)
    return marker
| {
"content_hash": "86791ee857eb3c081b4bfdec3c8f1472",
"timestamp": "",
"source": "github",
"line_count": 576,
"max_line_length": 80,
"avg_line_length": 38.57465277777778,
"alnum_prop": 0.6451235429137225,
"repo_name": "keras-team/keras",
"id": "1ebb2e4bc24ee5f42a2d8e821c235d7a795399fe",
"size": "22908",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "keras/saving/legacy/serialization.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "900"
},
{
"name": "Python",
"bytes": "11342063"
},
{
"name": "Shell",
"bytes": "11489"
},
{
"name": "Starlark",
"bytes": "273139"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import pytest
from ejdb import c
def test_bsonoid():
    # Round-trip: parsing a valid 24-char hex string preserves its text form.
    hex_id = '0123456789abcdef01234567'
    oid = c.BSONOID.from_string(hex_id)
    assert str(oid) == hex_id
def test_bsonoid_too_short():
    # 23 hex chars: one short of a valid OID.
    with pytest.raises(ValueError) as ctx:
        c.BSONOID.from_string('123456789abcdef01234567')
    expected = 'OID should be a 24-character-long hex string.'
    assert str(ctx.value) == expected
def test_bsonoid_not_hex():
    # Correct length, but contains 'g', which is not a hex digit.
    with pytest.raises(ValueError) as ctx:
        c.BSONOID.from_string('123456789abcdefg01234567')
    expected = 'OID should be a 24-character-long hex string.'
    assert str(ctx.value) == expected
def test_bsonoid_too_long():
    # 25 hex chars: one past a valid OID.
    with pytest.raises(ValueError) as ctx:
        c.BSONOID.from_string('0123456789abcdef012345678')
    expected = 'OID should be a 24-character-long hex string.'
    assert str(ctx.value) == expected
| {
"content_hash": "dc14e40d3c4efa511e5b3b640c604369",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 76,
"avg_line_length": 29.785714285714285,
"alnum_prop": 0.7122302158273381,
"repo_name": "uranusjr/ctypes-ejdb",
"id": "32d195ca14cbae45dd5a8612ea13ee8c407aeca7",
"size": "881",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_c.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "1490"
},
{
"name": "Python",
"bytes": "89233"
}
],
"symlink_target": ""
} |
"""daily schedule updates
Revision ID: 2df9ce70bad
Revises: 376804c871b4
Create Date: 2018-03-14 12:30:40.844228
"""
# revision identifiers, used by Alembic.
revision = '2df9ce70bad'
down_revision = '376804c871b4'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
    # Drop the join table before the table it references.
    op.drop_table('schedule_house_join')
    op.drop_table('schedule')
    # Add a non-nullable house FK to daily_schedule. Existing rows get
    # house_id=1 (NCOP by default) via a temporary server default, which is
    # removed immediately afterwards.
    op.add_column('daily_schedule', sa.Column('house_id', sa.Integer(), nullable=False, server_default='1'))
    op.alter_column('daily_schedule', 'house_id', server_default=None)
    # start_date becomes mandatory.
    op.alter_column('daily_schedule', 'start_date',
               existing_type=sa.DATE(),
               nullable=False)
    op.create_index(op.f('ix_daily_schedule_house_id'), 'daily_schedule', ['house_id'], unique=False)
    op.create_foreign_key(op.f('fk_daily_schedule_house_id_house'), 'daily_schedule', 'house', ['house_id'], ['id'])
    op.drop_column('daily_schedule', 'schedule_date')
def downgrade():
    # Reverse of upgrade(): restore schedule_date, unwind the house FK
    # wiring, and recreate the dropped tables.
    op.add_column('daily_schedule', sa.Column('schedule_date', sa.DATE(), autoincrement=False, nullable=True))
    op.drop_constraint(op.f('fk_daily_schedule_house_id_house'), 'daily_schedule', type_='foreignkey')
    op.drop_index(op.f('ix_daily_schedule_house_id'), table_name='daily_schedule')
    op.alter_column('daily_schedule', 'start_date',
               existing_type=sa.DATE(),
               nullable=True)
    op.drop_column('daily_schedule', 'house_id')
    # `schedule` must exist before `schedule_house_join` can reference it.
    op.create_table('schedule',
    sa.Column('id', sa.INTEGER(), nullable=False),
    sa.Column('description', sa.TEXT(), autoincrement=False, nullable=True),
    sa.Column('meeting_date', sa.DATE(), autoincrement=False, nullable=True),
    sa.Column('meeting_time', sa.TEXT(), autoincrement=False, nullable=True),
    sa.Column('created_at', postgresql.TIMESTAMP(timezone=True), server_default=sa.text(u'now()'), autoincrement=False, nullable=False),
    sa.Column('updated_at', postgresql.TIMESTAMP(timezone=True), server_default=sa.text(u'now()'), autoincrement=False, nullable=False),
    sa.PrimaryKeyConstraint('id', name=u'schedule_pkey')
    )
    op.create_table('schedule_house_join',
    sa.Column('schedule_id', sa.INTEGER(), autoincrement=False, nullable=True),
    sa.Column('house_id', sa.INTEGER(), autoincrement=False, nullable=True),
    sa.ForeignKeyConstraint(['house_id'], [u'house.id'], name=u'schedule_house_join_house_id_fkey'),
    sa.ForeignKeyConstraint(['schedule_id'], [u'schedule.id'], name=u'schedule_house_join_schedule_id_fkey')
    )
| {
"content_hash": "b9244cb3fd2b4c99dcdd24a44322fbc5",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 136,
"avg_line_length": 47.220338983050844,
"alnum_prop": 0.6848528356066045,
"repo_name": "Code4SA/pmg-cms-2",
"id": "3438d314636e317dec6a074b2cc031659c889847",
"size": "2786",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "migrations/versions/2df9ce70bad_daily_schedule_updates.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "204619"
},
{
"name": "HTML",
"bytes": "361071"
},
{
"name": "JavaScript",
"bytes": "109536"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "469838"
}
],
"symlink_target": ""
} |
"""This is the slimmed ResNet as used by Lopez et al. in the GEM paper."""
import torch.nn as nn
from torch.nn.functional import relu, avg_pool2d
from avalanche.models import MultiHeadClassifier, MultiTaskModule, DynamicModule
class MLP(nn.Module):
    """Fully-connected net: Linear layers with ReLU between (not after) them.

    `sizes` lists the layer widths, e.g. [in, hidden..., out].
    """

    def __init__(self, sizes):
        super(MLP, self).__init__()
        layers = []
        pair_count = len(sizes) - 1
        for idx, (n_in, n_out) in enumerate(zip(sizes[:-1], sizes[1:])):
            layers.append(nn.Linear(n_in, n_out))
            # No activation after the final Linear layer.
            if idx < pair_count - 1:
                layers.append(nn.ReLU())
        self.net = nn.Sequential(*layers)

    def forward(self, x):
        return self.net(x)
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding 1 and no bias term."""
    return nn.Conv2d(
        in_planes, out_planes, kernel_size=3,
        stride=stride, padding=1, bias=False,
    )
class BasicBlock(nn.Module):
    """Standard two-convolution residual block."""

    expansion = 1

    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(in_planes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        # Identity shortcut unless the spatial size or channel count
        # changes; then a 1x1 projection matches the residual branch.
        needs_projection = (
            stride != 1 or in_planes != self.expansion * planes
        )
        if needs_projection:
            self.shortcut = nn.Sequential(
                nn.Conv2d(
                    in_planes,
                    self.expansion * planes,
                    kernel_size=1,
                    stride=stride,
                    bias=False,
                ),
                nn.BatchNorm2d(self.expansion * planes),
            )
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x):
        branch = relu(self.bn1(self.conv1(x)))
        branch = self.bn2(self.conv2(branch))
        branch = branch + self.shortcut(x)
        return relu(branch)
class ResNet(nn.Module):
    """Slimmed ResNet over 32x32x3 inputs (width controlled by `nf`)."""

    def __init__(self, block, num_blocks, num_classes, nf):
        super(ResNet, self).__init__()
        self.in_planes = nf
        self.conv1 = conv3x3(3, nf * 1)
        self.bn1 = nn.BatchNorm2d(nf * 1)
        widths = (nf * 1, nf * 2, nf * 4, nf * 8)
        self.layer1 = self._make_layer(block, widths[0], num_blocks[0], 1)
        self.layer2 = self._make_layer(block, widths[1], num_blocks[1], 2)
        self.layer3 = self._make_layer(block, widths[2], num_blocks[2], 2)
        self.layer4 = self._make_layer(block, widths[3], num_blocks[3], 2)
        self.linear = nn.Linear(widths[3] * block.expansion, num_classes)

    def _make_layer(self, block, planes, num_blocks, stride):
        # Only the first block of a stage may downsample; the rest use
        # stride 1. `in_planes` tracks the running channel count.
        layers = []
        for s in [stride] + [1] * (num_blocks - 1):
            layers.append(block(self.in_planes, planes, s))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)

    def forward(self, x):
        batch = x.size(0)
        h = relu(self.bn1(self.conv1(x.view(batch, 3, 32, 32))))
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            h = stage(h)
        h = avg_pool2d(h, 4)
        h = h.view(h.size(0), -1)
        return self.linear(h)
def SlimResNet18(nclasses, nf=20):
    """Slimmed ResNet18: BasicBlock with a 2-2-2-2 stage layout."""
    return ResNet(
        block=BasicBlock,
        num_blocks=[2, 2, 2, 2],
        num_classes=nclasses,
        nf=nf,
    )
class MTSlimResNet18(MultiTaskModule, DynamicModule):
    """Multi-task slimmed ResNet18: shared trunk, per-task classifier head."""

    def __init__(self, nclasses, nf=20):
        super().__init__()
        self.in_planes = nf
        self.conv1 = conv3x3(3, nf * 1)
        self.bn1 = nn.BatchNorm2d(nf * 1)
        stage_blocks = [2, 2, 2, 2]
        self.layer1 = self._make_layer(BasicBlock, nf * 1, stage_blocks[0], 1)
        self.layer2 = self._make_layer(BasicBlock, nf * 2, stage_blocks[1], 2)
        self.layer3 = self._make_layer(BasicBlock, nf * 4, stage_blocks[2], 2)
        self.layer4 = self._make_layer(BasicBlock, nf * 8, stage_blocks[3], 2)
        # Task-aware classifier dispatching on the provided task labels.
        self.linear = MultiHeadClassifier(
            nf * 8 * BasicBlock.expansion, nclasses
        )

    def _make_layer(self, block, planes, num_blocks, stride):
        # First block of a stage may downsample; the rest use stride 1.
        layers = []
        for s in [stride] + [1] * (num_blocks - 1):
            layers.append(block(self.in_planes, planes, s))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)

    def forward(self, x, task_labels):
        batch = x.size(0)
        h = relu(self.bn1(self.conv1(x.view(batch, 3, 32, 32))))
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            h = stage(h)
        h = avg_pool2d(h, 4)
        h = h.view(h.size(0), -1)
        return self.linear(h, task_labels)
| {
"content_hash": "98cb489929bd8b44429a964032ee5ef8",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 80,
"avg_line_length": 33.64748201438849,
"alnum_prop": 0.5582638443446654,
"repo_name": "ContinualAI/avalanche",
"id": "29727c31b99f1b1e5d38df632dc7d6334d6b8eea",
"size": "4677",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "avalanche/models/slim_resnet18.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "51"
},
{
"name": "Dockerfile",
"bytes": "349"
},
{
"name": "Jupyter Notebook",
"bytes": "213662"
},
{
"name": "Python",
"bytes": "2031919"
},
{
"name": "Shell",
"bytes": "12008"
}
],
"symlink_target": ""
} |
from .response import text
from traceback import format_exc
#########################################
# Project-defined exception classes
#
# Notes:
# - Base classes for framework exceptions
#
#
#########################################
class SanicException(Exception):
    """Base class for framework exceptions.

    Subclasses declare a class-level `status_code`; passing an explicit
    status code at construction time overrides it on the instance.
    """

    def __init__(self, message, status_code=None):
        super().__init__(message)
        if status_code is None:
            return
        self.status_code = status_code
class NotFound(SanicException):
    """404 Not Found."""
    status_code = 404
class InvalidUsage(SanicException):
    """400 Bad Request."""
    status_code = 400
class ServerError(SanicException):
    """500 Internal Server Error."""
    status_code = 500
class FileNotFound(NotFound):
    """404 raised when a requested static file cannot be found."""
    status_code = 404
    def __init__(self, message, path, relative_url):
        super().__init__(message)
        # Filesystem path that was looked up, and the URL it was mapped
        # from; kept so custom error handlers can report them.
        self.path = path
        self.relative_url = relative_url
class RequestTimeout(SanicException):
    """408 Request Timeout."""
    status_code = 408
class PayloadTooLarge(SanicException):
    """413 Payload Too Large."""
    status_code = 413
#########################################
# Exception handler
#
# Notes:
# - Dispatches exceptions to registered handlers
#
#########################################
class Handler:
    """Maps exception types to handler callables and renders responses."""

    handlers = None

    def __init__(self, sanic):
        self.handlers = {}
        self.sanic = sanic

    def add(self, exception, handler):
        """Register `handler` for the exact exception type `exception`."""
        self.handlers[exception] = handler

    def response(self, request, exception):
        """
        Fetches and executes an exception handler and returns a response object

        :param request: Request
        :param exception: Exception to handle
        :return: Response object
        """
        # Lookup is by exact type; unknown types fall back to `default`.
        handler = self.handlers.get(type(exception), self.default)
        return handler(request=request, exception=exception)

    def default(self, request, exception):
        # Framework-defined exceptions carry their own status code
        # (falling back to 500 when the subclass did not set one).
        if issubclass(type(exception), SanicException):
            status = getattr(exception, 'status_code', 500)
            return text("Error: {}".format(exception), status=status)
        # In debug mode, expose the traceback to the client.
        if self.sanic.debug:
            return text(
                "Error: {}\nException: {}".format(
                    exception, format_exc()), status=500)
        return text(
            "An error occurred while generating the request", status=500)
| {
"content_hash": "7f2347ac3caf3a2b1ded124b81822c19",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 79,
"avg_line_length": 23.448979591836736,
"alnum_prop": 0.5483028720626631,
"repo_name": "hhstore/flask-annotated",
"id": "7a4847fcac19b9191bd4f49a9817d7592abfe47a",
"size": "2420",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sanic/sanic-0.1.9/sanic/exceptions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "173082"
}
],
"symlink_target": ""
} |
from .base import BasePlaylist
from kivy.logger import Logger
from utils import get_unicode
from copy import deepcopy
class FileLoaderPlaylist(BasePlaylist):
    '''Playlist that can add all files from a path and its sub-paths;
    files can also later be removed.
    Useful when a playlist has to have specific files and folders.'''

    can_add = True
    file_modif_time = 0
    adding_files = False

    def load(self, path, data):
        """Restore playlist state from a previously saved data dict."""
        super(FileLoaderPlaylist, self).load(path, data)
        self.media = data['media']
        self.update()

    def update(self):
        # Nothing to refresh for a manually curated playlist.
        pass

    def add_path(self, path):
        """Synchronously collect all files under `path` and append them."""
        path = get_unicode(path)
        new_files = self.get_files(path)
        self.media = self.media + new_files
        self.refresh_media_id()
        self.save()

    def add_path_async(self, path):
        """Queue a background task that builds a playlist from `path`."""
        path = get_unicode(path)
        start_index = 0
        if self.media:
            # Continue numbering after the last existing entry.
            start_index = self.media[-1]['index']
        task = {
            'method': 'playlist_from_path', 'path': path,
            'start_index': start_index}  # BUG FIX: was `start_inde` (NameError)
        # NOTE(review): `appworker` is not imported in this module; it is
        # presumably installed as a global elsewhere -- verify.
        appworker.add_task(task, self.add_path_async_done)
        Logger.info('Playlist-{}: add_path_async: {}'.format(self.name, path))

    def add_path_async_done(self, result):
        """Callback: append files found by the background scan and persist."""
        Logger.info('Playlist-{}: add_path_async_done:'.format(self.name))
        self.media = self.media + result['playlist']
        self.refresh_media_id()
        self.save()

    def remove_indexes(self, index_list):
        '''Delete self.media items by indexes in index_list argument'''
        # Delete from the end so earlier indexes stay valid
        # (assumes index_list is sorted ascending -- TODO confirm callers).
        for x in reversed(index_list):
            del self.media[x]
        self.refresh_media_id()
        Logger.info('FileLoaderPlaylist: removed %s files' % (len(index_list)))
        self.save()

    def save(self):
        '''Save playlist, keeping only whitelisted keys of each media dict.'''
        save_list = []
        for mdict in self.media:
            new_dict = {}
            for k, v in mdict.items():
                if k in self.saved_media_keys:
                    new_dict[k] = v
            save_list.append(new_dict)
        self.save_json({
            'name': self.name,
            'playlist_type': 'file_loader',
            'media': save_list
        })

    @staticmethod
    def create(name, path, load_path):
        """Create, persist and return a new empty FileLoaderPlaylist."""
        playlist = FileLoaderPlaylist()
        playlist.load_path = load_path
        playlist.name = name
        playlist.path = path
        playlist.save()
        return playlist
| {
"content_hash": "af25ece442e21dfb71b9f62ebd6a4a90",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 79,
"avg_line_length": 31.636363636363637,
"alnum_prop": 0.5784072249589491,
"repo_name": "Bakterija/mmplayer",
"id": "f8d81e1c2b0d0ea9e642f3359d19c1c62326902f",
"size": "2436",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mmplayer/media_controller/playlist_loader/file_loader.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "450641"
}
],
"symlink_target": ""
} |
from django.contrib.auth.models import User
from django import forms
from apps.books.models import *
class UserProfileBookForm(forms.ModelForm):
    """ModelForm that links a user profile to a book with a reading status."""
    class Meta:
        # Only the relation fields are exposed to the form.
        model = UserProfileBook
        fields = ['user_profile', 'book', 'status']
"content_hash": "0f6cf1c7e5efd078fa9b964625f19fa4",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 51,
"avg_line_length": 28,
"alnum_prop": 0.7071428571428572,
"repo_name": "vuonghv/brs",
"id": "f2ea79850d3b5d369d304b0da6ce61da9cace77b",
"size": "280",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/books/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1062528"
},
{
"name": "HTML",
"bytes": "1877883"
},
{
"name": "JavaScript",
"bytes": "3157021"
},
{
"name": "PHP",
"bytes": "1684"
},
{
"name": "Python",
"bytes": "84367"
}
],
"symlink_target": ""
} |
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the ``force_password_reset`` boolean flag to UserContactInfo."""

    dependencies = [
        ('dojo', '0116_test_type_active'),
    ]

    operations = [
        migrations.AddField(
            model_name='usercontactinfo',
            name='force_password_reset',
            # Defaults to False so existing users are not forced to reset.
            field=models.BooleanField(default=False, help_text='Forces this user to reset their password on next login.'),
        ),
    ]
| {
"content_hash": "91e9369f63a703ac73e3337135ced3e6",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 122,
"avg_line_length": 26.5625,
"alnum_prop": 0.6188235294117647,
"repo_name": "rackerlabs/django-DefectDojo",
"id": "82d4c3ed341e2a8fec17da722654fd235f0c20d2",
"size": "475",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dojo/db_migrations/0117_usercontactinfo_force_password_reset.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "18132"
},
{
"name": "Groff",
"bytes": "91"
},
{
"name": "HTML",
"bytes": "666571"
},
{
"name": "JavaScript",
"bytes": "6393"
},
{
"name": "Python",
"bytes": "524728"
},
{
"name": "Shell",
"bytes": "20558"
},
{
"name": "XSLT",
"bytes": "6624"
}
],
"symlink_target": ""
} |
import sys
import argparse
import os
import getpass
import time
import pprint
sys.path.append("./library")
from na_funcs import *
from cisco_funcs import *
debug = False  # set True to dump gathered WWPNs and fcaliases before matching

# parse command line arguments and optional environment variables
arguments = argparse.ArgumentParser(
    description='Provide an igroup and filer and check the wwpns in that igroup are defined as fcaliases on an mds switch')
# Required targets: the MDS switch, the NetApp filer, and the igroup to check.
arguments.add_argument(
    '--switch_hostname', required=True, type=str,
    help='MDS switch fqdn or IP.')
arguments.add_argument(
    '--filer_hostname', required=True, type=str,
    help='filer fqdn or IP.')
arguments.add_argument(
    '--igroup', required=True, type=str,
    help='name of igroup on NetApp cDOT system to check')
# Optional credentials; environment variables (MDS_*/FILER_*) are consulted
# as fallbacks in the resolution logic that follows parse_args().
arguments.add_argument(
    '--switch_username', required=False, type=str,
    help='optional username to ssh into mds switch. Alternate: set environment variable MDS_USERNAME. If neither exists, defaults to current OS username')
arguments.add_argument(
    '--switch_password', required=False, type=str,
    help='optional password to ssh into mds switch. Alternate: set environment variable MDS_PASSWORD. If unset use_keys defaults to True.')
# NOTE(review): --switch_use_keys and --switch_key_file are parsed but never
# referenced afterwards -- confirm whether they should feed the ssh settings.
arguments.add_argument(
    '--switch_use_keys', action='store_true',
    help='use ssh keys to log into switch')
arguments.add_argument(
    '--switch_key_file', required=False, type=str,
    help='filename for ssh key file')
# NOTE(review): the two help strings below say "mds switch" but describe the
# filer -- looks like copy-paste; runtime text left unchanged here.
arguments.add_argument(
    '--filer_username', required=False, type=str,
    help='optional username to ssh into mds switch. Alternate: set environment variable FILER_USERNAME. If neither exists, defaults to admin')
arguments.add_argument(
    '--filer_password', required=False, type=str,
    help='optional password to ssh into mds switch. Alternate: set environment variable filer_PASSWORD. If unset use_keys defaults to True.')
args = arguments.parse_args()
# Resolve switch credentials: CLI flag > environment variable > ssh keys.
if args.switch_password :
    # bugfix: these two branches assigned `use_keys`, but the mds dict below
    # reads `switch_use_keys`, causing a NameError whenever a password was
    # supplied via --switch_password or MDS_PASSWORD.
    switch_use_keys = False
    switch_password = args.switch_password
elif os.getenv('MDS_PASSWORD') :
    switch_use_keys = False
    switch_password = os.getenv('MDS_PASSWORD')
else :
    # No password supplied anywhere -- fall back to ssh key authentication.
    switch_use_keys = True
    switch_password = ''

# Switch username: CLI flag > MDS_USERNAME > current OS user.
if args.switch_username :
    switch_username = args.switch_username
elif os.getenv('MDS_USERNAME') :
    switch_username = os.getenv('MDS_USERNAME')
else:
    switch_username = getpass.getuser()

# Filer password: CLI flag > FILER_PASSWORD > empty string.
if args.filer_password :
    filer_password = args.filer_password
elif os.getenv('FILER_PASSWORD') :
    filer_password = os.getenv('FILER_PASSWORD')
else :
    filer_password = ''

# Filer username: CLI flag > FILER_USERNAME > 'admin'.
if args.filer_username :
    filer_username = args.filer_username
elif os.getenv('FILER_USERNAME') :
    filer_username = os.getenv('FILER_USERNAME')
else:
    filer_username = 'admin'

switch_hostname = args.switch_hostname
filer_hostname = args.filer_hostname
igroup = args.igroup
# main loop
# netmiko connection parameters for the Cisco MDS switch.
mds = {
    'device_type': 'cisco_nxos',
    'ip': switch_hostname,
    'verbose': False,
    'username': switch_username,
    'password': switch_password,
    'use_keys': switch_use_keys
}
# (removed commented-out sample credentials -- they leaked a test password)
# Collect the WWPNs of the requested igroup from the NetApp filer.
filerconnect = cdotconnect(filer_hostname,filer_username,filer_password)
wwpns = getigroupwwpns(igroup, filerconnect)
# Pull the switch running-config and parse the fcalias definitions from it.
net_connect = ConnectHandler(**mds)
show_run_str = net_connect.send_command("show run")
show_run = show_run_str.splitlines()
cisco_cfg = CiscoConfParse(show_run)
fcalias_dict = parsefcaliases(cisco_cfg)
net_connect.disconnect()
if debug :
    print "DEBUG START: dump grabbed igroup WWPNs and switch fcaliases"
    pp = pprint.PrettyPrinter(indent=4)
    pp.pprint(wwpns)
    pp.pprint(fcalias_dict)
    print "DEBUG END: "
# Report every igroup WWPN that already has a matching fcalias on the switch.
# WWPNs with no match produce no output at all -- presumably intentional,
# but verify against the tool's expected reporting behaviour.
for check_wwpn in wwpns :
    for key in fcalias_dict.keys() :
        if check_wwpn in fcalias_dict[key]['pwwns'] :
            print "%s in igroup %s matches fcalias name %s on mds %s vsan %s" % (check_wwpn, igroup, fcalias_dict[key]['name'], switch_hostname, fcalias_dict[key]['vsan'])
| {
"content_hash": "d8d7a342d0d7a9a99b4949b8f78a33a4",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 171,
"avg_line_length": 32.83471074380165,
"alnum_prop": 0.7125597785049081,
"repo_name": "scottharney/python-mdszoning",
"id": "428307fea6dbd90ce01807482c653cb8e1ac63b8",
"size": "4160",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "singleinitiatorzone/checkwwpnigroupfcaliases.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "36475"
}
],
"symlink_target": ""
} |
from celery import Task
# django-haystack indexing automatically
from haystack.management.commands import update_index
class UpdateIndexTask(Task):
    """Celery task that rebuilds the django-haystack search index.

    Equivalent to running ``manage.py update_index``.
    """
    def run(self):
        # Invoke the haystack management command programmatically.
        update_index.Command().handle()
| {
"content_hash": "2cbb32e8e98ffde3cbf3ead9431f2f55",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 53,
"avg_line_length": 21.1,
"alnum_prop": 0.7582938388625592,
"repo_name": "deadlylaid/book_connect",
"id": "5c3dab532748eadd4926a0acf169b018143eb20f",
"size": "211",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wef/items/tasks/haystack_indexing.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4868"
},
{
"name": "HTML",
"bytes": "34491"
},
{
"name": "JavaScript",
"bytes": "12496"
},
{
"name": "Makefile",
"bytes": "565"
},
{
"name": "Python",
"bytes": "133934"
}
],
"symlink_target": ""
} |
'''
test for changing vm password
@author: SyZhao
'''
import apibinding.inventory as inventory
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.operations.vm_operations as vm_ops
import zstacklib.utils.ssh as ssh
import test_stub
# Accounts that already exist on the image before the test changes anything.
exist_users = ["root"]
# (user, password) pairs cycled through when changing the VM password.
users = ["root", "root" ]
passwds = ["password", "95_aaapcn"]
vm = None  # the test VM; shared with error_cleanup()
# Last credentials known to work; used by the liveness checks on failure.
cur_usr = None
cur_passwd = None
def check_vm_is_alive(vm):
    '''Probe the VM over ssh with a trivial command and log a diagnostic
    when it does not respond (used during error cleanup).'''
    global cur_usr, cur_passwd
    cmd = "pwd"
    vm_ip = vm.get_vm().vmNics[0].ip
    ret, output, stderr = ssh.execute(cmd, vm_ip, cur_usr, cur_passwd, False, 22)
    if ret == 0:
        return
    test_util.test_logger("VM is not alived when exception triggered: ip:%s; cmd:%s; user:%s; password:%s; stdout:%s, stderr:%s" %(vm_ip, cmd, cur_usr, cur_passwd, output, stderr))
def check_qemu_ga_is_alive(vm):
    '''Check over ssh whether the qemu guest agent process is running in
    the VM and log a diagnostic when it is not (used during error cleanup).'''
    global cur_usr, cur_passwd
    cmd = "ps -aux|grep ga|grep qemu"
    vm_ip = vm.get_vm().vmNics[0].ip
    ret, output, stderr = ssh.execute(cmd, vm_ip, cur_usr, cur_passwd, False, 22)
    if ret == 0:
        return
    test_util.test_logger("qemu-ga is not alived when exception triggered: ip:%s; cmd:%s; user:%s; password:%s; stdout:%s, stderr:%s" %(vm_ip, cmd, cur_usr, cur_passwd, output, stderr))
def test():
    """Change the VM password through the users/passwds pairs, verify ssh
    login works after each change, then restore the original root password
    and tear the VM down."""
    global vm, exist_users, cur_usr, cur_passwd
    test_util.test_dsc('change VM with assigned password test')
    vm = test_stub.create_vm(vm_name = 'ckvmpswd-c6-64', image_name = "imageName_i_c6")
    vm.check()
    # Require a backup storage of a supported type; the for/else below runs
    # the else (skip) only when no break fired, i.e. no supported type found.
    backup_storage_list = test_lib.lib_get_backup_storage_list_by_vm(vm.vm)
    for bs in backup_storage_list:
        if bs.type == inventory.IMAGE_STORE_BACKUP_STORAGE_TYPE:
            break
        if bs.type == inventory.SFTP_BACKUP_STORAGE_TYPE:
            break
        if bs.type == inventory.CEPH_BACKUP_STORAGE_TYPE:
            break
    else:
        vm.destroy()
        test_util.test_skip('Not find image store type backup storage.')
    # Track the credentials currently valid on the VM so that the liveness
    # checks in error_cleanup() can still log in.
    cur_usr = "root"
    cur_passwd = "password"
    for (usr,passwd) in zip(users, passwds):
        #When vm is running:
        vm_ops.change_vm_password(vm.get_vm().uuid, usr, passwd, skip_stopped_vm = None, session_uuid = None)
        cur_usr = usr
        cur_passwd = passwd
        if not test_lib.lib_check_login_in_vm(vm.get_vm(), usr, passwd):
            # NOTE(review): usr/passwd are passed as extra positional args
            # rather than %-formatted into the message -- confirm that
            # test_util.test_fail supports this calling style.
            test_util.test_fail("create vm with user:%s password: %s failed", usr, passwd)
    #When vm is stopped:
    #vm.stop()
    # Restore the original root password before cleanup.
    vm_ops.change_vm_password(vm.get_vm().uuid, "root", test_stub.original_root_password)
    cur_usr = "root"
    cur_passwd = "password"
    #vm.start()
    vm.check()
    vm.destroy()
    vm.check()
    vm.expunge()
    vm.check()
    test_util.test_pass('Set password when VM is creating is successful.')
#Will be called only if exception happens in test().
def error_cleanup():
    '''Invoked only when test() raises: record VM/qemu-ga liveness
    diagnostics, then destroy and expunge the VM if one was created.'''
    global vm
    check_vm_is_alive(vm)
    check_qemu_ga_is_alive(vm)
    if not vm:
        return
    vm.destroy()
    vm.expunge()
| {
"content_hash": "3c769b6f999ff0099f0058df9193be5b",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 208,
"avg_line_length": 31.138613861386137,
"alnum_prop": 0.6120826709062003,
"repo_name": "zstackorg/zstack-woodpecker",
"id": "7422b964301bc8034cabc3ea9a4bfcde79ef961e",
"size": "3145",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "integrationtest/vm/vm_password/test_chg_vm_passwd_c6.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Go",
"bytes": "46522"
},
{
"name": "Makefile",
"bytes": "692"
},
{
"name": "Puppet",
"bytes": "875"
},
{
"name": "Python",
"bytes": "2891030"
},
{
"name": "Shell",
"bytes": "54266"
}
],
"symlink_target": ""
} |
import json #required to read and parse json files
import os #required for file path function
import psycopg2 #required for postgres
import sys #also required for postgres?
##########################################################################
## Module Variables/Constants
##########################################################################
# Postgres connection settings.
# NOTE(review): credentials are hard-coded in source -- consider moving them
# to environment variables or a config file.
DB = 'triptomizer'
user = 'postgres'
password = 'psqlpass'
host = 'localhost'
port = '5433'
conn_str="host={} port={} dbname={} user={} password={}".format(host, port, DB, user, password)
conn=psycopg2.connect(conn_str)
c=conn.cursor()
#create new tables if necessary
c.execute("CREATE TABLE IF NOT EXISTS drivingdata (id serial PRIMARY KEY, timeoftravel varchar(10), airport varchar(20), distance int, duration int);")
c.execute("CREATE TABLE IF NOT EXISTS flightdata (id serial PRIMARY KEY, airport varchar(3), departuretime varchar(24), arrivaltime varchar(24), duration int, cost varchar(20), tripid varchar(40), airline varchar(24));")
conn.commit()
#this is a table to create lookup table for airline codes and names
# NOTE(review): this CREATE is not followed by its own commit; it is only
# persisted by the next commit further down -- confirm that is intended.
c.execute("CREATE TABLE IF NOT EXISTS airlinecodesall (id serial PRIMARY KEY, airlinecode varchar(10), airlinename varchar(40));")
#create paths to driving and flight data files
drivepath = os.path.join(os.getcwd(), 'data')
flightpath = os.getcwd()
#make lists of the files
drive_jsons = [j for j in os.listdir(drivepath) if j.endswith('.json')]
# Flight files are recognised by their airport-code suffix in the filename.
flight_jsons = [j for j in os.listdir(flightpath) if ((j.endswith('DCA.json')) or (j.endswith('IAD.json')) or (j.endswith('BWI.json')))]
#go through each json file, get the data you need, and save it to the db
for j in drive_jsons:
    filename = os.path.join(drivepath, j)
    with open(filename) as json_file:
        data=json.load(json_file)
        for d in data:
            if d=='route':
                distance=data["route"]["distance"]
                durationmin=data["route"]["realTime"]
                # NOTE(review): under Python 2 this is integer division
                # (seconds -> whole minutes); under Python 3 it yields a
                # float stored in an int column -- confirm target runtime.
                duration=durationmin/60
                timeoftravel=data["route"]["options"]["localTime"]
                # The second location in the route is assumed to be the
                # airport -- TODO confirm against the MapQuest request.
                airportpostalcode = data["route"]["locations"][1]["postalCode"]
                # Parameterized insert (no SQL string concatenation).
                SQL = "INSERT INTO drivingdata (distance, duration, timeoftravel, airport) VALUES (%s, %s, %s, %s);"
                c.execute(SQL, (distance, duration, timeoftravel, airportpostalcode))
                conn.commit()
#go through each flight file, get the data you need, and save it to db
for j in flight_jsons:
    filename = os.path.join(flightpath,j)
    # Airport code is taken from the filename, e.g. "...DCA.json" -> "DCA".
    airport=filename[-8:-5]
    with open(filename, "rb") as json_file:
        data=json.load(json_file)
        for d in data:
            if d == 'trips':
                # Populate the airline code/name lookup table.
                carriers = data['trips']['data']['carrier']
                x=0
                for car in carriers:
                    airlinecode=carriers[x]['code']
                    airlinename=carriers[x]['name']
                    SQL = "INSERT INTO airlinecodesall (airlinecode, airlinename) VALUES (%s, %s);"
                    c.execute(SQL, (airlinecode, airlinename))
                    conn.commit()
                    x=x+1
                # One row per trip option: departure from the origin airport
                # and arrival at LAX, with total duration and sale price.
                tripOptions = data['trips']['tripOption']
                x=0
                for t in tripOptions:
                    tripid=tripOptions[x]["id"]
                    cost=tripOptions[x]['saleTotal']
                    duration = tripOptions[x]['slice'][0]['duration']
                    airline=tripOptions[x]['slice'][0]['segment'][0]['flight']['carrier']
                    legs=tripOptions[x]['slice'][0]['segment']
                    for leg in legs:
                        if ((leg['leg'][0]['origin'])==airport):
                            departuretime = leg['leg'][0]['departureTime']
                        if ((leg['leg'][0]['destination'])=="LAX"):
                            arrivaltime = leg['leg'][0]['arrivalTime']
                    # put it in the table and save
                    # NOTE(review): if no leg matched, departuretime/
                    # arrivaltime carry over from the previous trip option
                    # (or raise NameError on the first) -- verify data.
                    SQL = "INSERT INTO flightdata (airport, departuretime, arrivaltime, duration, cost, tripid, airline) VALUES (%s, %s, %s, %s, %s, %s, %s);"
                    c.execute(SQL, (airport, departuretime, arrivaltime, duration, cost, tripid, airline))
                    conn.commit()
                    x=x+1
#populate the airline code and name lookup table
#update the values in the driving table to show airport code in place of airport zip code
c.execute("UPDATE drivingdata SET airport=%s WHERE airport=%s", ("IAD", "20166"))
c.execute("UPDATE drivingdata SET airport=%s WHERE airport=%s", ("DCA", "22202"))
c.execute("UPDATE drivingdata SET airport=%s WHERE airport=%s", ("BWI", "21240-2004"))
conn.commit()
#update the values in the flight table to show cost as a Number instead of a string (remove "USD")
c.execute("SELECT * FROM flightdata")
rows=c.fetchall()
for row in rows:
    cost_string = row[5]
    # Strips the first 3 characters -- assumes every cost starts with "USD".
    # NOTE(review): re-running this script would strip 3 digits from already
    # normalized values; the UPDATE also rewrites every row sharing the same
    # cost string (idempotent within one pass) -- confirm single-run usage.
    cost_number = cost_string[3:]
    c.execute("UPDATE flightdata SET cost=%s WHERE cost=%s", (cost_number, cost_string))
    conn.commit()
| {
"content_hash": "f62c5376c519f309e48d788880978335",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 220,
"avg_line_length": 46.166666666666664,
"alnum_prop": 0.5886482150020056,
"repo_name": "georgetown-analytics/triptomizer",
"id": "20fe8612c5b16bb526b0d763c1174f10b31f9383",
"size": "5171",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "todb.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "261101"
},
{
"name": "Python",
"bytes": "21290"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.