text stringlengths 4 1.02M | meta dict |
|---|---|
from __future__ import print_function, division
import sys
import os
sys.path.append(os.path.abspath("."))
sys.dont_write_bytecode = True
# from datasets.albrecht import Albrecht
# from datasets.china import China
# from datasets.desharnais import Desharnais
# from datasets.finnish import Finnish
# from datasets.isbsg10 import ISBSG10
# from datasets.kemerer import Kemerer
# from datasets.kitchenhamm import Kitchenhamm
# from datasets.maxwell import Maxwell
# from datasets.miyazaki import Miyazaki
from datasets.cleaned.albrecht import Albrecht
from datasets.cleaned.china import China
from datasets.cleaned.desharnais import Desharnais
from datasets.cleaned.finnish import Finnish
from datasets.cleaned.isbsg10 import ISBSG10
from datasets.cleaned.kemerer import Kemerer
from datasets.cleaned.kitchenhamm import Kitchenhamm
from datasets.cleaned.maxwell import Maxwell
from datasets.cleaned.miyazaki import Miyazaki
from utils.lib import *
from utils.validation import *
from methods.peeking import peeking2
from methods.cart import cart
from methods.teak import teak
from methods.knn import knn_1, knn_3
from methods.cogee import cogee
from methods.atlm import atlm
from optimizer.teak_optimize import teak_optimize
from utils.errors import *
from utils import sk
from joblib import Parallel, delayed
from time import time
# Benchmark datasets (the cleaned variants imported above) evaluated by
# run() and the run_patrick*/sarro experiments below.
datasets = [Albrecht, Desharnais, Finnish, Kemerer, Maxwell,
            Miyazaki, China, ISBSG10, Kitchenhamm]
# Default error measure used by run(); presumably msae is provided by the
# star import from utils.errors -- confirm.
error = msae
def mre_calc(y_predict, y_actual):
    """Return the median magnitude of relative error of the predictions,
    falling back to the mean when the median is exactly zero."""
    relative_errors = [abs(predicted - actual) / actual
                       for predicted, actual in zip(y_predict, y_actual)]
    summary = np.median(relative_errors)
    if summary == 0:
        # A zero median hides any spread; the mean is more informative then.
        summary = np.mean(relative_errors)
    return summary
def sa_calc(y_predict, y_actual):
    """Return a standardized-accuracy style score: 1 - MAR / mean(actual),
    where MAR is the mean absolute residual of the predictions."""
    total_abs_residual = sum(abs(predicted - actual)
                             for predicted, actual in zip(y_predict, y_actual))
    mean_abs_residual = total_abs_residual / len(y_predict)
    mean_actual_effort = sum(y_actual) / len(y_actual)
    return 1 - mean_abs_residual / mean_actual_effort
def run(reps=1):
    """Benchmark all eight estimation methods on every dataset.

    For each dataset, accumulate each method's error over ``reps``
    repetitions of 3-fold cross validation, then print a Scott-Knott
    ranking of the methods.

    :param reps: how many times the 3-fold split is repeated.
    """
    for dataset_class in datasets:
        dataset = dataset_class()
        model_scores = {"CART": N(),
                        "PEEKING": N(),
                        "TEAK": N(),
                        "KNN1": N(),
                        "KNN3": N(),
                        "ATLM": N(),
                        "COGEE": N(),
                        "O_TEAK": N()
                        }
        # NOTE(review): presumably `go = True` switches the N accumulators
        # into collecting mode -- confirm in utils.lib.
        for score in model_scores.values():
            score.go = True
        # Fix: was `xrange`, which only exists on Python 2; `range` matches
        # every other function in this module (run_for_dataset,
        # sarro_cogee_dataset, run_patrick_v2 all use range).
        for _ in range(reps):
            for test, rest in kfold(dataset.get_rows(), 3, shuffle=True):
                say(".")  # progress tick
                desired_effort = [dataset.effort(row) for row in test]
                all_efforts = [dataset.effort(one) for one in rest]
                model_scores["PEEKING"] += error(desired_effort, peeking2(dataset, test, rest), all_efforts)
                model_scores["CART"] += error(desired_effort, cart(dataset, test, rest), all_efforts)
                model_scores["TEAK"] += error(desired_effort, teak(dataset, test, rest), all_efforts)
                model_scores["KNN1"] += error(desired_effort, knn_1(dataset, test, rest), all_efforts)
                model_scores["KNN3"] += error(desired_effort, knn_3(dataset, test, rest), all_efforts)
                model_scores["ATLM"] += error(desired_effort, atlm(dataset, test, rest), all_efforts)
                model_scores["COGEE"] += error(desired_effort, cogee(dataset, test, rest), all_efforts)
                model_scores["O_TEAK"] += error(desired_effort, teak_optimize(dataset, test, rest), all_efforts)
        sk_data = [[key] + n.cache.all for key, n in model_scores.items()]
        print("\n### %s (%d projects, %d decisions)" %
              (dataset_class.__name__, len(dataset.get_rows()), len(dataset.dec_meta)))
        print("```")
        sk.rdivDemo(sk_data)
        print("```")
        print("")
def run_for_dataset(dataset_class, dataset_id, reps):
    """Time and score ATLM, CART and COGEE on one dataset.

    Writes one ';'-separated row per (method, fold) into
    results/<Dataset>_sa_mre.txt and returns that file's path.
    Row columns: dataset;method-id;MRE;SA;runtime.

    NOTE(review): ``dataset_id`` is accepted but never used in this body;
    it is supplied by the parallel driver -- confirm whether it can go.
    NOTE(review): the file is opened 'wb' but written with str data, which
    relies on Python 2 file semantics.
    """
    write_file = "results/%s_sa_mre.txt" % dataset_class.__name__
    with open(write_file, "wb") as f:
        dataset = dataset_class()
        dataset_name = dataset_class.__name__
        print("\n### %s (%d projects, %d decisions)" %
              (dataset_name, len(dataset.get_rows()), len(dataset.dec_meta)))
        # folds = 3 if len(dataset.get_rows()) < 40 else 10
        folds = 3
        for rep in range(reps):
            fold_id = 0
            for test, rest in kfold(dataset.get_rows(), folds, shuffle=True):
                print("Running for %s, rep = %d, fold = %d" % (dataset_name, rep + 1, fold_id))
                fold_id += 1
                all_efforts = [dataset.effort(one) for one in rest]
                actual_efforts = [dataset.effort(row) for row in test]
                # Every method's runtime below is measured from this single
                # `start`, so cart's and cogee's reported runtimes include
                # the time spent by the methods run before them.
                start = time()
                atlm_efforts = atlm(dataset, test, rest)
                atlm_end = time()
                cart_efforts = cart(dataset, test, rest)
                cart_end = time()
                cogee_efforts = cogee(dataset, test, rest)
                cogee_end = time()
                atlm_mre, atlm_sa = mre_calc(atlm_efforts, actual_efforts), msa(actual_efforts, atlm_efforts, all_efforts)
                cart_mre, cart_sa = mre_calc(cart_efforts, actual_efforts), msa(actual_efforts, cart_efforts, all_efforts)
                cogee_mre, cogee_sa = mre_calc(cogee_efforts, actual_efforts), msa(actual_efforts, cogee_efforts, all_efforts)
                # Method ids: 1 = ATLM, 2 = CART, 3 = COGEE.
                f.write("%s;%d;%f;%f;%f\n" % (dataset_name, 1, atlm_mre, atlm_sa, atlm_end - start))
                f.write("%s;%d;%f;%f;%f\n" % (dataset_name, 2, cart_mre, cart_sa, cart_end - start))
                f.write("%s;%d;%f;%f;%f\n" % (dataset_name, 3, cogee_mre, cogee_sa, cogee_end - start))
    return write_file
def run_patrick(reps, num_cores, consolidated_file="results/patrick_sa_mre.txt"):
    """Run run_for_dataset over every dataset in parallel and merge the
    per-dataset result files into ``consolidated_file``.

    :param reps: repetitions of the k-fold split per dataset.
    :param num_cores: number of joblib worker processes.
    :param consolidated_file: path of the merged output file.
    """
    local_datasets = datasets
    dataset_files = Parallel(n_jobs=num_cores)(
        delayed(run_for_dataset)(dataset_class, dataset_id, reps)
        for dataset_id, dataset_class in enumerate(local_datasets))
    with open(consolidated_file, "wb") as f:
        # Fix: the data rows are written MRE-then-SA (see run_for_dataset),
        # but the header previously claimed "SA;MRE" -- the two labels were
        # swapped relative to the values beneath them.
        f.write("dataset;method;MRE;SA;Runtime\n")
        for dataset_file in dataset_files:
            with open(dataset_file) as df:
                for line in df.readlines():
                    if len(line) > 0:
                        f.write("%s" % line)
def sarro_cogee_dataset(dataset_class, error, folds, reps):
    """Score CART, ATLM and COGEE on one dataset with repeated k-fold CV,
    print a Scott-Knott ranking, and write it to results/sarro/<Dataset>.txt.

    :param dataset_class: dataset class to instantiate and evaluate.
    :param error: error-measure callable taking
        (desired_efforts, predicted_efforts, all_training_efforts).
    :param folds: folds per cross-validation split.
    :param reps: number of times the split is repeated.
    :return: path of the report file written.
    """
    dataset = dataset_class()
    print("\n### %s (%d projects, %d decisions)" %
          (dataset_class.__name__, len(dataset.get_rows()), len(dataset.dec_meta)))
    model_scores = {"CART": N(),
                    "ATLM": N(),
                    "COGEE": N()
                    }
    # NOTE(review): presumably `go = True` switches the N accumulators into
    # collecting mode -- confirm in utils.lib.
    for score in model_scores.values():
        score.go = True
    for _ in range(reps):
        for test, rest in kfold(dataset.get_rows(), folds, shuffle=True):
            say(".")  # progress tick
            desired_effort = [dataset.effort(row) for row in test]
            all_efforts = [dataset.effort(one) for one in rest]
            model_scores["CART"] += error(desired_effort, cart(dataset, test, rest), all_efforts)
            model_scores["ATLM"] += error(desired_effort, atlm(dataset, test, rest), all_efforts)
            model_scores["COGEE"] += error(desired_effort, cogee(dataset, test, rest), all_efforts)
    sk_data = [[key] + n.cache.all for key, n in model_scores.items()]
    print("```")
    # NOTE(review): rdivDemo's return value is written to the report below;
    # confirm it returns the ranking text as well as printing it.
    stat = sk.rdivDemo(sk_data)
    print("```")
    print("")
    write_file = "%s/%s.txt" % ("results/sarro", dataset_class.__name__)
    with open(write_file, "wb") as f:
        f.write("\n### %s (%d projects, %d decisions)\n" %
                (dataset_class.__name__, len(dataset.get_rows()), len(dataset.dec_meta)))
        f.write("```\n%s\n```\n\n" % stat)
    return write_file
def sarro_cogee(num_cores, folds=3, reps=10):
    """Run the Sarro-style CART/ATLM/COGEE comparison over all datasets in
    parallel and concatenate the per-dataset reports into
    results/sarro/sa.md.

    :param num_cores: number of joblib worker processes.
    :param folds: folds per cross-validation split.
    :param reps: repetitions of the split per dataset.
    """
    # Local list shadows the module-level `datasets` on purpose (different
    # ordering for this study).
    datasets = [China, Desharnais, Finnish, Maxwell, Miyazaki,
                Albrecht, Kemerer, ISBSG10, Kitchenhamm]
    mkdir("results/sarro")
    error = msa
    report_files = Parallel(n_jobs=num_cores)(
        delayed(sarro_cogee_dataset)(dataset_class, error, folds, reps)
        for dataset_id, dataset_class in enumerate(datasets))
    consolidated_file = "results/sarro/sa.md"
    with open(consolidated_file, "wb") as consolidated:
        for report_file in report_files:
            with open(report_file) as report:
                for line in report.readlines():
                    consolidated.write(line)
def run_patrick_v2():
    """Run ATLM alone over every dataset (20 reps of 3-fold CV), collect the
    per-fold MRE and SA values plus the total runtime per dataset, and write
    everything to results/patrick_sa_mre_v2.txt.
    """
    reps = 20
    folds = 3
    contents = []  # one report section per dataset
    for dataset_class in datasets:
        dataset = dataset_class()
        dataset_name = dataset_class.__name__
        # The runtime reported below spans all reps*folds for this dataset,
        # including the progress printing.
        start = time()
        atlm_mres, atlm_sas = [], []
        for rep in range(reps):
            fold_id = 0
            for test, rest in kfold(dataset.get_rows(), folds, shuffle=True):
                print("Running for %s, rep = %d, fold = %d" % (dataset_name, rep + 1, fold_id))
                fold_id += 1
                all_efforts = [dataset.effort(one) for one in rest]
                actual_efforts = [dataset.effort(row) for row in test]
                atlm_efforts = atlm(dataset, test, rest)
                atlm_mre, atlm_sa = mre_calc(atlm_efforts, actual_efforts), msa(actual_efforts, atlm_efforts, all_efforts)
                atlm_mres.append(atlm_mre)
                atlm_sas.append(atlm_sa)
        end = time()
        content = "dataset: %s\ntotal runtime: %f\n" % (dataset_name, end - start)
        content += "\nMRE\n" + " ".join(map(str, atlm_mres)) + "\n"
        content += "\nSA\n" + " ".join(map(str, atlm_sas)) + "\n"
        contents.append(content)
    # NOTE(review): opened 'wb' but written with str data -- Python 2 only.
    with open("results/patrick_sa_mre_v2.txt", "wb") as f:
        f.write("\n########################################\n\n".join(contents))
def _sarro():
    """Entry point for the Sarro comparison study: 10 reps of 3-fold CV
    across 16 worker processes."""
    sarro_cogee(16, folds=3, reps=10)
def _main():
    """Default entry point: the ATLM-only timing experiment.

    An earlier experiment is kept for reference:
    ``run_patrick(reps=20, num_cores=16,
    consolidated_file="results/patrick_sa_mre_v2.txt")``.
    """
    run_patrick_v2()
if __name__ == "__main__":
    _main()
    # _sarro()  # alternative entry point: the Sarro comparison study
| {
"content_hash": "6cf0400e7ef3c4888179ca00004d8938",
"timestamp": "",
"source": "github",
"line_count": 243,
"max_line_length": 118,
"avg_line_length": 39.016460905349795,
"alnum_prop": 0.6246176563653623,
"repo_name": "dr-bigfatnoob/effort",
"id": "6bb2bdf62d0f2daa61236372311d65af9bdc6cd3",
"size": "9481",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "runner.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "126825"
},
{
"name": "Shell",
"bytes": "163"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import sys
import dataset
import logging
import importlib
from . import config
from . import helpers
logger = logging.getLogger(__name__)
# Module API
def cli(argv):
    """Dispatch to a collector module.

    ``argv[1]`` names a module under the ``collectors`` package; any
    further argv entries are forwarded as positional arguments to that
    module's ``collect`` function.
    """
    # Conf dict: every UPPER_CASE variable exposed by the config module.
    conf = helpers.get_variables(config, str.isupper)
    # Conn dict: shared database handle(s) passed to every collector.
    conn = {
        'warehouse': dataset.connect(config.WAREHOUSE_URL),
    }
    # Import the requested collector lazily and run it.
    # NOTE(review): argv[1] is required -- a missing collector name raises
    # IndexError here; confirm callers always pass it.
    collect = importlib.import_module('collectors.%s' % argv[1]).collect
    collect(conf, conn, *argv[2:])


if __name__ == '__main__':
    cli(sys.argv)
| {
"content_hash": "f79b6aa9d533eac58f866e5a0e2f1b07",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 72,
"avg_line_length": 21.4375,
"alnum_prop": 0.6793002915451894,
"repo_name": "opentrials/scraper",
"id": "f6a5f8010f8e86f96746f57d3a34f0b9326650e8",
"size": "710",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "collectors/base/cli.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "643"
},
{
"name": "Mako",
"bytes": "618"
},
{
"name": "Python",
"bytes": "141926"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from datetime import datetime
from django.conf import settings
from django.http import HttpResponse, Http404
from django.views.decorators.csrf import csrf_exempt
from functools import update_wrapper
from pycon.pycon_api.exceptions import AuthenticationError
from pycon.pycon_api.models import APIAuth
import json
# json's built-in encoder cannot serialize datetime objects, so we extend
# it.  A fixed, explicit format is used so microseconds are preserved.
DATETIME_FORMAT = '%Y-%m-%d %H:%M:%S.%f'


class JSONDatetimeEncoder(json.JSONEncoder):
    """JSON encoder that renders datetime objects via DATETIME_FORMAT."""

    def default(self, o):
        # Only datetimes get special treatment; everything else defers to
        # the base class, which raises TypeError for unknown types.
        if isinstance(o, datetime):
            return o.strftime(DATETIME_FORMAT)
        return super(JSONDatetimeEncoder, self).default(o)
def api_view(method):
    """Decorator that forces the view to require a valid
    API key in order to be processed.

    Calls to the view that do not have an appropriate key
    will return a 403 response.  Successful responses are JSON-wrapped as
    ``{"code": <status>, "data": <view return value>}``.
    """
    def f(request, *args, **kwargs):
        # Ensure that there is an appropriate key attached to this request,
        # and return a 403 otherwise.  (Comment fixed: the previous comment
        # said 401, but the response below has always been 403.)
        try:
            APIAuth.verify_request(request)
        except AuthenticationError as ex:
            # NOTE(review): `unicode` is Python 2 only -- this module will
            # not run on Python 3 as written.
            return HttpResponse(
                content=json.dumps({
                    'code': 403,
                    'error': unicode(ex).strip("'"),
                }),
                content_type='application/json',
                status=403,
            )
        # Run the decorated method.
        try:
            response = method(request, *args, **kwargs)
            code = 200
            # Sanity check: Did we get a tuple back?
            # This shorthand provides me an easy way to send back
            # a success response that is not a 200.
            if isinstance(response, tuple) and len(response) == 2:
                response, code = response
            # JSONDatetimeEncoder lets views return datetime values.
            return HttpResponse(
                json.dumps({
                    'code': code,
                    'data': response,
                }, cls=JSONDatetimeEncoder),
                content_type='application/json',
                status=code,
            )
        except Http404 as ex:
            msg = unicode(ex).strip("'")
            return HttpResponse(
                content=json.dumps({
                    'code': 404,
                    'error': msg if msg else 'not found',
                }),
                content_type='application/json',
                status=404,
            )
    # API calls authenticate via key, not session, so CSRF does not apply.
    f = csrf_exempt(f)
    return update_wrapper(f, method)
| {
"content_hash": "570e62e2acd56272878ead0001fe89e2",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 71,
"avg_line_length": 34.1948051948052,
"alnum_prop": 0.5685529813900494,
"repo_name": "alex/pycon",
"id": "ff2120b9c4c09db1c6c87a4ad6792d97dd623c5f",
"size": "2633",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "pycon/pycon_api/decorators.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
def disable_for_loaddata(signal_function):
    """Decorator that makes a signal handler a no-op during fixture loading.

    When the handler is invoked with a truthy ``raw`` keyword argument
    (as Django does while ``loaddata`` runs -- confirm), the wrapped
    function is skipped entirely; otherwise it runs normally.
    """
    def wrapped_signal(**kwargs):
        if kwargs.get('raw'):
            return None
        return signal_function(**kwargs)
    return wrapped_signal
| {
"content_hash": "86b8ee98b4471fa95d613853b3c6a8ae",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 49,
"avg_line_length": 28.727272727272727,
"alnum_prop": 0.6075949367088608,
"repo_name": "PFWhite/qipr_approver",
"id": "cf4d18f8acda58689c44fe3b59cc213ad1775230",
"size": "316",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "qipr_approver/approver/decorators/disable_for_loaddata.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "201040"
},
{
"name": "HTML",
"bytes": "81379"
},
{
"name": "JavaScript",
"bytes": "304518"
},
{
"name": "Python",
"bytes": "236569"
},
{
"name": "Ruby",
"bytes": "809"
},
{
"name": "Shell",
"bytes": "5252"
},
{
"name": "Vim script",
"bytes": "1716"
}
],
"symlink_target": ""
} |
import xml.sax
import base64
import time
import boto
import boto.utils
import urllib
from boto.connection import AWSQueryConnection
from boto import handler
from boto.resultset import ResultSet
from boto.rds.dbinstance import DBInstance
from boto.rds.dbsecuritygroup import DBSecurityGroup
from boto.rds.parametergroup import ParameterGroup
from boto.rds.dbsnapshot import DBSnapshot
from boto.rds.event import Event
#boto.set_stream_logger('rds')
class RDSConnection(AWSQueryConnection):
    """Connection to the Amazon RDS service (2009-10-16 query API)."""

    DefaultHost = 'rds.amazonaws.com'  # default service endpoint
    APIVersion = '2009-10-16'          # API version sent with each request
    SignatureVersion = '2'             # AWS request signature version
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure=True, port=None, proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None, host=DefaultHost, debug=0,
https_connection_factory=None, path='/'):
AWSQueryConnection.__init__(self, aws_access_key_id, aws_secret_access_key,
is_secure, port, proxy, proxy_port, proxy_user,
proxy_pass, self.DefaultHost, debug,
https_connection_factory, path)
# DB Instance methods
def get_all_dbinstances(self, instance_id=None, max_records=None,
marker=None):
"""
Retrieve all the DBInstances in your account.
:type instance_id: str
:param instance_id: DB Instance identifier. If supplied, only information
this instance will be returned. Otherwise, info
about all DB Instances will be returned.
:type max_records: int
:param max_records: The maximum number of records to be returned.
If more results are available, a MoreToken will
be returned in the response that can be used to
retrieve additional records. Default is 100.
:type marker: str
:param marker: The marker provided by a previous request.
:rtype: list
:return: A list of :class:`boto.rds.dbinstance.DBInstance`
"""
params = {}
if instance_id:
params['DBInstanceIdentifier'] = instance_id
if max_records:
params['MaxRecords'] = max_records
if marker:
params['Marker'] = marker
return self.get_list('DescribeDBInstances', params, [('DBInstance', DBInstance)])
def create_dbinstance(self, id, allocated_storage, instance_class,
master_username, master_password, port=3306,
engine='MySQL5.1', db_name=None, param_group=None,
security_groups=None, availability_zone=None,
preferred_maintenance_window=None,
backup_retention_period=None,
preferred_backup_window=None):
"""
Create a new DBInstance.
:type id: str
:param id: Unique identifier for the new instance.
Must contain 1-63 alphanumeric characters.
First character must be a letter.
May not end with a hyphen or contain two consecutive hyphens
:type allocated_storage: int
:param allocated_storage: Initially allocated storage size, in GBs.
Valid values are [5-1024]
:type instance_class: str
:param instance_class: The compute and memory capacity of the DBInstance.
Valid values are:
db.m1.small | db.m1.large | db.m1.xlarge |
db.m2.2xlarge | db.m2.4xlarge
:type engine: str
. :param engine: Name of database engine. Must be MySQL5.1 for now.
:type master_username: str
:param master_username: Name of master user for the DBInstance.
Must be 1-15 alphanumeric characters, first must be
a letter.
:type master_password: str
:param master_password: Password of master user for the DBInstance.
Must be 4-16 alphanumeric characters.
:type port: int
:param port: Port number on which database accepts connections.
Valid values [1115-65535]. Defaults to 3306.
:type db_name: str
:param db_name: Name of a database to create when the DBInstance
is created. Default is to create no databases.
:type param_group: str
:param param_group: Name of DBParameterGroup to associate with
this DBInstance. If no groups are specified
no parameter groups will be used.
:type security_groups: list of str or list of DBSecurityGroup objects
:param security_groups: List of names of DBSecurityGroup to authorize on
this DBInstance.
:type availability_zone: str
:param availability_zone: Name of the availability zone to place
DBInstance into.
:type preferred_maintenance_window: str
:param preferred_maintenance_window: The weekly time range (in UTC) during
which maintenance can occur.
Default is Sun:05:00-Sun:09:00
:type backup_retention_period: int
:param backup_retention_period: The number of days for which automated
backups are retained. Setting this to
zero disables automated backups.
:type preferred_backup_window: str
:param preferred_backup_window: The daily time range during which
automated backups are created (if
enabled). Must be in h24:mi-hh24:mi
format (UTC).
:rtype: :class:`boto.rds.dbinstance.DBInstance`
:return: The new db instance.
"""
params = {'DBInstanceIdentifier' : id,
'AllocatedStorage' : allocated_storage,
'DBInstanceClass' : instance_class,
'Engine' : engine,
'MasterUsername' : master_username,
'MasterUserPassword' : master_password}
if port:
params['Port'] = port
if db_name:
params['DBName'] = db_name
if param_group:
params['DBParameterGroup'] = param_group
if security_groups:
l = []
for group in security_groups:
if isinstance(group, DBSecurityGroup):
l.append(group.name)
else:
l.append(group)
self.build_list_params(params, l, 'DBSecurityGroups.member')
if availability_zone:
params['AvailabilityZone'] = availability_zone
if preferred_maintenance_window:
params['PreferredMaintenanceWindow'] = preferred_maintenance_window
if backup_retention_period:
params['BackupRetentionPeriod'] = backup_retention_period
if preferred_backup_window:
params['PreferredBackupWindow'] = preferred_backup_window
return self.get_object('CreateDBInstance', params, DBInstance)
def modify_dbinstance(self, id, param_group=None, security_groups=None,
preferred_maintenance_window=None,
master_password=None, allocated_storage=None,
backup_retention_period=None,
preferred_backup_window=None,
apply_immediately=False):
"""
Modify an existing DBInstance.
:type id: str
:param id: Unique identifier for the new instance.
:type security_groups: list of str or list of DBSecurityGroup objects
:param security_groups: List of names of DBSecurityGroup to authorize on
this DBInstance.
:type preferred_maintenance_window: str
:param preferred_maintenance_window: The weekly time range (in UTC) during
which maintenance can occur.
Default is Sun:05:00-Sun:09:00
:type master_password: str
:param master_password: Password of master user for the DBInstance.
Must be 4-15 alphanumeric characters.
:type allocated_storage: int
:param allocated_storage: The new allocated storage size, in GBs.
Valid values are [5-1024]
:type instance_class: str
:param instance_class: The compute and memory capacity of the DBInstance.
Changes will be applied at next maintenance
window unless apply_immediately is True.
Valid values are:
db.m1.small | db.m1.large | db.m1.xlarge |
db.m2.2xlarge | db.m2.4xlarge
:type apply_immediately: bool
:param apply_immediately: If true, the modifications will be applied
as soon as possible rather than waiting for
the next preferred maintenance window.
:type backup_retention_period: int
:param backup_retention_period: The number of days for which automated
backups are retained. Setting this to
zero disables automated backups.
:type preferred_backup_window: str
:param preferred_backup_window: The daily time range during which
automated backups are created (if
enabled). Must be in h24:mi-hh24:mi
format (UTC).
:rtype: :class:`boto.rds.dbinstance.DBInstance`
:return: The modified db instance.
"""
params = {'DBInstanceIdentifier' : id}
if param_group:
params['DBParameterGroupName'] = param_group
if security_groups:
l = []
for group in security_groups:
if isinstance(group, DBSecurityGroup):
l.append(group.name)
else:
l.append(group)
self.build_list_params(params, l, 'DBSecurityGroups.member')
if preferred_maintenance_window:
params['PreferredMaintenanceWindow'] = preferred_maintenance_window
if master_password:
params['MasterUserPassword'] = master_password
if allocated_storage:
params['AllocatedStorage'] = allocated_storage
if backup_retention_period:
params['BackupRetentionPeriod'] = backup_retention_period
if preferred_backup_window:
params['PreferredBackupWindow'] = preferred_backup_window
if apply_immediately:
params['ApplyImmediately'] = 'true'
return self.get_object('ModifyDBInstance', params, DBInstance)
def delete_dbinstance(self, id, skip_final_snapshot=False,
final_snapshot_id=''):
"""
Delete an existing DBInstance.
:type id: str
:param id: Unique identifier for the new instance.
:type skip_final_snapshot: bool
:param skip_final_snapshot: This parameter determines whether a final
db snapshot is created before the instance
is deleted. If True, no snapshot is created.
If False, a snapshot is created before
deleting the instance.
:type final_snapshot_id: str
:param final_snapshot_id: If a final snapshot is requested, this
is the identifier used for that snapshot.
:rtype: :class:`boto.rds.dbinstance.DBInstance`
:return: The deleted db instance.
"""
params = {'DBInstanceIdentifier' : id}
if skip_final_snapshot:
params['SkipFinalSnapshot'] = 'true'
else:
params['SkipFinalSnapshot'] = 'false'
params['FinalDBSnapshotIdentifier'] = final_snapshot_id
return self.get_object('DeleteDBInstance', params, DBInstance)
# DBParameterGroup methods
def get_all_dbparameter_groups(self, groupname=None, max_records=None,
marker=None):
"""
Get all parameter groups associated with your account in a region.
:type groupname: str
:param groupname: The name of the DBParameter group to retrieve.
If not provided, all DBParameter groups will be returned.
:type max_records: int
:param max_records: The maximum number of records to be returned.
If more results are available, a MoreToken will
be returned in the response that can be used to
retrieve additional records. Default is 100.
:type marker: str
:param marker: The marker provided by a previous request.
:rtype: list
:return: A list of :class:`boto.ec2.parametergroup.ParameterGroup`
"""
params = {}
if groupname:
params['DBParameterGroupName'] = groupname
if max_records:
params['MaxRecords'] = max_records
if marker:
params['Marker'] = marker
return self.get_list('DescribeDBParameterGroups', params,
[('DBParameterGroup', ParameterGroup)])
def get_all_dbparameters(self, groupname, source=None,
max_records=None, marker=None):
"""
Get all parameters associated with a ParameterGroup
:type groupname: str
:param groupname: The name of the DBParameter group to retrieve.
:type source: str
:param source: Specifies which parameters to return.
If not specified, all parameters will be returned.
Valid values are: user|system|engine-default
:type max_records: int
:param max_records: The maximum number of records to be returned.
If more results are available, a MoreToken will
be returned in the response that can be used to
retrieve additional records. Default is 100.
:type marker: str
:param marker: The marker provided by a previous request.
:rtype: :class:`boto.ec2.parametergroup.ParameterGroup`
:return: The ParameterGroup
"""
params = {'DBParameterGroupName' : groupname}
if source:
params['Source'] = source
if max_records:
params['MaxRecords'] = max_records
if marker:
params['Marker'] = marker
pg = self.get_object('DescribeDBParameters', params, ParameterGroup)
pg.name = groupname
return pg
def create_parameter_group(self, name, engine='MySQL5.1', description=''):
"""
Create a new dbparameter group for your account.
:type name: string
:param name: The name of the new dbparameter group
:type engine: str
:param engine: Name of database engine. Must be MySQL5.1 for now.
:type description: string
:param description: The description of the new security group
:rtype: :class:`boto.rds.dbsecuritygroup.DBSecurityGroup`
:return: The newly created DBSecurityGroup
"""
params = {'DBParameterGroupName': name,
'Engine': engine,
'Description' : description}
return self.get_object('CreateDBParameterGroup', params, ParameterGroup)
def modify_parameter_group(self, name, parameters=None):
"""
Modify a parameter group for your account.
:type name: string
:param name: The name of the new parameter group
:type parameters: list of :class:`boto.rds.parametergroup.Parameter`
:param parameters: The new parameters
:rtype: :class:`boto.rds.parametergroup.ParameterGroup`
:return: The newly created ParameterGroup
"""
params = {'DBParameterGroupName': name}
for i in range(0, len(parameters)):
parameter = parameters[i]
parameter.merge(params, i+1)
return self.get_list('ModifyDBParameterGroup', params, ParameterGroup)
def reset_parameter_group(self, name, reset_all_params=False, parameters=None):
"""
Resets some or all of the parameters of a ParameterGroup to the
default value
:type key_name: string
:param key_name: The name of the ParameterGroup to reset
:type parameters: list of :class:`boto.rds.parametergroup.Parameter`
:param parameters: The parameters to reset. If not supplied, all parameters
will be reset.
"""
params = {'DBParameterGroupName':name}
if reset_all_params:
params['ResetAllParameters'] = 'true'
else:
params['ResetAllParameters'] = 'false'
for i in range(0, len(parameters)):
parameter = parameters[i]
parameter.merge(params, i+1)
return self.get_status('ResetDBParameterGroup', params)
def delete_parameter_group(self, name):
"""
Delete a DBSecurityGroup from your account.
:type key_name: string
:param key_name: The name of the DBSecurityGroup to delete
"""
params = {'DBParameterGroupName':name}
return self.get_status('DeleteDBParameterGroup', params)
# DBSecurityGroup methods
def get_all_dbsecurity_groups(self, groupname=None, max_records=None,
marker=None):
"""
Get all security groups associated with your account in a region.
:type groupnames: list
:param groupnames: A list of the names of security groups to retrieve.
If not provided, all security groups will be returned.
:type max_records: int
:param max_records: The maximum number of records to be returned.
If more results are available, a MoreToken will
be returned in the response that can be used to
retrieve additional records. Default is 100.
:type marker: str
:param marker: The marker provided by a previous request.
:rtype: list
:return: A list of :class:`boto.rds.dbsecuritygroup.DBSecurityGroup`
"""
params = {}
if groupname:
params['DBSecurityGroupName'] = groupname
if max_records:
params['MaxRecords'] = max_records
if marker:
params['Marker'] = marker
return self.get_list('DescribeDBSecurityGroups', params,
[('DBSecurityGroup', DBSecurityGroup)])
def create_dbsecurity_group(self, name, description=None):
"""
Create a new security group for your account.
This will create the security group within the region you
are currently connected to.
:type name: string
:param name: The name of the new security group
:type description: string
:param description: The description of the new security group
:rtype: :class:`boto.rds.dbsecuritygroup.DBSecurityGroup`
:return: The newly created DBSecurityGroup
"""
params = {'DBSecurityGroupName':name}
if description:
params['DBSecurityGroupDescription'] = description
group = self.get_object('CreateDBSecurityGroup', params, DBSecurityGroup)
group.name = name
group.description = description
return group
def delete_dbsecurity_group(self, name):
"""
Delete a DBSecurityGroup from your account.
:type key_name: string
:param key_name: The name of the DBSecurityGroup to delete
"""
params = {'DBSecurityGroupName':name}
return self.get_status('DeleteDBSecurityGroup', params)
def authorize_dbsecurity_group(self, group_name, cidr_ip=None,
ec2_security_group_name=None,
ec2_security_group_owner_id=None):
"""
Add a new rule to an existing security group.
You need to pass in either src_security_group_name and
src_security_group_owner_id OR a CIDR block but not both.
:type group_name: string
:param group_name: The name of the security group you are adding
the rule to.
:type ec2_security_group_name: string
:param ec2_security_group_name: The name of the EC2 security group you are
granting access to.
:type ec2_security_group_owner_id: string
:param ec2_security_group_owner_id: The ID of the owner of the EC2 security
group you are granting access to.
:type cidr_ip: string
:param cidr_ip: The CIDR block you are providing access to.
See http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing
:rtype: bool
:return: True if successful.
"""
params = {'DBSecurityGroupName':group_name}
if ec2_security_group_name:
params['EC2SecurityGroupName'] = ec2_security_group_name
if ec2_security_group_owner_id:
params['EC2SecurityGroupOwnerId'] = ec2_security_group_owner_id
if cidr_ip:
params['CIDRIP'] = urllib.quote(cidr_ip)
return self.get_object('AuthorizeDBSecurityGroupIngress', params, DBSecurityGroup)
def revoke_security_group(self, group_name, ec2_security_group_name=None,
ec2_security_group_owner_id=None, cidr_ip=None):
"""
Remove an existing rule from an existing security group.
You need to pass in either ec2_security_group_name and
ec2_security_group_owner_id OR a CIDR block.
:type group_name: string
:param group_name: The name of the security group you are removing
the rule from.
:type ec2_security_group_name: string
:param ec2_security_group_name: The name of the EC2 security group you are
granting access to.
:type ec2_security_group_owner_id: string
:param ec2_security_group_owner_id: The ID of the owner of the EC2 security
group you are granting access to.
:type cidr_ip: string
:param cidr_ip: The CIDR block you are providing access to.
See http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing
:rtype: bool
:return: True if successful.
"""
params = {'DBSecurityGroupName':group_name}
if ec2_security_group_name:
params['EC2SecurityGroupName'] = ec2_security_group_name
if ec2_security_group_owner_id:
params['EC2SecurityGroupOwnerId'] = ec2_security_group_owner_id
if cidr_ip:
params['CIDRIP'] = cidr_ip
return self.get_object('RevokeDBSecurityGroupIngress', params, DBSecurityGroup)
# DBSnapshot methods
def get_all_dbsnapshots(self, snapshot_id=None, instance_id=None,
max_records=None, marker=None):
"""
Get information about DB Snapshots.
:type snapshot_id: str
:param snapshot_id: The unique identifier of an RDS snapshot.
If not provided, all RDS snapshots will be returned.
:type instance_id: str
:param instance_id: The identifier of a DBInstance. If provided,
only the DBSnapshots related to that instance will
be returned.
If not provided, all RDS snapshots will be returned.
:type max_records: int
:param max_records: The maximum number of records to be returned.
If more results are available, a MoreToken will
be returned in the response that can be used to
retrieve additional records. Default is 100.
:type marker: str
:param marker: The marker provided by a previous request.
:rtype: list
:return: A list of :class:`boto.rds.dbsnapshot.DBSnapshot`
"""
params = {}
if snapshot_id:
params['DBSnapshotIdentifier'] = snapshot_id
if instance_id:
params['DBInstanceIdentifier'] = instance_id
if max_records:
params['MaxRecords'] = max_records
if marker:
params['Marker'] = marker
return self.get_list('DescribeDBSnapshots', params,
[('DBSnapshots', DBSnapshot)])
def create_dbsnapshot(self, snapshot_id, dbinstance_id):
"""
Create a new DB snapshot.
:type snapshot_id: string
:param snapshot_id: The identifier for the DBSnapshot
:type dbinstance_id: string
:param dbinstance_id: The source identifier for the RDS instance from
which the snapshot is created.
:rtype: :class:`boto.rds.dbsnapshot.DBSnapshot`
:return: The newly created DBSnapshot
"""
params = {'DBSnapshotIdentifier' : snapshot_id,
'DBInstanceIdentifier' : dbinstance_id}
return self.get_object('CreateDBSnapshot', params, DBSnapshot)
def delete_dbsnapshot(self, identifier):
"""
Delete a DBSnapshot
:type identifier: string
:param identifier: The identifier of the DBSnapshot to delete
"""
params = {'DBSnapshotIdentifier' : identifier}
return self.get_object('DeleteDBSnapshot', params, DBSnapshot)
def restore_dbinstance_from_dbsnapshot(self, identifier, instance_id,
instance_class, port=None,
availability_zone=None):
"""
Create a new DBInstance from a DB snapshot.
:type identifier: string
:param identifier: The identifier for the DBSnapshot
:type instance_id: string
:param instance_id: The source identifier for the RDS instance from
which the snapshot is created.
:type instance_class: str
:param instance_class: The compute and memory capacity of the DBInstance.
Valid values are:
db.m1.small | db.m1.large | db.m1.xlarge |
db.m2.2xlarge | db.m2.4xlarge
:type port: int
:param port: Port number on which database accepts connections.
Valid values [1115-65535]. Defaults to 3306.
:type availability_zone: str
:param availability_zone: Name of the availability zone to place
DBInstance into.
:rtype: :class:`boto.rds.dbinstance.DBInstance`
:return: The newly created DBInstance
"""
params = {'DBSnapshotIdentifier' : identifier,
'DBInstanceIdentifier' : instance_id,
'DBInstanceClass' : instance_class}
if port:
params['Port'] = port
if availability_zone:
params['AvailabilityZone'] = availability_zone
return self.get_object('RestoreDBInstanceFromDBSnapshot',
params, DBInstance)
def restore_dbinstance_from_point_in_time(self, source_instance_id,
target_instance_id,
use_latest=False,
restore_time=None,
dbinstance_class=None,
port=None,
availability_zone=None):
"""
Create a new DBInstance from a point in time.
:type source_instance_id: string
:param source_instance_id: The identifier for the source DBInstance.
:type target_instance_id: string
:param target_instance_id: The identifier of the new DBInstance.
:type use_latest: bool
:param use_latest: If True, the latest snapshot availabile will
be used.
:type restore_time: datetime
:param restore_time: The date and time to restore from. Only
used if use_latest is False.
:type instance_class: str
:param instance_class: The compute and memory capacity of the DBInstance.
Valid values are:
db.m1.small | db.m1.large | db.m1.xlarge |
db.m2.2xlarge | db.m2.4xlarge
:type port: int
:param port: Port number on which database accepts connections.
Valid values [1115-65535]. Defaults to 3306.
:type availability_zone: str
:param availability_zone: Name of the availability zone to place
DBInstance into.
:rtype: :class:`boto.rds.dbinstance.DBInstance`
:return: The newly created DBInstance
"""
params = {'SourceDBInstanceIdentifier' : source_instance_id,
'TargetDBInstanceIdentifier' : target_instance_id}
if use_latest:
params['UseLatestRestorableTime'] = 'true'
elif restore_time:
params['RestoreTime'] = restore_time.isoformat()
if instance_class:
params['DBInstanceClass'] = instance_class
if port:
params['Port'] = port
if availability_zone:
params['AvailabilityZone'] = availability_zone
return self.get_object('RestoreDBInstanceToPointInTime',
params, DBInstance)
# Events
def get_all_events(self, source_identifier=None, source_type=None,
start_time=None, end_time=None,
max_records=None, marker=None):
"""
Get information about events related to your DBInstances,
DBSecurityGroups and DBParameterGroups.
:type source_identifier: str
:param source_identifier: If supplied, the events returned will be
limited to those that apply to the identified
source. The value of this parameter depends
on the value of source_type. If neither
parameter is specified, all events in the time
span will be returned.
:type source_type: str
:param source_type: Specifies how the source_identifier should
be interpreted. Valid values are:
b-instance | db-security-group |
db-parameter-group | db-snapshot
:type start_time: datetime
:param start_time: The beginning of the time interval for events.
If not supplied, all available events will
be returned.
:type end_time: datetime
:param end_time: The ending of the time interval for events.
If not supplied, all available events will
be returned.
:type max_records: int
:param max_records: The maximum number of records to be returned.
If more results are available, a MoreToken will
be returned in the response that can be used to
retrieve additional records. Default is 100.
:type marker: str
:param marker: The marker provided by a previous request.
:rtype: list
:return: A list of class:`boto.rds.event.Event`
"""
params = {}
if source_identifier and source_type:
params['SourceIdentifier'] = source_identifier
params['SourceType'] = source_type
if start_time:
params['StartTime'] = start_time.isoformat()
if end_time:
params['EndTime'] = end_time.isoformat()
if max_records:
params['MaxRecords'] = max_records
if marker:
params['Marker'] = marker
return self.get_list('DescribeEvents', params, [('Event', Event)])
| {
"content_hash": "f95a13426725d795d5be3bbb02547332",
"timestamp": "",
"source": "github",
"line_count": 791,
"max_line_length": 90,
"avg_line_length": 43.29962073324905,
"alnum_prop": 0.5538102189781022,
"repo_name": "linkedin/indextank-service",
"id": "92b7199e631826262bebecc43677689e95c2a0e8",
"size": "35352",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "storefront/boto/rds/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "107392"
},
{
"name": "JavaScript",
"bytes": "241673"
},
{
"name": "Python",
"bytes": "3808496"
},
{
"name": "Racket",
"bytes": "251"
},
{
"name": "Ruby",
"bytes": "255"
},
{
"name": "Shell",
"bytes": "13176"
}
],
"symlink_target": ""
} |
import traceback
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import fileutils
from oslo_utils import timeutils
import taskflow.engines
from taskflow.patterns import linear_flow
from taskflow.types import failure as ft
from cinder import backup as backup_api
from cinder.backup import rpcapi as backup_rpcapi
from cinder import context as cinder_context
from cinder import coordination
from cinder import exception
from cinder import flow_utils
from cinder.i18n import _
from cinder.image import glance
from cinder.image import image_utils
from cinder.message import api as message_api
from cinder.message import message_field
from cinder import objects
from cinder.objects import consistencygroup
from cinder.objects import fields
from cinder import utils
from cinder.volume.flows import common
from cinder.volume import utils as volume_utils
LOG = logging.getLogger(__name__)
ACTION = 'volume:create'
CONF = cfg.CONF
# These attributes we will attempt to save for the volume if they exist
# in the source image metadata.
IMAGE_ATTRIBUTES = (
'checksum',
'container_format',
'disk_format',
'min_disk',
'min_ram',
'size',
)
class OnFailureRescheduleTask(flow_utils.CinderTask):
    """Triggers a rescheduling request to be sent when reverting occurs.
    If rescheduling doesn't occur this task errors out the volume.
    Reversion strategy: Triggers the rescheduling mechanism whereby a cast gets
    sent to the scheduler rpc api to allow for an attempt X of Y for scheduling
    this volume elsewhere.
    """
    def __init__(self, reschedule_context, db, driver, scheduler_rpcapi,
                 do_reschedule):
        # Names this task pulls from the flow engine's storage when run.
        requires = ['filter_properties', 'request_spec', 'volume',
                    'context']
        super(OnFailureRescheduleTask, self).__init__(addons=[ACTION],
                                                      requires=requires)
        self.do_reschedule = do_reschedule
        self.scheduler_rpcapi = scheduler_rpcapi
        self.db = db
        self.driver = driver
        self.reschedule_context = reschedule_context
        # These exception types will trigger the volume to be set into error
        # status rather than being rescheduled.
        self.no_reschedule_types = [
            # Image copying happens after volume creation so rescheduling due
            # to copy failure will mean the same volume will be created at
            # another place when it still exists locally.
            exception.ImageCopyFailure,
            # Metadata updates happen after the volume has been created so if
            # they fail, rescheduling will likely attempt to create the volume
            # on another machine when it still exists locally.
            exception.MetadataCopyFailure,
            exception.MetadataCreateFailure,
            exception.MetadataUpdateFailure,
            # The volume/snapshot has been removed from the database, that
            # can not be fixed by rescheduling.
            exception.VolumeNotFound,
            exception.SnapshotNotFound,
            exception.VolumeTypeNotFound,
            exception.ImageUnacceptable,
            exception.ImageTooBig,
        ]
    def execute(self, **kwargs):
        # The forward path is a no-op; this task only acts on revert().
        pass
    def _pre_reschedule(self, volume):
        """Actions that happen before the rescheduling attempt occur here."""
        try:
            # Update volume's timestamp and host.
            #
            # NOTE(harlowja): this is awkward to be done here, shouldn't
            # this happen at the scheduler itself and not before it gets
            # sent to the scheduler? (since what happens if it never gets
            # there??). It's almost like we need a status of 'on-the-way-to
            # scheduler' in the future.
            # We don't need to update the volume's status to creating, since
            # we haven't changed it to error.
            update = {
                'scheduled_at': timeutils.utcnow(),
                'host': None,
            }
            LOG.debug("Updating volume %(volume_id)s with %(update)s.",
                      {'update': update, 'volume_id': volume.id})
            volume.update(update)
            volume.save()
        except exception.CinderException:
            # Don't let updating the state cause the rescheduling to fail.
            LOG.exception("Volume %s: update volume state failed.",
                          volume.id)
    def _reschedule(self, context, cause, request_spec, filter_properties,
                    volume):
        """Actions that happen during the rescheduling attempt occur here."""
        create_volume = self.scheduler_rpcapi.create_volume
        if not filter_properties:
            filter_properties = {}
        if 'retry' not in filter_properties:
            filter_properties['retry'] = {}
        # Track how many attempts have been made so the scheduler can
        # enforce its retry limit.
        retry_info = filter_properties['retry']
        num_attempts = retry_info.get('num_attempts', 0)
        request_spec['volume_id'] = volume.id
        LOG.debug("Volume %(volume_id)s: re-scheduling %(method)s "
                  "attempt %(num)d due to %(reason)s",
                  {'volume_id': volume.id,
                   'method': common.make_pretty_name(create_volume),
                   'num': num_attempts,
                   'reason': cause.exception_str})
        if all(cause.exc_info):
            # Stringify to avoid circular ref problem in json serialization
            retry_info['exc'] = traceback.format_exception(*cause.exc_info)
        return create_volume(context, volume, request_spec=request_spec,
                             filter_properties=filter_properties)
    def _post_reschedule(self, volume):
        """Actions that happen after the rescheduling attempt occur here."""
        LOG.debug("Volume %s: re-scheduled", volume.id)
        # NOTE(dulek): Here we should be sure that rescheduling occurred and
        # host field will be erased. Just in case volume was already created at
        # the backend, we attempt to delete it.
        try:
            self.driver.delete_volume(volume)
        except Exception:
            # Most likely the volume weren't created at the backend. We can
            # safely ignore this.
            pass
    def revert(self, context, result, flow_failures, volume, **kwargs):
        # NOTE(dulek): Revert is occurring and manager need to know if
        # rescheduling happened. We're returning boolean flag that will
        # indicate that. It which will be available in flow engine store
        # through get_revert_result method.
        # If do not want to be rescheduled, just set the volume's status to
        # error and return.
        if not self.do_reschedule:
            common.error_out(volume)
            LOG.error("Volume %s: create failed", volume.id)
            return False
        # Check if we have a cause which can tell us not to reschedule and
        # set the volume's status to error.
        for failure in flow_failures.values():
            if failure.check(*self.no_reschedule_types):
                common.error_out(volume)
                LOG.error("Volume %s: create failed", volume.id)
                return False
        # Use a different context when rescheduling.
        if self.reschedule_context:
            cause = list(flow_failures.values())[0]
            context = self.reschedule_context
            try:
                self._pre_reschedule(volume)
                self._reschedule(context, cause, volume=volume, **kwargs)
                self._post_reschedule(volume)
                return True
            except exception.CinderException:
                LOG.exception("Volume %s: rescheduling failed", volume.id)
        return False
class ExtractVolumeRefTask(flow_utils.CinderTask):
    """Looks up and refreshes the volume reference for the given volume.

    The refreshed volume object is provided to downstream tasks under the
    'refreshed' key.
    """
    default_provides = 'refreshed'
    def __init__(self, db, host, set_error=True):
        super(ExtractVolumeRefTask, self).__init__(addons=[ACTION])
        self.db = db
        self.host = host
        self.set_error = set_error
    def execute(self, context, volume):
        # Re-read the volume from the database; if it was deleted before we
        # got here, this lookup (and therefore the flow) should fail.
        #
        # A lock on the volume id may be wanted in the future so the volume
        # can not be deleted while it is still being created.
        volume.refresh()
        return volume
    def revert(self, context, volume, result, **kwargs):
        # Only flag the volume as errored when this task itself succeeded
        # earlier and error reporting was requested.
        failed = isinstance(result, ft.Failure)
        if failed or not self.set_error:
            return
        reason = _('Volume create failed while extracting volume ref.')
        common.error_out(volume, reason)
        LOG.error("Volume %s: create failed", volume.id)
class ExtractVolumeSpecTask(flow_utils.CinderTask):
    """Extracts a spec of a volume to be created into a common structure.
    This task extracts and organizes the input requirements into a common
    and easier to analyze structure for later tasks to use. It will also
    attach the underlying database volume reference which can be used by
    other tasks to reference for further details about the volume to be.
    Reversion strategy: N/A
    """
    default_provides = 'volume_spec'
    def __init__(self, db):
        # Names this task pulls from the flow engine's storage when run.
        requires = ['volume', 'request_spec']
        super(ExtractVolumeSpecTask, self).__init__(addons=[ACTION],
                                                    requires=requires)
        self.db = db
    def execute(self, context, volume, request_spec):
        """Build and return the 'volume_spec' dict for downstream tasks."""
        get_remote_image_service = glance.get_remote_image_service
        volume_name = volume.name
        volume_size = utils.as_int(volume.size, quiet=False)
        # Create a dictionary that will represent the volume to be so that
        # later tasks can easily switch between the different types and create
        # the volume according to the volume types specifications (which are
        # represented in this dictionary).
        specs = {
            'status': volume.status,
            'type': 'raw',  # This will have the type of the volume to be
                            # created, which should be one of [raw, snap,
                            # source_vol, image, backup]
            'volume_id': volume.id,
            'volume_name': volume_name,
            'volume_size': volume_size,
        }
        if volume.snapshot_id:
            # We are making a snapshot based volume instead of a raw volume.
            specs.update({
                'type': 'snap',
                'snapshot_id': volume.snapshot_id,
            })
        elif volume.source_volid:
            # We are making a source based volume instead of a raw volume.
            #
            # NOTE(harlowja): This will likely fail if the source volume
            # disappeared by the time this call occurred.
            source_volid = volume.source_volid
            source_volume_ref = objects.Volume.get_by_id(context,
                                                         source_volid)
            specs.update({
                'source_volid': source_volid,
                # This is captured incase we have to revert and we want to set
                # back the source volume status to its original status. This
                # may or may not be sketchy to do??
                'source_volstatus': source_volume_ref.status,
                'type': 'source_vol',
            })
        elif request_spec.get('image_id'):
            # We are making an image based volume instead of a raw volume.
            image_href = request_spec['image_id']
            image_service, image_id = get_remote_image_service(context,
                                                               image_href)
            specs.update({
                'type': 'image',
                'image_id': image_id,
                'image_location': image_service.get_location(context,
                                                             image_id),
                'image_meta': image_service.show(context, image_id),
                # Instead of refetching the image service later just save it.
                #
                # NOTE(harlowja): if we have to later recover this tasks output
                # on another 'node' that this object won't be able to be
                # serialized, so we will have to recreate this object on
                # demand in the future.
                'image_service': image_service,
            })
        elif request_spec.get('backup_id'):
            # We are making a backup based volume instead of a raw volume.
            specs.update({
                'type': 'backup',
                'backup_id': request_spec['backup_id'],
                # NOTE(luqitao): if the driver does not implement the method
                # `create_volume_from_backup`, cinder-backup will update the
                # volume's status, otherwise we need update it in the method
                # `CreateVolumeOnFinishTask`.
                'need_update_volume': True,
            })
        return specs
    def revert(self, context, result, **kwargs):
        if isinstance(result, ft.Failure):
            return
        # NOTE(review): ``result`` is the specs dict produced by execute(),
        # which has no 'volume_spec' key, so ``volume_spec`` is presumably
        # always None here -- confirm this is intended.
        volume_spec = result.get('volume_spec')
        # Restore the source volume status and set the volume to error status.
        common.restore_source_status(context, self.db, volume_spec)
class NotifyVolumeActionTask(flow_utils.CinderTask):
    """Emits a usage notification for the given volume when executed.

    Reversion strategy: N/A
    """
    def __init__(self, db, event_suffix):
        super(NotifyVolumeActionTask, self).__init__(addons=[ACTION,
                                                             event_suffix])
        self.db = db
        self.event_suffix = event_suffix
    def execute(self, context, volume):
        try:
            volume_utils.notify_about_volume_usage(context,
                                                   volume,
                                                   self.event_suffix,
                                                   host=volume.host)
        except exception.CinderException:
            # Notification failures are non-fatal: a failed usage report (or
            # a failed database read while producing it) should not abort the
            # rest of the volume workflow.
            LOG.exception("Failed notifying about the volume"
                          " action %(event)s for volume %(volume_id)s",
                          {'event': self.event_suffix,
                           'volume_id': volume.id})
class CreateVolumeFromSpecTask(flow_utils.CinderTask):
"""Creates a volume from a provided specification.
Reversion strategy: N/A
"""
default_provides = 'volume_spec'
    def __init__(self, manager, db, driver, image_volume_cache=None):
        """Initialize the task.

        :param manager: volume manager running this flow
        :param db: database API
        :param driver: volume backend driver used to create volumes
        :param image_volume_cache: optional image-volume cache used to
                                   avoid re-downloading images from Glance
        """
        super(CreateVolumeFromSpecTask, self).__init__(addons=[ACTION])
        self.manager = manager
        self.db = db
        self.driver = driver
        self.image_volume_cache = image_volume_cache
        # Client APIs used when creating volumes from images/backups.
        self.message = message_api.API()
        self.backup_api = backup_api.API()
        self.backup_rpcapi = backup_rpcapi.BackupAPI()
    def _handle_bootable_volume_glance_meta(self, context, volume,
                                            **kwargs):
        """Enable bootable flag and properly handle glance metadata.
        Caller should provide one and only one of snapshot_id,source_volid
        and image_id. If an image_id specified, an image_meta should also be
        provided, otherwise will be treated as an empty dictionary.
        """
        log_template = _("Copying metadata from %(src_type)s %(src_id)s to "
                         "%(vol_id)s.")
        exception_template = _("Failed updating volume %(vol_id)s metadata"
                               " using the provided %(src_type)s"
                               " %(src_id)s metadata")
        src_type = None
        src_id = None
        # Mark the volume bootable first; this raises MetadataUpdateFailure
        # if the flag cannot be persisted.
        self._enable_bootable_flag(context, volume)
        try:
            if kwargs.get('snapshot_id'):
                # Copy the glance metadata attached to the snapshot.
                src_type = 'snapshot'
                src_id = kwargs['snapshot_id']
                snapshot_id = src_id
                LOG.debug(log_template, {'src_type': src_type,
                                         'src_id': src_id,
                                         'vol_id': volume.id})
                self.db.volume_glance_metadata_copy_to_volume(
                    context, volume.id, snapshot_id)
            elif kwargs.get('source_volid'):
                # Copy the glance metadata from the source volume.
                src_type = 'source volume'
                src_id = kwargs['source_volid']
                source_volid = src_id
                LOG.debug(log_template, {'src_type': src_type,
                                         'src_id': src_id,
                                         'vol_id': volume.id})
                self.db.volume_glance_metadata_copy_from_volume_to_volume(
                    context,
                    source_volid,
                    volume.id)
            elif kwargs.get('image_id'):
                # Record selected image attributes/properties as volume
                # glance metadata.
                src_type = 'image'
                src_id = kwargs['image_id']
                image_id = src_id
                image_meta = kwargs.get('image_meta', {})
                LOG.debug(log_template, {'src_type': src_type,
                                         'src_id': src_id,
                                         'vol_id': volume.id})
                self._capture_volume_image_metadata(context, volume.id,
                                                    image_id, image_meta)
        except exception.GlanceMetadataNotFound:
            # If volume is not created from image, No glance metadata
            # would be available for that volume in
            # volume glance metadata table
            pass
        except exception.CinderException as ex:
            LOG.exception(exception_template, {'src_type': src_type,
                                               'src_id': src_id,
                                               'vol_id': volume.id})
            raise exception.MetadataCopyFailure(reason=ex)
    def _create_from_snapshot(self, context, volume, snapshot_id,
                              **kwargs):
        """Create the volume from an existing snapshot via the driver."""
        snapshot = objects.Snapshot.get_by_id(context, snapshot_id)
        try:
            model_update = self.driver.create_volume_from_snapshot(volume,
                                                                   snapshot)
        finally:
            # Always scrub consistency-group info from the volume, even when
            # the driver call fails.
            self._cleanup_cg_in_volume(volume)
        # NOTE(harlowja): Subtasks would be useful here since after this
        # point the volume has already been created and further failures
        # will not destroy the volume (although they could in the future).
        make_bootable = False
        try:
            # The bootable flag is inherited from the volume the snapshot
            # was taken from.
            originating_vref = objects.Volume.get_by_id(context,
                                                        snapshot.volume_id)
            make_bootable = originating_vref.bootable
        except exception.CinderException as ex:
            LOG.exception("Failed fetching snapshot %(snapshot_id)s bootable"
                          " flag using the provided glance snapshot "
                          "%(snapshot_ref_id)s volume reference",
                          {'snapshot_id': snapshot_id,
                           'snapshot_ref_id': snapshot.volume_id})
            raise exception.MetadataUpdateFailure(reason=ex)
        if make_bootable:
            # Propagate bootable flag and glance metadata onto the new
            # volume.
            self._handle_bootable_volume_glance_meta(context, volume,
                                                     snapshot_id=snapshot_id)
        return model_update
def _enable_bootable_flag(self, context, volume):
try:
LOG.debug('Marking volume %s as bootable.', volume.id)
volume.bootable = True
volume.save()
except exception.CinderException as ex:
LOG.exception("Failed updating volume %(volume_id)s bootable "
"flag to true", {'volume_id': volume.id})
raise exception.MetadataUpdateFailure(reason=ex)
    def _create_from_source_volume(self, context, volume, source_volid,
                                   **kwargs):
        """Clone the volume from an existing source volume via the driver."""
        # NOTE(harlowja): if the source volume has disappeared this will be our
        # detection of that since this database call should fail.
        #
        # NOTE(harlowja): likely this is not the best place for this to happen
        # and we should have proper locks on the source volume while actions
        # that use the source volume are underway.
        srcvol_ref = objects.Volume.get_by_id(context, source_volid)
        try:
            model_update = self.driver.create_cloned_volume(volume, srcvol_ref)
        finally:
            # Always scrub consistency-group info, even on driver failure.
            self._cleanup_cg_in_volume(volume)
        # NOTE(harlowja): Subtasks would be useful here since after this
        # point the volume has already been created and further failures
        # will not destroy the volume (although they could in the future).
        if srcvol_ref.bootable:
            # Inherit bootable flag and glance metadata from the source.
            self._handle_bootable_volume_glance_meta(
                context, volume, source_volid=srcvol_ref.id)
        return model_update
def _copy_image_to_volume(self, context, volume,
image_meta, image_location, image_service):
image_id = image_meta['id']
"""Downloads Glance image to the specified volume."""
LOG.debug("Attempting download of %(image_id)s (%(image_location)s)"
" to volume %(volume_id)s.",
{'image_id': image_id, 'volume_id': volume.id,
'image_location': image_location})
try:
image_encryption_key = image_meta.get('cinder_encryption_key_id')
if volume.encryption_key_id and image_encryption_key:
# If the image provided an encryption key, we have
# already cloned it to the volume's key in
# _get_encryption_key_id, so we can do a direct copy.
self.driver.copy_image_to_volume(
context, volume, image_service, image_id)
elif volume.encryption_key_id:
# Creating an encrypted volume from a normal, unencrypted,
# image.
self.driver.copy_image_to_encrypted_volume(
context, volume, image_service, image_id)
else:
self.driver.copy_image_to_volume(
context, volume, image_service, image_id)
except processutils.ProcessExecutionError as ex:
LOG.exception("Failed to copy image %(image_id)s to volume: "
"%(volume_id)s",
{'volume_id': volume.id, 'image_id': image_id})
raise exception.ImageCopyFailure(reason=ex.stderr)
except exception.ImageUnacceptable as ex:
LOG.exception("Failed to copy image to volume: %(volume_id)s",
{'volume_id': volume.id})
raise exception.ImageUnacceptable(ex)
except exception.ImageTooBig as ex:
LOG.exception("Failed to copy image %(image_id)s to volume: "
"%(volume_id)s",
{'volume_id': volume.id, 'image_id': image_id})
excutils.save_and_reraise_exception()
except Exception as ex:
LOG.exception("Failed to copy image %(image_id)s to "
"volume: %(volume_id)s",
{'volume_id': volume.id, 'image_id': image_id})
if not isinstance(ex, exception.ImageCopyFailure):
raise exception.ImageCopyFailure(reason=ex)
else:
raise
LOG.debug("Downloaded image %(image_id)s (%(image_location)s)"
" to volume %(volume_id)s successfully.",
{'image_id': image_id, 'volume_id': volume.id,
'image_location': image_location})
def _capture_volume_image_metadata(self, context, volume_id,
image_id, image_meta):
# Save some base attributes into the volume metadata
base_metadata = {
'image_id': image_id,
}
name = image_meta.get('name', None)
if name:
base_metadata['image_name'] = name
# Save some more attributes into the volume metadata from the image
# metadata
for key in IMAGE_ATTRIBUTES:
if key not in image_meta:
continue
value = image_meta.get(key, None)
if value is not None:
base_metadata[key] = value
# Save all the image metadata properties into the volume metadata
property_metadata = {}
image_properties = image_meta.get('properties', {})
for (key, value) in image_properties.items():
if value is not None:
property_metadata[key] = value
volume_metadata = dict(property_metadata)
volume_metadata.update(base_metadata)
LOG.debug("Creating volume glance metadata for volume %(volume_id)s"
" backed by image %(image_id)s with: %(vol_metadata)s.",
{'volume_id': volume_id, 'image_id': image_id,
'vol_metadata': volume_metadata})
self.db.volume_glance_metadata_bulk_create(context, volume_id,
volume_metadata)
    def _clone_image_volume(self, context, volume, image_location, image_meta):
        """Create a volume efficiently from an existing image.
        Returns a dict of volume properties eg. provider_location,
        boolean indicating whether cloning occurred
        """
        # NOTE (lixiaoy1): currently can't create volume from source vol with
        # different encryptions, so just return.
        if not image_location or volume.encryption_key_id:
            return None, False
        # Cloning is only attempted for bare/raw images.
        if (image_meta.get('container_format') != 'bare' or
                image_meta.get('disk_format') != 'raw'):
            LOG.info("Requested image %(id)s is not in raw format.",
                     {'id': image_meta.get('id')})
            return None, False
        image_volume = None
        # Collect any 'cinder://<volume-id>' URLs from the image's reported
        # locations; these identify image-volumes on this host.
        direct_url, locations = image_location
        urls = set([direct_url] + [loc.get('url') for loc in locations or []])
        image_volume_ids = [url[9:] for url in urls
                            if url and url.startswith('cinder://')]
        image_volumes = self.db.volume_get_all_by_host(
            context, volume['host'], filters={'id': image_volume_ids})
        for image_volume in image_volumes:
            # For the case image volume is stored in the service tenant,
            # image_owner volume metadata should also be checked.
            image_owner = None
            volume_metadata = image_volume.get('volume_metadata') or {}
            for m in volume_metadata:
                if m['key'] == 'image_owner':
                    image_owner = m['value']
            if (image_meta['owner'] != volume['project_id'] and
                    image_meta['owner'] != image_owner):
                LOG.info("Skipping image volume %(id)s because "
                         "it is not accessible by current Tenant.",
                         {'id': image_volume.id})
                continue
            LOG.info("Will clone a volume from the image volume "
                     "%(id)s.", {'id': image_volume.id})
            break
        else:
            # The loop found no accessible candidate image-volume.
            LOG.debug("No accessible image volume for image %(id)s found.",
                      {'id': image_meta['id']})
            return None, False
        try:
            ret = self.driver.create_cloned_volume(volume, image_volume)
            self._cleanup_cg_in_volume(volume)
            return ret, True
        except (NotImplementedError, exception.CinderException):
            LOG.exception('Failed to clone image volume %(id)s.',
                          {'id': image_volume['id']})
            return None, False
    def _create_from_image_download(self, context, volume, image_location,
                                    image_meta, image_service):
        """Create a blank volume and copy the image data into it."""
        # TODO(harlowja): what needs to be rolled back in the clone if this
        # volume create fails?? Likely this should be a subflow or broken
        # out task in the future. That will bring up the question of how
        # do we make said subflow/task which is only triggered in the
        # clone image 'path' resumable and revertable in the correct
        # manner.
        model_update = self.driver.create_volume(volume) or {}
        self._cleanup_cg_in_volume(volume)
        # Mark the volume as downloading while the image data is copied in.
        model_update['status'] = 'downloading'
        try:
            volume.update(model_update)
            volume.save()
        except exception.CinderException:
            # Best-effort status update; the copy below proceeds regardless.
            LOG.exception("Failed updating volume %(volume_id)s with "
                          "%(updates)s",
                          {'volume_id': volume.id,
                           'updates': model_update})
        try:
            self._copy_image_to_volume(context, volume, image_meta,
                                       image_location, image_service)
        except exception.ImageTooBig:
            with excutils.save_and_reraise_exception():
                LOG.exception("Failed to copy image to volume "
                              "%(volume_id)s due to insufficient space",
                              {'volume_id': volume.id})
        return model_update
    def _create_from_image_cache(self, context, internal_context, volume,
                                 image_id, image_meta):
        """Attempt to create the volume using the image cache.
        Best case this will simply clone the existing volume in the cache.
        Worst case the image is out of date and will be evicted. In that case
        a clone will not be created and the image must be downloaded again.
        Returns (model_update, cloned); (None, False) means the caller must
        download the image instead.
        """
        LOG.debug('Attempting to retrieve cache entry for image = '
                  '%(image_id)s on host %(host)s.',
                  {'image_id': image_id, 'host': volume.host})
        # Currently can't create volume from source vol with different
        # encryptions, so just return
        if volume.encryption_key_id:
            return None, False
        try:
            cache_entry = self.image_volume_cache.get_entry(internal_context,
                                                            volume,
                                                            image_id,
                                                            image_meta)
            if cache_entry:
                # Cache hit: clone from the cached image-volume.
                LOG.debug('Creating from source image-volume %(volume_id)s',
                          {'volume_id': cache_entry['volume_id']})
                model_update = self._create_from_source_volume(
                    context,
                    volume,
                    cache_entry['volume_id']
                )
                return model_update, True
        except exception.SnapshotLimitReached:
            # If this exception occurred when cloning the image-volume,
            # it is because the image-volume reached its snapshot limit.
            # Delete current cache entry and create a "fresh" entry
            # NOTE: This will not delete the existing image-volume and
            # only delete the cache entry
            with excutils.save_and_reraise_exception():
                self.image_volume_cache.evict(context, cache_entry)
        except NotImplementedError:
            LOG.warning('Backend does not support creating image-volume '
                        'clone. Image will be downloaded from Glance.')
        # Cache miss (or clone unsupported): fall back to a download.
        return None, False
    @coordination.synchronized('{image_id}')
    def _prepare_image_cache_entry(self, context, volume,
                                   image_location, image_id,
                                   image_meta, image_service):
        """Create the image-cache entry for image_id if it does not exist.

        Synchronized on the image id (see decorator) so concurrent requests
        for the same image do not race to create the entry.
        Returns (model_update, created); (None, False) when the entry
        already exists or no internal tenant context is available.
        """
        internal_context = cinder_context.get_internal_tenant_context()
        if not internal_context:
            return None, False
        cache_entry = self.image_volume_cache.get_entry(internal_context,
                                                        volume,
                                                        image_id,
                                                        image_meta)
        # If the entry is in the cache then return ASAP in order to minimize
        # the scope of the lock. If it isn't in the cache then do the work
        # that adds it. The work is done inside the locked region to ensure
        # only one cache entry is created.
        if cache_entry:
            LOG.debug('Found cache entry for image = '
                      '%(image_id)s on host %(host)s.',
                      {'image_id': image_id, 'host': volume.host})
            return None, False
        else:
            LOG.debug('Preparing cache entry for image = '
                      '%(image_id)s on host %(host)s.',
                      {'image_id': image_id, 'host': volume.host})
            model_update = self._create_from_image_cache_or_download(
                context,
                volume,
                image_location,
                image_id,
                image_meta,
                image_service,
                update_cache=True)
            return model_update, True
    def _create_from_image_cache_or_download(self, context, volume,
                                             image_location, image_id,
                                             image_meta, image_service,
                                             update_cache=False):
        """Create the volume from the image cache, else download from Glance.

        First attempts a clone from the image-volume cache (when enabled and
        an internal tenant context is available).  On a cache miss the image
        is downloaded into the volume; when *update_cache* is True a new
        cache entry is then created from the freshly populated volume.

        Returns the driver model_update (possibly None).
        """
        # NOTE(e0ne): check for free space in image_conversion_dir before
        # image downloading.
        # NOTE(mnaser): This check *only* happens if the backend is not able
        #               to clone volumes and we have to resort to downloading
        #               the image from Glance and uploading it.
        if CONF.image_conversion_dir:
            fileutils.ensure_tree(CONF.image_conversion_dir)
            try:
                image_utils.check_available_space(
                    CONF.image_conversion_dir,
                    image_meta['size'], image_id)
            except exception.ImageTooBig as err:
                # Surface the failure as a user-visible message before
                # re-raising.
                with excutils.save_and_reraise_exception():
                    self.message.create(
                        context,
                        message_field.Action.COPY_IMAGE_TO_VOLUME,
                        resource_uuid=volume.id,
                        detail=message_field.Detail.NOT_ENOUGH_SPACE_FOR_IMAGE,
                        exception=err)

        # Try and use the image cache.
        should_create_cache_entry = False
        cloned = False
        model_update = None
        if self.image_volume_cache:
            internal_context = cinder_context.get_internal_tenant_context()
            if not internal_context:
                LOG.info('Unable to get Cinder internal context, will '
                         'not use image-volume cache.')
            else:
                try:
                    model_update, cloned = self._create_from_image_cache(
                        context,
                        internal_context,
                        volume,
                        image_id,
                        image_meta
                    )
                except exception.SnapshotLimitReached:
                    # This exception will be handled by the caller's
                    # (_create_from_image) retry decorator
                    with excutils.save_and_reraise_exception():
                        LOG.debug("Snapshot limit reached. Creating new "
                                  "image-volume.")
                except exception.CinderException as e:
                    # Cache failures are non-fatal; fall back to download.
                    LOG.warning('Failed to create volume from image-volume '
                                'cache, image will be downloaded from Glance. '
                                'Error: %(exception)s',
                                {'exception': e})

                # Don't cache unless directed.
                if not cloned and update_cache:
                    should_create_cache_entry = True
                    # cleanup consistencygroup field in the volume,
                    # because when creating cache entry, it will need
                    # to update volume object.
                    self._cleanup_cg_in_volume(volume)

        # Fall back to default behavior of creating volume,
        # download the image data and copy it into the volume.
        original_size = volume.size
        backend_name = volume_utils.extract_host(volume.service_topic_queue)
        try:
            if not cloned:
                try:
                    with image_utils.TemporaryImages.fetch(
                            image_service, context, image_id,
                            backend_name) as tmp_image:
                        # Try to create the volume as the minimal size,
                        # then we can extend once the image has been
                        # downloaded.
                        data = image_utils.qemu_img_info(tmp_image)
                        virtual_size = image_utils.check_virtual_size(
                            data.virtual_size, volume.size, image_id)

                        if should_create_cache_entry:
                            if virtual_size and virtual_size != original_size:
                                volume.size = virtual_size
                                volume.save()
                        model_update = self._create_from_image_download(
                            context,
                            volume,
                            image_location,
                            image_meta,
                            image_service
                        )
                except exception.ImageTooBig as e:
                    with excutils.save_and_reraise_exception():
                        self.message.create(
                            context,
                            message_field.Action.COPY_IMAGE_TO_VOLUME,
                            resource_uuid=volume.id,
                            detail=
                            message_field.Detail.NOT_ENOUGH_SPACE_FOR_IMAGE,
                            exception=e)

            if should_create_cache_entry:
                # Update the newly created volume db entry before we clone it
                # for the image-volume creation.
                if model_update:
                    volume.update(model_update)
                    volume.save()
                self.manager._create_image_cache_volume_entry(internal_context,
                                                              volume,
                                                              image_id,
                                                              image_meta)
        finally:
            # If we created the volume as the minimal size, extend it back to
            # what was originally requested. If an exception has occurred or
            # extending it back failed, we still need to put this back before
            # letting it be raised further up the stack.
            if volume.size != original_size:
                try:
                    self.driver.extend_volume(volume, original_size)
                finally:
                    volume.size = original_size
                    volume.save()
        return model_update
    @utils.retry(exception.SnapshotLimitReached, retries=1)
    def _create_from_image(self, context, volume,
                           image_location, image_id, image_meta,
                           image_service, **kwargs):
        """Create a volume whose content is the Glance image *image_id*.

        Strategies are attempted in order: direct driver clone_image(),
        cloning a 'cinder://' glance location, preparing/using the
        image-volume cache, and finally downloading from Glance.  Retried
        once on SnapshotLimitReached, which the cache path raises after
        evicting a stale cache entry.

        Returns the driver model_update (possibly None).
        """
        LOG.debug("Cloning %(volume_id)s from image %(image_id)s "
                  " at location %(image_location)s.",
                  {'volume_id': volume.id,
                   'image_location': image_location, 'image_id': image_id})

        virtual_size = image_meta.get('virtual_size')
        if virtual_size:
            virtual_size = image_utils.check_virtual_size(virtual_size,
                                                          volume.size,
                                                          image_id)

        # Create the volume from an image.
        #
        # First see if the driver can clone the image directly.
        #
        # NOTE (singn): two params need to be returned
        # dict containing provider_location for cloned volume
        # and clone status.
        # NOTE (lixiaoy1): Currently all images are raw data, we can't
        # use clone_image to copy data if new volume is encrypted.
        volume_is_encrypted = volume.encryption_key_id is not None
        cloned = False
        model_update = None
        if not volume_is_encrypted:
            model_update, cloned = self.driver.clone_image(context,
                                                           volume,
                                                           image_location,
                                                           image_meta,
                                                           image_service)

        # Try and clone the image if we have it set as a glance location.
        if not cloned and 'cinder' in CONF.allowed_direct_url_schemes:
            model_update, cloned = self._clone_image_volume(context,
                                                            volume,
                                                            image_location,
                                                            image_meta)
        # If we're going to try using the image cache then prepare the cache
        # entry. Note: encrypted volume images are not cached.
        if not cloned and self.image_volume_cache and not volume_is_encrypted:
            # If _prepare_image_cache_entry() has to create the cache entry
            # then it will also create the volume. But if the volume image
            # is already in the cache then it returns (None, False), and
            # _create_from_image_cache_or_download() will use the cache.
            model_update, cloned = self._prepare_image_cache_entry(
                context,
                volume,
                image_location,
                image_id,
                image_meta,
                image_service)

        # Try and use the image cache, and download if not cached.
        if not cloned:
            model_update = self._create_from_image_cache_or_download(
                context,
                volume,
                image_location,
                image_id,
                image_meta,
                image_service)

        # Record glance metadata so the volume is marked bootable.
        self._handle_bootable_volume_glance_meta(context, volume,
                                                 image_id=image_id,
                                                 image_meta=image_meta)
        return model_update
    def _create_from_backup(self, context, volume, backup_id, **kwargs):
        """Create a volume from backup *backup_id*.

        The driver-native restore is tried first.  If the driver does not
        implement it, a raw volume is created instead and the restore is
        delegated asynchronously to the backup service over RPC.

        Returns ``(model_update_dict, need_update_volume)``;
        need_update_volume is False when the backup service owns the
        (asynchronous) restore and will update the volume status itself.
        """
        LOG.info("Creating volume %(volume_id)s from backup %(backup_id)s.",
                 {'volume_id': volume.id,
                  'backup_id': backup_id})
        ret = {}
        backup = objects.Backup.get_by_id(context, backup_id)
        try:
            ret = self.driver.create_volume_from_backup(volume, backup)
            need_update_volume = True
        except NotImplementedError:
            LOG.info("Backend does not support creating volume from "
                     "backup %(id)s. It will directly create the raw volume "
                     "at the backend and then schedule the request to the "
                     "backup service to restore the volume with backup.",
                     {'id': backup_id})
            model_update = self._create_raw_volume(volume, **kwargs) or {}
            model_update.update({'status': 'restoring-backup'})
            volume.update(model_update)
            volume.save()

            # Pick a live backup service and hand the restore off to it.
            backup_host = self.backup_api.get_available_backup_service_host(
                backup.host, backup.availability_zone)
            updates = {'status': fields.BackupStatus.RESTORING,
                       'restore_volume_id': volume.id,
                       'host': backup_host}
            backup.update(updates)
            backup.save()
            self.backup_rpcapi.restore_backup(context, backup.host, backup,
                                              volume.id)
            need_update_volume = False

        LOG.info("Created volume %(volume_id)s from backup %(backup_id)s "
                 "successfully.",
                 {'volume_id': volume.id,
                  'backup_id': backup_id})
        return ret, need_update_volume
def _create_raw_volume(self, volume, **kwargs):
try:
ret = self.driver.create_volume(volume)
finally:
self._cleanup_cg_in_volume(volume)
return ret
    def execute(self, context, volume, volume_spec):
        """Create the volume described by *volume_spec* and persist updates.

        Dispatches on ``volume_spec['type']`` ('raw', 'snap', 'source_vol',
        'image' or 'backup') to the matching _create_from_* helper, then
        saves any model update the creation produced.  Returns volume_spec
        (with 'need_update_volume' added for the backup path).

        Raises DriverNotInitialized when the driver is unusable and
        VolumeTypeNotFound for an unrecognized create type.
        """
        volume_spec = dict(volume_spec)
        volume_id = volume_spec.pop('volume_id', None)
        if not volume_id:
            volume_id = volume.id

        # we can't do anything if the driver didn't init
        if not self.driver.initialized:
            driver_name = self.driver.__class__.__name__
            LOG.error("Unable to create volume. "
                      "Volume driver %s not initialized", driver_name)
            raise exception.DriverNotInitialized()

        # NOTE(xyang): Populate consistencygroup_id and consistencygroup
        # fields before passing to the driver. This is to support backward
        # compatibility of consistencygroup.
        if volume.group_id:
            volume.consistencygroup_id = volume.group_id
            cg = consistencygroup.ConsistencyGroup()
            cg.from_group(volume.group)
            volume.consistencygroup = cg

        create_type = volume_spec.pop('type', None)
        LOG.info("Volume %(volume_id)s: being created as %(create_type)s "
                 "with specification: %(volume_spec)s",
                 {'volume_spec': volume_spec, 'volume_id': volume_id,
                  'create_type': create_type})
        if create_type == 'raw':
            model_update = self._create_raw_volume(volume, **volume_spec)
        elif create_type == 'snap':
            model_update = self._create_from_snapshot(context, volume,
                                                      **volume_spec)
        elif create_type == 'source_vol':
            model_update = self._create_from_source_volume(
                context, volume, **volume_spec)
        elif create_type == 'image':
            model_update = self._create_from_image(context,
                                                   volume,
                                                   **volume_spec)
        elif create_type == 'backup':
            model_update, need_update_volume = self._create_from_backup(
                context, volume, **volume_spec)
            volume_spec.update({'need_update_volume': need_update_volume})
        else:
            raise exception.VolumeTypeNotFound(volume_type_id=create_type)

        # Persist any model information provided on creation.
        try:
            if model_update:
                with volume.obj_as_admin():
                    volume.update(model_update)
                    volume.save()
        except exception.CinderException:
            # If somehow the update failed we want to ensure that the
            # failure is logged (but not try rescheduling since the volume at
            # this point has been created).
            LOG.exception("Failed updating model of volume %(volume_id)s "
                          "with creation provided model %(model)s",
                          {'volume_id': volume_id, 'model': model_update})
            raise
        return volume_spec
def _cleanup_cg_in_volume(self, volume):
# NOTE(xyang): Cannot have both group_id and consistencygroup_id.
# consistencygroup_id needs to be removed to avoid DB reference
# error because there isn't an entry in the consistencygroups table.
if (('group_id' in volume and volume.group_id) and
('consistencygroup_id' in volume and
volume.consistencygroup_id)):
volume.consistencygroup_id = None
if 'consistencygroup' in volume:
volume.consistencygroup = None
class CreateVolumeOnFinishTask(NotifyVolumeActionTask):
    """Perform the final notifications/updates after a successful create.

    Other services learn that a volume is ready for use through MQ
    notifications and database status updates.  This task issues both in a
    best-effort manner: a failure to persist the status is logged but not
    re-raised, since the volume itself already exists.

    Reversion strategy: N/A
    """

    def __init__(self, db, event_suffix):
        super(CreateVolumeOnFinishTask, self).__init__(db, event_suffix)
        # Volumes created as migration targets get a dedicated final status.
        self.status_translation = {
            'migration_target_creating': 'migration_target',
        }

    def execute(self, context, volume, volume_spec):
        need_update_volume = volume_spec.pop('need_update_volume', True)
        if not need_update_volume:
            # Someone else (the backup service) owns the status update;
            # just emit the notification.
            super(CreateVolumeOnFinishTask, self).execute(context, volume)
            return

        new_status = self.status_translation.get(volume_spec.get('status'),
                                                 'available')
        updates = {
            'status': new_status,
            'launched_at': timeutils.utcnow(),
        }
        try:
            # TODO(harlowja): is it acceptable to only log if this fails??
            # or are there other side-effects that this will cause if the
            # status isn't updated correctly (aka it will likely be stuck in
            # 'creating' if this fails)??
            volume.update(updates)
            volume.save()
            # Now use the parent to notify.
            super(CreateVolumeOnFinishTask, self).execute(context, volume)
        except exception.CinderException:
            LOG.exception("Failed updating volume %(volume_id)s with "
                          "%(update)s", {'volume_id': volume.id,
                                         'update': updates})
        # Even if the update fails, the volume is ready.
        LOG.info("Volume %(volume_name)s (%(volume_id)s): "
                 "created successfully",
                 {'volume_name': volume_spec['volume_name'],
                  'volume_id': volume.id})
def get_flow(context, manager, db, driver, scheduler_rpcapi, host, volume,
             allow_reschedule, reschedule_context, request_spec,
             filter_properties, image_volume_cache=None):
    """Construct and return the volume-manager entrypoint flow.

    The flow extracts the volume ref and spec, decides up front whether a
    failure may be rescheduled, sends the ``create.start`` notification,
    creates the volume from the spec, and finishes with the ``create.end``
    notification plus final database status updates.
    """
    flow_name = ACTION.replace(":", "_") + "_manager"
    flow = linear_flow.Flow(flow_name)

    # Seed the workflow store so that each task's provides/requires
    # dependencies can be resolved in order.
    store = {
        'context': context,
        'filter_properties': filter_properties,
        'request_spec': request_spec,
        'volume': volume,
    }

    flow.add(ExtractVolumeRefTask(db, host, set_error=False))

    retry = filter_properties.get('retry', None)

    # Always add OnFailureRescheduleTask and we handle the change of volume's
    # status when reverting the flow. Meanwhile, no need to revert process of
    # ExtractVolumeRefTask.
    do_reschedule = allow_reschedule and request_spec and retry
    flow.add(OnFailureRescheduleTask(reschedule_context, db, driver,
                                     scheduler_rpcapi, do_reschedule))

    LOG.debug("Volume reschedule parameters: %(allow)s "
              "retry: %(retry)s", {'allow': allow_reschedule, 'retry': retry})

    flow.add(ExtractVolumeSpecTask(db),
             NotifyVolumeActionTask(db, "create.start"),
             CreateVolumeFromSpecTask(manager,
                                      db,
                                      driver,
                                      image_volume_cache),
             CreateVolumeOnFinishTask(db, "create.end"))

    # Now load (but do not run) the flow using the provided initial data.
    return taskflow.engines.load(flow, store=store)
| {
"content_hash": "bdc5021f8a40bb40e66843444cc663a1",
"timestamp": "",
"source": "github",
"line_count": 1167,
"max_line_length": 79,
"avg_line_length": 45.59982862039417,
"alnum_prop": 0.5501644273231232,
"repo_name": "Datera/cinder",
"id": "010309fb5b567bed1fe024350a444b34dc89d05f",
"size": "53788",
"binary": false,
"copies": "1",
"ref": "refs/heads/datera_queens_backport",
"path": "cinder/volume/flows/manager/create_volume.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "15242306"
},
{
"name": "Shell",
"bytes": "8187"
}
],
"symlink_target": ""
} |
"""Support for the Netatmo devices."""
import logging
from datetime import timedelta
from urllib.error import HTTPError
import voluptuous as vol
from homeassistant.const import (
CONF_API_KEY,
CONF_PASSWORD,
CONF_USERNAME,
CONF_DISCOVERY,
CONF_URL,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.helpers import discovery
import homeassistant.helpers.config_validation as cv
from homeassistant.util import Throttle
from .const import DOMAIN, DATA_NETATMO_AUTH
_LOGGER = logging.getLogger(__name__)

# Keys used in hass.data, shared with the platform modules.
DATA_PERSONS = "netatmo_persons"
DATA_WEBHOOK_URL = "netatmo_webhook_url"

# Extra configuration keys beyond the homeassistant.const ones.
CONF_SECRET_KEY = "secret_key"
CONF_WEBHOOKS = "webhooks"

# Names of the services registered by setup().
SERVICE_ADDWEBHOOK = "addwebhook"
SERVICE_DROPWEBHOOK = "dropwebhook"

NETATMO_AUTH = None
NETATMO_WEBHOOK_URL = None
DEFAULT_PERSON = "Unknown"
DEFAULT_DISCOVERY = True
DEFAULT_WEBHOOKS = False

# Event types found in the Netatmo webhook payload ...
EVENT_PERSON = "person"
EVENT_MOVEMENT = "movement"
EVENT_HUMAN = "human"
EVENT_ANIMAL = "animal"
EVENT_VEHICLE = "vehicle"

# ... and the Home Assistant bus event names they are republished under.
EVENT_BUS_PERSON = "netatmo_person"
EVENT_BUS_MOVEMENT = "netatmo_movement"
EVENT_BUS_HUMAN = "netatmo_human"
EVENT_BUS_ANIMAL = "netatmo_animal"
EVENT_BUS_VEHICLE = "netatmo_vehicle"
EVENT_BUS_OTHER = "netatmo_other"

# Attribute keys read from the webhook payload / published on the bus.
ATTR_ID = "id"
ATTR_PSEUDO = "pseudo"
ATTR_NAME = "name"
ATTR_EVENT_TYPE = "event_type"
ATTR_MESSAGE = "message"
ATTR_CAMERA_ID = "camera_id"
ATTR_HOME_NAME = "home_name"
ATTR_PERSONS = "persons"
ATTR_IS_KNOWN = "is_known"
ATTR_FACE_URL = "face_url"
ATTR_SNAPSHOT_URL = "snapshot_url"
ATTR_VIGNETTE_URL = "vignette_url"

# Throttle intervals for the CameraData update methods below.
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=5)
MIN_TIME_BETWEEN_EVENT_UPDATES = timedelta(seconds=5)

CONFIG_SCHEMA = vol.Schema(
    {
        DOMAIN: vol.Schema(
            {
                vol.Required(CONF_API_KEY): cv.string,
                vol.Required(CONF_PASSWORD): cv.string,
                vol.Required(CONF_SECRET_KEY): cv.string,
                vol.Required(CONF_USERNAME): cv.string,
                vol.Optional(CONF_WEBHOOKS, default=DEFAULT_WEBHOOKS): cv.boolean,
                vol.Optional(CONF_DISCOVERY, default=DEFAULT_DISCOVERY): cv.boolean,
            }
        )
    },
    extra=vol.ALLOW_EXTRA,
)

SCHEMA_SERVICE_ADDWEBHOOK = vol.Schema({vol.Optional(CONF_URL): cv.string})

SCHEMA_SERVICE_DROPWEBHOOK = vol.Schema({})
def setup(hass, config):
    """Set up the Netatmo devices.

    Authenticates against the Netatmo API, optionally loads the discovered
    platforms, optionally registers a webhook, and always registers the
    addwebhook/dropwebhook services.  Returns False when authentication
    fails, True otherwise.
    """
    import pyatmo

    hass.data[DATA_PERSONS] = {}
    try:
        auth = pyatmo.ClientAuth(
            config[DOMAIN][CONF_API_KEY],
            config[DOMAIN][CONF_SECRET_KEY],
            config[DOMAIN][CONF_USERNAME],
            config[DOMAIN][CONF_PASSWORD],
            "read_station read_camera access_camera "
            "read_thermostat write_thermostat "
            "read_presence access_presence read_homecoach",
        )
    except HTTPError:
        _LOGGER.error("Unable to connect to Netatmo API")
        return False

    # Store config to be used during entry setup
    hass.data[DATA_NETATMO_AUTH] = auth

    if config[DOMAIN][CONF_DISCOVERY]:
        for component in "camera", "sensor", "binary_sensor", "climate":
            discovery.load_platform(hass, component, DOMAIN, {}, config)

    if config[DOMAIN][CONF_WEBHOOKS]:
        webhook_id = hass.components.webhook.async_generate_id()
        hass.data[DATA_WEBHOOK_URL] = hass.components.webhook.async_generate_url(
            webhook_id
        )
        hass.components.webhook.async_register(
            DOMAIN, "Netatmo", webhook_id, handle_webhook
        )
        # Register the URL with Netatmo and make sure the subscription is
        # dropped on shutdown so the cloud stops pushing to a dead endpoint.
        auth.addwebhook(hass.data[DATA_WEBHOOK_URL])
        hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, dropwebhook)

    def _service_addwebhook(service):
        """Service to (re)add webhooks during runtime."""
        url = service.data.get(CONF_URL)
        if url is None:
            # Fall back to the webhook URL generated at setup time.
            url = hass.data[DATA_WEBHOOK_URL]
        _LOGGER.info("Adding webhook for URL: %s", url)
        auth.addwebhook(url)

    hass.services.register(
        DOMAIN,
        SERVICE_ADDWEBHOOK,
        _service_addwebhook,
        schema=SCHEMA_SERVICE_ADDWEBHOOK,
    )

    def _service_dropwebhook(service):
        """Service to drop webhooks during runtime."""
        _LOGGER.info("Dropping webhook")
        auth.dropwebhook()

    hass.services.register(
        DOMAIN,
        SERVICE_DROPWEBHOOK,
        _service_dropwebhook,
        schema=SCHEMA_SERVICE_DROPWEBHOOK,
    )

    return True
def dropwebhook(hass):
    """Drop the webhook subscription registered with Netatmo."""
    hass.data[DATA_NETATMO_AUTH].dropwebhook()
async def handle_webhook(hass, webhook_id, request):
    """Handle webhook callback.

    Decodes the Netatmo push payload and republishes it on the Home
    Assistant event bus under the matching netatmo_* event name; payloads
    that are not valid JSON are ignored.
    """
    try:
        data = await request.json()
    except ValueError:
        return None

    _LOGGER.debug("Got webhook data: %s", data)
    published_data = {
        ATTR_EVENT_TYPE: data.get(ATTR_EVENT_TYPE),
        ATTR_HOME_NAME: data.get(ATTR_HOME_NAME),
        ATTR_CAMERA_ID: data.get(ATTR_CAMERA_ID),
        ATTR_MESSAGE: data.get(ATTR_MESSAGE),
    }
    event_type = data.get(ATTR_EVENT_TYPE)
    if event_type == EVENT_PERSON:
        # One bus event per detected person.
        for person in data[ATTR_PERSONS]:
            published_data[ATTR_ID] = person.get(ATTR_ID)
            published_data[ATTR_NAME] = hass.data[DATA_PERSONS].get(
                published_data[ATTR_ID], DEFAULT_PERSON
            )
            published_data[ATTR_IS_KNOWN] = person.get(ATTR_IS_KNOWN)
            published_data[ATTR_FACE_URL] = person.get(ATTR_FACE_URL)
            hass.bus.async_fire(EVENT_BUS_PERSON, published_data)
    elif event_type in (EVENT_MOVEMENT, EVENT_HUMAN, EVENT_ANIMAL,
                        EVENT_VEHICLE):
        # NOTE: the original code fired the vehicle event *before* adding
        # the URLs, so vehicle events were published without them; the URLs
        # are now attached first for every detection type.
        published_data[ATTR_VIGNETTE_URL] = data.get(ATTR_VIGNETTE_URL)
        published_data[ATTR_SNAPSHOT_URL] = data.get(ATTR_SNAPSHOT_URL)
        bus_event = {
            EVENT_MOVEMENT: EVENT_BUS_MOVEMENT,
            EVENT_HUMAN: EVENT_BUS_HUMAN,
            EVENT_ANIMAL: EVENT_BUS_ANIMAL,
            EVENT_VEHICLE: EVENT_BUS_VEHICLE,
        }[event_type]
        hass.bus.async_fire(bus_event, published_data)
    else:
        # Unknown event type: forward the raw payload.
        hass.bus.async_fire(EVENT_BUS_OTHER, data)
class CameraData:
    """Fetch and cache camera information from the Netatmo API."""

    def __init__(self, hass, auth, home=None):
        """Initialize the data object."""
        self._hass = hass
        self.auth = auth
        self.camera_data = None
        self.camera_names = []
        self.module_names = []
        self.home = home
        self.camera_type = None

    def get_camera_names(self):
        """Return all camera available on the API as a list."""
        self.camera_names = []
        self.update()
        # Restrict to the configured home when one is set.
        homes = [self.home] if self.home else list(self.camera_data.cameras)
        for home in homes:
            for camera in self.camera_data.cameras[home].values():
                self.camera_names.append(camera["name"])
        return self.camera_names

    def get_module_names(self, camera_name):
        """Return all module available on the API as a list."""
        self.module_names = []
        self.update()
        cam_id = self.camera_data.cameraByName(camera=camera_name,
                                               home=self.home)["id"]
        self.module_names = [
            module["name"]
            for module in self.camera_data.modules.values()
            if module["cam_id"] == cam_id
        ]
        return self.module_names

    def get_camera_type(self, camera=None, home=None, cid=None):
        """Return camera type for a camera, cid has preference over camera."""
        self.camera_type = self.camera_data.cameraType(camera=camera,
                                                       home=home, cid=cid)
        return self.camera_type

    def get_persons(self):
        """Gather person data for webhooks."""
        known_persons = self._hass.data[DATA_PERSONS]
        for person_id, person_data in self.camera_data.persons.items():
            known_persons[person_id] = person_data.get(ATTR_PSEUDO)

    @Throttle(MIN_TIME_BETWEEN_UPDATES)
    def update(self):
        """Call the Netatmo API to update the data."""
        import pyatmo
        self.camera_data = pyatmo.CameraData(self.auth, size=100)

    @Throttle(MIN_TIME_BETWEEN_EVENT_UPDATES)
    def update_event(self):
        """Call the Netatmo API to update the events."""
        self.camera_data.updateEvent(home=self.home,
                                     cameratype=self.camera_type)
| {
"content_hash": "84fb3c96550943c22aa1a6888aec9809",
"timestamp": "",
"source": "github",
"line_count": 264,
"max_line_length": 88,
"avg_line_length": 33.47727272727273,
"alnum_prop": 0.6408689748811949,
"repo_name": "fbradyirl/home-assistant",
"id": "28d422557da3f36f54da3d0c0c4cd301b8749a2b",
"size": "8838",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/netatmo/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1829"
},
{
"name": "Python",
"bytes": "16494727"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17784"
}
],
"symlink_target": ""
} |
import os
from robot.libraries.BuiltIn import BuiltIn
from robot.api import logger
from keywordgroup import KeywordGroup
class _LoggingKeywords(KeywordGroup):
    """Internal logging helpers shared by the AppiumLibrary keyword groups."""

    # Private

    def _debug(self, message):
        logger.debug(message)

    def _get_log_dir(self):
        # Prefer the directory of the Robot log file; fall back to the
        # output directory when logging to file is disabled.
        variables = BuiltIn().get_variables()
        logfile = variables['${LOG FILE}']
        if logfile == 'NONE':
            return variables['${OUTPUTDIR}']
        return os.path.dirname(logfile)

    def _html(self, message):
        logger.info(message, True, False)

    def _info(self, message):
        logger.info(message)

    def _log(self, message, level='INFO'):
        # Dispatch on the (case-insensitive) level name; unknown levels
        # are silently ignored, matching the original behavior.
        handlers = {
            'INFO': self._info,
            'DEBUG': self._debug,
            'WARN': self._warn,
            'HTML': self._html,
        }
        handler = handlers.get(level.upper())
        if handler is not None:
            handler(message)

    def _log_list(self, items, what='item'):
        plural = ['s', ''][len(items) == 1]
        lines = ['Altogether %d %s%s.' % (len(items), what, plural)]
        for position, item in enumerate(items, start=1):
            lines.append('%d: %s' % (position, item))
        self._info('\n'.join(lines))
        return items

    def _warn(self, message):
        logger.warn(message)
| {
"content_hash": "6d0dbd48ebbdfcd0d727d49c1a81b1a5",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 86,
"avg_line_length": 28.23404255319149,
"alnum_prop": 0.5410700828937453,
"repo_name": "minhnguyenphuonghoang/robotframework-appiumlibrary",
"id": "a146122b646e8fa6a9a142c3f7c326b9896acabf",
"size": "1352",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/AppiumLibrary/keywords/_logging.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1073"
},
{
"name": "Python",
"bytes": "95817"
}
],
"symlink_target": ""
} |
""" Example demonstrating the picking of line objects.
"""
import numpy as np
from bokeh.models import ColumnDataSource, CustomJS, TapTool
from bokeh.plotting import figure, show
t = np.linspace(0, 0.1, 100)
source = ColumnDataSource(data=dict(text=['No line selected'], text_color=['black']))
p = figure(width=600, height=500)
l1 = p.line(t, 100*np.sin(t*50), color='goldenrod', line_width=30)
l2 = p.line(t, 100*np.sin(t*50+1), color='lightcoral', line_width=20)
l3 = p.line(t, 100*np.sin(t*50+2), color='royalblue', line_width=10)
p.text(0, -100, text_color='text_color', source=source)
# cb_data = {geometries: ..., source: ...}
p.add_tools(TapTool(callback=CustomJS(args=dict(source=source), code= """
// get_view is experimental and may change in the future
const view = cb_data.source.selected.get_view()
if (view) {
const color = view.model.line_color.value
source.data = {
text: ['Selected the ' + color + ' line'],
text_color: [color]
}
}
""")))
show(p)
| {
"content_hash": "95b31803f9d238c2ee5bfc4c16d01c0c",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 85,
"avg_line_length": 30.5,
"alnum_prop": 0.6460945033751205,
"repo_name": "bokeh/bokeh",
"id": "8d53985cb8b356bfd84e1a709d1d656ccb2816ba",
"size": "1037",
"binary": false,
"copies": "1",
"ref": "refs/heads/branch-3.1",
"path": "examples/plotting/line_select.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1884"
},
{
"name": "Dockerfile",
"bytes": "1924"
},
{
"name": "GLSL",
"bytes": "44696"
},
{
"name": "HTML",
"bytes": "53475"
},
{
"name": "JavaScript",
"bytes": "20301"
},
{
"name": "Less",
"bytes": "46376"
},
{
"name": "Python",
"bytes": "4475226"
},
{
"name": "Shell",
"bytes": "7673"
},
{
"name": "TypeScript",
"bytes": "3652153"
}
],
"symlink_target": ""
} |
""" Command Line Interface Module """
import optparse
import sys
import os
import yaml
import re
import json
import hashlib
from pymongo import MongoClient
from outbit.restapi import routes
from outbit.plugins import builtins
from outbit.exceptions import DecryptWrongKeyException, DecryptNotClearTextException, DecryptException
from Crypto.Cipher import AES
from hashlib import md5
import binascii
from jinja2 import Template
import ssl
import logging
from logging.handlers import RotatingFileHandler
import time
from glob import glob
import shutil
import datetime
import multiprocessing
import copy
# Module-level state, populated at startup (and per worker thread for db).
db = None                    # MongoDB database handle (MongoClient(...).outbit)
encryption_password = None   # symmetric key used by encrypt_dict/decrypt_dict
ldap_server = None           # presumably set from server config — TODO confirm
ldap_use_ssl = True
ldap_user_cn = None
def counters_db_init(name):
    """Ensure a counter document called *name* exists, seeded at 0."""
    if db.counters.find_one({"_id": name}) is None:
        db.counters.insert_one({"_id": name, "seq": 0})
def counters_db_getNextSequence(name):
    """Atomically increment and return the sequence counter *name*.

    The previous implementation issued a separate update_one($inc)
    followed by a find_one, which is racy: two concurrent callers could
    both increment and then read the same final value, handing out a
    duplicate sequence number.  find_one_and_update performs the
    increment and the read as one atomic server-side operation.
    """
    # Local import: pymongo is already a dependency of this module.
    from pymongo import ReturnDocument
    result = db.counters.find_one_and_update(
        {"_id": name},
        {"$inc": {"seq": 1}},
        return_document=ReturnDocument.AFTER,
    )
    return result["seq"]
def schedule_manager():
    """Run the cron-style scheduler loop (intended for a worker process).

    Every ~10 seconds, reads all documents from db.schedules and runs each
    schedule whose minute/hour/day_of_month/month/day_of_week fields match
    the current time ('*' matches anything).  A schedule is run at most
    once per matching minute, tracked in schedule_last_run.  Never returns.
    """
    schedule_last_run = {}  # last run time per schedule name
    global db

    # Setup DB Connection For Thread
    db = MongoClient('localhost').outbit

    while True:
        # Get Current Time
        cron_minute = datetime.datetime.now().minute
        cron_hour = datetime.datetime.now().hour
        cron_day_of_month = datetime.datetime.today().day
        cron_month = datetime.datetime.today().month
        cron_day_of_week = datetime.datetime.today().weekday()

        cursor = db.schedules.find()
        for doc in list(cursor):
            name = doc["name"]
            user = doc["user"]
            category = doc["category"]
            action = doc["action"]
            options = None  # Default (NOT WORKING)
            minute = "*"  # Default
            hour = "*"  # Default
            day_of_month = "*"  # Default
            month = "*"  # Default
            day_of_week = "*"  # Default
            if "options" in doc:
                options = doc["options"]
            if "minute" in doc:
                minute = doc["minute"]
            if "hour" in doc:
                hour = doc["hour"]
            if "day_of_month" in doc:
                day_of_month = doc["day_of_month"]
            if "month" in doc:
                month = doc["month"]
            if "day_of_week" in doc:
                day_of_week = doc["day_of_week"]

            # * matches anything, so make it match the current time
            if minute == "*":
                minute = cron_minute
            else:
                minute = int(minute)
            if hour == "*":
                hour = cron_hour
            else:
                hour = int(hour)
            if day_of_month == "*":
                day_of_month = cron_day_of_month
            else:
                day_of_month = int(day_of_month)
            if month == "*":
                month = cron_month
            else:
                month = int(month)
            if day_of_week == "*":
                day_of_week = cron_day_of_week
            else:
                day_of_week = int(day_of_week)

            # Check if cron should be run, see if each setting matches
            # If name is not in schedule_last_run its the first time running it, so thats ok.
            # If name is already in schedule_last_run then check to make sure it didnt already run within the same minute
            if cron_minute == minute and \
                    cron_hour == hour and \
                    cron_day_of_month == day_of_month and \
                    cron_month == month and \
                    cron_day_of_week == day_of_week and \
                    (name not in schedule_last_run or \
                    not (schedule_last_run[name][0] == cron_minute and \
                    schedule_last_run[name][1] == cron_hour and \
                    schedule_last_run[name][2] == cron_day_of_month and \
                    schedule_last_run[name][3] == cron_month and \
                    schedule_last_run[name][4] == cron_day_of_week)):
                # Run Scheduled Action
                dat = parse_action(user, category, action, options)
                # Audit Logging / History
                log_action(user, {"result": dat, "category": category, "action": action, "options": options})
                schedule_last_run[name] = (cron_minute, cron_hour, cron_day_of_month, cron_month, cron_day_of_week)

        # Delay 10 seconds between each check
        time.sleep(10)
# Registry of plugin callables keyed by short name (the part after the
# 'plugin_' prefix); populated by load_plugins()/load_plugins_from_module().
plugins = {}

# Built-in actions always available, mapping (category, action) to the
# plugin name that implements it, with a short description for help output.
builtin_actions = [{'category': '/actions', 'plugin': 'actions_list', 'action': 'list', 'desc': 'list actions'},
        {'category': '/actions', 'plugin': 'actions_del', 'action': 'del', 'desc': 'del actions'},
        {'category': '/actions', 'plugin': 'actions_edit', 'action': 'edit', 'desc': 'edit actions'},
        {'category': '/actions', 'plugin': 'actions_add', 'action': 'add', 'desc': 'add actions'},
        {'category': '/users', 'plugin': 'users_list', 'action': 'list', 'desc': 'list users'},
        {'category': '/users', 'plugin': 'users_del', 'action': 'del', 'desc': 'del users'},
        {'category': '/users', 'plugin': 'users_edit', 'action': 'edit', 'desc': 'edit users'},
        {'category': '/users', 'plugin': 'users_add', 'action': 'add', 'desc': 'add users'},
        {'category': '/roles', 'plugin': 'roles_list', 'action': 'list', 'desc': 'list roles'},
        {'category': '/roles', 'plugin': 'roles_del', 'action': 'del', 'desc': 'del roles'},
        {'category': '/roles', 'plugin': 'roles_edit', 'action': 'edit', 'desc': 'edit roles'},
        {'category': '/roles', 'plugin': 'roles_add', 'action': 'add', 'desc': 'add roles'},
        {'category': '/secrets', 'plugin': 'secrets_list', 'action': 'list', 'desc': 'list secrets'},
        {'category': '/secrets', 'plugin': 'secrets_del', 'action': 'del', 'desc': 'del secrets'},
        {'category': '/secrets', 'plugin': 'secrets_edit', 'action': 'edit', 'desc': 'edit secrets'},
        {'category': '/secrets', 'plugin': 'secrets_add', 'action': 'add', 'desc': 'add secrets'},
        {'category': '/secrets', 'plugin': 'secrets_encryptpw', 'action': 'encryptpw', 'desc': 'Change password encryption'},
        {'category': '/plugins', 'plugin': 'plugins_list', 'action': 'list', 'desc': 'list plugins'},
        {'category': '/', 'plugin': 'ping', 'action': 'ping', 'desc': 'verify connectivity'},
        {'category': '/', 'plugin': 'logs', 'action': 'logs', 'desc': 'show the history log'},
        {'category': '/', 'plugin': 'help', 'action': 'help', 'desc': 'print usage'},
        {'category': '/help', 'plugin': 'help', 'action': '*', 'desc': 'print usage'},
        {'category': '/jobs', 'plugin': 'jobs_list', 'action': 'list', 'desc': 'list jobs'},
        {'category': '/jobs', 'plugin': 'jobs_status', 'action': 'status', 'desc': 'get status of job'},
        {'category': '/jobs', 'plugin': 'jobs_kill', 'action': 'kill', 'desc': 'kill a job'},
        {'category': '/schedules', 'plugin': 'schedules_add', 'action': 'add', 'desc': 'add schedule'},
        {'category': '/schedules', 'plugin': 'schedules_edit', 'action': 'edit', 'desc': 'edit schedule'},
        {'category': '/schedules', 'plugin': 'schedules_list', 'action': 'list', 'desc': 'list schedules'},
        {'category': '/schedules', 'plugin': 'schedules_del', 'action': 'del', 'desc': 'del schedule'},
        {'category': '/inventory', 'plugin': 'inventory_list', 'action': 'list', 'desc': 'list inventory'},
        {'category': '/inventory', 'plugin': 'inventory_del', 'action': 'del', 'desc': 'del inventory item'},
        {'category': '/', 'plugin': 'stats', 'action': 'stats', 'desc': 'statistics'},
        ]
def load_plugins(plugin_paths=None):
    """Import every plugin module found on the plugin search path.

    Parameters
    ----------
    plugin_paths : str or None
        Colon-separated list of directories to scan; defaults to the
        ``plugins/`` directory shipped next to this package.

    Each ``*.py`` file (except ``__init__.py``) is imported and its
    ``plugin_*`` functions registered via ``load_plugins_from_module``.
    """
    default_plugin_path = os.path.dirname(os.path.realpath(__file__)) + "/../plugins/"
    if plugin_paths is None:
        plugin_paths = default_plugin_path
    for plugin_path in plugin_paths.split(":"):
        sys.path.append(plugin_path)
        for file in os.listdir(plugin_path):
            if file.endswith(".py") and file != "__init__.py":
                # BUG FIX: file.rstrip(".py") strips any trailing '.', 'p' or
                # 'y' characters (e.g. "happy.py" -> "hap"); slice off the
                # extension instead so module names are derived correctly.
                module_name = file[:-len(".py")]
                plugin_module = __import__(module_name, fromlist=[''])
                load_plugins_from_module(plugin_module)
def load_plugins_from_module(module):
    """Register all ``plugin_<name>`` functions of *module* in the global
    ``plugins`` registry, keyed by their short ``<name>``.

    A name already present in the registry is not overwritten, so the first
    module to define a plugin wins.
    """
    import inspect
    global plugins
    # getmembers with a predicate yields only the function members.
    for plugin_name, plugin_function in inspect.getmembers(module, inspect.isfunction):
        m = re.match(r'^plugin_(.*?)$', plugin_name)
        if m:
            plugin_short_name = m.group(1)
            # BUG FIX: the original guard checked the *full* "plugin_*" name
            # against the registry, but the registry stores short names, so
            # the duplicate check never matched and later modules silently
            # overwrote earlier plugins. Compare the short name instead.
            if plugin_short_name not in plugins:
                plugins[plugin_short_name] = plugin_function
def log_action(username, post):
    """Record an executed request in the ``logs`` collection.

    Only well-formed requests (those carrying both a category and an
    action) are logged; sensitive option values are redacted first.
    """
    # Only Log Valid Requests
    if post["category"] is None or post["action"] is None:
        return
    opts = post["options"]
    if opts is not None:
        # Filter sensitive information from options
        for sensitive in ("password", "secret"):
            if sensitive in opts:
                opts[sensitive] = "..."
    post["user"] = username
    post["date"] = datetime.datetime.utcnow()
    db.logs.insert_one(post)
def encrypt_dict(dictobj):
    """Encrypt sensitive values of *dictobj* in place.

    Uses the module-level ``encryption_password``. Always returns True
    (mirrors decrypt_dict, which returns False on failure).
    """
    global encryption_password
    if dictobj is not None:
        for sensitive_key in ("secret",):
            if sensitive_key in dictobj:
                dictobj[sensitive_key] = encrypt_str(dictobj[sensitive_key], encryption_password)
    return True
def decrypt_dict(dictobj):
    """Decrypt sensitive values of *dictobj* in place.

    Returns False when decryption of any value fails, True otherwise.
    """
    global encryption_password
    if dictobj is None:
        return True
    for sensitive_key in ("secret",):
        if sensitive_key not in dictobj:
            continue
        try:
            dictobj[sensitive_key] = decrypt_str(
                dictobj[sensitive_key], encryption_password, keyname=dictobj["name"])
        except DecryptException:
            return False
    return True
def aes_derive_key_and_iv(password, salt, key_length, iv_length):
    """ source: Ansible source code """
    """ Create a key and an initialization vector """
    # OpenSSL EVP_BytesToKey-style derivation: repeatedly hash
    # (previous digest + password + salt) with MD5 and concatenate the
    # digests until key_length + iv_length bytes are available.
    # NOTE(review): MD5 with a single round is a weak KDF by modern
    # standards; changing it would break decryption of existing secrets.
    # NOTE(review): this is Python-2-only code — str/bytes are conflated
    # (md5() of a str, str() around the raw digest); under Python 3 the
    # join/hash calls would fail or corrupt the key material.
    d = d_i = ''
    while len(d) < key_length + iv_length:
        text = ''.join([d_i, password, salt])
        d_i = str(md5(text).digest())
        d += d_i
    # First key_length bytes become the key, the next iv_length the IV.
    key = d[:key_length]
    iv = d[key_length:key_length+iv_length]
    return key, iv
def encrypt_str(text, encrypt_password=None, key_len=32, encryption_prefix="__outbit_encrypted__:"):
    """Encrypt *text* with AES-CFB and return it base64-encoded.

    The known prefix is prepended before encryption so decrypt_str can
    verify that the correct key was used. If no password is available the
    prefixed text is returned as-is (clear text).
    """
    global encryption_password
    if encrypt_password is None and encryption_password is not None:
        # If No encryption password provided, use global encryption password
        encrypt_password = encryption_password
    encrypt_text = encryption_prefix + text
    if encrypt_password is not None:
        # NOTE(review): a fixed salt means identical passwords always derive
        # the same key and IV — every message is encrypted with the same IV,
        # which weakens CFB mode. Changing it would break existing secrets.
        salt = "__Salt__"
        key, iv = aes_derive_key_and_iv(encrypt_password, salt, key_len, AES.block_size)
        encryption_suite = AES.new(key, AES.MODE_CFB, iv)
        return str(binascii.b2a_base64(encryption_suite.encrypt(encrypt_text)))
    return str(encrypt_text)
def decrypt_str(text, encrypt_password=None, key_len=32, encryption_prefix="__outbit_encrypted__:", keyname="unknown"):
    """Decrypt a value produced by encrypt_str.

    Returns the plain text without the marker prefix. Raises
    DecryptWrongKeyException when decryption succeeds but the marker prefix
    is missing (wrong key), and DecryptNotClearTextException when no
    password is available and the value is not clear text.
    """
    global encryption_password
    if encrypt_password is None and encryption_password is not None:
        # If No encryption password provided, use global encryption password
        encrypt_password = encryption_password
    if text[:len(encryption_prefix)] == encryption_prefix:
        # Clear Text, No Encryption Password Provided
        return str(text[len(encryption_prefix):])
    elif encrypt_password is not None:
        # Decrypt using password
        # NOTE(review): the fixed salt/IV mirror encrypt_str and must stay in
        # sync with it, or existing stored secrets become unreadable.
        salt = "__Salt__"
        key, iv = aes_derive_key_and_iv(encrypt_password, salt, key_len, AES.block_size)
        decryption_suite = AES.new(key, AES.MODE_CFB, iv)
        decrypt_text = str(decryption_suite.decrypt(binascii.a2b_base64(text)))
        if decrypt_text[:len(encryption_prefix)] == encryption_prefix:
            # Decrypted Text: the marker prefix proves the key was correct
            return str(decrypt_text[len(encryption_prefix):])
        else:
            # Probably Wrong Key
            raise DecryptWrongKeyException(" error: Failed to decrypt a secret named %s. If you recently changed your encryption_password try 'secrets encryptpw oldpw=XXXX newpw=XXXX'." % keyname)
    else:
        # Decryption Failed, Its Not Clear Text
        raise DecryptNotClearTextException(" error: Failed to decrypt a secret named %s. If you recently disabled your encryption_password then re-enable it." % keyname)
def secret_has_permission(user, secret):
    """Return True when *user* belongs to a role that grants the named secret."""
    for role in db.roles.find():
        if user not in role["users"].split(","):
            continue
        if "secrets" not in role:
            # A role without an explicit secrets list grants every secret
            return True
        if secret in role["secrets"].split(","):
            return True
    return False
def roles_has_permission(user, action, options):
    """Return True when *user* is allowed to run *action*.

    Parameters
    ----------
    user : str
        Requesting username.
    action : dict
        Action record with at least "category" and "action" keys.
    options : dict
        Options supplied with the request (used for the self-service
        password-change exception).
    """
    # Actions that are always allowed regardless of role membership:
    # ping, help, and the jobs status/list/kill operations.
    always_allowed = [
        ("/", "ping"),
        ("/", "help"),
        ("/jobs", "status"),
        ("/jobs", "list"),
        ("/jobs", "kill"),
    ]
    if (action["category"], action["action"]) in always_allowed:
        return True
    # Users may edit their own password: "users edit" is permitted when no
    # username option is given (implicitly self) or it matches the caller.
    if action["category"] == "/users" and action["action"] == "edit" and ("username" not in options or options["username"] == user):
        return True
    # Build the full "/category/action" path used for role prefix matching.
    if action["category"][-1:] == "/":
        action_str = "%s%s" % (action["category"], action["action"])
    else:
        action_str = "%s/%s" % (action["category"], action["action"])
    cursor = db.roles.find()
    for doc in list(cursor):
        if user in list(doc["users"].split(",")):
            # FIX: loop variable renamed from "action", which shadowed the
            # dict parameter of the same name.
            for role_action in list(doc["actions"].split(",")):
                # NOTE(review): role_action is interpolated unescaped into a
                # regex used as a prefix match (so a role of "/" matches
                # everything); metacharacters in role definitions would be
                # interpreted — confirm role contents are trusted input.
                if re.match(r"^%s" % role_action, action_str):
                    return True
    return False
def clean_all_secrets():
    """Remove every rendered secret file or directory under /tmp/outbit/.

    Creates the directory (owner-only permissions) when it does not exist
    yet, so callers can rely on it being present afterwards.
    """
    if not os.path.isdir("/tmp/outbit/"):
        os.mkdir("/tmp/outbit")
    # Make sure directory permissions are secure.
    # FIX: 0o700 instead of the Python-2-only literal 0700 — same value,
    # but valid on Python 2.6+ and Python 3.
    os.chmod("/tmp/outbit/", 0o700)
    for filename in glob("/tmp/outbit/*"):
        if os.path.isdir(filename):
            shutil.rmtree(filename)
        else:
            os.remove(filename)
def clean_secrets(secrets):
    """Delete the given rendered secret files, ignoring missing ones.

    *secrets* is an iterable of file paths, or None (a no-op).
    """
    if secrets is None:
        return None
    for secret_path in secrets:
        # Temp File must exist before we try to delete it
        if not os.path.isfile(secret_path):
            continue
        os.remove(secret_path)
def render_secret_file(name, secret):
    """Write *secret* to a uniquely named temp file and return its path.

    The file lands in /tmp/outbit/ as "<name>.<unix-time>" with owner-only
    permissions; callers are expected to delete it via clean_secrets().
    """
    filepath = "/tmp/outbit/"
    # Timestamp suffix keeps concurrent renderings of the same secret apart.
    filename = "%s.%s" % (name, time.time())
    fullpath = "%s%s" % (filepath, filename)
    if not os.path.isdir(filepath):
        os.mkdir(filepath)
    with open(fullpath, "w") as textfile:
        textfile.write(secret)
    # FIX: 0o700 instead of the Python-2-only literal 0700 — same value,
    # but valid on Python 2.6+ and Python 3.
    os.chmod(fullpath, 0o700)
    return fullpath
def render_secrets(user, dictobj):
    """Render secret template variables into *dictobj* in place.

    Collects every secret the user may access, writes file-type secrets to
    temp files, then substitutes them via render_vars. Returns the list of
    temp file paths created (for later cleanup), or None when *user* /
    *dictobj* is None or any secret fails to decrypt.
    """
    if dictobj is None or user is None:
        return None
    accessible = {}
    tmp_secret_files = []
    for doc in list(db.secrets.find()):
        if not decrypt_dict(doc):
            # Decryption Failed
            return None
        if not secret_has_permission(user, doc["name"]):
            continue
        if "type" in doc and doc["type"] == "file":
            # File-type secrets are materialized on disk and referenced by path
            rendered_path = render_secret_file(doc["name"], doc["secret"])
            accessible[doc["name"]] = rendered_path
            tmp_secret_files.append(rendered_path)
        else:
            accessible[doc["name"]] = doc["secret"]
    render_vars("secret", accessible, dictobj)
    return tmp_secret_files
def render_vars(varname, vardict, dictobj):
    """Template-render every string value of *dictobj* in place.

    Each value is rendered with the context {varname: vardict}; non-string
    values are left untouched. No-op (returns None) when either *dictobj*
    or *vardict* is None.
    """
    if dictobj is None or vardict is None:
        return None
    context = {varname: vardict}
    for key in dictobj:
        value = dictobj[key]
        if isinstance(value, basestring):
            dictobj[key] = Template(value).render(context)
def parse_action(user, category, action, options):
    """Find the action matching (category, action) and run its plugin.

    Searches the built-in actions followed by the db.actions collection;
    the first record whose category matches and whose action matches
    exactly (or is the wildcard "*") wins. Returns the plugin's JSON
    response string, a JSON error response, or None when nothing matched.
    """
    cursor = db.actions.find()
    for dbaction in builtin_actions + list(cursor):
        if dbaction["category"] == category and (dbaction["action"] == action or dbaction["action"] == "*"):
            new_dbaction = copy.copy(dbaction) # Make a copy to prevent modifying global builtin_actions
            # For wildcard records, record the concrete action that was requested.
            new_dbaction["action"] = action
            if "plugin" in dbaction:
                # Permission check happens before any secret is decrypted.
                if not roles_has_permission(user, dbaction, options):
                    return json.dumps({"response": " you do not have permission to run this action"})
                else:
                    # Admin functions do not allow secrets
                    if dbaction["category"] not in ["/actions", "/users", "/roles", "/secrets", "/plugins"]:
                        if dbaction["category"] == "/" and dbaction["action"] in ["ping"]:
                            # /ping does not allow secrets
                            pass
                        else:
                            # Run Plugin With Secret: first substitute option
                            # templates, then render accessible secrets into
                            # both the action record and the options.
                            render_vars("option", options, dbaction)
                            tmp_files_dbaction = render_secrets(user, dbaction)
                            tmp_files_options = render_secrets(user, options)
                            # Check Decryption Failed (render_secrets returns
                            # None on failure for a non-None input dict).
                            if user is not None:
                                if (dbaction is not None and tmp_files_dbaction is None) or (options is not None and tmp_files_options is None):
                                    return json.dumps({"response": " error: Failed to decrypt a secret. If you recently changed your encryption_password try 'secrets encryptpw oldpw=XXXX newpw=XXXX'."})
                            response = plugins[dbaction["plugin"]](user, new_dbaction, options)
                            response = json.loads(response)
                            # Rendered secret files are deleted as soon as the
                            # plugin returns.
                            clean_secrets(tmp_files_dbaction)
                            clean_secrets(tmp_files_options)
                            # async, return queue_id
                            if "response" not in response:
                                if "queue_id" not in response:
                                    return json.dumps({"response": " error: expected async queue id but found none"})
                            return json.dumps(response)
                    # Run Plugin Without Secrets
                    return plugins[dbaction["plugin"]](user, new_dbaction, options)
    return None
class Cli(object):
    """outbit CLI / API server entry point.

    Precedence for every setting is: command-line flag, then config file
    (~/.outbit-api.conf or /etc/outbit-api.conf), then a built-in default.
    """
    def __init__(self):
        """ Setup Arguments and Options for CLI """
        # Parse CLI Arguments
        parser = optparse.OptionParser()
        parser.add_option("-s", "--server", dest="server",
                          help="IP address or hostname of outbit-api server",
                          metavar="SERVER",
                          default=None)
        parser.add_option("-p", "--port", dest="port",
                          help="tcp port of outbit-api server",
                          metavar="PORT",
                          default=None)
        parser.add_option("-t", "--insecure", dest="is_secure",
                          help="Do Not Use SSL",
                          metavar="SECURE",
                          action="store_false",
                          default=True)
        parser.add_option("-d", "--debug", dest="is_debug",
                          help="Debug Mode",
                          metavar="DEBUG",
                          action="store_true",
                          default=False)
        parser.add_option("-k", "--ssl_key", dest="ssl_key",
                          help="SSL key",
                          metavar="SSLKEY",
                          default=None)
        parser.add_option("-c", "--ssl_crt", dest="ssl_crt",
                          help="SSL certificate",
                          metavar="SSLCRT",
                          default=None)
        parser.add_option("-l", "--ldap_server", dest="ldap_server",
                          help="LDAP Server for Authentiation",
                          metavar="LDAPSERVER",
                          default=None)
        parser.add_option("-z", "--ldap_use_ssl", dest="ldap_use_ssl",
                          help="Enable SSL for LDAP",
                          metavar="LDAPUSESSL",
                          default=None)
        parser.add_option("-x", "--ldap_user_cn", dest="ldap_user_cn",
                          help="LDAP User CN",
                          metavar="LDAPUSERCN",
                          default=None)
        parser.add_option("-P", "--pluginpath", dest="pluginpath",
                          help="Plugin Paths, seperated by :",
                          metavar="PLUGINPATH",
                          default=None)
        global encryption_password
        global ldap_server
        global ldap_use_ssl
        global ldap_user_cn
        (options, args) = parser.parse_args()
        self.server = options.server
        self.port = options.port
        self.is_secure = options.is_secure
        self.is_debug = options.is_debug
        self.ssl_key = options.ssl_key
        self.ssl_crt = options.ssl_crt
        ldap_server = options.ldap_server
        ldap_use_ssl = options.ldap_use_ssl
        ldap_user_cn = options.ldap_user_cn
        pluginpath = options.pluginpath
        # Assign values from conf (first existing config file read last wins)
        outbit_config_locations = [os.path.expanduser("~")+"/.outbit-api.conf", "/etc/outbit-api.conf"]
        outbit_conf_obj = {}
        for outbit_conf in outbit_config_locations:
            if os.path.isfile(outbit_conf):
                with open(outbit_conf, 'r') as stream:
                    try:
                        # NOTE(review): yaml.load without an explicit Loader can
                        # construct arbitrary objects; consider yaml.safe_load
                        # if the config file is not fully trusted.
                        outbit_conf_obj = yaml.load(stream)
                    except yaml.YAMLError as excep:
                        print("%s\n" % excep)
        # Config-file values apply only when the CLI did not set the option.
        if self.server is None and "server" in outbit_conf_obj:
            self.server = str(outbit_conf_obj["server"])
        if self.port is None and "port" in outbit_conf_obj:
            self.port = int(outbit_conf_obj["port"])
        if self.is_secure is True and "secure" in outbit_conf_obj:
            self.is_secure = bool(outbit_conf_obj["secure"])
        # BUG FIX: was `if self.is_debug == True`, which consulted the config
        # "debug" key only when -d had already enabled debug on the CLI (so
        # the config setting could never turn debug on). The config should
        # apply when the CLI did NOT pass -d, i.e. when is_debug is False.
        if self.is_debug is False and "debug" in outbit_conf_obj:
            self.is_debug = bool(outbit_conf_obj["debug"])
        if encryption_password is None and "encryption_password" in outbit_conf_obj:
            encryption_password = str(outbit_conf_obj["encryption_password"])
        # BUG FIX: ssl_key/ssl_crt were cast with bool(), turning the
        # configured file paths into True; they are paths, so cast with str().
        if self.ssl_key is None and "ssl_key" in outbit_conf_obj:
            self.ssl_key = str(outbit_conf_obj["ssl_key"])
        if self.ssl_crt is None and "ssl_crt" in outbit_conf_obj:
            self.ssl_crt = str(outbit_conf_obj["ssl_crt"])
        if ldap_server is None and "ldap_server" in outbit_conf_obj:
            ldap_server = str(outbit_conf_obj["ldap_server"])
        if ldap_use_ssl is None and "ldap_use_ssl" in outbit_conf_obj:
            ldap_use_ssl = str(outbit_conf_obj["ldap_use_ssl"])
        if ldap_user_cn is None and "ldap_user_cn" in outbit_conf_obj:
            ldap_user_cn = str(outbit_conf_obj["ldap_user_cn"])
        if pluginpath is None and "pluginpath" in outbit_conf_obj:
            pluginpath = str(outbit_conf_obj["pluginpath"])
        # Assign Default values if they were not specified at the cli or in the conf
        if self.server is None:
            self.server = "127.0.0.1"
        if self.port is None:
            self.port = 8088
        if self.ssl_key is None:
            self.ssl_key = "/usr/local/etc/openssl/certs/outbit.key"
        if self.ssl_crt is None:
            self.ssl_crt = "/usr/local/etc/openssl/certs/outbit.crt"
        # (Removed no-op re-assignments of ldap_* from `options`; those
        # values were already taken from `options` above.)
        # Load Plugins
        load_plugins(pluginpath)
        # Clean any left over secret files
        clean_all_secrets()
    # NOTE: a broken duplicate of run() (copy-pasted __init__ tail referencing
    # undefined locals, then shadowed by the real definition) was removed;
    # only the final, working definition is kept.
    def run(self):
        """ EntryPoint Of Application """
        global db
        # Setup logging to logfile (only if the file was touched)
        if os.path.isfile("/var/log/outbit.log"):
            handler = RotatingFileHandler('/var/log/outbit.log', maxBytes=10000, backupCount=1)
            handler.setLevel(logging.INFO)
            routes.app.logger.addHandler(handler)
            # Disable stdout logging since its logging to a log file
            log = logging.getLogger('werkzeug')
            log.disabled = True
        # First Time Defaults, Setup superadmin if it doesnt exist
        # NOTE(review): a well-known default credential; deployments should
        # change it immediately after first start.
        default_user = "superadmin"
        default_password = "superadmin"
        default_role = "super"
        # Start Scheduler
        p = multiprocessing.Process(target=schedule_manager)
        p.start()
        # Setup DB Connection
        db = MongoClient('localhost').outbit
        # Init db counters for jobs
        counters_db_init("jobid")
        # Create default user
        post = db.users.find_one({"username": default_user})
        if post is None:
            m = hashlib.md5()
            m.update(default_password)
            password_md5 = str(m.hexdigest())
            post = {"username": default_user, "password_md5": password_md5}
            db.users.insert_one(post)
        # Create default role
        post = db.roles.find_one({"name": default_role})
        if post is None:
            post = {"name": default_role, "users": default_user, "actions": "/"}
            db.roles.insert_one(post)
        # Start API Server
        routes.app.logger.info("Starting outbit api server on %s://%s:%d" % ("https" if
            self.is_secure else "http", self.server, self.port))
        if self.is_secure:
            context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
            context.check_hostname = False
            context.load_cert_chain(certfile=self.ssl_crt, keyfile=self.ssl_key)
            routes.app.run(threaded=True, host=self.server, ssl_context=context, port=self.port, debug=self.is_debug)
        else:
            routes.app.run(threaded=True, host=self.server, port=self.port, debug=self.is_debug)
| {
"content_hash": "a6c4230fe59559cc5d52daba588cd59f",
"timestamp": "",
"source": "github",
"line_count": 662,
"max_line_length": 203,
"avg_line_length": 42.383685800604226,
"alnum_prop": 0.5539240145413072,
"repo_name": "starboarder2001/outbit",
"id": "c19e56e9128cdb0efff8fd90d632020a1a4bd669",
"size": "28058",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "lib/outbit/cli/api.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1682"
},
{
"name": "HTML",
"bytes": "3411"
},
{
"name": "JavaScript",
"bytes": "6974"
},
{
"name": "Python",
"bytes": "136498"
},
{
"name": "Shell",
"bytes": "380"
}
],
"symlink_target": ""
} |
from unittest.mock import ANY, call
from boxsdk.config import API
def test_automatic_refresh(
    box_client,
    mock_box_network,
    generic_successful_response,
    successful_token_mock,
    unauthorized_response,
):
    """A 401 on a folder GET should trigger a token refresh and a retried GET."""
    session_request = mock_box_network.session.request
    session_request.side_effect = [
        unauthorized_response,
        successful_token_mock,
        generic_successful_response,
    ]
    box_client.folder('0').get()
    folders_get = call(
        'GET',
        f'{API.BASE_API_URL}/folders/0',
        headers=ANY,
        params=None,
        log_response_content=True,
    )
    token_post = call(
        'POST',
        f'{API.OAUTH2_API_URL}/token',
        data=ANY,
        headers={'content-type': 'application/x-www-form-urlencoded', 'User-Agent': ANY, 'X-Box-UA': ANY},
        log_response_content=True,
    )
    # Original GET, token refresh, then the identical GET retried.
    assert session_request.mock_calls == [folders_get, token_post, folders_get]
| {
"content_hash": "399de23bb2524df1714ab53e29446406",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 110,
"avg_line_length": 27.925,
"alnum_prop": 0.5335720680393913,
"repo_name": "box/box-python-sdk",
"id": "0117419da1aecc62d104dd3250d31c22e00079a1",
"size": "1117",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "test/integration/test_retry_and_refresh.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1036959"
},
{
"name": "Smarty",
"bytes": "527"
}
],
"symlink_target": ""
} |
class Settings:
    """Basic configuration values for the SLAMon Agent Fleet Manager."""

    # HTTP server port
    port = 8080

    # Primary PostgreSQL connection
    database_name = 'slamon'
    database_user = 'afm'
    database_password = 'changeme'

    # PostgreSQL connection used by the test suite
    test_database_name = 'slamon_tests'
    test_database_user = 'afm'
    test_database_password = 'changeme'

    # Whether the testing-only URLs are exposed
    testing_urls_available = True

    # Seconds an agent should wait before polling for tasks again
    agent_return_time = 5
    # Seconds of silence after which an agent counts as inactive
    agent_active_threshold = 300
"content_hash": "23f42fd8a5bdd4a26995e644f4e091c0",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 107,
"avg_line_length": 37.94444444444444,
"alnum_prop": 0.6954612005856515,
"repo_name": "StealthyLoner/SLAMon",
"id": "815618104cd886ff8584e7113d99faad7cd0077e",
"size": "683",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "slamon/afm/settings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "5404"
},
{
"name": "Java",
"bytes": "124032"
},
{
"name": "Python",
"bytes": "74716"
},
{
"name": "Ruby",
"bytes": "2140"
},
{
"name": "Shell",
"bytes": "1443"
}
],
"symlink_target": ""
} |
"""Grid-based simulation of lateral erosion by channels in a drainage network.
Benjamin Campforts
"""
import numpy as np
from landlab import Component, RasterModelGrid
from landlab.grid.nodestatus import NodeStatus
from landlab.utils.return_array import return_array_at_node
from ..depression_finder.lake_mapper import _FLOODED
from .ext.calc_sequential_ero_depo import _sequential_ero_depo
ROOT2 = np.sqrt(2.0) # syntactic sugar for precalculated square root of 2
TIME_STEP_FACTOR = 0.5 # factor used in simple subdivision solver
class SpaceLargeScaleEroder(Component):
"""Stream Power with Alluvium Conservation and Entrainment (SPACE) large scale eroder
The SPACE_large_Scale_eroder is based on the SPACE component and is designed
to be more robust against large time steps and coded in such a way that mass
conservation is explicitly conserved during calculation.
See the publication:
Shobe, C. M., Tucker, G. E., and Barnhart, K. R.: The SPACE 1.0 model: a
Landlab component for 2-D calculation of sediment transport, bedrock
erosion, and landscape evolution, Geosci. Model Dev., 10, 4577-4604,
`https://doi.org/10.5194/gmd-10-4577-2017 <https://www.geosci-model-dev.net/10/4577/2017/>`_, 2017.
    Unlike some other fluvial erosion components in Landlab, in this
    component (and :py:class:`~landlab.components.ErosionDeposition`) no
    erosion occurs in depressions or in areas with adverse slopes. There is no
    ability to pass a keyword argument ``erode_flooded_nodes``.
If a depressions are handled (as indicated by the presence of the field
"flood_status_code" at nodes), then deposition occurs throughout the
depression and sediment is passed out of the depression. Where pits are
encountered, then all sediment is deposited at that node only.
    Note: In the current version, we do not provide an adaptive time stepper.
    This will be added in future versions of this component.
    For more explanation and examples,
    check out the corresponding notebook of this component
Examples
---------
>>> import numpy as np
>>> from landlab import RasterModelGrid
>>> from landlab.components import (
... PriorityFloodFlowRouter, SpaceLargeScaleEroder
... )
>>> import matplotlib.pyplot as plt # For plotting results; optional
>>> from landlab import imshow_grid # For plotting results; optional
>>> num_rows = 20
>>> num_columns = 20
>>> node_spacing = 100.0
>>> mg = RasterModelGrid((num_rows, num_columns), xy_spacing=node_spacing)
>>> node_next_to_outlet = num_columns + 1
>>> np.random.seed(seed=5000)
>>> _ = mg.add_zeros("topographic__elevation", at="node")
>>> _ = mg.add_zeros("soil__depth", at="node")
>>> mg.at_node["soil__depth"][mg.core_nodes] = 2.0
>>> _ = mg.add_zeros("bedrock__elevation", at="node")
>>> mg.at_node["bedrock__elevation"] += (
... mg.node_y / 10. + mg.node_x / 10. + np.random.rand(len(mg.node_y)) / 10.
... )
>>> mg.at_node["bedrock__elevation"][:] = mg.at_node["topographic__elevation"]
>>> mg.at_node["topographic__elevation"][:] += mg.at_node["soil__depth"]
>>> mg.set_closed_boundaries_at_grid_edges(
... bottom_is_closed=True,
... left_is_closed=True,
... right_is_closed=True,
... top_is_closed=True,
... )
>>> mg.set_watershed_boundary_condition_outlet_id(
... 0, mg.at_node['topographic__elevation'], -9999.0
... )
>>> fr = PriorityFloodFlowRouter(mg, flow_metric='D8', suppress_out = True)
>>> sp = SpaceLargeScaleEroder(
... mg,
... K_sed=0.01,
... K_br=0.001,
... F_f=0.0,
... phi=0.0,
... H_star=1.0,
... v_s=5.0,
... m_sp=0.5,
... n_sp=1.0,
... sp_crit_sed=0,
... sp_crit_br=0,
... )
>>> timestep = 10.0
>>> elapsed_time = 0.0
>>> count = 0
>>> run_time = 1e4
>>> sed_flux = np.zeros(int(run_time // timestep))
>>> while elapsed_time < run_time:
... fr.run_one_step()
... _ = sp.run_one_step(dt=timestep)
... sed_flux[count] = mg.at_node["sediment__flux"][node_next_to_outlet]
... elapsed_time += timestep
... count += 1
Plot the results.
>>> fig = plt.figure()
>>> plot = plt.subplot()
>>> _ = imshow_grid(
... mg,
... "topographic__elevation",
... plot_name="Sediment flux",
... var_name="Sediment flux",
... var_units=r"m$^3$/yr",
... grid_units=("m", "m"),
... cmap="terrain",
... )
>>> _ = plt.figure()
>>> _ = imshow_grid(
... mg,
... "sediment__flux",
... plot_name="Sediment flux",
... var_name="Sediment flux",
... var_units=r"m$^3$/yr",
... grid_units=("m", "m"),
... cmap="terrain",
... )
>>> fig = plt.figure()
>>> sedfluxplot = plt.subplot()
>>> _ = sedfluxplot.plot(np.arange(len(sed_flux)) * timestep, sed_flux, color="k", linewidth=1.0)
>>> _ = sedfluxplot.set_xlabel("Time [yr]")
>>> _ = sedfluxplot.set_ylabel(r"Sediment flux [m$^3$/yr]")
References
----------
**Required Software Citation(s) Specific to this Component**
Shobe, C., Tucker, G., Barnhart, K. (2017). The SPACE 1.0 model: a Landlab
component for 2-D calculation of sediment transport, bedrock erosion, and
landscape evolution. Geoscientific Model Development 10(12), 4577 - 4604.
https://dx.doi.org/10.5194/gmd-10-4577-2017
**Additional References**
None Listed
"""
_name = "SpaceLargeScaleEroder"
_unit_agnostic = True
_info = {
"flow__link_to_receiver_node": {
"dtype": int,
"intent": "in",
"optional": True,
"units": "-",
"mapping": "node",
"doc": "ID of link downstream of each node, which carries the discharge",
},
"flow__receiver_node": {
"dtype": int,
"intent": "in",
"optional": False,
"units": "-",
"mapping": "node",
"doc": "Node array of receivers (node that receives flow from current node)",
},
"flow__upstream_node_order": {
"dtype": int,
"intent": "in",
"optional": False,
"units": "-",
"mapping": "node",
"doc": "Node array containing downstream-to-upstream ordered list of node IDs",
},
"sediment__influx": {
"dtype": float,
"intent": "out",
"optional": False,
"units": "m3/s",
"mapping": "node",
"doc": "Sediment flux (volume per unit time of sediment entering each node)",
},
"sediment__outflux": {
"dtype": float,
"intent": "out",
"optional": False,
"units": "m3/s",
"mapping": "node",
"doc": "Sediment flux (volume per unit time of sediment leaving each node)",
},
"soil__depth": {
"dtype": float,
"intent": "inout",
"optional": False,
"units": "m",
"mapping": "node",
"doc": "Depth of soil or weathered bedrock",
},
"surface_water__discharge": {
"dtype": float,
"intent": "in",
"optional": False,
"units": "m**3/s",
"mapping": "node",
"doc": "Volumetric discharge of surface water",
},
"topographic__elevation": {
"dtype": float,
"intent": "inout",
"optional": False,
"units": "m",
"mapping": "node",
"doc": "Land surface topographic elevation",
},
"topographic__steepest_slope": {
"dtype": float,
"intent": "in",
"optional": True,
"units": "-",
"mapping": "node",
"doc": "The steepest *downhill* slope",
},
}
_cite_as = """@Article{gmd-10-4577-2017,
AUTHOR = {Shobe, C. M. and Tucker, G. E. and Barnhart, K. R.},
TITLE = {The SPACE~1.0 model: a~Landlab component for 2-D calculation of sediment transport, bedrock erosion, and landscape evolution},
JOURNAL = {Geoscientific Model Development},
VOLUME = {10},
YEAR = {2017},
NUMBER = {12},
PAGES = {4577--4604},
URL = {https://www.geosci-model-dev.net/10/4577/2017/},
DOI = {10.5194/gmd-10-4577-2017}
}"""
    def __init__(
        self,
        grid,
        K_sed=0.02,
        K_br=0.02,
        F_f=0.0,
        phi=0.3,
        H_star=0.1,
        v_s=1.0,
        v_s_lake=None,
        m_sp=0.5,
        n_sp=1.0,
        sp_crit_sed=0.0,
        sp_crit_br=0.0,
        discharge_field="surface_water__discharge",
        erode_flooded_nodes=False,
        thickness_lim=100,
    ):
        """Initialize the SpaceLargeScaleEroder model.
        Parameters
        ----------
        grid : ModelGrid
            Landlab ModelGrid object
        K_sed : float, array of float, or str, optional
            Erodibility for sediment (units vary) as either a number or a field name.
        K_br : float, array of float, or str, optional
            Erodibility for bedrock (units vary) as either a number or a field name.
        F_f : float, optional
            Fraction of permanently suspendable fines in bedrock [-].
        phi : float, optional
            Sediment porosity [-].
        H_star : float, optional
            Sediment thickness required for full entrainment [L].
        v_s : float, optional
            Effective settling velocity for chosen grain size metric [L/T].
        v_s_lake : float, optional
            Effective settling velocity in lakes for chosen grain size metric [L/T].
        m_sp : float, optional
            Drainage area exponent (units vary).
        n_sp : float, optional
            Slope exponent (units vary).
        sp_crit_sed : float, array of float, or str, optional
            Critical stream power to erode sediment [E/(TL^2)].
        sp_crit_br : float, array of float, or str, optional
            Critical stream power to erode rock [E/(TL^2)]
        discharge_field : float, array of float, or str, optional
            Discharge [L^2/T]. The default is to use the grid field
            'surface_water__discharge', which is simply drainage area
            multiplied by the default rainfall rate (1 m/yr). To use custom
            spatially/temporally varying rainfall, use 'water__unit_flux_in'
            to specify water input to the FlowAccumulator.
        erode_flooded_nodes : bool, optional
            Whether erosion occurs in flooded nodes identified by a
            depression/lake mapper (e.g., DepressionFinderAndRouter). When set
            to false, the field *flood_status_code* must be present on the grid
            (this is created by the DepressionFinderAndRouter). Default False.
        thickness_lim : float, optional
            Soil depth above which sediment entrainment is no longer
            exponentially damped (keeps np.exp from overflowing). Default 100.
        """
        # This component assumes a single flow receiver per node
        # (route-to-one); reject route-to-multiple flow director output.
        if grid.at_node["flow__receiver_node"].size != grid.size("node"):
            raise NotImplementedError(
                "A route-to-multiple flow director has been "
                "run on this grid. The landlab development team has not "
                "verified that SpaceLargeScaleEroder is compatible with "
                "route-to-multiple methods. Please open a GitHub Issue "
                "to start this process."
            )
        super(SpaceLargeScaleEroder, self).__init__(grid)
        self._soil__depth = grid.at_node["soil__depth"]
        self._topographic__elevation = grid.at_node["topographic__elevation"]
        # Derive bedrock elevation from topography minus soil when the
        # caller did not provide the field.
        if "bedrock__elevation" in grid.at_node:
            self._bedrock__elevation = grid.at_node["bedrock__elevation"]
        else:
            self._bedrock__elevation = grid.add_zeros(
                "bedrock__elevation", at="node", dtype=float
            )
            self._bedrock__elevation[:] = (
                self._topographic__elevation - self._soil__depth
            )
        # Check consistency of bedrock, soil and topographic elevation fields
        err_msg = (
            "The sum of bedrock elevation and topographic elevation should be equal"
        )
        np.testing.assert_almost_equal(
            grid.at_node["bedrock__elevation"] + grid.at_node["soil__depth"],
            grid.at_node["topographic__elevation"],
            decimal=5,
            err_msg=err_msg,
        )
        # specific inits
        self._thickness_lim = thickness_lim
        self._H_star = H_star
        self._sed_erosion_term = np.zeros(grid.number_of_nodes)
        self._br_erosion_term = np.zeros(grid.number_of_nodes)
        self._Es = np.zeros(grid.number_of_nodes)
        self._Er = np.zeros(grid.number_of_nodes)
        # K's and critical values can be floats, grid fields, or arrays
        # use setters defined below
        # NOTE(review): these assignments write the private attributes
        # directly, bypassing the K_sed/K_br property setters (so a string
        # field name is stored as-is here) — confirm that is intended.
        self._K_sed = K_sed
        self._K_br = K_br
        self._sp_crit_sed = return_array_at_node(grid, sp_crit_sed)
        self._sp_crit_br = return_array_at_node(grid, sp_crit_br)
        self._erode_flooded_nodes = erode_flooded_nodes
        # Cached references to the flow-routing fields used every step.
        self._flow_receivers = grid.at_node["flow__receiver_node"]
        self._stack = grid.at_node["flow__upstream_node_order"]
        self._slope = grid.at_node["topographic__steepest_slope"]
        self.initialize_output_fields()
        self._qs = grid.at_node["sediment__outflux"]
        self._q = return_array_at_node(grid, discharge_field)
        # for backward compatibility (remove in 3.0.0+)
        grid.at_node["sediment__flux"] = grid.at_node["sediment__outflux"]
        # Scratch arrays reused by the erosion-rate calculation.
        self._Q_to_the_m = np.zeros(grid.number_of_nodes)
        self._S_to_the_n = np.zeros(grid.number_of_nodes)
        # store other constants
        self._m_sp = np.float64(m_sp)
        self._n_sp = np.float64(n_sp)
        self._phi = np.float64(phi)
        self._v_s = np.float64(v_s)
        # Raster grids may route along diagonals, so use d8 link lengths.
        if isinstance(grid, RasterModelGrid):
            self._link_lengths = grid.length_of_d8
        else:
            self._link_lengths = grid.length_of_link
        # Lake settling velocity defaults to the open-channel value.
        if v_s_lake is None:
            self._v_s_lake = np.float64(v_s)
        else:
            self._v_s_lake = np.float64(v_s_lake)
        self._F_f = np.float64(F_f)
        # Parameter sanity checks.
        if phi >= 1.0:
            raise ValueError("Porosity must be < 1.0")
        if F_f > 1.0:
            raise ValueError("Fraction of fines must be <= 1.0")
        if phi < 0.0:
            raise ValueError("Porosity must be > 0.0")
        if F_f < 0.0:
            raise ValueError("Fraction of fines must be > 0.0")
    @property
    def K_br(self):
        """Erodibility of bedrock (units depend on m_sp)."""
        return self._K_br
    @K_br.setter
    def K_br(self, new_val):
        # Accept a scalar, array, or grid-field name; normalize to a node array.
        self._K_br = return_array_at_node(self._grid, new_val)
    @property
    def K_sed(self):
        """Erodibility of sediment (units depend on m_sp)."""
        return self._K_sed
    @K_sed.setter
    def K_sed(self, new_val):
        # Accept a scalar, array, or grid-field name; normalize to a node array.
        self._K_sed = return_array_at_node(self._grid, new_val)
    @property
    def Es(self):
        """Sediment erosion term (per node, updated by _calc_erosion_rates)."""
        return self._Es
    @property
    def Er(self):
        """Bedrock erosion term (per node, updated by _calc_erosion_rates)."""
        return self._Er
    @property
    def sediment_influx(self):
        """Volumetric sediment influx to each node (the "sediment__influx" field)."""
        return self.grid.at_node["sediment__influx"]
def _calc_erosion_rates(self):
    """Calculate sediment and bedrock erosion rates at all nodes.

    Updates ``self._sed_erosion_term``, ``self._br_erosion_term``,
    ``self._Es`` and ``self._Er`` in place, based on the current slope,
    the precomputed discharge term ``self._Q_to_the_m`` and soil depth.
    """
    br = self.grid.at_node["bedrock__elevation"]
    H = self.grid.at_node["soil__depth"]
    # If the sp_crit thresholds are zero, the thresholded forms below
    # collapse to the plain stream-power expressions.
    if np.isclose(self._n_sp, 1.0):
        # Skip np.power for the common n = 1 case.
        S_to_the_n = self._slope
    else:
        S_to_the_n = np.power(self._slope, self._n_sp)
    omega_sed = self._K_sed * self._Q_to_the_m * S_to_the_n
    omega_br = self._K_br * self._Q_to_the_m * S_to_the_n
    # Guard against division by zero where thresholds are exactly zero.
    omega_sed_over_sp_crit = np.divide(
        omega_sed,
        self._sp_crit_sed,
        out=np.zeros_like(omega_sed),
        where=self._sp_crit_sed != 0,
    )
    omega_br_over_sp_crit = np.divide(
        omega_br,
        self._sp_crit_br,
        out=np.zeros_like(omega_br),
        where=self._sp_crit_br != 0,
    )
    # NOTE(review): by operator precedence the /(1 - phi) porosity factor
    # divides only the threshold term, not omega_sed as a whole -- confirm
    # this matches the intended volume-to-mass-flux conversion.
    self._sed_erosion_term = omega_sed - self._sp_crit_sed * (
        1.0 - np.exp(-omega_sed_over_sp_crit)
    ) / (
        1 - self._phi
    )  # convert from a volume to a mass flux.
    self._br_erosion_term = omega_br - self._sp_crit_br * (
        1.0 - np.exp(-omega_br_over_sp_crit)
    )
    # Do not allow for the formation of potholes (addition v2): bedrock
    # erosion is capped at the elevation drop to the receiver node.
    r = self._grid.at_node["flow__receiver_node"]
    br_e_max = br - br[r]
    br_e_max[br_e_max < 0] = 0
    self._br_erosion_term = np.minimum(self._br_erosion_term, br_e_max)
    # Partition erosion between sediment and bedrock using the soil-cover
    # factor exp(-H / H_star).
    self._Es = self._sed_erosion_term * (1.0 - np.exp(-H / self._H_star))
    self._Er = self._br_erosion_term * np.exp(-H / self._H_star)
    # If the soil layer becomes exceptionally thick (e.g. because of
    # landslide-derived sediment deposition) the algorithm will become
    # unstable because np.exp(x) with x > 709 yields inf values.
    # Therefore soil depth is temporarily capped at the thickness limit
    # and the remaining values are added back after the space component
    # has run.
    self._Es[H > self._thickness_lim] = self._sed_erosion_term[
        H > self._thickness_lim
    ]
    self._Er[H > self._thickness_lim] = 0
def run_one_step_basic(self, dt=10):
    """Erode and deposit sediment for a single timestep of length ``dt``.

    Parameters
    ----------
    dt : float, optional
        Timestep duration. NOTE(review): 10 is an unusual default for a
        landscape-evolution step -- confirm units/intent with callers.

    Returns
    -------
    vol_SSY_riv : float
        Value returned by the ``_sequential_ero_depo`` kernel.
    V_leaving_riv : float
        Sum of ``sediment__influx`` over all nodes, times ``dt``.
    """
    node_status = self.grid.status_at_node
    z = self.grid.at_node["topographic__elevation"]
    br = self.grid.at_node["bedrock__elevation"]
    H = self.grid.at_node["soil__depth"]
    link_to_rcvr = self.grid.at_node["flow__link_to_receiver_node"]
    area = self.grid.cell_area_at_node
    r = self.grid.at_node["flow__receiver_node"]
    # Reverse the upstream node ordering for the sequential kernel below.
    stack_flip_ud = np.flipud(self.grid.at_node["flow__upstream_node_order"])
    # Select core nodes where qs > 0
    stack_flip_ud_sel = stack_flip_ud[
        (node_status[stack_flip_ud] == NodeStatus.CORE)
        & (self._q[stack_flip_ud] > 0.0)
    ]
    # Steepest-descent slope toward each node's receiver.
    slope = (z - z[r]) / self._link_lengths[link_to_rcvr]
    # Choose a method for calculating erosion:
    self._Q_to_the_m[:] = np.power(self._q, self._m_sp)
    self._calc_erosion_rates()
    # Zero out erosion at flooded nodes: use the depression finder's
    # "flood_status_code" field when present, otherwise infer flooding
    # from negative (uphill) slopes.
    if "flood_status_code" in self.grid.at_node:
        flood_status = self.grid.at_node["flood_status_code"]
        flooded_nodes = np.nonzero(flood_status == _FLOODED)[0]
    else:
        flooded_nodes = np.nonzero([slope < 0])[1]
    self._Es[flooded_nodes] = 0.0
    self._Er[flooded_nodes] = 0.0
    self._sed_erosion_term[flooded_nodes] = 0.0
    self._br_erosion_term[flooded_nodes] = 0.0
    self.sediment_influx[:] = 0
    # K_sed may be a scalar; broadcast so the kernel can index per node.
    K_sed_vector = np.broadcast_to(self._K_sed, self._q.shape)
    vol_SSY_riv = _sequential_ero_depo(
        stack_flip_ud_sel,
        r,
        area,
        self._q,
        self._qs,
        self.sediment_influx,
        self._Es,
        self._Er,
        self._Q_to_the_m,
        slope,
        H,
        br,
        self._sed_erosion_term,
        self._br_erosion_term,
        K_sed_vector,
        self._v_s,
        self._phi,
        self._F_f,
        self._H_star,
        dt,
        self._thickness_lim,
    )
    V_leaving_riv = np.sum(self.sediment_influx) * dt
    # Update topography: elevation is bedrock plus soil depth.
    cores = self._grid.core_nodes
    z[cores] = br[cores] + H[cores]
    return vol_SSY_riv, V_leaving_riv
def run_one_step(self, dt):
    """Advance the component by one timestep of duration ``dt``.

    Thin wrapper around :meth:`run_one_step_basic`; returns the same
    ``(vol_SSY_riv, V_leaving_riv)`` pair.
    """
    return self.run_one_step_basic(dt)
| {
"content_hash": "bfc79bff239f68eb6371aa41f183dd2c",
"timestamp": "",
"source": "github",
"line_count": 561,
"max_line_length": 153,
"avg_line_length": 35.948306595365416,
"alnum_prop": 0.554966033619279,
"repo_name": "landlab/landlab",
"id": "925e1fb39e02e6ba1ba51ba54656403024575c84",
"size": "20191",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "landlab/components/space/space_large_scale_eroder.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "762"
},
{
"name": "Cython",
"bytes": "265735"
},
{
"name": "Gherkin",
"bytes": "1601"
},
{
"name": "Jupyter Notebook",
"bytes": "1373117"
},
{
"name": "Makefile",
"bytes": "2250"
},
{
"name": "Python",
"bytes": "4497175"
},
{
"name": "Roff",
"bytes": "445"
},
{
"name": "Shell",
"bytes": "1073"
},
{
"name": "TeX",
"bytes": "42252"
}
],
"symlink_target": ""
} |
from crosscompute.exceptions import DataTypeError
from crosscompute.types import DataType
from pytest import raises
class ADataType(DataType):

    @classmethod
    def load(Class, path):
        """Return an instance bound to *path*; fail loudly for the sentinel 'x'."""
        if path == 'x':
            raise Exception
        loaded = Class()
        loaded.path = path
        return loaded

    @classmethod
    def parse(Class, x, default_value=None):
        """Return 'a' for anything except the error sentinels 'd' and 'e'."""
        sentinel_errors = {'d': DataTypeError, 'e': Exception}
        if x in sentinel_errors:
            raise sentinel_errors[x]
        return 'a'
class BDataType(DataType):

    @classmethod
    def load(Class, path, default_value=None):
        """Return an instance that records both *path* and *default_value*."""
        loaded = Class()
        loaded.path, loaded.default_value = path, default_value
        return loaded
class CDataType(DataType):

    @classmethod
    def load_for_view(Class, path):
        """Return an instance bound to *path* via the view-specific hook."""
        loaded = Class()
        loaded.path = path
        return loaded
class TestDataType(object):

    def test_load_for_view_safely(self):
        """load_for_view_safely falls back to load() and swallows its errors."""
        x = ADataType.load_for_view_safely('a')
        assert x.path == 'a'
        # A load() that raises is converted into None.
        x = ADataType.load_for_view_safely('x')
        assert x is None
        x = BDataType.load_for_view_safely('b', 'bb')
        assert x.path == 'b'
        assert x.default_value == 'bb'
        x = CDataType.load_for_view_safely('c')
        assert x.path == 'c'

    def test_parse_safely(self):
        """parse_safely passes None through, re-raises DataTypeError, and
        returns the raw value when parse() raises a generic Exception."""
        assert ADataType.parse_safely(None) is None
        # Compare with == (not `is`): identity of equal string objects is an
        # interning detail, and `x is 'a'` raises SyntaxWarning on CPython.
        assert ADataType.parse_safely(1) == 'a'
        with raises(DataTypeError):
            ADataType.parse_safely('d')
        assert ADataType.parse_safely('e') == 'e'
| {
"content_hash": "789b0d0884d770da77e47f040c8773b6",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 53,
"avg_line_length": 24.215384615384615,
"alnum_prop": 0.5952986022871665,
"repo_name": "crosscompute/crosscompute",
"id": "691aeb4d0652ece923e253308ec9214b7b3fe258",
"size": "1574",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "experiments/0.7/tests/x_types.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "227"
},
{
"name": "HTML",
"bytes": "1017"
},
{
"name": "JavaScript",
"bytes": "5264"
},
{
"name": "Jinja",
"bytes": "4487"
},
{
"name": "Python",
"bytes": "270455"
},
{
"name": "Shell",
"bytes": "67"
}
],
"symlink_target": ""
} |
import math
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import warnings
from math import nan
from .ctrlutil import unwrap
from .bdalg import feedback
from .margins import stability_margins
from .exception import ControlMIMONotImplemented
from .statesp import StateSpace
from .xferfcn import TransferFunction
from . import config
# Public API of this module.
__all__ = ['bode_plot', 'nyquist_plot', 'gangof4_plot', 'singular_values_plot',
           'bode', 'nyquist', 'gangof4']

# Default values for module parameter variables (read via config._get_param)
_freqplot_defaults = {
    'freqplot.feature_periphery_decades': 1,
    'freqplot.number_of_samples': 1000,
    'freqplot.dB': False,  # Plot gain in dB
    'freqplot.deg': True,  # Plot phase in degrees
    'freqplot.Hz': False,  # Plot frequency in Hertz
    'freqplot.grid': True,  # Turn on grid for gain and phase
    'freqplot.wrap_phase': False,  # Wrap the phase plot at a given value

    # Deprecated keys are aliased to their 'freqplot.*' replacements.
    'deprecated.bode.dB': 'freqplot.dB',
    'deprecated.bode.deg': 'freqplot.deg',
    'deprecated.bode.Hz': 'freqplot.Hz',
    'deprecated.bode.grid': 'freqplot.grid',
    'deprecated.bode.wrap_phase': 'freqplot.wrap_phase',
}
#
# Main plotting functions
#
# This section of the code contains the functions for generating
# frequency domain plots
#
#
# Bode plot
#
def bode_plot(syslist, omega=None,
              plot=True, omega_limits=None, omega_num=None,
              margins=None, method='best', *args, **kwargs):
    """Bode plot for a system

    Plots a Bode plot for the system over a (optional) frequency range.

    Parameters
    ----------
    syslist : linsys
        List of linear input/output systems (single system is OK)
    omega : array_like
        List of frequencies in rad/sec to be used for frequency response
    dB : bool
        If True, plot result in dB. Default is false.
    Hz : bool
        If True, plot frequency in Hz (omega must be provided in rad/sec).
        Default value (False) set by config.defaults['freqplot.Hz']
    deg : bool
        If True, plot phase in degrees (else radians). Default value (True)
        config.defaults['freqplot.deg']
    plot : bool
        If True (default), plot magnitude and phase
    omega_limits : array_like of two values
        Limits of the to generate frequency vector.
        If Hz=True the limits are in Hz otherwise in rad/s.
    omega_num : int
        Number of samples to plot. Defaults to
        config.defaults['freqplot.number_of_samples'].
    margins : bool
        If True, plot gain and phase margin.
    method : method to use in computing margins (see :func:`stability_margins`)
    *args : :func:`matplotlib.pyplot.plot` positional properties, optional
        Additional arguments for `matplotlib` plots (color, linestyle, etc)
    **kwargs : :func:`matplotlib.pyplot.plot` keyword properties, optional
        Additional keywords (passed to `matplotlib`)

    Returns
    -------
    mag : ndarray (or list of ndarray if len(syslist) > 1))
        magnitude
    phase : ndarray (or list of ndarray if len(syslist) > 1))
        phase in radians
    omega : ndarray (or list of ndarray if len(syslist) > 1))
        frequency in rad/sec

    Other Parameters
    ----------------
    grid : bool
        If True, plot grid lines on gain and phase plots. Default is set by
        `config.defaults['freqplot.grid']`.
    initial_phase : float
        Set the reference phase to use for the lowest frequency. If set, the
        initial phase of the Bode plot will be set to the value closest to the
        value specified. Units are in either degrees or radians, depending on
        the `deg` parameter. Default is -180 if wrap_phase is False, 0 if
        wrap_phase is True.
    wrap_phase : bool or float
        If wrap_phase is `False` (default), then the phase will be unwrapped
        so that it is continuously increasing or decreasing. If wrap_phase is
        `True` the phase will be restricted to the range [-180, 180) (or
        [:math:`-\\pi`, :math:`\\pi`) radians). If `wrap_phase` is specified
        as a float, the phase will be offset by 360 degrees if it falls below
        the specified value. Default value is `False` and can be set using
        config.defaults['freqplot.wrap_phase'].

    The default values for Bode plot configuration parameters can be reset
    using the `config.defaults` dictionary, with module name 'bode'.

    Notes
    -----
    1. Alternatively, you may use the lower-level methods
       :meth:`LTI.frequency_response` or ``sys(s)`` or ``sys(z)`` or to
       generate the frequency response for a single system.

    2. If a discrete time model is given, the frequency response is plotted
       along the upper branch of the unit circle, using the mapping ``z =
       exp(1j * omega * dt)`` where `omega` ranges from 0 to `pi/dt` and `dt`
       is the discrete timebase. If timebase not specified (``dt=True``),
       `dt` is set to 1.

    Examples
    --------
    >>> sys = ss("1. -2; 3. -4", "5.; 7", "6. 8", "9.")
    >>> mag, phase, omega = bode(sys)
    """
    # Make a copy of the kwargs dictionary since we will modify it
    kwargs = dict(kwargs)

    # Check to see if legacy 'Plot' keyword was used
    if 'Plot' in kwargs:
        import warnings
        warnings.warn("'Plot' keyword is deprecated in bode_plot; use 'plot'",
                      FutureWarning)
        # Map 'Plot' keyword to 'plot' keyword
        plot = kwargs.pop('Plot')

    # Get values for params (and pop from list to allow keyword use in plot)
    dB = config._get_param(
        'freqplot', 'dB', kwargs, _freqplot_defaults, pop=True)
    deg = config._get_param(
        'freqplot', 'deg', kwargs, _freqplot_defaults, pop=True)
    Hz = config._get_param(
        'freqplot', 'Hz', kwargs, _freqplot_defaults, pop=True)
    grid = config._get_param(
        'freqplot', 'grid', kwargs, _freqplot_defaults, pop=True)
    plot = config._get_param('freqplot', 'plot', plot, True)
    margins = config._get_param(
        'freqplot', 'margins', margins, False)
    wrap_phase = config._get_param(
        'freqplot', 'wrap_phase', kwargs, _freqplot_defaults, pop=True)
    initial_phase = config._get_param(
        'freqplot', 'initial_phase', kwargs, None, pop=True)
    omega_num = config._get_param('freqplot', 'number_of_samples', omega_num)

    # If argument was a singleton, turn it into a tuple
    if not isinstance(syslist, (list, tuple)):
        syslist = (syslist,)

    omega, omega_range_given = _determine_omega_vector(
        syslist, omega, omega_limits, omega_num, Hz=Hz)

    if plot:
        # Set up the axes with labels so that multiple calls to
        # bode_plot will superimpose the data. This was implicit
        # before matplotlib 2.1, but changed after that (See
        # https://github.com/matplotlib/matplotlib/issues/9024).
        # The code below should work on all cases.

        # Get the current figure
        if 'sisotool' in kwargs:
            fig = kwargs.pop('fig')
            ax_mag = fig.axes[0]
            ax_phase = fig.axes[2]
            sisotool = kwargs.pop('sisotool')
        else:
            fig = plt.gcf()
            ax_mag = None
            ax_phase = None
            sisotool = False

            # Get the current axes if they already exist
            for ax in fig.axes:
                if ax.get_label() == 'control-bode-magnitude':
                    ax_mag = ax
                elif ax.get_label() == 'control-bode-phase':
                    ax_phase = ax

            # If no axes present, create them from scratch
            if ax_mag is None or ax_phase is None:
                plt.clf()
                ax_mag = plt.subplot(211, label='control-bode-magnitude')
                ax_phase = plt.subplot(
                    212, label='control-bode-phase', sharex=ax_mag)

    mags, phases, omegas, nyquistfrqs = [], [], [], []
    for sys in syslist:
        if not sys.issiso():
            # TODO: Add MIMO bode plots.
            raise ControlMIMONotImplemented(
                "Bode is currently only implemented for SISO systems.")
        else:
            omega_sys = np.asarray(omega)
            if sys.isdtime(strict=True):
                nyquistfrq = math.pi / sys.dt
                if not omega_range_given:
                    # limit up to and including nyquist frequency
                    omega_sys = np.hstack((
                        omega_sys[omega_sys < nyquistfrq], nyquistfrq))
            else:
                nyquistfrq = None

            mag, phase, omega_sys = sys.frequency_response(omega_sys)
            mag = np.atleast_1d(mag)
            phase = np.atleast_1d(phase)

            #
            # Post-process the phase to handle initial value and wrapping
            #
            if initial_phase is None:
                # Start phase in the range 0 to -360 w/ initial phase = -180
                # If wrap_phase is true, use 0 instead (phase \in (-pi, pi])
                initial_phase = -math.pi if wrap_phase is not True else 0
            elif isinstance(initial_phase, (int, float)):
                # Allow the user to override the default calculation
                if deg:
                    initial_phase = initial_phase/180. * math.pi
            else:
                raise ValueError("initial_phase must be a number.")

            # Shift the phase if needed
            if abs(phase[0] - initial_phase) > math.pi:
                phase -= 2*math.pi * \
                    round((phase[0] - initial_phase) / (2*math.pi))

            # Phase wrapping
            if wrap_phase is False:
                phase = unwrap(phase)  # unwrap the phase
            elif wrap_phase is True:
                pass  # default calculation OK
            elif isinstance(wrap_phase, (int, float)):
                phase = unwrap(phase)  # unwrap the phase first
                if deg:
                    wrap_phase *= math.pi/180.

                # Shift the phase if it is below the wrap_phase
                phase += 2*math.pi * np.maximum(
                    0, np.ceil((wrap_phase - phase)/(2*math.pi)))
            else:
                raise ValueError("wrap_phase must be bool or float.")

            mags.append(mag)
            phases.append(phase)
            omegas.append(omega_sys)
            nyquistfrqs.append(nyquistfrq)
            # Get the dimensions of the current axis, which we will divide up
            # TODO: Not current implemented; just use subplot for now

            if plot:
                nyquistfrq_plot = None
                if Hz:
                    omega_plot = omega_sys / (2. * math.pi)
                    if nyquistfrq:
                        nyquistfrq_plot = nyquistfrq / (2. * math.pi)
                else:
                    omega_plot = omega_sys
                    if nyquistfrq:
                        nyquistfrq_plot = nyquistfrq
                phase_plot = phase * 180. / math.pi if deg else phase
                mag_plot = mag

                if nyquistfrq_plot:
                    # append data for vertical nyquist freq indicator line.
                    # if this extra nyquist line is plotted in a single plot
                    # command then line order is preserved when
                    # creating a legend eg. legend(('sys1', 'sys2'))
                    omega_nyq_line = np.array(
                        (np.nan, nyquistfrq_plot, nyquistfrq_plot))
                    omega_plot = np.hstack((omega_plot, omega_nyq_line))
                    mag_nyq_line = np.array((
                        np.nan, 0.7*min(mag_plot), 1.3*max(mag_plot)))
                    mag_plot = np.hstack((mag_plot, mag_nyq_line))
                    phase_range = max(phase_plot) - min(phase_plot)
                    phase_nyq_line = np.array(
                        (np.nan,
                         min(phase_plot) - 0.2 * phase_range,
                         max(phase_plot) + 0.2 * phase_range))
                    phase_plot = np.hstack((phase_plot, phase_nyq_line))

                #
                # Magnitude plot
                #
                if dB:
                    ax_mag.semilogx(omega_plot, 20 * np.log10(mag_plot),
                                    *args, **kwargs)
                else:
                    ax_mag.loglog(omega_plot, mag_plot, *args, **kwargs)

                # Add a grid to the plot + labeling
                ax_mag.grid(grid and not margins, which='both')
                ax_mag.set_ylabel("Magnitude (dB)" if dB else "Magnitude")

                #
                # Phase plot
                #

                # Plot the data
                ax_phase.semilogx(omega_plot, phase_plot, *args, **kwargs)

                # Show the phase and gain margins in the plot
                if margins:
                    # Compute stability margins for the system
                    margin = stability_margins(sys, method=method)
                    gm, pm, Wcg, Wcp = (margin[i] for i in (0, 1, 3, 4))

                    # Figure out sign of the phase at the first gain crossing
                    # (needed if phase_wrap is True)
                    phase_at_cp = phases[0][(np.abs(omegas[0] - Wcp)).argmin()]
                    if phase_at_cp >= 0.:
                        phase_limit = 180.
                    else:
                        phase_limit = -180.

                    if Hz:
                        Wcg, Wcp = Wcg/(2*math.pi), Wcp/(2*math.pi)

                    # Draw lines at gain and phase limits
                    ax_mag.axhline(y=0 if dB else 1, color='k', linestyle=':',
                                   zorder=-20)
                    ax_phase.axhline(y=phase_limit if deg else
                                     math.radians(phase_limit),
                                     color='k', linestyle=':', zorder=-20)
                    mag_ylim = ax_mag.get_ylim()
                    phase_ylim = ax_phase.get_ylim()

                    # Annotate the phase margin (if it exists).
                    # BUGFIX: `Wcp != float('nan')` is always True because NaN
                    # never compares equal to anything; use math.isnan so we
                    # skip the annotation when there is no phase crossover.
                    if pm != float('inf') and not math.isnan(Wcp):
                        if dB:
                            ax_mag.semilogx(
                                [Wcp, Wcp], [0., -1e5],
                                color='k', linestyle=':', zorder=-20)
                        else:
                            ax_mag.loglog(
                                [Wcp, Wcp], [1., 1e-8],
                                color='k', linestyle=':', zorder=-20)

                        if deg:
                            ax_phase.semilogx(
                                [Wcp, Wcp], [1e5, phase_limit + pm],
                                color='k', linestyle=':', zorder=-20)
                            ax_phase.semilogx(
                                [Wcp, Wcp], [phase_limit + pm, phase_limit],
                                color='k', zorder=-20)
                        else:
                            ax_phase.semilogx(
                                [Wcp, Wcp], [1e5, math.radians(phase_limit) +
                                             math.radians(pm)],
                                color='k', linestyle=':', zorder=-20)
                            ax_phase.semilogx(
                                [Wcp, Wcp], [math.radians(phase_limit) +
                                             math.radians(pm),
                                             math.radians(phase_limit)],
                                color='k', zorder=-20)

                    # Annotate the gain margin (if it exists).
                    # BUGFIX: same NaN-comparison issue as above for Wcg.
                    if gm != float('inf') and not math.isnan(Wcg):
                        if dB:
                            ax_mag.semilogx(
                                [Wcg, Wcg], [-20.*np.log10(gm), -1e5],
                                color='k', linestyle=':', zorder=-20)
                            ax_mag.semilogx(
                                [Wcg, Wcg], [0, -20*np.log10(gm)],
                                color='k', zorder=-20)
                        else:
                            ax_mag.loglog(
                                [Wcg, Wcg], [1./gm, 1e-8], color='k',
                                linestyle=':', zorder=-20)
                            ax_mag.loglog(
                                [Wcg, Wcg], [1., 1./gm], color='k', zorder=-20)

                        if deg:
                            ax_phase.semilogx(
                                [Wcg, Wcg], [0, phase_limit],
                                color='k', linestyle=':', zorder=-20)
                        else:
                            ax_phase.semilogx(
                                [Wcg, Wcg], [0, math.radians(phase_limit)],
                                color='k', linestyle=':', zorder=-20)

                    ax_mag.set_ylim(mag_ylim)
                    ax_phase.set_ylim(phase_ylim)

                    if sisotool:
                        ax_mag.text(
                            0.04, 0.06,
                            'G.M.: %.2f %s\nFreq: %.2f %s' %
                            (20*np.log10(gm) if dB else gm,
                             'dB ' if dB else '',
                             Wcg, 'Hz' if Hz else 'rad/s'),
                            horizontalalignment='left',
                            verticalalignment='bottom',
                            transform=ax_mag.transAxes,
                            fontsize=8 if int(mpl.__version__[0]) == 1 else 6)
                        ax_phase.text(
                            0.04, 0.06,
                            'P.M.: %.2f %s\nFreq: %.2f %s' %
                            (pm if deg else math.radians(pm),
                             'deg' if deg else 'rad',
                             Wcp, 'Hz' if Hz else 'rad/s'),
                            horizontalalignment='left',
                            verticalalignment='bottom',
                            transform=ax_phase.transAxes,
                            fontsize=8 if int(mpl.__version__[0]) == 1 else 6)
                    else:
                        plt.suptitle(
                            "Gm = %.2f %s(at %.2f %s), "
                            "Pm = %.2f %s (at %.2f %s)" %
                            (20*np.log10(gm) if dB else gm,
                             'dB ' if dB else '',
                             Wcg, 'Hz' if Hz else 'rad/s',
                             pm if deg else math.radians(pm),
                             'deg' if deg else 'rad',
                             Wcp, 'Hz' if Hz else 'rad/s'))

                # Add a grid to the plot + labeling
                ax_phase.set_ylabel("Phase (deg)" if deg else "Phase (rad)")

                def gen_zero_centered_series(val_min, val_max, period):
                    # Tick positions at multiples of `period` spanning the
                    # y-limits (with a small tolerance at each end).
                    v1 = np.ceil(val_min / period - 0.2)
                    v2 = np.floor(val_max / period + 0.2)
                    return np.arange(v1, v2 + 1) * period
                if deg:
                    ylim = ax_phase.get_ylim()
                    ax_phase.set_yticks(gen_zero_centered_series(
                        ylim[0], ylim[1], 45.))
                    ax_phase.set_yticks(gen_zero_centered_series(
                        ylim[0], ylim[1], 15.), minor=True)
                else:
                    ylim = ax_phase.get_ylim()
                    ax_phase.set_yticks(gen_zero_centered_series(
                        ylim[0], ylim[1], math.pi / 4.))
                    ax_phase.set_yticks(gen_zero_centered_series(
                        ylim[0], ylim[1], math.pi / 12.), minor=True)
                ax_phase.grid(grid and not margins, which='both')
                # ax_mag.grid(which='minor', alpha=0.3)
                # ax_mag.grid(which='major', alpha=0.9)
                # ax_phase.grid(which='minor', alpha=0.3)
                # ax_phase.grid(which='major', alpha=0.9)

                # Label the frequency axis
                ax_phase.set_xlabel("Frequency (Hz)" if Hz
                                    else "Frequency (rad/sec)")

    if len(syslist) == 1:
        return mags[0], phases[0], omegas[0]
    else:
        return mags, phases, omegas
#
# Nyquist plot
#
# Default values for module parameter variables
# Default values for module parameter variables (read via config._get_param)
_nyquist_defaults = {
    'nyquist.primary_style': ['-', '-.'],  # style for primary curve
    'nyquist.mirror_style': ['--', ':'],  # style for mirror curve
    'nyquist.arrows': 2,  # number of arrows around curve
    'nyquist.arrow_size': 8,  # pixel size for arrows
    'nyquist.encirclement_threshold': 0.05,  # warning threshold
    'nyquist.indent_radius': 1e-4,  # indentation radius
    'nyquist.indent_direction': 'right',  # indentation direction
    'nyquist.indent_points': 50,  # number of points to insert
    'nyquist.max_curve_magnitude': 20,  # clip large values
    'nyquist.max_curve_offset': 0.02,  # offset of primary/mirror
    'nyquist.start_marker': 'o',  # marker at start of curve
    'nyquist.start_marker_size': 4,  # size of the marker
}
def nyquist_plot(
syslist, omega=None, plot=True, omega_limits=None, omega_num=None,
label_freq=0, color=None, return_contour=False,
warn_encirclements=True, warn_nyquist=True, **kwargs):
"""Nyquist plot for a system
Plots a Nyquist plot for the system over a (optional) frequency range.
The curve is computed by evaluating the Nyqist segment along the positive
imaginary axis, with a mirror image generated to reflect the negative
imaginary axis. Poles on or near the imaginary axis are avoided using a
small indentation. The portion of the Nyquist contour at infinity is not
explicitly computed (since it maps to a constant value for any system with
a proper transfer function).
Parameters
----------
syslist : list of LTI
List of linear input/output systems (single system is OK). Nyquist
curves for each system are plotted on the same graph.
plot : boolean
If True, plot magnitude
omega : array_like
Set of frequencies to be evaluated, in rad/sec.
omega_limits : array_like of two values
Limits to the range of frequencies. Ignored if omega is provided, and
auto-generated if omitted.
omega_num : int
Number of frequency samples to plot. Defaults to
config.defaults['freqplot.number_of_samples'].
color : string
Used to specify the color of the line and arrowhead.
return_contour : bool, optional
If 'True', return the contour used to evaluate the Nyquist plot.
**kwargs : :func:`matplotlib.pyplot.plot` keyword properties, optional
Additional keywords (passed to `matplotlib`)
Returns
-------
count : int (or list of int if len(syslist) > 1)
Number of encirclements of the point -1 by the Nyquist curve. If
multiple systems are given, an array of counts is returned.
contour : ndarray (or list of ndarray if len(syslist) > 1)), optional
The contour used to create the primary Nyquist curve segment, returned
if `return_contour` is Tue. To obtain the Nyquist curve values,
evaluate system(s) along contour.
Additional Parameters
---------------------
arrows : int or 1D/2D array of floats, optional
Specify the number of arrows to plot on the Nyquist curve. If an
integer is passed. that number of equally spaced arrows will be
plotted on each of the primary segment and the mirror image. If a 1D
array is passed, it should consist of a sorted list of floats between
0 and 1, indicating the location along the curve to plot an arrow. If
a 2D array is passed, the first row will be used to specify arrow
locations for the primary curve and the second row will be used for
the mirror image.
arrow_size : float, optional
Arrowhead width and length (in display coordinates). Default value is
8 and can be set using config.defaults['nyquist.arrow_size'].
arrow_style : matplotlib.patches.ArrowStyle, optional
Define style used for Nyquist curve arrows (overrides `arrow_size`).
encirclement_threshold : float, optional
Define the threshold for generating a warning if the number of net
encirclements is a non-integer value. Default value is 0.05 and can
be set using config.defaults['nyquist.encirclement_threshold'].
indent_direction : str, optional
For poles on the imaginary axis, set the direction of indentation to
be 'right' (default), 'left', or 'none'.
indent_points : int, optional
Number of points to insert in the Nyquist contour around poles that
are at or near the imaginary axis.
indent_radius : float, optional
Amount to indent the Nyquist contour around poles on or near the
imaginary axis. Portions of the Nyquist plot corresponding to indented
portions of the contour are plotted using a different line style.
label_freq : int, optiona
Label every nth frequency on the plot. If not specified, no labels
are generated.
max_curve_magnitude : float, optional
Restrict the maximum magnitude of the Nyquist plot to this value.
Portions of the Nyquist plot whose magnitude is restricted are
plotted using a different line style.
max_curve_offset : float, optional
When plotting scaled portion of the Nyquist plot, increase/decrease
the magnitude by this fraction of the max_curve_magnitude to allow
any overlaps between the primary and mirror curves to be avoided.
mirror_style : [str, str] or False
Linestyles for mirror image of the Nyquist curve. The first element
is used for unscaled portions of the Nyquist curve, the second element
is used for portions that are scaled (using max_curve_magnitude). If
`False` then omit completely. Default linestyle (['--', ':']) is
determined by config.defaults['nyquist.mirror_style'].
primary_style : [str, str], optional
Linestyles for primary image of the Nyquist curve. The first
element is used for unscaled portions of the Nyquist curve,
the second element is used for portions that are scaled (using
max_curve_magnitude). Default linestyle (['-', '-.']) is
determined by config.defaults['nyquist.mirror_style'].
start_marker : str, optional
Matplotlib marker to use to mark the starting point of the Nyquist
plot. Defaults value is 'o' and can be set using
config.defaults['nyquist.start_marker'].
start_marker_size : float, optional
Start marker size (in display coordinates). Default value is
4 and can be set using config.defaults['nyquist.start_marker_size'].
warn_nyquist : bool, optional
If set to 'False', turn off warnings about frequencies above Nyquist.
warn_encirclements : bool, optional
If set to 'False', turn off warnings about number of encirclements not
meeting the Nyquist criterion.
Notes
-----
1. If a discrete time model is given, the frequency response is computed
along the upper branch of the unit circle, using the mapping ``z =
exp(1j * omega * dt)`` where `omega` ranges from 0 to `pi/dt` and `dt`
is the discrete timebase. If timebase not specified (``dt=True``),
`dt` is set to 1.
2. If a continuous-time system contains poles on or near the imaginary
axis, a small indentation will be used to avoid the pole. The radius
of the indentation is given by `indent_radius` and it is taken to the
right of stable poles and the left of unstable poles. If a pole is
exactly on the imaginary axis, the `indent_direction` parameter can be
used to set the direction of indentation. Setting `indent_direction`
to `none` will turn off indentation. If `return_contour` is True, the
exact contour used for evaluation is returned.
Examples
--------
>>> sys = ss([[1, -2], [3, -4]], [[5], [7]], [[6, 8]], [[9]])
>>> count = nyquist_plot(sys)
"""
# Check to see if legacy 'Plot' keyword was used
if 'Plot' in kwargs:
warnings.warn("'Plot' keyword is deprecated in nyquist_plot; "
"use 'plot'", FutureWarning)
# Map 'Plot' keyword to 'plot' keyword
plot = kwargs.pop('Plot')
# Check to see if legacy 'labelFreq' keyword was used
if 'labelFreq' in kwargs:
warnings.warn("'labelFreq' keyword is deprecated in nyquist_plot; "
"use 'label_freq'", FutureWarning)
# Map 'labelFreq' keyword to 'label_freq' keyword
label_freq = kwargs.pop('labelFreq')
# Check to see if legacy 'arrow_width' or 'arrow_length' were used
if 'arrow_width' in kwargs or 'arrow_length' in kwargs:
warnings.warn(
"'arrow_width' and 'arrow_length' keywords are deprecated in "
"nyquist_plot; use `arrow_size` instead", FutureWarning)
kwargs['arrow_size'] = \
(kwargs.get('arrow_width', 0) + kwargs.get('arrow_length', 0)) / 2
kwargs.pop('arrow_width', False)
kwargs.pop('arrow_length', False)
# Get values for params (and pop from list to allow keyword use in plot)
omega_num_given = omega_num is not None
omega_num = config._get_param('freqplot', 'number_of_samples', omega_num)
arrows = config._get_param(
'nyquist', 'arrows', kwargs, _nyquist_defaults, pop=True)
arrow_size = config._get_param(
'nyquist', 'arrow_size', kwargs, _nyquist_defaults, pop=True)
arrow_style = config._get_param('nyquist', 'arrow_style', kwargs, None)
indent_radius = config._get_param(
'nyquist', 'indent_radius', kwargs, _nyquist_defaults, pop=True)
encirclement_threshold = config._get_param(
'nyquist', 'encirclement_threshold', kwargs,
_nyquist_defaults, pop=True)
indent_direction = config._get_param(
'nyquist', 'indent_direction', kwargs, _nyquist_defaults, pop=True)
indent_points = config._get_param(
'nyquist', 'indent_points', kwargs, _nyquist_defaults, pop=True)
max_curve_magnitude = config._get_param(
'nyquist', 'max_curve_magnitude', kwargs, _nyquist_defaults, pop=True)
max_curve_offset = config._get_param(
'nyquist', 'max_curve_offset', kwargs, _nyquist_defaults, pop=True)
start_marker = config._get_param(
'nyquist', 'start_marker', kwargs, _nyquist_defaults, pop=True)
start_marker_size = config._get_param(
'nyquist', 'start_marker_size', kwargs, _nyquist_defaults, pop=True)
# Set line styles for the curves
def _parse_linestyle(style_name, allow_false=False):
style = config._get_param(
'nyquist', style_name, kwargs, _nyquist_defaults, pop=True)
if isinstance(style, str):
# Only one style provided, use the default for the other
style = [style, _nyquist_defaults['nyquist.' + style_name][1]]
warnings.warn(
"use of a single string for linestyle will be deprecated "
" in a future release", PendingDeprecationWarning)
if (allow_false and style is False) or \
(isinstance(style, list) and len(style) == 2):
return style
else:
raise ValueError(f"invalid '{style_name}': {style}")
primary_style = _parse_linestyle('primary_style')
mirror_style = _parse_linestyle('mirror_style', allow_false=True)
# If argument was a singleton, turn it into a tuple
if not isinstance(syslist, (list, tuple)):
syslist = (syslist,)
# Determine the range of frequencies to use, based on args/features
omega, omega_range_given = _determine_omega_vector(
syslist, omega, omega_limits, omega_num, feature_periphery_decades=2)
# If omega was not specified explicitly, start at omega = 0
if not omega_range_given:
if omega_num_given:
# Just reset the starting point
omega[0] = 0.0
else:
# Insert points between the origin and the first frequency point
omega = np.concatenate((
np.linspace(0, omega[0], indent_points), omega[1:]))
# Go through each system and keep track of the results
counts, contours = [], []
for sys in syslist:
if not sys.issiso():
# TODO: Add MIMO nyquist plots.
raise ControlMIMONotImplemented(
"Nyquist plot currently only supports SISO systems.")
# Figure out the frequency range
omega_sys = np.asarray(omega)
# Determine the contour used to evaluate the Nyquist curve
if sys.isdtime(strict=True):
# Restrict frequencies for discrete-time systems
nyquistfrq = math.pi / sys.dt
if not omega_range_given:
# limit up to and including nyquist frequency
omega_sys = np.hstack((
omega_sys[omega_sys < nyquistfrq], nyquistfrq))
# Issue a warning if we are sampling above Nyquist
if np.any(omega_sys * sys.dt > np.pi) and warn_nyquist:
warnings.warn("evaluation above Nyquist frequency")
# do indentations in s-plane where it is more convenient
splane_contour = 1j * omega_sys
# Bend the contour around any poles on/near the imaginary axis
# TODO: smarter indent radius that depends on dcgain of system
# and timebase of discrete system.
if isinstance(sys, (StateSpace, TransferFunction)) \
and indent_direction != 'none':
if sys.isctime():
splane_poles = sys.poles()
splane_cl_poles = sys.feedback().poles()
else:
# map z-plane poles to s-plane, ignoring any at the origin
# because we don't need to indent for them
zplane_poles = sys.poles()
zplane_poles = zplane_poles[~np.isclose(abs(zplane_poles), 0.)]
splane_poles = np.log(zplane_poles) / sys.dt
zplane_cl_poles = sys.feedback().poles()
zplane_cl_poles = zplane_cl_poles[
~np.isclose(abs(zplane_poles), 0.)]
splane_cl_poles = np.log(zplane_cl_poles) / sys.dt
#
# Check to make sure indent radius is small enough
#
# If there is a closed loop pole that is near the imaginary access
# at a point that is near an open loop pole, it is possible that
# indentation might skip or create an extraneous encirclement.
# We check for that situation here and generate a warning if that
# could happen.
#
for p_cl in splane_cl_poles:
# See if any closed loop poles are near the imaginary axis
if abs(p_cl.real) <= indent_radius:
# See if any open loop poles are close to closed loop poles
p_ol = splane_poles[
(np.abs(splane_poles - p_cl)).argmin()]
if abs(p_ol - p_cl) <= indent_radius and \
warn_encirclements:
warnings.warn(
"indented contour may miss closed loop pole; "
"consider reducing indent_radius to be less than "
f"{abs(p_ol - p_cl):5.2g}", stacklevel=2)
#
# See if we should add some frequency points near imaginary poles
#
for p in splane_poles:
# See if we need to process this pole (skip if on the negative
# imaginary axis or not near imaginary axis + user override)
if p.imag < 0 or abs(p.real) > indent_radius or \
omega_range_given:
continue
# Find the frequencies before the pole frequency
below_points = np.argwhere(
splane_contour.imag - abs(p.imag) < -indent_radius)
if below_points.size > 0:
first_point = below_points[-1].item()
start_freq = p.imag - indent_radius
else:
# Add the points starting at the beginning of the contour
assert splane_contour[0] == 0
first_point = 0
start_freq = 0
# Find the frequencies after the pole frequency
above_points = np.argwhere(
splane_contour.imag - abs(p.imag) > indent_radius)
last_point = above_points[0].item()
# Add points for half/quarter circle around pole frequency
# (these will get indented left or right below)
splane_contour = np.concatenate((
splane_contour[0:first_point+1],
(1j * np.linspace(
start_freq, p.imag + indent_radius, indent_points)),
splane_contour[last_point:]))
# Indent points that are too close to a pole
for i, s in enumerate(splane_contour):
# Find the nearest pole
p = splane_poles[(np.abs(splane_poles - s)).argmin()]
# See if we need to indent around it
if abs(s - p) < indent_radius:
# Figure out how much to offset (simple trigonometry)
offset = np.sqrt(indent_radius ** 2 - (s - p).imag ** 2) \
- (s - p).real
# Figure out which way to offset the contour point
if p.real < 0 or (p.real == 0 and
indent_direction == 'right'):
# Indent to the right
splane_contour[i] += offset
elif p.real > 0 or (p.real == 0 and
indent_direction == 'left'):
# Indent to the left
splane_contour[i] -= offset
else:
raise ValueError("unknown value for indent_direction")
# change contour to z-plane if necessary
if sys.isctime():
contour = splane_contour
else:
contour = np.exp(splane_contour * sys.dt)
# Compute the primary curve
resp = sys(contour)
# Compute CW encirclements of -1 by integrating the (unwrapped) angle
phase = -unwrap(np.angle(resp + 1))
encirclements = np.sum(np.diff(phase)) / np.pi
count = int(np.round(encirclements, 0))
# Let the user know if the count might not make sense
if abs(encirclements - count) > encirclement_threshold and \
warn_encirclements:
warnings.warn(
"number of encirclements was a non-integer value; this can"
" happen is contour is not closed, possibly based on a"
" frequency range that does not include zero.")
#
# Make sure that the enciriclements match the Nyquist criterion
#
# If the user specifies the frequency points to use, it is possible
# to miss enciriclements, so we check here to make sure that the
# Nyquist criterion is actually satisfied.
#
if isinstance(sys, (StateSpace, TransferFunction)):
# Count the number of open/closed loop RHP poles
if sys.isctime():
if indent_direction == 'right':
P = (sys.poles().real > 0).sum()
else:
P = (sys.poles().real >= 0).sum()
Z = (sys.feedback().poles().real >= 0).sum()
else:
if indent_direction == 'right':
P = (np.abs(sys.poles()) > 1).sum()
else:
P = (np.abs(sys.poles()) >= 1).sum()
Z = (np.abs(sys.feedback().poles()) >= 1).sum()
# Check to make sure the results make sense; warn if not
if Z != count + P and warn_encirclements:
warnings.warn(
"number of encirclements does not match Nyquist criterion;"
" check frequency range and indent radius/direction",
UserWarning, stacklevel=2)
elif indent_direction == 'none' and any(sys.poles().real == 0) and \
warn_encirclements:
warnings.warn(
"system has pure imaginary poles but indentation is"
" turned off; results may be meaningless",
RuntimeWarning, stacklevel=2)
counts.append(count)
contours.append(contour)
if plot:
# Parse the arrows keyword
if not arrows:
arrow_pos = []
elif isinstance(arrows, int):
N = arrows
# Space arrows out, starting midway along each "region"
arrow_pos = np.linspace(0.5/N, 1 + 0.5/N, N, endpoint=False)
elif isinstance(arrows, (list, np.ndarray)):
arrow_pos = np.sort(np.atleast_1d(arrows))
else:
raise ValueError("unknown or unsupported arrow location")
# Set the arrow style
if arrow_style is None:
arrow_style = mpl.patches.ArrowStyle(
'simple', head_width=arrow_size, head_length=arrow_size)
# Find the different portions of the curve (with scaled pts marked)
reg_mask = np.logical_or(
np.abs(resp) > max_curve_magnitude,
splane_contour.real != 0)
# reg_mask = np.logical_or(
# np.abs(resp.real) > max_curve_magnitude,
# np.abs(resp.imag) > max_curve_magnitude)
scale_mask = ~reg_mask \
& np.concatenate((~reg_mask[1:], ~reg_mask[-1:])) \
& np.concatenate((~reg_mask[0:1], ~reg_mask[:-1]))
# Rescale the points with large magnitude
rescale = np.logical_and(
reg_mask, abs(resp) > max_curve_magnitude)
resp[rescale] *= max_curve_magnitude / abs(resp[rescale])
# Plot the regular portions of the curve (and grab the color)
x_reg = np.ma.masked_where(reg_mask, resp.real)
y_reg = np.ma.masked_where(reg_mask, resp.imag)
p = plt.plot(
x_reg, y_reg, primary_style[0], color=color, **kwargs)
c = p[0].get_color()
# Figure out how much to offset the curve: the offset goes from
# zero at the start of the scaled section to max_curve_offset as
# we move along the curve
curve_offset = _compute_curve_offset(
resp, scale_mask, max_curve_offset)
# Plot the scaled sections of the curve (changing linestyle)
x_scl = np.ma.masked_where(scale_mask, resp.real)
y_scl = np.ma.masked_where(scale_mask, resp.imag)
plt.plot(
x_scl * (1 + curve_offset), y_scl * (1 + curve_offset),
primary_style[1], color=c, **kwargs)
# Plot the primary curve (invisible) for setting arrows
x, y = resp.real.copy(), resp.imag.copy()
x[reg_mask] *= (1 + curve_offset[reg_mask])
y[reg_mask] *= (1 + curve_offset[reg_mask])
p = plt.plot(x, y, linestyle='None', color=c, **kwargs)
# Add arrows
ax = plt.gca()
_add_arrows_to_line2D(
ax, p[0], arrow_pos, arrowstyle=arrow_style, dir=1)
# Plot the mirror image
if mirror_style is not False:
# Plot the regular and scaled segments
plt.plot(
x_reg, -y_reg, mirror_style[0], color=c, **kwargs)
plt.plot(
x_scl * (1 - curve_offset),
-y_scl * (1 - curve_offset),
mirror_style[1], color=c, **kwargs)
# Add the arrows (on top of an invisible contour)
x, y = resp.real.copy(), resp.imag.copy()
x[reg_mask] *= (1 - curve_offset[reg_mask])
y[reg_mask] *= (1 - curve_offset[reg_mask])
p = plt.plot(x, -y, linestyle='None', color=c, **kwargs)
_add_arrows_to_line2D(
ax, p[0], arrow_pos, arrowstyle=arrow_style, dir=-1)
# Mark the start of the curve
if start_marker:
plt.plot(resp[0].real, resp[0].imag, start_marker,
color=c, markersize=start_marker_size)
# Mark the -1 point
plt.plot([-1], [0], 'r+')
# Label the frequencies of the points
if label_freq:
ind = slice(None, None, label_freq)
for xpt, ypt, omegapt in zip(x[ind], y[ind], omega_sys[ind]):
# Convert to Hz
f = omegapt / (2 * np.pi)
# Factor out multiples of 1000 and limit the
# result to the range [-8, 8].
pow1000 = max(min(get_pow1000(f), 8), -8)
# Get the SI prefix.
prefix = gen_prefix(pow1000)
# Apply the text. (Use a space before the text to
# prevent overlap with the data.)
#
# np.round() is used because 0.99... appears
# instead of 1.0, and this would otherwise be
# truncated to 0.
plt.text(xpt, ypt, ' ' +
str(int(np.round(f / 1000 ** pow1000, 0))) + ' ' +
prefix + 'Hz')
if plot:
ax = plt.gca()
ax.set_xlabel("Real axis")
ax.set_ylabel("Imaginary axis")
ax.grid(color="lightgray")
# "Squeeze" the results
if len(syslist) == 1:
counts, contours = counts[0], contours[0]
# Return counts and (optionally) the contour we used
return (counts, contours) if return_contour else counts
# Internal function to add arrows to a curve
def _add_arrows_to_line2D(
        axes, line, arrow_locs=(0.2, 0.4, 0.6, 0.8),
        arrowstyle='-|>', arrowsize=1, dir=1, transform=None):
    """
    Add arrows to a matplotlib.lines.Line2D at selected locations.

    Parameters:
    -----------
    axes: Axes object as returned by axes command (or gca)
    line: Line2D object as returned by plot command
    arrow_locs: list of locations where to insert arrows, % of total length
    arrowstyle: style of the arrow
    arrowsize: size of the arrow (currently unused; retained so existing
        callers passing it by keyword keep working)
    dir: 1 to orient arrows along the direction of the curve, -1 to
        reverse them on each segment
    transform: a matplotlib transform instance, default to data coordinates

    Returns:
    --------
    arrows: list of arrows

    Based on https://stackoverflow.com/questions/26911898/
    """
    # NOTE: the default for arrow_locs was previously a mutable list;
    # a tuple avoids the shared-mutable-default pitfall and is otherwise
    # interchangeable (it is only iterated below).
    if not isinstance(line, mpl.lines.Line2D):
        raise ValueError("expected a matplotlib.lines.Line2D object")
    x, y = line.get_xdata(), line.get_ydata()
    arrow_kw = {
        "arrowstyle": arrowstyle,
    }
    # Multicolor/multiwidth lines store per-segment ndarrays, which the
    # single FancyArrowPatch created per location cannot represent.
    color = line.get_color()
    use_multicolor_lines = isinstance(color, np.ndarray)
    if use_multicolor_lines:
        raise NotImplementedError("multicolor lines not supported")
    else:
        arrow_kw['color'] = color
    linewidth = line.get_linewidth()
    if isinstance(linewidth, np.ndarray):
        raise NotImplementedError("multiwidth lines not supported")
    else:
        arrow_kw['linewidth'] = linewidth
    if transform is None:
        transform = axes.transData
    # Compute the arc length along the curve
    s = np.cumsum(np.sqrt(np.diff(x) ** 2 + np.diff(y) ** 2))
    arrows = []
    for loc in arrow_locs:
        # Index of the curve point at the requested fraction of arc length
        n = np.searchsorted(s, s[-1] * loc)
        # Figure out what direction to paint the arrow
        if dir == 1:
            arrow_tail = (x[n], y[n])
            arrow_head = (np.mean(x[n:n + 2]), np.mean(y[n:n + 2]))
        elif dir == -1:
            # Orient the arrow in the other direction on the segment
            arrow_tail = (x[n + 1], y[n + 1])
            arrow_head = (np.mean(x[n:n + 2]), np.mean(y[n:n + 2]))
        else:
            raise ValueError("unknown value for keyword 'dir'")
        # lw=0 suppresses the patch outline; the arrow head is filled
        p = mpl.patches.FancyArrowPatch(
            arrow_tail, arrow_head, transform=transform, lw=0,
            **arrow_kw)
        axes.add_patch(p)
        arrows.append(p)
    return arrows
#
# Function to compute Nyquist curve offsets
#
# This function computes a smoothly varying offset that starts and ends at
# zero at the ends of a scaled segment.
#
def _compute_curve_offset(resp, mask, max_offset):
    """Compute a smoothly varying offset for scaled Nyquist curve segments.

    Parameters
    ----------
    resp : complex ndarray
        Points of the frequency response along the Nyquist curve.
    mask : bool ndarray
        True for points on the "regular" portion of the curve; False for
        points in a rescaled segment that should be offset.
    max_offset : float
        Maximum fractional offset applied to a scaled segment.

    Returns
    -------
    offset : ndarray
        Per-point offset: zero on regular portions; a half sine wave over
        each scaled segment (zero at both ends), scaled by the segment's
        share of the total arc length.
    """
    # Compute the arc length along the curve
    s_curve = np.cumsum(
        np.sqrt(np.diff(resp.real) ** 2 + np.diff(resp.imag) ** 2))
    # Initialize the offset
    offset = np.zeros(resp.size)
    arclen = np.zeros(resp.size)
    # Walk through the response and keep track of each continuous component
    # (nsegs counts half-transitions between regular and scaled segments;
    # it is incremented below but otherwise unused)
    i, nsegs = 0, 0
    while i < resp.size:
        # Skip the regular segment
        while i < resp.size and mask[i]:
            i += 1              # Increment the counter
            if i == resp.size:
                break
            # Keep track of the arclength
            arclen[i] = arclen[i-1] + np.abs(resp[i] - resp[i-1])
        nsegs += 0.5
        if i == resp.size:
            break
        # Save the starting offset of this segment
        seg_start = i
        # Walk through the scaled segment
        while i < resp.size and not mask[i]:
            i += 1
            if i == resp.size:  # See if we are done with this segment
                break
            # Keep track of the arclength
            arclen[i] = arclen[i-1] + np.abs(resp[i] - resp[i-1])
        nsegs += 0.5
        if i == resp.size:
            break
        # Save the ending offset of this segment
        seg_end = i
        # Now compute the scaling for this segment: a half sine wave that
        # starts and ends at zero so the curve rejoins smoothly
        s_segment = arclen[seg_end-1] - arclen[seg_start]
        offset[seg_start:seg_end] = max_offset * s_segment/s_curve[-1] * \
            np.sin(np.pi * (arclen[seg_start:seg_end]
                            - arclen[seg_start])/s_segment)
    return offset
#
# Gang of Four plot
#
# TODO: think about how (and whether) to handle lists of systems
def gangof4_plot(P, C, omega=None, **kwargs):
    """Plot the "Gang of 4" transfer functions for a system

    Generates a 2x2 plot showing the "Gang of 4" sensitivity functions
    [T, PS; CS, S].

    Parameters
    ----------
    P, C : LTI
        Linear input/output systems (process and control)
    omega : array
        Range of frequencies (list or bounds) in rad/sec
    **kwargs : :func:`matplotlib.pyplot.plot` keyword properties, optional
        Additional keywords (passed to `matplotlib`)

    Returns
    -------
    None
    """
    if not P.issiso() or not C.issiso():
        # TODO: Add MIMO go4 plots.
        raise ControlMIMONotImplemented(
            "Gang of four is currently only implemented for SISO systems.")
    # Get the default parameter values
    dB = config._get_param(
        'freqplot', 'dB', kwargs, _freqplot_defaults, pop=True)
    Hz = config._get_param(
        'freqplot', 'Hz', kwargs, _freqplot_defaults, pop=True)
    grid = config._get_param(
        'freqplot', 'grid', kwargs, _freqplot_defaults, pop=True)
    # Compute the sensitivity functions
    L = P * C
    S = feedback(1, L)
    T = L * S
    # Select a default range if none is provided
    # TODO: This needs to be made more intelligent
    if omega is None:
        omega = _default_frequency_range((P, C, S), Hz=Hz)
    # Set up the axes with labels so that multiple calls to
    # gangof4_plot will superimpose the data. See details in bode_plot.
    plot_axes = {'t': None, 's': None, 'ps': None, 'cs': None}
    for ax in plt.gcf().axes:
        label = ax.get_label()
        if label.startswith('control-gangof4-'):
            key = label[len('control-gangof4-'):]
            if key not in plot_axes:
                raise RuntimeError(
                    "unknown gangof4 axis type '{}'".format(label))
            plot_axes[key] = ax
    # if any of the axes are missing, start from scratch
    if any(ax is None for ax in plot_axes.values()):
        plt.clf()
        plot_axes = {'s': plt.subplot(221, label='control-gangof4-s'),
                     'ps': plt.subplot(222, label='control-gangof4-ps'),
                     'cs': plt.subplot(223, label='control-gangof4-cs'),
                     't': plt.subplot(224, label='control-gangof4-t')}
    #
    # Plot the four sensitivity functions
    #
    omega_plot = omega / (2. * math.pi) if Hz else omega
    # TODO: Need to add in the mag = 1 lines
    # BUGFIX: the parentheses around the trailing conditional in each
    # set_ylabel() call below are required.  Without them the expression
    #     "$|S|$" + " (dB)" if dB else ""
    # parses as ("$|S|$" + " (dB)") if dB else "", which blanked the
    # axis labels entirely whenever dB was False.
    mag_tmp, phase_tmp, omega = S.frequency_response(omega)
    mag = np.squeeze(mag_tmp)
    if dB:
        plot_axes['s'].semilogx(omega_plot, 20 * np.log10(mag), **kwargs)
    else:
        plot_axes['s'].loglog(omega_plot, mag, **kwargs)
    plot_axes['s'].set_ylabel("$|S|$" + (" (dB)" if dB else ""))
    plot_axes['s'].tick_params(labelbottom=False)
    plot_axes['s'].grid(grid, which='both')
    mag_tmp, phase_tmp, omega = (P * S).frequency_response(omega)
    mag = np.squeeze(mag_tmp)
    if dB:
        plot_axes['ps'].semilogx(omega_plot, 20 * np.log10(mag), **kwargs)
    else:
        plot_axes['ps'].loglog(omega_plot, mag, **kwargs)
    plot_axes['ps'].tick_params(labelbottom=False)
    plot_axes['ps'].set_ylabel("$|PS|$" + (" (dB)" if dB else ""))
    plot_axes['ps'].grid(grid, which='both')
    mag_tmp, phase_tmp, omega = (C * S).frequency_response(omega)
    mag = np.squeeze(mag_tmp)
    if dB:
        plot_axes['cs'].semilogx(omega_plot, 20 * np.log10(mag), **kwargs)
    else:
        plot_axes['cs'].loglog(omega_plot, mag, **kwargs)
    plot_axes['cs'].set_xlabel(
        "Frequency (Hz)" if Hz else "Frequency (rad/sec)")
    plot_axes['cs'].set_ylabel("$|CS|$" + (" (dB)" if dB else ""))
    plot_axes['cs'].grid(grid, which='both')
    mag_tmp, phase_tmp, omega = T.frequency_response(omega)
    mag = np.squeeze(mag_tmp)
    if dB:
        plot_axes['t'].semilogx(omega_plot, 20 * np.log10(mag), **kwargs)
    else:
        plot_axes['t'].loglog(omega_plot, mag, **kwargs)
    plot_axes['t'].set_xlabel(
        "Frequency (Hz)" if Hz else "Frequency (rad/sec)")
    plot_axes['t'].set_ylabel("$|T|$" + (" (dB)" if dB else ""))
    plot_axes['t'].grid(grid, which='both')
    plt.tight_layout()
#
# Singular values plot
#
def singular_values_plot(syslist, omega=None,
                         plot=True, omega_limits=None, omega_num=None,
                         *args, **kwargs):
    """Singular value plot for a system
    Plots a singular value plot for the system over a (optional) frequency
    range.
    Parameters
    ----------
    syslist : linsys
        List of linear systems (single system is OK).
    omega : array_like
        List of frequencies in rad/sec to be used for frequency response.
    plot : bool
        If True (default), generate the singular values plot.
    omega_limits : array_like of two values
        Limits of the frequency vector to generate.
        If Hz=True the limits are in Hz otherwise in rad/s.
    omega_num : int
        Number of samples to plot. Default value (1000) set by
        config.defaults['freqplot.number_of_samples'].
    dB : bool
        If True, plot result in dB. Default value (False) set by
        config.defaults['freqplot.dB'].
    Hz : bool
        If True, plot frequency in Hz (omega must be provided in rad/sec).
        Default value (False) set by config.defaults['freqplot.Hz']
    Returns
    -------
    sigma : ndarray (or list of ndarray if len(syslist) > 1))
        singular values
    omega : ndarray (or list of ndarray if len(syslist) > 1))
        frequency in rad/sec
    Other Parameters
    ----------------
    grid : bool
        If True, plot grid lines on gain and phase plots. Default is set by
        `config.defaults['freqplot.grid']`.
    Examples
    --------
    >>> import numpy as np
    >>> den = [75, 1]
    >>> sys = TransferFunction(
        [[[87.8], [-86.4]], [[108.2], [-109.6]]], [[den, den], [den, den]])
    >>> omega = np.logspace(-4, 1, 1000)
    >>> sigma, omega = singular_values_plot(sys, plot=True)
    >>> singular_values_plot(sys, 0.0, plot=False)
    (array([[197.20868123],
        [ 1.39141948]]), array([0.]))
    """
    # Make a copy of the kwargs dictionary since we will modify it
    kwargs = dict(kwargs)
    # Get values for params (and pop from list to allow keyword use in plot)
    dB = config._get_param(
        'freqplot', 'dB', kwargs, _freqplot_defaults, pop=True)
    Hz = config._get_param(
        'freqplot', 'Hz', kwargs, _freqplot_defaults, pop=True)
    grid = config._get_param(
        'freqplot', 'grid', kwargs, _freqplot_defaults, pop=True)
    plot = config._get_param(
        'freqplot', 'plot', plot, True)
    omega_num = config._get_param('freqplot', 'number_of_samples', omega_num)
    # If argument was a singleton, turn it into a tuple
    if not isinstance(syslist, (list, tuple)):
        syslist = (syslist,)
    omega, omega_range_given = _determine_omega_vector(
        syslist, omega, omega_limits, omega_num, Hz=Hz)
    omega = np.atleast_1d(omega)
    if plot:
        fig = plt.gcf()
        ax_sigma = None
        # Get the current axes if they already exist
        # (label match lets repeated calls superimpose on the same axes)
        for ax in fig.axes:
            if ax.get_label() == 'control-sigma':
                ax_sigma = ax
        # If no axes present, create them from scratch
        if ax_sigma is None:
            plt.clf()
            ax_sigma = plt.subplot(111, label='control-sigma')
        # color cycle handled manually as all singular values
        # of the same systems are expected to be of the same color
        color_cycle = plt.rcParams['axes.prop_cycle'].by_key()['color']
        color_offset = 0
        # Continue the color cycle from where a previous call left off
        if len(ax_sigma.lines) > 0:
            last_color = ax_sigma.lines[-1].get_color()
            if last_color in color_cycle:
                color_offset = color_cycle.index(last_color) + 1
    sigmas, omegas, nyquistfrqs = [], [], []
    for idx_sys, sys in enumerate(syslist):
        omega_sys = np.asarray(omega)
        if sys.isdtime(strict=True):
            nyquistfrq = math.pi / sys.dt
            if not omega_range_given:
                # limit up to and including nyquist frequency
                omega_sys = np.hstack((
                    omega_sys[omega_sys < nyquistfrq], nyquistfrq))
            # Evaluate discrete-time response on the unit circle z = e^{jwT}
            omega_complex = np.exp(1j * omega_sys * sys.dt)
        else:
            nyquistfrq = None
            omega_complex = 1j*omega_sys
        fresp = sys(omega_complex, squeeze=False)
        # Move the frequency axis first so svd() runs per frequency point
        # (presumably (outputs, inputs, freqs) -> (freqs, outputs, inputs);
        # verify against the system __call__ contract)
        fresp = fresp.transpose((2, 0, 1))
        sigma = np.linalg.svd(fresp, compute_uv=False)
        sigmas.append(sigma.transpose())  # return shape is "channel first"
        omegas.append(omega_sys)
        nyquistfrqs.append(nyquistfrq)
        if plot:
            color = color_cycle[(idx_sys + color_offset) % len(color_cycle)]
            # A user-supplied color overrides the cycle (popped so it is
            # not passed to the plot call twice)
            color = kwargs.pop('color', color)
            nyquistfrq_plot = None
            if Hz:
                omega_plot = omega_sys / (2. * math.pi)
                if nyquistfrq:
                    nyquistfrq_plot = nyquistfrq / (2. * math.pi)
            else:
                omega_plot = omega_sys
                if nyquistfrq:
                    nyquistfrq_plot = nyquistfrq
            sigma_plot = sigma
            if dB:
                ax_sigma.semilogx(omega_plot, 20 * np.log10(sigma_plot),
                                  color=color, *args, **kwargs)
            else:
                ax_sigma.loglog(omega_plot, sigma_plot,
                                color=color, *args, **kwargs)
            # Mark the Nyquist frequency for discrete-time systems
            if nyquistfrq_plot is not None:
                ax_sigma.axvline(x=nyquistfrq_plot, color=color)
    # Add a grid to the plot + labeling
    if plot:
        ax_sigma.grid(grid, which='both')
        ax_sigma.set_ylabel(
            "Singular Values (dB)" if dB else "Singular Values")
        ax_sigma.set_xlabel("Frequency (Hz)" if Hz else "Frequency (rad/sec)")
    # Return scalar results when a single system was given
    if len(syslist) == 1:
        return sigmas[0], omegas[0]
    else:
        return sigmas, omegas
#
# Utility functions
#
# This section of the code contains some utility functions for
# generating frequency domain plots
#
# Determine the frequency range to be used
def _determine_omega_vector(syslist, omega_in, omega_limits, omega_num,
Hz=None, feature_periphery_decades=None):
"""Determine the frequency range for a frequency-domain plot
according to a standard logic.
If omega_in and omega_limits are both None, then omega_out is computed
on omega_num points according to a default logic defined by
_default_frequency_range and tailored for the list of systems syslist, and
omega_range_given is set to False.
If omega_in is None but omega_limits is an array-like of 2 elements, then
omega_out is computed with the function np.logspace on omega_num points
within the interval [min, max] = [omega_limits[0], omega_limits[1]], and
omega_range_given is set to True.
If omega_in is not None, then omega_out is set to omega_in,
and omega_range_given is set to True
Parameters
----------
syslist : list of LTI
List of linear input/output systems (single system is OK)
omega_in : 1D array_like or None
Frequency range specified by the user
omega_limits : 1D array_like or None
Frequency limits specified by the user
omega_num : int
Number of points to be used for the frequency
range (if the frequency range is not user-specified)
Hz : bool, optional
If True, the limits (first and last value) of the frequencies
are set to full decades in Hz so it fits plotting with logarithmic
scale in Hz otherwise in rad/s. Omega is always returned in rad/sec.
Returns
-------
omega_out : 1D array
Frequency range to be used
omega_range_given : bool
True if the frequency range was specified by the user, either through
omega_in or through omega_limits. False if both omega_in
and omega_limits are None.
"""
omega_range_given = True
if omega_in is None:
if omega_limits is None:
omega_range_given = False
# Select a default range if none is provided
omega_out = _default_frequency_range(
syslist, number_of_samples=omega_num, Hz=Hz,
feature_periphery_decades=feature_periphery_decades)
else:
omega_limits = np.asarray(omega_limits)
if len(omega_limits) != 2:
raise ValueError("len(omega_limits) must be 2")
omega_out = np.logspace(np.log10(omega_limits[0]),
np.log10(omega_limits[1]),
num=omega_num, endpoint=True)
else:
omega_out = np.copy(omega_in)
return omega_out, omega_range_given
# Compute reasonable defaults for axes
def _default_frequency_range(syslist, Hz=None, number_of_samples=None,
                             feature_periphery_decades=None):
    """Compute a default frequency range for frequency domain plots.
    This code looks at the poles and zeros of all of the systems that
    we are plotting and sets the frequency range to be one decade above
    and below the min and max feature frequencies, rounded to the nearest
    integer. If no features are found, it returns logspace(-1, 1)
    Parameters
    ----------
    syslist : list of LTI
        List of linear input/output systems (single system is OK)
    Hz : bool, optional
        If True, the limits (first and last value) of the frequencies
        are set to full decades in Hz so it fits plotting with logarithmic
        scale in Hz otherwise in rad/s. Omega is always returned in rad/sec.
    number_of_samples : int, optional
        Number of samples to generate. The default value is read from
        ``config.defaults['freqplot.number_of_samples']. If None, then the
        default from `numpy.logspace` is used.
    feature_periphery_decades : float, optional
        Defines how many decades shall be included in the frequency range on
        both sides of features (poles, zeros). The default value is read from
        ``config.defaults['freqplot.feature_periphery_decades']``.
    Returns
    -------
    omega : array
        Range of frequencies in rad/sec
    Examples
    --------
    >>> from matlab import ss
    >>> sys = ss("1. -2; 3. -4", "5.; 7", "6. 8", "9.")
    >>> omega = _default_frequency_range(sys)
    """
    # Set default values for options
    number_of_samples = config._get_param(
        'freqplot', 'number_of_samples', number_of_samples)
    feature_periphery_decades = config._get_param(
        'freqplot', 'feature_periphery_decades', feature_periphery_decades, 1)
    # Find the list of all poles and zeros in the systems
    features = np.array(())
    freq_interesting = []
    # detect if single sys passed by checking if it is sequence-like
    if not hasattr(syslist, '__iter__'):
        syslist = (syslist,)
    for sys in syslist:
        try:
            # Add new features to the list
            if sys.isctime():
                features_ = np.concatenate(
                    (np.abs(sys.poles()), np.abs(sys.zeros())))
                # Get rid of poles and zeros at the origin
                toreplace = np.isclose(features_, 0.0)
                if np.any(toreplace):
                    features_ = features_[~toreplace]
            elif sys.isdtime(strict=True):
                fn = math.pi * 1. / sys.dt
                # TODO: What distance to the Nyquist frequency is appropriate?
                freq_interesting.append(fn * 0.9)
                features_ = np.concatenate((sys.poles(), sys.zeros()))
                # Get rid of poles and zeros on the real axis (imag==0)
                # * origin and real < 0
                # * at 1.: would result in omega=0. (logarithmic plot!)
                toreplace = np.isclose(features_.imag, 0.0) & (
                                    (features_.real <= 0.) |
                                    (np.abs(features_.real - 1.0) < 1.e-10))
                if np.any(toreplace):
                    features_ = features_[~toreplace]
                # TODO: improve
                # Map z-plane features to equivalent continuous-time
                # frequencies via w = |ln(z) / (j dt)|
                features_ = np.abs(np.log(features_) / (1.j * sys.dt))
            else:
                # TODO
                raise NotImplementedError(
                    "type of system in not implemented now")
            features = np.concatenate((features, features_))
        except NotImplementedError:
            # Systems that cannot report their features are simply skipped
            pass
    # Make sure there is at least one point in the range
    if features.shape[0] == 0:
        features = np.array([1.])
    if Hz:
        # Round the decade limits in Hz so plot decades line up, then
        # convert the limits back to rad/sec below
        features /= 2. * math.pi
    features = np.log10(features)
    lsp_min = np.rint(np.min(features) - feature_periphery_decades)
    lsp_max = np.rint(np.max(features) + feature_periphery_decades)
    if Hz:
        lsp_min += np.log10(2. * math.pi)
        lsp_max += np.log10(2. * math.pi)
    if freq_interesting:
        # Widen the range to include points of interest (e.g. near the
        # Nyquist frequency of discrete-time systems)
        lsp_min = min(lsp_min, np.log10(min(freq_interesting)))
        lsp_max = max(lsp_max, np.log10(max(freq_interesting)))
    # TODO: Add a check in discrete case to make sure we don't get aliasing
    # (Attention: there is a list of system but only one omega vector)
    # Set the range to be an order of magnitude beyond any features
    if number_of_samples:
        omega = np.logspace(
            lsp_min, lsp_max, num=number_of_samples, endpoint=True)
    else:
        omega = np.logspace(lsp_min, lsp_max, endpoint=True)
    return omega
#
# Utility functions to create nice looking labels (KLD 5/23/11)
#
def get_pow1000(num):
    """Return the exponent e such that num = s * 1000**e with the
    significand |s| in the range [1, 1000).  Returns 0 for num == 0.
    """
    # Based on algorithm from http://www.mail-archive.com/
    # matplotlib-users@lists.sourceforge.net/msg14433.html, accessed 2010/11/7
    # by Jason Heeris 2009/11/18
    from decimal import Decimal
    from math import floor
    # Decimal(str(num)) avoids binary-float round-off in the log computation
    magnitude = abs(Decimal(str(num)))
    if magnitude == 0:
        return 0
    return int(floor(magnitude.log10() / 3))
def gen_prefix(pow1000):
    """Return the SI prefix string for 1000**pow1000.

    pow1000 must lie in [-8, 8] (yocto through yotta); hecto, deca,
    deci, and centi are deliberately excluded per Table 5 of [BIPM 2006].
    """
    if not -8 <= pow1000 <= 8:
        raise ValueError(
            "Value is out of the range covered by the SI prefixes.")
    # Ordered from yotta (10^24) down to yocto (10^-24); index 8 is the
    # empty prefix for 10^0, so the lookup is simply 8 - pow1000.
    prefixes = ('Y', 'Z', 'E', 'P', 'T', 'G', 'M', 'k', '',
                'm', r'$\mu$', 'n', 'p', 'f', 'a', 'z', 'y')
    return prefixes[8 - pow1000]
def find_nearest_omega(omega_list, omega):
    """Return the entry of omega_list closest to omega (first on ties)."""
    candidates = np.asarray(omega_list)
    nearest_idx = np.argmin(np.abs(candidates - omega))
    return candidates[nearest_idx]
# Function aliases
# Short names bound to the full plotting routines so callers can use
# e.g. bode(...) instead of bode_plot(...).
bode = bode_plot
nyquist = nyquist_plot
gangof4 = gangof4_plot
| {
"content_hash": "72633be98ef34752919fbd7ebba21b86",
"timestamp": "",
"source": "github",
"line_count": 1717,
"max_line_length": 80,
"avg_line_length": 40.77577169481654,
"alnum_prop": 0.5531337485002571,
"repo_name": "murrayrm/python-control",
"id": "05ae9da55fb6f33ef013cd05f645ff2166488729",
"size": "71883",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "control/freqplot.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "126"
},
{
"name": "Python",
"bytes": "1562387"
}
],
"symlink_target": ""
} |
'''
Created by auto_sdk on 2015.09.11
'''
from top.api.base import RestApi
class OpenimTribeQuitRequest(RestApi):
    """Request object for the ``taobao.openim.tribe.quit`` TOP API call."""

    def __init__(self, domain='gw.api.taobao.com', port=80):
        """Create the request against the given API gateway and port."""
        RestApi.__init__(self, domain, port)
        # Request parameters; to be filled in by the caller before sending.
        self.user = None
        self.tribe_id = None

    def getapiname(self):
        """Return the TOP method name identifying this API call."""
        return 'taobao.openim.tribe.quit'
| {
"content_hash": "981cd5db7fd0f7391c4e77cb36f3badd",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 55,
"avg_line_length": 26.833333333333332,
"alnum_prop": 0.6832298136645962,
"repo_name": "Akagi201/pycsc",
"id": "73e623e8faf318e48c6f3c7e0bfc4342bd31a0ab",
"size": "322",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "top/api/rest/OpenimTribeQuitRequest.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "63997"
}
],
"symlink_target": ""
} |
import pyglet
from pyglet.window import BaseWindow, WindowException
from pyglet.window import MouseCursor, DefaultMouseCursor
from pyglet.event import EventDispatcher
from pyglet.canvas.cocoa import CocoaCanvas
from pyglet.libs.darwin.cocoapy import *
from .systemcursor import SystemCursor
from .pyglet_delegate import PygletDelegate
from .pyglet_textview import PygletTextView
from .pyglet_window import PygletWindow, PygletToolWindow
from .pyglet_view import PygletView
# Look up the Cocoa (Objective-C) classes used throughout this module via
# the cocoapy runtime bridge.
NSApplication = ObjCClass('NSApplication')
NSCursor = ObjCClass('NSCursor')
NSAutoreleasePool = ObjCClass('NSAutoreleasePool')
NSColor = ObjCClass('NSColor')
NSEvent = ObjCClass('NSEvent')
NSImage = ObjCClass('NSImage')
class CocoaMouseCursor(MouseCursor):
    """Mouse cursor backed by one of the named default NSCursor objects.

    The cursor is identified by the name of an NSCursor class method,
    e.g. 'pointingHandCursor'; ``set`` sends that message to the NSCursor
    class and activates the resulting cursor.
    """
    drawable = False

    def __init__(self, cursorName):
        self.cursorName = cursorName

    def set(self):
        getattr(NSCursor, self.cursorName)().set()
class CocoaWindow(BaseWindow):
    """Pyglet window implementation for Mac OS X (Cocoa).

    Wraps an NSWindow (through the cocoapy Objective-C bridge) with a
    PygletView content view, and translates between pyglet's window API
    and the Cocoa/Quartz APIs.
    """
    # NSWindow instance.
    _nswindow = None
    # Delegate object.
    _delegate = None
    # Window properties
    _minimum_size = None
    _maximum_size = None
    _is_mouse_exclusive = False
    _mouse_platform_visible = True
    _mouse_ignore_motion = False
    _is_keyboard_exclusive = False
    # Flag set during close() method.
    _was_closed = False
    # NSWindow style masks.
    _style_masks = {
        BaseWindow.WINDOW_STYLE_DEFAULT: NSTitledWindowMask |
        NSClosableWindowMask |
        NSMiniaturizableWindowMask,
        BaseWindow.WINDOW_STYLE_DIALOG: NSTitledWindowMask |
        NSClosableWindowMask,
        BaseWindow.WINDOW_STYLE_TOOL: NSTitledWindowMask |
        NSClosableWindowMask |
        NSUtilityWindowMask,
        BaseWindow.WINDOW_STYLE_BORDERLESS: NSBorderlessWindowMask,
    }
    def _recreate(self, changes):
        """Rebuild the native window after the listed attributes changed."""
        if ('context' in changes):
            self.context.set_current()
        if 'fullscreen' in changes:
            if not self._fullscreen:  # leaving fullscreen
                self.screen.release_display()
        self._create()
    def _create(self):
        """Create (or re-create) the NSWindow, its view, canvas and delegate."""
        # Create a temporary autorelease pool for this method.
        pool = NSAutoreleasePool.alloc().init()
        if self._nswindow:
            # The window is about to be recreated so destroy everything
            # associated with the old window, then destroy the window itself.
            nsview = self.canvas.nsview
            self.canvas = None
            self._nswindow.orderOut_(None)
            self._nswindow.close()
            self.context.detach()
            self._nswindow.release()
            self._nswindow = None
            nsview.release()
            self._delegate.release()
            self._delegate = None
        # Determine window parameters.
        content_rect = NSMakeRect(0, 0, self._width, self._height)
        WindowClass = PygletWindow
        if self._fullscreen:
            style_mask = NSBorderlessWindowMask
        else:
            if self._style not in self._style_masks:
                self._style = self.WINDOW_STYLE_DEFAULT
            style_mask = self._style_masks[self._style]
            if self._resizable:
                style_mask |= NSResizableWindowMask
            if self._style == BaseWindow.WINDOW_STYLE_TOOL:
                WindowClass = PygletToolWindow
        # First create an instance of our NSWindow subclass.
        # FIX ME:
        # Need to use this initializer to have any hope of multi-monitor support.
        # But currently causes problems on Mac OS X Lion.  So for now, we initialize the
        # window without including screen information.
        #
        # self._nswindow = WindowClass.alloc().initWithContentRect_styleMask_backing_defer_screen_(
        #     content_rect,           # contentRect
        #     style_mask,             # styleMask
        #     NSBackingStoreBuffered, # backing
        #     False,                  # defer
        #     self.screen.get_nsscreen())  # screen
        self._nswindow = WindowClass.alloc().initWithContentRect_styleMask_backing_defer_(
            content_rect,           # contentRect
            style_mask,             # styleMask
            NSBackingStoreBuffered, # backing
            False)                  # defer
        if self._fullscreen:
            # BUG: I suspect that this doesn't do the right thing when using
            # multiple monitors (which would be to go fullscreen on the monitor
            # where the window is located).  However I've no way to test.
            blackColor = NSColor.blackColor()
            self._nswindow.setBackgroundColor_(blackColor)
            self._nswindow.setOpaque_(True)
            self.screen.capture_display()
            self._nswindow.setLevel_(quartz.CGShieldingWindowLevel())
            self.context.set_full_screen()
            self._center_window()
            self._mouse_in_window = True
        else:
            self._set_nice_window_location()
            self._mouse_in_window = self._mouse_in_content_rect()
        # Then create a view and set it as our NSWindow's content view.
        self._nsview = PygletView.alloc().initWithFrame_cocoaWindow_(
            content_rect, self)
        self._nswindow.setContentView_(self._nsview)
        self._nswindow.makeFirstResponder_(self._nsview)
        # Create a canvas with the view as its drawable and attach context to
        # it.
        self.canvas = CocoaCanvas(self.display, self.screen, self._nsview)
        self.context.attach(self.canvas)
        # Configure the window.
        self._nswindow.setAcceptsMouseMovedEvents_(True)
        self._nswindow.setReleasedWhenClosed_(False)
        self._nswindow.useOptimizedDrawing_(True)
        self._nswindow.setPreservesContentDuringLiveResize_(False)
        # Set the delegate.
        self._delegate = PygletDelegate.alloc().initWithWindow_(self)
        # Configure CocoaWindow.
        self.set_caption(self._caption)
        if self._minimum_size is not None:
            self.set_minimum_size(*self._minimum_size)
        if self._maximum_size is not None:
            self.set_maximum_size(*self._maximum_size)
        self.context.update_geometry()
        self.switch_to()
        self.set_vsync(self._vsync)
        self.set_visible(self._visible)
        pool.drain()
    def _set_nice_window_location(self):
        """Place a new window: center it, or cascade from an existing one."""
        # Construct a list of all visible windows that aren't us.
        visible_windows = [win for win in pyglet.app.windows if
                           win is not self and
                           win._nswindow and
                           win._nswindow.isVisible()]
        # If there aren't any visible windows, then center this window.
        if not visible_windows:
            self._center_window()
        # Otherwise, cascade from last window in list.
        else:
            point = visible_windows[-1]._nswindow.cascadeTopLeftFromPoint_(NSZeroPoint)
            self._nswindow.cascadeTopLeftFromPoint_(point)
    def _center_window(self):
        """Move the window to the center of its own screen."""
        # [NSWindow center] does not move the window to a true center position
        # and also always moves the window to the main display.
        x = self.screen.x + int((self.screen.width - self._width) / 2)
        y = self.screen.y + int((self.screen.height - self._height) / 2)
        self._nswindow.setFrameOrigin_(NSPoint(x, y))
    def close(self):
        """Tear down the native window, delegate, canvas and screen capture."""
        # If we've already gone through this once, don't do it again.
        if self._was_closed:
            return
        # Create a temporary autorelease pool for this method.
        pool = NSAutoreleasePool.new()
        # Restore cursor visibility
        self.set_mouse_platform_visible(True)
        self.set_exclusive_mouse(False)
        self.set_exclusive_keyboard(False)
        # Remove the delegate object
        if self._delegate:
            self._nswindow.setDelegate_(None)
            self._delegate.release()
            self._delegate = None
        # Remove window from display and remove its view.
        if self._nswindow:
            self._nswindow.orderOut_(None)
            self._nswindow.setContentView_(None)
            self._nswindow.close()
        # Restore screen mode. This also releases the display
        # if it was captured for fullscreen mode.
        self.screen.restore_mode()
        # Remove view from canvas and then remove canvas.
        if self.canvas:
            self.canvas.nsview.release()
            self.canvas.nsview = None
            self.canvas = None
        # Do this last, so that we don't see white flash
        # when exiting application from fullscreen mode.
        super().close()
        self._was_closed = True
        pool.drain()
    def switch_to(self):
        """Make this window's GL context the current one."""
        if self.context:
            self.context.set_current()
    def flip(self):
        """Draw the custom mouse cursor (if any) and swap GL buffers."""
        self.draw_mouse_cursor()
        if self.context:
            self.context.flip()
    def dispatch_events(self):
        """Process pending pyglet events, then drain the Cocoa event queue."""
        self._allow_dispatch_event = True
        # Process all pyglet events.
        self.dispatch_pending_events()
        event = True  # prime the loop below
        # Dequeue and process all of the pending Cocoa events.
        pool = NSAutoreleasePool.new()
        NSApp = NSApplication.sharedApplication()
        while event and self._nswindow and self._context:
            event = NSApp.nextEventMatchingMask_untilDate_inMode_dequeue_(
                NSAnyEventMask, None, NSEventTrackingRunLoopMode, True)
            if event:
                event_type = event.type()
                # Pass on all events.
                NSApp.sendEvent_(event)
                # And resend key events to special handlers.
                if event_type == NSKeyDown and not event.isARepeat():
                    NSApp.sendAction_to_from_(
                        get_selector('pygletKeyDown:'), None, event)
                elif event_type == NSKeyUp:
                    NSApp.sendAction_to_from_(
                        get_selector('pygletKeyUp:'), None, event)
                elif event_type == NSFlagsChanged:
                    NSApp.sendAction_to_from_(
                        get_selector('pygletFlagsChanged:'), None, event)
                NSApp.updateWindows()
        pool.drain()
        self._allow_dispatch_event = False
    def dispatch_pending_events(self):
        """Dispatch every event queued on this window, oldest first."""
        while self._event_queue:
            event = self._event_queue.pop(0)
            EventDispatcher.dispatch_event(self, *event)
    def set_caption(self, caption):
        """Set the window title."""
        self._caption = caption
        if self._nswindow is not None:
            self._nswindow.setTitle_(get_NSString(caption))
    def set_icon(self, *images):
        """Set the application icon from the largest of the given images."""
        # Only use the biggest image from the list.
        max_image = images[0]
        for img in images:
            if img.width > max_image.width and img.height > max_image.height:
                max_image = img
        # Grab image data from pyglet image.
        image = max_image.get_image_data()
        format = 'ARGB'
        bytesPerRow = len(format) * image.width
        data = image.get_data(format, -bytesPerRow)
        # Use image data to create a data provider.
        # Using CGDataProviderCreateWithData crashes PyObjC 2.2b3, so we create
        # a CFDataRef object first and use it to create the data provider.
        cfdata = c_void_p(cf.CFDataCreate(None, data, len(data)))
        provider = c_void_p(quartz.CGDataProviderCreateWithCFData(cfdata))
        colorSpace = c_void_p(quartz.CGColorSpaceCreateDeviceRGB())
        # Then create a CGImage from the provider.
        cgimage = c_void_p(quartz.CGImageCreate(
            image.width, image.height, 8, 32, bytesPerRow,
            colorSpace,
            kCGImageAlphaFirst,
            provider,
            None,
            True,
            kCGRenderingIntentDefault))
        if not cgimage:
            return
        cf.CFRelease(cfdata)
        quartz.CGDataProviderRelease(provider)
        quartz.CGColorSpaceRelease(colorSpace)
        # Turn the CGImage into an NSImage.
        size = NSMakeSize(image.width, image.height)
        nsimage = NSImage.alloc().initWithCGImage_size_(cgimage, size)
        if not nsimage:
            return
        # And finally set the app icon.
        NSApp = NSApplication.sharedApplication()
        NSApp.setApplicationIconImage_(nsimage)
        nsimage.release()
    def get_location(self):
        """Return (x, y) of the content area's top-left, y measured down from
        the top of the screen (Cocoa's native origin is bottom-left)."""
        window_frame = self._nswindow.frame()
        rect = self._nswindow.contentRectForFrameRect_(window_frame)
        screen_frame = self._nswindow.screen().frame()
        screen_width = int(screen_frame.size.width)
        screen_height = int(screen_frame.size.height)
        return int(rect.origin.x), int(screen_height - rect.origin.y - rect.size.height)
    def set_location(self, x, y):
        """Move the window so its content top-left is at (x, y), y from top."""
        window_frame = self._nswindow.frame()
        rect = self._nswindow.contentRectForFrameRect_(window_frame)
        screen_frame = self._nswindow.screen().frame()
        screen_width = int(screen_frame.size.width)
        screen_height = int(screen_frame.size.height)
        # Flip y to Cocoa's bottom-left origin.
        origin = NSPoint(x, screen_height - y - rect.size.height)
        self._nswindow.setFrameOrigin_(origin)
    def get_size(self):
        """Return (width, height) of the content area in pixels."""
        window_frame = self._nswindow.frame()
        rect = self._nswindow.contentRectForFrameRect_(window_frame)
        return int(rect.size.width), int(rect.size.height)
    def set_size(self, width, height):
        """Resize the content area, keeping the top-left corner fixed.

        Raises WindowException for fullscreen windows.
        """
        if self._fullscreen:
            raise WindowException('Cannot set size of fullscreen window.')
        self._width = max(1, int(width))
        self._height = max(1, int(height))
        # Move frame origin down so that top-left corner of window doesn't
        # move.
        window_frame = self._nswindow.frame()
        rect = self._nswindow.contentRectForFrameRect_(window_frame)
        rect.origin.y += rect.size.height - self._height
        rect.size.width = self._width
        rect.size.height = self._height
        new_frame = self._nswindow.frameRectForContentRect_(rect)
        # The window background flashes when the frame size changes unless it's
        # animated, but we can set the window's animationResizeTime to zero.
        is_visible = self._nswindow.isVisible()
        self._nswindow.setFrame_display_animate_(new_frame, True, is_visible)
    def set_minimum_size(self, width, height):
        """Set the minimum content size the user may resize the window to."""
        self._minimum_size = NSSize(width, height)
        if self._nswindow is not None:
            self._nswindow.setContentMinSize_(self._minimum_size)
    def set_maximum_size(self, width, height):
        """Set the maximum content size the user may resize the window to."""
        self._maximum_size = NSSize(width, height)
        if self._nswindow is not None:
            self._nswindow.setContentMaxSize_(self._maximum_size)
    def activate(self):
        """Bring the application forward and make this window key."""
        if self._nswindow is not None:
            NSApp = NSApplication.sharedApplication()
            NSApp.activateIgnoringOtherApps_(True)
            self._nswindow.makeKeyAndOrderFront_(None)
    def set_visible(self, visible=True):
        """Show or hide the window, dispatching the relevant pyglet events."""
        self._visible = visible
        if self._nswindow is not None:
            if visible:
                # Not really sure why on_resize needs to be here,
                # but it's what pyglet wants.
                self.dispatch_event('on_resize', self._width, self._height)
                self.dispatch_event('on_show')
                self.dispatch_event('on_expose')
                self._nswindow.makeKeyAndOrderFront_(None)
            else:
                self._nswindow.orderOut_(None)
    def minimize(self):
        """Miniaturize the window to the Dock."""
        self._mouse_in_window = False
        if self._nswindow is not None:
            self._nswindow.miniaturize_(None)
    def maximize(self):
        """Zoom the window (Cocoa's equivalent of maximizing)."""
        if self._nswindow is not None:
            self._nswindow.zoom_(None)
    def set_vsync(self, vsync):
        """Enable/disable vertical sync (global pyglet option overrides)."""
        if pyglet.options['vsync'] is not None:
            vsync = pyglet.options['vsync']
        self._vsync = vsync  # _recreate depends on this
        if self.context:
            self.context.set_vsync(vsync)
    def _mouse_in_content_rect(self):
        # Returns true if mouse is inside the window's content rectangle.
        # Better to use this method to check manually rather than relying
        # on instance variables that may not be set correctly.
        point = NSEvent.mouseLocation()
        window_frame = self._nswindow.frame()
        rect = self._nswindow.contentRectForFrameRect_(window_frame)
        return foundation.NSMouseInRect(point, rect, False)
    def set_mouse_platform_visible(self, platform_visible=None):
        """Show/hide the platform cursor, or recompute its proper state.

        With a boolean argument this simply shows or hides the cursor;
        with no argument it decides visibility and shape from the current
        exclusive-mode, in-window and cursor-type state.
        """
        # When the platform_visible argument is supplied with a boolean, then this
        # method simply sets whether or not the platform mouse cursor is
        # visible.
        if platform_visible is not None:
            if platform_visible:
                SystemCursor.unhide()
            else:
                SystemCursor.hide()
        # But if it has been called without an argument, it turns into
        # a completely different function.  Now we are trying to figure out
        # whether or not the mouse *should* be visible, and if so, what it should
        # look like.
        else:
            # If we are in mouse exclusive mode, then hide the mouse cursor.
            if self._is_mouse_exclusive:
                SystemCursor.hide()
            # If we aren't inside the window, then always show the mouse
            # and make sure that it is the default cursor.
            elif not self._mouse_in_content_rect():
                NSCursor.arrowCursor().set()
                SystemCursor.unhide()
            # If we are in the window, then what we do depends on both
            # the current pyglet-set visibility setting for the mouse and
            # the type of the mouse cursor.  If the cursor has been hidden
            # in the window with set_mouse_visible() then don't show it.
            elif not self._mouse_visible:
                SystemCursor.hide()
            # If the mouse is set as a system-defined cursor, then we
            # need to set the cursor and show the mouse.
            # *** FIX ME ***
            elif isinstance(self._mouse_cursor, CocoaMouseCursor):
                self._mouse_cursor.set()
                SystemCursor.unhide()
            # If the mouse cursor is drawable, then it we need to hide
            # the system mouse cursor, so that the cursor can draw itself.
            elif self._mouse_cursor.drawable:
                SystemCursor.hide()
            # Otherwise, show the default cursor.
            else:
                NSCursor.arrowCursor().set()
                SystemCursor.unhide()
    def get_system_mouse_cursor(self, name):
        """Map a pyglet CURSOR_* name to a CocoaMouseCursor.

        Raises RuntimeError for unknown names.
        """
        # It would make a lot more sense for most of this code to be
        # inside the CocoaMouseCursor class, but all of the CURSOR_xxx
        # constants are defined as properties of BaseWindow.
        if name == self.CURSOR_DEFAULT:
            return DefaultMouseCursor()
        cursors = {
            self.CURSOR_CROSSHAIR: 'crosshairCursor',
            self.CURSOR_HAND: 'pointingHandCursor',
            self.CURSOR_HELP: 'arrowCursor',
            # Mac OS 10.6
            self.CURSOR_NO: 'operationNotAllowedCursor',
            self.CURSOR_SIZE: 'arrowCursor',
            self.CURSOR_SIZE_UP: 'resizeUpCursor',
            self.CURSOR_SIZE_UP_RIGHT: 'arrowCursor',
            self.CURSOR_SIZE_RIGHT: 'resizeRightCursor',
            self.CURSOR_SIZE_DOWN_RIGHT: 'arrowCursor',
            self.CURSOR_SIZE_DOWN: 'resizeDownCursor',
            self.CURSOR_SIZE_DOWN_LEFT: 'arrowCursor',
            self.CURSOR_SIZE_LEFT: 'resizeLeftCursor',
            self.CURSOR_SIZE_UP_LEFT: 'arrowCursor',
            self.CURSOR_SIZE_UP_DOWN: 'resizeUpDownCursor',
            self.CURSOR_SIZE_LEFT_RIGHT: 'resizeLeftRightCursor',
            self.CURSOR_TEXT: 'IBeamCursor',
            # No wristwatch cursor in Cocoa
            self.CURSOR_WAIT: 'arrowCursor',
            # No wristwatch cursor in Cocoa
            self.CURSOR_WAIT_ARROW: 'arrowCursor',
        }
        if name not in cursors:
            raise RuntimeError('Unknown cursor name "%s"' % name)
        return CocoaMouseCursor(cursors[name])
    def set_mouse_position(self, x, y, absolute=False):
        """Warp the mouse cursor to the given position.

        Absolute coordinates are global display coordinates (origin
        top-left of main display); otherwise (x, y) are window-relative
        with origin at the window's bottom-left.
        """
        if absolute:
            # If absolute, then x, y is given in global display coordinates
            # which sets (0,0) at top left corner of main display.  It is possible
            # to warp the mouse position to a point inside of another display.
            quartz.CGWarpMouseCursorPosition(CGPoint(x, y))
        else:
            # Window-relative coordinates: (x, y) are given in window coords
            # with (0,0) at bottom-left corner of window and y up.  We find
            # which display the window is in and then convert x, y into local
            # display coords where (0,0) is now top-left of display and y down.
            screenInfo = self._nswindow.screen().deviceDescription()
            displayID = screenInfo.objectForKey_(
                get_NSString('NSScreenNumber'))
            displayID = displayID.intValue()
            displayBounds = quartz.CGDisplayBounds(displayID)
            frame = self._nswindow.frame()
            windowOrigin = frame.origin
            x += windowOrigin.x
            y = displayBounds.size.height - windowOrigin.y - y
            quartz.CGDisplayMoveCursorToPoint(displayID, NSPoint(x, y))
    def set_exclusive_mouse(self, exclusive=True):
        """Capture (or release) the mouse for this window."""
        self._is_mouse_exclusive = exclusive
        if exclusive:
            # Skip the next motion event, which would return a large delta.
            self._mouse_ignore_motion = True
            # Move mouse to center of window.
            frame = self._nswindow.frame()
            width, height = frame.size.width, frame.size.height
            self.set_mouse_position(width / 2, height / 2)
            quartz.CGAssociateMouseAndMouseCursorPosition(False)
        else:
            quartz.CGAssociateMouseAndMouseCursorPosition(True)
        # Update visibility of mouse cursor.
        self.set_mouse_platform_visible()
    def set_exclusive_keyboard(self, exclusive=True):
        """Enter (or leave) kiosk-style keyboard capture.

        See:
        http://developer.apple.com/mac/library/technotes/tn2002/tn2062.html
        http://developer.apple.com/library/mac/#technotes/KioskMode/
        """
        # BUG: System keys like F9 or command-tab are disabled, however
        # pyglet also does not receive key press events for them.
        # This flag is queried by window delegate to determine whether
        # the quit menu item is active.
        self._is_keyboard_exclusive = exclusive
        if exclusive:
            # "Be nice! Don't disable force-quit!"
            #          -- Patrick Swayze, Road House (1989)
            options = NSApplicationPresentationHideDock | \
                NSApplicationPresentationHideMenuBar | \
                NSApplicationPresentationDisableProcessSwitching | \
                NSApplicationPresentationDisableHideApplication
        else:
            options = NSApplicationPresentationDefault
        NSApp = NSApplication.sharedApplication()
        NSApp.setPresentationOptions_(options)
| {
"content_hash": "7e9f8f76ba510e6b8c01c7b31759dd40",
"timestamp": "",
"source": "github",
"line_count": 579,
"max_line_length": 99,
"avg_line_length": 40.46804835924007,
"alnum_prop": 0.6095770560368742,
"repo_name": "bitcraft/pyglet",
"id": "3d9342a9261218c892c50932130a673d7628931c",
"size": "25146",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyglet/window/cocoa/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1828"
},
{
"name": "HTML",
"bytes": "1652"
},
{
"name": "JavaScript",
"bytes": "6745"
},
{
"name": "PHP",
"bytes": "2192"
},
{
"name": "Python",
"bytes": "6201398"
},
{
"name": "Shell",
"bytes": "251"
}
],
"symlink_target": ""
} |
"""
pygments.formatters.rtf
~~~~~~~~~~~~~~~~~~~~~~~
A formatter that generates RTF files.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.formatter import Formatter
from pygments.util import get_int_opt, _surrogatepair
__all__ = ['RtfFormatter']
class RtfFormatter(Formatter):
    """
    Format tokens as RTF markup. This formatter automatically outputs full RTF
    documents with color information and other useful stuff. Perfect for Copy and
    Paste into Microsoft® Word® documents.
    Please note that ``encoding`` and ``outencoding`` options are ignored.
    The RTF format is ASCII natively, but handles unicode characters correctly
    thanks to escape sequences.
    .. versionadded:: 0.6
    Additional options accepted:
    `style`
        The style to use, can be a string or a Style subclass (default:
        ``'default'``).
    `fontface`
        The used font family, for example ``Bitstream Vera Sans``. Defaults to
        some generic font which is supposed to have fixed width.
    `fontsize`
        Size of the font used. Size is specified in half points. The
        default is 24 half-points, giving a size 12 font.
    .. versionadded:: 2.0
    """
    name = 'RTF'
    aliases = ['rtf']
    filenames = ['*.rtf']
    def __init__(self, **options):
        r"""
        Additional options accepted:
        ``fontface``
            Name of the font used. Could for example be ``'Courier New'``
            to further specify the default which is ``'\fmodern'``. The RTF
            specification claims that ``\fmodern`` are "Fixed-pitch serif
            and sans serif fonts". Hope every RTF implementation thinks
            the same about modern...
        """
        Formatter.__init__(self, **options)
        self.fontface = options.get('fontface') or ''
        self.fontsize = get_int_opt(options, 'fontsize', 0)
    def _escape(self, text):
        # Backslash and braces are RTF's control characters and must be
        # escaped before any other processing.
        return text.replace(u'\\', u'\\\\') \
                   .replace(u'{', u'\\{') \
                   .replace(u'}', u'\\}')
    def _escape_text(self, text):
        """Escape *text* for RTF output, encoding non-ASCII characters."""
        # empty strings, should give a small performance improvement
        if not text:
            return u''
        # escape text
        text = self._escape(text)
        buf = []
        for c in text:
            cn = ord(c)
            if cn < (2**7):
                # ASCII character
                buf.append(str(c))
            elif (2**7) <= cn < (2**16):
                # single unicode escape sequence
                buf.append(u'{\\u%d}' % cn)
            elif (2**16) <= cn:
                # RTF limits unicode to 16 bits.
                # Force surrogate pairs
                buf.append(u'{\\u%d}{\\u%d}' % _surrogatepair(cn))
        return u''.join(buf).replace(u'\n', u'\\par\n')
    def format_unencoded(self, tokensource, outfile):
        """Write a complete RTF document for *tokensource* to *outfile*."""
        # rtf 1.8 header
        outfile.write(u'{\\rtf1\\ansi\\uc0\\deff0'
                      u'{\\fonttbl{\\f0\\fmodern\\fprq1\\fcharset0%s;}}'
                      u'{\\colortbl;' % (self.fontface and
                                         u' ' + self._escape(self.fontface) or
                                         u''))
        # convert colors and save them in a mapping to access them later.
        color_mapping = {}
        offset = 1
        for _, style in self.style:
            for color in style['color'], style['bgcolor'], style['border']:
                if color and color not in color_mapping:
                    color_mapping[color] = offset
                    outfile.write(u'\\red%d\\green%d\\blue%d;' % (
                        int(color[0:2], 16),
                        int(color[2:4], 16),
                        int(color[4:6], 16)
                    ))
                    offset += 1
        outfile.write(u'}\\f0 ')
        if self.fontsize:
            outfile.write(u'\\fs%d' % (self.fontsize))
        # highlight stream
        for ttype, value in tokensource:
            # Walk up the token hierarchy until we find a styled type.
            while not self.style.styles_token(ttype) and ttype.parent:
                ttype = ttype.parent
            style = self.style.style_for_token(ttype)
            buf = []
            if style['bgcolor']:
                buf.append(u'\\cb%d' % color_mapping[style['bgcolor']])
            if style['color']:
                buf.append(u'\\cf%d' % color_mapping[style['color']])
            if style['bold']:
                buf.append(u'\\b')
            if style['italic']:
                buf.append(u'\\i')
            if style['underline']:
                buf.append(u'\\ul')
            if style['border']:
                buf.append(u'\\chbrdr\\chcfpat%d' %
                           color_mapping[style['border']])
            start = u''.join(buf)
            if start:
                outfile.write(u'{%s ' % start)
            outfile.write(self._escape_text(value))
            if start:
                outfile.write(u'}')
        outfile.write(u'}')
| {
"content_hash": "2ffb8ac8f2d64088cbb50ba1122dc1f5",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 81,
"avg_line_length": 34.38356164383562,
"alnum_prop": 0.5135458167330678,
"repo_name": "spencerlyon2/pygments",
"id": "abecd4847b0024863481d3a31332de8fe3d58f9a",
"size": "5046",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pygments/formatters/rtf.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "APL",
"bytes": "587"
},
{
"name": "ASP",
"bytes": "636"
},
{
"name": "ActionScript",
"bytes": "5686"
},
{
"name": "Ada",
"bytes": "5145"
},
{
"name": "Agda",
"bytes": "3018"
},
{
"name": "Alloy",
"bytes": "6579"
},
{
"name": "AppleScript",
"bytes": "421"
},
{
"name": "Assembly",
"bytes": "3294"
},
{
"name": "AutoHotkey",
"bytes": "3733"
},
{
"name": "AutoIt",
"bytes": "692"
},
{
"name": "Awk",
"bytes": "4528"
},
{
"name": "BlitzBasic",
"bytes": "1824"
},
{
"name": "BlitzMax",
"bytes": "2387"
},
{
"name": "Boo",
"bytes": "1111"
},
{
"name": "Bro",
"bytes": "7337"
},
{
"name": "C",
"bytes": "109073"
},
{
"name": "C#",
"bytes": "17784"
},
{
"name": "C++",
"bytes": "79372"
},
{
"name": "COBOL",
"bytes": "117432"
},
{
"name": "CSS",
"bytes": "14802"
},
{
"name": "Ceylon",
"bytes": "1387"
},
{
"name": "Chapel",
"bytes": "4366"
},
{
"name": "Cirru",
"bytes": "520"
},
{
"name": "Clean",
"bytes": "2878"
},
{
"name": "Clojure",
"bytes": "23871"
},
{
"name": "CoffeeScript",
"bytes": "20149"
},
{
"name": "ColdFusion",
"bytes": "9263"
},
{
"name": "Common Lisp",
"bytes": "91743"
},
{
"name": "Coq",
"bytes": "66"
},
{
"name": "Cuda",
"bytes": "776"
},
{
"name": "D",
"bytes": "5475"
},
{
"name": "Dart",
"bytes": "591"
},
{
"name": "Dylan",
"bytes": "6343"
},
{
"name": "Ecl",
"bytes": "2599"
},
{
"name": "Eiffel",
"bytes": "2145"
},
{
"name": "Elixir",
"bytes": "4340"
},
{
"name": "Erlang",
"bytes": "5746"
},
{
"name": "F#",
"bytes": "19734"
},
{
"name": "FORTRAN",
"bytes": "27879"
},
{
"name": "Factor",
"bytes": "10194"
},
{
"name": "Fancy",
"bytes": "2581"
},
{
"name": "Fantom",
"bytes": "25331"
},
{
"name": "GAP",
"bytes": "15760"
},
{
"name": "Gnuplot",
"bytes": "10376"
},
{
"name": "Go",
"bytes": "172"
},
{
"name": "Gosu",
"bytes": "2853"
},
{
"name": "Groovy",
"bytes": "2586"
},
{
"name": "Haskell",
"bytes": "49530"
},
{
"name": "Haxe",
"bytes": "16812"
},
{
"name": "Hy",
"bytes": "7237"
},
{
"name": "IDL",
"bytes": "2098"
},
{
"name": "Idris",
"bytes": "2771"
},
{
"name": "Inform 7",
"bytes": "2046"
},
{
"name": "Ioke",
"bytes": "469"
},
{
"name": "Isabelle",
"bytes": "21392"
},
{
"name": "Java",
"bytes": "81611"
},
{
"name": "JavaScript",
"bytes": "1382"
},
{
"name": "Julia",
"bytes": "27687"
},
{
"name": "Kotlin",
"bytes": "971"
},
{
"name": "LSL",
"bytes": "160"
},
{
"name": "Lasso",
"bytes": "18650"
},
{
"name": "LiveScript",
"bytes": "972"
},
{
"name": "Logos",
"bytes": "306"
},
{
"name": "Logtalk",
"bytes": "7260"
},
{
"name": "Lua",
"bytes": "8677"
},
{
"name": "Monkey",
"bytes": "2587"
},
{
"name": "Moocode",
"bytes": "3343"
},
{
"name": "MoonScript",
"bytes": "14862"
},
{
"name": "Nemerle",
"bytes": "1517"
},
{
"name": "Nimrod",
"bytes": "37191"
},
{
"name": "Nit",
"bytes": "55581"
},
{
"name": "Nix",
"bytes": "2448"
},
{
"name": "OCaml",
"bytes": "42416"
},
{
"name": "Objective-C",
"bytes": "3385"
},
{
"name": "Objective-J",
"bytes": "24768"
},
{
"name": "Opa",
"bytes": "172"
},
{
"name": "OpenEdge ABL",
"bytes": "318"
},
{
"name": "PAWN",
"bytes": "6555"
},
{
"name": "PHP",
"bytes": "17296"
},
{
"name": "Pan",
"bytes": "1241"
},
{
"name": "Pascal",
"bytes": "84519"
},
{
"name": "Perl6",
"bytes": "4692"
},
{
"name": "PigLatin",
"bytes": "6657"
},
{
"name": "Pike",
"bytes": "8479"
},
{
"name": "PowerShell",
"bytes": "6127"
},
{
"name": "Prolog",
"bytes": "738"
},
{
"name": "Puppet",
"bytes": "130"
},
{
"name": "Python",
"bytes": "2837637"
},
{
"name": "R",
"bytes": "4057"
},
{
"name": "Racket",
"bytes": "11341"
},
{
"name": "Rebol",
"bytes": "1887"
},
{
"name": "Red",
"bytes": "10792"
},
{
"name": "Ruby",
"bytes": "91403"
},
{
"name": "Rust",
"bytes": "6787"
},
{
"name": "Scala",
"bytes": "327"
},
{
"name": "Scheme",
"bytes": "47137"
},
{
"name": "Scilab",
"bytes": "943"
},
{
"name": "Shell",
"bytes": "118660"
},
{
"name": "ShellSession",
"bytes": "320"
},
{
"name": "Smalltalk",
"bytes": "156665"
},
{
"name": "SourcePawn",
"bytes": "130"
},
{
"name": "Standard ML",
"bytes": "36869"
},
{
"name": "Swift",
"bytes": "2035"
},
{
"name": "SystemVerilog",
"bytes": "265"
},
{
"name": "TypeScript",
"bytes": "535"
},
{
"name": "VHDL",
"bytes": "4446"
},
{
"name": "VimL",
"bytes": "16922"
},
{
"name": "Visual Basic",
"bytes": "17210"
},
{
"name": "XQuery",
"bytes": "4289"
},
{
"name": "XSLT",
"bytes": "755"
},
{
"name": "Xtend",
"bytes": "727"
},
{
"name": "Zephir",
"bytes": "485"
},
{
"name": "eC",
"bytes": "26388"
},
{
"name": "nesC",
"bytes": "23697"
},
{
"name": "xBase",
"bytes": "3349"
}
],
"symlink_target": ""
} |
import os
from glob import glob
from utils import *
def get_img_row(overall_img, img_size, i, channels=10):
    """Return all images of one slice of a brain, separated by channel.

    overall_img: 2-D array holding a sheet of slice images; each slice is a
        horizontal band of height ``img_size`` containing ``channels`` images
        side by side.
    img_size: height (and width) of one image in pixels.
    i: zero-based index of the slice (band) to extract.
    channels: number of images per band (generalized from the previous
        hard-coded 10; the default preserves the old behavior).

    Returns an array of shape [channels, img_size, img_size].
    """
    start = i * img_size
    band = overall_img[start:start + img_size]
    # Split the band into `channels` equal-width images, stacked on axis 0.
    return np.array(np.split(band, channels, axis=1))
def prepare_images(file, batch_size=16):
    """Load a sheet of brain-slice images from *file* and split it up.

    file: path of an image whose rows are bands of slices, each band holding
        10 channel images side by side (e.g. a 2048x1280 sheet).
    batch_size: unused; kept for backward compatibility with existing callers.

    Returns an array of shape [slices, channels, img_size, img_size].
    """
    channels = 10  # 10 dimensions for each slice of a brain
    # NOTE(review): scipy.misc.imread is deprecated/removed in modern SciPy;
    # consider imageio.imread when the environment allows it.
    input_img = np.array(scipy.misc.imread(file, flatten=True))  # (2048, 1280)
    img_size = int(input_img.shape[1] / channels)
    slices = int(input_img.shape[0] / img_size)
    output = np.empty((slices, channels, img_size, img_size))
    # Bug fix: iterate over every slice actually present in the image instead
    # of the hard-coded 16, which broke for sheets with a different slice count.
    for i in range(slices):
        output[i] = get_img_row(input_img, img_size, i)
    return output
def example_metric(source, target):
    """Example region metric: |sum(source) - sum(target)|.

    Both arguments are arrays holding the image of one tumor region.
    """
    intensity_diff = source.sum() - target.sum()
    return np.abs(intensity_diff)
def execute_metrics(images, metrics=None):
    """Evaluate each metric on every slice's three tumor regions.

    images: array of shape [slices, 10, H, W]; channels 0-2 hold the
        ground-truth tumor regions and channels 7-9 the predicted ones
        (channels 3-6 are not used here).
    metrics: list of callables ``metric(gt_region, pred_region) -> float``;
        defaults to ``[example_metric]``.

    Returns an array of shape [slices, len(metrics), 3].
    """
    # Avoid the mutable-default-argument pitfall; resolve the default lazily.
    if metrics is None:
        metrics = [example_metric]
    # For each image, metric and tumor region
    results = np.empty((images.shape[0], len(metrics), 3))
    for i, row in enumerate(images):
        gt = row[:3]
        predicted = row[7:]
        for j, metric in enumerate(metrics):
            for k, gt_region, pred_region in zip(range(3), gt, predicted):
                results[i, j, k] = metric(gt_region, pred_region)
    # TODO: Later plot the image with a table containing the metrics (for presentation)
    return results
if __name__ == '__main__':
    # Smoke test: load one composite test image and print its metric table.
    images = prepare_images(file='test-x/test_0118.png')
    metrics = execute_metrics(images)
    # slices(rows) x metrics x 3(tumor regions)
    print(metrics)
| {
"content_hash": "5244a61b551d8d8bb21700349d02f97f",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 91,
"avg_line_length": 37.56818181818182,
"alnum_prop": 0.64670296430732,
"repo_name": "ultra-lstm/RNA-GAN",
"id": "c7fc62c3cbdbe59759cfad4522a142b3bc8a26dd",
"size": "1653",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cGAN/metrics.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "41425"
}
],
"symlink_target": ""
} |
# Advent of Code 2020, day 6 part 1: groups are blocks of lines separated by
# blank lines; count the distinct questions anyone in a group answered "yes"
# to, and sum those counts across all groups.
sum_of_answered_yes = 0
with open("input.txt", "r") as f:
    lines = [line.rstrip() for line in f.readlines()]
group_answers = set()
for line in lines:
    if line == "":
        # End of a group: tally its distinct answers and start fresh.
        sum_of_answered_yes += len(group_answers)
        group_answers = set()
    else:
        group_answers.update(line)
# The file may not end with a blank line, so flush the final group too.
sum_of_answered_yes += len(group_answers)
print(f"\nWhat is the sum of those counts? {sum_of_answered_yes}")
| {
"content_hash": "3e0593fa881778be826564b3b1749fcc",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 66,
"avg_line_length": 29.42105263157895,
"alnum_prop": 0.5778175313059034,
"repo_name": "neiesc/Problem-solving",
"id": "d31cb22a56fc949a332e3eb055ac17bcbaa6e6cc",
"size": "559",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "AdventOfCode/2020/day6/day6-pt1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "42048"
},
{
"name": "Elixir",
"bytes": "7313"
},
{
"name": "JavaScript",
"bytes": "1052"
},
{
"name": "Python",
"bytes": "58944"
},
{
"name": "Scala",
"bytes": "333"
},
{
"name": "Shell",
"bytes": "317"
}
],
"symlink_target": ""
} |
from ducktape.mark import matrix
from ducktape.mark.resource import cluster
from kafkatest.tests.kafka_test import KafkaTest
from kafkatest.services.kafka import quorum
from kafkatest.services.streams import StreamsSmokeTestShutdownDeadlockService
class StreamsShutdownDeadlockTest(KafkaTest):
    """
    Simple test of Kafka Streams: verifies that shutting down the
    ShutdownDeadLockTest driver does not deadlock.
    """
    def __init__(self, test_context):
        # One ZooKeeper node, one broker, and a single-partition source topic.
        super(StreamsShutdownDeadlockTest, self).__init__(test_context, num_zk=1, num_brokers=1, topics={
            'source' : { 'partitions': 1, 'replication-factor': 1 }
        })
        self.driver = StreamsSmokeTestShutdownDeadlockService(test_context, self.kafka)
    @cluster(num_nodes=3)
    @matrix(metadata_quorum=[quorum.remote_kraft])
    def test_shutdown_wont_deadlock(self, metadata_quorum):
        """
        Start ShutdownDeadLockTest, wait for up to 1 minute, and check that the process exited.
        If it hasn't exited then fail as it is deadlocked
        """
        self.driver.start()
        # wait() raises if the process is still alive after the timeout,
        # i.e. if shutdown deadlocked.
        self.driver.wait(timeout_sec=60)
        self.driver.stop_nodes(clean_shutdown=False)
        self.driver.stop()
| {
"content_hash": "a29699bd77cf4e05a163e0b70574040c",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 105,
"avg_line_length": 32.285714285714285,
"alnum_prop": 0.6938053097345133,
"repo_name": "apache/kafka",
"id": "3da5b481da9ffbe18ad11dfc13a0b91c336457ba",
"size": "1911",
"binary": false,
"copies": "3",
"ref": "refs/heads/trunk",
"path": "tests/kafkatest/tests/streams/streams_shutdown_deadlock_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "32930"
},
{
"name": "Dockerfile",
"bytes": "9184"
},
{
"name": "HTML",
"bytes": "3739"
},
{
"name": "Java",
"bytes": "33868408"
},
{
"name": "Python",
"bytes": "1153808"
},
{
"name": "Roff",
"bytes": "39396"
},
{
"name": "Scala",
"bytes": "10004229"
},
{
"name": "Shell",
"bytes": "107622"
},
{
"name": "XSLT",
"bytes": "7116"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds the 'posted_by' foreign key linking a News item to the
    # UserProfile that posted it.

    dependencies = [
        ('login', '0017_remove_userprofile_theme'),
        ('news_event', '0007_auto_20160730_1343'),
    ]

    operations = [
        migrations.AddField(
            model_name='news',
            name='posted_by',
            # null=True keeps pre-existing News rows valid after migrating.
            field=models.ForeignKey(to='login.UserProfile', null=True),
        ),
    ]
| {
"content_hash": "15e968a13e5433a590675f2b3d7b5801",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 71,
"avg_line_length": 23.842105263157894,
"alnum_prop": 0.5960264900662252,
"repo_name": "BuildmLearn/University-Campus-Portal-UCP",
"id": "e909e84a3d8dd5a4911a7d3ed7b99d3ec8833adb",
"size": "477",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "UCP/news_event/migrations/0008_news_posted_by.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "91894"
},
{
"name": "HTML",
"bytes": "99158"
},
{
"name": "JavaScript",
"bytes": "1069551"
},
{
"name": "Python",
"bytes": "137868"
}
],
"symlink_target": ""
} |
import json
import time
import telnetlib
from fabric.api import *
from fabric.colors import *
from fabric.contrib.console import *
from fabric.contrib.files import *
import config
# Module-wide telnet session to the Conf master; established by connect().
cm_conn = None
class Output:
    """Lightweight result holder for Conf-master queries.

    ``cm`` reports whether the Conf-master link was usable; callers attach
    further attributes (``json``, ``appdata_list``, ...) as needed.
    """

    def __init__(self):
        # Optimistically assume the Conf master is reachable.
        self.cm = True
def init():
    """Try up to three times to reach the Conf master.

    Returns True as soon as one connect() attempt succeeds, False otherwise.
    """
    attempt = 0
    while attempt < 3:
        if connect():
            return True
        time.sleep(1)
        attempt += 1
    return False
def connect():
    """Open a telnet session to the Conf master and verify it is the leader.

    On a 'redirect' reply the module-level config is re-pointed at the real
    master and False is returned so the caller can simply retry.
    Returns True only when 'cluster_ls' answers with state 'success'.
    """
    global cm_conn

    # Ping Check
    try:
        cm_conn = telnetlib.Telnet(config.CONF_MASTER_IP, config.CONF_MASTER_PORT)
        # cluster_ls doubles as a liveness/leadership probe.
        cm_conn.write('cluster_ls\r\n')
        ret = cm_conn.read_until('\r\n', 1)
        ret = json.loads(ret)
        if 'redirect' == ret['state']:
            # Not the leader: remember the redirected address for the retry.
            config.CONF_MASTER_IP = ret['data']['ip']
            config.CONF_MASTER_PORT = int(ret['data']['port'])
            return False
        elif 'success' != ret['state']:
            warn(red('Can not connect to Conf master. Aborting...'))
            return False
    except:
        # NOTE(review): bare except also swallows programming errors;
        # narrowing to (IOError, ValueError, KeyError) would be safer.
        warn(red('Can not connect to Conf master. Aborting...'))
        return False

    return True
def check_connection():
    """Return True when the Conf-master link answers a ping; otherwise try one reconnect cycle."""
    # ping() yields True/False, so short-circuit straight into reconnect().
    return ping() or reconnect()
def ping():
    """Probe the cached telnet session; True only on a 'success'/'+PONG' reply."""
    try:
        cm_conn.write('ping\r\n')
        ret = cm_conn.read_until('\r\n', 3)
        json_data = json.loads(ret)
        if json_data['state'] != 'success' or json_data['msg'] != '+PONG':
            return False
    except:
        # Any I/O or parse failure counts as a dead connection.
        return False

    return True
def reconnect():
    """Drop the cached Conf-master session and retry connect() up to three times."""
    global cm_conn
    if cm_conn is not None:
        cm_conn.close()

    for _ in range(3):
        if connect():
            return True
        time.sleep(1)
    return False
def pg_list(cluster_name):
    """Fetch info for every partition group (PG) of *cluster_name*.

    Returns an Output whose ``json`` holds the PG list sorted by pg_id;
    ``json`` stays None (and ``cm`` may be False) on failure.
    """
    out = Output()
    out.json = None
    if check_connection() == False:
        out.cm = False
        return out

    out_cluster_info = cluster_info(cluster_name)
    if out_cluster_info.cm == False:
        out.cm = False
        return out

    cluster_json = out_cluster_info.json
    # NOTE(review): cluster_info() leaves json as None when the cluster is
    # missing, so this subscript can raise TypeError — verify with callers.
    if cluster_json['state'] != 'success':
        warn(red("Cluster '%s' doesn't exist." % cluster_name))
        return out

    slot_map = slot_rle_to_map(cluster_json['data']['cluster_info']['PN_PG_Map'])

    # Batch every pg_info command into a single write, then read the
    # replies back in the same order.
    cmd = ''
    for pg in cluster_json['data']['pg_list']:
        pg_id = pg['pg_id'].encode('ascii')
        cmd = cmd + 'pg_info %s %s\r\n' % (cluster_name, pg_id)
    cm_conn.write(cmd)

    pg_list = []
    for pg in cluster_json['data']['pg_list']:
        pg_id = pg['pg_id'].encode('ascii')
        reply = cm_conn.read_until('\r\n', 1)
        pg_json = pgstr_to_json(int(pg_id), reply, slot_map)
        pg_list.append(pg_json)

    out.json = sorted(pg_list, key=lambda x: int(x['pg_id']))
    return out
def slot_rle_to_map(pn_pg_map):
    """Expand a run-length-encoded PN->PG slot string into per-PG ranges.

    ``pn_pg_map`` is a space-separated sequence of "<pg_id> <run_length>"
    pairs (e.g. "0 4096 1 4096").  Returns ``{pg_id: "start:end ..."}``
    where each range covers one contiguous run of slots owned by that PG.

    Improvements over the original: pairwise iteration via ``range`` step 2
    instead of a manual index ``while`` loop, ``setdefault`` instead of the
    ``not in d.keys()`` anti-idiom, and a trailing unpaired token no longer
    raises IndexError (it is ignored).
    """
    tokens = pn_pg_map.split(' ')
    slot_map = {}
    slot_no = 0
    # Tokens come in (pg_id, run_length) pairs.
    for i in range(0, len(tokens) - 1, 2):
        pg_id = int(tokens[i])
        slot_len = int(tokens[i + 1])
        slot_map.setdefault(pg_id, []).append(
            '%s:%s' % (slot_no, slot_no + slot_len - 1))
        slot_no += slot_len
    # Collapse each PG's range list into one display string.
    for pg_id, ranges in slot_map.items():
        slot_map[pg_id] = ' '.join(ranges)
    return slot_map
# On success, get_pgs_list() returns map of pgs; on error, it returns None.
def get_pgs_list(cluster_name, pg_id):
    """Collect pgs_info for every PGS belonging to PG *pg_id*.

    Returns an Output whose ``json`` maps pgs_id -> pgs data on success,
    an Output with ``cm`` False on connection loss, or None when the PG or
    one of its PGS entries cannot be found.
    NOTE(review): the mixed Output/None return contract forces callers to
    check both; dict.iteritems() below makes this module Python 2 only.
    """
    out = Output()
    out.json = None
    if check_connection() == False:
        out.cm = False
        return out

    # Get cluster info from Conf Master
    out_cluster_info = cluster_info(cluster_name)
    if out_cluster_info.cm == False:
        out.cm = False
        return out
    if out_cluster_info.json == None:
        return None
    cluster_json_data = out_cluster_info.json

    # Verify the requested PG id actually appears in the cluster's pg_list.
    exist = [k for loop_pg_data in cluster_json_data ['data']['pg_list']
             for k, v in loop_pg_data.iteritems() if k == 'pg_id' and int(v) == pg_id]
    if len(exist) == 0:
        warn(red("PG '%d' doesn't exist." % pg_id))
        return None

    pgs_list = {}

    # Get PG and PGS info from Json
    for pg_data in cluster_json_data['data']['pg_list']:
        cur_pg_id = int(pg_data['pg_id'])
        if cur_pg_id != pg_id:
            continue

        for pgs_id in pg_data['pg_data']['pgs_ID_List']:
            out_pgs_info = pgs_info(cluster_name, pgs_id)
            if out_pgs_info.cm == False:
                out.cm = False
                return out
            if out_pgs_info.json == None:
                warn(red("PGS '%s' doesn't exist." % pgs_id))
                return None
            pgs_list[pgs_id] = out_pgs_info.json['data']

    out.json = pgs_list
    return out
def pgs_info(cluster_name, pgs_id):
    """Fetch one PGS (partition-group server) record from the Conf master.

    Returns an Output; ``json`` stays None when the connection is down or
    the PGS does not exist.  On success the reply's CamelCase fields are
    aliased to the snake_case names the rest of the tooling expects.

    Fix: dropped the original's no-op self-assignment
    ``data['hb'] = data['hb']``.
    """
    out = Output()
    out.json = None
    if check_connection() == False:
        out.cm = False
        return out

    cm_conn.write('pgs_info %s %d\r\n' % (cluster_name, pgs_id))
    ret = cm_conn.read_until('\r\n', 1)
    json_data = json.loads(ret)
    if json_data['state'] != 'success':
        return out

    # Alias reply fields to conventional snake_case keys.
    data = json_data['data']
    data['pgs_id'] = pgs_id
    data['pg_id'] = data['pg_ID']
    data['ip'] = data['pm_IP']
    data['redis_port'] = data['backend_Port_Of_Redis']
    data['smr_base_port'] = data['replicator_Port_Of_SMR']
    data['mgmt_port'] = data['management_Port_Of_SMR']
    data['smr_role'] = data['smr_Role'].encode('ascii')

    out.json = json_data
    return out
def pgstr_to_json(pg_id, json_str, slot_map):
    """Parse a pg_info reply and attach the PG id plus its slot ranges.

    Returns the reply's ``data`` dict enriched with ``pg_id``, ``slot`` and
    ``master_Gen``; returns None (after warning) on a non-success reply.
    """
    reply = json.loads(json_str)
    if reply['state'] != 'success':
        warn(red("PG '%d' doesn't exist." % pg_id))
        return None

    pg_json = reply['data']
    pg_json['pg_id'] = pg_id
    slot = slot_map.get(pg_id)
    pg_json['slot'] = '' if slot is None else slot
    # master_Gen_Map is keyed by generation number; the current generation
    # is one past the highest key seen.
    pg_json['master_Gen'] = max(int(gen) for gen in pg_json['master_Gen_Map']) + 1
    return pg_json
def cluster_ls():
    """List all clusters known to the Conf master.

    Returns an Output: ``cm`` is False when the connection is down;
    ``json`` holds the parsed reply on success and stays None otherwise.
    """
    out = Output()
    out.json = None
    if check_connection() == False:
        out.cm = False
        # BUG FIX: previously returned None here, breaking the Output
        # contract every other helper in this module follows (callers
        # checking .cm would crash on NoneType).
        return out

    cm_conn.write('cluster_ls\r\n')
    ret = cm_conn.read_until('\r\n', 1)
    json_data = json.loads(ret)
    if json_data['state'] != 'success':
        warn(red("cluster_ls fail. reply:%s" % ret))
        return out

    out.json = json_data
    return out
def cluster_info(cluster_name):
    """Fetch cluster metadata for *cluster_name* from the Conf master.

    Returns an Output: ``cm`` False on connection loss; ``json`` carries the
    parsed reply on success, otherwise None (after a warning).
    """
    result = Output()
    result.json = None
    if not check_connection():
        result.cm = False
        return result

    cm_conn.write('cluster_info %s\r\n' % cluster_name)
    reply = json.loads(cm_conn.read_until('\r\n', 1))
    if reply['state'] == 'success':
        result.json = reply
    else:
        warn(red("Cluster '%s' doesn't exist." % cluster_name))
    return result
# return list of appdata
def appdata_get(cluster_name):
    """Fetch every 'backup' appdata entry registered for *cluster_name*.

    Returns an Output; on success ``appdata_list`` holds the normalized
    entries (each given a synthesized ``name`` of "<cluster>_<backup_id>"),
    on failure it stays None and ``cm`` is False for I/O errors.
    """
    out = Output()
    out.appdata_list = None
    if check_connection() == False:
        out.cm = False
        return out

    try:
        cmd = 'appdata_get %s backup all\r\n' % cluster_name
        cm_conn.write(cmd)
        appdata_list = cm_conn.read_until('\r\n')
        appdata_list = json.loads(appdata_list)
        if appdata_list['state'] != 'success':
            return out

        appdata_list = appdata_list['data']
        for appdata in appdata_list:
            # Normalize unicode reply fields to plain ascii str
            # (Python 2 semantics of .encode('ascii')).
            appdata['name'] = '%s_%d' % (cluster_name, appdata['backup_id'])
            appdata['cluster_name'] = cluster_name
            appdata['type'] = appdata['type'].encode('ascii')
            appdata['period'] = appdata['period'].encode('ascii')
            appdata['holding_period'] = int(appdata['holding_period'].encode('ascii'))
            appdata['base_time'] = appdata['base_time'].encode('ascii')
            appdata['net_limit'] = int(appdata['net_limit'].encode('ascii'))
            appdata['output_format'] = appdata['output_format'].encode('ascii')
            appdata['service_url'] = appdata['service_url'].encode('ascii')

        out.appdata_list = appdata_list
        return out
    except IOError as e:
        warn(red(e))
        out.cm = False
        return out
| {
"content_hash": "c6b57f49ee40c64e18dbee6cbaf3208f",
"timestamp": "",
"source": "github",
"line_count": 296,
"max_line_length": 93,
"avg_line_length": 27.945945945945947,
"alnum_prop": 0.5530705996131529,
"repo_name": "otheng03/nbase-arc",
"id": "94f95b668fa71c231e198bedb578452d29d08540",
"size": "8855",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tools/batch_daemon/cm.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1151"
},
{
"name": "C",
"bytes": "6437039"
},
{
"name": "C++",
"bytes": "394336"
},
{
"name": "CSS",
"bytes": "1647"
},
{
"name": "HTML",
"bytes": "416894"
},
{
"name": "Java",
"bytes": "3113775"
},
{
"name": "Lua",
"bytes": "11887"
},
{
"name": "M4",
"bytes": "71933"
},
{
"name": "Makefile",
"bytes": "190328"
},
{
"name": "Objective-C",
"bytes": "24462"
},
{
"name": "Perl",
"bytes": "175311"
},
{
"name": "Python",
"bytes": "1215722"
},
{
"name": "Roff",
"bytes": "937514"
},
{
"name": "Ruby",
"bytes": "71136"
},
{
"name": "Shell",
"bytes": "277781"
},
{
"name": "Smarty",
"bytes": "1047"
},
{
"name": "Tcl",
"bytes": "471765"
},
{
"name": "XSLT",
"bytes": "303"
}
],
"symlink_target": ""
} |
"""
Connect dialog for entering the url, username, password of the WebDAV Server.
"""
from PyQt4 import QtCore
from PyQt4.QtGui import QDialog
from datafinder.gui.gen.user.authentification_connect_dialog_ui import Ui_AuthConnectDialog
from datafinder.gui.user.dialogs.authentification_dialog.auth_pref_dialog import AuthPrefDialogView
from datafinder.gui.user.dialogs.authentification_dialog.auth_edit_dialog import AuthEditDialogView
__version__ = "$Revision-Id:$"
class AuthConnectDialogView(QDialog, Ui_AuthConnectDialog):
    """
    The connection dialog is displayed when the datafinder has to establish a connection to
    a webdav server or any other server needing authentication information.
    This dialog contains a field for entering a url and authentication credentials such as username and password.
    """

    def __init__(self, preferences=None, parent=None,):
        """
        Constructor.

        @param parent: Parent window of this L{QtGui.QDialog}
        @type parent: C{QtGui.QWidget}
        @param preferences: The preferences object.
        @type preferences: L{PreferencesHandler<datafinder.core.configuration.preferences.PreferencesHandler>}
        """
        QDialog.__init__(self, parent)
        Ui_AuthConnectDialog.__init__(self)
        self.setupUi(self)
        self._preferences = preferences

        # Old-style PyQt4 signal wiring for the dialog's buttons and the
        # URL combobox.
        self.connect(self.cancelButton, QtCore.SIGNAL("clicked()"), self.reject)
        self.connect(self.connectButton, QtCore.SIGNAL("clicked()"), self.accept)
        self.connect(self.urlComboBox, QtCore.SIGNAL("currentIndexChanged(const QString)"), self._urlChangedSlot)
        self.connect(self.preferencesButton, QtCore.SIGNAL("clicked()"), self._preferencesActionSlot)
        self.connect(self.editButton, QtCore.SIGNAL("clicked()"), self._editLocationActionSlot)

        # Goes through the 'uri' property setter: fills the URL combobox.
        self.uri = preferences.connectionUris

    def _urlChangedSlot(self, newUri):
        """ Implementing changing of connection URI. """
        uri = unicode(newUri)
        connection = self._preferences.getConnection(uri)
        if not connection is None:
            # Restore the credentials stored for the newly selected URI.
            self.username = connection.username
            self.password = connection.password
            self.savePasswordFlag = not connection.password is None

    def _getUrl(self):
        """
        Returns the entered url.

        @return: The url that was entered in the combobox.
        @rtype: C{string}
        """
        return unicode(self.urlComboBox.lineEdit().text())

    def _setUrl(self, urls):
        """
        Appends urls to the L{QtGui.QComboBox} widget.

        @param urls: A list of urls that has to be added.
        @type urls: C{list}
        """
        for url in urls:
            self.urlComboBox.addItem(url)

    def _getUsername(self):
        """
        Returns the username that was entered by the user.

        @return: The username that was entered.
        @rtype: C{string}
        """
        return unicode(self.usernameLineEdit.text())

    def _setUsername(self, username):
        """
        Set a string that in the username field.

        @param username: The username that has to be in the username field.
        @type username: C{string}
        """
        self.usernameLineEdit.setText(username or "")

    def _getPassword(self):
        """
        Returns the password from the password field.

        @return: Returns the password in the password field.
        @rtype: C{string}
        """
        return unicode(self.passwordLineEdit.text())

    def _setPassword(self, password):
        """
        Sets the password in the password field.

        @param password: The password that has to be in the password field.
        @type password: C{string}
        """
        self.passwordLineEdit.setText(password or "")

    def _getSavePassword(self):
        """
        Returns true when the save password L{QtGui.QCheckBox} is checked else false.

        @return: True when the L{QtGui.QCheckBox} is checked else False.
        @rtype: C{boolean}
        """
        return self.savePasswordCheckBox.isChecked()

    def _setSavePassword(self, checked):
        """
        Set the state of the save password L{QtGui.QCheckBox}.

        @param checked: True when the L{QtGui.QCheckBox} has to be checked else False.
        @type checked: C{boolean}
        """
        self.savePasswordCheckBox.setChecked(checked)

    def _setShowUrl(self, show):
        """
        Show or hide the server groupbox by the given show parameter.

        @param show: True when the server groupbox has to be shown else False.
        @type show: C{boolean}
        """
        self.serverGroupBox.setHidden(not show)

    # NOTE(review): the 'uri' property is asymmetric — the getter returns
    # the currently selected URL string while the setter expects a list of
    # URLs to append to the combobox.
    uri = property(_getUrl, _setUrl)

    username = property(_getUsername, _setUsername)

    password = property(_getPassword, _setPassword)

    savePasswordFlag = property(_getSavePassword, _setSavePassword)

    showUrl = property(fset=_setShowUrl)

    def _preferencesActionSlot(self):
        """ Shows the preferences dialog for connection settings. """
        preferencesDialog = AuthPrefDialogView(None, self._preferences)
        #preferencesDialog.useLdap = self._preferences.useLdap
        #preferencesDialog.ldapBaseDn = self._preferences.ldapBaseDn
        #preferencesDialog.ldapServerUri = self._preferences.ldapServerUri
        preferencesDialog.fillingTable(self._preferences.connectionUris)
        if preferencesDialog.exec_() == QDialog.Accepted:
            # Persist the LDAP settings chosen in the preferences dialog.
            self._preferences.useLdap = preferencesDialog.useLdap
            self._preferences.ldapBaseDn = preferencesDialog.ldapBaseDn
            self._preferences.ldapServerUri = preferencesDialog.ldapServerUri

    def _editLocationActionSlot(self):
        """ Shows the edit location dialog for more information on the location settings"""
        editDialog = AuthEditDialogView (None, self._preferences, self.uri)
        if editDialog.exec_() == QDialog.Accepted:
            # NOTE(review): leftover debug output (Python 2 print statement).
            print "good job"
| {
"content_hash": "ae3f1e67b9dbd9fbffd058bba539cb0b",
"timestamp": "",
"source": "github",
"line_count": 184,
"max_line_length": 115,
"avg_line_length": 34.016304347826086,
"alnum_prop": 0.6341268573254514,
"repo_name": "DLR-SC/DataFinder",
"id": "d75677088e9bfe95c86b3bbf9de72b7b4071d0b7",
"size": "7953",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/datafinder/gui/user/dialogs/authentification_dialog/auth_connect_dialog.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "NSIS",
"bytes": "7649"
},
{
"name": "Python",
"bytes": "7056802"
},
{
"name": "QMake",
"bytes": "1975"
}
],
"symlink_target": ""
} |
from django.contrib import messages
from django.core.urlresolvers import reverse_lazy, reverse
from django.http import JsonResponse
from django.views.generic import View
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.views.generic.detail import DetailView
import requests
from django_tables2.views import RequestConfig, SingleTableView
from django.http import HttpResponseRedirect, HttpResponse
from django.conf import settings
from django.views.generic.edit import FormView
from django.template.response import TemplateResponse
from plugin_manager.core.mixins.views import MultipleGroupRequiredMixin, \
GroupRequiredMixin
from plugin_manager.hosts import models, tables, forms
from plugin_manager.hosts.tables import HostPluginsInstalledTable
from plugin_manager.accounts.models import DeployUser, PermissionHost
from plugin_manager.hosts.forms import MembersAddForm
from plugin_manager.accounts.utils import \
get_filtered_user_queryset_from_filter_dict
from plugin_manager.hosts.util import getAdvancedPermissionBlock
from jenkinsapi.jenkins import Jenkins
from bs4 import BeautifulSoup
from plugin_manager.hosts.tasks import update_plugin, delete_plugin, upload_plugin
from django.views.generic.detail import SingleObjectMixin
import uuid
import os
class HostMembersDelete(MultipleGroupRequiredMixin,
                        DeleteView):
    """
    Delete a project member
    """
    group_required = ('Admin', )
    model = PermissionHost
    template_name = 'hosts/host_members_confirm_delete.html'

    def get_object(self, queryset=None):
        # Look up the PermissionHost row linking this user to this host;
        # .first() yields None when no such membership exists.
        return PermissionHost.objects.filter(user_id=self.kwargs['user_id'],
                                             host=self.kwargs[
                                                 'host_id']).first()
    # NOTE(review): the string below is a stray class-level statement, not
    # a docstring, and no ownership check is actually performed here.
    """ Hook to ensure object is owned by request.user. """
    def get_success_url(self):
        # Back to the member list of the host we just edited.
        return (reverse_lazy('hosts_host_members',
                             args=[self.kwargs['host_id']]))

    def post(self, request, *args, **kwargs):
        # "cancel" short-circuits back to the member list without deleting.
        if "cancel" in request.POST:
            url = self.get_success_url()
            return HttpResponseRedirect(url)
        else:
            return super(HostMembersDelete, self).post(
                request, *args, **kwargs)
class HostMemberAdd(CreateView):
    """
    If it receives an action of add it adds member to project
    """
    def get(self, request, *args, **kwargs):
        # Idempotently link the given user to the given host via
        # get_or_create, then reply with an empty 200.
        # NOTE(review): this mutates state on a GET request (no CSRF
        # protection) — consider requiring POST.
        hostid = kwargs.get('host_id')
        userid = request.GET.get('user_id')
        permhost = PermissionHost.objects.get_or_create(
            user=DeployUser.objects.get(pk=userid),
            host=models.Host.objects.get(pk=hostid))
        # permhost (an (obj, created) tuple) is intentionally unused.
        return HttpResponse()
class HostMembersAdd(MultipleGroupRequiredMixin,
                     CreateView):
    """
    Add a member to a host

    Renders a user-search form; users matching the querystring filters who
    are not already members of the host are listed in a selectable table.
    """
    group_required = ('Admin', )
    model = models.Host
    form_class = MembersAddForm
    template_name = 'hosts/host_members_add.html'
    # Search fields accepted from the querystring.
    formvalues = ['first_name', 'last_name', 'email', 'user_level']

    def get_success_url(self):
        return (reverse_lazy('hosts_host_members_add',
                             args=[self.kwargs['pk']]))

    def get_context_data(self, **kwargs):
        context = super(HostMembersAdd, self).get_context_data(**kwargs)
        # NOTE(review): the parent's context is discarded immediately below.
        context = {}
        users = ""
        context['pk'] = self.kwargs['pk']
        context['host'] = models.Host.objects.all().get(pk=context['pk'])
        context['form'] = MembersAddForm()
        # Pull optional search filters from the querystring, stripping
        # surrounding whitespace; absent/blank values become None.
        first_name = self.request.GET.get(
            'first_name').strip() if self.request.GET.get(
            'first_name') else None
        last_name = self.request.GET.get(
            'last_name').strip() if self.request.GET.get(
            'last_name') else None
        email = self.request.GET.get(
            'email').strip() if self.request.GET.get(
            'email') else None
        user_level = self.request.GET.get(
            'user_level').strip() if self.request.GET.get(
            'user_level') else None
        # Pre-fill the form so the search terms survive the round trip.
        context['form'].fields['first_name'].initial = first_name
        context['form'].fields['last_name'].initial = last_name
        context['form'].fields['email'].initial = email
        context['form'].fields['user_level'].initial = user_level
        fields = ['first_name', 'last_name', 'email', 'user_level']
        filterdict = {}
        for field in fields:
            if context['form'].fields[field].initial is not None:
                filterdict[field] = context['form'].fields[field].initial
        if (len(filterdict) != 0):
            # Filtered search, excluding users already linked to this host.
            users = get_filtered_user_queryset_from_filter_dict(
                filterdict).exclude(
                pk__in=PermissionHost.objects.filter(host=context[
                    'host']).values_list('user'))
        else:
            users = DeployUser.active_records.all()
        context['members'] = users
        members_table = tables.HostMembersAddTable(users, prefix='members_')
        RequestConfig(self.request, paginate={"per_page": getattr(settings,
                                                                  "NUM_RESULTS_PER_PAGE",
                                                                  None)}).configure(
            members_table)
        context['members_table'] = members_table
        # Only show the table once a search was made and produced results.
        if (first_name or last_name or email or user_level) and \
                (users.count() != 0):
            context['show_table'] = True
        else:
            context['show_table'] = False
        return context
class HostMembersList(MultipleGroupRequiredMixin,
                      DetailView):
    """
    Update a project members
    """
    group_required = ('Admin', )
    model = models.Host
    template_name = 'hosts/host_members.html'

    def get_context_data(self, **kwargs):
        """Add the host's current members and their paginated table to the context."""
        context = super(HostMembersList, self).get_context_data(**kwargs)
        # Users that have a PermissionHost row for this host.
        users = DeployUser.objects.filter(
            pk__in=PermissionHost.objects.filter(host=kwargs[
                'object']).values_list(
                'user'))
        context['members'] = users
        members_table = tables.HostMembersTable(users, prefix='members_',
                                                host_id=self.kwargs['pk'])
        RequestConfig(self.request, paginate={"per_page": getattr(
            settings, "NUM_RESULTS_PER_PAGE", None)}).configure(
            members_table)
        context['members_table'] = members_table
        return context
class HostList(MultipleGroupRequiredMixin, SingleTableView):
    """Paginated table of all hosts, visible to Admins and Deployers."""
    group_required = ['Admin', 'Deployer', ]
    table_class = tables.HostTable
    model = models.Host
    table_pagination = {
        "per_page": getattr(settings, "NUM_RESULTS_PER_PAGE", None)}
class HostDetail(MultipleGroupRequiredMixin, DetailView):
    """Show one host plus the Jenkins plugins currently installed on it.

    The installed-plugin list is read live from the host's Jenkins API and
    cross-referenced against the public update-center index to decide
    whether each plugin can be updated in place or must be re-uploaded.
    """
    group_required = ['Admin', 'Deployer', ]
    model = models.Host

    def get_url(self, name, available_names):
        """Pick the per-plugin action route.

        Plugins published in the public update center get the update route;
        anything else (e.g. a privately uploaded .hpi) gets the upload
        route.  (Fix: the second parameter was named 'list', shadowing the
        builtin; only called positionally within this class.)
        """
        if name in available_names:
            return 'hosts_host_plugin_update'
        else:
            return 'hosts_host_plugin_upload_wn'

    def get_context_data(self, **kwargs):
        context = super(HostDetail, self).get_context_data(**kwargs)
        host = self.get_object()
        context['pk'] = self.kwargs['pk']
        jenkins_server = Jenkins('http://'+host.name,host.jenkins_username,host.jenkins_password)
        ## Get Current Available Plugins
        html_plugins = 'http://updates.jenkins-ci.org/download/plugins/'
        html_doc = requests.get(html_plugins)
        soup = BeautifulSoup(html_doc.text, 'html.parser')
        asfind = soup.find_all('a')
        # The first five anchors are directory-listing chrome, not plugins.
        asfind = asfind[5:]
        plugins = [x.string.strip("/") for x in asfind]
        installed_plugins = jenkins_server.get_plugins().values()
        plugin_list = [{'name':x.shortName,'version':x.version,'url':self.get_url(x.shortName,plugins)} for x in installed_plugins]
        sorted_plugin_list = sorted(plugin_list, key=lambda k: k['name'])
        context['plugin'] = sorted_plugin_list
        table = HostPluginsInstalledTable(sorted_plugin_list,
                                          host_id=self.kwargs['pk'])
        RequestConfig(self.request, paginate={"per_page": getattr(
            settings, "NUM_RESULTS_PER_PAGE", None)}).configure(
            table)
        context['table'] = table
        return context
class HostPluginUpdate(FormView):
    """Update one Jenkins plugin on a host to a version picked from the
    public update center.

    NOTE(review): the real work happens in form_invalid() while
    form_valid() only resolves the success URL — presumably because the
    form's dynamic 'versions' choices make POSTs fail validation; verify
    against HostPluginUpdateForm before restructuring.
    """
    model = models.Host
    permission_required = ['hosts.change_host']
    form_class = forms.HostPluginUpdateForm
    template_name_suffix = '_update_plugin'
    template_name = 'hosts/host_update_plugin.html'

    def get_form_kwargs(self):
        # Scrape the update-center directory listing for this plugin's
        # published versions and hand them to the form as choices.
        kwargs = {}
        html_plugins = 'http://updates.jenkins-ci.org/download/plugins/%s' % self.kwargs['plugin_name']
        html_doc = requests.get(html_plugins)
        soup = BeautifulSoup(html_doc.text, 'html.parser')
        asfind = soup.find_all('a')
        versions = [x.string for x in asfind if x.string!='permalink to the latest']
        initial = super(HostPluginUpdate, self).get_initial()
        kwargs['versions'] = versions
        return kwargs

    def get_initial(self):
        """
        Returns the initial data to use for forms on this view.
        """
        # NOTE(review): duplicates the scraping done in get_form_kwargs().
        html_plugins = 'http://updates.jenkins-ci.org/download/plugins/%s' % self.kwargs['plugin_name']
        html_doc = requests.get(html_plugins)
        soup = BeautifulSoup(html_doc.text, 'html.parser')
        asfind = soup.find_all('a')
        versions = [x.string for x in asfind if x.string!='permalink to the latest']
        initial = super(HostPluginUpdate, self).get_initial()
        initial['versions'] = versions
        # NOTE(review): leftover debug output — consider removing.
        print("getting initials!")
        print(initial['versions'])
        return initial

    def get_context_data(self, **kwargs):
        context = super(HostPluginUpdate, self).get_context_data(**kwargs)
        context['plugin_name'] = self.kwargs['plugin_name']
        context['host_name'] = models.Host.objects.get(pk=self.kwargs['pk'])
        return context

    def dispatch(self, request, *args, **kwargs):
        # Gate access through the advanced per-host permission check.
        self.host_id = kwargs.get('pk')
        host = models.Host.objects.get(pk=self.host_id)
        instance = host
        redirect = getAdvancedPermissionBlock(self.request,
                                              self.permission_required,
                                              instance)
        if redirect:
            return redirect
        else:
            return super(HostPluginUpdate, self).dispatch(
                request, *args, **kwargs)

    def form_invalid(self, form):
        # Performs the actual update despite the name (see class docstring).
        myform = forms.HostPluginUpdateForm(self.request.POST)
        # NOTE(review): form_valid() is called twice; its result is just the
        # success URL, so the second call is redundant.
        form_valid_from_parent = self.form_valid(form)
        if self.form_valid(form):
            form_valid_from_parent = self.get_success_url()
            host = models.Host.objects.get(pk=self.host_id)
            # Fall back to the uploaded file's name when no plugin name was
            # posted; assumes a file is present in that case — verify.
            showname = myform.data['plugin_name'] if myform.data['plugin_name'] != '' else self.request.FILES['file'].name
            # Asynchronous task (.delay); success is reported optimistically.
            update_plugin.delay(host=host,
                                username=host.ssh_username,
                                password=host.ssh_password,
                                jenkins_username=host.jenkins_username,
                                jenkins_password=host.jenkins_password,
                                plugin_name=showname,
                                plugin_version=myform.data['versions'])
            msg = 'Plugin {} on Host {} Successfully Updated'.format(self.kwargs['plugin_name'],host)
            messages.success(self.request, msg)
            return HttpResponseRedirect(form_valid_from_parent)
        else:
            return None

    def form_valid(self, form):
        """First call the parent's form valid then let the user know it worked.
        """
        form_valid_from_parent = self.get_success_url()
        host = models.Host.objects.get(pk=self.host_id)
        return form_valid_from_parent

    def get_success_url(self):
        """"""
        return reverse('hosts_host_detail', kwargs={'pk': self.kwargs['pk']})
class HostPluginInstall(FormView):
    """Install a Jenkins plugin (picked from the public update center) on a host.

    NOTE(review): as in HostPluginUpdate, the work happens in
    form_invalid() and form_valid() only resolves the success URL.
    """
    model = models.Host
    permission_required = ['hosts.change_host']
    form_class = forms.HostPluginInstallForm
    template_name_suffix = '_install_plugin'
    template_name = 'hosts/host_install_plugin.html'

    def get_form_kwargs(self):
        # Scrape the update-center index for the list of available plugins
        # and hand it to the form as choices.
        kwargs = {}
        html_plugins = 'http://updates.jenkins-ci.org/download/plugins/'
        html_doc = requests.get(html_plugins)
        soup = BeautifulSoup(html_doc.text, 'html.parser')
        asfind = soup.find_all('a')
        # The first five anchors are directory-listing chrome, not plugins.
        asfind = asfind[5:]
        plugins = [x.string.strip("/") for x in asfind]
        initial = super(HostPluginInstall, self).get_initial()
        kwargs['plugins'] = plugins
        return kwargs

    def get_context_data(self, **kwargs):
        context = super(HostPluginInstall, self).get_context_data(**kwargs)
        #context['plugin_name'] = self.kwargs['plugin_name']
        context['host_name'] = models.Host.objects.get(pk=self.kwargs['pk'])
        return context

    def dispatch(self, request, *args, **kwargs):
        # Gate access through the advanced per-host permission check.
        self.host_id = kwargs.get('pk')
        host = models.Host.objects.get(pk=self.host_id)
        instance = host
        redirect = getAdvancedPermissionBlock(self.request,
                                              self.permission_required,
                                              instance)
        if redirect:
            return redirect
        else:
            return super(HostPluginInstall, self).dispatch(
                request, *args, **kwargs)

    def form_invalid(self, form):
        # Performs the actual install despite the name (see class docstring).
        myform = forms.HostPluginInstallForm(self.request.POST)
        # NOTE(review): form_valid() is called twice; the second call is
        # redundant since it only returns the success URL.
        form_valid_from_parent = self.form_valid(form)
        if self.form_valid(form):
            form_valid_from_parent = self.get_success_url()
            host = models.Host.objects.get(pk=self.host_id)
            # Asynchronous task (.delay); success is reported optimistically.
            update_plugin.delay(host=host,
                                username=host.ssh_username,
                                password=host.ssh_password,
                                jenkins_username=host.jenkins_username,
                                jenkins_password=host.jenkins_password,
                                plugin_name=myform.data['plugins'],
                                plugin_version=myform.data['versions'])
            msg = 'Plugin {} on Host {} Successfully Updated'.format(myform.data['plugins'],host)
            messages.success(self.request, msg)
            return HttpResponseRedirect(form_valid_from_parent)
        else:
            return None

    def form_valid(self, form):
        """First call the parent's form valid then let the user know it worked.
        """
        form_valid_from_parent = self.get_success_url()
        host = models.Host.objects.get(pk=self.host_id)
        return form_valid_from_parent

    def get_success_url(self):
        """"""
        return reverse('hosts_host_detail', kwargs={'pk': self.kwargs['pk']})
class HostPluginDelete(DeleteView):
    """Confirm-and-delete view for a Jenkins plugin on a host.

    The plugin is not a database row; a transient models.Plugin stands in
    so the generic DeleteView machinery can render the confirmation page.
    """
    model = models.Plugin
    permission_required = ['hosts.change_host']
    template_name = 'hosts/plugin_confirm_delete.html'

    def get_object(self, queryset=None):
        # Build an unsaved placeholder carrying just the plugin name.
        myplugin = models.Plugin()
        myplugin.name =self.kwargs['plugin_name']
        myplugin.version = 'todelete'
        return myplugin

    def get_success_url(self):
        """"""
        return reverse('hosts_host_detail', kwargs={'pk': self.kwargs['pk']})

    def delete(self, request, *args, **kwargs):
        host = models.Host.objects.get(pk=self.kwargs['pk'])
        ## Delete Logic here!
        # Asynchronous task (.delay, presumably Celery — verify in
        # hosts.tasks); success is reported optimistically.
        delete_plugin.delay(host=host,
                            username=host.ssh_username,
                            password=host.ssh_password,
                            jenkins_username=host.jenkins_username,
                            jenkins_password=host.jenkins_password,
                            plugin_name=self.kwargs['plugin_name'],)
        msg = 'Plugin {} on Host {} Successfully Deleted'.format(self.kwargs['plugin_name'],host)
        messages.success(self.request, msg)
        url = reverse('hosts_host_detail', kwargs={'pk': self.kwargs['pk']})
        return HttpResponseRedirect(url)
class HostCreate(CreateView):
    """Create a host. Hosts let us know where we can shovel code to."""

    model = models.Host
    form_class = forms.HostCreateForm
    template_name_suffix = '_create'
    permission_required = ['hosts.add_host']

    def dispatch(self, request, *args, **kwargs):
        # Guard clause: short-circuit with the permission-block response
        # when access is denied.
        block = getAdvancedPermissionBlock(self.request,
                                           self.permission_required,
                                           None)
        if block:
            return block
        return super(HostCreate, self).dispatch(request, *args, **kwargs)

    def form_valid(self, form):
        """Delegate to the parent, notify the user, and grant the creator access."""
        response = super(HostCreate, self).form_valid(form)
        messages.success(self.request, 'Host {} Successfully Created'.format(self.object))
        # The creating user immediately gets permission on the new host.
        grant = PermissionHost(user=self.request.user, host=self.object)
        grant.save()
        return response

    def get_success_url(self):
        """Send them back to the detail view for that host."""
        return reverse('hosts_host_detail', kwargs={'pk': self.object.pk})
class HostUpdate(UpdateView):
    """Edit an existing host; access is gated by the advanced permission check."""

    model = models.Host
    form_class = forms.HostUpdateForm
    template_name_suffix = '_update'
    permission_required = ['hosts.change_host']

    def dispatch(self, request, *args, **kwargs):
        self.host_id = kwargs.get('pk')
        target = models.Host.objects.get(pk=self.host_id)
        # Guard clause: bail out with the permission-block response when denied.
        block = getAdvancedPermissionBlock(self.request,
                                           self.permission_required,
                                           target)
        if block:
            return block
        return super(HostUpdate, self).dispatch(request, *args, **kwargs)

    def form_valid(self, form):
        """Delegate to the parent, then surface a success message."""
        response = super(HostUpdate, self).form_valid(form)
        messages.success(self.request,
                         'Host {} Successfully Updated'.format(self.object))
        return response

    def get_success_url(self):
        """Back to the detail page of the updated host."""
        return reverse('hosts_host_detail', kwargs={'pk': self.object.pk})
class HostDelete(GroupRequiredMixin, DeleteView):
    """Delete a host after the advanced permission check passes."""

    model = models.Host
    success_url = reverse_lazy('hosts_host_list')
    permission_required = ['hosts.delete_host']

    def dispatch(self, request, *args, **kwargs):
        self.host_id = kwargs.get('pk')
        host = models.Host.objects.get(pk=self.host_id)
        # Guard: return the permission-block response when access is denied.
        redirect = getAdvancedPermissionBlock(self.request,
                                              self.permission_required,
                                              host)
        if redirect:
            return redirect
        return super(HostDelete, self).dispatch(request, *args, **kwargs)

    def delete(self, request, *args, **kwargs):
        msg = 'Host {} Successfully Deleted'.format(self.get_object())
        messages.success(self.request, msg)
        # BUG FIX: the original called super(...).delete(self, request, ...),
        # passing the view instance again as the 'request' argument and
        # shifting every positional parameter by one.
        return super(HostDelete, self).delete(request, *args, **kwargs)
class ProxyElasticSearch(View):
    """Proxy a GET to the configured ELK endpoint and relay its JSON body."""

    def get(self, request):
        endpoint = settings.ELK_URL + ":" + settings.ELK_PORT
        upstream = requests.get(endpoint)
        # safe=False because the upstream body may be a JSON array.
        return JsonResponse(upstream.json(), safe=False)
class GetVersionsByPluginNameAjax(SingleObjectMixin, View):
    """AJAX endpoint returning the published versions of a named Jenkins plugin."""

    def get(self, request, *args, **kwargs):
        # Guard clause: without a plugin name there is nothing to look up.
        if 'plugin_name' not in kwargs:
            return JsonResponse({'error': "No Ajax"})
        listing_url = 'http://updates.jenkins-ci.org/download/plugins/%s' % kwargs.get('plugin_name')
        page = requests.get(listing_url)
        parsed = BeautifulSoup(page.text, 'html.parser')
        anchors = parsed.find_all('a')
        versions = [a.string for a in anchors if a.string!='permalink to the latest']
        return JsonResponse([{'version': o, } for o in versions],
                            safe=False)
class HostPluginUpload(FormView):
    """Upload a plugin file to a host and hand it to the async installer.

    NOTE(review): the real work happens in :meth:`form_invalid`, not
    :meth:`form_valid` -- presumably because the upload form never
    validates as-is. Worth confirming and restructuring later.
    """

    model = models.Host
    permission_required = ['hosts.change_host']
    form_class = forms.UploadFileForm
    template_name_suffix = '_install_plugin'
    template_name = 'hosts/host_install_plugin.html'

    def get_form_kwargs(self):
        # Deliberately drops the kwargs FormView would normally pass
        # (instance data, prefix, ...) and gives the form nothing.
        kwargs = {}
        return kwargs

    def get_context_data(self, **kwargs):
        """Expose the target host to the template."""
        context = super(HostPluginUpload, self).get_context_data(**kwargs)
        #context['plugin_name'] = self.kwargs['plugin_name']
        context['host_name'] = models.Host.objects.get(pk=self.kwargs['pk'])
        return context

    def dispatch(self, request, *args, **kwargs):
        """Gate the request on the advanced permission block, then dispatch."""
        self.host_id = kwargs.get('pk')
        host = models.Host.objects.get(pk=self.host_id)
        instance = host
        redirect = getAdvancedPermissionBlock(self.request,
                                              self.permission_required,
                                              instance)
        if redirect:
            return redirect
        else:
            return super(HostPluginUpload, self).dispatch(
                request, *args, **kwargs)

    def form_invalid(self, form):
        """Save the uploaded file to /tmp and queue the plugin install task.

        NOTE(review): ``self.form_valid(form)`` is called twice (once for
        the unused ``form_valid_from_parent``, once in the condition), and
        the ``else`` branch returns ``None``, which is not a valid HTTP
        response -- both look like latent bugs to confirm with the author.
        """
        myform = forms.HostPluginInstallForm(self.request.POST)
        form_valid_from_parent = self.form_valid(form)
        if self.form_valid(form):
            form_valid_from_parent = self.get_success_url()
            host = models.Host.objects.get(pk=self.host_id)
            f = self.request.FILES['file']
            # Stage the upload under a fresh UUID directory so concurrent
            # uploads of the same filename cannot collide.
            unique_fname = uuid.uuid4()
            os.mkdir('/tmp/%s' % unique_fname)
            path = '/tmp/%s/%s' % (unique_fname, self.request.FILES['file'].name)
            with open('/tmp/%s/%s' % (unique_fname, self.request.FILES['file'].name),
                      'wb+') as destination:
                for chunk in f.chunks():
                    destination.write(chunk)
            # Prefer the user-supplied plugin name for display; fall back to
            # the uploaded file's name when the field was left empty.
            showname = myform.data['plugin_name'] if myform.data['plugin_name'] != '' else self.request.FILES['file'].name
            # Celery task: copies the file to the host and installs it there.
            upload_plugin.delay(host=host,
                                username=host.ssh_username,
                                password=host.ssh_password,
                                jenkins_username=host.jenkins_username,
                                jenkins_password=host.jenkins_password,
                                plugin_name=myform.data['plugin_name'],
                                file_path=path)
            msg = 'Plugin {} on Host {} Successfully Uploaded'.format(showname, host)
            messages.success(self.request, msg)
            return HttpResponseRedirect(form_valid_from_parent)
        else:
            return None

    def form_valid(self, form):
        """Return the success URL (used as a truthy flag by form_invalid)."""
        form_valid_from_parent = self.get_success_url()
        host = models.Host.objects.get(pk=self.host_id)
        return form_valid_from_parent

    def get_success_url(self):
        """Redirect back to the detail page of the targeted host."""
        return reverse('hosts_host_detail', kwargs={'pk': self.kwargs['pk']})
class HostPluginUploadWithName(FormView):
    """Upload a plugin file whose plugin name is derived from the filename.

    Variant of HostPluginUpload: instead of reading the plugin name from a
    form field, it strips path and extension from the uploaded file's name.
    """

    model = models.Host
    permission_required = ['hosts.change_host']
    form_class = forms.UploadFileFormWithName
    template_name_suffix = '_install_plugin_with_name'
    template_name = 'hosts/host_install_plugin_with_name.html'

    def getFileNameWithoutExtension(self, path):
        """Return the basename of *path* (either separator) minus its extension."""
        return path.split('\\').pop().split('/').pop().rsplit('.', 1)[0]

    def get_form_kwargs(self):
        # Deliberately drops the kwargs FormView would normally pass
        # and gives the form nothing.
        kwargs = {}
        return kwargs

    def get_context_data(self, **kwargs):
        """Expose the plugin name and target host to the template."""
        context = super(HostPluginUploadWithName, self).get_context_data(**kwargs)
        context['plugin_name'] = self.kwargs['plugin_name']
        context['host_name'] = models.Host.objects.get(pk=self.kwargs['pk'])
        return context

    def dispatch(self, request, *args, **kwargs):
        """Gate the request on the advanced permission block, then dispatch."""
        self.host_id = kwargs.get('pk')
        host = models.Host.objects.get(pk=self.host_id)
        instance = host
        redirect = getAdvancedPermissionBlock(self.request,
                                              self.permission_required,
                                              instance)
        if redirect:
            return redirect
        else:
            return super(HostPluginUploadWithName, self).dispatch(
                request, *args, **kwargs)

    def form_invalid(self, form):
        """Save the uploaded file to /tmp and queue the plugin install task.

        NOTE(review): ``self.form_valid(form)`` is called twice and the
        ``else`` branch returns ``None`` (not a valid HTTP response) --
        same latent issues as in HostPluginUpload; confirm with the author.
        """
        myform = forms.UploadFileFormWithName(self.request.POST)
        form_valid_from_parent = self.form_valid(form)
        if self.form_valid(form):
            form_valid_from_parent = self.get_success_url()
            host = models.Host.objects.get(pk=self.host_id)
            f = self.request.FILES['file']
            # Stage the upload under a fresh UUID directory so concurrent
            # uploads of the same filename cannot collide.
            unique_fname = uuid.uuid4()
            os.mkdir('/tmp/%s' % unique_fname)
            path = '/tmp/%s/%s' % (unique_fname, self.request.FILES['file'].name)
            with open('/tmp/%s/%s' % (unique_fname, self.request.FILES['file'].name),
                      'wb+') as destination:
                for chunk in f.chunks():
                    destination.write(chunk)
            showname = self.request.FILES['file'].name
            # Celery task: copies the file to the host and installs it there;
            # the plugin name is the uploaded filename without its extension.
            upload_plugin.delay(host=host,
                                username=host.ssh_username,
                                password=host.ssh_password,
                                jenkins_username=host.jenkins_username,
                                jenkins_password=host.jenkins_password,
                                plugin_name=self.getFileNameWithoutExtension(showname),
                                file_path=path)
            msg = 'Plugin {} on Host {} Successfully Uploaded'.format(showname, host)
            messages.success(self.request, msg)
            return HttpResponseRedirect(form_valid_from_parent)
        else:
            return None

    def form_valid(self, form):
        """Return the success URL (used as a truthy flag by form_invalid)."""
        form_valid_from_parent = self.get_success_url()
        host = models.Host.objects.get(pk=self.host_id)
        return form_valid_from_parent
def get_success_url(self):
""""""
return reverse('hosts_host_detail', kwargs={'pk': self.kwargs['pk']}) | {
"content_hash": "ddd4558131ac5a5a193f8f9458a9e9d4",
"timestamp": "",
"source": "github",
"line_count": 703,
"max_line_length": 131,
"avg_line_length": 38.02560455192034,
"alnum_prop": 0.5860017956007781,
"repo_name": "ahharu/plugin-manager",
"id": "911c4dcf16b7914d1d4884c79e29d989b459bef3",
"size": "26732",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plugin_manager/hosts/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "68971"
},
{
"name": "HTML",
"bytes": "72414"
},
{
"name": "JavaScript",
"bytes": "313284"
},
{
"name": "Python",
"bytes": "138428"
}
],
"symlink_target": ""
} |
"""
FIZZ BUZZ.
CHALLENGE DESCRIPTION:
Players generally sit in a circle. The first player says the number “1”,
and each player says next number in turn. However, any number divisible by X
(for example, three) is replaced by the word fizz, and any divisible by Y
(for example, five) by the word buzz. Numbers divisible by both become fizz
buzz. A player who hesitates, or makes a mistake is eliminated from the game.
Write a program that prints out the final series of numbers where those
divisible by X, Y and both are replaced by “F” for fizz, “B” for buzz and “FB”
for fizz buzz.
INPUT SAMPLE:
Your program should accept a file as its first argument.
The file contains multiple separated lines; each line contains 3 numbers that
are space delimited. The first number is the first divider (X), the second
number is the second divider (Y), and the third number is how far you should
count (N). You may assume that the input file is formatted correctly and the
numbers are valid positive integers.
"""
# Future
from __future__ import (absolute_import, division,
print_function, unicode_literals)
# Standard Library
import sys
def file_lines(file_location):
    """Yield the lines of *file_location* one at a time (newlines kept)."""
    with open(file_location, 'r') as source:
        for record in source:
            yield record
def fizz_buzz(line):
    """Return the Fizz Buzz series for one 'X Y N' input line.

    X and Y are the two divisors, N the count; multiples of X become 'F',
    multiples of Y become 'B', multiples of both become 'FB'.
    """
    parts = line.split()
    div_x = int(parts[0])
    div_y = int(parts[1])
    count = int(parts[2])
    tokens = []
    for value in range(1, count + 1):
        hit_x = value % div_x == 0
        hit_y = value % div_y == 0
        if hit_x and hit_y:
            tokens.append('FB')
        elif hit_x:
            tokens.append('F')
        elif hit_y:
            tokens.append('B')
        else:
            tokens.append(str(value))
    return ' '.join(tokens)
def main():
    """Program entry point: print the Fizz Buzz output for each input line."""
    for record in file_lines(sys.argv[1]):
        print(fizz_buzz(record))


if __name__ == '__main__':
    main()
| {
"content_hash": "6f402e54316ec55bb736a3ef800e13f3",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 78,
"avg_line_length": 32.296875,
"alnum_prop": 0.6686018384131591,
"repo_name": "kirillmorozov/CodeEval",
"id": "58f732491085936a8db25dcaafe92d1478e8c587",
"size": "2128",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/easy/fizz_buzz.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "33671"
}
],
"symlink_target": ""
} |
#!/usr/bin/env python3
# python setup.py sdist --format=zip,gztar
from setuptools import setup
import os
import sys
import platform
import imp
import argparse
# Read the run-time and hardware-wallet dependency lists shipped with the
# source tree; each file holds one requirement per line.
with open('contrib/requirements/requirements.txt') as f:
    requirements = f.read().splitlines()

with open('contrib/requirements/requirements-hw.txt') as f:
    requirements_hw = f.read().splitlines()

# Load lib/version.py as a module to read ELECTRUM_VERSION without
# importing the whole package.
# NOTE(review): the ``imp`` module is deprecated (removed in Python 3.12);
# migrating to ``importlib`` is worth a follow-up.
version = imp.load_source('version', 'lib/version.py')

if sys.version_info[:3] < (3, 4, 0):
    sys.exit("Error: Electrum requires Python version >= 3.4.0...")

# Ship the requirement files themselves alongside the package.
data_files = ['contrib/requirements/' + r for r in ['requirements.txt', 'requirements-hw.txt']]

if platform.system() in ['Linux', 'FreeBSD', 'DragonFly']:
    # Only --root is interesting here; everything else is left for setuptools.
    parser = argparse.ArgumentParser()
    parser.add_argument('--root=', dest='root_path', metavar='dir', default='/')
    opts, _ = parser.parse_known_args(sys.argv[1:])
    usr_share = os.path.join(sys.prefix, "share")
    # Fall back to the per-user XDG data directory when the install prefix
    # is not writable (e.g. a non-root install).
    if not os.access(opts.root_path + usr_share, os.W_OK) and \
       not os.access(opts.root_path, os.W_OK):
        if 'XDG_DATA_HOME' in os.environ.keys():
            usr_share = os.environ['XDG_DATA_HOME']
        else:
            usr_share = os.path.expanduser('~/.local/share')
    # Desktop launcher and icon for freedesktop-compliant environments.
    data_files += [
        (os.path.join(usr_share, 'applications/'), ['electrum.desktop']),
        (os.path.join(usr_share, 'pixmaps/'), ['icons/electrum.png'])
    ]

setup(
    name="Electrum",
    version=version.ELECTRUM_VERSION,
    install_requires=requirements,
    extras_require={
        'hardware': requirements_hw,
    },
    packages=[
        'electrum',
        'electrum_gui',
        'electrum_gui.qt',
        'electrum_plugins',
        'electrum_plugins.audio_modem',
        'electrum_plugins.cosigner_pool',
        'electrum_plugins.email_requests',
        'electrum_plugins.greenaddress_instant',
        'electrum_plugins.hw_wallet',
        'electrum_plugins.keepkey',
        'electrum_plugins.labels',
        'electrum_plugins.ledger',
        'electrum_plugins.trezor',
        'electrum_plugins.digitalbitbox',
        'electrum_plugins.trustedcoin',
        'electrum_plugins.virtualkeyboard',
    ],
    # Map the package names above onto the on-disk source directories.
    package_dir={
        'electrum': 'lib',
        'electrum_gui': 'gui',
        'electrum_plugins': 'plugins',
    },
    package_data={
        'electrum': [
            'servers.json',
            'servers_testnet.json',
            'currencies.json',
            'checkpoints.json',
            'checkpoints_testnet.json',
            'www/index.html',
            'wordlist/*.txt',
            'locale/*/LC_MESSAGES/electrum.mo',
        ]
    },
    scripts=['electrum'],
    data_files=data_files,
    description="Lightweight Bitcoin Wallet",
    author="Thomas Voegtlin",
    author_email="thomasv@electrum.org",
    license="MIT Licence",
    url="https://electrum.org",
    long_description="""Lightweight Bitcoin Wallet"""
)
| {
"content_hash": "ea97ca9a5e2b51744b7d491737413087",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 95,
"avg_line_length": 31.53846153846154,
"alnum_prop": 0.6097560975609756,
"repo_name": "romanz/electrum",
"id": "63581a614106543095b845b5d2dce4df97145102",
"size": "2870",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "GLSL",
"bytes": "289"
},
{
"name": "Java",
"bytes": "1574"
},
{
"name": "Makefile",
"bytes": "837"
},
{
"name": "NSIS",
"bytes": "7309"
},
{
"name": "Python",
"bytes": "1709601"
},
{
"name": "Shell",
"bytes": "11466"
}
],
"symlink_target": ""
} |
# NOTE(review): this exercise is Python 2 -- ``print`` is a statement here.
# %r renders each value with repr(), so strings keep their quotes.
formatter = "%r %r %r %r"

print formatter % (1, 2, 3, 4)
print formatter % ("one", "two", "three", "four")
print formatter % (True, False, False, True)
# The format string can itself be interpolated into the format string.
print formatter % (formatter, formatter, formatter, formatter)
print formatter % (
    "I had this thing.",
    "That you could type up right.",
    # The following line will print with double-quotes around it.
    # That's because of the single-quote used in the string. :)
    "But it didn't sing.",
    "So I said goodnight."
)
# After the study drills, see if you can do a formatter insert within another
# one. Should work.... right?
#
# Okay so on this last edit, I'm going to remove the commas. Hopefully the
# strings will print normally and without the spaces since I have the \n
# making new lines.
# OKAY SO it almost worked perfectly until the last line where the formatter
# was used. Let's try making it a string variable.
#
# OKAY SO that didn't work. Looks like using the comma in a print function does more than just add spaces. Reminds me of using parentheses.... say... Will
# that make it work??
#
# NOPE That didn't work either. Oh well. I've fixed it now. This is just here
# to remember later. And also because I'm narcissistic and want to think I'll
# look back on these early days of learning programming.
#
# Dear diary,
#
# Hey, it's me again. Apparently I didn't fix the last thing I commented
# about. I believe the problem might be that I'm still not using the commas
# to separate the strings. Let's fix that and see what happens.
#
# With love,
# Jared Manning
#
# P.S. I also changed the string printed back to the formatter variable and
# deleted the \n lines from the inner strings. That seems to have fixed all
# of my problems (even though the output is still ugly).
| {
"content_hash": "c105708e47d2e97b1064dec91d008d10",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 154,
"avg_line_length": 41.44186046511628,
"alnum_prop": 0.7048260381593715,
"repo_name": "jaredmanning/learning",
"id": "a62408b2b5f4b1958dd196cf2c251260d2efc5ba",
"size": "1782",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lpthw/ex8.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "285"
},
{
"name": "Python",
"bytes": "34489"
},
{
"name": "Scheme",
"bytes": "8913"
}
],
"symlink_target": ""
} |
from django.conf import settings
from django.core.files.storage import Storage
from django.core.files.base import ContentFile
import azure_util
from azure_util.storage import *
from datetime import datetime
import os, mimetypes
class AzureStorage(Storage):
    """Django file-storage backend backed by an Azure blob container.

    The container is created on construction (if missing) with public
    ``blob``-level read access, so :meth:`url` can hand out direct links.
    """

    def __init__(self, container=None):
        """Connect to Azure and ensure the target container exists.

        :param container: container name; falls back to
            ``settings.AZURE_STORAGE_CONTAINER`` when omitted.
        """
        self.blob_service = BlobService(account_name=settings.AZURE_STORAGE_ACCOUNT, account_key=settings.AZURE_STORAGE_KEY)
        if not container:
            self.container = settings.AZURE_STORAGE_CONTAINER
        else:
            self.container = container
        self.blob_service.create_container(self.container, x_ms_blob_public_access='blob')

    def _open(self, name, mode='rb'):
        """Download blob *name* and wrap its bytes in a ContentFile.

        NOTE(review): *mode* is ignored -- the blob is always fetched whole;
        confirm callers never rely on write modes here.
        """
        data = self.blob_service.get_blob(self.container, name)
        return ContentFile(data)

    def _save(self, name, content):
        """Upload *content* as a block blob; record its local mtime as metadata."""
        content.open(mode="rb")
        data = content.read()
        content_type = mimetypes.guess_type(name)[0]
        # Stash the modification time so modified_time() can recover it later.
        metadata = {"modified_time": "%f" % os.path.getmtime(content.name)}
        self.blob_service.put_blob(self.container, name, data, x_ms_blob_type='BlockBlob', x_ms_blob_content_type=content_type, x_ms_meta_name_values=metadata)
        return name

    def delete(self, name):
        """Remove blob *name* from the container."""
        self.blob_service.delete_blob(self.container, name)

    def exists(self, name):
        """Return True when a blob named *name* is present.

        Any service error is treated as "does not exist".
        """
        try:
            self.blob_service.get_blob_properties(self.container, name)
            return True
        # BUG FIX: the bare ``except:`` also swallowed SystemExit and
        # KeyboardInterrupt; catching Exception keeps the original
        # "missing blob" semantics without hiding interpreter exits.
        except Exception:
            return False

    def listdir(self, path):
        """Return ([directories], [files]) for every blob under *path*."""
        dirs = []
        files = []
        blobs = self.blob_service.list_blobs(self.container, prefix=(path or None))
        for blob in blobs:
            directory, file_name = os.path.split(blob.name)
            dirs.append(directory)
            files.append(file_name)
        return (dirs, files)

    def size(self, name):
        """Return the blob's size in bytes (its content-length property)."""
        properties = self.blob_service.get_blob_properties(self.container, name)
        return properties.get('content-length')

    def url(self, name):
        """Return the public URL of the first blob whose name matches *name*."""
        blob = self.blob_service.list_blobs(self.container, prefix=name)
        return blob.blobs[0].url

    def modified_time(self, name):
        """Return the datetime stored by _save() in the blob's metadata."""
        metadata = self.blob_service.get_blob_metadata(self.container, name)
        modified_time = float(metadata.get('x-ms-meta-modified_time'))
        return datetime.fromtimestamp(modified_time)
| {
"content_hash": "9bbc181fffe58458108ecc5ff93fc539",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 159,
"avg_line_length": 36.515625,
"alnum_prop": 0.6534017971758665,
"repo_name": "fergalmoran/dss",
"id": "2f2a4b36d3b5111a21c08c5133123ee1f0e3cb2f",
"size": "2337",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spa/storage.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "1335630"
},
{
"name": "CoffeeScript",
"bytes": "91082"
},
{
"name": "JavaScript",
"bytes": "3576558"
},
{
"name": "Python",
"bytes": "1543569"
}
],
"symlink_target": ""
} |
"""Package contenant les commandes du module commerce."""
import primaires.commerce.commandes.acheter
import primaires.commerce.commandes.info
import primaires.commerce.commandes.lister
import primaires.commerce.commandes.questeur
import primaires.commerce.commandes.vendre
| {
"content_hash": "313609f52a22c5050335c84375a87e39",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 57,
"avg_line_length": 39.285714285714285,
"alnum_prop": 0.8545454545454545,
"repo_name": "vlegoff/tsunami",
"id": "abc049eef12ebdd5fac985250f14fc2028c6ceb8",
"size": "1842",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/primaires/commerce/commandes/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "7930908"
},
{
"name": "Ruby",
"bytes": "373"
}
],
"symlink_target": ""
} |
import time
import logging
import json
import inspect
import random
from . import config
from .proto import Protocol
from .chain.message import *
from threading import Thread
from threading import Lock
from werkzeug.wrappers import Request, Response
from werkzeug.serving import run_simple
from jsonrpc import JSONRPCResponseManager, dispatcher
logger = logging.getLogger(config.APP_NAME)
logging.getLogger('werkzeug').setLevel(logging.ERROR)
class API:
    """JSON-RPC HTTP API of the node.

    Serves a dispatch table of RPC methods over werkzeug; dapps can add
    methods at run time through :meth:`registerRPCMethod`.
    """

    def __init__(self, backend, chain, dht, port, threads):
        """Store collaborators and build the default RPC dispatch table.

        :param backend: blockchain backend (used to broadcast transactions)
        :param chain: chain abstraction (height / code / name queries)
        :param dht: DHT used for peer information and data publishing
        :param port: TCP port of the HTTP JSON-RPC server
        :param threads: number of werkzeug worker processes
        """
        self.port = int(port)
        self.threads = int(threads)
        self.dht = dht
        self.backend = backend
        self.chain = chain
        # Human-readable signature of every RPC method, served by "help".
        self.RPCHelp = {
            "broadcast" : {"args": ["signed_transaction", "temp_id"], "return": {'txid': 'transaction_hash'}},
            "info" : {"args": [], "return": {
                "chain": {
                    "code": "XLT",
                    "height": 561596,
                    "name": "Litecoin testnet"
                },
                "node": {
                    "backend": [ "rpc", "chainsoapi" ],
                    "dapps": { "list": [ "cotst" ], "enabled": [ "cotst" ] },
                    "version": "0.1"
                }
            }},
            "net.peers": {"args": [], "return": {"list": [("host", "port", "id")]}},
            "net.connections": {"args": [], "return": {"count": 'total_peers'}},
            "help": {"args":[], "return": {}}
        }
        self.RPCDispatcher = {}
        self.RPCDispatcher["broadcast"] = self.method_broadcast
        self.RPCDispatcher["help"] = self.method_help
        self.RPCDispatcher["info"] = self.method_info
        self.RPCDispatcher["net.peers"] = self.method_net_peers
        self.RPCDispatcher["net.connections"] = self.method_net_connections

    def registerRPCMethod(self, name, method):
        """Register an external RPC method dict ({'call': fn, 'help': doc})."""
        self.RPCDispatcher[name] = method['call']
        self.RPCHelp[name] = method['help']

    def method_net_connections(self):
        """Return the number of currently known peers."""
        return {'count': len(self.dht.peers())}

    def method_net_peers(self):
        """Return the list of currently known peers."""
        return self.dht.peers()

    # Broadcast a signed transaction
    def method_broadcast(self, thex, temp_id):
        """Broadcast *thex* via the backend and publish its temp data.

        :param thex: hex-encoded signed transaction
        :param temp_id: DHT key under which the transaction id is published
        :return: ``{'txid': <hash or None>}``
        """
        # TODO check if temp_id is associated with the player who signed thex
        # Use the backend to broadcast the transaction and get txid
        r = self.backend.broadcastTransaction(thex)
        # Publish the temp data on the DHT (only on successful broadcast).
        if r is not None:
            self.dht.publish(temp_id, r)
        # Return the transaction id to the client
        return {'txid': r}

    def method_help(self):
        """Return the help table describing every registered method."""
        return self.RPCHelp

    def method_info(self):
        """Return chain status and node configuration information."""
        return {'chain': {'height': self.chain.getChainHeight(), 'regtest': config.CONF['regtest'],
                          'code': self.chain.getChainCode(), 'name': self.chain.getChainName()},
                'node': {'dapps': config.CONF['dapps'], 'backend': config.CONF['backend']['protocol'],
                         'version': config.APP_VERSION}}

    @Request.application
    def serveApplication(self, request):
        """WSGI entry point: parse, log, and dispatch one JSON-RPC request."""
        try:
            rjson = json.loads(request.data.decode('ascii'))
        except Exception:
            # Malformed payload: reply with an empty, CORS-enabled response.
            apiresponse = Response({}, mimetype='application/json')
            apiresponse.headers.add('Access-Control-Allow-Origin', '*')
            apiresponse.headers.add('Access-Control-Allow-Methods', 'GET,PUT,POST,DELETE,PATCH')
            apiresponse.headers.add('Access-Control-Allow-Headers', 'Content-Type, Authorization')
            return apiresponse

        if rjson['method'] in self.RPCDispatcher:
            # BUG FIX: inspect.getargspec() was deprecated and removed in
            # Python 3.11; getfullargspec() exposes the same ``.args`` list.
            # The ``- 1`` discounts the bound ``self`` parameter.
            nargs = len(inspect.getfullargspec(self.RPCDispatcher[rjson['method']]).args) - 1
            if len(rjson['params']) != nargs:
                logger.error('Client invalid request arguments: "%s" %s', rjson['method'], str(rjson['params']))
            else:
                # Keep the log quiet for frequent read-only calls.
                if rjson['method'].find('get') == -1 and rjson['method'].find('info') == -1:
                    logger.debug('Client request: %s', rjson['method'])
        else:
            logger.error('Client invalid request: "%s" %s', rjson['method'], str(rjson['params']))

        response = JSONRPCResponseManager.handle(request.data, self.RPCDispatcher)
        apiresponse = Response(response.json, mimetype='application/json')
        apiresponse.headers.add('Access-Control-Allow-Origin', '*')
        apiresponse.headers.add('Access-Control-Allow-Methods', 'GET,PUT,POST,DELETE,PATCH')
        apiresponse.headers.add('Access-Control-Allow-Headers', 'Content-Type, Authorization')
        return apiresponse

    def serverThread(self):
        """Blocking werkzeug serve loop; run by :meth:`run` in a thread."""
        if self.threads > 1:
            run_simple('localhost', self.port, self.serveApplication, threaded=True, processes=self.threads, use_debugger=False)
        else:
            run_simple('localhost', self.port, self.serveApplication, use_debugger=False)

    def run(self):
        """Start the JSON-RPC server thread."""
        logger.info('Starting jsonrpc api server at port %d (%d threads)', self.port, self.threads)
        # Start the serve thread
        self.servethread = Thread(target=self.serverThread, args=())
        self.servethread.start()
| {
"content_hash": "fce519e5680bddfa4b48971d78744f0d",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 119,
"avg_line_length": 32.163120567375884,
"alnum_prop": 0.6809261300992282,
"repo_name": "contractvm/contractvmd",
"id": "3ce7369ff77f88d26fa2b0eb30c3458e5811968d",
"size": "4707",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "contractvmd/api.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "57983"
},
{
"name": "Shell",
"bytes": "4580"
}
],
"symlink_target": ""
} |
import subprocess
import sys
def main(args):
    """Build the ffig_local docker image and run the build script inside it.

    Passing ``--local`` skips pulling the base image from the registry.
    """
    use_local_base = '--local' in args
    args = [arg for arg in args if arg != '--local']
    if not use_local_base:
        subprocess.check_call("docker pull ffig/ffig-base".split())
    subprocess.check_call("docker build -t ffig_local .".split())
    forwarded = ' '.join(args[1:])
    subprocess.check_call(['docker',
                           'run',
                           'ffig_local',
                           '/bin/bash',
                           '-c',
                           './scripts/build.py {}'.format(forwarded)])


if __name__ == '__main__':
    main(sys.argv)
| {
"content_hash": "c7d0af591d66f6c6c072166a904149a1",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 79,
"avg_line_length": 29.05,
"alnum_prop": 0.4509466437177281,
"repo_name": "jbcoe/C_API_generation",
"id": "f0b31dfd98ae13bc7bd35ecf99c3745499e98321",
"size": "603",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scripts/test-docker.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "7733"
},
{
"name": "CMake",
"bytes": "692"
},
{
"name": "Jupyter Notebook",
"bytes": "9682"
},
{
"name": "Python",
"bytes": "12718"
},
{
"name": "Ruby",
"bytes": "614"
},
{
"name": "Shell",
"bytes": "1555"
}
],
"symlink_target": ""
} |
"""Package providing discourse segmenter for BitPar constituency trees.
Attributes:
align (module): auxiliary routines for doing string alignment
constants (module): constants specific to that package
constituency_tree (module): proxy class for handling constituency syntax
trees
bparsegmenter (module): class for segmenting syntax trees into discourse
units
__all__ (List[str]): list of sub-modules exported by this package
__author__ (str): package's author
__email__ (str): email of package's author
__name__ (str): package's name
__version__ (str): package version
"""
##################################################################
# Imports
from .constants import ENCODING, NO_PARSE_RE, WORD_SEP
from .bparsegmenter import BparSegmenter, read_trees, read_segments, trees2segs
from .constituency_tree import CTree
##################################################################
# Initialization
# Package metadata and public API declaration.
# NOTE(review): reassigning ``__name__`` at package level is unusual and
# shadows the import system's module name -- confirm it is intentional.
__name__ = "bparseg"
__all__ = ["ENCODING", "NO_PARSE_RE", "WORD_SEP", "BparSegmenter", "CTree", \
           "read_trees", "read_segments", "trees2segs"]
__author__ = "Uladzimir Sidarenka"
__email__ = "sidarenk at uni dash potsdam dot de"
__version__ = "0.0.1"
| {
"content_hash": "8210db684030abba48777cbf2e317811",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 79,
"avg_line_length": 40.29032258064516,
"alnum_prop": 0.6052842273819056,
"repo_name": "discourse-lab/DiscourseSegmenter",
"id": "69954a7d99ef4c54253e6ae867848625a9a7ec98",
"size": "1393",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "dsegmenter/bparseg/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "2244"
},
{
"name": "Python",
"bytes": "265132"
},
{
"name": "Shell",
"bytes": "174"
}
],
"symlink_target": ""
} |
"""
Support for ComEd Hourly Pricing data.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.comed_hourly_pricing/
"""
from datetime import timedelta
import logging
import voluptuous as vol
from requests import RequestException, get
import homeassistant.helpers.config_validation as cv
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import ATTR_ATTRIBUTION, STATE_UNKNOWN
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)

# Base endpoint of the public ComEd hourly-pricing REST API.
_RESOURCE = 'https://hourlypricing.comed.com/api'

SCAN_INTERVAL = timedelta(minutes=5)

# Configuration keys accepted in this platform's YAML block.
CONF_MONITORED_FEEDS = 'monitored_feeds'
CONF_SENSOR_TYPE = 'type'
CONF_OFFSET = 'offset'
CONF_NAME = 'name'
CONF_FIVE_MINUTE = 'five_minute'
CONF_CURRENT_HOUR_AVERAGE = 'current_hour_average'

# sensor type -> [default friendly name, unit of measurement]
SENSOR_TYPES = {
    CONF_FIVE_MINUTE: ['ComEd 5 Minute Price', 'c'],
    CONF_CURRENT_HOUR_AVERAGE: ['ComEd Current Hour Average Price', 'c'],
}

TYPES_SCHEMA = vol.In(SENSOR_TYPES)

# Schema of one entry in the monitored_feeds list.
SENSORS_SCHEMA = vol.Schema({
    vol.Required(CONF_SENSOR_TYPE): TYPES_SCHEMA,
    vol.Optional(CONF_OFFSET, default=0.0): vol.Coerce(float),
    vol.Optional(CONF_NAME): cv.string
})

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_MONITORED_FEEDS): [SENSORS_SCHEMA]
})
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Setup the ComEd Hourly Pricing sensor."""
    entities = [
        ComedHourlyPricingSensor(
            feed[CONF_SENSOR_TYPE], feed[CONF_OFFSET], feed.get(CONF_NAME))
        for feed in config[CONF_MONITORED_FEEDS]
    ]
    add_devices(entities)
class ComedHourlyPricingSensor(Entity):
    """Implementation of a ComEd Hourly Pricing sensor."""

    def __init__(self, sensor_type, offset, name):
        """Initialize the sensor.

        :param sensor_type: one of CONF_FIVE_MINUTE / CONF_CURRENT_HOUR_AVERAGE
        :param offset: constant added to every reported price
        :param name: optional friendly name; defaults per SENSOR_TYPES
        """
        if name:
            self._name = name
        else:
            self._name = SENSOR_TYPES[sensor_type][0]
        self.type = sensor_type
        self.offset = offset
        self._state = None
        self._unit_of_measurement = SENSOR_TYPES[sensor_type][1]

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def state(self):
        """Return the state of the sensor."""
        return self._state

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement of this entity, if any."""
        return self._unit_of_measurement

    @property
    def device_state_attributes(self):
        """Return the state attributes."""
        attrs = {ATTR_ATTRIBUTION: 'Data provided by ComEd Hourly '
                                   'Pricing service'}
        return attrs

    def update(self):
        """Get the ComEd Hourly Pricing data from the web service."""
        try:
            # CONSISTENCY FIX: both feed types shared an identical
            # fetch/parse body that differed only in the query string, so
            # the duplicated branches are collapsed into one request path.
            if self.type == CONF_FIVE_MINUTE:
                query = '5minutefeed'
            elif self.type == CONF_CURRENT_HOUR_AVERAGE:
                query = 'currenthouraverage'
            else:
                self._state = STATE_UNKNOWN
                return
            response = get(_RESOURCE + '?type=' + query, timeout=10)
            self._state = float(response.json()[0]['price']) + self.offset
        except (RequestException, ValueError, KeyError):
            _LOGGER.warning('Could not update status for %s', self.name)
| {
"content_hash": "20ccff26c384a414609ad274bb49593b",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 78,
"avg_line_length": 32.24545454545454,
"alnum_prop": 0.6458979419227516,
"repo_name": "morphis/home-assistant",
"id": "30948fada8fd182295a4d1d0bab809d472623a3b",
"size": "3547",
"binary": false,
"copies": "2",
"ref": "refs/heads/snap-support",
"path": "homeassistant/components/sensor/comed_hourly_pricing.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1601137"
},
{
"name": "Python",
"bytes": "5600477"
},
{
"name": "Ruby",
"bytes": "517"
},
{
"name": "Shell",
"bytes": "15144"
}
],
"symlink_target": ""
} |
"""
search_backend.py
.. moduleauthor:: Shaun O'Keefe <shaun.okeefe@versi.edu.au>
"""
#
# Note: We're pulling this straight out of the solr_backend module.
# ideally we should be pulling it out of the backend module,
# but we're already cheating with the highlightSearchBackend.
# This is a definite TODO once the search code has settled down a
# bit
#
from haystack.backends.solr_backend import SearchQuery
class FacetFixedSearchQuery(SearchQuery):
    """SearchQuery variant that lazily populates its facet counts."""

    def get_facet_counts(self):
        """Return facet counts, running the query first if none are cached."""
        counts = self._facet_counts
        if counts is None:
            self.get_results()
            counts = self._facet_counts
        return counts
| {
"content_hash": "f9511e59083e0fdace2c1c4e0c95e330",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 68,
"avg_line_length": 26.043478260869566,
"alnum_prop": 0.7128547579298832,
"repo_name": "iiman/mytardis",
"id": "03875b6cb59aef3603185a0f1a9705bb621ac02b",
"size": "2350",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tardis/tardis_portal/search_query.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "308165"
},
{
"name": "Python",
"bytes": "1736671"
},
{
"name": "Shell",
"bytes": "953"
}
],
"symlink_target": ""
} |
"""Support for a switch using a 433MHz module via GPIO on a Raspberry Pi."""
import importlib
import logging
from threading import RLock
import voluptuous as vol
from homeassistant.components.switch import PLATFORM_SCHEMA, SwitchEntity
from homeassistant.const import (
CONF_NAME,
CONF_PROTOCOL,
CONF_SWITCHES,
EVENT_HOMEASSISTANT_STOP,
)
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)

# Configuration keys accepted in this platform's YAML block.
CONF_CODE_OFF = "code_off"
CONF_CODE_ON = "code_on"
CONF_GPIO = "gpio"
CONF_PULSELENGTH = "pulselength"
CONF_SIGNAL_REPETITIONS = "signal_repetitions"

DEFAULT_PROTOCOL = 1
DEFAULT_SIGNAL_REPETITIONS = 10

# Schema of one named switch: on/off codes are required (CSV of ints),
# pulse length, repetitions and protocol are optional.
SWITCH_SCHEMA = vol.Schema(
    {
        vol.Required(CONF_CODE_OFF): vol.All(cv.ensure_list_csv, [cv.positive_int]),
        vol.Required(CONF_CODE_ON): vol.All(cv.ensure_list_csv, [cv.positive_int]),
        vol.Optional(CONF_PULSELENGTH): cv.positive_int,
        vol.Optional(
            CONF_SIGNAL_REPETITIONS, default=DEFAULT_SIGNAL_REPETITIONS
        ): cv.positive_int,
        vol.Optional(CONF_PROTOCOL, default=DEFAULT_PROTOCOL): cv.positive_int,
    }
)

# Platform schema: one GPIO pin plus a mapping of switch name -> settings.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_GPIO): cv.positive_int,
        vol.Required(CONF_SWITCHES): vol.Schema({cv.string: SWITCH_SCHEMA}),
    }
)
# pylint: disable=no-member
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Find and return switches controlled by a generic RF device via GPIO."""
    rpi_rf = importlib.import_module("rpi_rf")
    rfdevice = rpi_rf.RFDevice(config.get(CONF_GPIO))
    rfdevice_lock = RLock()

    entities = [
        RPiRFSwitch(
            props.get(CONF_NAME, switch_name),
            rfdevice,
            rfdevice_lock,
            props.get(CONF_PROTOCOL),
            props.get(CONF_PULSELENGTH),
            props.get(CONF_SIGNAL_REPETITIONS),
            props.get(CONF_CODE_ON),
            props.get(CONF_CODE_OFF),
        )
        for switch_name, props in config.get(CONF_SWITCHES).items()
    ]

    if entities:
        rfdevice.enable_tx()

    add_entities(entities)
    hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, lambda event: rfdevice.cleanup())
class RPiRFSwitch(SwitchEntity):
    """A single RF-controlled switch driven through a shared RFDevice."""

    def __init__(self, name, rfdevice, lock, protocol, pulselength,
                 signal_repetitions, code_on, code_off):
        """Initialize the switch."""
        self._name = name
        self._state = False
        self._rfdevice = rfdevice
        self._lock = lock
        self._protocol = protocol
        self._pulselength = pulselength
        self._code_on = code_on
        self._code_off = code_off
        # NOTE(review): tx_repeat lives on the shared RFDevice, so the
        # most recently constructed switch wins -- confirm intended.
        self._rfdevice.tx_repeat = signal_repetitions

    @property
    def should_poll(self):
        """No polling needed."""
        return False

    @property
    def name(self):
        """Return the name of the switch."""
        return self._name

    @property
    def is_on(self):
        """Return true if device is on."""
        return self._state

    def _send_code(self, code_list, protocol, pulselength):
        """Transmit every code in *code_list*, serialized on the device lock."""
        with self._lock:
            _LOGGER.info("Sending code(s): %s", code_list)
            for single_code in code_list:
                self._rfdevice.tx_code(single_code, protocol, pulselength)
        return True

    def turn_on(self, **kwargs):
        """Turn the switch on."""
        if not self._send_code(self._code_on, self._protocol, self._pulselength):
            return
        self._state = True
        self.schedule_update_ha_state()

    def turn_off(self, **kwargs):
        """Turn the switch off."""
        if not self._send_code(self._code_off, self._protocol, self._pulselength):
            return
        self._state = False
        self.schedule_update_ha_state()
| {
"content_hash": "c23b9ce91db8d39bda9326d97c8bbbb0",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 84,
"avg_line_length": 29.18840579710145,
"alnum_prop": 0.6117179741807348,
"repo_name": "turbokongen/home-assistant",
"id": "4ac7283b1942d036653eb220b0563f5f4db72ab1",
"size": "4028",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/rpi_rf/switch.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1720"
},
{
"name": "Python",
"bytes": "30405146"
},
{
"name": "Shell",
"bytes": "4832"
}
],
"symlink_target": ""
} |
import numpy
import opengm
from opengm import learning
import vigra
from progressbar import *
import glob
import os
from functools import partial
from opengm.learning import secondOrderImageDataset, getPbar,superpixelDataset
#i = numpy.ones([7, 5])
#
#print posiFeatures(i).shape
#
# where is the dataset stored
# Root of the Weizmann horse database: rgb/ holds the photographs and
# figure_ground/ the binary foreground masks with matching file names.
dsetRoot = '/home/tbeier/datasets/weizmann_horse_db/'
imgPath = dsetRoot + 'rgb/'
gtBasePath = dsetRoot + 'figure_ground/'
imgFiles = glob.glob(imgPath+'*.jpg')
# Subsample the ground truth by this stride in both dimensions.
takeNth = 2
imgs = []
sps = []
gts = []
pbar = getPbar(len(imgFiles), 'Load Image')
pbar.start()
for i,path in enumerate(imgFiles):
    # Only the first 21 images are used.
    if i>20 :
        break
    gtPath = gtBasePath + os.path.basename(path)
    rgbImg  = vigra.impex.readImage(path)
    gtImg = vigra.impex.readImage(gtPath).astype('uint32')[::takeNth,::takeNth]
    # Binarize the mask: 0 = background, 1 = horse.
    gtImg[gtImg<125] = 0
    gtImg[gtImg>=125] = 1
    # Resize the photo to the (subsampled) mask resolution.
    rgbImg = vigra.resize(rgbImg, [gtImg.shape[0],gtImg.shape[1]])
    #vigra.imshow(gtImg.astype('float32'))
    #vigra.show()
    # SLIC superpixels computed in Lab space; labels made dense and zero-based.
    labImg = vigra.colors.transform_RGB2Lab(rgbImg.astype('float32'))
    sp,nSeg = vigra.analysis.slicSuperpixels(labImg, intensityScaling=20.0, seedDistance=5)
    sp = vigra.analysis.labelImage(sp)-1
    #vigra.segShow(rgbImg, sp)
    #vigra.show()
    # Project the pixel-level ground truth onto the superpixel adjacency graph.
    gg = vigra.graphs.gridGraph(rgbImg.shape[0:2])
    rag = vigra.graphs.regionAdjacencyGraph(gg,sp)
    gt,qtq = rag.projectBaseGraphGt(gtImg)
    #rag.show(rgbImg, gt)
    #vigra.show()
    imgs.append(rgbImg)
    gts.append(gt)
    sps.append(sp)
    pbar.update(i)
pbar.finish()
def posiFeatures(img):
    """Build a 6-channel positional feature map for an image.

    Coordinates are normalized to [-0.5, 0.5]; the channels are
    exp(1 - r), r, x^2, y^2, x, y where r is the distance from the
    image center.
    """
    rows, cols = img.shape[:2]
    ys = numpy.linspace(0, 1, rows) - 0.5
    xs = numpy.linspace(0, 1, cols) - 0.5
    xv, yv = numpy.meshgrid(xs, ys)  # both (rows, cols)
    rad = numpy.sqrt(xv ** 2 + yv ** 2)
    channels = [numpy.exp(1.0 - rad), rad, xv ** 2, yv ** 2, xv, yv]
    res = numpy.dstack(channels)
    assert res.shape[:2] == img.shape[:2]
    return res
def getSelf(img):
    """Return a copy of img linearly rescaled to the [0, 1] range."""
    scaled = img.copy()
    scaled -= scaled.min()
    scaled /= scaled.max()
    return scaled
def labHessianOfGaussian(img, sigma):
    """Hessian-of-Gaussian eigenvalues of the Lab lightness channel,
    rescaled to [0, 1]."""
    lightness = vigra.taggedView(
        vigra.colors.transform_RGB2Lab(img)[:, :, 0], 'xy')
    feat = vigra.filters.hessianOfGaussianEigenvalues(lightness, sigma)
    feat -= feat.min()
    feat /= feat.max()
    return feat
def labStructTensorEv(img, sigma):
    """Structure-tensor eigenvalues of the Lab lightness channel
    (inner scale sigma, outer scale 2*sigma), rescaled to [0, 1]."""
    lightness = vigra.taggedView(
        vigra.colors.transform_RGB2Lab(img)[:, :, 0], 'xy')
    feat = vigra.filters.structureTensorEigenvalues(lightness, sigma, 2 * sigma)
    feat -= feat.min()
    feat /= feat.max()
    return feat
def rgbHist(img):
    """Smoothed 30-bin per-channel RGB histogram features, rescaled to [0, 1]."""
    lo = (0.0, 0.0, 0.0)
    hi = (255.0, 255.0, 255.0)
    tagged = vigra.taggedView(img, 'xyc')
    hist = vigra.histogram.gaussianHistogram(tagged, lo, hi, bins=30,
                                             sigma=3.0, sigmaBin=1.0)
    feat = vigra.taggedView(hist, 'xyc')
    feat -= feat.min()
    feat /= feat.max()
    return feat
def labHist(img):
    """Smoothed 30-bin per-channel Lab histogram features, rescaled to [0, 1]."""
    # Channel ranges of the Lab color space: L in [0, 100], then a and b.
    lo = (0.0, -86.1814, -107.862)
    hi = (100.0, 98.2353, 94.48)
    converted = vigra.colors.transform_RGB2Lab(img)
    hist = vigra.histogram.gaussianHistogram(converted, lo, hi, bins=30,
                                             sigma=3.0, sigmaBin=1.0)
    feat = vigra.taggedView(hist, 'xyc')
    feat -= feat.min()
    feat /= feat.max()
    return feat
def gmag(img, sigma):
    """Gaussian gradient magnitude at scale sigma, rescaled to [0, 1]."""
    mag = vigra.filters.gaussianGradientMagnitude(img, sigma)
    mag -= mag.min()
    mag /= mag.max()
    return mag
# Per-superpixel (unary) feature extractors.
fUnary = [
    posiFeatures,
    labHist,
    rgbHist,
    getSelf,
    #vigra.colors.transform_RGB2XYZ,
    #vigra.colors.transform_RGB2Lab,
    #vigra.colors.transform_RGB2Luv,
    #partial(labHessianOfGaussian, sigma=1.0),
    #partial(labHessianOfGaussian, sigma=2.0),
    #partial(vigra.filters.gaussianGradientMagnitude, sigma=1.0),
    #partial(vigra.filters.gaussianGradientMagnitude, sigma=2.0),
]#
# Per-edge (pairwise) feature extractors.
fBinary = [
    #posiFeatures,
    ##rgbHist,
    #partial(labHessianOfGaussian, sigma=1.0),
    #partial(labHessianOfGaussian, sigma=2.0),
    #partial(labStructTensorEv, sigma=1.0),
    #partial(labStructTensorEv, sigma=2.0),
    partial(gmag, sigma=1.0),
    partial(gmag, sigma=2.0),
]
# Build train/test structured-learning datasets (binary fg/bg labels) over the
# superpixel graphs; a constant feature is appended to each feature vector.
dataset,test_set = superpixelDataset(imgs=imgs,sps=sps, gts=gts, numberOfLabels=2,
                                        fUnary=fUnary, fBinary=fBinary,
                                        addConstFeature=True)
# Subgradient structured SVM, batch mode, QPBO as the inference engine.
learner = learning.subgradientSSVM(dataset, learningRate=0.1, C=0.1,
                        learningMode='batch',maxIterations=2000, averaging=-1)
#learner = learning.structMaxMarginLearner(dataset, 0.1, 0.001, 0)
learner.learn(infCls=opengm.inference.QpboExternal,
              parameter=opengm.InfParam())
# Dump the learned weight vector.
# NOTE(review): Python 2 print statement below -- this script is Python 2 only.
w = dataset.getWeights()
for wi in range(len(w)):
    print "wi ",w[wi]
# predict on test test
for (rgbImg, sp, gm) in test_set :
    # infer for test image
    inf = opengm.inference.QpboExternal(gm)
    inf.infer()
    arg = inf.arg()+1
    # Map superpixel labels back to pixels and display the segmentation.
    gg = vigra.graphs.gridGraph(rgbImg.shape[0:2])
    rag = vigra.graphs.regionAdjacencyGraph(gg,sp)
    seg = rag.projectLabelsToBaseGraph(arg.astype('uint32'))
    vigra.segShow(rgbImg, seg+2)
    vigra.show()
| {
"content_hash": "88038ca5a1c547826339fe42e1e694af",
"timestamp": "",
"source": "github",
"line_count": 206,
"max_line_length": 98,
"avg_line_length": 24.368932038834952,
"alnum_prop": 0.6402390438247012,
"repo_name": "chaubold/opengm",
"id": "1d1579b4a8b7b1dcc4e0d6fe95644cbc3e0c5499",
"size": "5020",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "fubar/brown_horse_sp.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "297"
},
{
"name": "C++",
"bytes": "5700101"
},
{
"name": "CMake",
"bytes": "133637"
},
{
"name": "Jupyter Notebook",
"bytes": "1115075"
},
{
"name": "M",
"bytes": "151"
},
{
"name": "Matlab",
"bytes": "40642"
},
{
"name": "Python",
"bytes": "319084"
},
{
"name": "Shell",
"bytes": "150"
}
],
"symlink_target": ""
} |
import proto
from utils import str_to_bytes
def print_verbose(security_ctx, data):
    """Print data (green, bracketed) when the security context is verbose."""
    if not security_ctx.verbose:
        return
    print(f'\x1b[32;20m++++ {data} ++++\x1b[0m')
def scan_start_request(security_ctx, blocking=True, passive=False, group_channels=5, period_ms=120):
    """Build an encrypted CmdScanStart protobuf request, latin-1 encoded."""
    payload = proto.wifi_scan_pb2.WiFiScanPayload()
    payload.msg = proto.wifi_scan_pb2.TypeCmdScanStart
    params = payload.cmd_scan_start
    params.blocking = blocking
    params.passive = passive
    params.group_channels = group_channels
    params.period_ms = period_ms
    encrypted = security_ctx.encrypt_data(payload.SerializeToString())
    print_verbose(security_ctx, f'Client -> Device (Encrypted CmdScanStart): 0x{encrypted.hex()}')
    return encrypted.decode('latin-1')
def scan_start_response(security_ctx, response_data):
    """Decrypt/parse a ScanStart response; raise RuntimeError on bad status."""
    decrypted = security_ctx.decrypt_data(str_to_bytes(response_data))
    payload = proto.wifi_scan_pb2.WiFiScanPayload()
    payload.ParseFromString(decrypted)
    print_verbose(security_ctx, f'ScanStart status: 0x{str(payload.status)}')
    if payload.status != 0:
        raise RuntimeError
def scan_status_request(security_ctx):
    """Build an encrypted CmdScanStatus protobuf request, latin-1 encoded."""
    payload = proto.wifi_scan_pb2.WiFiScanPayload()
    payload.msg = proto.wifi_scan_pb2.TypeCmdScanStatus
    encrypted = security_ctx.encrypt_data(payload.SerializeToString())
    print_verbose(security_ctx, f'Client -> Device (Encrypted CmdScanStatus): 0x{encrypted.hex()}')
    return encrypted.decode('latin-1')
def scan_status_response(security_ctx, response_data):
    """Decrypt/parse a ScanStatus response.

    Returns a dict with 'finished' and 'count'; raises RuntimeError on a
    non-success status.
    """
    decrypted = security_ctx.decrypt_data(str_to_bytes(response_data))
    payload = proto.wifi_scan_pb2.WiFiScanPayload()
    payload.ParseFromString(decrypted)
    print_verbose(security_ctx, f'ScanStatus status: 0x{str(payload.status)}')
    if payload.status != 0:
        raise RuntimeError
    return {'finished': payload.resp_scan_status.scan_finished,
            'count': payload.resp_scan_status.result_count}
def scan_result_request(security_ctx, index, count):
    """Build an encrypted CmdScanResult request for `count` APs from `index`."""
    payload = proto.wifi_scan_pb2.WiFiScanPayload()
    payload.msg = proto.wifi_scan_pb2.TypeCmdScanResult
    payload.cmd_scan_result.start_index = index
    payload.cmd_scan_result.count = count
    encrypted = security_ctx.encrypt_data(payload.SerializeToString())
    print_verbose(security_ctx, f'Client -> Device (Encrypted CmdScanResult): 0x{encrypted.hex()}')
    return encrypted.decode('latin-1')
def scan_result_response(security_ctx, response_data):
    """Decrypt and parse a ScanResult response into a list of AP dicts.

    Each entry carries 'ssid', 'bssid', 'channel', 'rssi' and 'auth' keys.
    Raises RuntimeError if the device reports a non-success status.
    """
    dec_resp = security_ctx.decrypt_data(str_to_bytes(response_data))
    resp = proto.wifi_scan_pb2.WiFiScanPayload()
    resp.ParseFromString(dec_resp)
    print_verbose(security_ctx, f'ScanResult status: 0x{str(resp.status)}')
    if resp.status != 0:
        # Fix: carry a message instead of a bare RuntimeError.
        raise RuntimeError('ScanResult request failed with status ' + str(resp.status))
    authmode_str = ['Open', 'WEP', 'WPA_PSK', 'WPA2_PSK', 'WPA_WPA2_PSK',
                    'WPA2_ENTERPRISE', 'WPA3_PSK', 'WPA2_WPA3_PSK']
    results = []
    for entry in resp.resp_scan_result.entries:
        # Fix: newer targets report auth modes beyond this table (e.g.
        # WAPI_PSK); previously that raised IndexError. Report them as
        # 'Unknown (<n>)' instead of crashing.
        if 0 <= entry.auth < len(authmode_str):
            auth = authmode_str[entry.auth]
        else:
            auth = 'Unknown ({})'.format(entry.auth)
        results += [{'ssid': entry.ssid.decode('latin-1').rstrip('\x00'),
                     'bssid': entry.bssid.hex(),
                     'channel': entry.channel,
                     'rssi': entry.rssi,
                     'auth': auth}]
        print_verbose(security_ctx, f"ScanResult SSID    : {str(results[-1]['ssid'])}")
        print_verbose(security_ctx, f"ScanResult BSSID   : {str(results[-1]['bssid'])}")
        print_verbose(security_ctx, f"ScanResult Channel : {str(results[-1]['channel'])}")
        print_verbose(security_ctx, f"ScanResult RSSI    : {str(results[-1]['rssi'])}")
        print_verbose(security_ctx, f"ScanResult AUTH    : {str(results[-1]['auth'])}")
    return results
| {
"content_hash": "815d4419881ec600ffeb3190a238afa4",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 105,
"avg_line_length": 46.01162790697674,
"alnum_prop": 0.6805660854182461,
"repo_name": "espressif/esp-idf",
"id": "8d48cc23d318b4f0fa12ccf5998aa59b9381465b",
"size": "4143",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/esp_prov/prov/wifi_scan.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "388440"
},
{
"name": "Batchfile",
"bytes": "5451"
},
{
"name": "C",
"bytes": "69102322"
},
{
"name": "C++",
"bytes": "992772"
},
{
"name": "CMake",
"bytes": "539972"
},
{
"name": "Dockerfile",
"bytes": "3290"
},
{
"name": "Makefile",
"bytes": "23747"
},
{
"name": "Nim",
"bytes": "1005"
},
{
"name": "PowerShell",
"bytes": "4537"
},
{
"name": "Python",
"bytes": "2158180"
},
{
"name": "Roff",
"bytes": "101"
},
{
"name": "Shell",
"bytes": "126143"
}
],
"symlink_target": ""
} |
import re
import sys
from Bio.Seq import Seq
import pandas as pd
import pybedtools as pbt
import pysam
from general import parse_region
def get_region_nt_counts(region, bam, stranded=False):
    """
    Get counts of each nucleotide from a bam file for a given region. If R1 and
    R2 reads both overlap a position, only one count will be added. If the R1
    and R2 reads disagree at a position they both overlap, that read pair is not
    used for that position. Can optionally output strand-specific counts.

    Parameters
    ----------
    region : str, list, or tuple
        Region of type chrom:start-end, chrom:start-end:strand, or [chrom,
        start, end]. The strand is ignored for chrom:start-end:strand. For
        chrom:start-end, the coordinates are one-based inclusive. For [chrom,
        start, end], the coordinates are zero-based and end exclusive (like a
        bed file).
    bam : pysam.AlignmentFile or str
        Bam file opened with pysam or path to bam file (must be sorted and
        indexed).
    stranded : boolean
        Boolean indicating whether read data is stranded and stranded
        nucleotide counts should be returned. Assumes R1 read on reverse
        strand implies + strand coverage etc.

    Returns
    -------
    counts : pandas.DataFrame
        Data frame with the counts for each base in the region. The index of
        this data frame is one-based for compatibility with VCF files.

    Raises
    ------
    ValueError
        If region is not a str, list, or tuple.
    """
    # TODO: I should figure out what the different possible values are that
    # pysam could give me back (so far I only have ATCGN). Can I get deletions
    # and insertions?
    # TODO: This could probably be parallelized.
    if isinstance(bam, str):
        bam = pysam.AlignmentFile(bam, 'rb')

    if isinstance(region, str):
        r = parse_region(region)
        if len(r) == 3:
            chrom, start, end = r
        elif len(r) == 4:
            chrom, start, end, strand = r
        start = int(start)
        end = int(end)
        ind = ['{}:{}'.format(chrom, x) for
               x in range(start, end + 1)]
        pp = bam.pileup(region=region, truncate=True)
    elif isinstance(region, (list, tuple)):
        # Bug fix: the original tested `type(region) is (list or tuple)`,
        # which evaluates to `type(region) is list`, so tuples fell through
        # and crashed below with a NameError on `pp`.
        chrom, start, end = region
        ind = ['{}:{}'.format(chrom, x) for
               x in range(int(start) + 1, int(end) + 1)]
        pp = bam.pileup(chrom, start, end, truncate=True)
    else:
        raise ValueError('region must be a str, list, or tuple')

    cols = ['A', 'T', 'C', 'G', 'N']
    if stranded:
        cols = ['{}+'.format(x) for x in cols] + ['{}-'.format(x) for x in cols]
    counts = pd.DataFrame(0, index=ind, columns=cols)
    for pc in pp:
        # Most of this code deals with R1 and R2 reads that overlap so that we
        # don't get two counts from one fragment.
        pos = pc.reference_pos + 1
        r1_qnames = []
        r1_nts = []
        r2_qnames = []
        r2_nts = []
        for pr in pc.pileups:
            qnames = [r1_qnames, r2_qnames][pr.alignment.is_read2]
            nts = [r1_nts, r2_nts][pr.alignment.is_read2]
            nt = _pos_nt(pr, pc.reference_pos, stranded)
            if nt:
                qnames.append(pr.alignment.qname)
                nts.append(nt)
        r1 = pd.Series(r1_nts, index=r1_qnames)
        r2 = pd.Series(r2_nts, index=r2_qnames)
        df = pd.DataFrame([r1, r2], index=['R1', 'R2']).T
        # Positions covered by only one of the two mates.
        singles = df[df.isnull().sum(axis=1) == 1]
        doubles = df.dropna()
        vcs = []
        vcs.append(singles['R1'].value_counts())
        vcs.append(singles['R2'].value_counts())
        # Drop read pairs whose R1/R2 calls disagree at this position.
        doubles = doubles[doubles.R1 == doubles.R2]
        vcs.append(doubles.R1.value_counts())
        for vc in vcs:
            # .loc replaces the long-removed pandas .ix indexer.
            counts.loc['{}:{}'.format(chrom, pos), vc.index] += vc
    return counts
def _pos_nt(pr, pos, stranded=False):
    """
    Given a pileup read and a position, return the base that is covered by the
    read at the given position if the position is covered.

    Parameters
    ----------
    pr : pysam.calignmentfile.PileupRead
        Pileup read whose aligned bases are inspected.
    pos : int
        Zero-based position of the nucleotide of interest in genomic
        coordinates.
    stranded : boolean
        Boolean indicating whether data is stranded and stranded nucleotide
        should be returned. Assumes R1 read on reverse strand implies + strand
        coverage etc.

    Returns
    -------
    nt : str or None
        If None, then the read did not cover the position. If not None, returns
        the nucleotide at that position (with + or - appended to indicate
        strand if desired).
    """
    lookup = dict(zip(pr.alignment.get_reference_positions(),
                      list(pr.alignment.seq.upper())))
    nt = lookup.get(pos)
    if not (nt and stranded):
        return nt
    read1 = pr.alignment.is_read1
    read2 = pr.alignment.is_read2
    rev = pr.alignment.is_reverse
    strand = None
    # R1 on the reverse strand (or R2 on the forward strand) implies the
    # fragment came from the + strand; the mirror cases imply - strand and
    # require reverse-complementing the observed base.
    if (read1 and rev) or (read2 and not rev):
        strand = '+'
    elif (read1 and not rev) or (read2 and rev):
        nt = str(Seq(nt).reverse_complement())
        strand = '-'
    return '{}{}'.format(nt, strand)
def nt_counts(bam, positions, stranded=False, vcf=False, bed=False):
    """
    Find the number of nucleotides covered at all positions in a bed or vcf
    file.

    Parameters
    ----------
    bam : str or pysam.calignmentfile.AlignmentFile
        Bam file opened with pysam or path to bam file (must
        be sorted and indexed).
    positions : str or pybedtools.BedTool
        Path to bed or vcf file or pybedtools.BedTool object. The extension is
        used to determine whether the file is a bed or vcf (.bed vs .vcf).
    stranded : boolean
        Boolean indicating whether read data is stranded and stranded
        nucleotide counts should be returned. Assumes R1 read on reverse
        strand implies + strand coverage etc.
    vcf : boolean
        Set to True if you are providing a vcf file that doesn't have a .vcf
        suffix.
    bed : boolean
        Set to True if you are providing a bed file that doesn't have a .bed
        suffix.

    Returns
    -------
    counts : pandas.DataFrame
        Data frame with the counts for each base in the region. The index of
        this data frame is one-based for compatibility with VCF files.

    Raises
    ------
    ValueError
        If the type of `positions` cannot be determined.
    """
    if not bed and not vcf:
        if isinstance(positions, pbt.bedtool.BedTool):
            df = positions.to_dataframe()
        elif positions[-4:] == '.bed':
            bed = True
        elif positions[-4:] == '.vcf':
            vcf = True
        else:
            # Bug fix: this branch previously only wrote to stderr and then
            # crashed below with a NameError on the undefined `df`. Fail fast
            # with a clear exception instead.
            raise ValueError('Positions must be BedTool, bed file, or vcf '
                             'file.')
    if bed:
        df = pbt.BedTool(positions).to_dataframe()
    elif vcf:
        from variants import vcf_as_df
        tdf = vcf_as_df(positions)
        df = pd.DataFrame(index=tdf.index)
        df['chrom'] = tdf.CHROM
        # VCF positions are one-based; convert to zero-based half-open.
        df['start'] = tdf.POS - 1
        df['end'] = tdf.POS
    res = []
    for i in df.index:
        # .loc replaces the long-removed pandas .ix indexer.
        region = [df.loc[i, 'chrom'], df.loc[i, 'start'], df.loc[i, 'end']]
        res.append(get_region_nt_counts(region, bam, stranded))
    res = pd.concat(res)
    return res
| {
"content_hash": "a9db63d0cf47c11be50bdb3f9b82c03a",
"timestamp": "",
"source": "github",
"line_count": 213,
"max_line_length": 80,
"avg_line_length": 36.056338028169016,
"alnum_prop": 0.5985677083333333,
"repo_name": "cdeboever3/cdpybio",
"id": "1871565e4cd8f3061c8b35a79fbd9118ba67a31e",
"size": "7680",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cdpybio/pysamext.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Limbo",
"bytes": "1711"
},
{
"name": "Python",
"bytes": "205463"
}
],
"symlink_target": ""
} |
import os
import six
import unittest
import numpy
import chainer
from chainer import testing
from chainer.training import extensions
class TestVariableStatisticsPlot(unittest.TestCase):
    """Smoke-test the VariableStatisticsPlot trainer extension end to end."""

    def setUp(self):
        # Run two iterations and plot at every iteration.
        stop_trigger = (2, 'iteration')
        extension_trigger = (1, 'iteration')
        self.file_name = 'variable_statistics_plot_test.png'
        self.trainer = testing.get_trainer_with_mock_updater(
            stop_trigger=stop_trigger)
        # Statistics are collected for a single random (1, 2, 3) variable.
        x = numpy.random.rand(1, 2, 3)
        self.extension = extensions.VariableStatisticsPlot(
            chainer.variable.Variable(x), trigger=extension_trigger,
            file_name=self.file_name)
        self.trainer.extend(self.extension, extension_trigger)

    # In the following we explicitly use plot_report._available instead of
    # PlotReport.available() because in some cases `test_available()` fails
    # because it sometimes does not raise UserWarning despite
    # matplotlib is not installed (this is due to the difference between
    # the behavior of unittest in python2 and that in python3).
    @unittest.skipUnless(
        extensions.variable_statistics_plot._available,
        'matplotlib is not installed')
    def test_run_and_save_plot(self):
        import matplotlib
        matplotlib.use('Agg')
        try:
            self.trainer.run()
        finally:
            # Always remove the generated image so repeated runs stay clean.
            os.remove(os.path.join(self.trainer.out, self.file_name))
@testing.parameterize(
    {'shape': (2, 7, 3), 'n': 5, 'reservoir_size': 3}
)
class TestReservoir(unittest.TestCase):
    """Check Reservoir keeps a bounded, index-sorted sample of added arrays."""

    def setUp(self):
        # Add more samples (n) than the reservoir can hold (reservoir_size).
        self.xs = [
            numpy.random.uniform(-1, 1, self.shape) for i in range(self.n)]

    def test_reservoir_size(self):
        self.reservoir = extensions.variable_statistics_plot.Reservoir(
            size=self.reservoir_size, data_shape=self.shape)
        for x in self.xs:
            self.reservoir.add(x)
        idxs, data = self.reservoir.get_data()
        assert len(idxs) == self.reservoir_size
        assert len(data) == self.reservoir_size
        assert idxs.ndim == 1
        assert data[0].shape == self.xs[0].shape
        # Returned sample indices must come back in ascending order.
        testing.assert_allclose(idxs, numpy.sort(idxs))
@testing.parameterize(
    {'shape': (2, 7, 3)}
)
class TestStatistician(unittest.TestCase):
    """Check Statistician's mean/std/percentile output against NumPy."""

    def setUp(self):
        self.x = numpy.random.uniform(-1, 1, self.shape)

    def test_statistician_percentile(self):
        self.percentile_sigmas = (0., 50., 100.)  # min, median, max
        self.statistician = extensions.variable_statistics_plot.Statistician(
            collect_mean=True, collect_std=True,
            percentile_sigmas=self.percentile_sigmas)
        stat = self.statistician(self.x, axis=None, dtype=self.x.dtype)
        # Every reported statistic must preserve the input dtype.
        for s in six.itervalues(stat):
            assert s.dtype == self.x.dtype
        testing.assert_allclose(stat['mean'], numpy.mean(self.x))
        testing.assert_allclose(stat['std'], numpy.std(self.x))
        percentile = stat['percentile']
        assert len(percentile) == 3
        testing.assert_allclose(percentile[0], numpy.min(self.x))
        testing.assert_allclose(percentile[1], numpy.median(self.x))
        testing.assert_allclose(percentile[2], numpy.max(self.x))
# Discover and run this module's tests when executed directly.
testing.run_module(__name__, __file__)
| {
"content_hash": "e88edb4bf0bbb44998ecbbd1e18d0e66",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 77,
"avg_line_length": 33.49484536082474,
"alnum_prop": 0.6512773160972607,
"repo_name": "aonotas/chainer",
"id": "9ae05f23cd332ffe6a2ec4a0136ce2bbdd485dcc",
"size": "3249",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tests/chainer_tests/training_tests/extensions_tests/test_variable_statistics_plot.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3368"
},
{
"name": "PowerShell",
"bytes": "7197"
},
{
"name": "Python",
"bytes": "3357320"
}
],
"symlink_target": ""
} |
import json
import os
class SiteSettings:
    """Read-only accessor for the LDAP site configuration in ../ldap.json."""

    def __init__(self):
        """Load ldap.json (one directory above this module) into self.config."""
        here = os.path.dirname(os.path.realpath(__file__))
        with open('%s/../ldap.json' % here) as handle:
            self.config = json.load(handle)

    def get_ldap_url(self):
        """Return the configured LDAP server URL (None if absent)."""
        return self.config.get('ldap_url')

    def get_ldap_base(self):
        """Return the configured LDAP search base DN (None if absent)."""
        return self.config.get('ldap_base')

    def get_bind_ip(self):
        """Return the bind address; defaults to 'localhost'."""
        return self.config.get('bind_ip', 'localhost')

    def get_bind_port(self):
        """Return the bind port; defaults to 5050."""
        return self.config.get('bind_port', 5050)
| {
"content_hash": "f59d66a985e5b6c6b338afee217ba5fc",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 85,
"avg_line_length": 25.272727272727273,
"alnum_prop": 0.6061151079136691,
"repo_name": "ishgroup/lightbook",
"id": "4e31c1f2cfa4ff0be3adbe8c1c675daeb6333398",
"size": "556",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ldap_api/settings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "10496"
},
{
"name": "HTML",
"bytes": "718"
},
{
"name": "JavaScript",
"bytes": "70228"
},
{
"name": "Python",
"bytes": "51767"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
	"""Build the static object template for the destroyed Dantooine large
	communal structure. The BEGIN/END MODIFICATIONS markers delimit the
	area reserved for hand edits in these generated templates."""
	result = Static()
	result.template = "object/static/structure/dantooine/shared_dant_large_communal_dest.iff"
	result.attribute_template_id = -1
	result.stfName("obj_n","unknown_object")
	#### BEGIN MODIFICATIONS ####
	#### END MODIFICATIONS ####
	return result
return result | {
"content_hash": "dee356c5158ea775e71f3e534b469daa",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 90,
"avg_line_length": 24.53846153846154,
"alnum_prop": 0.7021943573667712,
"repo_name": "obi-two/Rebelion",
"id": "602117513e9d0695c8047b4dcc009c9494256406",
"size": "464",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/static/structure/dantooine/shared_dant_large_communal_dest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
# Packaging metadata for the dallasdata.crime distribution (src layout:
# importable code lives under src/).
setup(
    name='dallasdata_crime',
    version='0.1',
    author='Peter Griess',
    author_email='pg@std.in',
    package_dir={'': 'src'},
    packages=['dallasdata.crime'],
    install_requires=[
        'postgres',
        'pyproj',
    ],
)
| {
"content_hash": "6408e65eadd19afa751fc8eb2e77ac8e",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 43,
"avg_line_length": 20.785714285714285,
"alnum_prop": 0.584192439862543,
"repo_name": "dallasdata/crime",
"id": "5036704bdc090bfa2285d59f2fb2e7864a556432",
"size": "1407",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "21758"
},
{
"name": "Shell",
"bytes": "1464"
}
],
"symlink_target": ""
} |
import csv as Csv
import math as python_lib_Math
import math as Math
import functools as python_lib_Functools
import inspect as python_lib_Inspect
from io import StringIO as python_lib_io_StringIO
class _hx_AnonObject:
    """Haxe anonymous object: exposes the given field dict as attributes."""
    def __init__(self, fields):
        self.__dict__ = fields
class Enum:
    """Runtime representation of a Haxe enum value (transpiler-generated)."""
    _hx_class_name = "Enum"
    _hx_fields = ["tag", "index", "params"]
    _hx_methods = ["__str__"]

    def __init__(self,tag,index,params):
        # tag: constructor name; index: constructor ordinal; params:
        # constructor arguments (None for parameterless constructors).
        self.tag = None
        self.index = None
        self.params = None
        self.tag = tag
        self.index = index
        self.params = params

    def __str__(self):
        # Parameterless constructors print as the bare tag, otherwise
        # as Tag(param1,param2,...).
        if (self.params is None):
            return self.tag
        else:
            return (((HxOverrides.stringOrNull(self.tag) + "(") + HxOverrides.stringOrNull(",".join([python_Boot.toString1(x1,'') for x1 in self.params]))) + ")")
class EnumValue:
    """Marker type for Haxe's EnumValue; carries only class metadata."""
    _hx_class_name = "EnumValue"
class Lambda:
    """Subset of Haxe's Lambda iterable helpers used by this module."""
    _hx_class_name = "Lambda"
    _hx_statics = ["has"]

    @staticmethod
    def has(it,elt):
        # Linear scan: True iff an element of `it` compares equal to `elt`.
        _hx_local_0 = HxOverrides.iterator(it)
        while _hx_local_0.hasNext():
            x = _hx_local_0.next()
            if (x == elt):
                return True
        return False
class Script:
    """Program entry point generated from the Haxe Script class."""
    _hx_class_name = "Script"
    _hx_statics = ["main"]

    @staticmethod
    def main():
        # Load nfl.csv from the working directory with Python's csv module
        # and print the first ten rows.
        # NOTE(review): the file handle is never closed.
        f = open("nfl.csv","r")
        csvreader = Csv.reader(f)
        nfl = list(csvreader)
        print(str(nfl[0:10]))
class Std:
    """Subset of Haxe's Std helpers used by this module."""
    _hx_class_name = "Std"
    _hx_statics = ["string"]

    @staticmethod
    def string(s):
        # Haxe Std.string: convert any value to its string representation.
        return python_Boot.toString1(s,"")
class format_csv_Reader:
    """Streaming CSV reader transpiled from Haxe's format.csv.Reader.

    Parses records from an in-memory string and/or a haxe_io_Input stream,
    honoring a single-char separator, a single-char escape/quote char and a
    list of end-of-line sequences.  Instances are also (Haxe-style) iterators
    over records.  Machine-generated code: avoid hand-editing logic.
    """
    _hx_class_name = "format.csv.Reader"
    _hx_fields = ["sep", "esc", "eol", "inp", "eolsize", "buffer", "pos", "bufferOffset", "cachedToken", "cachedPos"]
    _hx_methods = ["substring", "stringLength", "fetchBytes", "get", "peekToken", "nextToken", "readSafeChar", "readEscapedChar", "readEscapedString", "readString", "readField", "readRecord", "open", "reset", "readAll", "hasNext", "next", "iterator"]
    _hx_statics = ["FETCH_SIZE", "readCsv", "parseCsv", "read"]

    def __init__(self,separator = None,escape = None,endOfLine = None):
        # Defaults: separator ",", escape '"', end-of-line ["\r\n", "\n"].
        # Separator and escape must be exactly one character; EOL sequences
        # are sorted longest-first so the longest sequence matches first.
        self.sep = None
        self.esc = None
        self.eol = None
        self.inp = None
        self.eolsize = None
        self.buffer = None
        self.pos = None
        self.bufferOffset = None
        self.cachedToken = None
        self.cachedPos = None
        _g = self
        if (separator is not None):
            self.sep = separator
        else:
            self.sep = ","
        if (self.stringLength(self.sep) != 1):
            raise _HxException((("Separator string \"" + HxOverrides.stringOrNull(self.sep)) + "\" not allowed, only single char"))
        if (escape is not None):
            self.esc = escape
        else:
            self.esc = "\""
        if (self.stringLength(self.esc) != 1):
            raise _HxException((("Escape string \"" + HxOverrides.stringOrNull(self.esc)) + "\" not allowed, only single char"))
        if (endOfLine is not None):
            self.eol = endOfLine
        else:
            self.eol = ["\r\n", "\n"]
        if (Lambda.has(self.eol,None) or Lambda.has(self.eol,"")):
            raise _HxException("EOL sequences can't be empty")
        def _hx_local_0(a,b):
            return (_g.stringLength(b) - _g.stringLength(a))
        self.eol.sort(key= python_lib_Functools.cmp_to_key(_hx_local_0))
        self.eolsize = list(map(self.stringLength,self.eol))
        self.open(None,None)

    def substring(self,_hx_str,pos,length = None):
        return HxString.substr(_hx_str,pos,length)

    def stringLength(self,_hx_str):
        return len(_hx_str)

    def fetchBytes(self,n):
        # Read up to n bytes from the input stream; None at EOF or when no
        # stream is attached.
        if (self.inp is None):
            return None
        try:
            _hx_bytes = haxe_io_Bytes.alloc(n)
            got = self.inp.readBytes(_hx_bytes,0,n)
            return _hx_bytes.getString(0,got)
        except Exception as _hx_e:
            _hx_e1 = _hx_e.val if isinstance(_hx_e, _HxException) else _hx_e
            if isinstance(_hx_e1, haxe_io_Eof):
                e = _hx_e1
                return None
            else:
                raise _hx_e

    def get(self,p,_hx_len):
        # Return _hx_len chars of the logical input at absolute position p,
        # refilling the internal buffer from the stream in 4096-byte chunks
        # when needed; None when nothing is available.
        bpos = (p - self.bufferOffset)
        if ((bpos + _hx_len) > self.stringLength(self.buffer)):
            more = self.fetchBytes(4096)
            if (more is not None):
                self.buffer = (HxOverrides.stringOrNull(self.substring(self.buffer,(self.pos - self.bufferOffset))) + ("null" if more is None else more))
                self.bufferOffset = self.pos
                bpos = (p - self.bufferOffset)
        ret = self.substring(self.buffer,bpos,_hx_len)
        if (ret != ""):
            return ret
        else:
            return None

    def peekToken(self,skip = 0):
        # Look ahead `skip` tokens without consuming input; a token is one
        # character or a full EOL sequence.  The first peeked token is cached
        # for nextToken().
        if (skip is None):
            skip = 0
        token = self.cachedToken
        p = self.pos
        if (token is not None):
            p = self.cachedPos
            skip = (skip - 1)
        def _hx_local_2():
            nonlocal skip
            _hx_local_1 = skip
            skip = (skip - 1)
            return _hx_local_1
        while (_hx_local_2() >= 0):
            token = self.get(p,1)
            if (token is None):
                break
            _g1 = 0
            _g = len(self.eol)
            while (_g1 < _g):
                i = _g1
                _g1 = (_g1 + 1)
                t = self.get(p,(self.eolsize[i] if i >= 0 and i < len(self.eolsize) else None))
                if (t == (self.eol[i] if i >= 0 and i < len(self.eol) else None)):
                    token = t
                    break
            p = (p + self.stringLength(token))
        if (self.cachedToken is None):
            self.cachedToken = token
            self.cachedPos = p
        return token

    def nextToken(self):
        # Consume and return the next token (None at end of input).
        ret = self.peekToken()
        if (ret is None):
            return None
        self.pos = self.cachedPos
        self.cachedToken = None
        return ret

    def readSafeChar(self):
        # One char of an unquoted field; stops at separator, escape, or EOL.
        cur = self.peekToken()
        if (((cur == self.sep) or ((cur == self.esc))) or Lambda.has(self.eol,cur)):
            return None
        return self.nextToken()

    def readEscapedChar(self):
        # One char inside a quoted field; a doubled escape char yields a
        # single literal escape char, a lone one terminates the field.
        cur = self.peekToken()
        if (cur == self.esc):
            if (self.peekToken(1) != self.esc):
                return None
            self.nextToken()
        return self.nextToken()

    def readEscapedString(self):
        # Body of a quoted field (without the surrounding quotes).
        buf_b = python_lib_io_StringIO()
        x = self.readEscapedChar()
        while (x is not None):
            buf_b.write(Std.string(x))
            x = self.readEscapedChar()
        return buf_b.getvalue()

    def readString(self):
        # Body of an unquoted field.
        buf_b = python_lib_io_StringIO()
        x = self.readSafeChar()
        while (x is not None):
            buf_b.write(Std.string(x))
            x = self.readSafeChar()
        return buf_b.getvalue()

    def readField(self):
        # One field, quoted or bare; raises when a quoted field is not
        # properly terminated.
        cur = self.peekToken()
        if (cur == self.esc):
            self.nextToken()
            s = self.readEscapedString()
            fi = self.nextToken()
            if (fi != self.esc):
                raise _HxException(((("Missing " + HxOverrides.stringOrNull(self.esc)) + " at the end of escaped field ") + HxOverrides.stringOrNull((((HxOverrides.stringOrNull(HxString.substr(s,0,10)) + "[...]") if ((len(s) > 15)) else s)))))
            return s
        else:
            return self.readString()

    def readRecord(self):
        # One record: fields separated by self.sep; the trailing EOL token
        # is left unconsumed.
        r = []
        x = self.readField()
        r.append(x)
        while (self.peekToken() == self.sep):
            self.nextToken()
            x1 = self.readField()
            r.append(x1)
        return r

    def open(self,string = None,stream = None):
        # (Re)initialize reader state over an optional preloaded string
        # and/or an input stream.
        if (string is not None):
            self.buffer = string
        else:
            self.buffer = ""
        self.inp = stream
        self.pos = 0
        self.bufferOffset = 0
        self.cachedToken = None
        self.cachedPos = 0
        return self

    def reset(self,string = None,stream = None):
        return self.open(string,stream)

    def readAll(self):
        # Read every remaining record into a list of lists of strings.
        r = []
        nl = None
        while (self.peekToken() is not None):
            x = self.readRecord()
            r.append(x)
            nl = self.nextToken()
            if ((nl is not None) and (not Lambda.has(self.eol,nl))):
                raise _HxException((("Unexpected \"" + ("null" if nl is None else nl)) + "\" after record"))
        return r

    def hasNext(self):
        return (self.peekToken() is not None)

    def next(self):
        # Haxe iterator protocol: return the next record.
        r = self.readRecord()
        nl = self.nextToken()
        if ((nl is not None) and (not Lambda.has(self.eol,nl))):
            raise _HxException((("Unexpected \"" + ("null" if nl is None else nl)) + "\" after record"))
        return r

    def iterator(self):
        return self

    @staticmethod
    def readCsv(stream,separator = None,escape = None,endOfLine = None):
        # Build a reader over a haxe_io_Input stream.
        p = format_csv_Reader(separator, escape, endOfLine)
        p.inp = stream
        return p

    @staticmethod
    def parseCsv(text,separator = None,escape = None,endOfLine = None):
        # Parse a full CSV string into a list of records.
        p = format_csv_Reader(separator, escape, endOfLine)
        p.buffer = text
        return p.readAll()

    @staticmethod
    def read(text,separator = None,escape = None,endOfLine = None):
        return format_csv_Reader.parseCsv(text,separator,escape,endOfLine)
class haxe_io_Bytes:
    """Haxe byte buffer backed by a Python bytearray."""
    _hx_class_name = "haxe.io.Bytes"
    _hx_fields = ["length", "b"]
    _hx_methods = ["getString"]
    _hx_statics = ["alloc"]

    def __init__(self,length,b):
        self.length = None
        self.b = None
        self.length = length
        self.b = b

    def getString(self,pos,_hx_len):
        # Decode _hx_len bytes starting at pos as UTF-8, replacing
        # undecodable bytes; bounds-checked against self.length.
        if (((pos < 0) or ((_hx_len < 0))) or (((pos + _hx_len) > self.length))):
            raise _HxException(haxe_io_Error.OutsideBounds)
        return self.b[pos:pos+_hx_len].decode('UTF-8','replace')

    @staticmethod
    def alloc(length):
        # Allocate a zero-filled buffer of `length` bytes.
        return haxe_io_Bytes(length, bytearray(length))
class haxe_io_Input:
    """Abstract Haxe input stream; subclasses must implement readByte."""
    _hx_class_name = "haxe.io.Input"
    _hx_methods = ["readByte", "readBytes"]

    def readByte(self):
        raise _HxException("Not implemented")

    def readBytes(self,s,pos,_hx_len):
        # Default implementation: fill s.b[pos:pos+_hx_len] one byte at a
        # time via readByte(); returns the number of bytes requested.
        k = _hx_len
        b = s.b
        if (((pos < 0) or ((_hx_len < 0))) or (((pos + _hx_len) > s.length))):
            raise _HxException(haxe_io_Error.OutsideBounds)
        while (k > 0):
            b[pos] = self.readByte()
            pos = (pos + 1)
            k = (k - 1)
        return _hx_len
class haxe_io_Eof:
    """Marker object used by Haxe IO to signal end of input."""
    _hx_class_name = "haxe.io.Eof"
    _hx_methods = ["toString"]

    def toString(self):
        # Fixed textual representation, matching the Haxe runtime.
        return "Eof"
class haxe_io_Error(Enum):
    # Haxe-style enum of IO error conditions. Note: `Enum` here is the
    # generated Haxe runtime base class (tag/index/params), NOT Python's
    # enum.Enum.
    _hx_class_name = "haxe.io.Error"
    @staticmethod
    def Custom(e):
        # Wrap an arbitrary error value in the Custom(...) constructor (index 3).
        return haxe_io_Error("Custom", 3, [e])
# Parameterless enum instances, created once at module load.
haxe_io_Error.Blocked = haxe_io_Error("Blocked", 0, list())
haxe_io_Error.Overflow = haxe_io_Error("Overflow", 1, list())
haxe_io_Error.OutsideBounds = haxe_io_Error("OutsideBounds", 2, list())
class python_Boot:
    # Runtime support for Haxe-generated Python: Std.string()-style value
    # rendering, reflection over the generated _hx_* metadata, and mangling
    # of field names that collide with Python keywords.
    _hx_class_name = "python.Boot"
    _hx_statics = ["keywords", "toString1", "fields", "simpleField", "getInstanceFields", "getSuperClass", "getClassFields", "prefixLength", "unhandleKeywords"]
    @staticmethod
    def toString1(o,s):
        # Haxe Std.string(): render any value as text. *s* accumulates one
        # tab per recursion level and doubles as a depth guard (cap: 5).
        if (o is None):
            return "null"
        if isinstance(o,str):
            return o
        if (s is None):
            s = ""
        if (len(s) >= 5):
            return "<...>"
        if isinstance(o,bool):
            if o:
                return "true"
            else:
                return "false"
        if isinstance(o,int):
            return str(o)
        if isinstance(o,float):
            try:
                # Whole-valued floats print like Haxe ints (no trailing .0).
                if (o == int(o)):
                    def _hx_local_1():
                        def _hx_local_0():
                            v = o
                            return Math.floor((v + 0.5))
                        return str(_hx_local_0())
                    return _hx_local_1()
                else:
                    return str(o)
            except Exception as _hx_e:
                _hx_e1 = _hx_e.val if isinstance(_hx_e, _HxException) else _hx_e
                e = _hx_e1
                return str(o)
        if isinstance(o,list):
            # Arrays render as "[a,b,...]", recursing element by element.
            o1 = o
            l = len(o1)
            st = "["
            s = (("null" if s is None else s) + "\t")
            _g = 0
            while (_g < l):
                i = _g
                _g = (_g + 1)
                prefix = ""
                if (i > 0):
                    prefix = ","
                st = (("null" if st is None else st) + HxOverrides.stringOrNull(((("null" if prefix is None else prefix) + HxOverrides.stringOrNull(python_Boot.toString1((o1[i] if i >= 0 and i < len(o1) else None),s))))))
            st = (("null" if st is None else st) + "]")
            return st
        try:
            # Prefer an object's own Haxe toString() when present.
            if hasattr(o,"toString"):
                return o.toString()
        except Exception as _hx_e:
            _hx_e1 = _hx_e.val if isinstance(_hx_e, _HxException) else _hx_e
            pass
        if (python_lib_Inspect.isfunction(o) or python_lib_Inspect.ismethod(o)):
            return "<function>"
        if hasattr(o,"__class__"):
            if isinstance(o,_hx_AnonObject):
                # Anonymous object: "{ field : value, ... }".
                toStr = None
                try:
                    fields = python_Boot.fields(o)
                    fieldsStr = None
                    _g1 = []
                    _g11 = 0
                    while (_g11 < len(fields)):
                        f = (fields[_g11] if _g11 >= 0 and _g11 < len(fields) else None)
                        _g11 = (_g11 + 1)
                        x = ((("" + ("null" if f is None else f)) + " : ") + HxOverrides.stringOrNull(python_Boot.toString1(python_Boot.simpleField(o,f),(("null" if s is None else s) + "\t"))))
                        _g1.append(x)
                    fieldsStr = _g1
                    toStr = (("{ " + HxOverrides.stringOrNull(", ".join([x1 for x1 in fieldsStr]))) + " }")
                except Exception as _hx_e:
                    _hx_e1 = _hx_e.val if isinstance(_hx_e, _HxException) else _hx_e
                    e2 = _hx_e1
                    return "{ ... }"
                if (toStr is None):
                    return "{ ... }"
                else:
                    return toStr
            if isinstance(o,Enum):
                # Haxe enum value: "Tag(param,...)" or just "Tag".
                o2 = o
                l1 = len(o2.params)
                hasParams = (l1 > 0)
                if hasParams:
                    paramsStr = ""
                    _g2 = 0
                    while (_g2 < l1):
                        i1 = _g2
                        _g2 = (_g2 + 1)
                        prefix1 = ""
                        if (i1 > 0):
                            prefix1 = ","
                        paramsStr = (("null" if paramsStr is None else paramsStr) + HxOverrides.stringOrNull(((("null" if prefix1 is None else prefix1) + HxOverrides.stringOrNull(python_Boot.toString1((o2.params[i1] if i1 >= 0 and i1 < len(o2.params) else None),s))))))
                    return (((HxOverrides.stringOrNull(o2.tag) + "(") + ("null" if paramsStr is None else paramsStr)) + ")")
                else:
                    return o2.tag
            if hasattr(o,"_hx_class_name"):
                if (o.__class__.__name__ != "type"):
                    # Instance of a Haxe class: "ClassName( f : v, ... )".
                    fields1 = python_Boot.getInstanceFields(o)
                    fieldsStr1 = None
                    _g3 = []
                    _g12 = 0
                    while (_g12 < len(fields1)):
                        f1 = (fields1[_g12] if _g12 >= 0 and _g12 < len(fields1) else None)
                        _g12 = (_g12 + 1)
                        x1 = ((("" + ("null" if f1 is None else f1)) + " : ") + HxOverrides.stringOrNull(python_Boot.toString1(python_Boot.simpleField(o,f1),(("null" if s is None else s) + "\t"))))
                        _g3.append(x1)
                    fieldsStr1 = _g3
                    toStr1 = (((HxOverrides.stringOrNull(o._hx_class_name) + "( ") + HxOverrides.stringOrNull(", ".join([x1 for x1 in fieldsStr1]))) + " )")
                    return toStr1
                else:
                    # The class object itself: "#ClassName( static : v, ... )".
                    fields2 = python_Boot.getClassFields(o)
                    fieldsStr2 = None
                    _g4 = []
                    _g13 = 0
                    while (_g13 < len(fields2)):
                        f2 = (fields2[_g13] if _g13 >= 0 and _g13 < len(fields2) else None)
                        _g13 = (_g13 + 1)
                        x2 = ((("" + ("null" if f2 is None else f2)) + " : ") + HxOverrides.stringOrNull(python_Boot.toString1(python_Boot.simpleField(o,f2),(("null" if s is None else s) + "\t"))))
                        _g4.append(x2)
                    fieldsStr2 = _g4
                    toStr2 = (((("#" + HxOverrides.stringOrNull(o._hx_class_name)) + "( ") + HxOverrides.stringOrNull(", ".join([x1 for x1 in fieldsStr2]))) + " )")
                    return toStr2
            # Fallbacks for plain Python values and callables.
            if (o == str):
                return "#String"
            if (o == list):
                return "#Array"
            if callable(o):
                return "function"
            try:
                if hasattr(o,"__repr__"):
                    return o.__repr__()
            except Exception as _hx_e:
                _hx_e1 = _hx_e.val if isinstance(_hx_e, _HxException) else _hx_e
                pass
            if hasattr(o,"__str__"):
                return o.__str__([])
            if hasattr(o,"__name__"):
                return o.__name__
            return "???"
        else:
            return str(o)
    @staticmethod
    def fields(o):
        # Reflection: list an object's field names. Haxe classes expose
        # _hx_fields; anonymous objects fall back to __dict__ keys (with
        # keyword-mangling undone); other objects use raw __dict__ keys.
        a = []
        if (o is not None):
            if hasattr(o,"_hx_fields"):
                fields = o._hx_fields
                return list(fields)
            if isinstance(o,_hx_AnonObject):
                d = o.__dict__
                keys = d.keys()
                handler = python_Boot.unhandleKeywords
                for k in keys:
                    a.append(handler(k))
            elif hasattr(o,"__dict__"):
                a1 = []
                d1 = o.__dict__
                keys1 = d1.keys()
                for k in keys1:
                    a.append(k)
        return a
    @staticmethod
    def simpleField(o,field):
        # Reflective field read; applies Haxe's "_hx_" prefix to names that
        # collide with Python keywords or look like "__name" (dunder-prefixed,
        # not dunder-suffixed). Missing fields read as None.
        if (field is None):
            return None
        field1 = None
        if field in python_Boot.keywords:
            field1 = ("_hx_" + field)
        elif ((((len(field) > 2) and ((ord(field[0]) == 95))) and ((ord(field[1]) == 95))) and ((ord(field[(len(field) - 1)]) != 95))):
            field1 = ("_hx_" + field)
        else:
            field1 = field
        if hasattr(o,field1):
            return getattr(o,field1)
        else:
            return None
    @staticmethod
    def getInstanceFields(c):
        # Collect a class's instance fields and methods, merged with those
        # inherited from its Haxe superclass chain (deduplicated).
        f = None
        if hasattr(c,"_hx_fields"):
            f = c._hx_fields
        else:
            f = []
        if hasattr(c,"_hx_methods"):
            a = c._hx_methods
            f = (f + a)
        sc = python_Boot.getSuperClass(c)
        if (sc is None):
            return f
        else:
            scArr = python_Boot.getInstanceFields(sc)
            scMap = set(scArr)
            res = []
            _g = 0
            while (_g < len(f)):
                f1 = (f[_g] if _g >= 0 and _g < len(f) else None)
                _g = (_g + 1)
                if (not f1 in scMap):
                    scArr.append(f1)
            return scArr
    @staticmethod
    def getSuperClass(c):
        # Follow the generated _hx_super link (None for root classes).
        if (c is None):
            return None
        try:
            if hasattr(c,"_hx_super"):
                return c._hx_super
            return None
        except Exception as _hx_e:
            _hx_e1 = _hx_e.val if isinstance(_hx_e, _HxException) else _hx_e
            pass
        return None
    @staticmethod
    def getClassFields(c):
        # Static (class-level) field names, as declared in _hx_statics.
        if hasattr(c,"_hx_statics"):
            x = c._hx_statics
            return list(x)
        else:
            return []
    @staticmethod
    def unhandleKeywords(name):
        # Reverse the "_hx_" keyword mangling applied by simpleField.
        if (HxString.substr(name,0,python_Boot.prefixLength) == "_hx_"):
            real = HxString.substr(name,python_Boot.prefixLength,None)
            if real in python_Boot.keywords:
                return real
        return name
class python_HaxeIterator:
    """Adapts a native Python iterator to the Haxe hasNext()/next() protocol."""
    _hx_class_name = "python.HaxeIterator"
    _hx_fields = ["it", "x", "has", "checked"]
    _hx_methods = ["next", "hasNext"]

    def __init__(self, it):
        self.it = it
        self.x = None
        self.has = False
        self.checked = False

    def next(self):
        # Lazily advance if the caller skipped hasNext().
        if not self.checked:
            self.hasNext()
        self.checked = False
        return self.x

    def hasNext(self):
        """Peek one element ahead; caches it in self.x until next() consumes it."""
        if self.checked:
            return self.has
        try:
            self.x = self.it.__next__()
            self.has = True
        except Exception as exc:
            unwrapped = exc.val if isinstance(exc, _HxException) else exc
            if not isinstance(unwrapped, StopIteration):
                raise exc
            self.has = False
            self.x = None
        self.checked = True
        return self.has
class python_internal_ArrayImpl:
    """Array access helpers used by Haxe-generated code."""
    _hx_class_name = "python.internal.ArrayImpl"
    _hx_statics = ["_get"]

    @staticmethod
    def _get(x, idx):
        # Haxe array semantics: out-of-range reads yield null, never raise.
        return x[idx] if -1 < idx < len(x) else None
class _HxException(Exception):
_hx_class_name = "_HxException"
_hx_fields = ["val"]
_hx_methods = []
_hx_statics = []
_hx_super = Exception
def __init__(self,val):
self.val = None
message = str(val)
super().__init__(message)
self.val = val
class HxOverrides:
    """Semantics shims for places where Haxe behavior differs from Python."""
    _hx_class_name = "HxOverrides"
    _hx_statics = ["iterator", "eq", "stringOrNull"]

    @staticmethod
    def iterator(x):
        # Lists get wrapped to expose the Haxe iterator protocol; anything
        # else is expected to provide its own iterator() method.
        if isinstance(x, list):
            return python_HaxeIterator(iter(x))
        return x.iterator()

    @staticmethod
    def eq(a, b):
        """Haxe ==: arrays compare by identity, everything else by value."""
        if isinstance(a, list) or isinstance(b, list):
            return a is b
        return a == b

    @staticmethod
    def stringOrNull(s):
        # Haxe renders null strings as the literal text "null".
        return "null" if s is None else s
class HxString:
    """String helpers implementing Haxe's substr() semantics."""
    _hx_class_name = "HxString"
    _hx_statics = ["substr"]

    @staticmethod
    def substr(s, startIndex, _hx_len = None):
        # None length -> rest of the string; 0 -> empty; otherwise a slice.
        if _hx_len is None:
            return s[startIndex:]
        if _hx_len == 0:
            return ""
        return s[startIndex:startIndex + _hx_len]
# One-time initialization of the Haxe runtime statics at module load.
Math.NEGATIVE_INFINITY = float("-inf")
Math.POSITIVE_INFINITY = float("inf")
Math.NaN = float("nan")
Math.PI = python_lib_Math.pi
# Bytes pulled from the input stream per buffer refill in the CSV reader.
format_csv_Reader.FETCH_SIZE = 4096
# Python keywords that Haxe field names must not collide with
# (see python_Boot.simpleField / unhandleKeywords).
python_Boot.keywords = set(["and", "del", "from", "not", "with", "as", "elif", "global", "or", "yield", "assert", "else", "if", "pass", "None", "break", "except", "import", "raise", "True", "class", "exec", "in", "return", "False", "continue", "finally", "is", "try", "def", "for", "lambda", "while"])
python_Boot.prefixLength = len("_hx_")
Script.main() | {
"content_hash": "2b18a9a2da88f3087bd84a4ecdec29ee",
"timestamp": "",
"source": "github",
"line_count": 713,
"max_line_length": 301,
"avg_line_length": 26.220196353436187,
"alnum_prop": 0.60994918427387,
"repo_name": "ustutz/dataquest",
"id": "f1d5598ca2656e2506db8e24eee034c3239746d5",
"size": "18695",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Data_Analyst/Step_2_Intermediate_Python_and_Pandas/1_Python_Programming_Intermediate/5_The_csv_module/script.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3837"
},
{
"name": "Haxe",
"bytes": "144293"
},
{
"name": "Python",
"bytes": "754917"
}
],
"symlink_target": ""
} |
import os
import json
import datetime
import pprint
import math
# Forecast.io / Dark Sky API key that was used when the weather data was fetched.
API_KEY = "0f60efd9302d38f88380c6a4608bd9be"
# Input locations: per-city weather dumps and the Yelp academic dataset files.
dir_weather = "../dataset/weather"
file_business = "../dataset/yelp_academic_dataset_business.json"
file_users = "../dataset/yelp_academic_dataset_user.json"
file_reviews = "../dataset/yelp_academic_dataset_review.json"
file_users_centroid = "../dataset/user-centroid.json"
file_users_reviews = "../dataset/user-location.json"
file_feature_name = "feature-climate.data"
# In-memory caches populated by the load/get helpers below.
_business = {}
_userLocations = {}
_userReviews = {}
_weather = {}
def loadWeather():
    # Populate the _weather cache from the per-city JSON dumps on disk.
    # Keys are "<city>-YYYY-MM-DD" (lowercased); values summarize the day,
    # e.g. "rain temperatureMin:... temperatureMax:...".
    # NOTE: Python 2 syntax (print statement, "except Exception, e").
    f = os.listdir(dir_weather)
    c = 0
    for l in f:
        if (l == '.DS_Store'):
            continue
        folder = dir_weather + '/' + l
        files = os.listdir(folder)
        for f in files:
            if (f == '.DS_Store'):
                continue
            fo = open(folder + '/' + f)
            # Each line of a dump file is one day's forecast JSON.
            for i in fo:
                try:
                    j = json.loads(i)
                except Exception, e:
                    print f
                    raise
                data = j['daily']['data'][0]
                d2 = datetime.datetime.utcfromtimestamp(int(data['time']))
                w = l + '-' + d2.strftime('%Y-%m-%d')
                try:
                    _weather[w.lower()] = '%s temperatureMin:%f temperatureMax:%f' % (data['icon'], data['temperatureMin'], data['temperatureMax'])
                except Exception, e:
                    # Some records lack temperature fields; log and skip them.
                    print 'Local: %s - Data: %s' % (l, d2.strftime('%d/%m/%Y %H:%M:%S %Z'))
    # if (datetime.datetime.now() >= d2):
    # print json.dumps(j)
    # raise
def loadUserReviews():
    # Load the user -> reviews map and print a histogram mapping
    # "number of distinct businesses reviewed" -> "number of users".
    # Diagnostic output only; the map is also cached in _userReviews.
    # NOTE: Python 2 print statement.
    global _userReviews
    _userReviews = {}
    f = open(file_users_reviews)
    _userReviews = json.load(f)
    xr = {}
    c = 0
    for k in _userReviews.keys():
        c += 1
        u = _userReviews[k]
        b = []
        for r in u['reviews']:
            bid = r['business_id']
            b.append(bid)
        # Count distinct businesses this user reviewed.
        qtd = len(list(set(b)))
        q = 0
        if (qtd in xr):
            q = xr[qtd] + 1
        else:
            q = 1
        xr[qtd] = q
    pprint.pprint(xr)
    print 'total: %d' % c
def generateVW():
#Reading user reviews
# print 'loading user reviews locations'
# loadUserReviews()
#Generate users attributes
print 'reading user attributes'
users = getUserAttributes()
#Get users locations
print 'reading user locations'
getUserLocations()
#Generate venues attributes
print 'reading business attributes'
items = getBusinessAttributes()
#Loading weather
print 'loading weather'
loadWeather()
#getting relevant climate feature
#fw_feature = open('output/analysis/' + file_feature_name, 'w')
f = open(file_reviews)
fw = open('output/20053-20131.vw', 'w')
for line in f:
item = json.loads(line)
date = datetime.datetime.strptime(item['date'], '%Y-%m-%d')
if (date.year == 2005 and date.month >= 03) or (date.year > 2005 and date.year < 2013) or (date.year == 2013 and date.month < 02):
user_id = item['user_id']
item_id = item['business_id']
review_id = item['review_id']
rating = item['stars']#int(item['stars'] >= 4)
#review namespace
nReview = 'uid_%s_bid_%s_rid_%s' % (user_id, item_id, review_id)
#user namespace
nUser = users[user_id]
#item attributes namespace
nItemAttr = items[item_id + '-attr']
#item categories namespace
nItemCat = items[item_id + '-cat']
#contextual namespace
#nContextual = getContextAttributes(item)
#temporal
temporal = getTemporalAttributes(item)
temporal = temporal.lower()
#weather
weather = getWeatherAttributes(item)
weather = weather.lower()
#distance
distance = getDistanceAttributes(item_id, user_id)
distante = distance.lower()
result = "%s '%s |user %s |item %s |category %s |weather %s |distance %s |temporal %s" % (rating, nReview, nUser, nItemAttr, nItemCat, weather, distance, temporal)
#result = '%s |review %s |user %s |item %s |type %s |context %s' % (rating, nReview, nUser, nItemAttr, nItemCat, nContextual)
fw.write(result + "\n")
#cli = weather.split(' ')
#feature = cli[0].replace('nothing', '').replace('not-found', '').strip()
# feature = temporal
# #tratamento
# feature = feature.replace('weekday:0', '').replace('weekend:0', '').replace('weekday:1', 'weekday').replace('weekend:1', 'weekend')
# #day of week
# feature = feature.replace('monday','').replace('tuesday','').replace('wednesday','').replace('thursday','').replace('friday','').replace('saturday', '').replace('sunday', '')
# #seasons
# feature = feature.replace('summer', '').replace('winter', '').replace('spring', '').replace('fall', '')
# #weekdays
# feature = feature.replace('weekday', '').replace('weekend', '')
# #months
# feature = feature.replace('september', '').replace('december', '').replace('july', '').replace('march', '').replace('august', '').replace('may', '').replace('june', '').replace('november', '').replace('february', '').replace('october', '').replace('january', '').replace('april', '')
#gravando arquivo
# if feature:
# feat = "%s|review %s |item %s |feature %s" % (rating, review_id, item_id, feature)
# fw_feature.write(feat + "\n")
f.close()
fw.close()
# fw_feature.close()
return
def generateUserItemVW():
    """Write a minimal VW file with only the user and item namespaces."""
    source = open(file_reviews)
    target = open('output/data-user.vw', 'w')
    for raw in source:
        review = json.loads(raw)
        vw_line = "%s |user %s |item %s" % (review['stars'], review['user_id'], review['business_id'])
        target.write(vw_line + "\n")
    source.close()
    target.close()
    return
def getUserLocations():
    """Load each user's centroid (lat/lng) into the _userLocations cache."""
    global _userLocations
    _userLocations = {}
    source = open(file_users_centroid)
    # One JSON object per line: {"user_id": ..., "lat": ..., "lng": ...}.
    for raw in source:
        record = json.loads(raw)
        _userLocations[record['user_id']] = {'lat': record['lat'], 'lng': record['lng']}
def getUserAttributes():
    """Return {user_id: VW feature string} built from the users dump."""
    attributes = {}
    source = open(file_users)
    for raw in source:
        record = json.loads(raw)
        attributes[record['user_id']] = 'average_stars:%s review_counts:%s' % (str(record['average_stars']), str(record['review_count']))
    source.close()
    return attributes
#Item attributes
def getBusinessAttributes():
    """Cache business records and return their VW namespaces.

    Produces two entries per business:
      "<id>-attr" -> stars plus the flattened attribute features
      "<id>-cat"  -> the lowercased category tokens
    Also fills the _business cache used by the distance/weather helpers.
    """
    global _business
    _business = {}
    source = open(file_business)
    namespaces = {}
    for raw in source:
        record = json.loads(raw)
        business_id = record['business_id']
        _business[business_id] = record
        categories = ' '.join(getCategories(record['categories']))
        attributes = ' '.join(getAttributes(record['attributes'], None))
        namespaces[business_id + '-attr'] = 'stars:%d %s' % (record['stars'], attributes.lower())
        namespaces[business_id + '-cat'] = categories.lower()
    source.close()
    return namespaces
#Item categories
def getCategories(attr):
    """Turn category names into single VW tokens (spaces become dashes)."""
    return [name.replace(" ", "-") for name in attr]
#Item attributes
def getAttributes(attrs, attr):
    # Flatten a (possibly nested) business attribute dict into VW tokens.
    # Nested dicts recurse with the parent key as prefix ("parent_child").
    # String values become "key__value", booleans "key:0/1", other values
    # "key:<value>"; spaces are replaced with dashes so each token stays a
    # single VW feature. *attr* is the parent-key prefix (None at top level).
    # NOTE: relies on the Python 2 `unicode` builtin.
    result = []
    for a in attrs:
        key = a
        value = attrs[a]
        if (type(value) is dict):
            result.extend(getAttributes(value, key))
        else:
            if (attr is not None):
                key = attr + '_' + key
            v = value
            if (type(value) is unicode):
                value = key + '__' + v
            elif(type(value) is bool):
                v = str(int(attrs[a]))
                value = key + ':' + v
            else:
                value = key + ':' + str(v)
            value = value.replace(" ", "-")
            result.append(value)
    return result
#Context attributes
def getContextAttributes(review):
    """Combine temporal, distance and weather features into one lowercase string."""
    # Keep the original evaluation order (weather lookup has side effects
    # on the _city miss list).
    temporal_part = getTemporalAttributes(review)
    weather_part = getWeatherAttributes(review)
    distance_part = getDistanceAttributes(review['business_id'], review['user_id'])
    return ('%s %s %s' % (temporal_part, distance_part, weather_part)).lower()
def getTemporalAttributes(review):
    """Build temporal features: day name, weekday/weekend flags, month, season, date."""
    when = datetime.datetime.strptime(review['date'], '%Y-%m-%d')
    # isoweekday(): Mon=1 .. Sun=7, so < 6 means a weekday.
    is_weekday = int(when.isoweekday() < 6)
    return '%s weekday:%d weekend:%d %s %s %s' % (
        when.strftime('%A').lower(),
        is_weekday,
        1 - is_weekday,
        when.strftime('%B').lower(),
        getSeason(when, 'north'),
        review['date'],
    )
# Weather-lookup keys that failed, collected for the final diagnostic dump.
_city = []
def getWeatherAttributes(review):
    # Look up the day's weather for the review's business city, keyed as
    # "<city>-YYYY-MM-DD" in _weather. Suburb names containing "las vegas"
    # fall back to the plain "las vegas" key; other misses are recorded in
    # _city and rendered as 'not-found'.
    # NOTE: Python 2 syntax ("except Exception, e", print statement).
    # CAUTION: `return` inside `finally` swallows any exception raised in
    # the except branch (e.g. a missing "las vegas" fallback key).
    b = _business[review['business_id']]
    name = b['city']
    name = name.strip()
    weatherName = name + '-' + review['date']
    weatherName = weatherName.replace('"', '').lower()
    v = 'nothing'
    try:
        v = _weather[weatherName]
    except Exception, e:
        if ('las vegas' in weatherName):
            weatherName = 'las vegas' + '-' + review['date']
            v = _weather[weatherName]
        else:
            print 'error'
            v = 'not-found'
            _city.append(weatherName)
    finally:
        return v
def getSeason(date, hemisphere):
    """Return the season name ('spring'/'summer'/'fall'/'winter') for *date*.

    Season boundaries are approximated by month*100+day cutoffs
    (Mar 21 / Jun 21 / Sep 23 / Dec 23). For any hemisphere other than
    'north' the season is shifted by half a year.
    """
    md = date.month * 100 + date.day
    if 320 < md < 621:
        season = 0  # spring
    elif 620 < md < 923:
        season = 1  # summer
    elif 922 < md < 1223:
        season = 2  # fall
    else:
        season = 3  # winter
    if hemisphere != 'north':
        # BUG FIX: the opposite hemisphere is two of the FOUR seasons away,
        # so the shift must be modulo 4 — the original used % 3, which
        # mapped fall -> summer and winter -> fall incorrectly.
        season = (season + 2) % 4
    names = ('spring', 'summer', 'fall', 'winter')
    return names[season]
def getDistanceAttributes(business, user):
    """Bucket the user-centroid -> business distance: near (<=5km), medium (<=20km), far."""
    user_loc = _userLocations[user]
    venue = _business[business]
    meters = distance_on_unit_sphere(user_loc['lat'], user_loc['lng'], venue['latitude'], venue['longitude'])
    km = meters / 1000
    if km <= 5:
        return 'distance-near'
    if km <= 20:
        return 'distance-medium'
    return 'distance-far'
def feq(a, b):
    """Approximate float equality: 1 if |a-b| < 1e-8, else 0."""
    return 1 if abs(a - b) < 0.00000001 else 0
def distance_on_unit_sphere(lat1, long1, lat2, long2):
    # Great-circle distance between two lat/long points, in meters (rounded),
    # via the spherical law of cosines. Identical points short-circuit to 0
    # because acos() is numerically unstable when cos is very close to 1.
    # NOTE: Python 2 print statement in the error handler.
    try:
        if (feq(lat1, lat2) and feq(long1, long2)):
            return 0
        # Convert latitude and longitude to
        # spherical coordinates in radians.
        degrees_to_radians = math.pi/180.0
        # phi = 90 - latitude
        phi1 = (90.0 - lat1)*degrees_to_radians
        phi2 = (90.0 - lat2)*degrees_to_radians
        # theta = longitude
        theta1 = long1*degrees_to_radians
        theta2 = long2*degrees_to_radians
        # Compute spherical distance from spherical coordinates.
        # For two locations in spherical coordinates
        # (1, theta, phi) and (1, theta, phi)
        # cosine( arc length ) =
        # sin phi sin phi' cos(theta-theta') + cos phi cos phi'
        # distance = rho * arc length
        cos = (math.sin(phi1)*math.sin(phi2)*math.cos(theta1 - theta2) +
               math.cos(phi1)*math.cos(phi2))
        arc = math.acos( cos )
        # Remember to multiply arc by the radius of the earth
        # in your favorite set of units to get length.
        distance = math.degrees(arc) # in degrees
        distance = distance * 60 # 60 nautical miles / lat degree
        distance = distance * 1852 # conversion to meters
        distance = round(distance)
        return distance
    except:
        print 'lat1: %f lng1: %f | lat2: %f lng2: %f' % (lat1, long1, lat2, long2)
        raise
# Script entry point: build the full VW dataset on import/run.
generateVW();
#errors
print _city | {
"content_hash": "a497162a785a8d64f7d2b698b8bfaf7a",
"timestamp": "",
"source": "github",
"line_count": 427,
"max_line_length": 288,
"avg_line_length": 25.875878220140514,
"alnum_prop": 0.6194225721784777,
"repo_name": "jordansilva/lorien",
"id": "2fcb4efec7cb5d8aa6461a64c01e251a1e0cc30f",
"size": "11049",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vowpal.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1512"
},
{
"name": "HTML",
"bytes": "2924"
},
{
"name": "JavaScript",
"bytes": "16158"
},
{
"name": "Python",
"bytes": "74776"
}
],
"symlink_target": ""
} |
from __future__ import division
import sys
import random
import pygame
import numpy as np
from StimControl.LightStim.Core import DefaultScreen
from StimControl.LightStim.LightData import dictattr
from StimControl.LightStim.SweepSeque import TimingSeque
from StimControl.LightStim.FrameControl import FrameSweep
from StimControl.LightStim.SweepController import SweepSequeStimulusController
from StimControl.LightStim.Movie import SurfaceTextureObject, TimingSetMovie
# Set up a dual-viewport (left/right eye) screen with a black background.
DefaultScreen(['left','right'], bgcolor=(0.0,0.0,0.0))
# --- Command line: [subject_initials] [interval_ms] [layout] ... movie_file
# Missing subject/interval values are prompted for interactively.
# NOTE: Python 2 (raw_input); true division via __future__ import at top.
argv = list(sys.argv)
subject = None
if len(argv) >= 2:
    subject = argv[1]
while subject is None:
    sys.stdout.write('Please input lowercase initials of subject name: ')
    subject = raw_input()
interval = None
if len(argv) >= 3:
    interval = int(argv[2]) / 1000
while interval is None:
    sys.stdout.write('Please input stimulus interval in miliseconds: ')
    interval = int(raw_input()) / 1000
stim_interval = interval
# A positive interval delays the right eye; a negative one delays the left.
pre_left = 0.0 if stim_interval > 0 else abs(stim_interval)
pre_right = 0.0 if stim_interval <= 0 else stim_interval
layout = None
if len(argv) >= 4:
    layout = argv[3]
if layout not in ("LR", "TB"):
    layout = "2D"
# Last argument is the movie path; frames render into a shared pygame surface.
filename = argv[-1]
movie = pygame.movie.Movie(filename)
width, height = movie.get_size()
pygame_surface = pygame.surface.Surface((width,height))
movie.set_display(pygame_surface)
texture_object = SurfaceTextureObject(dimensions=2)
# Per-eye stimulus parameters (right eye at half contrast).
p_left = dictattr()
p_left.layout = layout
p_left.bgbrightness = 0.0
p_left.contrast = 1.0
p_right = dictattr()
p_right.layout = layout
p_right.bgbrightness = 0.0
p_right.contrast = 0.5
# 16 ms frame cycles; the "pre" delay realizes the inter-eye interval.
cycle_left = dictattr(duration=0.016, pre=pre_left, stimulus=0.016)
cycle_right = dictattr(duration=0.016, pre=pre_right, stimulus=0.016)
block_left = dictattr(repeat=None, cycle=cycle_left, interval=0.0)
block_right = dictattr(repeat=None, cycle=cycle_right, interval=0.0)
sequence_left = TimingSeque(repeat=1, block=block_left, shuffle=True)
sequence_right = TimingSeque(repeat=1, block=block_right, shuffle=True)
if __name__ == '__main__':
    # Build one timing-controlled movie stimulus per eye, start playback,
    # run the frame sweep, and stop the movie when the sweep quits.
    sweep = FrameSweep()
    movie_left = TimingSetMovie(viewport='left',
        surface=pygame_surface, texture_obj=texture_object,
        params=p_left, subject=subject, sweepseq=sequence_left)
    movie_right = TimingSetMovie(viewport='right',
        surface=pygame_surface, texture_obj=texture_object,
        params=p_right, subject=subject, sweepseq=sequence_right)
    sweep.add_stimulus(movie_left)
    sweep.add_stimulus(movie_right)
    sweep.add_quit_callback(movie.stop)
    movie.play()
    sweep.go(prestim=0.5,poststim=0.5)
| {
"content_hash": "12752bfb421779980c60205cc6c49984",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 90,
"avg_line_length": 33.45679012345679,
"alnum_prop": 0.7022140221402214,
"repo_name": "chrox/RealTimeElectrophy",
"id": "32c28291f2164fb5386ce9dd59fea9964ef0e983",
"size": "2892",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "StimControl/Experiments/demo/movie-timing.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "24301"
},
{
"name": "Python",
"bytes": "681188"
},
{
"name": "Shell",
"bytes": "73"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import mock
from orquesta import statuses as wf_statuses
import st2tests
# XXX: actionsensor import depends on config being setup.
import st2tests.config as tests_config
tests_config.parse_args()
from tests.unit import base
from st2common.bootstrap import actionsregistrar
from st2common.bootstrap import runnersregistrar
from st2common.constants import action as ac_const
from st2common.models.db import liveaction as lv_db_models
from st2common.persistence import execution as ex_db_access
from st2common.persistence import liveaction as lv_db_access
from st2common.persistence import workflow as wf_db_access
from st2common.services import action as ac_svc
from st2common.services import workflows as wf_svc
from st2common.transport import liveaction as lv_ac_xport
from st2common.transport import workflow as wf_ex_xport
from st2common.transport import publishers
from st2tests.fixtures.packs.core.fixture import PACK_PATH as CORE_PACK_PATH
from st2tests.fixtures.packs.orquesta_tests.fixture import PACK_PATH as TEST_PACK_PATH
from st2tests.mocks import liveaction as mock_lv_ac_xport
from st2tests.mocks import workflow as mock_wf_ex_xport
PACKS = [TEST_PACK_PATH, CORE_PACK_PATH]
@mock.patch.object(
    publishers.CUDPublisher, "publish_update", mock.MagicMock(return_value=None)
)
@mock.patch.object(
    lv_ac_xport.LiveActionPublisher,
    "publish_create",
    mock.MagicMock(side_effect=mock_lv_ac_xport.MockLiveActionPublisher.publish_create),
)
@mock.patch.object(
    lv_ac_xport.LiveActionPublisher,
    "publish_state",
    mock.MagicMock(side_effect=mock_lv_ac_xport.MockLiveActionPublisher.publish_state),
)
@mock.patch.object(
    wf_ex_xport.WorkflowExecutionPublisher,
    "publish_create",
    mock.MagicMock(
        side_effect=mock_wf_ex_xport.MockWorkflowExecutionPublisher.publish_create
    ),
)
@mock.patch.object(
    wf_ex_xport.WorkflowExecutionPublisher,
    "publish_state",
    mock.MagicMock(
        side_effect=mock_wf_ex_xport.MockWorkflowExecutionPublisher.publish_state
    ),
)
class OrquestaFunctionTest(st2tests.ExecutionDbTestCase):
    """Tests for the Orquesta task() expression function in YAQL and Jinja.

    The transport publishers are patched (decorators above) so live actions
    and workflow executions are driven synchronously through mocks instead
    of a real message bus.
    """

    @classmethod
    def setUpClass(cls):
        super(OrquestaFunctionTest, cls).setUpClass()
        # Register runners.
        runnersregistrar.register_runners()
        # Register test pack(s).
        actions_registrar = actionsregistrar.ActionsRegistrar(
            use_pack_cache=False, fail_on_failure=True
        )
        for pack in PACKS:
            actions_registrar.register_from_pack(pack)

    def _execute_workflow(
        self,
        wf_name,
        expected_task_sequence,
        expected_output,
        expected_status=wf_statuses.SUCCEEDED,
        expected_errors=None,
    ):
        # Launch the named workflow fixture, then manually drive each
        # expected (task_id, route) to completion before asserting the
        # final status, output, and result dicts.
        wf_file = wf_name + ".yaml"
        wf_meta = base.get_wf_fixture_meta_data(TEST_PACK_PATH, wf_file)
        lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta["name"])
        lv_ac_db, ac_ex_db = ac_svc.request(lv_ac_db)
        # Assert action execution is running.
        lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
        self.assertEqual(
            lv_ac_db.status, ac_const.LIVEACTION_STATUS_RUNNING, lv_ac_db.result
        )
        wf_ex_db = wf_db_access.WorkflowExecution.query(
            action_execution=str(ac_ex_db.id)
        )[0]
        self.assertEqual(wf_ex_db.status, ac_const.LIVEACTION_STATUS_RUNNING)
        for task_id, route in expected_task_sequence:
            tk_ex_dbs = wf_db_access.TaskExecution.query(
                workflow_execution=str(wf_ex_db.id), task_id=task_id, task_route=route
            )
            # Stop driving tasks once an expected task never materializes
            # (e.g. the workflow failed earlier than the sequence suggests).
            if len(tk_ex_dbs) <= 0:
                break
            # Pick the most recent execution of this task (by start time).
            tk_ex_db = sorted(tk_ex_dbs, key=lambda x: x.start_timestamp)[
                len(tk_ex_dbs) - 1
            ]
            tk_ac_ex_db = ex_db_access.ActionExecution.query(
                task_execution=str(tk_ex_db.id)
            )[0]
            tk_lv_ac_db = lv_db_access.LiveAction.get_by_id(
                tk_ac_ex_db.liveaction["id"]
            )
            self.assertEqual(tk_lv_ac_db.status, ac_const.LIVEACTION_STATUS_SUCCEEDED)
            self.assertTrue(
                wf_svc.is_action_execution_under_workflow_context(tk_ac_ex_db)
            )
            # Hand completion back to the workflow engine to schedule the
            # next task(s).
            wf_svc.handle_action_execution_completion(tk_ac_ex_db)
        # Assert workflow is completed.
        wf_ex_db = wf_db_access.WorkflowExecution.get_by_id(wf_ex_db.id)
        self.assertEqual(wf_ex_db.status, expected_status)
        lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
        self.assertEqual(lv_ac_db.status, expected_status)
        ac_ex_db = ex_db_access.ActionExecution.get_by_id(str(ac_ex_db.id))
        self.assertEqual(ac_ex_db.status, expected_status)
        # Check workflow output, liveaction result, and action execution result.
        expected_result = {"output": expected_output}
        if expected_errors is not None:
            expected_result["errors"] = expected_errors
        if expected_output is not None:
            self.assertDictEqual(wf_ex_db.output, expected_output)
        self.assertDictEqual(lv_ac_db.result, expected_result)
        self.assertDictEqual(ac_ex_db.result, expected_result)

    def test_task_functions_in_yaql(self):
        # task() calls inside YAQL expressions resolve current/named tasks.
        wf_name = "yaql-task-functions"
        expected_task_sequence = [
            ("task1", 0),
            ("task3", 0),
            ("task6", 0),
            ("task7", 0),
            ("task2", 0),
            ("task4", 0),
            ("task8", 1),
            ("task8", 2),
            ("task4", 0),
            ("task9", 1),
            ("task9", 2),
            ("task5", 0),
        ]
        expected_output = {
            "last_task4_result": "False",
            "task9__1__parent": "task8__1",
            "task9__2__parent": "task8__2",
            "that_task_by_name": "task1",
            "this_task_by_name": "task1",
            "this_task_no_arg": "task1",
        }
        self._execute_workflow(wf_name, expected_task_sequence, expected_output)

    def test_task_functions_in_jinja(self):
        # Same scenario as the YAQL test, expressed in Jinja.
        wf_name = "jinja-task-functions"
        expected_task_sequence = [
            ("task1", 0),
            ("task3", 0),
            ("task6", 0),
            ("task7", 0),
            ("task2", 0),
            ("task4", 0),
            ("task8", 1),
            ("task8", 2),
            ("task4", 0),
            ("task9", 1),
            ("task9", 2),
            ("task5", 0),
        ]
        expected_output = {
            "last_task4_result": "False",
            "task9__1__parent": "task8__1",
            "that_task_by_name": "task1",
            "task9__2__parent": "task8__2",
            "this_task_by_name": "task1",
            "this_task_no_arg": "task1",
        }
        self._execute_workflow(wf_name, expected_task_sequence, expected_output)

    def test_task_nonexistent_in_yaql(self):
        # Referencing an unknown task in YAQL must fail the workflow with a
        # descriptive evaluation error.
        wf_name = "yaql-task-nonexistent"
        expected_task_sequence = [("task1", 0)]
        expected_output = None
        expected_errors = [
            {
                "type": "error",
                "message": (
                    "YaqlEvaluationException: Unable to evaluate expression "
                    "'<% task(\"task0\") %>'. ExpressionEvaluationException: "
                    'Unable to find task execution for "task0".'
                ),
                "task_transition_id": "continue__t0",
                "task_id": "task1",
                "route": 0,
            }
        ]
        self._execute_workflow(
            wf_name,
            expected_task_sequence,
            expected_output,
            expected_status=ac_const.LIVEACTION_STATUS_FAILED,
            expected_errors=expected_errors,
        )

    def test_task_nonexistent_in_jinja(self):
        # Jinja counterpart of the nonexistent-task failure case.
        wf_name = "jinja-task-nonexistent"
        expected_task_sequence = [("task1", 0)]
        expected_output = None
        expected_errors = [
            {
                "type": "error",
                "message": (
                    "JinjaEvaluationException: Unable to evaluate expression "
                    "'{{ task(\"task0\") }}'. ExpressionEvaluationException: "
                    'Unable to find task execution for "task0".'
                ),
                "task_transition_id": "continue__t0",
                "task_id": "task1",
                "route": 0,
            }
        ]
        self._execute_workflow(
            wf_name,
            expected_task_sequence,
            expected_output,
            expected_status=ac_const.LIVEACTION_STATUS_FAILED,
            expected_errors=expected_errors,
        )
| {
"content_hash": "43beca8a982f43cad64048efa43bfec0",
"timestamp": "",
"source": "github",
"line_count": 262,
"max_line_length": 88,
"avg_line_length": 33.06488549618321,
"alnum_prop": 0.5846704374927854,
"repo_name": "nzlosh/st2",
"id": "b325839c9d8440fc4d2392a7b72951dcdfb20cb9",
"size": "9291",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "contrib/runners/orquesta_runner/tests/unit/test_functions_task.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "198"
},
{
"name": "JavaScript",
"bytes": "444"
},
{
"name": "Jinja",
"bytes": "174532"
},
{
"name": "Makefile",
"bytes": "75242"
},
{
"name": "PowerShell",
"bytes": "856"
},
{
"name": "Python",
"bytes": "6453910"
},
{
"name": "Shell",
"bytes": "93607"
},
{
"name": "Starlark",
"bytes": "7236"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns # noqa
from django.conf.urls import url # noqa
from dlux.dashboards.network.layer3.staticroutes import views
# Route table for the static-routes panel; only the create view is exposed.
urlpatterns = patterns(
    '',
    url(r'^create$', views.CreateStaticRouteView.as_view(), name='create'),
)
| {
"content_hash": "d70ec44090eeac73f3d8dda41639174d",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 75,
"avg_line_length": 26.2,
"alnum_prop": 0.732824427480916,
"repo_name": "ekarlso/dlux-horizon",
"id": "9e4f5e9029a77ba80ea6aa2f305a443e63f719ff",
"size": "917",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dlux/dashboards/network/layer3/staticroutes/urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "163296"
},
{
"name": "JavaScript",
"bytes": "10251"
},
{
"name": "Python",
"bytes": "120062"
},
{
"name": "Shell",
"bytes": "12382"
}
],
"symlink_target": ""
} |
"""
acos-submit is a custom directive that behaves almost identically to the normal
submit directive. It is intended for exercises that are hosted outside the MOOC grader,
such as the ACOS server. The directive option url should define the URL path of
the exercise in the ACOS server. The URL domain is added automatically based on
the configuration value "acos_submit_base_url" (in conf.py). The acos-submit
directive also automatically uses the "ajax" flag of the submit directive.
"""
from sphinx.errors import SphinxError
from .submit import SubmitForm
class ACOSSubmitDirective(SubmitForm):
    """Submit directive variant for exercises hosted on the ACOS server.

    Behaves like the plain submit directive, but requires a "url" option
    holding only the URL path; the domain is prepended from the Sphinx
    configuration value "acos_submit_base_url". The "config" option is
    rejected, and the "ajax" and "no-configure" flags are always enabled.
    """

    def run(self):
        opts = self.options
        if 'config' in opts:
            raise SphinxError('Do not use the "config" option with ACOS exercises.')
        if 'url' not in opts:
            raise SphinxError(
                'The "url" option is mandatory. '
                'It should only contain the URL path, not the domain, '
                'since the domain is read from the configuration value "acos_submit_base_url".')
        build_env = self.state.document.settings.env
        # Tweak the options before delegating to the superclass.
        # A docutils flag option is "on" even though its value is None.
        opts['ajax'] = None
        # Prepend the configured ACOS domain to the URL path.
        opts['url'] = build_env.config.acos_submit_base_url + opts['url']
        # ACOS exercises don't need configuring.
        opts['no-configure'] = None
        return super().run()
| {
"content_hash": "d9859ae9a60414467c32e07b14a5ee6d",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 96,
"avg_line_length": 46.303030303030305,
"alnum_prop": 0.693717277486911,
"repo_name": "Aalto-LeTech/a-plus-rst-tools",
"id": "4f14415446729cb735aa25b790ae0aad9a36cb76",
"size": "1552",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "directives/acos_submit.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "21135"
},
{
"name": "HTML",
"bytes": "10587"
},
{
"name": "JavaScript",
"bytes": "23024"
},
{
"name": "Python",
"bytes": "200090"
}
],
"symlink_target": ""
} |
"""Base class for all hypervisors
The syntax for the _CHECK variables and the contents of the PARAMETERS
dict is the same, see the docstring for L{BaseHypervisor.PARAMETERS}.
@var _FILE_CHECK: stub for file checks, without the required flag
@var _DIR_CHECK: stub for directory checks, without the required flag
@var REQ_FILE_CHECK: mandatory file parameter
@var OPT_FILE_CHECK: optional file parameter
@var REQ_DIR_CHECK: mandatory directory parameter
@var OPT_DIR_CHECK: optional directory parameter
@var NO_CHECK: parameter without any checks at all
@var REQUIRED_CHECK: parameter required to exist (and non-false), but
without other checks; beware that this can't be used for boolean
parameters, where you should use NO_CHECK or a custom checker
"""
import os
import re
import logging
from ganeti import constants
from ganeti import errors
from ganeti import objects
from ganeti import utils
def _IsCpuMaskWellFormed(cpu_mask):
  """Check whether a single CPU mask string is valid.

  A single CPU mask has the form "a,b,c,d", each element being a
  positive number or a range.

  """
  try:
    parsed = utils.ParseCpuMask(cpu_mask)
  except errors.ParseError:
    return False
  return isinstance(parsed, list) and len(parsed) > 0
def _IsMultiCpuMaskWellFormed(cpu_mask):
  """Check whether a multiple CPU mask string is valid.

  A valid multiple CPU mask has the form "a:b:c:d", each element being
  a single CPU mask.

  """
  try:
    utils.ParseMultiCpuMask(cpu_mask)
  except errors.ParseError:
    return False
  else:
    return True
# Read the BaseHypervisor.PARAMETERS docstring for the syntax of the
# _CHECK values
# Each _CHECK stub below is a 4-tuple: (syntax_fn, syntax_error_message,
# remote_check_fn, remote_error_message). The REQ_/OPT_ wrappers further
# down prepend the "required" flag, producing the full 5-tuple expected
# by BaseHypervisor.PARAMETERS.
# must be a file
_FILE_CHECK = (utils.IsNormAbsPath, "must be an absolute normalized path",
               os.path.isfile, "not found or not a file")
# must be a file or a URL
_FILE_OR_URL_CHECK = (lambda x: utils.IsNormAbsPath(x) or utils.IsUrl(x),
                      "must be an absolute normalized path or a URL",
                      lambda x: os.path.isfile(x) or utils.IsUrl(x),
                      "not found or not a file or URL")
# must be a directory
_DIR_CHECK = (utils.IsNormAbsPath, "must be an absolute normalized path",
              os.path.isdir, "not found or not a directory")
# CPU mask must be well-formed
# TODO: implement node level check for the CPU mask
_CPU_MASK_CHECK = (_IsCpuMaskWellFormed,
                   "CPU mask definition is not well-formed",
                   None, None)
# Multiple CPU mask must be well-formed
_MULTI_CPU_MASK_CHECK = (_IsMultiCpuMaskWellFormed,
                         "Multiple CPU mask definition is not well-formed",
                         None, None)
# Check for validity of port number
# NOTE(review): "0 < x < 65535" excludes port 65535, which is a valid
# TCP/UDP port -- confirm whether the exclusion is intentional.
_NET_PORT_CHECK = (lambda x: 0 < x < 65535, "invalid port number",
                   None, None)
# Check if number of queues is in safe range
_VIRTIO_NET_QUEUES_CHECK = (lambda x: 0 < x < 9, "invalid number of queues",
                            None, None)
# Check that an integer is non negative
_NONNEGATIVE_INT_CHECK = (lambda x: x >= 0, "cannot be negative", None, None)
# nice wrappers for users
REQ_FILE_CHECK = (True, ) + _FILE_CHECK
OPT_FILE_CHECK = (False, ) + _FILE_CHECK
REQ_FILE_OR_URL_CHECK = (True, ) + _FILE_OR_URL_CHECK
OPT_FILE_OR_URL_CHECK = (False, ) + _FILE_OR_URL_CHECK
REQ_DIR_CHECK = (True, ) + _DIR_CHECK
OPT_DIR_CHECK = (False, ) + _DIR_CHECK
REQ_NET_PORT_CHECK = (True, ) + _NET_PORT_CHECK
OPT_NET_PORT_CHECK = (False, ) + _NET_PORT_CHECK
REQ_VIRTIO_NET_QUEUES_CHECK = (True, ) + _VIRTIO_NET_QUEUES_CHECK
OPT_VIRTIO_NET_QUEUES_CHECK = (False, ) + _VIRTIO_NET_QUEUES_CHECK
REQ_CPU_MASK_CHECK = (True, ) + _CPU_MASK_CHECK
OPT_CPU_MASK_CHECK = (False, ) + _CPU_MASK_CHECK
REQ_MULTI_CPU_MASK_CHECK = (True, ) + _MULTI_CPU_MASK_CHECK
OPT_MULTI_CPU_MASK_CHECK = (False, ) + _MULTI_CPU_MASK_CHECK
REQ_NONNEGATIVE_INT_CHECK = (True, ) + _NONNEGATIVE_INT_CHECK
OPT_NONNEGATIVE_INT_CHECK = (False, ) + _NONNEGATIVE_INT_CHECK
# no checks at all
NO_CHECK = (False, None, None, None, None)
# required, but no other checks
REQUIRED_CHECK = (True, None, None, None, None)
# migration type
MIGRATION_MODE_CHECK = (True, lambda x: x in constants.HT_MIGRATION_MODES,
                        "invalid migration mode", None, None)
def ParamInSet(required, my_set):
  """Build a parameter checker for membership in a fixed set of values.

  @type required: boolean
  @param required: whether this is a required parameter
  @type my_set: tuple, list or set
  @param my_set: allowed values set

  """
  def _check(value):
    # Syntax check used by CheckParameterSyntax on the master.
    return value in my_set
  message = "The value must be one of: %s" % utils.CommaJoin(my_set)
  return (required, _check, message, None, None)
def GenerateTapName():
  """Generate a TAP network interface name for a NIC.

  This helper function generates a special TAP network interface
  name for NICs that are meant to be used in instance communication.
  It inspects the node's existing interfaces (via "ip link show") and
  returns the next unused name of the form "gnt.com.%d".

  @rtype: string
  @return: TAP network interface name, or the empty string if the
    NIC is not used in instance communication

  """
  result = utils.RunCmd(["ip", "link", "show"])
  if result.failed:
    raise errors.HypervisorError("Failed to list TUN/TAP interfaces")
  used = set()
  # "ip link show" prints two lines per interface; only the first line of
  # each pair (selected by the [0::2] stride) carries the interface name,
  # as the second ": "-separated field.
  for line in result.output.splitlines()[0::2]:
    fields = line.split(": ")
    if len(fields) < 2:
      raise errors.HypervisorError("Failed to parse TUN/TAP interfaces")
    match = re.match(r"gnt\.com\.([0-9]+)", fields[1])
    if match is not None:
      used.add(int(match.group(1)))
  next_idx = max(used) + 1 if used else 0
  return "gnt.com.%d" % next_idx
def ConfigureNIC(cmd, instance, seq, nic, tap):
  """Run the network configuration script for a specified NIC

  @type cmd: string
  @param cmd: command to run
  @type instance: instance object
  @param instance: instance we're acting on
  @type seq: int
  @param seq: nic sequence number
  @type nic: nic object
  @param nic: nic we're acting on
  @type tap: str
  @param tap: the host's tap interface this NIC corresponds to
  @raise errors.HypervisorError: if the configuration script fails
  """
  # Mandatory environment for the script; optional keys are added below
  # only when the corresponding NIC attribute is set.
  env = {
    "PATH": "%s:/sbin:/usr/sbin" % os.environ["PATH"],
    "INSTANCE": instance.name,
    "MAC": nic.mac,
    "MODE": nic.nicparams[constants.NIC_MODE],
    "INTERFACE": tap,
    "INTERFACE_INDEX": str(seq),
    "INTERFACE_UUID": nic.uuid,
    "TAGS": " ".join(instance.GetTags()),
  }
  if nic.ip:
    env["IP"] = nic.ip
  if nic.name:
    env["INTERFACE_NAME"] = nic.name
  if nic.nicparams[constants.NIC_LINK]:
    env["LINK"] = nic.nicparams[constants.NIC_LINK]
  if constants.NIC_VLAN in nic.nicparams:
    env["VLAN"] = nic.nicparams[constants.NIC_VLAN]
  if nic.network:
    # Network-level hook variables; added before BRIDGE so the explicit
    # BRIDGE assignment below takes precedence for bridged NICs.
    n = objects.Network.FromDict(nic.netinfo)
    env.update(n.HooksDict())
  if nic.nicparams[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
    env["BRIDGE"] = nic.nicparams[constants.NIC_LINK]
  result = utils.RunCmd(cmd, env=env)
  if result.failed:
    raise errors.HypervisorError("Failed to configure interface %s: %s;"
                                 " network configuration script output: %s" %
                                 (tap, result.fail_reason, result.output))
class HvInstanceState(object):
  """Coarse instance power states as reported by hypervisors."""

  RUNNING = 0
  SHUTDOWN = 1

  @staticmethod
  def IsRunning(state):
    """Return True iff C{state} denotes a running instance."""
    return state == HvInstanceState.RUNNING

  @staticmethod
  def IsShutdown(state):
    """Return True iff C{state} denotes a shut-down instance."""
    return state == HvInstanceState.SHUTDOWN
class BaseHypervisor(object):
  """Abstract virtualisation technology interface
  The goal is that all aspects of the virtualisation technology are
  abstracted away from the rest of code.
  @cvar PARAMETERS: a dict of parameter name: check type; the check type is
    a five-tuple containing:
      - the required flag (boolean)
      - a function to check for syntax, that will be used in
        L{CheckParameterSyntax}, in the master daemon process
      - an error message for the above function
      - a function to check for parameter validity on the remote node,
        in the L{ValidateParameters} function
      - an error message for the above function
  @type CAN_MIGRATE: boolean
  @cvar CAN_MIGRATE: whether this hypervisor can do migration (either
      live or non-live)
  """
  PARAMETERS = {}
  ANCILLARY_FILES = []
  ANCILLARY_FILES_OPT = []
  CAN_MIGRATE = False
  def StartInstance(self, instance, block_devices, startup_paused):
    """Start an instance."""
    raise NotImplementedError
  def StopInstance(self, instance, force=False, retry=False, name=None,
                   timeout=None):
    """Stop an instance
    @type instance: L{objects.Instance}
    @param instance: instance to stop
    @type force: boolean
    @param force: whether to do a "hard" stop (destroy)
    @type retry: boolean
    @param retry: whether this is just a retry call
    @type name: string or None
    @param name: if this parameter is passed, the instance object
        should not be used (will be passed as None), and the shutdown
        must be done by name only
    @type timeout: int or None
    @param timeout: if the parameter is not None, a soft shutdown operation will
        be killed after the specified number of seconds. A hard (forced)
        shutdown cannot have a timeout
    @raise errors.HypervisorError: when a parameter is not valid or
        the instance failed to be stopped
    """
    raise NotImplementedError
  def CleanupInstance(self, instance_name):
    """Cleanup after a stopped instance
    This is an optional method, used by hypervisors that need to cleanup after
    an instance has been stopped.
    @type instance_name: string
    @param instance_name: instance name to cleanup after
    """
    pass
  def RebootInstance(self, instance):
    """Reboot an instance."""
    raise NotImplementedError
  def ListInstances(self, hvparams=None):
    """Get the list of running instances."""
    raise NotImplementedError
  def GetInstanceInfo(self, instance_name, hvparams=None):
    """Get instance properties.
    @type instance_name: string
    @param instance_name: the instance name
    @type hvparams: dict of strings
    @param hvparams: hvparams to be used with this instance
    @rtype: (string, string, int, int, HvInstanceState, int)
    @return: tuple (name, id, memory, vcpus, state, times)
    """
    raise NotImplementedError
  def GetAllInstancesInfo(self, hvparams=None):
    """Get properties of all instances.
    @type hvparams: dict of strings
    @param hvparams: hypervisor parameter
    @rtype: (string, string, int, int, HvInstanceState, int)
    @return: list of tuples (name, id, memory, vcpus, state, times)
    """
    raise NotImplementedError
  def GetNodeInfo(self, hvparams=None):
    """Return information about the node.
    @type hvparams: dict of strings
    @param hvparams: hypervisor parameters
    @return: a dict with at least the following keys (memory values in MiB):
      - memory_total: the total memory size on the node
      - memory_free: the available memory on the node for instances
      - memory_dom0: the memory used by the node itself, if available
      - cpu_total: total number of CPUs
      - cpu_dom0: number of CPUs used by the node OS
      - cpu_nodes: number of NUMA domains
      - cpu_sockets: number of physical CPU sockets
    """
    raise NotImplementedError
  @classmethod
  def GetInstanceConsole(cls, instance, primary_node, node_group,
                         hvparams, beparams):
    """Return information for connecting to the console of an instance.
    """
    raise NotImplementedError
  @classmethod
  def GetAncillaryFiles(cls):
    """Return a list of ancillary files to be copied to all nodes as ancillary
    configuration files.
    @rtype: (list of absolute paths, list of absolute paths)
    @return: (all files, optional files)
    """
    # By default we return a member variable, so that if an hypervisor has just
    # a static list of files it doesn't have to override this function.
    assert set(cls.ANCILLARY_FILES).issuperset(cls.ANCILLARY_FILES_OPT), \
      "Optional ancillary files must be a subset of ancillary files"
    return (cls.ANCILLARY_FILES, cls.ANCILLARY_FILES_OPT)
  def Verify(self, hvparams=None):
    """Verify the hypervisor.
    @type hvparams: dict of strings
    @param hvparams: hypervisor parameters to be verified against
    @return: Problem description if something is wrong, C{None} otherwise
    """
    raise NotImplementedError
  @staticmethod
  def VersionsSafeForMigration(src, target):
    """Decide if migration between those versions is likely to succeed.
    Given two versions of a hypervisor, give a guess whether live migration
    from the one version to the other version is likely to succeed. The current
    implementation is conservative: only identical versions are considered
    safe.
    """
    if src == target:
      return True
    return False
  def MigrationInfo(self, instance): # pylint: disable=R0201,W0613
    """Get instance information to perform a migration.
    By default assume no information is needed.
    @type instance: L{objects.Instance}
    @param instance: instance to be migrated
    @rtype: string/data (opaque)
    @return: instance migration information - serialized form
    """
    return ""
  def AcceptInstance(self, instance, info, target):
    """Prepare to accept an instance.
    By default assume no preparation is needed.
    @type instance: L{objects.Instance}
    @param instance: instance to be accepted
    @type info: string/data (opaque)
    @param info: migration information, from the source node
    @type target: string
    @param target: target host (usually ip), on this node
    """
    pass
  def BalloonInstanceMemory(self, instance, mem):
    """Balloon an instance memory to a certain value.
    @type instance: L{objects.Instance}
    @param instance: instance to be accepted
    @type mem: int
    @param mem: actual memory size to use for instance runtime
    """
    raise NotImplementedError
  def FinalizeMigrationDst(self, instance, info, success):
    """Finalize the instance migration on the target node.
    Should finalize or revert any preparation done to accept the instance.
    Since by default we do no preparation, we also don't have anything to do
    @type instance: L{objects.Instance}
    @param instance: instance whose migration is being finalized
    @type info: string/data (opaque)
    @param info: migration information, from the source node
    @type success: boolean
    @param success: whether the migration was a success or a failure
    """
    pass
  def MigrateInstance(self, cluster_name, instance, target, live):
    """Migrate an instance.
    @type cluster_name: string
    @param cluster_name: name of the cluster
    @type instance: L{objects.Instance}
    @param instance: the instance to be migrated
    @type target: string
    @param target: hostname (usually ip) of the target node
    @type live: boolean
    @param live: whether to do a live or non-live migration
    """
    raise NotImplementedError
  def FinalizeMigrationSource(self, instance, success, live):
    """Finalize the instance migration on the source node.
    @type instance: L{objects.Instance}
    @param instance: the instance that was migrated
    @type success: bool
    @param success: whether the migration succeeded or not
    @type live: bool
    @param live: whether the user requested a live migration or not
    """
    pass
  def GetMigrationStatus(self, instance):
    """Get the migration status
    @type instance: L{objects.Instance}
    @param instance: the instance that is being migrated
    @rtype: L{objects.MigrationStatus}
    @return: the status of the current migration (one of
        L{constants.HV_MIGRATION_VALID_STATUSES}), plus any additional
        progress info that can be retrieved from the hypervisor
    """
    raise NotImplementedError
  def _InstanceStartupMemory(self, instance):
    """Get the correct startup memory for an instance
    This function calculates how much memory an instance should be started
    with, making sure it's a value between the minimum and the maximum memory,
    but also trying to use no more than the current free memory on the node.
    @type instance: L{objects.Instance}
    @param instance: the instance that is being started
    @rtype: integer
    @return: memory the instance should be started with
    """
    free_memory = self.GetNodeInfo(hvparams=instance.hvparams)["memory_free"]
    # Clamp to [BE_MINMEM, min(BE_MAXMEM, free memory)]; if free memory is
    # below the minimum, the minimum still wins.
    max_start_mem = min(instance.beparams[constants.BE_MAXMEM], free_memory)
    start_mem = max(instance.beparams[constants.BE_MINMEM], max_start_mem)
    return start_mem
  @classmethod
  def _IsParamValueUnspecified(cls, param_value):
    """Check if the parameter value is a kind of value meaning unspecified.
    This function checks if the parameter value is a kind of value meaning
    unspecified.
    @type param_value: any
    @param param_value: the parameter value that needs to be checked
    @rtype: bool
    @return: True if the parameter value is a kind of value meaning unspecified,
        False otherwise
    """
    # NOTE: basestring makes this Python 2 only (as is the rest of this
    # module, which uses "except E, err" syntax).
    return param_value is None \
        or isinstance(param_value, basestring) and param_value == ""
  @classmethod
  def CheckParameterSyntax(cls, hvparams):
    """Check the given parameters for validity.
    This should check the passed set of parameters for
    validity. Classes should extend, not replace, this function.
    @type hvparams: dict
    @param hvparams: dictionary with parameter names/value
    @raise errors.HypervisorError: when a parameter is not valid
    """
    for key in hvparams:
      if key not in cls.PARAMETERS:
        raise errors.HypervisorError("Parameter '%s' is not supported" % key)
    # cheap tests that run on the master, should not access the world
    for name, (required, check_fn, errstr, _, _) in cls.PARAMETERS.items():
      if name not in hvparams:
        raise errors.HypervisorError("Parameter '%s' is missing" % name)
      value = hvparams[name]
      if not required and cls._IsParamValueUnspecified(value):
        continue
      if cls._IsParamValueUnspecified(value):
        raise errors.HypervisorError("Parameter '%s' is required but"
                                     " is currently not defined" % (name, ))
      if check_fn is not None and not check_fn(value):
        raise errors.HypervisorError("Parameter '%s' fails syntax"
                                     " check: %s (current value: '%s')" %
                                     (name, errstr, value))
  @classmethod
  def ValidateParameters(cls, hvparams):
    """Check the given parameters for validity.
    This should check the passed set of parameters for
    validity. Classes should extend, not replace, this function.
    @type hvparams: dict
    @param hvparams: dictionary with parameter names/value
    @raise errors.HypervisorError: when a parameter is not valid
    """
    # Remote-node checks (4th/5th tuple elements); syntax was already
    # verified on the master by CheckParameterSyntax.
    for name, (required, _, _, check_fn, errstr) in cls.PARAMETERS.items():
      value = hvparams[name]
      if not required and cls._IsParamValueUnspecified(value):
        continue
      if check_fn is not None and not check_fn(value):
        raise errors.HypervisorError("Parameter '%s' fails"
                                     " validation: %s (current value: '%s')" %
                                     (name, errstr, value))
  @classmethod
  def PowercycleNode(cls, hvparams=None):
    """Hard powercycle a node using hypervisor specific methods.
    This method should hard powercycle the node, using whatever
    methods the hypervisor provides. Note that this means that all
    instances running on the node must be stopped too.
    @type hvparams: dict of strings
    @param hvparams: hypervisor params to be used on this node
    """
    raise NotImplementedError
  @staticmethod
  def GetLinuxNodeInfo(meminfo="/proc/meminfo", cpuinfo="/proc/cpuinfo"):
    """For linux systems, return actual OS information.
    This is an abstraction for all non-hypervisor-based classes, where
    the node actually sees all the memory and CPUs via the /proc
    interface and standard commands. The other case is for example
    xen, where you only see the hardware resources via xen-specific
    tools.
    @param meminfo: name of the file containing meminfo
    @type meminfo: string
    @param cpuinfo: name of the file containing cpuinfo
    @type cpuinfo: string
    @return: a dict with the following keys (values in MiB):
      - memory_total: the total memory size on the node
      - memory_free: the available memory on the node for instances
      - memory_dom0: the memory used by the node itself, if available
      - cpu_total: total number of CPUs
      - cpu_dom0: number of CPUs used by the node OS
      - cpu_nodes: number of NUMA domains
      - cpu_sockets: number of physical CPU sockets
    """
    try:
      data = utils.ReadFile(meminfo).splitlines()
    except EnvironmentError, err:
      raise errors.HypervisorError("Failed to list node info: %s" % (err,))
    result = {}
    sum_free = 0
    try:
      # /proc/meminfo values are in kB; "/ 1024" floors them to MiB
      # (Python 2 integer division). "Free" memory here is
      # MemFree + Buffers + Cached.
      for line in data:
        splitfields = line.split(":", 1)
        if len(splitfields) > 1:
          key = splitfields[0].strip()
          val = splitfields[1].strip()
          if key == "MemTotal":
            result["memory_total"] = int(val.split()[0]) / 1024
          elif key in ("MemFree", "Buffers", "Cached"):
            sum_free += int(val.split()[0]) / 1024
          elif key == "Active":
            result["memory_dom0"] = int(val.split()[0]) / 1024
    except (ValueError, TypeError), err:
      raise errors.HypervisorError("Failed to compute memory usage: %s" %
                                   (err,))
    result["memory_free"] = sum_free
    cpu_total = 0
    try:
      fh = open(cpuinfo)
      try:
        cpu_total = len(re.findall(r"(?m)^processor\s*:\s*[0-9]+\s*$",
                                   fh.read()))
      finally:
        fh.close()
    except EnvironmentError, err:
      raise errors.HypervisorError("Failed to list node info: %s" % (err,))
    result["cpu_total"] = cpu_total
    # We assume that the node OS can access all the CPUs
    result["cpu_dom0"] = cpu_total
    # FIXME: export correct data here
    result["cpu_nodes"] = 1
    result["cpu_sockets"] = 1
    return result
  @classmethod
  def LinuxPowercycle(cls):
    """Linux-specific powercycle method.
    """
    try:
      fd = os.open("/proc/sysrq-trigger", os.O_WRONLY)
      try:
        os.write(fd, "b")
      finally:
        # NOTE(review): os.open() returns an int file descriptor, which
        # has no close() method -- this line raises AttributeError.
        # os.close(fd) looks like the intent; confirm upstream.
        fd.close()
    except OSError:
      logging.exception("Can't open the sysrq-trigger file")
      result = utils.RunCmd(["reboot", "-n", "-f"])
      # NOTE(review): this tests the truthiness of the RunCmd result
      # object; "if result.failed:" may be the intent -- confirm.
      if not result:
        logging.error("Can't run shutdown: %s", result.output)
  @staticmethod
  def _FormatVerifyResults(msgs):
    """Formats the verification results, given a list of errors.
    @param msgs: list of errors, possibly empty
    @return: overall problem description if something is wrong,
        C{None} otherwise
    """
    if msgs:
      return "; ".join(msgs)
    else:
      return None
  # pylint: disable=R0201,W0613
  def HotAddDevice(self, instance, dev_type, device, extra, seq):
    """Hot-add a device.
    """
    raise errors.HotplugError("Hotplug is not supported by this hypervisor")
  # pylint: disable=R0201,W0613
  def HotDelDevice(self, instance, dev_type, device, extra, seq):
    """Hot-del a device.
    """
    raise errors.HotplugError("Hotplug is not supported by this hypervisor")
  # pylint: disable=R0201,W0613
  def HotModDevice(self, instance, dev_type, device, extra, seq):
    """Hot-mod a device.
    """
    raise errors.HotplugError("Hotplug is not supported by this hypervisor")
  # pylint: disable=R0201,W0613
  def VerifyHotplugSupport(self, instance, action, dev_type):
    """Verifies that hotplug is supported.
    Given the target device and hotplug action checks if hotplug is
    actually supported.
    @type instance: L{objects.Instance}
    @param instance: the instance object
    @type action: string
    @param action: one of the supported hotplug commands
    @type dev_type: string
    @param dev_type: one of the supported device types to hotplug
    @raise errors.HotplugError: if hotplugging is not supported
    """
    raise errors.HotplugError("Hotplug is not supported.")
  def HotplugSupported(self, instance):
    """Checks if hotplug is supported.
    By default is not. Currently only KVM hypervisor supports it.
    """
    raise errors.HotplugError("Hotplug is not supported by this hypervisor")
| {
"content_hash": "736f701c71eb1ffc629af8cb9c6a5e02",
"timestamp": "",
"source": "github",
"line_count": 753,
"max_line_length": 80,
"avg_line_length": 32.83665338645418,
"alnum_prop": 0.6714794143816226,
"repo_name": "dimara/ganeti",
"id": "8aaa06245bb5feca336c8e742fdad7dcf7046c7a",
"size": "26104",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "lib/hypervisor/hv_base.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Haskell",
"bytes": "2409763"
},
{
"name": "Python",
"bytes": "5842471"
},
{
"name": "Shell",
"bytes": "110549"
}
],
"symlink_target": ""
} |
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    # Auto-generated South schema migration (initial): creates the
    # Attachment and AttachmentRelationship tables. The ``models`` dict
    # below is a frozen snapshot of the ORM at generation time -- do not
    # edit it by hand.
    def forwards(self, orm):
        # Adding model 'Attachment'
        db.create_table(u'attachment_attachment', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('created', self.gf('model_utils.fields.AutoCreatedField')(default=datetime.datetime.now)),
            ('modified', self.gf('model_utils.fields.AutoLastModifiedField')(default=datetime.datetime.now)),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
            ('file', self.gf('django.db.models.fields.files.FileField')(max_length=100)),
        ))
        db.send_create_signal(u'attachment', ['Attachment'])
        # Adding model 'AttachmentRelationship'
        # (generic relation: content_type + object_id point at the owner)
        db.create_table(u'attachment_attachmentrelationship', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('created', self.gf('model_utils.fields.AutoCreatedField')(default=datetime.datetime.now)),
            ('modified', self.gf('model_utils.fields.AutoLastModifiedField')(default=datetime.datetime.now)),
            ('content_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['contenttypes.ContentType'])),
            ('object_id', self.gf('django.db.models.fields.IntegerField')()),
            ('attachment', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['attachment.Attachment'])),
        ))
        db.send_create_signal(u'attachment', ['AttachmentRelationship'])
    def backwards(self, orm):
        # Deleting model 'Attachment'
        db.delete_table(u'attachment_attachment')
        # Deleting model 'AttachmentRelationship'
        db.delete_table(u'attachment_attachmentrelationship')
    models = {
        u'attachment.attachment': {
            'Meta': {'object_name': 'Attachment'},
            'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
            'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
        },
        u'attachment.attachmentrelationship': {
            'Meta': {'object_name': 'AttachmentRelationship'},
            'attachment': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['attachment.Attachment']"}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
            'object_id': ('django.db.models.fields.IntegerField', [], {})
        },
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        }
    }
    complete_apps = ['attachment'] | {
"content_hash": "e73f17746a55006125fc65873808d043",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 195,
"avg_line_length": 68.25510204081633,
"alnum_prop": 0.5890267603528181,
"repo_name": "lettoosoft/lettoo-weixin-platform-back",
"id": "ec3779a82e2f6d3c2b64456cab2c92b5ac69b4f0",
"size": "6713",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/attachment/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "39"
},
{
"name": "JavaScript",
"bytes": "1451"
},
{
"name": "Python",
"bytes": "204592"
},
{
"name": "Shell",
"bytes": "6587"
}
],
"symlink_target": ""
} |
from django.test import LiveServerTestCase
from django.test import TestCase
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
# Create your tests here.
class SignupTestCase(LiveServerTestCase):
    """Selenium-driven login-form tests.

    BUG FIX: the test methods were originally named ``invalid_login`` and
    ``valid_login``; without the ``test_`` prefix the unittest/Django test
    runner never discovers them, so this case silently ran zero tests.
    They are renamed with the ``test_`` prefix so they actually execute,
    and the hard-coded ``http://localhost:8000`` is replaced with
    ``self.live_server_url`` -- LiveServerTestCase serves the site on its
    own transient port, which is the whole point of using it.

    NOTE(review): both tests assume the user ``kantanand@insmartapps.com``
    exists in the test database; consider creating it in setUp/fixtures.
    """

    def setUp(self):
        # Requires geckodriver installed at this fixed path.
        self.selenium = webdriver.Firefox(executable_path='/opt/firefox-geckodriver/geckodriver')
        super(SignupTestCase, self).setUp()

    def tearDown(self):
        self.selenium.quit()
        super(SignupTestCase, self).tearDown()

    def test_invalid_login(self):
        """Submitting wrong credentials shows the error message."""
        selenium = self.selenium
        # Open the page served by the live test server.
        selenium.get(self.live_server_url + '/login/')
        # Locate the form elements.
        username = selenium.find_element_by_id('id_username')
        password = selenium.find_element_by_id('id_password')
        submit = selenium.find_element_by_name('login')
        # Fill the form with a wrong password and submit it.
        username.send_keys('kantanand@insmartapps.com')
        password.send_keys('abcd@1234s')
        submit.send_keys(Keys.RETURN)
        # Check the returned result.
        assert 'Username/Password is not valid. Please try again' in selenium.page_source

    def test_valid_login(self):
        """Submitting correct credentials reaches the signup page."""
        selenium = self.selenium
        selenium.get(self.live_server_url + '/login/')
        username = selenium.find_element_by_id('id_username')
        password = selenium.find_element_by_id('id_password')
        submit = selenium.find_element_by_name('login')
        username.send_keys('kantanand@insmartapps.com')
        password.send_keys('abcd@1234')
        # Submitting the form; the final assertion follows below.
        submit.send_keys(Keys.RETURN)
assert 'signup page' in selenium.page_source | {
"content_hash": "dc86052135e8b76ba5ef186c4eb47f78",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 97,
"avg_line_length": 38.49152542372882,
"alnum_prop": 0.6393659180977543,
"repo_name": "kantanand/insmartapps",
"id": "0a32905e09376ef00d1a3aee431b0310220d1b4a",
"size": "2271",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/learning/signup/tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2043"
},
{
"name": "Java",
"bytes": "2158"
},
{
"name": "JavaScript",
"bytes": "484"
},
{
"name": "Python",
"bytes": "62883"
},
{
"name": "Shell",
"bytes": "2505"
}
],
"symlink_target": ""
} |
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
# Generate and process one artificial series: N=128 points at daily ('D')
# frequency, seed 0, "MovingAverage" trend, cycle length 12, "Integration"
# transform, sigma 0.0, 20 exogenous variables, AR order 12.
art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "MovingAverage", cycle_length = 12, transform = "Integration", sigma = 0.0, exog_count = 20, ar_order = 12);
"content_hash": "7be37aaa5ba8e8751876c03aae07c34f",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 172,
"avg_line_length": 38.857142857142854,
"alnum_prop": 0.7132352941176471,
"repo_name": "antoinecarme/pyaf",
"id": "db0714be74944d213453a5f43ba8f3e75c4432d1",
"size": "272",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/artificial/transf_Integration/trend_MovingAverage/cycle_12/ar_12/test_artificial_128_Integration_MovingAverage_12_12_20.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
} |
import logging

from luigi import six

import luigi.target

# Logger used for the fallback warnings emitted by CascadingClient below.
logger = logging.getLogger('luigi-interface')
class CascadingClient(object):
    """
    A FilesystemClient that will cascade failing function calls through a
    list of clients.  Which clients are used is specified at construction
    time: each generated wrapper tries the clients in order and falls back
    to the next one when a call fails with a non-filesystem error.
    """

    # Canonical list of filesystem operations to wrap.  Feel free to add
    # method names here; for full control over which wrappers get created,
    # pass an explicit ``method_names`` kwarg to the constructor.
    ALL_METHOD_NAMES = [
        'exists', 'rename', 'remove', 'chmod', 'chown', 'count', 'copy',
        'get', 'put', 'mkdir', 'list', 'listdir', 'getmerge', 'isdir',
        'move', 'rename_dont_move', 'touchz',
    ]

    def __init__(self, clients, method_names=None):
        self.clients = clients
        if method_names is None:
            method_names = self.ALL_METHOD_NAMES
        for name in method_names:
            # Build a cascading wrapper and bind it to this instance.
            wrapper = self._make_method(name)
            bound = six.create_bound_method(wrapper, self)
            setattr(self, name, bound)

    @classmethod
    def _make_method(cls, method_name):
        # Closure captures method_name for the generated wrapper.
        def new_method(self, *args, **kwargs):
            return self._chained_call(method_name, *args, **kwargs)
        return new_method

    def _chained_call(self, method_name, *args, **kwargs):
        """Try ``method_name`` on each client in turn, returning the first
        successful result.  Filesystem exceptions propagate immediately;
        any other failure advances to the next client (re-raised if the
        failing client was the last one)."""
        last_index = len(self.clients) - 1
        for idx, client in enumerate(self.clients):
            try:
                return getattr(client, method_name)(*args, **kwargs)
            except luigi.target.FileSystemException:
                # For exceptions that are semantical, we must throw along
                raise
            except BaseException:
                if idx == last_index:
                    raise
                logger.warning('The %s failed to %s, using fallback class %s',
                               client.__class__.__name__, method_name,
                               self.clients[idx + 1].__class__.__name__)
| {
"content_hash": "c86b057c8549d9b61e089b347f11ed97",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 114,
"avg_line_length": 37.56666666666667,
"alnum_prop": 0.5488021295474712,
"repo_name": "Houzz/luigi",
"id": "a85068a97ac77128bc8751cbf6668f9128cc4d90",
"size": "2858",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "luigi/contrib/target.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "5133"
},
{
"name": "HTML",
"bytes": "40752"
},
{
"name": "JavaScript",
"bytes": "173179"
},
{
"name": "Python",
"bytes": "1858455"
},
{
"name": "Shell",
"bytes": "2627"
},
{
"name": "TSQL",
"bytes": "262"
}
],
"symlink_target": ""
} |
import pytest
from dvc.fs.base import FileSystem, RemoteMissingDepsError
@pytest.mark.parametrize(
    "pkg, msg",
    [
        (None, "Please report this bug to"),
        ("pip", "pip install"),
        ("conda", "conda install"),
    ],
)
def test_missing_deps(pkg, msg, mocker):
    """Instantiating FileSystem with an unmet REQUIRES entry must raise
    RemoteMissingDepsError whose message matches the detected package
    manager (or a bug-report hint when none is detected)."""
    mocker.patch.object(FileSystem, "REQUIRES", {"missing": "missing"})
    mocker.patch("dvc.utils.pkg.PKG", pkg)

    with pytest.raises(RemoteMissingDepsError, match=msg):
        FileSystem()
| {
"content_hash": "9ab8b6875c36248dffbab6ef1c50d9d7",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 58,
"avg_line_length": 26.63157894736842,
"alnum_prop": 0.6403162055335968,
"repo_name": "efiop/dvc",
"id": "8d35654bf1026275441e50f1a05c2a07ac0226e6",
"size": "506",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/unit/fs/test_base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "53"
},
{
"name": "Inno Setup",
"bytes": "10158"
},
{
"name": "PowerShell",
"bytes": "2686"
},
{
"name": "Python",
"bytes": "2231040"
},
{
"name": "Shell",
"bytes": "695"
}
],
"symlink_target": ""
} |
"""
Author: Peter Zujko (@zujko)
Lukas Yelle (@lxy5611)
Desc: Implements channels routing for the whole pawprints app.
"""
from channels.routing import ProtocolTypeRouter, URLRouter
from channels.auth import AuthMiddlewareStack
import petitions.routing
# ASGI entry point: websocket connections are routed through channels'
# AuthMiddlewareStack into the petitions app's websocket URL router.
application = ProtocolTypeRouter({
    'websocket': AuthMiddlewareStack(
        URLRouter(
            petitions.routing.websocket_urlpatterns
        )
    ),
})
| {
"content_hash": "b3040014b98cb8555252e306d1dbd718",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 62,
"avg_line_length": 26.625,
"alnum_prop": 0.7206572769953051,
"repo_name": "ritstudentgovernment/PawPrints",
"id": "64113caad0f484d20b25b0a8725abcdec1427da0",
"size": "426",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pawprints/routing.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "51771"
},
{
"name": "Dockerfile",
"bytes": "369"
},
{
"name": "HTML",
"bytes": "142080"
},
{
"name": "JavaScript",
"bytes": "118459"
},
{
"name": "Python",
"bytes": "140963"
},
{
"name": "Shell",
"bytes": "951"
}
],
"symlink_target": ""
} |
"""
sphinx.builders.linkcheck
~~~~~~~~~~~~~~~~~~~~~~~~~
The CheckExternalLinksBuilder class.
:copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
import sys
import Queue
import socket
import threading
from os import path
from urllib2 import build_opener, Request
from docutils import nodes
from sphinx.builders import Builder
from sphinx.util.console import purple, red, darkgreen, darkgray
# create an opener that will simulate a browser user-agent
# (some servers reject urllib2's default User-agent header)
opener = build_opener()
opener.addheaders = [('User-agent', 'Mozilla/5.0')]
class HeadRequest(Request):
    """Subclass of urllib2.Request that sends a HEAD request."""

    def get_method(self):
        # urllib2 derives the HTTP verb from get_method(); forcing HEAD
        # means the server returns headers only, with no body download.
        return 'HEAD'
class CheckExternalLinksBuilder(Builder):
    """
    Checks for broken external links.

    URIs found in documents are pushed onto ``wqueue`` and checked by a
    pool of daemon worker threads; (status, info) results come back over
    ``rqueue`` and are reported/written from the main thread.
    """
    name = 'linkcheck'

    def init(self):
        # Compiled regexes of URIs that should never be fetched.
        self.to_ignore = map(re.compile, self.app.config.linkcheck_ignore)
        # Caches shared by all workers: URIs known to work, known broken
        # (uri -> error string) and known redirected (uri -> target URL).
        self.good = set()
        self.broken = {}
        self.redirected = {}
        # set a timeout for non-responding servers
        socket.setdefaulttimeout(5.0)
        # create output file
        open(path.join(self.outdir, 'output.txt'), 'w').close()
        # create queues and worker threads
        self.wqueue = Queue.Queue()
        self.rqueue = Queue.Queue()
        self.workers = []
        for i in range(self.app.config.linkcheck_workers):
            thread = threading.Thread(target=self.check_thread)
            thread.setDaemon(True)
            thread.start()
            self.workers.append(thread)

    def check_thread(self):
        """Worker loop: take (uri, docname, lineno) items off wqueue and
        push (uri, docname, lineno, status, info) results onto rqueue.
        A (None, None, None) item is the shutdown sentinel."""
        kwargs = {}
        if sys.version_info > (2, 5) and self.app.config.linkcheck_timeout:
            # urlopen's 'timeout' kwarg only exists on Python >= 2.6
            kwargs['timeout'] = self.app.config.linkcheck_timeout

        def check():
            # `uri` is read from the enclosing while-loop via closure.
            # check for various conditions without bothering the network
            if len(uri) == 0 or uri[0:7] == 'mailto:' or uri[0:4] == 'ftp:':
                return 'unchecked', ''
            elif not (uri[0:5] == 'http:' or uri[0:6] == 'https:'):
                return 'local', ''
            elif uri in self.good:
                return 'working', ''
            elif uri in self.broken:
                return 'broken', self.broken[uri]
            elif uri in self.redirected:
                return 'redirected', self.redirected[uri]
            for rex in self.to_ignore:
                if rex.match(uri):
                    return 'ignored', ''
            # need to actually check the URI
            try:
                # HEAD request: fetch headers only, no body download.
                f = opener.open(HeadRequest(uri), **kwargs)
                f.close()
            except Exception, err:
                self.broken[uri] = str(err)
                return 'broken', str(err)
            if f.url.rstrip('/') == uri.rstrip('/'):
                self.good.add(uri)
                # 'new' marks a first-time success so it gets reported once.
                return 'working', 'new'
            else:
                # opener followed a redirect to a different final URL
                self.redirected[uri] = f.url
                return 'redirected', f.url

        while True:
            uri, docname, lineno = self.wqueue.get()
            if uri is None:
                # shutdown sentinel from finish()
                break
            status, info = check()
            self.rqueue.put((uri, docname, lineno, status, info))

    def process_result(self, result):
        """Report one check result on the console and in output.txt."""
        uri, docname, lineno, status, info = result
        if status == 'unchecked':
            return
        if status == 'working' and info != 'new':
            # cache hit -- already reported the first time it was checked
            return
        if lineno:
            self.info('(line %3d) ' % lineno, nonl=1)
        if status == 'ignored':
            self.info(uri + ' - ' + darkgray('ignored'))
        elif status == 'local':
            self.info(uri + ' - ' + darkgray('local'))
            self.write_entry('local', docname, lineno, uri)
        elif status == 'working':
            self.info(uri + ' - ' + darkgreen('working'))
        elif status == 'broken':
            self.info(uri + ' - ' + red('broken: ') + info)
            self.write_entry('broken', docname, lineno, uri + ': ' + info)
            if self.app.quiet:
                self.warn('broken link: %s' % uri,
                          '%s:%s' % (self.env.doc2path(docname), lineno))
        elif status == 'redirected':
            self.info(uri + ' - ' + purple('redirected') + ' to ' + info)
            self.write_entry('redirected', docname, lineno, uri + ' to ' + info)

    def get_target_uri(self, docname, typ=None):
        # This builder produces no output documents, so no target URIs.
        return ''

    def get_outdated_docs(self):
        # Re-check every document on every run.
        return self.env.found_docs

    def prepare_writing(self, docnames):
        # Nothing to prepare; results are handled per-document.
        return

    def write_doc(self, docname, doctree):
        """Queue every external reference in *doctree*, then drain and
        report the results for this document."""
        self.info()
        n = 0
        for node in doctree.traverse(nodes.reference):
            if 'refuri' not in node:
                continue
            uri = node['refuri']
            if '#' in uri:
                # only the document itself is checked, not the fragment
                uri = uri.split('#')[0]
            lineno = None
            while lineno is None:
                # climb ancestors until one carries line information
                node = node.parent
                if node is None:
                    break
                lineno = node.line
            self.wqueue.put((uri, docname, lineno), False)
            n += 1
        done = 0
        while done < n:
            self.process_result(self.rqueue.get())
            done += 1

        if self.broken:
            self.app.statuscode = 1

    def write_entry(self, what, docname, line, uri):
        # Append one "<source>:<line>: [<status>] <uri>" line to output.txt.
        output = open(path.join(self.outdir, 'output.txt'), 'a')
        output.write("%s:%s: [%s] %s\n" % (self.env.doc2path(docname, None),
                                           line, what, uri))
        output.close()

    def finish(self):
        # Send one shutdown sentinel per worker thread.
        for worker in self.workers:
            self.wqueue.put((None, None, None), False)
| {
"content_hash": "b13c5bf204c132b6a427e39e96c20bec",
"timestamp": "",
"source": "github",
"line_count": 170,
"max_line_length": 80,
"avg_line_length": 33.34705882352941,
"alnum_prop": 0.5284882695360734,
"repo_name": "neumerance/deploy",
"id": "ad15b55deae2d7bee3b8ecc3d12c0f08f0f0db58",
"size": "5693",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": ".venv/lib/python2.7/site-packages/sphinx/builders/linkcheck.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "49399"
},
{
"name": "CSS",
"bytes": "769836"
},
{
"name": "CoffeeScript",
"bytes": "21"
},
{
"name": "Erlang",
"bytes": "31042"
},
{
"name": "JavaScript",
"bytes": "642626"
},
{
"name": "PHP",
"bytes": "3858"
},
{
"name": "Perl",
"bytes": "386749"
},
{
"name": "Python",
"bytes": "23358678"
},
{
"name": "Racket",
"bytes": "28441"
},
{
"name": "Ruby",
"bytes": "453"
},
{
"name": "Shell",
"bytes": "29414"
},
{
"name": "XSLT",
"bytes": "152770"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
import os

here = os.path.abspath(os.path.dirname(__file__))


def _read(filename):
    """Return the text of *filename* (relative to this script).

    BUGFIX: the original used bare ``open(...).read()``, leaking the file
    handles; ``with`` closes them deterministically.
    """
    with open(os.path.join(here, filename)) as f:
        return f.read()


README = _read('README.rst')
NEWS = _read('NEWS.txt')

version = '0.1'

install_requires = []

setup(name='eventually',
      version=version,
      description="",
      long_description=README + '\n\n' + NEWS,
      classifiers=[
          "Programming Language :: Python :: 2",
          "Programming Language :: Python :: 3",
      ],
      keywords='',
      author='Asheesh Laroia',
      author_email='asheesh@eventbrite.com',
      url='https://github.com/eventbrite/eventually',
      packages=find_packages(),
      include_package_data=True,
      zip_safe=True,
      install_requires=install_requires,
      entry_points={
          'console_scripts': [
              'eventually=eventually.cmdline:main',
          ],
      },
      )
| {
"content_hash": "70527f256cd4c81291a43a22b01daf93",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 54,
"avg_line_length": 26.441176470588236,
"alnum_prop": 0.6017797552836485,
"repo_name": "eventbrite/eventually",
"id": "759dc175711e0253965c409a54223141af9b6486",
"size": "899",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "12378"
}
],
"symlink_target": ""
} |
import warnings
from cloudify_rest_client.responses import ListResponse
class Node(dict):
    """
    Cloudify node.

    A ``dict`` subclass whose properties expose the node's fields.
    Most accessors return ``None`` when the field is absent; the integer
    counters are converted with ``int()`` when present.
    """

    @property
    def id(self):
        """:return: The identifier of the node."""
        return self.get('id')

    @property
    def deployment_id(self):
        """:return: The deployment id the node belongs to."""
        return self.get('deployment_id')

    @property
    def created_by(self):
        """:return: The name of the node creator."""
        return self.get('created_by')

    @property
    def properties(self):
        """:return: The static properties of the node."""
        return self.get('properties')

    @property
    def operations(self):
        """:return: The node operations mapped to plugins.
        :rtype: dict
        """
        return self.get('operations')

    @property
    def relationships(self):
        """:return: The node relationships with other nodes.
        :rtype: list
        """
        return self.get('relationships')

    @property
    def blueprint_id(self):
        """:return: The id of the blueprint this node belongs to.
        :rtype: str
        """
        return self.get('blueprint_id')

    @property
    def plugins(self):
        """:return: The plugins this node has operations mapped to.
        :rtype: dict
        """
        return self.get('plugins')

    @property
    def number_of_instances(self):
        """:return: The number of instances this node has, or None.
        :rtype: int
        """
        if 'number_of_instances' not in self:
            return None
        return int(self.get('number_of_instances'))

    @property
    def planned_number_of_instances(self):
        """:return: The planned number of instances this node has, or None.
        :rtype: int
        """
        if 'planned_number_of_instances' not in self:
            return None
        return int(self.get('planned_number_of_instances'))

    @property
    def deploy_number_of_instances(self):
        """:return: The instance count set for this node when the
                 deployment was created, or None.
        :rtype: int
        """
        if 'deploy_number_of_instances' not in self:
            return None
        return int(self.get('deploy_number_of_instances'))

    @property
    def host_id(self):
        """:return: The id of the node instance which hosts this node.
        :rtype: str
        """
        return self.get('host_id')

    @property
    def type_hierarchy(self):
        """:return: The type hierarchy of this node.
        :rtype: list
        """
        # Bracket access on purpose: raises KeyError when absent.
        return self['type_hierarchy']

    @property
    def type(self):
        """:return: The type of this node.
        :rtype: str
        """
        # Bracket access on purpose: raises KeyError when absent.
        return self['type']
class NodesClient(object):

    def __init__(self, api):
        self.api = api
        self._wrapper_cls = Node
        self._uri_prefix = 'nodes'

    def _create_filters(self,
                        deployment_id=None,
                        node_id=None,
                        sort=None,
                        is_descending=False,
                        evaluate_functions=False,
                        **kwargs):
        """Assemble the query parameters understood by the REST service."""
        filters = {'_evaluate_functions': evaluate_functions}
        if deployment_id:
            filters['deployment_id'] = deployment_id
        if node_id:
            warnings.warn("'node_id' filtering capability is deprecated, use"
                          " 'id' instead", DeprecationWarning)
            filters['id'] = node_id
        filters.update(kwargs)
        if sort:
            filters['_sort'] = ('-' + sort) if is_descending else sort
        return filters

    def list(self, _include=None, **kwargs):
        """
        Returns a list of nodes which belong to the deployment identified
        by the provided deployment id.

        :param deployment_id: The deployment's id to list nodes for.
        :param node_id: If provided, returns only the requested node. This
                        parameter is deprecated, use 'id' instead.
        :param _include: List of fields to include in response.
        :param sort: Key for sorting the list.
        :param is_descending: True for descending order, False for ascending.
        :param kwargs: Optional filter fields. for a list of available fields
               see the REST service's models.DeploymentNode.fields
        :param evaluate_functions: Evaluate intrinsic functions
        :return: Nodes.
        :rtype: list
        """
        query = self._create_filters(**kwargs)
        raw = self.api.get('/{0}'.format(self._uri_prefix),
                           params=query,
                           _include=_include)
        wrapped = [self._wrapper_cls(entry) for entry in raw['items']]
        return ListResponse(wrapped, raw['metadata'])

    def get(self, deployment_id, node_id, _include=None,
            evaluate_functions=False):
        """
        Returns the node which belongs to the deployment identified
        by the provided deployment id .

        :param deployment_id: The deployment's id of the node.
        :param node_id: The node id.
        :param _include: List of fields to include in response.
        :param evaluate_functions: Evaluate intrinsic functions
        :return: Nodes.
        :rtype: Node
        """
        assert deployment_id
        assert node_id
        matches = self.list(deployment_id=deployment_id,
                            id=node_id,
                            _include=_include,
                            evaluate_functions=evaluate_functions)
        # The id filter yields at most one node.
        return matches[0] if matches else None
| {
"content_hash": "e26a9a9e5ee18467ebadae44e67fead2",
"timestamp": "",
"source": "github",
"line_count": 209,
"max_line_length": 78,
"avg_line_length": 27.971291866028707,
"alnum_prop": 0.5417379404721177,
"repo_name": "cloudify-cosmo/cloudify-rest-client",
"id": "a16b271670ffef2269d41d44a7f8de852f278109",
"size": "6489",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cloudify_rest_client/nodes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1033"
},
{
"name": "Python",
"bytes": "152141"
}
],
"symlink_target": ""
} |
class AuthenticationError(Exception):
    """Generic OAuth2 authentication error.

    Attributes:
        message (dict) - parsed JSON response
        status_code (int) - HTTP code returned
        response (requests.Response) - response from Google

    Args:
        response (requests.Response) - response from Google
    """
    def __init__(self, response):
        self.response = response
        self.message = response.json()
        self.status_code = response.status_code
        # BUGFIX: the original never initialized Exception, leaving
        # str(err) and err.args empty; include the status and payload.
        super(AuthenticationError, self).__init__(
            'HTTP %s: %s' % (self.status_code, self.message))
| {
"content_hash": "8d6bf282195148282a138a99bf7d7c94",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 59,
"avg_line_length": 32.333333333333336,
"alnum_prop": 0.6494845360824743,
"repo_name": "miedzinski/google-oauth",
"id": "f172a8dd03622c027ca7183347e170dad1fbe51a",
"size": "485",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "google_oauth/exceptions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14508"
}
],
"symlink_target": ""
} |
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.
    """

    def setUp(self):
        self.maxDiff = None

        target = 'image07.xlsx'
        base_dir = 'xlsxwriter/test/comparison/'

        self.image_dir = base_dir + 'images/'
        self.got_filename = base_dir + '_test_' + target
        self.exp_filename = base_dir + 'xlsx_files/' + target

        self.ignore_files = []
        self.ignore_elements = {}

    def test_create_file(self):
        """Insert one PNG into each of two worksheets and compare against
        the Excel-generated reference workbook."""

        workbook = Workbook(self.got_filename)

        first_sheet = workbook.add_worksheet()
        second_sheet = workbook.add_worksheet()

        first_sheet.insert_image('E9', self.image_dir + 'red.png')
        second_sheet.insert_image('E9', self.image_dir + 'yellow.png')

        workbook.close()

        self.assertExcelEqual()
| {
"content_hash": "686239e3a6f7892b4c7011b538c2f596",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 74,
"avg_line_length": 28.10810810810811,
"alnum_prop": 0.6317307692307692,
"repo_name": "jkyeung/XlsxWriter",
"id": "9bf8580c1021819e6f4aafac7e6a0f136f2e9688",
"size": "1213",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "xlsxwriter/test/comparison/test_image07.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5113"
},
{
"name": "CSS",
"bytes": "16544"
},
{
"name": "HTML",
"bytes": "13100"
},
{
"name": "Makefile",
"bytes": "7819"
},
{
"name": "Perl",
"bytes": "3504"
},
{
"name": "Python",
"bytes": "2430294"
},
{
"name": "Shell",
"bytes": "6064"
}
],
"symlink_target": ""
} |
from __future__ import division, print_function, absolute_import
import pytest
import numpy as np
from numpy.testing import TestCase, assert_array_equal
import scipy.sparse as sps
from scipy.optimize._constraints import (
Bounds, LinearConstraint, NonlinearConstraint, PreparedConstraint,
new_bounds_to_old, old_bound_to_new, strict_bounds)
class TestStrictBounds(TestCase):
    def test_scalarvalue_unique_enforce_feasibility(self):
        m = 3
        lb = 2
        ub = 4

        # Feasibility not enforced: bounds relax to (-inf, inf).
        out_lb, out_ub = strict_bounds(lb, ub, False, m)
        assert_array_equal(out_lb, [-np.inf, -np.inf, -np.inf])
        assert_array_equal(out_ub, [np.inf, np.inf, np.inf])

        # Feasibility enforced: scalar bounds broadcast to length m.
        out_lb, out_ub = strict_bounds(lb, ub, True, m)
        assert_array_equal(out_lb, [2, 2, 2])
        assert_array_equal(out_ub, [4, 4, 4])

    def test_vectorvalue_unique_enforce_feasibility(self):
        m = 3
        lb = [1, 2, 3]
        ub = [4, 5, 6]

        out_lb, out_ub = strict_bounds(lb, ub, False, m)
        assert_array_equal(out_lb, [-np.inf, -np.inf, -np.inf])
        assert_array_equal(out_ub, [np.inf, np.inf, np.inf])

        out_lb, out_ub = strict_bounds(lb, ub, True, m)
        assert_array_equal(out_lb, [1, 2, 3])
        assert_array_equal(out_ub, [4, 5, 6])

    def test_scalarvalue_vector_enforce_feasibility(self):
        # Per-component enforcement flags with scalar bounds: only the
        # enforced component keeps its strict bounds.
        out_lb, out_ub = strict_bounds(2, 4, [False, True, False], 3)
        assert_array_equal(out_lb, [-np.inf, 2, -np.inf])
        assert_array_equal(out_ub, [np.inf, 4, np.inf])

    def test_vectorvalue_vector_enforce_feasibility(self):
        # Vector bounds with per-component enforcement flags.
        out_lb, out_ub = strict_bounds([1, 2, 3], [4, 6, np.inf],
                                       [True, False, True], 3)
        assert_array_equal(out_lb, [1, -np.inf, 3])
        assert_array_equal(out_ub, [4, np.inf, np.inf])
def test_prepare_constraint_infeasible_x0():
    """PreparedConstraint must reject an x0 that violates a constraint
    whose feasibility is enforced."""
    # Bound constraint: x0 = [1, 2, 3] lies outside [lb, ub] and
    # feasibility is enforced for the last two components.
    bounds = Bounds(np.array([0, 20, 30]),
                    np.array([0.5, np.inf, 70]),
                    np.array([False, True, True], dtype=bool))
    pytest.raises(ValueError, PreparedConstraint, bounds, np.array([1, 2, 3]))

    x0 = np.array([1, 2, 3, 4])
    A = np.array([[1, 2, 3, 4], [5, 0, 0, 6], [7, 0, 8, 0]])
    keep_feasible = np.array([True, True, True], dtype=bool)

    # Linear constraint: A @ x0 exceeds the enforced upper bound of 0.
    linear = LinearConstraint(A, -np.inf, 0, keep_feasible)
    pytest.raises(ValueError, PreparedConstraint, linear, x0)

    # Equivalent nonlinear constraint built from the same matrix.
    def fun(x):
        return A.dot(x)

    def jac(x):
        return A

    def hess(x, v):
        return sps.csr_matrix((4, 4))

    nonlinear = NonlinearConstraint(fun, -np.inf, 0, jac, hess,
                                    keep_feasible)
    pytest.raises(ValueError, PreparedConstraint, nonlinear, x0)
def test_new_bounds_to_old():
    """new_bounds_to_old must convert (lb, ub) arrays to the legacy
    list-of-(min, max) form: infinities become None and scalars are
    broadcast across all n variables."""
    lb = np.array([-np.inf, 2, 3])
    ub = np.array([3, np.inf, 10])

    assert_array_equal(new_bounds_to_old(lb, ub, 3),
                       [(None, 3), (2, None), (3, 10)])
    assert_array_equal(new_bounds_to_old(-1, ub, 3),
                       [(-1, 3), (-1, None), (-1, 10)])
    assert_array_equal(new_bounds_to_old(-np.inf, ub, 3),
                       [(None, 3), (None, None), (None, 10)])
    assert_array_equal(new_bounds_to_old(lb, 20, 3),
                       [(None, 20), (2, 20), (3, 20)])
    assert_array_equal(new_bounds_to_old(lb, np.inf, 3),
                       [(None, None), (2, None), (3, None)])
    assert_array_equal(new_bounds_to_old(1, 2, 3),
                       [(1, 2), (1, 2), (1, 2)])
    assert_array_equal(new_bounds_to_old(-np.inf, np.inf, 3),
                       [(None, None), (None, None), (None, None)])
def test_old_bounds_to_new():
    """old_bound_to_new is the inverse conversion: None turns into the
    appropriate +/- infinity."""
    lb, ub = old_bound_to_new(([1, 2], (None, 3), (-1, None)))
    assert_array_equal(lb, np.array([1, -np.inf, -1]))
    assert_array_equal(ub, np.array([2, 3, np.inf]))
| {
"content_hash": "c92bcc663000fa0319dc868e90822817",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 77,
"avg_line_length": 37.621212121212125,
"alnum_prop": 0.5386629077728554,
"repo_name": "gfyoung/scipy",
"id": "78b5637691073339321d0cb679947c7e6b67dc6a",
"size": "4966",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "scipy/optimize/tests/test_constraints.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4142653"
},
{
"name": "C++",
"bytes": "498142"
},
{
"name": "Fortran",
"bytes": "5572451"
},
{
"name": "Makefile",
"bytes": "778"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Python",
"bytes": "11540629"
},
{
"name": "Shell",
"bytes": "2226"
},
{
"name": "TeX",
"bytes": "52106"
}
],
"symlink_target": ""
} |
"""
The MIT License (MIT)
Copyright (c) 2015 Mat Leonard
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import math
# noinspection PyPep8Naming,PyPep8
class Vector(object):
    """A simple n-dimensional vector backed by a tuple of components.

    Components live in ``self.values``; all arithmetic returns new
    ``Vector`` instances.
    """

    def __init__(self, *args):
        """ Create a vector, example: v = Vector(1,2) """
        # No components -> the 2D zero vector (original behavior).
        if len(args) == 0:
            self.values = (0, 0)
        else:
            self.values = args

    def norm(self):
        """ Returns the norm (length, magnitude) of the vector """
        return math.sqrt(sum(comp ** 2 for comp in self))

    def argument(self):
        """ Returns the argument of the vector, the angle clockwise from +y."""
        arg_in_rad = math.acos(Vector(0, 1) * self / self.norm())
        arg_in_deg = math.degrees(arg_in_rad)
        if self.values[0] < 0:
            return 360 - arg_in_deg
        else:
            return arg_in_deg

    def normalize(self):
        """ Returns a normalized unit vector """
        norm = self.norm()
        normed = tuple(comp / norm for comp in self)
        return Vector(*normed)

    def rotate(self, *args):
        """ Rotate this vector. If passed a number, assumes this is a
            2D vector and rotates by the passed value in degrees. Otherwise,
            assumes the passed value is a list acting as a matrix which rotates the vector.
        """
        # BUGFIX: the original condition was
        #   len(args) == 1 and type(args[0]) == type(1) or type(args[0]) == type(1.)
        # where `and` binds tighter than `or`, so the argument-count check
        # was silently skipped for float arguments.
        if len(args) == 1 and isinstance(args[0], (int, float)):
            if len(self) != 2:
                raise ValueError("Rotation axis not defined for greater than 2D vector")
            return self._rotate2D(*args)
        elif len(args) == 1:
            matrix = args[0]
            if not all(len(row) == len(self.values) for row in matrix) or not len(matrix) == len(self):
                raise ValueError("Rotation matrix must be square and same dimensions as vector")
            return self.matrix_mult(matrix)

    def _rotate2D(self, theta):
        """ Rotate this vector by theta in degrees.
            Returns a new vector.
        """
        theta = math.radians(theta)
        # Just applying the 2D rotation matrix
        dc, ds = math.cos(theta), math.sin(theta)
        x, y = self.values
        x, y = dc * x - ds * y, ds * x + dc * y
        return Vector(x, y)

    def matrix_mult(self, matrix):
        """ Multiply this vector by a matrix.  Assuming matrix is a list of lists.

            Example:
            mat = [[1,2,3],[-1,0,1],[3,4,5]]
            Vector(1,2,3).matrix_mult(mat) ->  (14, 2, 26)
        """
        if not all(len(row) == len(self) for row in matrix):
            raise ValueError('Matrix must match vector dimensions')
        # Grab a row from the matrix, make it a Vector, take the dot product,
        # and store it as the first component
        product = tuple(Vector(*row) * self for row in matrix)
        return Vector(*product)

    def cross(self, other):
        """ Returns the cross product (vector product) of self and other.
            Both vectors must have exactly three components.
        """
        # DOCFIX: the original docstring called this the "inner product";
        # it is the 3D cross product.
        return Vector(
            self.values[1] * other.values[2] - self.values[2] * other.values[1],
            self.values[2] * other.values[0] - self.values[0] * other.values[2],
            self.values[0] * other.values[1] - self.values[1] * other.values[0])

    def inner(self, other):
        """ Returns the dot product (inner product) of self and other vector
        """
        return sum(a * b for a, b in zip(self, other))

    def __mul__(self, other):
        """ Returns the dot product of self and other if multiplied
            by another Vector.  If multiplied by an int or float,
            multiplies each component by other.
        """
        if isinstance(other, Vector):
            return self.inner(other)
        elif isinstance(other, (int, float)):
            product = tuple(a * other for a in self)
            return Vector(*product)
        # BUGFIX: previously fell through returning None for unsupported
        # operands; NotImplemented lets Python raise a proper TypeError.
        return NotImplemented

    def __rmul__(self, other):
        """ Called if 4*self for instance """
        return self.__mul__(other)

    def __truediv__(self, other):
        """ Component-wise division by an int or float. """
        if isinstance(other, (int, float)):
            divided = tuple(a / other for a in self)
            return Vector(*divided)
        return NotImplemented

    # BUGFIX: only __div__ was defined, so `/` raised TypeError on
    # Python 3.  Keep the Python 2 name as an alias.
    __div__ = __truediv__

    def __add__(self, other):
        """ Returns the vector addition of self and other """
        added = tuple(a + b for a, b in zip(self, other))
        return Vector(*added)

    def __sub__(self, other):
        """ Returns the vector difference of self and other """
        subbed = tuple(a - b for a, b in zip(self, other))
        return Vector(*subbed)

    def __iter__(self):
        return self.values.__iter__()

    def __eq__(self, other):
        # BUGFIX: zip() truncates to the shorter operand, so vectors of
        # different lengths with a common prefix compared equal; require
        # equal dimension first.
        return len(self) == len(other) and \
            all(a == b for a, b in zip(self, other))

    def __ne__(self, other):
        # BUGFIX: the original used all(a != b ...), which could report a
        # pair of vectors as neither equal nor unequal (e.g. (1,2) vs (1,3)).
        return not self.__eq__(other)

    def __len__(self):
        return len(self.values)

    def __getitem__(self, key):
        return self.values[key]

    def __repr__(self):
        return str(self.values)
| {
"content_hash": "df15a8666823cc900305d8fc6c2107b8",
"timestamp": "",
"source": "github",
"line_count": 160,
"max_line_length": 103,
"avg_line_length": 37.3125,
"alnum_prop": 0.5988274706867671,
"repo_name": "leupibr/BombDefusal",
"id": "a6c347fbe67d170e06be1fc9ce45c9249aaed4ef",
"size": "5970",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "bomb_defusal/view/utils/vector.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "200225"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
import django_extensions.db.fields
class Migration(migrations.Migration):
    # Auto-generated migration: introduces the ProjectType model and adds a
    # nullable Project -> ProjectType foreign key, so existing Project rows
    # remain valid without a type assigned.

    dependencies = [
        ('cbh_chembl_model_extension', '0021_auto_20150721_0551'),
    ]

    operations = [
        migrations.CreateModel(
            name='ProjectType',
            fields=[
                ('id', models.AutoField(
                    verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                # created/modified are maintained automatically by
                # django_extensions (set on insert / every save).
                ('created', django_extensions.db.fields.CreationDateTimeField(
                    default=django.utils.timezone.now, verbose_name='created', editable=False, blank=True)),
                ('modified', django_extensions.db.fields.ModificationDateTimeField(
                    default=django.utils.timezone.now, verbose_name='modified', editable=False, blank=True)),
                ('name', models.CharField(
                    default=None, max_length=100, null=True, db_index=True, blank=True)),
                ('show_compounds', models.BooleanField(default=True)),
            ],
            options={
                'ordering': ('-modified', '-created'),
                'abstract': False,
                'get_latest_by': 'modified',
            },
            bases=(models.Model,),
        ),
        migrations.AddField(
            model_name='project',
            name='project_type',
            field=models.ForeignKey(related_name='project', default=None,
                                    blank=True, to='cbh_chembl_model_extension.ProjectType', null=True),
            preserve_default=True,
        ),
    ]
| {
"content_hash": "1bb4d302a5a5057d6da24ba2c7db301e",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 109,
"avg_line_length": 39.73809523809524,
"alnum_prop": 0.5644098262432594,
"repo_name": "strets123/cbh_chembl_model_extension",
"id": "5a92ee320c4bb71462e43609aafa590cd7fc0115",
"size": "1693",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "cbh_chembl_model_extension/migrations/0022_auto_20150721_1031.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "661"
},
{
"name": "Makefile",
"bytes": "1298"
},
{
"name": "Python",
"bytes": "137533"
}
],
"symlink_target": ""
} |
import sys
def input_process(in_question):
    """Prompt the user with *in_question* and return the raw answer.

    Uses ``input`` on Python 3 and ``raw_input`` on Python 2.
    """
    if sys.version_info >= (3, 0):
        return input(in_question)
    return raw_input(in_question)
if __name__ == '__main__':
    # Interactive troubleshooting flow: a fixed decision tree of yes/no
    # questions.  Each diagnosis prints advice and exits; any answer other
    # than Y or N (case-insensitive) falls through to the final error line.
    answer = str(input_process('Is the car silent when you turn the key? '))
    if answer.upper() == 'Y':
        answer = str(input_process('Are the battery terminals corroded? '))
        if answer.upper() == 'Y':
            print ('Clean terminals and try starting again.')
            exit()
        elif answer.upper() == 'N':
            print ('Replace cables and try again.')
            exit()
    elif answer.upper() == 'N':
        answer = str(input_process('Does the car make a clicking noise? '))
        if answer.upper() == 'Y':
            print ('Replace the battery.')
            exit()
        elif answer.upper() == 'N':
            answer = str(input_process('Does the car crank up but fail to start? '))
            if answer.upper() == 'Y':
                print ('Check spark plug connections.')
                exit()
            elif answer.upper() == 'N':
                answer = str(input_process('Does the engine start and then die? '))
                if answer.upper() == 'Y':
                    answer = str(input_process('Does your car have fuel injection? '))
                    if answer.upper() == 'Y':
                        print ('Get it in for service.')
                        exit()
                    elif answer.upper() == 'N':
                        print ('Check to ensure the choke is opening and closing.')
                        exit()
                # NOTE(review): answering N to 'Does the engine start and
                # then die?' has no branch and falls through to the error
                # message below — confirm this is intended.
    print ('You input wrong value.')
| {
"content_hash": "f02cc4e43b6bf4bbb241bef551158dd7",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 86,
"avg_line_length": 43.08108108108108,
"alnum_prop": 0.5043914680050188,
"repo_name": "kssim/efp",
"id": "c9a141b452b7d5aeb125e78a02e26bea4ee4e15e",
"size": "1936",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "making_decisions/python/troubleshooting_car_issues.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Go",
"bytes": "27702"
},
{
"name": "Python",
"bytes": "51083"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Auto-generated migration: relaxes APIRequest.meta so the text field
    # may be left blank and stored as NULL.

    dependencies = [
        ('api', '0009_auto_20150113_1311'),
    ]

    operations = [
        migrations.AlterField(
            model_name='apirequest',
            name='meta',
            field=models.TextField(blank=True, null=True),
            preserve_default=True,
        ),
    ]
| {
"content_hash": "ac73080a754d124ce68a16e2120c839b",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 58,
"avg_line_length": 22.05263157894737,
"alnum_prop": 0.5871121718377088,
"repo_name": "CMU-Robotics-Club/roboticsclub.org",
"id": "804601410afbf484c2d913e9c72bf7dcd213d89f",
"size": "443",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "api/migrations/0010_auto_20150113_1338.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4725"
},
{
"name": "HTML",
"bytes": "33977"
},
{
"name": "JavaScript",
"bytes": "5079"
},
{
"name": "Python",
"bytes": "249072"
}
],
"symlink_target": ""
} |
import cPickle
from collections import defaultdict
from helpers import functions as helpers
from view import View
import pandas as pd
import copy
class Chain(defaultdict):
    """
    Container class that holds ordered Link defintions and associated Views.

    The Chain object is a subclassed dict of list where each list contains one
    or more View aggregations of a Stack. It is an internal class included and
    used inside the Stack object. Users can interact with the data directly
    through the Chain or through the related Cluster object.
    """

    def __init__(self, name=None):
        # Nested lookups default to new Chain instances (dk/filter/x/y/view).
        super(Chain, self).__init__(Chain)
        self.name = name
        self.orientation = None
        self.source_name = None
        self.source_type = None
        self.source_length = None
        self.len_of_axis = None
        self.content_of_axis = None
        self.data_key = None
        self.filter = None
        self.views = None
        # self.view_sizes = None
        # self.view_lengths = None
        self.has_weighted_views = False
        self.x_hidden_codes = None
        self.y_hidden_codes = None
        self.x_new_order = None
        self.y_new_order = None
        self.props_tests = list()
        self.props_tests_levels = list()
        self.means_tests = list()
        self.means_tests_levels = list()
        self.has_props_tests = False
        self.has_means_tests = False
        self.is_banked = False
        self.banked_spec = None
        self.banked_view_key = None
        self.banked_meta = None
        self.base_text = None
        self.annotations = None

    def __repr__(self):
        return ('%s:\norientation-axis: %s - %s,\ncontent-axis: %s, \nviews: %s'
                %(Chain, self.orientation, self.source_name,
                  self.content_of_axis, len(self.views)))

    def __setstate__(self, attr_dict):
        # Restore instance attributes when unpickling.
        self.__dict__.update(attr_dict)

    def __reduce__(self):
        # Pickle protocol: rebuild via Chain(name), then restore __dict__
        # and the nested dict items (Python 2 ``iteritems``).
        return self.__class__, (self.name, ), self.__dict__, None, self.iteritems()

    def save(self, path=None):
        """
        Save the current chain instance (self) to file (.chain) using cPickle.

        Attributes :
            path (string)
              Specifies the location of the saved file, NOTE: has to end with '/'
              Example: './tests/'
        """
        if path is None:
            path_chain = "./{}.chain".format(self.name)
        else:
            path_chain = path
        # ``with`` closes the handle even if pickling raises (the original
        # leaked the file object on error).
        with open(path_chain, 'wb') as f:
            cPickle.dump(self, f, cPickle.HIGHEST_PROTOCOL)

    def copy(self):
        """
        Create a copy of self by serializing to/from a bytestring using
        cPickle.
        """
        new_chain = cPickle.loads(
            cPickle.dumps(self, cPickle.HIGHEST_PROTOCOL))
        return new_chain

    def _lazy_name(self):
        """
        Apply lazy-name logic to chains created without an explicit name.
        - This method does not take any responsibilty for uniquley naming chains
        """
        self.name = '%s.%s.%s.%s' % (self.orientation, self.source_name, '.'.join(self.content_of_axis), '.'.join(self.views).replace(' ', '_'))

    def _derive_attributes(self, data_key, filter, x_def, y_def, views, source_type=None, orientation=None):
        """
        A simple method that is deriving attributes of the chain from its specification:
        (some attributes are only updated when chains get post-processed,
        i.e. there is meta data available for the dataframe)
        -- examples:
            - orientation:     directional alignment of the link
            - source_name:     name of the orientation defining variable
            - source_type:     dtype of the source variable
            - len_of_axis:     number of variables in the non-orientation axis
            - views:           the list of views specified in the chain
            - view_sizes:      a list of lists of dataframe index and column lenght tuples,
                               matched on x/view index (only when post-processed)
        """
        if x_def is not None or y_def is not None:
            self.orientation = orientation
            if self.orientation=='x':
                self.source_name = ''.join(x_def)
                self.len_of_axis = len(y_def)
                self.content_of_axis = y_def
            else:
                self.source_name = ''.join(y_def)
                self.len_of_axis = len(x_def)
                self.content_of_axis = x_def
            self.views = views
            self.data_key = data_key
            self.filter = filter
            self.source_type = source_type

    def concat(self):
        """
        Concatenates all Views found for the Chain definition along its
        orientations axis.
        """
        views_on_var = []
        contents = []
        full_chain = []
        all_chains = []
        chain_query = self[self.data_key][self.filter]
        if self.orientation == 'y':
            # NOTE(review): ``views_on_var`` is not reset per variable and
            # ``contents`` is re-emptied inside the loop — preserved as-is;
            # confirm against upstream before changing.
            for var in self.content_of_axis:
                contents = []
                for view in self.views:
                    try:
                        res = (chain_query[var][self.source_name]
                               [view].dataframe.copy())
                        if self.source_name == '@':
                            res.columns = pd.MultiIndex.from_product(
                                ['@', '-'], names=['Question', 'Values'])
                        views_on_var.append(res)
                    except Exception:
                        # Missing views for a variable are skipped; narrowed
                        # from a bare except so KeyboardInterrupt propagates.
                        pass
                contents.append(views_on_var)
            for c in contents:
                full_chain.append(pd.concat(c, axis=0))
            concat_chain = pd.concat(full_chain, axis=0)
        else:
            for var in self.content_of_axis:
                views_on_var = []
                for view in self.views:
                    try:
                        res = (chain_query[self.source_name][var]
                               [view].dataframe.copy())
                        if var == '@':
                            res.columns = pd.MultiIndex.from_product(
                                ['@', '-'], names=['Question', 'Values'])
                        views_on_var.append(res)
                    except Exception:
                        # Missing views for a variable are skipped.
                        pass
                contents.append(pd.concat(views_on_var, axis=0))
            concat_chain = pd.concat(contents, axis=1)
        return concat_chain

    def view_sizes(self):
        """Return, per y-variable, the (rows, cols) shape of every view."""
        dk = self.data_key
        fk = self.filter
        xk = self.source_name
        sizes = []
        for yk in self.content_of_axis:
            vk_sizes = []
            for vk in self.views:
                vk_sizes.append(self[dk][fk][xk][yk][vk].dataframe.shape)
            sizes.append(vk_sizes)
        return sizes

    def view_lengths(self):
        """Return only the row counts from view_sizes() (Python 2 idiom:
        ``zip(*...)[0]`` subscripts the zip result directly)."""
        lengths = [
            list(zip(*view_size)[0])
            for view_size in [y_size for y_size in self.view_sizes()]]
        return lengths

    def describe(self, index=None, columns=None, query=None):
        """ Generates a list of all link defining stack keys.
        """
        stack_tree = []
        for dk in self.keys():
            path_dk = [dk]
            filters = self[dk]
            for fk in filters.keys():
                path_fk = path_dk + [fk]
                xs = self[dk][fk]
                for sk in xs.keys():
                    path_sk = path_fk + [sk]
                    ys = self[dk][fk][sk]
                    for tk in ys.keys():
                        path_tk = path_sk + [tk]
                        views = self[dk][fk][sk][tk]
                        for vk in views.keys():
                            path_vk = path_tk + [vk, 1]
                            stack_tree.append(tuple(path_vk))
        column_names = ['data', 'filter', 'x', 'y', 'view', '#']
        df = pd.DataFrame.from_records(stack_tree, columns=column_names)
        if query is not None:
            df = df.query(query)
        if index is not None or columns is not None:
            df = df.pivot_table(values='#', index=index, columns=columns, aggfunc='count')
        return df

    # STATIC METHODS

    @staticmethod
    def load(filename):
        """
        This method loads the pickled object that is made using method: save()

        Attributes:
            filename ( string )
              Specifies the name of the file to be loaded.
              Example of use: new_stack = Chain.load("./tests/ChainName.chain")
        """
        # ``with`` closes the handle even if unpickling raises.
        with open(filename, 'rb') as f:
            new_stack = cPickle.load(f)
        return new_stack
| {
"content_hash": "5beb43e30c8a51139026c80eb7221053",
"timestamp": "",
"source": "github",
"line_count": 239,
"max_line_length": 144,
"avg_line_length": 37.22175732217573,
"alnum_prop": 0.5061825539568345,
"repo_name": "Quantipy/quantipy",
"id": "fb354e6abeb75e347b92381b397c5a586e24be60",
"size": "8896",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "quantipy/core/chain.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "28542"
},
{
"name": "HTML",
"bytes": "38072"
},
{
"name": "Python",
"bytes": "4139420"
},
{
"name": "Scheme",
"bytes": "4844"
},
{
"name": "Shell",
"bytes": "130"
},
{
"name": "Visual Basic",
"bytes": "36615"
}
],
"symlink_target": ""
} |
from django.utils import timezone
TASK_ID_PREFIX = "cla_backend.notifications.task.notifications"
def get_update_client_times(instance):
    """Build the kwargs dicts used to schedule notification tasks for
    *instance*.

    Always includes one empty dict (an immediate dispatch with default
    kwargs).  When the notification's start time is still in the future,
    also schedules one task at the start time and one at the end time,
    using deterministic task ids so re-scheduling replaces earlier tasks.
    """
    times = []
    now = timezone.now()
    task_id = "%s-%s" % (TASK_ID_PREFIX, instance.pk)
    times.append({})  # immediate run with default celery kwargs
    if instance.start_time > now:
        # NOTE(review): the end-time task is only scheduled when the start
        # time is in the future — confirm that is intended.
        times.append({"eta": instance.start_time, "task_id": "%s-start" % task_id})
        times.append({"eta": instance.end_time, "task_id": "%s-end" % task_id})
    return times
def send_notifications_to_users(sender, instance, **kwargs):
    """Signal handler: (re)schedule the notification delivery tasks for
    *instance*.

    The signal's extra ``**kwargs`` are intentionally unused.
    """
    from .tasks import send_notifications
    # Bug fix: the original loop variable was named ``kwargs`` and shadowed
    # the handler's **kwargs parameter; use a distinct name.
    for task_kwargs in get_update_client_times(instance):
        send_notifications.apply_async(**task_kwargs)
| {
"content_hash": "da18869204e4f0ef6a34aa15f62fb2a4",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 83,
"avg_line_length": 29.391304347826086,
"alnum_prop": 0.665680473372781,
"repo_name": "ministryofjustice/cla_backend",
"id": "a9e736977b50098f693d1cca792944247dac2e97",
"size": "691",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cla_backend/apps/notifications/signals.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "45941"
},
{
"name": "Dockerfile",
"bytes": "1272"
},
{
"name": "HTML",
"bytes": "14794"
},
{
"name": "JavaScript",
"bytes": "2762"
},
{
"name": "Mustache",
"bytes": "3607"
},
{
"name": "Python",
"bytes": "1577558"
},
{
"name": "Shell",
"bytes": "11204"
},
{
"name": "Smarty",
"bytes": "283906"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
# Generated by Django 1.9 on 2016-05-14 20:30
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: makes Podcast.networks optional (blank=True)
    # on the many-to-many relation to accounts.Network.

    dependencies = [
        ('podcasts', '0019_podcastepisode_flair_tip_jar'),
    ]

    operations = [
        migrations.AlterField(
            model_name='podcast',
            name='networks',
            field=models.ManyToManyField(blank=True, to='accounts.Network'),
        ),
    ]
| {
"content_hash": "946c6a1bc2d941ee67ffc13b07471ddd",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 76,
"avg_line_length": 25.15,
"alnum_prop": 0.6361829025844931,
"repo_name": "AlmostBetterNetwork/podmaster-host",
"id": "8380b8f264e44d8fedfb21a41f9f823faab5b913",
"size": "527",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "podcasts/migrations/0020_auto_20160514_2030.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "59857"
},
{
"name": "HTML",
"bytes": "130751"
},
{
"name": "JavaScript",
"bytes": "46479"
},
{
"name": "Python",
"bytes": "200422"
}
],
"symlink_target": ""
} |
""" p2p-streams (c) 2014 enen92 fightnight
This file contains web utilities
Classes:
download_tools() -> Contains a downloader, an extraction function and a remove function
Functions:
get_page_source -> Get a webpage source code through urllib2
mechanize_browser(url) -> Get a webpage source code through mechanize module. To avoid DDOS protections.
makeRequest(url, headers=None) -> check if a page is up and retrieve its source code
clean(text) -> Remove specific characters from the page source
url_isup(url, headers=None) -> Check if url is up. Returns True or False.
"""
import xbmc,xbmcplugin,xbmcgui,xbmcaddon,urllib,urllib2,tarfile,os,sys,re
from pluginxbmc import *
user_agent = 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1468.0 Safari/537.36'
class download_tools():
    """ Blocking helpers that show an XBMC progress dialog while
        downloading, extracting or deleting files.
    """

    def Downloader(self, url, dest, description, heading):
        """ Download *url* to *dest*, updating a progress dialog via the
            urlretrieve report hook. """
        dp = xbmcgui.DialogProgress()
        dp.create(heading, description, '')
        dp.update(0)
        urllib.urlretrieve(url, dest, lambda nb, bs, fs, url=url: self._pbhook(nb, bs, fs, dp))

    def _pbhook(self, numblocks, blocksize, filesize, dp=None):
        """ urlretrieve report hook: push the download percentage to the
            dialog; closes the dialog when the user cancels. """
        try:
            percent = int((int(numblocks) * int(blocksize) * 100) / int(filesize))
            dp.update(percent)
        except Exception:
            # filesize can be -1/0 when the server sends no Content-Length;
            # narrowed from a bare except so Ctrl-C still propagates.
            percent = 100
            dp.update(percent)
        if dp.iscanceled():
            dp.close()

    def extract(self, file_tar, destination):
        """ Extract *file_tar* into *destination* with a progress dialog. """
        dp = xbmcgui.DialogProgress()
        dp.create(translate(40000), translate(40044))
        tar = tarfile.open(file_tar)
        try:
            # NOTE: extractall trusts archive member paths; only use with
            # archives from a trusted source.
            tar.extractall(destination)
            dp.update(100)
        finally:
            # Bug fix: the original leaked the tarfile handle on error.
            tar.close()
        dp.close()

    def remove(self, file_):
        """ Delete *file_* from disk with a progress dialog. """
        dp = xbmcgui.DialogProgress()
        dp.create(translate(40000), translate(40045))
        os.remove(file_)
        dp.update(100)
        dp.close()
def get_page_source(url):
    """ Fetch *url* with a desktop-browser User-Agent header and return
        the response body. """
    request = urllib2.Request(url)
    request.add_header('User-Agent', user_agent)
    handle = urllib2.urlopen(request)
    page = handle.read()
    handle.close()
    return page
def mechanize_browser(url):
    """ Fetch *url* through mechanize (follows redirects/meta-refresh,
        ignores robots.txt, sends a browser User-Agent) and return the
        page source.  Useful against simple bot protections that block
        plain urllib2 requests.
    """
    import mechanize
    br = mechanize.Browser()
    br.set_handle_equiv(True)
    br.set_handle_redirect(True)
    br.set_handle_referer(True)
    br.set_handle_robots(False)
    br.addheaders = [('User-agent', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.1) Gecko/2008071615 Fedora/3.0.1-1.fc9 Firefox/3.0.1')]
    response = br.open(url)
    # Bug fix: the original read the body twice (r.read() and then
    # br.response().read()) and discarded the first read; read it once.
    return response.read()
def makeRequest(url, headers=None):
    """ Return the body of *url*; on any failure show an error dialog and
        exit the add-on (deliberate best-effort UX, kept from original).
    """
    try:
        if not headers:
            headers = {'User-agent' : 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:19.0) Gecko/20100101 Firefox/19.0'}
        req = urllib2.Request(url, None, headers)
        response = urllib2.urlopen(req)
        data = response.read()
        response.close()
        return data
    except Exception:
        # Narrowed from a bare except so SystemExit/KeyboardInterrupt
        # are not swallowed.
        mensagemok(translate(40000), translate(40122))
        sys.exit(0)
def url_isup(url, headers=None):
    """ Return True if *url* can be fetched completely, False otherwise. """
    try:
        if not headers:
            headers = {'User-agent' : 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:19.0) Gecko/20100101 Firefox/19.0'}
        req = urllib2.Request(url, None, headers)
        response = urllib2.urlopen(req)
        # Read the body so servers failing mid-transfer count as down
        # (kept from the original); the data itself is discarded.
        response.read()
        response.close()
        return True
    except Exception:
        # Narrowed from a bare except so Ctrl-C still propagates.
        return False
def clean(text):
    """ Strip layout characters and translate HTML entities in *text*
    in a single regex pass over the translation table below.

    NOTE(review): several keys in the table look like HTML entities that
    were already decoded by an earlier processing step (e.g. entries
    mapping a character to itself and duplicate quote keys); verify this
    table against the upstream source before modifying it.
    """
    command={'\r':'','\n':'','\t':'',' ':' ','"':'"',''':'',''':"'",'ã':'ã','&170;':'ª','é':'é','ç':'ç','ó':'ó','â':'â','ñ':'ñ','á':'á','í':'í','õ':'õ','É':'É','ú':'ú','&':'&','Á':'Á','Ã':'Ã','Ê':'Ê','Ç':'Ç','Ó':'Ó','Õ':'Õ','Ô':'Ó','Ú':'Ú'}
    # One compiled alternation of all (escaped) keys; each match is
    # replaced by its mapped value.
    regex = re.compile("|".join(map(re.escape, command.keys())))
    return regex.sub(lambda mo: command[mo.group(0)], text)
| {
"content_hash": "dd9e27dfca7da46a2c8a3a056b40e700",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 362,
"avg_line_length": 33.98095238095238,
"alnum_prop": 0.6519058295964125,
"repo_name": "aplicatii-romanesti/allinclusive-kodi-pi",
"id": "4ecea0759f4e1e42984ee84385546d00224c032e",
"size": "3613",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": ".kodi/addons/plugin.video.p2p-streams/resources/core/peertopeerutils/webutils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Perl",
"bytes": "6178"
},
{
"name": "Python",
"bytes": "8657978"
},
{
"name": "Shell",
"bytes": "198"
}
],
"symlink_target": ""
} |
from chartforge import dynamic_chart
@dynamic_chart()
class ExampleChart:
    """
    Example line chart.

    Registered with chartforge via the ``dynamic_chart`` class decorator;
    ``get_data`` supplies the values rendered into the JSON template.
    """
    # Template file the chart data is rendered into.
    template_name = 'example_line_chart.json'

    def get_data(self):
        # Dummy payload used by this example/test chart.
        return {'bvah': 1}
@dynamic_chart('CustomChartName', template_name='example_line_chart.json')
def my_chart(chart):
    """Example function-style chart registered under an explicit name."""
    # Logs the chart instance it receives, then returns a dummy payload.
    print('my_chart(%s)' % chart)
    return {'data': 123}
| {
"content_hash": "c09ec9e38544aa65e33b20e3a15080f6",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 74,
"avg_line_length": 20.61111111111111,
"alnum_prop": 0.6442048517520216,
"repo_name": "featherweightweb/django-chartforge",
"id": "b28e9f5f37fb114efb34f823951a612da27f2cf9",
"size": "371",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/charts.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22663"
}
],
"symlink_target": ""
} |
class mysql(object):
    """Minimal MySQL wrapper (pymysql) exposing query/commit/close.

    Rows are returned as dictionaries (DictCursor).  Mirrors the mssql
    and sqlite wrappers in this module.
    """
    def __init__(self, host, user, passwd, db):
        # Imported lazily so the module loads without the driver installed.
        import pymysql
        self.con = pymysql.connect(host=host,user=user,password=passwd,database=db)
        self.x = self.con.cursor(pymysql.cursors.DictCursor)

    def __enter__(self):
        """Support ``with mysql(...) as db:`` usage (new, backward-compatible)."""
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()
        return False

    def close(self):
        """Close the cursor, then the connection."""
        self.x.close()
        self.con.close()

    def query(self, query, commit, *args):
        """Execute *query* with *args* bound as DB-API parameters.

        commit -- pass 1 to commit immediately after executing.
        Returns the full result set.
        """
        # The original passed ``(args)`` — not a tuple literal, just args.
        self.x.execute(query, args)
        if commit == 1:
            self.commit()
        return self.x.fetchall()

    def commit(self):
        self.con.commit()
class mssql(object):
    """Minimal SQL Server wrapper (pymssql) exposing query/commit/close.

    Rows are returned as dictionaries (as_dict=True).  Mirrors the mysql
    and sqlite wrappers in this module.
    """
    def __init__(self, host, user, passwd, db):
        # Imported lazily so the module loads without the driver installed.
        import pymssql
        self.con = pymssql.connect(host=host,user=user,password=passwd,database=db)
        self.x = self.con.cursor(as_dict=True)

    def __enter__(self):
        """Support ``with mssql(...) as db:`` usage (new, backward-compatible)."""
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()
        return False

    def close(self):
        """Close the cursor, then the connection."""
        self.x.close()
        self.con.close()

    def query(self, query, commit, *args):
        """Execute *query* with *args* bound as DB-API parameters.

        commit -- pass 1 to commit immediately after executing.
        Returns the full result set.
        """
        # The original passed ``(args)`` — not a tuple literal, just args.
        self.x.execute(query, args)
        if commit == 1:
            self.commit()
        return self.x.fetchall()

    def commit(self):
        self.con.commit()
class sqlite(object):
    """Minimal SQLite wrapper (sqlite3) exposing query/commit/close.

    Mirrors the mysql and mssql wrappers in this module; rows are plain
    tuples (default sqlite3 cursor).
    """
    def __init__(self, dbpath):
        # Imported lazily to match the other wrappers in this module.
        import sqlite3
        self.con = sqlite3.connect(dbpath)
        self.x = self.con.cursor()

    def __enter__(self):
        """Support ``with sqlite(path) as db:`` usage (new, backward-compatible)."""
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()
        return False

    def close(self):
        """Close the cursor, then the connection."""
        self.x.close()
        self.con.close()

    def query(self, query, commit, *args):
        """Execute *query* with *args* bound as DB-API parameters.

        commit -- pass 1 to commit immediately after executing.
        Returns the full result set.
        """
        # The original passed ``(args)`` — not a tuple literal, just args.
        self.x.execute(query, args)
        if commit == 1:
            self.commit()
        return self.x.fetchall()

    def commit(self):
        self.con.commit()
| {
"content_hash": "c5c398ad18d04db837ac64fd639eb8ac",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 83,
"avg_line_length": 24.19672131147541,
"alnum_prop": 0.5650406504065041,
"repo_name": "xcs491/PythonPlugs",
"id": "75f3086a5bfe401252e481689b93d38f787159dd",
"size": "1476",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python3/sql3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4989"
}
],
"symlink_target": ""
} |
from . import memory_usage_calc
from .memory_usage_calc import *
from . import op_frequence
from .op_frequence import *
from . import quantize
from .quantize import *
from . import slim
from . import extend_optimizer
from .extend_optimizer import *
from . import model_stat
from .model_stat import *
from . import mixed_precision
from .mixed_precision import *
from . import layers
from .layers import *
from . import optimizer
from .optimizer import *
from . import sparsity
from .sparsity import *
# Aggregate the public API of the contrib package from its submodules'
# own __all__ lists.
__all__ = []
__all__ += memory_usage_calc.__all__
__all__ += op_frequence.__all__
__all__ += quantize.__all__
__all__ += extend_optimizer.__all__
# mixed_precision is exported as a submodule name rather than via its __all__.
__all__ += ['mixed_precision']
__all__ += layers.__all__
__all__ += optimizer.__all__
__all__ += sparsity.__all__
| {
"content_hash": "2b3a6c911206697c803be7f19281ae30",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 36,
"avg_line_length": 25.4,
"alnum_prop": 0.6797900262467191,
"repo_name": "PaddlePaddle/Paddle",
"id": "2860d414d0a5bd4bb212c0ee56b82033eb34f498",
"size": "1440",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "python/paddle/fluid/contrib/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "58544"
},
{
"name": "C",
"bytes": "210300"
},
{
"name": "C++",
"bytes": "36848680"
},
{
"name": "CMake",
"bytes": "902619"
},
{
"name": "Cuda",
"bytes": "5227207"
},
{
"name": "Dockerfile",
"bytes": "4361"
},
{
"name": "Go",
"bytes": "49796"
},
{
"name": "Java",
"bytes": "16630"
},
{
"name": "Jinja",
"bytes": "23852"
},
{
"name": "MLIR",
"bytes": "39982"
},
{
"name": "Python",
"bytes": "36203874"
},
{
"name": "R",
"bytes": "1332"
},
{
"name": "Shell",
"bytes": "553177"
}
],
"symlink_target": ""
} |
"""
Copyright 2013 yuki akiyama (you2197901 at gmail.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import datetime
import MySQLdb
import json
import os
CONFIG_FILE="partition.json"
# -----------------------------------
def config_read(filename):
    """Load and return the JSON configuration stored in *filename*."""
    # ``with`` closes the handle; the original leaked the open file object.
    with open(filename) as filein:
        return json.load(filein)
# -----------------------------------
def date_show_all_partitions(conn, tablename):
    """Return all partition names of *tablename*, newest first, excluding
    the catch-all 'pmax' partition.

    conn -- an open DB-API connection (MySQLdb) with %s paramstyle.
    """
    lists = []
    infotable = "information_schema.PARTITIONS"
    # Security/correctness fix: bind the table name as a query parameter
    # instead of concatenating it into the SQL string.
    sql = ("SELECT PARTITION_NAME FROM " + infotable +
           " WHERE TABLE_NAME=%s AND PARTITION_NAME!='pmax'"
           " ORDER BY PARTITION_NAME desc;")
    cur = conn.cursor()
    cur.execute(sql, (tablename,))
    for row in cur.fetchall():
        lists.append(row[0])
    cur.close()
    return lists
def partition_exec(conn, table):
    """Print one 'table:partition' line for every partition of *table*."""
    # Parenthesized print keeps identical Python 2 behavior for one argument.
    for partition_name in date_show_all_partitions(conn, table):
        print(table + ":" + partition_name)
def main():
    """Load ../config/partition.json, connect to MySQL and print the
    partitions of every configured table.
    """
    path = os.path.join(os.path.dirname(__file__), "..", "config")
    conf = config_read(os.path.join(path, CONFIG_FILE))
    myconf = conf["MYSQL"]
    conn = MySQLdb.connect(host=myconf["HOST"], db=myconf["DB"], user=myconf["USER"], passwd=myconf["PASS"])
    try:
        for table in conf["TABLES"]:
            partition_exec(conn, table["NAME"])
    finally:
        # Bug fix: close the connection even if a query fails.
        conn.close()


# Executed on import, matching the original script behavior.
main()
| {
"content_hash": "cf0479b7970f9658a46553ee5f9b1b97",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 146,
"avg_line_length": 30.982758620689655,
"alnum_prop": 0.6521981079577073,
"repo_name": "you21979/mysql_batch",
"id": "6e95872c0920d14430f731a56f1e7c13da21d9d7",
"size": "1815",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bin/partition_show.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "8059"
}
],
"symlink_target": ""
} |
import webapp2
import handler
from blog import NewPost, MainPage, SinglePost
# URL routes for the blog app (all under /assignment-3/blog).
URL_BASE = "/assignment-3/blog"
URL_MAIN = URL_BASE + "/?"
# Single posts are addressed by a numeric id captured by the route regex.
URL_SINGLE_POST = URL_BASE + "/(\d+)"
URL_NEWPOST = "/assignment-3/blog/newpost"

# Shared handler configuration: template environment plus the URLs the
# handlers need when building links.
config = {
    'jinja_env' : handler.setup_jinja('assignment-3'),
    'url_base' : URL_BASE,
    'url_newpost' : URL_NEWPOST
}

# Route table: more specific routes listed before the main page route.
app = webapp2.WSGIApplication([
    (URL_NEWPOST, NewPost),
    (URL_SINGLE_POST, SinglePost),
    (URL_MAIN, MainPage)
], config=config, debug=True) | {
"content_hash": "1b8145714f573fecc882e0b56ff361e1",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 54,
"avg_line_length": 23.857142857142858,
"alnum_prop": 0.6606786427145709,
"repo_name": "bikush/udacity-web-development",
"id": "b26455e80f445791e929a4b753e8e9b0a1da3af9",
"size": "501",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "assignment-3.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "9995"
},
{
"name": "Python",
"bytes": "22004"
}
],
"symlink_target": ""
} |
import logging
from hailtop.utils import secret_alnum_string
log = logging.getLogger('batch.spec_writer')
class SpecWriter:
    """Accumulates job spec strings for one batch bunch and writes them,
    together with a little-endian uint64 offset index, to the log store.
    """
    byteorder = 'little'
    signed = False
    bytes_per_offset = 8

    @staticmethod
    def get_index_file_offsets(job_id, start_job_id):
        """Inclusive byte range of the two index entries for *job_id*."""
        assert job_id >= start_job_id
        first_byte = SpecWriter.bytes_per_offset * (job_id - start_job_id)
        # Two consecutive offsets; the GCS `end` parameter is inclusive of
        # the last byte to return.
        last_byte = first_byte + 2 * SpecWriter.bytes_per_offset - 1
        return (first_byte, last_byte)

    @staticmethod
    def get_spec_file_offsets(offsets):
        """Decode two uint64 index entries into an inclusive byte range."""
        assert len(offsets) == 2 * SpecWriter.bytes_per_offset
        spec_start = int.from_bytes(offsets[:SpecWriter.bytes_per_offset],
                                    byteorder=SpecWriter.byteorder, signed=SpecWriter.signed)
        next_spec_start = int.from_bytes(offsets[SpecWriter.bytes_per_offset:],
                                         byteorder=SpecWriter.byteorder, signed=SpecWriter.signed)
        # The GCS `end` parameter is inclusive of the last byte to return.
        return (spec_start, next_spec_start - 1)

    @staticmethod
    async def get_token_start_id(db, batch_id, job_id):
        """Look up the bunch containing *job_id*; return (token, start_job_id)."""
        bunch_record = await db.select_and_fetchone(
            '''
SELECT start_job_id, token FROM batch_bunches
WHERE batch_id = %s AND start_job_id <= %s
ORDER BY start_job_id DESC
LIMIT 1;
''',
            (batch_id, job_id))
        return (bunch_record['token'], bunch_record['start_job_id'])

    def __init__(self, log_store, batch_id):
        self.log_store = log_store
        self.batch_id = batch_id
        self.token = secret_alnum_string(16)
        # Concatenated UTF-8 specs plus the running start-offset index.
        self._data_bytes = bytearray()
        self._offsets_bytes = bytearray()
        self._n_elements = 0

    def add(self, data):
        """Append one spec string (UTF-8) and record its start offset."""
        encoded = data.encode('utf-8')
        offset = len(self._data_bytes)
        self._offsets_bytes.extend(
            offset.to_bytes(8, byteorder=SpecWriter.byteorder, signed=SpecWriter.signed))
        self._data_bytes.extend(encoded)
        self._n_elements += 1

    async def write(self):
        """Append the terminating offset, persist both files and return
        the bunch token."""
        total = len(self._data_bytes)
        self._offsets_bytes.extend(
            total.to_bytes(8, byteorder=SpecWriter.byteorder, signed=SpecWriter.signed))
        await self.log_store.write_spec_file(self.batch_id, self.token,
                                             bytes(self._data_bytes), bytes(self._offsets_bytes))
        return self.token
| {
"content_hash": "4753291b4b68f2ddd6c6c7da55a10a97",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 129,
"avg_line_length": 36.43076923076923,
"alnum_prop": 0.6351351351351351,
"repo_name": "cseed/hail",
"id": "dd4a5abf2a7c023da950faf6cbfa6b08771f91db",
"size": "2368",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "batch/batch/spec_writer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "7729"
},
{
"name": "C",
"bytes": "289"
},
{
"name": "C++",
"bytes": "170210"
},
{
"name": "CSS",
"bytes": "20423"
},
{
"name": "Dockerfile",
"bytes": "7426"
},
{
"name": "HTML",
"bytes": "43106"
},
{
"name": "Java",
"bytes": "22564"
},
{
"name": "JavaScript",
"bytes": "730"
},
{
"name": "Jupyter Notebook",
"bytes": "162397"
},
{
"name": "Makefile",
"bytes": "58348"
},
{
"name": "PLpgSQL",
"bytes": "23163"
},
{
"name": "Python",
"bytes": "3477764"
},
{
"name": "R",
"bytes": "3038"
},
{
"name": "Scala",
"bytes": "3496240"
},
{
"name": "Shell",
"bytes": "41254"
},
{
"name": "TSQL",
"bytes": "10385"
},
{
"name": "TeX",
"bytes": "7125"
},
{
"name": "XSLT",
"bytes": "9787"
}
],
"symlink_target": ""
} |
''' Describes enumerated variable types for use in the server
'''
# STANDARD PYTHON IMPORTS
# PYTHON LIBRARIES
# USER LIBRARIES
# GLOBAL VARIABLES
# CLASSES
# FUNCTIONS
def enum(*sequential, **named):
    ''' Enables the use of enumerated types in python. Returns an Enum() type,
        the lookup of which returns entries from the enums dictionary.

        Positional names are numbered 0..n-1; keyword arguments supply
        explicit values.  Reverse mapping available as the dict
        MyEnum.get_name[my_enum_value].
    '''
    enums = dict(zip(sequential, range(len(sequential))), **named)
    # ``items`` instead of the Python-2-only ``iteritems`` keeps this
    # working on both Python 2 and Python 3.
    reverse = dict((value, key) for key, value in enums.items())
    enums['get_name'] = reverse
    return type('Enum', (), enums)
# CODE
if __name__ == '__main__':
    # The module only defines helpers; warn when executed directly.
    print "Do not run enums.py from __main__."
else:
    # Enumerated device types used by the server; note that Device is only
    # created when this module is imported, not when run as a script.
    Device = enum('INVALID', 'PI', 'PHONE', 'LAPTOP')
# END OF FILE | {
"content_hash": "2736495027b77df36085ebeb85534b32",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 76,
"avg_line_length": 24.419354838709676,
"alnum_prop": 0.6882430647291942,
"repo_name": "plus44/hcr-2016",
"id": "1b28a9df27495392a5093fc15fcd8a26cee153ba",
"size": "776",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/server/enum.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "8500"
},
{
"name": "CMake",
"bytes": "2496"
},
{
"name": "GLSL",
"bytes": "721"
},
{
"name": "HTML",
"bytes": "143"
},
{
"name": "Matlab",
"bytes": "4873"
},
{
"name": "Objective-C",
"bytes": "2721"
},
{
"name": "Python",
"bytes": "548575"
},
{
"name": "Ruby",
"bytes": "1314"
},
{
"name": "Shell",
"bytes": "1436"
},
{
"name": "Swift",
"bytes": "882369"
}
],
"symlink_target": ""
} |
import datetime
import json
import logging
import os
from unidecode import unidecode
logger = logging.getLogger(__name__)
def check_parliament_members_at_date(check_date):
    """Return the reference members whose membership period contains
    *check_date*.

    Reads the checked member list from a local JSON file.  An open-ended
    range (empty end_date) is treated as running until tomorrow.  Returns
    an empty list if the reference file is missing.
    """
    filepath = './data/secret/parliament_members_check.json'
    if not os.path.exists(filepath):
        # Fixed message typo ('does note exist') and use lazy %-style args.
        logger.error('file %s does not exist!', filepath)
        return []
    with open(filepath, 'r') as filein:
        members_json = json.load(filein)
    members_active = []
    for member in members_json:
        for date_range in member['date_ranges']:
            start_date = datetime.datetime.strptime(date_range['start_date'], "%Y-%m-%d").date()
            if date_range['end_date'] != '':
                end_date = datetime.datetime.strptime(date_range['end_date'], "%Y-%m-%d").date()
            else:
                end_date = datetime.date.today() + datetime.timedelta(days=1)
            # Strictly inside the range: the start and end days themselves
            # do not count as active (preserved from the original).
            if start_date < check_date < end_date:
                members_active.append(member)
    return members_active
def get_members_missing(members_current, members_current_check):
    """Return display strings for checked members absent from
    *members_current*, matched on transliterated (surname, forename).
    """
    # Precompute the normalized name pairs once: O(n + m) instead of the
    # original nested-loop O(n * m) with repeated unidecode calls.
    current_names = set()
    for member in members_current:
        current_names.add((
            unidecode(member.person.surname_including_prefix()),
            unidecode(member.person.forename),
        ))
    members_missing = []
    for member_check in members_current_check:
        key = (unidecode(member_check['name']), unidecode(member_check['forename']))
        if key not in current_names:
            members_missing.append(
                member_check['initials'] + ' ' + member_check['name'] + ' (' + member_check['forename'] + ')')
    return members_missing
def get_members_incorrect(members_current, members_current_check):
    """Return members of members_current not found in the reference list.

    Comparison is on (surname, forename) after ASCII-folding with unidecode.
    """
    incorrect = []
    for member in members_current:
        surname = unidecode(member.person.surname_including_prefix())
        forename = unidecode(member.person.forename)
        matched = any(
            unidecode(candidate['name']) == surname
            and unidecode(candidate['forename']) == forename
            for candidate in members_current_check
        )
        if not matched:
            incorrect.append(member)
    return incorrect
| {
"content_hash": "95daf5bde96ccf70d1daa60ebef86da7",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 111,
"avg_line_length": 41.03076923076923,
"alnum_prop": 0.6134233220847394,
"repo_name": "openkamer/openkamer",
"id": "100840c92ad85a4fd7dd90406a4dc804c5949f93",
"size": "2667",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "parliament/check.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "442"
},
{
"name": "CSS",
"bytes": "11171"
},
{
"name": "HTML",
"bytes": "154052"
},
{
"name": "JavaScript",
"bytes": "1051"
},
{
"name": "Python",
"bytes": "513282"
},
{
"name": "Shell",
"bytes": "157"
}
],
"symlink_target": ""
} |
# Path to the example's real-valued training feature matrix, and the
# parameter list consumed by the example runner when invoking the converter.
data = '../data/fm_train_real.dat'
parameter_list = [[data]]
def converter_isomap(data_fname):
    """Embed the features read from *data_fname* with Isomap (k=20, target dim 1).

    Returns the shogun features object that was fed to the converter.
    """
    # Bug fixes: 'sg' was referenced without ever being imported, and the
    # function ignored its data_fname argument in favor of the global 'data'.
    import shogun as sg
    from shogun import CSVFile
    from shogun import Isomap
    features = sg.features(CSVFile(data_fname))
    converter = Isomap()
    converter.set_k(20)
    converter.set_target_dim(1)
    converter.transform(features)
    return features
if __name__=='__main__':
    print('Isomap')
    # Disabled by default: running the example requires the shogun data files.
    #converter_isomap(*parameter_list[0])
| {
"content_hash": "04cb8644de0f351797e1636221075b02",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 38,
"avg_line_length": 19.95,
"alnum_prop": 0.706766917293233,
"repo_name": "karlnapf/shogun",
"id": "82a31d63b466ee32715a46d78a30a40b3d4e71c4",
"size": "421",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "examples/undocumented/python/converter_isomap.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "568"
},
{
"name": "C",
"bytes": "12000"
},
{
"name": "C++",
"bytes": "10554889"
},
{
"name": "CMake",
"bytes": "195345"
},
{
"name": "Dockerfile",
"bytes": "2029"
},
{
"name": "GDB",
"bytes": "89"
},
{
"name": "HTML",
"bytes": "2066"
},
{
"name": "MATLAB",
"bytes": "8755"
},
{
"name": "Makefile",
"bytes": "244"
},
{
"name": "Python",
"bytes": "285072"
},
{
"name": "Shell",
"bytes": "11995"
}
],
"symlink_target": ""
} |
"""Meta checkout manager supporting both Subversion and GIT."""
# Files
# .gclient : Current client configuration, written by 'config' command.
# Format is a Python script defining 'solutions', a list whose
# entries each are maps binding the strings "name" and "url"
# to strings specifying the name and location of the client
# module, as well as "custom_deps" to a map similar to the
# deps section of the DEPS file below, as well as
# "custom_hooks" to a list similar to the hooks sections of
# the DEPS file below.
# .gclient_entries : A cache constructed by 'update' command. Format is a
# Python script defining 'entries', a list of the names
# of all modules in the client
# <module>/DEPS : Python script defining var 'deps' as a map from each
# requisite submodule name to a URL where it can be found (via
# one SCM)
#
# Hooks
# .gclient and DEPS files may optionally contain a list named "hooks" to
# allow custom actions to be performed based on files that have changed in the
# working copy as a result of a "sync"/"update" or "revert" operation. This
# can be prevented by using --nohooks (hooks run by default). Hooks can also
# be forced to run with the "runhooks" operation. If "sync" is run with
# --force, all known but not suppressed hooks will run regardless of the state
# of the working copy.
#
# Each item in a "hooks" list is a dict, containing these two keys:
# "pattern" The associated value is a string containing a regular
# expression. When a file whose pathname matches the expression
# is checked out, updated, or reverted, the hook's "action" will
# run.
# "action" A list describing a command to run along with its arguments, if
# any. An action command will run at most one time per gclient
# invocation, regardless of how many files matched the pattern.
# The action is executed in the same directory as the .gclient
# file. If the first item in the list is the string "python",
# the current Python interpreter (sys.executable) will be used
# to run the command. If the list contains string
# "$matching_files" it will be removed from the list and the list
# will be extended by the list of matching files.
# "name" An optional string specifying the group to which a hook belongs
# for overriding and organizing.
#
# Example:
# hooks = [
# { "pattern": "\\.(gif|jpe?g|pr0n|png)$",
# "action": ["python", "image_indexer.py", "--all"]},
# { "pattern": ".",
# "name": "gyp",
# "action": ["python", "src/build/gyp_chromium"]},
# ]
#
# Pre-DEPS Hooks
# DEPS files may optionally contain a list named "pre_deps_hooks". These are
# the same as normal hooks, except that they run before the DEPS are
# processed. Pre-DEPS hooks run with "sync" and "revert" unless the
# --noprehooks flag is used.
#
# Specifying a target OS
# An optional key named "target_os" may be added to a gclient file to specify
# one or more additional operating systems that should be considered when
# processing the deps_os dict of a DEPS file.
#
# Example:
# target_os = [ "android" ]
#
# If the "target_os_only" key is also present and true, then *only* the
# operating systems listed in "target_os" will be used.
#
# Example:
# target_os = [ "ios" ]
# target_os_only = True
__version__ = '0.7'
import copy
import json
import logging
import optparse
import os
import platform
import posixpath
import pprint
import re
import sys
import time
import urllib
import urlparse
import breakpad # pylint: disable=W0611
import fix_encoding
import gclient_scm
import gclient_utils
import git_cache
from third_party.repo.progress import Progress
import subcommand
import subprocess2
from third_party import colorama
class GClientKeywords(object):
  """Namespace for the helper objects usable inside .gclient/DEPS files."""

  class FromImpl(object):
    """Backs the From() keyword: pull a dependency from another DEPS file."""

    def __init__(self, module_name, sub_target_name=None):
      """module_name is the dep module (or subdirectory) to include from.
      sub_target_name optionally overrides the name when the other DEPS file
      uses a different one, e.g. mapping src/net to net."""
      self.module_name = module_name
      self.sub_target_name = sub_target_name

    def __str__(self):
      return 'From(%s, %s)' % (repr(self.module_name),
                               repr(self.sub_target_name))

  class FileImpl(object):
    """Backs the File() keyword, which syncs a single file from a SVN repo."""

    def __init__(self, file_location):
      self.file_location = file_location

    def __str__(self):
      return 'File("%s")' % self.file_location

    def GetPath(self):
      # Directory component of "path/to/file@rev".
      return os.path.split(self.file_location)[0]

    def GetFilename(self):
      # Base name with any "@rev" suffix stripped off first.
      path_part = self.file_location.split('@')[0]
      return os.path.split(path_part)[1]

    def GetRevision(self):
      # The revision is whatever follows the first '@', or None if absent.
      tokens = self.file_location.split('@')
      return tokens[1] if len(tokens) > 1 else None

  class VarImpl(object):
    """Backs the Var() keyword; custom vars take precedence over DEPS vars."""

    def __init__(self, custom_vars, local_scope):
      self._custom_vars = custom_vars
      self._local_scope = local_scope

    def Lookup(self, var_name):
      """Implements the Var syntax."""
      if var_name in self._custom_vars:
        return self._custom_vars[var_name]
      if var_name in self._local_scope.get("vars", {}):
        return self._local_scope["vars"][var_name]
      raise gclient_utils.Error("Var is not defined: %s" % var_name)
class DependencySettings(GClientKeywords):
  """Immutable configuration settings."""

  def __init__(
      self, parent, url, safesync_url, managed, custom_deps, custom_vars,
      custom_hooks, deps_file, should_process):
    GClientKeywords.__init__(self)
    # These are not mutable:
    self._parent = parent
    self._safesync_url = safesync_url
    self._deps_file = deps_file
    self._url = url
    # 'managed' determines whether or not this dependency is synced/updated by
    # gclient after gclient checks it out initially. The difference between
    # 'managed' and 'should_process' is that the user specifies 'managed' via
    # the --unmanaged command-line flag or a .gclient config, where
    # 'should_process' is dynamically set by gclient if it goes over its
    # recursion limit and controls gclient's behavior so it does not misbehave.
    self._managed = managed
    self._should_process = should_process
    # This is a mutable value which has the list of 'target_os' OSes listed in
    # the current deps file.
    self.local_target_os = None
    # These are only set in .gclient and not in DEPS files.
    self._custom_vars = custom_vars or {}
    self._custom_deps = custom_deps or {}
    self._custom_hooks = custom_hooks or []
    # TODO(iannucci): Remove this when all masters are correctly substituting
    # the new blink url.
    if (self._custom_vars.get('webkit_trunk', '') ==
        'svn://svn-mirror.golo.chromium.org/webkit-readonly/trunk'):
      new_url = 'svn://svn-mirror.golo.chromium.org/blink/trunk'
      print 'Overwriting Var("webkit_trunk") with %s' % new_url
      self._custom_vars['webkit_trunk'] = new_url
    # Post process the url to remove trailing slashes.
    if isinstance(self._url, basestring):
      # urls are sometime incorrectly written as proto://host/path/@rev. Replace
      # it to proto://host/path@rev.
      self._url = self._url.replace('/@', '@')
    elif not isinstance(self._url,
                        (self.FromImpl, self.FileImpl, None.__class__)):
      raise gclient_utils.Error(
          ('dependency url must be either a string, None, '
           'File() or From() instead of %s') % self._url.__class__.__name__)
    # Make any deps_file path platform-appropriate.
    for sep in ['/', '\\']:
      self._deps_file = self._deps_file.replace(sep, os.sep)

  @property
  def deps_file(self):
    # Relative path of the DEPS-style file to parse for this dependency.
    return self._deps_file

  @property
  def managed(self):
    return self._managed

  @property
  def parent(self):
    return self._parent

  @property
  def root(self):
    """Returns the root node, a GClient object."""
    if not self.parent:
      # This line is to signal pylint that it could be a GClient instance.
      return self or GClient(None, None)
    return self.parent.root

  @property
  def safesync_url(self):
    return self._safesync_url

  @property
  def should_process(self):
    """True if this dependency should be processed, i.e. checked out."""
    return self._should_process

  @property
  def custom_vars(self):
    # Defensive copy: callers must not mutate the stored settings.
    return self._custom_vars.copy()

  @property
  def custom_deps(self):
    return self._custom_deps.copy()

  @property
  def custom_hooks(self):
    return self._custom_hooks[:]

  @property
  def url(self):
    return self._url

  @property
  def target_os(self):
    """OSes to consider: this node's list merged with every ancestor's."""
    if self.local_target_os is not None:
      return tuple(set(self.local_target_os).union(self.parent.target_os))
    else:
      return self.parent.target_os

  def get_custom_deps(self, name, url):
    """Returns a custom deps if applicable."""
    if self.parent:
      # Outermost settings (those closest to the .gclient file) win.
      url = self.parent.get_custom_deps(name, url)
    # None is a valid return value to disable a dependency.
    return self.custom_deps.get(name, url)
class Dependency(gclient_utils.WorkItem, DependencySettings):
"""Object that represents a dependency checkout."""
  def __init__(self, parent, name, url, safesync_url, managed, custom_deps,
               custom_vars, custom_hooks, deps_file, should_process):
    """Initializes work-item state on top of the immutable settings.

    See DependencySettings.__init__ for the meaning of the arguments; this
    subclass only adds the mutable bookkeeping used while processing.
    """
    gclient_utils.WorkItem.__init__(self, name)
    DependencySettings.__init__(
        self, parent, url, safesync_url, managed, custom_deps, custom_vars,
        custom_hooks, deps_file, should_process)
    # This is in both .gclient and DEPS files:
    self._deps_hooks = []
    self._pre_deps_hooks = []
    # Calculates properties:
    self._parsed_url = None
    self._dependencies = []
    # A cache of the files affected by the current operation, necessary for
    # hooks.
    self._file_list = []
    # If it is not set to True, the dependency wasn't processed for its child
    # dependency, i.e. its DEPS wasn't read.
    self._deps_parsed = False
    # This dependency has been processed, i.e. checked out
    self._processed = False
    # This dependency had its pre-DEPS hooks run
    self._pre_deps_hooks_ran = False
    # This dependency had its hook run
    self._hooks_ran = False
    # This is the scm used to checkout self.url. It may be used by dependencies
    # to get the datetime of the revision we checked out.
    self._used_scm = None
    self._used_revision = None
    # The actual revision we ended up getting, or None if that information is
    # unavailable
    self._got_revision = None
    # This is a mutable value that overrides the normal recursion limit for this
    # dependency. It is read from the actual DEPS file so cannot be set on
    # class instantiation.
    self.recursion_override = None
    # recursedeps is a mutable value that selectively overrides the default
    # 'no recursion' setting on a dep-by-dep basis. It will replace
    # recursion_override.
    self.recursedeps = None
    if not self.name and self.parent:
      raise gclient_utils.Error('Dependency without name')
  @property
  def requirements(self):
    """Calculate the list of requirements."""
    requirements = set()
    # self.parent is implicitly a requirement. This will be recursive by
    # definition.
    if self.parent and self.parent.name:
      requirements.add(self.parent.name)
    # For a tree with at least 2 levels*, the leaf node needs to depend
    # on the level higher up in an orderly way.
    # This becomes messy for >2 depth as the DEPS file format is a dictionary,
    # thus unsorted, while the .gclient format is a list thus sorted.
    #
    # * _recursion_limit is hard coded 2 and there is no hope to change this
    # value.
    #
    # Interestingly enough, the following condition only works in the case we
    # want: self is a 2nd level node. 3rd level nodes wouldn't need this since
    # they already have their parent as a requirement.
    if self.parent and self.parent.parent and not self.parent.parent.parent:
      requirements |= set(i.name for i in self.root.dependencies if i.name)
    if isinstance(self.url, self.FromImpl):
      # A From() dep must wait for the module it is pulled from.
      requirements.add(self.url.module_name)
    if self.name:
      # Any dependency that checks out into a parent directory of this one
      # must be processed first.
      requirements |= set(
          obj.name for obj in self.root.subtree(False)
          if (obj is not self
              and obj.name and
              self.name.startswith(posixpath.join(obj.name, ''))))
    requirements = tuple(sorted(requirements))
    logging.info('Dependency(%s).requirements = %s' % (self.name, requirements))
    return requirements
@property
def try_recursedeps(self):
"""Returns False if recursion_override is ever specified."""
if self.recursion_override is not None:
return False
return self.parent.try_recursedeps
@property
def recursion_limit(self):
"""Returns > 0 if this dependency is not too recursed to be processed."""
# We continue to support the absence of recursedeps until tools and DEPS
# using recursion_override are updated.
if self.try_recursedeps and self.parent.recursedeps != None:
if self.name in self.parent.recursedeps:
return 1
if self.recursion_override is not None:
return self.recursion_override
return max(self.parent.recursion_limit - 1, 0)
  def verify_validity(self):
    """Verifies that this Dependency is fine to add as a child of another one.
    Returns True if this entry should be added, False if it is a duplicate of
    another entry.
    """
    logging.info('Dependency(%s).verify_validity()' % self.name)
    if self.name in [s.name for s in self.parent.dependencies]:
      raise gclient_utils.Error(
          'The same name "%s" appears multiple times in the deps section' %
              self.name)
    if not self.should_process:
      # Return early, no need to set requirements.
      return True
    # This require a full tree traversal with locks.
    siblings = [d for d in self.root.subtree(False) if d.name == self.name]
    for sibling in siblings:
      # A duplicate elsewhere in the tree is only tolerated when both entries
      # resolve to the same url, or when exactly one resolves to nothing.
      self_url = self.LateOverride(self.url)
      sibling_url = sibling.LateOverride(sibling.url)
      # Allow to have only one to be None or ''.
      if self_url != sibling_url and bool(self_url) == bool(sibling_url):
        raise gclient_utils.Error(
            ('Dependency %s specified more than once:\n'
            ' %s [%s]\n'
            'vs\n'
            ' %s [%s]') % (
              self.name,
              sibling.hierarchy(),
              sibling_url,
              self.hierarchy(),
              self_url))
      # In theory we could keep it as a shadow of the other one. In
      # practice, simply ignore it.
      logging.warn('Won\'t process duplicate dependency %s' % sibling)
      return False
    return True
  def LateOverride(self, url):
    """Resolves the parsed url from url.
    Manages From() keyword accordingly. Do not touch self.parsed_url nor
    self.url because it may be called with other urls due to From()."""
    assert self.parsed_url == None or not self.should_process, self.parsed_url
    # A custom_deps entry from .gclient always wins over the DEPS value.
    parsed_url = self.get_custom_deps(self.name, url)
    if parsed_url != url:
      logging.info(
          'Dependency(%s).LateOverride(%s) -> %s' %
          (self.name, url, parsed_url))
      return parsed_url
    if isinstance(url, self.FromImpl):
      # Requires tree traversal.
      ref = [
          dep for dep in self.root.subtree(True) if url.module_name == dep.name
      ]
      if not ref:
        raise gclient_utils.Error('Failed to find one reference to %s. %s' % (
            url.module_name, ref))
      # It may happen that len(ref) > 1 but it's no big deal.
      ref = ref[0]
      sub_target = url.sub_target_name or self.name
      found_deps = [d for d in ref.dependencies if d.name == sub_target]
      if len(found_deps) != 1:
        raise gclient_utils.Error(
            'Couldn\'t find %s in %s, referenced by %s (parent: %s)\n%s' % (
                sub_target, ref.name, self.name, self.parent.name,
                str(self.root)))
      # Call LateOverride() again.
      found_dep = found_deps[0]
      parsed_url = found_dep.LateOverride(found_dep.url)
      logging.info(
          'Dependency(%s).LateOverride(%s) -> %s (From)' %
          (self.name, url, parsed_url))
      return parsed_url
    if isinstance(url, basestring):
      parsed_url = urlparse.urlparse(url)
      if (not parsed_url[0] and
          not re.match(r'^\w+\@[\w\.-]+\:[\w\/]+', parsed_url[2])):
        # A relative url. Fetch the real base.
        path = parsed_url[2]
        if not path.startswith('/'):
          raise gclient_utils.Error(
              'relative DEPS entry \'%s\' must begin with a slash' % url)
        # Create a scm just to query the full url.
        parent_url = self.parent.parsed_url
        if isinstance(parent_url, self.FileImpl):
          parent_url = parent_url.file_location
        scm = gclient_scm.CreateSCM(
            parent_url, self.root.root_dir, None, self.outbuf)
        parsed_url = scm.FullUrlForRelativeUrl(url)
      else:
        parsed_url = url
      logging.info(
          'Dependency(%s).LateOverride(%s) -> %s' %
          (self.name, url, parsed_url))
      return parsed_url
    if isinstance(url, self.FileImpl):
      # File() urls pass through untouched.
      logging.info(
          'Dependency(%s).LateOverride(%s) -> %s (File)' %
          (self.name, url, url))
      return url
    if url is None:
      logging.info(
          'Dependency(%s).LateOverride(%s) -> %s' % (self.name, url, url))
      return url
    raise gclient_utils.Error('Unknown url type')
  @staticmethod
  def MergeWithOsDeps(deps, deps_os, target_os_list):
    """Returns a new "deps" structure that is the deps sent in updated
    with information from deps_os (the deps_os section of the DEPS
    file) that matches the list of target os."""
    # Gather, per dep key, the (os, value) pairs from every targeted os.
    os_overrides = {}
    for the_target_os in target_os_list:
      the_target_os_deps = deps_os.get(the_target_os, {})
      for os_dep_key, os_dep_value in the_target_os_deps.iteritems():
        overrides = os_overrides.setdefault(os_dep_key, [])
        overrides.append((the_target_os, os_dep_value))
    # If any os didn't specify a value (we have fewer value entries
    # than in the os list), then it wants to use the default value.
    for os_dep_key, os_dep_value in os_overrides.iteritems():
      if len(os_dep_value) != len(target_os_list):
        # Record the default value too so that we don't accidentally
        # set it to None or miss a conflicting DEPS.
        if os_dep_key in deps:
          os_dep_value.append(('default', deps[os_dep_key]))
    target_os_deps = {}
    for os_dep_key, os_dep_value in os_overrides.iteritems():
      # os_dep_value is a list of (os, value) pairs.
      possible_values = set(x[1] for x in os_dep_value if x[1] is not None)
      if not possible_values:
        # Every override explicitly disabled the dep.
        target_os_deps[os_dep_key] = None
      else:
        if len(possible_values) > 1:
          # It would be possible to abort here but it would be
          # unfortunate if we end up preventing any kind of checkout.
          logging.error('Conflicting dependencies for %s: %s. (target_os=%s)',
                        os_dep_key, os_dep_value, target_os_list)
        # Sorting to get the same result every time in case of conflicts.
        target_os_deps[os_dep_key] = sorted(possible_values)[0]
    new_deps = deps.copy()
    new_deps.update(target_os_deps)
    return new_deps
  def ParseDepsFile(self):
    """Parses the DEPS file for this dependency."""
    assert not self.deps_parsed
    assert not self.dependencies
    deps_content = None
    use_strict = False
    # First try to locate the configured deps file. If it's missing, fallback
    # to DEPS.
    deps_files = [self.deps_file]
    if 'DEPS' not in deps_files:
      deps_files.append('DEPS')
    for deps_file in deps_files:
      filepath = os.path.join(self.root.root_dir, self.name, deps_file)
      if os.path.isfile(filepath):
        logging.info(
            'ParseDepsFile(%s): %s file found at %s', self.name, deps_file,
            filepath)
        break
      logging.info(
          'ParseDepsFile(%s): No %s file found at %s', self.name, deps_file,
          filepath)
    if os.path.isfile(filepath):
      deps_content = gclient_utils.FileRead(filepath)
      logging.debug('ParseDepsFile(%s) read:\n%s', self.name, deps_content)
      # A first line containing 'use strict' opts the DEPS file into the
      # restricted evaluation mode set up below.
      use_strict = 'use strict' in deps_content.splitlines()[0]
    local_scope = {}
    if deps_content:
      # One thing is unintuitive, vars = {} must happen before Var() use.
      var = self.VarImpl(self.custom_vars, local_scope)
      if use_strict:
        logging.info(
            'ParseDepsFile(%s): Strict Mode Enabled', self.name)
        # Strict mode: no builtins besides None, and no File()/From().
        global_scope = {
          '__builtins__': {'None': None},
          'Var': var.Lookup,
          'deps_os': {},
        }
      else:
        global_scope = {
          'File': self.FileImpl,
          'From': self.FromImpl,
          'Var': var.Lookup,
          'deps_os': {},
        }
      # Eval the content.
      try:
        exec(deps_content, global_scope, local_scope)
      except SyntaxError, e:
        gclient_utils.SyntaxErrorToError(filepath, e)
      if use_strict:
        for key, val in local_scope.iteritems():
          if not isinstance(val, (dict, list, tuple, str)):
            raise gclient_utils.Error(
                'ParseDepsFile(%s): Strict mode disallows %r -> %r' %
                (self.name, key, val))
    deps = local_scope.get('deps', {})
    if 'recursion' in local_scope:
      self.recursion_override = local_scope.get('recursion')
      logging.warning(
          'Setting %s recursion to %d.', self.name, self.recursion_limit)
    self.recursedeps = local_scope.get('recursedeps', None)
    if 'recursedeps' in local_scope:
      self.recursedeps = set(self.recursedeps)
      logging.warning('Found recursedeps %r.', repr(self.recursedeps))
    # If present, save 'target_os' in the local_target_os property.
    if 'target_os' in local_scope:
      self.local_target_os = local_scope['target_os']
    # load os specific dependencies if defined. these dependencies may
    # override or extend the values defined by the 'deps' member.
    target_os_list = self.target_os
    if 'deps_os' in local_scope and target_os_list:
      deps = self.MergeWithOsDeps(deps, local_scope['deps_os'], target_os_list)
    # If a line is in custom_deps, but not in the solution, we want to append
    # this line to the solution.
    for d in self.custom_deps:
      if d not in deps:
        deps[d] = self.custom_deps[d]
    # If use_relative_paths is set in the DEPS file, regenerate
    # the dictionary using paths relative to the directory containing
    # the DEPS file. Also update recursedeps if use_relative_paths is
    # enabled.
    use_relative_paths = local_scope.get('use_relative_paths', False)
    if use_relative_paths:
      logging.warning('use_relative_paths enabled.')
      rel_deps = {}
      for d, url in deps.items():
        # normpath is required to allow DEPS to use .. in their
        # dependency local path.
        rel_deps[os.path.normpath(os.path.join(self.name, d))] = url
      logging.warning('Updating deps by prepending %s.', self.name)
      deps = rel_deps
      # Update recursedeps if it's set.
      if self.recursedeps is not None:
        logging.warning('Updating recursedeps by prepending %s.', self.name)
        rel_deps = set()
        for d in self.recursedeps:
          rel_deps.add(os.path.normpath(os.path.join(self.name, d)))
        self.recursedeps = rel_deps
    # Convert the deps into real Dependency.
    deps_to_add = []
    for name, url in deps.iteritems():
      # Children of a node past its recursion limit are recorded but never
      # processed.
      should_process = self.recursion_limit and self.should_process
      deps_to_add.append(Dependency(
          self, name, url, None, None, None, None, None,
          self.deps_file, should_process))
    deps_to_add.sort(key=lambda x: x.name)
    # override named sets of hooks by the custom hooks
    hooks_to_run = []
    hook_names_to_suppress = [c.get('name', '') for c in self.custom_hooks]
    for hook in local_scope.get('hooks', []):
      if hook.get('name', '') not in hook_names_to_suppress:
        hooks_to_run.append(hook)
    # add the replacements and any additions
    for hook in self.custom_hooks:
      if 'action' in hook:
        hooks_to_run.append(hook)
    self._pre_deps_hooks = [self.GetHookAction(hook, []) for hook in
                            local_scope.get('pre_deps_hooks', [])]
    self.add_dependencies_and_close(deps_to_add, hooks_to_run)
    logging.info('ParseDepsFile(%s) done' % self.name)
def add_dependencies_and_close(self, deps_to_add, hooks):
"""Adds the dependencies, hooks and mark the parsing as done."""
for dep in deps_to_add:
if dep.verify_validity():
self.add_dependency(dep)
self._mark_as_parsed(hooks)
  def maybeGetParentRevision(self, command, options, parsed_url, parent):
    """Uses revision/timestamp of parent if no explicit revision was specified.
    If we are performing an update and --transitive is set, use
    - the parent's revision if 'self.url' is in the same repository
    - the parent's timestamp otherwise
    to update 'self.url'. The used revision/timestamp will be set in
    'options.revision'.
    If we have an explicit revision do nothing.
    """
    if command == 'update' and options.transitive and not options.revision:
      _, revision = gclient_utils.SplitUrlRevision(parsed_url)
      if not revision:
        # Inherit whatever revision the parent ended up using.
        options.revision = getattr(parent, '_used_revision', None)
        if (options.revision and
            not gclient_utils.IsDateRevision(options.revision)):
          assert self.parent and self.parent.used_scm
          # If this dependency is in the same repository as parent it's url will
          # start with a slash. If so we take the parent revision instead of
          # it's timestamp.
          # (The timestamps of commits in google code are broken -- which can
          # result in dependencies to be checked out at the wrong revision)
          if self.url.startswith('/'):
            if options.verbose:
              print('Using parent\'s revision %s since we are in the same '
                    'repository.' % options.revision)
          else:
            parent_revision_date = self.parent.used_scm.GetRevisionDate(
                options.revision)
            options.revision = gclient_utils.MakeDateRevision(
                parent_revision_date)
            if options.verbose:
              print('Using parent\'s revision date %s since we are in a '
                    'different repository.' % options.revision)
  # Arguments number differs from overridden method
  # pylint: disable=W0221
  def run(self, revision_overrides, command, args, work_queue, options):
    """Runs |command| then parse the DEPS file.

    Checks out/updates this dependency via its SCM (unless the command is
    hook-only), always parses its DEPS file, and enqueues the children on
    work_queue. For 'recurse', re-runs |args| as a command in the checkout.
    """
    logging.info('Dependency(%s).run()' % self.name)
    assert self._file_list == []
    if not self.should_process:
      return
    # When running runhooks, there's no need to consult the SCM.
    # All known hooks are expected to run unconditionally regardless of working
    # copy state, so skip the SCM status check.
    run_scm = command not in ('runhooks', 'recurse', None)
    parsed_url = self.LateOverride(self.url)
    # file_list stays None when hooks are disabled; the SCM skips collection.
    file_list = [] if not options.nohooks else None
    if run_scm and parsed_url:
      if isinstance(parsed_url, self.FileImpl):
        # Special support for single-file checkout.
        if not command in (None, 'cleanup', 'diff', 'pack', 'status'):
          # Sadly, pylint doesn't realize that parsed_url is of FileImpl.
          # pylint: disable=E1103
          options.revision = parsed_url.GetRevision()
          self._used_scm = gclient_scm.SVNWrapper(
              parsed_url.GetPath(), self.root.root_dir, self.name,
              out_cb=work_queue.out_cb)
          self._used_scm.RunCommand('updatesingle',
              options, args + [parsed_url.GetFilename()], file_list)
      else:
        # Create a shallow copy to mutate revision.
        options = copy.copy(options)
        options.revision = revision_overrides.pop(self.name, None)
        self.maybeGetParentRevision(
            command, options, parsed_url, self.parent)
        self._used_revision = options.revision
        self._used_scm = gclient_scm.CreateSCM(
            parsed_url, self.root.root_dir, self.name, self.outbuf,
            out_cb=work_queue.out_cb)
        self._got_revision = self._used_scm.RunCommand(command, options, args,
                                                       file_list)
        if file_list:
          file_list = [os.path.join(self.name, f.strip()) for f in file_list]
      # TODO(phajdan.jr): We should know exactly when the paths are absolute.
      # Convert all absolute paths to relative.
      for i in range(len(file_list or [])):
        # It depends on the command being executed (like runhooks vs sync).
        if not os.path.isabs(file_list[i]):
          continue
        prefix = os.path.commonprefix(
            [self.root.root_dir.lower(), file_list[i].lower()])
        file_list[i] = file_list[i][len(prefix):]
        # Strip any leading path separators.
        while file_list[i].startswith(('\\', '/')):
          file_list[i] = file_list[i][1:]
    # Always parse the DEPS file.
    self.ParseDepsFile()
    self._run_is_done(file_list or [], parsed_url)
    if command in ('update', 'revert') and not options.noprehooks:
      self.RunPreDepsHooks()
    if self.recursion_limit:
      # Parse the dependencies of this dependency.
      for s in self.dependencies:
        work_queue.enqueue(s)
    if command == 'recurse':
      if not isinstance(parsed_url, self.FileImpl):
        # Skip file only checkout.
        scm = gclient_scm.GetScmName(parsed_url)
        if not options.scm or scm in options.scm:
          cwd = os.path.normpath(os.path.join(self.root.root_dir, self.name))
          # Pass in the SCM type as an env variable. Make sure we don't put
          # unicode strings in the environment.
          env = os.environ.copy()
          if scm:
            env['GCLIENT_SCM'] = str(scm)
          if parsed_url:
            env['GCLIENT_URL'] = str(parsed_url)
          env['GCLIENT_DEP_PATH'] = str(self.name)
          if options.prepend_dir and scm == 'git':
            print_stdout = False
            def filter_fn(line):
              """Git-specific path marshaling. It is optimized for git-grep."""
              def mod_path(git_pathspec):
                match = re.match('^(\\S+?:)?([^\0]+)$', git_pathspec)
                modified_path = os.path.join(self.name, match.group(2))
                branch = match.group(1) or ''
                return '%s%s' % (branch, modified_path)
              match = re.match('^Binary file ([^\0]+) matches$', line)
              if match:
                print 'Binary file %s matches\n' % mod_path(match.group(1))
                return
              items = line.split('\0')
              if len(items) == 2 and items[1]:
                print '%s : %s' % (mod_path(items[0]), items[1])
              elif len(items) >= 2:
                # Multiple null bytes or a single trailing null byte indicate
                # git is likely displaying filenames only (such as with -l)
                print '\n'.join(mod_path(path) for path in items if path)
              else:
                print line
          else:
            print_stdout = True
            filter_fn = None
          if parsed_url is None:
            print >> sys.stderr, 'Skipped omitted dependency %s' % cwd
          elif os.path.isdir(cwd):
            try:
              gclient_utils.CheckCallAndFilter(
                  args, cwd=cwd, env=env, print_stdout=print_stdout,
                  filter_fn=filter_fn,
              )
            except subprocess2.CalledProcessError:
              if not options.ignore:
                raise
          else:
            print >> sys.stderr, 'Skipped missing %s' % cwd
  @gclient_utils.lockedmethod
  def _run_is_done(self, file_list, parsed_url):
    """Records the outcome of run(); executed under the instance lock."""
    # Both these are kept for hooks that are run as a separate tree traversal.
    self._file_list = file_list
    self._parsed_url = parsed_url
    self._processed = True
@staticmethod
def GetHookAction(hook_dict, matching_file_list):
"""Turns a parsed 'hook' dict into an executable command."""
logging.debug(hook_dict)
logging.debug(matching_file_list)
command = hook_dict['action'][:]
if command[0] == 'python':
# If the hook specified "python" as the first item, the action is a
# Python script. Run it by starting a new copy of the same
# interpreter.
command[0] = sys.executable
if '$matching_files' in command:
splice_index = command.index('$matching_files')
command[splice_index:splice_index + 1] = matching_file_list
return command
  def GetHooks(self, options):
    """Evaluates all hooks, and return them in a flat list.
    RunOnDeps() must have been called before to load the DEPS.
    """
    result = []
    if not self.should_process or not self.recursion_limit:
      # Don't run the hook when it is above recursion_limit.
      return result
    # If "--force" was specified, run all hooks regardless of what files have
    # changed.
    if self.deps_hooks:
      # TODO(maruel): If the user is using git or git-svn, then we don't know
      # what files have changed so we always run all hooks. It'd be nice to fix
      # that.
      if (options.force or
          isinstance(self.parsed_url, self.FileImpl) or
          gclient_scm.GetScmName(self.parsed_url) in ('git', None) or
          os.path.isdir(os.path.join(self.root.root_dir, self.name, '.git'))):
        for hook_dict in self.deps_hooks:
          result.append(self.GetHookAction(hook_dict, []))
      else:
        # Run hooks on the basis of whether the files from the gclient operation
        # match each hook's pattern.
        for hook_dict in self.deps_hooks:
          pattern = re.compile(hook_dict['pattern'])
          matching_file_list = [
              f for f in self.file_list_and_children if pattern.search(f)
          ]
          if matching_file_list:
            result.append(self.GetHookAction(hook_dict, matching_file_list))
    # Append the hooks of the whole subtree, depth-first.
    for s in self.dependencies:
      result.extend(s.GetHooks(options))
    return result
def RunHooksRecursively(self, options):
  """Runs every hook returned by GetHooks() for this node and its subtree.

  Exits the whole process with status 2 on the first failing hook; any hook
  taking longer than 10 seconds is reported on stdout.
  """
  assert self.hooks_ran == False
  self._hooks_ran = True
  for hook in self.GetHooks(options):
    try:
      start_time = time.time()
      gclient_utils.CheckCallAndFilterAndHeader(
          hook, cwd=self.root.root_dir, always=True)
    except (gclient_utils.Error, subprocess2.CalledProcessError), e:
      # Use a discrete exit status code of 2 to indicate that a hook action
      # failed. Users of this script may wish to treat hook action failures
      # differently from VC failures.
      print >> sys.stderr, 'Error: %s' % str(e)
      sys.exit(2)
    finally:
      # Always report slow hooks, even on failure.
      elapsed_time = time.time() - start_time
      if elapsed_time > 10:
        print "Hook '%s' took %.2f secs" % (
            gclient_utils.CommandToStr(hook), elapsed_time)
def RunPreDepsHooks(self):
  """Runs self.pre_deps_hooks before any dependency has been processed.

  Exits the whole process with status 2 on the first failing hook; any hook
  taking longer than 10 seconds is reported on stdout.
  """
  assert self.processed
  assert self.deps_parsed
  assert not self.pre_deps_hooks_ran
  assert not self.hooks_ran
  # Pre-DEPS hooks must run before any child dependency is processed.
  for s in self.dependencies:
    assert not s.processed
  self._pre_deps_hooks_ran = True
  for hook in self.pre_deps_hooks:
    try:
      start_time = time.time()
      gclient_utils.CheckCallAndFilterAndHeader(
          hook, cwd=self.root.root_dir, always=True)
    except (gclient_utils.Error, subprocess2.CalledProcessError), e:
      # Use a discrete exit status code of 2 to indicate that a hook action
      # failed. Users of this script may wish to treat hook action failures
      # differently from VC failures.
      print >> sys.stderr, 'Error: %s' % str(e)
      sys.exit(2)
    finally:
      # Always report slow hooks, even on failure.
      elapsed_time = time.time() - start_time
      if elapsed_time > 10:
        print "Hook '%s' took %.2f secs" % (
            gclient_utils.CommandToStr(hook), elapsed_time)
def subtree(self, include_all):
  """Breadth first recursion excluding root node."""
  children = self.dependencies
  # Yield this level first...
  for child in children:
    if include_all or child.should_process:
      yield child
  # ...then descend into each child's own subtree.
  for child in children:
    for node in child.subtree(include_all):
      yield node
def depth_first_tree(self):
  """Depth-first recursion including the root node."""
  # The root is yielded unconditionally; descendants are filtered on
  # should_process.
  yield self
  for child in self.dependencies:
    for descendant in child.depth_first_tree():
      if descendant.should_process:
        yield descendant
@gclient_utils.lockedmethod
def add_dependency(self, new_dep):
  # Appends a child Dependency. lockedmethod presumably serializes concurrent
  # mutation of the list -- TODO confirm against gclient_utils.
  self._dependencies.append(new_dep)
@gclient_utils.lockedmethod
def _mark_as_parsed(self, new_hooks):
  # Registers the hooks found while parsing DEPS and flags parsing as done.
  self._deps_hooks.extend(new_hooks)
  self._deps_parsed = True
@property
@gclient_utils.lockedmethod
def dependencies(self):
  # Immutable snapshot; mutation goes through add_dependency().
  return tuple(self._dependencies)
@property
@gclient_utils.lockedmethod
def deps_hooks(self):
  # Immutable snapshot of the hooks registered by _mark_as_parsed().
  return tuple(self._deps_hooks)
@property
@gclient_utils.lockedmethod
def pre_deps_hooks(self):
  # Immutable snapshot of hooks run by RunPreDepsHooks().
  return tuple(self._pre_deps_hooks)
@property
@gclient_utils.lockedmethod
def parsed_url(self):
  # Set by _run_is_done() once the dependency has been processed.
  return self._parsed_url
@property
@gclient_utils.lockedmethod
def deps_parsed(self):
  """This is purely for debugging purposes. It's not used anywhere."""
  return self._deps_parsed
@property
@gclient_utils.lockedmethod
def processed(self):
  # True once _run_is_done() has been called.
  return self._processed
@property
@gclient_utils.lockedmethod
def pre_deps_hooks_ran(self):
  # Guards RunPreDepsHooks() against running twice.
  return self._pre_deps_hooks_ran
@property
@gclient_utils.lockedmethod
def hooks_ran(self):
  # Guards RunHooksRecursively() against running twice.
  return self._hooks_ran
@property
@gclient_utils.lockedmethod
def file_list(self):
  # Immutable snapshot of the files touched by the last run.
  return tuple(self._file_list)
@property
def used_scm(self):
  """SCMWrapper instance for this dependency or None if not processed yet."""
  return self._used_scm
@property
@gclient_utils.lockedmethod
def got_revision(self):
  # Revision actually checked out, as recorded during processing.
  return self._got_revision
@property
def file_list_and_children(self):
  """Files touched by this dependency and by everything below it."""
  collected = list(self.file_list)
  for child in self.dependencies:
    collected += list(child.file_list_and_children)
  return tuple(collected)
def __str__(self):
  """Recursive, indented debug dump of this dependency and its subtree."""
  lines = []
  for field in ('name', 'url', 'parsed_url', 'safesync_url', 'custom_deps',
                'custom_vars', 'deps_hooks', 'file_list', 'should_process',
                'processed', 'hooks_ran', 'deps_parsed', 'requirements'):
    # Prefer the raw backing attribute over the (possibly locked) property.
    backing = '_' + field
    if hasattr(self, backing):
      value = getattr(self, backing, False)
    else:
      value = getattr(self, field, False)
    if value:
      lines.append('%s: %s' % (field, value))
  for child in self.dependencies:
    lines.extend('  ' + line for line in str(child).splitlines())
  lines.append('')
  return '\n'.join(lines)
def __repr__(self):
  # Short one-line form, e.g. for log messages.
  return '%s: %s' % (self.name, self.url)
def hierarchy(self):
  """Returns a human-readable hierarchical reference to a Dependency."""
  # Collect 'name(url)' entries from self up to the root, then print them
  # root-first.
  chain = ['%s(%s)' % (self.name, self.url)]
  node = self.parent
  while node and node.name:
    chain.append('%s(%s)' % (node.name, node.url))
    node = node.parent
  return ' -> '.join(reversed(chain))
class GClient(Dependency):
  """Object that represent a gclient checkout. A tree of Dependency(), one per
  solution or DEPS entry."""

  # Maps sys.platform values to the deps_os key evaluated in DEPS files.
  DEPS_OS_CHOICES = {
    "win32": "win",
    "win": "win",
    "cygwin": "win",
    "darwin": "mac",
    "mac": "mac",
    "unix": "unix",
    "linux": "unix",
    "linux2": "unix",
    "linux3": "unix",
    "android": "android",
  }

  # Template written out by 'gclient config' (see SetDefaultConfig()).
  DEFAULT_CLIENT_FILE_TEXT = ("""\
solutions = [
  { "name" : "%(solution_name)s",
    "url" : "%(solution_url)s",
    "deps_file" : "%(deps_file)s",
    "managed" : %(managed)s,
    "custom_deps" : {
    },
    "safesync_url": "%(safesync_url)s",
  },
]
cache_dir = %(cache_dir)r
""")

  # One solution entry of the snapshot emitted by PrintRevInfo(--snapshot).
  DEFAULT_SNAPSHOT_SOLUTION_TEXT = ("""\
  { "name" : "%(solution_name)s",
    "url" : "%(solution_url)s",
    "deps_file" : "%(deps_file)s",
    "managed" : %(managed)s,
    "custom_deps" : {
%(solution_deps)s },
    "safesync_url": "%(safesync_url)s",
  },
""")

  DEFAULT_SNAPSHOT_FILE_TEXT = ("""\
# Snapshot generated with gclient revinfo --snapshot
solutions = [
%(solution_list)s]
""")

  def __init__(self, root_dir, options):
    """Args:
      root_dir: directory containing (or that will contain) the .gclient file.
      options: parsed command line options.
    """
    # Do not change previous behavior. Only solution level and immediate DEPS
    # are processed.
    self._recursion_limit = 2
    Dependency.__init__(self, None, None, None, None, True, None, None, None,
                        'unused', True)
    self._options = options
    if options.deps_os:
      enforced_os = options.deps_os.split(',')
    else:
      enforced_os = [self.DEPS_OS_CHOICES.get(sys.platform, 'unix')]
    if 'all' in enforced_os:
      enforced_os = self.DEPS_OS_CHOICES.itervalues()
    self._enforced_os = tuple(set(enforced_os))
    self._root_dir = root_dir
    self.config_content = None

  def _CheckConfig(self):
    """Verify that the config matches the state of the existing checked-out
    solutions."""
    for dep in self.dependencies:
      if dep.managed and dep.url:
        scm = gclient_scm.CreateSCM(
            dep.url, self.root_dir, dep.name, self.outbuf)
        actual_url = scm.GetActualRemoteURL(self._options)
        if actual_url and not scm.DoesRemoteURLMatch(self._options):
          raise gclient_utils.Error('''
Your .gclient file seems to be broken. The requested URL is different from what
is actually checked out in %(checkout_path)s.
The .gclient file contains:
%(expected_url)s (%(expected_scm)s)
The local checkout in %(checkout_path)s reports:
%(actual_url)s (%(actual_scm)s)
You should ensure that the URL listed in .gclient is correct and either change
it or fix the checkout. If you're managing your own git checkout in
%(checkout_path)s but the URL in .gclient is for an svn repository, you probably
want to set 'managed': False in .gclient.
''' % {'checkout_path': os.path.join(self.root_dir, dep.name),
       'expected_url': dep.url,
       'expected_scm': gclient_scm.GetScmName(dep.url),
       'actual_url': actual_url,
       'actual_scm': gclient_scm.GetScmName(actual_url)})

  def SetConfig(self, content):
    """Parses a .gclient spec string and creates the solution Dependencies."""
    assert not self.dependencies
    config_dict = {}
    self.config_content = content
    try:
      # NOTE(review): the .gclient file is executed as Python; it is trusted
      # input by design.
      exec(content, config_dict)
    except SyntaxError, e:
      gclient_utils.SyntaxErrorToError('.gclient', e)
    # Append any target OS that is not already being enforced to the tuple.
    target_os = config_dict.get('target_os', [])
    if config_dict.get('target_os_only', False):
      self._enforced_os = tuple(set(target_os))
    else:
      self._enforced_os = tuple(set(self._enforced_os).union(target_os))
    gclient_scm.GitWrapper.cache_dir = config_dict.get('cache_dir')
    git_cache.Mirror.SetCachePath(config_dict.get('cache_dir'))
    if not target_os and config_dict.get('target_os_only', False):
      raise gclient_utils.Error('Can\'t use target_os_only if target_os is '
                                'not specified')
    deps_to_add = []
    for s in config_dict.get('solutions', []):
      try:
        deps_to_add.append(Dependency(
            self, s['name'], s['url'],
            s.get('safesync_url', None),
            s.get('managed', True),
            s.get('custom_deps', {}),
            s.get('custom_vars', {}),
            s.get('custom_hooks', []),
            s.get('deps_file', 'DEPS'),
            True))
      except KeyError:
        raise gclient_utils.Error('Invalid .gclient file. Solution is '
                                  'incomplete: %s' % s)
    self.add_dependencies_and_close(deps_to_add, config_dict.get('hooks', []))
    logging.info('SetConfig() done')

  def SaveConfig(self):
    """Writes config_content back to the .gclient file."""
    gclient_utils.FileWrite(os.path.join(self.root_dir,
                                         self._options.config_filename),
                            self.config_content)

  @staticmethod
  def LoadCurrentConfig(options):
    """Searches for and loads a .gclient file relative to the current working
    dir. Returns a GClient object."""
    if options.spec:
      client = GClient('.', options)
      client.SetConfig(options.spec)
    else:
      path = gclient_utils.FindGclientRoot(os.getcwd(), options.config_filename)
      if not path:
        return None
      client = GClient(path, options)
      client.SetConfig(gclient_utils.FileRead(
          os.path.join(path, options.config_filename)))
    if (options.revisions and
        len(client.dependencies) > 1 and
        any('@' not in r for r in options.revisions)):
      # A bare revision cannot be matched to a solution when several exist.
      print >> sys.stderr, (
          'You must specify the full solution name like --revision %s@%s\n'
          'when you have multiple solutions setup in your .gclient file.\n'
          'Other solutions present are: %s.') % (
              client.dependencies[0].name,
              options.revisions[0],
              ', '.join(s.name for s in client.dependencies[1:]))
    return client

  def SetDefaultConfig(self, solution_name, deps_file, solution_url,
                       safesync_url, managed=True, cache_dir=None):
    """Fills DEFAULT_CLIENT_FILE_TEXT and installs it as the active config."""
    self.SetConfig(self.DEFAULT_CLIENT_FILE_TEXT % {
        'solution_name': solution_name,
        'solution_url': solution_url,
        'deps_file': deps_file,
        'safesync_url' : safesync_url,
        'managed': managed,
        'cache_dir': cache_dir,
    })

  def _SaveEntries(self):
    """Creates a .gclient_entries file to record the list of unique checkouts.

    The .gclient_entries file lives in the same directory as .gclient.
    """
    # Sometimes pprint.pformat will use {', sometimes it'll use { ' ... It
    # makes testing a bit too fun.
    result = 'entries = {\n'
    for entry in self.root.subtree(False):
      # Skip over File() dependencies as we can't version them.
      if not isinstance(entry.parsed_url, self.FileImpl):
        result += ' %s: %s,\n' % (pprint.pformat(entry.name),
                                  pprint.pformat(entry.parsed_url))
    result += '}\n'
    file_path = os.path.join(self.root_dir, self._options.entries_filename)
    logging.debug(result)
    gclient_utils.FileWrite(file_path, result)

  def _ReadEntries(self):
    """Read the .gclient_entries file for the given client.

    Returns:
      A sequence of solution names, which will be empty if there is the
      entries file hasn't been created yet.
    """
    scope = {}
    filename = os.path.join(self.root_dir, self._options.entries_filename)
    if not os.path.exists(filename):
      return {}
    try:
      # The entries file is Python source written by _SaveEntries().
      exec(gclient_utils.FileRead(filename), scope)
    except SyntaxError, e:
      gclient_utils.SyntaxErrorToError(filename, e)
    return scope['entries']

  def _EnforceRevisions(self):
    """Checks for revision overrides."""
    revision_overrides = {}
    if self._options.head:
      return revision_overrides
    # Do not check safesync_url if one or more --revision flag is specified.
    if not self._options.revisions:
      for s in self.dependencies:
        if not s.managed:
          self._options.revisions.append('%s@unmanaged' % s.name)
        elif s.safesync_url:
          self._ApplySafeSyncRev(dep=s)
    if not self._options.revisions:
      return revision_overrides
    solutions_names = [s.name for s in self.dependencies]
    index = 0
    for revision in self._options.revisions:
      if not '@' in revision:
        # Support for --revision 123
        revision = '%s@%s' % (solutions_names[index], revision)
      name, rev = revision.split('@', 1)
      revision_overrides[name] = rev
      index += 1
    return revision_overrides

  def _ApplySafeSyncRev(self, dep):
    """Finds a valid revision from the content of the safesync_url and apply it
    by appending revisions to the revision list. Throws if revision appears to
    be invalid for the given |dep|."""
    assert len(dep.safesync_url) > 0
    handle = urllib.urlopen(dep.safesync_url)
    rev = handle.read().strip()
    handle.close()
    if not rev:
      raise gclient_utils.Error(
          'It appears your safesync_url (%s) is not working properly\n'
          '(as it returned an empty response). Check your config.' %
          dep.safesync_url)
    scm = gclient_scm.CreateSCM(
        dep.url, dep.root.root_dir, dep.name, self.outbuf)
    safe_rev = scm.GetUsableRev(rev, self._options)
    if self._options.verbose:
      print('Using safesync_url revision: %s.\n' % safe_rev)
    self._options.revisions.append('%s@%s' % (dep.name, safe_rev))

  def RunOnDeps(self, command, args, ignore_requirements=False, progress=True):
    """Runs a command on each dependency in a client and its dependencies.

    Args:
      command: The command to use (e.g., 'status' or 'diff')
      args: list of str - extra arguments to add to the command line.
    """
    if not self.dependencies:
      raise gclient_utils.Error('No solution specified')
    self._CheckConfig()
    revision_overrides = {}
    # It's unnecessary to check for revision overrides for 'recurse'.
    # Save a few seconds by not calling _EnforceRevisions() in that case.
    if command not in ('diff', 'recurse', 'runhooks', 'status'):
      revision_overrides = self._EnforceRevisions()
    pm = None
    # Disable progress for non-tty stdout.
    if (sys.stdout.isatty() and not self._options.verbose and progress):
      if command in ('update', 'revert'):
        pm = Progress('Syncing projects', 1)
      elif command == 'recurse':
        pm = Progress(' '.join(args), 1)
    work_queue = gclient_utils.ExecutionQueue(
        self._options.jobs, pm, ignore_requirements=ignore_requirements,
        verbose=self._options.verbose)
    for s in self.dependencies:
      work_queue.enqueue(s)
    work_queue.flush(revision_overrides, command, args, options=self._options)
    if revision_overrides:
      print >> sys.stderr, ('Please fix your script, having invalid '
                            '--revision flags will soon considered an error.')
    # Once all the dependencies have been processed, it's now safe to run the
    # hooks.
    if not self._options.nohooks:
      self.RunHooksRecursively(self._options)
    if command == 'update':
      # Notify the user if there is an orphaned entry in their working copy.
      # Only delete the directory if there are no changes in it, and
      # delete_unversioned_trees is set to true.
      entries = [i.name for i in self.root.subtree(False) if i.url]
      full_entries = [os.path.join(self.root_dir, e.replace('/', os.path.sep))
                      for e in entries]
      for entry, prev_url in self._ReadEntries().iteritems():
        if not prev_url:
          # entry must have been overridden via .gclient custom_deps
          continue
        # Fix path separator on Windows.
        entry_fixed = entry.replace('/', os.path.sep)
        e_dir = os.path.join(self.root_dir, entry_fixed)
        # NOTE(review): _IsParentOfAny is defined but never called; the
        # condition below inlines equivalent prefix logic.
        def _IsParentOfAny(parent, path_list):
          parent_plus_slash = parent + '/'
          return any(
              path[:len(parent_plus_slash)] == parent_plus_slash
              for path in path_list)
        # Use entry and not entry_fixed there.
        if (entry not in entries and
            (not any(path.startswith(entry + '/') for path in entries)) and
            os.path.exists(e_dir)):
          scm = gclient_scm.CreateSCM(
              prev_url, self.root_dir, entry_fixed, self.outbuf)
          # Check to see if this directory is now part of a higher-up checkout.
          # The directory might be part of a git OR svn checkout.
          scm_root = None
          for scm_class in (gclient_scm.scm.GIT, gclient_scm.scm.SVN):
            try:
              scm_root = scm_class.GetCheckoutRoot(scm.checkout_path)
            except subprocess2.CalledProcessError:
              pass
            if scm_root:
              break
          else:
            logging.warning('Could not find checkout root for %s. Unable to '
                            'determine whether it is part of a higher-level '
                            'checkout, so not removing.' % entry)
            continue
          if scm_root in full_entries:
            logging.info('%s is part of a higher level checkout, not '
                         'removing.', scm.GetCheckoutRoot())
            continue
          file_list = []
          scm.status(self._options, [], file_list)
          modified_files = file_list != []
          if (not self._options.delete_unversioned_trees or
              (modified_files and not self._options.force)):
            # There are modified files in this entry. Keep warning until
            # removed.
            print(('\nWARNING: \'%s\' is no longer part of this client. '
                   'It is recommended that you manually remove it.\n') %
                      entry_fixed)
          else:
            # Delete the entry
            print('\n________ deleting \'%s\' in \'%s\'' % (
                entry_fixed, self.root_dir))
            gclient_utils.rmtree(e_dir)
      # record the current list of entries for next time
      self._SaveEntries()
    return 0

  def PrintRevInfo(self):
    """Prints a revision-qualified URL for each dependency, or a full snapshot
    .gclient file when --snapshot is given."""
    if not self.dependencies:
      raise gclient_utils.Error('No solution specified')
    # Load all the settings.
    work_queue = gclient_utils.ExecutionQueue(
        self._options.jobs, None, False, verbose=self._options.verbose)
    for s in self.dependencies:
      work_queue.enqueue(s)
    work_queue.flush({}, None, [], options=self._options)

    def GetURLAndRev(dep):
      """Returns the revision-qualified SCM url for a Dependency."""
      if dep.parsed_url is None:
        return None
      if isinstance(dep.parsed_url, self.FileImpl):
        original_url = dep.parsed_url.file_location
      else:
        original_url = dep.parsed_url
      url, _ = gclient_utils.SplitUrlRevision(original_url)
      scm = gclient_scm.CreateSCM(
          original_url, self.root_dir, dep.name, self.outbuf)
      if not os.path.isdir(scm.checkout_path):
        return None
      return '%s@%s' % (url, scm.revinfo(self._options, [], None))

    if self._options.snapshot:
      new_gclient = ''
      # First level at .gclient
      for d in self.dependencies:
        entries = {}
        def GrabDeps(dep):
          """Recursively grab dependencies."""
          for d in dep.dependencies:
            entries[d.name] = GetURLAndRev(d)
            GrabDeps(d)
        GrabDeps(d)
        custom_deps = []
        for k in sorted(entries.keys()):
          if entries[k]:
            # Quotes aren't escaped...
            custom_deps.append(' \"%s\": \'%s\',\n' % (k, entries[k]))
          else:
            custom_deps.append(' \"%s\": None,\n' % k)
        new_gclient += self.DEFAULT_SNAPSHOT_SOLUTION_TEXT % {
            'solution_name': d.name,
            'solution_url': d.url,
            'deps_file': d.deps_file,
            'safesync_url' : d.safesync_url or '',
            'managed': d.managed,
            'solution_deps': ''.join(custom_deps),
        }
      # Print the snapshot configuration file
      print(self.DEFAULT_SNAPSHOT_FILE_TEXT % {'solution_list': new_gclient})
    else:
      entries = {}
      for d in self.root.subtree(False):
        if self._options.actual:
          entries[d.name] = GetURLAndRev(d)
        else:
          entries[d.name] = d.parsed_url
      keys = sorted(entries.keys())
      for x in keys:
        print('%s: %s' % (x, entries[x]))
    logging.info(str(self))

  def ParseDepsFile(self):
    """No DEPS to parse for a .gclient file."""
    raise gclient_utils.Error('Internal error')

  @property
  def root_dir(self):
    """Root directory of gclient checkout."""
    return self._root_dir

  @property
  def enforced_os(self):
    """What deps_os entries that are to be parsed."""
    return self._enforced_os

  @property
  def recursion_limit(self):
    """How recursive can each dependencies in DEPS file can load DEPS file."""
    return self._recursion_limit

  @property
  def try_recursedeps(self):
    """Whether to attempt using recursedeps-style recursion processing."""
    return True

  @property
  def target_os(self):
    # Alias of enforced_os.
    return self._enforced_os
#### gclient commands.
def CMDcleanup(parser, args):
  """Cleans up all working copies.

  Mostly svn-specific. Simply runs 'svn cleanup' for each module.
  """
  parser.add_option('--deps', dest='deps_os', metavar='OS_LIST',
                    help='override deps for the specified (comma-separated) '
                         'platform(s); \'all\' will process all deps_os '
                         'references')
  options, args = parser.parse_args(args)
  client = GClient.LoadCurrentConfig(options)
  if not client:
    raise gclient_utils.Error('client not configured; see \'gclient config\'')
  if options.verbose:
    # Echo the effective .gclient file; more legible than the parsed dict and
    # it may carry helpful comments.
    print(client.config_content)
  return client.RunOnDeps('cleanup', args)
@subcommand.usage('[command] [args ...]')
def CMDrecurse(parser, args):
  """Operates [command args ...] on all the dependencies.

  Runs a shell command on all entries.
  Sets GCLIENT_DEP_PATH enviroment variable as the dep's relative location to
  root directory of the checkout.
  """
  # Stop parsing at the first non-arg so that these go through to the command
  parser.disable_interspersed_args()
  parser.add_option('-s', '--scm', action='append', default=[],
                    help='Choose scm types to operate upon.')
  parser.add_option('-i', '--ignore', action='store_true',
                    help='Ignore non-zero return codes from subcommands.')
  parser.add_option('--prepend-dir', action='store_true',
                    help='Prepend relative dir for use with git <cmd> --null.')
  parser.add_option('--no-progress', action='store_true',
                    help='Disable progress bar that shows sub-command updates')
  options, args = parser.parse_args(args)
  if not args:
    print >> sys.stderr, 'Need to supply a command!'
    return 1
  root_and_entries = gclient_utils.GetGClientRootAndEntries()
  if not root_and_entries:
    print >> sys.stderr, (
        'You need to run gclient sync at least once to use \'recurse\'.\n'
        'This is because .gclient_entries needs to exist and be up to date.')
    return 1
  # Normalize options.scm to a set()
  scm_set = set()
  for scm in options.scm:
    scm_set.update(scm.split(','))
  options.scm = scm_set
  # 'recurse' only runs the given command; DEPS hooks are skipped.
  options.nohooks = True
  client = GClient.LoadCurrentConfig(options)
  return client.RunOnDeps('recurse', args, ignore_requirements=True,
                          progress=not options.no_progress)
@subcommand.usage('[args ...]')
def CMDfetch(parser, args):
  """Fetches upstream commits for all modules.

  Completely git-specific. Simply runs 'git fetch [args ...]' for each module.
  """
  options, args = parser.parse_args(args)
  # Delegate to 'recurse' restricted to git checkouts.
  return CMDrecurse(
      OptionParser(),
      ['--jobs=%d' % options.jobs, '--scm=git', 'git', 'fetch'] + args)
def CMDgrep(parser, args):
  """Greps through git repos managed by gclient.

  Runs 'git grep [args...]' for each module.
  """
  # We can't use optparse because it will try to parse arguments sent
  # to git grep and throw an error. :-(
  if not args or re.match('(-h|--help)$', args[0]):
    print >> sys.stderr, (
        'Usage: gclient grep [-j <N>] git-grep-args...\n\n'
        'Example: "gclient grep -j10 -A2 RefCountedBase" runs\n"git grep '
        '-A2 RefCountedBase" on each of gclient\'s git\nrepos with up to '
        '10 jobs.\n\nBonus: page output by appending "|& less -FRSX" to the'
        ' end of your query.'
        )
    return 1
  # Hand-parse an optional leading -j/--jobs so it reaches CMDrecurse instead
  # of git grep.
  jobs_arg = ['--jobs=1']
  if re.match(r'(-j|--jobs=)\d+$', args[0]):
    # '-jN' / '--jobs=N' form: single token.
    jobs_arg, args = args[:1], args[1:]
  elif re.match(r'(-j|--jobs)$', args[0]):
    # '-j N' form: flag plus value token.
    jobs_arg, args = args[:2], args[2:]
  return CMDrecurse(
      parser,
      jobs_arg + ['--ignore', '--prepend-dir', '--no-progress', '--scm=git',
                  'git', 'grep', '--null', '--color=Always'] + args)
@subcommand.usage('[url] [safesync url]')
def CMDconfig(parser, args):
  """Creates a .gclient file in the current directory.

  This specifies the configuration for further commands. After update/sync,
  top-level DEPS files in each module are read to determine dependent
  modules to operate on as well. If optional [url] parameter is
  provided, then configuration is read from a specified Subversion server
  URL.
  """
  # We do a little dance with the --gclientfile option. 'gclient config' is the
  # only command where it's acceptable to have both '--gclientfile' and '--spec'
  # arguments. So, we temporarily stash any --gclientfile parameter into
  # options.output_config_file until after the (gclientfile xor spec) error
  # check.
  parser.remove_option('--gclientfile')
  parser.add_option('--gclientfile', dest='output_config_file',
                    help='Specify an alternate .gclient file')
  parser.add_option('--name',
                    help='overrides the default name for the solution')
  parser.add_option('--deps-file', default='DEPS',
                    help='overrides the default name for the DEPS file for the'
                         'main solutions and all sub-dependencies')
  parser.add_option('--unmanaged', action='store_true', default=False,
                    help='overrides the default behavior to make it possible '
                         'to have the main solution untouched by gclient '
                         '(gclient will check out unmanaged dependencies but '
                         'will never sync them)')
  parser.add_option('--git-deps', action='store_true',
                    help='sets the deps file to ".DEPS.git" instead of "DEPS"')
  parser.add_option('--cache-dir',
                    help='(git only) Cache all git repos into this dir and do '
                         'shared clones from the cache, instead of cloning '
                         'directly from the remote. (experimental)')
  parser.set_defaults(config_filename=None)
  options, args = parser.parse_args(args)
  if options.output_config_file:
    # Un-stash the --gclientfile value (see the comment above).
    options.config_filename = options.output_config_file
  if ((options.spec and args) or len(args) > 2 or
      (not options.spec and not args)):
    parser.error('Inconsistent arguments. Use either --spec or one or 2 args')
  client = GClient('.', options)
  if options.spec:
    client.SetConfig(options.spec)
  else:
    base_url = args[0].rstrip('/')
    if options.name:
      # specify an alternate relpath for the given URL.
      name = options.name
    else:
      # Derive the checkout directory name from the URL's last component.
      name = base_url.split('/')[-1]
      if name.endswith('.git'):
        name = name[:-4]
    deps_file = '.DEPS.git' if options.git_deps else options.deps_file
    safesync_url = args[1] if len(args) > 1 else ''
    client.SetDefaultConfig(name, deps_file, base_url, safesync_url,
                            managed=not options.unmanaged,
                            cache_dir=options.cache_dir)
  client.SaveConfig()
  return 0
@subcommand.epilog("""Example:
  gclient pack > patch.txt
    generate simple patch for configured client and dependences
""")
def CMDpack(parser, args):
  """Generates a patch which can be applied at the root of the tree.

  Internally, runs 'svn diff'/'git diff' on each checked out module and
  dependencies, and performs minimal postprocessing of the output. The
  resulting patch is printed to stdout and can be applied to a freshly
  checked out tree via 'patch -p0 < patchfile'.
  """
  parser.add_option('--deps', dest='deps_os', metavar='OS_LIST',
                    help='override deps for the specified (comma-separated) '
                         'platform(s); \'all\' will process all deps_os '
                         'references')
  parser.remove_option('--jobs')
  options, args = parser.parse_args(args)
  # Force jobs to 1 so the stdout is not annotated with the thread ids
  options.jobs = 1
  client = GClient.LoadCurrentConfig(options)
  if not client:
    raise gclient_utils.Error('client not configured; see \'gclient config\'')
  if options.verbose:
    # Echo the effective .gclient file; more legible than the parsed dict and
    # it may carry helpful comments.
    print(client.config_content)
  return client.RunOnDeps('pack', args)
def CMDstatus(parser, args):
  """Shows modification status for every dependencies."""
  parser.add_option('--deps', dest='deps_os', metavar='OS_LIST',
                    help='override deps for the specified (comma-separated) '
                         'platform(s); \'all\' will process all deps_os '
                         'references')
  options, args = parser.parse_args(args)
  client = GClient.LoadCurrentConfig(options)
  if not client:
    raise gclient_utils.Error('client not configured; see \'gclient config\'')
  if options.verbose:
    # Echo the effective .gclient file; more legible than the parsed dict and
    # it may carry helpful comments.
    print(client.config_content)
  return client.RunOnDeps('status', args)
@subcommand.epilog("""Examples:
  gclient sync
      update files from SCM according to current configuration,
      *for modules which have changed since last update or sync*
  gclient sync --force
      update files from SCM according to current configuration, for
      all modules (useful for recovering files deleted from local copy)
  gclient sync --revision src@31000
      update src directory to r31000

JSON output format:
If the --output-json option is specified, the following document structure will
be emitted to the provided file. 'null' entries may occur for subprojects which
are present in the gclient solution, but were not processed (due to custom_deps,
os_deps, etc.)

{
  "solutions" : {
    "<name>": { # <name> is the posix-normalized path to the solution.
      "revision": [<svn rev int>|<git id hex string>|null],
      "scm": ["svn"|"git"|null],
    }
  }
}
""")
def CMDsync(parser, args):
  """Checkout/update all modules."""
  parser.add_option('-f', '--force', action='store_true',
                    help='force update even for unchanged modules')
  parser.add_option('-n', '--nohooks', action='store_true',
                    help='don\'t run hooks after the update is complete')
  parser.add_option('-p', '--noprehooks', action='store_true',
                    help='don\'t run pre-DEPS hooks', default=False)
  parser.add_option('-r', '--revision', action='append',
                    dest='revisions', metavar='REV', default=[],
                    help='Enforces revision/hash for the solutions with the '
                         'format src@rev. The src@ part is optional and can be '
                         'skipped. -r can be used multiple times when .gclient '
                         'has multiple solutions configured and will work even '
                         'if the src@ part is skipped. Note that specifying '
                         '--revision means your safesync_url gets ignored.')
  parser.add_option('--with_branch_heads', action='store_true',
                    help='Clone git "branch_heads" refspecs in addition to '
                         'the default refspecs. This adds about 1/2GB to a '
                         'full checkout. (git only)')
  parser.add_option('-t', '--transitive', action='store_true',
                    help='When a revision is specified (in the DEPS file or '
                         'with the command-line flag), transitively update '
                         'the dependencies to the date of the given revision. '
                         'Only supported for SVN repositories.')
  parser.add_option('-H', '--head', action='store_true',
                    help='skips any safesync_urls specified in '
                         'configured solutions and sync to head instead')
  parser.add_option('-D', '--delete_unversioned_trees', action='store_true',
                    help='Deletes from the working copy any dependencies that '
                         'have been removed since the last sync, as long as '
                         'there are no local modifications. When used with '
                         '--force, such dependencies are removed even if they '
                         'have local modifications. When used with --reset, '
                         'all untracked directories are removed from the '
                         'working copy, excluding those which are explicitly '
                         'ignored in the repository.')
  parser.add_option('-R', '--reset', action='store_true',
                    help='resets any local changes before updating (git only)')
  parser.add_option('-M', '--merge', action='store_true',
                    help='merge upstream changes instead of trying to '
                         'fast-forward or rebase')
  parser.add_option('--deps', dest='deps_os', metavar='OS_LIST',
                    help='override deps for the specified (comma-separated) '
                         'platform(s); \'all\' will process all deps_os '
                         'references')
  parser.add_option('-m', '--manually_grab_svn_rev', action='store_true',
                    help='Skip svn up whenever possible by requesting '
                         'actual HEAD revision from the repository')
  parser.add_option('--upstream', action='store_true',
                    help='Make repo state match upstream branch.')
  parser.add_option('--output-json',
                    help='Output a json document to this path containing '
                         'summary information about the sync.')
  parser.add_option('--shallow', action='store_true',
                    help='GIT ONLY - Do a shallow clone into the cache dir. '
                         'Requires Git 1.9+')
  parser.add_option('--ignore_locks', action='store_true',
                    help='GIT ONLY - Ignore cache locks.')
  (options, args) = parser.parse_args(args)
  client = GClient.LoadCurrentConfig(options)
  if not client:
    raise gclient_utils.Error('client not configured; see \'gclient config\'')
  if options.revisions and options.head:
    # TODO(maruel): Make it a parser.error if it doesn't break any builder.
    print('Warning: you cannot use both --head and --revision')
  if options.verbose:
    # Print out the .gclient file. This is longer than if we just printed the
    # client dict, but more legible, and it might contain helpful comments.
    print(client.config_content)
  ret = client.RunOnDeps('update', args)
  if options.output_json:
    # Emit the document described in the epilog above.
    slns = {}
    for d in client.subtree(True):
      normed = d.name.replace('\\', '/').rstrip('/') + '/'
      slns[normed] = {
          'revision': d.got_revision,
          'scm': d.used_scm.name if d.used_scm else None,
          'url': str(d.url) if d.url else None,
      }
    # NOTE(review): opened in 'wb' while json.dump writes str -- fine on
    # Python 2, would need 'w' on Python 3.
    with open(options.output_json, 'wb') as f:
      json.dump({'solutions': slns}, f)
  return ret
# 'gclient update' is an alias for 'gclient sync'.
CMDupdate = CMDsync
def CMDdiff(parser, args):
  """Displays local diff for every dependencies."""
  parser.add_option('--deps', dest='deps_os', metavar='OS_LIST',
                    help='override deps for the specified (comma-separated) '
                         'platform(s); \'all\' will process all deps_os '
                         'references')
  options, args = parser.parse_args(args)
  client = GClient.LoadCurrentConfig(options)
  if not client:
    raise gclient_utils.Error('client not configured; see \'gclient config\'')
  if options.verbose:
    # Echo the effective .gclient file; more legible than the parsed dict and
    # it may carry helpful comments.
    print(client.config_content)
  return client.RunOnDeps('diff', args)
def CMDrevert(parser, args):
    """Reverts all modifications in every dependencies.

    That's the nuclear option to get back to a 'clean' state. It removes
    anything that shows up in svn status."""
    parser.add_option('--deps', dest='deps_os', metavar='OS_LIST',
                      help=('override deps for the specified (comma-separated) '
                            'platform(s); \'all\' will process all deps_os '
                            'references'))
    parser.add_option('-n', '--nohooks', action='store_true',
                      help='don\'t run hooks after the revert is complete')
    parser.add_option('-p', '--noprehooks', action='store_true', default=False,
                      help='don\'t run pre-DEPS hooks')
    parser.add_option('--upstream', action='store_true',
                      help='Make repo state match upstream branch.')
    options, args = parser.parse_args(args)
    # Reverting always implies --force; never reset or prune unversioned
    # trees from this command.
    options.force = True
    options.reset = False
    options.delete_unversioned_trees = False
    client = GClient.LoadCurrentConfig(options)
    if not client:
        raise gclient_utils.Error("client not configured; see 'gclient config'")
    return client.RunOnDeps('revert', args)
def CMDrunhooks(parser, args):
    """Runs hooks for files that have been modified in the local working copy."""
    parser.add_option('--deps', dest='deps_os', metavar='OS_LIST',
                      help=('override deps for the specified (comma-separated) '
                            'platform(s); \'all\' will process all deps_os '
                            'references'))
    parser.add_option('-f', '--force', action='store_true', default=True,
                      help='Deprecated. No effect.')
    options, args = parser.parse_args(args)
    client = GClient.LoadCurrentConfig(options)
    if not client:
        raise gclient_utils.Error("client not configured; see 'gclient config'")
    if options.verbose:
        # Echo the .gclient file itself: more legible than the parsed client
        # dict, and it may carry helpful comments.
        print(client.config_content)
    # Force hook execution unconditionally; the (deprecated) --force flag has
    # no effect on this command.
    options.force = True
    options.nohooks = False
    return client.RunOnDeps('runhooks', args)
def CMDrevinfo(parser, args):
    """Outputs revision info mapping for the client and its dependencies.

    This allows the capture of an overall 'revision' for the source tree that
    can be used to reproduce the same tree in the future. It is only useful for
    'unpinned dependencies', i.e. DEPS/deps references without a svn revision
    number or a git hash. A git branch name isn't 'pinned' since the actual
    commit can change.
    """
    parser.add_option('--deps', dest='deps_os', metavar='OS_LIST',
                      help=('override deps for the specified (comma-separated) '
                            'platform(s); \'all\' will process all deps_os '
                            'references'))
    parser.add_option('-a', '--actual', action='store_true',
                      help=('gets the actual checked out revisions instead of the '
                            'ones specified in the DEPS and .gclient files'))
    parser.add_option('-s', '--snapshot', action='store_true',
                      help=('creates a snapshot .gclient file of the current '
                            'version of all repositories to reproduce the tree, '
                            'implies -a'))
    options, args = parser.parse_args(args)
    client = GClient.LoadCurrentConfig(options)
    if not client:
        raise gclient_utils.Error("client not configured; see 'gclient config'")
    client.PrintRevInfo()
    return 0
def CMDhookinfo(parser, args):
    """Outputs the hooks that would be run by `gclient runhooks`."""
    (options, args) = parser.parse_args(args)
    options.force = True
    client = GClient.LoadCurrentConfig(options)
    if not client:
        raise gclient_utils.Error('client not configured; see \'gclient config\'')
    # Walk the dependency tree without an action so hooks are discovered but
    # not executed.
    client.RunOnDeps(None, [])
    # Python 2 print statement: each hook's argv is space-joined, hooks are
    # separated by '; '.
    print '; '.join(' '.join(hook) for hook in client.GetHooks(options))
    return 0
class OptionParser(optparse.OptionParser):
    """Option parser shared by every gclient subcommand.

    Registers the common flags (--jobs, --verbose, --gclientfile, --spec) and
    normalizes the parsed options so all subcommands can rely on the same
    attribute set being present.
    """

    # Default .gclient file name; overridable through the GCLIENT_FILE
    # environment variable.
    gclientfile_default = os.environ.get('GCLIENT_FILE', '.gclient')

    def __init__(self, **kwargs):
        optparse.OptionParser.__init__(
            self, version='%prog ' + __version__, **kwargs)

        # Some arm boards have issues with parallel sync.
        if platform.machine().startswith('arm'):
            jobs = 1
        else:
            # NOTE(review): max(8, cpus) means "at least 8 jobs", even on
            # machines with fewer cores — presumably intentional for I/O-bound
            # SCM work, but worth confirming.
            jobs = max(8, gclient_utils.NumLocalCpus())
        # cmp: 2013/06/19
        # Temporary workaround to lower bot-load on SVN server.
        # Bypassed if a bot_update flag is detected.
        if (os.environ.get('CHROME_HEADLESS') == '1' and
                not os.path.exists('update.flag')):
            jobs = 1

        self.add_option(
            '-j', '--jobs', default=jobs, type='int',
            help='Specify how many SCM commands can run in parallel; defaults to '
                 '%default on this machine')
        self.add_option(
            '-v', '--verbose', action='count', default=0,
            help='Produces additional output for diagnostics. Can be used up to '
                 'three times for more logging info.')
        self.add_option(
            '--gclientfile', dest='config_filename',
            help='Specify an alternate %s file' % self.gclientfile_default)
        self.add_option(
            '--spec',
            help='create a gclient file containing the provided string. Due to '
                 'Cygwin/Python brokenness, it can\'t contain any newlines.')
        self.add_option(
            '--no-nag-max', default=False, action='store_true',
            help='Ignored for backwards compatibility.')

    def parse_args(self, args=None, values=None):
        """Integrates standard options processing.

        Configures logging from --verbose, validates the mutually-exclusive
        --gclientfile/--spec flags, and back-fills option attributes that
        GClient.RunOnDeps expects even when the subcommand did not define them.
        """
        options, args = optparse.OptionParser.parse_args(self, args, values)
        levels = [logging.ERROR, logging.WARNING, logging.INFO, logging.DEBUG]
        logging.basicConfig(
            level=levels[min(options.verbose, len(levels) - 1)],
            format='%(module)s(%(lineno)d) %(funcName)s:%(message)s')
        if options.config_filename and options.spec:
            # BUG FIX: the user-facing message previously read 'specifiy'.
            self.error('Cannot specify both --gclientfile and --spec')
        if (options.config_filename and
                options.config_filename != os.path.basename(options.config_filename)):
            self.error('--gclientfile target must be a filename, not a path')
        if not options.config_filename:
            options.config_filename = self.gclientfile_default
        options.entries_filename = options.config_filename + '_entries'
        if options.jobs < 1:
            self.error('--jobs must be 1 or higher')

        # These hacks need to die.
        if not hasattr(options, 'revisions'):
            # GClient.RunOnDeps expects it even if not applicable.
            options.revisions = []
        if not hasattr(options, 'head'):
            options.head = None
        if not hasattr(options, 'nohooks'):
            options.nohooks = True
        if not hasattr(options, 'noprehooks'):
            options.noprehooks = True
        if not hasattr(options, 'deps_os'):
            options.deps_os = None
        if not hasattr(options, 'manually_grab_svn_rev'):
            options.manually_grab_svn_rev = None
        if not hasattr(options, 'force'):
            options.force = None
        return (options, args)
def disable_buffering():
    # Buildbot may kill us during lengthy operations if stdout goes quiet,
    # and Python has a strong tendency to buffer sys.stdout. Force
    # auto-flushing, then annotate every line with the writing thread's id.
    sys.stdout = gclient_utils.MakeFileAnnotated(
        gclient_utils.MakeFileAutoFlush(sys.stdout))
def Main(argv):
    """Doesn't parse the arguments here, just find the right subcommand to
    execute."""
    # NOTE: Python 2 print-to-stderr syntax is used throughout this function.
    # Require Python >= 2.6.
    if sys.hexversion < 0x02060000:
        print >> sys.stderr, (
            '\nYour python version %s is unsupported, please upgrade.\n' %
            sys.version.split(' ', 1)[0])
        return 2
    if not sys.executable:
        print >> sys.stderr, (
            '\nPython cannot find the location of it\'s own executable.\n')
        return 2
    fix_encoding.fix_encoding()
    disable_buffering()
    colorama.init()
    dispatcher = subcommand.CommandDispatcher(__name__)
    try:
        return dispatcher.execute(OptionParser(), argv)
    except KeyboardInterrupt:
        # Kill lingering child SCM processes before propagating the interrupt.
        gclient_utils.GClientChildren.KillAllRemainingChildren()
        raise
    except (gclient_utils.Error, subprocess2.CalledProcessError), e:
        # Known operational failures: report and exit nonzero (Python 2
        # except-clause syntax).
        print >> sys.stderr, 'Error: %s' % str(e)
        return 1
    finally:
        # Always surface accumulated warnings, even on the error paths.
        gclient_utils.PrintWarnings()
# Script entry point: strip argv[0] and exit with Main's return code.
if '__main__' == __name__:
    sys.exit(Main(sys.argv[1:]))
# vim: ts=2:sw=2:tw=80:et:
| {
"content_hash": "ad275dc8cd9d74ac03183ddbca28f18b",
"timestamp": "",
"source": "github",
"line_count": 2061,
"max_line_length": 80,
"avg_line_length": 39.30228044638525,
"alnum_prop": 0.6245648255598627,
"repo_name": "Chilledheart/depot_tools",
"id": "b60884a0c2742bd55b43f01ddc5e5380af216bff",
"size": "81191",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gclient.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "5028"
},
{
"name": "CSS",
"bytes": "5392"
},
{
"name": "JavaScript",
"bytes": "15796"
},
{
"name": "PHP",
"bytes": "617"
},
{
"name": "Python",
"bytes": "5084358"
},
{
"name": "Shell",
"bytes": "98561"
}
],
"symlink_target": ""
} |
import json
import os
import errno
import re
import subprocess
import time
import urllib2
import urlparse
import cherrypy
import autoupdate_lib
import common_util
import log_util
# Module-local log function.
def _Log(message, *args):
    """Log *message* (lazy %-style *args*) under this module's 'UPDATE' tag."""
    return log_util.LogWithTag('UPDATE', message, *args)
# File names of the update payloads and their JSON metadata as served from a
# payload directory; the KERNEL_* variants are the non-legacy counterparts.
UPDATE_FILE = 'update.gz'
KERNEL_UPDATE_FILE = 'kernel_update.gz'
METADATA_FILE = 'update.meta'
KERNEL_METADATA_FILE = 'kernel_update.meta'
# Sub-directory (relative to the static dir) holding cached generated payloads.
CACHE_DIR = 'cache'
class AutoupdateError(Exception):
    """Error raised for update-generation/provisioning failures in this module."""
def _ChangeUrlPort(url, new_port):
"""Return the URL passed in with a different port"""
scheme, netloc, path, query, fragment = urlparse.urlsplit(url)
host_port = netloc.split(':')
if len(host_port) == 1:
host_port.append(new_port)
else:
host_port[1] = new_port
print host_port
netloc = "%s:%s" % tuple(host_port)
return urlparse.urlunsplit((scheme, netloc, path, query, fragment))
def _NonePathJoin(*args):
"""os.path.join that filters None's from the argument list."""
return os.path.join(*filter(None, args))
class HostInfo(object):
    """Tracks update-related state for one individual host.

    Members:
      attrs: Static attributes (legacy)
      log: Complete log of recorded client entries
    """

    def __init__(self):
        # Current attribute values pertaining to this host.
        self.attrs = {}
        # Chronological list of recorded event dictionaries, each stamped with
        # the time it was added.
        self.log = []

    def __repr__(self):
        return 'attrs=%s, log=%s' % (self.attrs, self.log)

    def AddLogEntry(self, entry):
        """Timestamp *entry* in place and append it to this host's log."""
        assert 'timestamp' not in entry, 'Oops, timestamp field already in use'
        entry['timestamp'] = time.strftime('%Y-%m-%d %H:%M:%S')
        self.log.append(entry)
class HostInfoTable(object):
    """Maps host identifiers (normally IP addresses) to HostInfo records.

    Members:
      table: Table of information on hosts.
    """

    def __init__(self):
        # Host information keyed by host id (normally an IP address).
        self.table = {}

    def __repr__(self):
        return '%s' % self.table

    def GetInitHostInfo(self, host_id):
        """Return the host's info object, creating a fresh one if none exists."""
        if host_id not in self.table:
            self.table[host_id] = HostInfo()
        return self.table[host_id]

    def GetHostInfo(self, host_id):
        """Return the info object for *host_id*, or None if unknown."""
        return self.table.get(host_id)
class UpdateMetadata(object):
    """Value object describing an update payload (hashes, size, delta flag)."""

    def __init__(self, sha1, sha256, size, is_delta_format):
        self.sha1, self.sha256 = sha1, sha256
        self.size = size
        self.is_delta_format = is_delta_format
class Autoupdate(object):
"""Class that contains functionality that handles Chrome OS update pings.
Members:
serve_only: serve only pre-built updates. static_dir must contain
update.gz.
use_test_image: use coreos_test_image.bin rather than the standard.
urlbase: base URL, other than devserver, for update images.
forced_image: path to an image to use for all updates.
payload_path: path to pre-generated payload to serve.
src_image: if specified, creates a delta payload from this image.
proxy_port: port of local proxy to tell client to connect to you
through.
vm: set for VM images (doesn't patch kernel)
board: board for the image. Needed for pre-generating of updates.
copy_to_static_root: copies images generated from the cache to ~/static.
private_key: path to private key in PEM format.
critical_update: whether provisioned payload is critical.
remote_payload: whether provisioned payload is remotely staged.
max_updates: maximum number of updates we'll try to provision.
host_log: record full history of host update events.
"""
_PAYLOAD_URL_PREFIX = '/static/'
_FILEINFO_URL_PREFIX = '/api/fileinfo/'
SHA1_ATTR = 'sha1'
SHA256_ATTR = 'sha256'
SIZE_ATTR = 'size'
ISDELTA_ATTR = 'is_delta'
def __init__(self, serve_only=None, test_image=False, urlbase=None,
forced_image=None, payload_path=None,
proxy_port=None, src_image='', vm=False, board=None,
copy_to_static_root=True, private_key=None,
critical_update=False, remote_payload=False, max_updates= -1,
host_log=False, devserver_dir=None, scripts_dir=None,
static_dir=None):
self.devserver_dir = devserver_dir,
self.scripts_dir = scripts_dir
self.static_dir = static_dir
self.serve_only = serve_only
self.use_test_image = test_image
if urlbase:
self.urlbase = urlbase
else:
self.urlbase = None
self.forced_image = forced_image
self.payload_path = payload_path
self.src_image = src_image
self.proxy_port = proxy_port
self.vm = vm
self.board = board
self.copy_to_static_root = copy_to_static_root
self.private_key = private_key
self.critical_update = critical_update
self.remote_payload = remote_payload
self.max_updates = max_updates
self.host_log = host_log
# Path to pre-generated file.
self.pregenerated_path = None
# Initialize empty host info cache. Used to keep track of various bits of
# information about a given host. A host is identified by its IP address.
# The info stored for each host includes a complete log of events for this
# host, as well as a dictionary of current attributes derived from events.
self.host_infos = HostInfoTable()
@classmethod
def _ReadMetadataFromStream(cls, stream):
"""Returns metadata obj from input json stream that implements .read()."""
file_attr_dict = {}
try:
file_attr_dict = json.loads(stream.read())
except IOError:
return None
sha1 = file_attr_dict.get(cls.SHA1_ATTR)
sha256 = file_attr_dict.get(cls.SHA256_ATTR)
size = file_attr_dict.get(cls.SIZE_ATTR)
is_delta = file_attr_dict.get(cls.ISDELTA_ATTR)
return UpdateMetadata(sha1, sha256, size, is_delta)
@staticmethod
def _ReadMetadataFromFile(payload_dir, legacy_image):
"""Returns metadata object from the metadata_file in the payload_dir"""
if legacy_image:
metadata_file = os.path.join(payload_dir, METADATA_FILE)
else:
metadata_file = os.path.join(payload_dir, KERNEL_METADATA_FILE)
if os.path.exists(metadata_file):
with open(metadata_file, 'r') as metadata_stream:
return Autoupdate._ReadMetadataFromStream(metadata_stream)
@classmethod
def _StoreMetadataToFile(cls, payload_dir, metadata_obj, legacy_image):
"""Stores metadata object into the metadata_file of the payload_dir"""
file_dict = {cls.SHA1_ATTR: metadata_obj.sha1,
cls.SHA256_ATTR: metadata_obj.sha256,
cls.SIZE_ATTR: metadata_obj.size,
cls.ISDELTA_ATTR: metadata_obj.is_delta_format}
if legacy_image:
metadata_file = os.path.join(payload_dir, METADATA_FILE)
else:
metadata_file = os.path.join(payload_dir, KERNEL_METADATA_FILE)
with open(metadata_file, 'w') as file_handle:
json.dump(file_dict, file_handle)
def _GetLatestImageDir(self, board):
"""Returns the latest image dir based on shell script."""
cmd = '%s/get_latest_image.sh --board %s' % (self.scripts_dir, board)
return os.popen(cmd).read().strip()
@staticmethod
def _GetVersionFromDir(image_dir):
"""Returns the version of the image based on version.txt."""
with open('%s/version.txt' % image_dir, 'r') as ver_file:
for line in ver_file:
key, _, value = line.partition('=')
if key == 'COREOS_VERSION':
return value.strip('"\'\t ')
raise AutoupdateError('Failed to parse version.txt in %s' % image_dir)
@staticmethod
def _CanUpdate(client_version, latest_version):
"""Returns true if the latest_version is greater than the client_version.
"""
_Log('client version %s latest version %s', client_version, latest_version)
client_tokens = [int(i) for i in re.split('[^0-9]', client_version) if i]
latest_tokens = [int(i) for i in re.split('[^0-9]', latest_version) if i]
return latest_tokens > client_tokens
def _GetImageName(self):
"""Returns the name of the image that should be used."""
if self.use_test_image:
image_name = 'coreos_test_image.bin'
else:
image_name = 'coreos_developer_image.bin'
return image_name
@staticmethod
def _IsDeltaFormatFile(filename):
try:
file_handle = open(filename, 'r')
delta_magic = 'CrAU'
magic = file_handle.read(len(delta_magic))
return magic == delta_magic
except IOError:
# For unit tests, we may not have real files, so it's ok to
# ignore these IOErrors. In any case, this value is not being
# used in update_engine at all as of now.
return False
def GenerateUpdateFile(self, src_image, image_path, output_dir,
legacy_image):
"""Generates an update gz given a full path to an image.
Args:
image_path: Full path to image.
Raises:
subprocess.CalledProcessError if the update generator fails to generate a
stateful payload.
"""
if legacy_image:
update_path = os.path.join(output_dir, UPDATE_FILE)
else:
update_path = os.path.join(output_dir, KERNEL_UPDATE_FILE)
_Log('Generating update image %s', update_path)
update_command = [
'cros_generate_update_payload',
'--image', image_path,
'--output', update_path,
]
if not legacy_image:
update_command.extend(['--include_kernel'])
if src_image:
update_command.extend(['--src_image', src_image])
if self.private_key:
update_command.extend(['--private_key', self.private_key])
_Log('Running %s', ' '.join(update_command))
subprocess.check_call(update_command)
def FindCachedUpdateImageSubDir(self, src_image, dest_image):
"""Find directory to store a cached update.
Given one, or two images for an update, this finds which cache directory
should hold the update files, even if they don't exist yet.
Returns:
A directory path for storing a cached update, of the following form:
Non-delta updates:
CACHE_DIR/<dest_hash>
Delta updates:
CACHE_DIR/<src_hash>_<dest_hash>
Signed updates (self.private_key):
CACHE_DIR/<src_hash>_<dest_hash>+<private_key_hash>
"""
update_dir = ''
if src_image:
update_dir += common_util.GetFileMd5(src_image) + '_'
update_dir += common_util.GetFileMd5(dest_image)
if self.private_key:
update_dir += '+' + common_util.GetFileMd5(self.private_key)
if not self.vm:
update_dir += '+patched_kernel'
return os.path.join(CACHE_DIR, update_dir)
def GenerateUpdateImage(self, image_path, output_dir, legacy_image):
"""Force generates an update payload based on the given image_path.
Args:
src_image: image we are updating from (Null/empty for non-delta)
image_path: full path to the image.
output_dir: the directory to write the update payloads to
Raises:
AutoupdateError if it failed to generate either update or stateful
payload.
"""
_Log('Generating update for image %s', image_path)
try:
os.makedirs(output_dir)
except OSError as e:
if e.errno == errno.EEXIST:
pass
try:
self.GenerateUpdateFile(self.src_image, image_path, output_dir,
legacy_image)
except subprocess.CalledProcessError:
os.system('rm -rf "%s"' % output_dir)
raise AutoupdateError('Failed to generate update in %s' % output_dir)
def GenerateUpdateImageWithCache(self, image_path, static_image_dir,
legacy_image):
"""Force generates an update payload based on the given image_path.
Args:
image_path: full path to the image.
static_image_dir: the directory to move images to after generating.
Returns:
update directory relative to static_image_dir. None if it should
serve from the static_image_dir.
Raises:
AutoupdateError if it we need to generate a payload and fail to do so.
"""
_Log('Generating update for src %s image %s', self.src_image, image_path)
# Which sub_dir of static_image_dir should hold our cached update image
cache_sub_dir = self.FindCachedUpdateImageSubDir(self.src_image, image_path)
_Log('Caching in sub_dir "%s"', cache_sub_dir)
# The cached payloads exist in a cache dir
if legacy_image:
cache_update_payload = os.path.join(static_image_dir,
cache_sub_dir, UPDATE_FILE)
else:
cache_update_payload = os.path.join(static_image_dir,
cache_sub_dir, KERNEL_UPDATE_FILE)
full_cache_dir = os.path.join(static_image_dir, cache_sub_dir)
# Check to see if this cache directory is valid.
if not os.path.exists(cache_update_payload):
self.GenerateUpdateImage(image_path, full_cache_dir, legacy_image)
# Generate the cache file.
self.GetLocalPayloadAttrs(full_cache_dir, legacy_image)
if legacy_image:
cache_metadata_file = os.path.join(full_cache_dir, METADATA_FILE)
else:
cache_metadata_file = os.path.join(full_cache_dir, KERNEL_METADATA_FILE)
# Generation complete, copy if requested.
if self.copy_to_static_root:
# The final results exist directly in static
if legacy_image:
update_payload = os.path.join(static_image_dir,
UPDATE_FILE)
metadata_file = os.path.join(static_image_dir, METADATA_FILE)
else:
update_payload = os.path.join(static_image_dir,
KERNEL_UPDATE_FILE)
metadata_file = os.path.join(static_image_dir, KERNEL_METADATA_FILE)
common_util.CopyFile(cache_update_payload, update_payload)
common_util.CopyFile(cache_metadata_file, metadata_file)
return None
else:
return cache_sub_dir
def GenerateLatestUpdateImage(self, board, client_version,
static_image_dir, legacy_image):
"""Generates an update using the latest image that has been built.
This will only generate an update if the newest update is newer than that
on the client or client_version is 'ForcedUpdate'.
Args:
board: Name of the board.
client_version: Current version of the client or 'ForcedUpdate'
static_image_dir: the directory to move images to after generating.
Returns:
Name of the update directory relative to the static dir. None if it should
serve from the static_image_dir.
Raises:
AutoupdateError if it failed to generate the payload or can't update
the given client_version.
"""
latest_image_dir = self._GetLatestImageDir(board)
latest_version = self._GetVersionFromDir(latest_image_dir)
latest_image_path = os.path.join(latest_image_dir, self._GetImageName())
# Check to see whether or not we should update.
if client_version != 'ForcedUpdate' and not self._CanUpdate(
client_version, latest_version):
raise AutoupdateError('Update check received but no update available '
'for client')
return self.GenerateUpdateImageWithCache(latest_image_path,
static_image_dir=static_image_dir,
legacy_image=legacy_image)
def GenerateUpdatePayload(self, board, client_version, static_image_dir,
legacy_image):
"""Generates an update for an image and returns the relative payload dir.
Returns:
payload dir relative to static_image_dir. None if it should
serve from the static_image_dir.
Raises:
AutoupdateError if it failed to generate the payload.
"""
if legacy_image:
dest_path = os.path.join(static_image_dir, UPDATE_FILE)
else:
dest_path = os.path.join(static_image_dir, KERNEL_UPDATE_FILE)
if self.payload_path:
# If the forced payload is not already in our static_image_dir,
# copy it there.
src_path = os.path.abspath(self.payload_path)
# Only copy the files if the source directory is different from dest.
if os.path.dirname(src_path) != os.path.abspath(static_image_dir):
common_util.CopyFile(src_path, dest_path)
# Serve from the main directory so rel_path is None.
return None
elif self.forced_image:
return self.GenerateUpdateImageWithCache(
self.forced_image,
static_image_dir=static_image_dir,
legacy_image=legacy_image)
else:
if not board:
raise AutoupdateError(
'Failed to generate update. '
'You must set --board when pre-generating latest update.')
return self.GenerateLatestUpdateImage(board, client_version,
static_image_dir, legacy_image)
def PreGenerateUpdate(self):
"""Pre-generates an update and prints out the relative path it.
Returns relative path of the update.
Raises:
AutoupdateError if it failed to generate the payload.
"""
_Log('Pre-generating the update payload')
# Does not work with labels so just use static dir.
pregenerated_update = self.GenerateUpdatePayload(self.board, '0.0.0.0',
self.static_dir)
print 'PREGENERATED_UPDATE=%s' % _NonePathJoin(pregenerated_update,
UPDATE_FILE)
return pregenerated_update
def _GetRemotePayloadAttrs(self, url):
"""Returns hashes, size and delta flag of a remote update payload.
Obtain attributes of a payload file available on a remote devserver. This
is based on the assumption that the payload URL uses the /static prefix. We
need to make sure that both clients (requests) and remote devserver
(provisioning) preserve this invariant.
Args:
url: URL of statically staged remote file (http://host:port/static/...)
Returns:
A tuple containing the SHA1, SHA256, file size and whether or not it's a
delta payload (Boolean).
"""
if self._PAYLOAD_URL_PREFIX not in url:
raise AutoupdateError(
'Payload URL does not have the expected prefix (%s)' %
self._PAYLOAD_URL_PREFIX)
fileinfo_url = url.replace(self._PAYLOAD_URL_PREFIX,
self._FILEINFO_URL_PREFIX)
_Log('Retrieving file info for remote payload via %s', fileinfo_url)
try:
conn = urllib2.urlopen(fileinfo_url)
metadata_obj = Autoupdate._ReadMetadataFromStream(conn)
# These fields are required for remote calls.
if not metadata_obj:
raise AutoupdateError('Failed to obtain remote payload info')
if not metadata_obj.is_delta_format:
metadata_obj.is_delta_format = ('_mton' in url) or ('_nton' in url)
return metadata_obj
except IOError as e:
raise AutoupdateError('Failed to obtain remote payload info: %s', e)
def GetLocalPayloadAttrs(self, payload_dir, legacy_image):
"""Returns hashes, size and delta flag of a local update payload.
Args:
payload_dir: Path to the directory the payload is in.
Returns:
A tuple containing the SHA1, SHA256, file size and whether or not it's a
delta payload (Boolean).
"""
if legacy_image:
filename = os.path.join(payload_dir, UPDATE_FILE)
else:
filename = os.path.join(payload_dir, KERNEL_UPDATE_FILE)
if not os.path.exists(filename):
raise AutoupdateError('%s not present in payload dir %s' %
(filename, payload_dir))
metadata_obj = Autoupdate._ReadMetadataFromFile(payload_dir, legacy_image)
if not metadata_obj or not (metadata_obj.sha1 and
metadata_obj.sha256 and
metadata_obj.size):
sha1 = common_util.GetFileSha1(filename)
sha256 = common_util.GetFileSha256(filename)
size = common_util.GetFileSize(filename)
is_delta_format = self._IsDeltaFormatFile(filename)
metadata_obj = UpdateMetadata(sha1, sha256, size, is_delta_format)
Autoupdate._StoreMetadataToFile(payload_dir, metadata_obj, legacy_image)
return metadata_obj
def _ProcessUpdateComponents(self, app, event):
"""Processes the app and event components of an update request.
Returns tuple containing forced_update_label, client_version, board and
app_id
"""
# Initialize an empty dictionary for event attributes to log.
log_message = {}
# Determine request IP, strip any IPv6 data for simplicity.
client_ip = cherrypy.request.remote.ip.split(':')[-1]
# Obtain (or init) info object for this client.
curr_host_info = self.host_infos.GetInitHostInfo(client_ip)
client_version = 'ForcedUpdate'
board = None
app_id = None
if app:
client_version = app.getAttribute('version')
channel = app.getAttribute('track')
board = (app.hasAttribute('board') and app.getAttribute('board')
or self.board)
app_id = app.getAttribute('appid')
# Add attributes to log message
log_message['version'] = client_version
log_message['track'] = channel
log_message['board'] = board
curr_host_info.attrs['last_known_version'] = client_version
if event:
event_result = int(event[0].getAttribute('eventresult'))
event_type = int(event[0].getAttribute('eventtype'))
client_previous_version = (event[0].getAttribute('previousversion')
if event[0].hasAttribute('previousversion')
else None)
# Store attributes to legacy host info structure
curr_host_info.attrs['last_event_status'] = event_result
curr_host_info.attrs['last_event_type'] = event_type
# Add attributes to log message
log_message['event_result'] = event_result
log_message['event_type'] = event_type
if client_previous_version is not None:
log_message['previous_version'] = client_previous_version
# Log host event, if so instructed.
if self.host_log:
curr_host_info.AddLogEntry(log_message)
return (curr_host_info.attrs.pop('forced_update_label', None),
client_version, board, app_id)
def _GetStaticUrl(self):
"""Returns the static url base that should prefix all payload responses."""
x_forwarded_host = cherrypy.request.headers.get('X-Forwarded-Host')
if x_forwarded_host:
hostname = 'http://' + x_forwarded_host
else:
hostname = cherrypy.request.base
if self.urlbase:
static_urlbase = self.urlbase
elif self.serve_only:
static_urlbase = '%s/static/archive' % hostname
else:
static_urlbase = '%s/static' % hostname
# If we have a proxy port, adjust the URL we instruct the client to
# use to go through the proxy.
if self.proxy_port:
static_urlbase = _ChangeUrlPort(static_urlbase, self.proxy_port)
_Log('Using static url base %s', static_urlbase)
_Log('Handling update ping as %s', hostname)
return static_urlbase
def HandleUpdatePing(self, data, label=None):
"""Handles an update ping from an update client.
Args:
data: XML blob from client.
label: optional label for the update.
Returns:
Update payload message for client.
"""
# Get the static url base that will form that base of our update url e.g.
# http://hostname:8080/static/update.gz.
static_urlbase = self._GetStaticUrl()
_Log(data)
# Parse the XML we got into the components we care about.
protocol, app, event, update_check = autoupdate_lib.ParseUpdateRequest(data)
# #########################################################################
# Process attributes of the update check.
forced_update_label, client_version, board, app_id = self._ProcessUpdateComponents(
app, event)
if app_id == '{e96281a6-d1af-4bde-9a0a-97b76e56dc57}':
legacy_image = True
else:
legacy_image = False
# We only process update_checks in the update rpc.
if not update_check:
_Log('Non-update check received. Returning blank payload')
# TODO(sosa): Generate correct non-updatecheck payload to better test
# update clients.
return autoupdate_lib.GetNoUpdateResponse(protocol)
# In case max_updates is used, return no response if max reached.
if self.max_updates > 0:
self.max_updates -= 1
elif self.max_updates == 0:
_Log('Request received but max number of updates handled')
return autoupdate_lib.GetNoUpdateResponse(protocol)
_Log('Update Check Received. Client is using protocol version: %s',
protocol)
if forced_update_label:
if label:
_Log('Label: %s set but being overwritten to %s by request', label,
forced_update_label)
label = forced_update_label
# #########################################################################
# Finally its time to generate the omaha response to give to client that
# lets them know where to find the payload and its associated metadata.
metadata_obj = None
try:
# Are we provisioning a remote or local payload?
if self.remote_payload:
# If no explicit label was provided, use the value of --payload.
if not label:
label = self.payload_path
# Form the URL of the update payload. This assumes that the payload
# file name is a devserver constant (which currently is the case).
url = '/'.join(filter(None, [static_urlbase, label, UPDATE_FILE]))
# Get remote payload attributes.
metadata_obj = self._GetRemotePayloadAttrs(url)
else:
static_image_dir = _NonePathJoin(self.static_dir, label)
rel_path = None
# Serving files only, don't generate an update.
if not self.serve_only:
# Generate payload if necessary.
rel_path = self.GenerateUpdatePayload(board, client_version,
static_image_dir, legacy_image)
if legacy_image:
filename = UPDATE_FILE
else:
filename = KERNEL_UPDATE_FILE
url = '/'.join(filter(None, [static_urlbase, label, rel_path,
filename]))
local_payload_dir = _NonePathJoin(static_image_dir, rel_path)
metadata_obj = self.GetLocalPayloadAttrs(local_payload_dir, legacy_image)
except AutoupdateError as e:
# Raised if we fail to generate an update payload.
_Log('Failed to process an update: %r', e)
return autoupdate_lib.GetNoUpdateResponse(protocol)
_Log('Responding to client to use url %s to get image', url)
return autoupdate_lib.GetUpdateResponse(
metadata_obj.sha1, metadata_obj.sha256, metadata_obj.size, url,
metadata_obj.is_delta_format, protocol, self.critical_update)
def HandleHostInfoPing(self, ip):
"""Returns host info dictionary for the given IP in JSON format."""
assert ip, 'No ip provided.'
if ip in self.host_infos.table:
return json.dumps(self.host_infos.GetHostInfo(ip).attrs)
def HandleHostLogPing(self, ip):
"""Returns a complete log of events for host in JSON format."""
# If all events requested, return a dictionary of logs keyed by IP address.
if ip == 'all':
return json.dumps(
dict([(key, self.host_infos.table[key].log)
for key in self.host_infos.table]))
# Otherwise we're looking for a specific IP address, so find its log.
if ip in self.host_infos.table:
return json.dumps(self.host_infos.GetHostInfo(ip).log)
# If no events were logged for this IP, return an empty log.
return json.dumps([])
def HandleSetUpdatePing(self, ip, label):
    """Sets forced_update_label for a given host."""
    assert ip, 'No ip provided.'
    assert label, 'No label provided.'
    # Create-or-get the host record, then pin it to the forced label.
    host_info = self.host_infos.GetInitHostInfo(ip)
    host_info.attrs['forced_update_label'] = label
| {
"content_hash": "d69cea69ed3844ed12f050288578a03d",
"timestamp": "",
"source": "github",
"line_count": 768,
"max_line_length": 87,
"avg_line_length": 37.053385416666664,
"alnum_prop": 0.6498576800084338,
"repo_name": "coreos/dev-util",
"id": "3f060b3f1d0ee0243377181603d450726663e285",
"size": "28627",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "autoupdate.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "367"
},
{
"name": "Makefile",
"bytes": "1033"
},
{
"name": "Python",
"bytes": "333745"
},
{
"name": "Shell",
"bytes": "131315"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import json
import logging
import logging.config
import os
import shutil
from celery import Celery
import boto
from boto.s3.connection import S3Connection
import pika
import requests
import time
from retrying import retry
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
TMP_DIR = os.path.join(BASE_DIR, '.imgtmp')
if not os.path.exists(TMP_DIR):
os.makedirs(TMP_DIR)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
app = Celery('twitterCollector')
app.config_from_object('celeryconfig')
@app.task
def process_image(img_id, url, twitter_user, bucket, s3_credentials, rmq_credentials, queue=None):
    """Download an image, upload it to an S3 bucket and optionally notify RabbitMQ.

    Args:
        img_id: identifier of the image; also used as the stored file name.
        url: source URL of the image.
        twitter_user: user name forwarded in the queue message.
        bucket: destination S3 bucket name.
        s3_credentials: dict of S3 connection parameters.
        rmq_credentials: dict of RabbitMQ connection parameters.
        queue: optional queue name; when given, a message is published after
            a successful upload.

    Returns:
        True on success, False when any step failed.
    """
    logger.debug('Downloading picture (ID: %s, URL: %s)', img_id, url)
    try:
        img_name, path = download_file(url, img_id)
    except Exception:
        # Narrowed from a bare ``except`` so SystemExit/KeyboardInterrupt
        # still propagate.
        logger.debug('Could not download picture (ID: %s, URL: %s)', img_id, url)
        return False
    logger.debug('Picture downloaded successfuly into %s (ID: %s, URL: %s)', path, img_id, url)

    try:
        logger.debug('Uploading picture to object store (ID: %s)', img_id)
        try:
            upload_object_to_bucket(s3_credentials, path, img_name, bucket)
        except Exception:
            logger.error("Couldn't upload file %s to bucket %s.", path, bucket)
            return False
        logger.info('Image uploaded successfully to object store (ID: %s)', img_id)

        if queue:
            logger.info('Sending message to queue "%s" (ID: %s)', queue, img_id)
            try:
                message = {'twitter_user': twitter_user, 'media_id': img_id}
                send_message_to_queue(rmq_credentials, message, queue)
            except Exception as e:
                # BUG FIX: the original had a bare ``False`` expression here,
                # so a publish failure fell through and reported success.
                logger.error('Could not send message to RabbitMQ queue: %s', e)
                return False
            logger.info('Message sent to queue "%s" (ID: %s)', queue, img_id)
        return True
    finally:
        # Always remove the temporary download, even when the upload or the
        # notification failed (the original leaked the file on upload errors).
        try:
            os.remove(path)
            logger.debug('Removed temporary file at %s', path)
        except OSError:
            pass
@retry(wait_exponential_multiplier=10000, stop_max_attempt_number=3)
def download_file(url, image_id):
    """Stream *url* into TMP_DIR as ``<image_id>.jpg``; return (name, path).

    Raises on any non-200 response; the @retry decorator re-attempts up to
    three times with exponential backoff.
    """
    response = requests.get(url, stream=True)
    if response.status_code != 200:
        raise Exception('Could not download picture (ID: %s, URL: %s)' % (image_id, url))
    img_name = '{name}.{extension}'.format(name=image_id, extension='jpg')
    target = os.path.join(TMP_DIR, img_name)
    # Have requests decompress the raw stream before it is copied to disk.
    response.raw.decode_content = True
    with open(target, 'wb') as out:
        shutil.copyfileobj(response.raw, out)
    return img_name, target
@retry(wait_exponential_multiplier=10000, stop_max_attempt_number=3)
def upload_object_to_bucket(s3_credentials, path, object_name, bucket_name):
    """Store the file at *path* in *bucket_name* under key *object_name*."""
    creds = s3_credentials
    connection = S3Connection(
        aws_access_key_id=creds['access_key'],
        aws_secret_access_key=creds['secret_key'],
        host=creds['host'],
        port=creds['port'],
        calling_format='boto.s3.connection.ProtocolIndependentOrdinaryCallingFormat',
        is_secure=creds['is_secure'])
    target_bucket = connection.get_bucket(bucket_name)
    # validate=False skips the existence round-trip for the key.
    key = target_bucket.get_key(object_name, validate=False)
    key.set_contents_from_filename(path)
@retry(wait_exponential_multiplier=10000, stop_max_attempt_number=3)
def send_message_to_queue(rmq_credentials, message, queue):
    """Publish *message* (JSON-encoded, persistent) to *queue* on RabbitMQ."""
    credentials = pika.PlainCredentials(rmq_credentials['user'],
                                        rmq_credentials['password'])
    params = pika.ConnectionParameters(host=rmq_credentials['host'],
                                       port=rmq_credentials['port'],
                                       credentials=credentials)
    connection = pika.BlockingConnection(params)
    channel = connection.channel()
    # Durable queue + delivery_mode=2 so the message survives broker restarts.
    channel.queue_declare(queue=queue, durable=True)
    channel.basic_publish(exchange='',
                          routing_key=queue,
                          body=json.dumps(message),
                          properties=pika.BasicProperties(delivery_mode=2))
    connection.close()
| {
"content_hash": "478add0096589e98d1a33a677eef4e64",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 101,
"avg_line_length": 34.973214285714285,
"alnum_prop": 0.6413071227980598,
"repo_name": "arvindkandhare/mosaicme",
"id": "af8aeb2adf626e3c333a489b4bd9a2a9edb0de65",
"size": "3917",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mosaicme/collector/tasks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "10512"
},
{
"name": "HTML",
"bytes": "13180"
},
{
"name": "Java",
"bytes": "48410"
},
{
"name": "JavaScript",
"bytes": "64254"
},
{
"name": "Puppet",
"bytes": "750"
},
{
"name": "Python",
"bytes": "34375"
},
{
"name": "Shell",
"bytes": "3848"
}
],
"symlink_target": ""
} |
"""Import files from an NXP MCUXpresso SDK archive into Zephyr
The MCUXpresso SDK provides device header files and peripheral drivers for NXP
Kinetis, LPC, and i.MX SoCs. Zephyr drivers for these SoCs are shims that adapt
MCUXpresso SDK APIs to Zephyr APIs.
This script automates updating Zephyr to a newer version of the MCUXpresso SDK.
"""
import argparse
import os
import re
import shutil
import sys
import tempfile
# Refuse to run outside a configured Zephyr workspace.
if "ZEPHYR_BASE" not in os.environ:
    sys.stderr.write("$ZEPHYR_BASE environment variable undefined.\n")
    exit(1)

# Root of the Zephyr tree that files are imported into.
ZEPHYR_BASE = os.environ["ZEPHYR_BASE"]
def get_soc_family(device):
    """Map an SDK device name to its Zephyr SoC family directory name.

    Returns 'kinetis', 'lpc' or 'imx' by device-name prefix, or None for an
    unrecognized device.
    """
    for prefix, family in (('MK', 'kinetis'), ('LPC', 'lpc'), ('MIMX', 'imx')):
        if device.startswith(prefix):
            return family
    return None
def get_files(src, pattern):
    """Split the entries of directory *src* into regex matches and non-matches.

    Returns ``[matches, nonmatches]`` as lists of full paths; both are empty
    when *src* does not exist.
    """
    matches, nonmatches = [], []
    if not os.path.exists(src):
        return [matches, nonmatches]
    for filename in os.listdir(src):
        path = os.path.join(src, filename)
        bucket = matches if re.search(pattern, filename) else nonmatches
        bucket.append(path)
    return [matches, nonmatches]
def copy_files(files, dst):
    """Copy every file in *files* into *dst*, creating *dst* if needed.

    A falsy *files* is a no-op: the destination directory is not created.
    """
    if files:
        os.makedirs(dst, exist_ok=True)
        for source in files:
            shutil.copy2(source, dst)
def import_sdk(directory):
    """Import device, driver, xip boot and board config files from an
    unpacked MCUXpresso SDK at *directory* into the Zephyr tree."""
    for device in os.listdir(os.path.join(directory, 'devices')):
        _import_device(directory, device)
    for board in os.listdir(os.path.join(directory, 'boards')):
        _import_board(directory, board)


def _import_device(directory, device):
    """Copy one device's headers, drivers and xip boot files into Zephyr."""
    family = get_soc_family(device)
    shared_dst = os.path.join(ZEPHYR_BASE, 'ext/hal/nxp/mcux/drivers', family)
    device_dst = os.path.join(ZEPHYR_BASE, 'ext/hal/nxp/mcux/devices', device)

    device_src = os.path.join(directory, 'devices', device)
    device_pattern = "|".join([device, 'fsl_device_registers'])
    [device_headers, _] = get_files(device_src, device_pattern)

    # Clock/iomuxc drivers are device-specific; the rest are shared per family.
    drivers_src = os.path.join(directory, 'devices', device, 'drivers')
    [device_drivers, shared_drivers] = get_files(drivers_src,
                                                 "fsl_clock|fsl_iomuxc")

    xip_boot_src = os.path.join(directory, 'devices', device, 'xip')
    [xip_boot, _] = get_files(xip_boot_src, ".*")

    print('Importing {} device headers to {}'.format(device, device_dst))
    copy_files(device_headers, device_dst)
    print('Importing {} device-specific drivers to {}'.format(device, device_dst))
    copy_files(device_drivers, device_dst)
    print('Importing {} family shared drivers to {}'.format(family, shared_dst))
    copy_files(shared_drivers, shared_dst)
    print('Importing {} xip boot to {}'.format(device, shared_dst))
    copy_files(xip_boot, shared_dst)


def _import_board(directory, board):
    """Copy one board's xip configuration files into Zephyr."""
    board_src = os.path.join(directory, 'boards', board)
    board_dst = os.path.join(ZEPHYR_BASE, 'ext/hal/nxp/mcux/boards', board)
    [xip_config, _] = get_files(os.path.join(board_src, 'xip'), ".*")
    print('Importing {} xip config to {}'.format(board, board_dst))
    copy_files(xip_config, board_dst)
def parse_args():
    """Parse the command line, extract the archive, and run the import."""
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("-f", "--file", required=True,
                        help="MCUXpresso SDK archive file to import from")
    args = parser.parse_args()

    # Unpack into a temp dir that is cleaned up automatically afterwards.
    with tempfile.TemporaryDirectory() as tmpdir:
        print('Extracting MCUXpresso SDK into temporary directory {}'.format(tmpdir))
        shutil.unpack_archive(args.file, tmpdir)
        import_sdk(tmpdir)
def main():
    """Script entry point: delegate to parse_args()."""
    parse_args()


# Allow use both as a script and as an importable module.
if __name__ == "__main__":
    main()
| {
"content_hash": "1ecb61b403b2ce0178651e7e04c31abf",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 86,
"avg_line_length": 33.214285714285715,
"alnum_prop": 0.6413978494623656,
"repo_name": "explora26/zephyr",
"id": "5029991808d106ebab827ec7f6c98c0a6e100106",
"size": "3812",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ext/hal/nxp/mcux/scripts/import_mcux_sdk.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "1293047"
},
{
"name": "Batchfile",
"bytes": "110"
},
{
"name": "C",
"bytes": "340251497"
},
{
"name": "C++",
"bytes": "3179665"
},
{
"name": "CMake",
"bytes": "531524"
},
{
"name": "EmberScript",
"bytes": "793"
},
{
"name": "Makefile",
"bytes": "3313"
},
{
"name": "Objective-C",
"bytes": "34223"
},
{
"name": "Perl",
"bytes": "202106"
},
{
"name": "Python",
"bytes": "909223"
},
{
"name": "Shell",
"bytes": "42672"
},
{
"name": "Verilog",
"bytes": "6394"
}
],
"symlink_target": ""
} |
"""Implentation of Brocade Neutron Plugin."""
from oslo.config import cfg
from neutron.agent import securitygroups_rpc as sg_rpc
from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api
from neutron.common import rpc as q_rpc
from neutron.common import topics
from neutron.common import utils
from neutron.db import agents_db
from neutron.db import agentschedulers_db
from neutron.db import api as db
from neutron.db import db_base_plugin_v2
from neutron.db import dhcp_rpc_base
from neutron.db import extraroute_db
from neutron.db import l3_rpc_base
from neutron.db import securitygroups_rpc_base as sg_db_rpc
from neutron.extensions import portbindings
from neutron.extensions import securitygroup as ext_sg
from neutron.openstack.common import context
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
from neutron.openstack.common import rpc
from neutron.openstack.common.rpc import proxy
from neutron.plugins.brocade.db import models as brocade_db
from neutron.plugins.brocade import vlanbm as vbm
from neutron import scheduler
LOG = logging.getLogger(__name__)

PLUGIN_VERSION = 0.88
# Prefix reported as device_owner for ports resolved from agent devices.
AGENT_OWNER_PREFIX = "network:"
# Import path of the NETCONF driver that talks to the VDX switch.
NOS_DRIVER = 'neutron.plugins.brocade.nos.nosdriver.NOSdriver'

# Connection settings for the Brocade switch ([SWITCH] config section).
SWITCH_OPTS = [cfg.StrOpt('address', default=''),
               cfg.StrOpt('username', default=''),
               cfg.StrOpt('password', default='', secret=True),
               cfg.StrOpt('ostype', default='NOS')
               ]

# Host NIC the agent bridges to ([PHYSICAL_INTERFACE] config section).
PHYSICAL_INTERFACE_OPTS = [cfg.StrOpt('physical_interface', default='eth0')
                           ]

cfg.CONF.register_opts(SWITCH_OPTS, "SWITCH")
cfg.CONF.register_opts(PHYSICAL_INTERFACE_OPTS, "PHYSICAL_INTERFACE")
cfg.CONF.register_opts(scheduler.AGENTS_SCHEDULER_OPTS)
class BridgeRpcCallbacks(dhcp_rpc_base.DhcpRpcCallbackMixin,
                         l3_rpc_base.L3RpcCallbackMixin,
                         sg_db_rpc.SecurityGroupServerRpcCallbackMixin):
    """Agent callback."""

    RPC_API_VERSION = '1.1'
    # Device names start with "tap"
    # history
    #   1.1 Support Security Group RPC
    TAP_PREFIX_LEN = 3  # length of the "tap" prefix stripped from device names

    def create_rpc_dispatcher(self):
        """Get the rpc dispatcher for this manager.

        If a manager would like to set an rpc API version, or support more than
        one class as the target of rpc messages, override this method.
        """
        # Also register the agents_db callback so agent state reports are
        # dispatched alongside this class's own handlers.
        return q_rpc.PluginRpcDispatcher([self,
                                          agents_db.AgentExtRpcCallback()])

    @classmethod
    def get_port_from_device(cls, device):
        """Get port from the brocade specific db."""
        # TODO(shh) context is not being passed as
        # an argument to this function;
        #
        # need to be fixed in:
        # file: neutron/db/securtygroups_rpc_base.py
        # function: securitygroup_rules_for_devices()
        # which needs to pass context to us
        # Doing what other plugins are doing
        session = db.get_session()
        port = brocade_db.get_port_from_device(
            session, device[cls.TAP_PREFIX_LEN:])

        # TODO(shiv): need to extend the db model to include device owners
        # make it appears that the device owner is of type network
        if port:
            port['device'] = device
            port['device_owner'] = AGENT_OWNER_PREFIX
            port['binding:vif_type'] = 'bridge'
        return port

    def get_device_details(self, rpc_context, **kwargs):
        """Agent requests device details.

        Returns a dict describing the port backing *device*; when the port is
        unknown only the device name is echoed back.
        """
        agent_id = kwargs.get('agent_id')
        device = kwargs.get('device')
        LOG.debug(_("Device %(device)s details requested from %(agent_id)s"),
                  {'device': device, 'agent_id': agent_id})
        port = brocade_db.get_port(rpc_context, device[self.TAP_PREFIX_LEN:])
        if port:
            entry = {'device': device,
                     'vlan_id': port.vlan_id,
                     'network_id': port.network_id,
                     'port_id': port.port_id,
                     'physical_network': port.physical_interface,
                     'admin_state_up': port.admin_state_up
                     }
        else:
            entry = {'device': device}
            LOG.debug(_("%s can not be found in database"), device)
        return entry

    def update_device_down(self, rpc_context, **kwargs):
        """Device no longer exists on agent."""
        device = kwargs.get('device')
        port = self.get_port_from_device(device)
        if port:
            entry = {'device': device,
                     'exists': True}
            # Set port status to DOWN
            port_id = port['port_id']
            brocade_db.update_port_state(rpc_context, port_id, False)
        else:
            entry = {'device': device,
                     'exists': False}
            LOG.debug(_("%s can not be found in database"), device)
        return entry
class AgentNotifierApi(proxy.RpcProxy,
                       sg_rpc.SecurityGroupAgentRpcApiMixin):
    """Agent side of the linux bridge rpc API.

    API version history:
        1.0 - Initial version.
    """

    BASE_RPC_API_VERSION = '1.0'

    def __init__(self, topic):
        super(AgentNotifierApi, self).__init__(
            topic=topic, default_version=self.BASE_RPC_API_VERSION)
        self.topic = topic
        # Pre-compute the per-operation topic names used for fanout casts.
        self.topic_network_delete = topics.get_topic_name(
            topic, topics.NETWORK, topics.DELETE)
        self.topic_port_update = topics.get_topic_name(
            topic, topics.PORT, topics.UPDATE)

    def network_delete(self, context, network_id):
        """Fan out a network_delete notification to all agents."""
        msg = self.make_msg('network_delete', network_id=network_id)
        self.fanout_cast(context, msg, topic=self.topic_network_delete)

    def port_update(self, context, port, physical_network, vlan_id):
        """Fan out a port_update notification to all agents."""
        msg = self.make_msg('port_update',
                            port=port,
                            physical_network=physical_network,
                            vlan_id=vlan_id)
        self.fanout_cast(context, msg, topic=self.topic_port_update)
class BrocadePluginV2(db_base_plugin_v2.NeutronDbPluginV2,
                      extraroute_db.ExtraRoute_db_mixin,
                      sg_db_rpc.SecurityGroupServerRpcMixin,
                      agentschedulers_db.L3AgentSchedulerDbMixin,
                      agentschedulers_db.DhcpAgentSchedulerDbMixin):
    """BrocadePluginV2 is a Neutron plugin.

    Provides L2 Virtual Network functionality using VDX. Upper
    layer driver class that interfaces to NETCONF layer below.
    """

    def __init__(self):
        """Initialize Brocade Plugin.

        Specify switch address and db configuration.
        """
        self.supported_extension_aliases = ["binding", "security-group",
                                            "router", "extraroute",
                                            "agent", "l3_agent_scheduler",
                                            "dhcp_agent_scheduler"]

        self.physical_interface = (cfg.CONF.PHYSICAL_INTERFACE.
                                   physical_interface)
        db.configure_db()
        self.ctxt = context.get_admin_context()
        self.ctxt.session = db.get_session()
        # VLAN allocation bitmap, seeded from the plugin's database state.
        self._vlan_bitmap = vbm.VlanBitmap(self.ctxt)
        self._setup_rpc()
        self.network_scheduler = importutils.import_object(
            cfg.CONF.network_scheduler_driver
        )
        self.router_scheduler = importutils.import_object(
            cfg.CONF.router_scheduler_driver
        )
        self.brocade_init()

    def brocade_init(self):
        """Brocade specific initialization."""
        # Switch credentials come from the [SWITCH] config section.
        self._switch = {'address': cfg.CONF.SWITCH.address,
                        'username': cfg.CONF.SWITCH.username,
                        'password': cfg.CONF.SWITCH.password
                        }
        self._driver = importutils.import_object(NOS_DRIVER)

    def _setup_rpc(self):
        # RPC support
        self.topic = topics.PLUGIN
        self.rpc_context = context.RequestContext('neutron', 'neutron',
                                                  is_admin=False)
        self.conn = rpc.create_connection(new=True)
        self.callbacks = BridgeRpcCallbacks()
        self.dispatcher = self.callbacks.create_rpc_dispatcher()
        self.conn.create_consumer(self.topic, self.dispatcher,
                                  fanout=False)
        # Consume from all consumers in a thread
        self.conn.consume_in_thread()
        self.notifier = AgentNotifierApi(topics.AGENT)
        self.dhcp_agent_notifier = dhcp_rpc_agent_api.DhcpAgentNotifyAPI()
        self.l3_agent_notifier = l3_rpc_agent_api.L3AgentNotify

    def create_network(self, context, network):
        """Create network.

        This call to create network translates to creation of port-profile on
        the physical switch.
        """
        with context.session.begin(subtransactions=True):
            net = super(BrocadePluginV2, self).create_network(context, network)
            net_uuid = net['id']
            vlan_id = self._vlan_bitmap.get_next_vlan(None)
            switch = self._switch
            try:
                self._driver.create_network(switch['address'],
                                            switch['username'],
                                            switch['password'],
                                            vlan_id)
            except Exception as e:
                # Proper formatting
                LOG.warning(_("Brocade NOS driver:"))
                LOG.warning(_("%s"), e)
                LOG.debug(_("Returning the allocated vlan (%d) to the pool"),
                          vlan_id)
                # Roll back the vlan allocation before aborting the txn.
                self._vlan_bitmap.release_vlan(int(vlan_id))
                raise Exception("Brocade plugin raised exception, check logs")

            brocade_db.create_network(context, net_uuid, vlan_id)
            self._process_l3_create(context, net, network['network'])

        LOG.info(_("Allocated vlan (%d) from the pool"), vlan_id)
        return net

    def delete_network(self, context, net_id):
        """Delete network.

        This call to delete the network translates to removing the
        port-profile on the physical switch.
        """
        with context.session.begin(subtransactions=True):
            result = super(BrocadePluginV2, self).delete_network(context,
                                                                 net_id)
            # we must delete all ports in db first (foreign key constraint)
            # there is no need to delete port in the driver (its a no-op)
            # (actually: note there is no such call to the driver)
            bports = brocade_db.get_ports(context, net_id)
            for bport in bports:
                brocade_db.delete_port(context, bport['port_id'])

            # find the vlan for this network
            net = brocade_db.get_network(context, net_id)
            vlan_id = net['vlan']

            # Tell hw to do remove PP
            switch = self._switch
            try:
                self._driver.delete_network(switch['address'],
                                            switch['username'],
                                            switch['password'],
                                            net_id)
            except Exception as e:
                # Proper formatting
                LOG.warning(_("Brocade NOS driver:"))
                LOG.warning(_("%s"), e)
                raise Exception("Brocade plugin raised exception, check logs")

            # now ok to delete the network
            brocade_db.delete_network(context, net_id)

        # relinquish vlan in bitmap
        self._vlan_bitmap.release_vlan(int(vlan_id))
        return result

    def update_network(self, context, id, network):
        """Update network attributes and its L3 (external-network) state."""
        session = context.session
        with session.begin(subtransactions=True):
            net = super(BrocadePluginV2, self).update_network(context, id,
                                                              network)
            self._process_l3_update(context, net, network['network'])
        return net

    def create_port(self, context, port):
        """Create logical port on the switch."""
        tenant_id = port['port']['tenant_id']
        network_id = port['port']['network_id']
        admin_state_up = port['port']['admin_state_up']

        physical_interface = self.physical_interface

        with context.session.begin(subtransactions=True):
            bnet = brocade_db.get_network(context, network_id)
            vlan_id = bnet['vlan']

            neutron_port = super(BrocadePluginV2, self).create_port(context,
                                                                    port)
            interface_mac = neutron_port['mac_address']
            port_id = neutron_port['id']

            switch = self._switch

            # convert mac format: xx:xx:xx:xx:xx:xx -> xxxx.xxxx.xxxx
            mac = self.mac_reformat_62to34(interface_mac)
            try:
                self._driver.associate_mac_to_network(switch['address'],
                                                      switch['username'],
                                                      switch['password'],
                                                      vlan_id,
                                                      mac)
            except Exception as e:
                # Proper formatting
                LOG.warning(_("Brocade NOS driver:"))
                LOG.warning(_("%s"), e)
                raise Exception("Brocade plugin raised exception, check logs")

            # save to brocade persistent db
            brocade_db.create_port(context, port_id, network_id,
                                   physical_interface,
                                   vlan_id, tenant_id, admin_state_up)

        # apply any extensions
        return self._extend_port_dict_binding(context, neutron_port)

    def delete_port(self, context, port_id):
        """Delete the Neutron port and its brocade-db shadow record."""
        with context.session.begin(subtransactions=True):
            super(BrocadePluginV2, self).delete_port(context, port_id)
            brocade_db.delete_port(context, port_id)

    def update_port(self, context, port_id, port):
        """Update a port, refreshing security groups and notifying agents."""
        original_port = self.get_port(context, port_id)
        session = context.session
        port_updated = False
        with session.begin(subtransactions=True):
            # delete the port binding and read it with the new rules
            if ext_sg.SECURITYGROUPS in port['port']:
                port['port'][ext_sg.SECURITYGROUPS] = (
                    self._get_security_groups_on_port(context, port))
                self._delete_port_security_group_bindings(context, port_id)
                # process_port_create_security_group also needs port id
                port['port']['id'] = port_id
                self._process_port_create_security_group(
                    context,
                    port['port'],
                    port['port'][ext_sg.SECURITYGROUPS])
                port_updated = True
            port = super(BrocadePluginV2, self).update_port(
                context, port_id, port)
            if original_port['admin_state_up'] != port['admin_state_up']:
                port_updated = True

        # Notify agents when membership-relevant attributes changed.
        if (original_port['fixed_ips'] != port['fixed_ips'] or
            not utils.compare_elements(
                original_port.get(ext_sg.SECURITYGROUPS),
                port.get(ext_sg.SECURITYGROUPS))):
            self.notifier.security_groups_member_updated(
                context, port.get(ext_sg.SECURITYGROUPS))

        if port_updated:
            self._notify_port_updated(context, port)

        return self._extend_port_dict_binding(context, port)

    def get_port(self, context, port_id, fields=None):
        """Return one port with binding attributes applied."""
        with context.session.begin(subtransactions=True):
            port = super(BrocadePluginV2, self).get_port(
                context, port_id, fields)
            self._extend_port_dict_binding(context, port)
        return self._fields(port, fields)

    def get_ports(self, context, filters=None, fields=None):
        """Return all matching ports with binding attributes applied."""
        res_ports = []
        with context.session.begin(subtransactions=True):
            ports = super(BrocadePluginV2, self).get_ports(context,
                                                           filters,
                                                           fields)
            for port in ports:
                self._extend_port_dict_binding(context, port)
                res_ports.append(self._fields(port, fields))
        return res_ports

    def _notify_port_updated(self, context, port):
        # Fan the update out to agents with the brocade-specific attributes.
        port_id = port['id']
        bport = brocade_db.get_port(context, port_id)
        self.notifier.port_update(context, port,
                                  bport.physical_interface,
                                  bport.vlan_id)

    def _extend_port_dict_binding(self, context, port):
        # Advertise the bridge VIF type; port filtering is supported only
        # when the security-group extension is enabled.
        port[portbindings.VIF_TYPE] = portbindings.VIF_TYPE_BRIDGE
        port[portbindings.CAPABILITIES] = {
            portbindings.CAP_PORT_FILTER:
            'security-group' in self.supported_extension_aliases}
        return port

    def get_plugin_version(self):
        """Get version number of the plugin."""
        return PLUGIN_VERSION

    @staticmethod
    def mac_reformat_62to34(interface_mac):
        """Transform MAC address format.

        Transforms from 6 groups of 2 hexadecimal numbers delimited by ":"
        to 3 groups of 4 hexadecimals numbers delimited by ".".

        :param interface_mac: MAC address in the format xx:xx:xx:xx:xx:xx
        :type interface_mac: string
        :returns: MAC address in the format xxxx.xxxx.xxxx
        :rtype: string
        """
        mac = interface_mac.replace(":", "")
        mac = mac[0:4] + "." + mac[4:8] + "." + mac[8:12]
        return mac
| {
"content_hash": "d5956c92c9eafda555b5937c140c771c",
"timestamp": "",
"source": "github",
"line_count": 453,
"max_line_length": 79,
"avg_line_length": 40.18763796909492,
"alnum_prop": 0.5555616588849217,
"repo_name": "ykaneko/neutron",
"id": "137fb979eef714af656de1c6d088acc3af9da034",
"size": "19089",
"binary": false,
"copies": "1",
"ref": "refs/heads/bug/1198917",
"path": "neutron/plugins/brocade/NeutronPlugin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "37307"
},
{
"name": "JavaScript",
"bytes": "67928"
},
{
"name": "Python",
"bytes": "4566707"
},
{
"name": "Shell",
"bytes": "9109"
},
{
"name": "XSLT",
"bytes": "50907"
}
],
"symlink_target": ""
} |
from platypus.server import routes, app
from platypus.server.models import db
def run(port, debug, local):
    """Create tables, register routes, and start the Flask app."""
    db.create_all()
    routes.add_routes()
    # Bind to loopback only when running locally.
    host = '127.0.0.1' if local else '0.0.0.0'
    app.run(host=host, port=port, debug=debug)
def show_routes():
    """Print every registered route with its HTTP methods, column-aligned."""
    # Pad rules to the longest rule plus a small gutter.
    max_length = max(len(k) for k in routes.routes.keys()) + 5
    for rule in sorted(routes.routes.keys()):
        # Parenthesized single-argument print works under both Python 2 and 3
        # (the original used a Python-2-only print statement).
        print('%s [%s]' % (rule.ljust(max_length), ','.join(routes.routes[rule][1])))
| {
"content_hash": "2338ebe9c762ddc9c1fbf46ee6aa9a2a",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 84,
"avg_line_length": 29.933333333333334,
"alnum_prop": 0.643652561247216,
"repo_name": "devraccoon/platypus",
"id": "1067aa040f4da91c9ae4ad659407aa74fc0fc41b",
"size": "449",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "platypus/server/application.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "1046"
},
{
"name": "JavaScript",
"bytes": "41463"
},
{
"name": "Python",
"bytes": "5396"
}
],
"symlink_target": ""
} |
# Minimal distutils packaging script; install with ``python setup.py install``.
# NOTE(review): distutils is deprecated (removed in Python 3.12) — consider
# migrating to setuptools when the supported interpreters allow it.
from distutils.core import setup

setup(name='carnival',
      version='1.0.0',
      author='Elben Shira',
      author_email='elbenshira@gmail.com',
      url='http://elbenshira.com',
      description='Expert Search on Code Repositories.',
      packages=['carnival'],
      )
| {
"content_hash": "2b14e523467ef8456102bc7775bc5980",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 56,
"avg_line_length": 23.5,
"alnum_prop": 0.6276595744680851,
"repo_name": "elben/carnival",
"id": "e0954d10942da12aae411d3747a36deb569db603",
"size": "282",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "21028"
},
{
"name": "Shell",
"bytes": "4519"
}
],
"symlink_target": ""
} |
import os
import sys
import asciichart
# -----------------------------------------------------------------------------
this_folder = os.path.dirname(os.path.abspath(__file__))
root_folder = os.path.dirname(os.path.dirname(this_folder))
sys.path.append(root_folder + '/python')
sys.path.append(this_folder)
# -----------------------------------------------------------------------------
import ccxt # noqa: E402
# -----------------------------------------------------------------------------
# Exchange and market to chart.
exchange = ccxt.cex()
symbol = 'BTC/USD'

# each ohlcv candle is a list of [ timestamp, open, high, low, close, volume ]
index = 4  # use close price from each ohlcv candle
length = 80  # number of candles drawn in the ASCII chart
height = 15  # chart height in character rows
def print_chart(exchange, symbol, timeframe):
    """Fetch OHLCV candles, render an ASCII chart, and return the last close.

    Uses the module-level ``index``/``length``/``height`` settings.
    """
    print("\n" + exchange.name + ' ' + symbol + ' ' + timeframe + ' chart:')
    candles = exchange.fetch_ohlcv(symbol, timeframe)
    # Extract the closing-price series (module-level ``index`` == 4).
    closes = [candle[index] for candle in candles]
    print("\n" + asciichart.plot(closes[-length:], {'height': height}))  # print the chart
    return candles[-1][index]
# Render a one-minute chart, then report the most recent closing price.
last = print_chart(exchange, symbol, '1m')
print("\n" + exchange.name + " ₿ = $" + str(last) + "\n")  # print last closing price
| {
"content_hash": "757a42cf6f976d42b091efe314cc3159",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 90,
"avg_line_length": 28.717391304347824,
"alnum_prop": 0.5352006056018168,
"repo_name": "tritoanst/ccxt",
"id": "a4c28b6344c59c341beabc1d04f64d66c3041052",
"size": "1348",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/py/fetch-ohlcv-cex.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "3955653"
},
{
"name": "PHP",
"bytes": "783191"
},
{
"name": "Python",
"bytes": "680573"
},
{
"name": "Shell",
"bytes": "833"
}
],
"symlink_target": ""
} |
from event import *
from pygame.locals import *
from model import *
import pygame
from pprint import pprint
class CPUTickController:
    """Clock driver: pumps TickEvents through the event manager at 20 FPS."""

    FRAMES_PER_SECOND = 20

    def __init__(self, evtmgr):
        self.evtmgr = evtmgr
        self.keep_going = True
        self.clock = pygame.time.Clock()

    def notify(self, event):
        """Stop ticking once a QuitEvent is observed."""
        if isinstance(event, QuitEvent):
            self.keep_going = False

    def run(self):
        """Block, emitting one TickEvent per frame until told to stop."""
        while self.keep_going:
            self.clock.tick(self.FRAMES_PER_SECOND)
            self.evtmgr.trigger(TickEvent())
class KeyboardController:
    """Polls pygame's event queue each tick and re-emits app-level events."""

    def __init__(self, evtmgr):
        self.evtmgr = evtmgr
        # NOTE(review): ``suspend`` is toggled by the Match events below but
        # is never consulted before processing input — confirm whether input
        # should actually be ignored while a match is being resolved.
        self.suspend = False

    def notify(self, event):
        if isinstance(event, TickEvent):
            for event in pygame.event.get():
                ev = None
                if event.type == QUIT:
                    ev = QuitEvent()
                elif event.type == KEYDOWN:
                    # Escape always quits; any other key press is forwarded.
                    if event.key == K_ESCAPE:
                        ev = QuitEvent()
                    else:
                        ev = KeyboardEvent(event.key)
                if ev:
                    self.evtmgr.trigger(ev)
        elif isinstance(event, MatchEvent):
            self.suspend = True
        elif isinstance(event, MatchResolvedEvent):
            self.suspend = False
class PygameController:
    """Owns the pygame window: sets it up and presents a frame each tick."""

    def __init__(self, evtmgr):
        self.evtmgr = evtmgr

    def notify(self, event):
        if isinstance(event, InitEvent):
            pygame.display.init()
            pygame.font.init()
            self.window = pygame.display.set_mode((600, 600))
            pygame.display.set_caption('Gemgem')
            # NOTE(review): this Font object is created but never stored or
            # used — confirm whether the call can be removed.
            pygame.font.Font('freesansbold.ttf', 36)
        if isinstance(event, TickEvent):
            self.window.fill((0, 0, 0))
            # Let drawing listeners paint onto the window, then present it.
            self.evtmgr.trigger(DrawEvent(self.window))
            pygame.display.flip()
class ModelController:
    """Translates input and match events into board mutations."""

    def __init__(self, evtmgr, board):
        self.evtmgr = evtmgr
        self.board = board

    def notify(self, e):
        if isinstance(e, MatchResolvedEvent):
            # Remove every gem position that took part in the resolved match.
            for p in e.matches:
                self.board.remove(p)
        if isinstance(e, KeyboardEvent):
            if e.key in [ K_UP, K_DOWN, K_LEFT, K_RIGHT ]:
                self.board.move(e.key)
            if e.key == K_RETURN:
                if (self.board.is_holding()):
                    if (self.board.is_valid_swap()):
                        self.board.swap()
                        if self.board.has_match():
                            self.evtmgr.trigger(MatchEvent(self.board.find_matches()))
                    else:
                        self.board.release()
                else:
                    # First RETURN press grabs the gem under the cursor.
                    self.board.hold()
GEMSIZE = 64  # gem sprite size in pixels; board cells are GEMSIZE x GEMSIZE


class AnimationController:
    """Draws the board each frame and resolves matched gems."""

    def __init__(self, evtmgr, board):
        self.evtmgr = evtmgr
        self.matches = []
        self.board = board
        self.tick = 0

    def notify(self, e):
        if isinstance(e, MatchEvent):
            self.matches = e.matches
        if isinstance(e, DrawEvent):
            for x, row in enumerate(self.board.gems):
                for y, gem in enumerate(row):
                    pos = (x * GEMSIZE, y * GEMSIZE)
                    # Cell outline color: red while held, yellow while selected.
                    color = (30, 30, 30)
                    if self.board.held == (x, y):
                        color = (255, 0, 0)
                    elif self.board.selected == (x, y):
                        color = (255, 255, 0)
                    pygame.draw.rect(e.surface, color, pygame.Rect(pos, (GEMSIZE, GEMSIZE)), 1)
                    if gem:
                        if (x, y) in self.matches:
                            # NOTE(review): this fires once per matched cell in a
                            # single draw pass — confirm multiple
                            # MatchResolvedEvents are intended.
                            self.evtmgr.trigger(MatchResolvedEvent(self.matches))
                        else:
                            e.surface.blit(gem.surface, pos)
        if isinstance(e, MatchResolvedEvent):
            self.reset()

    def reset(self):
        # Clear pending matches and restart the animation tick counter.
        self.tick = 0
        self.matches = []
| {
"content_hash": "10ad62560fe72e0cce6de5f42beb8cd6",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 95,
"avg_line_length": 33.15384615384615,
"alnum_prop": 0.5001288992008249,
"repo_name": "WishCow/gemgem",
"id": "bc00e879b88025a90d3c1423260ba07f34c5fffb",
"size": "3879",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "controller.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10805"
}
],
"symlink_target": ""
} |
from django.forms import ChoiceField, Field, Form, Select
from django.test import SimpleTestCase
class BasicFieldsTests(SimpleTestCase):

    def test_field_sets_widget_is_required(self):
        """The widget mirrors the field's ``required`` flag."""
        self.assertTrue(Field(required=True).widget.is_required)
        self.assertFalse(Field(required=False).widget.is_required)

    def test_cooperative_multiple_inheritance(self):
        # Field.__init__ must call super().__init__ so that sibling base
        # classes in a multiple-inheritance MRO still get initialized.
        class A(object):
            def __init__(self):
                self.class_a_var = True
                super(A, self).__init__()

        class ComplexField(Field, A):
            def __init__(self):
                super(ComplexField, self).__init__()

        self.assertTrue(ComplexField().class_a_var)

    def test_field_deepcopies_widget_instance(self):
        # Mutating one bound field's widget choices must not leak into a
        # sibling field that shares the same class-level widget instance.
        class CustomChoiceField(ChoiceField):
            widget = Select(attrs={'class': 'my-custom-class'})

        class TestForm(Form):
            field1 = CustomChoiceField(choices=[])
            field2 = CustomChoiceField(choices=[])

        form = TestForm()
        form.fields['field1'].choices = [('1', '1')]
        form.fields['field2'].choices = [('2', '2')]
        self.assertEqual(form.fields['field1'].widget.choices, [('1', '1')])
        self.assertEqual(form.fields['field2'].widget.choices, [('2', '2')])
class DisabledFieldTests(SimpleTestCase):

    def test_disabled_field_has_changed_always_false(self):
        # A disabled field ignores submitted data, so it never reports a
        # change regardless of the initial/data values.
        field = Field(disabled=True)
        self.assertFalse(field.has_changed('x', 'y'))
| {
"content_hash": "b351de3675e5b91377403a9d09bc9cb9",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 73,
"avg_line_length": 35.42857142857143,
"alnum_prop": 0.6149193548387096,
"repo_name": "cloudera/hue",
"id": "67c3003393940f0fc0c5413ec6831b02de2dd64c",
"size": "1488",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "desktop/core/ext-py/Django-1.11.29/tests/forms_tests/field_tests/test_base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ABAP",
"bytes": "962"
},
{
"name": "ActionScript",
"bytes": "1133"
},
{
"name": "Ada",
"bytes": "99"
},
{
"name": "Assembly",
"bytes": "2347"
},
{
"name": "AutoHotkey",
"bytes": "720"
},
{
"name": "BASIC",
"bytes": "2884"
},
{
"name": "Batchfile",
"bytes": "143575"
},
{
"name": "C",
"bytes": "5129166"
},
{
"name": "C#",
"bytes": "83"
},
{
"name": "C++",
"bytes": "718011"
},
{
"name": "COBOL",
"bytes": "4"
},
{
"name": "CSS",
"bytes": "680715"
},
{
"name": "Cirru",
"bytes": "520"
},
{
"name": "Clojure",
"bytes": "794"
},
{
"name": "Closure Templates",
"bytes": "1072"
},
{
"name": "CoffeeScript",
"bytes": "403"
},
{
"name": "ColdFusion",
"bytes": "86"
},
{
"name": "Common Lisp",
"bytes": "632"
},
{
"name": "Cython",
"bytes": "1016963"
},
{
"name": "D",
"bytes": "324"
},
{
"name": "Dart",
"bytes": "489"
},
{
"name": "Dockerfile",
"bytes": "13576"
},
{
"name": "EJS",
"bytes": "752"
},
{
"name": "Eiffel",
"bytes": "375"
},
{
"name": "Elixir",
"bytes": "692"
},
{
"name": "Elm",
"bytes": "487"
},
{
"name": "Emacs Lisp",
"bytes": "411907"
},
{
"name": "Erlang",
"bytes": "487"
},
{
"name": "Forth",
"bytes": "979"
},
{
"name": "FreeMarker",
"bytes": "1017"
},
{
"name": "G-code",
"bytes": "521"
},
{
"name": "GAP",
"bytes": "29873"
},
{
"name": "GLSL",
"bytes": "512"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Gherkin",
"bytes": "699"
},
{
"name": "Go",
"bytes": "641"
},
{
"name": "Groovy",
"bytes": "1080"
},
{
"name": "HTML",
"bytes": "28328425"
},
{
"name": "Haml",
"bytes": "920"
},
{
"name": "Handlebars",
"bytes": "173"
},
{
"name": "Haskell",
"bytes": "512"
},
{
"name": "Haxe",
"bytes": "447"
},
{
"name": "HiveQL",
"bytes": "43"
},
{
"name": "Io",
"bytes": "140"
},
{
"name": "Java",
"bytes": "457398"
},
{
"name": "JavaScript",
"bytes": "39181239"
},
{
"name": "Jinja",
"bytes": "356"
},
{
"name": "Julia",
"bytes": "210"
},
{
"name": "LSL",
"bytes": "2080"
},
{
"name": "Lean",
"bytes": "213"
},
{
"name": "Less",
"bytes": "396102"
},
{
"name": "Lex",
"bytes": "218764"
},
{
"name": "Liquid",
"bytes": "1883"
},
{
"name": "LiveScript",
"bytes": "5747"
},
{
"name": "Lua",
"bytes": "78382"
},
{
"name": "M4",
"bytes": "1751"
},
{
"name": "MATLAB",
"bytes": "203"
},
{
"name": "Makefile",
"bytes": "1025937"
},
{
"name": "Mako",
"bytes": "3644004"
},
{
"name": "Mask",
"bytes": "597"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "Nix",
"bytes": "2212"
},
{
"name": "OCaml",
"bytes": "539"
},
{
"name": "Objective-C",
"bytes": "2672"
},
{
"name": "OpenSCAD",
"bytes": "333"
},
{
"name": "PHP",
"bytes": "662"
},
{
"name": "PLSQL",
"bytes": "29403"
},
{
"name": "PLpgSQL",
"bytes": "6006"
},
{
"name": "Pascal",
"bytes": "84273"
},
{
"name": "Perl",
"bytes": "4327"
},
{
"name": "PigLatin",
"bytes": "371"
},
{
"name": "PowerShell",
"bytes": "6235"
},
{
"name": "Procfile",
"bytes": "47"
},
{
"name": "Pug",
"bytes": "584"
},
{
"name": "Python",
"bytes": "92881549"
},
{
"name": "R",
"bytes": "2445"
},
{
"name": "Roff",
"bytes": "484108"
},
{
"name": "Ruby",
"bytes": "1098"
},
{
"name": "Rust",
"bytes": "495"
},
{
"name": "SCSS",
"bytes": "78508"
},
{
"name": "Sass",
"bytes": "770"
},
{
"name": "Scala",
"bytes": "1541"
},
{
"name": "Scheme",
"bytes": "559"
},
{
"name": "Shell",
"bytes": "249165"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "SourcePawn",
"bytes": "948"
},
{
"name": "Stylus",
"bytes": "682"
},
{
"name": "Tcl",
"bytes": "899"
},
{
"name": "TeX",
"bytes": "165743"
},
{
"name": "Thrift",
"bytes": "341963"
},
{
"name": "Twig",
"bytes": "761"
},
{
"name": "TypeScript",
"bytes": "1241396"
},
{
"name": "VBScript",
"bytes": "938"
},
{
"name": "VHDL",
"bytes": "830"
},
{
"name": "Vala",
"bytes": "485"
},
{
"name": "Verilog",
"bytes": "274"
},
{
"name": "Vim Snippet",
"bytes": "226931"
},
{
"name": "Vue",
"bytes": "350385"
},
{
"name": "XQuery",
"bytes": "114"
},
{
"name": "XSLT",
"bytes": "522199"
},
{
"name": "Yacc",
"bytes": "1070437"
},
{
"name": "jq",
"bytes": "4"
}
],
"symlink_target": ""
} |
try:
import asyncio
except ImportError:
# Trollius >= 0.3 was renamed
import trollius as asyncio
from os import environ
import datetime
from autobahn.asyncio.wamp import ApplicationSession, ApplicationRunner
class Component(ApplicationSession):
    """
    A simple time service application component.

    Registers a ``com.timeservice.now`` procedure that returns the
    current UTC time formatted as "%Y-%m-%dT%H:%M:%SZ".
    """

    # Native coroutine instead of the legacy ``@asyncio.coroutine`` /
    # ``yield from`` generator style: ``asyncio.coroutine`` was deprecated
    # and removed in Python 3.11, so the original decorator no longer
    # exists on current interpreters. Awaiting behaviour is unchanged.
    async def onJoin(self, details):
        def utcnow():
            now = datetime.datetime.utcnow()
            return now.strftime("%Y-%m-%dT%H:%M:%SZ")
        await self.register(utcnow, u'com.timeservice.now')
if __name__ == '__main__':
    # Router URL can be overridden via the environment for demos.
    router_url = environ.get("AUTOBAHN_DEMO_ROUTER", u"ws://127.0.0.1:8080/ws")
    runner = ApplicationRunner(
        router_url,
        u"crossbardemo",
        debug=False,  # optional; log even more details
    )
    runner.run(Component)
| {
"content_hash": "4fb5fed154545b94ae5163cfc6e918bd",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 71,
"avg_line_length": 23.314285714285713,
"alnum_prop": 0.6446078431372549,
"repo_name": "RyanHope/AutobahnPython",
"id": "d587ff24f760135d67e8d9b74ef42c0bb633bba1",
"size": "2093",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/asyncio/wamp/rpc/timeservice/backend.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "3648"
},
{
"name": "Python",
"bytes": "983364"
}
],
"symlink_target": ""
} |
__all__ = ["config", "pageserver", "restful", "heart"]
import config
import pageserver
import restful
import heart
def run(host, port, ns_host, ns_port, name):
    """
    Runs the server.

    @param host     The host for the server
    @param port     The port for the server
    @param ns_host  Host of the name server to register the heartbeat with
    @param ns_port  Port of the name server
    @param name     Name under which this server registers itself
    """
    hb = heart.Heart(name, host, port, ns_host, ns_port, 1)
    # Retry registration until the name server becomes reachable.
    # Catch Exception (not a bare ``except:``) so KeyboardInterrupt and
    # SystemExit can still break out of this otherwise-infinite loop.
    while True:
        try:
            hb.register()
            break
        except Exception:
            pass
    hb.start()
    # NOTE(review): debug=True looks like a development setting for a
    # long-running server -- confirm before deploying.
    config.app.run(host=host, port=int(port), debug=True, use_reloader=False)
| {
"content_hash": "0414337aefb8531b7be318425ad09220",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 77,
"avg_line_length": 20.11111111111111,
"alnum_prop": 0.5874769797421732,
"repo_name": "wallarelvo/SmallCartography",
"id": "16ff04de0beda3a11cadc1391ee08f005e4cc7e1",
"size": "544",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "carto/reducer/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "6921"
},
{
"name": "Python",
"bytes": "18482"
},
{
"name": "Shell",
"bytes": "383"
}
],
"symlink_target": ""
} |
'''
'''
from ambry.bundle import BuildBundle
import csv
import datetime
class Bundle(BuildBundle):
    '''Ambry build bundle that loads yearly crime-incident CSV files into
    per-year partitions plus one combined "all incidents" partition.

    NOTE(review): this is Python 2 code (``reader.next()``, ``unicode``,
    ``print row``); it will not run unmodified on Python 3.
    '''
    def __init__(self,directory=None):
        self.super_ = super(Bundle, self)
        self.super_.__init__(directory)
        # Maps year -> open inserter for that year's partition.
        self.part_cache = {}
    def generate_incidents(self, p):
        # Yield one row (as a list) per incident, across every configured
        # source year; ``p`` supplies the expected column layout.
        for year in self.metadata.build.set_1_years:
            f = self.filesystem.download(year)
            uz = self.filesystem.unzip(f)
            self.log("Reading: {}".format(uz))
            with open(uz, 'rbU') as csvfile:
                reader = csv.reader(csvfile)
                header = reader.next()
                fh = [ c.data['fileheader'] for c in p.table.columns]
                # get rid of the id field, since that isn't in the data.
                fh = fh[1:]
                # Fail fast if the file layout drifted from the schema.
                if fh != header:
                    raise Exception("Header mismatch: {} != {} ".format(fh, header))
                for row in reader:
                    yield list(row)
    def get_partition(self,year):
        # Lazily create (and cache) the inserter for a given year.
        if year not in self.part_cache:
            p = self.partitions.find_or_new(time=year, table='incidents');
            self.part_cache[year] = p.database.inserter('incidents')
        return self.part_cache[year]
    def build(self):
        from dateutil.parser import parse
        # All incidents
        allp = self.partitions.find_or_new(table='incidents');
        allins = allp.database.inserter()
        table = allp.table
        header = [c.name for c in table.columns]
        lr = self.init_log_rate(10000)
        for row in self.generate_incidents(allp):
            lr()
            # row[2] is parsed as the incident date -- TODO confirm
            # against the table schema.
            dt = parse(row[2])
            row[2] = dt
            row[5] = unicode(row[5],errors='ignore').strip()
            ins = self.get_partition(dt.year)
            # Empty strings / falsy cells become NULLs.
            drow = [ v if v else None for v in row ]
            if not drow[6]:
                drow[6] = -1 # Zips
            # The [None] bit is the place holder for the id column
            drow = dict(zip(header, [None]+drow))
            try:
                ins.insert(drow)
                allins.insert(drow)
            except:
                # Dump the offending row before re-raising for diagnosis.
                print row
                raise
        # Close every per-year inserter, then the combined one.
        for ins in self.part_cache.values():
            ins.close()
        allins.close()
        return True
| {
"content_hash": "e59aa51c71a52bad1e3468ceb38a68b5",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 84,
"avg_line_length": 25.257731958762886,
"alnum_prop": 0.4816326530612245,
"repo_name": "sdrdl/sdrdl-ambry-bundles",
"id": "c4b854edd15243a86e6b162d689ab4048af13522",
"size": "2450",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sandag.org/crimeincidents-orig/bundle.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "10780"
},
{
"name": "Python",
"bytes": "110896"
}
],
"symlink_target": ""
} |
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- Hack for ReadTheDocs ------------------------------------------------------
# This hack is necessary since RTD does not issue `sphinx-apidoc` before running
# `sphinx-build -b html . _build/html`. See Issue:
# https://github.com/rtfd/readthedocs.org/issues/1139
# DON'T FORGET: Check the box "Install your project inside a virtualenv using
# setup.py install" in the RTD Advanced Settings.
import os
# Detect whether we are building on ReadTheDocs; RTD sets this env var.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if on_rtd:
    import inspect
    from sphinx import apidoc
    # Directory containing this conf.py, resolved at runtime.
    __location__ = os.path.join(os.getcwd(), os.path.dirname(
        inspect.getfile(inspect.currentframe())))
    output_dir = os.path.join(__location__, "../docs/api")
    module_dir = os.path.join(__location__, "../cs207project")
    cmd_line_template = "sphinx-apidoc -f -o {outputdir} {moduledir}"
    cmd_line = cmd_line_template.format(outputdir=output_dir, moduledir=module_dir)
    # NOTE(review): passing the program name ("sphinx-apidoc") as the first
    # argv element matches the pre-1.7 apidoc.main() signature; Sphinx >= 1.7
    # expects only the arguments -- confirm against the pinned Sphinx version.
    apidoc.main(cmd_line.split(" "))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.todo',
'sphinx.ext.autosummary', 'sphinx.ext.viewcode', 'sphinx.ext.coverage',
'sphinx.ext.doctest', 'sphinx.ext.ifconfig', 'sphinx.ext.pngmath',
'sphinx.ext.napoleon']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'cs207project'
copyright = u'2016, Jonathan Friedman'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '' # Is set by calling `setup.py docs`
# The full version, including alpha/beta/rc tags.
release = '' # Is set by calling `setup.py docs`
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# Pick up the installed package version if available; otherwise keep the
# empty placeholder set earlier (filled in by `setup.py docs`).
try:
    from cs207project import __version__ as version
except ImportError:
    pass
else:
    release = version
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = ""
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'cs207project-doc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'user_guide.tex', u'cs207project Documentation',
u'Jonathan Friedman', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = ""
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- External mapping ------------------------------------------------------------
# Link against the docs of the exact Python minor version running the build.
python_version = '.'.join(map(str, sys.version_info[0:2]))
intersphinx_mapping = {
    'sphinx': ('http://sphinx.pocoo.org', None),
    'python': ('http://docs.python.org/' + python_version, None),
    'matplotlib': ('http://matplotlib.sourceforge.net', None),
    'numpy': ('http://docs.scipy.org/doc/numpy', None),
    'sklearn': ('http://scikit-learn.org/stable', None),
    'pandas': ('http://pandas.pydata.org/pandas-docs/stable', None),
    'scipy': ('http://docs.scipy.org/doc/scipy/reference/', None),
}
| {
"content_hash": "7f7fcab6383de3eb1c169df0953df102",
"timestamp": "",
"source": "github",
"line_count": 239,
"max_line_length": 85,
"avg_line_length": 35.27196652719665,
"alnum_prop": 0.6892052194543298,
"repo_name": "mc-hammertimeseries/cs207project",
"id": "9141c1834bfe7e1521273a75e140776fd26223bf",
"size": "8736",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "85"
},
{
"name": "Jupyter Notebook",
"bytes": "22080"
},
{
"name": "Python",
"bytes": "156766"
},
{
"name": "Shell",
"bytes": "309"
}
],
"symlink_target": ""
} |
import sys
import os
import os.path
from flask import Flask, send_file, Response
sys.path[0:0] = [""]
from pyclosuretempaltes.javascript_backend import compileToJS
app = Flask(__name__)
TEST_JS_DIR = os.path.join(os.path.dirname(os.path.join(os.getcwd(), __file__)), 'js/')
@app.route('/')
def mainPage():
    """Serve the test harness entry page."""
    index_path = os.path.join(TEST_JS_DIR, 'index.html')
    return send_file(index_path)
@app.route('/closure-templates.js')
def closureTemplatesJS():
    """Compile the Soy template file and serve the result as JavaScript."""
    soy_source = os.path.join(TEST_JS_DIR, 'template.soy')
    compiled = compileToJS(soy_source)
    return Response(status=200,
                    response=compiled,
                    mimetype="application/javascript")
@app.route('/<path>')
def resource(path):
    """Serve a static file from TEST_JS_DIR by name.

    ``path`` comes straight from the URL and is untrusted: resolve it and
    refuse anything that escapes TEST_JS_DIR (directory traversal). The
    default route converter already rejects slashes, but this keeps the
    handler safe even if the route is later loosened.
    """
    root = os.path.realpath(TEST_JS_DIR)
    full_path = os.path.realpath(os.path.join(root, path))
    if full_path != root and not full_path.startswith(root + os.sep):
        return Response(status=404)
    return send_file(full_path)
if __name__ == "__main__":
    # Debug mode surfaces tracebacks in the browser while testing locally.
    app.debug = True
    app.run()
| {
"content_hash": "362a9fb8028778e6496b58a79ef16a77",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 88,
"avg_line_length": 25.633333333333333,
"alnum_prop": 0.6488946684005201,
"repo_name": "archimag/python-closure-templates",
"id": "c2a5b0ddea58b3b2a9e934328863b4b248f0ca3c",
"size": "1396",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_js_backend.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "44465"
},
{
"name": "Python",
"bytes": "88413"
}
],
"symlink_target": ""
} |
"""
A Printer for generating executable code.
The most important function here is srepr (that is an exact equivalent of
builtin repr, except for optional arguments) that returns a string so that the
relation eval(srepr(expr))=expr holds in an appropriate environment.
"""
from __future__ import annotations
import typing
import mpmath.libmp as mlib
from mpmath.libmp import prec_to_dps, repr_dps
from ..core.function import AppliedUndef
from ..utilities import default_sort_key
from .defaults import DefaultPrinting
from .printer import Printer
class ReprPrinter(Printer):
    """Repr printer.

    Produces a string such that ``eval(srepr(expr)) == expr`` holds in an
    environment where the printed class names are importable.
    """
    printmethod = '_diofantrepr'
    # Only the term ordering is configurable; None keeps the default order.
    _default_settings: dict[str, typing.Any] = {
        'order': None
    }
    def reprify(self, args, sep):
        """
        Prints each item in `args` and joins them with `sep`.
        """
        return sep.join([self.doprint(item) for item in args])
    def emptyPrinter(self, expr):
        """
        The fallback printer.
        """
        # Anything with an iterable ``args`` is printed constructor-style.
        if hasattr(expr, 'args') and hasattr(expr.args, '__iter__'):
            l = []
            for o in expr.args:
                l.append(self._print(o))
            return expr.__class__.__name__ + '(%s)' % ', '.join(l)
        elif hasattr(expr, '__repr__') and not issubclass(expr.__class__,
                                                          DefaultPrinting):
            return repr(expr)
        else:
            return object.__repr__(expr)
    def _print_Dict(self, expr):
        # Sort entries so the output is deterministic.
        l = []
        for o in sorted(expr.args, key=default_sort_key):
            l.append(self._print(o))
        return expr.__class__.__name__ + '(%s)' % ', '.join(l)
    def _print_Add(self, expr, order=None):
        args = expr.as_ordered_terms(order=order or self.order)
        args = map(self._print, args)
        return 'Add(%s)' % ', '.join(args)
    def _print_Function(self, expr):
        r = self._print(expr.func)
        r += '(%s)' % ', '.join([self._print(a) for a in expr.args])
        return r
    def _print_FunctionClass(self, expr):
        # Undefined functions round-trip through Function('name').
        if issubclass(expr, AppliedUndef):
            return f'Function({expr.__name__!r})'
        else:
            return expr.__name__
    def _print_RationalConstant(self, expr):
        return f'Rational({expr.numerator}, {expr.denominator})'
    def _print_AtomicExpr(self, expr):
        return str(expr)
    def _print_NumberSymbol(self, expr):
        return str(expr)
    def _print_Integer(self, expr):
        return 'Integer(%i)' % int(expr.numerator)
    def _print_list(self, expr):
        return '[%s]' % self.reprify(expr, ', ')
    def _print_MatrixBase(self, expr):
        # special case for some empty matrices
        if (expr.rows == 0) ^ (expr.cols == 0):
            return '%s(%s, %s, %s)' % (expr.__class__.__name__,
                                       self._print(expr.rows),
                                       self._print(expr.cols),
                                       self._print([]))
        # Rebuild the matrix as a nested list of entries, row-major.
        l = []
        for i in range(expr.rows):
            l.append([])
            for j in range(expr.cols):
                l[-1].append(expr[i, j])
        return f'{expr.__class__.__name__}({self._print(l)})'
    def _print_BooleanTrue(self, expr):
        return 'true'
    def _print_BooleanFalse(self, expr):
        return 'false'
    def _print_NaN(self, expr):
        return 'nan'
    def _print_Mul(self, expr, order=None):
        # Rebuild the product so factors come out in canonical order.
        terms = expr.args
        args = expr._new_rawargs(*terms).as_ordered_factors()
        args = map(self._print, args)
        return 'Mul(%s)' % ', '.join(args)
    def _print_Rational(self, expr):
        return 'Rational(%s, %s)' % (self._print(int(expr.numerator)),
                                     self._print(int(expr.denominator)))
    def _print_Float(self, expr):
        # Print enough digits (repr_dps) to reconstruct the value exactly.
        dps = prec_to_dps(expr._prec)
        r = mlib.to_str(expr._mpf_, repr_dps(expr._prec))
        return f"{expr.__class__.__name__}('{r}', dps={dps:d})"
    def _print_BaseSymbol(self, expr):
        # Include only the assumptions the user actually supplied.
        d = expr._assumptions.generator
        if d == {}:
            return f'{expr.__class__.__name__}({self._print(expr.name)})'
        else:
            attr = [f'{k}={v}' for k, v in d.items()]
            return '%s(%s, %s)' % (expr.__class__.__name__,
                                   self._print(expr.name), ', '.join(attr))
    def _print_str(self, expr):
        return repr(expr)
    def _print_tuple(self, expr):
        # Single-element tuples need the trailing comma to round-trip.
        if len(expr) == 1:
            return '(%s,)' % self._print(expr[0])
        else:
            return '(%s)' % self.reprify(expr, ', ')
    def _print_WildFunction(self, expr):
        return f"{expr.__class__.__name__}('{expr.name}')"
    def _print_PolynomialRing(self, ring):
        return '%s(%s, %s, %s)' % (ring.__class__.__name__,
                                   self._print(ring.domain),
                                   self._print(ring.symbols),
                                   self._print(ring.order))
    def _print_GMPYIntegerRing(self, expr):
        return f'{expr.__class__.__name__}()'
    # These domains/orders all print as a bare zero-argument constructor.
    _print_GMPYRationalField = _print_GMPYIntegerRing
    _print_PythonIntegerRing = _print_GMPYIntegerRing
    _print_PythonRationalField = _print_GMPYIntegerRing
    _print_LexOrder = _print_GMPYIntegerRing
    _print_GradedLexOrder = _print_LexOrder
    def _print_FractionField(self, field):
        return '%s(%s, %s, %s)' % (field.__class__.__name__,
                                   self._print(field.domain), self._print(field.symbols), self._print(field.order))
    def _print_PolyElement(self, poly):
        # Terms are sorted by the ring's monomial order, highest first.
        terms = list(poly.items())
        terms.sort(key=poly.ring.order, reverse=True)
        return f'{poly.__class__.__name__}({self._print(poly.ring)}, {self._print(terms)})'
    def _print_FracElement(self, frac):
        # Numerator and denominator are printed as ordered term lists.
        numer_terms = list(frac.numerator.items())
        numer_terms.sort(key=frac.field.order, reverse=True)
        denom_terms = list(frac.denominator.items())
        denom_terms.sort(key=frac.field.order, reverse=True)
        numer = self._print(numer_terms)
        denom = self._print(denom_terms)
        return f'{frac.__class__.__name__}({self._print(frac.field)}, {numer}, {denom})'
    def _print_AlgebraicField(self, expr):
        return 'AlgebraicField(%s, %s)' % (self._print(expr.domain),
                                           self._print(expr.ext.as_expr()))
    def _print_AlgebraicElement(self, expr):
        return '%s(%s)' % (self._print(expr.parent),
                           self._print(list(map(expr.domain.domain.to_expr, expr.rep.all_coeffs()))))
    def _print_Domain(self, expr):
        return expr.rep
def srepr(expr, **settings):
    """Return expr in repr form."""
    printer = ReprPrinter(settings)
    return printer.doprint(expr)
| {
"content_hash": "c01a0b8a332b175d73b44dd9c34d1478",
"timestamp": "",
"source": "github",
"line_count": 195,
"max_line_length": 115,
"avg_line_length": 34.717948717948715,
"alnum_prop": 0.5425406203840473,
"repo_name": "diofant/diofant",
"id": "7bc4b40f1525851325241246b5954994c3484465",
"size": "6770",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "diofant/printing/repr.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "9063539"
}
],
"symlink_target": ""
} |
from libnmap.process import NmapProcess
from libnmap.parser import NmapParser, NmapParserException
# start a new nmap scan on localhost with some specific options
def do_scan(targets, options):
    """Run an nmap scan against *targets* with *options*.

    Returns the parsed NmapParser report, or None if the scan or the
    parsing failed (a message is printed in either case).

    Bug fixed: the original fell through to parsing even when the scan
    returned non-zero, and when NmapParser.parse() raised, the final
    ``return parsed`` hit an unbound local (NameError) instead of
    reporting the failure.
    """
    nm = NmapProcess(targets, options)
    rc = nm.run()
    if rc != 0:
        print("nmap scan failed: {0}".format(nm.stderr))
        return None
    try:
        return NmapParser.parse(nm.stdout)
    except NmapParserException as e:
        print("Exception raised while parsing scan: {0}".format(e.msg))
        return None
# print scan results from a nmap report
def print_scan(nmap_report):
    """Print *nmap_report* in a format resembling nmap's own console output."""
    # NOTE(review): _nmaprun is a private attribute of the report object.
    print("Starting Nmap {0} ( http://nmap.org ) at {1}".format(
        nmap_report._nmaprun['version'],
        nmap_report._nmaprun['startstr']))
    for host in nmap_report.hosts:
        # Prefer a hostname when one exists; note that pop() mutates the
        # host's hostname list as a side effect.
        if len(host.hostnames):
            tmp_host = host.hostnames.pop()
        else:
            tmp_host = host.address
        print("Nmap scan report for {0} ({1})".format(
            tmp_host,
            host.address))
        print("Host is {0}.".format(host.status))
        print(" PORT STATE SERVICE")
        for serv in host.services:
            # Columns: port/proto, state, service name, optional banner.
            pserv = "{0:>5s}/{1:3s} {2:12s} {3}".format(
                str(serv.port),
                serv.protocol,
                serv.state,
                serv.service)
            if len(serv.banner):
                pserv += " ({0})".format(serv.banner)
            print(pserv)
    print(nmap_report.summary)
if __name__ == "__main__":
    # Scan localhost with service/version detection and pretty-print it.
    print_scan(do_scan("127.0.0.1", "-sV"))
| {
"content_hash": "1efa22fee80273a2a6b73c5687cbc258",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 71,
"avg_line_length": 30.076923076923077,
"alnum_prop": 0.5607416879795396,
"repo_name": "pyphrb/myweb",
"id": "5399b666fccf369655b273a2a004d923c8b167b6",
"size": "1586",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/plugin/nmap/libnmap/test/process-stressbox/proc_nmap_like.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1922"
},
{
"name": "Python",
"bytes": "192309"
}
],
"symlink_target": ""
} |
''' Sample usage of function 'topology'.
Print the function's documentation.
Apply the function to the network.
Print the function output.
'''
from __future__ import print_function as _print_function
from basics.topology import topology
from pydoc import plain
from pydoc import render_doc as doc
from basics.context import sys_exit, EX_OK, EX_SOFTWARE
def main():
    '''
    Print the documentation for ``topology``, invoke it, and print the result.

    This sample script does:
    http://127.0.0.1:8181/restconf/operational/network-topology:network-topology/topology/topology-netconf
    which is not found (404) in ODL Helium.
    It might work in ODL Lithium.
    The request for the Yang container *does* succeed:
    http://127.0.0.1:8181/restconf/operational/network-topology:network-topology
    '''
    print(plain(doc(topology)))
    try:
        print(topology("operational"))
    except Exception as e:
        # Sample script: report any failure and signal a software error.
        print(e)
        return EX_SOFTWARE
    return EX_OK
if __name__ == "__main__":
    # Propagate main()'s status code (EX_OK / EX_SOFTWARE) to the shell.
    sys_exit(main())
| {
"content_hash": "6618aadd884b764add062b44568509eb",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 106,
"avg_line_length": 28.61764705882353,
"alnum_prop": 0.6834532374100719,
"repo_name": "tbarrongh/cosc-learning-labs",
"id": "1729ba1b10acc7893c8e8746f91185d31460d5cd",
"size": "1576",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/learning_lab/04_topology.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "354065"
},
{
"name": "Shell",
"bytes": "2128"
}
],
"symlink_target": ""
} |
import os
import argparse
import subprocess
import re
__paceconfigs = {}
common_props = ['checker_tool', 'scratchpad_dir', 'debug_level', 'ignore_ioctl', 'ignore_mmap', 'ignore_stacktrace', 'ignore_file_read', 'cached_prefix_states_file']
def init_paceconfig(machine_id, args):
    """Build and register the configuration for one machine.

    Defaults are declared via an argparse parser (parsed with an empty
    argv), then overridden by matching keys from *args*. The resulting
    namespace is stored in the module-level ``__paceconfigs`` map, and
    every property listed in ``common_props`` is asserted to be identical
    across all machines registered so far.
    """
    global __paceconfigs
    __paceconfig = None
    assert machine_id >= 0
    # argparse doubles as a typed defaults table here; nothing is read
    # from the real command line.
    parser = argparse.ArgumentParser()
    parser.add_argument('--strace_file_prefix', dest = 'strace_file_prefix', type = str, default = False)
    parser.add_argument('--initial_snapshot', dest = 'initial_snapshot', type = str, default = False)
    parser.add_argument('--checker_tool', dest = 'checker_tool', type = str, default = False)
    parser.add_argument('--base_path', dest = 'base_path', type = str, default = False)
    parser.add_argument('--starting_cwd', dest = 'starting_cwd', type = str, default = False)
    parser.add_argument('--interesting_stdout_prefix', dest = 'interesting_stdout_prefix', type = str, default = False)
    parser.add_argument('--collapse_recv', dest = 'collapse_recv', type = bool, default = False)
    parser.add_argument('--interesting_path_string', dest = 'interesting_path_string', type = str, default = False)
    parser.add_argument('--scratchpad_dir', dest = 'scratchpad_dir', type = str, default = '/tmp')
    parser.add_argument('--debug_level', dest = 'debug_level', type = int, default = 0)
    parser.add_argument('--ignore_ioctl', dest = 'ignore_ioctl', type = list, default = [])
    parser.add_argument('--ignore_mmap', dest = 'ignore_mmap', type = bool, default = False)
    parser.add_argument('--ignore_stacktrace', dest = 'ignore_stacktrace', type = bool, default = False)
    parser.add_argument('--ignore_file_read', dest = 'ignore_file_read', type = bool, default = True)
    parser.add_argument('--cached_prefix_states_file', dest = 'cached_prefix_states_file', type = str, default = False)
    parser.add_argument('--client', dest = 'client', type = bool, default = False)
    # Parse an empty argv to get a namespace holding only the defaults.
    __paceconfig = parser.parse_args('')
    # Caller-supplied values override the defaults key by key.
    for key in __paceconfig.__dict__:
        if key in args:
            __paceconfig.__dict__[key] = args[key]
    # False is the sentinel for "not provided": these settings are required.
    assert __paceconfig.strace_file_prefix != False
    assert __paceconfig.initial_snapshot != False
    assert __paceconfig.base_path != False and __paceconfig.base_path.startswith('/')
    if __paceconfig.base_path.endswith('/'):
        __paceconfig.base_path = __paceconfig.base_path[0 : -1]
    if __paceconfig.interesting_path_string == False:
        __paceconfig.interesting_path_string = r'^' + __paceconfig.base_path
    if 'starting_cwd' not in __paceconfig.__dict__ or __paceconfig.starting_cwd == False:
        __paceconfig.starting_cwd = __paceconfig.base_path
    def all_same(items):
        return all(x == items[0] for x in items)
    assert __paceconfig.scratchpad_dir != False
    __paceconfig.machine_id = machine_id
    __paceconfigs[machine_id] = __paceconfig
    # Cross-machine invariant: the "common" properties must agree on
    # every machine configured so far.
    for prop in common_props:
        to_check = []
        for machine in __paceconfigs.keys():
            to_check.append(getattr(__paceconfigs[machine], prop))
        assert all_same(to_check) == True
def paceconfig(machine_id):
    """Return the registered configuration for *machine_id*.

    machine_id could be None for the machine-independent ("common")
    properties, but for simplicity every caller is required to pass its
    machine context, so the lookup is always explicit.
    """
    return __paceconfigs[machine_id]
def get_path_inode_map(directory):
    """Map every path under *directory* to a ``(inode, type)`` tuple,
    where type is 'd' for directories and 'f' for regular files.

    Fixes over the original:
      * ``find`` is invoked with an argument list (shell=False), so a
        directory name containing shell metacharacters can no longer be
        interpreted by the shell (command injection).
      * Output lines are parsed first-token/last-token, so paths that
        contain spaces are handled correctly instead of crashing the
        3-way unpack.
      * ``universal_newlines=True`` gives text output on both Python 2
        and Python 3.
    """
    while directory.endswith('/'):
        directory = directory[:-1]
    output = subprocess.check_output(
        ['find', directory, '-printf', '%i %p %y\n'],
        universal_newlines=True)
    result = {}
    for line in output.split('\n'):
        if line == '':
            continue
        fields = line.split(' ')
        inode = int(fields[0])
        entry_type = fields[-1]
        # Everything between the inode and the type byte is the path.
        path = ' '.join(fields[1:-1])
        assert entry_type == 'd' or entry_type == 'f'
        result[path] = (inode, entry_type)
    return result
def colorize(s, i):
    """Wrap *s* in ANSI escape codes selecting terminal color 30 + *i*."""
    return '\033[00;{0}m{1}\033[0m'.format(30 + i, s)
def coded_colorize(s, s2=None):
    """Colorize *s* with a color chosen deterministically from *s2*.

    *s2* defaults to *s* itself, so equal strings always receive the same
    color within one interpreter run.  NOTE(review): str hashes are
    salted per-process on Python 3, so colors are only stable within a
    single run -- confirm that is acceptable for callers.
    """
    colors = [1, 3, 5, 6, 11, 12, 14, 15]
    if s2 is None:  # identity test for the None sentinel, not ==
        s2 = s
    return colorize(s, colors[hash(s2) % len(colors)])
def colors_test(fname):
    """Write a sample of the first 30 ANSI colors to *fname*, one per line."""
    with open(fname, 'w') as out:
        for color in range(30):
            out.write(colorize(str(color), color) + '\n')
def short_path(machine_id, name):
    """Strip the configured base-path prefix from *name*, if present.

    Returns *name* unchanged when there is no config for *machine_id* or
    when *name* does not live under that machine's base path.
    """
    config = __paceconfigs[machine_id]
    if not config:
        return name
    if not name.startswith(config.base_path):
        return name
    prefix = re.sub(r'//', r'/', config.base_path + '/')
    return name.replace(prefix, '', 1)
# The input parameter must already have gone through original_path()
def initial_path(machine_id, name):
    """Rebase *name* from the base path into the initial snapshot.

    *name* must already have gone through original_path().  Returns False
    when *name* lies outside the machine's base path; otherwise the
    rebased path with any doubled slashes collapsed.
    """
    config = __paceconfigs[machine_id]
    if not name.startswith(config.base_path):
        return False
    rebased = name.replace(config.base_path, config.initial_snapshot + '/', 1)
    return re.sub(r'//', r'/', rebased)
# The input parameter must already have gone through original_path()
def replayed_path(machine_id, name):
    """Rebase *name* from the base path into the scratchpad directory.

    *name* must already have gone through original_path().  Returns False
    when *name* lies outside the machine's base path; otherwise the
    rebased path with any doubled slashes collapsed.
    """
    config = __paceconfigs[machine_id]
    if not name.startswith(config.base_path):
        return False
    rebased = name.replace(config.base_path, config.scratchpad_dir + '/', 1)
    return re.sub(r'//', r'/', rebased)
def safe_string_to_int(s):
    """Parse *s* as an integer, honouring C-style numeric prefixes.

    "0x..." is read as hexadecimal and a leading "0" as octal; anything
    else is decimal.  On failure the offending string is printed (to aid
    debugging) and the ValueError is re-raised.
    """
    try:
        # s[:2] == "0x" implies len(s) >= 2, so the explicit length
        # check of the original is redundant.
        if s[:2] == "0x":
            return int(s, 16)
        # s[:1] (unlike s[0]) is safe on the empty string, which now
        # fails uniformly with ValueError from int('') instead of an
        # unhandled IndexError.
        if s[:1] == '0':
            return int(s, 8)
        return int(s)
    except ValueError as err:
        print(s)
        raise err
def is_interesting(machine_id, path):
    """Whether *path* matches this machine's interesting-path pattern.

    Returns the re match object (truthy) or None, exactly as re.search.
    """
    pattern = paceconfig(machine_id).interesting_path_string
    return re.search(pattern, path)
def writeable_toggle(path, mode=None):
    """Make *path* writeable, or restore a previously saved mode.

    Called with no *mode*: make *path* writeable if it is not, returning
    the value to pass back later to undo the change -- the previous
    st_mode, or the sentinel 'UNTOGGLED' when nothing needed changing.
    Called with a saved *mode*: restore it ('UNTOGGLED' means no-op).
    """
    if mode == 'UNTOGGLED':
        return  # nothing was changed on the way in, nothing to restore
    elif mode is not None:
        # Restore the saved permissions.  NOTE(review): control then
        # falls through below, so a restored non-writeable mode is
        # immediately made writeable again -- confirm this fall-through
        # is the intended behaviour before changing it.
        os.chmod(path, mode)
    if os.access(path, os.W_OK):
        return 'UNTOGGLED'
    # Not writeable (first check already established this, so the
    # original's second os.access test was redundant): remember the
    # current mode, then open the file up.
    old_mode = os.stat(path).st_mode
    os.chmod(path, 0o777)  # 0o777: portable octal syntax (was py2-only 0777)
    return old_mode
| {
"content_hash": "dcbe4b5b0f54319861e997235a5cf186",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 165,
"avg_line_length": 39.52517985611511,
"alnum_prop": 0.6821987622861303,
"repo_name": "ramanala/PACE",
"id": "47a182269e63c9e2ae5206d9170e0cc703b1cffa",
"size": "6575",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "_paceutils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Awk",
"bytes": "194"
},
{
"name": "C",
"bytes": "1487904"
},
{
"name": "C++",
"bytes": "65155"
},
{
"name": "Emacs Lisp",
"bytes": "3117"
},
{
"name": "Groff",
"bytes": "1812"
},
{
"name": "M4",
"bytes": "4306"
},
{
"name": "Makefile",
"bytes": "122268"
},
{
"name": "Perl",
"bytes": "9808"
},
{
"name": "Perl6",
"bytes": "793"
},
{
"name": "Python",
"bytes": "247228"
},
{
"name": "Shell",
"bytes": "109528"
}
],
"symlink_target": ""
} |
from nutils import mesh, function, solver, export, cli, testing
import numpy, treelog
def main(nelems:int, etype:str, btype:str, degree:int, traction:float, maxrefine:int, radius:float, poisson:float):
  '''
  Horizontally loaded linear elastic plate with FCM hole.

  .. arguments::

     nelems [9]
       Number of elements along edge.
     etype [square]
       Type of elements (square/triangle/mixed).
     btype [std]
       Type of basis function (std/spline), with availability depending on the
       selected element type.
     degree [2]
       Polynomial degree.
     traction [.1]
       Far field traction (relative to Young's modulus).
     maxrefine [2]
       Number of refinement levels used for the finite cell method.
     radius [.5]
       Cut-out radius.
     poisson [.3]
       Poisson's ratio, nonnegative and strictly smaller than 1/2.
  '''

  # Background mesh with the circular hole removed by trimming (finite cell
  # method); maxrefine controls the refinement depth used to resolve the
  # trimmed boundary for integration.
  domain0, geom = mesh.unitsquare(nelems, etype)
  domain = domain0.trim(function.norm2(geom) - radius, maxrefine=maxrefine)
  ns = function.Namespace()
  ns.x = geom
  # Material parameters expressed relative to Young's modulus.
  ns.lmbda = 2 * poisson
  ns.mu = 1 - poisson
  # Vector-valued displacement basis and linear-elastic stress/strain.
  ns.ubasis = domain.basis(btype, degree=degree).vector(2)
  ns.u_i = 'ubasis_ni ?lhs_n'
  ns.X_i = 'x_i + u_i'  # deformed geometry
  ns.strain_ij = '(d(u_i, x_j) + d(u_j, x_i)) / 2'
  ns.stress_ij = 'lmbda strain_kk δ_ij + 2 mu strain_ij'
  # Exact solution of the plate-with-hole problem, used both for the
  # Dirichlet data on the far boundary and for the error measure below.
  ns.r2 = 'x_k x_k'
  ns.R2 = radius**2 / ns.r2
  ns.k = (3-poisson) / (1+poisson) # plane stress parameter
  ns.scale = traction * (1+poisson) / 2
  ns.uexact_i = 'scale (x_i ((k + 1) (0.5 + R2) + (1 - R2) R2 (x_0^2 - 3 x_1^2) / r2) - 2 δ_i1 x_1 (1 + (k - 1 + R2) R2))'
  ns.du_i = 'u_i - uexact_i'
  # Symmetry conditions on left/bottom, exact displacement on top/right;
  # both imposed as constraints via least-squares optimization.
  sqr = domain.boundary['left,bottom'].integral('(u_i n_i)^2 J(x)' @ ns, degree=degree*2)
  cons = solver.optimize('lhs', sqr, droptol=1e-15)
  sqr = domain.boundary['top,right'].integral('du_k du_k J(x)' @ ns, degree=20)
  cons = solver.optimize('lhs', sqr, droptol=1e-15, constrain=cons)
  # Solve the linear virtual-work residual for the displacement dofs.
  res = domain.integral('d(ubasis_ni, x_j) stress_ij J(x)' @ ns, degree=degree*2)
  lhs = solver.solve_linear('lhs', res, constrain=cons)
  # Plot the horizontal stress component on the deformed geometry.
  bezier = domain.sample('bezier', 5)
  X, stressxx = bezier.eval(['X', 'stress_00'] @ ns, lhs=lhs)
  export.triplot('stressxx.png', X, stressxx, tri=bezier.tri, hull=bezier.hull)
  # L2 and H1 errors against the exact solution.
  err = domain.integral('<du_k du_k, sum:ij(d(du_i, x_j)^2)>_n J(x)' @ ns, degree=max(degree,3)*2).eval(lhs=lhs)**.5
  treelog.user('errors: L2={:.2e}, H1={:.2e}'.format(*err))
  return err, cons, lhs
# If the script is executed (as opposed to imported), :func:`nutils.cli.run`
# calls the main function with arguments provided from the command line. For
# example, to keep with the default arguments simply run :sh:`python3
# platewithhole.py`. To select mixed elements and quadratic basis functions add
# :sh:`python3 platewithhole.py etype=mixed degree=2`.
if __name__ == '__main__':
  cli.run(main)  # parse command-line arguments per the docstring and call main
# Once a simulation is developed and tested, it is good practice to save a few
# strategic return values for regression testing. The :mod:`nutils.testing`
# module, which builds on the standard :mod:`unittest` framework, facilitates
# this by providing :func:`nutils.testing.TestCase.assertAlmostEqual64` for the
# embedding of desired results as compressed base64 data.
class test(testing.TestCase):
  '''Regression tests pinning strategic return values of main().

  The constraint and solution vectors are embedded as compressed base64
  data via assertAlmostEqual64; scalar errors are checked to 5 places.
  '''

  @testing.requires('matplotlib')
  def test_spline(self):
    # Coarse spline discretization on square elements.
    err, cons, lhs = main(nelems=4, etype='square', btype='spline', degree=2, traction=.1, maxrefine=2, radius=.5, poisson=.3)
    with self.subTest('l2-error'):
      self.assertAlmostEqual(err[0], .00033, places=5)
    with self.subTest('h1-error'):
      self.assertAlmostEqual(err[1], .00672, places=5)
    with self.subTest('constraints'): self.assertAlmostEqual64(cons, '''
      eNpjaGBoYGBAxvrnGBow4X89g3NQFSjQwLAGq7i10Wus4k+NfM8fNWZgOGL89upc47WX0ozvXjAzPn1e
      1TjnPACrACoJ''')
    with self.subTest('left-hand side'): self.assertAlmostEqual64(lhs, '''
      eNpbZHbajIHhxzkGBhMgtgdi/XPypyRPvjFxO/PccPq5Vn2vcxr6luf+6xmcm2LMwLDQePf5c0bTzx8x
      5D7vaTjnnIFhzbmlQPH5xhV39Y3vXlxtJHoh2EjvvLXR63MbgOIbjRdfrTXeecnUeO+Fn0Yrzj818j1/
      FCh+xPjt1bnGay+lGd+9YGZ8+ryqcc55AK+AP/0=''')

  @testing.requires('matplotlib')
  def test_mixed(self):
    # Same problem on a mixed square/triangle mesh with std basis.
    err, cons, lhs = main(nelems=4, etype='mixed', btype='std', degree=2, traction=.1, maxrefine=2, radius=.5, poisson=.3)
    with self.subTest('l2-error'):
      self.assertAlmostEqual(err[0], .00024, places=5)
    with self.subTest('h1-error'):
      self.assertAlmostEqual(err[1], .00739, places=5)
    with self.subTest('constraints'): self.assertAlmostEqual64(cons, '''
      eNpjaGDADhlwiOEU1z8HZusbgukkg5BzRJqKFRoa1oD1HzfceA5NH9FmgKC10SuwOdONpM7DxDYa77gM
      MueoMQPDEePzV2Hic42XXmoynnQRxvc3dryQbnz3Aoj91Mj3vJnx6fOqxjnnAQzkV94=''')
    with self.subTest('left-hand side'): self.assertAlmostEqual64(lhs, '''
      eNoNzE8og3EcBvC3uUo5rNUOnBSK9/19n0Ic0Eo5oJBmRxcaB04kUnPgoETmT2w7LVrtMBy4auMw+35/
      7/vaykFSFEopKTnIe/jU01PPU6FNWcQIn+Or5CBfSqCGD1uDYhi7/KbW+dma5aK65gX6Y8Po8HSzZQ7y
      vBniHyvFV9aq17V7TK42O9kwFS9YUzxhjXIcZxLCnIzjTsfxah/BMFJotjUlZYz6xYeoPqEPKaigbKhb
      9lOj9NGa9KgtVmqJH9UT36gcp71dEr6HaVS5GS8f46AcQ9itx739SQXdBL8dRqeTo1odox35poh2yJVh
      apEueucsRWWPgpJFoLKPNzeHC/fU+yl48pDyMi6dCFbsBNJODNu2iawOoE4PoVdP4kH/UkZeaEDaUJQG
      zMg/DouRUg==''')
| {
"content_hash": "c882e0dc3f3587fe9661184b153b2dce",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 126,
"avg_line_length": 46.123893805309734,
"alnum_prop": 0.7014581734458941,
"repo_name": "joostvanzwieten/nutils",
"id": "dadafac16693c9d1ef0504c6128eabec55f46f1b",
"size": "5679",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/platewithhole.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "GLSL",
"bytes": "5351"
},
{
"name": "Python",
"bytes": "1203910"
}
],
"symlink_target": ""
} |
"""Test listing raw events.
"""
import datetime
import testscenarios
from oslo.config import cfg
from ceilometer.publisher import rpc
from ceilometer import sample
from ceilometer.tests import api as tests_api
from ceilometer.tests import db as tests_db
load_tests = testscenarios.load_tests_apply_scenarios
class TestListEvents(tests_api.TestBase,
                     tests_db.MixinTestsWithBackendScenarios):
    """List raw events through the v1 API, filtered several ways.

    setUp records three 'instance' samples: two for user-id/project1 on
    resource-id (timestamps 10:40 and 10:41) and one for user-id2/project2
    on resource-id-alternate (10:42), all attributed to source1.  The
    tests below slice them by project, resource, source, user, timestamp
    window, and tenant headers.
    """

    def setUp(self):
        super(TestListEvents, self).setUp()
        for cnt in [
            sample.Sample(
                'instance',
                'cumulative',
                '',
                1,
                'user-id',
                'project1',
                'resource-id',
                timestamp=datetime.datetime(2012, 7, 2, 10, 40),
                resource_metadata={'display_name': 'test-server',
                                   'tag': 'self.sample'},
                source='source1',
            ),
            sample.Sample(
                'instance',
                'cumulative',
                '',
                2,
                'user-id',
                'project1',
                'resource-id',
                timestamp=datetime.datetime(2012, 7, 2, 10, 41),
                resource_metadata={'display_name': 'test-server',
                                   'tag': 'self.sample'},
                source='source1',
            ),
            sample.Sample(
                'instance',
                'cumulative',
                '',
                1,
                'user-id2',
                'project2',
                'resource-id-alternate',
                timestamp=datetime.datetime(2012, 7, 2, 10, 42),
                resource_metadata={'display_name': 'test-server',
                                   'tag': 'self.sample2'},
                source='source1',
            ),
        ]:
            # Sign the sample the way the RPC publisher would, then store
            # it directly through the metering database connection.
            msg = rpc.meter_message_from_counter(
                cnt,
                cfg.CONF.publisher_rpc.metering_secret)
            self.conn.record_metering_data(msg)

    # -- filtering by project --------------------------------------------

    def test_empty_project(self):
        data = self.get('/projects/no-such-project/meters/instance')
        self.assertEqual({'events': []}, data)

    def test_by_project(self):
        data = self.get('/projects/project1/meters/instance')
        self.assertEqual(2, len(data['events']))

    def test_by_project_non_admin(self):
        # A member of project1 sees project1's own events.
        data = self.get('/projects/project1/meters/instance',
                        headers={"X-Roles": "Member",
                                 "X-Project-Id": "project1"})
        self.assertEqual(2, len(data['events']))

    def test_by_project_wrong_tenant(self):
        # Asking about a project other than your own is a 404.
        resp = self.get('/projects/project1/meters/instance',
                        headers={"X-Roles": "Member",
                                 "X-Project-Id": "this-is-my-project"})
        self.assertEqual(404, resp.status_code)

    def test_by_project_with_timestamps(self):
        # Both project1 samples predate the 10:42 start bound.
        data = self.get('/projects/project1/meters/instance',
                        start_timestamp=datetime.datetime(2012, 7, 2, 10, 42))
        self.assertEqual(0, len(data['events']))

    # -- filtering by resource -------------------------------------------

    def test_empty_resource(self):
        data = self.get('/resources/no-such-resource/meters/instance')
        self.assertEqual({'events': []}, data)

    def test_by_resource(self):
        data = self.get('/resources/resource-id/meters/instance')
        self.assertEqual(2, len(data['events']))

    def test_by_resource_non_admin(self):
        data = self.get('/resources/resource-id-alternate/meters/instance',
                        headers={"X-Roles": "Member",
                                 "X-Project-Id": "project2"})
        self.assertEqual(1, len(data['events']))

    def test_by_resource_some_tenant(self):
        # resource-id belongs to project1, so project2 sees nothing.
        data = self.get('/resources/resource-id/meters/instance',
                        headers={"X-Roles": "Member",
                                 "X-Project-Id": "project2"})
        self.assertEqual(0, len(data['events']))

    # -- filtering by source ---------------------------------------------

    def test_empty_source(self):
        data = self.get('/sources/no-such-source/meters/instance')
        self.assertEqual({'events': []}, data)

    def test_by_source(self):
        data = self.get('/sources/source1/meters/instance')
        self.assertEqual(3, len(data['events']))

    def test_by_source_non_admin(self):
        data = self.get('/sources/source1/meters/instance',
                        headers={"X-Roles": "Member",
                                 "X-Project-Id": "project2"})
        self.assertEqual(1, len(data['events']))

    def test_by_source_with_timestamps(self):
        # The 10:42 sample is excluded by the end bound.
        data = self.get('/sources/source1/meters/instance',
                        end_timestamp=datetime.datetime(2012, 7, 2, 10, 42))
        self.assertEqual(2, len(data['events']))

    # -- filtering by user -----------------------------------------------

    def test_empty_user(self):
        data = self.get('/users/no-such-user/meters/instance')
        self.assertEqual({'events': []}, data)

    def test_by_user(self):
        data = self.get('/users/user-id/meters/instance')
        self.assertEqual(2, len(data['events']))

    def test_by_user_non_admin(self):
        data = self.get('/users/user-id/meters/instance',
                        headers={"X-Roles": "Member",
                                 "X-Project-Id": "project1"})
        self.assertEqual(2, len(data['events']))

    def test_by_user_wrong_tenant(self):
        # user-id's samples belong to project1, so project2 sees nothing.
        data = self.get('/users/user-id/meters/instance',
                        headers={"X-Roles": "Member",
                                 "X-Project-Id": "project2"})
        self.assertEqual(0, len(data['events']))

    def test_by_user_with_timestamps(self):
        # Half-open window [10:41, 10:42) matches exactly one sample.
        data = self.get('/users/user-id/meters/instance',
                        start_timestamp=datetime.datetime(2012, 7, 2, 10, 41),
                        end_timestamp=datetime.datetime(2012, 7, 2, 10, 42))
        self.assertEqual(1, len(data['events']))

    # -- content negotiation ---------------------------------------------

    def test_template_list_event(self):
        rv = self.get('/resources/resource-id/meters/instance',
                      headers={"Accept": "text/html"})
        self.assertEqual(200, rv.status_code)
        self.assertTrue("text/html" in rv.content_type)
class TestListEventsMetaquery(TestListEvents,
                              tests_db.MixinTestsWithBackendScenarios):
    """Re-run the event listing tests plus metadata-query filters.

    Each test appends a ``metadata.<field>=<value>`` query parameter to
    the source1 instance-meter URL and checks the number of matches,
    optionally under a non-admin tenant header.
    """

    def test_metaquery1(self):
        base = '/sources/source1/meters/instance'
        data = self.get(base + '?metadata.tag=self.sample2')
        self.assertEqual(1, len(data['events']))

    def test_metaquery1_wrong_tenant(self):
        base = '/sources/source1/meters/instance'
        data = self.get(base + '?metadata.tag=self.sample2',
                        headers={"X-Roles": "Member",
                                 "X-Project-Id": "project1"})
        self.assertEqual(0, len(data['events']))

    def test_metaquery2(self):
        base = '/sources/source1/meters/instance'
        data = self.get(base + '?metadata.tag=self.sample')
        self.assertEqual(2, len(data['events']))

    def test_metaquery2_non_admin(self):
        base = '/sources/source1/meters/instance'
        data = self.get(base + '?metadata.tag=self.sample',
                        headers={"X-Roles": "Member",
                                 "X-Project-Id": "project1"})
        self.assertEqual(2, len(data['events']))

    def test_metaquery3(self):
        base = '/sources/source1/meters/instance'
        data = self.get(base + '?metadata.display_name=test-server')
        self.assertEqual(3, len(data['events']))

    def test_metaquery3_with_project(self):
        base = '/sources/source1/meters/instance'
        data = self.get(base + '?metadata.display_name=test-server',
                        headers={"X-Roles": "Member",
                                 "X-Project-Id": "project2"})
        self.assertEqual(1, len(data['events']))
| {
"content_hash": "3957c5d24713196298833e5aa213d2d6",
"timestamp": "",
"source": "github",
"line_count": 203,
"max_line_length": 78,
"avg_line_length": 39.19211822660098,
"alnum_prop": 0.5214932126696833,
"repo_name": "citrix-openstack-build/ceilometer",
"id": "a5add0b250cae487dd0896f0e89112431e8a691d",
"size": "8687",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/api/v1/test_list_events_scenarios.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "6284"
},
{
"name": "JavaScript",
"bytes": "304636"
},
{
"name": "Python",
"bytes": "1776303"
},
{
"name": "Shell",
"bytes": "1322"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.