Dataset schema: code (string, lengths 2 to 1.05M), repo_name (string, lengths 5 to 104), path (string, lengths 4 to 251), language (string, 1 class), license (string, 15 classes), size (int32, 2 to 1.05M). Each record below shows the code field first, followed by its metadata row:

| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
"""
Methods for interacting with Tanda shifts
"""
from flask import session
from flask_login import current_user
from swappr.user.tanda_api import tanda_auth, get_user_by_id
from swappr.database import db_session
from swappr.models import Shift
import swappr.god_request
import json
import datetime
# Might be beneficial to cache the roster? (a hedged sketch appears at the end of this file)
def fetch_current_user_upcoming_shifts():
shift_info = tanda_auth.get('rosters/current').data
valid_shifts = []
if not shift_info or "schedules" not in shift_info: # Make sure we don't access empty dictionary
return valid_shifts
for i in range(len(shift_info["schedules"])):
        # at this point, we are examining all the schedules for a particular day
for j in range(len(shift_info["schedules"][i]["schedules"])):
sched_item = shift_info["schedules"][i]["schedules"][j]
            if sched_item["user_id"] == current_user.employee_id:
sched_item["date"] = shift_info["schedules"][i]["date"]
sched_item["adjusted_start"] = sched_item["start"] + current_user.utc_offset
sched_item["adjusted_finish"] = sched_item["finish"] + current_user.utc_offset
valid_shifts.append(sched_item)
return valid_shifts
def fetch_current_user_shifts_for_date(delta):
target_date = (datetime.datetime.now() + datetime.timedelta(int(delta))).strftime('%Y-%m-%d')
shift_info = tanda_auth.get('rosters/on/{}'.format(target_date)).data
valid_shifts = []
if not shift_info or "schedules" not in shift_info: # Make sure we don't access empty dictionary
return valid_shifts
for i in range(len(shift_info["schedules"])):
        # at this point, we are examining all the schedules for a particular day
for j in range(len(shift_info["schedules"][i]["schedules"])):
sched_item = shift_info["schedules"][i]["schedules"][j]
            if sched_item["user_id"] == current_user.employee_id:
sched_item["date"] = shift_info["schedules"][i]["date"]
sched_item["adjusted_start"] = sched_item["start"] + current_user.utc_offset
sched_item["adjusted_finish"] = sched_item["finish"] + current_user.utc_offset
valid_shifts.append(sched_item)
return valid_shifts
def fetch_vacant_shifts():
shift_info = tanda_auth.get('rosters/current').data
vacant_shifts = []
if not shift_info or "schedules" not in shift_info:
return vacant_shifts
for i in range(len(shift_info["schedules"])):
        # at this point, we are examining all the schedules for a particular day
for j in range(len(shift_info["schedules"][i]["schedules"])):
sched_item = shift_info["schedules"][i]["schedules"][j]
            if sched_item["user_id"] is None:
sched_item["date"] = shift_info["schedules"][i]["date"]
sched_item["adjusted_start"] = sched_item["start"] + current_user.utc_offset
sched_item["adjusted_finish"] = sched_item["finish"] + current_user.utc_offset
vacant_shifts.append(sched_item)
return vacant_shifts
def offer_this_shift(id):
shift = tanda_auth.get('schedules/' + str(id)).data
name = get_user_by_id(shift["user_id"])['name']
print(shift)
shift_offer = Shift(shift["id"], None, shift["user_id"], shift["start"], shift["finish"], None,
shift["department_id"], name, None)
db_session.add(shift_offer)
db_session.commit()
def fetch_offered_shifts():
shifts = db_session.query(Shift)
actual_shifts = [shift for shift in shifts]
print("Shift is:")
print(actual_shifts)
for shift in actual_shifts:
shift.adjusted_start = shift.start_time + current_user.utc_offset
shift.adjusted_finish = shift.end_time + current_user.utc_offset
return actual_shifts
def take_offered_shift(shift_id, taker_id):
shift = db_session.query(Shift).filter(Shift.schedule_id == shift_id).one()
shift.taker = taker_id
shift.taker_name = get_user_by_id(taker_id)['name']
db_session.commit()
# This method can only be called by a manager!
def replace_user_in_schedule(user_id, schedule_id):
print(current_user.is_manager)
result = None
url_path = 'schedules/' + str(schedule_id)
data = {
'user_id': user_id
}
json_data = json.dumps(data)
print(json_data)
    if current_user.is_manager:
#clear to proceed as current user - probably
result = tanda_auth.put(url_path, data=data).data
else:
#switch to god mode
result = swappr.god_request.put(url_path, data=data).json()
print(result)
if ("error" in result):
return False
return result["user_id"] == user_id
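# Hedged sketch for the "cache the roster" idea flagged at the top of this
# file: a module-level, time-based cache around the 'rosters/current' call.
# Illustrative only -- the TTL value and the names below are assumptions, not
# part of the Tanda API.
import time

_roster_cache = {'data': None, 'fetched_at': 0.0}
_ROSTER_TTL_SECONDS = 300  # assumed 5-minute freshness window

def fetch_current_roster_cached():
    """Return the current roster, hitting the API at most once per TTL."""
    now = time.time()
    if (_roster_cache['data'] is None
            or now - _roster_cache['fetched_at'] > _ROSTER_TTL_SECONDS):
        _roster_cache['data'] = tanda_auth.get('rosters/current').data
        _roster_cache['fetched_at'] = now
    return _roster_cache['data']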
| (above) | swappr-tanda-team/swappr | swappr/shift/tanda_shift.py | Python | mit | 4,754 |
import pandas as pd
import numpy as np
import ml_metrics as metrics
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.calibration import CalibratedClassifierCV
from sklearn.cross_validation import StratifiedKFold  # scikit-learn < 0.18 API (module removed in 0.20)
from sklearn.metrics import log_loss
path = '../Data/'
print("read training data")
train = pd.read_csv(path+'train_tfidf.csv')
label = train['target']
trainID = train['id']
del train['id']
del train['target']
tsne = pd.read_csv(path+'tfidf_train_tsne.csv')
train = train.join(tsne)
clf = ExtraTreesClassifier(n_jobs=-1, n_estimators=300, verbose=3, random_state=131)
iso_clf = CalibratedClassifierCV(clf, method='isotonic', cv=10)
iso_clf.fit(train.values, label)
print("read test data")
test = pd.read_csv(path+'test_tfidf.csv')
ID = test['id']
del test['id']
tsne = pd.read_csv(path+'tfidf_test_tsne.csv')
test = test.join(tsne)
clf_probs = iso_clf.predict_proba(test.values)
sample = pd.read_csv(path+'sampleSubmission.csv')
print("writing submission data")
submission = pd.DataFrame(clf_probs, index=ID, columns=sample.columns[1:])
submission.to_csv(path+'extraTree_tfidf.csv',index_label='id')
# retrain
sample = pd.read_csv(path+'sampleSubmission.csv')
submission = pd.DataFrame(index=trainID, columns=sample.columns[1:])
nfold = 5
score = np.zeros(nfold)
skf = StratifiedKFold(label, nfold, random_state=131)
i = 0
for tr, te in skf:
X_train, X_test, y_train, y_test = train.values[tr], train.values[te], label[tr], label[te]
clf = ExtraTreesClassifier(n_jobs=-1, n_estimators=300, verbose=3, random_state=131)
iso_clf = CalibratedClassifierCV(clf, method='isotonic', cv=10)
iso_clf.fit(X_train, y_train)
pred = iso_clf.predict_proba(X_test)
tmp = pd.DataFrame(pred, columns=sample.columns[1:])
submission.iloc[te] = pred
    score[i] = log_loss(y_test, pred, eps=1e-15, normalize=True)
print(score[i])
i+=1
print("ave: "+ str(np.average(score)) + "stddev: " + str(np.std(score)))
# cv 10, 0.4640333 + 0.008130822
# nfold 5: 0.4662828, stddev 0.008184
# nfold 4: 0.4688583, stddev 0.004398
# nfold 3: 0.4706140, stddev 0.004154
print(log_loss(label,submission.values,eps=1e-15, normalize=True))
submission.to_csv(path+'extraTree_tfidf_retrain.csv',index_label='id')
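# Hedged aside (not part of the original script): the retrain loop above is
# the standard out-of-fold scheme for stacking -- every training row gets
# predictions from a model fit on folds that exclude it. A minimal sketch of
# the same pattern with the deprecated cross_validation API this script
# targets (scikit-learn < 0.18); X_demo/y_demo are illustrative toy data:
X_demo = np.random.RandomState(0).rand(90, 4)
y_demo = np.tile([0, 1, 2], 30)
oof = np.zeros((90, 3))
for tr_idx, te_idx in StratifiedKFold(y_demo, 3):
    model = ExtraTreesClassifier(n_estimators=20, random_state=0)
    model.fit(X_demo[tr_idx], y_demo[tr_idx])
    oof[te_idx] = model.predict_proba(X_demo[te_idx])
# every row of oof now holds class probabilities from a fold that excluded it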
| (above) | puyokw/kaggle_Otto | new/src/1st_level/extraTree_tfidf.py | Python | mit | 2,236 |
from django.contrib import admin
from zealous.blog.models import Category, Post, PostImage
class PostImageInline(admin.TabularInline):
model = PostImage
extra = 0
class PostAdmin(admin.ModelAdmin):
fieldsets = [
('Basic Info', {
'fields': ['name', 'slug', 'author', 'categories', 'pub_date',]
}),
('Content', {
'fields': ['tease', 'body',],
}),
('Metadata', {
'fields': ['status', 'created_ts', 'updated_ts',]
}),
]
filter_horizontal = ['categories',]
prepopulated_fields = {'slug': ['name',]}
radio_fields = {'status': admin.VERTICAL}
readonly_fields = ['created_ts', 'updated_ts',]
inlines = [PostImageInline,]
date_hierarchy = 'pub_date'
search_fields = ['name', 'author__first_name', 'author__last_name',
'tease', 'body',]
list_display = ['name', 'author', 'pub_date', 'status',]
list_filter = ['status', 'categories',]
class CategoryAdmin(admin.ModelAdmin):
fieldsets = [
('Basic Info', {
'fields': ['name', 'slug',]
}),
('Metadata', {
'fields': ['status', 'created_ts', 'updated_ts',]
}),
]
prepopulated_fields = {'slug': ['name',]}
radio_fields = {'status': admin.VERTICAL}
readonly_fields = ['created_ts', 'updated_ts',]
search_fields = ['name',]
list_display = ['name', 'status',]
list_filter = ['status',]
admin.site.register(Post, PostAdmin)
admin.site.register(Category, CategoryAdmin)
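# Hedged alternative (Django >= 1.7): the same registrations can be written
# with the admin.register decorator instead of the admin.site.register calls
# above, e.g.:
#
#   @admin.register(Post)
#   class PostAdmin(admin.ModelAdmin):
#       ...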
| (above) | jeffschenck/zealous | zealous/blog/admin.py | Python | mit | 1,536 |
#! /usr/bin/env python
#Read arguments
from sys import argv
if len(argv) != 2:
    print("Usage: {} <dataset_name>".format(argv[0]))
exit(1)
PREFIX=argv[1]
#Read data
import csv
DATA_FILE=PREFIX
x,y,clase=[],[],[]
with open(DATA_FILE,'r') as f:
reader = csv.reader(f)
for row in reader:
        if '\t' in row[0]:
            row = row[0].split('\t')
clase.append(row[-1])
x.append(float(row[0]))
y.append(float(row[1]))
#Draw graph
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
sns.set(color_codes=True)
df = pd.DataFrame(dict(X=x, Y=y, clase=clase))
lm = sns.lmplot('X', 'Y', data=df, hue='clase', markers='o', palette="Set1", aspect=1, legend=False, fit_reg=False, scatter_kws={"s": 40} )
ax = lm.axes[0,0]
ax.set_aspect('equal')
LIMI = 1.1
ax.set_ylim(-LIMI, LIMI)
ax.set_xlim(-LIMI, LIMI)
#~ FONT_SIZE = 18
#~ for label in (ax.get_xticklabels() + ax.get_yticklabels()):
#~ label.set_fontsize(FONT_SIZE)
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
ax.set_frame_on(False)  # remove the frame as well
plt.savefig(PREFIX+'.png', bbox_inches='tight', pad_inches=0)
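# The axes and frame were hidden only for the saved PNG (tight bounding box,
# no padding); restore them so the interactive window shown below keeps its
# decorations.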
ax.xaxis.set_visible(True)
ax.yaxis.set_visible(True)
ax.set_frame_on(True)
plt.show()
| (above) | mvpossum/machine-learning | tp1/plot_2d_dataset.py | Python | mit | 1,274 |
from setuptools import setup
try:
# Try to seed the prng to make the tests repeatable.
# Unfortunately, numpy might not be installed.
import numpy as np
np.random.seed(1)
except ImportError:
pass
setup(name = 'vampyre',
version = '0.0',
description = 'Vampyre is a Python package for generalized Approximate Message Passing',
author = 'GAMP Team',
install_requires = ['nose','nose-timer','numpy','scipy','matplotlib','pywavelets','scikit-learn',],
test_suite = 'nose.collector',
tests_require = ['nose','nose-timer','numpy','scipy','PyWavelets'],
author_email = 'gampteam@gmail.com',
license = 'MIT',
packages = ['vampyre'],
zip_safe = False)
| (above) | GAMPTeam/vampyre | setup.py | Python | mit | 730 |
"""
Setup module for wagoner.
"""
from setuptools import setup, find_packages
with open("README.rst", "r") as readme:
setup(
name="wagoner",
version="1.1",
description="A random word generator.",
long_description=readme.read(),
url="https://github.com/sbusard/wagoner",
author="Simon Busard",
author_email="simon.busard@gmail.com",
license="MIT",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Topic :: Utilities",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4"
],
keywords="random word generation",
packages=find_packages(),
scripts=[
"wagoner/table.py",
"wagoner/tree.py",
"wagoner/word.py"
]
)
| (above) | sbusard/wagoner | setup.py | Python | mit | 1,055 |
def test_age():
from isochrones.priors import AgePrior
age_prior = AgePrior()
age_prior.test_integral()
age_prior.test_sampling()
def test_distance():
from isochrones.priors import DistancePrior
distance_prior = DistancePrior()
distance_prior.test_integral()
distance_prior.test_sampling()
def test_AV():
from isochrones.priors import AVPrior
AV_prior = AVPrior()
AV_prior.test_integral()
AV_prior.test_sampling()
def test_q():
from isochrones.priors import QPrior
q_prior = QPrior()
q_prior.test_integral()
q_prior.test_sampling()
def test_salpeter():
from isochrones.priors import SalpeterPrior
salpeter_prior = SalpeterPrior()
salpeter_prior.test_integral()
salpeter_prior.test_sampling()
def test_feh():
from isochrones.priors import FehPrior
feh_prior = FehPrior()
feh_prior.test_integral()
feh_prior.test_sampling()
feh_prior.bounds = (-3, 0.25)
feh_prior.test_integral()
feh_prior.test_sampling()
assert feh_prior(-3.5) == 0
assert feh_prior(0.4) == 0
def test_chabrier():
from isochrones.priors import ChabrierPrior
chabrier_prior = ChabrierPrior()
chabrier_prior.test_integral()
chabrier_prior.test_sampling()
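# Note: these tests delegate to self-checks that the prior classes appear to
# provide (per the calls above): test_integral() presumably verifies the
# density integrates to 1 over its bounds, and test_sampling() that random
# draws are consistent with the density.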
| (above) | timothydmorton/isochrones | isochrones/tests/test_priors.py | Python | mit | 1,270 |
class EqualityMixin(object):
def __eq__(self, other):
return (isinstance(other, self.__class__)
and self.__dict__ == other.__dict__)
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(str(self))
class BasicMixin(object):
def __str__(self):
return self.__class__.__name__
class ItemTypeMixin(object):
def __str__(self):
return '{0}({1})'.format(self.__class__.__name__, str(self.item_type))
class TupleMixin(object):
def __str__(self):
return '{0}({1})'.format(self.__class__.__name__,
','.join([str(x) for x in self.item_types]))
class CallableMixin(object):
def __str__(self):
return '{0}({1} -> {2})'.format(self.__class__.__name__,
self.signature, self.return_type)
class Unknown(EqualityMixin, BasicMixin):
def example(self):
return object()
class NoneType(EqualityMixin, BasicMixin):
def example(self):
return None
class Bool(EqualityMixin, BasicMixin):
def example(self):
return True
class Num(EqualityMixin, BasicMixin):
def example(self):
return 1
class Str(EqualityMixin, BasicMixin):
def example(self):
return 'a'
class List(EqualityMixin, ItemTypeMixin):
def __init__(self, item_type):
self.item_type = item_type
def example(self):
return [self.item_type.example()]
# hack to allow testing for arbitrary-length tuple
class BaseTuple(EqualityMixin, BasicMixin):
def example(self):
return tuple()
class Tuple(EqualityMixin, TupleMixin):
def __init__(self, item_types):
self.item_types = item_types
def example(self):
return tuple(x.example() for x in self.item_types)
class Set(EqualityMixin, ItemTypeMixin):
def __init__(self, item_type):
self.item_type = item_type
def example(self):
return {self.item_type.example()}
class Dict(EqualityMixin):
def __init__(self, key_type, value_type):
self.key_type = key_type
self.value_type = value_type
def example(self):
return {self.key_type.example(): self.value_type.example()}
def __str__(self):
return '{0}({1},{2})'.format(self.__class__.__name__,
self.key_type, self.value_type)
class Function(EqualityMixin, CallableMixin):
def __init__(self, signature, return_type, evaluator, instance=None):
assert evaluator is not None
self.signature = signature
self.return_type = return_type
self.evaluator = evaluator
self.instance = instance
def example(self):
return object()
# set class_name to __import__ for imports
class Instance(EqualityMixin):
def __init__(self, class_name, attributes):
self.class_name = class_name
self.attributes = attributes # Scope object
self.initialized = False
def example(self):
return object()
def __str__(self):
return '{0}({1})'.format(self.__class__.__name__, self.class_name)
# a Class is a Function that returns an Instance plus static methods/attrs
class Class(EqualityMixin, CallableMixin):
def __init__(self, name, signature, return_type, evaluator, attributes):
self.name = name
self.signature = signature
self.return_type = return_type
self.evaluator = evaluator
# only contains class methods and class attributes
self.attributes = attributes
def example(self):
return object()
def __str__(self):
return self.name
class Maybe(EqualityMixin):
def __init__(self, subtype):
assert subtype is not None
self.subtype = subtype
def example(self):
return self.subtype.example()
def __str__(self):
return '{0}({1})'.format(self.__class__.__name__, self.subtype)
class Union(EqualityMixin):
def __init__(self, *subtypes):
assert len(subtypes) > 0
assert not any(isinstance(x, list) for x in subtypes)
self.subtypes = list(subtypes)
def example(self):
return self.subtypes[0].example()
def __str__(self):
return 'Union({0})'.format(','.join([str(x) for x in self.subtypes]))
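# Hedged usage sketch (not part of the original module): composing these type
# objects and using example() to build a representative value.
if __name__ == '__main__':
    t = Dict(Str(), List(Num()))
    print(t)            # Dict(Str,List(Num))
    print(t.example())  # {'a': [1]}
    assert Tuple([Bool(), Str()]) == Tuple([Bool(), Str()])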
| (above) | clark800/pystarch | backend/type_objects.py | Python | mit | 4,330 |
#!/usr/bin/env python3
#
# architecture.py
# SWN Architecture Generator
#
# Copyright (c) 2014 Steve Simenic <orffen@orffenspace.com>
#
# This file is part of the SWN Toolbox.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import json
import random
import sys
class Architecture:
"""
This class generates an architecture element from
tables/architecture.json, which can be accessed through
the "element" attribute.
"""
def __init__(self):
with open("tables/architecture.json", "r") as file:
architecture = json.load(file)
self.element = str(random.choice(architecture["element"]))
def __str__(self):
return self.element
if __name__ == "__main__":
    try:
        times = int(sys.argv[1])
    except (IndexError, ValueError):
        times = 1
for i in range(times):
if i != 0:
print("-----------+-+-+-----------")
print(Architecture())
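# Example invocation (assumes tables/architecture.json is present in the
# working directory, as the class above requires):
#   $ ./architecture.py 3
# prints three randomly chosen architecture elements, separated by divider
# lines.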
| (above) | mosodemus/swn | architecture.py | Python | mit | 1,964 |
# -*- coding: utf-8 -*-
"""Chat robot based on natural language understanding and machine learning."""
__name__ = 'chat'
__version__ = '1.0.7'
__author__ = 'Decalogue'
__author_email__ = '1044908508@qq.com'
| (above) | Decalogue/chat | chat/__init__.py | Python | mit | 209 |
# -*- coding: utf-8 -*-
"""
chemdataextractor.parse.table
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import re
from lxml.builder import E
from .common import delim
from ..utils import first
from ..model import Compound, UvvisSpectrum, UvvisPeak, QuantumYield, FluorescenceLifetime, MeltingPoint, GlassTransition
from ..model import ElectrochemicalPotential, IrSpectrum, IrPeak
from .actions import join, merge, fix_whitespace
from .base import BaseParser
from .cem import chemical_label, label_before_name, chemical_name, chemical_label_phrase, solvent_name, lenient_chemical_label
from .elements import R, I, W, Optional, ZeroOrMore, Any, OneOrMore, Start, End, Group, Not
log = logging.getLogger(__name__)
delims = ZeroOrMore(delim)
minus = R('^[\-–−‒]$')
name_blacklist = R('^([\d\.]+)$')
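# Reading guide for the grammar below (inferred from usage in this file, not a
# formal spec): R(...) matches a token against a regex, I(...) a
# case-insensitive word, W(...) an exact word; +, | and
# Optional/ZeroOrMore/OneOrMore compose elements; element('name') labels a
# match for later result.xpath() lookups; .hide() keeps a match out of the
# result tree; and .add_action() post-processes the matched tokens.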
#: Compound identifier column heading
compound_heading = R('(^|\b)(comp((oun)?d)?|molecule|ligand|oligomer|complex|dye|porphyrin|substance|sample|material|catalyst|acronym|isomer|(co)?polymer|chromophore|species|quinone|ether|diene|adduct|acid|radical|monomer|amine|analyte|product|system|(photo)?sensitiser|phthalocyanine|MPc)(e?s)?($|\b)', re.I)
solvent_heading = R('(^|\b)(solvent)s?($|\b)', re.I)
solvent_in_heading = Group(solvent_name)('cem')
solvent_cell = Group(solvent_name | chemical_name)('cem')
compound_cell = Group(
(Start() + chemical_label + End())('cem') |
(Start() + lenient_chemical_label + End())('cem') |
chemical_label_phrase('cem') |
(Not(Start() + OneOrMore(name_blacklist) + End()) + OneOrMore(Any())('name').add_action(join).add_action(fix_whitespace) + Optional(W('(').hide() + chemical_label + W(')').hide()))('cem') |
label_before_name
)('cem_phrase')
uvvis_emi_title = (
I('emission') + R('max(ima)?') |
W('λ') + Optional(I('max')) + Optional(W(',')) + R('em(i(ssion)?)?', re.I) |
R('em(i(ssion)?)?', re.I) + W('λ') + Optional(I('max')) + Optional(W(','))
)
uvvis_abs_title = (
I('absorption') + R('max(ima)?') |
W('λ') + OneOrMore(R('^(a|sol)?max$', re.I) | R('abs(or[bp]tion)?', re.I) | I('a') | W(',')) |
R('uv([-/]?vis)?', re.I)
)
extinction_title = Optional(R('^10\d$') | W('10') + minus + R('^\d$')).hide() + W('ε') + Optional(I('max'))
uvvis_units = (W('nm') | R('^eV[\-–−‒]1$') | W('eV') + minus + W('1'))('uvvis_units').add_action(merge)
multiplier = Optional(I('×')) + (R('^10–?[34]$') | (W('10') + minus + R('^[345]$')))
extinction_units = (
(Optional(multiplier + delims) + (
        I('M') + minus + I('1') + I('cm') + minus + I('1') |
I('dm3') + I('mol') + minus + I('1') + I('cm') + minus + I('1') |
I('l') + I('mol') + minus + I('1') + I('cm') + minus + I('1') |
I('l') + I('cm') + minus + I('1') + I('mol') + minus + I('1')
)) | multiplier
)('extinction_units').add_action(join)
ir_title = (
R('^(FT-?)?IR$') + Optional(I('absorption'))
)
ir_units = Optional(W('/')).hide() + (
R('^\[?cm[-–−]1\]?$') |
W('cm') + R('^[-–−]$') + W('1')
)('ir_units').add_action(merge)
ir_heading = (OneOrMore(ir_title.hide()) + ZeroOrMore(delims.hide() + ir_units))('ir_heading')
ir_value = (R('^\d{3,5}(\.\d{1,2})?$'))('value')
peak_strength = R('^(sh(oulder)?|br(oad)?)$')('strength')
ir_peak = (
ir_value + Optional(W('(').hide()) + Optional(peak_strength) + Optional(W(')').hide())
)('ir_peak')
ir_cell = (
ir_peak + ZeroOrMore(W(',').hide() + ir_peak)
)('ir_cell')
# TODO: (photoluminescence|fluorescence) quantum yield
quantum_yield_title = (R('^(Φ|ϕ)(fl?|pl|ze|t|l|lum)?$', re.I) + Optional(R('^(fl?|pl|ze|t|l|lum)$', re.I)))('quantum_yield_type').add_action(merge) # + ZeroOrMore(Any())
quantum_yield_units = W('%')('quantum_yield_units')
quantum_yield_heading = Group(Start() + quantum_yield_title + delims.hide() + Optional(quantum_yield_units) + delims.hide() + End())('quantum_yield_heading')
quantum_yield_value = (Optional(R('^[~∼\<\>]$')) + ((W('10') + minus + R('^\d$')) | R('^(100(\.0+)?|\d\d?(\.\d+)?)$')) + Optional(W('±') + R('^\d+(\.\d+)?$')))('quantum_yield_value').add_action(merge)
quantum_yield_cell = (quantum_yield_value + Optional(quantum_yield_units))('quantum_yield_cell')
def split_uvvis_shape(tokens, start, result):
""""""
if result[0].text.endswith('sh') or result[0].text.endswith('br'):
result.append(E('shape', result[0].text[-2:]))
result[0].text = result[0].text[:-2]
uvvis_emi_heading = (OneOrMore(uvvis_emi_title.hide()))('uvvis_emi_heading')
uvvis_abs_heading = (OneOrMore(uvvis_abs_title.hide()) + ZeroOrMore(delims.hide() + (uvvis_units | extinction_title.hide() | extinction_units)))('uvvis_abs_heading')
uvvis_abs_disallowed = I('emission')
extinction_heading = (extinction_title.hide() + delims.hide() + Optional(extinction_units))('extinction_heading')
uvvis_value = (R('^\d{3,4}(\.\d{1,2})?(sh|br)?$'))('value').add_action(split_uvvis_shape)
peak_shape = R('^(sh(oulder)?|br(oad)?)$')('shape')
extinction_value = (
R('^\d+\.\d+$') + Optional(W('±') + R('^\d+\.\d+$')) + Optional(W('×') + R('10\d+')) | # Scientific notation
R('^\d{1,3}$') + R('^\d\d\d$') | # RSC often inserts spaces within values instead of commas
R('^\d{1,2},?\d{3,3}$')
)('extinction').add_action(merge)
uvvis_abs_emi_quantum_yield_heading = (
OneOrMore(uvvis_abs_title.hide()) +
Optional(Optional(delims.hide()) + uvvis_units('uvvis_abs_units') + Optional(delims.hide())) +
OneOrMore(uvvis_emi_title.hide()) +
Optional(Optional(delims.hide()) + uvvis_units + Optional(delims.hide())) +
Optional(delims.hide()) + quantum_yield_title.hide() + Optional(delims.hide()) +
Optional(Optional(delims.hide()) + quantum_yield_units + Optional(delims.hide()))
)('uvvis_emi_quantum_yield_heading')
uvvis_abs_emi_quantum_yield_cell = (
uvvis_value('uvvis_abs_value') + delims.hide() + uvvis_value + delims.hide() + quantum_yield_value + Optional(quantum_yield_units)
)('uvvis_emi_quantum_yield_cell')
uvvis_emi_quantum_yield_heading = (
OneOrMore(uvvis_emi_title.hide()) +
Optional(Optional(delims.hide()) + uvvis_units + Optional(delims.hide())) +
Optional(delims.hide()) + quantum_yield_title.hide() + Optional(delims.hide()) +
Optional(Optional(delims.hide()) + quantum_yield_units + Optional(delims.hide()))
)('uvvis_emi_quantum_yield_heading')
uvvis_emi_quantum_yield_cell = (
uvvis_value + delims.hide() + quantum_yield_value + Optional(quantum_yield_units)
)('uvvis_emi_quantum_yield_cell')
uvvis_abs_peak = (
uvvis_value + Optional(peak_shape) + Optional(W('(').hide() + extinction_value + W(')').hide())
)('uvvis_abs_peak')
uvvis_abs_cell = (
uvvis_abs_peak + ZeroOrMore(W(',').hide() + uvvis_abs_peak)
)('uvvis_abs_cell')
extinction_cell = (
extinction_value + ZeroOrMore(W(',').hide() + extinction_value)
)('uvvis_abs_cell')
uvvis_emi_peak = (
uvvis_value + Optional(peak_shape)
)('uvvis_emi_peak')
uvvis_emi_cell = (
uvvis_emi_peak + ZeroOrMore(W(',').hide() + uvvis_emi_peak)
)('uvvis_emi_cell')
fluorescence_lifetime_title = W('τ') + R('^(e|f|ave|avg|0)$', re.I)
fluorescence_lifetime_units = (W('ns') | W('μ') + W('s'))('fluorescence_lifetime_units').add_action(merge)
fluorescence_lifetime_heading = (fluorescence_lifetime_title.hide() + delims.hide() + Optional(fluorescence_lifetime_units))('fluorescence_lifetime_heading')
fluorescence_lifetime_value = (Optional(R('^[~∼\<\>]$')) + R('^\d+(\.\d+)?$'))('fluorescence_lifetime_value').add_action(merge)
fluorescence_lifetime_cell = (
fluorescence_lifetime_value + ZeroOrMore(W(',').hide() + fluorescence_lifetime_value)
)('fluorescence_lifetime_cell')
electrochemical_potential_title = ((R('^E(ox|red)1?$', re.I) | W('E') + R('^(ox|red)1?$')) + Optional(W('/') + W('2')))('electrochemical_potential_type').add_action(merge)
electrochemical_potential_units = (W('V'))('electrochemical_potential_units').add_action(merge)
electrochemical_potential_heading = (electrochemical_potential_title + delims.hide() + Optional(electrochemical_potential_units))('electrochemical_potential_heading')
electrochemical_potential_value = (Optional(R('^[~∼\<\>]$')) + Optional(minus) + R('^\d+(\.\d+)?$'))('electrochemical_potential_value').add_action(merge)
electrochemical_potential_cell = (
electrochemical_potential_value + ZeroOrMore(delims.hide() + electrochemical_potential_value)
)('electrochemical_potential_cell')
subject_phrase = ((I('of') | I('for')) + chemical_name)('subject_phrase')
solvent_phrase = (I('in') + (solvent_name | chemical_name))('solvent_phrase')
temp_range = (Optional(R('^[\-–−]$')) + (R('^[\+\-–−]?\d+(\.\d+)?[\-–−]\d+(\.\d+)?$') | (R('^[\+\-–−]?\d+(\.\d+)?$') + R('^[\-–−]$') + R('^[\+\-–−]?\d+(\.\d+)?$'))))('temperature').add_action(merge)
temp_value = (Optional(R('^[\-–−]$')) + R('^[\+\-–−]?\d+(\.\d+)?$') + Optional(W('±') + R('^\d+(\.\d+)?$')))('temperature').add_action(merge)
temp_word = (I('room') + R('^temp(erature)?$') | R('^r\.?t\.?$', re.I))('temperature').add_action(merge)
temp = (temp_range | temp_value | temp_word)('value')
temp_units = (W('°') + R('[CFK]') | W('K'))('units').add_action(merge)
temp_with_units = (temp + temp_units)('temp')
temp_with_optional_units = (temp + Optional(temp_units))('temp')
temp_phrase = (I('at') + temp_with_units)('temp_phrase')
melting_point_title = R('^T(melt|m\.p|m)$', re.I) | W('T') + R('^(melt|m\.p|m)?$')
melting_point_heading = (melting_point_title.hide() + delims.hide() + Optional(temp_units))('melting_point_heading')
melting_point_cell = (
temp_with_optional_units + ZeroOrMore(delims.hide() + temp_with_optional_units)
)('melting_point_cell')
glass_transition_title = R('^T(g\.)$', re.I) | W('T') + R('^(g\.)?$')
glass_transition_heading = (glass_transition_title.hide() + delims.hide() + Optional(temp_units))('glass_transition_heading')
glass_transition_cell = (
temp_with_optional_units + ZeroOrMore(delims.hide() + temp_with_optional_units)
)('glass_transition_cell')
caption_context = Group(subject_phrase | solvent_phrase | temp_phrase)('caption_context')
class CompoundHeadingParser(BaseParser):
""""""
root = compound_heading
def interpret(self, result, start, end):
""""""
yield Compound()
class SolventHeadingParser(BaseParser):
""""""
root = solvent_heading
def interpret(self, result, start, end):
""""""
yield Compound()
class UvvisAbsDisallowedHeadingParser(BaseParser):
""""""
root = uvvis_abs_disallowed
def interpret(self, result, start, end):
""""""
yield Compound()
class SolventInHeadingParser(BaseParser):
""""""
root = solvent_in_heading
def interpret(self, result, start, end):
""""""
c = Compound()
solvent = first(result.xpath('./name/text()'))
if solvent is not None:
context = {'solvent': solvent}
c.melting_points = [MeltingPoint(**context)]
c.glass_transitions = [GlassTransition(**context)]
c.quantum_yields = [QuantumYield(**context)]
c.fluorescence_lifetimes = [FluorescenceLifetime(**context)]
c.electrochemical_potentials = [ElectrochemicalPotential(**context)]
c.uvvis_spectra = [UvvisSpectrum(**context)]
if c.serialize():
yield c
class TempInHeadingParser(BaseParser):
""""""
root = temp_with_units
def interpret(self, result, start, end):
""""""
c = Compound()
context = {
'temperature': first(result.xpath('./value/text()')),
'temperature_units': first(result.xpath('./units/text()'))
}
c.quantum_yields = [QuantumYield(**context)]
c.fluorescence_lifetimes = [FluorescenceLifetime(**context)]
c.electrochemical_potentials = [ElectrochemicalPotential(**context)]
c.uvvis_spectra = [UvvisSpectrum(**context)]
yield c
class SolventCellParser(BaseParser):
""""""
root = solvent_cell
def interpret(self, result, start, end):
""""""
c = Compound()
solvent = first(result.xpath('./name/text()'))
if solvent is not None:
context = {'solvent': solvent}
c.melting_points = [MeltingPoint(**context)]
c.glass_transitions = [GlassTransition(**context)]
c.quantum_yields = [QuantumYield(**context)]
c.fluorescence_lifetimes = [FluorescenceLifetime(**context)]
c.electrochemical_potentials = [ElectrochemicalPotential(**context)]
c.uvvis_spectra = [UvvisSpectrum(**context)]
if c.serialize():
yield c
class CompoundCellParser(BaseParser):
""""""
root = compound_cell
def interpret(self, result, start, end):
for cem_el in result.xpath('./cem'):
c = Compound(
names=cem_el.xpath('./name/text()'),
labels=cem_el.xpath('./label/text()')
)
yield c
class UvvisEmiHeadingParser(BaseParser):
""""""
root = uvvis_emi_heading
def interpret(self, result, start, end):
""""""
uvvis_units = first(result.xpath('./uvvis_units/text()'))
c = Compound()
# TODO: Emission peaks
yield c
class UvvisAbsHeadingParser(BaseParser):
""""""
root = uvvis_abs_heading
def interpret(self, result, start, end):
""""""
uvvis_units = first(result.xpath('./uvvis_units/text()'))
extinction_units = first(result.xpath('./extinction_units/text()'))
c = Compound()
if uvvis_units or extinction_units:
c.uvvis_spectra.append(
UvvisSpectrum(peaks=[UvvisPeak(units=uvvis_units, extinction_units=extinction_units)])
)
yield c
class ExtinctionHeadingParser(BaseParser):
""""""
root = extinction_heading
def interpret(self, result, start, end):
""""""
extinction_units = first(result.xpath('./extinction_units/text()'))
c = Compound()
if extinction_units:
c.uvvis_spectra.append(
UvvisSpectrum(peaks=[UvvisPeak(extinction_units=extinction_units)])
)
yield c
class IrHeadingParser(BaseParser):
""""""
root = ir_heading
def interpret(self, result, start, end):
""""""
ir_units = first(result.xpath('./ir_units/text()'))
c = Compound()
if ir_units:
c.ir_spectra.append(
IrSpectrum(peaks=[IrPeak(units=ir_units)])
)
yield c
class IrCellParser(BaseParser):
""""""
root = ir_cell
def interpret(self, result, start, end):
""""""
c = Compound()
ir = IrSpectrum()
for peak in result.xpath('./ir_peak'):
ir.peaks.append(
IrPeak(
value=first(peak.xpath('./value/text()')),
strength=first(peak.xpath('./strength/text()'))
)
)
if ir.peaks:
c.ir_spectra.append(ir)
yield c
class QuantumYieldHeadingParser(BaseParser):
""""""
root = quantum_yield_heading
def interpret(self, result, start, end):
""""""
c = Compound(
quantum_yields=[
QuantumYield(
type=first(result.xpath('./quantum_yield_type/text()')),
units=first(result.xpath('./quantum_yield_units/text()'))
)
]
)
yield c
class QuantumYieldCellParser(BaseParser):
""""""
root = quantum_yield_cell
def interpret(self, result, start, end):
""""""
c = Compound()
qy = QuantumYield(
value=first(result.xpath('./quantum_yield_value/text()')),
units=first(result.xpath('./quantum_yield_units/text()'))
)
if qy.value:
c.quantum_yields.append(qy)
yield c
class UvvisEmiCellParser(BaseParser):
""""""
root = uvvis_emi_cell
def interpret(self, result, start, end):
""""""
# TODO: Emission peaks
return
yield
class UvvisAbsCellParser(BaseParser):
""""""
root = uvvis_abs_cell
def interpret(self, result, start, end):
""""""
c = Compound()
uvvis = UvvisSpectrum()
for peak in result.xpath('./uvvis_abs_peak'):
uvvis.peaks.append(
UvvisPeak(
value=first(peak.xpath('./value/text()')),
extinction=first(peak.xpath('./extinction/text()')),
shape=first(peak.xpath('./shape/text()'))
)
)
if uvvis.peaks:
c.uvvis_spectra.append(uvvis)
yield c
class ExtinctionCellParser(BaseParser):
""""""
root = extinction_cell
def interpret(self, result, start, end):
""""""
c = Compound()
uvvis = UvvisSpectrum()
for value in result.xpath('./extinction/text()'):
uvvis.peaks.append(
UvvisPeak(
extinction=value,
)
)
if uvvis.peaks:
c.uvvis_spectra.append(uvvis)
yield c
class UvvisAbsEmiQuantumYieldHeadingParser(BaseParser):
""""""
root = uvvis_abs_emi_quantum_yield_heading
def interpret(self, result, start, end):
""""""
c = Compound()
abs_units = first(result.xpath('./uvvis_abs_units/text()'))
if abs_units:
c.uvvis_spectra.append(
UvvisSpectrum(peaks=[UvvisPeak(units=abs_units)])
)
qy_units = first(result.xpath('./quantum_yield_units/text()'))
if qy_units:
c.quantum_yields.append(
QuantumYield(units=qy_units)
)
yield c
class UvvisAbsEmiQuantumYieldCellParser(BaseParser):
""""""
root = uvvis_abs_emi_quantum_yield_cell
def interpret(self, result, start, end):
""""""
c = Compound()
uvvis = UvvisSpectrum()
for value in result.xpath('./uvvis_abs_value/text()'):
uvvis.peaks.append(
UvvisPeak(
value=value,
)
)
if uvvis.peaks:
c.uvvis_spectra.append(uvvis)
qy = QuantumYield(
value=first(result.xpath('./quantum_yield_value/text()'))
)
if qy.value:
c.quantum_yields.append(qy)
if c.quantum_yields or c.uvvis_spectra:
yield c
class UvvisEmiQuantumYieldHeadingParser(BaseParser):
""""""
root = uvvis_emi_quantum_yield_heading
def interpret(self, result, start, end):
""""""
# Yield an empty compound to signal that the Parser matched
yield Compound()
class UvvisEmiQuantumYieldCellParser(BaseParser):
""""""
root = uvvis_emi_quantum_yield_cell
def interpret(self, result, start, end):
""""""
c = Compound()
qy = QuantumYield(
value=first(result.xpath('./quantum_yield_value/text()'))
)
if qy.value:
c.quantum_yields.append(qy)
yield c
class FluorescenceLifetimeHeadingParser(BaseParser):
""""""
root = fluorescence_lifetime_heading
def interpret(self, result, start, end):
""""""
fluorescence_lifetime_units = first(result.xpath('./fluorescence_lifetime_units/text()'))
c = Compound()
if fluorescence_lifetime_units:
c.fluorescence_lifetimes.append(
FluorescenceLifetime(units=fluorescence_lifetime_units)
)
yield c
class FluorescenceLifetimeCellParser(BaseParser):
""""""
root = fluorescence_lifetime_cell
def interpret(self, result, start, end):
""""""
c = Compound()
fl = FluorescenceLifetime(
value=first(result.xpath('./fluorescence_lifetime_value/text()'))
)
if fl.value:
c.fluorescence_lifetimes.append(fl)
yield c
class MeltingPointHeadingParser(BaseParser):
""""""
root = melting_point_heading
def interpret(self, result, start, end):
""""""
melting_point_units = first(result.xpath('./units/text()'))
c = Compound()
if melting_point_units:
c.melting_points.append(
MeltingPoint(units=melting_point_units)
)
yield c
class MeltingPointCellParser(BaseParser):
""""""
root = melting_point_cell
def interpret(self, result, start, end):
""""""
c = Compound()
for mp in result.xpath('./temp'):
c.melting_points.append(
MeltingPoint(
value=first(mp.xpath('./value/text()')),
units=first(mp.xpath('./units/text()'))
)
)
if c.melting_points:
yield c
class GlassTransitionHeadingParser(BaseParser):
""""""
root = glass_transition_heading
def interpret(self, result, start, end):
""""""
glass_transition_units = first(result.xpath('./units/text()'))
c = Compound()
if glass_transition_units:
c.glass_transitions.append(
GlassTransition(units=glass_transition_units)
)
yield c
class GlassTransitionCellParser(BaseParser):
""""""
root = glass_transition_cell
def interpret(self, result, start, end):
""""""
c = Compound()
for tg in result.xpath('./temp'):
c.glass_transitions.append(
GlassTransition(
                    value=first(tg.xpath('./value/text()')),
                    units=first(tg.xpath('./units/text()'))
)
)
        if c.glass_transitions:
yield c
class ElectrochemicalPotentialHeadingParser(BaseParser):
""""""
root = electrochemical_potential_heading
def interpret(self, result, start, end):
""""""
c = Compound(
electrochemical_potentials=[
ElectrochemicalPotential(
type=first(result.xpath('./electrochemical_potential_type/text()')),
units=first(result.xpath('./electrochemical_potential_units/text()'))
)
]
)
yield c
class ElectrochemicalPotentialCellParser(BaseParser):
""""""
root = electrochemical_potential_cell
def interpret(self, result, start, end):
""""""
c = Compound()
for value in result.xpath('./electrochemical_potential_value/text()'):
c.electrochemical_potentials.append(
ElectrochemicalPotential(
value=value
)
)
if c.electrochemical_potentials:
yield c
class CaptionContextParser(BaseParser):
""""""
root = caption_context
def __init__(self):
pass
def interpret(self, result, start, end):
name = first(result.xpath('./subject_phrase/name/text()'))
c = Compound(names=[name]) if name else Compound()
context = {}
# print(etree.tostring(result[0]))
solvent = first(result.xpath('./solvent_phrase/name/text()'))
if solvent is not None:
context['solvent'] = solvent
# Melting point shouldn't have contextual temperature
if context:
c.melting_points = [MeltingPoint(**context)]
temp = first(result.xpath('./temp_phrase'))
if temp is not None:
context['temperature'] = first(temp.xpath('./temp/value/text()'))
context['temperature_units'] = first(temp.xpath('./temp/units/text()'))
# Glass transition temperature shouldn't have contextual temperature
if context:
c.glass_transitions = [GlassTransition(**context)]
temp = first(result.xpath('./temp_phrase'))
if temp is not None:
context['temperature'] = first(temp.xpath('./temp/value/text()'))
context['temperature_units'] = first(temp.xpath('./temp/units/text()'))
if context:
c.quantum_yields = [QuantumYield(**context)]
c.fluorescence_lifetimes = [FluorescenceLifetime(**context)]
c.electrochemical_potentials = [ElectrochemicalPotential(**context)]
c.uvvis_spectra = [UvvisSpectrum(**context)]
if c.serialize():
# print(c.to_primitive())
yield c
| (above) | mcs07/ChemDataExtractor | chemdataextractor/parse/table.py | Python | mit | 24,740 |
#!/usr/bin/python
from __future__ import division
from __future__ import with_statement
import math
import numpy
import os
import toolbox_basic
import xml.etree.ElementTree as xmlTree
class Output:
def __init__(self, path=None, root_name='idynomics'):
        if path is None or not os.path.isfile(path):
self.path = path
self.root = xmlTree.Element(root_name)
self.tree = xmlTree.ElementTree(self.root)
simulation = xmlTree.SubElement(self.root, 'simulation')
simulation.set('iterate', '0')
simulation.set('time', '0.0')
simulation.set('unit', 'h')
else:
self.path = toolbox_basic.check_path(path)
self.tree = toolbox_basic.get_xml_tree(self.path)
self.root = self.tree.getroot()
self.simulation = self.find('./simulation')
self.iterate = int(self.simulation.attrib['iterate'])
self.time = float(self.simulation.attrib['time'])
self.time_unit = self.simulation.attrib['unit']
def set_iterate(self, iterate):
self.simulation.attrib['iterate'] = iterate
self.iterate = iterate
def set_time(self, time):
self.simulation.attrib['time'] = time
self.time = time
def write(self, output_path=None):
        if output_path is None:
            output_path = self.path
        else:
            self.path = output_path
with open(output_path, 'w') as f:
self.tree.write(f, encoding='utf-8', xml_declaration=True)
def find(self, search_pattern):
out = self.tree.find(search_pattern)
#if out == None:
# print('No results searching for '+search_pattern)
return out
def findall(self, search_pattern):
return self.tree.findall(search_pattern)
def display(self):
xmlTree.dump(self.tree)
class AgentOutput(Output):
def __init__(self, path=None, root_name='idynomics'):
Output.__init__(self, path=path, root_name=root_name)
if path == None:
grid = xmlTree.SubElement(self.simulation, 'grid')
grid.set('resolution', '0.0')
grid.set('nI','0')
grid.set('nJ','0')
grid.set('nK','0')
grid = self.find('./simulation/grid')
self.grid_res = float(grid.attrib['resolution'])
self.grid_nI = int(grid.attrib['nI'])
self.grid_nJ = int(grid.attrib['nJ'])
self.grid_nK = int(grid.attrib['nK'])
self.three_dim = (self.grid_nK > 1)
self.species = self.findall('./simulation/species')
self.species_outputs = self.get_species_outputs()
def check_single_species(self):
if len(self.species) > 1:
toolbox_basic.error_message('More than one species in:',self.path)
return False
elif len(self.species) < 1:
toolbox_basic.error_message('No species present in:',self.path)
return False
else:
return True
def get_species_names(self):
names = []
for species in self.species:
names.append(species.attrib['name'])
return names
def get_species_outputs(self):
spec_list = []
for spec_name in self.get_species_names():
spec_list.append(SpeciesOutput(self, name=spec_name))
return spec_list
def get_species_by_name(self, name):
for species in self.get_species_outputs():
if name == species.name:
return species
toolbox_basic.error_message('Species %s cannot be found in'%(name), self.path)
def get_all_cells(self):
cell_list = []
for spec in self.species_outputs:
cell_list += spec.members
return cell_list
def add_species(self, data, header, name):
species = xmlTree.SubElement(self.simulation, 'species')
species.set('name', name)
species.set('header', header)
species.text = data
self.species = self.findall('./simulation/species')
    def calc_total_attribute(self, attribute):
total = 0.0
for species in self.species_outputs:
            total += species.calc_total_attribute(attribute)
return total
class SpeciesOutput:
def __init__(self, agent_output, name=''):
if not (name == ''):
search_pattern = './simulation/species[@name="'+name+'"]'
elif agent_output.check_single_species():
search_pattern = './simulation/species'
else:
toolbox_basic.error_message('Please define which species to use in',
agent_output.path)
self.agent_output = agent_output
species = self.agent_output.find(search_pattern)
self.name = species.attrib['name']
self.attributes = species.attrib['header'].split(',')
self.biomass_names = []
self.members = []
for line in species.text.translate(None,'\n').split(';'):
if line == '': break
variables = line.split(',')
cell = CellOutput(self)
for i, attribute in enumerate(self.attributes):
cell.vars[attribute] = variables[i]
self.members.append(cell)
def calc_mean_specific_growth_rate(self):
rates = self.get_specific_growth_rates()
mean = numpy.mean(rates)
std = numpy.std(rates)
return mean, std
def calc_total_specific_growth_rate(self):
rates = self.get_specific_growth_rates()
return sum(rates)
    def calc_total_attribute(self, attribute):
if attribute == 'specific growth rate':
return self.calc_total_specific_growth_rate()
if self.attributes.count(attribute) < 1:
toolbox_basic.error_message('Species '+self.name,
'does not have attribute '+attribute)
exit()
return sum(self.get_attribute_values(attribute))
def calc_mean_attribute(self, attribute):
if attribute == 'specific growth rate':
return self.calc_mean_specific_growth_rate()
if self.attributes.count(attribute) < 1:
toolbox_basic.error_message('Species '+self.name,
'does not have attribute '+attribute)
exit()
values = self.get_attribute_values(attribute)
mean = numpy.mean(values)
std = numpy.std(values)
return mean, std
def change_header(self, new_header):
search_pattern = './simulation/species[@name='+self.name+']'
self.agent_output.species[0].attrib['header'] = new_header
self.attributes = new_header.split(',')
def get_header(self):
return self.agent_output.species[0].attrib['header']
    def find_attribute_position(self, attribute):
        position = -1
        for i, x in enumerate(self.get_header().split(',')):
            if str(x) == str(attribute):
                position = i
                break
        if position < 0:
            msg = 'Could not find attribute "'+attribute
            msg += '" for species "'+self.name+'" in '
            toolbox_basic.error_message(msg, self.agent_output.path)
        return position
def find_cells(self, requirements):
possibles = self.members
for attribute in requirements.keys():
requirement = str(requirements[attribute])
possibles = [c for c in possibles if
(str(c.vars[attribute]) == requirement)]
return possibles
def get_specific_growth_rates(self):
rates = []
for cell in self.members:
rates.append(cell.get_specific_growth_rate(self.biomass_names))
return rates
def get_attribute_values(self, attribute):
if attribute == 'specific growth rate':
return self.get_specific_growth_rates()
values = []
for cell in self.members:
values.append(float(cell.vars[attribute]))
return values
def set_biomass_names(self, biomass_names):
self.biomass_names = biomass_names
def update_agent_output(self):
data_script = '\n'
for cell in self.members:
for attribute in self.attributes[:-1]:
data_script += str(cell.vars[attribute])+','
data_script += str(cell.vars[self.attributes[-1]])+';\n'
search_pattern = './simulation/species[@name='+self.name+']'
self.agent_output.species[0].text = data_script
def population(self):
return len(self.members)
class CellOutput:
def __init__(self, species):
self.species = species.name
self.vars = {}
# color should be in RGB, values between 0 and 1: (r, g, b)
self.color = None
def get_location(self):
x = float(self.vars['locationX'])
y = float(self.vars['locationY'])
        if 'locationZ' in self.vars:
z = float(self.vars['locationZ'])
else:
z = 0.0
return (x, y, z)
def get_radius(self, total_radius=True):
if total_radius:
return float(self.vars['totalRadius'])
else:
return float(self.vars['radius'])
def get_specific_growth_rate(self, biomass_names):
growth_rate = float(self.vars['growthRate'])
biomass = self.get_total_biomass(biomass_names)
return growth_rate/biomass
def get_total_biomass(self, biomass_names):
biomass = 0.0
for bname in biomass_names:
biomass += float(self.vars[bname])
return biomass
def calc_sphere_volume(self, total_radius=True):
#if total_radius: r = float(self.vars['totalRadius'])
#else: r = self.vars['radius']
r = self.get_radius(total_radius=total_radius)
return (4/3) * math.pi * (r**3)
def calc_circle_area(self, total_radius=True):
#if total_radius: r = self.vars['totalRadius']
#else: r = self.vars['radius']
r = self.get_radius(total_radius=total_radius)
return math.pi * (r**2)
class EnvOutput(Output):
def __init__(self, path, root_name='idynomics'):
Output.__init__(self, path, root_name=root_name)
# If the simulation is a biofilm one, there will be a thickness element
thickness = self.find('./simulation/thickness')
        if thickness is None:
self.biofilm = False
else:
self.biofilm = True
self.thickness_mean = float(thickness.find('mean').text)
self.thickness_stddev = float(thickness.find('stddev').text)
self.thickness_max = float(thickness.find('max').text)
self.solutes = self.findall('./simulation/solute')
def get_solute(self, solute_name):
for solute in self.solutes:
if solute.attrib['name'] == solute_name:
return solute
toolbox_basic.error_message('Could not find solute '+solute_name,
'in '+self.path)
def get_solute_names(self):
names = []
for solute in self.solutes:
names.append(solute.attrib['name'])
return names
class SoluteOutput:
def __init__(self, env_output, name=''):
search_pattern = './simulation/'
        if env_output.find('./simulation/bulk') is not None:
search_pattern += 'bulk/'
if not (name == ''):
search_pattern += 'solute[@name="'+name+'"]'
else:
toolbox_basic.error_message('Please define which solute to use in',
env_output.path)
self.env_output = env_output
solute = env_output.find(search_pattern)
        if solute is None:
toolbox_basic.error_message('Trouble finding solute from name:',
search_pattern)
self.name = solute.attrib['name']
self.unit = solute.attrib['unit']
self.grid_res = float(solute.attrib['resolution'])
self.grid_nI = int(solute.attrib['nI'])
self.grid_nJ = int(solute.attrib['nJ'])
self.grid_nK = int(solute.attrib['nK'])
self.three_dim = (self.grid_nK > 1)
temp = solute.text.translate(None,' ').split(';\n')
self.values = []
for value in temp:
if value == '' or value == '\n': continue
self.values.append(float(value))
def get_concentration(self):
return self.values[0]
def concentration_array(self):
self.array = numpy.array(self.values)
if self.three_dim:
# Older versions of iDynoMiCS included padding in the env_State
if self.array.shape[0] == self.grid_nI*self.grid_nJ*self.grid_nK:
new_shape = (self.grid_nI, self.grid_nJ, self.grid_nK)
self.array = self.array.reshape(new_shape)
else:
new_shape = (self.grid_nI+2, self.grid_nJ+2, self.grid_nK+2)
self.array = self.array.reshape(new_shape)
self.array = self.array[1:-1, 1:-1, 1:-1]
else:
# Older versions of iDynoMiCS included padding in the env_State
if self.array.shape[0] == self.grid_nI*self.grid_nJ:
new_shape = (self.grid_nI, self.grid_nJ)
self.array = self.array.reshape(new_shape)
else:
new_shape = (self.grid_nI+2, self.grid_nJ+2)
self.array = self.array.reshape(new_shape)
self.array = self.array[1:-1, 1:-1]
return self.array
class ResultsOutput(Output):
def __init__(self, path=None, root_name='idynomics'):
Output.__init__(self, path=path, root_name=root_name)
        if self.get_results() is None:
results = xmlTree.SubElement(self.simulation, 'results')
def add_result(self, attributes, data):
results = self.get_results()
new_result = xmlTree.SubElement(results, 'result')
for attribute in attributes.keys():
new_result.set(attribute, attributes[attribute])
new_result.text = data
return new_result
def get_result(self, attributes, ignore_header=False):
attrib = attributes.copy()
if ignore_header:
attrib.pop('header')
for result in self.get_results():
result_attrib = result.attrib.copy()
if ignore_header:
result_attrib.pop('header')
if toolbox_basic.are_dicts_same(result_attrib, attrib):
return result
return None
def get_results(self):
return self.find('./simulation/results')
class ResultSet:
def __init__(self, results_output, attributes, ignore_header=False):
self.results_output = results_output
self.name = attributes['name']
self.members = []
self.result = results_output.get_result(attributes, ignore_header)
        if self.result is None:
self.attributes = attributes['header'].split(',')
self.result = results_output.add_result(attributes, '')
else:
self.attributes = self.result.attrib['header'].split(',')
for line in self.result.text.translate(None,'\n').split(';'):
if line == '': break
variables = line.split(',')
result = SingleResult()
for i, attribute in enumerate(self.attributes):
result.vars[attribute] = variables[i]
self.members.append(result)
def update_results_output(self):
data_script = '\n'
for result in self.members:
for attribute in self.attributes[:-1]:
data_script += str(result.vars[attribute])+','
data_script += str(result.vars[self.attributes[-1]])+';\n'
self.result.text = data_script
self.result.set('name', self.name)
self.result.set('header', ','.join(self.attributes))
def find_single_result(self, attribute_name, attribute_value):
for result in self.members:
if str(result.vars[attribute_name]) == str(attribute_value):
return result
toolbox_basic.error_message('Could not find result with '+str(attribute_name),
'of '+str(attribute_value))
return None
class SingleResult:
def __init__(self):
self.vars = {}
'''def get_single_species(path):
print('toolbox_results.get_single_species(path) is deprecated')
output = AgentOutput(path)
species = SpeciesOutput(output)
return output, species'''
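# Hedged usage sketch (the file name is illustrative, not shipped with this
# module):
#   ao = AgentOutput('agent_State(240).xml')
#   for spec in ao.species_outputs:
#       spec.set_biomass_names(['biomass'])
#       mean, std = spec.calc_mean_attribute('specific growth rate')
#       print(spec.name, spec.population(), mean, std)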
| (above) | roughhawkbit/robs-python-scripts | toolbox_results.py | Python | mit | 16,594 |
import numpy as np
import unittest
from accuread import ReadART
class TestAccuRead(unittest.TestCase):
def setUp(self):
self.PA = ReadART('demo1',basefolder='accuread/tests/testdata',
scalar=True,iops=True,direct=True,sine=True,radiance=True,
runvarfile='sza.txt')
def test_wl(self):
wl = self.PA.wavelengths
self.assertTrue(np.array_equal(np.array([400,500,600,700]),wl))
def test_depths(self):
z = self.PA.depths
self.assertTrue(np.array_equal(np.array([0,99999,100001]),z))
def test_runvar(self):
rv = self.PA.runvar
self.assertTrue(np.array_equal(np.array([45,60]),rv))
def test_cosdown_size(self):
irrshape = self.PA.downdata.shape
self.assertEqual((3,4,2),irrshape)
def test_sindown_size(self):
irrshape = self.PA.sine_down.shape
self.assertEqual((3,4,2),irrshape)
def test_scldown_size(self):
irrshape = self.PA.scalar_down.shape
self.assertEqual((3,4,2),irrshape)
def test_dirdown_size(self):
irrshape = self.PA.direct_down.shape
self.assertEqual((3,4,2),irrshape)
def test_cosup_size(self):
irrshape = self.PA.updata.shape
self.assertEqual((3,4,2),irrshape)
def test_sinup_size(self):
irrshape = self.PA.sine_up.shape
self.assertEqual((3,4,2),irrshape)
def test_sclup_size(self):
irrshape = self.PA.scalar_up.shape
self.assertEqual((3,4,2),irrshape)
def test_dirup_size(self):
irrshape = self.PA.direct_down.shape
self.assertEqual((3,4,2),irrshape)
def test_rad_size(self):
radshape = self.PA.radiance.shape
self.assertEqual((3,4,10,7,2),radshape)
def test_polarangles(self):
polang = self.PA.polarangles
self.assertTrue(
np.array_equal(
np.array([179.92,160,140,120,100,80,60,40,20,0.081028]),
polang))
def test_azimuthanglesangles(self):
azang = self.PA.azimuthangles
self.assertTrue(
np.array_equal(
np.array([50,70,90,110,130,150,170]),
azang))
if __name__ == '__main__':
unittest.main()
| (above) | TorbjornT/pyAccuRT | accuread/tests/test_basic.py | Python | mit | 2,225 |
##################
# Projective Set #
# Sam Ettinger #
# April 2014 #
##################
# NOTE: this script targets Python 2 (print statement, list-returning range,
# builtin reduce).
import random, pygame, sys
from pygame.locals import *
if not pygame.font: print 'Warning, fonts disabled'
# Colors
aqua = (0, 255, 255)
bgcol = (230, 230, 230)
black = (0, 0, 0)
blue = (0, 0, 255)
fuschia = (255, 0, 255)
gray = (128, 128, 128)
green = (0, 128, 0)
lime = (0, 255, 0)
orange = (255, 165, 0)
red = (255, 0, 0)
white = (255, 255, 255)
yellow = (255, 255, 50)
# saddlebrown = (139, 69, 19)
# (x,y) coords of top-left corner of cards in various modes
cardposlist = [
[(120, 100), (240, 100), (120, 270), (240, 270)],
[(120, 100), (240, 100), (60, 270), (180, 270), (300, 270)],
[(60, 100), (180, 100), (300, 100), (60, 270), (180, 270), (300, 270)],
[(120, 100), (240, 100), (60, 270), (180, 270), (300, 270), (120, 440), (240, 440)],
[(60, 100), (180, 100), (300, 100), (120, 270), (240, 270), (60, 440), (180, 440), (300, 440)],
[(60, 100), (180, 100), (300, 100), (60, 270), (180, 270), (300, 270), (60, 440), (180, 440), (300, 440)],
[(120, 100), (240, 100), (360, 100), (60, 270), (180, 270), (300, 270), (420, 270), (120, 440), (240, 440), (360, 440)]]
# (x,y) coords of dots' centers in various modes, relative to top-left corner of card
dotposlist = [
[(50, 25), (50, 75), (50, 125)],
[(25, 50), (75, 50), (25, 100), (75, 100)],
[(25, 25), (75, 25), (50, 75), (25, 125), (75, 125)],
[(25, 25), (75, 25), (25, 75), (75, 75), (25, 125), (75, 125)],
[(25, 25), (75, 25), (25, 75), (75, 75), (25, 125), (75, 125), (50,50)],
[(25, 19), (75, 19), (50, 47), (25, 75), (75, 75), (50, 103), (25, 131), (75, 131)],
[(17, 25), (50, 25), (83, 25), (17, 75), (50, 75), (83, 75), (17, 125), (50, 125), (83, 125)]]
# Aaaaand colors of dots 1-n in difficulty level n
dotcollist = [
[red, yellow, blue],
[red, orange, blue, fuschia],
[red, orange, green, blue, fuschia],
[red, orange, yellow, green, blue, fuschia],
[red, orange, yellow, green, blue, fuschia, aqua],
[red, orange, aqua, yellow, green, gray, blue, fuschia],
[red, aqua, orange, yellow, gray, green, blue, lime, fuschia]]
# Dot radii, card size, etc
radius = 16
cardsize = (cardwidth, cardheight) = (100, 150)
# Choose number of bits on card (3-9)
pygame.init()
screen = pygame.display.set_mode((460, 660))
cardpos = cardposlist[3]
screen.fill(bgcol)
for k in range(7):
power = k+3
dotpos = dotposlist[k]
dotcol = dotcollist[k]
pos = (x,y) = (cardpos[k][0], cardpos[k][1])
pygame.draw.rect(screen, bgcol, Rect(pos, cardsize))
pygame.draw.rect(screen, black, Rect((x-1, y-1), (cardwidth+2, cardheight+2)), 1)
for dot in range(power):
pygame.draw.circle(screen, black, (x+dotpos[dot][0],y+dotpos[dot][1]), radius+2)
pygame.draw.circle(screen, dotcol[dot], (x+dotpos[dot][0],y+dotpos[dot][1]), radius)
if pygame.font:
font = pygame.font.Font(None, 24)
font2 = pygame.font.Font(None, 18)
title = font.render("Projective Set", 1, black)
instr = font.render("Choose difficulty:", 1, black)
rules1 = font2.render("RULE: Select a subset of cards such that each color has an even", 1, black)
rules2 = font2.render("number of dots. COMMANDS: Number keys or click to toggle", 1, black)
rules3 = font2.render("selection. Type \'c\' to clear selection. Type \'r\' to reset.", 1, black)
titlep = title.get_rect(centerx=230, y=10)
instrp = instr.get_rect(centerx=230, y=70)
rules1p = rules1.get_rect(centerx=230, y=600)
rules2p = rules2.get_rect(centerx=230, y=618)
rules3p = rules3.get_rect(centerx=230, y=636)
screen.blit(title, titlep)
screen.blit(instr, instrp)
screen.blit(rules1, rules1p)
screen.blit(rules2, rules2p)
screen.blit(rules3, rules3p)
pygame.display.update()
# Loop until power is chosen
power = 0
while power not in range(3,10):
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
elif event.type == MOUSEBUTTONUP and event.button == 1:
mousepos = event.pos
for k in range(7):
cardrect = Rect(cardpos[k], cardsize)
if cardrect.collidepoint(mousepos): power = k+3
elif event.type == KEYDOWN:
if event.key == K_ESCAPE or event.key == K_q:
pygame.quit()
sys.exit()
if event.key == K_1: power = 3
if event.key == K_2: power = 4
if event.key == K_3: power = 5
if event.key == K_4: power = 6
if event.key == K_5: power = 7
if event.key == K_6: power = 8
if event.key == K_7: power = 9
# Now that power's chosen, initialize things
cardpos = cardposlist[power-3]
dotpos = dotposlist[power-3]
dotcol = dotcollist[power-3]
# Initialize Deck
deck = range(1,2**power)
random.shuffle(deck)
selected = []
inPlay = [0]*(power+1)
if power==9: width = 580
else: width = 460
if power<6: height = 440
else: height = 610
size = (width, height)
screen = pygame.display.set_mode(size)
def drawdot(x,y,color):
pygame.draw.circle(screen, black, (x,y), radius+2)
pygame.draw.circle(screen, color, (x,y), radius)
def drawcard(x,y,num,isSelected):
if num != 0:
if isSelected: pygame.draw.rect(screen, gray, Rect((x,y),cardsize))
else: pygame.draw.rect(screen, bgcol, Rect((x,y),cardsize))
pygame.draw.rect(screen, black, Rect((x-1,y-1),(cardwidth+2,cardheight+2)),1)
for k in range(power):
if (2**k & num) == 2**k:
drawdot(x+dotpos[k][0], y+dotpos[k][1], dotcol[k])
def isSet(cardlist):
if len(cardlist)>2: return (reduce(lambda x,y:x^y,cardlist)==0)
else: return False
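# Quick sanity check of the set rule (illustrative, not part of the game flow):
# the XOR of all selected card bitmasks is zero exactly when every color
# appears an even number of times across the selection, which is the rule
# described in the instructions text above.
assert isSet([0b011, 0b101, 0b110])   # 3 ^ 5 ^ 6 == 0, a valid set
assert not isSet([0b011, 0b101])      # fewer than 3 cards is never a set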
win = False
sets = 0
clk = pygame.time.Clock()
ms = 0
while 1:
# Check for win
if len(deck)==0 and inPlay == [0]*(power+1):
win = True
# Change text?
# Check if a valid set is selected
if isSet(selected):
for k in range(power+1):
if inPlay[k] in selected: inPlay[k] = 0
sets += 1
selected = []
# Check for selections
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
elif event.type == MOUSEBUTTONUP and event.button == 1:
mousepos = event.pos
for k in range(power+1):
cardrect = Rect(cardpos[k], cardsize)
if cardrect.collidepoint(mousepos):
if inPlay[k] in selected: selected.remove(inPlay[k])
else: selected.append(inPlay[k])
elif event.type == KEYDOWN:
if event.key == K_ESCAPE or event.key == K_q:
pygame.quit()
sys.exit()
if event.key == K_r:
deck = range(1,2**power)
random.shuffle(deck)
selected = []
inPlay = [0]*(power+1)
clk.tick()
ms = 0
win = False
sets = 0
if event.key == K_c: selected = []
if event.key == K_1 and power>0:
if inPlay[0] in selected: selected.remove(inPlay[0])
else: selected.append(inPlay[0])
if event.key == K_2 and power>0:
if inPlay[1] in selected: selected.remove(inPlay[1])
else: selected.append(inPlay[1])
if event.key == K_3 and power>1:
if inPlay[2] in selected: selected.remove(inPlay[2])
else: selected.append(inPlay[2])
if event.key == K_4 and power>2:
if inPlay[3] in selected: selected.remove(inPlay[3])
else: selected.append(inPlay[3])
if event.key == K_5 and power>3:
if inPlay[4] in selected: selected.remove(inPlay[4])
else: selected.append(inPlay[4])
if event.key == K_6 and power>4:
if inPlay[5] in selected: selected.remove(inPlay[5])
else: selected.append(inPlay[5])
if event.key == K_7 and power>5:
if inPlay[6] in selected: selected.remove(inPlay[6])
else: selected.append(inPlay[6])
if event.key == K_8 and power>6:
if inPlay[7] in selected: selected.remove(inPlay[7])
else: selected.append(inPlay[7])
if event.key == K_9 and power>7:
if inPlay[8] in selected: selected.remove(inPlay[8])
else: selected.append(inPlay[8])
if event.key == K_0 and power>8:
if inPlay[9] in selected: selected.remove(inPlay[9])
else: selected.append(inPlay[9])
# Deal cards
if 0 in inPlay:
for k in range(power+1):
if inPlay[k]==0 and len(deck)>0: inPlay[k] = deck.pop()
# Update time
if not win: ms = ms + clk.tick()
rawtime = int(ms/1000)
second = rawtime%60
strsec = str(second)
if len(strsec)==1: strsec = '0'+strsec
minute = ((rawtime-second)%3600)/60
strmin = str(minute)
if len(strmin)==1: strmin = '0'+strmin
hour = (rawtime-second-(60*minute))/3600
strhr = str(hour)
# Draw display
screen.fill(bgcol)
if pygame.font:
font = pygame.font.Font(None, 24)
title = font.render("Projective Set", 1, black)
timer = font.render(strhr + ':' + strmin + ':' + strsec, 1, black)
nset = font.render("Sets: " + str(sets), 1, black)
ncard = font.render("Cards left: " + str(len(deck)),1, black)
titlep = title.get_rect(centerx=width/2, y=10)
screen.blit(title, titlep)
timerp = timer.get_rect(centerx=width/2, y=60)
screen.blit(timer, timerp)
nsetp = nset.get_rect(centerx = width/2-90, y=40)
screen.blit(nset, nsetp)
ncardp = ncard.get_rect(centerx=width/2+90, y=40)
screen.blit(ncard, ncardp)
for card in range(power+1):
if inPlay[card] != 0: drawcard(cardpos[card][0], cardpos[card][1], inPlay[card], (inPlay[card] in selected))
pygame.display.update()
|
settinger/proset
|
proset.py
|
Python
|
mit
| 10,193
|
#! /usr/bin/python
# Originally found on http://www.mobileread.com/forums/showthread.php?t=25565
import getopt, sys
from pyPdf import PdfFileWriter, PdfFileReader
def usage ():
print """sjvr767\'s PDF Cropping Script.
Example:
my_pdf_crop.py -s -p 0.5 -i input.pdf -o output.pdf
my_pdf_crop.py --skip --percent 0.5 --input input.pdf --output output.pdf
\n
REQUIRED OPTIONS:
-p\t--percent
The factor by which to crop. Must be positive and less than or equal to 1.
-i\t--input
The path to the file to be cropped.
\n
OPTIONAL:
-s\t--skip
Skip the first page. Output file will not contain the first page of the input file.
-o\t--output
Specify the name and path of the output file. If none specified, the script appends \'cropped\' to the file name.
-m\t--margin
Specify additional absolute cropping, for fine tuning results.
\t-m "left top right bottom"
"""
sys.exit(0)
def cut_length(dictionary, key, factor):
cut_factor = 1-factor
cut = float(dictionary[key])*cut_factor
cut = cut / 4
return cut
def new_coords(dictionary, key, cut, margin, code = "tl"):
if code == "tl":
if key == "x":
return abs(float(dictionary[key])+(cut+margin["l"]))
else:
return abs(float(dictionary[key])-(cut+margin["t"]))
elif code == "tr":
if key == "x":
return abs(float(dictionary[key])-(cut+margin["r"]))
else:
return abs(float(dictionary[key])-(cut+margin["t"]))
elif code == "bl":
if key == "x":
return abs(float(dictionary[key])+(cut+margin["l"]))
else:
return abs(float(dictionary[key])+(cut+margin["b"]))
else:
if key == "x":
return abs(float(dictionary[key])-(cut+margin["r"]))
else:
return abs(float(dictionary[key])+(cut+margin["b"]))
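# Worked numbers (illustrative): for a 612 pt wide page and the default
# factor 0.8, cut_length returns (1 - 0.8) * 612 / 4 = 30.6 pt; new_coords
# then moves each corner inward by that amount plus any per-side margin.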
try:
opts, args = getopt.getopt(sys.argv[1:], "sp:i:o:m:", ["skip", "percent=", "input=", "output=", "margin="])
except getopt.GetoptError, err:
# print help information and exit:
print str(err) # will print something like "option -a not recognized"
usage()
sys.exit(2)
skipone = 0
for a in opts[:]:
if a[0] == '-s' or a[0]=='--skip':
skipone = 1
factor = 0.8 #default scaling factor
for a in opts[:]:
    if a[0] == '-p' or a[0]=='--percent':
if a[1] != None:
try:
factor = float(a[1])
except TypeError:
print "Factor must be a number."
sys.exit(2) #exit if no appropriate input file
input_file = None #no default input file
for a in opts[:]:
if a[0] == '-i' or a[0]=='--input':
if a[1] != None:
try:
if a[1][-4:]=='.pdf':
input_file = a[1]
else:
print "Input file must be a PDF."
sys.exit(2) #exit if no appropriate input file
except TypeError:
print "Input file must be a PDF."
sys.exit(2) #exit if no appropriate input file
except IndexError:
print "Input file must be a PDF."
sys.exit(2) #exit if no appropriate input file
else:
print "Please speicfy an input file."
sys.exit(2) #exit if no appropriate input file
if input_file == None:
    print "Please specify an input file."
    sys.exit(2) #exit if no input file given at all
output_file = "%s_cropped.pdf" %input_file[:-4] #default output
for a in opts[:]:
if a[0] == '-o' or a[0]=='--output':
if a[1]!= None:
try:
if a[1][-4:]=='.pdf':
output_file = a[1]
else:
print "Output file must be a PDF."
except TypeError:
print "Output file must be a PDF."
except IndexError:
print "Output file must be a PDF."
margin = {"l": 0, "t": 0, "r": 0, "b": 0}
for a in opts[:]:
if a[0] == '-m' or a[0]=='--margin':
if a[1]!= None:
m_temp = a[1].strip("\"").split()
margin["l"] = float(m_temp[0])
margin["t"] = float(m_temp[1])
margin["r"] = float(m_temp[2])
margin["b"] = float(m_temp[3])
else:
print "Error"
input1 = PdfFileReader(file(input_file, "rb"))
output = PdfFileWriter()
outputstream = file(output_file, "wb")
pages = input1.getNumPages()
top_right = {'x': input1.getPage(1).mediaBox.getUpperRight_x(), 'y': input1.getPage(1).mediaBox.getUpperRight_y()}
top_left = {'x': input1.getPage(1).mediaBox.getUpperLeft_x(), 'y': input1.getPage(1).mediaBox.getUpperLeft_y()}
bottom_right = {'x': input1.getPage(1).mediaBox.getLowerRight_x(), 'y': input1.getPage(1).mediaBox.getLowerRight_y()}
bottom_left = {'x': input1.getPage(1).mediaBox.getLowerLeft_x(), 'y': input1.getPage(1).mediaBox.getLowerLeft_y()}
print('Page dim.\t%f by %f' %(top_right['x'], top_right['y']))
cut = cut_length(top_right, 'x', factor)
new_tr = (new_coords(top_right, 'x', cut, margin, code = "tr"), new_coords(top_right, 'y', cut, margin, code = "tr"))
new_br = (new_coords(bottom_right, 'x', cut, margin, code = "br"), new_coords(bottom_right, 'y', cut, margin, code = "br" ))
new_tl = (new_coords(top_left, 'x', cut, margin, code = "tl"), new_coords(top_left, 'y', cut, margin, code = "tl"))
new_bl = (new_coords(bottom_left, 'x', cut, margin, code = "bl"), new_coords(bottom_left, 'y', cut, margin, code = "bl"))
if skipone == 0:
for i in range(0, pages):
page = input1.getPage(i)
page.mediaBox.upperLeft = new_tl
page.mediaBox.upperRight = new_tr
page.mediaBox.lowerLeft = new_bl
page.mediaBox.lowerRight = new_br
output.addPage(page)
else:
for i in range(1, pages):
page = input1.getPage(i)
page.mediaBox.upperLeft = new_tl
page.mediaBox.upperRight = new_tr
page.mediaBox.lowerLeft = new_bl
page.mediaBox.lowerRight = new_br
output.addPage(page)
output.write(outputstream)
outputstream.close()
|
ActiveState/code
|
recipes/Python/576837_Crop_PDF_File_with_pyPdf/recipe-576837.py
|
Python
|
mit
| 5,316
|
"""homeweb URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
url(r'^', include('landing.urls')),
url(r'^dinner/', include('dinner.urls')),
url(r'^admin/', admin.site.urls),
]
|
srenner/homeweb-v2
|
homeweb/homeweb/urls.py
|
Python
|
mit
| 859
|
from sys import argv
import logging
import numpy as np
def pearson_mtx(mtx1, mtx2):
assert mtx1.shape[0] == mtx2.shape[0]
if len(mtx1.shape) == 1:
mtx1 = np.reshape(mtx1, (mtx1.shape[0], 1))
if len(mtx2.shape) == 1:
mtx2 = np.reshape(mtx2, (mtx2.shape[0], 1))
logging.info('Matrix1 size: {}\nMatrix2 size: {}'.format(mtx1.shape, mtx2.shape))
n = mtx1.shape[0]
u = mtx1.shape[1]
v = mtx2.shape[1]
means1 = mtx1.mean(0)
logging.info('Means 1 computed: {}'.format(means1.shape))
means2 = mtx2.mean(0)
logging.info('Means 2 computed: {}'.format(means2.shape))
sqsum1 = mtx1.transpose().dot(mtx1).diagonal()
logging.info('SqMeans 1 computed: {}'.format(sqsum1.shape))
sqsum2 = mtx2.transpose().dot(mtx2).diagonal()
logging.info('SqMeans 2 computed: {}'.format(sqsum2.shape))
pearson = np.zeros((u, v), np.float)
    x_ = np.sqrt(np.array([sqsum1[i] - n * (means1[i] ** 2) for i in xrange(u)], dtype=np.float64))
    y_ = np.sqrt(np.array([sqsum2[i] - n * (means2[i] ** 2) for i in xrange(v)], dtype=np.float64))
for i in xrange(u):
vec1 = mtx1[:, i]
for j in xrange(v):
vec2 = mtx2[:, j]
s = vec1.transpose() * vec2
            s = s.sum() - means1[i] * means2[j] * n
if x_[i] == 0 or y_[j] == 0:
pearson[i, j] = 0
else:
pearson[i, j] = s / (x_[i] * y_[j])
return pearson
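# Illustrative usage (a minimal sketch): columns related by a positive linear
# map should correlate perfectly, e.g.
#   a = np.array([[1.0], [2.0], [3.0]])
#   b = np.array([[2.0], [4.0], [6.0]])
#   pearson_mtx(a, b)   # -> array([[ 1.]])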
def main():
with open(argv[1]) as f:
mtx1 = np.loadtxt(f, np.float)
with open(argv[2]) as f:
mtx2 = np.loadtxt(f, np.float)
print pearson_mtx(mtx1, mtx2)
if __name__ == '__main__':
main()
|
juditacs/dsl
|
dsl/utils/pearson.py
|
Python
|
mit
| 1,689
|
import copy
from proxy import Proxy
class ProxyList(list):
"""
A proxy wrapper for a normal Python list.
A lot of functionality is being reproduced from Proxy. Inheriting Proxy would
simplify things a lot but I get type errors when I try to do so. It is not exactly
clear what a partial copy entails for a ProxyList so we will not consider this
option for now.
"""
__slots__ = ["_obj", "__weakref__", "__slots__", "_is_copied",
"_enable_partial_copy", "_attr_map"]
_is_copied = False
_special_names = [
'__add__', '__contains__', '__delitem__', '__delslice__',
'__eq__', '__ge__', '__getitem__', '__getslice__', '__gt__', '__hash__',
'__iadd__', '__imul__', '__iter__', '__le__', '__len__',
'__lt__', '__mul__', '__ne__', '__reduce__', '__reduce_ex__', '__repr__',
'__reversed__', '__rmul__', '__setitem__', '__setslice__', '__sizeof__',
'__str__', '__subclasshook__', '__xor__', 'next',
]
def __init__(self, obj, _partial_copy=False):
object.__setattr__(self, "_obj", obj)
object.__setattr__(self, "_enable_partial_copy", _partial_copy)
def append(self, obj):
if not self._is_copied:
self._obj = copy.deepcopy(self._obj)
self._is_copied = True
self._obj.append(obj)
def count(self, obj):
return self._obj.count(obj)
def extend(self, iterable):
if not self._is_copied:
self._obj = copy.deepcopy(self._obj)
self._is_copied = True
self._obj.extend(iterable)
def index(self, obj):
return self._obj.index(obj)
def insert(self, idx, obj):
if not self._is_copied:
self._obj = copy.deepcopy(self._obj)
self._is_copied = True
self._obj.insert(idx, obj)
def pop(self):
if not self._is_copied:
self._obj = copy.deepcopy(self._obj)
self._is_copied = True
return self._obj.pop()
def remove(self, obj):
if not self._is_copied:
self._obj = copy.deepcopy(self._obj)
self._is_copied = True
self._obj.remove(obj)
def reverse(self):
if not self._is_copied:
self._obj = copy.deepcopy(self._obj)
self._is_copied = True
self._obj.reverse()
    def sort(self, cmp=None, key=None, reverse=False):
if not self._is_copied:
self._obj = copy.deepcopy(self._obj)
self._is_copied = True
        self._obj.sort(cmp, key, reverse)
@classmethod
def _create_class_proxy(cls, theclass):
"""creates a proxy for the given class"""
def make_method(name):
def method(self, *args, **kw):
                if name in cls._special_names and args != ():
args = map(lambda x: x._obj if isinstance(x, Proxy) or
isinstance(x, ProxyList) else x, args)
return getattr(object.__getattribute__(self, "_obj"), name)(*args, **kw)
return method
namespace = {}
for name in cls._special_names:
if hasattr(theclass, name):
namespace[name] = make_method(name)
return type("%s(%s)" % (cls.__name__, theclass.__name__), (cls,), namespace)
def __new__(cls, obj, *args, **kwargs):
"""
creates an proxy instance referencing `obj`. (obj, *args, **kwargs) are
passed to this class' __init__, so deriving classes can define an
__init__ method of their own.
note: _class_proxy_cache is unique per deriving class (each deriving
class must hold its own cache)
"""
try:
cache = cls.__dict__["_class_proxy_cache"]
except KeyError:
cls._class_proxy_cache = cache = {}
try:
theclass = cache[obj.__class__]
except KeyError:
cache[obj.__class__] = theclass = cls._create_class_proxy(obj.__class__)
ins = list.__new__(theclass)
theclass.__init__(ins, obj, *args, **kwargs)
return ins
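# Copy-on-write behaviour in practice (illustrative sketch, values hypothetical):
#   shared = [1, 2, 3]
#   plist = ProxyList(shared)
#   plist.count(2)    # read-only call, delegates to the shared list
#   plist.append(4)   # first mutation deep-copies, then appends
#   shared            # -> [1, 2, 3]  (the original is untouched)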
|
diffoperator/pycow
|
src/pycow/proxylist.py
|
Python
|
mit
| 4,140
|
# -*- encoding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import base64
import json
import six
from addonpayments.hpp.card_storage.requests import CardStorageRequest
from addonpayments.hpp.payment.requests import PaymentRequest
from addonpayments.hpp.common.responses import HppResponse
from addonpayments.logger import Logger
logger = Logger().get_logger(__name__)
class JsonUtils(object):
"""
Utils to serialize and deserialize HPP objects to JSON
"""
@staticmethod
def to_json(hpp_object, charset, encoded=False):
"""
Method serialises HppRequest or HppResponse to JSON.
:param hpp_object:
:param charset: string
:param encoded: bool
:return: string
"""
dict_object = hpp_object.to_dict()
if encoded:
return json.dumps(JsonUtils.encode(dict_object, charset))
        return json.dumps(dict_object)
@staticmethod
def from_json_hpp_request(json_hpp_request, charset, encoded=False):
"""
        Method deserializes JSON to HppRequest.
:param json_hpp_request: string
:param charset: string
:param encoded: bool
:return: HppRequest
"""
obj_request = json.loads(json_hpp_request)
if encoded:
obj_request = JsonUtils.decode(obj_request, charset)
is_card_storage = False
if obj_request.get('CARD_STORAGE_ENABLE') or obj_request.get('card_storage_enable'):
is_card_storage = True
dict_request = {}
supplementary_data = {}
for key, value in six.iteritems(obj_request):
key_hpp = key.lower()
is_supplementary_data = False
if is_card_storage:
if not hasattr(CardStorageRequest, key_hpp):
is_supplementary_data = True
else:
if not hasattr(PaymentRequest, key_hpp):
is_supplementary_data = True
if is_supplementary_data:
supplementary_data[key] = value
else:
dict_request[key_hpp] = value
if supplementary_data:
dict_request['supplementary_data'] = supplementary_data
if is_card_storage:
return CardStorageRequest(**dict_request)
else:
return PaymentRequest(**dict_request)
def from_json_hpp_response(self, json_hpp_response, charset, encoded):
"""
        Method deserializes JSON to HppResponse.
:param json_hpp_response: string
:param charset: string
:param encoded: bool
:return: HppResponse
"""
obj_response = json.loads(json_hpp_response)
if encoded:
obj_response = JsonUtils.decode(obj_response, charset)
return self.normalize_response(obj_response)
@staticmethod
def encode(hpp_dict, charset='utf-8'):
"""
Base64 encodes all Hpp Request values.
:param hpp_dict: dict
:param charset: string
:return: dict
"""
for key, value in six.iteritems(hpp_dict):
b64_value = base64.b64encode(six.binary_type(six.text_type(value).encode(charset)))
hpp_dict[key] = b64_value.decode(charset)
return hpp_dict
@staticmethod
def decode(hpp_dict, charset='utf-8'):
"""
Base64 decodes all Hpp Request values.
:param hpp_dict: dict
:param charset: string
"""
for key, value in six.iteritems(hpp_dict):
hpp_dict[key] = six.text_type(base64.b64decode(value), charset)
return hpp_dict
@staticmethod
def normalize_response(obj_response):
"""
        Normalizes a response dict into an HppResponse.
:type obj_response: dict
:return: HppResponse
"""
obj_response = {key.lower(): value for key, value in six.iteritems(obj_response)}
return HppResponse(**obj_response)
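# Illustrative round trip through the base64 helpers (a sketch; the field and
# value are hypothetical):
#   encoded = JsonUtils.encode({'AMOUNT': '1001'})   # values become base64
#   JsonUtils.decode(encoded)                        # -> {'AMOUNT': '1001'}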
|
ComerciaGP/addonpayments-Python-SDK
|
addonpayments/hpp/utils.py
|
Python
|
mit
| 3,972
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from fairseq import checkpoint_utils, tasks
import sentencepiece as spm
import torch
try:
from simuleval import READ_ACTION, WRITE_ACTION, DEFAULT_EOS
from simuleval.agents import TextAgent
except ImportError:
print("Please install simuleval 'pip install simuleval'")
BOS_PREFIX = "\u2581"
class SimulTransTextAgentJA(TextAgent):
"""
Simultaneous Translation
Text agent for Japanese
"""
def __init__(self, args):
# Whether use gpu
self.gpu = getattr(args, "gpu", False)
# Max len
self.max_len = args.max_len
# Load Model
self.load_model_vocab(args)
# build word splitter
self.build_word_splitter(args)
self.eos = DEFAULT_EOS
def initialize_states(self, states):
states.incremental_states = dict()
states.incremental_states["online"] = dict()
def to_device(self, tensor):
if self.gpu:
return tensor.cuda()
else:
return tensor.cpu()
def load_model_vocab(self, args):
filename = args.model_path
if not os.path.exists(filename):
raise IOError("Model file not found: {}".format(filename))
state = checkpoint_utils.load_checkpoint_to_cpu(filename)
task_args = state["cfg"]["task"]
task_args.data = args.data_bin
task = tasks.setup_task(task_args)
# build model for ensemble
state["cfg"]["model"].load_pretrained_encoder_from = None
state["cfg"]["model"].load_pretrained_decoder_from = None
self.model = task.build_model(state["cfg"]["model"])
self.model.load_state_dict(state["model"], strict=True)
self.model.eval()
self.model.share_memory()
if self.gpu:
self.model.cuda()
# Set dictionary
self.dict = {}
self.dict["tgt"] = task.target_dictionary
self.dict["src"] = task.source_dictionary
@staticmethod
def add_args(parser):
# fmt: off
parser.add_argument('--model-path', type=str, required=True,
help='path to your pretrained model.')
parser.add_argument("--data-bin", type=str, required=True,
help="Path of data binary")
parser.add_argument("--max-len", type=int, default=100,
help="Max length of translation")
parser.add_argument("--tgt-splitter-type", type=str, default="SentencePiece",
help="Subword splitter type for target text.")
parser.add_argument("--tgt-splitter-path", type=str, default=None,
help="Subword splitter model path for target text.")
parser.add_argument("--src-splitter-type", type=str, default="SentencePiece",
help="Subword splitter type for source text.")
parser.add_argument("--src-splitter-path", type=str, default=None,
help="Subword splitter model path for source text.")
# fmt: on
return parser
def build_word_splitter(self, args):
self.spm = {}
for lang in ['src', 'tgt']:
if getattr(args, f'{lang}_splitter_type', None):
path = getattr(args, f'{lang}_splitter_path', None)
if path:
self.spm[lang] = spm.SentencePieceProcessor()
self.spm[lang].Load(path)
def segment_to_units(self, segment, states):
# Split a full word (segment) into subwords (units)
return self.spm['src'].EncodeAsPieces(segment)
def update_model_encoder(self, states):
if len(states.units.source) == 0:
return
src_indices = [
self.dict['src'].index(x)
for x in states.units.source.value
]
if states.finish_read():
# Append the eos index when the prediction is over
src_indices += [self.dict["tgt"].eos_index]
src_indices = self.to_device(
torch.LongTensor(src_indices).unsqueeze(0)
)
src_lengths = self.to_device(
torch.LongTensor([src_indices.size(1)])
)
states.encoder_states = self.model.encoder(src_indices, src_lengths)
torch.cuda.empty_cache()
def update_states_read(self, states):
# Happens after a read action.
self.update_model_encoder(states)
def units_to_segment(self, units, states):
# Merge sub words (units) to full word (segment).
# For Japanese, we can directly send
# the untokenized token to server except the BOS token
# with following option
# --sacrebleu-tokenizer MeCab
# --eval-latency-unit char
# --no-space
token = units.value.pop()
if (
token == self.dict["tgt"].eos_word
or len(states.segments.target) > self.max_len
):
return DEFAULT_EOS
if BOS_PREFIX == token:
return None
if token[0] == BOS_PREFIX:
return token[1:]
else:
return token
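    # Illustrative behaviour of units_to_segment (a sketch): SentencePiece
    # marks word starts with U+2581, so a bare "\u2581" piece yields None
    # (wait for more units), "\u2581word" is emitted as "word", other pieces
    # pass through unchanged, and eos or exceeding max_len ends the segment.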
def policy(self, states):
if not getattr(states, "encoder_states", None):
# No encoder states, read a token first
return READ_ACTION
# encode previous predicted target tokens
tgt_indices = self.to_device(
torch.LongTensor(
[self.model.decoder.dictionary.eos()]
+ [
self.dict['tgt'].index(x)
for x in states.units.target.value
if x is not None
]
).unsqueeze(0)
)
# Current steps
states.incremental_states["steps"] = {
"src": states.encoder_states["encoder_out"][0].size(0),
"tgt": 1 + len(states.units.target),
}
# Online only means the reading is not finished
states.incremental_states["online"]["only"] = (
torch.BoolTensor([not states.finish_read()])
)
x, outputs = self.model.decoder.forward(
prev_output_tokens=tgt_indices,
encoder_out=states.encoder_states,
incremental_state=states.incremental_states,
)
states.decoder_out = x
torch.cuda.empty_cache()
if outputs.action == 0:
return READ_ACTION
else:
return WRITE_ACTION
def predict(self, states):
# Predict target token from decoder states
decoder_states = states.decoder_out
lprobs = self.model.get_normalized_probs(
[decoder_states[:, -1:]], log_probs=True
)
index = lprobs.argmax(dim=-1)[0, 0].item()
if index != self.dict['tgt'].eos_index:
token = self.dict['tgt'].string([index])
else:
token = self.dict['tgt'].eos_word
return token
|
pytorch/fairseq
|
examples/simultaneous_translation/eval/agents/simul_t2t_enja.py
|
Python
|
mit
| 7,099
|
import csv
import json
import logging
import os.path
import sys
import sqlite3
import xml.etree.ElementTree as xml
FORMAT = '%(asctime)-15s %(levelname)-7s %(funcName)s: %(message)s'
logging.basicConfig(format=FORMAT)
logger = logging.getLogger('zipviz')
logger.setLevel(logging.INFO)
def construct_database(filename='us_zip_codes.db'):
# Use full KML for detailed boundaries
zip_codes = parse_all_kml({})
# Use deliverable zip CSV for names, centers, and KML?
zip_codes = parse_deliverable_csv(zip_codes)
# Use undeliverable for names, centers only?
zip_codes = parse_undeliverable_csv(zip_codes)
# Write output formats
    write_to_database(zip_codes, filename)
#write_to_json(zip_codes)
def write_to_database(zip_codes, filename='us_zip_codes.db'):
logger.info('Creating database file %s' % filename)
conn = sqlite3.connect(filename)
c = conn.cursor()
try:
c.execute('''
CREATE TABLE geo_data (
type char(10),
key char(10),
special integer,
has_poly integer,
json text,
PRIMARY KEY (type, key))''')
conn.commit()
except sqlite3.OperationalError:
logger.warn('Error creating table, it probably exists')
for zip_code in zip_codes:
zip_data = zip_codes[zip_code]
special = zip_data.has_key('undeliverable')
has_poly = zip_data.has_key('poly')
if special == has_poly:
logger.info('Interesting, special == has_poly for %s' % zip_code)
try:
conn.execute(
'insert into geo_data values ("uszip", ?, ?, ?, ?)',
(zip_code, special, has_poly, json.dumps(zip_data)))
conn.commit()
except:
logger.warn('Error inserting row for zip %s' % zip_code)
conn.close()
def write_to_json(zip_codes, directory='json'):
logger.info('Creating JSON files in directory %s' % directory)
for zip_code in zip_codes:
filename = os.path.join(directory, '%s.json' % zip_code)
with open(filename, 'wt') as json_file:
json.dump(zip_codes[zip_code], json_file)
def parse_all_kml(zip_codes, filename='us_zip_codes.kml'):
# Full KML:
# http://www.filosophy.org/post/17/zipcodes_in_kml/
# ...<Placemark>
# <name>!!
# <Polygon><OuterBoundaryIs><LinearRing><coordinates>!!
logger.info('Parsing the KML for all zip codes')
tree = xml.parse(filename)
root = tree.getroot()
entries = root.findall('.//{http://earth.google.com/kml/2.0}Placemark')
logger.info('Found %d entries in KML' % len(entries))
for entry in entries:
zip_code = entry.find('{http://earth.google.com/kml/2.0}name').text
if zip_code[3:] in ('HH', 'XX'):
logger.debug('Ignoring entry for zip3 %s' % zip_code)
else:
logger.debug('Parsing entry for zip code %s' % zip_code)
data = {}
coordinates = entry.findall('.//{http://earth.google.com/kml/2.0}coordinates')
pdata = parse_coordinates(coordinates)
if not pdata is None:
data['poly'] = pdata
zip_codes[zip_code] = data
return zip_codes
def parse_undeliverable_csv(zip_codes, filename='us_undeliverable_zip_codes.csv'):
# Undeliverable:
# https://www.google.com/fusiontables/data?docid=1XXhCde2p1ncNiUSdEgO-mApKRPJsUIidYHNa0KA
logger.info('Parsing the CSV file for undeliverable zip data')
with open(filename, 'rb') as csvfile:
reader = csv.reader(csvfile)
for row in reader:
if not row[0] == 'zip':
if not zip_codes.has_key(row[0]):
logger.info('no data found for zip code %s' % row[0])
zip_codes[row[0]] = {'undeliverable': True}
zip_code = zip_codes[row[0]]
zip_code['name'] = row[1]
return zip_codes
def parse_deliverable_csv(zip_codes, filename='us_zip_codes.csv'):
# US Zip Codes (partial):
# https://www.google.com/fusiontables/DataSource?docid=1fzwSGnxD0xzJaiYXYX66zuYvG0c5wcEUi5ZI0Q
logger.info('Parsing the CSV file for deliverable zip data')
with open(filename, 'rb') as csvfile:
reader = csv.reader(csvfile)
for row in reader:
if not row[3] == 'ZIP':
if not zip_codes.has_key(row[3]):
logger.info('no data found for zip code %s' % row[3])
zip_codes[row[3]] = {}
if len(row[11]) > 0:
root = xml.fromstring(row[11])
coordinates = root.findall('.//coordinates')
pdata = parse_coordinates(coordinates)
if not pdata is None:
zip_codes[row[3]]['poly'] = pdata
zip_code = zip_codes[row[3]]
zip_code['name'] = row[2]
zip_code['center'] = {'lat':row[4], 'lng':row[9]}
return zip_codes
def parse_coordinates(coordinates):
data = None
if len(coordinates) > 0:
data = []
for element in coordinates:
for coord in element.text.split():
# KML format is [longitude, latitude, and optional altitude]
values = coord.split(',')
if len(values) == 2:
data.append({'lng':values[0], 'lat':values[1]})
elif len(values) == 3:
data.append({'lng':values[0], 'lat':values[1], 'alt':values[2]})
return data
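# Illustrative input/output (sketch): a KML <coordinates> payload such as
#   "-122.08,37.42,0 -122.09,37.43"
# parses to [{'lng': '-122.08', 'lat': '37.42', 'alt': '0'},
#            {'lng': '-122.09', 'lat': '37.43'}].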
construct_database()
|
johnstonskj/zipviz
|
data/kml_to_here.py
|
Python
|
mit
| 5,518
|
import zipfile
import os
import tarfile
import unittest
import six
from mock import Mock
from conans.client.tools import untargz, unzip
from conans.client.tools.files import chdir, save
from conans.test.utils.mocks import TestBufferConanOutput
from conans.test.utils.test_files import temp_folder
from conans.errors import ConanException
from conans.model.manifest import gather_files
from conans.util.files import gzopen_without_timestamps
class ZipExtractPlainTest(unittest.TestCase):
def _zipdir(self, path, zipfilename, folder_entry=None):
with zipfile.ZipFile(zipfilename, 'w', zipfile.ZIP_DEFLATED) as z:
if folder_entry:
zif = zipfile.ZipInfo(folder_entry + "/")
z.writestr(zif, "")
for root, _, files in os.walk(path):
for f in files:
file_path = os.path.join(root, f)
if file_path == zipfilename:
continue
relpath = os.path.relpath(file_path, path)
z.write(file_path, relpath)
def test_plain_zip(self):
tmp_folder = temp_folder()
with chdir(tmp_folder):
ori_files_dir = os.path.join(tmp_folder, "subfolder-1.2.3")
file1 = os.path.join(ori_files_dir, "file1")
file2 = os.path.join(ori_files_dir, "folder", "file2")
file3 = os.path.join(ori_files_dir, "file3")
save(file1, "")
save(file2, "")
save(file3, "")
zip_file = os.path.join(tmp_folder, "myzip.zip")
# Zip with a "folder_entry" in the zip (not only for the files)
self._zipdir(tmp_folder, zip_file, folder_entry="subfolder-1.2.3")
# ZIP unzipped regularly
extract_folder = temp_folder()
output = TestBufferConanOutput()
unzip(zip_file, destination=extract_folder, strip_root=False, output=output)
self.assertNotIn("ERROR: Error extract", output)
self.assertTrue(os.path.exists(os.path.join(extract_folder, "subfolder-1.2.3")))
self.assertTrue(os.path.exists(os.path.join(extract_folder, "subfolder-1.2.3", "file1")))
self.assertTrue(os.path.exists(os.path.join(extract_folder, "subfolder-1.2.3", "folder",
"file2")))
self.assertTrue(os.path.exists(os.path.join(extract_folder, "subfolder-1.2.3", "file3")))
# Extract without the subfolder
extract_folder = temp_folder()
output = TestBufferConanOutput()
unzip(zip_file, destination=extract_folder, strip_root=True, output=output)
self.assertNotIn("ERROR: Error extract", output)
self.assertFalse(os.path.exists(os.path.join(extract_folder, "subfolder-1.2.3")))
self.assertTrue(os.path.exists(os.path.join(extract_folder, "file1")))
self.assertTrue(os.path.exists(os.path.join(extract_folder, "folder", "file2")))
self.assertTrue(os.path.exists(os.path.join(extract_folder, "file3")))
def test_invalid_flat(self):
tmp_folder = temp_folder()
with chdir(tmp_folder):
# Not a single dir containing everything
file1 = os.path.join(tmp_folder, "subfolder-1.2.3", "folder2", "file1")
file2 = os.path.join(tmp_folder, "other-1.2.3", "folder", "file2")
save(file1, "")
save(file2, "")
zip_folder = temp_folder()
zip_file = os.path.join(zip_folder, "file.zip")
self._zipdir(tmp_folder, zip_file)
# Extract without the subfolder
extract_folder = temp_folder()
with six.assertRaisesRegex(self, ConanException, "The zip file contains more than 1 folder "
"in the root"):
unzip(zip_file, destination=extract_folder, strip_root=True, output=Mock())
def test_invalid_flat_single_file(self):
tmp_folder = temp_folder()
with chdir(tmp_folder):
save("file1", "contentsfile1")
zip_folder = temp_folder()
zip_file = os.path.join(zip_folder, "file.zip")
self._zipdir(tmp_folder, zip_file)
# Extract without the subfolder
extract_folder = temp_folder()
with six.assertRaisesRegex(self, ConanException, "The zip file contains a file in the root"):
unzip(zip_file, destination=extract_folder, strip_root=True, output=Mock())
class TarExtractPlainTest(unittest.TestCase):
def _compress_folder(self, folder, tgz_path, folder_entry=None):
# Create a tar.gz file with the files in the folder and an additional TarInfo entry
# for the folder_entry (the gather files doesn't return empty dirs)
with open(tgz_path, "wb") as tgz_handle:
tgz = gzopen_without_timestamps("name", mode="w", fileobj=tgz_handle)
if folder_entry:
# Create an empty folder in the tgz file
t = tarfile.TarInfo(folder_entry)
t.mode = 488
t.type = tarfile.DIRTYPE
tgz.addfile(t)
files, _ = gather_files(folder)
for filename, abs_path in files.items():
info = tarfile.TarInfo(name=filename)
with open(os.path.join(folder, filename), 'rb') as file_handler:
tgz.addfile(tarinfo=info, fileobj=file_handler)
tgz.close()
def test_plain_tgz(self):
tmp_folder = temp_folder()
with chdir(tmp_folder):
# Create a couple of files
ori_files_dir = os.path.join(tmp_folder, "subfolder-1.2.3")
file1 = os.path.join(ori_files_dir, "file1")
file2 = os.path.join(ori_files_dir, "folder", "file2")
file3 = os.path.join(ori_files_dir, "file3")
save(file1, "")
save(file2, "")
save(file3, "")
tgz_folder = temp_folder()
tgz_file = os.path.join(tgz_folder, "file.tar.gz")
self._compress_folder(tmp_folder, tgz_file, folder_entry="subfolder-1.2.3")
# Tgz unzipped regularly
extract_folder = temp_folder()
untargz(tgz_file, destination=extract_folder, strip_root=False)
self.assertTrue(os.path.exists(os.path.join(extract_folder, "subfolder-1.2.3")))
self.assertTrue(os.path.exists(os.path.join(extract_folder, "subfolder-1.2.3", "file1")))
self.assertTrue(os.path.exists(os.path.join(extract_folder, "subfolder-1.2.3", "folder",
"file2")))
self.assertTrue(os.path.exists(os.path.join(extract_folder, "subfolder-1.2.3", "file3")))
# Extract without the subfolder
extract_folder = temp_folder()
untargz(tgz_file, destination=extract_folder, strip_root=True)
self.assertFalse(os.path.exists(os.path.join(extract_folder, "subfolder-1.2.3")))
self.assertTrue(os.path.exists(os.path.join(extract_folder, "file1")))
self.assertTrue(os.path.exists(os.path.join(extract_folder, "folder", "file2")))
self.assertTrue(os.path.exists(os.path.join(extract_folder, "file3")))
def test_plain_tgz_common_base(self):
tmp_folder = temp_folder()
with chdir(tmp_folder):
# Create a couple of files
ori_files_dir = os.path.join(tmp_folder, "subfolder-1.2.3")
file1 = os.path.join(ori_files_dir, "folder", "file1")
file2 = os.path.join(ori_files_dir, "folder", "file2")
file3 = os.path.join(ori_files_dir, "folder", "file3")
save(file1, "")
save(file2, "")
save(file3, "")
tgz_folder = temp_folder()
tgz_file = os.path.join(tgz_folder, "file.tar.gz")
self._compress_folder(tmp_folder, tgz_file)
# Tgz unzipped regularly
extract_folder = temp_folder()
untargz(tgz_file, destination=extract_folder, strip_root=True)
self.assertFalse(os.path.exists(os.path.join(extract_folder, "subfolder-1.2.3")))
self.assertTrue(os.path.exists(os.path.join(extract_folder, "folder", "file1")))
self.assertTrue(os.path.exists(os.path.join(extract_folder, "folder", "file2")))
self.assertTrue(os.path.exists(os.path.join(extract_folder, "folder", "file3")))
def test_invalid_flat(self):
tmp_folder = temp_folder()
with chdir(tmp_folder):
# Not a single dir containing everything
file1 = os.path.join(tmp_folder, "subfolder-1.2.3", "folder2", "file1")
file2 = os.path.join(tmp_folder, "other-1.2.3", "folder", "file2")
save(file1, "")
save(file2, "")
tgz_folder = temp_folder()
tgz_file = os.path.join(tgz_folder, "file.tar.gz")
self._compress_folder(tmp_folder, tgz_file)
extract_folder = temp_folder()
with six.assertRaisesRegex(self, ConanException, "The tgz file contains more than 1 folder "
"in the root"):
untargz(tgz_file, destination=extract_folder, strip_root=True)
def test_invalid_flat_single_file(self):
tmp_folder = temp_folder()
with chdir(tmp_folder):
save("file1", "contentsfile1")
zip_folder = temp_folder()
tgz_file = os.path.join(zip_folder, "file.tar.gz")
self._compress_folder(tmp_folder, tgz_file)
# Extract without the subfolder
extract_folder = temp_folder()
with six.assertRaisesRegex(self, ConanException, "The tgz file contains a file in the root"):
unzip(tgz_file, destination=extract_folder, strip_root=True, output=Mock())
|
conan-io/conan
|
conans/test/unittests/util/files/strip_root_extract_test.py
|
Python
|
mit
| 9,684
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tsune.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
DummyDivision/Tsune
|
manage.py
|
Python
|
mit
| 248
|
def return_pow():
return "pow"
|
bigsassy/pytest-pythonpath
|
test_path/sitedir3/pow.py
|
Python
|
mit
| 35
|
# Enter your code here. Read input from STDIN. Print output to STDOUT
import itertools
s=input()
x=((len(list(p)),int(k)) for k,p in itertools.groupby(s))
print(*x)
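# Illustrative run (sketch): for input "1222311" groupby yields the runs
# '1', '222', '3', '11', so the program prints: (1, 1) (3, 2) (1, 3) (2, 1)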
|
manishbisht/Competitive-Programming
|
Hackerrank/Practice/Python/6.itertools/52.Compress the String!.py
|
Python
|
mit
| 172
|
import socket
import gevent
import cv2.cv as cv
def main():
cv.NamedWindow("camera", 1)
capture = cv.CaptureFromCAM(0)
while True:
img = cv.QueryFrame(capture)
"""
im_gray = cv.CreateImage(cv.GetSize(img),cv.IPL_DEPTH_8U,1)
cv.CvtColor(img,im_gray,cv.CV_RGB2GRAY)
# Sobel operator
dstSobel = cv.CreateMat(im_gray.height, im_gray.width, cv.CV_32FC1)
# Sobel(src, dst, xorder, yorder, apertureSize = 3)
cv.Sobel(im_gray,dstSobel,1,1,3)
"""
cv.ShowImage('camera', img)
# image smoothing and subtraction
# imageBlur = cv.CreateImage(cv.GetSize(im_gray), im_gray.depth, im_gray.nChannels)
# # filering the original image
# # Smooth(src, dst, smoothtype=CV_GAUSSIAN, param1=3, param2=0, param3=0, param4=0)
# cv.Smooth(im_gray, imageBlur, cv.CV_BLUR, 11, 11)
# diff = cv.CreateImage(cv.GetSize(im_gray), im_gray.depth, im_gray.nChannels)
# # subtraction (original - filtered)
# cv.AbsDiff(im_gray,imageBlur,diff)
# cv.ShowImage('camera', diff)
if cv.WaitKey(10) == 27:
break
gevent.sleep(0.5)
cv.DestroyWindow("camera")
if __name__ == "__main__":
main()
|
mabotech/mabo.io
|
py/vision/test1/test1.py
|
Python
|
mit
| 1,352
|
from os.path import join
import sh
from pythonforandroid.recipe import NDKRecipe
from pythonforandroid.util import current_directory
from pythonforandroid.logger import shprint
from multiprocessing import cpu_count
class OpenCVRecipe(NDKRecipe):
'''
.. versionchanged:: 0.7.1
rewrote recipe to support the python bindings (cv2.so) and enable the
build of most of the libraries of the opencv's package, so we can
process images, videos, objects, photos...
'''
version = '4.5.1'
url = 'https://github.com/opencv/opencv/archive/{version}.zip'
depends = ['numpy']
patches = ['patches/p4a_build.patch']
generated_libraries = [
'libopencv_features2d.so',
'libopencv_imgproc.so',
'libopencv_stitching.so',
'libopencv_calib3d.so',
'libopencv_flann.so',
'libopencv_ml.so',
'libopencv_videoio.so',
'libopencv_core.so',
'libopencv_highgui.so',
'libopencv_objdetect.so',
'libopencv_video.so',
'libopencv_dnn.so',
'libopencv_imgcodecs.so',
'libopencv_photo.so',
]
def get_lib_dir(self, arch):
return join(self.get_build_dir(arch.arch), 'build', 'lib', arch.arch)
def get_recipe_env(self, arch):
env = super().get_recipe_env(arch)
env['ANDROID_NDK'] = self.ctx.ndk_dir
env['ANDROID_SDK'] = self.ctx.sdk_dir
return env
def build_arch(self, arch):
build_dir = join(self.get_build_dir(arch.arch), 'build')
shprint(sh.mkdir, '-p', build_dir)
opencv_extras = []
if 'opencv_extras' in self.ctx.recipe_build_order:
opencv_extras_dir = self.get_recipe(
'opencv_extras', self.ctx).get_build_dir(arch.arch)
opencv_extras = [
f'-DOPENCV_EXTRA_MODULES_PATH={opencv_extras_dir}/modules',
'-DBUILD_opencv_legacy=OFF',
]
with current_directory(build_dir):
env = self.get_recipe_env(arch)
python_major = self.ctx.python_recipe.version[0]
python_include_root = self.ctx.python_recipe.include_root(arch.arch)
python_site_packages = self.ctx.get_site_packages_dir(arch)
python_link_root = self.ctx.python_recipe.link_root(arch.arch)
python_link_version = self.ctx.python_recipe.link_version
python_library = join(python_link_root,
'libpython{}.so'.format(python_link_version))
python_include_numpy = join(python_site_packages,
'numpy', 'core', 'include')
shprint(sh.cmake,
'-DP4A=ON',
'-DANDROID_ABI={}'.format(arch.arch),
'-DANDROID_STANDALONE_TOOLCHAIN={}'.format(self.ctx.ndk_dir),
'-DANDROID_NATIVE_API_LEVEL={}'.format(self.ctx.ndk_api),
'-DANDROID_EXECUTABLE={}/tools/android'.format(env['ANDROID_SDK']),
'-DANDROID_SDK_TOOLS_VERSION=6514223',
'-DANDROID_PROJECTS_SUPPORT_GRADLE=ON',
'-DCMAKE_TOOLCHAIN_FILE={}'.format(
join(self.ctx.ndk_dir, 'build', 'cmake',
'android.toolchain.cmake')),
# Make the linkage with our python library, otherwise we
# will get dlopen error when trying to import cv2's module.
'-DCMAKE_SHARED_LINKER_FLAGS=-L{path} -lpython{version}'.format(
path=python_link_root,
version=python_link_version),
'-DBUILD_WITH_STANDALONE_TOOLCHAIN=ON',
# Force to build as shared libraries the cv2's dependant
# libs or we will not be able to link with our python
'-DBUILD_SHARED_LIBS=ON',
'-DBUILD_STATIC_LIBS=OFF',
# Disable some opencv's features
'-DBUILD_opencv_java=OFF',
'-DBUILD_opencv_java_bindings_generator=OFF',
# '-DBUILD_opencv_highgui=OFF',
# '-DBUILD_opencv_imgproc=OFF',
# '-DBUILD_opencv_flann=OFF',
'-DBUILD_TESTS=OFF',
'-DBUILD_PERF_TESTS=OFF',
'-DENABLE_TESTING=OFF',
'-DBUILD_EXAMPLES=OFF',
'-DBUILD_ANDROID_EXAMPLES=OFF',
# Force to only build our version of python
'-DBUILD_OPENCV_PYTHON{major}=ON'.format(major=python_major),
'-DBUILD_OPENCV_PYTHON{major}=OFF'.format(
major='2' if python_major == '3' else '3'),
# Force to install the `cv2.so` library directly into
# python's site packages (otherwise the cv2's loader fails
# on finding the cv2.so library)
'-DOPENCV_SKIP_PYTHON_LOADER=ON',
'-DOPENCV_PYTHON{major}_INSTALL_PATH={site_packages}'.format(
major=python_major, site_packages=python_site_packages),
# Define python's paths for: exe, lib, includes, numpy...
'-DPYTHON_DEFAULT_EXECUTABLE={}'.format(self.ctx.hostpython),
'-DPYTHON{major}_EXECUTABLE={host_python}'.format(
major=python_major, host_python=self.ctx.hostpython),
'-DPYTHON{major}_INCLUDE_PATH={include_path}'.format(
major=python_major, include_path=python_include_root),
'-DPYTHON{major}_LIBRARIES={python_lib}'.format(
major=python_major, python_lib=python_library),
'-DPYTHON{major}_NUMPY_INCLUDE_DIRS={numpy_include}'.format(
major=python_major, numpy_include=python_include_numpy),
'-DPYTHON{major}_PACKAGES_PATH={site_packages}'.format(
major=python_major, site_packages=python_site_packages),
*opencv_extras,
self.get_build_dir(arch.arch),
_env=env)
shprint(sh.make, '-j' + str(cpu_count()), 'opencv_python' + python_major)
# Install python bindings (cv2.so)
shprint(sh.cmake, '-DCOMPONENT=python', '-P', './cmake_install.cmake')
# Copy third party shared libs that we need in our final apk
sh.cp('-a', sh.glob('./lib/{}/lib*.so'.format(arch.arch)),
self.ctx.get_libs_dir(arch.arch))
recipe = OpenCVRecipe()
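# Illustrative build invocation (a sketch; assumes a standard
# python-for-android setup and abbreviates the remaining flags):
#   p4a apk --requirements=python3,numpy,opencv --arch=arm64-v8a ...
# p4a resolves this recipe by its package name ('opencv') during the build.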
|
kivy/python-for-android
|
pythonforandroid/recipes/opencv/__init__.py
|
Python
|
mit
| 6,694
|
import logging
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
class QNetwork(nn.Module):
def __init__(self, state_dims=5, action_dims=6, hidden_dims=128):
super(QNetwork, self).__init__()
self.fc1 = nn.Linear(state_dims, hidden_dims)
self.fc2 = nn.Linear(hidden_dims, hidden_dims)
self.out = nn.Linear(hidden_dims, action_dims)
# NOTE: If you need to `.share_memory()` the network, this field will not be
# shared. Instead, make it a `torch.nn.Parameter` (and make sure to update
# it with `self.updates[0] += 1`)
self.updates = 0
def forward(self, x):
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
return self.out(x)
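# Minimal usage sketch (shapes assumed from the defaults above, not part of
# the original module):
#   net = QNetwork(state_dims=5, action_dims=6)
#   q = net(torch.zeros(1, 5))   # -> tensor of shape (1, 6), one Q-value per action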
|
LK/Plato
|
plato-server/network.py
|
Python
|
mit
| 757
|
# -*- coding: utf-8 -*-
# python-holidays
# ---------------
# A fast, efficient Python library for generating country, province and state
# specific sets of holidays on the fly. It aims to make determining whether a
# specific date is a holiday as fast and flexible as possible.
#
# Author: ryanss <ryanssdev@icloud.com> (c) 2014-2017
# dr-prodigy <maurizio.montel@gmail.com> (c) 2017-2020
# Website: https://github.com/dr-prodigy/python-holidays
# License: MIT (see LICENSE file)
from datetime import date
from dateutil.easter import easter, EASTER_ORTHODOX
from dateutil.relativedelta import relativedelta as rd
from holidays.constants import JAN, MAR, MAY, JUL, NOV, DEC
from holidays.holiday_base import HolidayBase
class Belarus(HolidayBase):
"""
http://president.gov.by/en/holidays_en/
http://www.belarus.by/en/about-belarus/national-holidays
"""
def __init__(self, **kwargs):
self.country = "BY"
HolidayBase.__init__(self, **kwargs)
def _populate(self, year):
# The current set of holidays came into force in 1998
# http://laws.newsby.org/documents/ukazp/pos05/ukaz05806.htm
if year <= 1998:
return
# New Year's Day
self[date(year, JAN, 1)] = "Новый год"
# Jan 2nd is the national holiday (New Year) from 2020
# http://president.gov.by/uploads/documents/2019/464uk.pdf
if year >= 2020:
# New Year's Day
self[date(year, JAN, 2)] = "Новый год"
# Christmas Day (Orthodox)
self[date(year, JAN, 7)] = "Рождество Христово " \
"(православное Рождество)"
# Women's Day
self[date(year, MAR, 8)] = "День женщин"
# Radunitsa ("Day of Rejoicing")
self[easter(year, method=EASTER_ORTHODOX) + rd(days=9)] = "Радуница"
# Labour Day
self[date(year, MAY, 1)] = "Праздник труда"
# Victory Day
self[date(year, MAY, 9)] = "День Победы"
# Independence Day
self[date(year, JUL, 3)] = "День Независимости Республики Беларусь " \
"(День Республики)"
# October Revolution Day
self[date(year, NOV, 7)] = "День Октябрьской революции"
# Christmas Day (Catholic)
self[date(year, DEC, 25)] = "Рождество Христово " \
"(католическое Рождество)"
class BY(Belarus):
pass
class BLR(Belarus):
pass
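# Illustrative usage (sketch):
#   by_holidays = Belarus(years=2020)
#   date(2020, 1, 2) in by_holidays   # -> True; Jan 2 is a holiday from 2020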
|
ryanss/python-holidays
|
holidays/countries/belarus.py
|
Python
|
mit
| 2,698
|
# Copyright (C) 2014 by Maxim Bublis <b@codemonkey.ru>
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OF OTHER DEALINGS IN THE SOFTWARE.
"""
Google Code Jam 2008 Beta Problem B
Usage:
python b.py < input.txt > output.txt
"""
import sys
def memoized(func):
_cache = {}
def wrapped(*args):
        if args not in _cache:
_cache[args] = func(*args)
return _cache[args]
return wrapped
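# Why memoization matters here (illustrative note): without @memoized the
# recursion in longest_consequence below is exponential in the input length;
# with it, each (goods-prefix, guesses-prefix) pair is evaluated once,
# bounding the work by roughly len(goods) * len(guesses) distinct calls.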
@memoized
def longest_consequence(goods, guesses):
if not goods or not guesses:
return ()
sub_longest_consequence = max(longest_consequence(goods[:-1], guesses),
longest_consequence(goods, guesses[:-1]),
key=lambda x: (len(x), sorted(x)))
if goods[-1] == guesses[-1]:
return sub_longest_consequence + (goods[-1],)
return sub_longest_consequence
def solve_problem(goods, guesses):
return sorted(set(goods) - set(longest_consequence(goods, guesses)))
if __name__ == "__main__":
num_of_cases = int(sys.stdin.readline().strip())
for i in xrange(1, num_of_cases + 1):
goods = tuple(sys.stdin.readline().strip().split())
raw_prices = map(int, sys.stdin.readline().strip().split())
prices = {goods[i]: price for i, price in enumerate(raw_prices)}
guesses = tuple(sorted(goods, key=lambda guess: (prices[guess], guess)))
print "Case #{0}: {1}".format(i, ' '.join(solve_problem(goods, guesses)))
|
satori/edu
|
codejam/2008/beta/b.py
|
Python
|
mit
| 2,446
|
import os
import config
import utils
import sublime
import sublime_plugin
####################################################################################
# VIEW
####################################################################################
class SwiDebugView(object):
""" The SWIDebugView wraps a normal view, adding some convenience methods.
See wrap_view.
All calls to a View should be made through an SWIDebugView,
adding more passthroughs if necessary. This makes the code flow explicit.
"""
def __init__(self, v):
self.view = v
self.callbacks = []
self.prev_click_position = 0
def __getattr__(self, attr):
# a trick (with the empty __call__)
# to implement default empty event handlers
if attr.startswith('on_'):
return self
raise AttributeError
def __call__(self, *args, **kwargs):
pass
def on_deactivated(self):
if self.view.name() == "File mapping":
# cannot close view during its own event handler
sublime.set_timeout(lambda: self.view.close())
def file_name(self):
return self.view.file_name()
def erase_regions(self, key):
return self.view.erase_regions(key)
def get_regions(self, key):
return self.view.get_regions(key)
def add_regions(self, key, regions, scope = "", icon = "", flags = 0):
return self.view.add_regions(key, regions, scope, icon, flags)
def run_command(self, cmd, args = None):
return self.view.run_command(cmd, args)
def size(self):
return self.view.size()
def window(self):
return self.view.window()
def sel(self):
return self.view.sel()
def insert(self, edit, pt, text):
return self.view.insert(edit, pt, text)
def uri(self):
return 'file://' + os.path.realpath(self.view.file_name())
def show(self, x, show_surrounds = True):
return self.view.show(x, show_surrounds)
def rowcol(self, tp):
return self.view.rowcol(tp)
def lines(self, data=None):
""" Takes a list of line numbers (zero based),
regions, or else uses the selection.
Returns regions, each covering one complete line,
representing the lines included in the supplied input.
"""
lines = []
if data is None:
regions = self.view.sel()
else:
if type(data) != list:
data = [data]
regions = []
for item in data:
if type(item) == int or item.isdigit():
regions.append(self.view.line(self.view.text_point(int(item), 0)))
else:
regions.append(item)
for i in range(len(regions)):
lines.extend(self.view.split_by_newlines(regions[i]))
return [self.view.line(line) for line in lines]
def rows(self, lines):
""" Takes one or more lines and returns the 0-based
line and column of the first character in the line.
"""
if not type(lines) == list:
lines = [lines]
return [self.view.rowcol(line.begin())[0] for line in lines]
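    # Illustrative behaviour (sketch): given zero-based line numbers [0, 2],
    # lines([0, 2]) returns one Region per full line, and
    # rows(lines([0, 2])) maps those Regions back to [0, 2].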
def print_click(self, edit, position, text, callback, *args):
""" Inserts the specified text and creates a clickable "button"
around it.
"""
assert(callback)
insert_length = self.insert(edit, position, text)
insert_before = 0
new_region = sublime.Region(position, position + insert_length)
regions = self.view.get_regions('swi_log_clicks')
for region in regions:
if new_region.b < region.a:
break
insert_before += 1
self.callbacks.insert(insert_before, { "callback": callback, "args": args })
regions.append(new_region)
self.view.add_regions('swi_log_clicks', regions, scope=utils.get_setting('interactive_scope'), flags=sublime.DRAW_NO_FILL)
def remove_click(self, index):
""" Removes a clickable "button" with the specified index."""
regions = self.view.get_regions('swi_log_clicks')
del regions[index]
self.view.add_regions('swi_log_clicks', regions, scope=utils.get_setting('interactive_scope'), flags=sublime.DRAW_NO_FILL)
def erase(self, edit, region):
""" Removes our clickable regions
then erases the view
"""
self.callbacks = [] # bug, should only erase callbacks in the region
self.view.erase(edit, region)
def check_click(self):
if not isinstance(self, SwiDebugView):
return
cursor = self.sel()[0].a
index = 0
click_regions = self.get_regions('swi_log_clicks')
for callback in click_regions:
if cursor > callback.a and cursor < callback.b:
if index < len(self.callbacks):
callback = self.callbacks[index]
callback["callback"](*callback["args"])
index += 1
def find_existing_view(console_type):
return find_or_create_view(console_type, False)
def find_or_create_view(console_type, create = True):
found = False
v = None
window = sublime.active_window()
if console_type.startswith('console'):
group = 1
fullName = "Javascript Console"
if console_type == 'stack':
group = 2
fullName = "Javascript Callstack"
if console_type.startswith('scope'):
group = 1
fullName = "Javascript Scope"
if console_type.startswith('mapping'):
group = 0
fullName = "File mapping"
if console_type.startswith('styles'):
group = 1
fullName = "Styles"
window.focus_group(group)
for v in window.views():
if v.name() == fullName:
found = True
break
if not found and not create:
return None
if not found:
v = window.new_file()
v.set_scratch(True)
v.set_read_only(False)
v.set_name(fullName)
v.settings().set('word_wrap', False)
window.set_view_index(v, group, 0)
if console_type.startswith('console'):
v.set_syntax_file('Packages/Web Inspector/swi_log.tmLanguage')
if console_type == 'stack':
v.set_syntax_file('Packages/Web Inspector/swi_stack.tmLanguage')
if console_type.startswith('scope'):
v.set_syntax_file('Packages/Web Inspector/swi_log.tmLanguage')
window.focus_view(v)
v.set_read_only(False)
return wrap_view(v)
def wrap_view(v):
""" Convert a Sublime View into an SWIDebugView
"""
if isinstance(v, SwiDebugView):
return v
if isinstance(v, sublime.View):
id = v.buffer_id()
# Take this opportunity to replace the wrapped view,
# if it's against the same buffer as the previously
# seen view
if id in config.buffers:
config.buffers[id].view = v
else:
config.buffers[id] = SwiDebugView(v)
return config.buffers[id]
return None
def clear_view(name):
v = find_existing_view(name)
if not v:
return
v.run_command('swi_clear_view_internal')
v.show(v.size())
window = sublime.active_window()
if not window:
return
window.focus_group(0)
class SwiClearViewInternalCommand(sublime_plugin.TextCommand):
""" Called internally on a specific view """
def run(self, edit, user_input=None):
v = wrap_view(self.view)
v.erase(edit, sublime.Region(0, self.view.size()))
class SwiMouseUpCommand(sublime_plugin.TextCommand):
""" We use this to discover a "button" has been clicked.
Previously used on_selection_modified, but it fires
more than once per click. and there is no "mouse_up"
event in Sublime to filter those out.
This event handler is hooked up to mouse1 in
Default (xxx).sublime-mousemap - it's not via
the standard EventListener.
"""
def run(self, edit):
utils.assert_main_thread()
wrap_view(self.view).check_click()
class SwiDoubleMouseUpCommand(sublime_plugin.TextCommand):
""" On a double click, we get one of each event, so
run the command only once.
Triple click does not get handled reliably, it
may only be treated as two.
"""
def run(self, edit):
self.view.run_command("swi_mouse_up")
|
sokolovstas/SublimeWebInspector
|
views.py
|
Python
|
mit
| 8,478
|
# -*- coding: utf-8 -*-
"""Tests for the proofreadpage module."""
#
# (C) Pywikibot team, 2015-2017
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
import json
import pywikibot
from pywikibot.data import api
from pywikibot.proofreadpage import IndexPage, ProofreadPage
from tests.aspects import unittest, require_modules, TestCase
from tests.basepage_tests import (
BasePageMethodsTestBase,
BasePageLoadRevisionsCachingTestBase,
)
class TestProofreadPageInvalidSite(TestCase):
"""Test ProofreadPage class."""
family = 'wikipedia'
code = 'en'
cached = True
def test_invalid_site_source(self):
"""Test ProofreadPage from invalid Site as source."""
self.assertRaises(pywikibot.UnknownExtension,
ProofreadPage, self.site, 'title')
class TestBasePageMethodsProofreadPage(BasePageMethodsTestBase):
"""Test behavior of ProofreadPage methods inherited from BasePage."""
family = 'wikisource'
code = 'en'
def setUp(self):
"""Set up test case."""
self._page = ProofreadPage(
self.site, 'Page:Popular Science Monthly Volume 1.djvu/12')
super(TestBasePageMethodsProofreadPage, self).setUp()
def test_basepage_methods(self):
"""Test ProofreadPage methods inherited from superclass BasePage."""
self._test_invoke()
self._test_return_datatypes()
class TestLoadRevisionsCachingProofreadPage(BasePageLoadRevisionsCachingTestBase):
"""Test site.loadrevisions() caching."""
family = 'wikisource'
code = 'en'
def setUp(self):
"""Set up test case."""
self._page = ProofreadPage(
self.site, 'Page:Popular Science Monthly Volume 1.djvu/12')
super(TestLoadRevisionsCachingProofreadPage, self).setUp()
def test_page_text(self):
"""Test site.loadrevisions() with Page.text."""
self._test_page_text()
class TestProofreadPageParseTitle(TestCase):
"""Test ProofreadPage._parse_title() function."""
cached = True
# Use sites to run parametrized tests.
sites = {
'1': {
'family': 'wikisource', 'code': 'en',
'title': 'Page:Test.djvu/12',
'tuple': ('Test.djvu', 'djvu', 12),
},
'2': {
'family': 'wikisource', 'code': 'en',
'title': 'Page:Test djvu/12',
'tuple': ('Test djvu', '', 12),
},
'3': {
'family': 'wikisource', 'code': 'en',
'title': 'Page:Test.jpg/12',
'tuple': ('Test.jpg', 'jpg', 12),
},
'4': {
'family': 'wikisource', 'code': 'en',
'title': 'Page:Test jpg/12',
'tuple': ('Test jpg', '', 12),
},
'5': {
'family': 'wikisource', 'code': 'en',
'title': 'Page:Test.jpg',
'tuple': ('Test.jpg', 'jpg', None),
},
'6': {
'family': 'wikisource', 'code': 'en',
'title': 'Page:Test jpg',
'tuple': ('Test jpg', '', None),
},
}
@classmethod
def setUpClass(cls):
"""Prepare get_page dataset for tests."""
super(TestProofreadPageParseTitle, cls).setUpClass()
def test_parse_title(self, key):
"""Test ProofreadPage_parse_title() function."""
data = self.sites[key]
title = data['title']
base, base_ext, num = data['tuple']
page = ProofreadPage(self.site, title)
self.assertEqual(page._base, base)
self.assertEqual(page._base_ext, base_ext)
self.assertEqual(page._num, num)
class TestProofreadPageValidSite(TestCase):
"""Test ProofreadPage class."""
family = 'wikisource'
code = 'en'
cached = True
valid = {
'title': 'Page:Popular Science Monthly Volume 1.djvu/12',
'index': 'Index:Popular Science Monthly Volume 1.djvu',
'ql': 4,
'user': 'T. Mazzei',
'header': u"{{rh|2|''THE POPULAR SCIENCE MONTHLY.''}}",
'footer': u'\n{{smallrefs}}',
'url_image': ('https://upload.wikimedia.org/wikipedia/commons/thumb/a/ac/'
'Popular_Science_Monthly_Volume_1.djvu/'
'page12-1024px-Popular_Science_Monthly_Volume_1.djvu.jpg'),
}
valid_redlink = {
'title': 'Page:Pywikibot test page 3.jpg',
'url_image': ('https://upload.wikimedia.org/wikisource/en/3/37/'
'Pywikibot_test_page_3.jpg'),
}
existing_invalid = {
'title': 'Main Page',
}
existing_unlinked = {
'title': 'Page:Pywikibot unlinked test page',
}
not_existing_invalid = {
'title': 'User:cannot_exists',
'title1': 'User:Popular Science Monthly Volume 1.djvu/12'
}
class_pagetext_fmt = {
True: ('<div class="pagetext">\n\n\n', '</div>'),
False: ('', ''),
}
fmt = ('<noinclude><pagequality level="1" user="{user}" />'
'{class_pagetext}</noinclude>'
'<noinclude>{references}{div_end}</noinclude>')
def test_valid_site_source(self):
"""Test ProofreadPage from valid Site as source."""
page = ProofreadPage(self.site, 'Page:dummy test page')
self.assertEqual(page.namespace(), self.site.proofread_page_ns)
def test_invalid_existing_page_source(self):
"""Test ProofreadPage from invalid existing Page as source."""
source = pywikibot.Page(self.site, self.existing_invalid['title'])
self.assertRaises(ValueError, ProofreadPage, source)
def test_invalid_not_existing_page_source(self):
"""Test ProofreadPage from invalid not existing Page as source."""
# namespace is forced
source = pywikibot.Page(self.site,
self.not_existing_invalid['title'])
fixed_source = pywikibot.Page(self.site,
source.title(withNamespace=False),
ns=self.site.proofread_page_ns)
page = ProofreadPage(fixed_source)
self.assertEqual(page.title(), fixed_source.title())
def test_invalid_not_existing_page_source_wrong_ns(self):
"""Test ProofreadPage from Page not existing in non-Page ns as source."""
source = pywikibot.Page(self.site,
self.not_existing_invalid['title1'])
self.assertRaises(ValueError, ProofreadPage, source)
def test_invalid_link_source(self):
"""Test ProofreadPage from invalid Link as source."""
source = pywikibot.Link(self.not_existing_invalid['title'],
source=self.site)
self.assertRaises(ValueError, ProofreadPage, source)
def test_valid_link_source(self):
"""Test ProofreadPage from valid Link as source."""
source = pywikibot.Link(
self.valid['title'],
source=self.site,
defaultNamespace=self.site.proofread_page_ns)
page = ProofreadPage(source)
self.assertEqual(page.title(withNamespace=False), source.title)
self.assertEqual(page.namespace(), source.namespace)
def test_valid_parsing(self):
"""Test ProofreadPage page parsing functions."""
page = ProofreadPage(self.site, self.valid['title'])
self.assertEqual(page.ql, self.valid['ql'])
self.assertEqual(page.user, self.valid['user'])
self.assertEqual(page.header, self.valid['header'])
self.assertEqual(page.footer, self.valid['footer'])
def test_decompose_recompose_text(self):
"""Test ProofreadPage page decomposing/composing text."""
page = ProofreadPage(self.site, self.valid['title'])
plain_text = pywikibot.Page(self.site, self.valid['title']).text
assert page.text
self.assertEqual(plain_text, page.text)
def test_preload_from_not_existing_page(self):
"""Test ProofreadPage page decomposing/composing text."""
page = ProofreadPage(self.site, 'Page:dummy test page')
# Fetch page text to instantiate page._full_header, in order to allow
# for proper test result preparation.
page.text
class_pagetext, div = self.class_pagetext_fmt[page._full_header._has_div]
self.assertEqual(page.text,
self.fmt.format(user=self.site.username(),
class_pagetext=class_pagetext,
references='<references/>',
div_end=div))
def test_preload_from_empty_text(self):
"""Test ProofreadPage page decomposing/composing text."""
page = ProofreadPage(self.site, 'Page:dummy test page')
page.text = ''
class_pagetext, div = self.class_pagetext_fmt[page._full_header._has_div]
self.assertEqual(page.text,
self.fmt.format(user=self.site.username(),
class_pagetext=class_pagetext,
references='',
div_end=div))
def test_json_format(self):
"""Test conversion to json format."""
page = ProofreadPage(self.site, self.valid['title'])
rvargs = {'rvprop': 'ids|flags|timestamp|user|comment|content',
'rvcontentformat': 'application/json',
'titles': page,
}
rvgen = self.site._generator(api.PropertyGenerator,
type_arg='info|revisions',
total=1, **rvargs)
rvgen.set_maximum_items(-1) # suppress use of rvlimit parameter
try:
pagedict = next(iter(rvgen))
loaded_text = pagedict.get('revisions')[0].get('*')
        except (StopIteration, TypeError, KeyError, ValueError, IndexError):
            # Fail explicitly rather than leaving loaded_text undefined.
            self.fail('unable to load revision content for comparison')
page_text = page._page_to_json()
self.assertEqual(json.loads(page_text), json.loads(loaded_text))
@require_modules('bs4')
def test_url_image(self):
"""Test fetching of url image of the scan of ProofreadPage."""
page = ProofreadPage(self.site, self.valid['title'])
self.assertEqual(page.url_image, self.valid['url_image'])
page = ProofreadPage(self.site, self.valid_redlink['title'])
self.assertEqual(page.url_image, self.valid_redlink['url_image'])
page = ProofreadPage(self.site, self.existing_unlinked['title'])
# test Exception in property.
self.assertRaises(ValueError, getattr, page, 'url_image')
class TestPageQuality(TestCase):
"""Test page quality."""
family = 'wikisource'
code = 'en'
cached = True
def test_applicable_quality_level(self):
"""Test Page.quality_level when applicable."""
site = self.get_site()
title = 'Page:Popular Science Monthly Volume 49.djvu/1'
page = ProofreadPage(site, title)
self.assertEqual(page.content_model, 'proofread-page')
self.assertEqual(page.quality_level, 0)
@require_modules('bs4')
class TestProofreadPageIndexProperty(TestCase):
"""Test ProofreadPage index property."""
family = 'wikisource'
code = 'en'
cached = True
valid = {
'title': 'Page:Popular Science Monthly Volume 1.djvu/12',
'index': 'Index:Popular Science Monthly Volume 1.djvu',
}
existing_multilinked = {
'title': 'Page:Pywikibot test page.djvu/1',
'index_1': 'Index:Pywikibot test page.djvu',
'index_2': 'Index:Pywikibot test page 2',
}
existing_unlinked = {
'title': 'Page:Pywikibot unlinked test page',
}
def test_index(self):
"""Test index property."""
# Page with Index.
page = ProofreadPage(self.site, self.valid['title'])
index_page = IndexPage(self.site, self.valid['index'])
# Test property.
self.assertEqual(page.index, index_page)
# Test deleter
del page.index
self.assertFalse(hasattr(page, '_index'))
# Test setter with wrong type.
self.assertRaises(TypeError, setattr, page, 'index', 'invalid index')
# Test setter with correct type.
page.index = index_page
self.assertEqual(page.index, index_page)
# Page without Index.
page = ProofreadPage(self.site, self.existing_multilinked['title'])
index_page_1 = IndexPage(self.site, self.existing_multilinked['index_1'])
index_page_2 = IndexPage(self.site, self.existing_multilinked['index_2'])
self.assertEqual(page.index, index_page_1)
self.assertNotEqual(page.index, index_page_2)
self.assertEqual(page._index, (index_page_1, [index_page_2]))
# Page without Index.
page = ProofreadPage(self.site, self.existing_unlinked['title'])
self.assertIs(page.index, None)
self.assertEqual(page._index, (None, []))
@require_modules('bs4')
class IndexPageTestCase(TestCase):
"""Run tests related to IndexPage ProofreadPage extension."""
pass
class TestIndexPageInvalidSite(IndexPageTestCase):
"""Test IndexPage class."""
family = 'wikipedia'
code = 'en'
cached = True
def test_invalid_site_source(self):
"""Test IndexPage from invalid Site as source."""
self.assertRaises(pywikibot.UnknownExtension,
IndexPage, self.site, 'title')
class TestIndexPageValidSite(IndexPageTestCase):
"""Test IndexPage class."""
family = 'wikisource'
code = 'en'
cached = True
valid_index_title = 'Index:Popular Science Monthly Volume 1.djvu'
existing_invalid_title = 'Main Page'
not_existing_invalid_title = 'User:cannot_exists'
def test_valid_site_as_source(self):
"""Test IndexPage from valid Site as source."""
page = IndexPage(self.site, 'Index:dummy test page')
self.assertEqual(page.namespace(), self.site.proofread_index_ns)
def test_invalid_existing_page_as_source(self):
"""Test IndexPage from invalid existing Page as source."""
source = pywikibot.Page(self.site, self.existing_invalid_title)
self.assertRaises(ValueError, IndexPage, source)
def test_invalid_not_existing_page_as_source(self):
"""Test IndexPage from Page not existing in non-Page ns as source."""
source = pywikibot.Page(self.site,
self.not_existing_invalid_title)
self.assertRaises(ValueError, IndexPage, source)
def test_invalid_link_as_source(self):
"""Test IndexPage from invalid Link as source."""
source = pywikibot.Link(self.not_existing_invalid_title,
source=self.site)
self.assertRaises(ValueError, IndexPage, source)
def test_valid_link_as_source(self):
"""Test IndexPage from valid Link as source."""
source = pywikibot.Link(self.valid_index_title,
source=self.site,
defaultNamespace=self.site.proofread_page_ns)
page = IndexPage(source)
self.assertEqual(page.title(withNamespace=False), source.title)
self.assertEqual(page.namespace(), source.namespace)
@require_modules('bs4')
class TestBasePageMethodsIndexPage(BasePageMethodsTestBase):
"""Test behavior of ProofreadPage methods inherited from BasePage."""
family = 'wikisource'
code = 'en'
def setUp(self):
"""Set up test case."""
self._page = IndexPage(
self.site, 'Index:Popular Science Monthly Volume 1.djvu')
super(TestBasePageMethodsIndexPage, self).setUp()
def test_basepage_methods(self):
"""Test IndexPage methods inherited from superclass BasePage."""
self._test_invoke()
self._test_return_datatypes()
class TestLoadRevisionsCachingIndexPage(IndexPageTestCase,
BasePageLoadRevisionsCachingTestBase):
"""Test site.loadrevisions() caching."""
family = 'wikisource'
code = 'en'
def setUp(self):
"""Set up test case."""
self._page = IndexPage(
self.site, 'Index:Popular Science Monthly Volume 1.djvu')
super(TestLoadRevisionsCachingIndexPage, self).setUp()
def test_page_text(self):
"""Test site.loadrevisions() with Page.text."""
self._test_page_text()
class TestIndexPageMappings(IndexPageTestCase):
"""Test IndexPage class."""
sites = {
'enws': {
'family': 'wikisource',
'code': 'en',
'index': 'Index:Popular Science Monthly Volume 1.djvu',
'num_pages': 804,
'page': 'Page:Popular Science Monthly Volume 1.djvu/{0}',
'get_label': [11, 11, '1'],
'get_number': [[1, set([11])],
['Cvr', set([1, 9, 10, 804])],
],
# 'get_page' is filled in setUpClass.
},
        'dews': {  # dews does not use the Page:name/number convention.
'family': 'wikisource',
'code': 'de',
'index': 'Index:Musen-Almanach für das Jahr 1799',
'num_pages': 272,
'page': 'Seite:Schiller_Musenalmanach_1799_{0:3d}.jpg',
'get_label': [120, 120, '120'], # page no, title no, label
'get_number': [[120, set([120])],
],
# 'get_page' is filled in setUpClass.
},
'frws': {
'family': 'wikisource',
'code': 'fr',
'index': 'Index:Segard - Hymnes profanes, 1894.djvu',
'num_pages': 107,
'page': 'Page:Segard - Hymnes profanes, 1894.djvu/{0}',
'get_label': [11, 11, '8'],
'get_number': [[8, set([11])],
['-', set(range(1, 4)) | set(range(101, 108))],
],
# 'get_page' is filled in setUpClass.
},
}
cached = True
@classmethod
def setUpClass(cls):
"""Prepare get_page dataset for tests."""
super(TestIndexPageMappings, cls).setUpClass()
for key, site_def in cls.sites.items():
site = cls.get_site(name=key)
base_title = site_def['page']
# 'get_page' has same structure as 'get_number'.
site_def['get_page'] = []
for label, page_numbers in site_def['get_number']:
page_set = set(ProofreadPage(site, base_title.format(i))
for i in page_numbers)
site_def['get_page'].append([label, page_set])
def test_check_if_cached(self, key):
"""Test if cache is checked and loaded properly."""
data = self.sites[key]
index_page = IndexPage(self.site, self.sites[key]['index'])
num, title_num, label = data['get_label']
self.assertIs(index_page._cached, False)
fetched_label = index_page.get_label_from_page_number(num)
self.assertIs(index_page._cached, True)
self.assertEqual(label, fetched_label)
# Check if cache is refreshed.
index_page._labels_from_page_number[num] = 'wrong cached value'
self.assertEqual(index_page.get_label_from_page_number(num),
'wrong cached value')
index_page._cached = False
self.assertEqual(index_page.get_label_from_page_number(num), label)
def test_num_pages(self, key):
"""Test num_pages property."""
index_page = IndexPage(self.site, self.sites[key]['index'])
self.assertEqual(index_page.num_pages, self.sites[key]['num_pages'])
def test_get_labels(self, key):
"""Test IndexPage page get_label_from_* functions."""
data = self.sites[key]
num, title_num, label = data['get_label']
index_page = IndexPage(self.site, self.sites[key]['index'])
page_title = self.sites[key]['page'].format(title_num)
proofread_page = ProofreadPage(self.site, page_title)
# Get label from number.
self.assertEqual(index_page.get_label_from_page_number(num), label)
        # Error if the number does not exist.
self.assertRaises(KeyError, index_page.get_label_from_page_number, -1)
# Get label from page.
self.assertEqual(index_page.get_label_from_page(proofread_page), label)
        # Error if the page does not exist.
self.assertRaises(KeyError, index_page.get_label_from_page, None)
def test_get_page_and_number(self, key):
"""Test IndexPage page get_page_number functions."""
data = self.sites[key]
index_page = IndexPage(self.site, self.sites[key]['index'])
# Test get_page_numbers_from_label.
for label, num_set in data['get_number']:
# Get set of numbers from label with label as int or str.
self.assertEqual(index_page.get_page_number_from_label(label),
num_set)
self.assertEqual(index_page.get_page_number_from_label(str(label)),
num_set)
        # Error if the label does not exist.
        self.assertRaises(KeyError, index_page.get_page_number_from_label,
                          'dummy label')
# Test get_page_from_label.
for label, page_set in data['get_page']:
# Get set of pages from label with label as int or str.
self.assertEqual(index_page.get_page_from_label(label),
page_set)
self.assertEqual(index_page.get_page_from_label(str(label)),
page_set)
        # Error if the label does not exist.
self.assertRaises(KeyError, index_page.get_page_from_label, 'dummy label')
# Test get_page.
for n in num_set:
p = index_page.get_page(n)
self.assertEqual(index_page.get_number(p), n)
# Test get_number.
for p in page_set:
n = index_page.get_number(p)
self.assertEqual(index_page.get_page(n), p)
def test_page_gen(self, key):
"""Test Index page generator."""
data = self.sites[key]
num, title_num, label = data['get_label']
index_page = IndexPage(self.site, self.sites[key]['index'])
page_title = self.sites[key]['page'].format(title_num)
proofread_page = ProofreadPage(self.site, page_title)
# Check start/end limits.
self.assertRaises(ValueError, index_page.page_gen, -1, 2)
self.assertRaises(ValueError, index_page.page_gen, 1, -1)
self.assertRaises(ValueError, index_page.page_gen, 2, 1)
# Check quality filters.
gen = index_page.page_gen(num, num, filter_ql=range(5))
self.assertEqual(list(gen), [proofread_page])
gen = index_page.page_gen(num, num, filter_ql=[0])
self.assertEqual(list(gen), [])
class TestIndexPageMappingsRedlinks(IndexPageTestCase):
"""Test IndexPage mappings with redlinks."""
family = 'wikisource'
code = 'en'
cached = True
index_name = 'Index:Pywikibot test page.djvu'
page_names = ['Page:Pywikibot test page.djvu/1',
'Page:Pywikibot test page.djvu/2',
]
missing_name = 'Page:Pywikibot test page.djvu/2'
@classmethod
def setUpClass(cls):
"""Prepare tests by creating page instances."""
super(TestIndexPageMappingsRedlinks, cls).setUpClass()
cls.index = IndexPage(cls.site, cls.index_name)
cls.pages = [ProofreadPage(cls.site, page) for page in cls.page_names]
cls.missing = ProofreadPage(cls.site, cls.missing_name)
def test_index_redlink(self):
"""Test index property with redlink."""
self.assertEqual(self.missing.index, self.index)
def test_get_page_and_number_redlink(self):
"""Test IndexPage page get_page_number functions with redlinks."""
for page in self.pages:
n = self.index.get_number(page)
self.assertEqual(self.index.get_page(n), page)
def test_page_gen_redlink(self):
"""Test Index page generator with redlinks."""
# Check start/end limits.
self.assertRaises(ValueError, self.index.page_gen, -1, 2)
self.assertRaises(ValueError, self.index.page_gen, 1, -1)
self.assertRaises(ValueError, self.index.page_gen, 2, 1)
gen = self.index.page_gen(1, None, filter_ql=range(5))
self.assertEqual(list(gen), self.pages)
if __name__ == '__main__': # pragma: no cover
try:
unittest.main()
except SystemExit:
pass
|
magul/pywikibot-core
|
tests/proofreadpage_tests.py
|
Python
|
mit
| 24,736
|
from django.core.management.base import BaseCommand, CommandError
from main.models import Port, Country, settings
import csv
# This file is part of https://github.com/cpina/science-cruise-data-management
#
# This project was programmed in a hurry without any prior Django experience,
# while circumnavigating the Antarctic on the ACE expedition, without proper
# Internet access, with 150 scientists using the system, while we also handled
# other data management and system administration tasks on the same cruise.
#
# Sadly there are no unit tests, and we did not have time during the cruise
# for the refactoring the code really needs.
#
# Carles Pina (carles@pina.cat) and Jen Thomas (jenny_t152@yahoo.co.uk), 2016-2017.
class Command(BaseCommand):
help = 'Adds data to the port table'
def add_arguments(self, parser):
parser.add_argument('filename', type=str)
def handle(self, *args, **options):
print(options['filename'])
self.import_data_from_csv(options['filename'])
def import_data_from_csv(self, filename):
with open(filename) as csvfile:
reader = csv.DictReader(csvfile)
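            # Columns the CSV must provide, inferred from the row accesses
            # below: Url, Identifier, PrefLabel, country, latitude,
            # longitude, Version, Deprecated, Date, source.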
for row in reader:
print(row)
port = Port()
port.url = row['Url']
port.code = row['Identifier']
port.name = row['PrefLabel']
                if row['country'] != '':
                    # Assumes the named country already exists; use the
                    # first match.
                    country = Country.objects.filter(name=row['country'])[0]
                    port.country = country
port.latitude = row['latitude']
port.longitude = row['longitude']
port.version = row['Version']
port.deprecated = row['Deprecated']
port.date = row['Date']
port.source = row['source']
port.save()
|
cpina/science-cruise-data-management
|
ScienceCruiseDataManagement/main/management/commands/importports.py
|
Python
|
mit
| 1,825
|
# encoding: utf-8
"""
Utilities for working with strings and text.
Inheritance diagram:
.. inheritance-diagram:: IPython.utils.text
:parts: 3
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import os
import re
import sys
import textwrap
from string import Formatter
from IPython.external.path import path
from IPython.testing.skipdoctest import skip_doctest_py3, skip_doctest
from IPython.utils import py3compat
#-----------------------------------------------------------------------------
# Declarations
#-----------------------------------------------------------------------------
# datetime.strftime date format for ipython
if sys.platform == 'win32':
date_format = "%B %d, %Y"
else:
date_format = "%B %-d, %Y"
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
class LSString(str):
"""String derivative with a special access attributes.
These are normal strings, but with the special attributes:
.l (or .list) : value as list (split on newlines).
.n (or .nlstr): original value (the string itself).
.s (or .spstr): value as whitespace-separated string.
.p (or .paths): list of path objects
Any values which require transformations are computed only once and
cached.
Such strings are very useful to efficiently interact with the shell, which
typically only understands whitespace-separated options for commands."""
def get_list(self):
try:
return self.__list
except AttributeError:
self.__list = self.split('\n')
return self.__list
l = list = property(get_list)
def get_spstr(self):
try:
return self.__spstr
except AttributeError:
self.__spstr = self.replace('\n', ' ')
return self.__spstr
s = spstr = property(get_spstr)
def get_nlstr(self):
return self
n = nlstr = property(get_nlstr)
def get_paths(self):
try:
return self.__paths
except AttributeError:
self.__paths = [path(p)
for p in self.split('\n') if os.path.exists(p)]
return self.__paths
p = paths = property(get_paths)
# FIXME: We need to reimplement type specific displayhook and then add this
# back as a custom printer. This should also be moved outside utils into the
# core.
# def print_lsstring(arg):
# """ Prettier (non-repr-like) and more informative printer for LSString """
# print "LSString (.p, .n, .l, .s available). Value:"
# print arg
#
#
# print_lsstring = result_display.when_type(LSString)(print_lsstring)
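# A minimal usage sketch (added for illustration; LSString instances are
# normally produced by IPython's shell-capture machinery rather than built
# by hand):
#
#     s = LSString('foo\nbar')
#     s.l   -> ['foo', 'bar']
#     s.s   -> 'foo bar'
#     s.n   -> 'foo\nbar'
#     s.p   -> path objects for any lines that name existing paths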
class SList(list):
"""List derivative with a special access attributes.
These are normal lists, but with the special attributes:
* .l (or .list) : value as list (the list itself).
* .n (or .nlstr): value as a string, joined on newlines.
* .s (or .spstr): value as a string, joined on spaces.
* .p (or .paths): list of path objects
Any values which require transformations are computed only once and
cached."""
def get_list(self):
return self
l = list = property(get_list)
def get_spstr(self):
try:
return self.__spstr
except AttributeError:
self.__spstr = ' '.join(self)
return self.__spstr
s = spstr = property(get_spstr)
def get_nlstr(self):
try:
return self.__nlstr
except AttributeError:
self.__nlstr = '\n'.join(self)
return self.__nlstr
n = nlstr = property(get_nlstr)
def get_paths(self):
try:
return self.__paths
except AttributeError:
self.__paths = [path(p) for p in self if os.path.exists(p)]
return self.__paths
p = paths = property(get_paths)
def grep(self, pattern, prune=False, field=None):
""" Return all strings matching 'pattern' (a regex or callable)
This is case-insensitive. If prune is true, return all items
NOT matching the pattern.
If field is specified, the match must occur in the specified
whitespace-separated field.
Examples::
a.grep( lambda x: x.startswith('C') )
a.grep('Cha.*log', prune=1)
a.grep('chm', field=-1)
"""
def match_target(s):
if field is None:
return s
parts = s.split()
try:
tgt = parts[field]
return tgt
except IndexError:
return ""
if isinstance(pattern, py3compat.string_types):
pred = lambda x: re.search(pattern, x, re.IGNORECASE)
else:
pred = pattern
if not prune:
return SList([el for el in self if pred(match_target(el))])
else:
return SList([el for el in self if not pred(match_target(el))])
def fields(self, *fields):
""" Collect whitespace-separated fields from string list
Allows quick awk-like usage of string lists.
Example data (in var a, created by 'a = !ls -l')::
-rwxrwxrwx 1 ville None 18 Dec 14 2006 ChangeLog
drwxrwxrwx+ 6 ville None 0 Oct 24 18:05 IPython
* ``a.fields(0)`` is ``['-rwxrwxrwx', 'drwxrwxrwx+']``
* ``a.fields(1,0)`` is ``['1 -rwxrwxrwx', '6 drwxrwxrwx+']``
(note the joining by space).
* ``a.fields(-1)`` is ``['ChangeLog', 'IPython']``
IndexErrors are ignored.
Without args, fields() just split()'s the strings.
"""
if len(fields) == 0:
return [el.split() for el in self]
res = SList()
for el in [f.split() for f in self]:
lineparts = []
for fd in fields:
try:
lineparts.append(el[fd])
except IndexError:
pass
if lineparts:
res.append(" ".join(lineparts))
return res
def sort(self, field=None, nums=False):
""" sort by specified fields (see fields())
Example::
a.sort(1, nums = True)
Sorts a by second field, in numerical order (so that 21 > 3)
"""
#decorate, sort, undecorate
if field is not None:
dsu = [[SList([line]).fields(field), line] for line in self]
else:
dsu = [[line, line] for line in self]
if nums:
for i in range(len(dsu)):
numstr = "".join([ch for ch in dsu[i][0] if ch.isdigit()])
try:
n = int(numstr)
except ValueError:
n = 0
dsu[i][0] = n
dsu.sort()
return SList([t[1] for t in dsu])
# FIXME: We need to reimplement type specific displayhook and then add this
# back as a custom printer. This should also be moved outside utils into the
# core.
# def print_slist(arg):
# """ Prettier (non-repr-like) and more informative printer for SList """
# print "SList (.p, .n, .l, .s, .grep(), .fields(), sort() available):"
# if hasattr(arg, 'hideonce') and arg.hideonce:
# arg.hideonce = False
# return
#
# nlprint(arg) # This was a nested list printer, now removed.
#
# print_slist = result_display.when_type(SList)(print_slist)
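# A minimal usage sketch (added for illustration, with hypothetical data of
# the shape produced by something like ``a = !ls -l``):
#
#     a = SList(['-rw-r--r-- 1 u g 18 Dec 14 2006 ChangeLog',
#                'drwxr-xr-x 6 u g  0 Oct 24 18:05 IPython'])
#     a.grep('Change')       -> lines matching the pattern
#     a.fields(-1)           -> ['ChangeLog', 'IPython']
#     a.sort(4, nums=True)   -> lines sorted numerically by the size field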
def indent(instr, nspaces=4, ntabs=0, flatten=False):
"""Indent a string a given number of spaces or tabstops.
indent(str,nspaces=4,ntabs=0) -> indent str by ntabs+nspaces.
Parameters
----------
instr : basestring
The string to be indented.
nspaces : int (default: 4)
The number of spaces to be indented.
ntabs : int (default: 0)
The number of tabs to be indented.
flatten : bool (default: False)
Whether to scrub existing indentation. If True, all lines will be
aligned to the same indentation. If False, existing indentation will
be strictly increased.
Returns
-------
str|unicode : string indented by ntabs and nspaces.
"""
if instr is None:
return
ind = '\t' * ntabs + ' ' * nspaces
if flatten:
pat = re.compile(r'^\s*', re.MULTILINE)
else:
pat = re.compile(r'^', re.MULTILINE)
outstr = re.sub(pat, ind, instr)
if outstr.endswith(os.linesep + ind):
return outstr[:-len(ind)]
else:
return outstr
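# Quick illustration (added; mirrors the documented behaviour):
#
#     indent('a\nb', 2)                        -> '  a\n  b'
#     indent('  a\n      b', 2, flatten=True)  -> '  a\n  b'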
def list_strings(arg):
"""Always return a list of strings, given a string or list of strings
as input.
Examples
--------
::
In [7]: list_strings('A single string')
Out[7]: ['A single string']
In [8]: list_strings(['A single string in a list'])
Out[8]: ['A single string in a list']
In [9]: list_strings(['A','list','of','strings'])
Out[9]: ['A', 'list', 'of', 'strings']
"""
if isinstance(arg, py3compat.string_types):
return [arg]
else:
return arg
def marquee(txt='', width=78, mark='*'):
"""Return the input string centered in a 'marquee'.
Examples
--------
::
In [16]: marquee('A test',40)
Out[16]: '**************** A test ****************'
In [17]: marquee('A test',40,'-')
Out[17]: '---------------- A test ----------------'
In [18]: marquee('A test',40,' ')
Out[18]: ' A test '
"""
if not txt:
return (mark * width)[:width]
nmark = (width - len(txt) - 2) // len(mark) // 2
if nmark < 0:
nmark = 0
marks = mark * nmark
return '%s %s %s' % (marks, txt, marks)
ini_spaces_re = re.compile(r'^(\s+)')
def num_ini_spaces(strng):
"""Return the number of initial spaces in a string"""
ini_spaces = ini_spaces_re.match(strng)
if ini_spaces:
return ini_spaces.end()
else:
return 0
def format_screen(strng):
"""Format a string for screen printing.
This removes some latex-type format codes."""
# Paragraph continue
par_re = re.compile(r'\\$', re.MULTILINE)
strng = par_re.sub('', strng)
return strng
def dedent(text):
"""Equivalent of textwrap.dedent that ignores unindented first line.
This means it will still dedent strings like:
'''foo
is a bar
'''
For use in wrap_paragraphs.
"""
if text.startswith('\n'):
# text starts with blank line, don't ignore the first line
return textwrap.dedent(text)
# split first line
splits = text.split('\n', 1)
if len(splits) == 1:
# only one line
return textwrap.dedent(text)
first, rest = splits
# dedent everything but the first line
rest = textwrap.dedent(rest)
return '\n'.join([first, rest])
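# Quick illustration (added): the first line keeps its indentation while the
# rest is dedented, so
#
#     dedent('foo\n    is a bar\n')  -> 'foo\nis a bar\n'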
def wrap_paragraphs(text, ncols=80):
"""Wrap multiple paragraphs to fit a specified width.
This is equivalent to textwrap.wrap, but with support for multiple
paragraphs, as separated by empty lines.
Returns
-------
list of complete paragraphs, wrapped to fill `ncols` columns.
"""
paragraph_re = re.compile(r'\n(\s*\n)+', re.MULTILINE)
text = dedent(text).strip()
paragraphs = paragraph_re.split(text)[::2] # every other entry is space
out_ps = []
indent_re = re.compile(r'\n\s+', re.MULTILINE)
for p in paragraphs:
# presume indentation that survives dedent is meaningful formatting,
# so don't fill unless text is flush.
if indent_re.search(p) is None:
# wrap paragraph
p = textwrap.fill(p, ncols)
out_ps.append(p)
return out_ps
def long_substr(data):
"""Return the longest common substring in a list of strings.
Credit: http://stackoverflow.com/questions/2892931/longest-common-substring-from-more-than-two-strings-python
"""
substr = ''
if len(data) > 1 and len(data[0]) > 0:
for i in range(len(data[0])):
for j in range(len(data[0]) - i + 1):
if j > len(substr) and all(data[0][i:i + j] in x for x in data):
substr = data[0][i:i + j]
elif len(data) == 1:
substr = data[0]
return substr
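# e.g. (illustration added):
#     long_substr(['interspecies', 'interstellar', 'interstate'])  -> 'inters'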
def strip_email_quotes(text):
"""Strip leading email quotation characters ('>').
Removes any combination of leading '>' interspersed with whitespace that
appears *identically* in all lines of the input text.
Parameters
----------
text : str
Examples
--------
Simple uses::
In [2]: strip_email_quotes('> > text')
Out[2]: 'text'
In [3]: strip_email_quotes('> > text\\n> > more')
Out[3]: 'text\\nmore'
Note how only the common prefix that appears in all lines is stripped::
In [4]: strip_email_quotes('> > text\\n> > more\\n> more...')
Out[4]: '> text\\n> more\\nmore...'
    So if any line has no quote marks ('>'), then none are stripped from any
    of them::
In [5]: strip_email_quotes('> > text\\n> > more\\nlast different')
Out[5]: '> > text\\n> > more\\nlast different'
"""
lines = text.splitlines()
matches = set()
for line in lines:
prefix = re.match(r'^(\s*>[ >]*)', line)
if prefix:
matches.add(prefix.group(1))
else:
break
else:
prefix = long_substr(list(matches))
if prefix:
strip = len(prefix)
text = '\n'.join([ln[strip:] for ln in lines])
return text
def strip_ansi(source):
"""
Remove ansi escape codes from text.
Parameters
----------
source : str
Source to remove the ansi from
"""
return re.sub(r'\033\[(\d|;)+?m', '', source)
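# e.g. (illustration added): strip_ansi('\x1b[31mred\x1b[0m') -> 'red'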
class EvalFormatter(Formatter):
"""A String Formatter that allows evaluation of simple expressions.
Note that this version interprets a : as specifying a format string (as per
standard string formatting), so if slicing is required, you must explicitly
create a slice.
This is to be used in templating cases, such as the parallel batch
script templates, where simple arithmetic on arguments is useful.
Examples
--------
::
In [1]: f = EvalFormatter()
In [2]: f.format('{n//4}', n=8)
Out[2]: '2'
In [3]: f.format("{greeting[slice(2,4)]}", greeting="Hello")
Out[3]: 'll'
"""
def get_field(self, name, args, kwargs):
v = eval(name, kwargs)
return v, name
# XXX: As of Python 3.4, the format string parsing no longer splits on a colon
# inside [], so EvalFormatter can handle slicing. Once we only support 3.4 and
# above, it should be possible to remove FullEvalFormatter.
@skip_doctest_py3
class FullEvalFormatter(Formatter):
"""A String Formatter that allows evaluation of simple expressions.
Any time a format key is not found in the kwargs,
it will be tried as an expression in the kwargs namespace.
Note that this version allows slicing using [1:2], so you cannot specify
a format string. Use :class:`EvalFormatter` to permit format strings.
Examples
--------
::
In [1]: f = FullEvalFormatter()
In [2]: f.format('{n//4}', n=8)
Out[2]: u'2'
In [3]: f.format('{list(range(5))[2:4]}')
Out[3]: u'[2, 3]'
In [4]: f.format('{3*2}')
Out[4]: u'6'
"""
# copied from Formatter._vformat with minor changes to allow eval
# and replace the format_spec code with slicing
def _vformat(self, format_string, args, kwargs, used_args, recursion_depth):
if recursion_depth < 0:
raise ValueError('Max string recursion exceeded')
result = []
for literal_text, field_name, format_spec, conversion in \
self.parse(format_string):
# output the literal text
if literal_text:
result.append(literal_text)
# if there's a field, output it
if field_name is not None:
# this is some markup, find the object and do
# the formatting
if format_spec:
# override format spec, to allow slicing:
field_name = ':'.join([field_name, format_spec])
# eval the contents of the field for the object
# to be formatted
obj = eval(field_name, kwargs)
# do any conversion on the resulting object
obj = self.convert_field(obj, conversion)
# format the object and append to the result
result.append(self.format_field(obj, ''))
return u''.join(py3compat.cast_unicode(s) for s in result)
@skip_doctest_py3
class DollarFormatter(FullEvalFormatter):
"""Formatter allowing Itpl style $foo replacement, for names and attribute
access only. Standard {foo} replacement also works, and allows full
evaluation of its arguments.
Examples
--------
::
In [1]: f = DollarFormatter()
In [2]: f.format('{n//4}', n=8)
Out[2]: u'2'
In [3]: f.format('23 * 76 is $result', result=23*76)
Out[3]: u'23 * 76 is 1748'
In [4]: f.format('$a or {b}', a=1, b=2)
Out[4]: u'1 or 2'
"""
    _dollar_pattern = re.compile(r"(.*?)\$(\$?[\w\.]+)")
def parse(self, fmt_string):
for literal_txt, field_name, format_spec, conversion \
in Formatter.parse(self, fmt_string):
# Find $foo patterns in the literal text.
continue_from = 0
txt = ""
for m in self._dollar_pattern.finditer(literal_txt):
new_txt, new_field = m.group(1, 2)
# $$foo --> $foo
if new_field.startswith("$"):
txt += new_txt + new_field
else:
yield (txt + new_txt, new_field, "", None)
txt = ""
continue_from = m.end()
# Re-yield the {foo} style pattern
yield (txt + literal_txt[continue_from:], field_name, format_spec, conversion)
#-----------------------------------------------------------------------------
# Utils to columnize a list of string
#-----------------------------------------------------------------------------
def _chunks(l, n):
"""Yield successive n-sized chunks from l."""
for i in py3compat.xrange(0, len(l), n):
yield l[i:i + n]
def _find_optimal(rlist, separator_size=2, displaywidth=80):
"""Calculate optimal info to columnize a list of string"""
for nrow in range(1, len(rlist) + 1):
chk = list(map(max, _chunks(rlist, nrow)))
sumlength = sum(chk)
ncols = len(chk)
if sumlength + separator_size * (ncols - 1) <= displaywidth:
break
return {'columns_numbers': ncols,
'optimal_separator_width': (displaywidth - sumlength) / (ncols - 1) if (ncols - 1) else 0,
'rows_numbers': nrow,
'columns_width': chk
}
def _get_or_default(mylist, i, default=None):
"""return list item number, or default if don't exist"""
if i >= len(mylist):
return default
else:
return mylist[i]
@skip_doctest
def compute_item_matrix(items, empty=None, *args, **kwargs):
"""Returns a nested list, and info to columnize items
Parameters
----------
    items
        list of strings to columnize
    empty : (default None)
        default value used to fill the matrix if needed
    separator_size : int (default=2)
        How many characters will be used as separation between columns.
    displaywidth : int (default=80)
        The width of the area into which the columns must fit.
    Returns
    -------
    strings_matrix
        nested list of strings; the outermost list contains as many lists as
        rows, and each innermost list has as many elements as columns. If the
        total number of elements in `items` does not equal the product of
        rows*columns, the last elements of some lists are filled with `None`.
    dict_info
        some info to make columnize easier:
        columns_numbers
            number of columns
        rows_numbers
            number of rows
        columns_width
            list of widths, one per column
        optimal_separator_width
            best separator width between columns
Examples
--------
::
In [1]: l = ['aaa','b','cc','d','eeeee','f','g','h','i','j','k','l']
...: compute_item_matrix(l,displaywidth=12)
Out[1]:
([['aaa', 'f', 'k'],
['b', 'g', 'l'],
['cc', 'h', None],
['d', 'i', None],
['eeeee', 'j', None]],
{'columns_numbers': 3,
'columns_width': [5, 1, 1],
'optimal_separator_width': 2,
'rows_numbers': 5})
"""
info = _find_optimal(list(map(len, items)), *args, **kwargs)
nrow, ncol = info['rows_numbers'], info['columns_numbers']
return ([[_get_or_default(items, c * nrow + i, default=empty) for c in range(ncol)] for i in range(nrow)], info)
def columnize(items, separator=' ', displaywidth=80):
""" Transform a list of strings into a single string with columns.
Parameters
----------
items : sequence of strings
The strings to process.
separator : str, optional [default is two spaces]
The string that separates columns.
displaywidth : int, optional [default is 80]
Width of the display in number of characters.
Returns
-------
The formatted string.
"""
if not items:
return '\n'
matrix, info = compute_item_matrix(
items, separator_size=len(separator), displaywidth=displaywidth)
    # Drop the None padding added by compute_item_matrix, keeping real items.
    fmatrix = [[entry for entry in row if entry is not None] for row in matrix]
sjoin = lambda x: separator.join(
[y.ljust(w, ' ') for y, w in zip(x, info['columns_width'])])
return '\n'.join(map(sjoin, fmatrix)) + '\n'
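# e.g. (illustration added):
#     columnize(['aaa', 'b', 'cc', 'd', 'eeeee', 'f'], displaywidth=12)
# lays the items out column-major, in three rows of two left-justified
# columns: 'aaa  d', 'b    eeeee', 'cc   f'.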
def get_text_list(list_, last_sep=' and ', sep=", ", wrap_item_with=""):
"""
Return a string with a natural enumeration of items
>>> get_text_list(['a', 'b', 'c', 'd'])
'a, b, c and d'
>>> get_text_list(['a', 'b', 'c'], ' or ')
'a, b or c'
>>> get_text_list(['a', 'b', 'c'], ', ')
'a, b, c'
>>> get_text_list(['a', 'b'], ' or ')
'a or b'
>>> get_text_list(['a'])
'a'
>>> get_text_list([])
''
>>> get_text_list(['a', 'b'], wrap_item_with="`")
'`a` and `b`'
>>> get_text_list(['a', 'b', 'c', 'd'], " = ", sep=" + ")
'a + b + c = d'
"""
if len(list_) == 0:
return ''
if wrap_item_with:
list_ = ['%s%s%s' % (wrap_item_with, item, wrap_item_with) for
item in list_]
if len(list_) == 1:
return list_[0]
return '%s%s%s' % (
sep.join(i for i in list_[:-1]),
last_sep, list_[-1])
|
mattvonrocketstein/smash
|
smashlib/ipy3x/utils/text.py
|
Python
|
mit
| 23,460
|
#!/usr/bin/python3
# -*- coding: UTF-8 -*-
# Introduction: this script batch-runs convertGenbank2table.py over every GenBank file under ./gbk
# Created by galaxy on 2016/9/8 10:50
import os
my_path = os.getcwd()
gbk_dir = os.path.join(my_path, 'gbk')
batch_lines = []
for root, dirs, files in os.walk(gbk_dir):
for each_file in files:
gbk_path = 'gbk/{0}'.format(each_file)
each_cmd = 'python convertGenbank2table.py -g {0} -v 1'.format(gbk_path)
batch_lines.append(each_cmd)
batch_file = os.path.join(my_path, 'convertGenbank2table.sh')
with open(batch_file, 'w') as f1:
for each_batch_cmd in batch_lines:
f1.write('{0}\n'.format(each_batch_cmd))
convert_cmd = 'sh convertGenbank2table.sh'
os.system(convert_cmd)
|
cvn001/RecentHGT
|
src/convertGenbank2table_InBatch.py
|
Python
|
mit
| 691
|
from setuptools import setup, find_packages
from opensimplex import __version__, __author__
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name='opensimplex',
version=__version__,
author=__author__,
author_email='opensimplex@larus.se',
description='OpenSimplex n-dimensional gradient noise function.',
long_description=long_description,
long_description_content_type="text/markdown",
keywords='opensimplex simplex noise 2D 3D 4D',
url='https://github.com/lmas/opensimplex',
download_url='https://github.com/lmas/opensimplex/releases',
license='MIT',
packages=find_packages(),
include_package_data=True,
install_requires=[
'numpy>=1.20',
],
zip_safe=False,
classifiers=[
'Programming Language :: Python :: 3',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Topic :: Scientific/Engineering :: Mathematics',
],
python_requires='>=3.7',
)
|
lmas/opensimplex
|
setup.py
|
Python
|
mit
| 1,011
|
'''
Sort entries in a tabular BLAST output file in reverse order.
-----------------------------------------------------------
(c) 2013 Allegra Via and Kristian Rother
Licensed under the conditions of the Python License
This code appears in section 8.4.2 of the book
"Managing Biological Data with Python".
-----------------------------------------------------------
'''
from operator import itemgetter
input_file = open("BlastOut.csv")
output_file = open("BlastOutSorted.csv","w")
# read BLAST output table
table = []
for line in input_file:
col = line.split(',')
col[2] = float(col[2])
table.append(col)
# sort rows by the third column, largest values first
table_sorted = sorted(table, key=itemgetter(2), reverse=True)
# write sorted table to an output file
for row in table_sorted:
row = [str(x) for x in row]
output_file.write("\t".join(row) + '\n')
input_file.close()
output_file.close()
|
raymonwu/Managing_Your_Biological_Data_with_Python_3
|
08-sorting_data/8.4.2_sort_blast_output.py
|
Python
|
mit
| 883
|
#!/usr/bin/python3
#
# Copyright (c) 2013 Mikkel Schubert <MikkelSch@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import argparse
import gzip
import math
import random
import sys
from typing import Dict, Iterator, List, Optional, Tuple, cast
from paleomix.common.formats.fasta import FASTA
from paleomix.common.sampling import weighted_sampling
from paleomix.common.sequences import reverse_complement
from paleomix.common.utilities import fragment
def _dexp(lambda_value: float, position: int) -> float:
return lambda_value * math.exp(-lambda_value * position)
def _rexp(lambda_value: float, rng: random.Random) -> float:
return -math.log(rng.random()) / lambda_value
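# _rexp draws an Exp(lambda) variate by inverse-transform sampling: if
# U ~ Uniform(0, 1), then -ln(U) / lambda is exponentially distributed.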
def toint(value: float) -> int:
return int(round(value))
# Adapter added to the 5' end of the forward strand (read from 5' ...)
PCR1 = "AGATCGGAAGAGCACACGTCTGAACTCCAGTCAC%sATCTCGTATGCCGTCTTCTGCTTG"
# Adapter added to the 5' end of the reverse strand (read from 3' ...):
# rev. compl of the forward
PCR2 = "AGATCGGAAGAGCGTCGTGTAGGGAAAGAGTGTAGATCTCGGTGGTCGCCGTATCATT"
def _get_indel_length(indel_lambda: float, rng: random.Random) -> int:
return 1 + toint(_rexp(indel_lambda, rng))
def _get_weighted_choices(rng: random.Random, sub_rate: float, indel_rate: float):
choices_by_nt: Dict[str, Iterator[str]] = {}
for src_nt in "ACGT":
choices = "ACGTID"
probs = [sub_rate / 4] * 4 # ACGT
probs += [indel_rate / 2] * 2 # ID
        # The source nucleotide keeps the leftover probability mass, so each
        # of the three real substitutions occurs with probability sub_rate/4.
        probs[choices.index(src_nt)] = 1 - sum(probs) + sub_rate / 4
choices_by_nt[src_nt] = weighted_sampling(choices, probs, rng)
return choices_by_nt
def _mutate_sequence(
rng: random.Random,
choices: Dict[str, Iterator[str]],
refseq: str,
indel_lambda: float = 0,
) -> Tuple[str, List[int]]:
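    """Copy `refseq` while sampling substitutions, insertions and deletions
    from `choices`, returning the mutated sequence together with, for each
    emitted base, the position in `refseq` it derives from.
    """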
position = 0
sequence: List[str] = []
positions: List[int] = []
while position < len(refseq):
ref_nt = refseq[position]
if ref_nt not in "ACGT":
read_nt = rng.choice("ACGT")
else:
read_nt = next(choices[ref_nt])
if read_nt == "D":
for _ in range(_get_indel_length(indel_lambda, rng)):
position += 1
elif read_nt == "I":
for _ in range(_get_indel_length(indel_lambda, rng)):
sequence.append(rng.choice("ACGT"))
positions.append(position)
else:
sequence.append(read_nt)
positions.append(position)
position += 1
return "".join(sequence), positions
class Args: # (argparse.Namespace):
fasta: str
output_prefix: str
barcode: str
specimen_seed: Optional[int]
specimen_sub_rate: float
specimen_indel_rate: float
specimen_indel_lambda: float
sample_seed: int
sample_frag_len_mu: int
sample_frag_len_sigma: int
sample_frag_len_min: int
sample_frag_len_max: int
sample_endog_mu: float
sample_endog_sigma: float
damage: bool
damage_seed: Optional[int]
damage_lambda: float
library_seed: Optional[int]
library_pcr_lambda: float
library_barcode: Optional[str]
lanes_num: int
lanes_reads_mu: int
lanes_reads_sigma: int
lanes_per_file: int
reads_sub_rate: float
reads_indel_rate: float
reads_indel_lambda: float
reads_len: int
class Specimen:
"""Represents a specimen, from which samples are derived.
These are mutated by the addition of changes to the sequence
"""
def __init__(self, options: Args, filename: str):
genome = list(FASTA.from_file(filename))
assert len(genome) == 1, len(genome)
self._genome = genome[0].sequence.upper()
rng = random.Random(options.specimen_seed)
choices = _get_weighted_choices(
rng, options.specimen_sub_rate, options.specimen_indel_rate
)
self._sequence, self._positions = _mutate_sequence(
rng, choices, self._genome, options.specimen_indel_lambda
)
@property
def sequence(self):
return self._sequence
@property
def positions(self):
return self._positions
class Sample:
def __init__(self, options: Args, specimen: Specimen):
self._specimen = specimen
self._random = random.Random(options.sample_seed)
self._options = options
frac_endog = self._random.gauss(
options.sample_endog_mu, options.sample_endog_sigma
)
self._frac_endog = min(1, max(0.01, frac_endog))
self._endog_id = 0
self._contam_id = 0
def get_fragment(self) -> Tuple[bool, str, str]:
"""Returns either a DNA fragmnet, representing either a fragment of
the sample genome, or a randomly generated DNA sequence representing
contaminant DNA that is not related to the species."""
if self._random.random() <= self._frac_endog:
return self._get_endogenous_sequence()
return self._get_contaminant_sequence()
def _get_contaminant_sequence(self) -> Tuple[bool, str, str]:
length = self._get_frag_len()
sequence = [self._random.choice("ACGT") for _ in range(length)]
self._contam_id += 1
name = "Seq_junk_%i" % (self._contam_id,)
return (False, name, "".join(sequence))
def _get_endogenous_sequence(self) -> Tuple[bool, str, str]:
length = self._get_frag_len()
max_position = len(self._specimen.sequence) - length
position = self._random.randint(0, max_position)
strand = self._random.choice(("fw", "rv"))
sequence = self._specimen.sequence[position : position + length]
real_pos = self._specimen.positions[position]
if strand == "rv":
sequence = reverse_complement("".join(sequence))
self._endog_id += 1
name = "Seq_%i_%i_%i_%s" % (self._endog_id, real_pos, length, strand)
return (True, name, sequence)
def _get_frag_len(self):
length = toint(
self._random.gauss(
self._options.sample_frag_len_mu, self._options.sample_frag_len_sigma
)
)
return max(
self._options.sample_frag_len_min,
min(self._options.sample_frag_len_max, length),
)
class Damage:
def __init__(self, options: Args, sample: Sample):
self._options = options
self._sample = sample
self._random = random.Random(options.damage_seed)
self._rates = self._calc_damage_rates(options)
def get_fragment(self) -> Tuple[str, str]:
is_endogenous, name, sequence = self._sample.get_fragment()
if is_endogenous and self._options.damage:
sequence = self._damage_sequence(sequence)
return (name, sequence)
    def _damage_sequence(self, sequence: str) -> str:
        # Simulates post-mortem deamination: C -> T near the 5' end and,
        # symmetrically, G -> A near the 3' end, at rates that decay
        # exponentially with distance from the fragment end.
        result: List[str] = []
length = len(sequence)
for (position, nucleotide) in enumerate(sequence):
if nucleotide == "C":
if self._random.random() < self._rates[position]:
nucleotide = "T"
elif nucleotide == "G":
rv_position = length - position - 1
if self._random.random() < self._rates[rv_position]:
nucleotide = "A"
result.append(nucleotide)
return "".join(result)
@classmethod
def _calc_damage_rates(cls, options: Args) -> List[float]:
return [
_dexp(options.damage_lambda, position)
for position in range(options.sample_frag_len_max)
]
class Library:
def __init__(self, options: Args, damaged_sample: Damage):
self._options = options
self._damaged_sample = damaged_sample
self._cache = []
self._rng = random.Random(options.library_seed)
self.barcode = options.library_barcode
if self.barcode is None:
self.barcode = "".join(self._rng.choice("ACGT") for _ in range(6))
        assert len(self.barcode) == 6, self.barcode
pcr1 = PCR1 % (self.barcode,)
self.lanes = self._generate_lanes(options, self._rng, damaged_sample, pcr1)
@classmethod
def _generate_lanes(
cls,
options: Args,
rng: random.Random,
sample: Damage,
pcr1: str,
):
lane_counts: List[int] = []
        for _ in range(options.lanes_num):
            # Use the seeded library rng (not the module-level random state)
            # so lane sizes are reproducible.
            lane_counts.append(
                toint(rng.gauss(options.lanes_reads_mu, options.lanes_reads_sigma))
            )
reads = cls._generate_reads(options, rng, sample, sum(lane_counts), pcr1)
lanes: List[Lane] = []
for count in lane_counts:
lanes.append(Lane(options, reads[:count]))
reads = reads[count:]
return lanes
@classmethod
def _generate_reads(
cls,
options: Args,
rng: random.Random,
sample: Damage,
minimum: int,
pcr1: str,
) -> List[Tuple[str, str, str]]:
reads: List[Tuple[str, str, str]] = []
while len(reads) < minimum:
name, sequence = sample.get_fragment()
cur_forward = sequence + pcr1
cur_reverse = reverse_complement(sequence) + PCR2
# Number of PCR copies -- minimum 1
num_dupes = toint(_rexp(options.library_pcr_lambda, rng)) + 1
for dupe_id in range(num_dupes):
cur_name = "%s_%s" % (name, dupe_id)
reads.append((cur_name, cur_forward, cur_reverse))
        # Shuffle with the seeded rng so read order is reproducible.
        rng.shuffle(reads)
return reads
class Lane:
def __init__(self, options: Args, reads: List[Tuple[str, str, str]]):
rng = random.Random()
choices = _get_weighted_choices(
rng, options.reads_sub_rate, options.reads_indel_rate
)
self._sequences: List[Tuple[str, str, str]] = []
for (name, forward, reverse) in reads:
forward, _ = _mutate_sequence(
rng, choices, forward, options.reads_indel_lambda
)
if len(forward) < options.reads_len:
forward += "A" * (options.reads_len - len(forward))
elif len(forward) > options.reads_len:
forward = forward[: options.reads_len]
reverse, _ = _mutate_sequence(
rng, choices, reverse, options.reads_indel_lambda
)
if len(reverse) < options.reads_len:
reverse += "T" * (options.reads_len - len(reverse))
elif len(reverse) > options.reads_len:
reverse = reverse[: options.reads_len]
self._sequences.append((name, "".join(forward), "".join(reverse)))
@property
def sequences(self):
return self._sequences
def parse_args(argv: List[str]) -> Args:
parser = argparse.ArgumentParser()
parser.add_argument("fasta", help="Input FASTA file")
parser.add_argument("output_prefix", help="Prefix for output filenames")
group = parser.add_argument_group("Specimen")
group.add_argument(
"--specimen-seed",
default=None,
type=int,
help="Seed used to initialize the 'speciment', for the "
"creation of a random genotype. Set to a specific "
"values if runs are to be done for the same "
"genotype.",
)
group.add_argument("--specimen-sub-rate", default=0.005, type=float)
group.add_argument("--specimen-indel-rate", default=0.0005, type=float)
group.add_argument("--specimen-indel-lambda", default=0.9, type=float)
group = parser.add_argument_group("Samples from specimens")
group.add_argument("--sample-seed", default=None)
group.add_argument(
"--sample-frag-length-mu", dest="sample_frag_len_mu", default=100, type=int
)
group.add_argument(
"--sample-frag-length-sigma", dest="sample_frag_len_sigma", default=30, type=int
)
group.add_argument(
"--sample-frag-length-min", dest="sample_frag_len_min", default=0, type=int
)
group.add_argument(
"--sample-frag-length-max", dest="sample_frag_len_max", default=500, type=int
)
group.add_argument(
"--sample-endogenous_mu", dest="sample_endog_mu", default=0.75, type=float
)
group.add_argument(
"--sample-endogenous_sigma", dest="sample_endog_sigma", default=0.10, type=float
)
group = parser.add_argument_group("Post mortem damage of samples")
group.add_argument("--damage", dest="damage", default=False, action="store_true")
group.add_argument("--damage-seed", dest="damage_seed", default=None)
group.add_argument(
"--damage-lambda", dest="damage_lambda", default=0.25, type=float
)
group = parser.add_argument_group("Libraries from samples")
group.add_argument("--library-seed", dest="library_seed", default=None)
group.add_argument(
"--library-pcr-lambda", dest="library_pcr_lambda", default=3, type=float
)
group.add_argument("--library-barcode", dest="library_barcode", default=None)
group = parser.add_argument_group("Lanes from libraries")
group.add_argument("--lanes", dest="lanes_num", default=3, type=int)
group.add_argument(
"--lanes-reads-mu", dest="lanes_reads_mu", default=10000, type=int
)
group.add_argument(
"--lanes-reads-sigma", dest="lanes_reads_sigma", default=2500, type=int
)
group.add_argument(
"--lanes-reads-per-file", dest="lanes_per_file", default=2500, type=int
)
group = parser.add_argument_group("Reads from lanes")
group.add_argument(
"--reads-sub-rate", dest="reads_sub_rate", default=0.005, type=float
)
group.add_argument(
"--reads-indel-rate", dest="reads_indel_rate", default=0.0005, type=float
)
group.add_argument(
"--reads-indel-lambda", dest="reads_indel_lambda", default=0.9, type=float
)
group.add_argument("--reads-length", dest="reads_len", default=100, type=int)
return cast(Args, parser.parse_args(argv))
def main(argv: List[str]) -> int:
options = parse_args(argv)
print("Generating %i lane(s) of synthetic reads" % (options.lanes_num,))
specimen = Specimen(options, options.fasta)
sample = Sample(options, specimen)
damage = Damage(options, sample)
library = Library(options, damage)
for (lnum, lane) in enumerate(library.lanes, start=1):
fragments = fragment(options.lanes_per_file, lane.sequences)
for (readsnum, reads) in enumerate(fragments, start=1):
templ = "%s%s_L%i_R%%s_%02i.fastq.gz" % (
options.output_prefix,
library.barcode,
lnum,
readsnum,
)
print(" Writing %s" % (templ % "{Pair}",))
with gzip.open(templ % 1, "wt") as out_1:
with gzip.open(templ % 2, "wt") as out_2:
for (name, seq_1, seq_2) in reads:
out_1.write("@%s%s/1\n%s\n" % (library.barcode, name, seq_1))
out_1.write("+\n%s\n" % ("I" * len(seq_1),))
out_2.write("@%s%s/2\n%s\n" % (library.barcode, name, seq_2))
out_2.write("+\n%s\n" % ("H" * len(seq_2),))
return 0
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
|
MikkelSchubert/paleomix
|
paleomix/resources/examples/phylo_pipeline/synthesize_reads.py
|
Python
|
mit
| 16,296
|
def y():
raise TypeError
def x():
y()
try:
x()
except TypeError:
print('x')
|
paopao74cn/noworkflow
|
tests/test_exception.py
|
Python
|
mit
| 84
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2016 Mark Spicer
# Made available under the MIT license.
import time
import freshroastsr700
class Roaster(object):
def __init__(self):
"""Creates a freshroastsr700 object passing in methods included in this
class."""
self.roaster = freshroastsr700.freshroastsr700(
self.update_data, self.next_state, thermostat=True)
def update_data(self):
"""This is a method that will be called every time a packet is opened
from the roaster."""
print("Current Temperature:", self.roaster.current_temp)
def next_state(self):
"""This is a method that will be called when the time remaining ends.
The current state can be: roasting, cooling, idle, sleeping, connecting,
or unkown."""
if(self.roaster.get_roaster_state() == 'roasting'):
self.roaster.time_remaining = 20
self.roaster.cool()
elif(self.roaster.get_roaster_state() == 'cooling'):
self.roaster.idle()
# Create a roaster object.
r = Roaster()
# Connect to the roaster.
r.roaster.auto_connect()
# Wait for the roaster to be connected.
while(r.roaster.connected is False):
print("Please connect your roaster...")
time.sleep(1)
# Set variables.
r.roaster.target_temp = 320
r.roaster.fan_speed = 9
r.roaster.time_remaining = 40
# Begin roasting.
r.roaster.roast()
# This ensures the example script does not end before the roast.
time.sleep(80)
# Disconnect from the roaster.
r.roaster.disconnect()
|
Roastero/freshroastsr700
|
examples/advanced.py
|
Python
|
mit
| 1,551
|
'''
Provide pre-/postconditions as function decorators.
Example usage:
>>> def in_ge20(inval):
... assert inval >= 20, 'Input value < 20'
...
>>> def out_lt30(retval, inval):
... assert retval < 30, 'Return value >= 30'
...
>>> @precondition(in_ge20)
... @postcondition(out_lt30)
... def inc(value):
... return value + 1
...
>>> inc(5)
Traceback (most recent call last):
...
AssertionError: Input value < 20
>>> inc(29)
Traceback (most recent call last):
...
AssertionError: Return value >= 30
>>> inc(20)
21
You can define as many pre-/postconditions for a function as you
like. It is also possible to specify both types of conditions at once:
>>> @conditions(in_ge20, out_lt30)
... def add1(value):
... return value + 1
...
>>> add1(5)
Traceback (most recent call last):
...
AssertionError: Input value < 20
An interesting feature is the ability to prevent the creation of
pre-/postconditions at function definition time. This makes it
possible to use conditions for debugging and then switch them off for
distribution.
>>> debug = False
>>> @precondition(in_ge20, debug)
... def dec(value):
... return value - 1
...
>>> dec(5)
4
'''
__all__ = ['precondition', 'postcondition', 'conditions']
DEFAULT_ON = True
def precondition(precondition, use_conditions=DEFAULT_ON):
return conditions(precondition, None, use_conditions)
def postcondition(postcondition, use_conditions=DEFAULT_ON):
return conditions(None, postcondition, use_conditions)
class conditions(object):
__slots__ = ('__precondition', '__postcondition')
def __init__(self, pre, post, use_conditions=DEFAULT_ON):
if not use_conditions:
pre, post = None, None
self.__precondition = pre
self.__postcondition = post
def __call__(self, function):
# combine recursive wrappers (@precondition + @postcondition ==
# @conditions)
pres = set((self.__precondition,))
posts = set((self.__postcondition,))
# unwrap function, collect distinct pre-/post conditions
while type(function) is FunctionWrapper:
pres.add(function._pre)
posts.add(function._post)
function = function._func
# filter out None conditions and build pairs of pre- and postconditions
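        # NOTE: map(None, a, b) is a Python 2 idiom that zips the iterables,
        # padding with None; a Python 3 port would use itertools.zip_longest.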
conditions = map(None, filter(None, pres), filter(None, posts))
# add a wrapper for each pair (note that 'conditions' may be empty)
for pre, post in conditions:
function = FunctionWrapper(pre, post, function)
return function
class FunctionWrapper(object):
def __init__(self, precondition, postcondition, function):
self._pre = precondition
self._post = postcondition
self._func = function
def __call__(self, *args, **kwargs):
precondition = self._pre
postcondition = self._post
if precondition:
precondition(*args, **kwargs)
result = self._func(*args, **kwargs)
if postcondition:
postcondition(result, *args, **kwargs)
return result
def __test():
import doctest
doctest.testmod()
if __name__ == "__main__":
__test()
|
spradeepv/dive-into-python
|
decorators/pre_post_decorator.py
|
Python
|
mit
| 3,245
|
import numpy
def shift(mtx, offset):
''' Circular shift 2D matrix samples by OFFSET (a [Y,X] 2-tuple),
such that RES(POS) = MTX(POS-OFFSET). '''
dims = mtx.shape
if len(dims) == 1:
mtx = mtx.reshape((1, dims[0]))
dims = mtx.shape
offset = numpy.mod(numpy.negative(offset), dims)
top = numpy.column_stack((mtx[offset[0]:dims[0], offset[1]:dims[1]],
mtx[offset[0]:dims[0], 0:offset[1]]))
bottom = numpy.column_stack((mtx[0:offset[0], offset[1]:dims[1]],
mtx[0:offset[0], 0:offset[1]]))
ret = numpy.concatenate((top, bottom), axis=0)
return ret
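# A minimal usage sketch (added for illustration, not part of the original
# module). Shifting by (1, 0) moves every row down one position with
# wrap-around, since RES(POS) = MTX(POS-OFFSET).
if __name__ == '__main__':
    m = numpy.arange(9).reshape(3, 3)
    print(shift(m, (1, 0)))  # row order becomes [2, 0, 1]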
|
tochikuji/pyPyrTools
|
pyrtools/shift.py
|
Python
|
mit
| 670
|
# Read output files and make spectrogram
import pylab as pl
import pyfits as pf
import os
import scipy.stats as stats
import scipy.signal as sig
import astronomy as ast
#import movingaverage as MA
X = pl.load('ec2117ans_2_cc.dat')
files = os.listdir(os.curdir)
files.sort()
ff = []
for f in files:
name,ext = os.path.splitext(f)
if name[:2] == 'EC':
ff.append(f)
# calculate phase based on eclipse ephemeris
T0 = 2453964.3307097
P = 0.1545255
# time between spectra
dtspec = float(pf.getheader(ff[1])['HJD']) - float(pf.getheader(ff[0])['HJD'])
P2 = dtspec*24.0
print 'P2 = ',P2
imHa = []
imHb = []
imHe = []
phase = []
print 'Calculating phase...\n'
for i in range(len(ff[:-2])):
temp = ((float(pf.getheader(ff[i])['HJD']) - T0)/P)
phase.append(temp)
print '...Done!\n'
# now get the phases, sort them and find the order of numbers.
phase = pl.array(phase)
phase2 = (pl.array(phase) - phase[0]) / P2
phase2,argsort = stats.fastsort(pl.array(phase2))
ave = pf.getdata('average.fits')
# speed of light in km/s
c = 2.99792458e5
v = 1500.0
for i in ff[:-2]:
# subtract average from spectrum
data = pf.getdata(i) #- ave
head = pf.getheader(i)
# write average subtracted spectrum to new fits file
#pf.writeto('avesub%s'%i,data=data,header=head)
start = head['CRVAL1']
step = head['CDELT1']
length = head['NAXIS1']
x = start + pl.arange(0,length)*step
# hydrogen alpha
dl = v/c*6563.0
w1 = x > 6563 - dl
w2 = x < 6563 + dl
imHa.append((data[w1*w2]))
dl = v/c*4860.0
w1 = x > 4860 - dl
w2 = x < 4860 + dl
imHb.append(data[w1*w2])
dl = v/c*4686
w1 = x > 4686 - dl
w2 = x < 4686 + dl
imHe.append(data[w1*w2])
print i
# run moving average
def sub_mov_ave(im,L):
# function takes trailed spectra and subtracts moving average of length L
def movingaverage(x,L):
ma = pl.zeros(len(x),dtype='Float64')
# must take the lead-up zone into account (prob slow)
for i in range(0,L):
ma[i] = pl.average(x[0:i+1])
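        # O(1) rolling update: add the sample entering the window and drop
        # the one leaving it.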
for i in range(L,len(x)):
ma[i] = ma[i-1] + 1.0/L*(x[i]-x[i-L])
return ma
def medfilt(x,L):
ma = sig.medfilt(x,L)
return ma
im_new = pl.array(im).copy()
im_new[:] = 0.0
im=pl.array(im)
s = im.shape
for i in range(s[1]):
im_new[:,i] = medfilt(im[:,i],L)
return im-im_new
L = 51
imHa = pl.array(imHa)
imHb = pl.array(imHb)
imHe = pl.array(imHe)
#imHa = sub_mov_ave(imHa,L)
#imHb = sub_mov_ave(imHb,L)
#imHe = sub_mov_ave(imHe,L)
def sigclip(im,nsig):
# returns min and max values of image inside nsig sigmas
temp = im.ravel()
sd = pl.std(temp)
m = pl.average(temp)
gt = temp > m-nsig*sd
lt = temp < m+nsig*sd
temp = temp[gt*lt]
mini = min(temp)
maxi = max(temp)
return mini,maxi
extent = (-1*v,v,max(phase),min(phase))
# H alpha
pl.figure(figsize=(7,4))
pl.subplots_adjust(wspace=0.001)
#pl.gray()
ax1 = pl.subplot(141)
mini,maxi = sigclip(pl.array(imHa),3)
pl.imshow(pl.array(imHa),vmin=mini,vmax=maxi,aspect='auto',cmap=pl.cm.gray_r,extent=extent,interpolation='bilinear')
# plot the deblended velocities
dat = pl.load('Ha_deblend.dat')
#pl.plot(dat[:,1], dat[:,0], 'yo-')
#pl.plot(dat[:,2], dat[:,0], 'yo-')
pl.xlabel('Velocity (km/s)')
pl.ylabel('Orbital phase')
pl.title(r'$H_{\alpha}$')
pl.xticks(pl.arange(-v,1.2*v,1000)[1:-1])
# H beta
ax2 = pl.subplot(142,sharey=ax1)
mini,maxi = sigclip(pl.array(imHb),3)
pl.imshow(pl.array(imHb),vmin=mini,vmax=maxi,aspect='auto',cmap=pl.cm.gray_r,interpolation='bilinear',extent=extent)
# plot the deblended velocities
dat = pl.load('Hb_deblend.dat')
#pl.plot(dat[:,1], dat[:,0], 'yo-')
#pl.plot(dat[:,2], dat[:,0], 'yo-')
#pl.colorbar()
pl.xlabel('Velocity (km/s)')
pl.title(r'$H_{\beta}$')
pl.xticks(pl.arange(-v,1.2*v,1000)[1:-1])
#pl.figure(figsize=(8,8))
ax3 = pl.subplot(143,sharey=ax1)
mini,maxi = sigclip(pl.array(imHe),3)
pl.imshow(pl.array(imHe),vmin=mini,vmax=maxi,aspect='auto',cmap=pl.cm.gray_r,interpolation='bilinear',extent=extent)
#pl.colorbar()
pl.xlabel('Velocity (km/s)')
#pl.ylabel('Orbital phase')
pl.title(r'$He_{II}$')
pl.xticks(pl.arange(-v,1.2*v,1000)[1:-1])
ax4 = pl.subplot(144,sharey=ax1)
pl.xlabel('Magnitude')
pl.title('Photometry')
T0 -= 2453964.0
photphase = (X[:,0] - T0) / P
pl.plot(X[:,2],photphase,'k.')
pl.ylim(extent[2],extent[3])
xt = pl.xticks()
pl.xticks(xt[0][2:-1:3])
yticklabels = ax2.get_yticklabels() + ax3.get_yticklabels() + ax4.get_yticklabels()
pl.setp(yticklabels, visible=False)
pl.savefig('specgram.png')
#Lines = {}
#Lines['Ha'] = imHa
#Lines['Hb'] = imHb
#Lines['He'] = imHe
## make lightcurves for Ha, Hb and He lines
#print 'Creating lightcurves'
#for line in Lines:
#print line
#pl.figure()
#pl.subplot(211)
#pl.title(line)
#lc = Lines[line].sum(axis=1)
#pl.plot(phase2,lc)
#pl.xlabel('Orbital phase')
#pl.ylabel('Intensity')
#pl.subplot(212)
#f,a = ast.signal.dft((phase*P + T0),lc,0,4000,1)
#pl.plot(f,a)
pl.show()
|
ezietsman/msc-thesis
|
spectroscopy/final/pyspecgram.py
|
Python
|
mit
| 5,152
|
# settings/base.py
'''
THIS PROJECT IS NOW USING DJANGO VERSION 1.11
THIS PROJECT IS NOW USING DJANGO VERSION 2.0
THIS PROJECT IS NOW USING DJANGO VERSION 2.1
'''
import os
import json
# Normally we wouldn't import ANYTHING from Django directly
# into our settings, but ImproperlyConfigured is an exception.
from django.core.exceptions import ImproperlyConfigured
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('SECRET_KEY')
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
# DJANGO
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# LOCAL
'core',
'accounts',
'datasets',
'keywords',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'main.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'main.wsgi.application'
AUTH_USER_MODEL = 'core.User'
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/Berlin'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
# This may be changed to use a CDN; that setting probably belongs in the
# production settings file.
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, '../spatialdatahub.org-static/build'),
]
# Set STATIC_ROOT to the directory the static files should be served from.
# Just outside BASE_DIR may be fine for development, but the deployed
# version should probably use a dedicated location.
#STATIC_ROOT = os.path.join(os.path.dirname(BASE_DIR), 'static_root')
# Redirect to home URL after login (Default redirects to /accounts/profile/)
LOGIN_URL = '/login/'
LOGIN_REDIRECT_URL = '/'
# This backend is a placeholder until the real email setup is in place.
# EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# Email currently uses a dummy Gmail account. These values must be moved to
# environment variables in the AWS Elastic Beanstalk setup, and the dummy
# account should then be destroyed.
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = os.environ.get('EMAIL_HOST')
EMAIL_PORT = os.environ.get('EMAIL_PORT')
EMAIL_HOST_USER = os.environ.get('EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = os.environ.get('EMAIL_HOST_PASSWORD')
DEFAULT_FROM_EMAIL = os.environ.get('DEFAULT_FROM_EMAIL')
EMAIL_USE_TLS = True
|
patcurry/WebGIS
|
main/settings/base.py
|
Python
|
mit
| 4,432
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class PoolResizeOptions(Model):
"""Additional parameters for resize operation.
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
:param if_match: An ETag value associated with the version of the resource
known to the client. The operation will be performed only if the
resource's current ETag on the service exactly matches the value specified
by the client.
:type if_match: str
:param if_none_match: An ETag value associated with the version of the
resource known to the client. The operation will be performed only if the
resource's current ETag on the service does not match the value specified
by the client.
:type if_none_match: str
:param if_modified_since: A timestamp indicating the last modified time of
the resource known to the client. The operation will be performed only if
the resource on the service has been modified since the specified time.
:type if_modified_since: datetime
:param if_unmodified_since: A timestamp indicating the last modified time
of the resource known to the client. The operation will be performed only
if the resource on the service has not been modified since the specified
time.
:type if_unmodified_since: datetime
"""
def __init__(self, timeout=30, client_request_id=None, return_client_request_id=False, ocp_date=None, if_match=None, if_none_match=None, if_modified_since=None, if_unmodified_since=None):
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
self.if_match = if_match
self.if_none_match = if_none_match
self.if_modified_since = if_modified_since
self.if_unmodified_since = if_unmodified_since
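# A minimal usage sketch (illustrative only; the ETag value is an assumption,
# and msrest must be installed). Passing if_match makes the resize
# conditional on the pool's current ETag matching the client's cached value.
if __name__ == '__main__':
    opts = PoolResizeOptions(timeout=60, if_match='0x8D2C2D4F1A2B3C4')
    print(opts.timeout, opts.if_match)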
|
AutorestCI/azure-sdk-for-python
|
azure-batch/azure/batch/models/pool_resize_options.py
|
Python
|
mit
| 3,069
|
import warnings
from braintree.util.http import Http
from braintree.successful_result import SuccessfulResult
from braintree.error_result import ErrorResult
from braintree.resource import Resource
from braintree.credit_card import CreditCard
from braintree.address import Address
from braintree.configuration import Configuration
from braintree.ids_search import IdsSearch
from braintree.exceptions.not_found_error import NotFoundError
from braintree.resource_collection import ResourceCollection
from braintree.transparent_redirect import TransparentRedirect
class Customer(Resource):
"""
A class representing a customer.
    An example of creating a customer with all available fields::
result = braintree.Customer.create({
"id": "my_customer_id",
"company": "Some company",
"email": "john.doe@example.com",
"fax": "123-555-1212",
"first_name": "John",
"last_name": "Doe",
"phone": "123-555-1221",
"website": "http://www.example.com",
"credit_card": {
"cardholder_name": "John Doe",
"cvv": "123",
"expiration_date": "12/2012",
"number": "4111111111111111",
"token": "my_token",
"billing_address": {
"first_name": "John",
"last_name": "Doe",
"company": "Braintree",
"street_address": "111 First Street",
"extended_address": "Unit 1",
"locality": "Chicago",
"postal_code": "60606",
"region": "IL",
"country_name": "United States of America"
},
"options": {
"verify_card": True
}
},
"custom_fields": {
"my_key": "some value"
}
})
print(result.customer.id)
print(result.customer.first_name)
For more information on Customers, see http://www.braintreepaymentsolutions.com/gateway/customer-api
"""
@staticmethod
def all():
""" Return a collection of all customers. """
return Configuration.gateway().customer.all()
@staticmethod
def confirm_transparent_redirect(query_string):
"""
Confirms a transparent redirect request. It expects the query string from the
redirect request. The query string should _not_ include the leading "?" character. ::
result = braintree.Customer.confirm_transparent_redirect_request("foo=bar&id=12345")
"""
warnings.warn("Please use TransparentRedirect.confirm instead", DeprecationWarning)
return Configuration.gateway().customer.confirm_transparent_redirect(query_string)
@staticmethod
def create(params={}):
"""
Create a Customer::
result = braintree.Customer.create({
"company": "Some company",
"first_name": "John"
})
"""
return Configuration.gateway().customer.create(params)
@staticmethod
def delete(customer_id):
"""
Delete a customer, given a customer_id::
result = braintree.Customer.delete("my_customer_id")
"""
return Configuration.gateway().customer.delete(customer_id)
@staticmethod
def find(customer_id):
"""
        Find a customer, given a customer_id. This does not return a result
object. This will raise a :class:`NotFoundError <braintree.exceptions.not_found_error.NotFoundError>` if the provided customer_id
is not found. ::
customer = braintree.Customer.find("my_customer_id")
"""
return Configuration.gateway().customer.find(customer_id)
@staticmethod
def search(*query):
return Configuration.gateway().customer.search(*query)
@staticmethod
def tr_data_for_create(tr_data, redirect_url):
""" Builds tr_data for creating a Customer. """
return Configuration.gateway().customer.tr_data_for_create(tr_data, redirect_url)
@staticmethod
def tr_data_for_update(tr_data, redirect_url):
""" Builds tr_data for updating a Customer. """
return Configuration.gateway().customer.tr_data_for_update(tr_data, redirect_url)
@staticmethod
def transparent_redirect_create_url():
""" Returns the url to use for creating Customers through transparent redirect. """
warnings.warn("Please use TransparentRedirect.url instead", DeprecationWarning)
return Configuration.gateway().customer.transparent_redirect_create_url()
@staticmethod
def transparent_redirect_update_url():
""" Returns the url to use for updating Customers through transparent redirect. """
warnings.warn("Please use TransparentRedirect.url instead", DeprecationWarning)
return Configuration.gateway().customer.transparent_redirect_update_url()
@staticmethod
def update(customer_id, params={}):
"""
Update an existing Customer by customer_id. The params are similar to create::
result = braintree.Customer.update("my_customer_id", {
"last_name": "Smith"
})
"""
return Configuration.gateway().customer.update(customer_id, params)
@staticmethod
def create_signature():
return [
"company", "email", "fax", "first_name", "id", "last_name", "phone", "website",
{"credit_card": CreditCard.create_signature()},
{"custom_fields": ["__any_key__"]}
]
@staticmethod
def update_signature():
return [
"company", "email", "fax", "first_name", "id", "last_name", "phone", "website",
{"credit_card": CreditCard.signature("update_via_customer")},
{"custom_fields": ["__any_key__"]}
]
def __init__(self, gateway, attributes):
Resource.__init__(self, gateway, attributes)
if "credit_cards" in attributes:
self.credit_cards = [CreditCard(gateway, credit_card) for credit_card in self.credit_cards]
if "addresses" in attributes:
self.addresses = [Address(gateway, address) for address in self.addresses]
|
eldarion/braintree_python
|
braintree/customer.py
|
Python
|
mit
| 6,348
|
# -*- coding: utf-8 -*-
import unittest
from nose.plugins.skip import SkipTest
try:
from urllib3.contrib.pyopenssl import (inject_into_urllib3,
extract_from_urllib3)
except ImportError as e:
raise SkipTest('Could not import PyOpenSSL: %r' % e)
from mock import patch, Mock
class TestPyOpenSSLInjection(unittest.TestCase):
"""
    Tests for error handling in pyopenssl's 'inject_into_urllib3'
"""
def test_inject_validate_fail_cryptography(self):
"""
Injection should not be supported if cryptography is too old.
"""
try:
with patch("cryptography.x509.extensions.Extensions") as mock:
del mock.get_extension_for_class
self.assertRaises(ImportError, inject_into_urllib3)
finally:
# `inject_into_urllib3` is not supposed to succeed.
# If it does, this test should fail, but we need to
# clean up so that subsequent tests are unaffected.
extract_from_urllib3()
def test_inject_validate_fail_pyopenssl(self):
"""
Injection should not be supported if pyOpenSSL is too old.
"""
try:
return_val = Mock()
del return_val._x509
with patch("OpenSSL.crypto.X509", return_value=return_val):
self.assertRaises(ImportError, inject_into_urllib3)
finally:
# `inject_into_urllib3` is not supposed to succeed.
# If it does, this test should fail, but we need to
# clean up so that subsequent tests are unaffected.
extract_from_urllib3()
|
Lukasa/urllib3
|
test/contrib/test_pyopenssl_dependencies.py
|
Python
|
mit
| 1,662
|
import sys
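# Evaluate each stdin line as an arithmetic expression, after stripping ';'
# and ',' separators, and print the result to two decimal places.
# Note: eval() on raw input is only safe in a controlled exercise like this.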
for line in sys.stdin:
try:
print("%.2f" % eval(line.replace(";", "").replace(",", "")))
except Exception:
print("Expressao incorreta")
|
vitorgt/SCC0202
|
t1_calculadora_de_expressoes_aritimeticas.py
|
Python
|
mit
| 172
|
# -*-coding:utf-8 -*
import pygame
from pygame.locals import *
NOM_CARTE_LANCEMENT = "LD26-Ferme"
DOSSIER_RESSOURCES = "Ressources"
FENETRE = dict()
FENETRE["messageErreurInitialisationPygame"]="Une erreur s'est produite durant l'initialisation de Pygame, le programme doit donc se fermer."
FENETRE["messageErreurInitialisationFenetre"]="Une erreur s'est produite durant l'initialisation de la fenêtre, le programme doit donc se fermer."
FENETRE["longueurFenetre"] = 512
FENETRE["largeurFenetre"] = 384
FENETRE["largeurFenetreReelle"] = 416
FENETRE["couleurFenetre"] = (0,0,0) ##Couleur de fond de la fenêtre (hors zones spéciales comme tileset, outils...)
FENETRE["titreFenetre"] = "A Scholar In The Woods"
FENETRE["flagsFenetre"] = DOUBLEBUF#|FULLSCREEN|HWSURFACE
FENETRE["forceDirectX"] = False
VITESSE_PENSEE_PAR_DEFAUT = 40
TEMPS_LECTURE_PENSEE = 2000
FICHIER_ICONE = "Narro.ico"
VITESSE_DEPLACEMENT_JOUEUR_PAR_DEFAUT = 170
LIMITER_FPS = False
NOMBRE_MAX_DE_FPS = 60
|
Rastagong/A-Scholar-In-The-Woods
|
Trunk/constantes.py
|
Python
|
mit
| 983
|
##@file flp.py
#@brief model for solving the capacitated facility location problem
"""
minimize the total (weighted) travel cost from n customers
to some facilities with fixed costs and capacities.
Copyright (c) by Joao Pedro PEDROSO and Mikio KUBO, 2012
"""
from pyscipopt import Model, quicksum, multidict
def flp(I,J,d,M,f,c):
"""flp -- model for the capacitated facility location problem
Parameters:
- I: set of customers
- J: set of facilities
- d[i]: demand for customer i
- M[j]: capacity of facility j
- f[j]: fixed cost for using a facility in point j
- c[i,j]: unit cost of servicing demand point i from facility j
Returns a model, ready to be solved.
"""
model = Model("flp")
x,y = {},{}
for j in J:
y[j] = model.addVar(vtype="B", name="y(%s)"%j)
for i in I:
x[i,j] = model.addVar(vtype="C", name="x(%s,%s)"%(i,j))
for i in I:
model.addCons(quicksum(x[i,j] for j in J) == d[i], "Demand(%s)"%i)
for j in M:
        model.addCons(quicksum(x[i,j] for i in I) <= M[j]*y[j], "Capacity(%s)"%j)
for (i,j) in x:
model.addCons(x[i,j] <= d[i]*y[j], "Strong(%s,%s)"%(i,j))
model.setObjective(
quicksum(f[j]*y[j] for j in J) +
quicksum(c[i,j]*x[i,j] for i in I for j in J),
"minimize")
model.data = x,y
return model
def make_data():
"""creates example data set"""
I,d = multidict({1:80, 2:270, 3:250, 4:160, 5:180}) # demand
J,M,f = multidict({1:[500,1000], 2:[500,1000], 3:[500,1000]}) # capacity, fixed costs
c = {(1,1):4, (1,2):6, (1,3):9, # transportation costs
(2,1):5, (2,2):4, (2,3):7,
(3,1):6, (3,2):3, (3,3):4,
(4,1):8, (4,2):5, (4,3):3,
(5,1):10, (5,2):8, (5,3):4,
}
return I,J,d,M,f,c
if __name__ == "__main__":
I,J,d,M,f,c = make_data()
model = flp(I,J,d,M,f,c)
model.optimize()
EPS = 1.e-6
x,y = model.data
edges = [(i,j) for (i,j) in x if model.getVal(x[i,j]) > EPS]
facilities = [j for j in y if model.getVal(y[j]) > EPS]
print("Optimal value:", model.getObjVal())
print("Facilities at nodes:", facilities)
print("Edges:", edges)
try: # plot the result using networkx and matplotlib
import networkx as NX
import matplotlib.pyplot as P
P.clf()
G = NX.Graph()
other = [j for j in y if j not in facilities]
customers = ["c%s"%i for i in d]
G.add_nodes_from(facilities)
G.add_nodes_from(other)
G.add_nodes_from(customers)
for (i,j) in edges:
G.add_edge("c%s"%i,j)
position = NX.drawing.layout.spring_layout(G)
NX.draw(G,position,node_color="y",nodelist=facilities)
NX.draw(G,position,node_color="g",nodelist=other)
NX.draw(G,position,node_color="b",nodelist=customers)
P.show()
except ImportError:
print("install 'networkx' and 'matplotlib' for plotting")
|
SCIP-Interfaces/PySCIPOpt
|
examples/finished/flp.py
|
Python
|
mit
| 3,029
|
# -*- coding: utf-8 -*-
__all__ = ['Distribution']
import io
import sys
import re
import os
import warnings
import numbers
import distutils.log
import distutils.core
import distutils.cmd
import distutils.dist
import distutils.command
from distutils.util import strtobool
from distutils.debug import DEBUG
from distutils.fancy_getopt import translate_longopt
import itertools
from collections import defaultdict
from email import message_from_file
from distutils.errors import DistutilsOptionError, DistutilsSetupError
from distutils.util import rfc822_escape
from distutils.version import StrictVersion
from setuptools.extern import packaging
from setuptools.extern import ordered_set
from . import SetuptoolsDeprecationWarning
import setuptools
import setuptools.command
from setuptools import windows_support
from setuptools.monkey import get_unpatched
from setuptools.config import parse_configuration
import pkg_resources
__import__('setuptools.extern.packaging.specifiers')
__import__('setuptools.extern.packaging.version')
def _get_unpatched(cls):
warnings.warn("Do not call this function", DistDeprecationWarning)
return get_unpatched(cls)
def get_metadata_version(self):
mv = getattr(self, 'metadata_version', None)
if mv is None:
if self.long_description_content_type or self.provides_extras:
mv = StrictVersion('2.1')
elif (self.maintainer is not None or
self.maintainer_email is not None or
getattr(self, 'python_requires', None) is not None or
self.project_urls):
mv = StrictVersion('1.2')
elif (self.provides or self.requires or self.obsoletes or
self.classifiers or self.download_url):
mv = StrictVersion('1.1')
else:
mv = StrictVersion('1.0')
self.metadata_version = mv
return mv
def read_pkg_file(self, file):
"""Reads the metadata values from a file object."""
msg = message_from_file(file)
def _read_field(name):
value = msg[name]
if value == 'UNKNOWN':
return None
return value
def _read_list(name):
values = msg.get_all(name, None)
if values == []:
return None
return values
self.metadata_version = StrictVersion(msg['metadata-version'])
self.name = _read_field('name')
self.version = _read_field('version')
self.description = _read_field('summary')
# we are filling author only.
self.author = _read_field('author')
self.maintainer = None
self.author_email = _read_field('author-email')
self.maintainer_email = None
self.url = _read_field('home-page')
self.license = _read_field('license')
if 'download-url' in msg:
self.download_url = _read_field('download-url')
else:
self.download_url = None
self.long_description = _read_field('description')
self.description = _read_field('summary')
if 'keywords' in msg:
self.keywords = _read_field('keywords').split(',')
self.platforms = _read_list('platform')
self.classifiers = _read_list('classifier')
# PEP 314 - these fields only exist in 1.1
if self.metadata_version == StrictVersion('1.1'):
self.requires = _read_list('requires')
self.provides = _read_list('provides')
self.obsoletes = _read_list('obsoletes')
else:
self.requires = None
self.provides = None
self.obsoletes = None
def single_line(val):
# quick and dirty validation for description pypa/setuptools#1390
if '\n' in val:
# TODO after 2021-07-31: Replace with `raise ValueError("newlines not allowed")`
warnings.warn("newlines not allowed and will break in the future")
val = val.replace('\n', ' ')
return val
# Based on Python 3.5 version
def write_pkg_file(self, file): # noqa: C901 # is too complex (14) # FIXME
"""Write the PKG-INFO format data to a file object.
"""
version = self.get_metadata_version()
def write_field(key, value):
file.write("%s: %s\n" % (key, value))
write_field('Metadata-Version', str(version))
write_field('Name', self.get_name())
write_field('Version', self.get_version())
write_field('Summary', single_line(self.get_description()))
write_field('Home-page', self.get_url())
if version < StrictVersion('1.2'):
write_field('Author', self.get_contact())
write_field('Author-email', self.get_contact_email())
else:
optional_fields = (
('Author', 'author'),
('Author-email', 'author_email'),
('Maintainer', 'maintainer'),
('Maintainer-email', 'maintainer_email'),
)
for field, attr in optional_fields:
attr_val = getattr(self, attr)
if attr_val is not None:
write_field(field, attr_val)
write_field('License', self.get_license())
if self.download_url:
write_field('Download-URL', self.download_url)
for project_url in self.project_urls.items():
write_field('Project-URL', '%s, %s' % project_url)
long_desc = rfc822_escape(self.get_long_description())
write_field('Description', long_desc)
keywords = ','.join(self.get_keywords())
if keywords:
write_field('Keywords', keywords)
if version >= StrictVersion('1.2'):
for platform in self.get_platforms():
write_field('Platform', platform)
else:
self._write_list(file, 'Platform', self.get_platforms())
self._write_list(file, 'Classifier', self.get_classifiers())
# PEP 314
self._write_list(file, 'Requires', self.get_requires())
self._write_list(file, 'Provides', self.get_provides())
self._write_list(file, 'Obsoletes', self.get_obsoletes())
# Setuptools specific for PEP 345
if hasattr(self, 'python_requires'):
write_field('Requires-Python', self.python_requires)
# PEP 566
if self.long_description_content_type:
write_field(
'Description-Content-Type',
self.long_description_content_type
)
if self.provides_extras:
for extra in self.provides_extras:
write_field('Provides-Extra', extra)
sequence = tuple, list
def check_importable(dist, attr, value):
try:
ep = pkg_resources.EntryPoint.parse('x=' + value)
assert not ep.extras
except (TypeError, ValueError, AttributeError, AssertionError) as e:
raise DistutilsSetupError(
"%r must be importable 'module:attrs' string (got %r)"
% (attr, value)
) from e
def assert_string_list(dist, attr, value):
"""Verify that value is a string list"""
try:
# verify that value is a list or tuple to exclude unordered
# or single-use iterables
assert isinstance(value, (list, tuple))
# verify that elements of value are strings
assert ''.join(value) != value
except (TypeError, ValueError, AttributeError, AssertionError) as e:
raise DistutilsSetupError(
"%r must be a list of strings (got %r)" % (attr, value)
) from e
def check_nsp(dist, attr, value):
"""Verify that namespace packages are valid"""
ns_packages = value
assert_string_list(dist, attr, ns_packages)
for nsp in ns_packages:
if not dist.has_contents_for(nsp):
raise DistutilsSetupError(
"Distribution contains no modules or packages for " +
"namespace package %r" % nsp
)
parent, sep, child = nsp.rpartition('.')
if parent and parent not in ns_packages:
distutils.log.warn(
"WARNING: %r is declared as a package namespace, but %r"
" is not: please correct this in setup.py", nsp, parent
)
def check_extras(dist, attr, value):
"""Verify that extras_require mapping is valid"""
try:
list(itertools.starmap(_check_extra, value.items()))
except (TypeError, ValueError, AttributeError) as e:
raise DistutilsSetupError(
"'extras_require' must be a dictionary whose values are "
"strings or lists of strings containing valid project/version "
"requirement specifiers."
) from e
def _check_extra(extra, reqs):
name, sep, marker = extra.partition(':')
if marker and pkg_resources.invalid_marker(marker):
raise DistutilsSetupError("Invalid environment marker: " + marker)
list(pkg_resources.parse_requirements(reqs))
def assert_bool(dist, attr, value):
"""Verify that value is True, False, 0, or 1"""
if bool(value) != value:
tmpl = "{attr!r} must be a boolean value (got {value!r})"
raise DistutilsSetupError(tmpl.format(attr=attr, value=value))
def check_requirements(dist, attr, value):
"""Verify that install_requires is a valid requirements list"""
try:
list(pkg_resources.parse_requirements(value))
if isinstance(value, (dict, set)):
raise TypeError("Unordered types are not allowed")
except (TypeError, ValueError) as error:
tmpl = (
"{attr!r} must be a string or list of strings "
"containing valid project/version requirement specifiers; {error}"
)
raise DistutilsSetupError(
tmpl.format(attr=attr, error=error)
) from error
def check_specifier(dist, attr, value):
"""Verify that value is a valid version specifier"""
try:
packaging.specifiers.SpecifierSet(value)
except (packaging.specifiers.InvalidSpecifier, AttributeError) as error:
tmpl = (
"{attr!r} must be a string "
"containing valid version specifiers; {error}"
)
raise DistutilsSetupError(
tmpl.format(attr=attr, error=error)
) from error
def check_entry_points(dist, attr, value):
"""Verify that entry_points map is parseable"""
try:
pkg_resources.EntryPoint.parse_map(value)
except ValueError as e:
raise DistutilsSetupError(e) from e
def check_test_suite(dist, attr, value):
if not isinstance(value, str):
raise DistutilsSetupError("test_suite must be a string")
def check_package_data(dist, attr, value):
"""Verify that value is a dictionary of package names to glob lists"""
if not isinstance(value, dict):
raise DistutilsSetupError(
"{!r} must be a dictionary mapping package names to lists of "
"string wildcard patterns".format(attr))
for k, v in value.items():
if not isinstance(k, str):
raise DistutilsSetupError(
"keys of {!r} dict must be strings (got {!r})"
.format(attr, k)
)
assert_string_list(dist, 'values of {!r} dict'.format(attr), v)
def check_packages(dist, attr, value):
for pkgname in value:
if not re.match(r'\w+(\.\w+)*', pkgname):
distutils.log.warn(
"WARNING: %r not a valid package name; please use only "
".-separated package names in setup.py", pkgname
)
_Distribution = get_unpatched(distutils.core.Distribution)
class Distribution(_Distribution):
"""Distribution with support for tests and package data
This is an enhanced version of 'distutils.dist.Distribution' that
effectively adds the following new optional keyword arguments to 'setup()':
'install_requires' -- a string or sequence of strings specifying project
versions that the distribution requires when installed, in the format
used by 'pkg_resources.require()'. They will be installed
automatically when the package is installed. If you wish to use
packages that are not available in PyPI, or want to give your users an
alternate download location, you can add a 'find_links' option to the
'[easy_install]' section of your project's 'setup.cfg' file, and then
setuptools will scan the listed web pages for links that satisfy the
requirements.
'extras_require' -- a dictionary mapping names of optional "extras" to the
additional requirement(s) that using those extras incurs. For example,
this::
extras_require = dict(reST = ["docutils>=0.3", "reSTedit"])
indicates that the distribution can optionally provide an extra
capability called "reST", but it can only be used if docutils and
reSTedit are installed. If the user installs your package using
EasyInstall and requests one of your extras, the corresponding
additional requirements will be installed if needed.
'test_suite' -- the name of a test suite to run for the 'test' command.
If the user runs 'python setup.py test', the package will be installed,
and the named test suite will be run. The format is the same as
would be used on a 'unittest.py' command line. That is, it is the
dotted name of an object to import and call to generate a test suite.
'package_data' -- a dictionary mapping package names to lists of filenames
or globs to use to find data files contained in the named packages.
If the dictionary has filenames or globs listed under '""' (the empty
string), those names will be searched for in every package, in addition
to any names for the specific package. Data files found using these
names/globs will be installed along with the package, in the same
location as the package. Note that globs are allowed to reference
the contents of non-package subdirectories, as long as you use '/' as
a path separator. (Globs are automatically converted to
platform-specific paths at runtime.)
In addition to these new keywords, this class also has several new methods
for manipulating the distribution's contents. For example, the 'include()'
and 'exclude()' methods can be thought of as in-place add and subtract
commands that add or remove packages, modules, extensions, and so on from
the distribution.
"""
_DISTUTILS_UNSUPPORTED_METADATA = {
'long_description_content_type': None,
'project_urls': dict,
'provides_extras': ordered_set.OrderedSet,
'license_files': ordered_set.OrderedSet,
}
_patched_dist = None
def patch_missing_pkg_info(self, attrs):
# Fake up a replacement for the data that would normally come from
# PKG-INFO, but which might not yet be built if this is a fresh
# checkout.
#
if not attrs or 'name' not in attrs or 'version' not in attrs:
return
key = pkg_resources.safe_name(str(attrs['name'])).lower()
dist = pkg_resources.working_set.by_key.get(key)
if dist is not None and not dist.has_metadata('PKG-INFO'):
dist._version = pkg_resources.safe_version(str(attrs['version']))
self._patched_dist = dist
def __init__(self, attrs=None):
have_package_data = hasattr(self, "package_data")
if not have_package_data:
self.package_data = {}
attrs = attrs or {}
self.dist_files = []
# Filter-out setuptools' specific options.
self.src_root = attrs.pop("src_root", None)
self.patch_missing_pkg_info(attrs)
self.dependency_links = attrs.pop('dependency_links', [])
self.setup_requires = attrs.pop('setup_requires', [])
for ep in pkg_resources.iter_entry_points('distutils.setup_keywords'):
vars(self).setdefault(ep.name, None)
_Distribution.__init__(self, {
k: v for k, v in attrs.items()
if k not in self._DISTUTILS_UNSUPPORTED_METADATA
})
# Fill-in missing metadata fields not supported by distutils.
# Note some fields may have been set by other tools (e.g. pbr)
        # above; they are taken preferentially to setup() arguments
for option, default in self._DISTUTILS_UNSUPPORTED_METADATA.items():
for source in self.metadata.__dict__, attrs:
if option in source:
value = source[option]
break
else:
value = default() if default else None
setattr(self.metadata, option, value)
self.metadata.version = self._normalize_version(
self._validate_version(self.metadata.version))
self._finalize_requires()
@staticmethod
def _normalize_version(version):
if isinstance(version, setuptools.sic) or version is None:
return version
normalized = str(packaging.version.Version(version))
if version != normalized:
tmpl = "Normalizing '{version}' to '{normalized}'"
warnings.warn(tmpl.format(**locals()))
return normalized
return version
@staticmethod
def _validate_version(version):
if isinstance(version, numbers.Number):
# Some people apparently take "version number" too literally :)
version = str(version)
if version is not None:
try:
packaging.version.Version(version)
except (packaging.version.InvalidVersion, TypeError):
warnings.warn(
"The version specified (%r) is an invalid version, this "
"may not work as expected with newer versions of "
"setuptools, pip, and PyPI. Please see PEP 440 for more "
"details." % version
)
return setuptools.sic(version)
return version
def _finalize_requires(self):
"""
Set `metadata.python_requires` and fix environment markers
in `install_requires` and `extras_require`.
"""
if getattr(self, 'python_requires', None):
self.metadata.python_requires = self.python_requires
if getattr(self, 'extras_require', None):
for extra in self.extras_require.keys():
# Since this gets called multiple times at points where the
# keys have become 'converted' extras, ensure that we are only
# truly adding extras we haven't seen before here.
extra = extra.split(':')[0]
if extra:
self.metadata.provides_extras.add(extra)
self._convert_extras_requirements()
self._move_install_requirements_markers()
def _convert_extras_requirements(self):
"""
Convert requirements in `extras_require` of the form
`"extra": ["barbazquux; {marker}"]` to
`"extra:{marker}": ["barbazquux"]`.
"""
spec_ext_reqs = getattr(self, 'extras_require', None) or {}
self._tmp_extras_require = defaultdict(list)
for section, v in spec_ext_reqs.items():
# Do not strip empty sections.
self._tmp_extras_require[section]
for r in pkg_resources.parse_requirements(v):
suffix = self._suffix_for(r)
self._tmp_extras_require[section + suffix].append(r)
@staticmethod
def _suffix_for(req):
"""
For a requirement, return the 'extras_require' suffix for
that requirement.
"""
return ':' + str(req.marker) if req.marker else ''
def _move_install_requirements_markers(self):
"""
Move requirements in `install_requires` that are using environment
markers `extras_require`.
"""
# divide the install_requires into two sets, simple ones still
# handled by install_requires and more complex ones handled
# by extras_require.
def is_simple_req(req):
return not req.marker
spec_inst_reqs = getattr(self, 'install_requires', None) or ()
inst_reqs = list(pkg_resources.parse_requirements(spec_inst_reqs))
simple_reqs = filter(is_simple_req, inst_reqs)
complex_reqs = itertools.filterfalse(is_simple_req, inst_reqs)
self.install_requires = list(map(str, simple_reqs))
for r in complex_reqs:
self._tmp_extras_require[':' + str(r.marker)].append(r)
self.extras_require = dict(
(k, [str(r) for r in map(self._clean_req, v)])
for k, v in self._tmp_extras_require.items()
)
def _clean_req(self, req):
"""
Given a Requirement, remove environment markers and return it.
"""
req.marker = None
return req
# FIXME: 'Distribution._parse_config_files' is too complex (14)
def _parse_config_files(self, filenames=None): # noqa: C901
"""
Adapted from distutils.dist.Distribution.parse_config_files,
this method provides the same functionality in subtly-improved
ways.
"""
from configparser import ConfigParser
# Ignore install directory options if we have a venv
ignore_options = [] if sys.prefix == sys.base_prefix else [
'install-base', 'install-platbase', 'install-lib',
'install-platlib', 'install-purelib', 'install-headers',
'install-scripts', 'install-data', 'prefix', 'exec-prefix',
'home', 'user', 'root',
]
ignore_options = frozenset(ignore_options)
if filenames is None:
filenames = self.find_config_files()
if DEBUG:
self.announce("Distribution.parse_config_files():")
parser = ConfigParser()
parser.optionxform = str
for filename in filenames:
with io.open(filename, encoding='utf-8') as reader:
if DEBUG:
self.announce(" reading {filename}".format(**locals()))
parser.read_file(reader)
for section in parser.sections():
options = parser.options(section)
opt_dict = self.get_option_dict(section)
for opt in options:
if opt == '__name__' or opt in ignore_options:
continue
val = parser.get(section, opt)
opt = self.warn_dash_deprecation(opt, section)
opt = self.make_option_lowercase(opt, section)
opt_dict[opt] = (filename, val)
# Make the ConfigParser forget everything (so we retain
# the original filenames that options come from)
parser.__init__()
if 'global' not in self.command_options:
return
# If there was a "global" section in the config file, use it
# to set Distribution options.
for (opt, (src, val)) in self.command_options['global'].items():
alias = self.negative_opt.get(opt)
if alias:
val = not strtobool(val)
elif opt in ('verbose', 'dry_run'): # ugh!
val = strtobool(val)
try:
setattr(self, alias or opt, val)
except ValueError as e:
raise DistutilsOptionError(e) from e
def warn_dash_deprecation(self, opt, section):
if section in (
'options.extras_require', 'options.data_files',
):
return opt
underscore_opt = opt.replace('-', '_')
commands = distutils.command.__all__ + setuptools.command.__all__
if (not section.startswith('options') and section != 'metadata'
and section not in commands):
return underscore_opt
if '-' in opt:
warnings.warn(
"Usage of dash-separated '%s' will not be supported in future "
"versions. Please use the underscore name '%s' instead"
% (opt, underscore_opt))
return underscore_opt
def make_option_lowercase(self, opt, section):
if section != 'metadata' or opt.islower():
return opt
lowercase_opt = opt.lower()
warnings.warn(
"Usage of uppercase key '%s' in '%s' will be deprecated in future "
"versions. Please use lowercase '%s' instead"
% (opt, section, lowercase_opt)
)
return lowercase_opt
# FIXME: 'Distribution._set_command_options' is too complex (14)
def _set_command_options(self, command_obj, option_dict=None): # noqa: C901
"""
Set the options for 'command_obj' from 'option_dict'. Basically
this means copying elements of a dictionary ('option_dict') to
attributes of an instance ('command').
'command_obj' must be a Command instance. If 'option_dict' is not
supplied, uses the standard option dictionary for this command
(from 'self.command_options').
(Adopted from distutils.dist.Distribution._set_command_options)
"""
command_name = command_obj.get_command_name()
if option_dict is None:
option_dict = self.get_option_dict(command_name)
if DEBUG:
self.announce(" setting options for '%s' command:" % command_name)
for (option, (source, value)) in option_dict.items():
if DEBUG:
self.announce(" %s = %s (from %s)" % (option, value,
source))
try:
bool_opts = [translate_longopt(o)
for o in command_obj.boolean_options]
except AttributeError:
bool_opts = []
try:
neg_opt = command_obj.negative_opt
except AttributeError:
neg_opt = {}
try:
is_string = isinstance(value, str)
if option in neg_opt and is_string:
setattr(command_obj, neg_opt[option], not strtobool(value))
elif option in bool_opts and is_string:
setattr(command_obj, option, strtobool(value))
elif hasattr(command_obj, option):
setattr(command_obj, option, value)
else:
raise DistutilsOptionError(
"error in %s: command '%s' has no such option '%s'"
% (source, command_name, option))
except ValueError as e:
raise DistutilsOptionError(e) from e
def parse_config_files(self, filenames=None, ignore_option_errors=False):
"""Parses configuration files from various levels
and loads configuration.
"""
self._parse_config_files(filenames=filenames)
parse_configuration(self, self.command_options,
ignore_option_errors=ignore_option_errors)
self._finalize_requires()
def fetch_build_eggs(self, requires):
"""Resolve pre-setup requirements"""
resolved_dists = pkg_resources.working_set.resolve(
pkg_resources.parse_requirements(requires),
installer=self.fetch_build_egg,
replace_conflicting=True,
)
for dist in resolved_dists:
pkg_resources.working_set.add(dist, replace=True)
return resolved_dists
def finalize_options(self):
"""
Allow plugins to apply arbitrary operations to the
        distribution. Each hook may optionally define an 'order'
to influence the order of execution. Smaller numbers
go first and the default is 0.
"""
group = 'setuptools.finalize_distribution_options'
def by_order(hook):
return getattr(hook, 'order', 0)
eps = map(lambda e: e.load(), pkg_resources.iter_entry_points(group))
for ep in sorted(eps, key=by_order):
ep(self)
def _finalize_setup_keywords(self):
for ep in pkg_resources.iter_entry_points('distutils.setup_keywords'):
value = getattr(self, ep.name, None)
if value is not None:
ep.require(installer=self.fetch_build_egg)
ep.load()(self, ep.name, value)
def _finalize_2to3_doctests(self):
if getattr(self, 'convert_2to3_doctests', None):
# XXX may convert to set here when we can rely on set being builtin
self.convert_2to3_doctests = [
os.path.abspath(p)
for p in self.convert_2to3_doctests
]
else:
self.convert_2to3_doctests = []
def get_egg_cache_dir(self):
egg_cache_dir = os.path.join(os.curdir, '.eggs')
if not os.path.exists(egg_cache_dir):
os.mkdir(egg_cache_dir)
windows_support.hide_file(egg_cache_dir)
readme_txt_filename = os.path.join(egg_cache_dir, 'README.txt')
with open(readme_txt_filename, 'w') as f:
f.write('This directory contains eggs that were downloaded '
'by setuptools to build, test, and run plug-ins.\n\n')
f.write('This directory caches those eggs to prevent '
'repeated downloads.\n\n')
f.write('However, it is safe to delete this directory.\n\n')
return egg_cache_dir
def fetch_build_egg(self, req):
"""Fetch an egg needed for building"""
from setuptools.installer import fetch_build_egg
return fetch_build_egg(self, req)
def get_command_class(self, command):
"""Pluggable version of get_command_class()"""
if command in self.cmdclass:
return self.cmdclass[command]
eps = pkg_resources.iter_entry_points('distutils.commands', command)
for ep in eps:
ep.require(installer=self.fetch_build_egg)
self.cmdclass[command] = cmdclass = ep.load()
return cmdclass
else:
return _Distribution.get_command_class(self, command)
def print_commands(self):
for ep in pkg_resources.iter_entry_points('distutils.commands'):
if ep.name not in self.cmdclass:
# don't require extras as the commands won't be invoked
cmdclass = ep.resolve()
self.cmdclass[ep.name] = cmdclass
return _Distribution.print_commands(self)
def get_command_list(self):
for ep in pkg_resources.iter_entry_points('distutils.commands'):
if ep.name not in self.cmdclass:
# don't require extras as the commands won't be invoked
cmdclass = ep.resolve()
self.cmdclass[ep.name] = cmdclass
return _Distribution.get_command_list(self)
def include(self, **attrs):
"""Add items to distribution that are named in keyword arguments
For example, 'dist.include(py_modules=["x"])' would add 'x' to
the distribution's 'py_modules' attribute, if it was not already
there.
Currently, this method only supports inclusion for attributes that are
lists or tuples. If you need to add support for adding to other
attributes in this or a subclass, you can add an '_include_X' method,
where 'X' is the name of the attribute. The method will be called with
the value passed to 'include()'. So, 'dist.include(foo={"bar":"baz"})'
will try to call 'dist._include_foo({"bar":"baz"})', which can then
handle whatever special inclusion logic is needed.
"""
for k, v in attrs.items():
include = getattr(self, '_include_' + k, None)
if include:
include(v)
else:
self._include_misc(k, v)
def exclude_package(self, package):
"""Remove packages, modules, and extensions in named package"""
pfx = package + '.'
if self.packages:
self.packages = [
p for p in self.packages
if p != package and not p.startswith(pfx)
]
if self.py_modules:
self.py_modules = [
p for p in self.py_modules
if p != package and not p.startswith(pfx)
]
if self.ext_modules:
self.ext_modules = [
p for p in self.ext_modules
if p.name != package and not p.name.startswith(pfx)
]
def has_contents_for(self, package):
"""Return true if 'exclude_package(package)' would do something"""
pfx = package + '.'
for p in self.iter_distribution_names():
if p == package or p.startswith(pfx):
return True
def _exclude_misc(self, name, value):
"""Handle 'exclude()' for list/tuple attrs without a special handler"""
if not isinstance(value, sequence):
raise DistutilsSetupError(
"%s: setting must be a list or tuple (%r)" % (name, value)
)
try:
old = getattr(self, name)
except AttributeError as e:
raise DistutilsSetupError(
"%s: No such distribution setting" % name
) from e
if old is not None and not isinstance(old, sequence):
raise DistutilsSetupError(
name + ": this setting cannot be changed via include/exclude"
)
elif old:
setattr(self, name, [item for item in old if item not in value])
def _include_misc(self, name, value):
"""Handle 'include()' for list/tuple attrs without a special handler"""
if not isinstance(value, sequence):
raise DistutilsSetupError(
"%s: setting must be a list (%r)" % (name, value)
)
try:
old = getattr(self, name)
except AttributeError as e:
raise DistutilsSetupError(
"%s: No such distribution setting" % name
) from e
if old is None:
setattr(self, name, value)
elif not isinstance(old, sequence):
raise DistutilsSetupError(
name + ": this setting cannot be changed via include/exclude"
)
else:
new = [item for item in value if item not in old]
setattr(self, name, old + new)
def exclude(self, **attrs):
"""Remove items from distribution that are named in keyword arguments
For example, 'dist.exclude(py_modules=["x"])' would remove 'x' from
the distribution's 'py_modules' attribute. Excluding packages uses
the 'exclude_package()' method, so all of the package's contained
packages, modules, and extensions are also excluded.
Currently, this method only supports exclusion from attributes that are
lists or tuples. If you need to add support for excluding from other
attributes in this or a subclass, you can add an '_exclude_X' method,
where 'X' is the name of the attribute. The method will be called with
the value passed to 'exclude()'. So, 'dist.exclude(foo={"bar":"baz"})'
will try to call 'dist._exclude_foo({"bar":"baz"})', which can then
handle whatever special exclusion logic is needed.
"""
for k, v in attrs.items():
exclude = getattr(self, '_exclude_' + k, None)
if exclude:
exclude(v)
else:
self._exclude_misc(k, v)
def _exclude_packages(self, packages):
if not isinstance(packages, sequence):
raise DistutilsSetupError(
"packages: setting must be a list or tuple (%r)" % (packages,)
)
list(map(self.exclude_package, packages))
def _parse_command_opts(self, parser, args):
# Remove --with-X/--without-X options when processing command args
self.global_options = self.__class__.global_options
self.negative_opt = self.__class__.negative_opt
# First, expand any aliases
command = args[0]
aliases = self.get_option_dict('aliases')
while command in aliases:
src, alias = aliases[command]
del aliases[command] # ensure each alias can expand only once!
import shlex
args[:1] = shlex.split(alias, True)
command = args[0]
nargs = _Distribution._parse_command_opts(self, parser, args)
# Handle commands that want to consume all remaining arguments
cmd_class = self.get_command_class(command)
if getattr(cmd_class, 'command_consumes_arguments', None):
self.get_option_dict(command)['args'] = ("command line", nargs)
if nargs is not None:
return []
return nargs
def get_cmdline_options(self):
"""Return a '{cmd: {opt:val}}' map of all command-line options
Option names are all long, but do not include the leading '--', and
contain dashes rather than underscores. If the option doesn't take
an argument (e.g. '--quiet'), the 'val' is 'None'.
Note that options provided by config files are intentionally excluded.
"""
d = {}
for cmd, opts in self.command_options.items():
for opt, (src, val) in opts.items():
if src != "command line":
continue
opt = opt.replace('_', '-')
if val == 0:
cmdobj = self.get_command_obj(cmd)
neg_opt = self.negative_opt.copy()
neg_opt.update(getattr(cmdobj, 'negative_opt', {}))
for neg, pos in neg_opt.items():
if pos == opt:
opt = neg
val = None
break
else:
raise AssertionError("Shouldn't be able to get here")
elif val == 1:
val = None
d.setdefault(cmd, {})[opt] = val
return d
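    # Illustrative return shape (hypothetical values):
    #     {'build_ext': {'inplace': None, 'parallel': '4'}}
    # i.e. long option names, dashes instead of underscores, None for bare flags.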
def iter_distribution_names(self):
"""Yield all packages, modules, and extension names in distribution"""
for pkg in self.packages or ():
yield pkg
for module in self.py_modules or ():
yield module
for ext in self.ext_modules or ():
if isinstance(ext, tuple):
name, buildinfo = ext
else:
name = ext.name
if name.endswith('module'):
name = name[:-6]
yield name
def handle_display_options(self, option_order):
"""If there were any non-global "display-only" options
(--help-commands or the metadata display options) on the command
line, display the requested info and return true; else return
false.
"""
import sys
if self.help_commands:
return _Distribution.handle_display_options(self, option_order)
# Stdout may be StringIO (e.g. in tests)
if not isinstance(sys.stdout, io.TextIOWrapper):
return _Distribution.handle_display_options(self, option_order)
# Don't wrap stdout if utf-8 is already the encoding. Provides
# workaround for #334.
if sys.stdout.encoding.lower() in ('utf-8', 'utf8'):
return _Distribution.handle_display_options(self, option_order)
# Print metadata in UTF-8 no matter the platform
encoding = sys.stdout.encoding
errors = sys.stdout.errors
newline = sys.platform != 'win32' and '\n' or None
line_buffering = sys.stdout.line_buffering
sys.stdout = io.TextIOWrapper(
sys.stdout.detach(), 'utf-8', errors, newline, line_buffering)
try:
return _Distribution.handle_display_options(self, option_order)
finally:
sys.stdout = io.TextIOWrapper(
sys.stdout.detach(), encoding, errors, newline, line_buffering)
class DistDeprecationWarning(SetuptoolsDeprecationWarning):
"""Class for warning about deprecations in dist in
setuptools. Not ignored by default, unlike DeprecationWarning."""
|
nataddrho/DigiCue-USB
|
Python3/src/venv/Lib/site-packages/setuptools/dist.py
|
Python
|
mit
| 40,150
|
import os
from ..ast import (Assignment, BinaryOperator, Block, Function, IfStatement,
Invocation, NumericLiteral, StringLiteral, Return, Symbol, ListLiteral,
Import)
from ..ast.inference import infer, SymbolType, resolve_types, is_concrete_type, InferenceError
def join_arguments(args):
out = []
for arg in args:
if type(arg) == str:
out.append(arg)
else:
out.append(generate(arg))
return ','.join(out)
def generate_block_symbols(node, exclude=()):
    inferred_return, constraints = infer(node)
    resolved = resolve_types(constraints)
    local_symbols = []
    for t in resolved:
        if type(t) == SymbolType and t.name not in exclude:
            local_symbols.append('var %s;' % t.name)
    return '\n'.join(local_symbols)
def generate_function(node):
body = generate_block(node.body)
    inferred_return, constraints = infer(node)
    resolved = resolve_types(constraints)
    if not is_concrete_type(inferred_return):
        inferred_return = resolved[inferred_return]
    args = {}
    for arg in node.args:
        found = False
        for constraint in constraints:
            if SymbolType(arg) in constraint:
                args[arg] = resolved[SymbolType(arg)]
                found = True
        if not found:
            raise InferenceError('could not infer type of argument: %s' % arg)
    # Typed argument strings; currently unused because the emitted JS uses the
    # bare argument names via join_arguments() below.
    args_generated = []
    for arg, type_ in args.items():
        args_generated.append('%s %s' % (type_, arg))
exclude_symbols = set([node.name]).union(args)
return ('var {name} = function({args}) {{ {symbols}\n{body} }};'
'exports.{name} = {name};'.format(
name=node.name, args=join_arguments(node.args),
symbols=generate_block_symbols(node, exclude=exclude_symbols),
body=body))
def generate_invocation(node):
return ('%s(%s)' %
(''.join(node.func), join_arguments(node.args)))
def generate_assignment(node):
assert type(node.dst) == Symbol
return ('%s = %s;' % (node.dst.name, generate(node.src)))
def generate_binary_operator(node):
return ('(%s %s %s)' % (
generate(node.first),
node.operator,
generate(node.second)))
def generate_if_statement(node):
return ('if(%s) { %s }' % (generate(node.expression),
generate_block(node.body)))
def generate_block(node):
return ';'.join(generate(x) for x in node.body)
def generate_return(node):
return 'return %s' % generate(node.result)
def generate_literal(node):
return node.value
def generate_list_literal(node):
return '[%s]' % ','.join(generate(x) for x in node.value)
def generate_symbol(node):
return node.name
def generate_import(node):
return ''
def generate(node):
generators = {
Function: generate_function,
Invocation: generate_invocation,
Assignment: generate_assignment,
BinaryOperator: generate_binary_operator,
IfStatement: generate_if_statement,
Block: generate_block,
Return: generate_return,
StringLiteral: generate_literal,
NumericLiteral: generate_literal,
ListLiteral: generate_list_literal,
Symbol: generate_symbol,
Import: generate_import
}
try:
return generators[type(node)](node)
    except KeyError:
        raise RuntimeError('unknown node: %r' % (node,)) from None
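# Minimal usage sketch (assumes the AST constructors accept the shapes used
# above; not part of the original module):
#     tree = Assignment(dst=Symbol('x'),
#                       src=BinaryOperator('+', NumericLiteral('1'),
#                                          NumericLiteral('2')))
#     generate(tree)  # -> "x = (1 + 2);"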
|
bbonf/matcha
|
matcha/js/__init__.py
|
Python
|
mit
| 3,412
|
import os
import pytest
from uqbar.strings import normalize
import supriya.synthdefs
import supriya.ugens
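# The tests below build the same Splay graph two ways: once via sclang (then
# decompiled) and once via supriya's SynthDefBuilder, comparing each normalized
# SynthDef dump against an expected fixture.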
@pytest.mark.skipif(
os.environ.get("GITHUB_ACTIONS") == "true",
reason="sclang broken under GitHub Actions",
)
def test_Splay_01_sclang(server):
sc_synthdef = supriya.synthdefs.SuperColliderSynthDef(
"test",
r"""
arg spread=1, level=0.2, center=0.0;
Out.ar(0, Splay.ar(In.ar(0, 5), spread, level, center));
""",
)
sc_compiled_synthdef = sc_synthdef.compile()
synthdef = supriya.synthdefs.SynthDefDecompiler.decompile_synthdefs(
sc_compiled_synthdef
)[0]
assert normalize(str(synthdef)) == normalize(
"""
synthdef:
name: test
ugens:
- Control.kr: null
- BinaryOpUGen(SUBTRACTION).kr:
left: Control.kr[2:center]
right: Control.kr[0:spread]
- MulAdd.kr/0:
addend: Control.kr[2:center]
multiplier: -0.5
source: Control.kr[0:spread]
- MulAdd.kr/1:
addend: Control.kr[2:center]
multiplier: 0.5
source: Control.kr[0:spread]
- BinaryOpUGen(ADDITION).kr:
left: Control.kr[0:spread]
right: Control.kr[2:center]
- BinaryOpUGen(MULTIPLICATION).kr:
left: Control.kr[1:level]
right: 0.4472135901451111
- In.ar:
bus: 0.0
- Pan2.ar/0:
level: 1.0
position: BinaryOpUGen(SUBTRACTION).kr[0]
source: In.ar[0]
- Pan2.ar/1:
level: 1.0
position: MulAdd.kr/0[0]
source: In.ar[1]
- Pan2.ar/2:
level: 1.0
position: Control.kr[2:center]
source: In.ar[2]
- Pan2.ar/3:
level: 1.0
position: MulAdd.kr/1[0]
source: In.ar[3]
- Sum4.ar/0:
input_four: Pan2.ar/0[0]
input_one: Pan2.ar/3[0]
input_three: Pan2.ar/1[0]
input_two: Pan2.ar/2[0]
- Sum4.ar/1:
input_four: Pan2.ar/0[1]
input_one: Pan2.ar/3[1]
input_three: Pan2.ar/1[1]
input_two: Pan2.ar/2[1]
- Pan2.ar/4:
level: 1.0
position: BinaryOpUGen(ADDITION).kr[0]
source: In.ar[4]
- BinaryOpUGen(ADDITION).ar/0:
left: Sum4.ar/0[0]
right: Pan2.ar/4[0]
- BinaryOpUGen(MULTIPLICATION).ar/0:
left: BinaryOpUGen(ADDITION).ar/0[0]
right: BinaryOpUGen(MULTIPLICATION).kr[0]
- BinaryOpUGen(ADDITION).ar/1:
left: Sum4.ar/1[0]
right: Pan2.ar/4[1]
- BinaryOpUGen(MULTIPLICATION).ar/1:
left: BinaryOpUGen(ADDITION).ar/1[0]
right: BinaryOpUGen(MULTIPLICATION).kr[0]
- Out.ar:
bus: 0.0
source[0]: BinaryOpUGen(MULTIPLICATION).ar/0[0]
source[1]: BinaryOpUGen(MULTIPLICATION).ar/1[0]
"""
)
def test_Splay_01_supriya(server):
with supriya.synthdefs.SynthDefBuilder(spread=1, level=0.2, center=0.0) as builder:
source = supriya.ugens.Splay.ar(
source=supriya.ugens.In.ar(bus=0, channel_count=5),
spread=builder["spread"],
level=builder["level"],
center=builder["center"],
)
supriya.ugens.Out.ar(bus=0, source=source)
py_synthdef = builder.build(name="test")
assert normalize(str(py_synthdef)) == normalize(
"""
synthdef:
name: test
ugens:
- Control.kr: null
- BinaryOpUGen(MULTIPLICATION).kr/0:
left: -1.0
right: Control.kr[2:spread]
- BinaryOpUGen(ADDITION).kr/0:
left: BinaryOpUGen(MULTIPLICATION).kr/0[0]
right: Control.kr[0:center]
- BinaryOpUGen(MULTIPLICATION).kr/1:
left: -0.5
right: Control.kr[2:spread]
- BinaryOpUGen(ADDITION).kr/1:
left: BinaryOpUGen(MULTIPLICATION).kr/1[0]
right: Control.kr[0:center]
- BinaryOpUGen(MULTIPLICATION).kr/2:
left: 0.5
right: Control.kr[2:spread]
- BinaryOpUGen(ADDITION).kr/2:
left: BinaryOpUGen(MULTIPLICATION).kr/2[0]
right: Control.kr[0:center]
- BinaryOpUGen(ADDITION).kr/3:
left: Control.kr[2:spread]
right: Control.kr[0:center]
- BinaryOpUGen(MULTIPLICATION).kr/3:
left: Control.kr[1:level]
right: 0.4472135954999579
- In.ar:
bus: 0.0
- Pan2.ar/0:
level: 1.0
position: BinaryOpUGen(ADDITION).kr/0[0]
source: In.ar[0]
- Pan2.ar/1:
level: 1.0
position: BinaryOpUGen(ADDITION).kr/1[0]
source: In.ar[1]
- Pan2.ar/2:
level: 1.0
position: Control.kr[0:center]
source: In.ar[2]
- Pan2.ar/3:
level: 1.0
position: BinaryOpUGen(ADDITION).kr/2[0]
source: In.ar[3]
- Sum4.ar/0:
input_four: Pan2.ar/3[0]
input_one: Pan2.ar/0[0]
input_three: Pan2.ar/2[0]
input_two: Pan2.ar/1[0]
- Sum4.ar/1:
input_four: Pan2.ar/3[1]
input_one: Pan2.ar/0[1]
input_three: Pan2.ar/2[1]
input_two: Pan2.ar/1[1]
- Pan2.ar/4:
level: 1.0
position: BinaryOpUGen(ADDITION).kr/3[0]
source: In.ar[4]
- BinaryOpUGen(ADDITION).ar/0:
left: Sum4.ar/0[0]
right: Pan2.ar/4[0]
- BinaryOpUGen(MULTIPLICATION).ar/0:
left: BinaryOpUGen(ADDITION).ar/0[0]
right: BinaryOpUGen(MULTIPLICATION).kr/3[0]
- BinaryOpUGen(ADDITION).ar/1:
left: Sum4.ar/1[0]
right: Pan2.ar/4[1]
- BinaryOpUGen(MULTIPLICATION).ar/1:
left: BinaryOpUGen(ADDITION).ar/1[0]
right: BinaryOpUGen(MULTIPLICATION).kr/3[0]
- Out.ar:
bus: 0.0
source[0]: BinaryOpUGen(MULTIPLICATION).ar/0[0]
source[1]: BinaryOpUGen(MULTIPLICATION).ar/1[0]
"""
)
py_synthdef.allocate(server=server)
@pytest.mark.skipif(
os.environ.get("GITHUB_ACTIONS") == "true",
reason="sclang broken under GitHub Actions",
)
def test_Splay_02_sclang(server):
sc_synthdef = supriya.synthdefs.SuperColliderSynthDef(
"test",
r"""
arg spread=1, level=0.2;
Out.ar(0, Splay.ar(In.ar(0, 5), spread, level, [-0.25, 0.25]));
""",
)
sc_compiled_synthdef = sc_synthdef.compile()
synthdef = supriya.synthdefs.SynthDefDecompiler.decompile_synthdefs(
sc_compiled_synthdef
)[0]
assert normalize(str(synthdef)) == normalize(
"""
synthdef:
name: test
ugens:
- Control.kr: null
- BinaryOpUGen(SUBTRACTION).kr/0:
left: -0.25
right: Control.kr[0:spread]
- MulAdd.kr/0:
addend: -0.25
multiplier: -0.5
source: Control.kr[0:spread]
- MulAdd.kr/1:
addend: -0.25
multiplier: 0.5
source: Control.kr[0:spread]
- BinaryOpUGen(ADDITION).kr/0:
left: Control.kr[0:spread]
right: -0.25
- BinaryOpUGen(MULTIPLICATION).kr/0:
left: Control.kr[1:level]
right: 0.4472135901451111
- BinaryOpUGen(SUBTRACTION).kr/1:
left: 0.25
right: Control.kr[0:spread]
- MulAdd.kr/2:
addend: 0.25
multiplier: -0.5
source: Control.kr[0:spread]
- MulAdd.kr/3:
addend: 0.25
multiplier: 0.5
source: Control.kr[0:spread]
- BinaryOpUGen(ADDITION).kr/1:
left: Control.kr[0:spread]
right: 0.25
- BinaryOpUGen(MULTIPLICATION).kr/1:
left: Control.kr[1:level]
right: 0.4472135901451111
- In.ar:
bus: 0.0
- Pan2.ar/0:
level: 1.0
position: BinaryOpUGen(SUBTRACTION).kr/0[0]
source: In.ar[0]
- Pan2.ar/1:
level: 1.0
position: MulAdd.kr/0[0]
source: In.ar[1]
- Pan2.ar/2:
level: 1.0
position: -0.25
source: In.ar[2]
- Pan2.ar/3:
level: 1.0
position: MulAdd.kr/1[0]
source: In.ar[3]
- Sum4.ar/0:
input_four: Pan2.ar/0[0]
input_one: Pan2.ar/3[0]
input_three: Pan2.ar/1[0]
input_two: Pan2.ar/2[0]
- Sum4.ar/1:
input_four: Pan2.ar/0[1]
input_one: Pan2.ar/3[1]
input_three: Pan2.ar/1[1]
input_two: Pan2.ar/2[1]
- Pan2.ar/4:
level: 1.0
position: BinaryOpUGen(ADDITION).kr/0[0]
source: In.ar[4]
- BinaryOpUGen(ADDITION).ar/0:
left: Sum4.ar/0[0]
right: Pan2.ar/4[0]
- BinaryOpUGen(MULTIPLICATION).ar/0:
left: BinaryOpUGen(ADDITION).ar/0[0]
right: BinaryOpUGen(MULTIPLICATION).kr/0[0]
- BinaryOpUGen(ADDITION).ar/1:
left: Sum4.ar/1[0]
right: Pan2.ar/4[1]
- BinaryOpUGen(MULTIPLICATION).ar/1:
left: BinaryOpUGen(ADDITION).ar/1[0]
right: BinaryOpUGen(MULTIPLICATION).kr/0[0]
- Pan2.ar/5:
level: 1.0
position: BinaryOpUGen(SUBTRACTION).kr/1[0]
source: In.ar[0]
- Pan2.ar/6:
level: 1.0
position: MulAdd.kr/2[0]
source: In.ar[1]
- Pan2.ar/7:
level: 1.0
position: 0.25
source: In.ar[2]
- Pan2.ar/8:
level: 1.0
position: MulAdd.kr/3[0]
source: In.ar[3]
- Sum4.ar/2:
input_four: Pan2.ar/5[0]
input_one: Pan2.ar/8[0]
input_three: Pan2.ar/6[0]
input_two: Pan2.ar/7[0]
- Sum4.ar/3:
input_four: Pan2.ar/5[1]
input_one: Pan2.ar/8[1]
input_three: Pan2.ar/6[1]
input_two: Pan2.ar/7[1]
- Pan2.ar/9:
level: 1.0
position: BinaryOpUGen(ADDITION).kr/1[0]
source: In.ar[4]
- BinaryOpUGen(ADDITION).ar/2:
left: Sum4.ar/2[0]
right: Pan2.ar/9[0]
- BinaryOpUGen(MULTIPLICATION).ar/2:
left: BinaryOpUGen(ADDITION).ar/2[0]
right: BinaryOpUGen(MULTIPLICATION).kr/1[0]
- Out.ar/0:
bus: 0.0
source[0]: BinaryOpUGen(MULTIPLICATION).ar/0[0]
source[1]: BinaryOpUGen(MULTIPLICATION).ar/2[0]
- BinaryOpUGen(ADDITION).ar/3:
left: Sum4.ar/3[0]
right: Pan2.ar/9[1]
- BinaryOpUGen(MULTIPLICATION).ar/3:
left: BinaryOpUGen(ADDITION).ar/3[0]
right: BinaryOpUGen(MULTIPLICATION).kr/1[0]
- Out.ar/1:
bus: 0.0
source[0]: BinaryOpUGen(MULTIPLICATION).ar/1[0]
source[1]: BinaryOpUGen(MULTIPLICATION).ar/3[0]
"""
)
def test_Splay_02_supriya(server):
with supriya.synthdefs.SynthDefBuilder(spread=1, level=0.2) as builder:
source = supriya.ugens.Splay.ar(
source=supriya.ugens.In.ar(bus=0, channel_count=5),
spread=builder["spread"],
level=builder["level"],
center=[-0.25, 0.25],
)
supriya.ugens.Out.ar(bus=0, source=source)
py_synthdef = builder.build(name="test")
assert normalize(str(py_synthdef)) == normalize(
"""
synthdef:
name: test
ugens:
- Control.kr: null
- BinaryOpUGen(MULTIPLICATION).kr/0:
left: -1.0
right: Control.kr[1:spread]
- BinaryOpUGen(ADDITION).kr/0:
left: BinaryOpUGen(MULTIPLICATION).kr/0[0]
right: -0.25
- BinaryOpUGen(MULTIPLICATION).kr/1:
left: -0.5
right: Control.kr[1:spread]
- BinaryOpUGen(ADDITION).kr/1:
left: BinaryOpUGen(MULTIPLICATION).kr/1[0]
right: -0.25
- BinaryOpUGen(MULTIPLICATION).kr/2:
left: 0.5
right: Control.kr[1:spread]
- BinaryOpUGen(ADDITION).kr/2:
left: BinaryOpUGen(MULTIPLICATION).kr/2[0]
right: -0.25
- BinaryOpUGen(ADDITION).kr/3:
left: Control.kr[1:spread]
right: -0.25
- BinaryOpUGen(MULTIPLICATION).kr/3:
left: Control.kr[0:level]
right: 0.4472135954999579
- BinaryOpUGen(MULTIPLICATION).kr/4:
left: -1.0
right: Control.kr[1:spread]
- BinaryOpUGen(ADDITION).kr/4:
left: BinaryOpUGen(MULTIPLICATION).kr/4[0]
right: 0.25
- BinaryOpUGen(MULTIPLICATION).kr/5:
left: -0.5
right: Control.kr[1:spread]
- BinaryOpUGen(ADDITION).kr/5:
left: BinaryOpUGen(MULTIPLICATION).kr/5[0]
right: 0.25
- BinaryOpUGen(MULTIPLICATION).kr/6:
left: 0.5
right: Control.kr[1:spread]
- BinaryOpUGen(ADDITION).kr/6:
left: BinaryOpUGen(MULTIPLICATION).kr/6[0]
right: 0.25
- BinaryOpUGen(ADDITION).kr/7:
left: Control.kr[1:spread]
right: 0.25
- BinaryOpUGen(MULTIPLICATION).kr/7:
left: Control.kr[0:level]
right: 0.4472135954999579
- In.ar:
bus: 0.0
- Pan2.ar/0:
level: 1.0
position: BinaryOpUGen(ADDITION).kr/0[0]
source: In.ar[0]
- Pan2.ar/1:
level: 1.0
position: BinaryOpUGen(ADDITION).kr/1[0]
source: In.ar[1]
- Pan2.ar/2:
level: 1.0
position: -0.25
source: In.ar[2]
- Pan2.ar/3:
level: 1.0
position: BinaryOpUGen(ADDITION).kr/2[0]
source: In.ar[3]
- Sum4.ar/0:
input_four: Pan2.ar/3[0]
input_one: Pan2.ar/0[0]
input_three: Pan2.ar/2[0]
input_two: Pan2.ar/1[0]
- Sum4.ar/1:
input_four: Pan2.ar/3[1]
input_one: Pan2.ar/0[1]
input_three: Pan2.ar/2[1]
input_two: Pan2.ar/1[1]
- Pan2.ar/4:
level: 1.0
position: BinaryOpUGen(ADDITION).kr/3[0]
source: In.ar[4]
- BinaryOpUGen(ADDITION).ar/0:
left: Sum4.ar/0[0]
right: Pan2.ar/4[0]
- BinaryOpUGen(MULTIPLICATION).ar/0:
left: BinaryOpUGen(ADDITION).ar/0[0]
right: BinaryOpUGen(MULTIPLICATION).kr/3[0]
- BinaryOpUGen(ADDITION).ar/1:
left: Sum4.ar/1[0]
right: Pan2.ar/4[1]
- BinaryOpUGen(MULTIPLICATION).ar/1:
left: BinaryOpUGen(ADDITION).ar/1[0]
right: BinaryOpUGen(MULTIPLICATION).kr/3[0]
- Pan2.ar/5:
level: 1.0
position: BinaryOpUGen(ADDITION).kr/4[0]
source: In.ar[0]
- Pan2.ar/6:
level: 1.0
position: BinaryOpUGen(ADDITION).kr/5[0]
source: In.ar[1]
- Pan2.ar/7:
level: 1.0
position: 0.25
source: In.ar[2]
- Pan2.ar/8:
level: 1.0
position: BinaryOpUGen(ADDITION).kr/6[0]
source: In.ar[3]
- Sum4.ar/2:
input_four: Pan2.ar/8[0]
input_one: Pan2.ar/5[0]
input_three: Pan2.ar/7[0]
input_two: Pan2.ar/6[0]
- Sum4.ar/3:
input_four: Pan2.ar/8[1]
input_one: Pan2.ar/5[1]
input_three: Pan2.ar/7[1]
input_two: Pan2.ar/6[1]
- Pan2.ar/9:
level: 1.0
position: BinaryOpUGen(ADDITION).kr/7[0]
source: In.ar[4]
- BinaryOpUGen(ADDITION).ar/2:
left: Sum4.ar/2[0]
right: Pan2.ar/9[0]
- BinaryOpUGen(MULTIPLICATION).ar/2:
left: BinaryOpUGen(ADDITION).ar/2[0]
right: BinaryOpUGen(MULTIPLICATION).kr/7[0]
- BinaryOpUGen(ADDITION).ar/3:
left: BinaryOpUGen(MULTIPLICATION).ar/0[0]
right: BinaryOpUGen(MULTIPLICATION).ar/2[0]
- BinaryOpUGen(ADDITION).ar/4:
left: Sum4.ar/3[0]
right: Pan2.ar/9[1]
- BinaryOpUGen(MULTIPLICATION).ar/3:
left: BinaryOpUGen(ADDITION).ar/4[0]
right: BinaryOpUGen(MULTIPLICATION).kr/7[0]
- BinaryOpUGen(ADDITION).ar/5:
left: BinaryOpUGen(MULTIPLICATION).ar/1[0]
right: BinaryOpUGen(MULTIPLICATION).ar/3[0]
- Out.ar:
bus: 0.0
source[0]: BinaryOpUGen(ADDITION).ar/3[0]
source[1]: BinaryOpUGen(ADDITION).ar/5[0]
"""
)
py_synthdef.allocate(server=server)
|
josiah-wolf-oberholtzer/supriya
|
tests/synthdefs/test_synthdefs_SynthDefCompiler_splay.py
|
Python
|
mit
| 20,228
|
class Solution(object):
def isSelfCrossing(self, x):
"""
:type x: List[int]
:rtype: bool
"""
        n = len(x)
        if n < 4:
            return False
        # start with the fourth element
        i = 3
        while i < n:
            # case 1: move i crosses move i-3 directly
            if x[i] >= x[i-2] and x[i-1] <= x[i-3]:
                return True
            # case 2: move i touches move i-4, building a cross in 5 steps
            # e.g. [2,4,4,4,2]
            #(2)****(1)
            # *    *(0)
            # *    *(5)
            #(3)****(4)
            # the last step must cover the gap between moves i-2 and i-4
            if i >= 4 and x[i-1] == x[i-3] and x[i-2] - x[i-4] <= x[i]:
                return True
            # case 3: move i crosses move i-5
            # e.g. [3,6,5,9,4,6]
            #(2)******(1)
            # *       *******(6)
            # * (7)*  *
            # *    (0)*
            #(3)*********(4)
            # segment (4-6) must lie between (0) and (1)
            # segment (6-7) must be larger than (4-3) minus (2-1)
            if i >= 5 and x[i-1] <= x[i-3] and x[i-1] >= x[i-3] - x[i-5] and x[i-2] >= x[i-4] and x[i] >= x[i-2] - x[i-4]:
                return True
            i += 1
        return False
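# Quick sanity checks (illustrative):
#     Solution().isSelfCrossing([2, 1, 1, 2])  # True: the 4th move crosses the 1st
#     Solution().isSelfCrossing([1, 2, 3, 4])  # False: the spiral keeps growing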
|
Tanych/CodeTracking
|
335-Self-Crossing/solution.py
|
Python
|
mit
| 1,246
|
#!/usr/bin/env python
import simplekml
import argparse
import colorsys
import numpy
import simplejson
import csv
import os
import sys
from lib.config import Config
def RGBToHTMLColor(rgb_tuple):
    """ convert an (R, G, B) tuple of floats in [0, 1] to an RRGGBBAA hex string """
    # scale by 255 (not 256) so 1.0 maps to 'ff' instead of overflowing '%02x'
    hexcolor = '%02x%02x%02xff' % (int(rgb_tuple[0]*255), int(rgb_tuple[1]*255), int(rgb_tuple[2]*255))
    # '%02x' means zero-padded, 2-digit hex values
    return hexcolor
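# e.g. RGBToHTMLColor((1.0, 0.0, 0.0)) -> 'ff0000ff' (8-digit hex with a trailing alpha byte)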
def get_colors(num_colors):
colors=[]
for i in numpy.arange(0., 360., 360. / num_colors):
hue = i/360.
lightness = (50 + numpy.random.rand() * 10)/100.
saturation = (90 + numpy.random.rand() * 10)/100.
colors.append(RGBToHTMLColor(colorsys.hls_to_rgb(hue, lightness, saturation)))
return colors
def load_data(savefile):
orders = {}
with open(savefile, 'r') as json_data:
try:
orders = simplejson.load(json_data)
except ValueError as e:
print('invalid json: %s' % e)
raise
return orders
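# Expected routes.json shape, inferred from the usage below (illustrative):
#     [[{"id": 1, "name": "Ann", "address": "1 Main St", "count": 2,
#        "comments": "", "lat": 47.61, "lon": -122.33}, ...], ...]
# i.e. a list of routes, each a list of delivery dicts.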
def save_routes_to_csv(config, routes):
savefile = "{}/master.csv".format(config.output_dir)
with open(savefile, "w") as csvfile:
f = csv.writer(csvfile)
        # Write the CSV header; remove this line if you don't need it
        f.writerow(["ID", "Name", "Address", "Bags", "Route", "Comments"])
for idx, route in enumerate(routes):
for d in route:
f.writerow([d['id'], d['name'], d['address'], d['count'], "route-{}".format(idx+1), d['comments']])
if config.verbose:
print("Saved {} routes to {}".format(len(routes), savefile))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Generate a KML file based on the passed in routes.json file.')
parser.add_argument('filename', type=str, help='the routes.json file containing the deliveries')
parser.add_argument('-v', '--verbose', action='store_true', help='increase the output verbosity')
args = parser.parse_args()
config = Config()
config.verbose = args.verbose
if not os.path.isfile(args.filename):
print("There is no such file {}!".format(args.filename))
sys.exit(-1)
routes = load_data(args.filename)
savefile = "{}/deliveries.kml".format(config.output_dir)
if config.verbose:
print("Loaded {} routes from {}".format(len(routes), args.filename))
colors = get_colors(100)
kml = simplekml.Kml(open=1)
num_routes = 0
num_orders = 0
for idx, route in enumerate(routes):
num_routes = idx + 1
for delivery in route:
pnt = kml.newpoint()
pnt.name = "{} {} ({} bags)".format(delivery['id'], delivery['address'], delivery['count'])
pnt.description = "route-{}".format(num_routes)
pnt.coords = [(delivery['lon'], delivery['lat'])]
pnt.style.iconstyle.color = colors[num_routes]
pnt.style.iconstyle.icon.href = None
num_orders = num_orders + 1
if config.verbose:
print("Added point for {} (route-{})".format(delivery['id'], num_routes))
kml.save(savefile)
if config.verbose:
print("Created {} points, one per order.".format(num_orders))
save_routes_to_csv(config, routes)
|
sean/delivery-routes
|
gen_kml.py
|
Python
|
mit
| 3,068
|
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Flurbo Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test the CHECKLOCKTIMEVERIFY (BIP65) soft-fork logic
#
from test_framework.test_framework import FlurboTestFramework
from test_framework.util import *
class BIP65Test(FlurboTestFramework):
def setup_network(self):
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, []))
self.nodes.append(start_node(1, self.options.tmpdir, ["-blockversion=3"]))
self.nodes.append(start_node(2, self.options.tmpdir, ["-blockversion=4"]))
connect_nodes(self.nodes[1], 0)
connect_nodes(self.nodes[2], 0)
self.is_network_split = False
self.sync_all()
def run_test(self):
cnt = self.nodes[0].getblockcount()
# Mine some old-version blocks
self.nodes[1].generate(100)
self.sync_all()
if (self.nodes[0].getblockcount() != cnt + 100):
raise AssertionError("Failed to mine 100 version=3 blocks")
# Mine 750 new-version blocks
for i in range(15):
self.nodes[2].generate(50)
self.sync_all()
if (self.nodes[0].getblockcount() != cnt + 850):
raise AssertionError("Failed to mine 750 version=4 blocks")
# TODO: check that new CHECKLOCKTIMEVERIFY rules are not enforced
# Mine 1 new-version block
self.nodes[2].generate(1)
self.sync_all()
if (self.nodes[0].getblockcount() != cnt + 851):
raise AssertionError("Failed to mine a version=4 blocks")
# TODO: check that new CHECKLOCKTIMEVERIFY rules are enforced
# Mine 198 new-version blocks
for i in range(2):
self.nodes[2].generate(99)
self.sync_all()
if (self.nodes[0].getblockcount() != cnt + 1049):
raise AssertionError("Failed to mine 198 version=4 blocks")
# Mine 1 old-version block
self.nodes[1].generate(1)
self.sync_all()
if (self.nodes[0].getblockcount() != cnt + 1050):
raise AssertionError("Failed to mine a version=3 block after 949 version=4 blocks")
        # Mine 1 new-version block
self.nodes[2].generate(1)
self.sync_all()
if (self.nodes[0].getblockcount() != cnt + 1051):
raise AssertionError("Failed to mine a version=4 block")
        # Mine 1 old-version block
try:
self.nodes[1].generate(1)
raise AssertionError("Succeeded to mine a version=3 block after 950 version=4 blocks")
except JSONRPCException:
pass
self.sync_all()
if (self.nodes[0].getblockcount() != cnt + 1051):
raise AssertionError("Accepted a version=3 block after 950 version=4 blocks")
        # Mine 1 new-version block
self.nodes[2].generate(1)
self.sync_all()
if (self.nodes[0].getblockcount() != cnt + 1052):
raise AssertionError("Failed to mine a version=4 block")
if __name__ == '__main__':
BIP65Test().main()
|
Flurbos/Flurbo
|
qa/rpc-tests/bip65-cltv.py
|
Python
|
mit
| 3,172
|
class BaseTwitterClient:
def __init__(self):
pass
def authenticate(self):
        '''Authenticate against the Twitter API and check the response to confirm a valid connection; return True or False'''
raise NotImplementedError()
def request(self):
'''Make an HTTP request'''
raise NotImplementedError()
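# A concrete client is expected to override both hooks, e.g. (hypothetical):
#
#     class RequestsTwitterClient(BaseTwitterClient):
#         def authenticate(self):
#             ...  # hit a credentials-verification endpoint, return True/False
#         def request(self):
#             ...  # perform the HTTP call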
|
blairg23/TweetyPy
|
BaseTwitterClient.py
|
Python
|
mit
| 317
|
from qualdocs.core import *
|
qualdocs/qualdocs
|
qualdocs/__init__.py
|
Python
|
mit
| 27
|
import os
import unittest
import GeometrA
from PIL import Image, ImageTk
from io import BytesIO
import base64
from GeometrA.src.TestScript.TestCase import TestCase
from GeometrA.src.TestScript.TestStep import Step
class TestCaseTestSuite(unittest.TestCase):
def setUp(self):
GeometrA.app.testing = True
buffered = BytesIO()
tmp_image = Image.open(os.getcwd() + '/tests/TestCase/Test/image/exist.png')
tmp_image.save(buffered, format="PNG")
img_string = base64.b64encode(buffered.getvalue())
self.app = GeometrA.app.test_client()
self.image = Image.open(BytesIO(base64.b64decode(img_string)))
    def testConstructor(self):
case = TestCase()
self.assertEqual(0, case.getSize())
def testInsert(self):
case = TestCase()
case.insert(act='Click', val=self.image)
case.insert(act='Sleep(s)', val='1')
case.insert(n=0, act='Android Keycode', val='KEYCODE_HOME')
case.insert(step=Step('Set Text', 'Hello World'))
self.assertEqual(4, case.getSize())
self.assertEqual('Android Keycode', case.getSteps(0).getAction())
self.assertEqual('Click', case.getSteps(1).getAction())
self.assertEqual('Sleep(s)', case.getSteps(2).getAction())
self.assertEqual('Set Text', case.getSteps(3).getAction())
    def testRefresh(self):
case = TestCase()
case.insert(act='Click', val=self.image)
case.insert(act='', val='')
case.insert(act='Sleep(s)', val='1')
case.refresh()
self.assertEqual('Click', case.getSteps(0).getAction())
self.assertEqual('Sleep(s)', case.getSteps(1).getAction())
self.assertEqual('1', case.getSteps(1).getValue())
def testSetAction(self):
case = TestCase()
case.insert(act='Click', val=self.image)
case.setAction(0, 'Sleep(s)')
self.assertEqual('Sleep(s)', case.getSteps(0).getAction())
def testSetValue(self):
case = TestCase()
case.insert(act='Android Keycode', val='KEYCODE_BACK')
case.setValue(0, 'KEYCODE_HOME')
self.assertEqual('KEYCODE_HOME', case.getSteps(0).getValue())
def testSetStep(self):
case = TestCase()
case.insert(act='Sleep(s)', val='1')
case.setStep(0, 'Set Text', 'Hello World')
self.assertEqual('Set Text', case.getSteps(0).getAction())
self.assertEqual('Hello World', case.getSteps(0).getValue())
def testGetSteps(self):
case = TestCase()
case.insert(act='Sleep(s)', val='1')
case.insert(act='Click', val=self.image)
stepList = case.getSteps()
self.assertEqual('Sleep(s)', stepList[0].getAction())
self.assertEqual('1', stepList[0].getValue())
self.assertEqual('Click', stepList[1].getAction())
def testDelete(self):
case = TestCase()
case.insert(act='Android Keycode', val='KEYCODE_HOME')
case.insert(act='Click', val=self.image)
case.insert(act='Sleep(s)', val='1')
case.delete(1)
self.assertEqual(2, case.getSize())
self.assertEqual('Sleep(s)', case.getSteps(1).getAction())
self.assertEqual('1', case.getSteps(1).getValue())
def testSetStatus(self):
case = TestCase()
case.insert(act='Click', val=self.image)
self.assertEqual('Success', case.setStatus(0, 'Success'))
def testGetStatus(self):
case = TestCase()
case.insert(act='Click', val=self.image)
case.setStatus(0, 'Success')
self.assertEqual('Success', case.getStatus(0))
def testClear(self):
case = TestCase()
case.insert(act='Sleep(s)', val='1')
case.clear()
self.assertEqual(0, case.getSize())
def testCopy(self):
case1 = TestCase()
case1.insert(act='Sleep(s)', val='1')
case2 = case1.copy()
case1.clear()
self.assertEqual(1, case2.getSize())
self.assertEqual('Sleep(s)', case2.getSteps(0).getAction())
self.assertEqual('1', case2.getSteps(0).getValue())
def testAppend(self):
case = TestCase()
case.insert(act='Sleep(s)', val='1')
case.append(0)
self.assertEqual(1, case.getSize())
self.assertEqual('Sleep(s)', case.getSteps(1).getAction())
self.assertEqual('1', case.getSteps(1).getValue())
|
NTUTVisualScript/Visual_Script
|
tests/TestScript/test_TestCase.py
|
Python
|
mit
| 4,375
|
# This file is part of Indico.
# Copyright (C) 2002 - 2022 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from indico.modules.events.management.views import WPEventManagement
from indico.util.i18n import _
from indico.web.views import WPNewBase
class WPRoomBookingBase(WPNewBase):
template_prefix = 'rb/'
title = _('Room Booking')
bundles = ('common.js', 'common.css', 'react.js', 'react.css', 'semantic-ui.js', 'semantic-ui.css',
'module_rb.js', 'module_rb.css')
class WPEventBookingList(WPEventManagement):
template_prefix = 'rb/'
sidemenu_option = 'room_booking'
bundles = ('module_rb.event.js', 'module_rb.css')
|
indico/indico
|
indico/modules/rb/views.py
|
Python
|
mit
| 767
|
#from __future__ import division
#
#import os, os.path
#import json
#import itertools
#from collections import namedtuple
#import matplotlib.pyplot as plt
#import numpy as np
#
#from csxj.datasources.parser_tools.article import ArticleData
#
#
#
#BACKGROUND_COLOR = '#e3e1dd'
#LIGHT_COLOR = '#f0efed'
#DARK_COLOR = '#4c4c4c'
#
#
#def get_subdirs(parent_path):
# return [d for d in os.listdir(parent_path) if os.path.isdir(os.path.join(parent_path, d))]
#
#
#
#def get_articles(json_filepath):
# with open(json_filepath, 'r') as f:
# json_content = json.load(f)
# articles_string = json_content['articles']
# return [ArticleData.from_json(article_s) for article_s in articles_string]
#
#
#
#def get_flat_article_list(provider_path):
# all_days = get_subdirs(provider_path)
# all_articles = []
#
#
# for day in all_days:
# day_path = os.path.join(provider_path, day)
# all_batches = get_subdirs(day_path)
#
# for batch in all_batches:
# batch_path = os.path.join(day_path, batch)
# all_articles.extend(get_articles(os.path.join(batch_path, 'articles.json')))
#
# return all_articles
#
#
#def categorize_articles(articles):
# def keyfunc(article):
# return article.category
#
# groups = []
# uniquekeys = []
# data = sorted(articles, key=keyfunc)
# for k, g in itertools.groupby(data, keyfunc):
# groups.append(list(g)) # Store group iterator as a list
# uniquekeys.append(k)
#
# return zip(uniquekeys, groups)
#
#
#
#def count_links(articles):
# ext_links = sum([len(art.external_links) for art in articles])
# int_links = sum([len(art.internal_links) for art in articles])
#
# return ext_links, int_links
#
#
#
#CategoryCounters = namedtuple('CategoryCounters', 'name total_links total_articles link_article_ratio')
#
#
#def make_barchart_in_subplot(ax, xs, title, labels):
#    ind = np.arange(len(xs))
# ax.barh(ind, xs, color=LIGHT_COLOR)
# ax.set_yticklabels(ind+0.35, labels, fontsize='small')
# ax.set_title(title)
#
#
#def make_barchart(xs, title, labels):
# ind = np.arange(len(xs))
# plt.barh(ind, xs, color=LIGHT_COLOR)
# plt.yticks(ind+0.35, labels, fontsize='small')
# plt.title(title)
#
#
#
#def sort_categories_by_links_article_ratio(categorized_articles):
# link_counters = list()
#
# max_total_articles = len(max(categorized_articles, key=lambda a: len(a[1]))[1])
#
# for (group, articles) in categorized_articles:
# total_articles = len(articles)
#
# total_links = sum(count_links(articles))
# if total_links and max_total_articles:
# ratio = (total_articles / total_links) / max_total_articles
# link_counters.append(CategoryCounters(name=group,
# total_links=total_links,
# total_articles=total_articles,
# link_article_ratio=ratio))
#
# def keyfunc(counter):
# return counter.link_article_ratio
# link_counters.sort(key=keyfunc)
#
# return link_counters
#
#
#def plot_categories_by_links_article_ratio_in_subplot(ax, categorized_articles, source_name):
# link_counters = sort_categories_by_links_article_ratio(categorized_articles)
#
# x = np.array([c.link_article_ratio for c in link_counters])
#
# def make_label(counter):
# return u'{0} (n_a={1} n_l={2})'.format(u'/'.join(counter.name),
# counter.total_articles,
# counter.total_links)
#
#
# labels = [make_label(c) for c in link_counters]
# make_barchart_in_subplot(ax, x, source_name, labels)
#
#
#def plot_categories_by_links_article_ratio(name, categorized_articles, outdir):
# link_counters = sort_categories_by_links_article_ratio(categorized_articles)
#
# x = np.array([c.link_article_ratio for c in link_counters])
#
# def make_label(counter):
# return u'{0} (n_a={1} n_l={2})'.format(u'/'.join(counter.name),
# counter.total_articles,
# counter.total_links)
#
# plt.clf()
# labels = [make_label(c) for c in link_counters]
# make_barchart(x, 'Categories by article/links ratio ({0})'.format(name), labels)
# plt.savefig(os.path.join(outdir, name+'_article_link_ratio.png'), bbox_inches='tight')
#
#
#
#def plot_categories_by_number_of_articles(name, categorized_articles, outdir):
# article_counters = list()
# for (group, articles) in categorized_articles:
# article_counters.append((group, len(articles)))
#
# def keyfunc(counter):
# return counter[1]
# article_counters.sort(key=keyfunc)
#
# x = np.array([counter[1] for counter in article_counters])
#
# def make_label(article_counter):
# return u'{0}'.format(u'/'.join(article_counter[0]))
#
# plt.clf()
# labels = [make_label(c) for c in article_counters]
# make_barchart(x, '# Articles per category ({0})'.format(name), labels)
# plt.savefig(os.path.join(outdir, name+'_articles_by_category.png'), bbox_inches='tight')
#
#
#
#def plot_categories_by_number_of_links(name, categorized_articles, outdir):
#
# LinkCounter = namedtuple('LinkCounter', 'name total_ext_links total_int_links total_links')
#
# link_counters = list()
# for (group, articles) in categorized_articles:
# n_ext_links, n_int_links = count_links(articles)
# link_counters.append(LinkCounter(name=group,
# total_ext_links=n_ext_links,
# total_int_links=n_int_links,
# total_links=n_ext_links+n_int_links))
#
# def keyfunc(counter):
# return counter.total_links
# link_counters.sort(key=keyfunc)
#
#
# x1 = np.array([counter.total_ext_links for counter in link_counters])
# x2 = np.array([counter.total_int_links for counter in link_counters])
#
# def make_label(link_counter):
# return u'{0}'.format(u'/'.join(link_counter.name))
# labels = [make_label(c) for c in link_counters]
#
# plt.clf()
# plt.autumn()
# ind = np.arange(len(x1))
# p1 = plt.barh(ind, x1, color=DARK_COLOR)
# p2 = plt.barh(ind, x2, left=x1, color=LIGHT_COLOR)
# plt.yticks(ind+0.35, labels, fontsize='small')
# plt.title('Number of links per category ({0})'.format(name))
# plt.legend( (p1[0], p2[0]), ('External links', 'Internal links'), 'lower right' )
# plt.savefig(os.path.join(outdir, name+'_number_of_links.png'), bbox_inches='tight')
#
#
#
#def make_all_figures(db_root, outdir):
# if not os.path.exists(outdir):
# os.mkdir(outdir)
#
# for source_dir in get_subdirs(db_root):
# articles = get_flat_article_list(os.path.join(db_root, source_dir))
#
# categorized_articles = categorize_articles(articles)
#
# plot_categories_by_links_article_ratio(source_dir, categorized_articles, outdir)
# plot_categories_by_number_of_articles(source_dir, categorized_articles, outdir)
# plot_categories_by_number_of_links(source_dir, categorized_articles, outdir)
#
#
#if __name__=='__main__':
# import argparse
#
# parser = argparse.ArgumentParser(description='Make (hopefully) interesting figures from the json db')
# parser.add_argument('--dir', type=str, dest='input_dir', required=True, help='json db directory')
# parser.add_argument('--outdir', type=str, dest='output_dir', required=True, help='directory to dump the figures in')
# args = parser.parse_args()
# make_all_figures(args.input_dir, args.output_dir)
#
#
#
#
|
sevas/csxj-crawler
|
scripts/make_figures.py
|
Python
|
mit
| 7,756
|
#!/usr/bin/env python
########################################################################.......
from __future__ import division, print_function
import sys
import ui
PLACEHOLDER_TEXT = "No Views Presented"
class Multipanel(object):
# Class of the object stored in ui.multipanel
def __init__(self):
# Init
self.views = []
self.curview = None
self.root = ui.View(name="Multipanel")
self.close = ui.Button()
self.close.name = "close"
self.close.enabled = False
self.close.image = ui.Image.named("ionicons-close-round-32")
self.close.action = self.close_tapped
self.root.add_subview(self.close)
self.close.frame = self.root.width - 32, 0, 32, 32
self.close.flex = "LB"
self.tabs = ui.SegmentedControl()
self.tabs.name = "tabs"
self.tabs.enabled = False
self.tabs.selected_index = -1
self.tabs.segments = [PLACEHOLDER_TEXT]
self.tabs.action = self.segment_changed
self.root.add_subview(self.tabs)
self.tabs.frame = 0, 0, self.root.width - self.close.width, self.tabs.height
self.tabs.flex = "WB"
self.placeholder = ui.View()
self.placeholder.background_color = "lightgray"
self.ph_label = ui.Label()
self.ph_label.font = ("<system-bold>", 24)
self.ph_label.text_color = "gray"
self.ph_label.text = "No View Selected"
self.placeholder.add_subview(self.ph_label)
self.ph_label.size_to_fit()
self.ph_label.center = self.placeholder.center
self.ph_label.flex = "TBLR"
self.update_view()
def segment_changed(self, sender):
# Action method for self.tabs
self.update_view()
def close_tapped(self, sender):
# Action method for self.close
self.pop_view()
def update_view(self):
# Update the currently visible view based on self.tabs.selected_index
if self.curview is not None:
self.root.remove_subview(self.curview)
if self.tabs.selected_index >= 0:
self.curview = self.views[self.tabs.selected_index]
else:
self.curview = self.placeholder
        self.tabs.segments = [view.name or '' for view in self.views]
self.root.add_subview(self.curview)
self.curview.frame = (
0,
self.tabs.height,
self.root.width,
self.root.height - self.tabs.height,
)
self.curview.flex = "WH"
def add_view(self, view):
# Add a view to the multipanel
self.views.append(view)
if len(self.tabs.segments) == 1 and self.tabs.segments[0] == PLACEHOLDER_TEXT:
self.tabs.segments = (view.name or "",)
else:
self.tabs.segments += (view.name or "",)
if self.tabs.selected_index < 0:
self.tabs.selected_index = len(self.tabs.segments) - 1
self.tabs.enabled = self.close.enabled = True
self.update_view()
def pop_view(self, index=None):
# Pop a view from the multipanel by index
index = self.tabs.selected_index if index is None else index
view = self.views.pop(index)
if len(self.views) > 0 and self.tabs.selected_index > 0:
self.tabs.selected_index -= 1
if len(self.views) == 0:
self.tabs.segments = [PLACEHOLDER_TEXT]
self.tabs.enabled = self.close.enabled = False
else:
# I'd use pop if this wasn't a tuple...
self.tabs.segments = self.tabs.segments[:index] + self.tabs.segments[index+1:]
self.update_view()
def init():
# Monkey-patch the ui module to use Multipanel
try:
ui.view_real_present
except AttributeError:
ui.view_real_present = ui.View.present
def present(self, mode, **kwargs):
if mode == "panel":
ui.multipanel.add_view(self)
else:
ui.view_real_present(self, mode, **kwargs)
instancemethod = type(Multipanel.add_view)
# ui.View is too builtin for us mere mortals to change its methods.
##ui.View.present = instancemethod(present, None, ui.View)
ui.multipanel = Multipanel()
ui.view_real_present(ui.multipanel.root, "panel")
def main():
# (Re-)Initialize Multipanel if not already present
try:
ui.view_real_present(ui.multipanel.root, "panel")
except AttributeError:
init()
except ValueError:
print("Multipanel appears to be already running.", file=sys.stderr)
return
if __name__ == "__main__":
# Testing
try:
del ui.multipanel
except AttributeError:
pass
main()
|
dgelessus/pythonista-scripts
|
multipanel.py
|
Python
|
mit
| 4,859
|
import log
import sys, string
import xml.dom.minidom
from xml.dom.minidom import Node
global VERBOSE
VERBOSE = False
global TIMERESOURCES
TIMERESOURCES = False
global pkgName, pkgVersion, pkgRevision, pkgArchitecture, pkgCategoryMajor, pkgCategoryMinor
global files, root, protocol, server, location, tags, environment
global paramStack, buildCommands
global xmlConfigFile
global inFormal, inInformation, inDependency, inArchitecture
inDependency = False
inInformation = False
inArchitecture = False
inFormal = False
global pkgDescriptor
pkgDescriptor = {
'pkgDescriptorFormal' : '',
'pkgDescriptorInformation' : '',
'pkgDescriptorDistribution' : '',
'pkgDescriptorDistributionDependencyRun' : '',
'pkgDescriptorDistributionDependencyBuild' : '',
'pkgBuildResourceInformation' : '',
'pkgBuildResourceFiles' : ''
}
global pkgDescriptorFormal
pkgDescriptorFormal = {
'pkgName' : '',
'pkgVersion' : '',
'pkgRevision' : ''
}
global pkgDescriptorInformation
pkgDescriptorInformation = {
'homepage' : '',
'license' : '',
'summary' : '',
'description' : ''
}
global pkgDescriptorDistribution
pkgDescriptorDistribution = {
'distributionName' : '',
'distributionVersion' : '',
'maintainerName' : '',
'maintainerEmail' : '',
'categoryMajor' : '',
'categoryMinor' : '',
'architecture' : ''
}
global pkgDescriptorDistributionDependencyRun
pkgDescriptorDistributionDependencyRun = {
'pkgName' : [],
'pkgVersionOperand' : [],
'pkgVersion' : [],
'pkgRevision' : []
}
global pkgDescriptorDistributionDependencyBuild
pkgDescriptorDistributionDependencyBuild = {
'pkgName' : [],
'pkgVersionOperand' : [],
'pkgVersion' : [],
'pkgRevision' : []
}
global pkgBuildResourceInformation
pkgBuildResourceInformation = {
'protocol' : '',
'server' : '',
'root' : '',
'location' : '',
'patchLevel' : ''
}
global pkgBuildResourceFiles
pkgBuildResourceFiles = {
'type' : [],
'md5' : [],
'file' : []
}
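# The dependency and file tables above are parallel lists: index i across
# 'type', 'md5' and 'file' (or pkgName/pkgVersionOperand/pkgVersion/pkgRevision)
# describes one entry.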
global pkgBuildExecute
pkgBuildExecute = {}
global pkgBuildEnvironment
pkgBuildEnvironment = {}
files = {}
tags = []
environment = []
paramStack = []
buildCommands = []
pkgArchitecture = ""
pkgCategoryMajor = ""
pkgCategoryMinor = ""
global PKG_DESCRIPTOR_FORMAL,\
PKG_DESCRIPTOR_INFORMATION, \
PKG_DESCRIPTOR_DISTRIBUTION, \
PKG_DESCRIPTOR_DISTRIBUTION_MAINTAINER, \
PKG_DESCRIPTOR_DISTRIBUTION_CATEGORY, \
PKG_DESCRIPTOR_DISTRIBUTION_DEPENDENCY_RUN, \
PKG_DESCRIPTOR_DISTRIBUTION_DEPENDENCY_BUILD, \
PKG_BUILD_RESOURCE_INFORMATION, \
PKG_BUILD_RESOURCE_FILES, \
PKG_BUILD_EXECUTE, \
PARSE_TAG, \
PARSE_PARAMETER
PARSE_TAG = 1
PARSE_PARAMETER = 2
PKG_DESCRIPTOR_FORMAL = 10
PKG_DESCRIPTOR_INFORMATION = 20
PKG_DESCRIPTOR_DISTRIBUTION = 30
PKG_DESCRIPTOR_DISTRIBUTION_MAINTAINER = 31
PKG_DESCRIPTOR_DISTRIBUTION_CATEGORY = 32
PKG_DESCRIPTOR_DISTRIBUTION_DEPENDENCY_RUN = 40
PKG_DESCRIPTOR_DISTRIBUTION_DEPENDENCY_BUILD = 41
PKG_BUILD_RESOURCE_INFORMATION = 50
PKG_BUILD_RESOURCE_FILES = 51
PKG_BUILD_EXECUTE = 60
def __init__(xmlConfigFile, verbose, timeResources):
global VERBOSE, TIMERESOURCES
VERBOSE = verbose
TIMERESOURCES = timeResources
log.setLog("infoLog")
#xmlManifestVersion = "1.0"
#
#if (xmlManifestVersion == "1.0"):
# print "m"
#elif (xmlManifestVersion == "1.1"):
# print "m"
#else:
# log.critical("unknown XML manifest version: " + xmlManifestVersion)
if VERBOSE:
log.information(1, 3, "2:", "reading XML file")
if TIMERESOURCES:
log.startTaskTime(1, 4, " ")
read(xmlConfigFile)
if TIMERESOURCES:
log.endTaskTime(1, 4, " ")
if VERBOSE:
log.information(1, 3, "2:", "read XML file")
return parseVars()
def parseVars():
global pkgDescriptorFormal, \
pkgDescriptorInformation, \
pkgDescriptorDistribution, \
pkgDescriptorDistributionDependencyRun, \
pkgDescriptorDistributionDependencyBuild, \
pkgBuildResourceInformation, \
pkgBuildResourceFiles, \
pkgBuildExecute, \
pkgBuildEnvironment, \
pkgDescriptor
pkgDescriptor['pkgDescriptorFormal'] = pkgDescriptorFormal
pkgDescriptor['pkgDescriptorInformation'] = pkgDescriptorInformation
pkgDescriptor['pkgDescriptorDistribution'] = pkgDescriptorDistribution
pkgDescriptor['pkgDescriptorDistributionDependencyRun'] = pkgDescriptorDistributionDependencyRun
pkgDescriptor['pkgDescriptorDistributionDependencyBuild'] = pkgDescriptorDistributionDependencyBuild
pkgDescriptor['pkgBuildResourceInformation'] = pkgBuildResourceInformation
pkgDescriptor['pkgBuildResourceFiles'] = pkgBuildResourceFiles
pkgDescriptor['pkgBuildExecute'] = pkgBuildExecute
pkgDescriptor['pkgBuildEnvironment'] = pkgBuildEnvironment
return pkgDescriptor
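# parseVars() just repackages the module-level dicts under pkgDescriptor, so
# callers get e.g. (illustrative):
#     pkgDescriptor['pkgDescriptorFormal'] == {'pkgName': ..., 'pkgVersion': ..., 'pkgRevision': ...}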
def parseTag(varName, varSet, tagValue, parentNode, parseType):
global PKG_DESCRIPTOR_FORMAL, \
PKG_DESCRIPTOR_INFORMATION, \
PKG_DESCRIPTOR_DISTRIBUTION, \
PKG_DESCRIPTOR_DISTRIBUTION_MAINTAINER, \
PKG_DESCRIPTOR_DISTRIBUTION_CATEGORY, \
PKG_DESCRIPTOR_DISTRIBUTION_DEPENDENCY_RUN, \
PKG_DESCRIPTOR_DISTRIBUTION_DEPENDENCY_BUILD, \
PKG_BUILD_RESOURCE_INFORMATION, \
PKG_BUILD_RESOURCE_FILES, \
PKG_BUILD_EXECUTE, \
PARSE_TAG, \
PARSE_PARAMETER
if (varSet == PKG_DESCRIPTOR_FORMAL):
global pkgDescriptorFormal
elif (varSet == PKG_DESCRIPTOR_INFORMATION):
global pkgDescriptorInformation
elif (varSet == PKG_DESCRIPTOR_DISTRIBUTION or \
varSet == PKG_DESCRIPTOR_DISTRIBUTION_MAINTAINER or \
varSet == PKG_DESCRIPTOR_DISTRIBUTION_CATEGORY):
global pkgDescriptorDistribution
elif (varSet == PKG_DESCRIPTOR_DISTRIBUTION_DEPENDENCY_RUN):
global pkgDescriptorDistributionDependencyRun
elif (varSet == PKG_DESCRIPTOR_DISTRIBUTION_DEPENDENCY_BUILD):
global pkgDescriptorDistributionDependencyBuild
elif (varSet == PKG_BUILD_RESOURCE_INFORMATION):
global pkgBuildResourceInformation
elif (varSet == PKG_BUILD_RESOURCE_FILES):
global pkgBuildResourceFiles
elif (varSet == PKG_BUILD_EXECUTE):
global pkgBuildExecute
returnVar = ""
if (parseType == PARSE_TAG):
if (cmp(string.strip(parentNode.nodeName), str(tagValue)) == 0):
for textNode in parentNode.childNodes:
if (textNode.nodeType == Node.TEXT_NODE):
returnVar = string.strip(textNode.nodeValue)
elif (parseType == PARSE_PARAMETER):
returnVar = ""
returnVar = parentNode.strip()
if (cmp(returnVar.strip(), "") != 0):
if (varSet == PKG_DESCRIPTOR_FORMAL):
pkgDescriptorFormal[varName] = returnVar
elif (varSet == PKG_DESCRIPTOR_INFORMATION):
pkgDescriptorInformation[varName] = returnVar
elif (varSet == PKG_DESCRIPTOR_DISTRIBUTION or \
varSet == PKG_DESCRIPTOR_DISTRIBUTION_MAINTAINER or \
varSet == PKG_DESCRIPTOR_DISTRIBUTION_CATEGORY):
pkgDescriptorDistribution[varName] = returnVar
elif (varSet == PKG_DESCRIPTOR_DISTRIBUTION_DEPENDENCY_RUN):
pkgDescriptorDistributionDependencyRun[varName].append(returnVar)
elif (varSet == PKG_DESCRIPTOR_DISTRIBUTION_DEPENDENCY_BUILD):
pkgDescriptorDistributionDependencyBuild[varName].append(returnVar)
elif (varSet == PKG_BUILD_RESOURCE_INFORMATION):
pkgBuildResourceInformation[varName] = returnVar
elif (varSet == PKG_BUILD_RESOURCE_FILES):
pkgBuildResourceFiles[varName].append(returnVar)
        elif (varSet == PKG_BUILD_EXECUTE):
            pkgBuildExecute[varName] = returnVar
#print pkgDescriptorFormal
def read(configFile):
global pkgName, pkgVersion, pkgRevision, pkgArchitecture, pkgCategoryMajor, pkgCategoryMinor
global pkgInformation
global xmlConfigFile
xmlConfigFile = configFile
global inFormal
doc = xml.dom.minidom.parse(xmlConfigFile)
mapping = {}
for node in doc.childNodes:
for level1Node in node.childNodes:
# in level 1 nodes
# level 1 nodes: descriptor
# build
if (string.strip(level1Node.nodeName) == "descriptor"):
# in level 1 node: descriptor
for descriptorSubNode in level1Node.childNodes:
# in descriptor child nodes
# descriptor child nodes: formal
if (string.strip(descriptorSubNode.nodeName) == "formal"):
for formalSubNode in descriptorSubNode.childNodes:
parseTag("pkgName", PKG_DESCRIPTOR_FORMAL, "name", formalSubNode, PARSE_TAG)
parseTag("pkgVersion", PKG_DESCRIPTOR_FORMAL, "version", formalSubNode, PARSE_TAG)
parseTag("pkgRevision", PKG_DESCRIPTOR_FORMAL, "revision", formalSubNode, PARSE_TAG)
# information
elif (string.strip(descriptorSubNode.nodeName) == "information"):
for informationSubNode in descriptorSubNode.childNodes:
parseTag("homepage", PKG_DESCRIPTOR_INFORMATION, "homepage", informationSubNode, PARSE_TAG)
parseTag("license", PKG_DESCRIPTOR_INFORMATION, "license", informationSubNode, PARSE_TAG)
parseTag("summary", PKG_DESCRIPTOR_INFORMATION, "summary", informationSubNode, PARSE_TAG)
parseTag("description", PKG_DESCRIPTOR_INFORMATION, "description", informationSubNode, PARSE_TAG)
# distribution
elif (string.strip(descriptorSubNode.nodeName) == "distribution"):
for distributionSubNode in descriptorSubNode.childNodes:
parseTag("distributionName", PKG_DESCRIPTOR_DISTRIBUTION, "name", distributionSubNode, PARSE_TAG)
parseTag("distributionVersion", PKG_DESCRIPTOR_DISTRIBUTION, "version", distributionSubNode, PARSE_TAG)
if (string.strip(distributionSubNode.nodeName) == "maintainer"):
for maintainerSubNode in distributionSubNode.childNodes:
parseTag("maintainerName", PKG_DESCRIPTOR_DISTRIBUTION_MAINTAINER, "name", maintainerSubNode, PARSE_TAG)
parseTag("maintainerEmail", PKG_DESCRIPTOR_DISTRIBUTION_MAINTAINER, "email", maintainerSubNode, PARSE_TAG)
elif (string.strip(distributionSubNode.nodeName) == "category"):
for categorySubNode in distributionSubNode.childNodes:
parseTag("categoryMajor", PKG_DESCRIPTOR_DISTRIBUTION_CATEGORY, "major", categorySubNode, PARSE_TAG)
parseTag("categoryMinor", PKG_DESCRIPTOR_DISTRIBUTION_CATEGORY, "minor", categorySubNode, PARSE_TAG)
parseTag("architecture", PKG_DESCRIPTOR_DISTRIBUTION, "architecture", distributionSubNode, PARSE_TAG)
if (string.strip(distributionSubNode.nodeName) == "dependency"):
for dependencySubNode in distributionSubNode.childNodes:
if (string.strip(dependencySubNode.nodeName) == "run"):
for dependencyPkgSubNode in dependencySubNode.childNodes:
if (string.strip(dependencyPkgSubNode.nodeName) == "pkg"):
for pkgSubNode in dependencyPkgSubNode.childNodes:
if (string.strip(pkgSubNode.nodeName) == "name"):
parseTag("pkgName", PKG_DESCRIPTOR_DISTRIBUTION_DEPENDENCY_RUN, "name", pkgSubNode, PARSE_TAG)
elif (string.strip(pkgSubNode.nodeName) == "version"):
parseTag("pkgVersion", PKG_DESCRIPTOR_DISTRIBUTION_DEPENDENCY_RUN, "version", pkgSubNode, PARSE_TAG)
elif (string.strip(pkgSubNode.nodeName) == "operand"):
parseTag("pkgVersionOperand", PKG_DESCRIPTOR_DISTRIBUTION_DEPENDENCY_RUN, "operand", pkgSubNode, PARSE_TAG)
elif (string.strip(pkgSubNode.nodeName) == "revision"):
parseTag("pkgRevision", PKG_DESCRIPTOR_DISTRIBUTION_DEPENDENCY_RUN, "revision", pkgSubNode, PARSE_TAG)
elif (string.strip(dependencySubNode.nodeName) == "build"):
for dependencyPkgSubNode in dependencySubNode.childNodes:
if (string.strip(dependencyPkgSubNode.nodeName) == "pkg"):
for pkgSubNode in dependencyPkgSubNode.childNodes:
if (string.strip(pkgSubNode.nodeName) == "name"):
parseTag("pkgName", PKG_DESCRIPTOR_DISTRIBUTION_DEPENDENCY_BUILD, "name", pkgSubNode, PARSE_TAG)
elif (string.strip(pkgSubNode.nodeName) == "version"):
parseTag("pkgVersion", PKG_DESCRIPTOR_DISTRIBUTION_DEPENDENCY_BUILD, "version", pkgSubNode, PARSE_TAG)
elif (string.strip(pkgSubNode.nodeName) == "operand"):
parseTag("pkgVersionOperand", PKG_DESCRIPTOR_DISTRIBUTION_DEPENDENCY_BUILD, "operand", pkgSubNode, PARSE_TAG)
elif (string.strip(pkgSubNode.nodeName) == "revision"):
parseTag("pkgRevision", PKG_DESCRIPTOR_DISTRIBUTION_DEPENDENCY_BUILD, "revision", pkgSubNode, PARSE_TAG)
elif (cmp(level1Node.nodeName, "build") == 0):
# in level 1 node: build
for buildSubNode in level1Node.childNodes:
# in build child nodes
# build child nodes: resource
if (string.strip(buildSubNode.nodeName) == "resource"):
NodeAttributes = buildSubNode.attributes
for attributeName in NodeAttributes.keys():
attributeValue = NodeAttributes.get(attributeName).nodeValue
if (string.strip(attributeName) == "protocol"):
parseTag("protocol", PKG_BUILD_RESOURCE_INFORMATION, attributeName, attributeValue, PARSE_PARAMETER)
elif (string.strip(attributeName) == "server"):
parseTag("server", PKG_BUILD_RESOURCE_INFORMATION, attributeName, attributeValue, PARSE_PARAMETER)
elif (string.strip(attributeName) == "root"):
parseTag("root", PKG_BUILD_RESOURCE_INFORMATION, attributeName, attributeValue, PARSE_PARAMETER)
for resourceSubNode in buildSubNode.childNodes:
# in resource child nodes
# resource child nodes: archive
if (string.strip(resourceSubNode.nodeName) == "archive"):
for archiveNode in resourceSubNode.childNodes:
if (string.strip(archiveNode.nodeName) == "file"):
parseTag("type", PKG_BUILD_RESOURCE_FILES, "type", "archive", PARSE_PARAMETER)
parseTag("file", PKG_BUILD_RESOURCE_FILES, "file", archiveNode, PARSE_TAG)
NodeAttributes = archiveNode.attributes
for attributeName in NodeAttributes.keys():
attributeValue = NodeAttributes.get(attributeName).nodeValue
if (string.strip(attributeName) == "md5"):
parseTag("md5", PKG_BUILD_RESOURCE_FILES, "md5", attributeValue, PARSE_PARAMETER)
elif (string.strip(resourceSubNode.nodeName) == "patch"):
NodeAttributes = resourceSubNode.attributes
for attributeName in NodeAttributes.keys():
attributeValue = NodeAttributes.get(attributeName).nodeValue
if (string.strip(attributeName) == "level"):
parseTag("patchLevel", PKG_BUILD_RESOURCE_INFORMATION, "level", attributeValue, PARSE_PARAMETER)
elif (string.strip(attributeName) == "location"):
parseTag("location", PKG_BUILD_RESOURCE_INFORMATION, attributeName, attributeValue, PARSE_PARAMETER)
for archiveNode in resourceSubNode.childNodes:
if (string.strip(archiveNode.nodeName) == "file"):
parseTag("type", PKG_BUILD_RESOURCE_FILES, "type", "patch", PARSE_PARAMETER)
parseTag("file", PKG_BUILD_RESOURCE_FILES, "file", archiveNode, PARSE_TAG)
NodeAttributes = archiveNode.attributes
for attributeName in NodeAttributes.keys():
attributeValue = NodeAttributes.get(attributeName).nodeValue
if (string.strip(attributeName) == "md5"):
parseTag("md5", PKG_BUILD_RESOURCE_FILES, "md5", attributeValue, PARSE_PARAMETER)
def getProtocol():
    return pkgBuildResourceInformation['protocol']
def getPkgName():
return pkgName
def getPkgVersion():
return pkgVersion
def getPkgRevision():
return pkgRevision
def getPkgArchitecture():
return pkgArchitecture
def getPkgDescriptorFormal():
return pkgDescriptorFormal
|
andreioprisan/apkg
|
source/parser/reader.py
|
Python
|
mit
| 21,750
|
"""Test Device Tracker config entry things."""
from homeassistant.components.device_tracker import DOMAIN, config_entry as ce
from homeassistant.core import callback
from homeassistant.helpers import device_registry as dr, entity_registry as er
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from tests.common import (
MockConfigEntry,
MockEntityPlatform,
MockPlatform,
mock_registry,
)
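# These tests exercise the config-entry side of device_tracker: legacy device
# cleanup, MAC registration re-enabling entities, and the dispatcher signal
# fired when a connected ScannerEntity is registered.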
def test_tracker_entity():
"""Test tracker entity."""
class TestEntry(ce.TrackerEntity):
"""Mock tracker class."""
should_poll = False
instance = TestEntry()
assert instance.force_update
instance.should_poll = True
assert not instance.force_update
async def test_cleanup_legacy(hass, enable_custom_integrations):
"""Test we clean up devices created by old device tracker."""
dev_reg = dr.async_get(hass)
ent_reg = er.async_get(hass)
config_entry = MockConfigEntry(domain="test")
config_entry.add_to_hass(hass)
device1 = dev_reg.async_get_or_create(
config_entry_id=config_entry.entry_id, identifiers={(DOMAIN, "device1")}
)
device2 = dev_reg.async_get_or_create(
config_entry_id=config_entry.entry_id, identifiers={(DOMAIN, "device2")}
)
device3 = dev_reg.async_get_or_create(
config_entry_id=config_entry.entry_id, identifiers={(DOMAIN, "device3")}
)
# Device with light + device tracker entity
entity1a = ent_reg.async_get_or_create(
DOMAIN,
"test",
"entity1a-unique",
config_entry=config_entry,
device_id=device1.id,
)
entity1b = ent_reg.async_get_or_create(
"light",
"test",
"entity1b-unique",
config_entry=config_entry,
device_id=device1.id,
)
# Just device tracker entity
entity2a = ent_reg.async_get_or_create(
DOMAIN,
"test",
"entity2a-unique",
config_entry=config_entry,
device_id=device2.id,
)
# Device with no device tracker entities
entity3a = ent_reg.async_get_or_create(
"light",
"test",
"entity3a-unique",
config_entry=config_entry,
device_id=device3.id,
)
# Device tracker but no device
entity4a = ent_reg.async_get_or_create(
DOMAIN,
"test",
"entity4a-unique",
config_entry=config_entry,
)
# Completely different entity
entity5a = ent_reg.async_get_or_create(
"light",
"test",
"entity4a-unique",
config_entry=config_entry,
)
await hass.config_entries.async_forward_entry_setup(config_entry, DOMAIN)
await hass.async_block_till_done()
for entity in (entity1a, entity1b, entity3a, entity4a, entity5a):
assert ent_reg.async_get(entity.entity_id) is not None
# We've removed device so device ID cleared
assert ent_reg.async_get(entity2a.entity_id).device_id is None
# Removed because only had device tracker entity
assert dev_reg.async_get(device2.id) is None
async def test_register_mac(hass):
"""Test registering a mac."""
dev_reg = dr.async_get(hass)
ent_reg = er.async_get(hass)
config_entry = MockConfigEntry(domain="test")
config_entry.add_to_hass(hass)
mac1 = "12:34:56:AB:CD:EF"
entity_entry_1 = ent_reg.async_get_or_create(
"device_tracker",
"test",
mac1 + "yo1",
original_name="name 1",
config_entry=config_entry,
disabled_by=er.RegistryEntryDisabler.INTEGRATION,
)
ce._async_register_mac(hass, "test", mac1, mac1 + "yo1")
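    # Creating a device whose connections include the registered MAC should
    # re-enable the entity that was disabled above.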
dev_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(dr.CONNECTION_NETWORK_MAC, mac1)},
)
await hass.async_block_till_done()
entity_entry_1 = ent_reg.async_get(entity_entry_1.entity_id)
assert entity_entry_1.disabled_by is None
async def test_connected_device_registered(hass):
"""Test dispatch on connected device being registered."""
registry = mock_registry(hass)
dispatches = []
@callback
def _save_dispatch(msg):
dispatches.append(msg)
unsub = async_dispatcher_connect(
hass, ce.CONNECTED_DEVICE_REGISTERED, _save_dispatch
)
class MockScannerEntity(ce.ScannerEntity):
"""Mock a scanner entity."""
@property
def ip_address(self) -> str:
return "5.4.3.2"
@property
def unique_id(self) -> str:
return self.mac_address
    class MockConnectedScannerEntity(MockScannerEntity):
        """Mock a connected scanner entity."""
        @property
        def mac_address(self) -> str:
            return "aa:bb:cc:dd:ee:ff"
        @property
        def is_connected(self) -> bool:
            return True
        @property
        def hostname(self) -> str:
            return "connected"
    class MockDisconnectedScannerEntity(MockScannerEntity):
        """Mock a disconnected scanner entity."""
        @property
        def mac_address(self) -> str:
            return "aa:bb:cc:dd:ee:00"
        @property
        def is_connected(self) -> bool:
            return False
        @property
        def hostname(self) -> str:
            return "disconnected"
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Mock setup entry method."""
async_add_entities(
[MockConnectedScannerEntity(), MockDisconnectedScannerEntity()]
)
return True
platform = MockPlatform(async_setup_entry=async_setup_entry)
config_entry = MockConfigEntry(entry_id="super-mock-id")
entity_platform = MockEntityPlatform(
hass, platform_name=config_entry.domain, platform=platform
)
assert await entity_platform.async_setup_entry(config_entry)
await hass.async_block_till_done()
full_name = f"{entity_platform.domain}.{config_entry.domain}"
assert full_name in hass.config.components
assert len(hass.states.async_entity_ids()) == 0 # should be disabled
assert len(registry.entities) == 2
assert (
registry.entities["test_domain.test_aa_bb_cc_dd_ee_ff"].config_entry_id
== "super-mock-id"
)
unsub()
assert dispatches == [
{"ip": "5.4.3.2", "mac": "aa:bb:cc:dd:ee:ff", "host_name": "connected"}
]
|
rohitranjan1991/home-assistant
|
tests/components/device_tracker/test_config_entry.py
|
Python
|
mit
| 6,345
|
#! /usr/bin/env python3.7
HELP_TEXT = ["!rank <user?>", "Retrieve basic information about an osu user."]
def call(salty_inst, c_msg, **kwargs):
try:
user = c_msg["message"].split("rank ")[1]
except IndexError:
user = ""
osu_nick = user or c_msg["channel"][1:]
success, response = salty_inst.osu_api.get_user(osu_nick, **kwargs)
if not success:
return False, "Error retrieving user from osu api ({})."\
.format(response.status_code)
try:
response = response[0]
except IndexError:
return False, "No users found with name: {0}.".format(osu_nick)
msg = "{} is level {} with {}% accuracy and ranked {}.".format(
response["username"],
int(round(float(response["level"]))),
round(float(response["accuracy"]), 2),
"{:,}".format(int(response["pp_rank"]))
)
return True, msg
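# Example reply shape (values are illustrative, not real data):
#   "someplayer is level 100 with 98.76% accuracy and ranked 1,234."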
def test(salty_inst, c_msg, **kwargs):
assert True
|
BatedUrGonnaDie/salty_bot
|
modules/commands/rank.py
|
Python
|
mit
| 949
|
"""Convenience function to create a context for the built in error functions"""
import logging
import copy
import time
import symengine
from pycalphad import variables as v
from pycalphad.codegen.callables import build_callables
from pycalphad.core.utils import instantiate_models, filter_phases, unpack_components
from espei.error_functions import get_zpf_data, get_thermochemical_data, get_equilibrium_thermochemical_data
from espei.utils import database_symbols_to_fit, get_model_dict
_log = logging.getLogger(__name__)
def setup_context(dbf, datasets, symbols_to_fit=None, data_weights=None, phase_models=None, make_callables=True):
"""
Set up a context dictionary for calculating error.
Parameters
----------
dbf : Database
A pycalphad Database that will be fit
datasets : PickleableTinyDB
A database of single- and multi-phase data to fit
    symbols_to_fit : list of str
        List of symbols in the Database that will be fit. If None (default),
        all parameters prefixed with `VV` followed by a number, e.g. VV0001,
        will be fit.
    data_weights : dict, optional
        Mapping from data type (e.g. 'ZPF', 'ACR') to relative weight.
    phase_models : dict, optional
        Phase models description; if given, its 'components' entry determines
        the components to use.
    make_callables : bool, optional
        If True (default), precompile pycalphad callables for the equilibrium
        calculations.
    Returns
    -------
    dict
        Context dictionary of keyword arguments for the error functions.
    Notes
    -----
    A copy of the Database is made and used in the context. To commit changes
    back to the original database, the dbf.symbols.update method should be used.
    """
dbf = copy.deepcopy(dbf)
if phase_models is not None:
comps = sorted(phase_models['components'])
else:
comps = sorted([sp for sp in dbf.elements])
if symbols_to_fit is None:
symbols_to_fit = database_symbols_to_fit(dbf)
else:
symbols_to_fit = sorted(symbols_to_fit)
data_weights = data_weights if data_weights is not None else {}
if len(symbols_to_fit) == 0:
raise ValueError('No degrees of freedom. Database must contain symbols starting with \'V\' or \'VV\', followed by a number.')
else:
_log.info('Fitting %s degrees of freedom.', len(symbols_to_fit))
for x in symbols_to_fit:
if isinstance(dbf.symbols[x], symengine.Piecewise):
_log.debug('Replacing %s in database', x)
dbf.symbols[x] = dbf.symbols[x].args[0]
# construct the models for each phase, substituting in the SymEngine symbol to fit.
if phase_models is not None:
model_dict = get_model_dict(phase_models)
else:
model_dict = {}
_log.trace('Building phase models (this may take some time)')
    t1 = time.time()
phases = sorted(filter_phases(dbf, unpack_components(dbf, comps), dbf.phases.keys()))
parameters = dict(zip(symbols_to_fit, [0]*len(symbols_to_fit)))
models = instantiate_models(dbf, comps, phases, model=model_dict, parameters=parameters)
if make_callables:
eq_callables = build_callables(dbf, comps, phases, models, parameter_symbols=symbols_to_fit,
output='GM', build_gradients=True, build_hessians=True,
additional_statevars={v.N, v.P, v.T})
else:
eq_callables = None
t2 = time.time()
_log.trace('Finished building phase models (%0.2fs)', t2-t1)
_log.trace('Getting non-equilibrium thermochemical data (this may take some time)')
t1 = time.time()
thermochemical_data = get_thermochemical_data(dbf, comps, phases, datasets, model=model_dict, weight_dict=data_weights, symbols_to_fit=symbols_to_fit)
t2 = time.time()
_log.trace('Finished getting non-equilibrium thermochemical data (%0.2fs)', t2-t1)
_log.trace('Getting equilibrium thermochemical data (this may take some time)')
t1 = time.time()
eq_thermochemical_data = get_equilibrium_thermochemical_data(dbf, comps, phases, datasets, model=model_dict, parameters=parameters, data_weight_dict=data_weights)
t2 = time.time()
_log.trace('Finished getting equilibrium thermochemical data (%0.2fs)', t2-t1)
_log.trace('Getting ZPF data (this may take some time)')
t1 = time.time()
zpf_data = get_zpf_data(dbf, comps, phases, datasets, model=model_dict, parameters=parameters)
t2 = time.time()
_log.trace('Finished getting ZPF data (%0.2fs)', t2-t1)
# context for the log probability function
# for all cases, parameters argument addressed in MCMC loop
error_context = {
'symbols_to_fit': symbols_to_fit,
'zpf_kwargs': {
'zpf_data': zpf_data,
'data_weight': data_weights.get('ZPF', 1.0),
},
'equilibrium_thermochemical_kwargs': {
'eq_thermochemical_data': eq_thermochemical_data,
},
'thermochemical_kwargs': {
'thermochemical_data': thermochemical_data,
},
'activity_kwargs': {
'dbf': dbf, 'comps': comps, 'phases': phases, 'datasets': datasets,
'phase_models': models, 'callables': eq_callables,
'data_weight': data_weights.get('ACR', 1.0),
},
}
return error_context
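# A minimal usage sketch (the TDB filename and `datasets` object here are
# illustrative assumptions, not part of this module):
#
#     from pycalphad import Database
#     dbf = Database('my_system.tdb')
#     ctx = setup_context(dbf, datasets, data_weights={'ZPF': 2.0})
#     ctx['symbols_to_fit']  # e.g. ['VV0000', 'VV0001']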
|
PhasesResearchLab/ESPEI
|
espei/error_functions/context.py
|
Python
|
mit
| 4,910
|
import pygame
import sys
pygame.init()
w = pygame.display.set_mode((800,800))
pygame.display.set_caption("QFPD")
clock = pygame.time.Clock()
sprite = pygame.image.load("test.png")
leave = False
def drawSprite(x, y):
w.blit(sprite, (x, y))
x = 0
while not leave:
    for event in pygame.event.get():
        print(event)
        if event.type == pygame.QUIT:
            leave = True
    # advance the sprite one pixel per frame instead of blocking in an inner
    # loop, so QUIT events stay responsive
    w.fill((255, 255, 255))
    drawSprite(x, 0)
    x = (x + 1) % 800
    pygame.display.update()
    clock.tick(60)
pygame.quit()
sys.exit()
|
AbusementPark/Quest-For-Pink-Donught
|
QFPD/main.py
|
Python
|
mit
| 588
|
import datetime
import json
import logging
import md5
import random
import tba_config
import urllib
import uuid
import webapp2
from google.appengine.api import urlfetch
from google.appengine.ext import deferred
from google.appengine.ext import ndb
from consts.account_permissions import AccountPermissions
from consts.auth_type import AuthType
from consts.event_type import EventType
from controllers.base_controller import CacheableHandler
from datafeeds.parser_base import ParserInputException
from helpers.user_bundle import UserBundle
from helpers.validation_helper import ValidationHelper
from models.api_auth_access import ApiAuthAccess
from models.cached_response import CachedResponse
from models.event import Event
from models.sitevar import Sitevar
# used for deferred call
def track_call(api_action, api_label, x_tba_app_id):
"""
For more information about GAnalytics Protocol Parameters, visit
https://developers.google.com/analytics/devguides/collection/protocol/v1/parameters
"""
analytics_id = Sitevar.get_by_id("google_analytics.id")
if analytics_id is None:
logging.warning("Missing sitevar: google_analytics.id. Can't track API usage.")
else:
GOOGLE_ANALYTICS_ID = analytics_id.contents['GOOGLE_ANALYTICS_ID']
payload = urllib.urlencode({
'v': 1,
'tid': GOOGLE_ANALYTICS_ID,
'cid': uuid.uuid3(uuid.NAMESPACE_X500, str(x_tba_app_id)),
't': 'event',
'ec': 'api-v02',
'ea': api_action,
'el': api_label,
'cd1': x_tba_app_id, # custom dimension 1
'ni': 1,
'sc': 'end', # forces tracking session to end
})
urlfetch.fetch(
url='https://www.google-analytics.com/collect',
validate_certificate=True,
method=urlfetch.POST,
deadline=30,
payload=payload,
)
class ApiBaseController(CacheableHandler):
API_VERSION = 2
SHOULD_ADD_ADMIN_BAR = False
def __init__(self, *args, **kw):
super(ApiBaseController, self).__init__(*args, **kw)
self.response.headers['content-type'] = 'application/json; charset="utf-8"'
self.response.headers['Access-Control-Allow-Origin'] = '*'
self.response.headers['X-Robots-Tag'] = 'noindex'
def handle_exception(self, exception, debug):
"""
        Handle an HTTP exception and actually write out a response.
        Called by webapp2 when abort() is called; stops code execution.
"""
if isinstance(exception, webapp2.HTTPException):
self.response.set_status(exception.code)
self.response.out.write(self._errors)
else:
logging.exception(exception)
self.response.set_status(500)
def get(self, *args, **kw):
self._validate_tba_app_id()
self._errors = ValidationHelper.validate(self._validators)
if self._errors:
self.abort(400)
super(ApiBaseController, self).get(*args, **kw)
self.response.headers['X-TBA-Version'] = '{}'.format(self.API_VERSION)
self.response.headers['Vary'] = 'Accept-Encoding'
if not self._errors:
self._track_call(*args, **kw)
def options(self, *args, **kw):
"""
        Supply an OPTIONS method in order to comply with CORS preflighted requests
https://developer.mozilla.org/en-US/docs/Web/HTTP/Access_control_CORS#Preflighted_requests
"""
self.response.headers['Access-Control-Allow-Methods'] = "GET, OPTIONS"
self.response.headers['Access-Control-Allow-Headers'] = 'X-TBA-App-Id'
def _read_cache(self):
"""
Overrides parent method to use CachedResponse instead of memcache
"""
response = CachedResponse.get_by_id(self.cache_key)
if response:
self._last_modified = response.updated
return response
else:
return None
def _write_cache(self, response):
"""
Overrides parent method to use CachedResponse instead of memcache
"""
if tba_config.CONFIG["response_cache"]:
CachedResponse(
id=self.cache_key,
headers_json=json.dumps(dict(response.headers)),
body=response.body,
).put()
@classmethod
def delete_cache_multi(cls, cache_keys):
"""
Overrides parent method to use CachedResponse instead of memcache
"""
logging.info("Deleting cache keys: {}".format(cache_keys))
ndb.delete_multi([ndb.Key(CachedResponse, cache_key) for cache_key in cache_keys])
def _track_call_defer(self, api_action, api_label):
if random.random() < tba_config.GA_RECORD_FRACTION:
deferred.defer(track_call, api_action, api_label, self.x_tba_app_id, _queue="api-track-call", _url='/_ah/queue/deferred_apiv2_track_call')
def _validate_tba_app_id(self):
"""
Tests the presence of a X-TBA-App-Id header or URL param.
"""
self.x_tba_app_id = self.request.headers.get("X-TBA-App-Id")
if self.x_tba_app_id is None:
self.x_tba_app_id = self.request.get('X-TBA-App-Id')
logging.info("X-TBA-App-Id: {}".format(self.x_tba_app_id))
if not self.x_tba_app_id:
self._errors = json.dumps({"Error": "X-TBA-App-Id is a required header or URL param. Please see http://www.thebluealliance.com/apidocs for more info."})
self.abort(400)
x_tba_app_id_parts = self.x_tba_app_id.split(':')
if len(x_tba_app_id_parts) != 3 or any(len(part) == 0 for part in x_tba_app_id_parts):
self._errors = json.dumps({"Error": "X-TBA-App-Id must follow a specific format. Please see http://www.thebluealliance.com/apidocs for more info."})
self.abort(400)
class ApiTrustedBaseController(webapp2.RequestHandler):
REQUIRED_AUTH_TYPES = set()
def __init__(self, *args, **kw):
super(ApiTrustedBaseController, self).__init__(*args, **kw)
self.response.headers['content-type'] = 'application/json; charset="utf-8"'
self.response.headers['Access-Control-Allow-Origin'] = '*'
self._user_bundle = UserBundle()
def handle_exception(self, exception, debug):
"""
        Handle an HTTP exception and actually write out a response.
        Called by webapp2 when abort() is called; stops code execution.
"""
logging.info(exception)
if isinstance(exception, webapp2.HTTPException):
self.response.set_status(exception.code)
self.response.out.write(self._errors)
else:
self.response.set_status(500)
def options(self, event_key):
"""
        Supply an OPTIONS method in order to comply with CORS preflighted requests
https://developer.mozilla.org/en-US/docs/Web/HTTP/Access_control_CORS#Preflighted_requests
"""
self.response.headers['Access-Control-Allow-Methods'] = "POST, OPTIONS"
self.response.headers['Access-Control-Allow-Headers'] = 'Content-Type, X-TBA-Auth-Id, X-TBA-Auth-Sig'
def _validate_auth(self, auth, event_key):
status_sitevar_future = Sitevar.get_by_id_async('trustedapi')
allowed_event_keys = [ekey.id() for ekey in auth.event_list]
if event_key not in allowed_event_keys:
return "Only allowed to edit events: {}".format(', '.join(allowed_event_keys))
missing_auths = self.REQUIRED_AUTH_TYPES.difference(set(auth.auth_types_enum))
if missing_auths != set():
return "You do not have permission to edit: {}. If this is incorrect, please contact TBA admin.".format(",".join([AuthType.write_type_names[ma] for ma in missing_auths]))
if auth.expiration and auth.expiration < datetime.datetime.now():
return "These keys expired on {}. Contact TBA admin to make changes".format(auth.expiration)
status_sitevar = status_sitevar_future.get_result()
if status_sitevar:
for auth_type in self.REQUIRED_AUTH_TYPES:
if not status_sitevar.contents.get(str(auth_type), True): # Fail open
return "The trusted API has been temporarily disabled by the TBA admins. Please contact them for more details."
return None
def post(self, event_key):
event_key = event_key.lower() # Normalize keys to lower case (TBA convention)
# Make sure we are processing for a valid event first
# (it's fine to do this before auth, since leaking the existence of an
# event isn't really that big a deal)
self.event = Event.get_by_id(event_key)
if not self.event:
self._errors = json.dumps({"Error": "Event {} not found".format(event_key)})
self.abort(404)
# Start by allowing admins to edit any event
user_is_admin = (self._user_bundle.user and self._user_bundle.is_current_user_admin)
        # Also grant access if the user has the EVENTWIZARD permission and this
# is a current year offseason event
account = self._user_bundle.account
current_year = datetime.datetime.now().year
user_has_permission = (self.event.event_type_enum == EventType.OFFSEASON
and self.event.year == current_year
and account is not None
and AccountPermissions.OFFSEASON_EVENTWIZARD in account.permissions)
user_has_auth = (user_is_admin or user_has_permission)
if not user_has_auth and self._user_bundle.user:
# See if this user has any auth keys granted to its account
now = datetime.datetime.now()
auth_tokens = ApiAuthAccess.query(ApiAuthAccess.owner == account.key,
ApiAuthAccess.event_list == ndb.Key(Event, event_key),
ndb.OR(ApiAuthAccess.expiration == None, ApiAuthAccess.expiration >= now)).fetch()
user_has_auth = any(self._validate_auth(auth, event_key) is None for auth in auth_tokens)
if not user_has_auth:
# If not, check if auth id/secret were passed as headers
auth_id = self.request.headers.get('X-TBA-Auth-Id')
if not auth_id:
self._errors = json.dumps({"Error": "Must provide a request header parameter 'X-TBA-Auth-Id'"})
self.abort(400)
auth_sig = self.request.headers.get('X-TBA-Auth-Sig')
if not auth_sig:
self._errors = json.dumps({"Error": "Must provide a request header parameter 'X-TBA-Auth-Sig'"})
self.abort(400)
auth = ApiAuthAccess.get_by_id(auth_id)
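            # The expected signature is MD5(auth_secret + request_path + request_body)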
expected_sig = md5.new('{}{}{}'.format(auth.secret if auth else None, self.request.path, self.request.body)).hexdigest()
if not auth or expected_sig != auth_sig:
logging.info("Auth sig: {}, Expected sig: {}".format(auth_sig, expected_sig))
self._errors = json.dumps({"Error": "Invalid X-TBA-Auth-Id and/or X-TBA-Auth-Sig!"})
self.abort(401)
# Checks event key is valid, correct auth types, and expiration
error = self._validate_auth(auth, event_key)
if error:
self._errors = json.dumps({"Error": error})
self.abort(401)
try:
self._process_request(self.request, event_key)
except ParserInputException, e:
self._errors = json.dumps({"Error": e.message})
self.abort(400)
|
jaredhasenklein/the-blue-alliance
|
controllers/api/api_base_controller.py
|
Python
|
mit
| 11,616
|
"""
TrueType Fonts
"""
from .base_font import PdfBaseFont
class TrueTypeFont(PdfBaseFont):
"""For our purposes, these are just a more restricted form of the Type 1
Fonts, so...we're done here."""
def text_space_coords(self, x, y):
"""Type1 fonts just scale by 1/1000 to convert from glyph space"""
return x/1000., y/1000.
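# For example, text_space_coords(500, -250) returns (0.5, -0.25): glyph-space
# units scaled by 1/1000 into text space.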
|
ajmarks/gymnast
|
gymnast/pdf_elements/fonts/true_type.py
|
Python
|
mit
| 359
|
#!/usr/bin/env python
#coding:utf-8
# Purpose: test node organizer
# Created: 31.01.2011
# Copyright (C) 2011, Manfred Moitzi
# License: MIT
from __future__ import unicode_literals, print_function, division
__author__ = "mozman <mozman@gmx.at>"
import unittest
# test helpers
from mytesttools import create_node
# objects to test
from ezodf2.nodeorganizer import EpilogueTagBlock
def getETB(nodes, tags='xyz'):
return EpilogueTagBlock(create_node(nodes), tags)
class TestEpilogueTagBlockBasics(unittest.TestCase):
def test_xmlnode_is_none_error(self):
with self.assertRaises(ValueError):
EpilogueTagBlock(None, '')
def test_no_epilogue_tags(self):
with self.assertRaises(ValueError):
EpilogueTagBlock(create_node('abc'), '')
def test_unique_order_tags(self):
with self.assertRaises(ValueError):
EpilogueTagBlock(create_node('abc'), 'abcc')
def test_get_count(self):
etb = getETB('aabbccghixxyyzz')
self.assertEqual(len(etb), 6)
def test_get_epilogue_only_tree(self):
etb = getETB('xxyyzz')
self.assertEqual(len(etb), 6)
    def test_get_count_without_epilogue(self):
etb = getETB('aabbccghi')
self.assertEqual(len(etb), 0)
def test_get_count_empty_tree(self):
etb = getETB('')
self.assertEqual(len(etb), 0)
def test_get_count_from_not_well_formed_tree(self):
etb = getETB('aabbccgxzhi')
self.assertEqual(len(etb), 0)
class TestEpilogueTagBlockTagInfo(unittest.TestCase):
def test_get_tag_info_z(self):
etb = getETB('aabbccghixxyyzz')
start_index, count = etb.tag_info('z')
self.assertEqual((13, 2), (start_index, count))
def test_get_tag_info_x(self):
etb = getETB('aabbccghixxxyyzz')
start_index, count = etb.tag_info('x')
self.assertEqual((9, 3), (start_index, count))
def test_get_tag_info_for_not_existing_tag(self):
etb = getETB('aabbccghixxxzz')
start_index, count = etb.tag_info('y')
self.assertEqual((-1, 0), (start_index, count))
def test_get_tag_info_invalid_tag_error(self):
etb = getETB('aabbccghixxxzz')
with self.assertRaises(ValueError):
etb.tag_info('w')
def test_get_tag_info_for_tag_not_in_epilogue(self):
etb = getETB('aabbccghixxxgyyzz')
start_index, count = etb.tag_info('x')
self.assertEqual((-1, 0), (start_index, count))
class TestEpilogueTagBlockInsertPositionAfter(unittest.TestCase):
def test_tag_error(self):
tb = EpilogueTagBlock(create_node('abc'), 'xyz')
with self.assertRaises(ValueError):
tb.insert_position_after('d')
def test_after_existing_tag(self):
tb = getETB('aabbccghixxyyzz', 'xyz')
self.assertEqual(tb.insert_position_after('x'), 11)
self.assertEqual(tb.insert_position_after('y'), 13)
self.assertEqual(tb.insert_position_after('z'), 15)
def test_after_not_existing_tag(self):
tb = getETB('aabbccghixxzz', 'xyz')
self.assertEqual(tb.insert_position_after('x'), 11)
self.assertEqual(tb.insert_position_after('y'), 11)
self.assertEqual(tb.insert_position_after('z'), 13)
def test_without_epilogue(self):
tb = getETB('aabbccghi', 'xyz')
self.assertEqual(tb.insert_position_after('x'), 9)
self.assertEqual(tb.insert_position_after('y'), 9)
        self.assertEqual(tb.insert_position_after('z'), 9)
def test_for_empty_node(self):
tb = getETB('', 'xyz')
self.assertEqual(tb.insert_position_after('x'), 0)
self.assertEqual(tb.insert_position_after('y'), 0)
self.assertEqual(tb.insert_position_after('z'), 0)
class TestEpilogueTagBlockInsertPositionBefore(unittest.TestCase):
def test_tag_error(self):
tb = getETB('abc', 'xyz')
with self.assertRaises(ValueError):
tb.insert_position_before('d')
def test_before_existing_tag(self):
tb = getETB('aabbccghixxyyzz', 'xyz')
self.assertEqual(tb.insert_position_before('x'), 9)
self.assertEqual(tb.insert_position_before('y'), 11)
self.assertEqual(tb.insert_position_before('z'), 13)
def test_before_not_existing_tag(self):
tb = getETB('aabbccghixxzz', 'xyz')
self.assertEqual(tb.insert_position_before('x'), 9)
self.assertEqual(tb.insert_position_before('y'), 11)
self.assertEqual(tb.insert_position_before('z'), 11)
def test_without_epilogue(self):
tb = getETB('aabbccghi', 'xyz')
self.assertEqual(tb.insert_position_before('x'), 9)
self.assertEqual(tb.insert_position_before('y'), 9)
        self.assertEqual(tb.insert_position_before('z'), 9)
def test_for_empty_node(self):
tb = getETB('', 'xyz')
self.assertEqual(tb.insert_position_before('x'), 0)
self.assertEqual(tb.insert_position_before('y'), 0)
self.assertEqual(tb.insert_position_before('z'), 0)
def test_insert_before_epilogue_block(self):
tb = getETB('aabbccghixxzz', 'xyz')
self.assertEqual(tb.insert_position_before(), 9)
if __name__=='__main__':
unittest.main()
|
iwschris/ezodf2
|
tests/test_epilogue_tagblock.py
|
Python
|
mit
| 5,381
|
from functools import partial
from typing import TYPE_CHECKING
from PyQt5.QtCore import QObject, pyqtSignal
from PyQt5.QtWidgets import (QHBoxLayout, QLabel, QVBoxLayout)
from electrum_ltc.plugin import hook
from electrum_ltc.i18n import _
from electrum_ltc.gui.qt.util import ThreadedButton, Buttons, EnterButton, WindowModalDialog, OkButton
from .labels import LabelsPlugin
if TYPE_CHECKING:
from electrum_ltc.gui.qt import ElectrumGui
from electrum_ltc.gui.qt.main_window import ElectrumWindow
from electrum_ltc.wallet import Abstract_Wallet
class QLabelsSignalObject(QObject):
labels_changed_signal = pyqtSignal(object)
class Plugin(LabelsPlugin):
def __init__(self, *args):
LabelsPlugin.__init__(self, *args)
self.obj = QLabelsSignalObject()
self._init_qt_received = False
def requires_settings(self):
return True
def settings_widget(self, window: WindowModalDialog):
return EnterButton(_('Settings'),
partial(self.settings_dialog, window))
def settings_dialog(self, window: WindowModalDialog):
wallet = window.parent().wallet
if not wallet.get_fingerprint():
window.show_error(_("{} plugin does not support this type of wallet.")
.format("Label Sync"))
return
d = WindowModalDialog(window, _("Label Settings"))
hbox = QHBoxLayout()
hbox.addWidget(QLabel("Label sync options:"))
upload = ThreadedButton("Force upload",
partial(self.push, wallet),
partial(self.done_processing_success, d),
partial(self.done_processing_error, d))
download = ThreadedButton("Force download",
partial(self.pull, wallet, True),
partial(self.done_processing_success, d),
partial(self.done_processing_error, d))
vbox = QVBoxLayout()
vbox.addWidget(upload)
vbox.addWidget(download)
hbox.addLayout(vbox)
vbox = QVBoxLayout(d)
vbox.addLayout(hbox)
vbox.addSpacing(20)
vbox.addLayout(Buttons(OkButton(d)))
return bool(d.exec_())
def on_pulled(self, wallet):
self.obj.labels_changed_signal.emit(wallet)
def done_processing_success(self, dialog, result):
dialog.show_message(_("Your labels have been synchronised."))
def done_processing_error(self, dialog, exc_info):
self.logger.error("Error synchronising labels", exc_info=exc_info)
dialog.show_error(_("Error synchronising labels") + f':\n{repr(exc_info[1])}')
@hook
def init_qt(self, gui: 'ElectrumGui'):
if self._init_qt_received: # only need/want the first signal
return
self._init_qt_received = True
# If the user just enabled the plugin, the 'load_wallet' hook would not
# get called for already loaded wallets, hence we call it manually for those:
for window in gui.windows:
self.load_wallet(window.wallet, window)
@hook
def load_wallet(self, wallet: 'Abstract_Wallet', window: 'ElectrumWindow'):
self.obj.labels_changed_signal.connect(window.update_tabs)
self.start_wallet(wallet)
@hook
def on_close_window(self, window):
try:
self.obj.labels_changed_signal.disconnect(window.update_tabs)
except TypeError:
pass # 'method' object is not connected
self.stop_wallet(window.wallet)
|
pooler/electrum-ltc
|
electrum_ltc/plugins/labels/qt.py
|
Python
|
mit
| 3,637
|
##
# @file main.py
# @brief this file runs the case where only a magnetic field exists
# @author Pi-Yueh Chuang (pychuang@gwu.edu)
# @version alpha
# @date 2015-11-17
# python 3
import numpy
from Field import Field
from Particle import Particle
e = 1.602176565e-19 # [C]
me = 9.10938291e-31 # [kg]
eps0 = 8.85e-12 # [s^2 C^2 m^-2 Kg^-1]
dt_e = 1e-11 # time step, [s]
dt_i = 1e-5 # time step, [s]
Ne = 1
Ni = 0
Nx = Ny = Nz = 100
Lx = Ly = Lz = 5e-4
B = 0.1 # [T]
E = 0.0 # [V/m]
V0 = numpy.array([1e5, 2e5, 1.5e5]) # initial velocity, [m/s]
X0 = numpy.array([0., 0., 0.]) # initial position
electrons = numpy.empty(Ne, dtype=Particle)
ions = numpy.empty(Ni, dtype=Particle)
electrons[0] = Particle(X0, V0, -e, me, dt_e)
field = Field(Nx, Ny, Nz, Lx, Ly, Lz)
field.initE(E)
field.initB(B)
trace = numpy.zeros((3, 100))
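# record the first electron's position at each of the 100 time steps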
for i in range(100):
field.updateTho(electrons, ions)
field.updateParticleProps(electrons)
for particle in electrons:
particle.updateVX()
print(i, electrons[0].X)
trace[:, i] = electrons[0].X.copy()
import matplotlib
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import pyplot
fig = pyplot.figure(figsize=(12,9), dpi=75)
ax = fig.gca(projection='3d')
ax.plot(trace[0, :], trace[1, :], trace[2, :],
lw=2, color='k')
ax.set_title('One electron under\nmagnetic field independent to plasma',
fontsize=18)
ax.set_xlim(-2.5e-5, 0)
ax.set_xticks([-2.5e-5, -2.0e-5, -1.5e-5, -1e-5,-5e-6, 0])
ax.set_xticklabels([-25, -20, -15, -10,-5, 0])
ax.xaxis.get_major_formatter().set_offset_string("1e-6")
ax.xaxis.set_pane_color((1.0, 1.0, 1.0, 1.0))
ax.set_xlabel("x (m)", fontsize=16)
ax.set_ylim(-1e-5, 2e-5)
ax.set_yticks([-10e-6, -5e-6, 0, 5e-6, 10e-6, 15e-6, 20e-6])
ax.set_yticklabels([-10, -5, 0, 5, 10, 15, 20])
ax.yaxis.get_major_formatter().set_offset_string("1e-6")
ax.yaxis.set_pane_color((1.0, 1.0, 1.0, 1.0))
ax.set_ylabel("y (m)", fontsize=16)
ax.set_zlim(0, 1.5e-4)
ax.set_zticks([0, 5e-5, 10e-5, 15e-5])
ax.set_zticklabels([0, 5, 10, 15])
ax.zaxis.get_major_formatter().set_offset_string("1e-5")
ax.zaxis.set_pane_color((1.0, 1.0, 1.0, 1.0))
ax.set_zlabel("z (m)", fontsize=16)
ax.set_top_view()
pyplot.savefig("../figures/verify_B=0.1_E=0.png", dpi=75)
pyplot.show()
|
piyueh/PlasmaPIC
|
src/verify_B=0.1_E=0.py
|
Python
|
mit
| 2,272
|
from disco.types.message import MessageEmbed
from disco.api.http import APIException
import re
import string
from abc import ABC, abstractmethod
import gevent
class TriggerItemReminder(object):
def __init__(self, content, embed=None, attachments=[]):
self.content = content
self.embed = embed
self.attachments = attachments
self.attachmentsData = [open(apath, 'rb') for apath in self.attachments]
class TriggerCooldown(ABC):
@abstractmethod
def isSatisfied(self, event):
pass
def onReply(self, event, msg):
pass
def onMessageUpdate(self, event):
pass
class TriggerCooldownTimeInterval(TriggerCooldown):
def __init__(self, secs):
self.seconds = secs
self.timeStampPerChannel = {}
def isSatisfied(self, event):
return event.channel_id not in self.timeStampPerChannel or ((event.timestamp - self.timeStampPerChannel[event.channel_id]).total_seconds() >= self.seconds)
def onReply(self, event, msg):
self.timeStampPerChannel[event.channel_id] = event.timestamp
class TriggerCooldownMsgInterval(TriggerCooldown):
def __init__(self, interval):
self.msgInterval = interval
self.msgCounterPerChannel = {}
def isSatisfied(self, event):
if event.channel_id not in self.msgCounterPerChannel:
self.msgCounterPerChannel[event.channel_id] = self.msgInterval
return True
elif self.msgCounterPerChannel[event.channel_id] <= 0:
return True
else:
return False
def onReply(self, event, msg):
self.msgCounterPerChannel[event.channel_id] = self.msgInterval
def onMessageUpdate(self, event):
if event.channel_id in self.msgCounterPerChannel and self.msgCounterPerChannel[event.channel_id] > 0:
self.msgCounterPerChannel[event.channel_id] -= 1
class TriggerItemBase(object):
def __init__(self, tokens, reminder, replacementTokens=None, cds=[], messageDuration=None, logger=None):
self.patterns = tokens
self.reminder = reminder
self.replacementTokens = replacementTokens
self.cooldowns = cds
self.messageDuration = messageDuration
self.logger = logger
def attachLogger(self, logger):
self.logger = logger
def logMessage(self, msg):
if self.logger:
self.logger.info(msg)
else:
print(msg)
def onMessageUpdate(self, e):
for c in self.cooldowns:
c.onMessageUpdate(e)
def delete_message_task(self, msg):
try:
msg.delete()
except APIException:
self.logMessage("FAILED deletion of message from gevent")
def onReply(self, event, msg):
for c in self.cooldowns:
c.onReply(event, msg)
if self.messageDuration is not None:
gevent.spawn_later(self.messageDuration, self.delete_message_task, msg)
def areCooldownsSatisfied(self, e):
for c in self.cooldowns:
if not c.isSatisfied(e):
return False
# Here all cooldowns are satisfied
return True
def craftReply(self, event, satisfiedPatternIndex):
e = None
# here, we check for None since empty string means "suppress embeds"
if self.reminder.embed is not None:
e = MessageEmbed()
e.set_image(url=self.reminder.embed)
atts = []
if self.reminder.attachments:
atts = [(self.reminder.attachments[i], self.reminder.attachmentsData[i]) for i in range(len(self.reminder.attachments))]
m = self.reminder.content
m = m.replace(u'$AUTHOR', u'<@' + str(event.author.id) + '>')
        # check if we have tokens to substitute for this satisfied pattern
        if self.replacementTokens and satisfiedPatternIndex < len(self.replacementTokens):
for index, t in enumerate(self.replacementTokens[satisfiedPatternIndex]):
m = m.replace("$" + str(index + 1), t)
return (m, e, atts)
def satisfies(self, event):
pass
class TriggerItemRegex(TriggerItemBase):
def __init__(self, tokens, reminder, replacementTokens=None, cds=[], messageDuration=None, logger=None):
TriggerItemBase.__init__(self, tokens, reminder, replacementTokens, cds, messageDuration, logger)
self.patterns = [re.compile(t) for t in tokens]
def satisfies(self, event):
text = event.content.lower()
for index, p in enumerate(self.patterns):
if p.search(text) and self.areCooldownsSatisfied(event):
return self.craftReply(event, index)
return (None, None, [])
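# A minimal wiring sketch (illustrative only; the Disco event plumbing and
# message sending live elsewhere in the bot):
#
#     reminder = TriggerItemReminder("Hi $AUTHOR, you said $1!")
#     item = TriggerItemRegex([r"\bhelp\b"], reminder,
#                             replacementTokens=[["help"]],
#                             cds=[TriggerCooldownTimeInterval(60)])
#     # in a message handler: msg, embed, atts = item.satisfies(event)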
class TriggerItemEqualStems(TriggerItemBase):
def __init__(self, tokens, reminder, lang=None, replacementTokens=None, cds=[], messageDuration=None, logger=None):
TriggerItemBase.__init__(self, tokens, reminder, replacementTokens, cds, messageDuration, logger)
from nltk.stem import SnowballStemmer
self.language = "english" if not lang else lang
self.stemmer = SnowballStemmer(self.language)
self.translatorPunctuation = str.maketrans('', '', string.punctuation)
self.patterns = tokens
def ensureLanguage(self, text):
if not self.language:
self.logMessage('WARNING: can not ensure language if current language is not set')
return False
else:
from polyglot.detect import Detector
from polyglot.detect.base import UnknownLanguage
try:
detector = Detector(text)
if detector.languages:
# for l in detector.languages:
# self.logMessage(l.name)
return self.language == detector.languages[0].name.lower()
except UnknownLanguage as err:
self.logMessage("Exception during language detection: {0}".format(err))
def satisfies(self, event):
text = event.content.lower()
if self.ensureLanguage(text) and any(p in text for p in self.patterns):
words = text.translate(self.translatorPunctuation).split()
for w in words:
for index, p in enumerate(self.patterns):
if p in w: # preliminary match: pattern is in word
# check if it matches also with the stem
stemmed = self.stemmer.stem(w)
if (p == stemmed) and (stemmed != w) and (self.areCooldownsSatisfied(event)): # we exclude words that were already stems, they are usually false positives
return self.craftReply(event, index)
return (None, None, [])
|
winterismute/mcreminder-discordbot
|
bot/triggeritem.py
|
Python
|
mit
| 5,795
|
""" Base class for all the queue system implementations """
import subprocess, os
import plugins
class QueueSystem:
def __init__(self, *args):
pass
def submitSlaveJob(self, cmdArgs, slaveEnv, logDir, submissionRules, jobType):
try:
process = subprocess.Popen(cmdArgs, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
cwd=logDir, env=self.getSlaveEnvironment(slaveEnv))
stdout, stderr = process.communicate()
errorMessage = self.findErrorMessage(stderr, cmdArgs, jobType)
except OSError:
errorMessage = self.getFullSubmitError("local machine is not a submit host: running '" + cmdArgs[0] + "' failed.", cmdArgs, jobType)
if errorMessage:
return None, errorMessage
else:
return self.findJobId(stdout), None
def supportsPolling(self):
return True
def findErrorMessage(self, stderr, *args):
if len(stderr) > 0:
basicError = self.findSubmitError(stderr)
if basicError:
return self.getFullSubmitError(basicError, *args)
def getFullSubmitError(self, errorMessage, cmdArgs, jobType):
qname = self.getQueueSystemName()
err = "Failed to submit "
if jobType:
err += jobType + " "
err += "to " + qname + " (" + errorMessage.strip() + ")\n" + \
"Submission command was '" + self.formatCommand(cmdArgs) + "'\n"
return err
def getSlaveEnvironment(self, env):
if len(env) >= len(os.environ): # full environment sent
return env
else:
return self.makeSlaveEnvironment(env)
def getSlaveVarsToBlock(self):
return []
def getCapacity(self):
pass # treated as no restriction
def setRemoteProcessId(self, *args):
pass # only cloud cares about this
def getRemoteTestMachine(self, *args):
pass # only cloud cares about this
def slavesOnRemoteSystem(self):
return False
def makeSlaveEnvironment(self, env):
newEnv = plugins.copyEnvironment(ignoreVars=self.getSlaveVarsToBlock())
for var, value in env.items():
newEnv[var] = value
return newEnv
def getQueueSystemName(self):
modname = self.__class__.__module__
return modname.split(".")[-1].upper()
def addExtraAndCommand(self, args, submissionRules, commandArgs):
args += submissionRules.getExtraSubmitArgs()
if commandArgs:
args += self.shellWrapArgs(commandArgs)
return args
def formatCommand(self, cmdArgs):
return " ".join(cmdArgs[:-2]) + " ... "
def getSubmitCmdArgs(self, submissionRules, commandArgs=[], slaveEnv={}):
return commandArgs
def getJobFailureInfo(self, jobId):
name = self.getQueueSystemName()
header = "-" * 10 + " Full accounting info from " + name + " " + "-" * 10 + "\n"
if jobId is None:
return header + "No job has been submitted to " + name
else:
return header + self._getJobFailureInfo(jobId)
def shellWrapArgs(self, commandArgs):
# Must use exec so as not to create extra processes: SGE's qdel isn't very clever when
# it comes to noticing extra shells
return [ "exec", "$SHELL -c \"exec " + plugins.commandLineString(commandArgs, defaultQuoteChar="'") + "\"" ]
|
emilybache/texttest-runner
|
src/main/python/lib/queuesystem/abstractqueuesystem.py
|
Python
|
mit
| 3,518
|
#!/usr/bin/env python
import setpath
import unittest
from bike.refactor.extractMethod import ExtractMethod, \
extractMethod, coords
from bike import testdata
from bike.testutils import *
from bike.parsing.load import Cache
def assertTokensAreSame(t1begin, t1end, tokens):
it = t1begin.clone()
pos = 0
while it != t1end:
assert it.deref() == tokens[pos]
it.incr()
pos+=1
assert pos == len(tokens)
def helper(src,startcoords, endcoords, newname):
sourcenode = createAST(src)
extractMethod(tmpfile, startcoords, endcoords, newname)
return sourcenode.getSource()
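# coords(line, column) pairs delimit the region to extract: line numbers are
# 1-based and column offsets are 0-based, as the tests below illustrate.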
class TestExtractMethod(BRMTestCase):
def test_extractsPass(self):
srcBefore=trimLines("""
class MyClass:
def myMethod(self):
pass
""")
srcAfter=trimLines("""
class MyClass:
def myMethod(self):
self.newMethod()
def newMethod(self):
pass
""")
src = helper(srcBefore, coords(3, 8), coords(3, 12), "newMethod")
self.assertEqual(src,srcAfter)
def test_extractsPassWhenFunctionAllOnOneLine(self):
srcBefore=trimLines("""
class MyClass:
def myMethod(self): pass # comment
""")
srcAfter=trimLines("""
class MyClass:
def myMethod(self): self.newMethod() # comment
def newMethod(self):
pass
""")
src = helper(srcBefore, coords(2, 24), coords(2, 28),"newMethod")
self.assertEqual(src,srcAfter)
def test_extractsPassFromForLoop(self):
srcBefore=trimLines("""
class MyClass:
def myMethod(self): # comment
for i in foo:
pass
""")
srcAfter=trimLines("""
class MyClass:
def myMethod(self): # comment
for i in foo:
self.newMethod()
def newMethod(self):
pass
""")
src = helper(srcBefore, coords(4, 12), coords(4, 16), "newMethod")
self.assertEqual(srcAfter, src)
def test_newMethodHasArgumentsForUsedTemporarys(self):
srcBefore=trimLines("""
class MyClass:
def myMethod(self, c):
a = something()
b = somethingelse()
print a + b + c + d
print \"hello\"
dosomethingelse(a, b)
""")
srcAfter=trimLines("""
class MyClass:
def myMethod(self, c):
a = something()
b = somethingelse()
self.newMethod(a, b, c)
dosomethingelse(a, b)
def newMethod(self, a, b, c):
print a + b + c + d
print \"hello\"
""")
src = helper(srcBefore, coords(5, 8), coords(6, 21), "newMethod")
self.assertEqual(srcAfter, src)
def test_newMethodHasSingleArgument(self):
srcBefore=trimLines("""
class MyClass:
def myMethod(self):
a = something()
print a
print \"hello\"
dosomethingelse(a, b)
""")
srcAfter=trimLines("""
class MyClass:
def myMethod(self):
a = something()
self.newMethod(a)
dosomethingelse(a, b)
def newMethod(self, a):
print a
print \"hello\"
""")
src = helper(srcBefore, coords(4, 8), coords(5, 21), "newMethod")
self.assertEqual(srcAfter, src)
def test_doesntHaveDuplicateArguments(self):
srcBefore=trimLines("""
class MyClass:
def myMethod(self):
a = 3
print a
print a
""")
srcAfter=trimLines("""
class MyClass:
def myMethod(self):
a = 3
self.newMethod(a)
def newMethod(self, a):
print a
print a
""")
src = helper(srcBefore, coords(4, 0), coords(6, 0), "newMethod")
self.assertEqual(srcAfter, src)
def test_extractsQueryWhenFunctionAllOnOneLine(self):
srcBefore=trimLines("""
class MyClass:
def myMethod(self, a): print a # comment
""")
srcAfter=trimLines("""
class MyClass:
def myMethod(self, a): self.newMethod(a) # comment
def newMethod(self, a):
print a
""")
src = helper(srcBefore, coords(2, 27), coords(2, 34), "newMethod")
self.assertEqual(srcAfter, src)
def test_worksWhenAssignmentsToTuples(self):
srcBefore=trimLines("""
class MyClass:
def myMethod(self):
a, b, c = 35, 36, 37
print a + b
""")
srcAfter=trimLines("""
class MyClass:
def myMethod(self):
a, b, c = 35, 36, 37
self.newMethod(a, b)
def newMethod(self, a, b):
print a + b
""")
src = helper(srcBefore, coords(4, 8), coords(4, 19), "newMethod")
self.assertEqual(srcAfter, src)
def test_worksWhenUserSelectsABlockButDoesntSelectTheHangingDedent(self):
srcBefore=trimLines("""
class MyClass:
def myMethod(self): # comment
for i in foo:
pass
""")
srcAfter=trimLines("""
class MyClass:
def myMethod(self): # comment
for i in foo:
self.newMethod()
def newMethod(self):
pass
""")
src = helper(srcBefore, coords(4, 8), coords(4, 16), "newMethod")
self.assertEqual(srcAfter, src)
def test_newMethodHasSingleReturnValue(self):
srcBefore=trimLines("""
class MyClass:
def myMethod(self):
a = 35 # <-- extract me
print a
""")
srcAfter=trimLines("""
class MyClass:
def myMethod(self):
a = self.newMethod()
print a
def newMethod(self):
a = 35 # <-- extract me
return a
""")
src = helper(srcBefore, coords(3, 4),
coords(3, 34), "newMethod")
self.assertEqual(srcAfter, src)
def test_newMethodHasMultipleReturnValues(self):
srcBefore=trimLines("""
class MyClass:
def myMethod(self):
a = 35
b = 352
print a + b
""")
srcAfter=trimLines("""
class MyClass:
def myMethod(self):
a, b = self.newMethod()
print a + b
def newMethod(self):
a = 35
b = 352
return a, b
""")
src = helper(srcBefore, coords(3, 8),
coords(4, 15), "newMethod")
self.assertEqual(srcAfter, src)
def test_worksWhenMovingCodeJustAfterDedent(self):
srcBefore=trimLines("""
class MyClass:
def myMethod(self): # comment
for i in foo:
pass
print \"hello\"
""")
srcAfter=trimLines("""
class MyClass:
def myMethod(self): # comment
for i in foo:
pass
self.newMethod()
def newMethod(self):
print \"hello\"
""")
src = helper(srcBefore, coords(5, 8),
coords(5, 21), "newMethod")
self.assertEqual(srcAfter, src)
def test_extractsPassWhenSelectionCoordsAreReversed(self):
srcBefore=trimLines("""
class MyClass:
def myMethod(self):
pass
""")
srcAfter=trimLines("""
class MyClass:
def myMethod(self):
self.newMethod()
def newMethod(self):
pass
""")
src = helper(srcBefore, coords(3, 12), coords(3, 8), "newMethod")
self.assertEqual(srcAfter, src)
def test_extractsExpression(self):
srcBefore=trimLines("""
class MyClass:
def myMethod(self): # comment
a = 32
b = 2 + a * 1 + 2
""")
srcAfter=trimLines("""
class MyClass:
def myMethod(self): # comment
a = 32
b = 2 + self.newMethod(a) + 2
def newMethod(self, a):
return a * 1
""")
src = helper(srcBefore, coords(4, 16), coords(4, 21), "newMethod")
self.assertEqual(srcAfter, src)
def test_extractsExpression2(self):
srcBefore=trimLines("""
class MyClass:
def myMethod(self): # comment
g = 32
assert output.thingy(g) == \"bah\"
""")
srcAfter=trimLines("""
class MyClass:
def myMethod(self): # comment
g = 32
assert self.newMethod(g) == \"bah\"
def newMethod(self, g):
return output.thingy(g)
""")
src = helper(srcBefore, coords(4, 15), coords(4, 31), "newMethod")
self.assertEqual(srcAfter, src)
class TestExtractFunction(BRMTestCase):
def runTarget(self, src, begincoords, endcoords, newname):
ast = createAST(src)
extractFunction(ast, begincoords, endcoords, newname)
return ast
def test_extractsFunction(self):
srcBefore=trimLines("""
def myFunction(): # comment
a = 3
c = a + 99
b = c * 1
print b
""")
srcAfter=trimLines("""
def myFunction(): # comment
a = 3
b = newFunction(a)
print b
def newFunction(a):
c = a + 99
b = c * 1
return b
""")
src = helper(srcBefore, coords(3, 4),
coords(4, 13), "newFunction")
self.assertEqual(srcAfter, src)
def test_extractsAssignToAttribute(self):
srcBefore=trimLines("""
def simulateLoad(path):
item = foo()
item.decl = line
""")
srcAfter=trimLines("""
def simulateLoad(path):
item = foo()
newFunction(item)
def newFunction(item):
item.decl = line
""")
src = helper(srcBefore, coords(3, 0),
coords(4, 0), "newFunction")
self.assertEqual(srcAfter, src)
def test_extractsFromFirstBlockOfIfElseStatement(self):
srcBefore=trimLines("""
def foo():
if bah:
print \"hello1\"
print \"hello2\"
elif foo:
pass
""")
srcAfter=trimLines("""
def foo():
if bah:
newFunction()
print \"hello2\"
elif foo:
pass
def newFunction():
print \"hello1\"
""")
src = helper(srcBefore, coords(3, 0),
coords(4, 0), "newFunction")
self.assertEqual(srcAfter, src)
def test_extractsAugAssign(self):
srcBefore=trimLines("""
def foo():
a = 3
a += 1
print a
""")
srcAfter=trimLines("""
def foo():
a = 3
a = newFunction(a)
print a
def newFunction(a):
a += 1
return a
""")
src = helper(srcBefore, coords(3, 0),
coords(4, 0), "newFunction")
self.assertEqual(srcAfter, src)
def test_extractsForLoopUsingLoopVariable(self):
srcBefore=trimLines("""
def foo():
for i in range(1, 3):
print i
""")
srcAfter=trimLines("""
def foo():
for i in range(1, 3):
newFunction(i)
def newFunction(i):
print i
""")
src = helper(srcBefore, coords(3, 0),
coords(4, 0), "newFunction")
self.assertEqual(srcAfter, src)
def test_extractWhileLoopVariableIncrement(self):
srcBefore=trimLines("""
def foo():
a = 0
while a != 3:
a = a+1
""")
srcAfter=trimLines("""
def foo():
a = 0
while a != 3:
a = newFunction(a)
def newFunction(a):
a = a+1
return a
""")
src = helper(srcBefore, coords(4, 0),
coords(5, 0), "newFunction")
self.assertEqual(srcAfter, src)
def test_extractAssignedVariableUsedInOuterForLoop(self):
srcBefore=trimLines("""
def foo():
b = 0
for a in range(1, 3):
b = b+1
while b != 2:
print a
b += 1
""")
srcAfter=trimLines("""
def foo():
b = 0
for a in range(1, 3):
b = b+1
while b != 2:
b = newFunction(a, b)
def newFunction(a, b):
print a
b += 1
return b
""")
src = helper(srcBefore, coords(6, 0),
coords(8, 0), "newFunction")
self.assertEqual(srcAfter, src)
def test_extractsConditionalFromExpression(self):
srcBefore=trimLines("""
def foo():
if 123+3:
print aoue
""")
srcAfter=trimLines("""
def foo():
if newFunction():
print aoue
def newFunction():
return 123+3
""")
src = helper(srcBefore, coords(2, 7),
coords(2, 12), "newFunction")
self.assertEqual(srcAfter, src)
def test_extractCodeAfterCommentInMiddleOfFnDoesntRaiseParseException(self):
srcBefore=trimLines("""
def theFunction():
print 1
# comment
print 2
""")
srcAfter=trimLines("""
def theFunction():
print 1
# comment
newFunction()
def newFunction():
print 2
""")
src = helper(srcBefore, coords(4, 0),
coords(5, 0), "newFunction")
self.assertEqual(srcAfter, src)
def test_canExtractQueryFromNestedIfStatement(self):
srcBefore=trimLines("""
def theFunction():
if foo: # comment
if bah:
pass
""")
srcAfter=trimLines("""
def theFunction():
if foo: # comment
if newFunction():
pass
def newFunction():
return bah
""")
src = helper(srcBefore, coords(3, 11),
coords(3, 14), "newFunction")
self.assertEqual(srcAfter, src)
def test_doesntMessUpTheNextFunctionOrClass(self):
srcBefore=trimLines("""
def myFunction():
a = 3
print \"hello\"+a # extract me
class MyClass:
def myMethod(self):
b = 12 # extract me
c = 3 # and me
d = 2 # and me
print b, c
""")
srcAfter=trimLines("""
def myFunction():
a = 3
newFunction(a)
def newFunction(a):
print \"hello\"+a # extract me
class MyClass:
def myMethod(self):
b = 12 # extract me
c = 3 # and me
d = 2 # and me
print b, c
""")
# extract code on one line
src = helper(srcBefore, coords(3, 4),
coords(3, 34), "newFunction")
self.assertEqual(srcAfter, src)
# extract code on 2 lines (most common user method)
resetRoot()
Cache.instance.reset()
Root()
src = helper(srcBefore, coords(3, 0),
coords(4, 0), "newFunction")
self.assertEqual(srcAfter, src)
def test_doesntBallsUpIndentWhenTheresALineWithNoSpacesInIt(self):
srcBefore=trimLines("""
def theFunction():
if 1:
pass
pass
""")
srcAfter=trimLines("""
def theFunction():
newFunction()
def newFunction():
if 1:
pass
pass
""")
src = helper(srcBefore, coords(2, 4),
coords(5, 8), "newFunction")
self.assertEqual(srcAfter, src)
def test_doesntHaveToBeInsideAFunction(self):
srcBefore=trimLines(r"""
a = 1
print a + 2
f(b)
""")
srcAfter=trimLines(r"""
a = 1
newFunction(a)
def newFunction(a):
print a + 2
f(b)
""")
src = helper(srcBefore, coords(2, 0),
coords(3, 4), "newFunction")
self.assertEqual(srcAfter, src)
def test_doesntBarfWhenEncountersMethodCalledOnCreatedObj(self):
srcBefore=trimLines(r"""
results = QueryEngine(q).foo()
""")
srcAfter=trimLines(r"""
newFunction()
def newFunction():
results = QueryEngine(q).foo()
""")
src = helper(srcBefore, coords(1, 0),
coords(2, 0), "newFunction")
self.assertEqual(srcAfter, src)
def test_worksIfNoLinesBeforeExtractedCode(self):
srcBefore=trimLines(r"""
print a + 2
f(b)
""")
srcAfter=trimLines(r"""
newFunction()
def newFunction():
print a + 2
f(b)
""")
src = helper(srcBefore, coords(1, 0),
coords(2, 4), "newFunction")
self.assertEqual(srcAfter, src)
class TestGetRegionAsString(BRMTestCase):
def test_getsHighlightedSingleLinePassStatement(self):
src=trimLines("""
class MyClass:
def myMethod(self):
pass
""")
sourcenode = createAST(src)
em = ExtractMethod(sourcenode, coords(3, 8),
coords(3, 12), "foobah")
em.getRegionToBuffer()
self.assertEqual(len(em.extractedLines), 1)
self.assertEqual(em.extractedLines[0], "pass\n")
def test_getsSingleLinePassStatementWhenWholeLineIsHighlighted(self):
src=trimLines("""
class MyClass:
def myMethod(self):
pass
""")
sourcenode = createAST(src)
em = ExtractMethod(sourcenode, coords(3, 0),
coords(3, 12), "foobah")
em.getRegionToBuffer()
self.assertEqual(len(em.extractedLines), 1)
self.assertEqual(em.extractedLines[0], "pass\n")
def test_getsMultiLineRegionWhenJustRegionIsHighlighted(self):
src=trimLines("""
class MyClass:
def myMethod(self):
print 'hello'
pass
""")
region=trimLines("""
print 'hello'
pass
""")
sourcenode = createAST(src)
em = ExtractMethod(sourcenode, coords(3, 8),
coords(4, 12), "foobah")
em.getRegionToBuffer()
self.assertEqual(em.extractedLines, region.splitlines(1))
def test_getsMultiLineRegionWhenRegionLinesAreHighlighted(self):
src=trimLines("""
class MyClass:
def myMethod(self):
print 'hello'
pass
""")
region=trimLines("""
print 'hello'
pass
""")
sourcenode = createAST(src)
em = ExtractMethod(sourcenode, coords(3, 0),
coords(5, 0), "foobah")
em.getRegionToBuffer()
self.assertEqual(em.extractedLines, region.splitlines(1))
def test_getsHighlightedSubstringOfLine(self):
src=trimLines("""
class MyClass:
def myMethod(self):
if a == 3:
pass
""")
region=trimLines("""
a == 3
""")
sourcenode = createAST(src)
em = ExtractMethod(sourcenode, coords(3, 11),
coords(3, 17), "foobah")
em.getRegionToBuffer()
self.assertEqual(em.extractedLines, region.splitlines(1))
class TestGetTabwidthOfParentFunction(BRMTestCase):
def test_getsTabwidthForSimpleMethod(self):
src=trimLines("""
class MyClass:
def myMethod(self):
pass
""")
sourcenode = createAST(src)
em = ExtractMethod(sourcenode, coords(3, 11),
coords(3, 17), "foobah")
self.assertEqual(em.getTabwidthOfParentFunction(), 4)
def test_getsTabwidthForFunctionAtRootScope(self):
src=trimLines("""
def myFn(self):
pass
""")
sourcenode = createAST(src)
em = ExtractMethod(sourcenode, coords(2, 0),
coords(2, 9), "foobah")
self.assertEqual(em.getTabwidthOfParentFunction(), 0)
if __name__ == "__main__":
unittest.main()
|
srusskih/SublimeBicycleRepair
|
bike/refactor/test_extractMethod.py
|
Python
|
mit
| 21,788
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 11.03.2019 21:45
:Licence MIT
Part of grammpy
"""
from unittest import TestCase, main
from grammpy import Grammar, Nonterminal, Rule
from grammpy.exceptions import RuleNotDefinedException, RuleSyntaxException, TerminalDoesNotExistsException, NonterminalDoesNotExistsException
class N(Nonterminal): pass
class X(Nonterminal): pass
class A(Rule): rule=([N], [0])
class B(Rule): rule=([N], [1])
class C(Rule): rule=([N], [2])
class UndefinedRule(Rule): pass
class InvalidRule(Rule): rule=(N, [0, 1])
class NotInsideRule1(Rule): rule=([N], [3])
class NotInsideRule2(Rule): rule=([X], [0])
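# UndefinedRule lacks a `rule` attribute; InvalidRule's left-hand side is not
# wrapped in a list; the NotInsideRule classes reference a terminal (3) or a
# nonterminal (X) missing from the grammars below. The tests verify that each
# is rejected with the corresponding exception.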
class AddingTest(TestCase):
def test_haveEmpty(self):
gr = Grammar(terminals=[0, 1, 2],
nonterminals=[N])
self.assertNotIn(A, gr.rules)
self.assertNotIn(B, gr.rules)
self.assertNotIn(C, gr.rules)
def test_correctAddOne(self):
gr = Grammar(terminals=[0, 1, 2],
nonterminals=[N])
self.assertEqual(gr.rules.size(), 0)
self.assertEqual(len(gr.rules), 0)
self.assertNotIn(A, gr.rules)
gr.rules.add(A)
self.assertEqual(gr.rules.size(), 1)
self.assertEqual(len(gr.rules), 1)
self.assertIn(A, gr.rules)
def test_correctAddTwo(self):
gr = Grammar(terminals=[0, 1, 2],
nonterminals=[N])
self.assertEqual(gr.rules.size(), 0)
self.assertEqual(len(gr.rules), 0)
self.assertNotIn(A, gr.rules)
self.assertNotIn(B, gr.rules)
gr.rules.add(A)
self.assertEqual(gr.rules.size(), 1)
self.assertEqual(len(gr.rules), 1)
self.assertIn(A, gr.rules)
self.assertNotIn(B, gr.rules)
gr.rules.add(B)
self.assertEqual(gr.rules.size(), 2)
self.assertEqual(len(gr.rules), 2)
self.assertIn(A, gr.rules)
self.assertIn(B, gr.rules)
def test_addThreeAsParameters(self):
gr = Grammar(terminals=[0, 1, 2],
nonterminals=[N])
gr.rules.add(A, B, C)
self.assertEqual(gr.rules.size(), 3)
self.assertEqual(len(gr.rules), 3)
self.assertIn(A, gr.rules)
self.assertIn(B, gr.rules)
self.assertIn(C, gr.rules)
def test_addThreeAsArray(self):
gr = Grammar(terminals=[0, 1, 2],
nonterminals=[N])
gr.rules.add(*[A, B, C])
self.assertEqual(gr.rules.size(), 3)
self.assertEqual(len(gr.rules), 3)
self.assertIn(A, gr.rules)
self.assertIn(B, gr.rules)
self.assertIn(C, gr.rules)
def test_oneSeparateTwoTuple(self):
gr = Grammar(terminals=[0, 1, 2],
nonterminals=[N])
gr.rules.add(A)
self.assertEqual(gr.rules.size(), 1)
self.assertEqual(len(gr.rules), 1)
self.assertIn(A, gr.rules)
self.assertNotIn(B, gr.rules)
self.assertNotIn(C, gr.rules)
gr.rules.add(*(B, C))
self.assertEqual(gr.rules.size(), 3)
self.assertEqual(len(gr.rules), 3)
self.assertIn(A, gr.rules)
self.assertIn(B, gr.rules)
self.assertIn(C, gr.rules)
def test_addSameTwiceInParameters(self):
gr = Grammar(terminals=[0, 1, 2],
nonterminals=[N])
gr.rules.add(A, B, A, C)
self.assertEqual(gr.rules.size(), 3)
self.assertEqual(len(gr.rules), 3)
self.assertIn(A, gr.rules)
self.assertIn(B, gr.rules)
self.assertIn(C, gr.rules)
def test_addSameTwiceInSequence(self):
gr = Grammar(terminals=[0, 1, 2],
nonterminals=[N])
gr.rules.add(A, C)
self.assertEqual(gr.rules.size(), 2)
self.assertEqual(len(gr.rules), 2)
self.assertIn(A, gr.rules)
self.assertNotIn(B, gr.rules)
self.assertIn(C, gr.rules)
gr.rules.add(A, B)
self.assertEqual(gr.rules.size(), 3)
self.assertEqual(len(gr.rules), 3)
self.assertIn(A, gr.rules)
self.assertIn(B, gr.rules)
self.assertIn(C, gr.rules)
def test_addUndefined(self):
gr = Grammar(terminals=[0, 1, 2],
nonterminals=[N])
with self.assertRaises(RuleNotDefinedException):
gr.rules.add(UndefinedRule)
def test_addInvalid(self):
gr = Grammar(terminals=[0, 1, 2],
nonterminals=[N])
with self.assertRaises(RuleSyntaxException):
gr.rules.add(InvalidRule)
def test_addNotInside1(self):
gr = Grammar(terminals=[0, 1, 2],
nonterminals=[N])
with self.assertRaises(TerminalDoesNotExistsException):
gr.rules.add(NotInsideRule1)
def test_addNotInside2(self):
gr = Grammar(terminals=[0, 1, 2],
nonterminals=[N])
with self.assertRaises(NonterminalDoesNotExistsException):
gr.rules.add(NotInsideRule2)
def test_addUndefinedAsSecond(self):
gr = Grammar(terminals=[0, 1, 2],
nonterminals=[N])
gr.rules.add(A)
with self.assertRaises(RuleNotDefinedException):
gr.rules.add(UndefinedRule)
def test_addInvalidAsSecond(self):
gr = Grammar(terminals=[0, 1, 2],
nonterminals=[N])
gr.rules.add(A)
with self.assertRaises(RuleSyntaxException):
gr.rules.add(InvalidRule)
def test_addNotInside1AsSecond(self):
gr = Grammar(terminals=[0, 1, 2],
nonterminals=[N])
gr.rules.add(A)
with self.assertRaises(TerminalDoesNotExistsException):
gr.rules.add(NotInsideRule1)
def test_addNotInside2AsSecond(self):
gr = Grammar(terminals=[0, 1, 2],
nonterminals=[N])
gr.rules.add(A)
with self.assertRaises(NonterminalDoesNotExistsException):
gr.rules.add(NotInsideRule2)
def test_addUndefinedInParameters(self):
gr = Grammar(terminals=[0, 1, 2],
nonterminals=[N])
gr.rules.add(A)
with self.assertRaises(RuleNotDefinedException):
gr.rules.add(B, UndefinedRule)
def test_addInvalidInParameters(self):
gr = Grammar(terminals=[0, 1, 2],
nonterminals=[N])
gr.rules.add(A)
with self.assertRaises(RuleSyntaxException):
gr.rules.add(B, InvalidRule)
def test_addNotInside1InParameters(self):
gr = Grammar(terminals=[0, 1, 2],
nonterminals=[N])
gr.rules.add(A)
with self.assertRaises(TerminalDoesNotExistsException):
gr.rules.add(B, NotInsideRule1)
def test_addNotInside2InParameters(self):
gr = Grammar(terminals=[0, 1, 2],
nonterminals=[N])
gr.rules.add(A)
with self.assertRaises(NonterminalDoesNotExistsException):
gr.rules.add(B, NotInsideRule2)
if __name__ == '__main__':
main()
|
PatrikValkovic/grammpy
|
tests/grammpy_test/rule_tests/handling_tests/AddingTest.py
|
Python
|
mit
| 7,025
|
import numpy as np
from keras.models import Sequential
from keras.layers.core import Dense, Activation
from keras.layers.advanced_activations import LeakyReLU
from keras.optimizers import SGD
from sklearn import datasets
from sklearn.model_selection import train_test_split
np.random.seed(1234)
'''
Generate data
'''
mnist = datasets.fetch_mldata('MNIST original', data_home='.')
n = len(mnist.data)
N = 10000 # use only a subset of MNIST
indices = np.random.permutation(range(n))[:N] # pick N samples at random
X = mnist.data[indices]
y = mnist.target[indices]
Y = np.eye(10)[y.astype(int)] # convert to one-hot (1-of-K) representation
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, train_size=0.8)
'''
Model configuration
'''
n_in = len(X[0]) # 784
n_hidden = 200
n_out = len(Y[0]) # 10
alpha = 0.01
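# negative-slope coefficient for LeakyReLU: f(x) = x if x > 0 else alpha * x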
model = Sequential()
model.add(Dense(n_hidden, input_dim=n_in))
model.add(LeakyReLU(alpha=alpha))
model.add(Dense(n_hidden))
model.add(LeakyReLU(alpha=alpha))
model.add(Dense(n_hidden))
model.add(LeakyReLU(alpha=alpha))
model.add(Dense(n_hidden))
model.add(LeakyReLU(alpha=alpha))
model.add(Dense(n_out))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer=SGD(lr=0.01), metrics=['accuracy'])
'''
Model training
'''
epochs = 20
batch_size = 200
model.fit(X_train, Y_train, epochs=epochs, batch_size=batch_size)
'''
Evaluation
'''
print(model.evaluate(X_test, Y_test))
|
inoue0124/TensorFlow_Keras
|
chapter4/leaky_relu_keras.py
|
Python
|
mit
| 1,484
|
from collections import namedtuple
Point = namedtuple('Point', 'x y')
Size = namedtuple('Size', 'w h')
def is_child(child, parent):
"""
    Return True if class `child` is a strict subclass of class `parent`
    (i.e. a subclass other than `parent` itself), False otherwise
"""
try:
return issubclass(child, parent) and child is not parent
except TypeError:
return False
def color_variant(color, scale=1):
"""
darken or lighten a color
"""
return map(lambda x: int(min(max(x * scale, 0), 255)), color)
def distance(coords1, coords2):
return (
max(abs(coords1[0] - coords2[0]), 1) +
max(abs(coords1[1] - coords2[1]), 1)
) / 2
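# Illustrative examples (assuming 8-bit RGB tuples; under Python 3,
# color_variant returns a map object, hence the list() call):
#   list(color_variant((100, 150, 200), scale=1.2))  ->  [120, 180, 240]
#   distance(Point(0, 0), Point(3, 4))  ->  3.5 (3 under Python 2 integer division)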
|
evuez/disclosure
|
generic.py
|
Python
|
mit
| 618
|
# -*- coding: utf8 -*-
# Copyright (c) 2017-2021 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# CAM signature/authentication error.
AUTHFAILURE = 'AuthFailure'
# Operation failed.
FAILEDOPERATION = 'FailedOperation'
# The alarm notification template is already bound to an alarm policy.
FAILEDOPERATION_BINDEDALARM = 'FailedOperation.BindedAlarm'
# Log search hit the maximum result count limit.
FAILEDOPERATION_GETLOGREACHLIMIT = 'FailedOperation.GetlogReachLimit'
# Low-frequency storage does not support kv and tag indexes.
FAILEDOPERATION_INVALIDINDEXRULEFORSEARCHLOW = 'FailedOperation.InValidIndexRuleForSearchLow'
# The alarm policy is in an abnormal state; check that all of its log topic IDs exist.
FAILEDOPERATION_INVALIDALARM = 'FailedOperation.InvalidAlarm'
# The search cursor has expired or does not exist.
FAILEDOPERATION_INVALIDCONTEXT = 'FailedOperation.InvalidContext'
# Offline storage retention must be at least 7 days.
FAILEDOPERATION_INVALIDPERIOD = 'FailedOperation.InvalidPeriod'
# An identical logset already exists.
FAILEDOPERATION_LOGSETCONFLICT = 'FailedOperation.LogsetConflict'
# The logset still contains log topics.
FAILEDOPERATION_LOGSETNOTEMPTY = 'FailedOperation.LogsetNotEmpty'
# Invalid Content.
FAILEDOPERATION_MISSINGCONTENT = 'FailedOperation.MissingContent'
# Modifying the lifecycle is forbidden.
FAILEDOPERATION_PERIODMODIFYFORBIDDEN = 'FailedOperation.PeriodModifyForbidden'
# The query statement failed to run.
FAILEDOPERATION_QUERYERROR = 'FailedOperation.QueryError'
# Read QPS exceeds the limit.
FAILEDOPERATION_READQPSLIMIT = 'FailedOperation.ReadQpsLimit'
# The query timed out.
FAILEDOPERATION_SEARCHTIMEOUT = 'FailedOperation.SearchTimeout'
# The shipper task cannot be retried.
FAILEDOPERATION_SHIPPERTASKNOTTORETRY = 'FailedOperation.ShipperTaskNotToRetry'
# Failed to parse the query statement.
FAILEDOPERATION_SYNTAXERROR = 'FailedOperation.SyntaxError'
# Requests to the tag service are rate-limited.
FAILEDOPERATION_TAGQPSLIMIT = 'FailedOperation.TagQpsLimit'
# The log topic has been closed.
FAILEDOPERATION_TOPICCLOSED = 'FailedOperation.TopicClosed'
# The log topic has been isolated.
FAILEDOPERATION_TOPICISOLATED = 'FailedOperation.TopicIsolated'
# Write QPS exceeds the limit.
FAILEDOPERATION_WRITEQPSLIMIT = 'FailedOperation.WriteQpsLimit'
# Write traffic exceeds the limit.
FAILEDOPERATION_WRITETRAFFICLIMIT = 'FailedOperation.WriteTrafficLimit'
# Internal error.
INTERNALERROR = 'InternalError'
# Invalid parameter.
INVALIDPARAMETER = 'InvalidParameter'
# The alarm policy already exists.
INVALIDPARAMETER_ALARMCONFLICT = 'InvalidParameter.AlarmConflict'
# The alarm notification template already exists.
INVALIDPARAMETER_ALARMNOTICECONFLICT = 'InvalidParameter.AlarmNoticeConflict'
# An identical collection configuration rule already exists.
INVALIDPARAMETER_CONFIGCONFLICT = 'InvalidParameter.ConfigConflict'
# Invalid Content.
INVALIDPARAMETER_CONTENT = 'InvalidParameter.Content'
# Low-frequency storage does not support kv and tag indexes.
INVALIDPARAMETER_INVALIDINDEXRULEFORSEARCHLOW = 'InvalidParameter.InValidIndexRuleForSearchLow'
# The specified log topic already has an index rule.
INVALIDPARAMETER_INDEXCONFLICT = 'InvalidParameter.IndexConflict'
# An identical logset already exists.
INVALIDPARAMETER_LOGSETCONFLICT = 'InvalidParameter.LogsetConflict'
# A machine group with the same name already exists.
INVALIDPARAMETER_MACHINEGROUPCONFLICT = 'InvalidParameter.MachineGroupConflict'
# The shipper rule name conflicts with an existing one.
INVALIDPARAMETER_SHIPPERCONFLICT = 'InvalidParameter.ShipperConflict'
# The specified logset already has a log topic with the same name.
INVALIDPARAMETER_TOPICCONFLICT = 'InvalidParameter.TopicConflict'
# Invalid parameter value.
INVALIDPARAMETERVALUE = 'InvalidParameterValue'
# Quota limit exceeded.
LIMITEXCEEDED = 'LimitExceeded'
# The number of collection rule configurations exceeds the maximum limit.
LIMITEXCEEDED_CONFIG = 'LimitExceeded.Config'
# The number of log exports exceeds the limit.
LIMITEXCEEDED_EXPORT = 'LimitExceeded.Export'
# Concurrent queries exceed the limit of 15 per topic.
LIMITEXCEEDED_LOGSEARCH = 'LimitExceeded.LogSearch'
# The log size exceeds the limit.
LIMITEXCEEDED_LOGSIZE = 'LimitExceeded.LogSize'
# The number of logsets exceeds the limit.
LIMITEXCEEDED_LOGSET = 'LimitExceeded.Logset'
# The number of machine groups exceeds the limit.
LIMITEXCEEDED_MACHINEGROUP = 'LimitExceeded.MachineGroup'
# The number of machine group IPs exceeds the limit.
LIMITEXCEEDED_MACHINEGROUPIP = 'LimitExceeded.MachineGroupIp'
# The number of machine group labels exceeds the limit.
LIMITEXCEEDED_MACHINEGROUPLABELS = 'LimitExceeded.MachineGroupLabels'
# The number of partitions exceeds the limit.
LIMITEXCEEDED_PARTITION = 'LimitExceeded.Partition'
# The logs returned by the search API are too large, exceeding the 20 MB limit.
LIMITEXCEEDED_SEARCHRESULTTOOLARGE = 'LimitExceeded.SearchResultTooLarge'
# The number of shipper rules exceeds the limit.
LIMITEXCEEDED_SHIPPER = 'LimitExceeded.Shipper'
# The number of tags exceeds the limit.
LIMITEXCEEDED_TAG = 'LimitExceeded.Tag'
# The number of log topics exceeds the limit.
LIMITEXCEEDED_TOPIC = 'LimitExceeded.Topic'
# Missing parameter.
MISSINGPARAMETER = 'MissingParameter'
# Operation denied.
OPERATIONDENIED = 'OperationDenied'
# ACL verification failed.
OPERATIONDENIED_ACLFAILED = 'OperationDenied.ACLFailed'
# The account has been terminated.
OPERATIONDENIED_ACCOUNTDESTROY = 'OperationDenied.AccountDestroy'
# The account is in arrears.
OPERATIONDENIED_ACCOUNTISOLATE = 'OperationDenied.AccountIsolate'
# The account does not exist.
OPERATIONDENIED_ACCOUNTNOTEXISTS = 'OperationDenied.AccountNotExists'
# Low-frequency storage does not support alarms.
OPERATIONDENIED_ALARMNOTSUPPORTFORSEARCHLOW = 'OperationDenied.AlarmNotSupportForSearchLow'
# Analysis is not enabled for the field.
OPERATIONDENIED_ANALYSISSWITCHCLOSE = 'OperationDenied.AnalysisSwitchClose'
# The notification template is bound to an alarm and cannot be deleted.
OPERATIONDENIED_NOTICEHASALARM = 'OperationDenied.NoticeHasAlarm'
# The operation is not supported for low-frequency search.
OPERATIONDENIED_OPERATIONNOTSUPPORTINSEARCHLOW = 'OperationDenied.OperationNotSupportInSearchLow'
# The topic is bound to a data processing task.
OPERATIONDENIED_TOPICHASDATAFORMTASK = 'OperationDenied.TopicHasDataFormTask'
# The topic is bound to a function delivery.
OPERATIONDENIED_TOPICHASDELIVERFUNCTION = 'OperationDenied.TopicHasDeliverFunction'
# The resource does not exist.
RESOURCENOTFOUND = 'ResourceNotFound'
# The agent version does not exist.
RESOURCENOTFOUND_AGENTVERSIONNOTEXIST = 'ResourceNotFound.AgentVersionNotExist'
# The alarm policy does not exist.
RESOURCENOTFOUND_ALARMNOTEXIST = 'ResourceNotFound.AlarmNotExist'
# The alarm notification template does not exist.
RESOURCENOTFOUND_ALARMNOTICENOTEXIST = 'ResourceNotFound.AlarmNoticeNotExist'
# The specified collection rule configuration does not exist.
RESOURCENOTFOUND_CONFIGNOTEXIST = 'ResourceNotFound.ConfigNotExist'
# The log export does not exist.
RESOURCENOTFOUND_EXPORTNOTEXIST = 'ResourceNotFound.ExportNotExist'
# The index does not exist.
RESOURCENOTFOUND_INDEXNOTEXIST = 'ResourceNotFound.IndexNotExist'
# The specified logset does not exist.
RESOURCENOTFOUND_LOGSETNOTEXIST = 'ResourceNotFound.LogsetNotExist'
# The machine group does not exist.
RESOURCENOTFOUND_MACHINEGROUPNOTEXIST = 'ResourceNotFound.MachineGroupNotExist'
# The partition does not exist.
RESOURCENOTFOUND_PARTITIONNOTEXIST = 'ResourceNotFound.PartitionNotExist'
# The shipper rule does not exist.
RESOURCENOTFOUND_SHIPPERNOTEXIST = 'ResourceNotFound.ShipperNotExist'
# The shipper task does not exist.
RESOURCENOTFOUND_SHIPPERTASKNOTEXIST = 'ResourceNotFound.ShipperTaskNotExist'
# The log topic does not exist.
RESOURCENOTFOUND_TOPICNOTEXIST = 'ResourceNotFound.TopicNotExist'
# Operation not supported.
UNSUPPORTEDOPERATION = 'UnsupportedOperation'
|
tzpBingo/github-trending
|
codespace/python/tencentcloud/cls/v20201016/errorcodes.py
|
Python
|
mit
| 7,850
|
import aioredis
import pickle
from aiohttp_cached.exceptions import ImproperlyConfigured
from aiohttp_cached.cache.backends import AsyncBaseCache
__all__ = (
'RedisCache',
)
class RedisCache(AsyncBaseCache):
def __init__(self, location: str, params: dict, loop=None):
super().__init__(location, params, loop=loop)
self._host = None
self._db = None
self._pool = None
async def init(self):
"""
Must be invoked after class instantiation to process object properties asynchronously
:return: Redis cache object
:rtype: RedisCache
"""
self.init_connection_params()
self._pool = await self._create_pool()
return self
async def _create_pool(self):
return await aioredis.create_pool(self._host, db=self._db, loop=self._loop,
**self._params.get('OPTIONS', {}))
def init_connection_params(self):
if ':' in self._location:
try:
_host, _port, _db = self._location.split(':')
except ValueError:
                raise ImproperlyConfigured("Host, port and database or unix "
                                           "socket path must be specified (e.g. localhost:6379:1).")
try:
_port = int(_port)
_db = int(_db)
except (ValueError, TypeError):
raise ImproperlyConfigured("Port and db values must be an integer.")
self._host = (_host, _port)
self._db = _db
else:
self._host = self._location # unix socket path
self._db = self._params.get('DB', 0)
async def get(self, key: str, default=None):
async with self._pool.get() as redis:
redis_value = await redis.get(self.build_key(key))
if redis_value is None:
return default
try:
result = int(redis_value)
except (ValueError, TypeError):
result = self.unpack_object(redis_value)
return result
async def set(self, key: str, value, timeout: int = 0):
packed_obj = self.pack_object(value)
async with self._pool.get() as redis:
await redis.set(self.build_key(key), packed_obj, expire=timeout)
async def delete(self, key: str):
async with self._pool.get() as redis:
await redis.delete(self.build_key(key))
async def has(self, key: str) -> bool:
async with self._pool.get() as redis:
return await redis.exists(self.build_key(key))
async def clear(self):
async with self._pool.get() as redis:
if self._key_prefix:
_keys = await redis.keys('%s*' % self._key_prefix)
if _keys:
await redis.delete(*_keys)
else:
await redis.flushdb()
async def close(self):
pass
def unpack_object(self, value):
if isinstance(value, memoryview):
return bytes(value)
try:
return pickle.loads(value)
except TypeError:
return None
def pack_object(self, value):
return pickle.dumps(value)
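# Minimal usage sketch (illustrative; assumes a running event loop and a
# `location` in the host:port:db form parsed by init_connection_params above):
#
#   cache = await RedisCache('localhost:6379:1', params={}).init()
#   await cache.set('greeting', {'hello': 'world'}, timeout=60)
#   value = await cache.get('greeting')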
|
TheML9I/aiohttp-cached
|
aiohttp_cached/cache/backends/redis.py
|
Python
|
mit
| 3,246
|
class Solution:
# @return an integer
def lengthOfLongestSubstring(self, s):
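        # Sliding window: `a` holds the current substring without repeated
        # characters; on a duplicate, drop everything up to and including
        # its first occurrence before appending again.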
s = s.strip()
if not s:
return 0
else:
a = []
max = 0
for i in s:
if i not in a:
a.append(i)
else:
if len(a) > max:
max = len(a)
a = a[a.index(i)+1:]
a.append(i)
if len(a) > max:
max = len(a)
return max
if __name__ == "__main__":
s = Solution()
print s.lengthOfLongestSubstring("")
print s.lengthOfLongestSubstring("p")
print s.lengthOfLongestSubstring("abcabcbb")
print s.lengthOfLongestSubstring("abc")
print s.lengthOfLongestSubstring("bbbb")
print s.lengthOfLongestSubstring("wlrbbmqbhcdarzowkkyhiddqscdxrjmowfrxsjybldbefsarcbynecdyggxxpklorellnmpapqfwkhopkmco")
|
Crayzero/LeetCodeProgramming
|
Solutions/Longest Substring Without Repeating Characters/longestSubstring.py
|
Python
|
mit
| 928
|
"""
https://leetcode.com/problems/binary-string-with-substrings-representing-1-to-n/
https://leetcode.com/submissions/detail/217341263/
"""
class Solution:
def queryString(self, S: str, N: int) -> bool:
for i in range(1, N + 1):
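            # bin(i) yields e.g. '0b101'; slice off the '0b' prefix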
s = bin(i)[2:]
if s not in S:
return False
return True
import unittest
class Test(unittest.TestCase):
def test(self):
solution = Solution()
self.assertEqual(solution.queryString('0110', 3), True)
self.assertEqual(solution.queryString('0110', 4), False)
if __name__ == '__main__':
unittest.main()
|
vivaxy/algorithms
|
python/problems/binary_string_with_substrings_representing_1_to_n.py
|
Python
|
mit
| 626
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_selenium_astride
----------------------------------
Tests for `selenium_astride` module.
"""
from flask import Flask, render_template, redirect, url_for, request, session, flash
from flask_testing import LiveServerTestCase
from selenium import webdriver
import unittest
from tests import pages
class Entry(object):
def __init__(self, title, text):
self.title = title
self.text = text
entries = [Entry('Post1', 'Hello world')]
class SeleniumAstrideTest(LiveServerTestCase):
def create_app(self):
app = Flask(__name__)
@app.route('/')
def show_entries():
return render_template('show_entries.html', entries=entries)
@app.route('/add', methods=['POST'])
def add_entry():
entries.append(Entry(request.form['title'], request.form['text']))
return redirect(url_for('show_entries'))
@app.route('/login', methods=['GET', 'POST'])
def login():
error = None
if request.method == 'POST':
if request.form['username'] != app.config['USERNAME']:
error = 'Invalid username'
elif request.form['password'] != app.config['PASSWORD']:
error = 'Invalid password'
else:
session['logged_in'] = True
flash('You were logged in')
return redirect(url_for('show_entries'))
return render_template('login.html', error=error)
@app.route('/logout')
def logout():
session.pop('logged_in', None)
flash('You were logged out')
return redirect(url_for('show_entries'))
app.config['USERNAME'] = 'floren'
app.config['PASSWORD'] = 'astride'
app.config['TESTING'] = True
app.config['LIVESERVER_PORT'] = 8943
return app
def setUp(self):
self.app = self.create_app()
options = webdriver.ChromeOptions()
options.add_argument('headless')
options.add_argument('disable-gpu')
self.browser = webdriver.Chrome(options=options)
self.browser.implicitly_wait(3)
def tearDown(self):
self.browser.quit()
def test_check_element(self):
self.browser.get(self.get_server_url())
home_page = pages.HomePage(self.browser)
first_entry_title = home_page.first_entry()
self.assertEqual('Post1', first_entry_title)
def test_click_link(self):
        self.browser.get(self.get_server_url())
home_page = pages.HomePage(self.browser)
home_page.go_login()
login_page = pages.LoginPage(self.browser)
self.assertEqual('Login', login_page.title_page())
def test_page_elements(self):
self.browser.get(self.get_server_url() + '/login')
login_page = pages.LoginPage(self.browser)
login_page.username = "floren"
login_page.username.clear()
login_page.username = "floren"
login_page.password = "astride"
login_page.password.clear()
login_page.login()
self.assertIn('Invalid password', login_page.get_error())
if __name__ == '__main__':
unittest.main()
|
reclamador/python_selenium_astride
|
tests/test_selenium_astride.py
|
Python
|
mit
| 3,265
|
import theano
import lasagne
import numpy as np
import theano.tensor as T
from numpy import random as rnd, linalg as la
from layers import UnitaryLayer, UnitaryKronLayer, RecurrentUnitaryLayer, ComplexLayer, WTTLayer, ModRelu
from matplotlib import pyplot as plt
from utils.optimizations import nesterov_momentum, custom_sgd
from lasagne.nonlinearities import rectify
np.set_printoptions(linewidth=200, suppress=True)
#theano.config.exception_verbosity='high'
#theano.config.mode='DebugMode'
#theano.config.optimizer='None'
# Min/max sequence length
MIN_LENGTH = 50
MAX_LENGTH = 51
# Number of units in the hidden (recurrent) layer
N_HIDDEN = 81
# Number of training sequences in each batch
N_BATCH = 100
# Optimization learning rate
LEARNING_RATE = 3 * 1e-4
# All gradients above this will be clipped
GRAD_CLIP = 100
# How often should we check the output?
EPOCH_SIZE = 100
# Number of epochs to train the net
NUM_EPOCHS = 600
# Exact sequence length
TIME_SEQUENCES=100
def gen_data(min_length=MIN_LENGTH, max_length=MAX_LENGTH, n_batch=N_BATCH):
'''
Generate a batch of sequences for the "add" task, e.g. the target for the
following
``| 0.5 | 0.7 | 0.3 | 0.1 | 0.2 | ... | 0.5 | 0.9 | ... | 0.8 | 0.2 |
| 0 | 0 | 1 | 0 | 0 | | 0 | 1 | | 0 | 0 |``
would be 0.3 + .9 = 1.2. This task was proposed in [1]_ and explored in
e.g. [2]_.
Parameters
----------
min_length : int
Minimum sequence length.
max_length : int
Maximum sequence length.
n_batch : int
Number of samples in the batch.
Returns
-------
X : np.ndarray
Input to the network, of shape (n_batch, max_length, 2), where the last
dimension corresponds to the two sequences shown above.
y : np.ndarray
Correct output for each sample, shape (n_batch,).
mask : np.ndarray
A binary matrix of shape (n_batch, max_length) where ``mask[i, j] = 1``
when ``j <= (length of sequence i)`` and ``mask[i, j] = 0`` when ``j >
(length of sequence i)``.
References
----------
.. [1] Hochreiter, Sepp, and Jürgen Schmidhuber. "Long short-term memory."
Neural computation 9.8 (1997): 1735-1780.
.. [2] Sutskever, Ilya, et al. "On the importance of initialization and
momentum in deep learning." Proceedings of the 30th international
conference on machine learning (ICML-13). 2013.
'''
# Generate X - we'll fill the last dimension later
X = np.concatenate([np.random.uniform(size=(n_batch, max_length, 1)),
np.zeros((n_batch, max_length, 1))],
axis=-1)
mask = np.zeros((n_batch, max_length), dtype='int32')
y = np.zeros((n_batch,))
# Compute masks and correct values
for n in range(n_batch):
# Randomly choose the sequence length
length = np.random.randint(min_length, max_length)
# Make the mask for this sample 1 within the range of length
mask[n, :length] = 1
# Zero out X after the end of the sequence
X[n, length:, 0] = 0
# Set the second dimension to 1 at the indices to add
        X[n, np.random.randint(length//10), 1] = 1
        X[n, np.random.randint(length//2, length), 1] = 1
# Multiply and sum the dimensions of X to get the target value
y[n] = np.sum(X[n, :, 0]*X[n, :, 1])
# Center the inputs and outputs
X -= X.reshape(-1, 2).mean(axis=0)
y -= y.mean()
return (X.astype(theano.config.floatX), y.astype(theano.config.floatX),
mask.astype('int32'))
def main(n_iter, n_batch, n_hidden, time_steps, learning_rate, savefile, model, input_type, out_every_t, loss_function):
n_input = 2
n_output = 1
n_train = 100000
n_test = 10000
num_batches = n_train // n_batch
# --- Create data --------------------
train_x, train_y, train_mask = gen_data(min_length=time_steps, max_length=time_steps + 1, n_batch=n_train)
test_x, test_y, val_mask = gen_data(min_length=time_steps, max_length=time_steps + 1, n_batch=n_test)
s_train_x = theano.shared(train_x)
s_train_y = theano.shared(train_y)
s_test_x = theano.shared(test_x)
s_test_y = theano.shared(test_y)
gradient_clipping = np.float32(1)
learning_rate = theano.shared(np.array(learning_rate, dtype=theano.config.floatX))
# building network
    l_in = lasagne.layers.InputLayer(shape=(None, MAX_LENGTH, n_input))
    l_mask = lasagne.layers.InputLayer(shape=(None, MAX_LENGTH), input_var=T.imatrix("mask"))
    l_in_hid = lasagne.layers.DenseLayer(lasagne.layers.InputLayer((None, n_input)), N_HIDDEN * 2)
# building hidden-hidden recurrent layer
if model == "":
pass
if __name__ == "__main__":
print("Building network ...")
N_INPUT=2
learning_rate = theano.shared(np.array(LEARNING_RATE, dtype=theano.config.floatX))
# input layer of shape (n_batch, n_timestems, n_input)
l_in = lasagne.layers.InputLayer(shape=(None, MAX_LENGTH, N_INPUT))
# mask of shape (n_batch, n_timesteps)
l_mask = lasagne.layers.InputLayer(shape=(None, MAX_LENGTH),input_var=T.imatrix("mask"))
# define input-to-hidden and hidden-to-hidden linear transformations
l_in_hid = lasagne.layers.DenseLayer(lasagne.layers.InputLayer((None, N_INPUT)), N_HIDDEN * 2)
#l_hid_hid = ComplexLayer(lasagne.layers.InputLayer((None, N_HIDDEN * 2)))
l_hid_hid = UnitaryLayer(lasagne.layers.InputLayer((None, N_HIDDEN * 2)))
manifolds = {}
#l_hid_hid = WTTLayer(lasagne.layers.InputLayer((None, N_HIDDEN * 2)), [3]*4, [2]*3)
manifold = l_hid_hid.manifold
if not isinstance(manifold, list):
manifold = [manifold]
manifolds = {man.str_id: man for man in manifold}
#manifolds = {}
# recurrent layer using linearities defined above
l_rec = RecurrentUnitaryLayer(l_in, l_in_hid, l_hid_hid, nonlinearity=ModRelu(lasagne.layers.InputLayer((None, N_HIDDEN * 2))),
mask_input=l_mask, only_return_final=True)
print(lasagne.layers.get_output_shape(l_rec))
# nonlinearity for recurrent layer output
#l_nonlin = ModRelu(l_rec)
#print(lasagne.layers.get_output_shape(l_nonlin))
l_reshape = lasagne.layers.ReshapeLayer(l_rec, (-1, N_HIDDEN * 2))
print(lasagne.layers.get_output_shape(l_reshape))
# Our output layer is a simple dense connection, with 1 output unit
l_dense = lasagne.layers.DenseLayer(l_reshape, num_units=1, nonlinearity=None)
l_out = lasagne.layers.ReshapeLayer(l_dense, (N_BATCH, -1))
print(lasagne.layers.get_output_shape(l_out))
target_values = T.vector('target_output')
# lasagne.layers.get_output produces a variable for the output of the net
network_output = lasagne.layers.get_output(l_out)
predicted_values = network_output.flatten()
# Our cost will be mean-squared error
cost = T.mean((predicted_values - target_values)**2)
# Retrieve all parameters from the network
all_params = lasagne.layers.get_all_params(l_out,trainable=True)
print(all_params)
print(lasagne.layers.get_all_params(l_rec))
# Compute SGD updates for training
print("Computing updates ...")
updates = custom_sgd(cost, all_params, LEARNING_RATE, manifolds)
# Theano functions for training and computing cost
print("Compiling functions ...")
train = theano.function([l_in.input_var, target_values, l_mask.input_var],
cost, updates=updates, on_unused_input='warn')
compute_cost = theano.function(
[l_in.input_var, target_values, l_mask.input_var], cost, on_unused_input='warn')
# We'll use this "validation set" to periodically check progress
X_val, y_val, mask_val = gen_data(n_batch=100)
#TEST
#ll = lasagne.layers.InputLayer((None, N_HIDDEN * 2))
#v = ModRelu(ll)
#v_out =lasagne.layers.get_output(v)
#print(T.grad(v_out.mean(),ll.input_var).eval({ll.input_var: np.zeros([5,N_HIDDEN*2])})) #with ones its okay
#TEST
try:
for epoch in range(NUM_EPOCHS):
if (epoch + 1) % 100 == 0:
learning_rate.set_value(learning_rate.get_value() * 0.9)
cost_val = compute_cost(X_val, y_val, mask_val)
for _ in range(EPOCH_SIZE):
X, y, m = gen_data()
train(X, y, m.astype('int32'))
print("Epoch {} validation cost = {}".format(epoch, cost_val))
except KeyboardInterrupt:
pass
|
Nehoroshiy/urnn
|
examples/lasagne_rnn.py
|
Python
|
mit
| 8,511
|
#!/usr/bin/env python
import os
import os.path
import re
import sys
# Ugly hack so we can import pytoutv_plus
lib_path = os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'video-tools'))
sys.path.append(lib_path)
import pytoutv_plus
def main():
filenames = []
for (dirpath, dirnames, dirfilenames) in os.walk(os.getcwd()):
if dirfilenames != []:
filenames.extend(dirfilenames)
data = pytoutv_plus.Data()
filename_chars = 'àÀâÂçÇéÉèÈêÊëîÎôÔ\w\-\'\.\(\)'
pattern = re.compile('([{0}]+)\.(S([\d]+)E[\d]+)\.([{0}]+)\.([\d]+)kbps\.ts'.format(filename_chars))
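    # e.g. a hypothetical 'Les.Simpson.S01E02.Le.Premier.Episode.1200kbps.ts'
    # matches with groups ('Les.Simpson', 'S01E02', '01', 'Le.Premier.Episode', '1200')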
for filename in filenames:
# Skip anything not ending in .ts
if not filename.endswith('.ts'):
continue
match = pattern.search(filename)
if match:
emission_title = match.group(1).replace('.', ' ')
episode_sae = match.group(2)
episode_season = match.group(3)
episode_title = match.group(4).replace('.', ' ')
episode_bitrate = int(match.group(5)) * 1000
else:
sys.stderr.write('Warning: no match for file {}\n'.format(filename))
# Go to the next file
continue
for emission in data.emissions:
if emission_title.lower() == emission.title.lower():
break
else:
sys.stderr.write('Warning: no match for emission {}\n'.format(emission_title))
# Go to the next file
continue
for episode in emission.episodes:
if episode_title.lower() == episode.title.lower():
print('Skipping {} - {} - {}'.format(
emission_title,
episode_sae,
episode_title))
# Episode match, go to next file
break
else:
# If we've had an emission match but no episode match, add the episode to the emission
print('Importing {} - {} - {}'.format(
emission_title,
episode_sae,
episode_title))
ep = pytoutv_plus.Episode()
ep.title = episode_title
ep.bitrate = episode_bitrate
emission.episodes.append(ep)
data.write()
if __name__ == '__main__':
main()
|
bmaupin/junkpile
|
python/one-offs/import-toutv.py
|
Python
|
mit
| 2,456
|
# -*- coding: cp936 -*-
#!/usr/bin/env python
# Author:
# Quinn Song <quinn4dev@gmail.com>
# add_brackets.py: insert one or two pairs of brackets randomly;
# and return a valid expression
import random
import re
def apply_pattern1 (source):
"""
    Add brackets using pattern 1
"""
source = re.sub('([+*/-])(?!\(|\d+\))', '\g<1>(',source, count = 1)
return re.sub('(\(\d+[+*/-]\d+)([+*/-]*)', '\g<1>)\g<2>',source, count = 1)
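# e.g. apply_pattern1('27+4*41-5+7-11') -> '27+(4*41)-5+7-11'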
def apply_pattern2(source):
"""
    Add brackets using one of four lambda-based patterns
"""
f0 = lambda s: re.sub(r'^(.*[+*/-].*)((?:[+*/-]\d+)+)', '(\g<1>)\g<2>', s)
f1 = lambda s: re.sub('(\d+[+*/-])(.*[+*/-].*)$', '\g<1>(\g<2>)',s)
f2 = lambda s: re.sub('(\d+[+*/-])(.*[+*/-].*)([+*/-]\d+)$', '\g<1>(\g<2>)\g<3>',s)
f3 = lambda s: re.sub('(\d+[+*/-])((?:\d+[+*/-]){1,3}\d+)([+*/-]\d+)$', '\g<1>(\g<2>)\g<3>',s)
f = [ f0, f1, f2, f3 ]
choice = random.randint(0, 3)
return f[choice](source)
def add_br (source):
"""
    Apply a pattern chosen at random from a weighted list
"""
none = lambda x: x
g = [ apply_pattern1, apply_pattern2, none]
choice = random.choice([0, 0, 1, 1, 1, 1, 2, 2])
return g[choice](source)
def main (source):
"""
Main function
"""
tmp = add_br(source)
return add_br(tmp) if random.choice([True, False]) else tmp
if __name__ == '__main__':
#op = '1+2-3*5/8-9+20*3-6'
op = '27+4*41-5+7-11'
print main(op)
|
QuinnSong/LoveMath
|
src/add_brackets.py
|
Python
|
mit
| 1,489
|
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.7
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info
if version_info >= (2, 6, 0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_SimAppObjNameDefault_DistributionSystem_SitePowerDemand', [dirname(__file__)])
except ImportError:
import _SimAppObjNameDefault_DistributionSystem_SitePowerDemand
return _SimAppObjNameDefault_DistributionSystem_SitePowerDemand
if fp is not None:
try:
_mod = imp.load_module('_SimAppObjNameDefault_DistributionSystem_SitePowerDemand', fp, pathname, description)
finally:
fp.close()
return _mod
_SimAppObjNameDefault_DistributionSystem_SitePowerDemand = swig_import_helper()
del swig_import_helper
else:
import _SimAppObjNameDefault_DistributionSystem_SitePowerDemand
del version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
if (name == "thisown"):
return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name, None)
if method:
return method(self, value)
if (not static):
if _newclass:
object.__setattr__(self, name, value)
else:
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr_nondynamic(self, class_type, name, static=1):
if (name == "thisown"):
return self.this.own()
method = class_type.__swig_getmethods__.get(name, None)
if method:
return method(self)
if (not static):
return object.__getattr__(self, name)
else:
raise AttributeError(name)
def _swig_getattr(self, class_type, name):
return _swig_getattr_nondynamic(self, class_type, name, 0)
def _swig_repr(self):
try:
strthis = "proxy of " + self.this.__repr__()
except:
strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except AttributeError:
class _object:
pass
_newclass = 0
try:
import weakref
weakref_proxy = weakref.proxy
except:
weakref_proxy = lambda x: x
import base
import SimAppObjNameDefault_DistributionSystem_HvacAirLoop
import SimAppObjNameDefault_BldgComponentGroup_HvacComponent
class SimAppObjNameDefault_DistributionSystem_SitePowerDemand(SimAppObjNameDefault_DistributionSystem_HvacAirLoop.SimAppObjNameDefault_DistributionSystem):
__swig_setmethods__ = {}
for _s in [SimAppObjNameDefault_DistributionSystem_HvacAirLoop.SimAppObjNameDefault_DistributionSystem]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, SimAppObjNameDefault_DistributionSystem_SitePowerDemand, name, value)
__swig_getmethods__ = {}
for _s in [SimAppObjNameDefault_DistributionSystem_HvacAirLoop.SimAppObjNameDefault_DistributionSystem]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, SimAppObjNameDefault_DistributionSystem_SitePowerDemand, name)
__repr__ = _swig_repr
def __init__(self, *args):
this = _SimAppObjNameDefault_DistributionSystem_SitePowerDemand.new_SimAppObjNameDefault_DistributionSystem_SitePowerDemand(*args)
try:
self.this.append(this)
except:
self.this = this
def _clone(self, f=0, c=None):
return _SimAppObjNameDefault_DistributionSystem_SitePowerDemand.SimAppObjNameDefault_DistributionSystem_SitePowerDemand__clone(self, f, c)
__swig_destroy__ = _SimAppObjNameDefault_DistributionSystem_SitePowerDemand.delete_SimAppObjNameDefault_DistributionSystem_SitePowerDemand
__del__ = lambda self: None
SimAppObjNameDefault_DistributionSystem_SitePowerDemand_swigregister = _SimAppObjNameDefault_DistributionSystem_SitePowerDemand.SimAppObjNameDefault_DistributionSystem_SitePowerDemand_swigregister
SimAppObjNameDefault_DistributionSystem_SitePowerDemand_swigregister(SimAppObjNameDefault_DistributionSystem_SitePowerDemand)
class SimAppObjNameDefault_DistributionSystem_SitePowerDemand_sequence(base.sequence_common):
__swig_setmethods__ = {}
for _s in [base.sequence_common]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, SimAppObjNameDefault_DistributionSystem_SitePowerDemand_sequence, name, value)
__swig_getmethods__ = {}
for _s in [base.sequence_common]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, SimAppObjNameDefault_DistributionSystem_SitePowerDemand_sequence, name)
__repr__ = _swig_repr
def __init__(self, *args):
this = _SimAppObjNameDefault_DistributionSystem_SitePowerDemand.new_SimAppObjNameDefault_DistributionSystem_SitePowerDemand_sequence(*args)
try:
self.this.append(this)
except:
self.this = this
def assign(self, n, x):
return _SimAppObjNameDefault_DistributionSystem_SitePowerDemand.SimAppObjNameDefault_DistributionSystem_SitePowerDemand_sequence_assign(self, n, x)
def begin(self, *args):
return _SimAppObjNameDefault_DistributionSystem_SitePowerDemand.SimAppObjNameDefault_DistributionSystem_SitePowerDemand_sequence_begin(self, *args)
def end(self, *args):
return _SimAppObjNameDefault_DistributionSystem_SitePowerDemand.SimAppObjNameDefault_DistributionSystem_SitePowerDemand_sequence_end(self, *args)
def rbegin(self, *args):
return _SimAppObjNameDefault_DistributionSystem_SitePowerDemand.SimAppObjNameDefault_DistributionSystem_SitePowerDemand_sequence_rbegin(self, *args)
def rend(self, *args):
return _SimAppObjNameDefault_DistributionSystem_SitePowerDemand.SimAppObjNameDefault_DistributionSystem_SitePowerDemand_sequence_rend(self, *args)
def at(self, *args):
return _SimAppObjNameDefault_DistributionSystem_SitePowerDemand.SimAppObjNameDefault_DistributionSystem_SitePowerDemand_sequence_at(self, *args)
def front(self, *args):
return _SimAppObjNameDefault_DistributionSystem_SitePowerDemand.SimAppObjNameDefault_DistributionSystem_SitePowerDemand_sequence_front(self, *args)
def back(self, *args):
return _SimAppObjNameDefault_DistributionSystem_SitePowerDemand.SimAppObjNameDefault_DistributionSystem_SitePowerDemand_sequence_back(self, *args)
def push_back(self, *args):
return _SimAppObjNameDefault_DistributionSystem_SitePowerDemand.SimAppObjNameDefault_DistributionSystem_SitePowerDemand_sequence_push_back(self, *args)
def pop_back(self):
return _SimAppObjNameDefault_DistributionSystem_SitePowerDemand.SimAppObjNameDefault_DistributionSystem_SitePowerDemand_sequence_pop_back(self)
def detach_back(self, pop=True):
return _SimAppObjNameDefault_DistributionSystem_SitePowerDemand.SimAppObjNameDefault_DistributionSystem_SitePowerDemand_sequence_detach_back(self, pop)
def insert(self, *args):
return _SimAppObjNameDefault_DistributionSystem_SitePowerDemand.SimAppObjNameDefault_DistributionSystem_SitePowerDemand_sequence_insert(self, *args)
def erase(self, *args):
return _SimAppObjNameDefault_DistributionSystem_SitePowerDemand.SimAppObjNameDefault_DistributionSystem_SitePowerDemand_sequence_erase(self, *args)
def detach(self, position, r, erase=True):
return _SimAppObjNameDefault_DistributionSystem_SitePowerDemand.SimAppObjNameDefault_DistributionSystem_SitePowerDemand_sequence_detach(self, position, r, erase)
def swap(self, x):
return _SimAppObjNameDefault_DistributionSystem_SitePowerDemand.SimAppObjNameDefault_DistributionSystem_SitePowerDemand_sequence_swap(self, x)
__swig_destroy__ = _SimAppObjNameDefault_DistributionSystem_SitePowerDemand.delete_SimAppObjNameDefault_DistributionSystem_SitePowerDemand_sequence
__del__ = lambda self: None
SimAppObjNameDefault_DistributionSystem_SitePowerDemand_sequence_swigregister = _SimAppObjNameDefault_DistributionSystem_SitePowerDemand.SimAppObjNameDefault_DistributionSystem_SitePowerDemand_sequence_swigregister
SimAppObjNameDefault_DistributionSystem_SitePowerDemand_sequence_swigregister(SimAppObjNameDefault_DistributionSystem_SitePowerDemand_sequence)
# This file is compatible with both classic and new-style classes.
|
EnEff-BIM/EnEffBIM-Framework
|
SimModel_Python_API/simmodel_swig/Release/SimAppObjNameDefault_DistributionSystem_SitePowerDemand.py
|
Python
|
mit
| 9,158
|
from pylab import *
D = loadtxt("MCP4161-104E-P_Resistance_vs_Ntap.csv",delimiter=",")
X = D[:,0]
Y = D[:,1]
p = polyfit(X,Y,1)
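# first-degree least-squares fit: p[0] is the slope [kOhm/tap], p[1] the intercept [kOhm]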
X_fit = arange(257)
Y_fit = X_fit*p[0] + p[1]
plot(X,Y,'b.')
plot(X_fit,Y_fit, 'k-')
title("Calibration of MCP4161-104E/P\n8bit 100k Digital Potentiomenter")
xlabel("Tap Number, N")
ylabel(r"Resistance $R_{W0\rightarrow B0}$ [k$\Omega$]")
text(10,80,"$Y = %0.4f X + %0.4f$" % (p[0],p[1]))
savefig("MCP4161-104E-P_Resistance_vs_Ntap.png")
|
p-v-o-s/olm-pstat
|
test/MCP4161-104E-P_Resistance_vs_Ntap_analysis.py
|
Python
|
mit
| 470
|
from django.db import models
from samples.models import Specimen
from picker.models import SpecimenSource
from analysis.models import Analysis
from agency.models import Agency
# Create your models here.
class Order(models.Model):
specimen = models.ForeignKey(Specimen)
analysis = models.ForeignKey(Analysis)
order_time = models.DateTimeField(auto_now_add=True)
submitted = models.BooleanField(default=False)
submitted_time = models.DateTimeField()
def __unicode__(self):
        return str(self.specimen) + str(self.order_time)
class Package(models.Model):
submitted = models.BooleanField(default=False)
submitted_time = models.DateTimeField(auto_now_add=True)
orders = models.ManyToManyField(Order)
tracking_number = models.CharField(max_length=255)
submitting_agency = models.ForeignKey(Agency)
def __unicode__(self):
        return str(self.id) + str(self.submitting_agency) + str(self.submitted_time)
|
timothypage/etor
|
etor/order/models.py
|
Python
|
mit
| 952
|
import random
from collections import defaultdict
class MarkovGenerator(object):
def __init__(self, corpus, tuple_size=3):
""" Initialize the MarkovGenerator object.
Digests the corpus of text provided as a list of tokens and creates a cache of
predicted next-word values
        Code for markov generator based on: http://agiliq.com/blog/2009/06/generating-pseudo-random-text-with-markov-chains-u/
:param corpus: (list) a source text of word tokens to generate random text from
:param tuple_size: (int: default 3) the size of the tuple to use to generate
text. A larger tuple will increase memory usage, but produce
more realistic results
"""
self.corpus = corpus
self.corpus_size = len(corpus)
self.tuple_size = tuple_size
self.cache = defaultdict(list)
self._initialize_cache()
def _generate_ngrams(self):
""" Generate ngrams from the corpus
        Slide a window of width `tuple_size` over the corpus, yielding each
        contiguous n-tuple of tokens.
:yield: (tuple) a tuple of length n
"""
n = self.tuple_size
if len(self.corpus) < n:
return
for i in range(len(self.corpus) - (n - 1)):
yield tuple([self.corpus[i + x] for x in range(n)])
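        # e.g. corpus ['a', 'b', 'c', 'd'] with tuple_size=3 yields
        # ('a', 'b', 'c') and ('b', 'c', 'd')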
def _initialize_cache(self):
""" Initialize the cache
Set up the cache object to generate predicted strings.
"""
for word_tuple in self._generate_ngrams():
self.cache[word_tuple[0:-1]].append(word_tuple[-1])
def generate_markov_text(self, size, override_seed=None):
""" Generate a pseudo-random block of text
:param size: (int) Length of text to generate. Should be << than the
size of the total corpus for good results
:param override_seed: (str: default None) Word to seed the generator
with if set
:return: (str) a string of randomly-generated text
"""
if not override_seed:
seed = random.randint(0, self.corpus_size - self.tuple_size)
else:
indices = [i for i, x in enumerate(self.corpus) if x == override_seed]
try:
seed = random.choice(indices)
except IndexError:
seed = random.randint(0, self.corpus_size - self.tuple_size)
seed_words = self.corpus[seed: seed + self.tuple_size]
gen_words = []
for i in xrange(size):
gen_words.append(seed_words[0])
seed_words.pop(0)
try:
seed_words.append(random.choice(self.cache[tuple(seed_words)]))
# catch cases where there isn't a word to pick
except IndexError:
seed_words.append(random.choice(self.corpus))
gen_words.append(seed_words[0])
return ' '.join(gen_words)
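# Usage sketch (hypothetical corpus file; output varies with random state):
#
#   corpus = open('corpus.txt').read().split()
#   generator = MarkovGenerator(corpus, tuple_size=3)
#   print generator.generate_markov_text(50)  # Python 2 print, matching the xrange above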
|
bmd/markov-at-the-movies
|
markov_generator.py
|
Python
|
mit
| 2,998
|
from .cypher import Create, Match, Merge
from .graph import Graph
from .ogm import OGMBase, OneToManyRelation, ManyToManyRelation
from .primitives import Node, Relationship
from .shared.objects import Property
|
TwoBitAlchemist/NeoAlchemy
|
neoalchemy/__init__.py
|
Python
|
mit
| 210
|
# THIS FILE IS AUTO-GENERATED. DO NOT EDIT
from verta._swagger.base_type import BaseType
class UacSetOrganizationResponse(BaseType):
def __init__(self, organization=None):
required = {
"organization": False,
}
self.organization = organization
for k, v in required.items():
if self[k] is None and v:
raise ValueError('attribute {} is required'.format(k))
@staticmethod
def from_json(d):
from .UacOrganization import UacOrganization
tmp = d.get('organization', None)
if tmp is not None:
d['organization'] = UacOrganization.from_json(tmp)
return UacSetOrganizationResponse(**d)
|
mitdbg/modeldb
|
client/verta/verta/_swagger/_public/uac/model/UacSetOrganizationResponse.py
|
Python
|
mit
| 645
|
from django.conf.urls import patterns, url
from django.views.generic import ListView, DetailView
from .models import Audio, Videos
urlpatterns = patterns('',
url(r'^audio/$', ListView.as_view(model=Audio,
queryset=Audio.objects.all(),
paginate_by=5),
name='audio_lista'),
url(r'^audio/(?P<slug>[-_\w]+)/$', DetailView.as_view(model=Audio,
queryset=Audio.objects.all(),
),
name='audio_detalles'),
url(r'^videos/$', ListView.as_view(model=Videos,
queryset=Videos.objects.all(),
paginate_by=5),
name='videos_lista'),
url(r'^videos/(?P<slug>[-_\w]+)/$', DetailView.as_view(model=Videos,
queryset=Videos.objects.all(),
),
name='videos_detalles'),
)
|
CARocha/cesesma
|
multimedia/urls.py
|
Python
|
mit
| 1,282
|
#!/usr/bin/python
import os
import shutil
base = os.path.dirname(os.path.realpath(__file__))
base = os.path.join(base, '../boot')
def create_dir(path):
path = os.path.join(base, path)
if not os.path.exists(path):
os.makedirs(path)
def delete_dir(path):
path = os.path.join(base, path)
shutil.rmtree(path)
def copy_file(source, target):
source = os.path.join(base, source)
target = os.path.join(base, target)
shutil.copyfile(source, target)
create_dir('image')
create_dir('image/boot/grub/grub-i386-pc')
create_dir('image/modules')
copy_file('grub.cfg', 'image/boot/grub/grub.cfg')
copy_file('initrd', 'image/modules/initrd')
copy_file('../build/sys/firedrake', 'image/boot/firedrake')
os.chdir(base)
os.system('grub-mkrescue --modules="boot" --output="./Firedrake.iso" "./image"')
delete_dir('image')
|
JustSid/Firedrake
|
scripts/make_image.py
|
Python
|
mit
| 851
|
#!/usr/bin/env python
#
# Wrapper script for Java Conda packages that ensures that the java runtime
# is invoked with the right options. Adapted from the bash script (http://stackoverflow.com/questions/59895/can-a-bash-script-tell-what-directory-its-stored-in/246128#246128).
#
#
# Program Parameters
#
import os
import sys
import subprocess
from os import access, getenv, X_OK
jar_file = 'DeDup-0.12.4.jar'
default_jvm_mem_opts = ['-Xms512m', '-Xmx1g']
# !!! End of parameter section. No user-serviceable code below this line !!!
def real_dirname(path):
"""Return the symlink-resolved, canonicalized directory-portion of path."""
return os.path.dirname(os.path.realpath(path))
def java_executable():
"""Return the executable name of the Java interpreter."""
java_home = getenv('JAVA_HOME')
java_bin = os.path.join('bin', 'java')
if java_home and access(os.path.join(java_home, java_bin), X_OK):
return os.path.join(java_home, java_bin)
else:
return 'java'
def jvm_opts(argv):
"""Construct list of Java arguments based on our argument list.
The argument list passed in argv must not include the script name.
The return value is a 3-tuple lists of strings of the form:
(memory_options, prop_options, passthrough_options)
"""
mem_opts = []
prop_opts = []
pass_args = []
for arg in argv:
if arg.startswith('-D'):
prop_opts.append(arg)
elif arg.startswith('-XX'):
prop_opts.append(arg)
elif arg.startswith('-Xm'):
mem_opts.append(arg)
else:
pass_args.append(arg)
# In the original shell script the test coded below read:
# if [ "$jvm_mem_opts" == "" ] && [ -z ${_JAVA_OPTIONS+x} ]
# To reproduce the behaviour of the above shell code fragment
    # it is important to explicitly check for equality with None
# in the second condition, so a null envar value counts as True!
if mem_opts == [] and getenv('_JAVA_OPTIONS') == None:
mem_opts = default_jvm_mem_opts
return (mem_opts, prop_opts, pass_args)
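# Example (illustrative):
#   jvm_opts(['-Xmx2g', '-Dsnappy=off', 'input.bam'])
#   -> (['-Xmx2g'], ['-Dsnappy=off'], ['input.bam'])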
def main():
java = java_executable()
jar_dir = real_dirname(sys.argv[0])
(mem_opts, prop_opts, pass_args) = jvm_opts(sys.argv[1:])
if pass_args != [] and pass_args[0].startswith('eu'):
jar_arg = '-cp'
else:
jar_arg = '-jar'
jar_path = os.path.join(jar_dir, jar_file)
java_args = [java]+ mem_opts + prop_opts + [jar_arg] + [jar_path] + pass_args
if '--jar_dir' in sys.argv[1:]:
print(jar_path)
else:
sys.exit(subprocess.call(java_args))
if __name__ == '__main__':
main()
|
rob-p/bioconda-recipes
|
recipes/dedup/dedup.py
|
Python
|
mit
| 2,655
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class VirtualNetworkGatewayConnection(Resource):
"""A common class for general resource information.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:param id: Resource ID.
:type id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Resource location.
:type location: str
:param tags: Resource tags.
:type tags: dict[str, str]
:param authorization_key: The authorizationKey.
:type authorization_key: str
:param virtual_network_gateway1: Required. The reference to virtual
network gateway resource.
:type virtual_network_gateway1:
~azure.mgmt.network.v2018_01_01.models.VirtualNetworkGateway
:param virtual_network_gateway2: The reference to virtual network gateway
resource.
:type virtual_network_gateway2:
~azure.mgmt.network.v2018_01_01.models.VirtualNetworkGateway
:param local_network_gateway2: The reference to local network gateway
resource.
:type local_network_gateway2:
~azure.mgmt.network.v2018_01_01.models.LocalNetworkGateway
:param connection_type: Required. Gateway connection type. Possible values
    are: 'IPsec', 'Vnet2Vnet', 'ExpressRoute', and 'VPNClient'. Possible values
include: 'IPsec', 'Vnet2Vnet', 'ExpressRoute', 'VPNClient'
:type connection_type: str or
~azure.mgmt.network.v2018_01_01.models.VirtualNetworkGatewayConnectionType
:param routing_weight: The routing weight.
:type routing_weight: int
:param shared_key: The IPSec shared key.
:type shared_key: str
:ivar connection_status: Virtual network Gateway connection status.
Possible values are 'Unknown', 'Connecting', 'Connected' and
'NotConnected'. Possible values include: 'Unknown', 'Connecting',
'Connected', 'NotConnected'
:vartype connection_status: str or
~azure.mgmt.network.v2018_01_01.models.VirtualNetworkGatewayConnectionStatus
:ivar tunnel_connection_status: Collection of all tunnels' connection
health status.
:vartype tunnel_connection_status:
list[~azure.mgmt.network.v2018_01_01.models.TunnelConnectionHealth]
:ivar egress_bytes_transferred: The egress bytes transferred in this
connection.
:vartype egress_bytes_transferred: long
:ivar ingress_bytes_transferred: The ingress bytes transferred in this
connection.
:vartype ingress_bytes_transferred: long
:param peer: The reference to peerings resource.
:type peer: ~azure.mgmt.network.v2018_01_01.models.SubResource
:param enable_bgp: EnableBgp flag
:type enable_bgp: bool
:param use_policy_based_traffic_selectors: Enable policy-based traffic
selectors.
:type use_policy_based_traffic_selectors: bool
:param ipsec_policies: The IPSec Policies to be considered by this
connection.
:type ipsec_policies:
list[~azure.mgmt.network.v2018_01_01.models.IpsecPolicy]
:param resource_guid: The resource GUID property of the
VirtualNetworkGatewayConnection resource.
:type resource_guid: str
:ivar provisioning_state: The provisioning state of the
VirtualNetworkGatewayConnection resource. Possible values are: 'Updating',
'Deleting', and 'Failed'.
:vartype provisioning_state: str
:param etag: Gets a unique read-only string that changes whenever the
resource is updated.
:type etag: str
"""
_validation = {
'name': {'readonly': True},
'type': {'readonly': True},
'virtual_network_gateway1': {'required': True},
'connection_type': {'required': True},
'connection_status': {'readonly': True},
'tunnel_connection_status': {'readonly': True},
'egress_bytes_transferred': {'readonly': True},
'ingress_bytes_transferred': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'authorization_key': {'key': 'properties.authorizationKey', 'type': 'str'},
'virtual_network_gateway1': {'key': 'properties.virtualNetworkGateway1', 'type': 'VirtualNetworkGateway'},
'virtual_network_gateway2': {'key': 'properties.virtualNetworkGateway2', 'type': 'VirtualNetworkGateway'},
'local_network_gateway2': {'key': 'properties.localNetworkGateway2', 'type': 'LocalNetworkGateway'},
'connection_type': {'key': 'properties.connectionType', 'type': 'str'},
'routing_weight': {'key': 'properties.routingWeight', 'type': 'int'},
'shared_key': {'key': 'properties.sharedKey', 'type': 'str'},
'connection_status': {'key': 'properties.connectionStatus', 'type': 'str'},
'tunnel_connection_status': {'key': 'properties.tunnelConnectionStatus', 'type': '[TunnelConnectionHealth]'},
'egress_bytes_transferred': {'key': 'properties.egressBytesTransferred', 'type': 'long'},
'ingress_bytes_transferred': {'key': 'properties.ingressBytesTransferred', 'type': 'long'},
'peer': {'key': 'properties.peer', 'type': 'SubResource'},
'enable_bgp': {'key': 'properties.enableBgp', 'type': 'bool'},
'use_policy_based_traffic_selectors': {'key': 'properties.usePolicyBasedTrafficSelectors', 'type': 'bool'},
'ipsec_policies': {'key': 'properties.ipsecPolicies', 'type': '[IpsecPolicy]'},
'resource_guid': {'key': 'properties.resourceGuid', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, **kwargs):
super(VirtualNetworkGatewayConnection, self).__init__(**kwargs)
self.authorization_key = kwargs.get('authorization_key', None)
self.virtual_network_gateway1 = kwargs.get('virtual_network_gateway1', None)
self.virtual_network_gateway2 = kwargs.get('virtual_network_gateway2', None)
self.local_network_gateway2 = kwargs.get('local_network_gateway2', None)
self.connection_type = kwargs.get('connection_type', None)
self.routing_weight = kwargs.get('routing_weight', None)
self.shared_key = kwargs.get('shared_key', None)
self.connection_status = None
self.tunnel_connection_status = None
self.egress_bytes_transferred = None
self.ingress_bytes_transferred = None
self.peer = kwargs.get('peer', None)
self.enable_bgp = kwargs.get('enable_bgp', None)
self.use_policy_based_traffic_selectors = kwargs.get('use_policy_based_traffic_selectors', None)
self.ipsec_policies = kwargs.get('ipsec_policies', None)
self.resource_guid = kwargs.get('resource_guid', None)
self.provisioning_state = None
self.etag = kwargs.get('etag', None)
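# Illustrative construction (gw1 is a hypothetical VirtualNetworkGateway model
# instance; virtual_network_gateway1 and connection_type are required):
#   conn = VirtualNetworkGatewayConnection(
#       location='westus',
#       virtual_network_gateway1=gw1,
#       connection_type='Vnet2Vnet',
#   )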
|
lmazuel/azure-sdk-for-python
|
azure-mgmt-network/azure/mgmt/network/v2018_01_01/models/virtual_network_gateway_connection.py
|
Python
|
mit
| 7,595
|
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.15 (https://github.com/warner/python-versioneer)
import errno
import os
import re
import subprocess
import sys
def get_keywords():
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
keywords = {"refnames": git_refnames, "full": git_full}
return keywords
class VersioneerConfig:
pass
def get_config():
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440"
cfg.tag_prefix = ""
cfg.parentdir_prefix = "jsonwatch-"
cfg.versionfile_source = "jsonwatchqt/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
pass
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
def decorate(f):
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
return None
return stdout
def versions_from_parentdir(parentdir_prefix, root, verbose):
# Source tarballs conventionally unpack into a directory that includes
# both the project name and a version string.
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '%s', but '%s' doesn't start with "
"prefix '%s'" % (root, dirname, parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None}
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
if not keywords:
raise NotThisMethod("no keywords at all, weird")
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs-tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None
}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags"}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
# this runs 'git' from the root of the source tree. This only gets called
# if the git-archive 'subst' keywords were *not* expanded, and
# _version.py hasn't already been rewritten with a short version string,
# meaning we're inside a checked out source tree.
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in %s" % root)
raise NotThisMethod("no .git directory")
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
# if there is a tag, this yields TAG-NUM-gHEX[-dirty]
# if there are no tags, this yields HEX[-dirty] (no NUM)
describe_out = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long"],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
return pieces
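# Example (sketch): for describe_out == "v1.2-3-g0123abc-dirty" and
# tag_prefix == "v", the resulting pieces are roughly:
#
#     {"closest-tag": "1.2", "distance": 3, "short": "0123abc",
#      "dirty": True, "long": "<full 40-char sha>", "error": None}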
def plus_or_dot(pieces):
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
# now build up version string, with post-release "local version
# identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
# get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
# exceptions:
# 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
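# Example (sketch): with the pieces above, render_pep440() yields
# "1.2+3.g0123abc.dirty"; an exactly-tagged clean checkout (distance 0,
# dirty False) renders as plain "1.2".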
def render_pep440_pre(pieces):
# TAG[.post.devDISTANCE] . No -dirty
# exceptions:
# 1: no tags. 0.post.devDISTANCE
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
# TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that
# .dev0 sorts backwards (a dirty tree will appear "older" than the
# corresponding clean one), but you shouldn't be releasing software with
# -dirty anyways.
# exceptions:
# 1: no tags. 0.postDISTANCE[.dev0]
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
# TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty.
# exceptions:
# 1: no tags. 0.postDISTANCE[.dev0]
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
# TAG[-DISTANCE-gHEX][-dirty], like 'git describe --tags --dirty
# --always'
# exceptions:
# 1: no tags. HEX[-dirty] (note: no 'g' prefix)
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
# TAG-DISTANCE-gHEX[-dirty], like 'git describe --tags --dirty
    # --always --long'. The distance/hash is unconditional.
# exceptions:
# 1: no tags. HEX[-dirty] (note: no 'g' prefix)
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"]}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None}
def get_versions():
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree"}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version"}
|
MrLeeh/jsonwatchqt
|
jsonwatchqt/_version.py
|
Python
|
mit
| 15,771
|
from questions import signals
|
Kuzenkov/SimpleAnalogueStackOverflow
|
questions/__init__.py
|
Python
|
mit
| 29
|
"""Mongodb implementations of assessment objects."""
# pylint: disable=no-init
# Numerous classes don't require __init__.
# pylint: disable=too-many-public-methods,too-few-public-methods
# Number of methods are defined in specification
# pylint: disable=protected-access
# Access to protected methods allowed in package mongo package scope
# pylint: disable=too-many-ancestors
# Inheritance defined in specification
import importlib
from bson.objectid import ObjectId
from . import mdata_conf
from .. import types
from .. import utilities
from ...abstract_osid.assessment import objects as abc_assessment_objects
from ..id.objects import IdList
from ..osid.metadata import Metadata
from ..osid.objects import OsidObject
from ..primitives import DateTime
from ..primitives import Duration
from ..primitives import Id
from ..primitives import Type
from ..utilities import MongoClientValidated
from ..utilities import get_provider_manager
from dlkit.abstract_osid.osid import errors
from dlkit.mongo.osid import markers as osid_markers
from dlkit.mongo.osid import objects as osid_objects
from dlkit.primordium.id.primitives import Id
default_language_type = Type(**types.Language().get_type_data('DEFAULT'))
default_script_type = Type(**types.Script().get_type_data('DEFAULT'))
default_format_type = Type(**types.Format().get_type_data('DEFAULT'))
class Question(abc_assessment_objects.Question, osid_objects.OsidObject):
"""A ``Question`` represents the question portion of an assessment item.
Like all OSID objects, a ``Question`` is identified by its ``Id``
and any persisted references should use the ``Id``.
"""
_record_type_data_sets = {}
_namespace = 'assessment.Question'
def __init__(self, osid_object_map, runtime=None):
osid_objects.OsidObject.__init__(self, osid_object_map, runtime)
self._record_type_data_sets = self._get_registry('QUESTION_RECORD_TYPES')
self._records = dict()
self._load_records(osid_object_map['recordTypeIds'])
self._catalog_name = 'bank'
@utilities.arguments_not_none
def get_question_record(self, question_record_type):
"""Gets the item record corresponding to the given ``Question`` record ``Type``.
This method is used to retrieve an object implementing the
requested record. The ``question_record_type`` may be the
``Type`` returned in ``get_record_types()`` or any of its
parents in a ``Type`` hierarchy where
``has_record_type(question_record_type)`` is ``true`` .
arg: question_record_type (osid.type.Type): the type of the
record to retrieve
return: (osid.assessment.records.QuestionRecord) - the question
record
raise: NullArgument - ``question_record_type`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unsupported - ``has_record_type(question_record_type)``
is ``false``
*compliance: mandatory -- This method must be implemented.*
"""
return self._get_record(question_record_type)
##
    # Override osid.Identifiable.get_id() method to cast this question id as its item id:
def get_id(self):
return Id(self._my_map['itemId'])
id_ = property(fget=get_id)
ident = property(fget=get_id)
##
# This method mirrors that in the Item so that questions can also be inspected for learning objectives:
def get_learning_objective_ids(self):
collection = MongoClientValidated('assessment',
collection='Item',
runtime=self._runtime)
item_map = collection.find_one({'_id': ObjectId(Id(self._my_map['itemId']).get_identifier())})
return IdList(item_map['learningObjectiveIds'])
def get_object_map(self):
obj_map = dict(self._my_map)
my_idstr = obj_map['itemId']
del obj_map['itemId']
lo_ids = self.get_learning_objective_ids()
obj_map['learningObjectiveIds'] = [str(lo_id) for lo_id in lo_ids]
obj_map = osid_objects.OsidObject.get_object_map(self, obj_map)
obj_map['id'] = my_idstr
return obj_map
object_map = property(fget=get_object_map)
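# Usage sketch (hypothetical data, not part of the original module): a
# Question reports the Id of its owning Item, so the serialized map
# exposes that id under 'id' and drops the internal 'itemId' key:
#
#     question = Question(osid_object_map, runtime=runtime)
#     question.get_id()     # -> Id of the owning Item
#     question.object_map   # -> dict with 'id' and 'learningObjectiveIds'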
class QuestionForm(abc_assessment_objects.QuestionForm, osid_objects.OsidObjectForm):
"""This is the form for creating and updating ``Questions``."""
_record_type_data_sets = {}
_namespace = 'assessment.Question'
def __init__(self, osid_object_map=None, record_types=None, runtime=None, **kwargs):
osid_objects.OsidForm.__init__(self, runtime=runtime)
self._record_type_data_sets = self._get_registry('QUESTION_RECORD_TYPES')
self._kwargs = kwargs
if 'catalog_id' in kwargs:
self._catalog_id = kwargs['catalog_id']
self._init_metadata(**kwargs)
self._records = dict()
self._supported_record_type_ids = []
if osid_object_map is not None:
self._for_update = True
self._my_map = osid_object_map
self._load_records(osid_object_map['recordTypeIds'])
else:
self._my_map = {}
self._for_update = False
self._init_map(**kwargs)
if record_types is not None:
self._init_records(record_types)
self._supported_record_type_ids = self._my_map['recordTypeIds']
def _init_metadata(self, **kwargs):
osid_objects.OsidObjectForm._init_metadata(self, **kwargs)
def _init_map(self, **kwargs):
osid_objects.OsidObjectForm._init_map(self)
self._my_map['itemId'] = str(kwargs['item_id'])
self._my_map['assignedBankIds'] = [str(kwargs['bank_id'])]
@utilities.arguments_not_none
def get_question_form_record(self, question_record_type):
"""Gets the ``QuestionFormRecord`` corresponding to the given question record ``Type``.
arg: question_record_type (osid.type.Type): the question
record type
return: (osid.assessment.records.QuestionFormRecord) - the
question record
raise: NullArgument - ``question_record_type`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unsupported - ``has_record_type(question_record_type)``
is ``false``
*compliance: mandatory -- This method must be implemented.*
"""
return self._get_record(question_record_type)
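# Form usage sketch (illustrative only; the session and method names
# follow OSID admin-session conventions and are assumptions here, not
# part of this module):
#
#     form = admin_session.get_question_form_for_create(item_id, [])
#     form.display_name = 'What is 2 + 2?'
#     question = admin_session.create_question(form)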
class QuestionList(abc_assessment_objects.QuestionList, osid_objects.OsidList):
"""Like all ``OsidLists,`` ``QuestionList`` provides a means for accessing ``Question`` elements sequentially
either one
at a time or many at a time.
Examples: while (ql.hasNext()) { Question question =
ql.getNextQuestion(); }
or
while (ql.hasNext()) {
    Question[] questions = ql.getNextQuestions(ql.available());
}
"""
def get_next_question(self):
"""Gets the next ``Question`` in this list.
return: (osid.assessment.Question) - the next ``Question`` in
this list. The ``has_next()`` method should be used to
test that a next ``Question`` is available before
calling this method.
raise: IllegalState - no more elements available in this list
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceList.get_next_resource
return self.next()
def next(self):
return self._get_next_object(Question)
next_question = property(fget=get_next_question)
@utilities.arguments_not_none
def get_next_questions(self, n):
"""Gets the next set of ``Question`` elements in this list which must be less than or equal to the number
returned from ``available()``.
arg: n (cardinal): the number of ``Question`` elements
requested which should be less than or equal to
``available()``
return: (osid.assessment.Question) - an array of ``Question``
            elements. The length of the array is less than or equal
to the number specified.
raise: IllegalState - no more elements available in this list
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceList.get_next_resources
return self._get_next_n(n)
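# Iteration sketch mirroring the docstring examples above:
#
#     while question_list.has_next():
#         question = question_list.get_next_question()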
class Answer(abc_assessment_objects.Answer, osid_objects.OsidObject):
"""An ``Answer`` represents the question portion of an assessment item.
Like all OSID objects, an ``Answer`` is identified by its ``Id`` and
any persisted references should use the ``Id``.
"""
_record_type_data_sets = {}
_namespace = 'assessment.Answer'
def __init__(self, osid_object_map, runtime=None):
osid_objects.OsidObject.__init__(self, osid_object_map, runtime)
self._record_type_data_sets = self._get_registry('ANSWER_RECORD_TYPES')
self._records = dict()
self._load_records(osid_object_map['recordTypeIds'])
self._catalog_name = 'bank'
@utilities.arguments_not_none
def get_answer_record(self, answer_record_type):
"""Gets the answer record corresponding to the given ``Answer`` record ``Type``.
This method is used to retrieve an object implementing the
requested records. The ``answer_record_type`` may be the
``Type`` returned in ``get_record_types()`` or any of its
parents in a ``Type`` hierarchy where
``has_record_type(answer_record_type)`` is ``true`` .
arg: answer_record_type (osid.type.Type): the type of the
record to retrieve
return: (osid.assessment.records.AnswerRecord) - the answer
record
raise: NullArgument - ``answer_record_type`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unsupported - ``has_record_type(answer_record_type)`` is
``false``
*compliance: mandatory -- This method must be implemented.*
"""
return self._get_record(answer_record_type)
def get_object_map(self):
obj_map = dict(self._my_map)
del obj_map['itemId']
return osid_objects.OsidObject.get_object_map(self, obj_map)
object_map = property(fget=get_object_map)
class AnswerForm(abc_assessment_objects.AnswerForm, osid_objects.OsidObjectForm):
"""This is the form for creating and updating ``Answers``."""
_record_type_data_sets = {}
_namespace = 'assessment.Answer'
def __init__(self, osid_object_map=None, record_types=None, runtime=None, **kwargs):
osid_objects.OsidForm.__init__(self, runtime=runtime)
self._record_type_data_sets = self._get_registry('ANSWER_RECORD_TYPES')
self._kwargs = kwargs
if 'catalog_id' in kwargs:
self._catalog_id = kwargs['catalog_id']
self._init_metadata(**kwargs)
self._records = dict()
self._supported_record_type_ids = []
if osid_object_map is not None:
self._for_update = True
self._my_map = osid_object_map
self._load_records(osid_object_map['recordTypeIds'])
else:
self._my_map = {}
self._for_update = False
self._init_map(**kwargs)
if record_types is not None:
self._init_records(record_types)
self._supported_record_type_ids = self._my_map['recordTypeIds']
def _init_metadata(self, **kwargs):
osid_objects.OsidObjectForm._init_metadata(self, **kwargs)
def _init_map(self, **kwargs):
osid_objects.OsidObjectForm._init_map(self)
self._my_map['itemId'] = str(kwargs['item_id'])
self._my_map['assignedBankIds'] = [str(kwargs['bank_id'])]
@utilities.arguments_not_none
def get_answer_form_record(self, answer_record_type):
"""Gets the ``AnswerFormRecord`` corresponding to the given answer record ``Type``.
arg: answer_record_type (osid.type.Type): the answer record
type
return: (osid.assessment.records.AnswerFormRecord) - the answer
record
raise: NullArgument - ``answer_record_type`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unsupported - ``has_record_type(answer_record_type)`` is
``false``
*compliance: mandatory -- This method must be implemented.*
"""
return self._get_record(answer_record_type)
class AnswerList(abc_assessment_objects.AnswerList, osid_objects.OsidList):
"""Like all ``OsidLists,`` ``AnswerList`` provides a means for accessing ``Answer`` elements sequentially either
one at a
time or many at a time.
Examples: while (al.hasNext()) { Answer answer = al.getNextAnswer();
}
or
while (al.hasNext()) {
    Answer[] answers = al.getNextAnswers(al.available());
}
"""
def get_next_answer(self):
"""Gets the next ``Answer`` in this list.
return: (osid.assessment.Answer) - the next ``Answer`` in this
list. The ``has_next()`` method should be used to test
that a next ``Answer`` is available before calling this
method.
raise: IllegalState - no more elements available in this list
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceList.get_next_resource
return self.next()
def next(self):
return self._get_next_object(Answer)
next_answer = property(fget=get_next_answer)
@utilities.arguments_not_none
def get_next_answers(self, n):
"""Gets the next set of ``Answer`` elements in this list which must be less than or equal to the number returned
from ``available()``.
arg: n (cardinal): the number of ``Answer`` elements
requested which should be less than or equal to
``available()``
return: (osid.assessment.Answer) - an array of ``Answer``
            elements. The length of the array is less than or equal
to the number specified.
raise: IllegalState - no more elements available in this list
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceList.get_next_resources
return self._get_next_n(n)
class Item(abc_assessment_objects.Item, osid_objects.OsidObject, osid_markers.Aggregateable):
"""An ``Item`` represents an individual assessment item such as a question.
    Like all OSID objects, an ``Item`` is identified by its ``Id`` and
any persisted references should use the ``Id``.
An ``Item`` is composed of a ``Question`` and an ``Answer``.
"""
_record_type_data_sets = {}
_namespace = 'assessment.Item'
def __init__(self, osid_object_map, runtime=None):
osid_objects.OsidObject.__init__(self, osid_object_map, runtime)
self._record_type_data_sets = self._get_registry('ITEM_RECORD_TYPES')
self._records = dict()
self._load_records(osid_object_map['recordTypeIds'])
self._catalog_name = 'bank'
def get_learning_objective_ids(self):
"""Gets the ``Ids`` of any ``Objectives`` corresponding to this item.
return: (osid.id.IdList) - the learning objective ``Ids``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.learning.Activity.get_asset_ids_template
return IdList(self._my_map['learningObjectiveIds'])
learning_objective_ids = property(fget=get_learning_objective_ids)
def get_learning_objectives(self):
"""Gets the any ``Objectives`` corresponding to this item.
return: (osid.learning.ObjectiveList) - the learning objectives
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.learning.Activity.get_assets_template
try:
mgr = self._get_provider_manager('LEARNING')
except ImportError:
raise errors.OperationFailed('failed to instantiate LearningManager')
if not mgr.supports_objective_lookup():
raise errors.OperationFailed('Learning does not support Objective lookup')
lookup_session = mgr.get_objective_lookup_session()
lookup_session.use_federated_objective_bank_view()
return lookup_session.get_objectives_by_ids(self.get_learning_objective_ids())
learning_objectives = property(fget=get_learning_objectives)
def get_question_id(self):
"""Gets the ``Id`` of the ``Question``.
return: (osid.id.Id) - the question ``Id``
*compliance: mandatory -- This method must be implemented.*
"""
        return self.get_question().get_id()
question_id = property(fget=get_question_id)
def get_question(self):
"""Gets the question.
return: (osid.assessment.Question) - the question
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
return Question(self._my_map['question'], runtime=self._runtime)
question = property(fget=get_question)
def get_answer_ids(self):
"""Gets the ``Ids`` of the answers.
Questions may have more than one acceptable answer.
return: (osid.id.IdList) - the answer ``Ids``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.repository.Asset.get_asset_content_ids_template
id_list = []
for answer in self.get_answers():
id_list.append(answer.get_id())
        return IdList(id_list)
answer_ids = property(fget=get_answer_ids)
def get_answers(self):
"""Gets the answers.
return: (osid.assessment.AnswerList) - the answers
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.repository.Asset.get_asset_contents_template
return AnswerList(self._my_map['answers'], runtime=self._runtime)
answers = property(fget=get_answers)
@utilities.arguments_not_none
def get_item_record(self, item_record_type):
"""Gets the item record corresponding to the given ``Item`` record ``Type``.
This method is used to retrieve an object implementing the
requested records. The ``item_record_type`` may be the ``Type``
returned in ``get_record_types()`` or any of its parents in a
``Type`` hierarchy where ``has_record_type(item_record_type)``
is ``true`` .
arg: item_record_type (osid.type.Type): the type of the
record to retrieve
return: (osid.assessment.records.ItemRecord) - the item record
raise: NullArgument - ``item_record_type`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unsupported - ``has_record_type(item_record_type)`` is
``false``
*compliance: mandatory -- This method must be implemented.*
"""
return self._get_record(item_record_type)
    def get_configuration(self):
        config = dict()
        try:
            config.update(self.get_question().get_configuration())
        except AttributeError:
            pass
        for record in self._records.values():
            try:
                config.update(record.get_configuration())
            except AttributeError:
                pass
        return config  # Should this method build a real OSID configuration instead?
def get_object_map(self):
obj_map = dict(self._my_map)
if obj_map['question']:
obj_map['question'] = self.get_question().get_object_map()
obj_map['answers'] = []
for answer in self.get_answers():
obj_map['answers'].append(answer.get_object_map())
return osid_objects.OsidObject.get_object_map(self, obj_map)
object_map = property(fget=get_object_map)
def _delete(self):
try:
self.get_question()._delete()
        except Exception:  # the Item may not have a question to delete
            pass
finally:
for answer in self.get_answers():
answer._delete()
osid_objects.OsidObject._delete(self)
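# Usage sketch (hypothetical data): an Item aggregates one Question and
# zero or more Answers, plus optional learning objectives:
#
#     item.get_question()                  # -> Question
#     item.get_answers()                   # -> AnswerList
#     item.get_learning_objective_ids()    # -> IdList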
class ItemForm(abc_assessment_objects.ItemForm, osid_objects.OsidObjectForm, osid_objects.OsidAggregateableForm):
"""This is the form for creating and updating ``Items``.
Like all ``OsidForm`` objects, various data elements may be set here
for use in the create and update methods in the
``ItemAdminSession``. For each data element that may be set,
metadata may be examined to provide display hints or data
constraints.
"""
_record_type_data_sets = {}
_namespace = 'assessment.Item'
def __init__(self, osid_object_map=None, record_types=None, runtime=None, **kwargs):
osid_objects.OsidForm.__init__(self, runtime=runtime)
self._record_type_data_sets = self._get_registry('ITEM_RECORD_TYPES')
self._kwargs = kwargs
if 'catalog_id' in kwargs:
self._catalog_id = kwargs['catalog_id']
self._init_metadata(**kwargs)
self._records = dict()
self._supported_record_type_ids = []
if osid_object_map is not None:
self._for_update = True
self._my_map = osid_object_map
self._load_records(osid_object_map['recordTypeIds'])
else:
self._my_map = {}
self._for_update = False
self._init_map(**kwargs)
if record_types is not None:
self._init_records(record_types)
self._supported_record_type_ids = self._my_map['recordTypeIds']
def _init_metadata(self, **kwargs):
osid_objects.OsidObjectForm._init_metadata(self, **kwargs)
self._learning_objectives_metadata = {
'element_id': Id(
self._authority,
self._namespace,
'learning_objectives')}
self._learning_objectives_metadata.update(mdata_conf.ITEM_LEARNING_OBJECTIVES)
self._learning_objectives_default = self._learning_objectives_metadata['default_id_values']
def _init_map(self, **kwargs):
osid_objects.OsidObjectForm._init_map(self)
self._my_map['learningObjectiveIds'] = self._learning_objectives_default
self._my_map['assignedBankIds'] = [str(kwargs['bank_id'])]
self._my_map['question'] = None
self._my_map['answers'] = []
def get_learning_objectives_metadata(self):
"""Gets the metadata for learning objectives.
return: (osid.Metadata) - metadata for the learning objectives
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.learning.ActivityForm.get_assets_metadata_template
metadata = dict(self._learning_objectives_metadata)
metadata.update({'existing_learning_objectives_values': self._my_map['learningObjectiveIds']})
return Metadata(**metadata)
learning_objectives_metadata = property(fget=get_learning_objectives_metadata)
@utilities.arguments_not_none
def set_learning_objectives(self, objective_ids):
"""Sets the learning objectives.
arg: objective_ids (osid.id.Id[]): the learning objective
``Ids``
raise: InvalidArgument - ``objective_ids`` is invalid
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.learning.ActivityForm.set_assets_template
if not isinstance(objective_ids, list):
raise errors.InvalidArgument()
if self.get_learning_objectives_metadata().is_read_only():
raise errors.NoAccess()
idstr_list = []
for object_id in objective_ids:
if not self._is_valid_id(object_id):
raise errors.InvalidArgument()
idstr_list.append(str(object_id))
self._my_map['learningObjectiveIds'] = idstr_list
def clear_learning_objectives(self):
"""Clears the learning objectives.
raise: NoAccess - ``Metadata.isRequired()`` or
``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.learning.ActivityForm.clear_assets_template
if (self.get_learning_objectives_metadata().is_read_only() or
self.get_learning_objectives_metadata().is_required()):
raise errors.NoAccess()
self._my_map['learningObjectiveIds'] = self._learning_objectives_default
learning_objectives = property(fset=set_learning_objectives, fdel=clear_learning_objectives)
@utilities.arguments_not_none
def get_item_form_record(self, item_record_type):
"""Gets the ``ItemnFormRecord`` corresponding to the given item record ``Type``.
arg: item_record_type (osid.type.Type): the item record type
return: (osid.assessment.records.ItemFormRecord) - the item
record
raise: NullArgument - ``item_record_type`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unsupported - ``has_record_type(item_record_type)`` is
``false``
*compliance: mandatory -- This method must be implemented.*
"""
return self._get_record(item_record_type)
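# Setter sketch (hypothetical objective id): learning objectives are
# validated against the form metadata before being stored as id strings:
#
#     form.set_learning_objectives([objective_id])   # stores [str(objective_id)]
#     form.clear_learning_objectives()               # restores the default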
class ItemList(abc_assessment_objects.ItemList, osid_objects.OsidList):
"""Like all ``OsidLists,`` ``ItemList`` provides a means for accessing ``Item`` elements sequentially either one at
a time
or many at a time.
Examples: while (il.hasNext()) { Item item = il.getNextItem(); }
or
while (il.hasNext()) {
Item[] items = il.getNextItems(il.available());
}
"""
def get_next_item(self):
"""Gets the next ``Item`` in this list.
return: (osid.assessment.Item) - the next ``Item`` in this list.
The ``has_next()`` method should be used to test that a
next ``Item`` is available before calling this method.
raise: IllegalState - no more elements available in this list
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceList.get_next_resource
return self.next()
def next(self):
return self._get_next_object(Item)
next_item = property(fget=get_next_item)
@utilities.arguments_not_none
def get_next_items(self, n):
"""Gets the next set of ``Item`` elements in this list which must be less than or equal to the number returned
from ``available()``.
arg: n (cardinal): the number of ``Item`` elements requested
which should be less than or equal to ``available()``
return: (osid.assessment.Item) - an array of ``Item``
            elements. The length of the array is less than or equal
to the number specified.
raise: IllegalState - no more elements available in this list
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceList.get_next_resources
return self._get_next_n(n)
class Assessment(abc_assessment_objects.Assessment, osid_objects.OsidObject):
"""An ``Assessment`` represents a sequence of assessment items.
Like all OSID objects, an ``Assessment`` is identified by its ``Id``
and any persisted references should use the ``Id``.
An ``Assessment`` may have an accompanying rubric used for assessing
performance. The rubric assessment is established canonically in
this ``Assessment``.
"""
_record_type_data_sets = {}
_namespace = 'assessment.Assessment'
def __init__(self, osid_object_map, runtime=None):
osid_objects.OsidObject.__init__(self, osid_object_map, runtime)
self._record_type_data_sets = self._get_registry('ASSESSMENT_RECORD_TYPES')
self._records = dict()
self._load_records(osid_object_map['recordTypeIds'])
self._catalog_name = 'bank'
def get_level_id(self):
"""Gets the ``Id`` of a ``Grade`` corresponding to the assessment difficulty.
return: (osid.id.Id) - a grade ``Id``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.Resource.get_avatar_id_template
if not self._my_map['levelId']:
raise errors.IllegalState('this Assessment has no level')
else:
return Id(self._my_map['levelId'])
level_id = property(fget=get_level_id)
def get_level(self):
"""Gets the ``Grade`` corresponding to the assessment difficulty.
return: (osid.grading.Grade) - the level
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.Resource.get_avatar_template
if not self._my_map['levelId']:
raise errors.IllegalState('this Assessment has no level')
mgr = self._get_provider_manager('GRADING')
if not mgr.supports_grade_lookup():
raise errors.OperationFailed('Grading does not support Grade lookup')
lookup_session = mgr.get_grade_lookup_session()
lookup_session.use_federated_gradebook_view()
osid_object = lookup_session.get_grade(self.get_level_id())
return osid_object
level = property(fget=get_level)
def has_rubric(self):
"""Tests if a rubric assessment is associated with this assessment.
return: (boolean) - ``true`` if a rubric is available, ``false``
otherwise
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.Resource.has_avatar_template
return bool(self._my_map['rubricId'])
def get_rubric_id(self):
"""Gets the ``Id`` of the rubric.
return: (osid.id.Id) - an assessment ``Id``
raise: IllegalState - ``has_rubric()`` is ``false``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.Resource.get_avatar_id_template
if not self._my_map['rubricId']:
raise errors.IllegalState('this Assessment has no rubric')
else:
return Id(self._my_map['rubricId'])
rubric_id = property(fget=get_rubric_id)
def get_rubric(self):
"""Gets the rubric.
return: (osid.assessment.Assessment) - the assessment
raise: IllegalState - ``has_rubric()`` is ``false``
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.Resource.get_avatar_template
if not self._my_map['rubricId']:
raise errors.IllegalState('this Assessment has no rubric')
mgr = self._get_provider_manager('ASSESSMENT')
if not mgr.supports_assessment_lookup():
raise errors.OperationFailed('Assessment does not support Assessment lookup')
lookup_session = mgr.get_assessment_lookup_session()
lookup_session.use_federated_bank_view()
osid_object = lookup_session.get_assessment(self.get_rubric_id())
return osid_object
rubric = property(fget=get_rubric)
@utilities.arguments_not_none
def get_assessment_record(self, assessment_record_type):
"""Gets the assessment record corresponding to the given ``Assessment`` record ``Type``.
This method is used to retrieve an object implementing the
requested record. The ``assessment_record_type`` may be the
``Type`` returned in ``get_record_types()`` or any of its
parents in a ``Type`` hierarchy where
``has_record_type(assessment_record_type)`` is ``true`` .
arg: assessment_record_type (osid.type.Type): the type of the
record to retrieve
return: (osid.assessment.records.AssessmentRecord) - the
assessment record
raise: NullArgument - ``assessment_record_type`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unsupported -
``has_record_type(assessment_record_type)`` is ``false``
*compliance: mandatory -- This method must be implemented.*
"""
return self._get_record(assessment_record_type)
def get_object_map(self):
obj_map = dict(self._my_map)
if 'itemIds' in obj_map:
del obj_map['itemIds']
return osid_objects.OsidObject.get_object_map(self, obj_map)
object_map = property(fget=get_object_map)
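# Usage sketch: rubric access is guarded by has_rubric(); calling
# get_rubric() on an assessment without one raises IllegalState:
#
#     if assessment.has_rubric():
#         rubric = assessment.get_rubric()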
class AssessmentForm(abc_assessment_objects.AssessmentForm, osid_objects.OsidObjectForm):
"""This is the form for creating and updating ``Assessments``.
Like all ``OsidForm`` objects, various data elements may be set here
for use in the create and update methods in the
``AssessmentAdminSession``. For each data element that may be set,
metadata may be examined to provide display hints or data
constraints.
"""
_record_type_data_sets = {}
_namespace = 'assessment.Assessment'
def __init__(self, osid_object_map=None, record_types=None, runtime=None, **kwargs):
osid_objects.OsidForm.__init__(self, runtime=runtime)
self._record_type_data_sets = self._get_registry('ASSESSMENT_RECORD_TYPES')
self._kwargs = kwargs
if 'catalog_id' in kwargs:
self._catalog_id = kwargs['catalog_id']
self._init_metadata(**kwargs)
self._records = dict()
self._supported_record_type_ids = []
if osid_object_map is not None:
self._for_update = True
self._my_map = osid_object_map
self._load_records(osid_object_map['recordTypeIds'])
else:
self._my_map = {}
self._for_update = False
self._init_map(**kwargs)
if record_types is not None:
self._init_records(record_types)
self._supported_record_type_ids = self._my_map['recordTypeIds']
def _init_metadata(self, **kwargs):
osid_objects.OsidObjectForm._init_metadata(self, **kwargs)
self._rubric_metadata = {
'element_id': Id(
self._authority,
self._namespace,
'rubric')}
self._rubric_metadata.update(mdata_conf.ASSESSMENT_RUBRIC)
self._level_metadata = {
'element_id': Id(
self._authority,
self._namespace,
'level')}
self._level_metadata.update(mdata_conf.ASSESSMENT_LEVEL)
self._rubric_default = self._rubric_metadata['default_id_values'][0]
self._level_default = self._level_metadata['default_id_values'][0]
def _init_map(self, **kwargs):
osid_objects.OsidObjectForm._init_map(self)
self._my_map['rubricId'] = self._rubric_default
self._my_map['assignedBankIds'] = [str(kwargs['bank_id'])]
self._my_map['levelId'] = self._level_default
def get_level_metadata(self):
"""Gets the metadata for a grade level.
return: (osid.Metadata) - metadata for the grade level
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.get_group_metadata_template
metadata = dict(self._level_metadata)
metadata.update({'existing_level_values': self._my_map['levelId']})
return Metadata(**metadata)
level_metadata = property(fget=get_level_metadata)
@utilities.arguments_not_none
def set_level(self, grade_id):
"""Sets the level of difficulty expressed as a ``Grade``.
arg: grade_id (osid.id.Id): the grade level
raise: InvalidArgument - ``grade_id`` is invalid
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
raise: NullArgument - ``grade_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.set_avatar_template
if self.get_level_metadata().is_read_only():
raise errors.NoAccess()
if not self._is_valid_id(grade_id):
raise errors.InvalidArgument()
self._my_map['levelId'] = str(grade_id)
def clear_level(self):
"""Clears the grade level.
raise: NoAccess - ``Metadata.isRequired()`` or
``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.clear_avatar_template
if (self.get_level_metadata().is_read_only() or
self.get_level_metadata().is_required()):
raise errors.NoAccess()
self._my_map['levelId'] = self._level_default
level = property(fset=set_level, fdel=clear_level)
def get_rubric_metadata(self):
"""Gets the metadata for a rubric assessment.
        return: (osid.Metadata) - metadata for the assessment
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.get_group_metadata_template
metadata = dict(self._rubric_metadata)
metadata.update({'existing_rubric_values': self._my_map['rubricId']})
return Metadata(**metadata)
rubric_metadata = property(fget=get_rubric_metadata)
@utilities.arguments_not_none
def set_rubric(self, assessment_id):
"""Sets the rubric expressed as another assessment.
arg: assessment_id (osid.id.Id): the assessment ``Id``
raise: InvalidArgument - ``assessment_id`` is invalid
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
raise: NullArgument - ``assessment_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.set_avatar_template
if self.get_rubric_metadata().is_read_only():
raise errors.NoAccess()
if not self._is_valid_id(assessment_id):
raise errors.InvalidArgument()
self._my_map['rubricId'] = str(assessment_id)
def clear_rubric(self):
"""Clears the rubric.
raise: NoAccess - ``Metadata.isRequired()`` or
``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.clear_avatar_template
if (self.get_rubric_metadata().is_read_only() or
self.get_rubric_metadata().is_required()):
raise errors.NoAccess()
self._my_map['rubricId'] = self._rubric_default
rubric = property(fset=set_rubric, fdel=clear_rubric)
@utilities.arguments_not_none
def get_assessment_form_record(self, assessment_record_type):
"""Gets the ``AssessmentFormRecord`` corresponding to the given assessment record ``Type``.
arg: assessment_record_type (osid.type.Type): the assessment
record type
return: (osid.assessment.records.AssessmentFormRecord) - the
assessment record
raise: NullArgument - ``assessment_record_type`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unsupported -
``has_record_type(assessment_record_type)`` is ``false``
*compliance: mandatory -- This method must be implemented.*
"""
return self._get_record(assessment_record_type)
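# Setter sketch (hypothetical grade id): set_level() consults the form
# metadata first, so a read-only form raises NoAccess instead of
# silently writing:
#
#     form.set_level(grade_id)   # stores str(grade_id) under 'levelId'
#     form.clear_level()         # restores the metadata default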
class AssessmentList(abc_assessment_objects.AssessmentList, osid_objects.OsidList):
"""Like all ``OsidLists,`` ``AssessmentList`` provides a means for accessing ``Assessment`` elements sequentially
either
one at a time or many at a time.
Examples: while (al.hasNext()) { Assessment assessment =
al.getNextAssessment(); }
or
while (al.hasNext()) {
    Assessment[] assessments = al.getNextAssessments(al.available());
}
"""
def get_next_assessment(self):
"""Gets the next ``Assessment`` in this list.
return: (osid.assessment.Assessment) - the next ``Assessment``
in this list. The ``has_next()`` method should be used
to test that a next ``Assessment`` is available before
calling this method.
raise: IllegalState - no more elements available in this list
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceList.get_next_resource
return self.next()
def next(self):
return self._get_next_object(Assessment)
next_assessment = property(fget=get_next_assessment)
@utilities.arguments_not_none
def get_next_assessments(self, n):
"""Gets the next set of ``Assessment`` elements in this list which must be less than or equal to the number
returned from ``available()``.
arg: n (cardinal): the number of ``Assessment`` elements
requested which should be less than or equal to
``available()``
return: (osid.assessment.Assessment) - an array of
            ``Assessment`` elements. The length of the array is less
than or equal to the number specified.
raise: IllegalState - no more elements available in this list
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceList.get_next_resources
return self._get_next_n(n)
class AssessmentOffered(abc_assessment_objects.AssessmentOffered, osid_objects.OsidObject, osid_markers.Subjugateable):
"""An ``AssessmentOffered`` represents a sequence of assessment items.
Like all OSID objects, an ``AssessmentOffered`` is identified by its
``Id`` and any persisted references should use the ``Id``.
"""
_record_type_data_sets = {}
_namespace = 'assessment.AssessmentOffered'
def __init__(self, osid_object_map, runtime=None):
osid_objects.OsidObject.__init__(self, osid_object_map, runtime)
self._record_type_data_sets = self._get_registry('ASSESSMENT_OFFERED_RECORD_TYPES')
self._records = dict()
self._load_records(osid_object_map['recordTypeIds'])
self._catalog_name = 'bank'
def get_assessment_id(self):
"""Gets the assessment ``Id`` corresponding to this assessment offering.
return: (osid.id.Id) - the assessment id
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.learning.Activity.get_objective_id
return Id(self._my_map['assessmentId'])
assessment_id = property(fget=get_assessment_id)
def get_assessment(self):
"""Gets the assessment corresponding to this assessment offereng.
return: (osid.assessment.Assessment) - the assessment
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.learning.Activity.get_objective
mgr = self._get_provider_manager('ASSESSMENT')
if not mgr.supports_assessment_lookup():
raise errors.OperationFailed('Assessment does not support Assessment lookup')
lookup_session = mgr.get_assessment_lookup_session()
lookup_session.use_federated_bank_view()
return lookup_session.get_assessment(self.get_assessment_id())
assessment = property(fget=get_assessment)
def get_level_id(self):
"""Gets the ``Id`` of a ``Grade`` corresponding to the assessment difficulty.
return: (osid.id.Id) - a grade id
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.Resource.get_avatar_id_template
if not self._my_map['levelId']:
raise errors.IllegalState('this AssessmentOffered has no level')
else:
return Id(self._my_map['levelId'])
level_id = property(fget=get_level_id)
def get_level(self):
"""Gets the ``Grade`` corresponding to the assessment difficulty.
return: (osid.grading.Grade) - the level
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.Resource.get_avatar_template
if not self._my_map['levelId']:
raise errors.IllegalState('this AssessmentOffered has no level')
mgr = self._get_provider_manager('GRADING')
if not mgr.supports_grade_lookup():
raise errors.OperationFailed('Grading does not support Grade lookup')
lookup_session = mgr.get_grade_lookup_session()
lookup_session.use_federated_gradebook_view()
osid_object = lookup_session.get_grade(self.get_level_id())
return osid_object
level = property(fget=get_level)
def are_items_sequential(self):
"""Tests if the items or parts in this assessment are taken sequentially.
return: (boolean) - ``true`` if the items are taken
sequentially, ``false`` if the items can be skipped and
revisited
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.Resource.is_group_template
return self._my_map['itemsSequential']
def are_items_shuffled(self):
"""Tests if the items or parts appear in a random order.
return: (boolean) - ``true`` if the items appear in a random
order, ``false`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.Resource.is_group_template
return self._my_map['itemsShuffled']
def has_start_time(self):
"""Tests if there is a fixed start time for this assessment.
return: (boolean) - ``true`` if there is a fixed start time,
``false`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.repository.AssetContent.has_url_template
try:
return bool(self._my_map['startTime'])
except KeyError:
return False
def get_start_time(self):
"""Gets the start time for this assessment.
return: (osid.calendaring.DateTime) - the designated start time
raise: IllegalState - ``has_start_time()`` is ``false``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.assessment.AssessmentOffered.get_start_time_template
if not bool(self._my_map['startTime']):
raise errors.IllegalState()
dt = self._my_map['startTime']
return DateTime(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, dt.microsecond)
start_time = property(fget=get_start_time)
def has_deadline(self):
"""Tests if there is a fixed end time for this assessment.
return: (boolean) - ``true`` if there is a fixed end time,
``false`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.repository.AssetContent.has_url_template
try:
return bool(self._my_map['deadline'])
except KeyError:
return False
def get_deadline(self):
"""Gets the end time for this assessment.
return: (osid.calendaring.DateTime) - the designated end time
raise: IllegalState - ``has_deadline()`` is ``false``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.assessment.AssessmentOffered.get_start_time_template
if not bool(self._my_map['deadline']):
raise errors.IllegalState()
dt = self._my_map['deadline']
return DateTime(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, dt.microsecond)
deadline = property(fget=get_deadline)
def has_duration(self):
"""Tests if there is a fixed duration for this assessment.
return: (boolean) - ``true`` if there is a fixed duration,
``false`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.repository.AssetContent.has_url_template
try:
return bool(self._my_map['duration'])
except KeyError:
return False
def get_duration(self):
"""Gets the duration for this assessment.
return: (osid.calendaring.Duration) - the duration
raise: IllegalState - ``has_duration()`` is ``false``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.assessment.AssessmentOffered.get_duration_template
if not bool(self._my_map['duration']):
raise errors.IllegalState()
return Duration(**self._my_map['duration'])
duration = property(fget=get_duration)
def is_scored(self):
"""Tests if this assessment will be scored.
return: (boolean) - ``true`` if this assessment will be scored
``false`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.Resource.is_group_template
return self._my_map['scored']
def get_score_system_id(self):
"""Gets the grade system ``Id`` for the score.
return: (osid.id.Id) - the grade system ``Id``
raise: IllegalState - ``is_scored()`` is ``false``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.Resource.get_avatar_id_template
if not self._my_map['scoreSystemId']:
raise errors.IllegalState('this AssessmentOffered has no score_system')
else:
return Id(self._my_map['scoreSystemId'])
score_system_id = property(fget=get_score_system_id)
def get_score_system(self):
"""Gets the grade system for the score.
return: (osid.grading.GradeSystem) - the grade system
raise: IllegalState - ``is_scored()`` is ``false``
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.Resource.get_avatar_template
if not self._my_map['scoreSystemId']:
raise errors.IllegalState('this AssessmentOffered has no score_system')
mgr = self._get_provider_manager('GRADING')
if not mgr.supports_grade_system_lookup():
raise errors.OperationFailed('Grading does not support GradeSystem lookup')
lookup_session = mgr.get_grade_system_lookup_session()
lookup_session.use_federated_gradebook_view()
osid_object = lookup_session.get_grade_system(self.get_score_system_id())
return osid_object
score_system = property(fget=get_score_system)
def is_graded(self):
"""Tests if this assessment will be graded.
return: (boolean) - ``true`` if this assessment will be graded,
``false`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.Resource.is_group_template
return self._my_map['graded']
def get_grade_system_id(self):
"""Gets the grade system ``Id`` for the grade.
return: (osid.id.Id) - the grade system ``Id``
raise: IllegalState - ``is_graded()`` is ``false``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.Resource.get_avatar_id_template
if not self._my_map['gradeSystemId']:
raise errors.IllegalState('this AssessmentOffered has no grade_system')
else:
return Id(self._my_map['gradeSystemId'])
grade_system_id = property(fget=get_grade_system_id)
def get_grade_system(self):
"""Gets the grade system for the grade.
return: (osid.grading.GradeSystem) - the grade system
raise: IllegalState - ``is_graded()`` is ``false``
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.Resource.get_avatar_template
if not self._my_map['gradeSystemId']:
raise errors.IllegalState('this AssessmentOffered has no grade_system')
mgr = self._get_provider_manager('GRADING')
if not mgr.supports_grade_system_lookup():
raise errors.OperationFailed('Grading does not support GradeSystem lookup')
lookup_session = mgr.get_grade_system_lookup_session()
lookup_session.use_federated_gradebook_view()
osid_object = lookup_session.get_grade_system(self.get_grade_system_id())
return osid_object
grade_system = property(fget=get_grade_system)
def has_rubric(self):
"""Tests if a rubric assessment is associated with this assessment.
return: (boolean) - ``true`` if a rubric is available, ``false``
otherwise
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.Resource.has_avatar_template
return bool(self._my_map['rubricId'])
def get_rubric_id(self):
"""Gets the ``Id`` of the rubric.
return: (osid.id.Id) - an assessment offered ``Id``
raise: IllegalState - ``has_rubric()`` is ``false``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.Resource.get_avatar_id_template
if not self._my_map['rubricId']:
raise errors.IllegalState('this AssessmentOffered has no rubric')
else:
return Id(self._my_map['rubricId'])
rubric_id = property(fget=get_rubric_id)
def get_rubric(self):
"""Gets the rubric.
return: (osid.assessment.AssessmentOffered) - the assessment
offered
raise: IllegalState - ``has_rubric()`` is ``false``
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.Resource.get_avatar_template
if not self._my_map['rubricId']:
raise errors.IllegalState('this AssessmentOffered has no rubric')
mgr = self._get_provider_manager('ASSESSMENT')
if not mgr.supports_assessment_offered_lookup():
raise errors.OperationFailed('Assessment does not support AssessmentOffered lookup')
lookup_session = mgr.get_assessment_offered_lookup_session()
lookup_session.use_federated_bank_view()
osid_object = lookup_session.get_assessment_offered(self.get_rubric_id())
return osid_object
rubric = property(fget=get_rubric)
@utilities.arguments_not_none
    def get_assessment_offered_record(self, assessment_offered_record_type):
"""Gets the assessment offered record corresponding to the given ``AssessmentOffered`` record ``Type``.
This method is used to retrieve an object implementing the
requested record. The ``assessment_offered_record_type`` may be
the ``Type`` returned in ``get_record_types()`` or any of its
parents in a ``Type`` hierarchy where
``has_record_type(assessment_offered_record_type)`` is ``true``
.
        arg: assessment_offered_record_type (osid.type.Type): an
            assessment offered record type
return: (osid.assessment.records.AssessmentOfferedRecord) - the
assessment offered record
raise: NullArgument - ``assessment_offered_record_type`` is
``null``
raise: OperationFailed - unable to complete request
raise: Unsupported -
``has_record_type(assessment_offered_record_type)`` is
``false``
*compliance: mandatory -- This method must be implemented.*
"""
        return self._get_record(assessment_offered_record_type)
def get_display_name(self):
# Overrides osid.objects.OsidObject.get_display_name to default to Assessment's
# display_name if none has been authored for this AssessmentOffered
if osid_objects.OsidObject.get_display_name(self).get_text():
return osid_objects.OsidObject.get_display_name(self)
else:
return self.get_assessment().get_display_name()
def get_description(self):
# Overrides osid.objects.OsidObject.get_description to default to Assessment's
# description if none has been authored for this AssessmentOffered
if osid_objects.OsidObject.get_description(self).get_text():
return osid_objects.OsidObject.get_description(self)
else:
return self.get_assessment().get_description()
def get_object_map(self):
obj_map = dict(self._my_map)
if obj_map['startTime'] is not None:
start_time = obj_map['startTime']
obj_map['startTime'] = dict()
obj_map['startTime']['year'] = start_time.year
obj_map['startTime']['month'] = start_time.month
obj_map['startTime']['day'] = start_time.day
obj_map['startTime']['hour'] = start_time.hour
obj_map['startTime']['minute'] = start_time.minute
obj_map['startTime']['second'] = start_time.second
obj_map['startTime']['microsecond'] = start_time.microsecond
        if obj_map['deadline'] is not None:
            deadline = obj_map['deadline']
            obj_map['deadline'] = dict()
            obj_map['deadline']['year'] = deadline.year
            obj_map['deadline']['month'] = deadline.month
            obj_map['deadline']['day'] = deadline.day
            obj_map['deadline']['hour'] = deadline.hour
            obj_map['deadline']['minute'] = deadline.minute
            obj_map['deadline']['second'] = deadline.second
            obj_map['deadline']['microsecond'] = deadline.microsecond
obj_map = osid_objects.OsidObject.get_object_map(self, obj_map)
if obj_map['displayName']['text'] == '':
obj_map['displayName']['text'] = self.get_display_name().get_text()
if obj_map['description']['text'] == '':
obj_map['description']['text'] = self.get_description().get_text()
return obj_map
object_map = property(fget=get_object_map)
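# Shape sketch (added for illustration; not in the original source):
# get_object_map() above flattens DateTime-valued fields into plain dicts so
# the map can be JSON-serialized. A startTime of 2015-06-01 09:30 would appear
# roughly as:
#
#   {'startTime': {'year': 2015, 'month': 6, 'day': 1, 'hour': 9,
#                  'minute': 30, 'second': 0, 'microsecond': 0}, ...}
#
# and 'deadline' is serialized the same way.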
class AssessmentOfferedForm(abc_assessment_objects.AssessmentOfferedForm, osid_objects.OsidObjectForm, osid_objects.OsidSubjugateableForm):
"""This is the form for creating and updating an ``AssessmentOffered``.
Like all ``OsidForm`` objects, various data elements may be set here
for use in the create and update methods in the
``AssessmentOfferedAdminSession``. For each data element that may be
set, metadata may be examined to provide display hints or data
constraints.
"""
_record_type_data_sets = {}
_namespace = 'assessment.AssessmentOffered'
def __init__(self, osid_object_map=None, record_types=None, runtime=None, **kwargs):
osid_objects.OsidForm.__init__(self, runtime=runtime)
self._record_type_data_sets = self._get_registry('ASSESSMENT_OFFERED_RECORD_TYPES')
self._kwargs = kwargs
if 'catalog_id' in kwargs:
self._catalog_id = kwargs['catalog_id']
self._init_metadata(**kwargs)
self._records = dict()
self._supported_record_type_ids = []
if osid_object_map is not None:
self._for_update = True
self._my_map = osid_object_map
self._load_records(osid_object_map['recordTypeIds'])
else:
self._my_map = {}
self._for_update = False
self._init_map(**kwargs)
if record_types is not None:
self._init_records(record_types)
self._supported_record_type_ids = self._my_map['recordTypeIds']
def _init_metadata(self, **kwargs):
osid_objects.OsidObjectForm._init_metadata(self, **kwargs)
self._level_metadata = {
'element_id': Id(
self._authority,
self._namespace,
'level')}
self._level_metadata.update(mdata_conf.ASSESSMENT_OFFERED_LEVEL)
self._start_time_metadata = {
'element_id': Id(
self._authority,
self._namespace,
'start_time')}
self._start_time_metadata.update(mdata_conf.ASSESSMENT_OFFERED_START_TIME)
self._grade_system_metadata = {
'element_id': Id(
self._authority,
self._namespace,
'grade_system')}
self._grade_system_metadata.update(mdata_conf.ASSESSMENT_OFFERED_GRADE_SYSTEM)
self._items_shuffled_metadata = {
'element_id': Id(
self._authority,
self._namespace,
'items_shuffled')}
self._items_shuffled_metadata.update(mdata_conf.ASSESSMENT_OFFERED_ITEMS_SHUFFLED)
self._score_system_metadata = {
'element_id': Id(
self._authority,
self._namespace,
'score_system')}
self._score_system_metadata.update(mdata_conf.ASSESSMENT_OFFERED_SCORE_SYSTEM)
self._deadline_metadata = {
'element_id': Id(
self._authority,
self._namespace,
'deadline')}
self._deadline_metadata.update(mdata_conf.ASSESSMENT_OFFERED_DEADLINE)
self._duration_metadata = {
'element_id': Id(
self._authority,
self._namespace,
'duration')}
self._duration_metadata.update(mdata_conf.ASSESSMENT_OFFERED_DURATION)
self._items_sequential_metadata = {
'element_id': Id(
self._authority,
self._namespace,
'items_sequential')}
self._items_sequential_metadata.update(mdata_conf.ASSESSMENT_OFFERED_ITEMS_SEQUENTIAL)
self._level_default = self._level_metadata['default_id_values'][0]
self._start_time_default = self._start_time_metadata['default_date_time_values'][0]
self._grade_system_default = self._grade_system_metadata['default_id_values'][0]
self._items_shuffled_default = None
self._score_system_default = self._score_system_metadata['default_id_values'][0]
self._deadline_default = self._deadline_metadata['default_date_time_values'][0]
self._duration_default = self._duration_metadata['default_duration_values'][0]
self._items_sequential_default = None
def _init_map(self, **kwargs):
osid_objects.OsidObjectForm._init_map(self)
self._my_map['levelId'] = self._level_default
self._my_map['startTime'] = self._start_time_default
self._my_map['gradeSystemId'] = self._grade_system_default
self._my_map['itemsShuffled'] = self._items_shuffled_default
self._my_map['scoreSystemId'] = self._score_system_default
self._my_map['deadline'] = self._deadline_default
self._my_map['assignedBankIds'] = [str(kwargs['bank_id'])]
self._my_map['duration'] = self._duration_default
self._my_map['assessmentId'] = str(kwargs['assessment_id'])
self._my_map['itemsSequential'] = self._items_sequential_default
def get_level_metadata(self):
"""Gets the metadata for a grade level.
return: (osid.Metadata) - metadata for the grade level
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.get_group_metadata_template
metadata = dict(self._level_metadata)
metadata.update({'existing_level_values': self._my_map['levelId']})
return Metadata(**metadata)
level_metadata = property(fget=get_level_metadata)
@utilities.arguments_not_none
def set_level(self, grade_id):
"""Sets the level of difficulty expressed as a ``Grade``.
arg: grade_id (osid.id.Id): the grade level
raise: InvalidArgument - ``grade_id`` is invalid
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.set_avatar_template
if self.get_level_metadata().is_read_only():
raise errors.NoAccess()
if not self._is_valid_id(grade_id):
raise errors.InvalidArgument()
self._my_map['levelId'] = str(grade_id)
def clear_level(self):
"""Clears the level.
raise: NoAccess - ``Metadata.isRequired()`` or
``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.clear_avatar_template
if (self.get_level_metadata().is_read_only() or
self.get_level_metadata().is_required()):
raise errors.NoAccess()
self._my_map['levelId'] = self._level_default
level = property(fset=set_level, fdel=clear_level)
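    # Usage sketch (illustrative; `form` and `grade_id` are hypothetical
    # placeholders): each set_/clear_ pair on this form is also exposed as a
    # write-only property, so the spellings below are equivalent:
    #
    #   form.set_level(grade_id)   # explicit setter, validated against metadata
    #   form.level = grade_id      # same call via the property fset
    #   del form.level             # reverts to the metadata default via clear_level()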
def get_items_sequential_metadata(self):
"""Gets the metadata for sequential operation.
return: (osid.Metadata) - metadata for the sequential flag
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.get_group_metadata_template
metadata = dict(self._items_sequential_metadata)
metadata.update({'existing_items_sequential_values': self._my_map['itemsSequential']})
return Metadata(**metadata)
items_sequential_metadata = property(fget=get_items_sequential_metadata)
@utilities.arguments_not_none
def set_items_sequential(self, sequential):
"""Sets the items sequential flag.
arg: sequential (boolean): ``true`` if the items are taken
sequentially, ``false`` if the items can be skipped and
revisited
raise: InvalidArgument - ``sequential`` is invalid
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.set_group_template
if self.get_items_sequential_metadata().is_read_only():
raise errors.NoAccess()
if not self._is_valid_boolean(sequential):
raise errors.InvalidArgument()
self._my_map['itemsSequential'] = sequential
def clear_items_sequential(self):
"""Clears the items sequential flag.
raise: NoAccess - ``Metadata.isRequired()`` or
``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.clear_group_template
if (self.get_items_sequential_metadata().is_read_only() or
self.get_items_sequential_metadata().is_required()):
raise errors.NoAccess()
self._my_map['itemsSequential'] = self._items_sequential_default
items_sequential = property(fset=set_items_sequential, fdel=clear_items_sequential)
def get_items_shuffled_metadata(self):
"""Gets the metadata for shuffling items.
return: (osid.Metadata) - metadata for the shuffled flag
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.get_group_metadata_template
metadata = dict(self._items_shuffled_metadata)
metadata.update({'existing_items_shuffled_values': self._my_map['itemsShuffled']})
return Metadata(**metadata)
items_shuffled_metadata = property(fget=get_items_shuffled_metadata)
@utilities.arguments_not_none
def set_items_shuffled(self, shuffle):
"""Sets the shuffle flag.
The shuffle flag may be overidden by other assessment sequencing
rules.
arg: shuffle (boolean): ``true`` if the items are shuffled,
``false`` if the items appear in the designated order
raise: InvalidArgument - ``shuffle`` is invalid
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.set_group_template
if self.get_items_shuffled_metadata().is_read_only():
raise errors.NoAccess()
if not self._is_valid_boolean(shuffle):
raise errors.InvalidArgument()
self._my_map['itemsShuffled'] = shuffle
def clear_items_shuffled(self):
"""Clears the shuffle flag.
raise: NoAccess - ``Metadata.isRequired()`` or
``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.clear_group_template
if (self.get_items_shuffled_metadata().is_read_only() or
self.get_items_shuffled_metadata().is_required()):
raise errors.NoAccess()
self._my_map['itemsShuffled'] = self._items_shuffled_default
items_shuffled = property(fset=set_items_shuffled, fdel=clear_items_shuffled)
def get_start_time_metadata(self):
"""Gets the metadata for the assessment start time.
return: (osid.Metadata) - metadata for the start time
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.get_group_metadata_template
metadata = dict(self._start_time_metadata)
metadata.update({'existing_start_time_values': self._my_map['startTime']})
return Metadata(**metadata)
start_time_metadata = property(fget=get_start_time_metadata)
@utilities.arguments_not_none
def set_start_time(self, start):
"""Sets the assessment start time.
arg: start (osid.calendaring.DateTime): assessment start time
raise: InvalidArgument - ``start`` is invalid
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.assessment.AssessmentOfferedForm.set_start_time_template
if self.get_start_time_metadata().is_read_only():
raise errors.NoAccess()
if not self._is_valid_date_time(
start,
self.get_start_time_metadata()):
raise errors.InvalidArgument()
self._my_map['startTime'] = start
def clear_start_time(self):
"""Clears the start time.
raise: NoAccess - ``Metadata.isRequired()`` or
``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
"""
if (self.get_start_time_metadata().is_read_only() or
self.get_start_time_metadata().is_required()):
raise errors.NoAccess()
self._my_map['startTime'] = self._start_time_default
start_time = property(fset=set_start_time, fdel=clear_start_time)
def get_deadline_metadata(self):
"""Gets the metadata for the assessment deadline.
return: (osid.Metadata) - metadata for the end time
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.get_group_metadata_template
metadata = dict(self._deadline_metadata)
metadata.update({'existing_deadline_values': self._my_map['deadline']})
return Metadata(**metadata)
deadline_metadata = property(fget=get_deadline_metadata)
@utilities.arguments_not_none
def set_deadline(self, end):
"""Sets the assessment end time.
arg: end (timestamp): assessment end time
raise: InvalidArgument - ``end`` is invalid
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.assessment.AssessmentOfferedForm.set_start_time_template
if self.get_deadline_metadata().is_read_only():
raise errors.NoAccess()
if not self._is_valid_timestamp(
end,
self.get_deadline_metadata()):
raise errors.InvalidArgument()
self._my_map['deadline'] = end
def clear_deadline(self):
"""Clears the deadline.
raise: NoAccess - ``Metadata.isRequired()`` or
``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
"""
if (self.get_deadline_metadata().is_read_only() or
self.get_deadline_metadata().is_required()):
raise errors.NoAccess()
self._my_map['deadline'] = self._deadline_default
deadline = property(fset=set_deadline, fdel=clear_deadline)
def get_duration_metadata(self):
"""Gets the metadata for the assessment duration.
return: (osid.Metadata) - metadata for the duration
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.get_group_metadata_template
metadata = dict(self._duration_metadata)
metadata.update({'existing_duration_values': self._my_map['duration']})
return Metadata(**metadata)
duration_metadata = property(fget=get_duration_metadata)
@utilities.arguments_not_none
def set_duration(self, duration):
"""Sets the assessment duration.
arg: duration (osid.calendaring.Duration): assessment
duration
raise: InvalidArgument - ``duration`` is invalid
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.assessment.AssessmentOfferedForm.set_duration_template
if self.get_duration_metadata().is_read_only():
raise errors.NoAccess()
if not self._is_valid_duration(duration,
self.get_duration_metadata()):
raise errors.InvalidArgument()
        duration_map = dict()
        duration_map['days'] = duration.days
        duration_map['seconds'] = duration.seconds
        duration_map['microseconds'] = duration.microseconds
        self._my_map['duration'] = duration_map
def clear_duration(self):
"""Clears the duration.
raise: NoAccess - ``Metadata.isRequired()`` or
``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
duration = property(fset=set_duration, fdel=clear_duration)
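    # Illustrative sketch (assumes the duration argument exposes days/seconds/
    # microseconds the way datetime.timedelta does, and that the validator
    # accepts it): set_duration persists a plain dict, not the object itself:
    #
    #   import datetime
    #   form.set_duration(datetime.timedelta(hours=1, minutes=30))
    #   # self._my_map['duration'] -> {'days': 0, 'seconds': 5400, 'microseconds': 0}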
def get_score_system_metadata(self):
"""Gets the metadata for a score system.
return: (osid.Metadata) - metadata for the grade system
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.get_group_metadata_template
metadata = dict(self._score_system_metadata)
metadata.update({'existing_score_system_values': self._my_map['scoreSystemId']})
return Metadata(**metadata)
score_system_metadata = property(fget=get_score_system_metadata)
@utilities.arguments_not_none
def set_score_system(self, grade_system_id):
"""Sets the scoring system.
arg: grade_system_id (osid.id.Id): the grade system
raise: InvalidArgument - ``grade_system_id`` is invalid
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.set_avatar_template
if self.get_score_system_metadata().is_read_only():
raise errors.NoAccess()
if not self._is_valid_id(grade_system_id):
raise errors.InvalidArgument()
self._my_map['scoreSystemId'] = str(grade_system_id)
def clear_score_system(self):
"""Clears the score system.
raise: NoAccess - ``Metadata.isRequired()`` or
``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.clear_avatar_template
if (self.get_score_system_metadata().is_read_only() or
self.get_score_system_metadata().is_required()):
raise errors.NoAccess()
self._my_map['scoreSystemId'] = self._score_system_default
score_system = property(fset=set_score_system, fdel=clear_score_system)
def get_grade_system_metadata(self):
"""Gets the metadata for a grading system.
return: (osid.Metadata) - metadata for the grade system
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.get_group_metadata_template
metadata = dict(self._grade_system_metadata)
metadata.update({'existing_grade_system_values': self._my_map['gradeSystemId']})
return Metadata(**metadata)
grade_system_metadata = property(fget=get_grade_system_metadata)
@utilities.arguments_not_none
def set_grade_system(self, grade_system_id):
"""Sets the grading system.
arg: grade_system_id (osid.id.Id): the grade system
raise: InvalidArgument - ``grade_system_id`` is invalid
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.set_avatar_template
if self.get_grade_system_metadata().is_read_only():
raise errors.NoAccess()
if not self._is_valid_id(grade_system_id):
raise errors.InvalidArgument()
self._my_map['gradeSystemId'] = str(grade_system_id)
def clear_grade_system(self):
"""Clears the grading system.
raise: NoAccess - ``Metadata.isRequired()`` or
``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.clear_avatar_template
if (self.get_grade_system_metadata().is_read_only() or
self.get_grade_system_metadata().is_required()):
raise errors.NoAccess()
self._my_map['gradeSystemId'] = self._grade_system_default
grade_system = property(fset=set_grade_system, fdel=clear_grade_system)
@utilities.arguments_not_none
def get_assessment_offered_form_record(self, assessment_offered_record_type):
"""Gets the ``AssessmentOfferedFormRecord`` corresponding to the given assessment record ``Type``.
arg: assessment_offered_record_type (osid.type.Type): the
assessment offered record type
return: (osid.assessment.records.AssessmentOfferedFormRecord) -
the assessment offered record
raise: NullArgument - ``assessment_offered_record_type`` is
``null``
raise: OperationFailed - unable to complete request
raise: Unsupported -
``has_record_type(assessment_offered_record_type)`` is
``false``
*compliance: mandatory -- This method must be implemented.*
"""
return self._get_record(assessment_offered_record_type)
class AssessmentOfferedList(abc_assessment_objects.AssessmentOfferedList, osid_objects.OsidList):
"""Like all ``OsidLists,`` ``AssessmentOfferedList`` provides a means for accessing ``AssessmentTaken`` elements
sequentially either one at a time or many at a time.
Examples: while (aol.hasNext()) { AssessmentOffered assessment =
aol.getNextAssessmentOffered();
or
while (aol.hasNext()) {
    AssessmentOffered[] assessments = aol.getNextAssessmentsOffered(aol.available());
}
"""
def get_next_assessment_offered(self):
"""Gets the next ``AssessmentOffered`` in this list.
return: (osid.assessment.AssessmentOffered) - the next
``AssessmentOffered`` in this list. The ``has_next()``
method should be used to test that a next
``AssessmentOffered`` is available before calling this
method.
raise: IllegalState - no more elements available in this list
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceList.get_next_resource
return self.next()
def next(self):
return self._get_next_object(AssessmentOffered)
next_assessment_offered = property(fget=get_next_assessment_offered)
@utilities.arguments_not_none
def get_next_assessments_offered(self, n):
"""Gets the next set of ``AssessmentOffered`` elements in this list which must be less than or equal to the
number returned from ``available()``.
arg: n (cardinal): the number of ``AssessmentOffered``
elements requested which should be less than or equal to
``available()``
return: (osid.assessment.AssessmentOffered) - an array of
            ``AssessmentOffered`` elements. The length of the array
is less than or equal to the number specified.
raise: IllegalState - no more elements available in this list
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceList.get_next_resources
return self._get_next_n(n)
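# Usage sketch (illustrative; `lookup_session` is a hypothetical
# AssessmentOfferedLookupSession): the list can be consumed one element at a
# time or in batches, mirroring the Java-style examples in the class docstring:
#
#   aol = lookup_session.get_assessments_offered_for_assessment(assessment_id)
#   while aol.has_next():
#       offered = aol.get_next_assessment_offered()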
class AssessmentTaken(abc_assessment_objects.AssessmentTaken, osid_objects.OsidObject):
"""Represents a taken assessment or an assessment in progress."""
_record_type_data_sets = {}
_namespace = 'assessment.AssessmentTaken'
def __init__(self, osid_object_map, runtime=None):
osid_objects.OsidObject.__init__(self, osid_object_map, runtime)
self._record_type_data_sets = self._get_registry('ASSESSMENT_TAKEN_RECORD_TYPES')
self._records = dict()
self._load_records(osid_object_map['recordTypeIds'])
self._catalog_name = 'bank'
def get_assessment_offered_id(self):
"""Gets the ``Id`` of the ``AssessmentOffered``.
return: (osid.id.Id) - the assessment offered ``Id``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.learning.Activity.get_objective_id
return Id(self._my_map['assessmentOfferedId'])
assessment_offered_id = property(fget=get_assessment_offered_id)
def get_assessment_offered(self):
"""Gets the ``AssessmentOffered``.
return: (osid.assessment.AssessmentOffered) - the assessment
offered
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.learning.Activity.get_objective
mgr = self._get_provider_manager('ASSESSMENT')
if not mgr.supports_assessment_offered_lookup():
raise errors.OperationFailed('Assessment does not support AssessmentOffered lookup')
lookup_session = mgr.get_assessment_offered_lookup_session()
lookup_session.use_federated_bank_view()
return lookup_session.get_assessment_offered(self.get_assessment_offered_id())
assessment_offered = property(fget=get_assessment_offered)
def get_taker_id(self):
"""Gets the ``Id`` of the resource who took or is taking this assessment.
return: (osid.id.Id) - the resource ``Id``
*compliance: mandatory -- This method must be implemented.*
"""
if self._my_map['takerId']:
return Id(self._my_map['takerId'])
else:
return Id(self._my_map['takingAgentId'])
taker_id = property(fget=get_taker_id)
def get_taker(self):
"""Gets the ``Resource`` taking this assessment.
return: (osid.resource.Resource) - the resource
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
taker = property(fget=get_taker)
def get_taking_agent_id(self):
"""Gets the ``Id`` of the ``Agent`` who took or is taking the assessment.
return: (osid.id.Id) - the agent ``Id``
*compliance: mandatory -- This method must be implemented.*
"""
return Id(self._my_map['takingAgentId'])
taking_agent_id = property(fget=get_taking_agent_id)
def get_taking_agent(self):
"""Gets the ``Agent``.
return: (osid.authentication.Agent) - the agent
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
taking_agent = property(fget=get_taking_agent)
def has_started(self):
"""Tests if this assessment has begun.
return: (boolean) - ``true`` if the assessment has begun,
``false`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
# This needs to be updated to only reflect actual start time??
if 'started' in self._my_map and self._my_map['started']:
return True
else:
my_assessment_offered = self.get_assessment_offered()
if my_assessment_offered.has_start_time():
self._my_map['started'] = DateTime.now() >= my_assessment_offered.get_start_time()
return self._my_map['started']
else:
self._my_map['started'] = True
return True
def get_actual_start_time(self):
"""Gets the time this assessment was started.
return: (osid.calendaring.DateTime) - the start time
raise: IllegalState - ``has_started()`` is ``false``
*compliance: mandatory -- This method must be implemented.*
"""
if not self.has_started():
raise errors.IllegalState('this assessment has not yet started')
if self._my_map['actualStartTime'] is None:
raise errors.IllegalState('this assessment has not yet been started by the taker')
else:
return self._my_map['actualStartTime']
actual_start_time = property(fget=get_actual_start_time)
def has_ended(self):
"""Tests if this assessment has ended.
return: (boolean) - ``true`` if the assessment has ended,
``false`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
# Perhaps this should just check for existance of self._my_map['completionTime']?
return bool('ended' in self._my_map and self._my_map['ended'])
def get_completion_time(self):
"""Gets the time of this assessment was completed.
return: (osid.calendaring.DateTime) - the end time
raise: IllegalState - ``has_ended()`` is ``false``
*compliance: mandatory -- This method must be implemented.*
"""
if not self.has_ended():
raise errors.IllegalState('this assessment has not yet ended')
if not self._my_map['completionTime']:
raise errors.OperationFailed('someone forgot to set the completion time')
return self._my_map['completionTime']
completion_time = property(fget=get_completion_time)
def get_time_spent(self):
"""Gets the total time spent taking this assessment.
return: (osid.calendaring.Duration) - the total time spent
*compliance: mandatory -- This method must be implemented.*
"""
if self.has_started() and self.has_ended():
return self.get_completion_time() - self.get_actual_start_time()
else:
raise errors.IllegalState()
time_spent = property(fget=get_time_spent)
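    # Worked example (illustrative): with an actualStartTime of 09:00:00 and a
    # completionTime of 09:47:30 on the same day, get_time_spent() returns the
    # DateTime difference, i.e. a duration of 47 minutes 30 seconds.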
def get_completion(self):
"""Gets a completion percentage of the assessment.
return: (cardinal) - the percent complete (0-100)
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.assessment.AssessmentTaken.get_completion_template
return int(self._my_map['completion'])
completion = property(fget=get_completion)
def is_scored(self):
"""Tests if a score is available for this assessment.
return: (boolean) - ``true`` if a score is available, ``false``
otherwise
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.Resource.is_group_template
return self._my_map['scored']
    def get_score_system_id(self):
        """Gets a score system ``Id`` for the assessment.
        return: (osid.id.Id) - the grade system
        raise: IllegalState - ``is_scored()`` is ``false``
        *compliance: mandatory -- This method must be implemented.*
        """
        # Implemented from template for osid.resource.Resource.get_avatar_id_template
        if not self._my_map['scoreSystemId']:
            raise errors.IllegalState('this AssessmentTaken has no score_system')
        else:
            return Id(self._my_map['scoreSystemId'])
score_system_id = property(fget=get_score_system_id)
def get_score_system(self):
"""Gets a grade system for the score.
return: (osid.grading.GradeSystem) - the grade system
raise: IllegalState - ``is_scored()`` is ``false``
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.Resource.get_avatar_template
if not self._my_map['scoreSystemId']:
raise errors.IllegalState('this AssessmentTaken has no score_system')
mgr = self._get_provider_manager('GRADING')
if not mgr.supports_grade_system_lookup():
raise errors.OperationFailed('Grading does not support GradeSystem lookup')
lookup_session = mgr.get_grade_system_lookup_session()
lookup_session.use_federated_gradebook_view()
osid_object = lookup_session.get_grade_system(self.get_score_system_id())
return osid_object
score_system = property(fget=get_score_system)
def get_score(self):
"""Gets a score for the assessment.
return: (decimal) - the score
raise: IllegalState - ``is_scored()`` is ``false``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.assessment.AssessmentTaken.get_score_template
return float(self._my_map['score'])
score = property(fget=get_score)
def is_graded(self):
"""Tests if a grade is available for this assessment.
return: (boolean) - ``true`` if a grade is available, ``false``
otherwise
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.Resource.is_group_template
return self._my_map['graded']
def get_grade_id(self):
"""Gets a grade ``Id`` for the assessment.
return: (osid.id.Id) - the grade
raise: IllegalState - ``is_graded()`` is ``false``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.Resource.get_avatar_id_template
if not self._my_map['gradeId']:
raise errors.IllegalState('this AssessmentTaken has no grade')
else:
return Id(self._my_map['gradeId'])
grade_id = property(fget=get_grade_id)
def get_grade(self):
"""Gets a grade for the assessment.
return: (osid.grading.Grade) - the grade
raise: IllegalState - ``is_graded()`` is ``false``
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.Resource.get_avatar_template
if not self._my_map['gradeId']:
raise errors.IllegalState('this AssessmentTaken has no grade')
mgr = self._get_provider_manager('GRADING')
if not mgr.supports_grade_lookup():
raise errors.OperationFailed('Grading does not support Grade lookup')
lookup_session = mgr.get_grade_lookup_session()
lookup_session.use_federated_gradebook_view()
osid_object = lookup_session.get_grade(self.get_grade_id())
return osid_object
grade = property(fget=get_grade)
def get_feedback(self):
"""Gets any overall comments available for this assessment by the grader.
return: (osid.locale.DisplayText) - comments
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.repository.Asset.get_title_template
return DisplayText(self._my_map['feedback'])
feedback = property(fget=get_feedback)
def has_rubric(self):
"""Tests if a rubric assessment is associated with this assessment.
return: (boolean) - ``true`` if a rubric is available, ``false``
otherwise
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.Resource.has_avatar_template
return bool(self._my_map['rubricId'])
def get_rubric_id(self):
"""Gets the ``Id`` of the rubric.
return: (osid.id.Id) - an assessment taken ``Id``
raise: IllegalState - ``has_rubric()`` is ``false``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.Resource.get_avatar_id_template
if not self._my_map['rubricId']:
raise errors.IllegalState('this AssessmentTaken has no rubric')
else:
return Id(self._my_map['rubricId'])
rubric_id = property(fget=get_rubric_id)
def get_rubric(self):
"""Gets the rubric.
return: (osid.assessment.AssessmentTaken) - the assessment taken
raise: IllegalState - ``has_rubric()`` is ``false``
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.Resource.get_avatar_template
if not self._my_map['rubricId']:
raise errors.IllegalState('this AssessmentTaken has no rubric')
mgr = self._get_provider_manager('ASSESSMENT')
if not mgr.supports_assessment_taken_lookup():
raise errors.OperationFailed('Assessment does not support AssessmentTaken lookup')
lookup_session = mgr.get_assessment_taken_lookup_session()
lookup_session.use_federated_bank_view()
osid_object = lookup_session.get_assessment_taken(self.get_rubric_id())
return osid_object
rubric = property(fget=get_rubric)
@utilities.arguments_not_none
def get_assessment_taken_record(self, assessment_taken_record_type):
"""Gets the assessment taken record corresponding to the given ``AssessmentTaken`` record ``Type``.
This method is used to retrieve an object implementing the
requested record. The ``assessment_taken_record_type`` may be
the ``Type`` returned in ``get_record_types()`` or any of its
parents in a ``Type`` hierarchy where
        ``has_record_type(assessment_taken_record_type)`` is ``true``.
arg: assessment_taken_record_type (osid.type.Type): an
assessment taken record type
return: (osid.assessment.records.AssessmentTakenRecord) - the
assessment taken record
raise: NullArgument - ``assessment_taken_record_type`` is
``null``
raise: OperationFailed - unable to complete request
raise: Unsupported -
``has_record_type(assessment_taken_record_type)`` is
``false``
*compliance: mandatory -- This method must be implemented.*
"""
return self._get_record(assessment_taken_record_type)
def get_display_name(self):
# Overrides osid.objects.OsidObject.get_display_name to default to AssessmentOffered's
# display_name if none has been authored for this AssessmentTaken
from ..osid.objects import OsidObject
if OsidObject.get_display_name(self).get_text():
return OsidObject.get_display_name(self)
else:
return self.get_assessment_offered().get_display_name()
def get_description(self):
# Overrides osid.objects.OsidObject.get_description to default to AssessmentOffered's
# description if none has been authored for this AssessmentTaken
from ..osid.objects import OsidObject
if OsidObject.get_description(self).get_text():
return OsidObject.get_description(self)
else:
return self.get_assessment_offered().get_description()
def get_object_map(self):
obj_map = dict(self._my_map)
if obj_map['actualStartTime'] is not None:
actual_start_time = obj_map['actualStartTime']
obj_map['actualStartTime'] = dict()
obj_map['actualStartTime']['year'] = actual_start_time.year
obj_map['actualStartTime']['month'] = actual_start_time.month
obj_map['actualStartTime']['day'] = actual_start_time.day
obj_map['actualStartTime']['hour'] = actual_start_time.hour
obj_map['actualStartTime']['minute'] = actual_start_time.minute
obj_map['actualStartTime']['second'] = actual_start_time.second
obj_map['actualStartTime']['microsecond'] = actual_start_time.microsecond
if obj_map['completionTime'] is not None:
completion_time = obj_map['completionTime']
obj_map['completionTime'] = dict()
obj_map['completionTime']['year'] = completion_time.year
obj_map['completionTime']['month'] = completion_time.month
obj_map['completionTime']['day'] = completion_time.day
obj_map['completionTime']['hour'] = completion_time.hour
obj_map['completionTime']['minute'] = completion_time.minute
obj_map['completionTime']['second'] = completion_time.second
obj_map['completionTime']['microsecond'] = completion_time.microsecond
if 'itemIds' in obj_map:
del obj_map['itemIds']
if 'responses' in obj_map:
del obj_map['responses']
obj_map = osid_objects.OsidObject.get_object_map(self, obj_map)
if obj_map['displayName']['text'] == '':
obj_map['displayName']['text'] = self.get_display_name().get_text()
if obj_map['description']['text'] == '':
obj_map['description']['text'] = self.get_description().get_text()
return obj_map
object_map = property(fget=get_object_map)
class AssessmentTakenForm(abc_assessment_objects.AssessmentTakenForm, osid_objects.OsidObjectForm):
"""This is the form for creating and updating an ``AssessmentTaken``.
Like all ``OsidForm`` objects, various data elements may be set here
for use in the create and update methods in the
``AssessmentTakenAdminSession``. For each data element that may be
set, metadata may be examined to provide display hints or data
constraints.
"""
_record_type_data_sets = {}
_namespace = 'assessment.AssessmentTaken'
def __init__(self, osid_object_map=None, record_types=None, runtime=None, **kwargs):
osid_objects.OsidForm.__init__(self, runtime=runtime)
self._record_type_data_sets = self._get_registry('ASSESSMENT_TAKEN_RECORD_TYPES')
self._kwargs = kwargs
if 'catalog_id' in kwargs:
self._catalog_id = kwargs['catalog_id']
self._init_metadata(**kwargs)
self._records = dict()
self._supported_record_type_ids = []
if osid_object_map is not None:
self._for_update = True
self._my_map = osid_object_map
self._load_records(osid_object_map['recordTypeIds'])
else:
self._my_map = {}
self._for_update = False
self._init_map(**kwargs)
if record_types is not None:
self._init_records(record_types)
self._supported_record_type_ids = self._my_map['recordTypeIds']
def _init_metadata(self, **kwargs):
osid_objects.OsidObjectForm._init_metadata(self, **kwargs)
self._taker_metadata = {
'element_id': Id(
self._authority,
self._namespace,
'taker')}
self._taker_metadata.update(mdata_conf.ASSESSMENT_TAKEN_TAKER)
self._taker_default = self._taker_metadata['default_id_values'][0]
def _init_map(self, **kwargs):
osid_objects.OsidObjectForm._init_map(self)
self._my_map['assessmentOfferedId'] = str(kwargs['assessment_offered_id'])
self._my_map['takerId'] = self._taker_default
self._my_map['assignedBankIds'] = [str(kwargs['bank_id'])]
self._my_map['actualStartTime'] = None
self._my_map['gradeId'] = ''
self._my_map['completionTime'] = None
self._my_map['score'] = ''
def get_taker_metadata(self):
"""Gets the metadata for a resource to manually set which resource will be taking the assessment.
return: (osid.Metadata) - metadata for the resource
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.get_group_metadata_template
metadata = dict(self._taker_metadata)
metadata.update({'existing_taker_values': self._my_map['takerId']})
return Metadata(**metadata)
taker_metadata = property(fget=get_taker_metadata)
@utilities.arguments_not_none
def set_taker(self, resource_id):
"""Sets the resource who will be taking this assessment.
arg: resource_id (osid.id.Id): the resource Id
raise: InvalidArgument - ``resource_id`` is invalid
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.set_avatar_template
if self.get_taker_metadata().is_read_only():
raise errors.NoAccess()
if not self._is_valid_id(resource_id):
raise errors.InvalidArgument()
self._my_map['takerId'] = str(resource_id)
def clear_taker(self):
"""Clears the resource.
raise: NoAccess - ``Metadata.isRequired()`` or
``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.clear_avatar_template
if (self.get_taker_metadata().is_read_only() or
self.get_taker_metadata().is_required()):
raise errors.NoAccess()
self._my_map['takerId'] = self._taker_default
taker = property(fset=set_taker, fdel=clear_taker)
@utilities.arguments_not_none
def get_assessment_taken_form_record(self, assessment_taken_record_type):
"""Gets the ``AssessmentTakenFormRecord`` corresponding to the given assessment taken record ``Type``.
arg: assessment_taken_record_type (osid.type.Type): the
assessment taken record type
return: (osid.assessment.records.AssessmentTakenFormRecord) -
the assessment taken record
raise: NullArgument - ``assessment_taken_record_type`` is
``null``
raise: OperationFailed - unable to complete request
raise: Unsupported -
``has_record_type(assessment_taken_record_type)`` is
``false``
*compliance: mandatory -- This method must be implemented.*
"""
return self._get_record(assessment_taken_record_type)
class AssessmentTakenList(abc_assessment_objects.AssessmentTakenList, osid_objects.OsidList):
"""Like all ``OsidLists,`` ``AssessmentTakenList`` provides a means for accessing ``AssessmentTaken`` elements
sequentially either one at a time or many at a time.
Examples: while (atl.hasNext()) { AssessmentTaken assessment =
atl.getNextAssessmentTaken();
or
while (atl.hasNext()) {
    AssessmentTaken[] assessments = atl.getNextAssessmentsTaken(atl.available());
}
"""
def get_next_assessment_taken(self):
"""Gets the next ``AssessmentTaken`` in this list.
return: (osid.assessment.AssessmentTaken) - the next
``AssessmentTaken`` in this list. The ``has_next()``
method should be used to test that a next
``AssessmentTaken`` is available before calling this
method.
raise: IllegalState - no more elements available in this list
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceList.get_next_resource
return self.next()
def next(self):
return self._get_next_object(AssessmentTaken)
next_assessment_taken = property(fget=get_next_assessment_taken)
@utilities.arguments_not_none
def get_next_assessments_taken(self, n):
"""Gets the next set of ``AssessmentTaken`` elements in this list which must be less than or equal to the number
returned from ``available()``.
arg: n (cardinal): the number of ``AssessmentTaken`` elements
requested which should be less than or equal to
``available()``
return: (osid.assessment.AssessmentTaken) - an array of
            ``AssessmentTaken`` elements. The length of the array is
less than or equal to the number specified.
raise: IllegalState - no more elements available in this list
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceList.get_next_resources
return self._get_next_n(n)
class AssessmentSection(abc_assessment_objects.AssessmentSection, osid_objects.OsidObject):
"""Represents an assessment section.
An assessment section represents a cluster of questions used to
organize the execution of an assessment. The section is the student
aspect of an assessment part.
"""
_record_type_data_sets = {}
_namespace = 'assessment.AssessmentSection'
def __init__(self, osid_object_map, runtime=None):
osid_objects.OsidObject.__init__(self, osid_object_map, runtime)
self._record_type_data_sets = self._get_registry('ASSESSMENT_SECTION_RECORD_TYPES')
self._records = dict()
self._load_records(osid_object_map['recordTypeIds'])
self._catalog_name = 'bank'
def get_assessment_taken_id(self):
"""Gets the ``Id`` of the ``AssessmentTaken``.
return: (osid.id.Id) - the assessment taken ``Id``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.learning.Activity.get_objective_id
return Id(self._my_map['assessmentTakenId'])
assessment_taken_id = property(fget=get_assessment_taken_id)
def get_assessment_taken(self):
"""Gets the ``AssessmentTakeb``.
return: (osid.assessment.AssessmentTaken) - the assessment taken
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.learning.Activity.get_objective
mgr = self._get_provider_manager('ASSESSMENT')
if not mgr.supports_assessment_taken_lookup():
raise errors.OperationFailed('Assessment does not support AssessmentTaken lookup')
lookup_session = mgr.get_assessment_taken_lookup_session()
lookup_session.use_federated_bank_view()
return lookup_session.get_assessment_taken(self.get_assessment_taken_id())
assessment_taken = property(fget=get_assessment_taken)
def has_allocated_time(self):
"""Tests if this section must be completed within an allocated time.
return: (boolean) - ``true`` if this section has an allocated
time, ``false`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
return self.get_assessment_taken().get_assessment_offered().has_duration()
def get_allocated_time(self):
"""Gets the allocated time for this section.
return: (osid.calendaring.Duration) - allocated time
raise: IllegalState - ``has_allocated_time()`` is ``false``
*compliance: mandatory -- This method must be implemented.*
"""
return self.get_assessment_taken().get_assessment_offered().get_duration()
allocated_time = property(fget=get_allocated_time)
def are_items_sequential(self):
"""Tests if the items or parts in this section are taken sequentially.
return: (boolean) - ``true`` if the items are taken
sequentially, ``false`` if the items can be skipped and
revisited
*compliance: mandatory -- This method must be implemented.*
"""
return self.get_assessment_taken().get_assessment_offered().are_items_sequential()
def are_items_shuffled(self):
"""Tests if the items or parts appear in a random order.
return: (boolean) - ``true`` if the items appear in a random
order, ``false`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
return self.get_assessment_taken().get_assessment_offered().are_items_shuffled()
@utilities.arguments_not_none
def get_assessment_section_record(self, assessment_section_record_type):
"""Gets the assessment section record corresponding to the given ``AssessmentSection`` record ``Type``.
This method is used to retrieve an object implementing the
requested record. The ``assessment_section_record_type`` may be
the ``Type`` returned in ``get_record_types()`` or any of its
parents in a ``Type`` hierarchy where
        ``has_record_type(assessment_section_record_type)`` is ``true``.
arg: assessment_section_record_type (osid.type.Type): an
assessment section record type
return: (osid.assessment.records.AssessmentSectionRecord) - the
assessment section record
raise: NullArgument - ``assessment_section_record_type`` is
``null``
raise: OperationFailed - unable to complete request
raise: Unsupported -
``has_record_type(assessment_section_record_type)`` is
``false``
*compliance: mandatory -- This method must be implemented.*
"""
return self._get_record(assessment_section_record_type)
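# Note (added for clarity; not in the original source): AssessmentSection keeps
# no timing or sequencing state of its own. has_allocated_time(),
# get_allocated_time(), are_items_sequential() and are_items_shuffled() all
# delegate through the owning AssessmentTaken to its AssessmentOffered, which
# is the single source of truth for those settings.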
class AssessmentSectionList(abc_assessment_objects.AssessmentSectionList, osid_objects.OsidList):
"""Like all ``OsidLists,`` ``AssessmentSectionList`` provides a means for accessing ``AssessmentSection`` elements
sequentially either one at a time or many at a time.
Examples: while (asl.hasNext()) { AssessmentSection section =
asl.getNextAssessmentSection();
or
while (asl.hasNext()) {
    AssessmentSection[] sections = asl.getNextAssessmentSections(asl.available());
}
"""
def get_next_assessment_section(self):
"""Gets the next ``AssessmentSection`` in this list.
return: (osid.assessment.AssessmentSection) - the next
``AssessmentSection`` in this list. The ``has_next()``
method should be used to test that a next
``AssessmentSection`` is available before calling this
method.
raise: IllegalState - no more elements available in this list
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceList.get_next_resource
return self.next()
def next(self):
return self._get_next_object(AssessmentSection)
next_assessment_section = property(fget=get_next_assessment_section)
@utilities.arguments_not_none
def get_next_assessment_sections(self, n):
"""Gets the next set of ``AssessmentSection`` elements in this list which must be less than or equal to the
number returned from ``available()``.
arg: n (cardinal): the number of ``AssessmentSection``
elements requested which should be less than or equal to
``available()``
return: (osid.assessment.AssessmentSection) - an array of
            ``AssessmentSection`` elements. The length of the array
is less than or equal to the number specified.
raise: IllegalState - no more elements available in this list
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceList.get_next_resources
return self._get_next_n(n)
class Bank(abc_assessment_objects.Bank, osid_objects.OsidCatalog):
"""A bank defines a collection of assessments and items."""
_record_type_data_sets = {}
_namespace = 'assessment.Bank'
def __init__(self, osid_catalog_map, runtime=None):
osid_objects.OsidCatalog.__init__(self, osid_catalog_map, runtime)
self._record_type_data_sets = self._get_registry('BANK_RECORD_TYPES')
self._records = dict()
# This check is here for transition purposes:
try:
self._load_records(osid_catalog_map['recordTypeIds'])
except KeyError:
            print('KeyError: recordTypeIds key not found in ' + self._my_map['displayName']['text'])
self._load_records([]) # In place for transition purposes
@utilities.arguments_not_none
def get_bank_record(self, bank_record_type):
"""Gets the bank record corresponding to the given ``Bank`` record ``Type``.
This method is used to retrieve an object implementing the
requested record. The ``bank_record_type`` may be the ``Type``
returned in ``get_record_types()`` or any of its parents in a
``Type`` hierarchy where ``has_record_type(bank_record_type)``
        is ``true``.
arg: bank_record_type (osid.type.Type): a bank record type
return: (osid.assessment.records.BankRecord) - the bank record
raise: NullArgument - ``bank_record_type`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unsupported - ``has_record_type(bank_record_type)`` is
``false``
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
class BankForm(abc_assessment_objects.BankForm, osid_objects.OsidCatalogForm):
"""This is the form for creating and updating banks.
Like all ``OsidForm`` objects, various data elements may be set here
for use in the create and update methods in the
``BankAdminSession``. For each data element that may be set,
metadata may be examined to provide display hints or data
constraints.
"""
_record_type_data_sets = {}
_namespace = 'assessment.Bank'
def __init__(self, osid_catalog_map=None, record_types=None, runtime=None, **kwargs):
osid_objects.OsidForm.__init__(self, runtime=runtime)
self._record_type_data_sets = self._get_registry('BANK_RECORD_TYPES')
self._kwargs = kwargs
self._init_metadata(**kwargs)
self._records = dict()
if osid_catalog_map is not None:
self._for_update = True
self._my_map = osid_catalog_map
self._load_records(osid_catalog_map['recordTypeIds'])
else:
self._my_map = {}
self._for_update = False
self._init_map(**kwargs)
if record_types is not None:
self._init_records(record_types)
self._supported_record_type_ids = self._my_map['recordTypeIds']
def _init_metadata(self, **kwargs):
osid_objects.OsidObjectForm._init_metadata(self)
osid_objects.OsidSourceableForm._init_metadata(self)
def _init_map(self, **kwargs):
osid_objects.OsidObjectForm._init_map(self)
osid_objects.OsidSourceableForm._init_map(self, **kwargs)
@utilities.arguments_not_none
def get_bank_form_record(self, bank_record_type):
"""Gets the ``BankFormRecord`` corresponding to the given bank record ``Type``.
arg: bank_record_type (osid.type.Type): a bank record type
return: (osid.assessment.records.BankFormRecord) - the bank
record
raise: NullArgument - ``bank_record_type`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unsupported - ``has_record_type(bank_record_type)`` is
``false``
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
class BankList(abc_assessment_objects.BankList, osid_objects.OsidList):
"""Like all ``OsidLists,`` ``BankList`` provides a means for accessing ``Bank`` elements sequentially either one at
a time
or many at a time.
Examples: while (bl.hasNext()) { Bank bank = bl.getNextBank(); }
or
while (bl.hasNext()) {
Bank[] banks = bl.getNextBanks(bl.available());
}
"""
def get_next_bank(self):
"""Gets the next ``Bank`` in this list.
return: (osid.assessment.Bank) - the next ``Bank`` in this list.
The ``has_next()`` method should be used to test that a
next ``Bank`` is available before calling this method.
raise: IllegalState - no more elements available in this list
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceList.get_next_resource
return self.next()
def next(self):
return self._get_next_object(Bank)
next_bank = property(fget=get_next_bank)
@utilities.arguments_not_none
def get_next_banks(self, n):
"""Gets the next set of ``Bank`` elements in this list which must be less than or equal to the return from
``available()``.
arg: n (cardinal): the number of ``Bank`` elements requested
which must be less than or equal to ``available()``
return: (osid.assessment.Bank) - an array of ``Bank``
            elements. The length of the array is less than or equal
to the number specified.
raise: IllegalState - no more elements available in this list
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceList.get_next_resources
return self._get_next_n(n)
class BankNode(abc_assessment_objects.BankNode, osid_objects.OsidNode):
"""This interface is a container for a partial hierarchy retrieval.
The number of hierarchy levels traversable through this interface
depend on the number of levels requested in the
``BankHierarchySession``.
"""
def __init__(self, node_map, runtime=None, proxy=None, lookup_session=None):
osid_objects.OsidNode.__init__(self, node_map)
self._lookup_session = lookup_session
self._runtime = runtime
self._proxy = proxy
def get_object_node_map(self):
node_map = dict(self.get_bank().get_object_map())
node_map['type'] = 'BankNode'
node_map['parentNodes'] = []
node_map['childNodes'] = []
for bank_node in self.get_parent_bank_nodes():
node_map['parentNodes'].append(bank_node.get_object_node_map())
for bank_node in self.get_child_bank_nodes():
node_map['childNodes'].append(bank_node.get_object_node_map())
return node_map
def get_bank(self):
"""Gets the ``Bank`` at this node.
return: (osid.assessment.Bank) - the bank represented by this
node
*compliance: mandatory -- This method must be implemented.*
"""
if self._lookup_session is None:
mgr = get_provider_manager('ASSESSMENT', runtime=self._runtime, proxy=self._proxy)
self._lookup_session = mgr.get_bank_lookup_session()
return self._lookup_session.get_bank(Id(self._my_map['id']))
bank = property(fget=get_bank)
def get_parent_bank_nodes(self):
"""Gets the parents of this bank.
return: (osid.assessment.BankNodeList) - the parents of this
node
*compliance: mandatory -- This method must be implemented.*
"""
parent_bank_nodes = []
for node in self._my_map['parentNodes']:
parent_bank_nodes.append(BankNode(
node._my_map,
runtime=self._runtime,
proxy=self._proxy,
lookup_session=self._lookup_session))
return BankNodeList(parent_bank_nodes)
parent_bank_nodes = property(fget=get_parent_bank_nodes)
def get_child_bank_nodes(self):
"""Gets the children of this bank.
return: (osid.assessment.BankNodeList) - the children of this
node
*compliance: mandatory -- This method must be implemented.*
"""
        child_bank_nodes = []
        for node in self._my_map['childNodes']:
            child_bank_nodes.append(BankNode(
                node._my_map,
                runtime=self._runtime,
                proxy=self._proxy,
                lookup_session=self._lookup_session))
        return BankNodeList(child_bank_nodes)
child_bank_nodes = property(fget=get_child_bank_nodes)
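# Shape sketch (illustrative): get_object_node_map() embeds parent and child
# node maps recursively, so a one-level hierarchy serializes roughly as:
#
#   {'type': 'BankNode', 'id': '...', 'displayName': {...},
#    'parentNodes': [{'type': 'BankNode', ...}],
#    'childNodes': [{'type': 'BankNode', ...}]}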
class BankNodeList(abc_assessment_objects.BankNodeList, osid_objects.OsidList):
"""Like all ``OsidLists,`` ``BankNodeList`` provides a means for accessing ``BankNode`` elements sequentially
either one
at a time or many at a time.
Examples: while (bnl.hasNext()) { BankNode node =
bnl.getNextBankNode(); }
or
while (bnl.hasNext()) {
BankNode[] nodes = bnl.getNextBankNodes(bnl.available());
}
"""
def get_next_bank_node(self):
"""Gets the next ``BankNode`` in this list.
return: (osid.assessment.BankNode) - the next ``BankNode`` in
this list. The ``has_next()`` method should be used to
test that a next ``BankNode`` is available before
calling this method.
raise: IllegalState - no more elements available in this list
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceList.get_next_resource
return self.next()
def next(self):
return self._get_next_object(BankNode)
next_bank_node = property(fget=get_next_bank_node)
@utilities.arguments_not_none
def get_next_bank_nodes(self, n):
"""Gets the next set of ``BankNode`` elements in this list which must be less than or equal to the return from
``available()``.
arg: n (cardinal): the number of ``BankNode`` elements
requested which must be less than or equal to
``available()``
return: (osid.assessment.BankNode) - an array of ``BanklNode``
elements.The length of the array is less than or equal
to the number specified.
raise: IllegalState - no more elements available in this list
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceList.get_next_resources
return self._get_next_n(n)
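    # Python sketches of the Java-style examples in the class docstring
    # (hedged; `bnl` is a hypothetical BankNodeList instance):
    #
    #   while bnl.has_next():
    #       node = bnl.get_next_bank_node()
    #
    #   while bnl.has_next():
    #       nodes = bnl.get_next_bank_nodes(bnl.available())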
class ResponseList(abc_assessment_objects.ResponseList, osid_objects.OsidList):
"""Like all ``OsidLists,`` ``ResponseList`` provides a means for accessing ``Response`` elements sequentially
either one
at a time or many at a time.
Examples: while (rl.hasNext()) { Response response =
rl.getNextResponse(); }
or
while (rl.hasNext()) {
Response[] responses = rl.getNextResponses(rl.available());
}
"""
def get_next_response(self):
"""Gets the next ``Response`` in this list.
return: (osid.assessment.Response) - the next ``Response`` in
this list. The ``has_next()`` method should be used to
test that a next ``Response`` is available before
calling this method.
raise: IllegalState - no more elements available in this list
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceList.get_next_resource
return self.next()
def next(self):
return self._get_next_object(Response)
next_response = property(fget=get_next_response)
@utilities.arguments_not_none
def get_next_responses(self, n):
"""Gets the next set of ``Response`` elements in this list which must be less than or equal to the return from
``available()``.
arg: n (cardinal): the number of ``Response`` elements
requested which must be less than or equal to
``available()``
return: (osid.assessment.Response) - an array of ``Response``
elements.The length of the array is less than or equal
to the number specified.
raise: IllegalState - no more elements available in this list
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceList.get_next_resources
return self._get_next_n(n)
| birdland/dlkit-doc | dlkit/mongo/assessment/objects.py | Python | mit | 131,041 |
# Python 2 module (Queue, xrange, and the print statement are Python 2 names).
from FlightInfo import FlightInfo
from Interval import *  # assumed to provide scheduled_start_time, scheduled_end_time, delayed_start_time
from Gate import Gate
from Queue import PriorityQueue
from random import random, randint


# Assigns intervals to gates greedily by start time, always reusing the gate
# that frees up earliest (interval partitioning with a priority queue).
def assign(intervals, num_gates=0, start_time=scheduled_start_time, end_time=scheduled_end_time):
    gates = [Gate() for i in xrange(0, num_gates)]
    # Priority queue keyed by each gate's earliest free time; -9999 marks a
    # gate that is free from the start of the day.
    pq = PriorityQueue()
    for gate in gates:
        pq.put((-9999, gate))
    sorted_intervals = sorted(intervals, key=start_time)
    while sorted_intervals:
        interval = sorted_intervals.pop(0)
        if not pq.empty():
            earliest_end_time, gate = pq.get()
            if earliest_end_time <= start_time(interval):
                # The earliest-free gate is available: reuse it.
                gate.append(interval)
                pq.put((end_time(interval), gate))
                continue
            # No gate frees up in time; restore the entry.
            pq.put((earliest_end_time, gate))
        # Open a new gate holding this interval.
        pq.put((end_time(interval), Gate(interval)))
gates = []
while not pq.empty():
gates.append(pq.get()[1])
return gates
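
# A hedged usage sketch for assign(); the Interval constructor arguments are
# hypothetical, and the accessor defaults come from the star import above.
#
#   intervals = [Interval(0, 30), Interval(10, 45), Interval(40, 60)]
#   gates = assign(intervals, num_gates=2)
#   for i, gate in enumerate(gates):
#       print 'gate %d holds %d intervals' % (i, len(gate))
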
# delays each flight with probability p by a random number of minutes in [min_delay, max_delay]
def delay_flight_infos(flight_infos, p, min_delay, max_delay):
for flight_info in flight_infos:
if random() < p:
dx = randint(min_delay, max_delay)
flight_info.add_delay(dx)
else:
flight_info.add_delay(0)
# reassigns intervals in gates if there are collisions
# returns [gates, overflow gates, # of reassignments]
def reassign(gates, intervals):
    def get_slack(gate, index, interval):
        # Slack = idle time between this interval's delayed end and the next
        # interval's scheduled start at this gate; 1439 (the last minute of a
        # 24h day) is used when the interval would be last at the gate.
        start = gate[index + 1].scheduled_start_time if index < len(gate) - 1 else 1439
        return start - interval.delayed_end_time
interval_to_gate = {}
for gate in gates:
        for interval in gate:
interval_to_gate[interval] = gate
reassign_count = 0
overflow_gates = []
sorted_intervals = sorted(intervals, key=delayed_start_time)
while sorted_intervals:
interval = sorted_intervals.pop(0)
if interval.delayed():
gate = interval_to_gate[interval]
index = gate.index(interval)
            # Check for collisions (maybe no need to reassign): the previous
            # interval's delayed end must not run past this interval's delayed
            # start, and this interval's delayed end must not run past the
            # next interval's scheduled start.
            collision = ((index > 0 and
                          gate[index - 1].delayed_end_time > interval.delayed_start_time) or
                         (index + 1 < len(gate) and
                          interval.delayed_end_time > gate[index + 1].scheduled_start_time))
if not collision:
continue
# find gate with most slack to reassign to
gate.remove(interval)
most_slack_gate = None
most_slack_index = None
most_slack = None
for gate2 in gates + overflow_gates:
index = gate2.free_index(interval)
if index < 0: # no free slots
continue
slack = get_slack(gate2, index, interval)
if most_slack is None or most_slack < slack:
most_slack_gate = gate2
most_slack_index = index
most_slack = slack
if most_slack is None: # no gates are free
overflow_gates.append(Gate(interval))
else:
most_slack_gate.insert(most_slack_index, interval)
reassign_count += 1
return [gates, overflow_gates, reassign_count]
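
# A hedged end-to-end sketch under assumed APIs: build intervals from flight
# info, assign gates, inject random delays, then repair any collisions.
#
#   flight_infos = load_flight_infos()                 # hypothetical loader
#   intervals = [fi.interval for fi in flight_infos]   # assumed attribute
#   gates = assign(intervals, num_gates=10)
#   delay_flight_infos(flight_infos, p=0.2, min_delay=5, max_delay=60)
#   gates, overflow_gates, reassign_count = reassign(gates, intervals)
#   print '%d reassignments, %d overflow gates' % (reassign_count, len(overflow_gates))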
| sozos/RAS | source/schedule.py | Python | mit | 2,831 |