code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3 1.05M |
|---|---|---|---|---|---|
from gravity.tae.match import lev_distance
from gravity.tae import distance
from gravity.tae.match.c_lev_distance import fLevDistanceDiag
from gravity.tae.match.c_lev_distance import fLevPath
# Demo/sanity check: the C-backed implementation (c_lev_distance) and the
# pure-Python one (lev_distance) must produce identical matrices and paths.
# Case 1: two identical strings
s1 = "Test Den Haag entity"
s2 = "Test Den Haag entity"
# Matrix from the C implementation
# NOTE(review): the constructor arg (2) is presumably the diagonal band width
# and the third matrix() arg (111) a marker/fill value - confirm in gravity.tae
m1 = fLevDistanceDiag(2).matrix(s1, s2, 111)
print m1.toString(s1, s2)
# Matrix from the pure-Python implementation, same parameters
m2 = lev_distance.fLevDistanceDiag(2).matrix(s1, s2, 111)
print m2.toString(s1, s2)
print m1 == m2
# Edit paths computed by the C fLevPath over both matrices
p1_1 = fLevPath()(m1)
p1_2 = fLevPath()(m2)
print p1_1
print p1_2
# Edit paths computed by the pure-Python fLevPath over both matrices
p2_1 = lev_distance.fLevPath()(m1)
p2_2 = lev_distance.fLevPath()(m2)
print p2_1
print p2_2
print m2.toString(s1, s2, p1_2)
# Both implementations must agree on matrix and on every path variant
assert m1 == m2
assert p1_1 == p1_2 and p1_2 == p2_1 and p2_1 == p2_2
## 01234567890123456
# Case 2: strings that actually differ (typo + capitalisation)
s1 = "Test amsterdamm entity"
s2 = "Test Amsterdam entity"
print "==="*40
m = fLevDistanceDiag(24).matrix(s2, s1, 222)
print m.toString(s2, s1, fLevPath()(m))
| vfulco/scalpel | samples/clev_sample_2.py | Python | lgpl-3.0 | 885 |
from __future__ import print_function
import json
import os
import numpy as np
import sys
import h5py
from gensim.models import Word2Vec
from gensim.utils import simple_preprocess
from keras.engine import Input
from keras.layers import Embedding, merge
from keras.models import Model
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import LSTM
from keras.preprocessing import sequence
from embeddings import Embeddings
from keras.callbacks import ModelCheckpoint
from nltk.tokenize import word_tokenize
import random
# ## Instantiate Embeddings
# NOTE(review): the four constructor args look like embedding size and
# word2vec hyper-parameters - confirm against embeddings.Embeddings
embeddings = Embeddings(100, 4, 1, 4)
# ### getting data from preprocessing
word2vec_weights = embeddings.get_weights()        # (vocab, embed_dim) matrix
word2index, index2word = embeddings.get_vocabulary()
word2vec_model = embeddings.get_model()
tokenized_indexed_sentences = embeddings.get_tokenized_indexed_sentences()
# ### generating training data
# Sliding window: each sample is `window_size` word indices, the target is
# the embedding vector of the following word
window_size = 5
vocab_size = len(word2index)
print(vocab_size)
seq_in = []
seq_out = []
# generating dataset
for sentence in tokenized_indexed_sentences:
    for i in range(len(sentence)-window_size-1):
        x = sentence[i:i + window_size]
        y = sentence[i + window_size]
        seq_in.append(x)#[]
        seq_out.append(word2vec_weights[y])
# converting seq_in and seq_out into numpy array
seq_in = np.array(seq_in)
seq_out = np.array(seq_out)
n_samples = len(seq_in)
print ("Number of samples : ", n_samples)
seq_in.shape
# ## Defining model
# Changes to the model to be done here
# Two stacked LSTMs over pretrained word2vec embeddings; the output layer
# regresses the embedding vector of the next word (hence mse loss below)
model = Sequential()
model.add(Embedding(input_dim = word2vec_weights.shape[0], output_dim = word2vec_weights.shape[1], weights = [word2vec_weights]))
model.add(LSTM(1024,return_sequences = True))
model.add(Dropout(0.2))
model.add(LSTM(512))
#model.add(Dropout(0.2))
model.add(Dense(word2vec_weights.shape[1], activation = 'relu'))
# Resume from a previously trained checkpoint (epoch 24)
model.load_weights("../weights/lstm-2-1024-512-batchsize-128-epochs-25/weights.24-0.22.hdf5")
model.compile(loss = 'mse', optimizer = 'adam',metrics = ['accuracy'])
model.summary()
# Checkpoint every epoch to the weights directory (created if missing)
model_weights_path = "../weights/lstm-2-1024-512-batchsize-128-epochs-25"
if not os.path.exists(model_weights_path):
    os.makedirs(model_weights_path)
checkpoint_path = model_weights_path + '/weights.{epoch:02d}-{val_acc:.2f}.hdf5'
checkpoint = ModelCheckpoint(filepath = checkpoint_path, monitor = 'val_acc', verbose = 1, save_best_only = False, mode = 'max')
# ## Train Model
#model_fit_summary = model.fit(seq_in, seq_out, epochs=25, verbose=1, validation_split=0.2, batch_size=128, callbacks=[checkpoint])
# ### model predict
# (Removed three stray notebook-export expressions that referenced `pattern`,
# `sent` and `seq_in` before they were defined - they raised NameError and
# had no effect anyway.)
start = 0
sentence_test = "In which regions in particular did"
# Convert the seed sentence into word indices
indexed_sentences = embeddings.tokenize_index_sentence(sentence_test)
print("indexed_sentences ",indexed_sentences)
sent = np.array(indexed_sentences)
#pattern = list(seq_in[start])
pattern = list(sent[start])
print("\"",' '.join(index2word[index] for index in pattern))
# Greedy generation: predict the next-word embedding, map it back to the
# nearest vocabulary word, append and repeat
for i in range(5):
    prediction = model.predict(np.array([pattern]))
    pred_word = word2vec_model.similar_by_vector(prediction[0])[0][0]
    sys.stdout.write(pred_word+" ")
    pattern.append(word2index[pred_word])
    # NOTE(review): this slice is a no-op; a sliding window would be
    # pattern[1:] - confirm the intended context behaviour
    pattern = pattern[:len(pattern)]
e_model = embeddings.get_model()
e_model.similar_by_word("profitabl")
# ## Accuracy
def accuracy():
    """
    Compute training accuracy: for every (window, target-embedding) sample,
    the prediction counts as correct when the nearest vocabulary word of the
    predicted vector is identical to the nearest word of the true vector.

    Returns the accuracy as a float in [0, 1] (0.0 for an empty dataset).
    """
    count = 0
    correct = 0
    for sub_sample_in, sub_sample_out in zip(seq_in, seq_out):
        ypred = model.predict_on_batch(np.expand_dims(sub_sample_in, axis = 0))[0]
        ytrue = sub_sample_out
        pred_word = word2vec_model.similar_by_vector(ypred)[0][0]
        true_word = word2vec_model.similar_by_vector(ytrue)[0][0]
        similarity = word2vec_model.similarity(pred_word, true_word)
        if similarity == 1:
            correct += 1
        count += 1
    # float() guards against Python 2 integer division; also avoid ZeroDivisionError
    acc = float(correct) / count if count else 0.0
    print("Accuracy {0}".format(acc))
    # Return the value so callers (e.g. model_results below) can record it;
    # previously this returned None while still being assigned at call sites
    return acc
#seq_out[0]
accuracy()
# Persist training history + params + accuracy as JSON
# NOTE(review): model_fit_summary is only assigned by the model.fit() call
# above, which is commented out - as written this line raises NameError;
# re-enable the fit call (or load a saved history) before running this section
model_results = model_fit_summary.history
model_results.update(model_fit_summary.params)
model_results["train_accuracy"] = accuracy()
# n = no. of predictions
# accuracy = accuracy(400)
#print(model_results)
text_file_path = "../weights/lstm-2-1024-512-batchsize-128-epochs-25/model_results.json"
with open(text_file_path, "w") as f:
    json.dump(model_results, f)
| nishant-jain-94/Autofill | src/lstm-2-1024-512-batchsize-128-epochs-25-acc%3D1.py | Python | gpl-3.0 | 4,277 |
from django.contrib import admin
from sorl.thumbnail.admin import AdminImageMixin
from events.models import Event
class EventAdmin(AdminImageMixin, admin.ModelAdmin):
    """ Admin configuration for Event, with sorl thumbnail support. """
    # use objects instead of the default manager
    def queryset(self, request):
        # use our manager, rather than the default one
        # NOTE(review): get_query_set()/queryset() are the pre-Django-1.6
        # spellings (renamed get_queryset) - confirm the Django version in use
        qs = self.model.objects.get_query_set()
        # we need this from the superclass method
        # provide an alternative so we don't try to get *None
        ordering = self.ordering or ()
        if ordering:
            qs = qs.order_by(*ordering)
        return qs
    list_display = ('__unicode__', 'start_date', 'end_date',)
    list_filter = ('start_date', 'end_date',)
    # Auto-fill the slug from the name in the admin form
    prepopulated_fields = {'slug': ['name']}
    search_fields = ['name', 'start_date', 'end_date', 'description']
admin.site.register(Event, EventAdmin)
| jbergantine/django-events | events/admin.py | Python | mit | 852 |
# -*- coding: utf-8 -*-
"""
Default Controllers
"""
module = "default"
# -----------------------------------------------------------------------------
def call():
    "Call an XMLRPC, JSONRPC or RSS service"
    # If webservices don't use sessions, avoid cluttering up the storage
    #session.forget()
    # service() is web2py's global Service dispatcher (gluon.tools)
    return service()
# -----------------------------------------------------------------------------
def download():
    """ Download a file (uploaded field content) via web2py's streamer """
    # Load the Model
    # First request arg is the stored filename "<table>.<field>.<key>...";
    # load that table's model so response.download can resolve it
    tablename = request.args[0].split(".", 1)[0]
    s3mgr.load(tablename)
    return response.download(request, db)
# =============================================================================
def register_validation(form):
    """ Custom validation of the user registration form """
    # Mobile phone: validate the number when one was supplied, otherwise
    # enforce the deployment's "phone is mandatory" policy
    mobile = form.vars.mobile if "mobile" in form.vars else None
    if mobile:
        if re.compile(single_phone_number_pattern).match(mobile) is None:
            form.errors.mobile = T("Invalid phone number")
    elif deployment_settings.get_auth_registration_mobile_phone_mandatory():
        form.errors.mobile = T("Phone number is required")
    # Attach the new user to the default organisation, when one is configured
    default_org = deployment_settings.get_auth_registration_organisation_id_default()
    if default_org:
        form.vars.organisation_id = default_org
    return
# -----------------------------------------------------------------------------
def register_onaccept(form):
    """ Tasks to be performed after a new user registers """
    # Add newly-registered users to Person Registry, add 'Authenticated' role
    # If Organisation is provided, then: add HRM record & add to 'Org_X_Access' role
    person_id = auth.s3_register(form)
    if form.vars.organisation_id and not deployment_settings.get_hrm_show_staff():
        # Convert HRM record to a volunteer
        # (type=2 is the HRM "volunteer" type)
        htable = s3db.hrm_human_resource
        query = (htable.person_id == person_id)
        db(query).update(type=2)
    # Add to required roles:
    roles = deployment_settings.get_auth_registration_roles()
    if roles or deployment_settings.has_module("delphi"):
        # Resolve the auth_user record linked to this person
        utable = auth.settings.table_user
        ptable = s3db.pr_person
        ltable = s3db.pr_person_user
        query = (ptable.id == person_id) & \
                (ptable.pe_id == ltable.pe_id) & \
                (ltable.user_id == utable.id)
        user = db(query).select(utable.id,
                                ltable.user_id,
                                limitby=(0, 1)).first()
        if roles:
            # Grant membership of every configured registration role (by uuid)
            gtable = auth.settings.table_group
            mtable = auth.settings.table_membership
            query = (gtable.uuid.belongs(roles))
            rows = db(query).select(gtable.id)
            for role in rows:
                mtable.insert(user_id=user[ltable._tablename].user_id,
                              group_id=role.id)
        if deployment_settings.has_module("delphi"):
            # Add user as a participant of the default problem group
            table = s3db.delphi_group
            query = (table.uuid == "DEFAULT")
            group = db(query).select(table.id,
                                     limitby=(0, 1)).first()
            if group:
                # status=3 presumably means "participant" - confirm in delphi model
                table = s3db.delphi_membership
                table.insert(group_id=group.id,
                             user_id=user[utable._tablename].id,
                             status=3)
# -----------------------------------------------------------------------------
# Hook the registration callbacks defined above into Auth
auth.settings.register_onvalidation = register_validation
auth.settings.register_onaccept = register_onaccept
# Customise the auth_user table: labels, "required" markers and tooltips
_table_user = auth.settings.table_user
_table_user.first_name.label = T("First Name")
_table_user.first_name.comment = SPAN("*", _class="req")
_table_user.last_name.label = T("Last Name")
if deployment_settings.get_L10n_mandatory_lastname():
    _table_user.last_name.comment = SPAN("*", _class="req")
_table_user.email.label = T("E-mail")
_table_user.email.comment = SPAN("*", _class="req")
_table_user.password.comment = SPAN("*", _class="req")
_table_user.language.label = T("Language")
_table_user.language.comment = DIV(_class="tooltip",
                                   _title="%s|%s" % (T("Language"),
                                                     T("The language you wish the site to be displayed in.")))
_table_user.language.represent = lambda opt: s3_languages.get(opt, UNKNOWN_OPT)
# Organisation widget for use in Registration Screen
# NB User Profile is only editable by Admin - using User Management
organisation_represent = s3db.org_organisation_represent
org_widget = IS_ONE_OF(db, "org_organisation.id",
                       organisation_represent,
                       orderby="org_organisation.name",
                       sort=True)
if deployment_settings.get_auth_registration_organisation_mandatory():
    _table_user.organisation_id.requires = org_widget
else:
    _table_user.organisation_id.requires = IS_NULL_OR(org_widget)
# For the User Profile:
_table_user.utc_offset.comment = DIV(_class="tooltip",
                                     _title="%s|%s" % (auth.messages.label_utc_offset,
                                                       auth.messages.help_utc_offset))
_table_user.organisation_id.represent = organisation_represent
_table_user.organisation_id.comment = DIV(_class="tooltip",
                                          _title="%s|%s|%s" % (T("Organization"),
                                                               T("The default Organization for whom you are acting."),
                                                               T("This setting can only be controlled by the Administrator.")))
org_site_represent = s3db.org_site_represent
_table_user.site_id.represent = org_site_represent
_table_user.site_id.comment = DIV(_class="tooltip",
                                  _title="%s|%s|%s" % (T("Facility"),
                                                       T("The default Facility for which you are acting."),
                                                       T("This setting can only be controlled by the Administrator.")))
# =============================================================================
def index():
    """
        Main Home Page

        Assembles the CMS intro text, the facility/situation/decision/response
        menu boxes, the facility/organisation management boxes (for permitted
        users), the login/registration forms and the optional RSS feed control.
    """
    title = deployment_settings.get_system_name()
    response.title = title
    # Optional CMS-driven intro text for this controller
    item = ""
    if deployment_settings.has_module("cms"):
        table = s3db.cms_post
        item = db(table.module == module).select(table.body,
                                                 limitby=(0, 1)).first()
        if item:
            item = DIV(XML(item.body))
        else:
            item = ""
    if deployment_settings.has_module("cr"):
        s3mgr.load("cr_shelter")
        SHELTERS = s3.crud_strings["cr_shelter"].subtitle_list
    else:
        SHELTERS = ""
    # Menu Boxes
    menu_btns = [#div, label, app, function
                 ["facility", SHELTERS, "cr", "shelter"],
                 ["facility", T("Warehouses"), "inv", "warehouse"],
                 ["facility", T("Hospitals"), "hms", "hospital"],
                 ["facility", T("Offices"), "org", "office"],
                 ["sit", T("Incidents"), "irs", "ireport"],
                 ["sit", T("Assessments"), "survey", "series"],
                 ["sit", T("Assets"), "asset", "asset"],
                 ["sit", T("Inventory Items"), "inv", "inv_item"],
                 #["dec", T("Gap Map"), "project", "gap_map"],
                 #["dec", T("Gap Report"), "project", "gap_report"],
                 ["dec", T("Requests"), "req", "req"],
                 ["res", T("Projects"), "project", "project"],
                 ["res", T("Activities"), "project", "activity"],
                 ["res", T("Commitments"), "req", "commit"],
                 ["res", T("Sent Shipments"), "inv", "send"],
                 ["res", T("Received Shipments"), "inv", "recv"]
                ]
    # Change to (Mitigation)/Preparedness/Response/Recovery?
    menu_divs = {"facility": DIV( H3(T("Facilities")),
                                  _id = "facility_box", _class = "menu_box"),
                 "sit": DIV( H3(T("Situation")),
                             _id = "menu_div_sit", _class = "menu_div"),
                 "dec": DIV( H3(T("Decision")),
                             _id = "menu_div_dec", _class = "menu_div"),
                 "res": DIV( H3(T("Response")),
                             _id = "menu_div_res", _class = "menu_div"),
                }
    # Only show buttons for enabled modules
    for div, label, app, function in menu_btns:
        if deployment_settings.has_module(app):
            # @ToDo: Also check permissions (e.g. for anonymous users)
            menu_divs[div].append(A( DIV(label,
                                         _class = "menu-btn-r"),
                                     _class = "menu-btn-l",
                                     _href = URL(app,function)
                                    )
                                 )
    div_arrow = DIV(IMG(_src = "/%s/static/img/arrow_blue_right.png" % \
                        request.application),
                    _class = "div_arrow")
    sit_dec_res_box = DIV(menu_divs["sit"],
                          div_arrow,
                          menu_divs["dec"],
                          div_arrow,
                          menu_divs["res"],
                          _id = "sit_dec_res_box",
                          _class = "menu_box fleft swidth"
                          #div_additional,
                         )
    facility_box = menu_divs["facility"]
    facility_box.append( A( IMG(_src = "/%s/static/img/map_icon_128.png" % \
                                request.application),
                            _href = URL(c="gis", f="index"),
                            _title = T("Map")
                           )
                        )
    datatable_ajax_source = ""
    # Check logged in AND permissions
    if AUTHENTICATED in session.s3.roles and \
       auth.s3_has_permission("read", db.org_organisation):
        org_items = organisation()
        datatable_ajax_source = "/%s/default/organisation.aaData" % \
                                request.application
        response.s3.actions = None
        response.view = "default/index.html"
        auth.permission.controller = "org"
        auth.permission.function = "site"
        permitted_facilities = auth.permission.permitted_facilities(redirect_on_error=False)
        manage_facility_box = ""
        if permitted_facilities:
            # Build the "Manage Your Facilities" selector + Go button
            facility_list = s3_represent_facilities(db, permitted_facilities,
                                                    link=False)
            facility_opts = [OPTION(opt[1], _value = opt[0])
                             for opt in facility_list]
            if facility_list:
                manage_facility_box = DIV(H3(T("Manage Your Facilities")),
                                          SELECT(_id = "manage_facility_select",
                                                 _style = "max-width:400px;",
                                                 *facility_opts
                                                ),
                                          A(T("Go"),
                                            _href = URL(c="default", f="site",
                                                        args=[facility_list[0][0]]),
                                            #_disabled = "disabled",
                                            _id = "manage_facility_btn",
                                            _class = "action-btn"
                                           ),
                                          _id = "manage_facility_box",
                                          _class = "menu_box fleft")
                response.s3.jquery_ready.append( """
$('#manage_facility_select').change(function() {
    $('#manage_facility_btn').attr('href', S3.Ap.concat('/default/site/', $('#manage_facility_select').val()));
})""" )
            else:
                manage_facility_box = DIV()
        org_box = DIV( H3(T("Organizations")),
                       A(T("Add Organization"),
                         _href = URL(c="org", f="organisation",
                                     args=["create"]),
                         _id = "add-btn",
                         _class = "action-btn",
                         _style = "margin-right: 10px;"),
                       org_items["items"],
                       _id = "org_box",
                       _class = "menu_box fleft"
                      )
    else:
        manage_facility_box = ""
        org_box = ""
    # @ToDo: Replace this with an easily-customisable section on the homepage
    #settings = db(db.s3_setting.id == 1).select(limitby=(0, 1)).first()
    #if settings:
    #    admin_name = settings.admin_name
    #    admin_email = settings.admin_email
    #    admin_tel = settings.admin_tel
    #else:
    #    # db empty and prepopulate is false
    #    admin_name = T("Sahana Administrator").xml(),
    #    admin_email = "support@Not Set",
    #    admin_tel = T("Not Set").xml(),
    # Login/Registration forms
    self_registration = deployment_settings.get_security_self_registration()
    registered = False
    login_form = None
    login_div = None
    register_form = None
    register_div = None
    if AUTHENTICATED not in session.s3.roles:
        # This user isn't yet logged-in
        if request.cookies.has_key("registered"):
            # This browser has logged-in before
            registered = True
        if self_registration:
            # Provide a Registration box on front page
            request.args = ["register"]
            if deployment_settings.get_terms_of_service():
                auth.messages.submit_button = T("I accept. Create my account.")
            else:
                auth.messages.submit_button = T("Register")
            register_form = auth()
            register_div = DIV(H3(T("Register")),
                               P(XML(T("If you would like to help, then please %(sign_up_now)s") % \
                                        dict(sign_up_now=B(T("sign-up now"))))))
            # Add client-side validation
            s3_register_validation()
            if session.s3.debug:
                response.s3.scripts.append( "%s/jquery.validate.js" % s3_script_dir )
            else:
                response.s3.scripts.append( "%s/jquery.validate.min.js" % s3_script_dir )
            if request.env.request_method == "POST":
                post_script = """// Unhide register form
$('#register_form').removeClass('hide');
// Hide login form
$('#login_form').addClass('hide');"""
            else:
                post_script = ""
            register_script = """
// Change register/login links to avoid page reload, make back button work.
$('#register-btn').attr('href', '#register');
$('#login-btn').attr('href', '#login');
%s
// Redirect Register Button to unhide
$('#register-btn').click(function() {
    // Unhide register form
    $('#register_form').removeClass('hide');
    // Hide login form
    $('#login_form').addClass('hide');
});
// Redirect Login Button to unhide
$('#login-btn').click(function() {
    // Hide register form
    $('#register_form').addClass('hide');
    // Unhide login form
    $('#login_form').removeClass('hide');
});""" % post_script
            response.s3.jquery_ready.append(register_script)
        # Provide a login box on front page
        request.args = ["login"]
        auth.messages.submit_button = T("Login")
        login_form = auth()
        login_div = DIV(H3(T("Login")),
                        P(XML(T("Registered users can %(login)s to access the system" % \
                                dict(login=B(T("login")))))))
    # Optional Google dynamic-feed control fed by the configured RSS feeds
    if deployment_settings.frontpage.rss:
        response.s3.external_stylesheets.append( "http://www.google.com/uds/solutions/dynamicfeed/gfdynamicfeedcontrol.css" )
        response.s3.scripts.append( "http://www.google.com/jsapi?key=notsupplied-wizard" )
        response.s3.scripts.append( "http://www.google.com/uds/solutions/dynamicfeed/gfdynamicfeedcontrol.js" )
        counter = 0
        feeds = ""
        for feed in deployment_settings.frontpage.rss:
            counter += 1
            feeds = "".join((feeds,
                             "{title: '%s',\n" % feed["title"],
                             "url: '%s'}" % feed["url"]))
            # Don't add a trailing comma for old IEs
            if counter != len(deployment_settings.frontpage.rss):
                feeds += ",\n"
        feed_control = "".join(("""
function LoadDynamicFeedControl() {
    var feeds = [
        """, feeds, """
    ];
    var options = {
        // milliseconds before feed is reloaded (5 minutes)
        feedCycleTime : 300000,
        numResults : 5,
        stacked : true,
        horizontal : false,
        title : '""", str(T("News")), """'
    };
    new GFdynamicFeedControl(feeds, 'feed-control', options);
}
// Load the feeds API and set the onload callback.
google.load('feeds', '1');
google.setOnLoadCallback(LoadDynamicFeedControl);"""))
        response.s3.js_global.append( feed_control )
    return dict(title = title,
                item = item,
                sit_dec_res_box = sit_dec_res_box,
                facility_box = facility_box,
                manage_facility_box = manage_facility_box,
                org_box = org_box,
                r = None, # Required for dataTable to work
                datatable_ajax_source = datatable_ajax_source,
                #admin_name=admin_name,
                #admin_email=admin_email,
                #admin_tel=admin_tel,
                self_registration=self_registration,
                registered=registered,
                login_form=login_form,
                login_div=login_div,
                register_form=register_form,
                register_div=register_div
                )
# -----------------------------------------------------------------------------
def organisation():
    """
        Function to handle pagination for the org list on the homepage
    """
    table = db.org_organisation
    table.id.label = T("Organization")
    table.id.represent = organisation_represent
    # dataTables configuration for the homepage list
    response.s3.dataTable_sPaginationType = "two_button"
    response.s3.dataTable_sDom = "rtip" #"frtip" - filter broken
    response.s3.dataTable_iDisplayLength = 25
    # Read-only list: no inline add, just an "Add" button linking through
    s3mgr.configure("org_organisation",
                    listadd = False,
                    addbtn = True,
                    super_entity = db.pr_pentity,
                    linkto = "/%s/org/organisation/%s" % (request.application,
                                                          "%s"),
                    list_fields = ["id",])
    return s3_rest_controller("org", "organisation")
# -----------------------------------------------------------------------------
def site():
    """
        Redirect to the controller/function of a site instance record
        (e.g. /default/site/<site_id> -> /org/office/<id>)

        Raises HTTP 404 when the site or its instance record cannot be found
        (previously this crashed with AttributeError on the missing record).

        @todo: Avoid redirect
    """
    s3mgr.load("org_site")
    if len(request.args):
        site_id = request.args[0]
        site_r = db.org_site[site_id]
        if site_r:
            # instance_type is the concrete tablename, e.g. "org_office"
            tablename = site_r.instance_type
            table = s3db.table(tablename)
            if table:
                query = (table.site_id == site_id)
                record = db(query).select(db[tablename].id,
                                          limitby = (0, 1)).first()
                if record:
                    cf = tablename.split("_", 1)
                    redirect(URL(c = cf[0],
                                 f = cf[1],
                                 args = [record.id]))
    raise HTTP(404)
# -----------------------------------------------------------------------------
def message():
    """ Confirmation page shown after registration (verify-email notice) """
    #if "verify_email_sent" in request.args:
    substitutions = {"system_name": deployment_settings.get_system_name(),
                     "email": request.vars.email}
    body = T( "%(system_name)s has sent an email to %(email)s to verify your email address.\nPlease check your email to verify this address. If you do not receive this email please check you junk email or spam filters." ) % substitutions
    return dict(title = T("Account Registered - Please Check Your Email"),
                message = body,
                image_src = "/%s/static/img/email_icon.png" % request.application)
# -----------------------------------------------------------------------------
def rapid():
    """ Set/remove rapid data entry flag """
    # Any value other than an explicit "0" enables the flag
    session.s3.rapid_data_entry = request.vars.get("val", True) != "0"
    response.view = "xml.html"
    return dict(item=str(session.s3.rapid_data_entry))
# -----------------------------------------------------------------------------
def user_profile_onaccept(form):
    """ Update the UI locale from user profile """
    language = form.vars.language
    if language:
        session.s3.language = language
    return
# -----------------------------------------------------------------------------
def user():
    """
        Auth functions based on arg. See gluon/tools.py

        Dispatches to login/register/change_password/profile/retrieve_password
        based on the first request arg, with Eden-specific customisations
        (opt-in team handling, OpenID list on the profile, etc.)
    """
    auth.settings.on_failed_authorization = URL(f="error")
    _table_user = auth.settings.table_user
    if request.args and request.args(0) == "profile":
        #_table_user.organisation.writable = False
        _table_user.utc_offset.readable = True
        _table_user.utc_offset.writable = True
    # If we have an opt_in and some post_vars then update the opt_in value
    if deployment_settings.get_auth_opt_in_to_email() and request.post_vars:
        opt_list = deployment_settings.get_auth_opt_in_team_list()
        removed = []
        selected = []
        for opt_in in opt_list:
            if opt_in in request.post_vars:
                selected.append(opt_in)
            else:
                removed.append(opt_in)
        ptable = s3db.pr_person
        putable = s3db.pr_person_user
        query = (putable.user_id == request.post_vars.id) & \
                (putable.pe_id == ptable.pe_id)
        person_id = db(query).select(ptable.id, limitby=(0, 1)).first().id
        db(ptable.id == person_id).update(opt_in = selected)
        g_table = s3db["pr_group"]
        gm_table = s3db["pr_group_membership"]
        # Remove them from any team they are a member of in the removed list
        for team in removed:
            query = (g_table.name == team) & \
                    (gm_table.group_id == g_table.id) & \
                    (gm_table.person_id == person_id)
            gm_rec = db(query).select(g_table.id, limitby=(0, 1)).first()
            if gm_rec:
                db(gm_table.id == gm_rec.id).delete()
        # Add them to the team (if they are not already a team member)
        for team in selected:
            query = (g_table.name == team) & \
                    (gm_table.group_id == g_table.id) & \
                    (gm_table.person_id == person_id)
            gm_rec = db(query).select(g_table.id, limitby=(0, 1)).first()
            if not gm_rec:
                query = (g_table.name == team)
                team_rec = db(query).select(g_table.id, limitby=(0, 1)).first()
                # if the team doesn't exist then add it
                if team_rec == None:
                    team_id = g_table.insert(name = team, group_type = 5)
                else:
                    team_id = team_rec.id
                gm_table.insert(group_id = team_id,
                                person_id = person_id)
    auth.settings.profile_onaccept = user_profile_onaccept
    self_registration = deployment_settings.get_security_self_registration()
    login_form = register_form = None
    if request.args and request.args(0) == "login":
        auth.messages.submit_button = T("Login")
        form = auth()
        login_form = form
        if s3.crud.submit_style:
            form[0][-1][1][0]["_class"] = s3.crud.submit_style
    elif request.args and request.args(0) == "register":
        if not self_registration:
            session.error = T("Registration not permitted")
            redirect(URL(f="index"))
        if deployment_settings.get_terms_of_service():
            auth.messages.submit_button = T("I accept. Create my account.")
        else:
            auth.messages.submit_button = T("Register")
        # Default the profile language to the one currently active
        _table_user.language.default = T.accepted_language
        form = auth()
        register_form = form
        # Add client-side validation
        s3_register_validation()
    elif request.args and request.args(0) == "change_password":
        form = auth()
    elif request.args and request.args(0) == "profile":
        # Build the profile form first; previously the OpenID branch read
        # `form` before it was ever assigned (UnboundLocalError)
        form = auth()
        if deployment_settings.get_auth_openid():
            # Append the list of the user's OpenIDs below the profile form
            form = DIV(form, openid_login_form.list_user_openids())
        # add an opt in clause to receive emails depending on the deployment settings
        if deployment_settings.get_auth_opt_in_to_email():
            ptable = s3db.pr_person
            ltable = s3db.pr_person_user
            opt_list = deployment_settings.get_auth_opt_in_team_list()
            query = (ltable.user_id == form.record.id) & \
                    (ltable.pe_id == ptable.pe_id)
            db_opt_in_list = db(query).select(ptable.opt_in, limitby=(0, 1)).first().opt_in
            for opt_in in opt_list:
                # Per-team element id; previously this interpolated the whole
                # opt_list, giving every checkbox row the same broken id
                field_id = "%s_opt_in_%s" % (_table_user, opt_in)
                if opt_in in db_opt_in_list:
                    checked = "selected"
                else:
                    checked = None
                form[0].insert(-1,
                               TR(TD(LABEL("Receive %s updates:" % opt_in,
                                           _for="opt_in",
                                           _id=field_id + SQLFORM.ID_LABEL_SUFFIX),
                                     _class="w2p_fl"),
                                  INPUT(_name=opt_in, _id=field_id, _type="checkbox", _checked=checked),
                                  _id=field_id + SQLFORM.ID_ROW_SUFFIX))
    else:
        # Retrieve Password
        form = auth()
    # Use Custom Ext views
    # Best to not use an Ext form for login: can't save username/password in browser & can't hit 'Enter' to submit!
    #if request.args(0) == "login":
    #    response.title = T("Login")
    #    response.view = "auth/login.html"
    return dict(form=form,
                login_form=login_form,
                register_form=register_form,
                self_registration=self_registration)
# -----------------------------------------------------------------------------
def facebook():
    """ Login via the Facebook OAuth account provider """
    # Fall back to the standard login when Facebook auth isn't configured
    if not auth.settings.facebook:
        redirect(URL(f="user", args=request.args, vars=request.vars))
    auth.settings.login_form = s3base.FaceBookAccount()
    return dict(form=auth())
# -----------------------------------------------------------------------------
def google():
    """ Login via the Google+ OAuth account provider """
    # Fall back to the standard login when Google auth isn't configured
    if not auth.settings.google:
        redirect(URL(f="user", args=request.args, vars=request.vars))
    auth.settings.login_form = s3base.GooglePlusAccount()
    return dict(form=auth())
# -----------------------------------------------------------------------------
def source():
    """ RESTful CRUD controller for the s3_source resource """
    return s3_rest_controller("s3", "source")
# -----------------------------------------------------------------------------
# About Sahana
def apath(path=""):
    """ Resolve *path* relative to the web2py applications directory """
    import os
    from gluon.fileutils import up
    #TODO: This path manipulation is very OS specific.
    base = up(request.folder)
    # Consume any leading "../" segments by walking up the directory tree
    while path.startswith("../"):
        base = up(base)
        path = path[3:]
    return os.path.join(base, path).replace("\\", "/")
def about():
    """
        The About page provides details on the software dependencies and
        versions available to this instance of Sahana Eden.
        @ToDo: Avoid relying on Command Line tools which may not be in path
               - pull back info from Python modules instead?
    """
    import sys
    import subprocess
    import string
    python_version = sys.version
    web2py_version = open(apath("../VERSION"), "r").read()[8:]
    sahana_version = open(os.path.join(request.folder, "VERSION"), "r").read()
    # Database
    # Probe only the backend named in db_string; the bare excepts below are
    # deliberate best-effort - any failure just reports "Unknown"/not installed
    sqlite_version = None
    mysql_version = None
    mysqldb_version = None
    pgsql_version = None
    psycopg_version = None
    if db_string[0].find("sqlite") != -1:
        try:
            import sqlite3
            #sqlite_version = (subprocess.Popen(["sqlite3", "-version"], stdout=subprocess.PIPE).communicate()[0]).rstrip()
            sqlite_version = sqlite3.version
        except:
            sqlite_version = T("Unknown")
    elif db_string[0].find("mysql") != -1:
        try:
            mysql_version = (subprocess.Popen(["mysql", "--version"], stdout=subprocess.PIPE).communicate()[0]).rstrip()[10:]
        except:
            mysql_version = T("Unknown")
        try:
            import MySQLdb
            mysqldb_version = MySQLdb.__revision__
        except:
            mysqldb_version = T("Not installed or incorrectly configured.")
    else:
        # Postgres
        try:
            pgsql_reply = (subprocess.Popen(["psql", "--version"], stdout=subprocess.PIPE).communicate()[0])
            # NOTE(review): string.split() is a Python-2-only API
            pgsql_version = string.split(pgsql_reply)[2]
        except:
            pgsql_version = T("Unknown")
        try:
            import psycopg2
            psycopg_version = psycopg2.__version__
        except:
            psycopg_version = T("Not installed or incorrectly configured.")
    # Libraries
    try:
        import reportlab
        reportlab_version = reportlab.Version
    except:
        reportlab_version = T("Not installed or incorrectly configured.")
    try:
        import xlwt
        xlwt_version = xlwt.__VERSION__
    except:
        xlwt_version = T("Not installed or incorrectly configured.")
    return dict(
        python_version=python_version,
        sahana_version=sahana_version,
        web2py_version=web2py_version,
        sqlite_version=sqlite_version,
        mysql_version=mysql_version,
        mysqldb_version=mysqldb_version,
        pgsql_version=pgsql_version,
        psycopg_version=psycopg_version,
        reportlab_version=reportlab_version,
        xlwt_version=xlwt_version
        )
# -----------------------------------------------------------------------------
def help():
    """ Serve the custom static Help view """
    response.title = T("Help")
    return {}
# -----------------------------------------------------------------------------
def contact():
    """
        Give the user options to contact the site admins.
        Either:
            An internal Support Requests database
        or:
            Custom View
    """
    if auth.is_logged_in() and deployment_settings.has_module("support"):
        # Provide an internal Support Requests ticketing system.
        prefix = "support"
        resourcename = "req"
        tablename = "%s_%s" % (prefix, resourcename)
        table = s3db[tablename]
        # Pre-processor
        def prep(r):
            # Restrict status/actions fields to Admins, and hide them
            # entirely outside the update form
            if r.interactive:
                # Only Admins should be able to update ticket status
                status = table.status
                actions = table.actions
                if not auth.s3_has_role(ADMIN):
                    status.writable = False
                    actions.writable = False
                if r.method != "update":
                    status.readable = False
                    status.writable = False
                    actions.readable = False
                    actions.writable = False
            return True
        response.s3.prep = prep
        output = s3_rest_controller(prefix, resourcename)
        return output
    else:
        # Default: Simple Custom View
        response.title = T("Contact us")
        return dict()
# END =========================================================================
| flavour/iscram | controllers/default.py | Python | mit | 32,108 |
from logging import info, warning as warn
from servi.command import Command
from servi.exceptions import ServiError
from servi.template_mgr import TemplateManager
from servi.command import process_and_run_command_line as servi_run
"""
Logic:
* Error if
* Master is changed and template is changed (and not ignored)
* Warning if
* Ignored file and master and template both changed
(eg: template Servifile has new content)
* Possible role changed
(eg: role in playbook.yml is commented out but still there and
is changed)
"""
class UpdateCommand(Command):
    """ The `servi update` sub-command: merge the latest template into MASTER. """
    def register_command_line(self, sub_parsers):
        # Wire the 'update' sub-command into the top-level argument parser
        parser_update = sub_parsers.add_parser(
            'update', help='Update project with latest template')
        parser_update.set_defaults(command_func=self.run)
    def run(self, args, extra_args):
        # Files modified in BOTH master and template are conflicts, unless
        # they are on the ignore list (then we only warn)
        t = TemplateManager()
        ignored = TemplateManager.ignored_files
        master_and_tmpl_changed = t.m_mod & t.t_mod
        #if len(changed_or_removed_files - changed_but_ignored_files) > 0:
        if master_and_tmpl_changed - ignored(master_and_tmpl_changed):
            raise ServiError(
                'The following files were changed in your master and updated '
                'in the template:\n'
                '{0}\n\n'
                'If you want to reinitialize your templates '
                '(with automatic backup) run "servi init -f". \n'
                'If you want to copy a specific file, do "servi copy". \n'
                'To see all changes, do "servi diff".'
                .format(master_and_tmpl_changed
                        - ignored(master_and_tmpl_changed)))
        if ignored(master_and_tmpl_changed):
            warn('The following files from the template were changed but\n'
                 'are on your SERVI_IGNORE_FILES list and will not be '
                 'updated:\n'
                 '{0}\n\n'
                 'Try a "servi diff" followed by "servi copy" to manually '
                 'update changed ignored files.\n'
                 .format(sorted(ignored(master_and_tmpl_changed))))
        if t.modified_possible_roles:
            # Commented-out playbook roles that differ between the versions
            warn('The following lines in your ansible_confg/playbook.yml '
                 'looked like roles that are commented out.\n'
                 'The Template and Master versions differ.\n'
                 '** Because they are commented, they are ignored.**\n'
                 '{0}\n'.format(sorted(t.modified_possible_roles)))
        info('Updating MASTER with Servi template version: {0}'
             .format(t.m_template.template_version))
        t.update_master()
        info('MASTER updated. Showing diff (after update).')
        # Re-enter servi's own CLI to show the post-update diff
        servi_run("-v2 diff -l")
        return True
| rr326/servi | servi/commands/update.py | Python | mit | 2,813 |
# -*- coding: utf-8 -*-
# Demonstrates try/except/finally: the finally clause always runs, whether
# the read finishes normally, the file is missing, or the user hits Ctrl+C.
import sys
import time
f = None
try:
    f = open("poem.txt")
    # Read one line at a time; readline() returns '' at end of file.
    line = f.readline()
    while line:
        print(line, end='', flush=True)
        print('Press ctrl+c now')
        # Keep the program busy so there is time to press Ctrl+C.
        time.sleep(2)
        line = f.readline()
except IOError:
    print("Could not find file 'poem.txt'")
except KeyboardInterrupt:
    print("!! You canceled the reading from the file.")
finally:
    # Runs no matter how the try block exited.
    if f:
        f.close()
        print("(Cleaning up: Closed the file)")
| SyrakuShaikh/python | learning/a_byte_of_python/exceptions_finally.py | Python | gpl-3.0 | 650 |
import os
import sys
import shutil
# Platform-specific path separator ("\\" on Windows, "/" elsewhere).
# NOTE(review): os.sep / os.path.join would cover this -- kept as-is.
if os.name == "nt":
    SLASH = "\\"
else:
    SLASH = "/"
# Directory containing this script, with a trailing separator appended.
CWD = os.path.dirname(os.path.realpath(__file__)) + SLASH
# Side effect at import time: make the script's directory the working directory.
os.chdir(CWD)
def list_folder():
    """Archive every eligible sub-directory of the script's directory.

    VCS and bytecode-cache folders are skipped; each remaining directory
    is printed and handed to makecbz(), and the working directory is
    restored to CWD after every call.
    """
    for entry in os.scandir(CWD):
        if not os.path.isdir(entry):
            continue
        label = str(entry)
        if ".git" in label or "pycache" in label:
            continue
        print(entry)
        makecbz(entry)
        os.chdir(CWD)
def makecbz(folder):
    """Zip every immediate sub-directory of *folder* into a .cbz archive.

    Each sub-directory (typically one chapter) becomes "<subdir>.cbz"
    inside *folder*. Note: this changes the process working directory
    to *folder* as a side effect.

    folder -- path (or os.DirEntry) of the series directory.
    """
    folder = os.path.abspath(os.path.join(folder, ""))
    print(os.path.exists(folder))
    os.chdir(folder)
    for directory in os.scandir(folder):
        if directory.is_dir():
            zipname = os.path.basename(os.path.abspath(directory.path))
            print(zipname)
            # make_archive returns the path of the archive it created
            # ("<zipname>.zip", relative to the current directory).
            archive = shutil.make_archive(zipname, 'zip', directory.path)
            # Bug fix: the old code used zipname.replace(".zip", ".cbz"),
            # which rewrites the FIRST ".zip" occurrence and corrupts
            # directory names that themselves contain ".zip"; only the
            # final extension may change.
            os.rename(archive, zipname + ".cbz")
# TODO: try cbz subfolders(chapters), create zip folder with all cbz files and then cbz everything
def __main__():
    """Entry point: archive every series folder next to this script."""
    # NOTE(review): "__main__" is an unusual function name (it shadows the
    # module-name convention); "main" would be more conventional.
    list_folder()
if __name__ == "__main__":
__main__() | WhosMyName/MangaFoxCatcher | cbzarchiver.py | Python | gpl-3.0 | 1,126 |
# -*- coding: utf-8 -*-
"""
Liquid is a form management tool for web frameworks.
Copyright (C) 2014, Bence Faludi (b.faludi@mito.hu)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
    along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
class State( object ):
    """Runtime state of a form element.

    Tracks whether the element is required, hidden, read-only, disabled
    or focused, and carries the current validation error message (if any).
    """
    def __init__( self, required, hidden, readonly, disabled, focus, error = None ):
        """Create a state holder.

        @param required: must the element be filled out?
        @param hidden: is the element invisible?
        @param readonly: is the element read-only?
        @param disabled: is the element disabled?
        @param focus: is the element the active one?
        @param error: current error message, or None
        """
        self.setError( error )
        self.setRequired( required )
        self.setHidden( hidden )
        self.setReadonly( readonly )
        self.setDisabled( disabled )
        self.setFocus( focus )
    def getState( self ):
        """Return the full state as a plain dictionary."""
        return dict(
            required = self.isRequired(),
            readonly = self.isReadonly(),
            focus = self.isFocus(),
            disabled = self.isDisabled(),
            hidden = self.isHidden(),
            error = self.getError()
        )
    def setError( self, error ):
        """Store the error message shown on the interface."""
        self.error = error
    def setRequired( self, required = True ):
        """Mark the element as required (or not)."""
        self.required = required
    def setFocus( self, focus = True ):
        """Mark the element as the active one (or not)."""
        self.focus = focus
    def setHidden( self, hidden = True ):
        """Set the element's visibility."""
        self.hidden = hidden
    def setReadonly( self, readonly = True ):
        """Mark the element as read-only (or not)."""
        self.readonly = readonly
    def setDisabled( self, disabled = True ):
        """Mark the element as disabled (or not)."""
        self.disabled = disabled
    def isError( self ):
        """True when an error message is set."""
        return self.error is not None
    def isRequired( self ):
        """True when the element must be filled out."""
        return self.required
    def isFocus( self ):
        """True when the element is the active one."""
        return self.focus
    def isHidden( self ):
        """True when the element is invisible."""
        return self.hidden
    def isReadonly( self ):
        """True when the element is read-only."""
        return self.readonly
    def isDisabled( self ):
        """True when the element is disabled."""
        return self.disabled
    def isActive( self ):
        """True when the element takes part in validation.

        Hidden, read-only and disabled elements are never validated.
        """
        return not ( self.isHidden() or self.isReadonly() or self.isDisabled() )
    def getError( self ):
        """Return the current error message (None when there is none)."""
        return self.error
| bfaludi/liquid4m | liquid4m/state.py | Python | gpl-3.0 | 5,432 |
#!/usr/bin/env python
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import sys
from DIRAC.Core.Base import Script
from DIRAC.Core.Utilities.DIRACScript import DIRACScript
__RCSID__ = "$Id$"
@DIRACScript()
def main():
  """Fetch the DIRAC component status table and print it as ASCII columns."""
  Script.parseCommandLine(ignoreErrors=True)
  args = Script.getPositionalArgs()
  # Only these columns are rendered in the output table.
  fieldsToShow = ('ComponentName', 'Type', 'Host', 'Port', 'Status', 'Message')
  from DIRAC.FrameworkSystem.Client.MonitoringClient import gMonitor
  result = gMonitor.getComponentsStatusWebFormatted(sortingList=[['ComponentName', 'ASC']])
  if not result['OK']:
    print("ERROR: %s" % result['Message'])
    sys.exit(1)
  paramNames = result['Value']['ParameterNames']
  records = result['Value']['Records']
  # Column widths: start from the header widths, then widen to fit the data
  # of the columns that are actually shown.
  widths = [len(param) for param in paramNames]
  for record in records:
    for i, value in enumerate(record):
      if paramNames[i] in fieldsToShow:
        widths[i] = max(widths[i], len(str(value)))
  # Header row and separator line.
  shown = [i for i in range(len(paramNames)) if paramNames[i] in fieldsToShow]
  print("|".join(paramNames[i].ljust(widths[i]) for i in shown))
  print("+".join("-" * widths[i] for i in shown))
  # One row per component, padded to the computed column widths.
  for record in records:
    cells = [str(record[i]).ljust(widths[i])
             for i in range(len(record)) if paramNames[i] in fieldsToShow]
    print("|".join(cells))
# Standard DIRAC script entry point.
if __name__ == "__main__":
  main()
| yujikato/DIRAC | src/DIRAC/FrameworkSystem/scripts/dirac_monitoring_get_components_status.py | Python | gpl-3.0 | 1,633 |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for getting the model object by name and configuration."""
from demogen.models.nin import Nin
from demogen.models.resnet import ResNet
def get_model(model_name, config):
  """Create a callable model function according to config.

  Args:
    model_name: The type of model to be created ('nin' or 'resnet').
    config: A tf.contrib.training.HParams data structure
      for model hyperparameters.

  Returns:
    A callable model function built according to the config.

  Raises:
    NotImplementedError: If model_name is not a supported model type.
  """
  if model_name == 'nin':
    # Scale the base NIN width (192 filters) by the config's width multiplier.
    width = int(192 * config.wide)
    return Nin(
        width,
        dropout=config.dropout,
        batchnorm=config.batchnorm,
        decay_fac=config.decay_fac,
        num_classes=config.num_class,
        spatial_dropout=config.spatial_dropout
    )
  elif model_name == 'resnet':
    # CIFAR-style ResNet-32: three groups of (32 - 2) / 6 residual blocks.
    resnet_size = 32
    num_blocks = (resnet_size - 2) // 6
    return ResNet(
        bottleneck=False,
        num_filters=int(16 * config.wide),
        kernel_size=3,
        conv_stride=1,
        first_pool_size=None,
        first_pool_stride=None,
        block_sizes=[num_blocks] * 3,
        block_strides=[1, 2, 2],
        pre_activation=True,
        weight_decay=config.weight_decay,
        norm_type=config.normalization,
        loss_filter_fn=lambda _: True,
        num_classes=config.num_class,
    )
  # Bug fix: the old message said "is not in dataset", which misdescribes
  # the failure -- it is the *model type* that is unsupported.
  raise NotImplementedError('Model {} is not supported'.format(model_name))
| google-research/google-research | demogen/models/get_model.py | Python | apache-2.0 | 1,997 |
# Copyright (c) 2017 The Khronos Group Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Imports
#
import bpy
import os
from .gltf2_debug import *
#
# Globals
#
#
# Functions
#
def get_used_materials():
    """
    Gathers and returns all unfiltered, valid Blender materials.

    Node-based materials are included only when they contain a glTF PBR
    node group ('glTF Metallic Roughness' or 'glTF Specular Glossiness');
    materials without a node tree are always included.
    """
    materials = []
    for blender_material in bpy.data.materials:
        if blender_material.node_tree and blender_material.use_nodes:
            # Scan the node tree for one of the recognized glTF PBR groups.
            for currentNode in blender_material.node_tree.nodes:
                if isinstance(currentNode, bpy.types.ShaderNodeGroup):
                    if currentNode.node_tree.name.startswith('glTF Metallic Roughness'):
                        materials.append(blender_material)
                    elif currentNode.node_tree.name.startswith('glTF Specular Glossiness'):
                        materials.append(blender_material)
        else:
            # Non-node materials are exported unconditionally.
            materials.append(blender_material)
    return materials
def get_material_requires_texcoords(glTF, index):
    """
    Return True if the material at the given index references any texture
    and therefore needs texture coordinates.
    """
    materials = glTF.get('materials')
    if materials is None:
        return False
    if index < 0 or index >= len(materials):
        return False
    material = materials[index]
    # Any of these texture slots (core glTF, metallic-roughness,
    # specular-glossiness or common-material) implies the mesh must
    # provide UV coordinates.
    texture_slots = (
        'emissiveTexture',
        'normalTexture',
        'occlusionTexture',
        'baseColorTexture',
        'metallicRoughnessTexture',
        'diffuseTexture',
        'specularGlossinessTexture',
        'specularTexture',
        'shininessTexture',
        'ambientTexture'
    )
    return any(material.get(slot) is not None for slot in texture_slots)
def get_material_requires_normals(glTF, index):
    """
    Query function, if a material "needs" normals. This is the case, if a texture is present and used.
    At point of writing, same function as for texture coordinates.
    """
    # Deliberate delegation: any textured material also needs normals.
    return get_material_requires_texcoords(glTF, index)
def get_image_index(export_settings, uri):
    """
    Return the image index in the glTF array, or -1 when the uri is unknown.
    """
    known_uris = export_settings['gltf_uri']
    if known_uris is None:
        return -1
    try:
        return known_uris.index(uri)
    except ValueError:
        return -1
def get_texture_index_by_filepath(export_settings, glTF, filepath):
    """
    Return the texture index in the glTF array by a given filepath.
    """
    if filepath is None:
        return -1
    # Normalize the Blender filepath to the exported PNG uri.
    uri = get_uri(filepath)
    if export_settings['gltf_uri'] is None:
        return -1
    if glTF.get('textures') is None:
        return -1
    image_uri = export_settings['gltf_uri']
    index = 0
    # Textures reference images by index; compare each referenced image uri.
    for texture in glTF['textures']:
        current_image_uri = image_uri[texture['source']]
        if current_image_uri == uri:
            return index
        index += 1
    return -1
def get_texture_index(export_settings, glTF, name, shader_node_group):
    """
    Return the texture index in the glTF array.

    Looks up the named input of the shader node group, follows its link to
    the image texture node, and resolves the image's filepath.
    """
    if shader_node_group is None:
        return -1
    if not isinstance(shader_node_group, bpy.types.ShaderNodeGroup):
        return -1
    if shader_node_group.inputs.get(name) is None:
        return -1
    if len(shader_node_group.inputs[name].links) == 0:
        return -1
    from_node = shader_node_group.inputs[name].links[0].from_node
    #
    # Only image texture nodes with a valid (non-empty) image count.
    if not isinstance(from_node, bpy.types.ShaderNodeTexImage):
        return -1
    if from_node.image is None or from_node.image.size[0] == 0 or from_node.image.size[1] == 0:
        return -1
    return get_texture_index_by_filepath(export_settings, glTF, from_node.image.filepath)
def get_texcoord_index(glTF, name, shader_node_group):
    """
    Return the texture coordinate index, if assigned and used.

    Follows the named input to its image texture node and the texture's
    'Vector' input to a UV map node, then resolves the UV map name to a
    texture-coordinate set index. Defaults to 0 in all other cases.
    """
    if shader_node_group is None:
        return 0
    if not isinstance(shader_node_group, bpy.types.ShaderNodeGroup):
        return 0
    if shader_node_group.inputs.get(name) is None:
        return 0
    if len(shader_node_group.inputs[name].links) == 0:
        return 0
    from_node = shader_node_group.inputs[name].links[0].from_node
    #
    if not isinstance(from_node, bpy.types.ShaderNodeTexImage):
        return 0
    #
    if len(from_node.inputs['Vector'].links) == 0:
        return 0
    input_node = from_node.inputs['Vector'].links[0].from_node
    if not isinstance(input_node, bpy.types.ShaderNodeUVMap):
        return 0
    if input_node.uv_map == '':
        return 0
    #
    # Try to gather map index.
    # NOTE(review): this scans ALL meshes and returns the first mesh that
    # defines a UV layer with this name -- assumes the layer index is
    # consistent across meshes; verify.
    for blender_mesh in bpy.data.meshes:
        texCoordIndex = blender_mesh.uv_textures.find(input_node.uv_map)
        if texCoordIndex >= 0:
            return texCoordIndex
    return 0
def get_material_index(glTF, name):
    """
    Return the material index in the glTF array, or -1 when not found.
    """
    if name is None:
        return -1
    if glTF.get('materials') is None:
        return -1
    for index, material in enumerate(glTF['materials']):
        if material['name'] == name:
            return index
    return -1
def get_mesh_index(glTF, name):
    """
    Return the mesh index in the glTF array, or -1 when not found.
    """
    if glTF.get('meshes') is None:
        return -1
    for index, mesh in enumerate(glTF['meshes']):
        if mesh['name'] == name:
            return index
    return -1
def get_skin_index(glTF, name, index_offset):
    """
    Return the skin index in the glTF array (plus index_offset), or -1.

    Skins reference their skeleton root by node index, so the node name is
    resolved first.
    """
    if glTF.get('skins') is None:
        return -1
    skeleton = get_node_index(glTF, name)
    for index, skin in enumerate(glTF['skins']):
        if skin['skeleton'] == skeleton:
            return index + index_offset
    return -1
def get_camera_index(glTF, name):
    """
    Return the camera index in the glTF array, or -1 when not found.
    """
    if glTF.get('cameras') is None:
        return -1
    for index, camera in enumerate(glTF['cameras']):
        if camera['name'] == name:
            return index
    return -1
def get_light_index_cmn(glTF, name):
    """
    Return the light index inside the KHR_lights_cmn extension, or -1.
    """
    extensions = glTF.get('extensions')
    if extensions is None:
        return -1
    khr_lights_cmn = extensions.get('KHR_lights_cmn')
    if khr_lights_cmn is None:
        return -1
    lights = khr_lights_cmn.get('lights')
    if lights is None:
        return -1
    for index, light in enumerate(lights):
        if light['name'] == name:
            return index
    return -1
def get_light_index_pbr(glTF, name):
    """
    Return the light index inside the KHR_lights_pbr extension, or -1.
    """
    extensions = glTF.get('extensions')
    if extensions is None:
        return -1
    khr_lights_pbr = extensions.get('KHR_lights_pbr')
    if khr_lights_pbr is None:
        return -1
    lights = khr_lights_pbr.get('lights')
    if lights is None:
        return -1
    for index, light in enumerate(lights):
        if light['name'] == name:
            return index
    return -1
def get_node_index(glTF, name):
    """
    Return the node index in the glTF array, or -1 when not found.
    """
    if glTF.get('nodes') is None:
        return -1
    for index, node in enumerate(glTF['nodes']):
        if node['name'] == name:
            return index
    return -1
def get_scene_index(glTF, name):
    """
    Return the scene index in the glTF array, or -1 when not found.
    """
    if glTF.get('scenes') is None:
        return -1
    for index, scene in enumerate(glTF['scenes']):
        if scene['name'] == name:
            return index
    return -1
def get_uri(filepath):
    """
    Return the final PNG uri depending on a filepath.

    The exporter always emits PNG images, so the extension of the source
    filepath is replaced by '.png'.
    """
    return os.path.splitext(bpy.path.basename(filepath))[0] + '.png'
def get_node(data_path):
    """
    Extract the node name from a Blender data path, i.e. the text between
    the first '["' and the following '"'. Returns None when the path has
    no such segment (or is None).
    """
    if data_path is None:
        return None
    prefix, bracket, remainder = data_path.partition('["')
    if not bracket:
        return None
    node_name, quote, _ = remainder.partition('"')
    if not quote:
        return None
    return node_name
def get_data_path(data_path):
    """
    Return the last component of a Blender data path (the part after the
    final '.'), or the whole string when it contains no dot.
    """
    head, dot, tail = data_path.rpartition('.')
    return tail if dot else data_path
def get_scalar(default_value, init_value = 0.0):
    """
    Return default_value, falling back to init_value when it is None.
    """
    return init_value if default_value is None else default_value
def get_vec2(default_value, init_value = [0.0, 0.0]):
    """
    Return a 2-component list from default_value, falling back to a copy of
    init_value when default_value is None or too short.
    """
    # Bug fix: the old code wrote into init_value itself. Default arguments
    # are evaluated once, so the shared [0.0, 0.0] default (and any
    # caller-supplied fallback list) was mutated, leaking values between
    # calls. Work on a copy instead.
    return_value = list(init_value)
    if default_value is None or len(default_value) < 2:
        return return_value
    for index in range(2):
        return_value[index] = default_value[index]
    return return_value
def get_vec3(default_value, init_value = [0.0, 0.0, 0.0]):
    """
    Return a 3-component list from default_value, falling back to a copy of
    init_value when default_value is None or too short.
    """
    # See get_vec2: copy to avoid mutating the shared default list.
    return_value = list(init_value)
    if default_value is None or len(default_value) < 3:
        return return_value
    for index in range(3):
        return_value[index] = default_value[index]
    return return_value
def get_vec4(default_value, init_value = [0.0, 0.0, 0.0, 1.0]):
    """
    Return a 4-component list from default_value, falling back to a copy of
    init_value when default_value is None or too short.
    """
    # See get_vec2: copy to avoid mutating the shared default list.
    return_value = list(init_value)
    if default_value is None or len(default_value) < 4:
        return return_value
    for index in range(4):
        return_value[index] = default_value[index]
    return return_value
def get_index(list, name):
    """
    Return the index of the element whose 'name' equals *name*, or -1.

    NOTE: mirrors the original contract -- the search aborts with -1 as
    soon as an element without a 'name' key is encountered.
    """
    if list is None or name is None:
        return -1
    for index, element in enumerate(list):
        if element.get('name') is None:
            return -1
        if element['name'] == name:
            return index
    return -1
| nikoladimitroff/Zmey | Tools/BlenderPlugins/scripts/addons/io_scene_gltf2/gltf2_get.py | Python | mit | 11,682 |
# PID controller
# Author: Kannan K Puthuval, University of Illinois, kputhuva@illinois.edu
# Updated: 2014-10-03
# Description: This provides an implementation of PID control.
# Dependencies: xml
# To do
# handle scheduling
import xml.etree.ElementTree as ET
class PID:
    """Discrete PID controller with conditional integration and output clamping.

    Sign convention: error = input - target. The derivative term acts on the
    measured input (not the error), which avoids derivative kick on setpoint
    changes.
    """
    def __init__(self, target=0, kP=0, kI=0, kD=0, outMin=0, outMax=1):
        self.target = target
        self.kP = kP
        self.kI = kI
        self.kD = kD
        self.outMin = outMin
        self.outMax = outMax
        self.lastInput = 0
        self.I = 0
        self.error = 0
        self.output = 0
    def update(self, input):
        """Feed one measurement and return the new (clamped) control output."""
        self.error = input - self.target
        self.P = self.kP * self.error
        # Anti-windup: accumulate the integral only while the previous
        # output was strictly inside the allowed range.
        if self.outMin < self.output < self.outMax:
            self.I = self.I + self.kI * self.error
        # Derivative on measurement.
        self.D = self.kD * (input - self.lastInput)
        self.lastInput = input
        # Clamp the combined terms into [outMin, outMax].
        raw = self.P + self.I + self.D
        self.output = max(self.outMin, min(self.outMax, raw))
        return self.output
    def setTarget(self, target):
        """Change the setpoint."""
        self.target = target
    def setParams(self, target=0, kP=0, kI=0, kD=0, outMin=0, outMax=1):
        """Replace all tuning parameters (does not reset the integral state)."""
        self.target = target
        self.kP = kP
        self.kI = kI
        self.kD = kD
        self.outMin = outMin
        self.outMax = outMax
#!/usr/bin/env python3
import unittest
import seasons
class NamingSchemeTestCase(unittest.TestCase):
    # Shared fixture: one Episode (season-1 file "dbs.s1.e5.mp4") that every
    # test below renames with a different naming scheme, pretending it
    # belongs to season 3.
    testep = seasons.Episode('Dragon Ball Super',
                             5,
                             'dbs.s1.e5.mp4',
                             extension='mp4')
    def test_series_title(self):
        # {t} expands to the series title verbatim.
        scheme = '{t} - S{s} E{e}'
        self.testep.makefilename(3, scheme)
        self.assertIn('Dragon Ball Super', self.testep.newfilename)
    def test_series_title_dots(self):
        # {t.dot} expands to the title with spaces replaced by dots.
        scheme = '{t.dot} - S{s} E{e}'
        self.testep.makefilename(3, scheme)
        self.assertIn('Dragon.Ball.Super', self.testep.newfilename)
    def test_season_num(self):
        # {s} expands to the unpadded season number.
        scheme = '{t.dot} - S{s} E{e}'
        self.testep.makefilename(3, scheme)
        self.assertIn('S3', self.testep.newfilename)
    def test_season_num_pad(self):
        # {s.pad} expands to the zero-padded season number.
        scheme = '{t.dot} - S{s.pad} E{e}'
        self.testep.makefilename(3, scheme)
        self.assertIn('S03', self.testep.newfilename)
    def test_episode_num(self):
        # {e} expands to the unpadded episode number.
        scheme = '{t.dot} - S{s} E{e}'
        self.testep.makefilename(3, scheme)
        self.assertIn('E5', self.testep.newfilename)
    def test_episode_num_pad(self):
        # {e.pad} expands to the zero-padded episode number.
        scheme = '{t.dot} - S{s} E{e.pad}'
        self.testep.makefilename(3, scheme)
        self.assertIn('E05', self.testep.newfilename)
if __name__ == '__main__':
    unittest.main()
| t-sullivan/rename-TV | test.py | Python | mit | 1,397 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import common
import main_viacom
# Station identifiers used by the shared Viacom scraper.
SITE = "logotv"
NAME = "LogoTV"
ALIAS = ["Logo"]
DESCRIPTION = "Logo TV is an American digital cable and satellite television channel that is owned by Viacom Media Networks. The channel focuses on lifestyle programming aimed primarily at lesbian, gay, bisexual, and transgender people."
# Base endpoint of the MTV/Viacom feed API and the station's show list.
API = "http://api.mtv.com/api/hVqrnHigT6Rq/"
SHOWS = API + "promolist/10394912.json"
def masterlist():
    # Full list of shows for this station, delegated to the shared scraper.
    return main_viacom.masterlist(SITE, SHOWS)
def seasons(url = common.args.url):
    # NOTE(review): the default is evaluated once at import time -- assumes
    # common.args is populated before this module is imported; verify.
    return main_viacom.seasons(SITE, API, url)
def episodes(url = common.args.url):
    # Episode listing for a season (same import-time-default caveat).
    return main_viacom.episodes(SITE, url)
def play():
    # Resolve and play the currently selected video.
    main_viacom.play_video2(API, common.args.url)
def list_qualities():
    # Available stream qualities for the currently selected video.
    return main_viacom.list_qualities2(API, common.args.url)
| moneymaker365/plugin.video.ustvvod | resources/lib/stations/logotv.py | Python | gpl-2.0 | 811 |
#!/usr/bin/env python
# The Gedit XML Tools plugin provides many useful tools for XML development.
# Copyright (C) 2008 Simon Wenner, Copyright (C) 2012 Jono Finger
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from gi.repository import Gtk, Gedit, GObject
from lxml import etree
# Tools menu items
# XML fragment merged into gedit's UIManager: adds three entries to the
# Tools menu. The action names (XMLvalidate, XMLrelaxng, XMLxpath) must
# match the actions registered in XMLToolsWindowHelper._insert_menu().
ui_str = """<ui>
<menubar name="MenuBar">
<menu name="ToolsMenu" action="Tools">
<placeholder name="ToolsOps_2">
<menuitem name="XMLTools1" action="XMLvalidate"/>
<menuitem name="XMLTools2" action="XMLrelaxng"/>
<menuitem name="XMLTools3" action="XMLxpath"/>
</placeholder>
</menu>
</menubar>
</ui>
"""
def validateXML(xml_string):
    """Parse xml_string with lxml.

    Returns the root element on success; on failure returns a list of
    (line, message) error tuples (callers distinguish the two by type).
    """
    try:
        etree.clear_error_log()
        parser = etree.XMLParser()
        return etree.fromstring(xml_string, parser)
    except etree.XMLSyntaxError as e:
        return [(error.line, error.message) for error in e.error_log]
    except Exception as e:
        return [(0, "unknown error " + str(e))]
def validateRelaxNG(xml):
    """Build a RelaxNG validator from an already-parsed schema document.

    Returns the validator on success; on failure returns a list of
    (line, message) error tuples (callers distinguish the two by type).
    """
    try:
        return etree.RelaxNG(xml)
    except etree.RelaxNGError as e:
        return [(error.line, error.message) for error in e.error_log]
    except Exception as e:
        return [(0, "unknown error " + str(e))]
def runXpath(xml, xpath_query):
    """Evaluate xpath_query against the parsed document.

    Returns the matches rendered as text, one per line. Query errors are
    reported inside the returned string instead of being raised.
    """
    result = ""
    try:
        for node in xml.xpath(xpath_query):
            # Bug fix: etree.tostring() returns bytes by default, which
            # cannot be concatenated with str on Python 3; request text
            # output instead. XPath may also yield plain strings/numbers
            # (e.g. attribute queries), not only elements.
            if isinstance(node, etree._Element):
                result += etree.tostring(node, encoding='unicode') + "\n"
            else:
                result += str(node) + "\n"
    except Exception as e:
        result = "XPath syntax error: " + str(e) + "\n"
    return result
class XMLToolsWindowHelper:
    """Per-window helper: owns the bottom output panel and the Tools menu
    entries, and runs the validation/XPath actions for one gedit window."""
    def __init__(self, plugin, window):
        self._window = window
        self._plugin = plugin
        # add bottom panel field
        self._scroll_field = Gtk.ScrolledWindow()
        self._panel_field = Gtk.TextView()
        self._output_buffer = self._panel_field.get_buffer()
        self._scroll_field.add_with_viewport(self._panel_field)
        # set properties of panel field
        self._panel_field.set_editable(False)
        self._scroll_field.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
        self._scroll_field.show_all()
        panel = window.get_bottom_panel()
        image = Gtk.Image()
        image.set_from_stock(Gtk.STOCK_DND_MULTIPLE, Gtk.IconSize.BUTTON)
        panel.add_item(self._scroll_field, "XML Tools", "XML Tools", image)
        # Insert menu items
        self._insert_menu()
    def deactivate(self):
        """Undo everything __init__ installed (menu items and panel)."""
        # Remove any installed menu items
        self._remove_menu()
        panel = self._window.get_bottom_panel()
        panel.remove_item(self._scroll_field)
        self._scroll_field = None
        self._window = None
        self._plugin = None
        self._action_group = None
    def _insert_menu(self):
        # Register the three actions and merge ui_str into the menu bar.
        # Action names must match the ones referenced in ui_str.
        manager = self._window.get_ui_manager()
        self._action_group = Gtk.ActionGroup("XMLToolsPyPluginActions")
        self._action_group.add_actions([("XMLvalidate", None, _("Validate XML"),
                                         'F5', _("Validate an XML file"),
                                         self.validate_document)])
        self._action_group.add_actions([("XMLrelaxng", None, _("Validate RelaxNG"),
                                         None, _("Validate an RelaxNG file"),
                                         self.validate_relaxng)])
        self._action_group.add_actions([("XMLxpath", None, _("Run XPath query"),
                                         None, _("XPath query editor"),
                                         self.create_xpath_query_editor)])
        manager.insert_action_group(self._action_group, -1)
        self._ui_id = manager.add_ui_from_string(ui_str)
    def _remove_menu(self):
        # Reverse of _insert_menu(): drop the merged UI and the action group.
        manager = self._window.get_ui_manager()
        manager.remove_ui(self._ui_id)
        manager.remove_action_group(self._action_group)
        manager.ensure_update()
    def update_ui(self):
        # The menu entries only make sense when a document is open.
        self._action_group.set_sensitive(self._window.get_active_document() != None)
    def create_xpath_query_editor(self, action):
        """Open the XPath query editor window for this gedit window."""
        qwin = XMLQueryWindow(self)
    def validate_document(self, action):
        """Validate the active document as XML and show the result in the panel."""
        doc = self._window.get_active_document()
        if not doc:
            return
        buff = "Validating: " + doc.get_uri_for_display() + "\n"
        xmlText = doc.get_text(doc.get_start_iter(), doc.get_end_iter(), True)
        # validateXML returns an element on success, an error list otherwise.
        validateResult = validateXML(xmlText)
        if type(validateResult) is etree._Element :
            buff += "XML is valid!"
        else:
            buff += "XML is NOT valid!\n"
            for t in validateResult:
                buff += "Error on line: " + str(t[0]) + " -- " + t[1] + "\n"
        self._output_buffer.set_text(buff)
        panel = self._window.get_bottom_panel()
        panel.activate_item(self._scroll_field)
        panel.set_property("visible", True)
    def validate_relaxng(self, action):
        """Validate the active document as a RelaxNG schema and show the result."""
        doc = self._window.get_active_document()
        if not doc:
            return
        buff = "Validating: " + doc.get_uri_for_display() + "\n"
        xmlText = doc.get_text(doc.get_start_iter(), doc.get_end_iter(), True)
        # A RelaxNG schema must first be well-formed XML.
        validateXmlResult = validateXML(xmlText)
        if type(validateXmlResult) is etree._Element :
            validateRngResult = validateRelaxNG(validateXmlResult)
            if type(validateRngResult) is etree.RelaxNG :
                buff += "RelaxNG is valid!"
            else:
                buff += "RelaxNG is NOT valid!\n"
                for t in validateRngResult:
                    buff += "Error on line: " + str(t[0]) + " -- " + t[1] + "\n"
        else:
            buff += "XML is NOT valid!\n"
            for t in validateXmlResult:
                buff += "Error on line: " + str(t[0]) + " -- " + t[1] + "\n"
        self._output_buffer.set_text(buff)
        panel = self._window.get_bottom_panel()
        panel.activate_item(self._scroll_field)
        panel.set_property("visible", True)
    def xpath_query_on_document(self, xpath_string):
        """Run an XPath query against the active document; show the result."""
        doc = self._window.get_active_document()
        if not doc:
            return
        buff = "XPath result:\n"
        xmlText = doc.get_text(doc.get_start_iter(), doc.get_end_iter(), True)
        validateXmlResult = validateXML(xmlText)
        if type(validateXmlResult) is etree._Element :
            buff += runXpath(validateXmlResult, xpath_string)
        else:
            buff += "XML is NOT valid!\n"
            for t in validateXmlResult:
                buff += "Error on line: " + str(t[0]) + " -- " + t[1] + "\n"
        # show result
        self._output_buffer.set_text(buff)
        panel = self._window.get_bottom_panel()
        panel.activate_item(self._scroll_field)
        panel.set_property("visible", True)
class XMLQueryWindow:
    """Small stand-alone GTK window where the user types an XPath query and
    runs it against the active document via the window helper."""
    def __init__(self, window_helper):
        self._window_helper = window_helper
        self._window = Gtk.Window()
        self._window.set_title("XPath query editor")
        self._window.set_border_width(10)
        self._window.set_position(Gtk.WindowPosition.CENTER)
        self._window.connect("delete_event", self.delete_event)
        # Scrollable text view for the query input.
        self.field = Gtk.ScrolledWindow()
        self.tv = Gtk.TextView()
        self.field.add_with_viewport(self.tv)
        self.field.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
        self.field.set_size_request(400, 100)
        vbox = Gtk.VBox(False, 10)
        vbox.pack_start(self.field, True, True, 0)
        # Button row: Execute / Clear on the left, Close on the right.
        hbox = Gtk.HBox(False, 0)
        button = Gtk.Button(None, Gtk.STOCK_EXECUTE)
        button.connect("clicked", self.query_event, None)
        hbox.pack_start(button, False, False, 0)
        button = Gtk.Button(None, Gtk.STOCK_CLEAR)
        button.connect("clicked", self.clear_event, None)
        hbox.pack_start(button, False, False, 0)
        button = Gtk.Button(None, Gtk.STOCK_CLOSE)
        button.connect("clicked", self.delete_event, None)
        hbox.pack_end(button, False, False, 0)
        vbox.pack_start(hbox, False, False, 0)
        self._window.add(vbox)
        self._window.show_all()
    def delete_event(self, widget, event, data=None):
        # Close button / window-manager close: destroy this window.
        self._window.destroy()
        return False
    def query_event(self, widget, event, data=None):
        # Execute button: run the typed query against the active document.
        buff = self.tv.get_buffer()
        self._window_helper.xpath_query_on_document(buff.get_text(buff.get_start_iter(), buff.get_end_iter(), True))
        return False
    def clear_event(self, widget, event, data=None):
        # Clear button: wipe the query text.
        self.tv.set_buffer(Gtk.TextBuffer())
        return False
class WindowActivatable(GObject.Object, Gedit.WindowActivatable):
    """gedit plugin entry point: creates/destroys one XMLToolsWindowHelper
    per gedit window as windows are activated and deactivated."""
    window = GObject.property(type=Gedit.Window)
    def __init__(self):
        GObject.Object.__init__(self)
        # Maps each gedit window to its helper instance.
        self._instances = {}
    def do_activate(self):
        self._instances[self.window] = XMLToolsWindowHelper(self, self.window)
    def do_deactivate(self):
        self._instances[self.window].deactivate()
        del self._instances[self.window]
    def update_ui(self):
        self._instances[self.window].update_ui()
| jonocodes/gedit-xmltools | xmltools.py | Python | gpl-3.0 | 10,152 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# bts_tools - Tools to easily manage the bitshares client
# Copyright (c) 2014 Nicolas Wack <wackou@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from flask import render_template, Flask
from bts_tools import views, core
from bts_tools import rpcutils as rpc
from bts_tools.slogging import sanitize_output
from geolite2 import geolite2
from functools import lru_cache
from datetime import datetime
import bts_tools
import bts_tools.monitor
import threading
import json
import logging
log = logging.getLogger(__name__)
def format_datetime(d):
    """Render a datetime or a timestamp string as 'YYYY-MM-DD HH:MM:SS'.

    Accepts datetime objects, empty/'unknown' strings (passed through),
    already-expanded ISO strings (the 'T' separator is softened to a
    space) and compact 'YYYYMMDDTHHMMSS' stamps.
    """
    if isinstance(d, datetime):
        return d.isoformat(' ')
    if not d.strip():
        return ''
    if d == 'unknown':
        return d
    if '-' in d and ':' in d:
        # Already formatted; just make it slightly nicer.
        return d.replace('T', ' ')
    # Compact stamp: slice out the date and time fields.
    date_part = '%s-%s-%s' % (d[0:4], d[4:6], d[6:8])
    time_part = '%s:%s:%s' % (d[9:11], d[11:13], d[13:15])
    return '%s %s' % (date_part, time_part)
@lru_cache()
def get_country_for_ip(ip):
    """Return the lowercase ISO country code for an IP address, or None.

    Results are memoized via lru_cache; any lookup failure (unknown IP,
    malformed address, missing 'country' record) yields None.
    """
    if not ip.strip():
        return None
    reader = geolite2.reader()
    try:
        return reader.get(ip)['country']['iso_code'].lower()
    except Exception:
        # Bug fix: the old bare "except:" also swallowed SystemExit and
        # KeyboardInterrupt; only ordinary exceptions should map to None.
        return None
def add_ip_flag(ip):
    """Prefix *ip* with a small country-flag icon when its country is known."""
    country = get_country_for_ip(ip)
    if not country:
        # No geo information: show the bare address.
        return ip
    flag_icon = '<i class="famfamfam-flag-%s" style="margin:0 8px 0 0;"></i>' % country
    return '<table><tr><td>%s</td><td>%s</td></tr></table>' % (flag_icon, ip)
def create_app(settings_override=None):
    """Returns the BitShares Delegate Tools Server dashboard application instance"""
    print('creating Flask app bts_tools')
    app = Flask('bts_tools', instance_relative_config=True)

    # settings_override may be a config object/class; None is a no-op here.
    app.config.from_object(settings_override)

    app.register_blueprint(views.bp)

    # Register custom error handlers
    app.errorhandler(404)(lambda e: (render_template('errors/404.html'), 404))

    # custom filter for showing dates
    app.jinja_env.filters['datetime'] = format_datetime
    app.jinja_env.filters['sanitize_output'] = sanitize_output
    app.jinja_env.filters['add_ip_flag'] = add_ip_flag

    # make bts_tools module available in all the templates
    app.jinja_env.globals.update(core=bts_tools.core,
                                 backbone=bts_tools.backbone,
                                 rpc=bts_tools.rpcutils,
                                 network_utils=bts_tools.network_utils,
                                 monitor=bts_tools.monitor,
                                 process=bts_tools.process)

    core.load_db()

    for i, ((host, port), nodes) in enumerate(rpc.graphene_clients()):
        # launch only 1 monitoring thread for each running instance of the client
        # Stagger start delays evenly across the check interval so all
        # clients are not polled at the same instant.
        delay = i * core.config['monitoring']['feeds']['check_time_interval'] / len(rpc.graphene_clients())
        t = threading.Thread(target=bts_tools.monitor.monitoring_thread, args=nodes, kwargs={'delay': delay})
        # daemon thread: does not prevent interpreter shutdown
        t.daemon = True
        t.start()

    return app
| wackou/bts_tools | bts_tools/frontend.py | Python | gpl-3.0 | 3,547 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
import frappe
def execute():
frappe.reload_doc('manufacturing', 'doctype', 'job_card_time_log')
if (frappe.db.table_exists("Job Card")
and frappe.get_meta("Job Card").has_field("actual_start_date")):
time_logs = []
for d in frappe.get_all('Job Card',
fields = ["actual_start_date", "actual_end_date", "time_in_mins", "name", "for_quantity"],
filters = {'docstatus': ("<", 2)}):
if d.actual_start_date:
time_logs.append([d.actual_start_date, d.actual_end_date, d.time_in_mins,
d.for_quantity, d.name, 'Job Card', 'time_logs', frappe.generate_hash("", 10)])
if time_logs:
frappe.db.sql(""" INSERT INTO
`tabJob Card Time Log`
(from_time, to_time, time_in_mins, completed_qty, parent, parenttype, parentfield, name)
values {values}
""".format(values = ','.join(['%s'] * len(time_logs))), tuple(time_logs))
frappe.reload_doc('manufacturing', 'doctype', 'job_card')
frappe.db.sql(""" update `tabJob Card` set total_completed_qty = for_quantity,
total_time_in_mins = time_in_mins where docstatus < 2 """)
| mhbu50/erpnext | erpnext/patches/v11_1/make_job_card_time_logs.py | Python | gpl-3.0 | 1,348 |
from waldur_core.core import WaldurExtension
class AuthSocialExtension(WaldurExtension):
@staticmethod
def django_app():
return 'waldur_auth_social'
@staticmethod
def django_urls():
from .urls import urlpatterns
return urlpatterns
@staticmethod
def celery_tasks():
from datetime import timedelta
return {
'waldur-pull-remote-eduteams-users': {
'task': 'waldur_auth_social.pull_remote_eduteams_users',
'schedule': timedelta(minutes=5),
'args': (),
},
}
| opennode/waldur-mastermind | src/waldur_auth_social/extension.py | Python | mit | 602 |
'''Simple example of using the SWIG generated TWS wrapper to request historical
data from interactive brokers.
Note:
* Communication with TWS is asynchronous; requests to TWS are made through the
EPosixClientSocket class and TWS responds at some later time via the functions
in our EWrapper subclass.
* If you're using a demo account TWS will only respond with a limited time
period, no matter what is requested. Also the data returned is probably wholly
unreliable.
'''
from datetime import datetime
from threading import Event
from swigibpy import EWrapper, EPosixClientSocket, Contract
WAIT_TIME = 10.0
###
class HistoricalDataExample(EWrapper):
    '''Callback object passed to TWS, these functions will be called directly
    by TWS.

    '''

    def __init__(self):
        super(HistoricalDataExample, self).__init__()
        # Set once the complete history response has been received,
        # letting the main script stop waiting.
        self.got_history = Event()

    def orderStatus(self, id, status, filled, remaining, avgFillPrice, permId,
                    parentId, lastFilledPrice, clientId, whyHeld):
        # Not relevant for this example; must exist so TWS can call it.
        pass

    def openOrder(self, orderID, contract, order, orderState):
        # Not relevant for this example; must exist so TWS can call it.
        pass

    def nextValidId(self, orderId):
        '''Always called by TWS but not relevant for our example'''
        pass

    def openOrderEnd(self):
        '''Always called by TWS but not relevant for our example'''
        pass

    def managedAccounts(self, openOrderEnd):
        '''Called by TWS but not relevant for our example'''
        pass

    def historicalData(self, reqId, date, open, high,
                       low, close, volume,
                       barCount, WAP, hasGaps):
        # TWS marks the end of the series with a date field that starts
        # with the sentinel string 'finished'.
        if date[:8] == 'finished':
            print("History request complete")
            self.got_history.set()
        else:
            # Reformat the compact YYYYMMDD date for display.
            date = datetime.strptime(date, "%Y%m%d").strftime("%d %b %Y")
            print(("History %s - Open: %s, High: %s, Low: %s, Close: "
                   "%s, Volume: %d") % (date, open, high, low, close, volume))
# Instantiate our callback object
callback = HistoricalDataExample()

# Instantiate a socket object, allowing us to call TWS directly. Pass our
# callback object so TWS can respond.
tws = EPosixClientSocket(callback, reconnect_auto=True)

# Connect to tws running on localhost
if not tws.eConnect("", 7496, 42):
    raise RuntimeError('Failed to connect to TWS')

# Simple contract for GOOG
contract = Contract()
contract.exchange = "SMART"
contract.symbol = "GOOG"
contract.secType = "STK"
contract.currency = "USD"
today = datetime.today()

print("Requesting historical data for %s" % contract.symbol)

# Request some historical data.
tws.reqHistoricalData(
    2,                                          # tickerId,
    contract,                                   # contract,
    today.strftime("%Y%m%d %H:%M:%S %Z"),       # endDateTime,
    "1 W",                                      # durationStr,
    "1 day",                                    # barSizeSetting,
    "TRADES",                                   # whatToShow,
    0,                                          # useRTH,
    1,                                          # formatDate
    None                                        # chartOptions
)

print("\n====================================================================")
print(" History requested, waiting %ds for TWS responses" % WAIT_TIME)
print("====================================================================\n")

try:
    # Block until the 'finished' callback fires or the timeout elapses.
    callback.got_history.wait(timeout=WAIT_TIME)
except KeyboardInterrupt:
    pass
finally:
    if not callback.got_history.is_set():
        print('Failed to get history within %d seconds' % WAIT_TIME)

    print("\nDisconnecting...")
    tws.eDisconnect()
| Komnomnomnom/swigibpy | examples/historicaldata.py | Python | bsd-3-clause | 3,675 |
import csv
import pandas
import django
from django.conf import settings
from django.contrib import admin
from django.http import HttpResponse, HttpResponseForbidden
from builtins import str as text
def export_as_csv(admin_model, request, queryset):
    """
    Generic csv export admin action.

    based on http://djangosnippets.org/snippets/1697/
    """
    # everyone has perms to export as csv unless explicitly defined
    if getattr(settings, 'DJANGO_EXPORTS_REQUIRE_PERM', None):
        # Permission-gated mode: require the custom csv_<model> permission.
        admin_opts = admin_model.opts
        codename = '%s_%s' % ('csv', admin_opts.object_name.lower())
        has_csv_permission = request.user.has_perm("%s.%s" % (admin_opts.app_label, codename))
    else:
        # Otherwise defer to the ModelAdmin's own hook when one is defined.
        has_csv_permission = admin_model.has_csv_permission(request) \
            if (hasattr(admin_model, 'has_csv_permission') and callable(getattr(admin_model, 'has_csv_permission'))) \
            else True
    if has_csv_permission:
        opts = admin_model.model._meta
        if getattr(admin_model, 'csv_fields', None):
            # Explicit column list configured on the ModelAdmin.
            field_names = admin_model.csv_fields
        else:
            # Default: every concrete model field, alphabetically.
            field_names = [field.name for field in opts.fields]
            field_names.sort()
        # `mimetype` was renamed to `content_type` after Django 1.5.
        if django.VERSION[0] == 1 and django.VERSION[1] <= 5:
            response = HttpResponse(mimetype='text/csv')
        else:
            response = HttpResponse(content_type='text/csv')
        response['Content-Disposition'] = 'attachment; filename=%s.csv' % text(opts).replace('.', '_')
        queryset = queryset.values_list(*field_names)
        # Let pandas handle csv quoting/encoding, writing into the response.
        pandas.DataFrame(list(queryset), columns=field_names).to_csv(response, index=False, encoding='utf-8')
        return response
    return HttpResponseForbidden()
class CSVExportAdmin(admin.ModelAdmin):
    """ModelAdmin base class that exposes the csv export action,
    gated by has_csv_permission()."""

    def get_actions(self, request):
        # Add the export action only for users allowed to export.
        actions = super(CSVExportAdmin, self).get_actions(request)
        if self.has_csv_permission(request):
            actions['export_as_csv'] = (export_as_csv, 'export_as_csv', "Export selected objects as csv file")
        return actions

    def has_csv_permission(self, request, obj=None):
        """
        Returns True if the given request has permission to add an object.
        Can be overridden by the user in subclasses. By default, we assume
        all staff users can use this action unless `DJANGO_EXPORTS_REQUIRE_PERM`
        is set to True in your django settings.
        """
        if getattr(settings, 'DJANGO_EXPORTS_REQUIRE_PERM', None):
            opts = self.opts
            codename = '%s_%s' % ('csv', opts.object_name.lower())
            return request.user.has_perm("%s.%s" % (opts.app_label, codename))
        return True
# Register the export action site-wide unless explicitly disabled in settings.
if getattr(settings, 'DJANGO_CSV_GLOBAL_EXPORTS_ENABLED', True):
    admin.site.add_action(export_as_csv)
| rochapps/django-csv-exports | django_csv_exports/admin.py | Python | bsd-2-clause | 2,837 |
import os
import webapp2
from google.appengine.ext.webapp import template
import datetime
import time
import urllib
import wsgiref.handlers
import csv
import logging
from google.appengine.ext import db
from google.appengine.api import users
from busyflow.pivotal import PivotalClient
from xml.sax.saxutils import escape
from gaesessions import get_current_session
class GetProjects(webapp2.RequestHandler):
    # Handles the authenticate/project-list form POST by delegating the
    # full page render to OutputHTML.
    def post(self):
        output = OutputHTML( self )
        output.post()
class GetStories ( webapp2.RequestHandler ):
    def post ( self ):
        # NOTE(review): `stories` is not defined in this handler or at
        # module level, so this loop raises NameError if it ever runs --
        # confirm whether this handler is still routed to.
        for story in stories:
            self.response.out.write( story['name'] )
            self.response.out.write("<p>")
class OutputHTML ( webapp2.RequestHandler ):
    # Renders the whole Pivotal-PDF page: auth form, project list, story
    # filter form, story/label lists and the Generate-PDF form.
    #
    # NOTE(review): `post` is defined *inside* `get`, making it an inner
    # function that is never invoked via this nesting; callers such as
    # GetProjects use OutputHTML(handler).post() -- confirm the intended
    # indentation of these two defs. TODO confirm
    def get ( self ):
        def post ( self ):
            # initialize the class properties
            self.projectId = None
            self.filter = ''
            self.featuresChecked = "checked='true'"
            self.bugsChecked = "checked='true'"
            self.choresChecked = "checked='true'"
            self.releasesChecked = ""
            session = get_current_session()
            logging.info( " post ")
            # if we're authenticating get the key from the input
            if self.request.get('APIKey', default_value=None) != None :
                self.apikey = self.request.get('APIKey')
                # if the session is active
                if session.is_active():
                    # and it has an APIKey
                    if session.has_key('APIKey') :
                        # but the stored API Key has changed, store the new value and clear everything else
                        if self.request.get('APIKey') != session['APIKey'] :
                            session['APIKey'] = self.request.get('APIKey')
                            session.pop('projectId')
                            session.pop('filter')
                else :
                    # if we're authenticating but the session isn't active, it is recommended that you rotate the session ID (security)
                    session.regenerate_id()
                    session['APIKey'] = self.apikey
            # if we're getting the stories
            elif self.request.get('projects', default_value=None) != None :
                self.apikey = session['APIKey']
                self.projectId = self.request.get('projects')
                session['projectId'] = self.projectId
                self.filter = self.request.get('filter')
                session['filter'] = self.filter
                # Persist each story-type checkbox state in the session.
                if self.request.get('featuresChecked') != '' :
                    self.featuresChecked = "checked='true'"
                else :
                    self.featuresChecked = ''
                session['featuresChecked'] = self.featuresChecked
                if self.request.get('bugsChecked') != '' :
                    self.bugsChecked = "checked='true'"
                else :
                    self.bugsChecked = ''
                session['bugsChecked'] = self.bugsChecked
                if self.request.get('choresChecked') != '' :
                    self.choresChecked = "checked='true'"
                else :
                    self.choresChecked = ''
                session['choresChecked'] = self.choresChecked
                if self.request.get('releasesChecked') != '' :
                    self.releasesChecked = "checked='true'"
                else :
                    self.releasesChecked = ''
                session['releasesChecked'] = self.releasesChecked

            # Fetch the project list for the authenticated account.
            client = PivotalClient(token=self.apikey, cache=None)
            projects = client.projects.all()['projects']

            self.response.out.write("""
              <head>
                <link type="text/css" rel="stylesheet" href="/stylesheets/main.css" />
              </head>
              <html>
                <body>
                  <h1>Pivotal PDF</h1>
                  <h2>A User Story Document Generator</h2>
                  <h3>Step 1: Authenticate</h3>
                  <form action="/authenticate" method="post">
                    Pivotal Tracker API Key
            """)
            apiKey = """<div><input type="text" name="APIKey" size="60" value="{0}"></input></div>""".format( self.apikey )
            self.response.out.write( apiKey )
            self.response.out.write("""
                    <div><input type="submit" value="Login"></div>
                  </form>
                  <p>
                  <h3>Step 2: Select Project</h3>
                  <form action="/getStories" method="post">
                    <div><select name="projects" size="10" style="width:300px;margin:5px 0 5px 0;">
            """)
            # if we haven't selected a project and there is at least 1, the select the first by default
            if self.projectId == None and len(projects) > 0 :
                self.projectId = projects[0]['id']
            for project in projects:
                if project['id'] == self.projectId :
                    option = """<option selected="selected" value="{0}">{1}</option>""".format( project['id'], project['name'] )
                else:
                    option = """<option value="{0}">{1}</option>""".format( project['id'], project['name'] )
                self.response.out.write( option )
            self.response.out.write("""
                    </select></div>
                    <p>
                    <h3>Step 3: Select Story Types and Enter Story Search Filter</h3>
                    <div><label for="featuresChecked">Features</label><input type="checkbox" id="featuresChecked" name="featuresChecked" {1}>
                    <label for="bugsChecked">Bugs</label><input type="checkbox" id="bugsChecked" name="bugsChecked" {2} >
                    <label for="choresChecked">Chores</label><input type="checkbox" id="choresChecked" name="choresChecked" {3} >
                    <label for="releasesChecked">Releases</label><input type="checkbox" id="releasesChecked" name="releasesChecked" {4} >
                    </div>
                    <br/>
                    This is the same as the Search box in Pivotal Tracker
                    <div><input type="text" name="filter" size="60" value="{0}"></input></div>
            """.format( self.filter, self.featuresChecked, self.bugsChecked, self.choresChecked, self.releasesChecked ))
            self.response.out.write("""
                    <p>
                    <div><input type="submit" value="Get Stories" ></div>
                  </form>
            """)
            stories = []
            labels = {}
            # add the story types to the filter
            typeFilter = ' type:none,'
            if self.featuresChecked != '' :
                typeFilter += 'feature,'
            if self.bugsChecked != '' :
                typeFilter += 'bug,'
            if self.choresChecked != '' :
                typeFilter += 'chore,'
            if self.releasesChecked != '' :
                typeFilter += 'release'
            self.filter += typeFilter
            session['filter'] = self.filter
            # if a project is selected, get it's stories
            if self.projectId != None :
                stories = client.stories.get_filter(self.projectId, self.filter, True )['stories']
            self.response.out.write("""
                  <form action="/generatePDF" method="post">
            """)
            # list the stories
            self.response.out.write("""
                    <p>
                    <h3>Story List</h3>
                    <div><select name="stories" size="20" style="width:300px;margin:5px 0 5px 0;" multiple="multiple">
            """)
            for story in stories :
                option = """<option value="{0}">{1}</option>""".format( story['id'], story['name'] )
                self.response.out.write( option )
                # as we go through each, pick out the label and add it to our list
                if 'labels' in story :
                    for label in story['labels'] :
                        labels[label] = label
            self.response.out.write("""
                    </select></div>
            """)
            # list the labels
            self.response.out.write("""
                    <p>
                    <h3>Label List</h3>
                    <div><select name="labels" size="20" style="width:300px;margin:5px 0 5px 0;" multiple="multiple">
            """)
            for label in labels :
                option = """<option value="{0}">{0}</option>""".format( label )
                self.response.out.write( option )
            self.response.out.write("""
                    </select></div>
            """)
            self.response.out.write("""
                    <p>
                    <div>
                    <input type="radio" name="format" value="full" checked="True"/>Full Report<br/>
                    <input type="radio" name="format" value="summary"/>Summary Report<br/>
                    </div>
                    <p>
            """)
            # if there are no stories, disable the Output PDF button
            if len(stories) == 0 :
                self.response.out.write( """
                    <div><input type="submit" value="Generate PDF" disabled="true"></div>
                """)
            else :
                self.response.out.write( """
                    <div><input type="submit" name="outputType" value="Generate PDF" ></div>
                """)
            self.response.out.write("""
                  </form>
                </body>
              </html>
            """)
| xtopherbrandt/pivotalpdf | pivotal_pdf_input.py | Python | gpl-2.0 | 8,838 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-01-12 06:45
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds the required `gender` field to Candidate. Existing rows are
    # backfilled with 'M' via the one-off default (preserve_default=False
    # keeps the default out of the final schema).

    dependencies = [
        ('contest', '0002_auto_20160112_0642'),
    ]

    operations = [
        migrations.AddField(
            model_name='candidate',
            name='gender',
            field=models.CharField(choices=[('M', 'Male'), ('F', 'Female')], default='M', max_length=1),
            preserve_default=False,
        ),
    ]
| azuer88/tabulator | tabulator/contest/migrations/0003_candidate_gender.py | Python | gpl-3.0 | 541 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Pepi documentation build configuration file, created by
# sphinx-quickstart on Thu Aug 31 17:00:06 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys

# Read the Docs sets READTHEDOCS=True in its build environment; used
# below to pick a theme appropriate for each builder.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../'))
sys.path.insert(0, os.path.abspath('../..'))
class Mock(object):
    """Lightweight stand-in for modules unavailable on the docs builder
    (picamera, numpy): every call, multiplication, bitwise-and or
    attribute access yields another Mock, and instances are always falsy
    so autodoc can import the package without the real dependencies."""

    __all__ = []

    def __init__(self, *args, **kw):
        pass

    def __call__(self, *args, **kw):
        return Mock()

    def __mul__(self, other):
        return Mock()

    def __and__(self, other):
        return Mock()

    def __bool__(self):
        return False

    # Python 2 truthiness hook; mirrors __bool__ above.
    def __nonzero__(self):
        return False

    @classmethod
    def __getattr__(cls, name):
        # Module-introspection dunders get a harmless placeholder path;
        # every other missing attribute becomes a fresh Mock.
        return '/dev/null' if name in ('__file__', '__path__') else Mock()
# Pre-register Mock stand-ins so importing the project under autodoc does
# not require the real hardware/numeric dependencies to be installed.
sys.modules['picamera'] = Mock()
sys.modules['picamera.array'] = Mock()
sys.modules['PiCamera'] = Mock()
sys.modules['numpy'] = Mock()
# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.viewcode',
              'sphinx.ext.autodoc']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = 'PEPI'
copyright = '2017, Curtis West'
author = 'Curtis West'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '3.0'
# The full version, including alpha/beta/rc tags.
release = '3.0.0'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
# Read the Docs injects its own theme, so only pick alabaster locally.
if not on_rtd:
    html_theme = 'alabaster'
else:
    html_theme = 'default'

# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
    '**': [
        'about.html',
        'navigation.html',
        'relations.html',  # needs 'show_related': True theme option to display
        'searchbox.html',
        'donate.html',
    ]
}

# -- Options for HTMLHelp output ------------------------------------------

# Output file base name for HTML help builder.
htmlhelp_basename = 'Pepidoc'

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',

    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'Pepi.tex', 'Pepi Documentation',
     'Curtis West', 'manual'),
]

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'pepi', 'Pepi Documentation',
     [author], 1)
]

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'Pepi', 'Pepi Documentation',
     author, 'Pepi', 'One line description of project.',
     'Miscellaneous'),
]
| curtiswest/pepi | docs/conf.py | Python | apache-2.0 | 5,971 |
from django.db import models
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
from random import choice
from string import ascii_uppercase, digits
# Create your models here.
def id_generator(size=8, chars=ascii_uppercase + digits):
    """Return a random code of *size* characters drawn from *chars*.

    Used as the default for ``Poll.code`` — the code acts as a shareable
    access token, so ``secrets.choice`` is used for cryptographically
    strong randomness instead of the predictable ``random.choice``.
    Interface is unchanged (same defaults, same return type).

    NOTE(review): ``Poll.code`` is unique=True but collisions are not
    retried here — presumably acceptable at current scale; verify.
    """
    import secrets  # local import: keeps module-level dependencies unchanged

    return ''.join(secrets.choice(chars) for _ in range(size))
class Poll(models.Model):
    # A poll created by a user; joined via the short unique `code`.
    owner = models.ForeignKey(User)
    name = models.CharField(_('Poll name'), max_length=128)
    # Random 8-character access code, generated by id_generator on create.
    code = models.CharField(_('Code'), max_length=8, unique=True, default=id_generator)
    passes = models.IntegerField(default=0)
    creation_date = models.DateTimeField(_('Date created'), auto_now_add=True)

    def __str__(self):
        return 'Poll {name} by {owner}'.format(name=self.name, owner=self.owner)

    class Meta:
        ordering = ['-name']
        verbose_name = _('Poll')
        verbose_name_plural = _('Polls')
class Question(models.Model):
    # A single question belonging to a Poll.
    from_poll = models.ForeignKey('Poll')
    caption = models.CharField(_('Question caption'), max_length=128)

    def __str__(self):
        return '{caption}'.format(caption=self.caption)

    class Meta:
        ordering = ['-caption']
        verbose_name = _('Question')
        verbose_name_plural = _('Questions')
class AnswerContainer(models.Model):
    # Groups one Telegram user's answers for a given poll.
    telegram_username = models.CharField(_('Telegram username'), max_length=128)
    on_poll = models.ForeignKey('Poll')

    def __str__(self):
        return '{telegram_username} answer on poll {poll}'.format(telegram_username=self.telegram_username,
                                                                  poll=self.on_poll)

    class Meta:
        verbose_name = _('Answer Container')
        verbose_name_plural = _('Answer Containers')
class Answer(models.Model):
    # One answer to one question, inside a user's AnswerContainer.
    from_container = models.ForeignKey('AnswerContainer')
    on_question = models.ForeignKey('Question')
    caption = models.CharField(_('Answer caption'), max_length=128)

    def __str__(self):
        return 'Answer: {caption} for question {question_id}'.format(caption=self.caption,
                                                                     question_id=self.on_question.id)

    class Meta:
        ordering = ['-caption']
        verbose_name = _('Answer')
        verbose_name_plural = _('Answers')
| apy2017/Anaconda | techbot_web/poll_editor/models.py | Python | mit | 2,362 |
# Authors:
# Rob Crittenden <rcritten@redhat.com>
#
# Copyright (C) 2011 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from ipalib import api, errors
from ipalib import Str, StrEnum, Bool
from ipalib.plugable import Registry
from ipalib.plugins.baseldap import *
from ipalib import _, ngettext
from ipalib.plugins.hbacrule import is_all
__doc__ = _("""
SELinux User Mapping
Map IPA users to SELinux users by host.
Hosts, hostgroups, users and groups can be either defined within
the rule or it may point to an existing HBAC rule. When using
--hbacrule option to selinuxusermap-find an exact match is made on the
HBAC rule name, so only one or zero entries will be returned.
EXAMPLES:
Create a rule, "test1", that sets all users to xguest_u:s0 on the host "server":
ipa selinuxusermap-add --usercat=all --selinuxuser=xguest_u:s0 test1
ipa selinuxusermap-add-host --hosts=server.example.com test1
Create a rule, "test2", that sets all users to guest_u:s0 and uses an existing HBAC rule for users and hosts:
ipa selinuxusermap-add --usercat=all --hbacrule=webserver --selinuxuser=guest_u:s0 test2
Display the properties of a rule:
ipa selinuxusermap-show test2
Create a rule for a specific user. This sets the SELinux context for
user john to unconfined_u:s0-s0:c0.c1023 on any machine:
ipa selinuxusermap-add --hostcat=all --selinuxuser=unconfined_u:s0-s0:c0.c1023 john_unconfined
ipa selinuxusermap-add-user --users=john john_unconfined
Disable a rule:
ipa selinuxusermap-disable test1
Enable a rule:
ipa selinuxusermap-enable test1
Find a rule referencing a specific HBAC rule:
ipa selinuxusermap-find --hbacrule=allow_some
Remove a rule:
ipa selinuxusermap-del john_unconfined
SEEALSO:
The list controlling the order in which the SELinux user map is applied
and the default SELinux user are available in the config-show command.
""")
register = Registry()
notboth_err = _('HBAC rule and local members cannot both be set')
def validate_selinuxuser(ugettext, user):
    """
    An SELinux user has 3 components: user:MLS:MCS. user and MLS are required.
    user traditionally ends with _u but this is not mandatory.

    The regex is ^[a-zA-Z][a-zA-Z_]*

    The MLS part can only be:
        Level: s[0-15](-s[0-15])

    Then MCS could be c[0-1023].c[0-1023] and/or c[0-1023]-c[0-c0123]

    Meaning
    s0 s0-s1 s0-s15:c0.c1023 s0-s1:c0,c2,c15.c26 s0-s0:c0.c1023

    Returns a message on invalid, returns nothing on valid.
    """
    regex_name = re.compile(r'^[a-zA-Z][a-zA-Z_]*$')
    # Sensitivity levels are s0..s15. The previous pattern
    # (s[0-9][1-5]?) wrongly rejected s10 and accepted out-of-range
    # values such as s25.
    regex_mls = re.compile(r'^s(1[0-5]|[0-9])(-s(1[0-5]|[0-9]))?$')
    # Structural check only; numeric range is validated below.
    regex_mcs = re.compile(r'^c(\d+)([.,-]c(\d+))*$')

    # If we add in ::: we don't have to check to see if some values are
    # empty
    (name, mls, mcs, ignore) = (user + ':::').split(':', 3)

    if not regex_name.match(name):
        return _('Invalid SELinux user name, only a-Z and _ are allowed')
    if not mls or not regex_mls.match(mls):
        return _('Invalid MLS value, must match s[0-15](-s[0-15])')
    if mcs:
        m = regex_mcs.match(mcs)
        # Every category number must be within c0..c1023 -- the previous
        # check only range-tested the last repetition group, so e.g.
        # a lone c2000 slipped through.
        if not m or any(int(c) > 1023 for c in re.findall(r'\d+', mcs)):
            return _('Invalid MCS value, must match c[0-1023].c[0-1023] '
                     'and/or c[0-1023]-c[0-c0123]')

    return None
def validate_selinuxuser_inlist(ldap, user):
    """
    Ensure the user is in the list of allowed SELinux users.

    Returns nothing if the user is found, raises an exception otherwise.
    """
    config = ldap.get_ipa_config()
    # The ordering list is stored as a single '$'-separated attribute value.
    order = config.get('ipaselinuxusermaporder', [])
    if len(order) != 1:
        raise errors.NotFound(reason=_('SELinux user map list not '
                                       'found in configuration'))
    if user not in order[0].split('$'):
        raise errors.NotFound(
            reason=_('SELinux user %(user)s not found in '
                     'ordering list (in config)') % dict(user=user))
@register()
class selinuxusermap(LDAPObject):
    """
    SELinux User Map object.
    """
    container_dn = api.env.container_selinux
    object_name = _('SELinux User Map rule')
    object_name_plural = _('SELinux User Map rules')
    object_class = ['ipaassociation', 'ipaselinuxusermap']
    permission_filter_objectclasses = ['ipaselinuxusermap']
    # NOTE(review): 'ipaenabledflag' appears twice in this list --
    # harmless duplication, but presumably unintended; confirm.
    default_attributes = [
        'cn', 'ipaenabledflag',
        'description', 'usercategory', 'hostcategory',
        'ipaenabledflag', 'memberuser', 'memberhost',
        'memberhostgroup', 'seealso', 'ipaselinuxuser',
    ]
    uuid_attribute = 'ipauniqueid'
    rdn_attribute = 'ipauniqueid'
    # Which member attributes expand into which object types.
    attribute_members = {
        'memberuser': ['user', 'group'],
        'memberhost': ['host', 'hostgroup'],
    }
    # ACI definitions materialised by the permission updater.
    managed_permissions = {
        'System: Read SELinux User Maps': {
            'replaces_global_anonymous_aci': True,
            'ipapermbindruletype': 'all',
            'ipapermright': {'read', 'search', 'compare'},
            'ipapermdefaultattr': {
                'accesstime', 'cn', 'description', 'hostcategory',
                'ipaenabledflag', 'ipaselinuxuser', 'ipauniqueid',
                'memberhost', 'memberuser', 'seealso', 'usercategory',
                'objectclass', 'member',
            },
        },
        'System: Add SELinux User Maps': {
            'ipapermright': {'add'},
            'replaces': [
                '(target = "ldap:///ipauniqueid=*,cn=usermap,cn=selinux,$SUFFIX")(version 3.0;acl "permission:Add SELinux User Maps";allow (add) groupdn = "ldap:///cn=Add SELinux User Maps,cn=permissions,cn=pbac,$SUFFIX";)',
            ],
            'default_privileges': {'SELinux User Map Administrators'},
        },
        'System: Modify SELinux User Maps': {
            'ipapermright': {'write'},
            'ipapermdefaultattr': {
                'cn', 'ipaenabledflag', 'ipaselinuxuser', 'memberhost',
                'memberuser', 'seealso'
            },
            'replaces': [
                '(targetattr = "cn || memberuser || memberhost || seealso || ipaselinuxuser || ipaenabledflag")(target = "ldap:///ipauniqueid=*,cn=usermap,cn=selinux,$SUFFIX")(version 3.0;acl "permission:Modify SELinux User Maps";allow (write) groupdn = "ldap:///cn=Modify SELinux User Maps,cn=permissions,cn=pbac,$SUFFIX";)',
            ],
            'default_privileges': {'SELinux User Map Administrators'},
        },
        'System: Remove SELinux User Maps': {
            'ipapermright': {'delete'},
            'replaces': [
                '(target = "ldap:///ipauniqueid=*,cn=usermap,cn=selinux,$SUFFIX")(version 3.0;acl "permission:Remove SELinux User Maps";allow (delete) groupdn = "ldap:///cn=Remove SELinux User Maps,cn=permissions,cn=pbac,$SUFFIX";)',
            ],
            'default_privileges': {'SELinux User Map Administrators'},
        },
    }

    # These maps will not show as members of other entries

    label = _('SELinux User Maps')
    label_singular = _('SELinux User Map')

    takes_params = (
        Str('cn',
            cli_name='name',
            label=_('Rule name'),
            primary_key=True,
        ),
        Str('ipaselinuxuser', validate_selinuxuser,
            cli_name='selinuxuser',
            label=_('SELinux User'),
        ),
        Str('seealso?',
            cli_name='hbacrule',
            label=_('HBAC Rule'),
            doc=_('HBAC Rule that defines the users, groups and hostgroups'),
        ),
        StrEnum('usercategory?',
            cli_name='usercat',
            label=_('User category'),
            doc=_('User category the rule applies to'),
            values=(u'all', ),
        ),
        StrEnum('hostcategory?',
            cli_name='hostcat',
            label=_('Host category'),
            doc=_('Host category the rule applies to'),
            values=(u'all', ),
        ),
        Str('description?',
            cli_name='desc',
            label=_('Description'),
        ),
        Bool('ipaenabledflag?',
             label=_('Enabled'),
             flags=['no_option'],
        ),
        Str('memberuser_user?',
            label=_('Users'),
            flags=['no_create', 'no_update', 'no_search'],
        ),
        Str('memberuser_group?',
            label=_('User Groups'),
            flags=['no_create', 'no_update', 'no_search'],
        ),
        Str('memberhost_host?',
            label=_('Hosts'),
            flags=['no_create', 'no_update', 'no_search'],
        ),
        Str('memberhost_hostgroup?',
            label=_('Host Groups'),
            flags=['no_create', 'no_update', 'no_search'],
        ),
    )

    def _normalize_seealso(self, seealso):
        """
        Given a HBAC rule name verify its existence and return the dn.

        Accepts either an already-valid DN string (returned as-is after
        parsing) or an HBAC rule name, which is resolved via LDAP.
        """
        if not seealso:
            return None

        try:
            dn = DN(seealso)
            return str(dn)
        except ValueError:
            # Not a DN: treat it as an HBAC rule name and look it up.
            try:
                entry_attrs = self.backend.find_entry_by_attr(
                    self.api.Object['hbacrule'].primary_key.name,
                    seealso,
                    self.api.Object['hbacrule'].object_class,
                    [''],
                    DN(self.api.Object['hbacrule'].container_dn, api.env.basedn))
                seealso = entry_attrs.dn
            except errors.NotFound:
                raise errors.NotFound(reason=_('HBAC rule %(rule)s not found') % dict(rule=seealso))

        return seealso

    def _convert_seealso(self, ldap, entry_attrs, **options):
        """
        Convert an HBAC rule dn into a name
        """
        if options.get('raw', False):
            return

        if 'seealso' in entry_attrs:
            # Replace the stored DN with the rule's cn for display.
            hbac_attrs = ldap.get_entry(entry_attrs['seealso'][0], ['cn'])
            entry_attrs['seealso'] = hbac_attrs['cn'][0]
@register()
class selinuxusermap_add(LDAPCreate):
    __doc__ = _('Create a new SELinux User Map.')
    msg_summary = _('Added SELinux User Map "%(value)s"')
    def pre_callback(self, ldap, dn, entry_attrs, attrs_list, *keys, **options):
        """Validate the new rule before it is written to LDAP.

        Rules are created enabled, the SELinux user must be in the
        configured list, and an HBAC rule reference ('seealso') is
        mutually exclusive with local user/host categories.
        """
        assert isinstance(dn, DN)
        # rules are enabled by default
        entry_attrs['ipaenabledflag'] = 'TRUE'
        validate_selinuxuser_inlist(ldap, entry_attrs['ipaselinuxuser'])
        # hbacrule is not allowed when usercat or hostcat is set
        def is_to_be_set(attr):
            # PEP8: compare against None with `is not`, not `!=`
            return attr in entry_attrs and entry_attrs[attr] is not None
        are_local_members_to_be_set = any(is_to_be_set(attr)
                                          for attr in ('usercategory',
                                                       'hostcategory'))
        is_hbacrule_to_be_set = is_to_be_set('seealso')
        if is_hbacrule_to_be_set and are_local_members_to_be_set:
            raise errors.MutuallyExclusiveError(reason=notboth_err)
        if is_hbacrule_to_be_set:
            entry_attrs['seealso'] = self.obj._normalize_seealso(
                entry_attrs['seealso'])
        return dn
    def post_callback(self, ldap, dn, entry_attrs, *keys, **options):
        """Map the stored 'seealso' DN back to the HBAC rule name."""
        assert isinstance(dn, DN)
        self.obj._convert_seealso(ldap, entry_attrs, **options)
        return dn
@register()
class selinuxusermap_del(LDAPDelete):
    __doc__ = _('Delete a SELinux User Map.')
    # Plain delete; no extra validation is needed when removing a rule.
    msg_summary = _('Deleted SELinux User Map "%(value)s"')
@register()
class selinuxusermap_mod(LDAPUpdate):
    __doc__ = _('Modify a SELinux User Map.')
    msg_summary = _('Modified SELinux User Map "%(value)s"')
    def pre_callback(self, ldap, dn, entry_attrs, attrs_list, *keys, **options):
        """Enforce mutual-exclusion rules before applying the update.

        'seealso' (an HBAC rule reference) may not be combined with local
        members or categories, and a category of 'all' may not coexist
        with explicit members.  Both the current LDAP entry and the
        requested modifications are consulted.
        """
        assert isinstance(dn, DN)
        try:
            _entry_attrs = ldap.get_entry(dn, attrs_list)
        except errors.NotFound:
            self.obj.handle_not_found(*keys)
        def is_to_be_deleted(attr):
            # attribute exists now and the update explicitly clears it
            return (attr in _entry_attrs and attr in entry_attrs) and \
                entry_attrs[attr] is None
        # makes sure the local members and hbacrule is not set at the same time
        # memberuser or memberhost could have been set using --setattr
        def is_to_be_set(attr):
            return ((attr in _entry_attrs and _entry_attrs[attr] is not None) or
                    (attr in entry_attrs and entry_attrs[attr] is not None)) and \
                not is_to_be_deleted(attr)
        are_local_members_to_be_set = any(is_to_be_set(attr)
                                          for attr in ('usercategory',
                                                       'hostcategory',
                                                       'memberuser',
                                                       'memberhost'))
        is_hbacrule_to_be_set = is_to_be_set('seealso')
        # this can disable all modifications if hbacrule and local members were
        # set at the same time bypassing this command, e.g. using ldapmodify
        if are_local_members_to_be_set and is_hbacrule_to_be_set:
            raise errors.MutuallyExclusiveError(reason=notboth_err)
        if is_all(entry_attrs, 'usercategory') and 'memberuser' in entry_attrs:
            raise errors.MutuallyExclusiveError(reason="user category "
                "cannot be set to 'all' while there are allowed users")
        if is_all(entry_attrs, 'hostcategory') and 'memberhost' in entry_attrs:
            raise errors.MutuallyExclusiveError(reason="host category "
                "cannot be set to 'all' while there are allowed hosts")
        if 'ipaselinuxuser' in entry_attrs:
            validate_selinuxuser_inlist(ldap, entry_attrs['ipaselinuxuser'])
        if 'seealso' in entry_attrs:
            entry_attrs['seealso'] = self.obj._normalize_seealso(
                entry_attrs['seealso'])
        return dn
    def post_callback(self, ldap, dn, entry_attrs, *keys, **options):
        """Convert the stored 'seealso' DN back to an HBAC rule name."""
        assert isinstance(dn, DN)
        self.obj._convert_seealso(ldap, entry_attrs, **options)
        return dn
@register()
class selinuxusermap_find(LDAPSearch):
    __doc__ = _('Search for SELinux User Maps.')
    msg_summary = ngettext(
        '%(count)d SELinux User Map matched', '%(count)d SELinux User Maps matched', 0
    )
    def execute(self, *args, **options):
        """Translate a 'seealso' rule name into its DN before searching."""
        # If searching on hbacrule we need to find the uuid to search on
        if options.get('seealso'):
            hbacrule = options['seealso']
            try:
                hbac = api.Command['hbacrule_show'](hbacrule,
                    all=True)['result']
                dn = hbac['dn']
            except errors.NotFound:
                # unknown rule can match nothing; short-circuit the search
                return dict(count=0, result=[], truncated=False)
            options['seealso'] = dn
        return super(selinuxusermap_find, self).execute(*args, **options)
    def post_callback(self, ldap, entries, truncated, *args, **options):
        """Convert each entry's 'seealso' DN back into the rule name."""
        if options.get('pkey_only', False):
            return truncated
        for attrs in entries:
            self.obj._convert_seealso(ldap, attrs, **options)
        return truncated
@register()
class selinuxusermap_show(LDAPRetrieve):
    __doc__ = _('Display the properties of a SELinux User Map rule.')
    def post_callback(self, ldap, dn, entry_attrs, *keys, **options):
        """Replace the stored 'seealso' DN with the HBAC rule name."""
        assert isinstance(dn, DN)
        self.obj._convert_seealso(ldap, entry_attrs, **options)
        return dn
@register()
class selinuxusermap_enable(LDAPQuery):
    __doc__ = _('Enable an SELinux User Map rule.')
    msg_summary = _('Enabled SELinux User Map "%(value)s"')
    has_output = output.standard_value
    def execute(self, cn, **options):
        """Set ipaenabledflag to TRUE on the named rule."""
        ldap = self.obj.backend
        dn = self.obj.get_dn(cn)
        try:
            entry_attrs = ldap.get_entry(dn, ['ipaenabledflag'])
        except errors.NotFound:
            self.obj.handle_not_found(cn)
        entry_attrs['ipaenabledflag'] = ['TRUE']
        try:
            ldap.update_entry(entry_attrs)
        except errors.EmptyModlist:
            # nothing changed: the rule was already enabled
            raise errors.AlreadyActive()
        return dict(result=True, value=pkey_to_value(cn, options))
@register()
class selinuxusermap_disable(LDAPQuery):
    __doc__ = _('Disable an SELinux User Map rule.')
    msg_summary = _('Disabled SELinux User Map "%(value)s"')
    has_output = output.standard_value
    def execute(self, cn, **options):
        """Set ipaenabledflag to FALSE on the named rule."""
        ldap = self.obj.backend
        dn = self.obj.get_dn(cn)
        try:
            entry_attrs = ldap.get_entry(dn, ['ipaenabledflag'])
        except errors.NotFound:
            self.obj.handle_not_found(cn)
        entry_attrs['ipaenabledflag'] = ['FALSE']
        try:
            ldap.update_entry(entry_attrs)
        except errors.EmptyModlist:
            # nothing changed: the rule was already disabled
            raise errors.AlreadyInactive()
        return dict(result=True, value=pkey_to_value(cn, options))
@register()
class selinuxusermap_add_user(LDAPAddMember):
    __doc__ = _('Add users and groups to an SELinux User Map rule.')
    member_attributes = ['memberuser']
    member_count_out = ('%i object added.', '%i objects added.')
    def pre_callback(self, ldap, dn, found, not_found, *keys, **options):
        """Refuse member additions that conflict with the rule state."""
        assert isinstance(dn, DN)
        try:
            entry_attrs = ldap.get_entry(dn, self.obj.default_attributes)
            dn = entry_attrs.dn
        except errors.NotFound:
            self.obj.handle_not_found(*keys)
        # Explicit members conflict with usercategory='all'.
        has_all_users = ('usercategory' in entry_attrs and
                         entry_attrs['usercategory'][0].lower() == 'all')
        if has_all_users:
            raise errors.MutuallyExclusiveError(
                reason=_("users cannot be added when user category='all'"))
        # Local members and an HBAC rule reference are mutually exclusive.
        if 'seealso' in entry_attrs:
            raise errors.MutuallyExclusiveError(reason=notboth_err)
        return dn
@register()
class selinuxusermap_remove_user(LDAPRemoveMember):
    __doc__ = _('Remove users and groups from an SELinux User Map rule.')
    # No category/seealso checks here: removing members can never create a
    # conflicting rule state.
    member_attributes = ['memberuser']
    member_count_out = ('%i object removed.', '%i objects removed.')
@register()
class selinuxusermap_add_host(LDAPAddMember):
    __doc__ = _('Add target hosts and hostgroups to an SELinux User Map rule.')
    member_attributes = ['memberhost']
    member_count_out = ('%i object added.', '%i objects added.')
    def pre_callback(self, ldap, dn, found, not_found, *keys, **options):
        """Refuse host additions that conflict with the rule state."""
        assert isinstance(dn, DN)
        try:
            entry_attrs = ldap.get_entry(dn, self.obj.default_attributes)
            dn = entry_attrs.dn
        except errors.NotFound:
            self.obj.handle_not_found(*keys)
        # Explicit members conflict with hostcategory='all'.
        has_all_hosts = ('hostcategory' in entry_attrs and
                         entry_attrs['hostcategory'][0].lower() == 'all')
        if has_all_hosts:
            raise errors.MutuallyExclusiveError(
                reason=_("hosts cannot be added when host category='all'"))
        # Local members and an HBAC rule reference are mutually exclusive.
        if 'seealso' in entry_attrs:
            raise errors.MutuallyExclusiveError(reason=notboth_err)
        return dn
@register()
class selinuxusermap_remove_host(LDAPRemoveMember):
    __doc__ = _('Remove target hosts and hostgroups from an SELinux User Map rule.')
    # No category/seealso checks here: removing members can never create a
    # conflicting rule state.
    member_attributes = ['memberhost']
    member_count_out = ('%i object removed.', '%i objects removed.')
| cluck/freeipa | ipalib/plugins/selinuxusermap.py | Python | gpl-3.0 | 19,617 |
"""allow bash-completion for argparse with argcomplete if installed
needs argcomplete>=0.5.6 for python 3.2/3.3 (older versions fail
to find the magic string, so _ARGCOMPLETE env. var is never set, and
this does not need special code.
argcomplete does not support python 2.5 (although the changes for that
are minor).
Function try_argcomplete(parser) should be called directly before
the call to ArgumentParser.parse_args().
The filescompleter is what you normally would use on the positional
arguments specification, in order to get "dirname/" after "dirn<TAB>"
instead of the default "dirname ":
optparser.add_argument(Config._file_or_dir, nargs='*'
).completer=filescompleter
Other, application specific, completers should go in the file
doing the add_argument calls as they need to be specified as .completer
attributes as well. (If argcomplete is not installed, the function the
attribute points to will not be used).
SPEEDUP
=======
The generic argcomplete script for bash-completion
(/etc/bash_completion.d/python-argcomplete.sh )
uses a python program to determine startup script generated by pip.
You can speed up completion somewhat by changing this script to include
# PYTHON_ARGCOMPLETE_OK
so that the python-argcomplete-check-easy-install-script does not
need to be called to find the entry point of the code and see if that is
marked with PYTHON_ARGCOMPLETE_OK
INSTALL/DEBUGGING
=================
To include this support in another application that has setup.py generated
scripts:
- add the line:
# PYTHON_ARGCOMPLETE_OK
near the top of the main python entry point
- include in the file calling parse_args():
from _argcomplete import try_argcomplete, filescompleter
, call try_argcomplete just before parse_args(), and optionally add
filescompleter to the positional arguments' add_argument()
If things do not work right away:
- switch on argcomplete debugging with (also helpful when doing custom
completers):
export _ARC_DEBUG=1
- run:
python-argcomplete-check-easy-install-script $(which appname)
echo $?
will echo 0 if the magic line has been found, 1 if not
- sometimes it helps to find early on errors using:
_ARGCOMPLETE=1 _ARC_DEBUG=1 appname
which should throw a KeyError: 'COMPLINE' (which is properly set by the
global argcomplete script).
"""
import sys
import os
from glob import glob
class FastFilesCompleter:
    """Fast file completer class.

    Expands a (possibly partial) path prefix into the matching file and
    directory names, mimicking bash ("dirname/" instead of "dirname ").
    """
    def __init__(self, directories=True):
        self.directories = directories

    def __call__(self, prefix, **kwargs):
        """only called on non option completions"""
        # Length of the directory part that gets stripped from candidates.
        if os.path.sep in prefix[1:]:
            prefix_dir = len(os.path.dirname(prefix) + os.path.sep)
        else:
            prefix_dir = 0
        completion = []
        globbed = []
        if '*' not in prefix and '?' not in prefix:
            # BUG FIX: guard against an empty prefix before reading
            # prefix[-1]; completing an empty word raised IndexError.
            if prefix and prefix[-1] == os.path.sep:
                # we are on unix, otherwise no bash
                globbed.extend(glob(prefix + '.*'))
            prefix += '*'
        globbed.extend(glob(prefix))
        for x in sorted(globbed):
            if os.path.isdir(x):
                x += '/'
            # append stripping the prefix (like bash, not like compgen)
            completion.append(x[prefix_dir:])
        return completion
if os.environ.get('_ARGCOMPLETE'):
    # bash is requesting completions: argcomplete must be importable,
    # otherwise abort the completion attempt with a non-zero exit.
    try:
        import argcomplete.completers
    except ImportError:
        sys.exit(-1)
    filescompleter = FastFilesCompleter()
    def try_argcomplete(parser):
        argcomplete.autocomplete(parser)
else:
    # argcomplete inactive: provide no-op stand-ins so callers need no checks.
    def try_argcomplete(parser): pass
    filescompleter = None
| razvanc-r/godot-python | tests/bindings/lib/_pytest/_argcomplete.py | Python | mit | 3,624 |
# Copyright (C) 2008, One Laptop Per Child
# Copyright (C) 2009 Simon Schampijer
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from gi.repository import GObject
from gi.repository import Gtk
from gi.repository import Gdk
from gettext import gettext as _
from sugar3.activity.widgets import EditToolbar as BaseEditToolbar
from sugar3.graphics import iconentry
from sugar3.graphics.toolbutton import ToolButton
from sugar3.graphics import style
class EditToolbar(BaseEditToolbar):
    """Edit toolbar for Browse: undo/redo/copy/paste plus in-page search."""
    def __init__(self, act):
        BaseEditToolbar.__init__(self)
        self._activity = act
        # Currently tracked browser tab; set by _connect_to_browser().
        self._browser = None
        self.undo.connect('clicked', self.__undo_cb)
        self.redo.connect('clicked', self.__redo_cb)
        self.copy.connect('clicked', self.__copy_cb)
        self.paste.connect('clicked', self.__paste_cb)
        # Expanding invisible separator pushes the search widgets to the right.
        separator = Gtk.SeparatorToolItem()
        separator.set_draw(False)
        separator.set_expand(True)
        self.insert(separator, -1)
        separator.show()
        # Search entry sized to a third of the screen width.
        search_item = Gtk.ToolItem()
        self.search_entry = iconentry.IconEntry()
        self.search_entry.set_icon_from_name(iconentry.ICON_ENTRY_PRIMARY,
                                             'entry-search')
        self.search_entry.add_clear_button()
        self.search_entry.connect('activate', self.__search_entry_activate_cb)
        self.search_entry.connect('changed', self.__search_entry_changed_cb)
        width = int(Gdk.Screen.width() / 3)
        self.search_entry.set_size_request(width, -1)
        search_item.add(self.search_entry)
        self.search_entry.show()
        self.insert(search_item, -1)
        search_item.show()
        # Prev/next match buttons start insensitive until a match is found.
        self._prev = ToolButton('go-previous-paired')
        self._prev.set_tooltip(_('Previous'))
        self._prev.props.sensitive = False
        self._prev.connect('clicked', self.__find_previous_cb)
        self.insert(self._prev, -1)
        self._prev.show()
        self._next = ToolButton('go-next-paired')
        self._next.set_tooltip(_('Next'))
        self._next.props.sensitive = False
        self._next.connect('clicked', self.__find_next_cb)
        self.insert(self._next, -1)
        self._next.show()
        # Attach to the current tab once the main loop is idle, and re-attach
        # whenever the user switches tabs.
        tabbed_view = self._activity.get_canvas()
        GObject.idle_add(lambda:
            self._connect_to_browser(tabbed_view.props.current_browser))
        tabbed_view.connect_after('switch-page', self.__switch_page_cb)
    def __switch_page_cb(self, tabbed_view, page, page_num):
        """Track the newly selected tab's browser."""
        self._connect_to_browser(tabbed_view.props.current_browser)
    def _connect_to_browser(self, browser):
        """Re-wire selection tracking from the old browser to `browser`."""
        if self._browser is not None:
            self._browser.disconnect(self._selection_changed_hid)
        self._browser = browser
        self._update_undoredo_buttons()
        self._update_copypaste_buttons()
        self._selection_changed_hid = self._browser.connect(
            'selection-changed', self._selection_changed_cb)
    def _selection_changed_cb(self, widget):
        self._update_undoredo_buttons()
        self._update_copypaste_buttons()
    def _update_undoredo_buttons(self):
        self.undo.set_sensitive(self._browser.can_undo())
        self.redo.set_sensitive(self._browser.can_redo())
    def _update_copypaste_buttons(self):
        self.copy.set_sensitive(self._browser.can_copy_clipboard())
        self.paste.set_sensitive(self._browser.can_paste_clipboard())
    def __undo_cb(self, button):
        self._browser.undo()
        self._update_undoredo_buttons()
    def __redo_cb(self, button):
        self._browser.redo()
        self._update_undoredo_buttons()
    def __copy_cb(self, button):
        self._browser.copy_clipboard()
    def __paste_cb(self, button):
        self._browser.paste_clipboard()
    def _find_and_mark_text(self, entry):
        """Highlight all matches of the entry text; return True if any."""
        search_text = entry.get_text()
        self._browser.unmark_text_matches()
        self._browser.mark_text_matches(search_text, case_sensitive=False,
                                        limit=0)
        self._browser.set_highlight_text_matches(True)
        found = self._browser.search_text(search_text, case_sensitive=False,
                                          forward=True, wrap=True)
        return found
    def __search_entry_activate_cb(self, entry):
        self._find_and_mark_text(entry)
    def __search_entry_changed_cb(self, entry):
        """Live search: grey out the entry and disable prev/next on a miss."""
        found = self._find_and_mark_text(entry)
        if not found:
            self._prev.props.sensitive = False
            self._next.props.sensitive = False
            entry.modify_text(Gtk.StateType.NORMAL,
                              style.COLOR_BUTTON_GREY.get_gdk_color())
        else:
            self._prev.props.sensitive = True
            self._next.props.sensitive = True
            entry.modify_text(Gtk.StateType.NORMAL,
                              style.COLOR_BLACK.get_gdk_color())
    def __find_previous_cb(self, button):
        search_text = self.search_entry.get_text()
        self._browser.search_text(search_text, case_sensitive=False,
                                  forward=False, wrap=True)
    def __find_next_cb(self, button):
        search_text = self.search_entry.get_text()
        self._browser.search_text(search_text, case_sensitive=False,
                                  forward=True, wrap=True)
| samdroid-apps/browse | edittoolbar.py | Python | gpl-2.0 | 5,982 |
from csv import reader
import random
import time
def getRandomBloodType():
    """Return one of the eight ABO/Rh blood types, chosen uniformly."""
    bloodTypes = ['A+', 'A-', "B+", "B-", "AB+", "AB-", "O+", "O-"]
    # random.choice is the idiomatic replacement for randrange + indexing
    return random.choice(bloodTypes)
def strTimeProp(start, end, format, prop):
    """Return a time string `prop` of the way between `start` and `end`.

    Both endpoints are strings in `format`; `prop` is a float in [0, 1]
    giving the linear interpolation position (local time).
    """
    start_ts = time.mktime(time.strptime(start, format))
    end_ts = time.mktime(time.strptime(end, format))
    interpolated = start_ts + prop * (end_ts - start_ts)
    return time.strftime(format, time.localtime(interpolated))
def randomDate(start, end, prop):
    # Thin wrapper pinning the ISO date format used throughout this script.
    return strTimeProp(start, end, '%Y-%m-%d', prop)
names = open("patientNames.csv", "r")
rows = reader(names)
startDate = "1920-01-01"
endDate = "2017-10-01"
i = 0
for row in rows:
if i > 10:
exit
else:
bt = getRandomBloodType()
d = randomDate(startDate, endDate, random.random())
print '\t(\'' + row[0] + ' ' + row[1] + '\', DATE \'' + d + '\', \'' + bt + '\'),'
| tburgebeckley/phlebotomy | p4/pyscript/buildPatients.py | Python | gpl-3.0 | 924 |
# Copyright 2012 Pinterest.com
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import defaultdict
import json
import pytest
import six
from pymemcache.client.base import Client
from pymemcache.exceptions import (
MemcacheIllegalInputError,
MemcacheClientError
)
from pymemcache.serde import (
python_memcache_serializer,
python_memcache_deserializer
)
def get_set_helper(client, key, value, key2, value2):
    """Exercise the basic get/set/get_many round trip for two keys."""
    # A missing key reads back as None.
    assert client.get(key) is None
    # Explicit-reply set.
    client.set(key, value, noreply=False)
    assert client.get(key) == value
    # Fire-and-forget set.
    client.set(key2, value2, noreply=True)
    assert client.get(key2) == value2
    # Bulk fetch returns exactly what was stored; empty request -> empty dict.
    assert client.get_many([key, key2]) == {key: value, key2: value2}
    assert client.get_many([]) == {}
@pytest.mark.integration()
def test_get_set(client_class, host, port, socket_module):
    """Round-trip byte keys/values through the parametrized client."""
    client = client_class((host, port), socket_module=socket_module)
    client.flush_all()
    key = b'key'
    value = b'value'
    key2 = b'key2'
    value2 = b'value2'
    get_set_helper(client, key, value, key2, value2)
@pytest.mark.integration()
def test_get_set_unicode_key(client_class, host, port, socket_module):
    """Unicode keys round-trip when allow_unicode_keys is enabled."""
    client = client_class((host, port), socket_module=socket_module,
                          allow_unicode_keys=True)
    client.flush_all()
    key = u"こんにちは"
    value = b'hello'
    key2 = 'my☃'
    value2 = b'value2'
    get_set_helper(client, key, value, key2, value2)
@pytest.mark.integration()
def test_add_replace(client_class, host, port, socket_module):
    """add only stores missing keys; replace only overwrites existing ones."""
    client = client_class((host, port), socket_module=socket_module)
    client.flush_all()
    # add succeeds on a missing key ...
    assert client.add(b'key', b'value', noreply=False) is True
    assert client.get(b'key') == b'value'
    # ... but refuses to clobber an existing one.
    assert client.add(b'key', b'value2', noreply=False) is False
    assert client.get(b'key') == b'value'
    # replace refuses a missing key ...
    assert client.replace(b'key1', b'value1', noreply=False) is False
    assert client.get(b'key1') is None
    # ... and overwrites an existing one.
    assert client.replace(b'key', b'value2', noreply=False) is True
    assert client.get(b'key') == b'value2'
@pytest.mark.integration()
def test_append_prepend(client_class, host, port, socket_module):
    """append/prepend fail on missing keys and concatenate on existing ones."""
    client = client_class((host, port), socket_module=socket_module)
    client.flush_all()
    # Both operations require the key to exist already.
    assert client.append(b'key', b'value', noreply=False) is False
    assert client.get(b'key') is None
    assert client.set(b'key', b'value', noreply=False) is True
    assert client.append(b'key', b'after', noreply=False) is True
    assert client.get(b'key') == b'valueafter'
    assert client.prepend(b'key1', b'value', noreply=False) is False
    assert client.get(b'key1') is None
    assert client.prepend(b'key', b'before', noreply=False) is True
    assert client.get(b'key') == b'beforevalueafter'
@pytest.mark.integration()
def test_cas(client_class, host, port, socket_module):
    """Compare-and-set honours the token returned by gets."""
    client = client_class((host, port), socket_module=socket_module)
    client.flush_all()
    # cas on a missing key reports None.
    assert client.cas(b'key', b'value', b'1', noreply=False) is None
    assert client.set(b'key', b'value', noreply=False) is True
    # A stale/bogus token is rejected.
    assert client.cas(b'key', b'value', b'1', noreply=False) is False
    result, cas = client.gets(b'key')
    assert result == b'value'
    # The fresh token works exactly once.
    assert client.cas(b'key', b'value1', cas, noreply=False) is True
    assert client.cas(b'key', b'value2', cas, noreply=False) is False
@pytest.mark.integration()
def test_gets(client_class, host, port, socket_module):
    """gets returns (None, None) for a miss and (value, token) for a hit."""
    client = client_class((host, port), socket_module=socket_module)
    client.flush_all()
    assert client.gets(b'key') == (None, None)
    assert client.set(b'key', b'value', noreply=False) is True
    assert client.gets(b'key')[0] == b'value'
@pytest.mark.delete()
def test_delete(client_class, host, port, socket_module):
    """delete reports False for a miss, True for a hit, and removes the key."""
    # NOTE(review): marked 'delete' while sibling tests use 'integration' —
    # confirm this marker is intentional.
    client = client_class((host, port), socket_module=socket_module)
    client.flush_all()
    assert client.delete(b'key', noreply=False) is False
    assert client.get(b'key') is None
    assert client.set(b'key', b'value', noreply=False) is True
    assert client.delete(b'key', noreply=False) is True
    assert client.get(b'key') is None
@pytest.mark.integration()
def test_incr_decr(client_class, host, port, socket_module):
    """incr/decr on missing and existing keys, plus bad-input errors."""
    # CONSISTENCY FIX: use the parametrized client_class fixture — this test
    # hard-coded Client, silently bypassing the fixture every sibling uses.
    client = client_class((host, port), socket_module=socket_module)
    client.flush_all()
    # incr on a missing key returns None.
    result = client.incr(b'key', 1, noreply=False)
    assert result is None
    result = client.set(b'key', b'0', noreply=False)
    assert result is True
    result = client.incr(b'key', 1, noreply=False)
    assert result == 1
    def _bad_int():
        client.incr(b'key', b'foobar')
    with pytest.raises(MemcacheClientError):
        _bad_int()
    result = client.decr(b'key1', 1, noreply=False)
    assert result is None
    result = client.decr(b'key', 1, noreply=False)
    assert result == 0
    result = client.get(b'key')
    assert result == b'0'
@pytest.mark.integration()
def test_misc(client_class, host, port, socket_module):
    """Smoke test: a client can connect and flush the cache."""
    # CONSISTENCY FIX: use the parametrized client_class fixture (was a
    # hard-coded Client, bypassing the fixture).
    client = client_class((host, port), socket_module=socket_module)
    client.flush_all()
@pytest.mark.integration()
def test_serialization_deserialization(host, port, socket_module):
    """Custom serializer/deserializer hooks round-trip a JSON-able value."""
    # Serializer tags JSON payloads with flags=1 ...
    def _ser(key, value):
        return json.dumps(value).encode('ascii'), 1
    # ... and the deserializer only decodes when that flag is present.
    def _des(key, value, flags):
        if flags == 1:
            return json.loads(value.decode('ascii'))
        return value
    # Builds Client directly: this test targets the base client's hook
    # plumbing rather than the parametrized client classes.
    client = Client((host, port), serializer=_ser, deserializer=_des,
                    socket_module=socket_module)
    client.flush_all()
    value = {'a': 'b', 'c': ['d']}
    client.set(b'key', value)
    result = client.get(b'key')
    assert result == value
@pytest.mark.integration()
def test_serde_serialization(client_class, host, port, socket_module):
    """The bundled python-memcache serde preserves value AND type."""
    def check(value):
        client.set(b'key', value, noreply=False)
        result = client.get(b'key')
        assert result == value
        # type must survive the round trip, not just equality
        assert type(result) is type(value)
    client = client_class((host, port), serializer=python_memcache_serializer,
                          deserializer=python_memcache_deserializer,
                          socket_module=socket_module)
    client.flush_all()
    check(b'byte string')
    check(u'unicode string')
    check('olé')
    check(u'olé')
    check(1)
    check(123123123123123123123)
    check({'a': 'pickle'})
    check([u'one pickle', u'two pickle'])
    # pickled types such as defaultdict must also survive
    testdict = defaultdict(int)
    testdict[u'one pickle']
    testdict[b'two pickle']
    check(testdict)
@pytest.mark.integration()
def test_errors(client_class, host, port, socket_module):
    """Invalid keys and values raise the appropriate Memcache*Error."""
    client = client_class((host, port), socket_module=socket_module)
    client.flush_all()
    # whitespace in keys is illegal per the memcached protocol
    def _key_with_ws():
        client.set(b'key with spaces', b'value', noreply=False)
    with pytest.raises(MemcacheIllegalInputError):
        _key_with_ws()
    # CR/LF in a key would allow protocol injection
    def _key_with_illegal_carriage_return():
        client.set(b'\r\nflush_all', b'value', noreply=False)
    with pytest.raises(MemcacheIllegalInputError):
        _key_with_illegal_carriage_return()
    # keys are limited to 250 bytes
    def _key_too_long():
        client.set(b'x' * 1024, b'value', noreply=False)
    with pytest.raises(MemcacheClientError):
        _key_too_long()
    # unicode keys/values are rejected unless allow_unicode_keys is set
    def _unicode_key_in_set():
        client.set(six.u('\u0FFF'), b'value', noreply=False)
    with pytest.raises(MemcacheClientError):
        _unicode_key_in_set()
    def _unicode_key_in_get():
        client.get(six.u('\u0FFF'))
    with pytest.raises(MemcacheClientError):
        _unicode_key_in_get()
    def _unicode_value_in_set():
        client.set(b'key', six.u('\u0FFF'), noreply=False)
    with pytest.raises(MemcacheClientError):
        _unicode_value_in_set()
| bwalks/pymemcache | pymemcache/test/test_integration.py | Python | apache-2.0 | 8,843 |
import os
import sys
from distutils.core import setup
import py2exe
origIsSystemDLL = py2exe.build_exe.isSystemDLL
def isSystemDLL(pathname):
    """Treat the bundled SDL/ogg/freetype DLLs as non-system DLLs.

    py2exe skips DLLs it believes belong to the OS; these three must ship
    with the game, so force them through before deferring to the stock
    check saved in origIsSystemDLL.
    """
    bundled = ("libfreetype-6.dll", "libogg-0.dll", "sdl_ttf.dll")
    if os.path.basename(pathname).lower() in bundled:
        return 0
    return origIsSystemDLL(pathname)
# Install the patched DLL check before py2exe collects dependencies.
py2exe.build_exe.isSystemDLL = isSystemDLL
# Force the py2exe command so a plain `python setup.py` run builds the exe.
sys.argv.append('py2exe')
setup(
    name = 'Flappy Frog',
    version = '1.0',
    author = 'Yaoshicn',
    options = {
        'py2exe': {
            'bundle_files': 1, # doesn't work on win64
            'compressed': True,
        }
    },
    windows = [{
        'script': "flappy.py",
        'icon_resources': [
            (1, 'flappy.ico')
        ]
    }],
    # no library.zip: everything is bundled into the executable
    zipfile=None,
)
| Yaoshicn/FlappyFrog | setup.py | Python | gpl-3.0 | 789 |
#!/usr/bin/env python3
# -*- coding: utf8; -*-
#
# Copyright (C) 2016 : Kathrin Hanauer
#
# This file is part of texpy (TexWithPython).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
'''A LaTeX document in Python'''
class LatexDocument:
    """A LaTeX document assembled in memory and written to a .tex file.

    Tracks the document class, package/option and TikZ-library
    requirements, plus an ordered list of content fragments.  In
    ``partial`` mode only the content is emitted (prefixed by a comment
    listing the required packages) so it can be \\input elsewhere.
    """
    def __init__(self, fileName='texpy.tex', partial=False):
        self.__filename = fileName
        self.__documentclass = 'article'
        self.__documentOptions = []
        self.__packages = []          # insertion-ordered package names
        self.__packageOptions = {}    # package name -> list of options
        self.__tikzlibs = []
        self.__preview = False
        self.__partial = partial
        self.__content = []
    def setFileName(self, fileName):
        """Set the output file name used by writeDocument()."""
        self.__filename = fileName
    def enablePreview(self, previewOptions=None):
        """Wrap the output in a `preview` environment.

        ``previewOptions`` defaults to ['active', 'tightpage'].
        BUG FIX: the default used to be a shared mutable list (classic
        mutable-default pitfall); it is now created per call.
        """
        if previewOptions is None:
            previewOptions = ['active', 'tightpage']
        self.__preview = True
        self.addPackage('preview', previewOptions)
    def setDocumentClass(self, docClass):
        """Set the \\documentclass (default: 'article')."""
        self.__documentclass = docClass
    def addDocumentOption(self, option):
        """Append an option to the \\documentclass line."""
        self.__documentOptions.append(option)
    def isPartial(self):
        """Return True if only a document fragment will be written."""
        return self.__partial
    def setPartial(self, partial):
        """Toggle fragment (True) vs. standalone document (False) output."""
        self.__partial = partial
    def usesPackage(self, package):
        """Return True if `package` has been registered."""
        return package in self.__packages
    def hasPackageOption(self, package, option):
        """Return True if `package` is registered with `option`."""
        return (self.usesPackage(package)
                and option in self.__packageOptions[package])
    def addPackage(self, package, options=None):
        """Register a package (once) and merge in any new options.

        BUG FIX: ``options`` default was a mutable list; now None-sentinel.
        """
        if options is None:
            options = []
        if not self.usesPackage(package):
            self.__packages.append(package)
            self.__packageOptions[package] = []
        for opt in options:
            if opt not in self.__packageOptions[package]:
                self.__packageOptions[package].append(opt)
    def usesTikzLibrary(self, tikzlib):
        """Return True if tikz is loaded together with `tikzlib`."""
        return self.usesPackage('tikz') and tikzlib in self.__tikzlibs
    def addTikzLibrary(self, tikzlib):
        """Register a TikZ library, pulling in the tikz package."""
        if tikzlib not in self.__tikzlibs:
            self.__tikzlibs.append(tikzlib)
            self.addPackage('tikz')
    def addContent(self, content):
        """Append a raw LaTeX fragment to the document body."""
        self.__content.append(content)
    def write(self, data):
        """File-like alias for addContent (supports print(..., file=doc))."""
        self.addContent(data)
    def flush(self):
        """File-like no-op; content is only flushed by writeDocument()."""
        pass
    def beginTabular(self, formatString):
        """Open a tabular environment with the given column spec."""
        self.addContent('\\begin{tabular}{%s}\n' % formatString)
    def endTabular(self):
        self.addContent('\\end{tabular}\n')
    def beginTikzPicture(self, options=''):
        """Open a tikzpicture environment with optional options."""
        self.addContent('\\begin{tikzpicture}[%s]\n' % options)
    def endTikzPicture(self):
        self.addContent('\\end{tikzpicture}\n')
    def _writeDocumentHeader(self, doc):
        # \documentclass, \usepackage lines, and the optional preview prologue.
        doc.write('\\documentclass[%s]{%s}\n' % (
            ','.join(self.__documentOptions),
            self.__documentclass))
        for package in self.__packages:
            doc.write('\\usepackage[%s]{%s}\n' % (
                ','.join(self.__packageOptions[package]), package))
        if len(self.__tikzlibs) > 0:
            doc.write('\\usetikzlibrary{%s}\n' %
                      ','.join(t for t in self.__tikzlibs))
        doc.write('\\begin{document}\n')
        if self.__preview:
            doc.write('\\begin{preview}\n')
    def _writeRequiredPackages(self, doc):
        # Comment block telling the consumer of a fragment what to load.
        if len(self.__packages) > 0:
            doc.write('%' * 60 + '\n')
            doc.write('% Please make sure to use the following packages:\n')
            for package in self.__packages:
                options = self.__packageOptions[package]
                if len(options) > 0:
                    doc.write('%% %s with options %s\n' % (
                        package, ','.join(options)))
                else:
                    doc.write('%% %s\n' % package)
            if len(self.__tikzlibs) > 0:
                doc.write('%\n% and the following tikz libraries:\n')
                doc.write('% ' + ','.join(t for t in self.__tikzlibs) + '\n')
            doc.write('%' * 60 + '\n\n')
    def _writeDocumentFooter(self, doc):
        if self.__preview:
            doc.write('\\end{preview}\n')
        doc.write('\\end{document}\n')
    def writeDocument(self):
        """Write the complete document (or fragment) to the file name."""
        with open(self.__filename, 'w') as doc:
            if not self.__partial:
                self._writeDocumentHeader(doc)
            else:
                self._writeRequiredPackages(doc)
            for c in self.__content:
                doc.write(c)
            if not self.__partial:
                self._writeDocumentFooter(doc)
| kalyi/TexWithPython | src/texpy/latexdocument.py | Python | gpl-3.0 | 4,984 |
# -*- encoding: utf-8 -*-
from django import forms
from models import *
"""
Classes
"""
class LoginForm(forms.Form):
    # Plain (non-model) login form; both widgets carry Bootstrap's
    # form-control class plus Spanish placeholders.
    username = forms.CharField(
        widget=forms.TextInput(attrs={
            'class': 'form-control', 'placeholder': 'Usuario'})
    )
    password = forms.CharField(
        widget=forms.PasswordInput(attrs={
            'class': 'form-control', 'placeholder': 'Contraseña'}),
    )
class UpdateDataUserForm(forms.ModelForm):
    # ModelForm over Django's User for profile editing.  'email' is
    # redeclared explicitly (overriding the Meta widget below) so it can
    # carry custom required/invalid error messages.
    email = forms.CharField(
        widget=forms.TextInput(
            attrs={'class': 'form-control', 'placeholder': 'Email'}),
        error_messages={
            'required': 'El email es requerido.',
            'invalid': 'Ingrese un email valido'
        }
    )
    class Meta:
        model = User
        fields = ['username', 'first_name', 'last_name', 'email']
        widgets = {
            'username': forms.TextInput(
                attrs={'class': 'form-control', 'placeholder': 'Usuario'}),
            'first_name': forms.TextInput(
                attrs={'class': 'form-control', 'placeholder': 'Nombres'}),
            'last_name': forms.TextInput(
                attrs={'class': 'form-control', 'placeholder': 'Apellidos'}),
            # superseded by the explicit 'email' field declaration above
            'email': forms.TextInput(
                attrs={'class': 'form-control', 'placeholder': 'Email'}),
        }
class UpdatePasswordUserForm(forms.Form):
    """Password-change form: current password plus the new password twice."""
    password = forms.CharField(
        widget=forms.PasswordInput(attrs={
            'class': 'form-control', 'placeholder': 'Password actual'}),
    )
    new_password = forms.CharField(
        widget=forms.PasswordInput(attrs={
            'class': 'form-control', 'placeholder': 'Nuevo Password'}),
    )
    repeat_password = forms.CharField(
        widget=forms.PasswordInput(attrs={
            'class': 'form-control', 'placeholder': 'Repita nuevo Password'}),
    )
    def clean(self):
        """Cross-field validation: length and equality of the new passwords.

        BUG FIX: uses .get() instead of direct indexing — cleaned_data
        lacks entries for fields that failed their own validation, so the
        old code raised KeyError instead of a validation error.
        """
        cleaned_data = super(UpdatePasswordUserForm, self).clean()
        pass1 = cleaned_data.get('new_password', '')
        pass2 = cleaned_data.get('repeat_password', '')
        if len(pass1) < 5:
            raise forms.ValidationError(
                'El nuevo password debe tener al menos 5 caracteres')
        if pass1 != pass2:
            raise forms.ValidationError('Los passwords no coinciden.')
        return cleaned_data
class SprintForm(forms.ModelForm):
    """ModelForm to create/edit a Sprint.

    Date fields get a `datepicker` CSS hook; `planning` keeps its
    model-derived default widget.
    """
    class Meta:
        model = Sprint
        fields = ['name', 'date_start', 'date_finish', 'planning']
        widgets = {
            'name': forms.TextInput(
                attrs={'class': 'form-control', 'placeholder': 'Nombre'}),
            # BUG FIX: placeholders said "YYY" instead of "YYYY".
            'date_start': forms.TextInput(
                attrs={
                    'class': 'form-control datepicker',
                    'placeholder': 'Fecha inicio DD/MM/YYYY ó YYYY-MM-DD'
                }),
            'date_finish': forms.TextInput(
                attrs={
                    'class': 'form-control datepicker',
                    'placeholder': 'Fecha fin DD/MM/YYYY ó YYYY-MM-DD'
                }),
        }
class RetrospectiveUserForm(forms.ModelForm):
    """Form for a user's sprint retrospective feedback (good/bad/suggestions).

    BUG FIX: the three fields were copy-pasted from LoginForm and still used
    PasswordInput widgets with 'Usuario'/'Contraseña' placeholders, which
    masked the retrospective text as it was typed. They are plain text
    inputs with meaningful placeholders now.

    NOTE(review): this ModelForm declares no Meta class, so Django will
    raise when it is instantiated; it presumably should point at a
    retrospective model -- confirm against models.py.
    """
    good = forms.CharField(
        widget=forms.TextInput(attrs={
            'class': 'form-control', 'placeholder': 'Lo bueno'})
    )
    bad = forms.CharField(
        widget=forms.TextInput(attrs={
            'class': 'form-control', 'placeholder': 'Lo malo'})
    )
    suggestions = forms.CharField(
        widget=forms.TextInput(attrs={
            'class': 'form-control', 'placeholder': 'Sugerencias'})
    )
| alfegupe/retro | retrospective/forms.py | Python | bsd-3-clause | 3,446 |
import pygame
from OpenGL.GL import *
from OpenGL.GLU import *
import camera
def main():
    """Initialize pygame + OpenGL and run the main event/render loop."""
    pygame.init()
    display = (1280, 720)
    pygame.display.set_mode(display, DOUBLEBUF | OPENGL)
    # Far clip plane is ~1 AU (in km); may need tuning later.
    gluPerspective(70, (display[0] / display[1]), 0.1, 150000000)
    # BUG FIX: the Camera instance was created but discarded, while the
    # code below called methods on the *class*. Keep the instance.
    cam = camera.Camera(0, 0, 0, 0)  # starts at the origin, at rest, facing forward

    def thrust(scale):
        """Accelerate by |scale| total, distributed along the facing direction."""
        direction = cam.get_dir()
        dirx, diry, dirz = direction[0], direction[1], direction[2]
        sum_dir = abs(dirx) + abs(diry) + abs(dirz)
        if sum_dir == 0:
            return  # no defined facing direction; nothing to do
        # BUG FIX: the original referenced an undefined name `sumdir`
        # (NameError) in two of the three components.
        cam.accelerate((dirx / sum_dir) * scale,
                       (diry / sum_dir) * scale,
                       (dirz / sum_dir) * scale)

    while True:  # main loop
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                quit()
            # BUG FIX: pygame has no ENTER/SHIFT event types; key presses
            # arrive as KEYDOWN events carrying a `key` attribute.
            # TODO: switch to pygame.key.get_pressed() for hold-to-thrust.
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_RETURN:
                    thrust(0.0001)   # forward thrust
                elif event.key in (pygame.K_LSHIFT, pygame.K_RSHIFT):
                    thrust(-0.0001)  # reverse thrust
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        # BUG FIX: without flipping the buffers nothing is ever displayed.
        pygame.display.flip()
| baldengineers/space-engine | mainloop.py | Python | mit | 1,574 |
# Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
from collections import OrderedDict, namedtuple
import os.path
import time
import flask
import gevent
import psutil
from digits import device_query
from digits.task import Task
from digits.utils import subclass, override
# NOTE: Increment this everytime the picked object changes
PICKLE_VERSION = 2
# Used to store network outputs
NetworkOutput = namedtuple('NetworkOutput', ['kind', 'data'])
@subclass
class TrainTask(Task):
    """
    Abstract base class for framework-specific model-training tasks.

    Defines required methods for child classes (snapshot handling,
    weight/activation visualization, inference) and implements the shared
    bookkeeping: GPU resource negotiation, hardware/progress SocketIO
    updates, and storage of train/val outputs for graphing.
    """
    def __init__(self, job, dataset, train_epochs, snapshot_interval, learning_rate, lr_policy, **kwargs):
        """
        Arguments:
        job -- model job
        dataset -- a DatasetJob containing the dataset for this model
        train_epochs -- how many epochs of training data to train on
        snapshot_interval -- how many epochs between taking a snapshot
        learning_rate -- the base learning rate
        lr_policy -- a hash of options to be used for the learning rate policy

        Keyword arguments:
        gpu_count -- how many GPUs to use for training (integer)
        selected_gpus -- a list of GPU indexes to be used for training
        batch_size -- if set, override any network specific batch_size with this value
        batch_accumulation -- accumulate gradients over multiple batches
        val_interval -- how many epochs between validating the model with an epoch of validation data
        pretrained_model -- filename for a model to use for fine-tuning
        crop_size -- crop each image down to a square of this size
        use_mean -- subtract the dataset's mean file or mean pixel
        random_seed -- optional random seed
        solver_type -- optimizer to use (framework-specific identifier)
        rms_decay -- decay term for RMSProp-style solvers
        shuffle -- whether to shuffle training data
        network -- the network description/definition
        framework_id -- id of the framework that will run this task
        data_aug -- data augmentation options
        """
        # Pop every option we understand; whatever remains in kwargs is
        # forwarded to the Task base class below.
        self.gpu_count = kwargs.pop('gpu_count', None)
        self.selected_gpus = kwargs.pop('selected_gpus', None)
        self.batch_size = kwargs.pop('batch_size', None)
        self.batch_accumulation = kwargs.pop('batch_accumulation', None)
        self.val_interval = kwargs.pop('val_interval', None)
        self.pretrained_model = kwargs.pop('pretrained_model', None)
        self.crop_size = kwargs.pop('crop_size', None)
        self.use_mean = kwargs.pop('use_mean', None)
        self.random_seed = kwargs.pop('random_seed', None)
        self.solver_type = kwargs.pop('solver_type', None)
        self.rms_decay = kwargs.pop('rms_decay', None)
        self.shuffle = kwargs.pop('shuffle', None)
        self.network = kwargs.pop('network', None)
        self.framework_id = kwargs.pop('framework_id', None)
        self.data_aug = kwargs.pop('data_aug', None)
        super(TrainTask, self).__init__(job_dir = job.dir(), **kwargs)
        # version tag used by __setstate__ to migrate old pickles
        self.pickver_task_train = PICKLE_VERSION
        self.job = job
        self.dataset = dataset
        self.train_epochs = train_epochs
        self.snapshot_interval = snapshot_interval
        self.learning_rate = learning_rate
        self.lr_policy = lr_policy
        self.current_epoch = 0
        self.snapshots = []
        # data gets stored as dicts of lists (for graphing)
        self.train_outputs = OrderedDict()
        self.val_outputs = OrderedDict()
def __getstate__(self):
state = super(TrainTask, self).__getstate__()
if 'dataset' in state:
del state['dataset']
if 'snapshots' in state:
del state['snapshots']
if '_labels' in state:
del state['_labels']
if '_hw_socketio_thread' in state:
del state['_hw_socketio_thread']
return state
def __setstate__(self, state):
if state['pickver_task_train'] < 2:
state['train_outputs'] = OrderedDict()
state['val_outputs'] = OrderedDict()
tl = state.pop('train_loss_updates', None)
vl = state.pop('val_loss_updates', None)
va = state.pop('val_accuracy_updates', None)
lr = state.pop('lr_updates', None)
if tl:
state['train_outputs']['epoch'] = NetworkOutput('Epoch', [x[0] for x in tl])
state['train_outputs']['loss'] = NetworkOutput('SoftmaxWithLoss', [x[1] for x in tl])
state['train_outputs']['learning_rate'] = NetworkOutput('LearningRate', [x[1] for x in lr])
if vl:
state['val_outputs']['epoch'] = NetworkOutput('Epoch', [x[0] for x in vl])
if va:
state['val_outputs']['accuracy'] = NetworkOutput('Accuracy', [x[1]/100 for x in va])
state['val_outputs']['loss'] = NetworkOutput('SoftmaxWithLoss', [x[1] for x in vl])
if state['use_mean'] == True:
state['use_mean'] = 'pixel'
elif state['use_mean'] == False:
state['use_mean'] = 'none'
state['pickver_task_train'] = PICKLE_VERSION
super(TrainTask, self).__setstate__(state)
self.snapshots = []
self.dataset = None
    @override
    def offer_resources(self, resources):
        """Claim GPUs from the scheduler's pool, or None if unsatisfiable.

        Returns {} to run on CPU, {'gpus': [(id, 1), ...]} when the
        requested GPUs are available, or None to stay queued.
        """
        if 'gpus' not in resources:
            return None
        if not resources['gpus']:
            return {} # don't use a GPU at all
        if self.gpu_count is not None:
            # "any N GPUs" mode: take the first N with free capacity
            identifiers = []
            for resource in resources['gpus']:
                if resource.remaining() >= 1:
                    identifiers.append(resource.identifier)
                    if len(identifiers) == self.gpu_count:
                        break
            if len(identifiers) == self.gpu_count:
                return {'gpus': [(i, 1) for i in identifiers]}
            else:
                return None
        elif self.selected_gpus is not None:
            # "these specific GPUs" mode: all-or-nothing
            all_available = True
            for i in self.selected_gpus:
                available = False
                for gpu in resources['gpus']:
                    if i == gpu.identifier:
                        if gpu.remaining() >= 1:
                            available = True
                        break
                if not available:
                    all_available = False
                    break
            if all_available:
                return {'gpus': [(i, 1) for i in self.selected_gpus]}
            else:
                return None
        return None
    @override
    def before_run(self):
        # start a thread which sends SocketIO updates about hardware utilization
        gpus = None
        if 'gpus' in self.current_resources:
            gpus = [identifier for (identifier, value) in self.current_resources['gpus']]
        # greenlet is killed in after_run()
        self._hw_socketio_thread = gevent.spawn(
                self.hw_socketio_updater,
                gpus)
    def hw_socketio_updater(self, gpus):
        """
        This thread sends SocketIO messages about hardware utilization
        to connected clients

        Arguments:
        gpus -- a list of identifiers for the GPUs currently being used
        """
        from digits.webapp import app, socketio

        devices = []
        if gpus is not None:
            for index in gpus:
                device = device_query.get_device(index)
                if device:
                    devices.append((index, device))
                else:
                    raise RuntimeError('Failed to load gpu information for GPU #"%s"' % index)

        # this thread continues until killed in after_run()
        while True:
            # CPU (Non-GPU) Info
            data_cpu = {}
            if hasattr(self, 'p') and self.p is not None:
                data_cpu['pid'] = self.p.pid
                try:
                    ps = psutil.Process(self.p.pid) # 'self.p' is the system call object
                    if ps.is_running():
                        # psutil 2.x renamed get_* accessors; support both
                        if psutil.version_info[0] >= 2:
                            data_cpu['cpu_pct'] = ps.cpu_percent(interval=1)
                            data_cpu['mem_pct'] = ps.memory_percent()
                            data_cpu['mem_used'] = ps.memory_info().rss
                        else:
                            data_cpu['cpu_pct'] = ps.get_cpu_percent(interval=1)
                            data_cpu['mem_pct'] = ps.get_memory_percent()
                            data_cpu['mem_used'] = ps.get_memory_info().rss
                except psutil.NoSuchProcess:
                    # In rare case of instant process crash or PID went zombie (report nothing)
                    pass

            data_gpu = []
            for index, device in devices:
                update = {'name': device.name, 'index': index}
                nvml_info = device_query.get_nvml_info(index)
                if nvml_info is not None:
                    update.update(nvml_info)
                data_gpu.append(update)

            # render the stats server-side and push the HTML to the job room
            with app.app_context():
                html = flask.render_template('models/gpu_utilization.html',
                        data_gpu = data_gpu,
                        data_cpu = data_cpu)
                socketio.emit('task update',
                        {
                            'task': self.html_id(),
                            'update': 'gpu_utilization',
                            'html': html,
                            },
                        namespace='/jobs',
                        room=self.job_id,
                        )
            gevent.sleep(1)
def send_progress_update(self, epoch):
"""
Sends socketio message about the current progress
"""
if self.current_epoch == epoch:
return
self.current_epoch = epoch
self.progress = epoch/self.train_epochs
self.emit_progress_update()
    def save_train_output(self, *args):
        """
        Save output to self.train_outputs and, at most every 5 seconds,
        push updated loss/lr graphs to connected SocketIO clients.
        """
        from digits.webapp import socketio

        if not self.save_output(self.train_outputs, *args):
            return
        # rate-limit the (expensive) graph emissions to one per 5 seconds
        if self.last_train_update and (time.time() - self.last_train_update) < 5:
            return
        self.last_train_update = time.time()

        self.logger.debug('Training %s%% complete.' % round(100 * self.current_epoch/self.train_epochs,2))

        # loss graph data
        data = self.combined_graph_data()
        if data:
            socketio.emit('task update',
                    {
                        'task': self.html_id(),
                        'update': 'combined_graph',
                        'data': data,
                        },
                    namespace='/jobs',
                    room=self.job_id,
                    )

            if data['columns']:
                # isolate the Loss column data for the sparkline
                graph_data = data['columns'][0][1:]
                socketio.emit('task update',
                        {
                            'task': self.html_id(),
                            'job_id': self.job_id,
                            'update': 'combined_graph',
                            'data': graph_data,
                            },
                        namespace='/jobs',
                        room='job_management',
                        )

        # lr graph data
        data = self.lr_graph_data()
        if data:
            socketio.emit('task update',
                    {
                        'task': self.html_id(),
                        'update': 'lr_graph',
                        'data': data,
                        },
                    namespace='/jobs',
                    room=self.job_id,
                    )
def save_val_output(self, *args):
"""
Save output to self.val_outputs
"""
from digits.webapp import socketio
if not self.save_output(self.val_outputs, *args):
return
# loss graph data
data = self.combined_graph_data()
if data:
socketio.emit('task update',
{
'task': self.html_id(),
'update': 'combined_graph',
'data': data,
},
namespace='/jobs',
room=self.job_id,
)
    def save_output(self, d, name, kind, value):
        """
        Save output to self.train_outputs or self.val_outputs
        Returns true if all outputs for this epoch have been added

        Arguments:
        d -- the dictionary where the output should be stored
        name -- name of the output (e.g. "accuracy")
        kind -- the type of outputs (e.g. "Accuracy")
        value -- value for this output (e.g. 0.95)
        """
        # don't let them be unicode
        name = str(name)
        kind = str(kind)

        # update d['epoch']
        if 'epoch' not in d:
            d['epoch'] = NetworkOutput('Epoch', [self.current_epoch])
        elif d['epoch'].data[-1] != self.current_epoch:
            d['epoch'].data.append(self.current_epoch)

        if name not in d:
            d[name] = NetworkOutput(kind, [])
        epoch_len = len(d['epoch'].data)
        name_len = len(d[name].data)

        # save to back of d[name], keeping every series aligned with d['epoch']
        if name_len > epoch_len:
            raise Exception('Received a new output without being told the new epoch')
        elif name_len == epoch_len:
            # already exists
            # a second value for the same epoch turns the entry into a list
            if isinstance(d[name].data[-1], list):
                d[name].data[-1].append(value)
            else:
                d[name].data[-1] = [d[name].data[-1], value]
        elif name_len == epoch_len - 1:
            # expected case
            d[name].data.append(value)
        else:
            # we might have missed one
            # pad with None so the series stays aligned with the epoch axis
            for _ in xrange(epoch_len - name_len - 1):
                d[name].data.append(None)
            d[name].data.append(value)

        # epoch is complete when every series (except the axes) has a value
        for key in d:
            if key not in ['epoch', 'learning_rate']:
                if len(d[key].data) != epoch_len:
                    return False
        return True
@override
def after_run(self):
if hasattr(self, '_hw_socketio_thread'):
self._hw_socketio_thread.kill()
    def detect_snapshots(self):
        """
        Populate self.snapshots with snapshots that exist on disk
        Returns True if at least one usable snapshot is found
        """
        # base implementation finds nothing; frameworks override this
        return False
def snapshot_list(self):
"""
Returns an array of arrays for creating an HTML select field
"""
return [[s[1], 'Epoch #%s' % s[1]] for s in reversed(self.snapshots)]
    def est_next_snapshot(self):
        """
        Returns the estimated time in seconds until the next snapshot is taken
        """
        # base implementation has no estimate; frameworks may override
        return None
    def can_view_weights(self):
        """
        Returns True if this Task can visualize the weights of each layer for a given model
        """
        # abstract: subclasses must declare their capability
        raise NotImplementedError()
    def view_weights(self, model_epoch=None, layers=None):
        """
        View the weights for a specific model and layer[s]
        """
        # base implementation: no visualization available
        return None
    def can_view_activations(self):
        """
        Returns True if this Task can visualize the activations of a model after inference
        """
        # abstract: subclasses must declare their capability
        raise NotImplementedError()
    def infer_one(self, data, model_epoch=None, layers=None):
        """
        Run inference on one input
        """
        # base implementation: inference not supported
        return None
    def can_infer_many(self):
        """
        Returns True if this Task can run inference on many inputs
        """
        # abstract: subclasses must declare their capability
        raise NotImplementedError()
    def infer_many(self, data, model_epoch=None):
        """
        Run inference on many inputs
        """
        # base implementation: batch inference not supported
        return None
def get_snapshot(self, epoch=-1):
"""
return snapshot file for specified epoch
"""
snapshot_filename = None
if len(self.snapshots) == 0:
return "no snapshots"
if epoch == -1 or not epoch:
epoch = self.snapshots[-1][1]
snapshot_filename = self.snapshots[-1][0]
else:
for f, e in self.snapshots:
if e == epoch:
snapshot_filename = f
break
if not snapshot_filename:
raise ValueError('Invalid epoch')
return snapshot_filename
def get_snapshot_filename(self,epoch=-1):
"""
Return the filename for the specified epoch
"""
path, name = os.path.split(self.get_snapshot(epoch))
return name
    def get_labels(self):
        """
        Read labels from labels_file and return them in a list
        """
        # The labels might be set already
        if hasattr(self, '_labels') and self._labels and len(self._labels) > 0:
            return self._labels

        assert hasattr(self.dataset, 'labels_file'), 'labels_file not set'
        assert self.dataset.labels_file, 'labels_file not set'
        assert os.path.exists(self.dataset.path(self.dataset.labels_file)), 'labels_file does not exist'

        # one label per non-empty line, in file order
        labels = []
        with open(self.dataset.path(self.dataset.labels_file)) as infile:
            for line in infile:
                label = line.strip()
                if label:
                    labels.append(label)

        assert len(labels) > 0, 'no labels in labels_file'

        # cache for subsequent calls
        self._labels = labels
        return self._labels
    def lr_graph_data(self):
        """
        Returns learning rate data formatted for a C3.js graph,
        or None if no learning-rate history has been recorded yet.
        """
        if not self.train_outputs or 'epoch' not in self.train_outputs or 'learning_rate' not in self.train_outputs:
            return None

        # return 100-200 values or fewer (integer division in Python 2)
        stride = max(len(self.train_outputs['epoch'].data)/100,1)
        e = ['epoch'] + self.train_outputs['epoch'].data[::stride]
        lr = ['lr'] + self.train_outputs['learning_rate'].data[::stride]

        return {
                'columns': [e, lr],
                'xs': {
                    'lr': 'epoch'
                    },
                'names': {
                    'lr': 'Learning Rate'
                    },
                }
    def combined_graph_data(self, cull=True):
        """
        Returns all train/val outputs in data for one C3.js graph,
        or None when no training data has been recorded yet.

        Keyword arguments:
        cull -- if True, cut down the number of data points returned to a reasonable size
        """
        data = {
                'columns': [],
                'xs': {},
                'axes': {},
                'names': {},
                }

        added_train_data = False
        added_val_data = False

        if self.train_outputs and 'epoch' in self.train_outputs:
            if cull:
                # max 200 data points
                stride = max(len(self.train_outputs['epoch'].data)/100,1)
            else:
                # return all data
                stride = 1
            for name, output in self.train_outputs.iteritems():
                if name not in ['epoch', 'learning_rate']:
                    col_id = '%s-train' % name
                    data['xs'][col_id] = 'train_epochs'
                    data['names'][col_id] = '%s (train)' % name
                    if 'accuracy' in output.kind.lower() or 'accuracy' in name.lower():
                        # accuracies are plotted as percentages on the y2 axis
                        data['columns'].append([col_id] + [
                            (100*x if x is not None else 'none')
                            for x in output.data[::stride]])
                        data['axes'][col_id] = 'y2'
                    else:
                        data['columns'].append([col_id] + [
                            (x if x is not None else 'none')
                            for x in output.data[::stride]])
                    added_train_data = True
            if added_train_data:
                data['columns'].append(['train_epochs'] + self.train_outputs['epoch'].data[::stride])

        if self.val_outputs and 'epoch' in self.val_outputs:
            if cull:
                # max 200 data points
                stride = max(len(self.val_outputs['epoch'].data)/100,1)
            else:
                # return all data
                stride = 1
            for name, output in self.val_outputs.iteritems():
                if name not in ['epoch']:
                    col_id = '%s-val' % name
                    data['xs'][col_id] = 'val_epochs'
                    data['names'][col_id] = '%s (val)' % name
                    if 'accuracy' in output.kind.lower() or 'accuracy' in name.lower():
                        # accuracies are plotted as percentages on the y2 axis
                        data['columns'].append([col_id] + [
                            (100*x if x is not None else 'none')
                            for x in output.data[::stride]])
                        data['axes'][col_id] = 'y2'
                    else:
                        data['columns'].append([col_id] + [
                            (x if x is not None else 'none')
                            for x in output.data[::stride]])
                    added_val_data = True
            if added_val_data:
                data['columns'].append(['val_epochs'] + self.val_outputs['epoch'].data[::stride])

        if added_train_data:
            return data
        else:
            # return None if only validation data exists
            # helps with ordering of columns in graph
            return None
    # return id of framework used for training
    def get_framework_id(self):
        """
        Returns a string identifying the framework this task was created with
        """
        return self.framework_id
    def get_model_files(self):
        """
        return path to model file
        """
        # abstract: framework-specific subclasses must implement
        raise NotImplementedError()
    def get_network_desc(self):
        """
        return text description of model
        """
        # abstract: framework-specific subclasses must implement
        raise NotImplementedError()
    def get_task_stats(self,epoch=-1):
        """
        return a dictionary of task statistics
        """
        # abstract: framework-specific subclasses must implement
        raise NotImplementedError()
| TimZaman/DIGITS | digits/model/tasks/train.py | Python | bsd-3-clause | 21,893 |
__author__ = 'shyue'
| materialsvirtuallab/pyhull | pyhull/tests/__init__.py | Python | mit | 21 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from base import GAETestCase
from gaepermission import facade
from mock import Mock, patch
from routes.login import google, facebook
import settings
from tekton.gae.middleware.redirect import RedirectResponse
class IndexTests(GAETestCase):
    """Tests for the Facebook login index route."""
    @patch('routes.login.facebook.facade')
    def test_facebook_login(self, facade_mock):
        # A login with no pending account link should redirect immediately.
        cmd_mock = Mock()
        cmd_mock.pending_link = False
        facade_mock.login_facebook = Mock(return_value=cmd_mock)
        response = facebook.index(Mock(), 'token')
        self.assertIsInstance(response, RedirectResponse)
    @patch('routes.login.facebook.facade.login_facebook')
    @patch('routes.login.facebook.facade.send_passwordless_login_link')
    def test_facebook_login_for_already_email_registered_user(self, send_login_mock, login_facebook_mock):
        # NOTE: mock args arrive bottom-up: send_login_mock comes from the
        # lower @patch decorator, login_facebook_mock from the upper one.
        email = 'foo@gmail.com'
        cmd_mock = Mock()
        cmd_mock.main_user_from_email.email = email
        login_facebook_mock.return_value = cmd_mock
        token = 'token'
        # pre-register the app data and the user so the email path triggers
        facade.save_or_update_passwordless_app_data('id', token).execute()
        facade.save_user_cmd(email).execute()
        resp_mock = Mock()
        response = facebook.index(resp_mock, token)
        login_facebook_mock.assert_called_once_with(token, resp_mock)
        # the passwordless login link must be sent to the registered email
        self.assertEqual(email, send_login_mock.call_args[0][0])
        self.assert_can_render(response)
class FormTests(GAETestCase):
    """Tests for the Facebook credentials form page."""

    def test_success(self):
        # The form route must return a renderable template response.
        rendered = facebook.form()
        self.assert_can_render(rendered)
class SaveTests(GAETestCase):
    """Tests for saving the Facebook app credentials."""

    def test_success(self):
        # Persisting an app id/token pair should end in a redirect.
        result = facebook.save('app_id', 'token')
        self.assertIsInstance(result, RedirectResponse)
| renzon/tekton | backend/test/login_tests/facebook.py | Python | mit | 1,801 |
import copy
import datetime
import decimal
import math
import warnings
from itertools import tee
from django.db import connection
from django.db.models.query_utils import QueryWrapper
from django.conf import settings
from django import forms
from django.core import exceptions, validators
from django.utils.datastructures import DictWrapper
from django.utils.dateparse import parse_date, parse_datetime, parse_time
from django.utils.functional import curry
from django.utils.text import capfirst
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import smart_unicode, force_unicode, smart_str
from django.utils.ipv6 import clean_ipv6_address
class NOT_PROVIDED:
    # Sentinel class used as Field.default so an explicit default of None
    # can be distinguished from "no default supplied".
    pass
# The values to use for "blank" in SelectFields. Will be appended to the start
# of most "choices" lists.
BLANK_CHOICE_DASH = [("", "---------")]
BLANK_CHOICE_NONE = [("", "None")]
class FieldDoesNotExist(Exception):
    # Raised when a model field lookup (by name) finds no matching field.
    pass
# A guide to Field parameters:
#
# * name: The name of the field specifed in the model.
# * attname: The attribute to use on the model object. This is the same as
# "name", except in the case of ForeignKeys, where "_id" is
# appended.
# * db_column: The db_column specified in the model (or None).
# * column: The database column for this field. This is the same as
# "attname", except if db_column is specified.
#
# Code that introspects values, or does other dynamic things, should use
# attname. For example, this gets the primary key value of object "obj":
#
# getattr(obj, opts.pk.attname)
class Field(object):
    """Base class for all field types"""

    # Designates whether empty strings fundamentally are allowed at the
    # database level.
    empty_strings_allowed = True

    # These track each time a Field instance is created. Used to retain order.
    # The auto_creation_counter is used for fields that Django implicitly
    # creates, creation_counter is used for all user-specified fields.
    creation_counter = 0
    auto_creation_counter = -1
    default_validators = [] # Default set of validators
    default_error_messages = {
        'invalid_choice': _(u'Value %r is not a valid choice.'),
        'null': _(u'This field cannot be null.'),
        'blank': _(u'This field cannot be blank.'),
        'unique': _(u'%(model_name)s with this %(field_label)s '
                    u'already exists.'),
    }

    # Generic field type description, usually overridden by subclasses
    def _description(self):
        return _(u'Field of type: %(field_type)s') % {
            'field_type': self.__class__.__name__
        }
    description = property(_description)
    def __init__(self, verbose_name=None, name=None, primary_key=False,
            max_length=None, unique=False, blank=False, null=False,
            db_index=False, rel=None, default=NOT_PROVIDED, editable=True,
            serialize=True, unique_for_date=None, unique_for_month=None,
            unique_for_year=None, choices=None, help_text='', db_column=None,
            db_tablespace=None, auto_created=False, validators=[],
            error_messages=None):
        """Store the field options; name/attname are finalized later by
        set_attributes_from_name() when the field is added to a model.

        NOTE(review): the mutable default `validators=[]` is only safe
        because it is never mutated below (it is concatenated).
        """
        self.name = name
        self.verbose_name = verbose_name
        self.primary_key = primary_key
        self.max_length, self._unique = max_length, unique
        self.blank, self.null = blank, null
        self.rel = rel
        self.default = default
        self.editable = editable
        self.serialize = serialize
        self.unique_for_date, self.unique_for_month = (unique_for_date,
                                                       unique_for_month)
        self.unique_for_year = unique_for_year
        self._choices = choices or []
        self.help_text = help_text
        self.db_column = db_column
        self.db_tablespace = db_tablespace or settings.DEFAULT_INDEX_TABLESPACE
        self.auto_created = auto_created

        # Set db_index to True if the field has a relationship and doesn't
        # explicitly set db_index.
        self.db_index = db_index

        # Adjust the appropriate creation counter, and save our local copy.
        if auto_created:
            self.creation_counter = Field.auto_creation_counter
            Field.auto_creation_counter -= 1
        else:
            self.creation_counter = Field.creation_counter
            Field.creation_counter += 1

        self.validators = self.default_validators + validators

        # Merge error messages from the whole MRO (base classes first) so
        # subclasses and per-instance overrides win over defaults.
        messages = {}
        for c in reversed(self.__class__.__mro__):
            messages.update(getattr(c, 'default_error_messages', {}))
        messages.update(error_messages or {})
        self.error_messages = messages
    def __cmp__(self, other):
        """Order fields by declaration order (creation_counter)."""
        # This is needed because bisect does not take a comparison function.
        return cmp(self.creation_counter, other.creation_counter)
    def __deepcopy__(self, memodict):
        """Shallow-copy the field (and its rel); deep copying is unnecessary."""
        # We don't have to deepcopy very much here, since most things are not
        # intended to be altered after initial creation.
        obj = copy.copy(self)
        if self.rel:
            obj.rel = copy.copy(self.rel)
        memodict[id(self)] = obj
        return obj
    def to_python(self, value):
        """
        Converts the input value into the expected Python data type, raising
        django.core.exceptions.ValidationError if the data can't be converted.
        Returns the converted value. Subclasses should override this.
        """
        # base implementation is the identity conversion
        return value
    def run_validators(self, value):
        """Run all of this field's validators against *value*, collecting
        every error before raising a single ValidationError."""
        # empty values are handled by validate(); nothing to check here
        if value in validators.EMPTY_VALUES:
            return

        errors = []
        for v in self.validators:
            try:
                v(value)
            except exceptions.ValidationError as e:
                # prefer this field's customized message for the error code
                if hasattr(e, 'code') and e.code in self.error_messages:
                    message = self.error_messages[e.code]
                    if e.params:
                        message = message % e.params
                    errors.append(message)
                else:
                    errors.extend(e.messages)
        if errors:
            raise exceptions.ValidationError(errors)
    def validate(self, value, model_instance):
        """
        Validates value and throws ValidationError. Subclasses should override
        this to provide validation logic.
        """
        if not self.editable:
            # Skip validation for non-editable fields.
            return
        if self._choices and value:
            for option_key, option_value in self.choices:
                if isinstance(option_value, (list, tuple)):
                    # This is an optgroup, so look inside the group for
                    # options.
                    for optgroup_key, optgroup_value in option_value:
                        if value == optgroup_key:
                            return
                elif value == option_key:
                    return
            # value matched none of the declared choices
            msg = self.error_messages['invalid_choice'] % value
            raise exceptions.ValidationError(msg)

        if value is None and not self.null:
            raise exceptions.ValidationError(self.error_messages['null'])

        if not self.blank and value in validators.EMPTY_VALUES:
            raise exceptions.ValidationError(self.error_messages['blank'])
    def clean(self, value, model_instance):
        """
        Convert the value's type and run validation. Validation errors
        from to_python and validate are propagated. The correct value is
        returned if no error is raised.
        """
        # order matters: convert first, then field checks, then validators
        value = self.to_python(value)
        self.validate(value, model_instance)
        self.run_validators(value)
        return value
    def db_type(self, connection):
        """
        Returns the database column data type for this field, for the provided
        connection. Returns None when the field maps to no column type.
        """
        # The default implementation of this method looks at the
        # backend-specific DATA_TYPES dictionary, looking up the field by its
        # "internal type".
        #
        # A Field class can implement the get_internal_type() method to specify
        # which *preexisting* Django Field class it's most similar to -- i.e.,
        # a custom field might be represented by a TEXT column type, which is
        # the same as the TextField Django field type, which means the custom
        # field's get_internal_type() returns 'TextField'.
        #
        # But the limitation of the get_internal_type() / data_types approach
        # is that it cannot handle database column types that aren't already
        # mapped to one of the built-in Django field types. In this case, you
        # can implement db_type() instead of get_internal_type() to specify
        # exactly which wacky database column type you want to use.
        data = DictWrapper(self.__dict__, connection.ops.quote_name, "qn_")
        try:
            return (connection.creation.data_types[self.get_internal_type()]
                    % data)
        except KeyError:
            return None
    @property
    def unique(self):
        """Primary keys are implicitly unique."""
        return self._unique or self.primary_key
    def set_attributes_from_name(self, name):
        """Finalize name/attname/column/verbose_name once the attribute
        name on the model class is known."""
        if not self.name:
            self.name = name
        self.attname, self.column = self.get_attname_column()
        if self.verbose_name is None and self.name:
            # default verbose name: field name with underscores as spaces
            self.verbose_name = self.name.replace('_', ' ')
    def contribute_to_class(self, cls, name):
        """Register this field on the model class *cls* under *name*."""
        self.set_attributes_from_name(name)
        self.model = cls
        cls._meta.add_field(self)
        if self.choices:
            # expose get_<name>_display() for human-readable choice labels
            setattr(cls, 'get_%s_display' % self.name,
                    curry(cls._get_FIELD_display, field=self))
    def get_attname(self):
        """Attribute name on model instances (ForeignKey appends '_id')."""
        return self.name
    def get_attname_column(self):
        """Return (attname, db column); db_column overrides the default."""
        attname = self.get_attname()
        column = self.db_column or attname
        return attname, column
    def get_cache_name(self):
        """Instance attribute name used to cache related-object lookups."""
        return '_%s_cache' % self.name
    def get_internal_type(self):
        """Key into the backend data_types map; see db_type() above."""
        return self.__class__.__name__
    def pre_save(self, model_instance, add):
        """
        Returns field's value just before saving.
        """
        return getattr(model_instance, self.attname)
    def get_prep_value(self, value):
        """
        Perform preliminary non-db specific value checks and conversions.
        """
        # base implementation is a no-op; subclasses coerce types here
        return value
    def get_db_prep_value(self, value, connection, prepared=False):
        """Returns field's value prepared for interacting with the database
        backend.

        Used by the default implementations of ``get_db_prep_save`` and
        ``get_db_prep_lookup``.
        """
        if not prepared:
            value = self.get_prep_value(value)
        return value
    def get_db_prep_save(self, value, connection):
        """
        Returns field's value prepared for saving into a database.
        """
        return self.get_db_prep_value(value, connection=connection,
                                      prepared=False)
    def get_prep_lookup(self, lookup_type, value):
        """
        Perform preliminary non-db specific lookup checks and conversions
        """
        # objects that know how to prepare themselves (e.g. querysets)
        if hasattr(value, 'prepare'):
            return value.prepare()
        if hasattr(value, '_prepare'):
            return value._prepare()

        # these lookups take the value as-is
        if lookup_type in (
                'regex', 'iregex', 'month', 'day', 'week_day', 'search',
                'contains', 'icontains', 'iexact', 'startswith', 'istartswith',
                'endswith', 'iendswith', 'isnull'
            ):
            return value
        elif lookup_type in ('exact', 'gt', 'gte', 'lt', 'lte'):
            return self.get_prep_value(value)
        elif lookup_type in ('range', 'in'):
            # prepare each member of the iterable individually
            return [self.get_prep_value(v) for v in value]
        elif lookup_type == 'year':
            try:
                return int(value)
            except ValueError:
                raise ValueError("The __year lookup type requires an integer "
                                 "argument")

        raise TypeError("Field has invalid lookup: %s" % lookup_type)
    def get_db_prep_lookup(self, lookup_type, value, connection,
                           prepared=False):
        """
        Returns field's value prepared for database lookup.
        Usually returns a list of SQL parameters for the lookup.
        """
        if not prepared:
            value = self.get_prep_lookup(lookup_type, value)
        if hasattr(value, 'get_compiler'):
            value = value.get_compiler(connection=connection)
        if hasattr(value, 'as_sql') or hasattr(value, '_as_sql'):
            # If the value has a relabel_aliases method, it will need to
            # be invoked before the final SQL is evaluated
            if hasattr(value, 'relabel_aliases'):
                return value
            if hasattr(value, 'as_sql'):
                sql, params = value.as_sql()
            else:
                sql, params = value._as_sql(connection=connection)
            return QueryWrapper(('(%s)' % sql), params)

        if lookup_type in ('regex', 'iregex', 'month', 'day', 'week_day',
                           'search'):
            return [value]
        elif lookup_type in ('exact', 'gt', 'gte', 'lt', 'lte'):
            return [self.get_db_prep_value(value, connection=connection,
                                           prepared=prepared)]
        elif lookup_type in ('range', 'in'):
            return [self.get_db_prep_value(v, connection=connection,
                                           prepared=prepared) for v in value]
        elif lookup_type in ('contains', 'icontains'):
            # wrap in LIKE wildcards, escaping LIKE metacharacters
            return ["%%%s%%" % connection.ops.prep_for_like_query(value)]
        elif lookup_type == 'iexact':
            return [connection.ops.prep_for_iexact_query(value)]
        elif lookup_type in ('startswith', 'istartswith'):
            return ["%s%%" % connection.ops.prep_for_like_query(value)]
        elif lookup_type in ('endswith', 'iendswith'):
            return ["%%%s" % connection.ops.prep_for_like_query(value)]
        elif lookup_type == 'isnull':
            # IS NULL / IS NOT NULL takes no parameters
            return []
        elif lookup_type == 'year':
            if self.get_internal_type() == 'DateField':
                return connection.ops.year_lookup_bounds_for_date_field(value)
            else:
                return connection.ops.year_lookup_bounds(value)
    def has_default(self):
        """
        Returns a boolean of whether this field has a default value.
        """
        # NOT_PROVIDED is the sentinel meaning "no default supplied"
        return self.default is not NOT_PROVIDED
    def get_default(self):
        """
        Returns the default value for this field.
        """
        if self.has_default():
            # callable defaults are evaluated fresh on every call
            if callable(self.default):
                return self.default()
            return force_unicode(self.default, strings_only=True)
        # without a default: None when NULL is representable, else ""
        if (not self.empty_strings_allowed or (self.null and
                   not connection.features.interprets_empty_strings_as_nulls)):
            return None
        return ""
    def get_validator_unique_lookup_type(self):
        """Lookup string used when checking uniqueness of this field."""
        return '%s__exact' % self.name
    def get_choices(self, include_blank=True, blank_choice=BLANK_CHOICE_DASH):
        """Returns choices with a default blank choices included, for use
        as SelectField choices for this field."""
        first_choice = include_blank and blank_choice or []
        if self.choices:
            return first_choice + list(self.choices)
        # no explicit choices: for related fields, build choices from every
        # object in the related model's default manager
        rel_model = self.rel.to
        if hasattr(self.rel, 'get_related_field'):
            lst = [(getattr(x, self.rel.get_related_field().attname),
                   smart_unicode(x))
                   for x in rel_model._default_manager.complex_filter(
                        self.rel.limit_choices_to)]
        else:
            lst = [(x._get_pk_val(), smart_unicode(x))
                   for x in rel_model._default_manager.complex_filter(
                        self.rel.limit_choices_to)]
        return first_choice + lst
def get_choices_default(self):
return self.get_choices()
def get_flatchoices(self, include_blank=True,
blank_choice=BLANK_CHOICE_DASH):
"""
Returns flattened choices with a default blank choice included.
"""
first_choice = include_blank and blank_choice or []
return first_choice + list(self.flatchoices)
def _get_val_from_obj(self, obj):
if obj is not None:
return getattr(obj, self.attname)
else:
return self.get_default()
def value_to_string(self, obj):
"""
Returns a string value of this field from the passed obj.
This is used by the serialization framework.
"""
return smart_unicode(self._get_val_from_obj(obj))
def bind(self, fieldmapping, original, bound_field_class):
return bound_field_class(self, fieldmapping, original)
def _get_choices(self):
if hasattr(self._choices, 'next'):
choices, self._choices = tee(self._choices)
return choices
else:
return self._choices
choices = property(_get_choices)
def _get_flatchoices(self):
"""Flattened version of choices tuple."""
flat = []
for choice, value in self.choices:
if isinstance(value, (list, tuple)):
flat.extend(value)
else:
flat.append((choice,value))
return flat
flatchoices = property(_get_flatchoices)
def save_form_data(self, instance, data):
setattr(instance, self.name, data)
def formfield(self, form_class=forms.CharField, **kwargs):
"""
Returns a django.forms.Field instance for this database Field.
"""
defaults = {'required': not self.blank,
'label': capfirst(self.verbose_name),
'help_text': self.help_text}
if self.has_default():
if callable(self.default):
defaults['initial'] = self.default
defaults['show_hidden_initial'] = True
else:
defaults['initial'] = self.get_default()
if self.choices:
# Fields with choices get special treatment.
include_blank = (self.blank or
not (self.has_default() or 'initial' in kwargs))
defaults['choices'] = self.get_choices(include_blank=include_blank)
defaults['coerce'] = self.to_python
if self.null:
defaults['empty_value'] = None
form_class = forms.TypedChoiceField
# Many of the subclass-specific formfield arguments (min_value,
# max_value) don't apply for choice fields, so be sure to only pass
# the values that TypedChoiceField will understand.
for k in kwargs.keys():
if k not in ('coerce', 'empty_value', 'choices', 'required',
'widget', 'label', 'initial', 'help_text',
'error_messages', 'show_hidden_initial'):
del kwargs[k]
defaults.update(kwargs)
return form_class(**defaults)
def value_from_object(self, obj):
"""
Returns the value of this field in the given model instance.
"""
return getattr(obj, self.attname)
def __repr__(self):
"""
Displays the module, class and name of the field.
"""
path = '%s.%s' % (self.__class__.__module__, self.__class__.__name__)
name = getattr(self, 'name', None)
if name is not None:
return '<%s: %s>' % (path, name)
return '<%s>' % path
class AutoField(Field):
description = _("Integer")
empty_strings_allowed = False
default_error_messages = {
'invalid': _(u"'%s' value must be an integer."),
}
def __init__(self, *args, **kwargs):
assert kwargs.get('primary_key', False) is True, \
"%ss must have primary_key=True." % self.__class__.__name__
kwargs['blank'] = True
Field.__init__(self, *args, **kwargs)
def get_internal_type(self):
return "AutoField"
def to_python(self, value):
if value is None:
return value
try:
return int(value)
except (TypeError, ValueError):
msg = self.error_messages['invalid'] % str(value)
raise exceptions.ValidationError(msg)
def validate(self, value, model_instance):
pass
def get_db_prep_value(self, value, connection, prepared=False):
if not prepared:
value = self.get_prep_value(value)
value = connection.ops.validate_autopk_value(value)
return value
def get_prep_value(self, value):
if value is None:
return None
return int(value)
def contribute_to_class(self, cls, name):
assert not cls._meta.has_auto_field, \
"A model can't have more than one AutoField."
super(AutoField, self).contribute_to_class(cls, name)
cls._meta.has_auto_field = True
cls._meta.auto_field = self
def formfield(self, **kwargs):
return None
class BooleanField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _(u"'%s' value must be either True or False."),
}
description = _("Boolean (Either True or False)")
def __init__(self, *args, **kwargs):
kwargs['blank'] = True
if 'default' not in kwargs and not kwargs.get('null'):
kwargs['default'] = False
Field.__init__(self, *args, **kwargs)
def get_internal_type(self):
return "BooleanField"
def to_python(self, value):
if value in (True, False):
# if value is 1 or 0 than it's equal to True or False, but we want
# to return a true bool for semantic reasons.
return bool(value)
if value in ('t', 'True', '1'):
return True
if value in ('f', 'False', '0'):
return False
msg = self.error_messages['invalid'] % str(value)
raise exceptions.ValidationError(msg)
def get_prep_lookup(self, lookup_type, value):
# Special-case handling for filters coming from a Web request (e.g. the
# admin interface). Only works for scalar values (not lists). If you're
# passing in a list, you might as well make things the right type when
# constructing the list.
if value in ('1', '0'):
value = bool(int(value))
return super(BooleanField, self).get_prep_lookup(lookup_type, value)
def get_prep_value(self, value):
if value is None:
return None
return bool(value)
def formfield(self, **kwargs):
# Unlike most fields, BooleanField figures out include_blank from
# self.null instead of self.blank.
if self.choices:
include_blank = (self.null or
not (self.has_default() or 'initial' in kwargs))
defaults = {'choices': self.get_choices(
include_blank=include_blank)}
else:
defaults = {'form_class': forms.BooleanField}
defaults.update(kwargs)
return super(BooleanField, self).formfield(**defaults)
class CharField(Field):
description = _("String (up to %(max_length)s)")
def __init__(self, *args, **kwargs):
super(CharField, self).__init__(*args, **kwargs)
self.validators.append(validators.MaxLengthValidator(self.max_length))
def get_internal_type(self):
return "CharField"
def to_python(self, value):
if isinstance(value, basestring) or value is None:
return value
return smart_unicode(value)
def get_prep_value(self, value):
return self.to_python(value)
def formfield(self, **kwargs):
# Passing max_length to forms.CharField means that the value's length
# will be validated twice. This is considered acceptable since we want
# the value in the form field (to pass into widget for example).
defaults = {'max_length': self.max_length}
defaults.update(kwargs)
return super(CharField, self).formfield(**defaults)
# TODO: Maybe move this into contrib, because it's specialized.
class CommaSeparatedIntegerField(CharField):
default_validators = [validators.validate_comma_separated_integer_list]
description = _("Comma-separated integers")
def formfield(self, **kwargs):
defaults = {
'error_messages': {
'invalid': _(u'Enter only digits separated by commas.'),
}
}
defaults.update(kwargs)
return super(CommaSeparatedIntegerField, self).formfield(**defaults)
class DateField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _(u"'%s' value has an invalid date format. It must be "
u"in YYYY-MM-DD format."),
'invalid_date': _(u"'%s' value has the correct format (YYYY-MM-DD) "
u"but it is an invalid date."),
}
description = _("Date (without time)")
def __init__(self, verbose_name=None, name=None, auto_now=False,
auto_now_add=False, **kwargs):
self.auto_now, self.auto_now_add = auto_now, auto_now_add
if auto_now or auto_now_add:
kwargs['editable'] = False
kwargs['blank'] = True
Field.__init__(self, verbose_name, name, **kwargs)
def get_internal_type(self):
return "DateField"
def to_python(self, value):
if value is None:
return value
if isinstance(value, datetime.datetime):
return value.date()
if isinstance(value, datetime.date):
return value
value = smart_str(value)
try:
parsed = parse_date(value)
if parsed is not None:
return parsed
except ValueError:
msg = self.error_messages['invalid_date'] % value
raise exceptions.ValidationError(msg)
msg = self.error_messages['invalid'] % value
raise exceptions.ValidationError(msg)
def pre_save(self, model_instance, add):
if self.auto_now or (self.auto_now_add and add):
value = datetime.date.today()
setattr(model_instance, self.attname, value)
return value
else:
return super(DateField, self).pre_save(model_instance, add)
def contribute_to_class(self, cls, name):
super(DateField,self).contribute_to_class(cls, name)
if not self.null:
setattr(cls, 'get_next_by_%s' % self.name,
curry(cls._get_next_or_previous_by_FIELD, field=self,
is_next=True))
setattr(cls, 'get_previous_by_%s' % self.name,
curry(cls._get_next_or_previous_by_FIELD, field=self,
is_next=False))
def get_prep_lookup(self, lookup_type, value):
# For "__month", "__day", and "__week_day" lookups, convert the value
# to an int so the database backend always sees a consistent type.
if lookup_type in ('month', 'day', 'week_day'):
return int(value)
return super(DateField, self).get_prep_lookup(lookup_type, value)
def get_prep_value(self, value):
return self.to_python(value)
def get_db_prep_value(self, value, connection, prepared=False):
# Casts dates into the format expected by the backend
if not prepared:
value = self.get_prep_value(value)
return connection.ops.value_to_db_date(value)
def value_to_string(self, obj):
val = self._get_val_from_obj(obj)
return '' if val is None else val.isoformat()
def formfield(self, **kwargs):
defaults = {'form_class': forms.DateField}
defaults.update(kwargs)
return super(DateField, self).formfield(**defaults)
class DateTimeField(DateField):
empty_strings_allowed = False
default_error_messages = {
'invalid': _(u"'%s' value has an invalid format. It must be in "
u"YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ] format."),
'invalid_date': _(u"'%s' value has the correct format "
u"(YYYY-MM-DD) but it is an invalid date."),
'invalid_datetime': _(u"'%s' value has the correct format "
u"(YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ]) "
u"but it is an invalid date/time."),
}
description = _("Date (with time)")
# __init__ is inherited from DateField
def get_internal_type(self):
return "DateTimeField"
def to_python(self, value):
if value is None:
return value
if isinstance(value, datetime.datetime):
return value
if isinstance(value, datetime.date):
value = datetime.datetime(value.year, value.month, value.day)
if settings.USE_TZ:
# For backwards compatibility, interpret naive datetimes in
# local time. This won't work during DST change, but we can't
# do much about it, so we let the exceptions percolate up the
# call stack.
warnings.warn(u"DateTimeField received a naive datetime (%s)"
u" while time zone support is active." % value,
RuntimeWarning)
default_timezone = timezone.get_default_timezone()
value = timezone.make_aware(value, default_timezone)
return value
value = smart_str(value)
try:
parsed = parse_datetime(value)
if parsed is not None:
return parsed
except ValueError:
msg = self.error_messages['invalid_datetime'] % value
raise exceptions.ValidationError(msg)
try:
parsed = parse_date(value)
if parsed is not None:
return datetime.datetime(parsed.year, parsed.month, parsed.day)
except ValueError:
msg = self.error_messages['invalid_date'] % value
raise exceptions.ValidationError(msg)
msg = self.error_messages['invalid'] % value
raise exceptions.ValidationError(msg)
def pre_save(self, model_instance, add):
if self.auto_now or (self.auto_now_add and add):
value = timezone.now()
setattr(model_instance, self.attname, value)
return value
else:
return super(DateTimeField, self).pre_save(model_instance, add)
# contribute_to_class is inherited from DateField, it registers
# get_next_by_FOO and get_prev_by_FOO
# get_prep_lookup is inherited from DateField
def get_prep_value(self, value):
value = self.to_python(value)
if value is not None and settings.USE_TZ and timezone.is_naive(value):
# For backwards compatibility, interpret naive datetimes in local
# time. This won't work during DST change, but we can't do much
# about it, so we let the exceptions percolate up the call stack.
warnings.warn(u"DateTimeField received a naive datetime (%s)"
u" while time zone support is active." % value,
RuntimeWarning)
default_timezone = timezone.get_default_timezone()
value = timezone.make_aware(value, default_timezone)
return value
def get_db_prep_value(self, value, connection, prepared=False):
# Casts datetimes into the format expected by the backend
if not prepared:
value = self.get_prep_value(value)
return connection.ops.value_to_db_datetime(value)
def value_to_string(self, obj):
val = self._get_val_from_obj(obj)
return '' if val is None else val.isoformat()
def formfield(self, **kwargs):
defaults = {'form_class': forms.DateTimeField}
defaults.update(kwargs)
return super(DateTimeField, self).formfield(**defaults)
class DecimalField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _(u"'%s' value must be a decimal number."),
}
description = _("Decimal number")
def __init__(self, verbose_name=None, name=None, max_digits=None,
decimal_places=None, **kwargs):
self.max_digits, self.decimal_places = max_digits, decimal_places
Field.__init__(self, verbose_name, name, **kwargs)
def get_internal_type(self):
return "DecimalField"
def to_python(self, value):
if value is None:
return value
try:
return decimal.Decimal(value)
except decimal.InvalidOperation:
msg = self.error_messages['invalid'] % str(value)
raise exceptions.ValidationError(msg)
def _format(self, value):
if isinstance(value, basestring) or value is None:
return value
else:
return self.format_number(value)
def format_number(self, value):
"""
Formats a number into a string with the requisite number of digits and
decimal places.
"""
# Method moved to django.db.backends.util.
#
# It is preserved because it is used by the oracle backend
# (django.db.backends.oracle.query), and also for
# backwards-compatibility with any external code which may have used
# this method.
from django.db.backends import util
return util.format_number(value, self.max_digits, self.decimal_places)
def get_db_prep_save(self, value, connection):
return connection.ops.value_to_db_decimal(self.to_python(value),
self.max_digits, self.decimal_places)
def get_prep_value(self, value):
return self.to_python(value)
def formfield(self, **kwargs):
defaults = {
'max_digits': self.max_digits,
'decimal_places': self.decimal_places,
'form_class': forms.DecimalField,
}
defaults.update(kwargs)
return super(DecimalField, self).formfield(**defaults)
class EmailField(CharField):
default_validators = [validators.validate_email]
description = _("E-mail address")
def __init__(self, *args, **kwargs):
# max_length should be overridden to 254 characters to be fully
# compliant with RFCs 3696 and 5321
kwargs['max_length'] = kwargs.get('max_length', 75)
CharField.__init__(self, *args, **kwargs)
def formfield(self, **kwargs):
# As with CharField, this will cause email validation to be performed
# twice.
defaults = {
'form_class': forms.EmailField,
}
defaults.update(kwargs)
return super(EmailField, self).formfield(**defaults)
class FilePathField(Field):
description = _("File path")
def __init__(self, verbose_name=None, name=None, path='', match=None,
recursive=False, allow_files=True, allow_folders=False, **kwargs):
self.path, self.match, self.recursive = path, match, recursive
self.allow_files, self.allow_folders = allow_files, allow_folders
kwargs['max_length'] = kwargs.get('max_length', 100)
Field.__init__(self, verbose_name, name, **kwargs)
def formfield(self, **kwargs):
defaults = {
'path': self.path,
'match': self.match,
'recursive': self.recursive,
'form_class': forms.FilePathField,
'allow_files': self.allow_files,
'allow_folders': self.allow_folders,
}
defaults.update(kwargs)
return super(FilePathField, self).formfield(**defaults)
def get_internal_type(self):
return "FilePathField"
class FloatField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%s' value must be a float."),
}
description = _("Floating point number")
def get_prep_value(self, value):
if value is None:
return None
return float(value)
def get_internal_type(self):
return "FloatField"
def to_python(self, value):
if value is None:
return value
try:
return float(value)
except (TypeError, ValueError):
msg = self.error_messages['invalid'] % str(value)
raise exceptions.ValidationError(msg)
def formfield(self, **kwargs):
defaults = {'form_class': forms.FloatField}
defaults.update(kwargs)
return super(FloatField, self).formfield(**defaults)
class IntegerField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%s' value must be an integer."),
}
description = _("Integer")
def get_prep_value(self, value):
if value is None:
return None
return int(value)
def get_prep_lookup(self, lookup_type, value):
if ((lookup_type == 'gte' or lookup_type == 'lt')
and isinstance(value, float)):
value = math.ceil(value)
return super(IntegerField, self).get_prep_lookup(lookup_type, value)
def get_internal_type(self):
return "IntegerField"
def to_python(self, value):
if value is None:
return value
try:
return int(value)
except (TypeError, ValueError):
msg = self.error_messages['invalid'] % str(value)
raise exceptions.ValidationError(msg)
def formfield(self, **kwargs):
defaults = {'form_class': forms.IntegerField}
defaults.update(kwargs)
return super(IntegerField, self).formfield(**defaults)
class BigIntegerField(IntegerField):
empty_strings_allowed = False
description = _("Big (8 byte) integer")
MAX_BIGINT = 9223372036854775807
def get_internal_type(self):
return "BigIntegerField"
def formfield(self, **kwargs):
defaults = {'min_value': -BigIntegerField.MAX_BIGINT - 1,
'max_value': BigIntegerField.MAX_BIGINT}
defaults.update(kwargs)
return super(BigIntegerField, self).formfield(**defaults)
class IPAddressField(Field):
empty_strings_allowed = False
description = _("IPv4 address")
def __init__(self, *args, **kwargs):
kwargs['max_length'] = 15
Field.__init__(self, *args, **kwargs)
def get_internal_type(self):
return "IPAddressField"
def formfield(self, **kwargs):
defaults = {'form_class': forms.IPAddressField}
defaults.update(kwargs)
return super(IPAddressField, self).formfield(**defaults)
class GenericIPAddressField(Field):
empty_strings_allowed = True
description = _("IP address")
default_error_messages = {}
def __init__(self, protocol='both', unpack_ipv4=False, *args, **kwargs):
self.unpack_ipv4 = unpack_ipv4
self.default_validators, invalid_error_message = \
validators.ip_address_validators(protocol, unpack_ipv4)
self.default_error_messages['invalid'] = invalid_error_message
kwargs['max_length'] = 39
Field.__init__(self, *args, **kwargs)
def get_internal_type(self):
return "GenericIPAddressField"
def to_python(self, value):
if value and ':' in value:
return clean_ipv6_address(value,
self.unpack_ipv4, self.error_messages['invalid'])
return value
def get_db_prep_value(self, value, connection, prepared=False):
if not prepared:
value = self.get_prep_value(value)
return value or None
def get_prep_value(self, value):
if value and ':' in value:
try:
return clean_ipv6_address(value, self.unpack_ipv4)
except exceptions.ValidationError:
pass
return value
def formfield(self, **kwargs):
defaults = {'form_class': forms.GenericIPAddressField}
defaults.update(kwargs)
return super(GenericIPAddressField, self).formfield(**defaults)
class NullBooleanField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%s' value must be either None, True or False."),
}
description = _("Boolean (Either True, False or None)")
def __init__(self, *args, **kwargs):
kwargs['null'] = True
kwargs['blank'] = True
Field.__init__(self, *args, **kwargs)
def get_internal_type(self):
return "NullBooleanField"
def to_python(self, value):
if value is None:
return None
if value in (True, False):
return bool(value)
if value in ('None',):
return None
if value in ('t', 'True', '1'):
return True
if value in ('f', 'False', '0'):
return False
msg = self.error_messages['invalid'] % str(value)
raise exceptions.ValidationError(msg)
def get_prep_lookup(self, lookup_type, value):
# Special-case handling for filters coming from a Web request (e.g. the
# admin interface). Only works for scalar values (not lists). If you're
# passing in a list, you might as well make things the right type when
# constructing the list.
if value in ('1', '0'):
value = bool(int(value))
return super(NullBooleanField, self).get_prep_lookup(lookup_type,
value)
def get_prep_value(self, value):
if value is None:
return None
return bool(value)
def formfield(self, **kwargs):
defaults = {
'form_class': forms.NullBooleanField,
'required': not self.blank,
'label': capfirst(self.verbose_name),
'help_text': self.help_text}
defaults.update(kwargs)
return super(NullBooleanField, self).formfield(**defaults)
class PositiveIntegerField(IntegerField):
description = _("Positive integer")
def get_internal_type(self):
return "PositiveIntegerField"
def formfield(self, **kwargs):
defaults = {'min_value': 0}
defaults.update(kwargs)
return super(PositiveIntegerField, self).formfield(**defaults)
class PositiveSmallIntegerField(IntegerField):
description = _("Positive small integer")
def get_internal_type(self):
return "PositiveSmallIntegerField"
def formfield(self, **kwargs):
defaults = {'min_value': 0}
defaults.update(kwargs)
return super(PositiveSmallIntegerField, self).formfield(**defaults)
class SlugField(CharField):
description = _("Slug (up to %(max_length)s)")
def __init__(self, *args, **kwargs):
kwargs['max_length'] = kwargs.get('max_length', 50)
# Set db_index=True unless it's been set manually.
if 'db_index' not in kwargs:
kwargs['db_index'] = True
super(SlugField, self).__init__(*args, **kwargs)
def get_internal_type(self):
return "SlugField"
def formfield(self, **kwargs):
defaults = {'form_class': forms.SlugField}
defaults.update(kwargs)
return super(SlugField, self).formfield(**defaults)
class SmallIntegerField(IntegerField):
description = _("Small integer")
def get_internal_type(self):
return "SmallIntegerField"
class TextField(Field):
description = _("Text")
def get_internal_type(self):
return "TextField"
def get_prep_value(self, value):
if isinstance(value, basestring) or value is None:
return value
return smart_unicode(value)
def formfield(self, **kwargs):
defaults = {'widget': forms.Textarea}
defaults.update(kwargs)
return super(TextField, self).formfield(**defaults)
class TimeField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _(u"'%s' value has an invalid format. It must be in "
u"HH:MM[:ss[.uuuuuu]] format."),
'invalid_time': _(u"'%s' value has the correct format "
u"(HH:MM[:ss[.uuuuuu]]) but it is an invalid time."),
}
description = _("Time")
def __init__(self, verbose_name=None, name=None, auto_now=False,
auto_now_add=False, **kwargs):
self.auto_now, self.auto_now_add = auto_now, auto_now_add
if auto_now or auto_now_add:
kwargs['editable'] = False
kwargs['blank'] = True
Field.__init__(self, verbose_name, name, **kwargs)
def get_internal_type(self):
return "TimeField"
def to_python(self, value):
if value is None:
return None
if isinstance(value, datetime.time):
return value
if isinstance(value, datetime.datetime):
# Not usually a good idea to pass in a datetime here (it loses
# information), but this can be a side-effect of interacting with a
# database backend (e.g. Oracle), so we'll be accommodating.
return value.time()
value = smart_str(value)
try:
parsed = parse_time(value)
if parsed is not None:
return parsed
except ValueError:
msg = self.error_messages['invalid_time'] % value
raise exceptions.ValidationError(msg)
msg = self.error_messages['invalid'] % value
raise exceptions.ValidationError(msg)
def pre_save(self, model_instance, add):
if self.auto_now or (self.auto_now_add and add):
value = datetime.datetime.now().time()
setattr(model_instance, self.attname, value)
return value
else:
return super(TimeField, self).pre_save(model_instance, add)
def get_prep_value(self, value):
return self.to_python(value)
def get_db_prep_value(self, value, connection, prepared=False):
# Casts times into the format expected by the backend
if not prepared:
value = self.get_prep_value(value)
return connection.ops.value_to_db_time(value)
def value_to_string(self, obj):
val = self._get_val_from_obj(obj)
return '' if val is None else val.isoformat()
def formfield(self, **kwargs):
defaults = {'form_class': forms.TimeField}
defaults.update(kwargs)
return super(TimeField, self).formfield(**defaults)
class URLField(CharField):
description = _("URL")
def __init__(self, verbose_name=None, name=None, **kwargs):
kwargs['max_length'] = kwargs.get('max_length', 200)
CharField.__init__(self, verbose_name, name, **kwargs)
self.validators.append(validators.URLValidator())
def formfield(self, **kwargs):
# As with CharField, this will cause URL validation to be performed
# twice.
defaults = {
'form_class': forms.URLField,
}
defaults.update(kwargs)
return super(URLField, self).formfield(**defaults)
| leereilly/django-1 | django/db/models/fields/__init__.py | Python | bsd-3-clause | 47,225 |
import threading, time
from sqlalchemy import pool, interfaces, select, event
import sqlalchemy as tsa
from sqlalchemy import testing
from sqlalchemy.testing.util import gc_collect, lazy_gc
from sqlalchemy.testing import eq_, assert_raises
from sqlalchemy.testing.engines import testing_engine
from sqlalchemy.testing import fixtures
# Global counter used by MockConnection to hand out a unique id per instance.
mcid = 1
class MockDBAPI(object):
    """Stand-in DB-API module whose connect() hands back MockConnections.

    Set ``throw_error`` to True to simulate a database that refuses
    connections; pass ``delay=<seconds>`` to simulate a slow connect.
    """
    throw_error = False

    def connect(self, *args, **kwargs):
        if self.throw_error:
            raise Exception("couldnt connect !")
        sleep_for = kwargs.pop('delay', 0)
        if sleep_for:
            time.sleep(sleep_for)
        return MockConnection()
class MockConnection(object):
    """Fake DB-API connection tracking a unique id and its closed state."""
    closed = False

    def __init__(self):
        # Assign a unique, monotonically increasing id from the module
        # counter so tests can tell connection instances apart.
        global mcid
        self.id = mcid
        mcid += 1

    def close(self):
        self.closed = True

    def rollback(self):
        pass

    def cursor(self):
        return MockCursor()
class MockCursor(object):
    """Fake DB-API cursor; execute() and close() are deliberate no-ops."""

    def execute(self, *args, **kw):
        pass

    def close(self):
        pass
class PoolTestBase(fixtures.TestBase):
    """Shared setup/teardown plus QueuePool fixture helpers for pool tests."""

    def setup(self):
        # Each test starts with a clean slate of pool managers.
        pool.clear_managers()

    @classmethod
    def teardown_class(cls):
        pool.clear_managers()

    def _queuepool_fixture(self, **kw):
        # Convenience wrapper when the test only needs the pool itself.
        dbapi, queue_pool = self._queuepool_dbapi_fixture(**kw)
        return queue_pool

    def _queuepool_dbapi_fixture(self, **kw):
        # Return both the mock DBAPI and a QueuePool built over it, so
        # tests can toggle dbapi behavior (e.g. throw_error) mid-test.
        dbapi = MockDBAPI()
        queue_pool = pool.QueuePool(
            creator=lambda: dbapi.connect('foo.db'), **kw)
        return dbapi, queue_pool
class PoolTest(PoolTestBase):
    """Tests for pool.manage() connection managers, threadlocal checkout
    behavior, and the per-connection ``info`` dictionary semantics."""

    def test_manager(self):
        # With use_threadlocal=True, identical connect() arguments within
        # the same thread return the same pooled connection.
        manager = pool.manage(MockDBAPI(), use_threadlocal=True)

        c1 = manager.connect('foo.db')
        c2 = manager.connect('foo.db')
        c3 = manager.connect('bar.db')
        c4 = manager.connect("foo.db", bar="bat")
        c5 = manager.connect("foo.db", bar="hoho")
        c6 = manager.connect("foo.db", bar="bat")

        assert c1.cursor() is not None
        assert c1 is c2
        assert c1 is not c3
        assert c4 is c6
        assert c4 is not c5

    def test_manager_with_key(self):
        # A DBAPI whose connect() accepts no keyword arguments; pooling
        # identity must then be driven solely by the sa_pool_key argument.
        class NoKws(object):
            def connect(self, arg):
                return MockConnection()

        manager = pool.manage(NoKws(), use_threadlocal=True)

        c1 = manager.connect('foo.db', sa_pool_key="a")
        c2 = manager.connect('foo.db', sa_pool_key="b")
        c3 = manager.connect('bar.db', sa_pool_key="a")

        assert c1.cursor() is not None
        # Same URL, different key -> distinct; different URL, same key -> same.
        assert c1 is not c2
        assert c1 is c3

    def test_bad_args(self):
        # connect(None) should be tolerated without raising.
        manager = pool.manage(MockDBAPI())
        connection = manager.connect(None)

    def test_non_thread_local_manager(self):
        # Without threadlocal behavior, each connect() yields a distinct
        # checked-out connection even for identical arguments.
        manager = pool.manage(MockDBAPI(), use_threadlocal = False)

        connection = manager.connect('foo.db')
        connection2 = manager.connect('foo.db')

        self.assert_(connection.cursor() is not None)
        self.assert_(connection is not connection2)

    @testing.fails_on('+pyodbc',
            "pyodbc cursor doesn't implement tuple __eq__")
    def test_cursor_iterable(self):
        # A raw DBAPI cursor should be directly iterable over result rows.
        conn = testing.db.raw_connection()
        cursor = conn.cursor()
        cursor.execute(str(select([1], bind=testing.db)))
        expected = [(1, )]
        for row in cursor:
            eq_(row, expected.pop(0))

    def test_no_connect_on_recreate(self):
        def creator():
            raise Exception("no creates allowed")

        # recreate() must never invoke the creator itself -- only an
        # actual checkout should open a connection.
        for cls in (pool.SingletonThreadPool, pool.StaticPool,
                    pool.QueuePool, pool.NullPool, pool.AssertionPool):
            p = cls(creator=creator)
            p.dispose()
            p2 = p.recreate()
            assert p2.__class__ is cls

            # Same check with a working DBAPI flipped to error mode:
            # dispose/recreate must not touch it.
            mock_dbapi = MockDBAPI()
            p = cls(creator=mock_dbapi.connect)
            conn = p.connect()
            conn.close()
            mock_dbapi.throw_error = True
            p.dispose()
            p.recreate()

    def testthreadlocal_del(self):
        # Return-to-pool via dereference/garbage collection.
        self._do_testthreadlocal(useclose=False)

    def testthreadlocal_close(self):
        # Return-to-pool via explicit close().
        self._do_testthreadlocal(useclose=True)

    def _do_testthreadlocal(self, useclose=False):
        # Exercises threadlocal checkout identity for both QueuePool and
        # SingletonThreadPool.  Statement order matters: c2 = None relies
        # on garbage collection (lazy_gc) to return the connection.
        dbapi = MockDBAPI()
        for p in pool.QueuePool(creator=dbapi.connect,
                pool_size=3, max_overflow=-1,
                use_threadlocal=True), \
                pool.SingletonThreadPool(creator=dbapi.connect,
                    use_threadlocal=True):
            c1 = p.connect()
            c2 = p.connect()
            # threadlocal: second connect() in same thread is the same
            # connection; unique_connection() bypasses that.
            self.assert_(c1 is c2)
            c3 = p.unique_connection()
            self.assert_(c3 is not c1)
            if useclose:
                c2.close()
            else:
                c2 = None
            c2 = p.connect()
            self.assert_(c1 is c2)
            self.assert_(c3 is not c1)
            if useclose:
                c2.close()
            else:
                c2 = None
                lazy_gc()
            if useclose:
                c1 = p.connect()
                c2 = p.connect()
                c3 = p.connect()
                c3.close()
                c2.close()
                self.assert_(c1.connection is not None)
                c1.close()
            c1 = c2 = c3 = None

            # extra tests with QueuePool to ensure connections get
            # __del__()ed when dereferenced
            if isinstance(p, pool.QueuePool):
                lazy_gc()
                self.assert_(p.checkedout() == 0)
                c1 = p.connect()
                c2 = p.connect()
                if useclose:
                    c2.close()
                    c1.close()
                else:
                    c2 = None
                    c1 = None
                    lazy_gc()
                self.assert_(p.checkedout() == 0)

    def test_info(self):
        p = self._queuepool_fixture(pool_size=1, max_overflow=0)

        # info starts empty and is shared with the connection record.
        c = p.connect()
        self.assert_(not c.info)
        self.assert_(c.info is c._connection_record.info)

        # info persists across a normal checkin/checkout cycle...
        c.info['foo'] = 'bar'
        c.close()
        del c

        c = p.connect()
        self.assert_('foo' in c.info)

        # ...but is discarded when the connection is invalidated.
        c.invalidate()
        c = p.connect()
        self.assert_('foo' not in c.info)

        # A detached connection keeps its own copy of info.
        c.info['foo2'] = 'bar2'
        c.detach()
        self.assert_('foo2' in c.info)

        c2 = p.connect()
        self.assert_(c.connection is not c2.connection)
        self.assert_(not c2.info)
        self.assert_('foo2' in c.info)
class PoolDialectTest(PoolTestBase):
    """Verify each pool implementation routes rollback/commit/close calls
    through its ``_dialect`` hooks in the expected order."""

    def _dialect(self):
        # Dialect stub that records each hook invocation ('R', 'C', 'CL')
        # before delegating to the underlying DBAPI connection.
        recorded = []

        class TrackingDialect(object):
            def do_rollback(self, dbapi_connection):
                recorded.append('R')
                dbapi_connection.rollback()

            def do_commit(self, dbapi_connection):
                recorded.append('C')
                dbapi_connection.commit()

            def do_close(self, dbapi_connection):
                recorded.append('CL')
                dbapi_connection.close()

        return TrackingDialect(), recorded

    def _do_test(self, pool_cls, assertion):
        # Run a checkout/checkin cycle, dispose+recreate, then another
        # cycle, and compare the recorded hook sequence.
        mock_dbapi = MockDBAPI()
        dialect, recorded = self._dialect()

        p = pool_cls(creator=mock_dbapi.connect)
        p._dialect = dialect
        conn = p.connect()
        conn.close()
        p.dispose()
        p.recreate()
        conn = p.connect()
        conn.close()
        eq_(recorded, assertion)

    def test_queue_pool(self):
        self._do_test(pool.QueuePool, ['R', 'CL', 'R'])

    def test_assertion_pool(self):
        self._do_test(pool.AssertionPool, ['R', 'CL', 'R'])

    def test_singleton_pool(self):
        self._do_test(pool.SingletonThreadPool, ['R', 'CL', 'R'])

    def test_null_pool(self):
        self._do_test(pool.NullPool, ['R', 'CL', 'R', 'CL'])

    def test_static_pool(self):
        self._do_test(pool.StaticPool, ['R', 'R'])
class PoolEventsTest(PoolTestBase):
def _first_connect_event_fixture(self):
p = self._queuepool_fixture()
canary = []
def first_connect(*arg, **kw):
canary.append('first_connect')
event.listen(p, 'first_connect', first_connect)
return p, canary
def _connect_event_fixture(self):
p = self._queuepool_fixture()
canary = []
def connect(*arg, **kw):
canary.append('connect')
event.listen(p, 'connect', connect)
return p, canary
def _checkout_event_fixture(self):
p = self._queuepool_fixture()
canary = []
def checkout(*arg, **kw):
canary.append('checkout')
event.listen(p, 'checkout', checkout)
return p, canary
def _checkin_event_fixture(self):
p = self._queuepool_fixture()
canary = []
def checkin(*arg, **kw):
canary.append('checkin')
event.listen(p, 'checkin', checkin)
return p, canary
def _reset_event_fixture(self):
p = self._queuepool_fixture()
canary = []
def reset(*arg, **kw):
canary.append('reset')
event.listen(p, 'reset', reset)
return p, canary
def test_first_connect_event(self):
p, canary = self._first_connect_event_fixture()
c1 = p.connect()
eq_(canary, ['first_connect'])
def test_first_connect_event_fires_once(self):
p, canary = self._first_connect_event_fixture()
c1 = p.connect()
c2 = p.connect()
eq_(canary, ['first_connect'])
def test_first_connect_on_previously_recreated(self):
p, canary = self._first_connect_event_fixture()
p2 = p.recreate()
c1 = p.connect()
c2 = p2.connect()
eq_(canary, ['first_connect', 'first_connect'])
def test_first_connect_on_subsequently_recreated(self):
p, canary = self._first_connect_event_fixture()
c1 = p.connect()
p2 = p.recreate()
c2 = p2.connect()
eq_(canary, ['first_connect', 'first_connect'])
def test_connect_event(self):
p, canary = self._connect_event_fixture()
c1 = p.connect()
eq_(canary, ['connect'])
def test_connect_event_fires_subsequent(self):
p, canary = self._connect_event_fixture()
c1 = p.connect()
c2 = p.connect()
eq_(canary, ['connect', 'connect'])
def test_connect_on_previously_recreated(self):
p, canary = self._connect_event_fixture()
p2 = p.recreate()
c1 = p.connect()
c2 = p2.connect()
eq_(canary, ['connect', 'connect'])
def test_connect_on_subsequently_recreated(self):
p, canary = self._connect_event_fixture()
c1 = p.connect()
p2 = p.recreate()
c2 = p2.connect()
eq_(canary, ['connect', 'connect'])
def test_checkout_event(self):
p, canary = self._checkout_event_fixture()
c1 = p.connect()
eq_(canary, ['checkout'])
def test_checkout_event_fires_subsequent(self):
p, canary = self._checkout_event_fixture()
c1 = p.connect()
c2 = p.connect()
eq_(canary, ['checkout', 'checkout'])
def test_checkout_event_on_subsequently_recreated(self):
p, canary = self._checkout_event_fixture()
c1 = p.connect()
p2 = p.recreate()
c2 = p2.connect()
eq_(canary, ['checkout', 'checkout'])
def test_checkin_event(self):
p, canary = self._checkin_event_fixture()
c1 = p.connect()
eq_(canary, [])
c1.close()
eq_(canary, ['checkin'])
def test_reset_event(self):
p, canary = self._reset_event_fixture()
c1 = p.connect()
eq_(canary, [])
c1.close()
eq_(canary, ['reset'])
def test_checkin_event_gc(self):
p, canary = self._checkin_event_fixture()
c1 = p.connect()
eq_(canary, [])
del c1
lazy_gc()
eq_(canary, ['checkin'])
def test_checkin_event_on_subsequently_recreated(self):
p, canary = self._checkin_event_fixture()
c1 = p.connect()
p2 = p.recreate()
c2 = p2.connect()
eq_(canary, [])
c1.close()
eq_(canary, ['checkin'])
c2.close()
eq_(canary, ['checkin', 'checkin'])
def test_listen_targets_scope(self):
canary = []
def listen_one(*args):
canary.append("listen_one")
def listen_two(*args):
canary.append("listen_two")
def listen_three(*args):
canary.append("listen_three")
def listen_four(*args):
canary.append("listen_four")
engine = testing_engine(testing.db.url)
event.listen(pool.Pool, 'connect', listen_one)
event.listen(engine.pool, 'connect', listen_two)
event.listen(engine, 'connect', listen_three)
event.listen(engine.__class__, 'connect', listen_four)
engine.execute(select([1])).close()
eq_(
canary, ["listen_one","listen_four", "listen_two","listen_three"]
)
def test_listen_targets_per_subclass(self):
"""test that listen() called on a subclass remains specific to that subclass."""
canary = []
def listen_one(*args):
canary.append("listen_one")
def listen_two(*args):
canary.append("listen_two")
def listen_three(*args):
canary.append("listen_three")
event.listen(pool.Pool, 'connect', listen_one)
event.listen(pool.QueuePool, 'connect', listen_two)
event.listen(pool.SingletonThreadPool, 'connect', listen_three)
p1 = pool.QueuePool(creator=MockDBAPI().connect)
p2 = pool.SingletonThreadPool(creator=MockDBAPI().connect)
assert listen_one in p1.dispatch.connect
assert listen_two in p1.dispatch.connect
assert listen_three not in p1.dispatch.connect
assert listen_one in p2.dispatch.connect
assert listen_two not in p2.dispatch.connect
assert listen_three in p2.dispatch.connect
p1.connect()
eq_(canary, ["listen_one", "listen_two"])
p2.connect()
eq_(canary, ["listen_one", "listen_two", "listen_one", "listen_three"])
def teardown(self):
# TODO: need to get remove() functionality
# going
pool.Pool.dispatch._clear()
class DeprecatedPoolListenerTest(PoolTestBase):
@testing.requires.predictable_gc
@testing.uses_deprecated(r".*Use event.listen")
def test_listeners(self):
class InstrumentingListener(object):
def __init__(self):
if hasattr(self, 'connect'):
self.connect = self.inst_connect
if hasattr(self, 'first_connect'):
self.first_connect = self.inst_first_connect
if hasattr(self, 'checkout'):
self.checkout = self.inst_checkout
if hasattr(self, 'checkin'):
self.checkin = self.inst_checkin
self.clear()
def clear(self):
self.connected = []
self.first_connected = []
self.checked_out = []
self.checked_in = []
def assert_total(innerself, conn, fconn, cout, cin):
eq_(len(innerself.connected), conn)
eq_(len(innerself.first_connected), fconn)
eq_(len(innerself.checked_out), cout)
eq_(len(innerself.checked_in), cin)
def assert_in(innerself, item, in_conn, in_fconn,
in_cout, in_cin):
self.assert_((item in innerself.connected) == in_conn)
self.assert_((item in innerself.first_connected) == in_fconn)
self.assert_((item in innerself.checked_out) == in_cout)
self.assert_((item in innerself.checked_in) == in_cin)
def inst_connect(self, con, record):
print "connect(%s, %s)" % (con, record)
assert con is not None
assert record is not None
self.connected.append(con)
def inst_first_connect(self, con, record):
print "first_connect(%s, %s)" % (con, record)
assert con is not None
assert record is not None
self.first_connected.append(con)
def inst_checkout(self, con, record, proxy):
print "checkout(%s, %s, %s)" % (con, record, proxy)
assert con is not None
assert record is not None
assert proxy is not None
self.checked_out.append(con)
def inst_checkin(self, con, record):
print "checkin(%s, %s)" % (con, record)
# con can be None if invalidated
assert record is not None
self.checked_in.append(con)
class ListenAll(tsa.interfaces.PoolListener, InstrumentingListener):
pass
class ListenConnect(InstrumentingListener):
def connect(self, con, record):
pass
class ListenFirstConnect(InstrumentingListener):
def first_connect(self, con, record):
pass
class ListenCheckOut(InstrumentingListener):
def checkout(self, con, record, proxy, num):
pass
class ListenCheckIn(InstrumentingListener):
def checkin(self, con, record):
pass
def assert_listeners(p, total, conn, fconn, cout, cin):
for instance in (p, p.recreate()):
self.assert_(len(instance.dispatch.connect) == conn)
self.assert_(len(instance.dispatch.first_connect) == fconn)
self.assert_(len(instance.dispatch.checkout) == cout)
self.assert_(len(instance.dispatch.checkin) == cin)
p = self._queuepool_fixture()
assert_listeners(p, 0, 0, 0, 0, 0)
p.add_listener(ListenAll())
assert_listeners(p, 1, 1, 1, 1, 1)
p.add_listener(ListenConnect())
assert_listeners(p, 2, 2, 1, 1, 1)
p.add_listener(ListenFirstConnect())
assert_listeners(p, 3, 2, 2, 1, 1)
p.add_listener(ListenCheckOut())
assert_listeners(p, 4, 2, 2, 2, 1)
p.add_listener(ListenCheckIn())
assert_listeners(p, 5, 2, 2, 2, 2)
del p
snoop = ListenAll()
p = self._queuepool_fixture(listeners=[snoop])
assert_listeners(p, 1, 1, 1, 1, 1)
c = p.connect()
snoop.assert_total(1, 1, 1, 0)
cc = c.connection
snoop.assert_in(cc, True, True, True, False)
c.close()
snoop.assert_in(cc, True, True, True, True)
del c, cc
snoop.clear()
# this one depends on immediate gc
c = p.connect()
cc = c.connection
snoop.assert_in(cc, False, False, True, False)
snoop.assert_total(0, 0, 1, 0)
del c, cc
lazy_gc()
snoop.assert_total(0, 0, 1, 1)
p.dispose()
snoop.clear()
c = p.connect()
c.close()
c = p.connect()
snoop.assert_total(1, 0, 2, 1)
c.close()
snoop.assert_total(1, 0, 2, 2)
# invalidation
p.dispose()
snoop.clear()
c = p.connect()
snoop.assert_total(1, 0, 1, 0)
c.invalidate()
snoop.assert_total(1, 0, 1, 1)
c.close()
snoop.assert_total(1, 0, 1, 1)
del c
lazy_gc()
snoop.assert_total(1, 0, 1, 1)
c = p.connect()
snoop.assert_total(2, 0, 2, 1)
c.close()
del c
lazy_gc()
snoop.assert_total(2, 0, 2, 2)
# detached
p.dispose()
snoop.clear()
c = p.connect()
snoop.assert_total(1, 0, 1, 0)
c.detach()
snoop.assert_total(1, 0, 1, 0)
c.close()
del c
snoop.assert_total(1, 0, 1, 0)
c = p.connect()
snoop.assert_total(2, 0, 2, 0)
c.close()
del c
snoop.assert_total(2, 0, 2, 1)
# recreated
p = p.recreate()
snoop.clear()
c = p.connect()
snoop.assert_total(1, 1, 1, 0)
c.close()
snoop.assert_total(1, 1, 1, 1)
c = p.connect()
snoop.assert_total(1, 1, 2, 1)
c.close()
snoop.assert_total(1, 1, 2, 2)
@testing.uses_deprecated(r".*Use event.listen")
def test_listeners_callables(self):
def connect(dbapi_con, con_record):
counts[0] += 1
def checkout(dbapi_con, con_record, con_proxy):
counts[1] += 1
def checkin(dbapi_con, con_record):
counts[2] += 1
i_all = dict(connect=connect, checkout=checkout, checkin=checkin)
i_connect = dict(connect=connect)
i_checkout = dict(checkout=checkout)
i_checkin = dict(checkin=checkin)
for cls in (pool.QueuePool, pool.StaticPool):
counts = [0, 0, 0]
def assert_listeners(p, total, conn, cout, cin):
for instance in (p, p.recreate()):
eq_(len(instance.dispatch.connect), conn)
eq_(len(instance.dispatch.checkout), cout)
eq_(len(instance.dispatch.checkin), cin)
p = self._queuepool_fixture()
assert_listeners(p, 0, 0, 0, 0)
p.add_listener(i_all)
assert_listeners(p, 1, 1, 1, 1)
p.add_listener(i_connect)
assert_listeners(p, 2, 1, 1, 1)
p.add_listener(i_checkout)
assert_listeners(p, 3, 1, 1, 1)
p.add_listener(i_checkin)
assert_listeners(p, 4, 1, 1, 1)
del p
p = self._queuepool_fixture(listeners=[i_all])
assert_listeners(p, 1, 1, 1, 1)
c = p.connect()
assert counts == [1, 1, 0]
c.close()
assert counts == [1, 1, 1]
c = p.connect()
assert counts == [1, 2, 1]
p.add_listener(i_checkin)
c.close()
assert counts == [1, 2, 2]
class QueuePoolTest(PoolTestBase):
def testqueuepool_del(self):
self._do_testqueuepool(useclose=False)
def testqueuepool_close(self):
self._do_testqueuepool(useclose=True)
def _do_testqueuepool(self, useclose=False):
p = self._queuepool_fixture(pool_size=3,
max_overflow=-1)
def status(pool):
tup = pool.size(), pool.checkedin(), pool.overflow(), \
pool.checkedout()
print 'Pool size: %d Connections in pool: %d Current '\
'Overflow: %d Current Checked out connections: %d' % tup
return tup
c1 = p.connect()
self.assert_(status(p) == (3, 0, -2, 1))
c2 = p.connect()
self.assert_(status(p) == (3, 0, -1, 2))
c3 = p.connect()
self.assert_(status(p) == (3, 0, 0, 3))
c4 = p.connect()
self.assert_(status(p) == (3, 0, 1, 4))
c5 = p.connect()
self.assert_(status(p) == (3, 0, 2, 5))
c6 = p.connect()
self.assert_(status(p) == (3, 0, 3, 6))
if useclose:
c4.close()
c3.close()
c2.close()
else:
c4 = c3 = c2 = None
lazy_gc()
self.assert_(status(p) == (3, 3, 3, 3))
if useclose:
c1.close()
c5.close()
c6.close()
else:
c1 = c5 = c6 = None
lazy_gc()
self.assert_(status(p) == (3, 3, 0, 0))
c1 = p.connect()
c2 = p.connect()
self.assert_(status(p) == (3, 1, 0, 2), status(p))
if useclose:
c2.close()
else:
c2 = None
lazy_gc()
self.assert_(status(p) == (3, 2, 0, 1))
c1.close()
lazy_gc()
assert not pool._refs
def test_timeout(self):
p = self._queuepool_fixture(pool_size=3,
max_overflow=0,
timeout=2)
c1 = p.connect()
c2 = p.connect()
c3 = p.connect()
now = time.time()
try:
c4 = p.connect()
assert False
except tsa.exc.TimeoutError, e:
assert int(time.time() - now) == 2
def test_timeout_race(self):
# test a race condition where the initial connecting threads all race
# to queue.Empty, then block on the mutex. each thread consumes a
# connection as they go in. when the limit is reached, the remaining
# threads go in, and get TimeoutError; even though they never got to
# wait for the timeout on queue.get(). the fix involves checking the
# timeout again within the mutex, and if so, unlocking and throwing
# them back to the start of do_get()
dbapi = MockDBAPI()
p = pool.QueuePool(
creator = lambda: dbapi.connect(delay=.05),
pool_size = 2,
max_overflow = 1, use_threadlocal = False, timeout=3)
timeouts = []
def checkout():
for x in xrange(1):
now = time.time()
try:
c1 = p.connect()
except tsa.exc.TimeoutError, e:
timeouts.append(time.time() - now)
continue
time.sleep(4)
c1.close()
threads = []
for i in xrange(10):
th = threading.Thread(target=checkout)
th.start()
threads.append(th)
for th in threads:
th.join()
assert len(timeouts) > 0
for t in timeouts:
assert t >= 3, "Not all timeouts were >= 3 seconds %r" % timeouts
# normally, the timeout should under 4 seconds,
# but on a loaded down buildbot it can go up.
assert t < 10, "Not all timeouts were < 10 seconds %r" % timeouts
def _test_overflow(self, thread_count, max_overflow):
gc_collect()
dbapi = MockDBAPI()
def creator():
time.sleep(.05)
return dbapi.connect()
p = pool.QueuePool(creator=creator,
pool_size=3, timeout=2,
max_overflow=max_overflow)
peaks = []
def whammy():
for i in range(10):
try:
con = p.connect()
time.sleep(.005)
peaks.append(p.overflow())
con.close()
del con
except tsa.exc.TimeoutError:
pass
threads = []
for i in xrange(thread_count):
th = threading.Thread(target=whammy)
th.start()
threads.append(th)
for th in threads:
th.join()
self.assert_(max(peaks) <= max_overflow)
lazy_gc()
assert not pool._refs
def test_waiters_handled(self):
"""test that threads waiting for connections are
handled when the pool is replaced.
"""
dbapi = MockDBAPI()
def creator():
return dbapi.connect()
success = []
for timeout in (None, 30):
for max_overflow in (0, -1, 3):
p = pool.QueuePool(creator=creator,
pool_size=2, timeout=timeout,
max_overflow=max_overflow)
def waiter(p):
conn = p.connect()
time.sleep(.5)
success.append(True)
conn.close()
time.sleep(.2)
c1 = p.connect()
c2 = p.connect()
for i in range(2):
t = threading.Thread(target=waiter, args=(p, ))
t.setDaemon(True) # so the tests dont hang if this fails
t.start()
c1.invalidate()
c2.invalidate()
p2 = p._replace()
time.sleep(2)
eq_(len(success), 12)
@testing.requires.python26
def test_notify_waiters(self):
dbapi = MockDBAPI()
canary = []
def creator1():
canary.append(1)
return dbapi.connect()
def creator2():
canary.append(2)
return dbapi.connect()
p1 = pool.QueuePool(creator=creator1,
pool_size=1, timeout=None,
max_overflow=0)
p2 = pool.QueuePool(creator=creator2,
pool_size=1, timeout=None,
max_overflow=-1)
def waiter(p):
conn = p.connect()
time.sleep(.5)
conn.close()
c1 = p1.connect()
for i in range(5):
t = threading.Thread(target=waiter, args=(p1, ))
t.setDaemon(True)
t.start()
time.sleep(.5)
eq_(canary, [1])
p1._pool.abort(p2)
time.sleep(1)
eq_(canary, [1, 2, 2, 2, 2, 2])
def test_dispose_closes_pooled(self):
dbapi = MockDBAPI()
def creator():
return dbapi.connect()
p = pool.QueuePool(creator=creator,
pool_size=2, timeout=None,
max_overflow=0)
c1 = p.connect()
c2 = p.connect()
conns = [c1.connection, c2.connection]
c1.close()
eq_([c.closed for c in conns], [False, False])
p.dispose()
eq_([c.closed for c in conns], [True, False])
# currently, if a ConnectionFairy is closed
# after the pool has been disposed, there's no
# flag that states it should be invalidated
# immediately - it just gets returned to the
# pool normally...
c2.close()
eq_([c.closed for c in conns], [True, False])
# ...and that's the one we'll get back next.
c3 = p.connect()
assert c3.connection is conns[1]
def test_no_overflow(self):
self._test_overflow(40, 0)
def test_max_overflow(self):
self._test_overflow(40, 5)
def test_mixed_close(self):
p = self._queuepool_fixture(pool_size=3, max_overflow=-1, use_threadlocal=True)
c1 = p.connect()
c2 = p.connect()
assert c1 is c2
c1.close()
c2 = None
assert p.checkedout() == 1
c1 = None
lazy_gc()
assert p.checkedout() == 0
lazy_gc()
assert not pool._refs
def test_overflow_no_gc_tlocal(self):
self._test_overflow_no_gc(True)
def test_overflow_no_gc(self):
self._test_overflow_no_gc(False)
def _test_overflow_no_gc(self, threadlocal):
p = self._queuepool_fixture(pool_size=2,
max_overflow=2)
# disable weakref collection of the
# underlying connections
strong_refs = set()
def _conn():
c = p.connect()
strong_refs.add(c.connection)
return c
for j in xrange(5):
conns = [_conn() for i in xrange(4)]
for c in conns:
c.close()
still_opened = len([c for c in strong_refs if not c.closed])
eq_(still_opened, 2)
@testing.requires.predictable_gc
def test_weakref_kaboom(self):
p = self._queuepool_fixture(pool_size=3,
max_overflow=-1, use_threadlocal=True)
c1 = p.connect()
c2 = p.connect()
c1.close()
c2 = None
del c1
del c2
gc_collect()
assert p.checkedout() == 0
c3 = p.connect()
assert c3 is not None
def test_trick_the_counter(self):
"""this is a "flaw" in the connection pool; since threadlocal
uses a single ConnectionFairy per thread with an open/close
counter, you can fool the counter into giving you a
ConnectionFairy with an ambiguous counter. i.e. its not true
reference counting."""
p = self._queuepool_fixture(pool_size=3,
max_overflow=-1, use_threadlocal=True)
c1 = p.connect()
c2 = p.connect()
assert c1 is c2
c1.close()
c2 = p.connect()
c2.close()
self.assert_(p.checkedout() != 0)
c2.close()
self.assert_(p.checkedout() == 0)
def test_recycle(self):
p = self._queuepool_fixture(pool_size=1,
max_overflow=0,
recycle=3)
c1 = p.connect()
c_id = id(c1.connection)
c1.close()
c2 = p.connect()
assert id(c2.connection) == c_id
c2.close()
time.sleep(4)
c3 = p.connect()
assert id(c3.connection) != c_id
def test_invalidate(self):
p = self._queuepool_fixture(pool_size=1, max_overflow=0)
c1 = p.connect()
c_id = c1.connection.id
c1.close()
c1 = None
c1 = p.connect()
assert c1.connection.id == c_id
c1.invalidate()
c1 = None
c1 = p.connect()
assert c1.connection.id != c_id
def test_recreate(self):
p = self._queuepool_fixture(pool_size=1, max_overflow=0)
p2 = p.recreate()
assert p2.size() == 1
assert p2._use_threadlocal is False
assert p2._max_overflow == 0
def test_reconnect(self):
"""tests reconnect operations at the pool level. SA's
engine/dialect includes another layer of reconnect support for
'database was lost' errors."""
dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=0)
c1 = p.connect()
c_id = c1.connection.id
c1.close()
c1 = None
c1 = p.connect()
assert c1.connection.id == c_id
dbapi.raise_error = True
c1.invalidate()
c1 = None
c1 = p.connect()
assert c1.connection.id != c_id
def test_detach(self):
dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=0)
c1 = p.connect()
c1.detach()
c_id = c1.connection.id
c2 = p.connect()
assert c2.connection.id != c1.connection.id
dbapi.raise_error = True
c2.invalidate()
c2 = None
c2 = p.connect()
assert c2.connection.id != c1.connection.id
con = c1.connection
assert not con.closed
c1.close()
assert con.closed
def test_threadfairy(self):
p = self._queuepool_fixture(pool_size=3, max_overflow=-1, use_threadlocal=True)
c1 = p.connect()
c1.close()
c2 = p.connect()
assert c2.connection is not None
class SingletonThreadPoolTest(PoolTestBase):
def test_cleanup(self):
self._test_cleanup(False)
def test_cleanup_no_gc(self):
self._test_cleanup(True)
def _test_cleanup(self, strong_refs):
"""test that the pool's connections are OK after cleanup() has
been called."""
dbapi = MockDBAPI()
p = pool.SingletonThreadPool(creator=dbapi.connect,
pool_size=3)
if strong_refs:
sr = set()
def _conn():
c = p.connect()
sr.add(c.connection)
return c
else:
def _conn():
return p.connect()
def checkout():
for x in xrange(10):
c = _conn()
assert c
c.cursor()
c.close()
time.sleep(.1)
threads = []
for i in xrange(10):
th = threading.Thread(target=checkout)
th.start()
threads.append(th)
for th in threads:
th.join()
assert len(p._all_conns) == 3
if strong_refs:
still_opened = len([c for c in sr if not c.closed])
eq_(still_opened, 3)
class AssertionPoolTest(PoolTestBase):
def test_connect_error(self):
dbapi = MockDBAPI()
p = pool.AssertionPool(creator = lambda: dbapi.connect('foo.db'))
c1 = p.connect()
assert_raises(AssertionError, p.connect)
def test_connect_multiple(self):
dbapi = MockDBAPI()
p = pool.AssertionPool(creator = lambda: dbapi.connect('foo.db'))
c1 = p.connect()
c1.close()
c2 = p.connect()
c2.close()
c3 = p.connect()
assert_raises(AssertionError, p.connect)
class NullPoolTest(PoolTestBase):
def test_reconnect(self):
dbapi = MockDBAPI()
p = pool.NullPool(creator = lambda: dbapi.connect('foo.db'))
c1 = p.connect()
c_id = c1.connection.id
c1.close(); c1=None
c1 = p.connect()
dbapi.raise_error = True
c1.invalidate()
c1 = None
c1 = p.connect()
assert c1.connection.id != c_id
class StaticPoolTest(PoolTestBase):
def test_recreate(self):
dbapi = MockDBAPI()
creator = lambda: dbapi.connect('foo.db')
p = pool.StaticPool(creator)
p2 = p.recreate()
assert p._creator is p2._creator
| rclmenezes/sqlalchemy | test/engine/test_pool.py | Python | mit | 37,265 |
from __future__ import division, print_function, absolute_import
import itertools
import warnings
from numpy.testing import (assert_, assert_equal, assert_almost_equal,
assert_array_almost_equal, assert_raises, assert_array_equal,
dec, TestCase, run_module_suite, assert_allclose)
from numpy import mgrid, pi, sin, ogrid, poly1d, linspace
import numpy as np
from scipy._lib.six import xrange
from scipy._lib._version import NumpyVersion
from scipy.interpolate import (interp1d, interp2d, lagrange, PPoly, BPoly,
ppform, splrep, splev, splantider, splint, sproot, Akima1DInterpolator,
RegularGridInterpolator, LinearNDInterpolator, NearestNDInterpolator,
RectBivariateSpline, interpn)
from scipy.interpolate import _ppoly
from scipy._lib._gcutils import assert_deallocated
class TestInterp2D(TestCase):
    """Tests for the legacy 2-D interpolant ``scipy.interpolate.interp2d``.

    NOTE(review): interp2d has since been deprecated/removed from SciPy;
    these tests target the API as it existed when this file was written.
    """
    def test_interp2d(self):
        # Interpolant built from meshgrid samples of sin(x + y/2) should
        # reproduce the function to ~2 decimals, for a scalar query and for
        # a denser grid of query points.
        y, x = mgrid[0:2:20j, 0:pi:21j]
        z = sin(x+0.5*y)
        I = interp2d(x, y, z)
        assert_almost_equal(I(1.0, 2.0), sin(2.0), decimal=2)
        v,u = ogrid[0:2:24j, 0:pi:25j]
        assert_almost_equal(I(u.ravel(), v.ravel()), sin(u+0.5*v), decimal=2)
    def test_interp2d_meshgrid_input(self):
        # Ticket #703
        # interp2d must accept 1-D x and y together with a 2-D z
        # (meshgrid-style input).
        x = linspace(0, 2, 16)
        y = linspace(0, pi, 21)
        z = sin(x[None,:] + y[:,None]/2.)
        I = interp2d(x, y, z)
        assert_almost_equal(I(1.0, 2.0), sin(2.0), decimal=2)
    def test_interp2d_meshgrid_input_unsorted(self):
        # Shuffling the sample coordinates (while recomputing z so it stays
        # consistent with them) must not change the fitted spline: interp2d
        # sorts its input internally.
        # NOTE: the fixed seed together with the exact sequence of shuffle
        # calls makes this deterministic — do not reorder statements.
        np.random.seed(1234)
        x = linspace(0, 2, 16)
        y = linspace(0, pi, 21)
        z = sin(x[None,:] + y[:,None]/2.)
        ip1 = interp2d(x.copy(), y.copy(), z, kind='cubic')
        np.random.shuffle(x)
        z = sin(x[None,:] + y[:,None]/2.)
        ip2 = interp2d(x.copy(), y.copy(), z, kind='cubic')
        np.random.shuffle(x)
        np.random.shuffle(y)
        z = sin(x[None,:] + y[:,None]/2.)
        ip3 = interp2d(x, y, z, kind='cubic')
        x = linspace(0, 2, 31)
        y = linspace(0, pi, 30)
        assert_equal(ip1(x, y), ip2(x, y))
        assert_equal(ip1(x, y), ip3(x, y))
    def test_interp2d_eval_unsorted(self):
        # Evaluation points are sorted internally, so a reversed ye yields
        # the same result.  The trailing positional args in the final call
        # are presumably (dx, dy, assume_sorted) — asking interp2d not to
        # sort while passing unsorted input must raise.  TODO(review):
        # confirm positional meaning against the interp2d.__call__ signature.
        y, x = mgrid[0:2:20j, 0:pi:21j]
        z = sin(x + 0.5*y)
        func = interp2d(x, y, z)
        xe = np.array([3, 4, 5])
        ye = np.array([5.3, 7.1])
        assert_allclose(func(xe, ye), func(xe, ye[::-1]))
        assert_raises(ValueError, func, xe, ye[::-1], 0, 0, True)
    def test_interp2d_linear(self):
        # Ticket #898
        # Linear interpolation of a single unit spike: halfway toward the
        # spike the interpolated value is 0.5.
        a = np.zeros([5, 5])
        a[2, 2] = 1.0
        x = y = np.arange(5)
        b = interp2d(x, y, a, 'linear')
        assert_almost_equal(b(2.0, 1.5), np.array([0.5]), decimal=2)
        assert_almost_equal(b(2.0, 2.5), np.array([0.5]), decimal=2)
    def test_interp2d_bounds(self):
        # bounds_error=True raises on any out-of-domain query; with a
        # fill_value of NaN instead, out-of-domain rows/columns of the
        # result are NaN while in-domain entries stay finite.
        x = np.linspace(0, 1, 5)
        y = np.linspace(0, 2, 7)
        z = x[None, :]**2 + y[:, None]
        ix = np.linspace(-1, 3, 31)
        iy = np.linspace(-1, 3, 33)
        b = interp2d(x, y, z, bounds_error=True)
        assert_raises(ValueError, b, ix, iy)
        b = interp2d(x, y, z, fill_value=np.nan)
        iz = b(ix, iy)
        # Result is indexed (y, x): rows masked by iy, columns by ix.
        mx = (ix < 0) | (ix > 1)
        my = (iy < 0) | (iy > 2)
        assert_(np.isnan(iz[my,:]).all())
        assert_(np.isnan(iz[:,mx]).all())
        assert_(np.isfinite(iz[~my,:][:,~mx]).all())
class TestInterp1D(object):
    """Behavioral tests for ``scipy.interpolate.interp1d``.

    Fixtures built in setUp are reused throughout: x10/y10 are 1-D arrays of
    length 10; y210/y102 are 2-D variants for interpolation-axis handling;
    x25/x2/y2/x1/y1 exercise the constructor's shape validation.
    """
    def setUp(self):
        self.x10 = np.arange(10.)
        self.y10 = np.arange(10.)
        # 2-D x — used only to verify that interp1d rejects non-1-D x.
        self.x25 = self.x10.reshape((2,5))
        self.x2 = np.arange(2.)
        self.y2 = np.arange(2.)
        self.x1 = np.array([0.])
        self.y1 = np.array([0.])
        # 2-D y: interpolation along axis 1 (y210) vs axis 0 (y102).
        self.y210 = np.arange(20.).reshape((2, 10))
        self.y102 = np.arange(20.).reshape((10, 2))
        # Sentinel returned for out-of-bounds queries in the bounds tests.
        self.fill_value = -100.0
    def test_validation(self):
        # Make sure that appropriate exceptions are raised when invalid values
        # are given to the constructor.
        # These should all work.
        interp1d(self.x10, self.y10, kind='linear')
        interp1d(self.x10, self.y10, kind='cubic')
        interp1d(self.x10, self.y10, kind='slinear')
        interp1d(self.x10, self.y10, kind='quadratic')
        interp1d(self.x10, self.y10, kind='zero')
        interp1d(self.x10, self.y10, kind='nearest')
        # Integer kinds are accepted as spline orders.
        interp1d(self.x10, self.y10, kind=0)
        interp1d(self.x10, self.y10, kind=1)
        interp1d(self.x10, self.y10, kind=2)
        interp1d(self.x10, self.y10, kind=3)
        # x array must be 1D.
        assert_raises(ValueError, interp1d, self.x25, self.y10)
        # y array cannot be a scalar.
        assert_raises(ValueError, interp1d, self.x10, np.array(0))
        # Check for x and y arrays having the same length.
        assert_raises(ValueError, interp1d, self.x10, self.y2)
        assert_raises(ValueError, interp1d, self.x2, self.y10)
        assert_raises(ValueError, interp1d, self.x10, self.y102)
        interp1d(self.x10, self.y210)
        interp1d(self.x10, self.y102, axis=0)
        # Check for x and y having at least 1 element.
        assert_raises(ValueError, interp1d, self.x1, self.y10)
        assert_raises(ValueError, interp1d, self.x10, self.y1)
        assert_raises(ValueError, interp1d, self.x1, self.y1)
    def test_init(self):
        # Check that the attributes are initialized appropriately by the
        # constructor.
        assert_(interp1d(self.x10, self.y10).copy)
        assert_(not interp1d(self.x10, self.y10, copy=False).copy)
        assert_(interp1d(self.x10, self.y10).bounds_error)
        assert_(not interp1d(self.x10, self.y10, bounds_error=False).bounds_error)
        # Default fill value is NaN.
        assert_(np.isnan(interp1d(self.x10, self.y10).fill_value))
        assert_equal(interp1d(self.x10, self.y10, fill_value=3.0).fill_value,
                     3.0)
        # Default interpolation axis is the last axis of y.
        assert_equal(interp1d(self.x10, self.y10).axis, 0)
        assert_equal(interp1d(self.x10, self.y210).axis, 1)
        assert_equal(interp1d(self.x10, self.y102, axis=0).axis, 0)
        assert_array_equal(interp1d(self.x10, self.y10).x, self.x10)
        assert_array_equal(interp1d(self.x10, self.y10).y, self.y10)
        assert_array_equal(interp1d(self.x10, self.y210).y, self.y210)
    def test_assume_sorted(self):
        # Check for unsorted arrays
        interp10 = interp1d(self.x10, self.y10)
        interp10_unsorted = interp1d(self.x10[::-1], self.y10[::-1])
        assert_array_almost_equal(interp10_unsorted(self.x10), self.y10)
        assert_array_almost_equal(interp10_unsorted(1.2), np.array([1.2]))
        assert_array_almost_equal(interp10_unsorted([2.4, 5.6, 6.0]),
                                  interp10([2.4, 5.6, 6.0]))
        # Check assume_sorted keyword (defaults to False)
        interp10_assume_kw = interp1d(self.x10[::-1], self.y10[::-1],
                                      assume_sorted=False)
        assert_array_almost_equal(interp10_assume_kw(self.x10), self.y10)
        interp10_assume_kw2 = interp1d(self.x10[::-1], self.y10[::-1],
                                       assume_sorted=True)
        # Should raise an error for unsorted input if assume_sorted=True
        assert_raises(ValueError, interp10_assume_kw2, self.x10)
        # Check that if y is a 2-D array, things are still consistent
        interp10_y_2d = interp1d(self.x10, self.y210)
        interp10_y_2d_unsorted = interp1d(self.x10[::-1], self.y210[:, ::-1])
        assert_array_almost_equal(interp10_y_2d(self.x10),
                                  interp10_y_2d_unsorted(self.x10))
    def test_linear(self):
        # Check the actual implementation of linear interpolation.
        interp10 = interp1d(self.x10, self.y10)
        assert_array_almost_equal(interp10(self.x10), self.y10)
        assert_array_almost_equal(interp10(1.2), np.array([1.2]))
        assert_array_almost_equal(interp10([2.4, 5.6, 6.0]),
                                  np.array([2.4, 5.6, 6.0]))
    def test_cubic(self):
        # Check the actual implementation of spline interpolation.
        # y = x is reproduced exactly by a cubic spline.
        interp10 = interp1d(self.x10, self.y10, kind='cubic')
        assert_array_almost_equal(interp10(self.x10), self.y10)
        assert_array_almost_equal(interp10(1.2), np.array([1.2]))
        assert_array_almost_equal(interp10([2.4, 5.6, 6.0]),
                                  np.array([2.4, 5.6, 6.0]),)
    def test_nearest(self):
        # Check the actual implementation of nearest-neighbour interpolation.
        interp10 = interp1d(self.x10, self.y10, kind='nearest')
        assert_array_almost_equal(interp10(self.x10), self.y10)
        assert_array_almost_equal(interp10(1.2), np.array(1.))
        assert_array_almost_equal(interp10([2.4, 5.6, 6.0]),
                                  np.array([2., 6., 6.]),)
    @dec.knownfailureif(True, "zero-order splines fail for the last point")
    def test_zero(self):
        # Check the actual implementation of zero-order spline interpolation.
        interp10 = interp1d(self.x10, self.y10, kind='zero')
        assert_array_almost_equal(interp10(self.x10), self.y10)
        assert_array_almost_equal(interp10(1.2), np.array(1.))
        assert_array_almost_equal(interp10([2.4, 5.6, 6.0]),
                                  np.array([2., 6., 6.]))
    def _bounds_check(self, kind='linear'):
        # Test that our handling of out-of-bounds input is correct.
        extrap10 = interp1d(self.x10, self.y10, fill_value=self.fill_value,
                            bounds_error=False, kind=kind)
        assert_array_equal(extrap10(11.2), np.array(self.fill_value))
        assert_array_equal(extrap10(-3.4), np.array(self.fill_value))
        assert_array_equal(extrap10([[[11.2], [-3.4], [12.6], [19.3]]]),
                           np.array(self.fill_value),)
        # NOTE: pokes the private _check_bounds helper directly; True marks
        # an out-of-bounds entry.
        assert_array_equal(extrap10._check_bounds(
                               np.array([-1.0, 0.0, 5.0, 9.0, 11.0])),
                           np.array([True, False, False, False, True]))
        raises_bounds_error = interp1d(self.x10, self.y10, bounds_error=True,
                                       kind=kind)
        assert_raises(ValueError, raises_bounds_error, -1.0)
        assert_raises(ValueError, raises_bounds_error, 11.0)
        # In-bounds evaluation must not raise.
        raises_bounds_error([0.0, 5.0, 9.0])
    def _bounds_check_int_nan_fill(self, kind='linear'):
        # Integer y with a NaN fill value: the out-of-bounds element must
        # come back as NaN (implies an upcast to float in the result).
        x = np.arange(10).astype(np.int_)
        y = np.arange(10).astype(np.int_)
        c = interp1d(x, y, kind=kind, fill_value=np.nan, bounds_error=False)
        yi = c(x - 1)
        assert_(np.isnan(yi[0]))
        assert_array_almost_equal(yi, np.r_[np.nan, y[:-1]])
    def test_bounds(self):
        for kind in ('linear', 'cubic', 'nearest',
                     'slinear', 'zero', 'quadratic'):
            self._bounds_check(kind)
            self._bounds_check_int_nan_fill(kind)
    def _nd_check_interp(self, kind='linear'):
        # Check the behavior when the inputs and outputs are multidimensional.
        # Multidimensional input.
        interp10 = interp1d(self.x10, self.y10, kind=kind)
        assert_array_almost_equal(interp10(np.array([[3., 5.], [2., 7.]])),
                                  np.array([[3., 5.], [2., 7.]]))
        # Scalar input -> 0-dim scalar array output
        assert_(isinstance(interp10(1.2), np.ndarray))
        assert_equal(interp10(1.2).shape, ())
        # Multidimensional outputs.
        interp210 = interp1d(self.x10, self.y210, kind=kind)
        assert_array_almost_equal(interp210(1.), np.array([1., 11.]))
        assert_array_almost_equal(interp210(np.array([1., 2.])),
                                  np.array([[1., 2.], [11., 12.]]))
        interp102 = interp1d(self.x10, self.y102, axis=0, kind=kind)
        assert_array_almost_equal(interp102(1.), np.array([2.0, 3.0]))
        assert_array_almost_equal(interp102(np.array([1., 3.])),
                                  np.array([[2., 3.], [6., 7.]]))
        # Both at the same time!
        x_new = np.array([[3., 5.], [2., 7.]])
        assert_array_almost_equal(interp210(x_new),
                                  np.array([[[3., 5.], [2., 7.]],
                                            [[13., 15.], [12., 17.]]]))
        assert_array_almost_equal(interp102(x_new),
                                  np.array([[[6., 7.], [10., 11.]],
                                            [[4., 5.], [14., 15.]]]))
    def _nd_check_shape(self, kind='linear'):
        # Check large ndim output shape
        a = [4, 5, 6, 7]
        y = np.arange(np.prod(a)).reshape(*a)
        for n, s in enumerate(a):
            x = np.arange(s)
            z = interp1d(x, y, axis=n, kind=kind)
            assert_array_almost_equal(z(x), y, err_msg=kind)
            # Querying with a (2, 3, 1) array replaces axis n of the output
            # shape with (2, 3, 1).
            x2 = np.arange(2*3*1).reshape((2,3,1)) / 12.
            b = list(a)
            b[n:n+1] = [2,3,1]
            assert_array_almost_equal(z(x2).shape, b, err_msg=kind)
    def test_nd(self):
        for kind in ('linear', 'cubic', 'slinear', 'quadratic', 'nearest'):
            self._nd_check_interp(kind)
            self._nd_check_shape(kind)
    def _check_complex(self, dtype=np.complex_, kind='linear'):
        x = np.array([1, 2.5, 3, 3.1, 4, 6.4, 7.9, 8.0, 9.5, 10])
        y = x * x ** (1 + 2j)
        y = y.astype(dtype)
        # simple test
        c = interp1d(x, y, kind=kind)
        assert_array_almost_equal(y[:-1], c(x)[:-1])
        # check against interpolating real+imag separately
        xi = np.linspace(1, 10, 31)
        cr = interp1d(x, y.real, kind=kind)
        ci = interp1d(x, y.imag, kind=kind)
        assert_array_almost_equal(c(xi).real, cr(xi))
        assert_array_almost_equal(c(xi).imag, ci(xi))
    def test_complex(self):
        for kind in ('linear', 'nearest', 'cubic', 'slinear', 'quadratic',
                     'zero'):
            self._check_complex(np.complex64, kind)
            self._check_complex(np.complex128, kind)
    @dec.knownfailureif(True, "zero-order splines fail for the last point")
    def test_nd_zero_spline(self):
        # zero-order splines don't get the last point right,
        # see test_zero above
        #yield self._nd_check_interp, 'zero'
        #yield self._nd_check_interp, 'zero'
        pass
    def test_circular_refs(self):
        # Test interp1d can be automatically garbage collected
        # NOTE: relies on CPython refcount semantics via assert_deallocated;
        # the explicit `del interp` inside the context block is load-bearing.
        x = np.linspace(0, 1)
        y = np.linspace(0, 1)
        # Confirm interp can be released from memory after use
        with assert_deallocated(interp1d, x, y) as interp:
            new_y = interp([0.1, 0.2])
            del interp
class TestLagrange(TestCase):
    # Lagrange interpolation through exact samples of a polynomial must
    # recover that polynomial's coefficients.
    def test_lagrange(self):
        poly = poly1d([5, 2, 1, 4, 3])
        sample_x = np.arange(len(poly.coeffs))
        sample_y = poly(sample_x)
        recovered = lagrange(sample_x, sample_y)
        assert_array_almost_equal(poly.coeffs, recovered.coeffs)
class TestAkima1DInterpolator(TestCase):
    # The yi arrays below are precomputed reference values — presumably
    # from an independent Akima implementation (TODO confirm); treat them
    # as golden numbers and do not edit.
    def test_eval(self):
        x = np.arange(0., 11.)
        y = np.array([0., 2., 1., 3., 2., 6., 5.5, 5.5, 2.7, 5.1, 3.])
        ak = Akima1DInterpolator(x, y)
        xi = np.array([0., 0.5, 1., 1.5, 2.5, 3.5, 4.5, 5.1, 6.5, 7.2,
            8.6, 9.9, 10.])
        yi = np.array([0., 1.375, 2., 1.5, 1.953125, 2.484375,
            4.1363636363636366866103344, 5.9803623910336236590978842,
            5.5067291516462386624652936, 5.2031367459745245795943447,
            4.1796554159017080820603951, 3.4110386597938129327189927,
            3.])
        assert_allclose(ak(xi), yi)

    def test_eval_2d(self):
        # Same data with a second, scaled column: interpolation must act
        # column-wise on 2-d y.
        x = np.arange(0., 11.)
        y = np.array([0., 2., 1., 3., 2., 6., 5.5, 5.5, 2.7, 5.1, 3.])
        y = np.column_stack((y, 2. * y))
        ak = Akima1DInterpolator(x, y)
        xi = np.array([0., 0.5, 1., 1.5, 2.5, 3.5, 4.5, 5.1, 6.5, 7.2,
                       8.6, 9.9, 10.])
        yi = np.array([0., 1.375, 2., 1.5, 1.953125, 2.484375,
                       4.1363636363636366866103344,
                       5.9803623910336236590978842,
                       5.5067291516462386624652936,
                       5.2031367459745245795943447,
                       4.1796554159017080820603951,
                       3.4110386597938129327189927, 3.])
        yi = np.column_stack((yi, 2. * yi))
        assert_allclose(ak(xi), yi)

    def test_eval_3d(self):
        # 3-d y: each trailing (2, 2) slice is an independent scaled copy.
        x = np.arange(0., 11.)
        y_ = np.array([0., 2., 1., 3., 2., 6., 5.5, 5.5, 2.7, 5.1, 3.])
        y = np.empty((11, 2, 2))
        y[:, 0, 0] = y_
        y[:, 1, 0] = 2. * y_
        y[:, 0, 1] = 3. * y_
        y[:, 1, 1] = 4. * y_
        ak = Akima1DInterpolator(x, y)
        xi = np.array([0., 0.5, 1., 1.5, 2.5, 3.5, 4.5, 5.1, 6.5, 7.2,
                       8.6, 9.9, 10.])
        yi = np.empty((13, 2, 2))
        yi_ = np.array([0., 1.375, 2., 1.5, 1.953125, 2.484375,
                        4.1363636363636366866103344,
                        5.9803623910336236590978842,
                        5.5067291516462386624652936,
                        5.2031367459745245795943447,
                        4.1796554159017080820603951,
                        3.4110386597938129327189927, 3.])
        yi[:, 0, 0] = yi_
        yi[:, 1, 0] = 2. * yi_
        yi[:, 0, 1] = 3. * yi_
        yi[:, 1, 1] = 4. * yi_
        assert_allclose(ak(xi), yi)

    def test_extend(self):
        # extend() is documented (by its message) as not implemented for
        # 1-D Akima; verify the exact NotImplementedError text.
        x = np.arange(0., 11.)
        y = np.array([0., 2., 1., 3., 2., 6., 5.5, 5.5, 2.7, 5.1, 3.])
        ak = Akima1DInterpolator(x, y)
        try:
            ak.extend()
        except NotImplementedError as e:
            if str(e) != ("Extending a 1D Akima interpolator is not "
                          "yet implemented"):
                raise
        except:
            raise
class TestPPolyCommon(TestCase):
    # test basic functionality for PPoly and BPoly
    def test_sort_check(self):
        # Both constructors must reject unsorted breakpoints.
        c = np.array([[1, 4], [2, 5], [3, 6]])
        x = np.array([0, 1, 0.5])
        assert_raises(ValueError, PPoly, c, x)
        assert_raises(ValueError, BPoly, c, x)

    def test_extend(self):
        # Test adding new points to the piecewise polynomial
        np.random.seed(1234)

        order = 3
        x = np.unique(np.r_[0, 10 * np.random.rand(30), 10])
        c = 2*np.random.rand(order+1, len(x)-1, 2, 3) - 1

        for cls in (PPoly, BPoly):
            # Extending on the right, extending on the left, and
            # constructing in one shot must all agree.
            pp = cls(c[:,:9], x[:10])
            pp.extend(c[:,9:], x[10:])

            pp2 = cls(c[:,10:], x[10:])
            pp2.extend(c[:,:10], x[:10], right=False)

            pp3 = cls(c, x)

            assert_array_equal(pp.c, pp3.c)
            assert_array_equal(pp.x, pp3.x)
            assert_array_equal(pp2.c, pp3.c)
            assert_array_equal(pp2.x, pp3.x)

    def test_extend_diff_orders(self):
        # Test extending polynomial with different order one
        np.random.seed(1234)

        x = np.linspace(0, 1, 6)
        c = np.random.rand(2, 5)

        x2 = np.linspace(1, 2, 6)
        c2 = np.random.rand(4, 5)

        for cls in (PPoly, BPoly):
            pp1 = cls(c, x)
            pp2 = cls(c2, x2)

            pp_comb = cls(c, x)
            pp_comb.extend(c2, x2[1:])

            # NB. doesn't match to pp1 at the endpoint, because pp1 is not
            # continuous with pp2 as we took random coefs.
            xi1 = np.linspace(0, 1, 300, endpoint=False)
            xi2 = np.linspace(1, 2, 300)

            assert_allclose(pp1(xi1), pp_comb(xi1))
            assert_allclose(pp2(xi2), pp_comb(xi2))

    def test_shape(self):
        # Trailing dimensions of c become trailing dimensions of the
        # result, after the query-point shape.
        np.random.seed(1234)
        c = np.random.rand(8, 12, 5, 6, 7)
        x = np.sort(np.random.rand(13))
        xp = np.random.rand(3, 4)
        for cls in (PPoly, BPoly):
            p = cls(c, x)
            assert_equal(p(xp).shape, (3, 4, 5, 6, 7))

        # 'scalars'
        for cls in (PPoly, BPoly):
            p = cls(c[..., 0, 0, 0], x)

            assert_equal(np.shape(p(0.5)), ())
            assert_equal(np.shape(p(np.array(0.5))), ())

            if NumpyVersion(np.__version__) >= '1.7.0':
                # can't use dtype=object (with any numpy; what fails is
                # constructing the object array here for old numpy)
                assert_raises(ValueError, p, np.array([[0.1, 0.2], [0.4]]))

    def test_complex_coef(self):
        # Complex coefficients: evaluation (and derivatives) must equal
        # the real and imaginary parts evaluated separately.
        np.random.seed(12345)
        x = np.sort(np.random.random(13))
        c = np.random.random((8, 12)) * (1. + 0.3j)
        c_re, c_im = c.real, c.imag
        xp = np.random.random(5)
        for cls in (PPoly, BPoly):
            p, p_re, p_im = cls(c, x), cls(c_re, x), cls(c_im, x)
            for nu in [0, 1, 2]:
                assert_allclose(p(xp, nu).real, p_re(xp, nu))
                assert_allclose(p(xp, nu).imag, p_im(xp, nu))
class TestPolySubclassing(TestCase):
    # Operations on PPoly/BPoly subclasses must preserve the subclass
    # type (derivative, antiderivative, from_spline, basis conversions,
    # from_derivatives).
    class P(PPoly):
        pass

    class B(BPoly):
        pass

    def _make_polynomials(self):
        np.random.seed(1234)
        x = np.sort(np.random.random(3))
        c = np.random.random((4, 2))
        return self.P(c, x), self.B(c, x)

    def test_derivative(self):
        pp, bp = self._make_polynomials()
        for p in (pp, bp):
            pd = p.derivative()
            assert_equal(p.__class__, pd.__class__)

        ppa = pp.antiderivative()
        assert_equal(pp.__class__, ppa.__class__)

    def test_from_spline(self):
        np.random.seed(1234)
        x = np.sort(np.r_[0, np.random.rand(11), 1])
        y = np.random.rand(len(x))
        spl = splrep(x, y, s=0)

        pp = self.P.from_spline(spl)
        assert_equal(pp.__class__, self.P)

    def test_conversions(self):
        pp, bp = self._make_polynomials()

        pp1 = self.P.from_bernstein_basis(bp)
        assert_equal(pp1.__class__, self.P)

        bp1 = self.B.from_power_basis(pp)
        assert_equal(bp1.__class__, self.B)

    def test_from_derivatives(self):
        x = [0, 1, 2]
        y = [[1], [2], [3]]
        bp = self.B.from_derivatives(x, y)
        assert_equal(bp.__class__, self.B)
class TestPPoly(TestCase):
    """Tests for PPoly (power-basis piecewise polynomials)."""

    def test_simple(self):
        # Coefficients are in decreasing power order, local to each
        # interval: piece 0 is x**2 + 2x + 3, piece 1 is in (x - 0.5).
        c = np.array([[1, 4], [2, 5], [3, 6]])
        x = np.array([0, 0.5, 1])
        p = PPoly(c, x)
        assert_allclose(p(0.3), 1*0.3**2 + 2*0.3 + 3)
        assert_allclose(p(0.7), 4*(0.7-0.5)**2 + 5*(0.7-0.5) + 6)

    def test_multi_shape(self):
        # Trailing dims of c propagate through evaluation, derivative
        # (order drops by 1) and antiderivative (order grows by 1).
        c = np.random.rand(6, 2, 1, 2, 3)
        x = np.array([0, 0.5, 1])
        p = PPoly(c, x)
        assert_equal(p.x.shape, x.shape)
        assert_equal(p.c.shape, c.shape)
        assert_equal(p(0.3).shape, c.shape[2:])

        assert_equal(p(np.random.rand(5,6)).shape,
                     (5,6) + c.shape[2:])

        dp = p.derivative()
        assert_equal(dp.c.shape, (5, 2, 1, 2, 3))
        ip = p.antiderivative()
        assert_equal(ip.c.shape, (7, 2, 1, 2, 3))

    def test_construct_fast(self):
        # construct_fast skips validation but must evaluate identically.
        np.random.seed(1234)
        c = np.array([[1, 4], [2, 5], [3, 6]], dtype=float)
        x = np.array([0, 0.5, 1])
        p = PPoly.construct_fast(c, x)
        assert_allclose(p(0.3), 1*0.3**2 + 2*0.3 + 3)
        assert_allclose(p(0.7), 4*(0.7-0.5)**2 + 5*(0.7-0.5) + 6)

    def test_vs_alternative_implementations(self):
        # Cross-check against the two manual evaluators defined below.
        np.random.seed(1234)
        c = np.random.rand(3, 12, 22)
        x = np.sort(np.r_[0, np.random.rand(11), 1])

        p = PPoly(c, x)

        xp = np.r_[0.3, 0.5, 0.33, 0.6]
        expected = _ppoly_eval_1(c, x, xp)
        assert_allclose(p(xp), expected)

        expected = _ppoly_eval_2(c[:,:,0], x, xp)
        assert_allclose(p(xp)[:,0], expected)

    def test_from_spline(self):
        np.random.seed(1234)
        x = np.sort(np.r_[0, np.random.rand(11), 1])
        y = np.random.rand(len(x))

        spl = splrep(x, y, s=0)
        pp = PPoly.from_spline(spl)

        xi = np.linspace(0, 1, 200)
        assert_allclose(pp(xi), splev(xi, spl))

    def test_derivative_simple(self):
        np.random.seed(1234)
        # 4x**3 + 3x**2 + 2x + 1 and its first/second derivatives,
        # written out coefficient by coefficient.
        c = np.array([[4, 3, 2, 1]]).T
        dc = np.array([[3*4, 2*3, 2]]).T
        ddc = np.array([[2*3*4, 1*2*3]]).T
        x = np.array([0, 1])

        pp = PPoly(c, x)
        dpp = PPoly(dc, x)
        ddpp = PPoly(ddc, x)

        assert_allclose(pp.derivative().c, dpp.c)
        assert_allclose(pp.derivative(2).c, ddpp.c)

    def test_derivative_eval(self):
        # pp(x, nu) must match splev(..., der=nu) for the same spline.
        np.random.seed(1234)
        x = np.sort(np.r_[0, np.random.rand(11), 1])
        y = np.random.rand(len(x))

        spl = splrep(x, y, s=0)
        pp = PPoly.from_spline(spl)

        xi = np.linspace(0, 1, 200)
        for dx in range(0, 3):
            assert_allclose(pp(xi, dx), splev(xi, spl, dx))

    def test_derivative(self):
        # derivative(dx)(x) must equal in-place evaluation pp(x, dx),
        # including orders beyond the spline degree (identically zero).
        np.random.seed(1234)
        x = np.sort(np.r_[0, np.random.rand(11), 1])
        y = np.random.rand(len(x))

        spl = splrep(x, y, s=0, k=5)
        pp = PPoly.from_spline(spl)

        xi = np.linspace(0, 1, 200)
        for dx in range(0, 10):
            assert_allclose(pp(xi, dx), pp.derivative(dx)(xi),
                            err_msg="dx=%d" % (dx,))

    def test_antiderivative_of_constant(self):
        # https://github.com/scipy/scipy/issues/4216
        p = PPoly([[1.]], [0, 1])
        assert_equal(p.antiderivative().c, PPoly([[1], [0]], [0, 1]).c)
        assert_equal(p.antiderivative().x, PPoly([[1], [0]], [0, 1]).x)

    def test_antiderivative_regression_4355(self):
        # https://github.com/scipy/scipy/issues/4355
        p = PPoly([[1., 0.5]], [0, 1, 2])
        q = p.antiderivative()
        assert_equal(q.c, [[1, 0.5], [0, 1]])
        assert_equal(q.x, [0, 1, 2])
        assert_allclose(p.integrate(0, 2), 1.5)
        assert_allclose(q(2) - q(0), 1.5)

    def test_antiderivative_simple(self):
        np.random.seed(1234)
        # [ p1(x) = 3*x**2 + 2*x + 1,
        #   p2(x) = 1.6875]
        c = np.array([[3, 2, 1], [0, 0, 1.6875]]).T
        # [ pp1(x) = x**3 + x**2 + x,
        #   pp2(x) = 1.6875*(x - 0.25) + pp1(0.25)]
        ic = np.array([[1, 1, 1, 0], [0, 0, 1.6875, 0.328125]]).T
        # [ ppp1(x) = (1/4)*x**4 + (1/3)*x**3 + (1/2)*x**2,
        #   ppp2(x) = (1.6875/2)*(x - 0.25)**2 + pp1(0.25)*x + ppp1(0.25)]
        iic = np.array([[1/4, 1/3, 1/2, 0, 0],
                        [0, 0, 1.6875/2, 0.328125, 0.037434895833333336]]).T
        x = np.array([0, 0.25, 1])

        pp = PPoly(c, x)
        ipp = pp.antiderivative()
        iipp = pp.antiderivative(2)
        iipp2 = ipp.antiderivative()

        assert_allclose(ipp.x, x)
        assert_allclose(ipp.c.T, ic.T)
        assert_allclose(iipp.c.T, iic.T)
        assert_allclose(iipp2.c.T, iic.T)

    def test_antiderivative_vs_derivative(self):
        np.random.seed(1234)
        x = np.linspace(0, 1, 30)**2
        y = np.random.rand(len(x))
        spl = splrep(x, y, s=0, k=5)
        pp = PPoly.from_spline(spl)

        for dx in range(0, 10):
            ipp = pp.antiderivative(dx)

            # check that derivative is inverse op
            pp2 = ipp.derivative(dx)
            assert_allclose(pp.c, pp2.c)

            # check continuity
            for k in range(dx):
                pp2 = ipp.derivative(k)

                r = 1e-13
                endpoint = r*pp2.x[:-1] + (1 - r)*pp2.x[1:]

                assert_allclose(pp2(pp2.x[1:]), pp2(endpoint),
                                rtol=1e-7, err_msg="dx=%d k=%d" % (dx, k))

    def test_antiderivative_vs_spline(self):
        np.random.seed(1234)
        x = np.sort(np.r_[0, np.random.rand(11), 1])
        y = np.random.rand(len(x))

        spl = splrep(x, y, s=0, k=5)
        pp = PPoly.from_spline(spl)

        for dx in range(0, 10):
            pp2 = pp.antiderivative(dx)
            spl2 = splantider(spl, dx)

            xi = np.linspace(0, 1, 200)
            assert_allclose(pp2(xi), splev(xi, spl2),
                            rtol=1e-7)

    def test_integrate(self):
        # integrate() must agree with both the antiderivative difference
        # and splint; with extrapolate=False an out-of-range limit is NaN.
        np.random.seed(1234)
        x = np.sort(np.r_[0, np.random.rand(11), 1])
        y = np.random.rand(len(x))

        spl = splrep(x, y, s=0, k=5)
        pp = PPoly.from_spline(spl)

        a, b = 0.3, 0.9
        ig = pp.integrate(a, b)

        ipp = pp.antiderivative()
        assert_allclose(ig, ipp(b) - ipp(a))
        assert_allclose(ig, splint(a, b, spl))

        a, b = -0.3, 0.9
        ig = pp.integrate(a, b, extrapolate=True)
        assert_allclose(ig, ipp(b) - ipp(a))

        assert_(np.isnan(pp.integrate(a, b, extrapolate=False)).all())

    def test_roots(self):
        x = np.linspace(0, 1, 31)**2
        y = np.sin(30*x)

        spl = splrep(x, y, s=0, k=3)
        pp = PPoly.from_spline(spl)

        r = pp.roots()
        r = r[(r >= 0 - 1e-15) & (r <= 1 + 1e-15)]
        assert_allclose(r, sproot(spl), atol=1e-15)

    def test_roots_idzero(self):
        # Roots for piecewise polynomials with identically zero
        # sections.
        c = np.array([[-1, 0.25], [0, 0], [-1, 0.25]]).T
        x = np.array([0, 0.4, 0.6, 1.0])

        pp = PPoly(c, x)
        # The identically-zero middle section is reported as the interval
        # start followed by NaN.
        assert_array_equal(pp.roots(),
                           [0.25, 0.4, np.nan, 0.6 + 0.25])

    def test_roots_repeated(self):
        # Check roots repeated in multiple sections are reported only
        # once.

        # [(x + 1)**2 - 1, -x**2] ; x == 0 is a repeated root
        c = np.array([[1, 0, -1], [-1, 0, 0]]).T
        x = np.array([-1, 0, 1])

        pp = PPoly(c, x)
        assert_array_equal(pp.roots(), [-2, 0])
        assert_array_equal(pp.roots(extrapolate=False), [0])

    def test_roots_discont(self):
        # Check that a discontinuity across zero is reported as root
        c = np.array([[1], [-1]]).T
        x = np.array([0, 0.5, 1])
        pp = PPoly(c, x)
        assert_array_equal(pp.roots(), [0.5])
        assert_array_equal(pp.roots(discontinuity=False), [])

    def test_roots_random(self):
        # Check high-order polynomials with random coefficients
        np.random.seed(1234)

        num = 0

        for extrapolate in (True, False):
            for order in range(0, 20):
                x = np.unique(np.r_[0, 10 * np.random.rand(30), 10])
                c = 2*np.random.rand(order+1, len(x)-1, 2, 3) - 1

                pp = PPoly(c, x)
                r = pp.roots(discontinuity=False, extrapolate=extrapolate)

                for i in range(2):
                    for j in range(3):
                        rr = r[i,j]
                        if rr.size > 0:
                            # Check that the reported roots indeed are roots
                            num += rr.size
                            val = pp(rr, extrapolate=extrapolate)[:,i,j]
                            cmpval = pp(rr, nu=1, extrapolate=extrapolate)[:,i,j]
                            # Normalize by the derivative to get a scale-free
                            # residual check.
                            assert_allclose(val/cmpval, 0, atol=1e-7,
                                            err_msg="(%r) r = %s" % (extrapolate,
                                                                     repr(rr),))

        # Check that we checked a number of roots
        assert_(num > 100, repr(num))

    def test_roots_croots(self):
        # Test the complex root finding algorithm
        np.random.seed(1234)

        for k in range(1, 15):
            c = np.random.rand(k, 1, 130)

            if k == 3:
                # add a case with zero discriminant
                c[:,0,0] = 1, 2, 1

            w = np.empty(c.shape, dtype=complex)
            _ppoly._croots_poly1(c, w)

            if k == 1:
                assert_(np.isnan(w).all())
                continue

            res = 0
            cres = 0
            for i in range(k):
                res += c[i,None] * w**(k-1-i)
                cres += abs(c[i,None] * w**(k-1-i))
            with np.errstate(invalid='ignore'):
                res /= cres
            res = res.ravel()
            res = res[~np.isnan(res)]
            assert_allclose(res, 0, atol=1e-10)

    def test_extrapolate_attr(self):
        # [ 1 - x**2 ]
        c = np.array([[-1, 0, 1]]).T
        x = np.array([0, 1])

        for extrapolate in [True, False, None]:
            pp = PPoly(c, x, extrapolate=extrapolate)
            pp_d = pp.derivative()
            pp_i = pp.antiderivative()

            # extrapolate=None defaults to True; derivative and
            # antiderivative inherit the flag.
            if extrapolate is False:
                assert_(np.isnan(pp([-0.1, 1.1])).all())
                assert_(np.isnan(pp_i([-0.1, 1.1])).all())
                assert_(np.isnan(pp_d([-0.1, 1.1])).all())
                assert_equal(pp.roots(), [1])
            else:
                assert_allclose(pp([-0.1, 1.1]), [1-0.1**2, 1-1.1**2])
                assert_(not np.isnan(pp_i([-0.1, 1.1])).any())
                assert_(not np.isnan(pp_d([-0.1, 1.1])).any())
                assert_allclose(pp.roots(), [1, -1])
class TestBPoly(TestCase):
    """Tests for BPoly (Bernstein-basis piecewise polynomials).

    The expected values are the Bernstein sums written out term by term:
    sum_j c[j] * C(n, j) * s**j * (1-s)**(n-j) with s the normalized
    position inside the interval.
    """

    def test_simple(self):
        x = [0, 1]
        c = [[3]]
        bp = BPoly(c, x)
        assert_allclose(bp(0.1), 3.)

    def test_simple2(self):
        x = [0, 1]
        c = [[3], [1]]
        bp = BPoly(c, x)   # 3*(1-x) + 1*x
        assert_allclose(bp(0.1), 3*0.9 + 1.*0.1)

    def test_simple3(self):
        x = [0, 1]
        c = [[3], [1], [4]]
        bp = BPoly(c, x)   # 3 * (1-x)**2 + 2 * x (1-x) + 4 * x**2
        assert_allclose(bp(0.2),
                        3 * 0.8*0.8 + 1 * 2*0.2*0.8 + 4 * 0.2*0.2)

    def test_simple4(self):
        x = [0, 1]
        c = [[1], [1], [1], [2]]
        bp = BPoly(c, x)
        assert_allclose(bp(0.3), 0.7**3 +
                                 3 * 0.7**2 * 0.3 +
                                 3 * 0.7 * 0.3**2 +
                             2 * 0.3**3)

    def test_simple5(self):
        x = [0, 1]
        c = [[1], [1], [8], [2], [1]]
        bp = BPoly(c, x)
        assert_allclose(bp(0.3), 0.7**4 +
                                 4 * 0.7**3 * 0.3 +
                             8 * 6 * 0.7**2 * 0.3**2 +
                             2 * 4 * 0.7 * 0.3**3 +
                                 0.3**4)

    def test_multi_shape(self):
        # Trailing dims of c propagate through evaluation and derivative.
        c = np.random.rand(6, 2, 1, 2, 3)
        x = np.array([0, 0.5, 1])
        p = BPoly(c, x)
        assert_equal(p.x.shape, x.shape)
        assert_equal(p.c.shape, c.shape)
        assert_equal(p(0.3).shape, c.shape[2:])
        assert_equal(p(np.random.rand(5,6)).shape,
                     (5,6)+c.shape[2:])

        dp = p.derivative()
        assert_equal(dp.c.shape, (5, 2, 1, 2, 3))

    def test_interval_length(self):
        # Bernstein evaluation uses the normalized coordinate
        # s = (x - xa) / (xb - xa), not x itself.
        x = [0, 2]
        c = [[3], [1], [4]]
        bp = BPoly(c, x)
        xval = 0.1
        s = xval / 2  # s = (x - xa) / (xb - xa)
        assert_allclose(bp(xval), 3 * (1-s)*(1-s) + 1 * 2*s*(1-s) + 4 * s*s)

    def test_two_intervals(self):
        x = [0, 1, 3]
        c = [[3, 0], [0, 0], [0, 2]]
        bp = BPoly(c, x)  # [3*(1-x)**2, 2*((x-1)/2)**2]

        assert_allclose(bp(0.4), 3 * 0.6*0.6)
        assert_allclose(bp(1.7), 2 * (0.7/2)**2)

    def test_extrapolate_attr(self):
        # extrapolate=None defaults to True; the derivative inherits it.
        x = [0, 2]
        c = [[3], [1], [4]]
        bp = BPoly(c, x)

        for extrapolate in (True, False, None):
            bp = BPoly(c, x, extrapolate=extrapolate)
            bp_d = bp.derivative()
            if extrapolate is False:
                assert_(np.isnan(bp([-0.1, 2.1])).all())
                assert_(np.isnan(bp_d([-0.1, 2.1])).all())
            else:
                assert_(not np.isnan(bp([-0.1, 2.1])).any())
                assert_(not np.isnan(bp_d([-0.1, 2.1])).any())
class TestBPolyCalculus(TestCase):
    """Derivatives of Bernstein-basis polynomials."""

    def test_derivative(self):
        x = [0, 1, 3]
        c = [[3, 0], [0, 0], [0, 2]]
        bp = BPoly(c, x)  # [3*(1-x)**2, 2*((x-1)/2)**2]
        bp_der = bp.derivative()
        assert_allclose(bp_der(0.4), -6*(0.6))
        assert_allclose(bp_der(1.7), 0.7)

        # derivatives in-place: bp(x, nu) must give the same values.
        assert_allclose([bp(0.4, nu=1), bp(0.4, nu=2), bp(0.4, nu=3)],
                        [-6*(1-0.4), 6., 0.])
        assert_allclose([bp(1.7, nu=1), bp(1.7, nu=2), bp(1.7, nu=3)],
                        [0.7, 1., 0])

    def test_derivative_ppoly(self):
        # make sure it's consistent w/ power basis
        np.random.seed(1234)
        m, k = 5, 8   # number of intervals, order
        x = np.sort(np.random.random(m))
        c = np.random.random((k, m-1))
        bp = BPoly(c, x)
        pp = PPoly.from_bernstein_basis(bp)

        # Differentiating in either basis must stay consistent at every
        # order up to the polynomial degree.
        for d in range(k):
            bp = bp.derivative()
            pp = pp.derivative()
            xp = np.linspace(x[0], x[-1], 21)
            assert_allclose(bp(xp), pp(xp))

    def test_deriv_inplace(self):
        np.random.seed(1234)
        m, k = 5, 8   # number of intervals, order
        x = np.sort(np.random.random(m))
        c = np.random.random((k, m-1))
        bp = BPoly(c, x)

        xp = np.linspace(x[0], x[-1], 21)
        for i in range(k):
            assert_allclose(bp(xp, i), bp.derivative(i)(xp))
class TestPolyConversions(TestCase):
    """Round-trips between the power (PPoly) and Bernstein (BPoly) bases
    must preserve the polynomial's values."""

    def test_bp_from_pp(self):
        x = [0, 1, 3]
        c = [[3, 2], [1, 8], [4, 3]]
        pp = PPoly(c, x)
        bp = BPoly.from_power_basis(pp)
        pp1 = PPoly.from_bernstein_basis(bp)

        xp = [0.1, 1.4]
        assert_allclose(pp(xp), bp(xp))
        assert_allclose(pp(xp), pp1(xp))

    def test_bp_from_pp_random(self):
        np.random.seed(1234)
        m, k = 5, 8   # number of intervals, order
        x = np.sort(np.random.random(m))
        c = np.random.random((k, m-1))
        pp = PPoly(c, x)
        bp = BPoly.from_power_basis(pp)
        pp1 = PPoly.from_bernstein_basis(bp)

        xp = np.linspace(x[0], x[-1], 21)
        assert_allclose(pp(xp), bp(xp))
        assert_allclose(pp(xp), pp1(xp))

    def test_pp_from_bp(self):
        # Same round-trip starting from the Bernstein side.
        x = [0, 1, 3]
        c = [[3, 3], [1, 1], [4, 2]]
        bp = BPoly(c, x)
        pp = PPoly.from_bernstein_basis(bp)
        bp1 = BPoly.from_power_basis(pp)

        xp = [0.1, 1.4]
        assert_allclose(bp(xp), pp(xp))
        assert_allclose(bp(xp), bp1(xp))
class TestBPolyFromDerivatives(TestCase):
    """Hermite-style construction: BPoly.from_derivatives builds a
    piecewise polynomial matching prescribed derivatives at the
    breakpoints."""

    def test_make_poly_1(self):
        # Linear segment from endpoint values only.
        c1 = BPoly._construct_from_derivatives(0, 1, [2], [3])
        assert_allclose(c1, [2., 3.])

    def test_make_poly_2(self):
        c1 = BPoly._construct_from_derivatives(0, 1, [1, 0], [1])
        assert_allclose(c1, [1., 1., 1.])

        # f'(0) = 3
        c2 = BPoly._construct_from_derivatives(0, 1, [2, 3], [1])
        assert_allclose(c2, [2., 7./2, 1.])

        # f'(1) = 3
        c3 = BPoly._construct_from_derivatives(0, 1, [2], [1, 3])
        assert_allclose(c3, [2., -0.5, 1.])

    def test_make_poly_3(self):
        # f'(0)=2, f''(0)=3
        c1 = BPoly._construct_from_derivatives(0, 1, [1, 2, 3], [4])
        assert_allclose(c1, [1., 5./3, 17./6, 4.])

        # f'(1)=2, f''(1)=3
        c2 = BPoly._construct_from_derivatives(0, 1, [1], [4, 2, 3])
        assert_allclose(c2, [1., 19./6, 10./3, 4.])

        # f'(0)=2, f'(1)=3
        c3 = BPoly._construct_from_derivatives(0, 1, [1, 2], [4, 3])
        assert_allclose(c3, [1., 5./3, 3., 4.])

    def test_make_poly_12(self):
        # Six derivatives at each end: verify them by repeatedly
        # differentiating the constructed polynomial.
        np.random.seed(12345)
        ya = np.r_[0, np.random.random(5)]
        yb = np.r_[0, np.random.random(5)]

        c = BPoly._construct_from_derivatives(0, 1, ya, yb)
        pp = BPoly(c[:, None], [0, 1])
        for j in range(6):
            assert_allclose([pp(0.), pp(1.)], [ya[j], yb[j]])
            pp = pp.derivative()

    def test_raise_degree(self):
        # Raising the Bernstein degree must not change the values.
        np.random.seed(12345)
        x = [0, 1]
        k, d = 8, 5
        c = np.random.random((k, 1, 2, 3, 4))
        bp = BPoly(c, x)

        c1 = BPoly._raise_degree(c, d)
        bp1 = BPoly(c1, x)

        xp = np.linspace(0, 1, 11)
        assert_allclose(bp(xp), bp1(xp))

    def test_xi_yi(self):
        # xi and yi must have the same length.
        assert_raises(ValueError, BPoly.from_derivatives, [0, 1], [0])

    def test_coords_order(self):
        # Breakpoints must be strictly increasing.
        xi = [0, 0, 1]
        yi = [[0], [0], [0]]
        assert_raises(ValueError, BPoly.from_derivatives, xi, yi)

    def test_zeros(self):
        xi = [0, 1, 2, 3]
        yi = [[0, 0], [0], [0, 0], [0, 0]]  # NB: will have to raise the degree
        pp = BPoly.from_derivatives(xi, yi)
        assert_(pp.c.shape == (4, 3))

        ppd = pp.derivative()
        for xp in [0., 0.1, 1., 1.1, 1.9, 2., 2.5]:
            assert_allclose([pp(xp), ppd(xp)], [0., 0.])

    def _make_random_mk(self, m, k):
        # k derivatives at each breakpoint
        np.random.seed(1234)
        xi = np.asarray([1. * j**2 for j in range(m+1)])
        yi = [np.random.random(k) for j in range(m+1)]
        return xi, yi

    def test_random_12(self):
        m, k = 5, 12
        xi, yi = self._make_random_mk(m, k)
        pp = BPoly.from_derivatives(xi, yi)

        for order in range(k//2):
            assert_allclose(pp(xi), [yy[order] for yy in yi])
            pp = pp.derivative()

    def test_order_zero(self):
        m, k = 5, 12
        xi, yi = self._make_random_mk(m, k)
        assert_raises(ValueError, BPoly.from_derivatives,
                      **dict(xi=xi, yi=yi, orders=0))

    def test_orders_too_high(self):
        m, k = 5, 12
        xi, yi = self._make_random_mk(m, k)

        pp = BPoly.from_derivatives(xi, yi, orders=2*k-1)   # this is still ok
        assert_raises(ValueError, BPoly.from_derivatives,   # but this is not
                      **dict(xi=xi, yi=yi, orders=2*k))

    def test_orders_global(self):
        m, k = 5, 12
        xi, yi = self._make_random_mk(m, k)

        # ok, this is confusing. Local polynomials will be of the order 5
        # which means that up to the 2nd derivatives will be used at each point
        order = 5
        pp = BPoly.from_derivatives(xi, yi, orders=order)

        for j in range(order//2+1):
            assert_allclose(pp(xi[1:-1] - 1e-12), pp(xi[1:-1] + 1e-12))
            pp = pp.derivative()
        assert_(not np.allclose(pp(xi[1:-1] - 1e-12), pp(xi[1:-1] + 1e-12)))

        # now repeat with `order` being even: on each interval, it uses
        # order//2 'derivatives' @ the right-hand endpoint and
        # order//2+1 @ 'derivatives' the left-hand endpoint
        order = 6
        pp = BPoly.from_derivatives(xi, yi, orders=order)
        for j in range(order//2):
            assert_allclose(pp(xi[1:-1] - 1e-12), pp(xi[1:-1] + 1e-12))
            pp = pp.derivative()
        assert_(not np.allclose(pp(xi[1:-1] - 1e-12), pp(xi[1:-1] + 1e-12)))

    def test_orders_local(self):
        # Per-breakpoint orders: continuity of exactly orders[i]//2
        # derivatives at each interior breakpoint.
        m, k = 7, 12
        xi, yi = self._make_random_mk(m, k)

        orders = [o + 1 for o in range(m)]
        for i, x in enumerate(xi[1:-1]):
            pp = BPoly.from_derivatives(xi, yi, orders=orders)
            for j in range(orders[i] // 2 + 1):
                assert_allclose(pp(x - 1e-12), pp(x + 1e-12))
                pp = pp.derivative()
            assert_(not np.allclose(pp(x - 1e-12), pp(x + 1e-12)))

    def test_yi_trailing_dims(self):
        m, k = 7, 5
        xi = np.sort(np.random.random(m+1))
        yi = np.random.random((m+1, k, 6, 7, 8))
        pp = BPoly.from_derivatives(xi, yi)
        assert_equal(pp.c.shape, (2*k, m, 6, 7, 8))
class TestPpform(TestCase):
    # ppform is the deprecated predecessor of PPoly; the
    # DeprecationWarning it emits is silenced for the duration of the
    # shape check.
    def test_shape(self):
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", DeprecationWarning)
            np.random.seed(1234)
            c = np.random.rand(3, 12, 5, 6, 7)
            x = np.sort(np.random.rand(13))
            p = ppform(c, x)
            xp = np.random.rand(3, 4)
            assert_equal(p(xp).shape, (3, 4, 5, 6, 7))
def _ppoly_eval_1(c, x, xps):
"""Evaluate piecewise polynomial manually"""
out = np.zeros((len(xps), c.shape[2]))
for i, xp in enumerate(xps):
if xp < 0 or xp > 1:
out[i,:] = np.nan
continue
j = np.searchsorted(x, xp) - 1
d = xp - x[j]
assert_(x[j] <= xp < x[j+1])
r = sum(c[k,j] * d**(c.shape[0]-k-1)
for k in range(c.shape[0]))
out[i,:] = r
return out
def _ppoly_eval_2(coeffs, breaks, xnew, fill=np.nan):
"""Evaluate piecewise polynomial manually (another way)"""
a = breaks[0]
b = breaks[-1]
K = coeffs.shape[0]
saveshape = np.shape(xnew)
xnew = np.ravel(xnew)
res = np.empty_like(xnew)
mask = (xnew >= a) & (xnew <= b)
res[~mask] = fill
xx = xnew.compress(mask)
indxs = np.searchsorted(breaks, xx)-1
indxs = indxs.clip(0, len(breaks))
pp = coeffs
diff = xx - breaks.take(indxs)
V = np.vander(diff, N=K)
values = np.array([np.dot(V[k, :], pp[:, indxs[k]]) for k in xrange(len(xx))])
res[mask] = values
res.shape = saveshape
return res
class TestRegularGridInterpolator(TestCase):
    """Tests for RegularGridInterpolator on separable 4-d sample grids
    whose values encode the position along each axis in a separate
    decimal digit (so expected values can be read off by inspection)."""

    def _get_sample_4d(self):
        # create a 4d grid of 3 points in each dimension
        points = [(0., .5, 1.)] * 4
        values = np.asarray([0., .5, 1.])
        values0 = values[:, np.newaxis, np.newaxis, np.newaxis]
        values1 = values[np.newaxis, :, np.newaxis, np.newaxis]
        values2 = values[np.newaxis, np.newaxis, :, np.newaxis]
        values3 = values[np.newaxis, np.newaxis, np.newaxis, :]
        values = (values0 + values1 * 10 + values2 * 100 + values3 * 1000)
        return points, values

    def _get_sample_4d_2(self):
        # create another 4d grid of 3 points in each dimension
        # (last two axes use a 10x larger coordinate range)
        points = [(0., .5, 1.)] * 2 + [(0., 5., 10.)] * 2
        values = np.asarray([0., .5, 1.])
        values0 = values[:, np.newaxis, np.newaxis, np.newaxis]
        values1 = values[np.newaxis, :, np.newaxis, np.newaxis]
        values2 = values[np.newaxis, np.newaxis, :, np.newaxis]
        values3 = values[np.newaxis, np.newaxis, np.newaxis, :]
        values = (values0 + values1 * 10 + values2 * 100 + values3 * 1000)
        return points, values

    def test_list_input(self):
        # Plain Python lists for values/sample must behave like arrays.
        points, values = self._get_sample_4d()

        sample = np.asarray([[0.1, 0.1, 1., .9], [0.2, 0.1, .45, .8],
                             [0.5, 0.5, .5, .5]])

        for method in ['linear', 'nearest']:
            interp = RegularGridInterpolator(points,
                                             values.tolist(),
                                             method=method)
            v1 = interp(sample.tolist())
            interp = RegularGridInterpolator(points,
                                             values,
                                             method=method)
            v2 = interp(sample)
            assert_allclose(v1, v2)

    def test_complex(self):
        # Complex values must equal real and imaginary parts interpolated
        # separately.
        points, values = self._get_sample_4d()
        values = values - 2j*values
        sample = np.asarray([[0.1, 0.1, 1., .9], [0.2, 0.1, .45, .8],
                             [0.5, 0.5, .5, .5]])

        for method in ['linear', 'nearest']:
            interp = RegularGridInterpolator(points, values,
                                             method=method)
            rinterp = RegularGridInterpolator(points, values.real,
                                              method=method)
            iinterp = RegularGridInterpolator(points, values.imag,
                                              method=method)

            v1 = interp(sample)
            v2 = rinterp(sample) + 1j*iinterp(sample)
            assert_allclose(v1, v2)

    def test_linear_xi1d(self):
        # A single 1-d query point.
        points, values = self._get_sample_4d_2()
        interp = RegularGridInterpolator(points, values)
        sample = np.asarray([0.1, 0.1, 10., 9.])
        wanted = 1001.1
        assert_array_almost_equal(interp(sample), wanted)

    def test_linear_xi3d(self):
        points, values = self._get_sample_4d()
        interp = RegularGridInterpolator(points, values)
        sample = np.asarray([[0.1, 0.1, 1., .9], [0.2, 0.1, .45, .8],
                             [0.5, 0.5, .5, .5]])
        wanted = np.asarray([1001.1, 846.2, 555.5])
        assert_array_almost_equal(interp(sample), wanted)

    def test_nearest(self):
        # Each coordinate snaps to the nearest grid value independently.
        points, values = self._get_sample_4d()
        interp = RegularGridInterpolator(points, values, method="nearest")
        sample = np.asarray([0.1, 0.1, .9, .9])
        wanted = 1100.
        assert_array_almost_equal(interp(sample), wanted)
        sample = np.asarray([0.1, 0.1, 0.1, 0.1])
        wanted = 0.
        assert_array_almost_equal(interp(sample), wanted)
        sample = np.asarray([0., 0., 0., 0.])
        wanted = 0.
        assert_array_almost_equal(interp(sample), wanted)
        sample = np.asarray([1., 1., 1., 1.])
        wanted = 1111.
        assert_array_almost_equal(interp(sample), wanted)
        sample = np.asarray([0.1, 0.4, 0.6, 0.9])
        wanted = 1055.
        assert_array_almost_equal(interp(sample), wanted)

    def test_linear_edges(self):
        # Grid corners are reproduced exactly.
        points, values = self._get_sample_4d()
        interp = RegularGridInterpolator(points, values)
        sample = np.asarray([[0., 0., 0., 0.], [1., 1., 1., 1.]])
        wanted = np.asarray([0., 1111.])
        assert_array_almost_equal(interp(sample), wanted)

    def test_valid_create(self):
        # Constructor must reject malformed points/values/method.
        # create a 2d grid of 3 points in each dimension
        points = [(0., .5, 1.), (0., 1., .5)]
        values = np.asarray([0., .5, 1.])
        values0 = values[:, np.newaxis]
        values1 = values[np.newaxis, :]
        values = (values0 + values1 * 10)
        assert_raises(ValueError, RegularGridInterpolator, points, values)
        points = [((0., .5, 1.), ), (0., .5, 1.)]
        assert_raises(ValueError, RegularGridInterpolator, points, values)
        points = [(0., .5, .75, 1.), (0., .5, 1.)]
        assert_raises(ValueError, RegularGridInterpolator, points, values)
        points = [(0., .5, 1.), (0., .5, 1.), (0., .5, 1.)]
        assert_raises(ValueError, RegularGridInterpolator, points, values)
        points = [(0., .5, 1.), (0., .5, 1.)]
        assert_raises(ValueError, RegularGridInterpolator, points, values,
                      method="undefmethod")

    def test_valid_call(self):
        # __call__ must reject unknown methods, wrong dimensionality and
        # out-of-bounds samples (bounds_error defaults to True).
        points, values = self._get_sample_4d()
        interp = RegularGridInterpolator(points, values)
        sample = np.asarray([[0., 0., 0., 0.], [1., 1., 1., 1.]])
        assert_raises(ValueError, interp, sample, "undefmethod")
        sample = np.asarray([[0., 0., 0.], [1., 1., 1.]])
        assert_raises(ValueError, interp, sample)
        sample = np.asarray([[0., 0., 0., 0.], [1., 1., 1., 1.1]])
        assert_raises(ValueError, interp, sample)

    def test_out_of_bounds_extrap(self):
        # fill_value=None enables extrapolation outside the grid.
        points, values = self._get_sample_4d()
        interp = RegularGridInterpolator(points, values, bounds_error=False,
                                         fill_value=None)
        sample = np.asarray([[-.1, -.1, -.1, -.1], [1.1, 1.1, 1.1, 1.1],
                             [21, 2.1, -1.1, -11], [2.1, 2.1, -1.1, -1.1]])
        wanted = np.asarray([0., 1111., 11., 11.])
        assert_array_almost_equal(interp(sample, method="nearest"), wanted)
        wanted = np.asarray([-111.1, 1222.1, -11068., -1186.9])
        assert_array_almost_equal(interp(sample, method="linear"), wanted)

    def test_out_of_bounds_extrap2(self):
        points, values = self._get_sample_4d_2()
        interp = RegularGridInterpolator(points, values, bounds_error=False,
                                         fill_value=None)
        sample = np.asarray([[-.1, -.1, -.1, -.1], [1.1, 1.1, 1.1, 1.1],
                             [21, 2.1, -1.1, -11], [2.1, 2.1, -1.1, -1.1]])
        wanted = np.asarray([0., 11., 11., 11.])
        assert_array_almost_equal(interp(sample, method="nearest"), wanted)
        wanted = np.asarray([-12.1, 133.1, -1069., -97.9])
        assert_array_almost_equal(interp(sample, method="linear"), wanted)

    def test_out_of_bounds_fill(self):
        # With a NaN fill value, out-of-bounds queries are NaN and
        # in-bounds queries are unaffected.
        points, values = self._get_sample_4d()
        interp = RegularGridInterpolator(points, values, bounds_error=False,
                                         fill_value=np.nan)
        sample = np.asarray([[-.1, -.1, -.1, -.1], [1.1, 1.1, 1.1, 1.1],
                             [2.1, 2.1, -1.1, -1.1]])
        wanted = np.asarray([np.nan, np.nan, np.nan])
        assert_array_almost_equal(interp(sample, method="nearest"), wanted)
        assert_array_almost_equal(interp(sample, method="linear"), wanted)
        sample = np.asarray([[0.1, 0.1, 1., .9], [0.2, 0.1, .45, .8],
                             [0.5, 0.5, .5, .5]])
        wanted = np.asarray([1001.1, 846.2, 555.5])
        assert_array_almost_equal(interp(sample), wanted)

    def test_nearest_compare_qhull(self):
        # Cross-check against the Qhull-based scattered-data
        # interpolator on the same points.
        points, values = self._get_sample_4d()
        interp = RegularGridInterpolator(points, values, method="nearest")
        points_qhull = itertools.product(*points)
        points_qhull = [p for p in points_qhull]
        points_qhull = np.asarray(points_qhull)
        values_qhull = values.reshape(-1)
        interp_qhull = NearestNDInterpolator(points_qhull, values_qhull)
        sample = np.asarray([[0.1, 0.1, 1., .9], [0.2, 0.1, .45, .8],
                             [0.5, 0.5, .5, .5]])
        assert_array_almost_equal(interp(sample), interp_qhull(sample))

    def test_linear_compare_qhull(self):
        points, values = self._get_sample_4d()
        interp = RegularGridInterpolator(points, values)
        points_qhull = itertools.product(*points)
        points_qhull = [p for p in points_qhull]
        points_qhull = np.asarray(points_qhull)
        values_qhull = values.reshape(-1)
        interp_qhull = LinearNDInterpolator(points_qhull, values_qhull)
        sample = np.asarray([[0.1, 0.1, 1., .9], [0.2, 0.1, .45, .8],
                             [0.5, 0.5, .5, .5]])
        assert_array_almost_equal(interp(sample), interp_qhull(sample))

    def test_duck_typed_values(self):
        # An array-like that only supports indexing (MyValue, below) must
        # interpolate the same as the underlying ndarray.
        x = np.linspace(0, 2, 5)
        y = np.linspace(0, 1, 7)

        values = MyValue((5, 7))

        for method in ('nearest', 'linear'):
            interp = RegularGridInterpolator((x, y), values,
                                             method=method)
            v1 = interp([0.4, 0.7])

            interp = RegularGridInterpolator((x, y), values._v,
                                             method=method)
            v2 = interp([0.4, 0.7])
            assert_allclose(v1, v2)

    def test_invalid_fill_value(self):
        np.random.seed(1234)
        x = np.linspace(0, 2, 5)
        y = np.linspace(0, 1, 7)
        values = np.random.rand(5, 7)

        # integers can be cast to floats
        RegularGridInterpolator((x, y), values, fill_value=1)

        # complex values cannot
        assert_raises(ValueError, RegularGridInterpolator,
                      (x, y), values, fill_value=1+2j)

    def test_fillvalue_type(self):
        # from #3703; test that interpolator object construction succeeds
        # (big-endian float values with default and explicit fill_value)
        values = np.ones((10, 20, 30), dtype='>f4')
        points = [np.arange(n) for n in values.shape]
        xi = [(1, 1, 1)]
        interpolator = RegularGridInterpolator(points, values)
        interpolator = RegularGridInterpolator(points, values, fill_value=0.)
class MyValue(object):
    """
    Minimal indexable object
    """
    # NOTE(review): instances mimic a 2-d array-like via ndim, shape and
    # __getitem__, while __array__ raises so accidental ndarray
    # conversion is detected.  The backing array is kept in ``_v``, which
    # test_duck_typed_values accesses directly — do not rename it.

    def __init__(self, shape):
        self.ndim = 2
        self.shape = shape
        self._v = np.arange(np.prod(shape)).reshape(shape)

    def __getitem__(self, idx):
        return self._v[idx]

    def __array_interface__(self):
        # Deliberately a plain method (not the attribute/dict protocol),
        # so NumPy cannot use it for conversion.
        return None

    def __array__(self):
        raise RuntimeError("No array representation")
class TestInterpN(TestCase):
def _sample_2d_data(self):
x = np.arange(1, 6)
x = np.array([.5, 2., 3., 4., 5.5])
y = np.arange(1, 6)
y = np.array([.5, 2., 3., 4., 5.5])
z = np.array([[1, 2, 1, 2, 1], [1, 2, 1, 2, 1], [1, 2, 3, 2, 1],
[1, 2, 2, 2, 1], [1, 2, 1, 2, 1]])
return x, y, z
    def test_spline_2d(self):
        # interpn with method="splinef2d" must match RectBivariateSpline
        # evaluated at the same scattered query points.
        x, y, z = self._sample_2d_data()
        lut = RectBivariateSpline(x, y, z)

        xi = np.array([[1, 2.3, 5.3, 0.5, 3.3, 1.2, 3],
                       [1, 3.3, 1.2, 4.0, 5.0, 1.0, 3]]).T
        assert_array_almost_equal(interpn((x, y), z, xi, method="splinef2d"),
                                  lut.ev(xi[:, 0], xi[:, 1]))
    def test_list_input(self):
        # Plain Python lists for points/values/xi must give the same
        # result as ndarrays, for every method.
        x, y, z = self._sample_2d_data()
        xi = np.array([[1, 2.3, 5.3, 0.5, 3.3, 1.2, 3],
                       [1, 3.3, 1.2, 4.0, 5.0, 1.0, 3]]).T

        for method in ['nearest', 'linear', 'splinef2d']:
            v1 = interpn((x, y), z, xi, method=method)
            v2 = interpn((x.tolist(), y.tolist()), z.tolist(),
                         xi.tolist(), method=method)
            assert_allclose(v1, v2, err_msg=method)
def test_spline_2d_outofbounds(self):
x = np.array([.5, 2., 3., 4., 5.5])
y = np.array([.5, 2., 3., 4., 5.5])
z = np.array([[1, 2, 1, 2, 1], [1, 2, 1, 2, 1], [1, 2, 3, 2, 1],
[1, 2, 2, 2, 1], [1, 2, 1, 2, 1]])
lut = RectBivariateSpline(x, y, z)
xi = np.array([[1, 2.3, 6.3, 0.5, 3.3, 1.2, 3],
[1, 3.3, 1.2, -4.0, 5.0, 1.0, 3]]).T
actual = interpn((x, y), z, xi, method="splinef2d",
bounds_error=False, fill_value=999.99)
expected = lut.ev(xi[:, 0], xi[:, 1])
expected[2:4] = 999.99
assert_array_almost_equal(actual, expected)
# no extrapolation for splinef2d
assert_raises(ValueError, interpn, (x, y), z, xi, method="splinef2d",
bounds_error=False, fill_value=None)
def _sample_4d_data(self):
points = [(0., .5, 1.)] * 2 + [(0., 5., 10.)] * 2
values = np.asarray([0., .5, 1.])
values0 = values[:, np.newaxis, np.newaxis, np.newaxis]
values1 = values[np.newaxis, :, np.newaxis, np.newaxis]
values2 = values[np.newaxis, np.newaxis, :, np.newaxis]
values3 = values[np.newaxis, np.newaxis, np.newaxis, :]
values = (values0 + values1 * 10 + values2 * 100 + values3 * 1000)
return points, values
def test_linear_4d(self):
# create a 4d grid of 3 points in each dimension
points, values = self._sample_4d_data()
interp_rg = RegularGridInterpolator(points, values)
sample = np.asarray([[0.1, 0.1, 10., 9.]])
wanted = interpn(points, values, sample, method="linear")
assert_array_almost_equal(interp_rg(sample), wanted)
def test_4d_linear_outofbounds(self):
# create a 4d grid of 3 points in each dimension
points, values = self._sample_4d_data()
sample = np.asarray([[0.1, -0.1, 10.1, 9.]])
wanted = 999.99
actual = interpn(points, values, sample, method="linear",
bounds_error=False, fill_value=999.99)
assert_array_almost_equal(actual, wanted)
def test_nearest_4d(self):
# create a 4d grid of 3 points in each dimension
points, values = self._sample_4d_data()
interp_rg = RegularGridInterpolator(points, values, method="nearest")
sample = np.asarray([[0.1, 0.1, 10., 9.]])
wanted = interpn(points, values, sample, method="nearest")
assert_array_almost_equal(interp_rg(sample), wanted)
def test_4d_nearest_outofbounds(self):
# create a 4d grid of 3 points in each dimension
points, values = self._sample_4d_data()
sample = np.asarray([[0.1, -0.1, 10.1, 9.]])
wanted = 999.99
actual = interpn(points, values, sample, method="nearest",
bounds_error=False, fill_value=999.99)
assert_array_almost_equal(actual, wanted)
def test_xi_1d(self):
# verify that 1D xi works as expected
points, values = self._sample_4d_data()
sample = np.asarray([0.1, 0.1, 10., 9.])
v1 = interpn(points, values, sample, bounds_error=False)
v2 = interpn(points, values, sample[None,:], bounds_error=False)
assert_allclose(v1, v2)
def test_xi_nd(self):
# verify that higher-d xi works as expected
points, values = self._sample_4d_data()
np.random.seed(1234)
sample = np.random.rand(2, 3, 4)
v1 = interpn(points, values, sample, method='nearest',
bounds_error=False)
assert_equal(v1.shape, (2, 3))
v2 = interpn(points, values, sample.reshape(-1, 4),
method='nearest', bounds_error=False)
assert_allclose(v1, v2.reshape(v1.shape))
def test_xi_broadcast(self):
# verify that the interpolators broadcast xi
x, y, values = self._sample_2d_data()
points = (x, y)
xi = np.linspace(0, 1, 2)
yi = np.linspace(0, 3, 3)
for method in ['nearest', 'linear', 'splinef2d']:
sample = (xi[:,None], yi[None,:])
v1 = interpn(points, values, sample, method=method,
bounds_error=False)
assert_equal(v1.shape, (2, 3))
xx, yy = np.meshgrid(xi, yi)
sample = np.c_[xx.T.ravel(), yy.T.ravel()]
v2 = interpn(points, values, sample,
method=method, bounds_error=False)
assert_allclose(v1, v2.reshape(v1.shape))
def test_nonscalar_values(self):
# Verify that non-scalar valued values also works
points, values = self._sample_4d_data()
np.random.seed(1234)
values = np.random.rand(3, 3, 3, 3, 6)
sample = np.random.rand(7, 11, 4)
for method in ['nearest', 'linear']:
v = interpn(points, values, sample, method=method,
bounds_error=False)
assert_equal(v.shape, (7, 11, 6), err_msg=method)
vs = [interpn(points, values[...,j], sample, method=method,
bounds_error=False)
for j in range(6)]
v2 = np.array(vs).transpose(1, 2, 0)
assert_allclose(v, v2, err_msg=method)
# Vector-valued splines supported with fitpack
assert_raises(ValueError, interpn, points, values, sample,
method='splinef2d')
def test_complex(self):
x, y, values = self._sample_2d_data()
points = (x, y)
values = values - 2j*values
sample = np.array([[1, 2.3, 5.3, 0.5, 3.3, 1.2, 3],
[1, 3.3, 1.2, 4.0, 5.0, 1.0, 3]]).T
for method in ['linear', 'nearest']:
v1 = interpn(points, values, sample, method=method)
v2r = interpn(points, values.real, sample, method=method)
v2i = interpn(points, values.imag, sample, method=method)
v2 = v2r + 1j*v2i
assert_allclose(v1, v2)
# Complex-valued data not supported by spline2fd
with warnings.catch_warnings():
warnings.simplefilter("error", category=np.ComplexWarning)
assert_raises(np.ComplexWarning, interpn, points, values,
sample, method='splinef2d')
def test_duck_typed_values(self):
x = np.linspace(0, 2, 5)
y = np.linspace(0, 1, 7)
values = MyValue((5, 7))
for method in ('nearest', 'linear'):
v1 = interpn((x, y), values, [0.4, 0.7], method=method)
v2 = interpn((x, y), values._v, [0.4, 0.7], method=method)
assert_allclose(v1, v2)
def test_matrix_input(self):
x = np.linspace(0, 2, 5)
y = np.linspace(0, 1, 7)
values = np.matrix(np.random.rand(5, 7))
sample = np.random.rand(3, 7, 2)
for method in ('nearest', 'linear', 'splinef2d'):
v1 = interpn((x, y), values, sample, method=method)
v2 = interpn((x, y), np.asarray(values), sample, method=method)
assert_allclose(v1, np.asmatrix(v2))
if __name__ == "__main__":
    # Run this module's test suite when executed directly.
    run_module_suite()
| nvoron23/scipy | scipy/interpolate/tests/test_interpolate.py | Python | bsd-3-clause | 63,394 |
"""Routes configuration
The more specific and detailed routes should be defined first so they
may take precedent over the more generic routes. For more information
refer to the routes manual at http://routes.groovie.org/docs/
"""
from routes import Mapper
def make_map(config):
    """Create, configure and return the routes Mapper."""
    route_map = Mapper(directory=config['pylons.paths']['controllers'],
                       always_scan=config['debug'])
    route_map.minimization = False
    route_map.explicit = False

    def connect_paged(path, action):
        # Register a browse route together with its paginated variant.
        route_map.connect(path, controller='browse', action=action)
        route_map.connect(path + '/page/{page}', controller='browse',
                          action=action)

    # The ErrorController route (handles 404/500 error pages); it should
    # likely stay at the top, ensuring it can always be resolved.
    route_map.connect('/error/{action}', controller='error')
    route_map.connect('/error/{action}/{id}', controller='error')

    connect_paged('/browse', 'main')
    connect_paged('/browse/best', 'best')
    connect_paged('/browse/worst', 'worst')

    route_map.connect('/browse/tags', controller='browse', action='tags')
    route_map.connect('/browse/tags/{tag}', controller='browse', action='tags')
    route_map.connect('/browse/tags/{tag}/page/{page}', controller='browse',
                      action='tags')

    connect_paged('/browse/disapproved', 'disapproved')
    connect_paged('/browse/unapproved', 'unapproved')
    connect_paged('/browse/deleted', 'deleted')
    connect_paged('/browse/reported', 'reported')
    connect_paged('/browse/favourites', 'favourites')

    route_map.connect('/browse/random', controller='browse', action='random')
    # Keep this catch-all after the specific /browse/* routes above so it
    # cannot shadow them.
    route_map.connect('/browse/{ref_id}', controller='browse',
                      action='view_one')

    route_map.connect('/search', controller='browse', action='search')
    route_map.connect('/search/{term}', controller='browse', action='search')
    route_map.connect('/search/{term}/page/{page}', controller='browse',
                      action='search')

    route_map.connect('/create', controller='create', action='quote')
    route_map.connect('/signup', controller='account', action='create')
    route_map.connect('/login', controller='account', action='login')
    route_map.connect('/logout', controller='account', action='logout')
    route_map.connect('/reset_password', controller='account',
                      action='reset_password')

    # Quote actions exposed through the JSON API.
    for api_action in ('approve', 'delete', 'disapprove', 'favourite',
                       'report'):
        route_map.connect('/api/v1/quotes/{quote_id}/' + api_action,
                          controller='api_v1', action=api_action)
    route_map.connect('/api/v1/quotes/{quote_id}/vote/{direction}',
                      controller='api_v1', action='vote')

    route_map.connect('/', controller='home', action='main')
    # Canonicalise trailing slashes with a permanent redirect.
    route_map.redirect('/*(url)/', '/{url}',
                       _redirect_code='301 Moved Permanently')
    return route_map
| kopf/porick | porick/config/routing.py | Python | apache-2.0 | 3,710 |
# Copyright 2008-2015 Canonical
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# For further info, check http://launchpad.net/filesync-server
"""A config management layer."""
import os
import yaml
from utilities import devconfig
def _load():
    """Load and return the configuration data from the YAML file named by
    the ``CONFIG`` environment variable.

    Raises KeyError if ``CONFIG`` is not set, and the usual IOError if the
    file cannot be opened.
    """
    fpath = os.environ["CONFIG"]
    with open(fpath, "rt") as fh:
        # safe_load() restricts parsing to plain YAML tags. The previous
        # bare yaml.load() can instantiate arbitrary Python objects from
        # the file and is deprecated without an explicit Loader since
        # PyYAML 5.1.
        data = yaml.safe_load(fh)
    return data
class _Config(dict):
    """The configuration holder.

    A dict whose keys are also reachable as attributes. Nested plain dicts
    are wrapped lazily as _Config instances on first attribute access and
    the wrapper is cached back into the dict, so ``config.a.b`` works for
    nested sections.
    """

    def __init__(self, data=None):
        # With no explicit data, read the file pointed at by $CONFIG.
        if data is None:
            data = _load()
        super(_Config, self).__init__(data)

    def __getattr__(self, name):
        # __getattr__ must raise AttributeError (not KeyError) for missing
        # names, otherwise hasattr(), copy and pickle misbehave. The old
        # implementation let the KeyError escape.
        try:
            value = self[name]
        except KeyError:
            raise AttributeError(name)
        if isinstance(value, dict) and not isinstance(value, _Config):
            # Wrap nested sections once and cache the wrapper.
            value = _Config(value)
            self[name] = value
        return value

    def __setattr__(self, name, value):
        self[name] = value

    def __str__(self):
        return "<Config at %d: %s>" % (
            id(self), super(_Config, self).__str__())
# Instantiate the shared config singleton and dynamically load the active
# ports into it (presumably development_ports mutates `config` in place —
# confirm against utilities.devconfig).
config = _Config()
devconfig.development_ports(config)
| zhsso/ubunto-one | lib/config.py | Python | agpl-3.0 | 1,777 |
"""
"Alpha function" synapse of Rall. Chapter 12.2
"""
from __future__ import division
from PyDSTool import *
from PyDSTool.Toolbox.phaseplane import *
from common_lib import *
# Initial conditions: both synaptic state variables start at rest.
icdict = {'a1': 0, 'a2': 0}
# Parameters: synaptic time constant (ms), spike-detection threshold (mV)
# and the presynaptic holding voltage (mV).
pardict = {'tau_syn': 2, 'vthresh': -10,
           'vpre': -80}

DSargs = args()
DSargs.name = 'alpha_syn'
DSargs.ics = icdict
DSargs.pars = pardict
DSargs.tdata = [0, 3]
DSargs.auxvars = ['vpre_aux']
DSargs.algparams = {'init_step': 1e-3}
# PyDSTool has a built-in Heaviside function
# a1 is driven by the thresholded presynaptic voltage; a2 low-pass filters
# a1, yielding the alpha-function shape. vpre_aux just records vpre.
DSargs.varspecs = {'a1': '(-a1+heav(vpre-vthresh))/tau_syn',
                   'a2': '(-a2+a1)/tau_syn',
                   'vpre_aux': 'vpre'}

syn = Generator.Vode_ODEsystem(DSargs)

# primitive protocol for mimicking the pre-synaptic voltage's
# action potential for 1 ms starting at t = 5ms
t1 = 5
s1 = args(pars={'vpre': -80},
          tdata=[0, t1])
t2 = 1
s2 = args(pars={'vpre': 50},
          tdata=[0, t2])
t3 = 40
s3 = args(pars={'vpre': -80},
          tdata=[0, t3])
def alpha(t):
    """Explicit solution of the alpha function for a Dirac-delta impulse
    as the presynaptic spike: t/tau^2 * exp(-t/tau).

    Uses the current value of tau_syn in the model. Accepts a scalar or
    vector t.
    """
    tau_s = syn.pars['tau_syn']
    return (t / (tau_s * tau_s)) * exp(-t / tau_s)
def test(tau):
    """Simulate the synapse with time constant `tau` (ms) and plot the
    numerical solution against the explicit alpha function."""
    syn.set(pars={'tau_syn': tau})
    # Run the three-stage piecewise protocol (rest / spike / rest).
    traj, pts = pcw_protocol(syn, [s1,s2,s3])
    plt.figure(1)
    plt.clf()
    # Presynaptic voltage is scaled down by 1000 to fit on the same axes.
    plt.plot(pts['t'], pts['vpre_aux']*0.001, 'k', linewidth=3, label='pre-syn v /1000')
    plt.plot(pts['t'], pts['a2'], 'g', linewidth=2, label='a2 (output)')
    plt.plot(pts['t'], pts['a1'], 'r:', label='a1')
    ts = linspace(0, 30, 500)
    ss = alpha(ts)
    # offset ts for alpha function by onset of pre-synaptic spike
    plt.plot(ts+t1, ss, 'g--', label='s (explicit)')
    plt.xlabel('t')
    plt.legend(loc='upper right')
    plt.title('tau syn = %.2f ms' % tau)
    plt.ylim([-0.1, 0.9])
    plt.xlim([0, max(pts['t'])])
    plt.show()
# Demonstration run with a fast synapse (tau_syn = 0.5 ms).
test(0.5)
| robclewley/compneuro | alpha_syn.py | Python | bsd-3-clause | 1,936 |
# -*- coding: utf-8 -*-
#
# vinit_example.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
'''
Initial membrane voltage
------------------------
Plot several runs of the `iaf_cond_exp_sfa_rr` neuron without input for various
initial values of the membrane potential.
'''
'''
First, the necessary modules for simulation and plotting are imported.
'''
import nest
import numpy
import pylab as pl
'''
A loop runs over a range of initial membrane voltages.
In the beginning of each iteration, the simulation kernel is put back to its
initial state using `ResetKernel`.
Next, a neuron is instantiated with `Create`. The used neuron model
`iaf_cond_exp_sfa_rr` is an implementation of a spiking neuron with
integrate-and-fire dynamics, conductance-based synapses, an additional
spike-frequency adaptation and relative refractory mechanisms as described in
Dayan, P. and Abbott, L.F. (2001) **Theoretical neuroscience**,
*MIT Press*, page 166. Incoming spike events induce a post-synaptic change of
conductance modelled by an exponential function. `SetStatus` allows to
assign the initial membrane voltage of the current loop run to the neuron.
`Create` is used once more to instantiate a `voltmeter` as recording device
which is subsequently connected to the neuron with `Connect`.
Then, a simulation with a duration of 75 ms is started with `Simulate`.
When the simulation has finished, the recorded times and membrane voltages are
read from the voltmeter via `GetStatus` where they can be accessed through the
key `events` of the status dictionary.
Finally, the time course of the membrane voltages is plotted for each of the
different initial values.
'''
for vinit in numpy.arange(-100, -50, 10, float):
    # Start every run from a pristine kernel so the runs are independent.
    nest.ResetKernel()
    cbn = nest.Create("iaf_cond_exp_sfa_rr")
    # Assign this run's initial membrane potential (mV).
    nest.SetStatus(cbn, "V_m", vinit)
    voltmeter = nest.Create("voltmeter")
    nest.Connect(voltmeter, cbn)
    nest.Simulate(75.0)
    # Recorded samples live in the voltmeter's "events" dictionary.
    t = nest.GetStatus(voltmeter,"events")[0]["times"]
    v = nest.GetStatus(voltmeter,"events")[0]["V_m"]
    pl.plot(t, v, label="initial V_m = %.2f mV" % vinit)

'''
Set the legend and the labels for the plot outside of the loop.
'''

pl.legend(loc=4)
pl.xlabel("time (ms)")
pl.ylabel("V_m (mV)")
| kristoforcarlson/nest-simulator-fork | pynest/examples/vinit_example.py | Python | gpl-2.0 | 2,868 |
import argparse
import os
import subprocess

import scrapy

from realclearpolitics.spiders.spider import RcpSpider
from scrapy.crawler import CrawlerProcess
# Command-line interface for scraping realclearpolitics poll data.
parser = argparse.ArgumentParser('Scrap realclearpolitics polls data')
parser.add_argument('url', action="store")
parser.add_argument('--locale', action="store", default='')
parser.add_argument('--race', action="store", default='primary')
parser.add_argument('--csv', dest='to_csv', action='store_true')
parser.add_argument('--output', dest='output', action='store')

args = parser.parse_args()
url = args.url
extra_fields = {'locale': args.locale, 'race': args.race}

if args.to_csv:
    if args.output is None:
        # Default the output name to the last path segment of the URL.
        filename = url.split('/')[-1].split('.')[0]
        output = filename + ".csv"
        print("No output file specified : using " + output)
    else:
        output = args.output
        if not output.endswith(".csv"):
            output = output + ".csv"

    # Remove a stale file so scrapy does not append to it.
    if os.path.isfile(output):
        os.remove(output)

    # Invoke scrapy as an argument list without a shell. The previous
    # os.system() call interpolated the user-supplied URL into a shell
    # command string, which allowed shell injection. The exit status is
    # ignored, matching the old os.system() usage.
    subprocess.call(["scrapy", "crawl", "realclearpoliticsSpider",
                     "-a", "url=" + url, "-o", output])
else:
    # Run the spider in-process and hand items to the pipeline instead of
    # exporting a CSV file.
    settings = {
        'ITEM_PIPELINES': {
            'realclearpolitics.pipeline.PollPipeline': 300,
        },
        'LOG_LEVEL': 'ERROR',
        'DOWNLOAD_HANDLERS': {'s3': None},
    }
    process = CrawlerProcess(settings)
    process.crawl(RcpSpider, url, extra_fields)
    process.start()
| dpxxdp/berniemetrics | private/scrapers/realclearpolitics-scraper/scraper.py | Python | mit | 1,355 |
import re
def increment_string(strng):
    """Return `strng` with its trailing integer incremented.

    If the string ends in digits, that suffix is incremented with its
    leading-zero padding preserved (e.g. 'foo009' -> 'foo010') and widened
    on carry ('foo99' -> 'foo100').  If there is no trailing number, '1'
    is appended.

    Fixes a bug in the previous version: a digit anywhere in the input
    (e.g. 'a0b') made it skip the "append 1" branch, returning the string
    unchanged even though it has no trailing number.
    """
    head = strng.rstrip('0123456789')
    digits = strng[len(head):]
    if not digits:
        # No trailing number at all: start counting from 1.
        return strng + '1'
    # zfill() keeps the original zero padding and naturally widens when
    # the increment carries over.
    return head + str(int(digits) + 1).zfill(len(digits))
def increment(s):
    """Increment the decimal string `s`, preserving leading-zero padding."""
    padded = s.startswith('0')
    # Prefix a sentinel '1' so int() keeps the zero padding's width, then
    # strip it again after the arithmetic.
    number = int('1' + s) if padded else int(s)
    result = str(number + 1)
    return result[1:] if padded else result
| Orange9000/Codewars | Solutions/5kyu/5kyu_string_incrementer.py | Python | mit | 306 |
"""
API for submitting background tasks by an instructor for a course.
Also includes methods for getting information about tasks that have
already been submitted, filtered either by running state or input
arguments.
"""
import hashlib
from celery.states import READY_STATES
from xmodule.modulestore.django import modulestore
from instructor_task.models import InstructorTask
from instructor_task.tasks import (rescore_problem,
reset_problem_attempts,
delete_problem_state,
send_bulk_course_email,
calculate_grades_csv)
from instructor_task.api_helper import (check_arguments_for_rescoring,
encode_problem_and_student_input,
submit_task)
from bulk_email.models import CourseEmail
def get_running_instructor_tasks(course_id):
    """Return a query of InstructorTask objects for `course_id` that are
    still running.

    Tasks in any "ready" Celery state (e.g. success, failure, revoked) are
    filtered out.  Used to generate the task list on the instructor
    dashboard.
    """
    tasks = InstructorTask.objects.filter(course_id=course_id)
    # Peel off each terminal state; what remains is still in flight.
    for ready_state in READY_STATES:
        tasks = tasks.exclude(task_state=ready_state)
    return tasks.order_by('-id')
def get_instructor_task_history(course_id, problem_url=None, student=None, task_type=None):
    """Return a query of historical InstructorTask objects for a course,
    optionally narrowed to a particular problem, student and/or task type.
    """
    tasks = InstructorTask.objects.filter(course_id=course_id)
    if problem_url is not None or student is not None:
        # The stored task_key encodes the problem/student combination.
        _, task_key = encode_problem_and_student_input(problem_url, student)
        tasks = tasks.filter(task_key=task_key)
    if task_type is not None:
        tasks = tasks.filter(task_type=task_type)
    return tasks.order_by('-id')
def submit_rescore_problem_for_student(request, course_id, problem_url, student):
    """Request that a problem be rescored for one student as a background task.

    The `problem_url` locates the problem using i4x-type notation and
    `student` is a User object.  Raises ItemNotFoundException if the
    problem doesn't exist, AlreadyRunningError if a rescore for this
    student is already in progress, and NotImplementedError if the problem
    doesn't support rescoring.

    This method makes sure the InstructorTask entry is committed.  When
    called from a view wrapped by TransactionMiddleware (and thus inside a
    "commit-on-success" transaction), an autocommit buried within here
    will cause any pending transaction to be committed by a successful
    save; future database operations take place in a separate transaction.
    """
    # Validate up front; any exception propagates to the caller.
    check_arguments_for_rescoring(course_id, problem_url)

    task_input, task_key = encode_problem_and_student_input(problem_url, student)
    return submit_task(request, 'rescore_problem', rescore_problem,
                       course_id, task_input, task_key)
def submit_rescore_problem_for_all_students(request, course_id, problem_url):
    """Request that a problem be rescored as a background task for every
    student who has accessed it and provided and checked an answer.

    The `problem_url` locates the problem using i4x-type notation.  Raises
    ItemNotFoundException if the problem doesn't exist, AlreadyRunningError
    if the problem is already being rescored, and NotImplementedError if
    the problem doesn't support rescoring.

    This method makes sure the InstructorTask entry is committed.  When
    called from a view wrapped by TransactionMiddleware (and thus inside a
    "commit-on-success" transaction), an autocommit buried within here
    will cause any pending transaction to be committed by a successful
    save; future database operations take place in a separate transaction.
    """
    # Validate up front; any exception propagates to the caller.
    check_arguments_for_rescoring(course_id, problem_url)

    # submit_task reserves the task, raising AlreadyRunningError if one is
    # already in flight for this key.
    task_input, task_key = encode_problem_and_student_input(problem_url)
    return submit_task(request, 'rescore_problem', rescore_problem,
                       course_id, task_input, task_key)
def submit_reset_problem_attempts_for_all_students(request, course_id, problem_url):
    """Request that a problem's attempts be reset as a background task for
    every student who has accessed it.

    The `problem_url` locates the problem using i4x-type notation.  Raises
    ItemNotFoundException if the problem doesn't exist and
    AlreadyRunningError if the problem is already being reset.

    This method makes sure the InstructorTask entry is committed.  When
    called from a view wrapped by TransactionMiddleware (and thus inside a
    "commit-on-success" transaction), an autocommit buried within here
    will cause any pending transaction to be committed by a successful
    save; future database operations take place in a separate transaction.
    """
    # Existence check only (the URL is typed in by hand): a bad location
    # raises and propagates to the caller.
    modulestore().get_instance(course_id, problem_url)

    task_input, task_key = encode_problem_and_student_input(problem_url)
    return submit_task(request, 'reset_problem_attempts',
                       reset_problem_attempts, course_id, task_input,
                       task_key)
def submit_delete_problem_state_for_all_students(request, course_id, problem_url):
    """Request that a problem's state be deleted as a background task for
    every student who has accessed it.

    The `problem_url` locates the problem using i4x-type notation.  Raises
    ItemNotFoundException if the problem doesn't exist and
    AlreadyRunningError if this problem's state is already being deleted.

    This method makes sure the InstructorTask entry is committed.  When
    called from a view wrapped by TransactionMiddleware (and thus inside a
    "commit-on-success" transaction), an autocommit buried within here
    will cause any pending transaction to be committed by a successful
    save; future database operations take place in a separate transaction.
    """
    # Existence check only (the URL is typed in by hand): a bad location
    # raises and propagates to the caller.
    modulestore().get_instance(course_id, problem_url)

    task_input, task_key = encode_problem_and_student_input(problem_url)
    return submit_task(request, 'delete_problem_state',
                       delete_problem_state, course_id, task_input,
                       task_key)
def submit_bulk_course_email(request, course_id, email_id):
    """Request that a bulk email be sent as a background task.

    The CourseEmail identified by `email_id` will be sent to the enrolled
    students of the course.  Raises AlreadyRunningError if the same
    recipients are already being emailed with the same CourseEmail object.

    This method makes sure the InstructorTask entry is committed.  When
    called from a view wrapped by TransactionMiddleware (and thus inside a
    "commit-on-success" transaction), an autocommit buried within here
    will cause any pending transaction to be committed by a successful
    save; future database operations take place in a separate transaction.
    """
    # The course is assumed to exist and the user to have been authorized
    # already; fetching the CourseEmail both verifies it exists and gives
    # us its recipient group ("to" option).
    email_obj = CourseEmail.objects.get(id=email_id)
    to_option = email_obj.to_option

    # to_option is passed as a separate input even though it is (currently)
    # stored on the CourseEmail, so it is visible in the progress status.
    # (The recipient may eventually move out of the CourseEmail so a saved
    # email can be re-sent to different recipients.)
    task_input = {'email_id': email_id, 'to_option': to_option}
    key_stub = "{email_id}_{to_option}".format(email_id=email_id,
                                               to_option=to_option)
    # An MD5 digest keeps the task key short and fixed-length.
    task_key = hashlib.md5(key_stub).hexdigest()
    return submit_task(request, 'bulk_course_email', send_bulk_course_email,
                       course_id, task_input, task_key)
def submit_calculate_grades_csv(request, course_id):
    """Request a grade-report CSV for the course as a background task.

    Raises AlreadyRunningError if the course's grades are already being
    updated.
    """
    # Grade reports take no input; an empty task key means at most one
    # such task may run per course at a time.
    return submit_task(request, 'grade_course', calculate_grades_csv,
                       course_id, {}, "")
| XiaodunServerGroup/ddyedx | lms/djangoapps/instructor_task/api.py | Python | agpl-3.0 | 10,240 |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Routine for decoding the CIFAR-10 binary file format."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow.compat.v1 as tf
# Process images of this size. Note that this differs from the original CIFAR
# image size of 32 x 32. If one alters this number, then the entire model
# architecture will change and any model would need to be retrained.
IMAGE_SIZE = 24
# Global constants describing the CIFAR-10 data set.
NUM_CLASSES = 10
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = 50000
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = 10000
def read_cifar10(filename_queue):
  """Reads and parses examples from CIFAR10 data files.

  Recommendation: if you want N-way read parallelism, call this function
  N times. This will give you N independent Readers reading different
  files & positions within those files, which will give better mixing of
  examples.

  Args:
    filename_queue: A queue of strings with the filenames to read from.

  Returns:
    An object representing a single example, with the following fields:
      height: number of rows in the result (32)
      width: number of columns in the result (32)
      depth: number of color channels in the result (3)
      key: a scalar string Tensor describing the filename & record number
        for this example.
      label: an int32 Tensor with the label in the range 0..9.
      uint8image: a [height, width, depth] uint8 Tensor with the image data
  """

  class CIFAR10Record(object):
    # Plain record holder; fields are attached dynamically below.
    pass
  result = CIFAR10Record()

  # Dimensions of the images in the CIFAR-10 dataset.
  # See http://www.cs.toronto.edu/~kriz/cifar.html for a description of the
  # input format.
  label_bytes = 1  # 2 for CIFAR-100
  result.height = 32
  result.width = 32
  result.depth = 3
  image_bytes = result.height * result.width * result.depth
  # Every record consists of a label followed by the image, with a
  # fixed number of bytes for each.
  record_bytes = label_bytes + image_bytes

  # Read a record, getting filenames from the filename_queue. No
  # header or footer in the CIFAR-10 format, so we leave header_bytes
  # and footer_bytes at their default of 0.
  reader = tf.FixedLengthRecordReader(record_bytes=record_bytes)
  result.key, value = reader.read(filename_queue)

  # Convert from a string to a vector of uint8 that is record_bytes long.
  record_bytes = tf.decode_raw(value, tf.uint8)

  # The first bytes represent the label, which we convert from uint8->int32.
  result.label = tf.cast(
      tf.strided_slice(record_bytes, [0], [label_bytes]), tf.int32)

  # The remaining bytes after the label represent the image, which we reshape
  # from [depth * height * width] to [depth, height, width].
  depth_major = tf.reshape(
      tf.strided_slice(record_bytes, [label_bytes],
                       [label_bytes + image_bytes]),
      [result.depth, result.height, result.width])
  # Convert from [depth, height, width] to [height, width, depth].
  result.uint8image = tf.transpose(depth_major, [1, 2, 0])

  return result
def _generate_image_and_label_batch(image, label, min_queue_examples,
                                    batch_size, shuffle):
  """Construct a queued batch of images and labels.

  Args:
    image: 3-D Tensor of [height, width, 3] of type.float32.
    label: 1-D Tensor of type.int32
    min_queue_examples: int32, minimum number of samples to retain
      in the queue that provides of batches of examples.
    batch_size: Number of images per batch.
    shuffle: boolean indicating whether to use a shuffling queue.

  Returns:
    images: Images. 4D tensor of [batch_size, height, width, 3] size.
    labels: Labels. 1D tensor of [batch_size] size.
  """
  # Sixteen reader threads feed the example queue; the queue is sized to
  # hold the minimum backlog plus a few batches of headroom.
  num_preprocess_threads = 16
  queue_capacity = min_queue_examples + 3 * batch_size
  if shuffle:
    images, label_batch = tf.train.shuffle_batch(
        [image, label],
        batch_size=batch_size,
        num_threads=num_preprocess_threads,
        capacity=queue_capacity,
        min_after_dequeue=min_queue_examples)
  else:
    images, label_batch = tf.train.batch(
        [image, label],
        batch_size=batch_size,
        num_threads=num_preprocess_threads,
        capacity=queue_capacity)

  # Display the training images in the visualizer.
  tf.summary.image('images', images)

  return images, tf.reshape(label_batch, [batch_size])
def distorted_inputs(data_dir, batch_size):
  """Construct distorted input for CIFAR training using the Reader ops.

  Builds a TF1 queue-based pipeline: filename queue -> record reader ->
  random crop/flip/brightness/contrast -> per-image standardization ->
  shuffled batching. NOTE(review): the returned tensors presumably only
  yield values after queue runners are started (tf.train.start_queue_runners)
  — confirm in the caller.

  Args:
    data_dir: Path to the CIFAR-10 data directory.
    batch_size: Number of images per batch.

  Returns:
    images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
    labels: Labels. 1D tensor of [batch_size] size.

  Raises:
    ValueError: if any expected data_batch_*.bin file is missing.
  """
  # CIFAR-10 training data ships as five binary batch files.
  filenames = [
      os.path.join(data_dir, 'data_batch_%d.bin' % i) for i in xrange(1, 6)
  ]
  for f in filenames:
    if not tf.gfile.Exists(f):
      raise ValueError('Failed to find file: ' + f)

  # Create a queue that produces the filenames to read.
  filename_queue = tf.train.string_input_producer(filenames)

  # Read examples from files in the filename queue.
  read_input = read_cifar10(filename_queue)
  reshaped_image = tf.cast(read_input.uint8image, tf.float32)

  height = IMAGE_SIZE
  width = IMAGE_SIZE

  # Image processing for training the network. Note the many random
  # distortions applied to the image.

  # Randomly crop a [height, width] section of the image.
  distorted_image = tf.random_crop(reshaped_image, [height, width, 3])

  # Randomly flip the image horizontally.
  distorted_image = tf.image.random_flip_left_right(distorted_image)

  # Because these operations are not commutative, consider randomizing
  # the order their operation.
  distorted_image = tf.image.random_brightness(distorted_image, max_delta=63)
  distorted_image = tf.image.random_contrast(
      distorted_image, lower=0.2, upper=1.8)

  # Subtract off the mean and divide by the variance of the pixels.
  float_image = tf.image.per_image_standardization(distorted_image)

  # Set the shapes of tensors (static shape info for downstream ops).
  float_image.set_shape([height, width, 3])
  read_input.label.set_shape([1])

  # Ensure that the random shuffling has good mixing properties: keep at
  # least 40% of an epoch in the queue before batches are produced.
  min_fraction_of_examples_in_queue = 0.4
  min_queue_examples = int(
      NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN * min_fraction_of_examples_in_queue)
  print('Filling queue with %d CIFAR images before starting to train. '
        'This will take a few minutes.' % min_queue_examples)

  # Generate a batch of images and labels by building up a queue of examples.
  return _generate_image_and_label_batch(
      float_image,
      read_input.label,
      min_queue_examples,
      batch_size,
      shuffle=True)
def inputs(eval_data, data_dir, batch_size):
  """Construct input for CIFAR evaluation using the Reader ops.

  Args:
    eval_data: bool, indicating if one should use the train or eval data set.
    data_dir: Path to the CIFAR-10 data directory.
    batch_size: Number of images per batch.

  Returns:
    images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
    labels: Labels. 1D tensor of [batch_size] size.

  Raises:
    ValueError: if an expected data file is missing on disk.
  """
  # Pick the file list and epoch size for the requested split.
  if eval_data:
    filenames = [os.path.join(data_dir, 'test_batch.bin')]
    num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_EVAL
  else:
    filenames = [
        os.path.join(data_dir, 'data_batch_%d.bin' % i) for i in xrange(1, 6)
    ]
    num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN

  # Fail fast (in file order) when a data file is absent.
  for filename in filenames:
    if not tf.gfile.Exists(filename):
      raise ValueError('Failed to find file: ' + filename)

  # Filename queue -> fixed-length record reader -> float image.
  filename_queue = tf.train.string_input_producer(filenames)
  read_input = read_cifar10(filename_queue)
  reshaped_image = tf.cast(read_input.uint8image, tf.float32)

  height = IMAGE_SIZE
  width = IMAGE_SIZE

  # Evaluation uses a deterministic central crop/pad instead of the random
  # distortions applied during training.
  resized_image = tf.image.resize_image_with_crop_or_pad(
      reshaped_image, width, height)

  # Normalize each image to zero mean / unit variance.
  float_image = tf.image.per_image_standardization(resized_image)

  # Pin static shapes so downstream ops know what to expect.
  float_image.set_shape([height, width, 3])
  read_input.label.set_shape([1])

  # Keep at least 40% of an epoch queued so batches are well mixed.
  min_fraction_of_examples_in_queue = 0.4
  min_queue_examples = int(
      num_examples_per_epoch * min_fraction_of_examples_in_queue)

  # FIFO batching (no shuffling) for reproducible evaluation.
  return _generate_image_and_label_batch(
      float_image, read_input.label, min_queue_examples, batch_size,
      shuffle=False)
| google-research/google-research | model_pruning/examples/cifar10/cifar10_input.py | Python | apache-2.0 | 9,542 |
import os
import argparse
import cPickle
import operator
import itertools
from Common.psteff import *
def rerank(model_file, ctx_file, rnk_file,
           score=False, no_normalize=False, fallback=False):
    """Rerank candidate queries with a PST model, one context per line.

    Reads tab-separated suffix contexts from *ctx_file* and tab-separated
    candidates from *rnk_file* in lockstep, reranks each candidate list with
    the model loaded from *model_file*, and writes either the reordered
    candidates (default) or the raw scores (``score=True``) to
    ``rnk_file + "_ADJ" + (".f" | ".gen")``.

    :param model_file: path of the serialized PST model to load
    :param ctx_file: path of the context (suffix) file
    :param rnk_file: path of the candidate/ranking file
    :param score: when True, emit an 'ADJ' header followed by raw scores
    :param no_normalize: forwarded to PSTInfer.rerank
    :param fallback: forwarded to PSTInfer.rerank
    """
    pst = PSTInfer()
    pst.load(model_file)
    out_path = rnk_file + "_ADJ" + (".f" if score else ".gen")
    # BUG FIX: the original leaked both input file handles and only closed
    # the output on the success path; 'with' closes all three even on error.
    with open(out_path, "w") as output_file, \
         open(ctx_file) as ctx_in, \
         open(rnk_file) as rnk_in:
        begin = True
        for ctx_line, rnk_line in itertools.izip(ctx_in, rnk_in):
            suffix = ctx_line.strip().split('\t')
            candidates = rnk_line.strip().split('\t')
            candidates, scores = pst.rerank(
                suffix, candidates, no_normalize=no_normalize, fallback=fallback)
            if not score:
                # Ascending sort by score (reverse=False), matching the
                # original output ordering.
                reranked = [x[0] for x in sorted(zip(candidates, scores),
                                                 key=operator.itemgetter(1),
                                                 reverse=False)]
                print >> output_file, '\t'.join(reranked)
            else:
                # Score mode: single 'ADJ' header, then one score per line.
                if begin:
                    print >> output_file, 'ADJ'
                    begin = False
                for s in scores:
                    print >> output_file, s
| sordonia/hed-qs | baselines/ADJ/adj_rerank.py | Python | bsd-3-clause | 1,124 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
def get_version(*file_paths):
    """Retrieve the package version from elasticsearch_flex/__init__.py.

    The path components are joined relative to this file's directory and the
    target file is scanned for a ``__version__ = "x.y.z"`` assignment.

    :param file_paths: path components relative to this setup.py's directory
    :return: the version string captured from the assignment
    :raises RuntimeError: if no version assignment is found
    """
    filename = os.path.join(os.path.dirname(__file__), *file_paths)
    # BUG FIX: use a context manager so the file handle is closed (the
    # original open(...).read() leaked it).
    with open(filename) as version_fh:
        version_file = version_fh.read()
    version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
                              version_file, re.M)
    if version_match:
        return version_match.group(1)
    raise RuntimeError('Unable to find version string.')
# Resolve the package version once; reused by both the 'tag' shortcut and
# the setup() metadata below.
version = get_version("elasticsearch_flex", "__init__.py")

# Shortcut: `python setup.py publish` builds and uploads sdist + wheel.
if sys.argv[-1] == 'publish':
    try:
        import wheel
        print("Wheel version: ", wheel.__version__)
    except ImportError:
        print('Wheel library missing. Please run "pip install wheel"')
        sys.exit()
    os.system('python setup.py sdist upload')
    os.system('python setup.py bdist_wheel upload')
    sys.exit()

# Shortcut: `python setup.py tag` creates and pushes a git tag for `version`.
if sys.argv[-1] == 'tag':
    print("Tagging the version on git:")
    os.system("git tag -a %s -m 'version %s'" % (version, version))
    os.system("git push --tags")
    sys.exit()

# Long description: README plus changelog (with the rst marker stripped).
readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')

setup(
    name='dj-elasticsearch-flex',
    version=version,
    description="""Elasticsearch for Django which lets you do stuff.""",
    long_description=readme + '\n\n' + history,
    author='Prashant Sinha',
    author_email='prashant+git@noop.pw',
    url='https://github.com/prashnts/dj-elasticsearch-flex',
    packages=[
        'elasticsearch_flex',
    ],
    include_package_data=True,
    install_requires=[],
    license="MIT",
    zip_safe=False,
    keywords='dj-elasticsearch-flex',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Framework :: Django',
        'Framework :: Django :: 1.8',
        'Framework :: Django :: 1.9',
        'Framework :: Django :: 1.10',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Natural Language :: English',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
    ],
)
| prashnts/dj-elasticsearch-flex | setup.py | Python | mit | 2,493 |
"""Test mysql db upgrade"""
# pylint: disable=invalid-name,line-too-long
import pytest
import srv_msg
import misc
import srv_control
from forge_cfg import world
# 1.6.3 version is our starting point here. In 1.6.0 CB backend was introduced in mysql
# but there were no changes in schema between 1.6.0 and 1.6.3. In the tests are included
# scripts to build database and populate it with data. Then test will try to upgrade it
# to newest database schema. Similar test will be for postgres but without CB in it.
def _send_cmd(cmd, arg):
    """Fill in common config-backend boilerplate and send *cmd* to Kea.

    Adds the mysql "remote" backend selector and, for non-"get" commands,
    the default ["abc"] server-tags list, then sends the control command
    expecting result 0.

    :param cmd: command name, e.g. "remote-subnet6-set"
    :param arg: dict of command arguments (not modified by this call)
    :return: parsed control-channel response
    """
    # BUG FIX: work on a shallow copy so the caller's dict is not mutated
    # (the original added "remote"/"server-tags" keys as a side effect).
    arg = dict(arg)
    if "remote" not in arg:
        arg["remote"] = {"type": "mysql"}
    if "get" not in cmd and "server-tags" not in arg:
        arg["server-tags"] = ["abc"]
    return srv_msg.send_ctrl_cmd(dict(command=cmd, arguments=arg), exp_result=0)
def _create_mysql_dump():
    """Build and export a populated Kea 1.6.3 MySQL schema dump.

    Starts Kea with the mysql config/host/lease backends, populates every
    table via config-backend commands, a host reservation and one full DHCPv6
    exchange, then dumps the database to my_db_v6.sql with the interface and
    db user replaced by placeholders so the dump is portable across setups.
    """
    # start Kea with specific version, run this and you will get DB dump with data in all
    # tables, it's designed for kea 1.6.3; if you are using later, using more commands
    # is probably required
    srv_msg.remove_file_from_server('$(SOFTWARE_INSTALL_PATH)/my_db_v6.sql')
    world.f_cfg.multi_threading_enabled = False
    misc.test_setup()
    srv_control.add_hooks('libdhcp_host_cmds.so')
    srv_control.add_hooks('libdhcp_cb_cmds.so')
    srv_control.add_hooks('libdhcp_mysql_cb.so')
    srv_control.open_control_channel()
    srv_control.agent_control_channel('$(MGMT_ADDRESS)')
    world.reservation_backend = "mysql"
    srv_control.define_temporary_lease_db_backend('mysql')
    cb_config = {"config-databases": [{"user": "$(DB_USER)",
                                       "password": "$(DB_PASSWD)",
                                       "name": "$(DB_NAME)",
                                       "type": "mysql"}]}
    world.dhcp_cfg["config-control"] = cb_config
    world.dhcp_cfg["server-tag"] = "abc"
    srv_control.build_and_send_config_files()
    srv_control.start_srv('DHCP', 'started')

    # Register the server tag that all subsequent CB commands refer to.
    cmd = {"command": "remote-server6-set",
           "arguments": {"remote": {"type": "mysql"},
                         "servers": [{"server-tag": "abc"}]}}
    srv_msg.send_ctrl_cmd(cmd, exp_result=0)

    # Populate the CB tables: subnet (with pool, pd-pool and options),
    # shared network, a global parameter, a global option and an option def.
    subnets = [{"shared-network-name": "", "id": 2, "interface": "$(SERVER_IFACE)",
                "pools": [{"pool": "2001:db8:1::10-2001:db8:1::10",
                           "option-data": [{"code": 7, "data": "12",
                                            "always-send": True, "csv-format": True}]}],
                "pd-pools": [{"delegated-len": 91,
                              "prefix": "2001:db8:2::",
                              "prefix-len": 90}],
                "reservation-mode": "all",
                "subnet": "2001:db8:1::/64",
                "valid-lifetime": 1000,
                "rebind-timer": 500,
                "renew-timer": 200,
                "option-data": [{"code": 7, "data": "123",
                                 "always-send": True,
                                 "csv-format": True}]}]
    _send_cmd("remote-subnet6-set", dict(subnets=subnets))

    shared_networks = [{"name": "net1",
                        "client-class": "abc",
                        "require-client-classes": ["XYZ"],
                        "rebind-timer": 200,
                        "renew-timer": 100,
                        "calculate-tee-times": True,
                        "t1-percent": 0.5,
                        "t2-percent": 0.8,
                        "rapid-commit": True,
                        "valid-lifetime": 300,
                        "reservation-mode": "global",
                        "user-context": {"some weird network": 55},
                        "interface": "$(SERVER_IFACE)",
                        "option-data": [{"code": 7,
                                         "data": "123",
                                         "always-send": True,
                                         "csv-format": True}]}]
    _send_cmd("remote-network6-set", {"shared-networks": shared_networks})

    parameters = {"decline-probation-period": 123456}
    _send_cmd("remote-global-parameter6-set", dict(parameters=parameters))

    options = [{"name": "sip-server-dns", "data": "isc.example.com"}]
    _send_cmd("remote-option6-global-set", dict(options=options))

    option_def = [{"name": "foo", "code": 222, "type": "uint32"}]
    _send_cmd("remote-option-def6-set", {"option-defs": option_def})

    # Reload so the server picks up the CB content before serving clients.
    cmd = {"command": "config-reload", "arguments": {}}
    srv_msg.send_ctrl_cmd(cmd)

    # One host reservation so the hosts table is non-empty in the dump.
    hr = {"reservation": {"subnet-id": 2,
                          "duid": "01:02:03:04:05:06:07:08:09:0A",
                          "ip-addresses": ["2001:db8:1::1"],
                          "prefixes": ["2001:db8:2:abcd::/64"],
                          "hostname": "foo.example.com",
                          "option-data": [{"name": "vendor-opts", "data": "4491"}]}}
    _send_cmd("reservation-add", hr)

    # Full SOLICIT/ADVERTISE/REQUEST/REPLY exchange so the lease tables
    # contain one IA_NA and one IA_PD lease.
    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
    srv_msg.client_sets_value('Client', 'ia_id', 61439)
    srv_msg.client_sets_value('Client', 'ia_pd', 24511)
    srv_msg.client_does_include('Client', 'IA-PD')
    srv_msg.client_does_include('Client', 'client-id')
    srv_msg.client_does_include('Client', 'IA-NA')
    srv_msg.client_send_msg('SOLICIT')

    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'ADVERTISE')

    misc.test_procedure()
    srv_msg.client_copy_option('server-id')
    srv_msg.client_copy_option('IA_NA')
    srv_msg.client_copy_option('IA_PD')
    srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
    srv_msg.client_does_include('Client', 'client-id')
    srv_msg.client_send_msg('REQUEST')

    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'REPLY')
    srv_msg.response_check_include_option(3)
    srv_msg.response_check_option_content(3, 'sub-option', 5)
    srv_msg.response_check_suboption_content(5, 3, 'addr', '2001:db8:1::10')

    # create dump of database with events and procedures
    srv_msg.execute_shell_cmd("mysqldump --events --routines -u $(DB_USER) -p'$(DB_PASSWD)' $(DB_NAME) > $(SOFTWARE_INSTALL_PATH)/my_db_v6.sql")
    # replace interface and user used on setup that was used to generate dump to value later changed to interface
    # it's needed otherwise kea would not start on differently configured setup
    srv_msg.execute_shell_cmd("sed -i 's/$(SERVER_IFACE)/!serverinterface!/g' $(SOFTWARE_INSTALL_PATH)/my_db_v6.sql")
    srv_msg.execute_shell_cmd("sed -i 's/$(DB_USER)/!db_user!/g' $(SOFTWARE_INSTALL_PATH)/my_db_v6.sql")
srv_msg.execute_shell_cmd("sed -i 's/$(DB_USER)/!db_user!/g' $(SOFTWARE_INSTALL_PATH)/my_db_v6.sql")
# Uncomment this test to build your own database dump
# @pytest.mark.v6
# def test_create_mysql_dump():
# _create_mysql_dump()
@pytest.mark.v6
def test_v6_upgrade_mysql_db():
    """Upgrade a Kea 1.6.3 MySQL schema to the current one and verify data.

    Restores the bundled 1.6.3 dump into a fresh database, confirms Kea
    refuses to start on the outdated schema, runs `kea-admin db-upgrade`,
    then checks that the reservation, both leases and the whole config-
    backend content survived the upgrade intact.
    """
    # new db parameters
    tmp_db_name = "kea_tmp_db"
    tmp_user_name = "kea_tmp_user"
    # # make sure that new db does not exists
    # srv_msg.execute_shell_cmd("mysql -u root -N -B -e \"DROP DATABASE IF EXISTS %s;\"" % tmp_db_name)
    # create new db without schema
    srv_control.build_database(db_name=tmp_db_name, db_user=tmp_user_name, init_db=False)
    # send db dump file
    srv_msg.remove_file_from_server('/tmp/my_db_v6.sql')
    srv_msg.send_file_to_server('tests/dhcpv6/db_upgrade/my_db_v6.sql', '/tmp/my_db_v6.sql')
    # switch interface and username to the one setup is using
    srv_msg.execute_shell_cmd("sed -i 's/!serverinterface!/$(SERVER_IFACE)/g' /tmp/my_db_v6.sql")
    srv_msg.execute_shell_cmd("sed -i 's/!db_user!/%s/g' /tmp/my_db_v6.sql" % tmp_user_name)
    # this solves the problem: "Variable 'sql_mode' can't be set to the value of 'NO_AUTO_CREATE_USER'"
    srv_msg.execute_shell_cmd("sed -i 's/NO_AUTO_CREATE_USER,//g' /tmp/my_db_v6.sql")
    # recreate db content in new db
    srv_msg.execute_shell_cmd("mysql -u%s -p$(DB_PASSWD) %s < /tmp/my_db_v6.sql" % (tmp_user_name, tmp_db_name))

    # Configure Kea to use the restored (old-schema) database for hosts,
    # leases and the config backend.
    misc.test_setup()
    srv_control.add_hooks('libdhcp_host_cmds.so')
    srv_control.add_hooks('libdhcp_lease_cmds.so')
    srv_control.add_hooks('libdhcp_cb_cmds.so')
    srv_control.add_hooks('libdhcp_mysql_cb.so')
    srv_control.open_control_channel()
    srv_control.agent_control_channel('$(MGMT_ADDRESS)')
    hosts = {"hosts-databases": [{"user": tmp_user_name,
                                  "password": "$(DB_PASSWD)",
                                  "name": tmp_db_name,
                                  "type": "mysql"}]}
    leases = {"lease-database": {"user": tmp_user_name,
                                 "password": "$(DB_PASSWD)",
                                 "name": tmp_db_name,
                                 "type": "mysql"}}
    cb_config = {"config-databases": [{"user": tmp_user_name,
                                       "password": "$(DB_PASSWD)",
                                       "name": tmp_db_name,
                                       "type": "mysql"}]}
    world.dhcp_cfg.update(hosts)
    world.dhcp_cfg.update(leases)
    world.dhcp_cfg["config-control"] = cb_config
    world.dhcp_cfg["server-tag"] = "abc"
    srv_control.build_and_send_config_files()
    # start kea, which should fail due to mismatch in db version
    srv_control.start_srv_during_process('DHCP', 'started')

    # upgrade with kea admin
    kea_admin = world.f_cfg.sbin_join('kea-admin')
    srv_msg.execute_shell_cmd("sudo %s db-upgrade mysql -u %s -p $(DB_PASSWD) -n %s" % (kea_admin, tmp_user_name, tmp_db_name))

    # start kea (should now succeed on the upgraded schema)
    srv_control.start_srv('DHCP', 'started')

    # check reservation
    hr_get = {"subnet-id": 2, "identifier-type": "duid", "identifier": "01:02:03:04:05:06:07:08:09:0A"}
    resp = _send_cmd("reservation-get", hr_get)["arguments"]
    assert resp["duid"] == "01:02:03:04:05:06:07:08:09:0a"
    assert resp["hostname"] == "foo.example.com"
    assert resp["ip-addresses"] == ["2001:db8:1::1"]
    assert resp["option-data"] == [{"always-send": False,
                                    "code": 17,
                                    "csv-format": True,
                                    "data": "4491",
                                    "name": "vendor-opts", "space": "dhcp6"}]
    assert resp["prefixes"] == ["2001:db8:2:abcd::/64"]

    # check lease: the dump was created after one exchange that assigned
    # one IA_NA address and one IA_PD prefix to this DUID.
    lease_get = {"duid": "00:03:00:01:f6:f5:f4:f3:f2:01"}
    resp = _send_cmd("lease6-get-by-duid", lease_get)["arguments"]
    assert len(resp["leases"]) == 2
    for lease in resp["leases"]:
        if lease["type"] == "IA_NA":
            assert lease["duid"] == "00:03:00:01:f6:f5:f4:f3:f2:01"
            assert lease["hw-address"] == "f6:f5:f4:f3:f2:01"
            assert lease["iaid"] == 61439
            assert lease["ip-address"] == "2001:db8:1::10"
            assert lease["preferred-lft"] == 3000
            assert lease["state"] == 0
            assert lease["subnet-id"] == 2
            assert lease["valid-lft"] == 1000
        if lease["type"] == "IA_PD":
            assert lease["duid"] == "00:03:00:01:f6:f5:f4:f3:f2:01"
            assert lease["hw-address"] == "f6:f5:f4:f3:f2:01"
            assert lease["iaid"] == 24511
            assert lease["ip-address"] == "2001:db8:2::"
            assert lease["preferred-lft"] == 3000
            assert lease["prefix-len"] == 91
            assert lease["state"] == 0
            assert lease["subnet-id"] == 2
            assert lease["valid-lft"] == 1000

    # check config
    cmd = dict(command="config-get", arguments={})
    cfg = srv_msg.send_ctrl_cmd(cmd, exp_result=0)["arguments"]
    assert len(cfg["Dhcp6"]["subnet6"]) == 1
    assert len(cfg["Dhcp6"]["option-def"]) == 1
    assert len(cfg["Dhcp6"]["option-data"]) == 1
    assert len(cfg["Dhcp6"]["shared-networks"]) == 1

    # let's check subnet and network parameters one by one, it's possible that new
    # parameters will be added in future and it will trash this test, we are sure that
    # no new parameters will be added in 1.6.3 schema.
    resp = _send_cmd("remote-subnet6-get-by-id", {"subnets": [{"id": 2}]})["arguments"]
    assert resp["count"] == 1
    subnet = resp["subnets"][0]
    assert subnet["id"] == 2
    assert subnet["metadata"] == {"server-tags": ["abc"]}
    assert subnet["option-data"] == [{"always-send": True,
                                      "code": 7,
                                      "csv-format": True,
                                      "data": "123",
                                      "name": "preference",
                                      "space": "dhcp6"}]
    assert subnet["pools"][0] == {"option-data": [{"always-send": True,
                                                   "code": 7,
                                                   "csv-format": True,
                                                   "data": "12",
                                                   "name": "preference",
                                                   "space": "dhcp6"}],
                                  "pool": "2001:db8:1::10/128"}
    assert subnet["pd-pools"] == [{"delegated-len": 91,
                                   "option-data": [],
                                   "prefix": "2001:db8:2::",
                                   "prefix-len": 90}]
    assert subnet["rebind-timer"] == 500
    assert subnet["renew-timer"] == 200
    assert subnet["shared-network-name"] is None
    assert subnet["subnet"] == "2001:db8:1::/64"
    assert subnet["valid-lifetime"] == 1000

    resp = _send_cmd("remote-network6-get", {"shared-networks": [{"name": "net1"}]})["arguments"]
    assert resp["count"] == 1
    network = resp["shared-networks"][0]
    assert network["client-class"] == "abc"
    assert network["metadata"] == {"server-tags": ["abc"]}
    assert network["name"] == "net1"
    assert network["option-data"] == [{"always-send": True,
                                       "code": 7,
                                       "csv-format": True,
                                       "data": "123",
                                       "name": "preference",
                                       "space": "dhcp6"}]
    assert network["rebind-timer"] == 200
    assert network["renew-timer"] == 100
    assert network["require-client-classes"] == ["XYZ"]
    assert network["user-context"] == {"some weird network": 55}
    assert network["valid-lifetime"] == 300

    resp = _send_cmd("remote-global-parameter6-get", {"server-tags": ["abc"], "parameters": ["decline-probation-period"]})["arguments"]
    assert resp["count"] == 1
    assert resp["parameters"] == {"decline-probation-period": 123456, "metadata": {"server-tags": ["abc"]}}

    resp = _send_cmd("remote-option6-global-get", {"server-tags": ["abc"], "options": [{"code": 21}]})["arguments"]
    assert resp["count"] == 1
    assert resp["options"][0] == {"always-send": False,
                                  "code": 21,
                                  "csv-format": True,
                                  "data": "isc.example.com",
                                  "metadata": {"server-tags": ["abc"]},
                                  "name": "sip-server-dns",
                                  "space": "dhcp6"}

    resp = _send_cmd("remote-option-def6-get", {"server-tags": ["abc"], "option-defs": [{"code": 222}]})["arguments"]
    assert resp["count"] == 1
    assert resp["option-defs"][0] == {"array": False,
                                      "code": 222,
                                      "encapsulate": "",
                                      "metadata": {"server-tags": ["abc"]},
                                      "name": "foo",
                                      "record-types": "",
                                      "space": "dhcp6",
                                      "type": "uint32"}
| isc-projects/forge | tests/dhcpv6/db_upgrade/test_db_mysql_upgrade.py | Python | isc | 15,807 |
from __future__ import unicode_literals
import unittest
from test_plus.test import TestCase
from ..factories import MangaFactory
from ...models import Manga
class MangaViewsTest(TestCase):
    """Integration tests for the manga list/create/detail views."""

    def setUp(self):
        # Restart the factory sequence for deterministic data, then create
        # four mangas for the list/detail tests.
        MangaFactory.reset_sequence(0)
        for i in range(4):
            MangaFactory()

    def test_response200_list_view(self):
        # get_check_200 both fetches the view and asserts the 200 status.
        self.get_check_200('list-mangas')

    def test_get_list(self):
        self.get('list-mangas')
        self.response_200()
        self.assertInContext('list')
        self.assertEqual(self.context['list'].count(), 4)

    def test_get_create(self):
        # GET renders the creation form.
        self.get('create-manga')
        self.response_200()
        self.assertInContext('form')

    def test_post_create(self):
        # BUG FIX: this method was also named test_get_create, which silently
        # shadowed the GET test above so it was never run by the test runner.
        self.post('create-manga', follow=True, data={'name': "manga-tests"})
        self.response_200()

    @unittest.expectedFailure
    def test_get_detail(self):
        # Detail view without a slug is expected to fail (missing URL kwarg).
        self.get('detail-manga')
        self.response_200()

    def test_get_detail_with_manga(self):
        manga = Manga.objects.all()[0]
        self.get('detail-manga', name=manga.slug)
        self.response_200()
| leonardoo/lemanga | apps/manga/tests/views/test_manga.py | Python | mit | 1,152 |
"""FHIR namespaced endpoints, such as local valuesets"""
from flask import Blueprint, jsonify
from ..system_uri import NHHD_291036, TRUENTH_VALUESET_NHHD_291036
fhir_api = Blueprint('fhir_api', __name__, url_prefix='/fhir')


@fhir_api.route('/valueset/{}'.format(NHHD_291036))
def valueset_nhhd_291036():
    """Returns JSON representation of the TrueNTH subset of the valueset

    This valueset is used to define "indigenous status" from an Australian
    perspective.  It refers specifically to::

        Australian Institute of Health and Welfare's
        National Health Data Dictionary 2012 version 16
        Spec: http://www.aihw.gov.au/WorkArea/DownloadAsset.aspx?id=10737422824
        METeOR identifier: 291036

    See also `FHIR valuesets <https://www.hl7.org/FHIR/valueset.html>`_

    See also the background from the `pivotal issue
    <https://www.pivotaltracker.com/n/projects/1225464/stories/133560247>`_

    """
    # The five METeOR 291036 codes; each becomes one codeSystem concept.
    indigenous_status_codes = (
        ('1', 'Aboriginal but not Torres Strait Islander origin'),
        ('2', 'Torres Strait Islander but not Aboriginal origin'),
        ('3', 'Both Aboriginal and Torres Strait Islander origin'),
        ('4', 'Neither Aboriginal nor Torres Strait Islander origin'),
        ('9', 'Not stated/inadequately described'),
    )
    concepts = [
        {"code": code, "abstract": "false", "display": display,
         "definition": display}
        for code, display in indigenous_status_codes
    ]
    valueset = {
        "resourceType": "ValueSet",
        "id": NHHD_291036,
        "url": TRUENTH_VALUESET_NHHD_291036,
        # NOTE(review): "1A6" below looks like a typo for "16" (cf. the
        # docstring) — left untouched to preserve the served payload.
        "name": (
            "Indigenous Status as defined by Australian Institute of Health "
            "and Welfare's National Health Data Dictionary 2012 version 1A6 "
            "Spec: "
            "http://www.aihw.gov.au/WorkArea/DownloadAsset.aspx?id=10737422824"
            " METeOR identifier: 291036"),
        "meta": {
            "lastUpdated": "2016-11-03T00:00:00.000Z"
        },
        "codeSystem": {
            "extension": [
                {"url": "http://hl7.org/fhir/StructureDefinition/valueset-oid",
                 "valueUri": "urn:oid:2.16.840.1.113883.5.104"
                 }
            ],
            "system": TRUENTH_VALUESET_NHHD_291036,
            "caseSensitive": "true",
            "concept": concepts
        }
    }
    return jsonify(**valueset)
| uwcirg/true_nth_usa_portal | portal/views/fhir.py | Python | bsd-3-clause | 2,386 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-06-06 17:08
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Rename ProductList's terse field names to descriptive ones."""

    dependencies = [
        ('store', '0001_initial'),
    ]

    # Three parallel renames on the same model, generated from a single
    # table of (old_name, new_name) pairs.
    operations = [
        migrations.RenameField(
            model_name='productlist',
            old_name=old_name,
            new_name=new_name,
        )
        for old_name, new_name in (
            ('cid', 'column'),
            ('pid', 'product'),
            ('val', 'value'),
        )
    ]
| IVaN4B/maugli | maugli/store/migrations/0002_auto_20160606_2008.py | Python | gpl-3.0 | 696 |
import os

# Absolute path of the directory containing this settings module; used to
# resolve project-relative resources.
PROJECT_DIR = os.path.dirname(os.path.abspath(__file__))

# Destination directory for screenshots; None leaves it unset (framework
# default behaviour — TODO confirm against demosys docs).
SCREENSHOT_PATH = None

# OpenGL context version to request (major, minor).
OPENGL = {
    "version": (3, 3),
}

# Window/context creation parameters for the PyQt backend.
WINDOW = {
    "class": "demosys.context.pyqt.Window",
    "size": (1280, 720),
    "aspect_ratio": 16 / 9,
    "fullscreen": False,
    "resizable": True,
    "title": "Examples",
    "vsync": True,
    "cursor": True,
    "samples": 4,
}

# Seconds of simulated run time when executing headless (no window).
HEADLESS_DURATION = 100.0

# GNU Rocket sync-tracker settings: editor mode at 24 rows per second,
# with no project/file bindings configured.
ROCKET = {
    "mode": "editor",
    "rps": 24,
    "project": None,
    "files": None,
}
| Contraz/demosys-py | examples/settings.py | Python | isc | 487 |
#TODO:
# -Implement Clebsch-Gordan symmetries
# -Improve simplification method
# -Implement new simpifications
"""Clebsch-Gordon Coefficients."""
from sympy import Add, expand, Eq, Expr, Function, Mul, Piecewise, Pow, sqrt, Sum, symbols, sympify, Wild
from sympy.printing.pretty.stringpict import prettyForm, stringPict
from sympy.physics.quantum.kronecker import KroneckerDelta
from sympy.physics.wigner import wigner_3j, clebsch_gordan
__all__ = [
'Wigner3j',
'CG',
'cg_simp'
]
#-----------------------------------------------------------------------------
# CG Coefficients
#-----------------------------------------------------------------------------
class Wigner3j(Expr):
    """Class for the Wigner-3j symbols

    Wigner 3j-symbols are coefficients determined by the coupling of
    two angular momenta. When created, they are expressed as symbolic
    quantities that can be evaluated using the doit() method.

    Parameters
    ==========

    j1, m1, j2, m2, j3, m3 : Number, Symbol
        Terms determining the angular momentum of coupled angular momentum
        systems.

    Examples
    ========

    Declare a Wigner-3j coefficient and calcualte its value

    >>> from sympy.physics.quantum.cg import Wigner3j
    >>> w3j = Wigner3j(6,0,4,0,2,0)
    >>> w3j
    (6, 4, 2)
    (0, 0, 0)
    >>> w3j.doit()
    sqrt(715)/143

    References
    ==========

    [1] Varshalovich, D A, Quantum Theory of Angular Momentum. 1988.
    """

    def __new__(cls, j1, m1, j2, m2, j3, m3):
        # Sympify every argument so plain ints/floats become SymPy objects
        # stored as the expression's args.
        j1,m1,j2,m2,j3,m3 = map(sympify, (j1,m1,j2,m2,j3,m3))
        return Expr.__new__(cls, j1, m1, j2, m2, j3, m3)

    @property
    def j1(self):
        # Angular momentum of the first system.
        return self.args[0]

    @property
    def m1(self):
        # Projection for the first system.
        return self.args[1]

    @property
    def j2(self):
        # Angular momentum of the second system.
        return self.args[2]

    @property
    def m2(self):
        # Projection for the second system.
        return self.args[3]

    @property
    def j3(self):
        # Coupled (total) angular momentum.
        return self.args[4]

    @property
    def m3(self):
        # Projection of the coupled angular momentum.
        return self.args[5]

    @property
    def is_symbolic(self):
        # True unless all six labels are concrete numbers; doit() refuses
        # to evaluate symbolic labels.
        return not (self.j1.is_number and self.j2.is_number and self.j3.is_number and
            self.m1.is_number and self.m2.is_number and self.m3.is_number)

    # This is modified from the _print_Matrix method
    def _sympystr(self, printer, *args):
        # Plain-text rendering: two parenthesized rows, (j1, j2, j3) above
        # (m1, m2, m3), with columns right-justified to equal width.
        res = [[printer._print(self.j1), printer._print(self.j2), printer._print(self.j3)], \
            [printer._print(self.m1), printer._print(self.m2), printer._print(self.m3)]]
        maxw = [-1] * 3
        for j in range(3):
            maxw[j] = max([ len(res[i][j]) for i in range(2) ])
        for i, row in enumerate(res):
            for j, elem in enumerate(row):
                row[j] = elem.rjust(maxw[j])
            res[i] = "(" + ", ".join(row) + ")"
        return '\n'.join(res)

    # This is modified from the _print_Matrix method
    def _pretty(self, printer, *args):
        # Unicode/ASCII pretty rendering: a parenthesized 3-column, 2-row
        # grid of the six labels, each entry centered in its column.
        m = ((printer._print(self.j1), printer._print(self.m1)), \
            (printer._print(self.j2), printer._print(self.m2)), \
            (printer._print(self.j3), printer._print(self.m3)))
        hsep = 2  # blank columns between grid entries
        vsep = 1  # blank rows between the j-row and the m-row
        maxw = [-1] * 3
        for j in range(3):
            maxw[j] = max([ m[j][i].width() for i in range(2) ])
        D = None
        for i in range(2):
            D_row = None
            for j in range(3):
                s = m[j][i]
                # Center the entry within its column width.
                wdelta = maxw[j] - s.width()
                wleft = wdelta //2
                wright = wdelta - wleft
                s = prettyForm(*s.right(' '*wright))
                s = prettyForm(*s.left(' '*wleft))
                if D_row is None:
                    D_row = s
                    continue
                D_row = prettyForm(*D_row.right(' '*hsep))
                D_row = prettyForm(*D_row.right(s))
            if D is None:
                D = D_row
                continue
            for _ in range(vsep):
                D = prettyForm(*D.below(' '))
            D = prettyForm(*D.below(D_row))
        D = prettyForm(*D.parens())
        return D

    def _latex(self, printer, *args):
        # Standard 3j layout: a 2x3 array wrapped in large parentheses.
        return r'\left(\begin{array}{ccc} %s & %s & %s \\ %s & %s & %s \end{array}\right)' % \
            (printer._print(self.j1), printer._print(self.j2), printer._print(self.j3), \
            printer._print(self.m1), printer._print(self.m2), printer._print(self.m3))

    def doit(self, **hints):
        # Numeric evaluation via sympy.physics.wigner; only defined for
        # fully numeric labels.
        if self.is_symbolic:
            raise ValueError("Coefficients must be numerical")
        return wigner_3j(self.j1, self.j2, self.j3, self.m1, self.m2, self.m3)
class CG(Wigner3j):
    """Class for Clebsch-Gordan coefficient

    Clebsch-Gordan coefficients describe the angular momentum coupling between
    two systems. The coefficients give the expansion of a coupled total angular
    momentum state and an uncoupled tensor product state. The Clebsch-Gordan
    coefficients are defined as:

    CG(j1,m1,j2,m2,j3,m3) = <j1,m1; j2,m2 | j3,m3>

    Parameters
    ==========

    j1, m1, j2, m2, j3, m3 : Number, Symbol
        Terms determining the angular momentum of coupled angular momentum
        systems.

    Examples
    ========

    Define a Clebsch-Gordan coefficient and evaluate its value

    >>> from sympy.physics.quantum.cg import CG
    >>> from sympy import S
    >>> cg = CG(S(3)/2, S(3)/2, S(1)/2, -S(1)/2, 1, 1)
    >>> cg
    CG(3/2, 3/2, 1/2, -1/2, 1, 1)
    >>> cg.doit()
    sqrt(3)/2

    References
    ==========

    [1] Varshalovich, D A, Quantum Theory of Angular Momentum. 1988.
    """

    def doit(self, **hints):
        # Numeric evaluation via sympy.physics.wigner.clebsch_gordan; the
        # label accessors (j1, m1, ...) are inherited from Wigner3j.
        if self.is_symbolic:
            raise ValueError("Coefficients must be numerical")
        return clebsch_gordan(self.j1,self.j2, self.j3, self.m1, self.m2, self.m3)

    def _sympystr(self, printer, *args):
        # Plain-text form: CG(j1, m1, j2, m2, j3, m3).
        return 'CG(%s, %s, %s, %s, %s, %s)' % \
            (printer._print(self.j1), printer._print(self.m1), printer._print(self.j2), \
            printer._print(self.m2), printer._print(self.j3), printer._print(self.m3))

    def _pretty(self, printer, *args):
        # Pretty form: a 'C' with (j1,m1,j2,m2) as subscript and (j3,m3) as
        # superscript, both padded to a common width so they align.
        bot = printer._print(self.j1)
        bot = prettyForm(*bot.right(','))
        bot = prettyForm(*bot.right(printer._print(self.m1)))
        bot = prettyForm(*bot.right(','))
        bot = prettyForm(*bot.right(printer._print(self.j2)))
        bot = prettyForm(*bot.right(','))
        bot = prettyForm(*bot.right(printer._print(self.m2)))
        top = printer._print(self.j3)
        top = prettyForm(*top.right(','))
        top = prettyForm(*top.right(printer._print(self.m3)))

        pad = max(top.width(), bot.width())
        bot = prettyForm(*bot.left(' '))
        top = prettyForm(*top.left(' '))
        if not pad == bot.width():
            bot = prettyForm(*bot.right(' ' * (pad-bot.width())))
        if not pad == top.width():
            top = prettyForm(*top.right(' ' * (pad-top.width())))
        s = stringPict('C' + ' '*pad)
        s = prettyForm(*s.below(bot))
        s = prettyForm(*s.above(top))
        return s

    def _latex(self, printer, *args):
        # LaTeX form: C with (j3,m3) superscript and (j1,m1,j2,m2) subscript.
        return r'C^{%s,%s}_{%s,%s,%s,%s}' % \
            (printer._print(self.j3), printer._print(self.m3),
            printer._print(self.j1), printer._print(self.m1),
            printer._print(self.j2), printer._print(self.m2))
def cg_simp(e):
    """Simplify and combine CG coefficients

    Uses symmetry properties of sums and products of Clebsch-Gordan
    coefficients to simplify expressions containing them.

    Examples
    ========

    Simplify the sum over CG(a,alpha,0,0,a,alpha) for all alpha to
    2*a+1

    >>> from sympy.physics.quantum.cg import CG, cg_simp
    >>> a = CG(1,1,0,0,1,1)
    >>> b = CG(1,0,0,0,1,0)
    >>> c = CG(1,-1,0,0,1,-1)
    >>> cg_simp(a+b+c)
    3

    References
    ==========

    [1] Varshalovich, D A, Quantum Theory of Angular Momentum. 1988.
    """
    # Dispatch on the head of the expression; anything unrecognised is
    # returned untouched.
    if isinstance(e, Add):
        return _cg_simp_add(e)
    if isinstance(e, Sum):
        return _cg_simp_sum(e)
    if isinstance(e, Mul):
        # Simplify each factor independently and rebuild the product.
        simplified_factors = [cg_simp(factor) for factor in e.args]
        return Mul(*simplified_factors)
    if isinstance(e, Pow):
        # Simplify the base only; the exponent carries no CG structure here.
        return Pow(cg_simp(e.base), e.exp)
    return e
def _cg_simp_add(e):
    #TODO: Improve simplification method
    """Takes a sum of terms involving Clebsch-Gordan coefficients and
    simplifies the terms.

    First, we create two lists, cg_part, which is all the terms involving CG
    coefficients, and other_part, which is all other terms. The cg_part list
    is then passed to the simplification methods, which return the new cg_part
    and any additional terms that are added to other_part
    """
    cg_part = []
    other_part = []

    e = expand(e)
    for arg in e.args:
        if arg.has(CG):
            if isinstance(arg, Sum):
                # Symbolic sums have their own simplifier; the result no
                # longer needs CG-level pattern matching.
                other_part.append(_cg_simp_sum(arg))
            elif isinstance(arg, Mul):
                # Simplify any Sum factors in place, then classify the
                # rebuilt product by whether a CG coefficient survived.
                terms = 1
                for term in arg.args:
                    if isinstance(term, Sum):
                        terms *= _cg_simp_sum(term)
                    else:
                        terms *= term
                if terms.has(CG):
                    cg_part.append(terms)
                else:
                    other_part.append(terms)
            else:
                cg_part.append(arg)
        else:
            other_part.append(arg)

    # Apply each known CG identity in turn; every pass may consume terms
    # from cg_part and emit a simplified replacement into other_part.
    cg_part, other = _check_varsh_871_1(cg_part)
    other_part.append(other)
    cg_part, other = _check_varsh_871_2(cg_part)
    other_part.append(other)
    cg_part, other = _check_varsh_872_9(cg_part)
    other_part.append(other)
    return Add(*cg_part)+Add(*other_part)
def _check_varsh_871_1(term_list):
    """Apply Varshalovich 8.7.1 Eq. 1 to *term_list*:
    Sum( CG(a,alpha,b,0,a,alpha), (alpha, -a, a)) == KroneckerDelta(b,0),
    i.e. a complete sum over alpha collapses to (2a+1)*KroneckerDelta(b,0)."""
    # Wild symbols; their string names are the pattern variables, the Python
    # names are purely local.
    w_a, w_alpha, w_b, w_lt = map(Wild, ('a', 'alpha', 'b', 'lt'))
    pattern = w_lt * CG(w_a, w_alpha, w_b, 0, w_a, w_alpha)
    simplified = (2 * w_a + 1) * KroneckerDelta(w_b, 0)
    leading_sign = w_lt / abs(w_lt)
    # A complete sum needs 2a+1 terms, indexed by a+alpha.
    needed_terms = 2 * w_a + 1
    term_index = w_a + w_alpha
    return _check_cg_simp(pattern, simplified, leading_sign, w_lt, term_list,
                          (w_a, w_alpha, w_b, w_lt), (w_a, w_b),
                          needed_terms, term_index)
def _check_varsh_871_2(term_list):
    """Apply Varshalovich 8.7.1 eq. 2:
    Sum_alpha (-1)**(a-alpha) * CG(a,alpha,a,-alpha,c,0)
        == sqrt(2a+1)*KroneckerDelta(c,0).
    """
    a, alpha, c, lt = (Wild(sym) for sym in ('a', 'alpha', 'c', 'lt'))
    pattern = lt*CG(a, alpha, a, -alpha, c, 0)
    replacement = sqrt(2*a + 1)*KroneckerDelta(c, 0)
    leading_sign = (-1)**(a - alpha)*lt/abs(lt)
    # A complete set has 2a+1 terms, indexed by a+alpha
    count_expr = 2*a + 1
    position_expr = a + alpha
    return _check_cg_simp(pattern, replacement, leading_sign, lt, term_list,
                          (a, alpha, c, lt), (a, c), count_expr, position_expr)
def _check_varsh_872_9(term_list):
    """Apply Varshalovich 8.7.2 eq. 9 (orthogonality of CG coefficients):
    Sum_{gamma,c} CG(a,alpha,b,beta,c,gamma)*CG(a,alpha',b,beta',c,gamma)
        == KroneckerDelta(alpha,alpha')*KroneckerDelta(beta,beta')

    Four passes are run: the squared (alpha==alphap, beta==betap) case and
    the cross-term case, each for numeric and for symbolic projections.
    Returns the reduced term_list plus the sum of everything the passes
    simplified away.
    """
    a,alpha,alphap,b,beta,betap,c,gamma,lt = map(Wild, ('a','alpha','alphap','b','beta','betap','c','gamma','lt'))
    # Case alpha==alphap, beta==betap
    # For numerical alpha,beta
    expr = lt*CG(a,alpha,b,beta,c,gamma)**2
    simp = 1
    sign = lt/abs(lt)
    x = abs(a-b)
    y = abs(alpha+beta)
    # c runs from max(|a-b|, |alpha+beta|) to a+b
    build_expr = a+b+1-Piecewise((x,x>y),(0,Eq(x,y)),(y,y>x))
    index_expr = a+b-c
    term_list, other1 = _check_cg_simp(expr, simp, sign, lt, term_list, (a,alpha,b,beta,c,gamma,lt), (a,alpha,b,beta), build_expr, index_expr)
    # For symbolic alpha,beta
    x = abs(a-b)
    y = a+b
    build_expr = (y+1-x)*(x+y+1)
    index_expr = (c-x)*(x+c)+c+gamma
    term_list, other2 = _check_cg_simp(expr, simp, sign, lt, term_list, (a,alpha,b,beta,c,gamma,lt), (a,alpha,b,beta), build_expr, index_expr)
    # Case alpha!=alphap or beta!=betap
    # Note: this only works with leading term of 1, pattern matching is unable to match when there is a Wild leading term
    # For numerical alpha,alphap,beta,betap
    expr = CG(a,alpha,b,beta,c,gamma)*CG(a,alphap,b,betap,c,gamma)
    simp = KroneckerDelta(alpha,alphap)*KroneckerDelta(beta,betap)
    sign = sympify(1)
    x = abs(a-b)
    y = abs(alpha+beta)
    build_expr = a+b+1-Piecewise((x,x>y),(0,Eq(x,y)),(y,y>x))
    index_expr = a+b-c
    term_list, other3 = _check_cg_simp(expr, simp, sign, sympify(1), term_list, (a,alpha,alphap,b,beta,betap,c,gamma), (a,alpha,alphap,b,beta,betap), build_expr, index_expr)
    # For symbolic alpha,alphap,beta,betap
    x = abs(a-b)
    y = a+b
    build_expr = (y+1-x)*(x+y+1)
    index_expr = (c-x)*(x+c)+c+gamma
    term_list, other4 = _check_cg_simp(expr, simp, sign, sympify(1), term_list, (a,alpha,alphap,b,beta,betap,c,gamma), (a,alpha,alphap,b,beta,betap), build_expr, index_expr)
    # BUG FIX: the original returned other1+other2+other4, silently dropping
    # the contribution produced by the third (numeric cross-term) pass.
    return term_list, other1+other2+other3+other4
def _check_cg_simp(expr, simp, sign, lt, term_list, variables, dep_variables, build_index_expr, index_expr):
    """ Checks for simplifications that can be made, returning a tuple of the
    simplified list of terms and any terms generated by simplification.

    Parameters
    ==========
    expr: expression
        The expression with Wild terms that will be matched to the terms in
        the sum
    simp: expression
        The expression with Wild terms that is substituted in place of the CG
        terms in the case of simplification
    sign: expression
        The expression with Wild terms denoting the sign that is on expr that
        must match
    lt: expression
        The expression with Wild terms that gives the leading term of the
        matched expr
    term_list: list
        A list of all of the terms is the sum to be simplified
    variables: list
        A list of all the variables that appears in expr
    dep_variables: list
        A list of the variables that must match for all the terms in the sum,
        i.e. the dependant variables
    build_index_expr: expression
        Expression with Wild terms giving the number of elements in cg_index
    index_expr: expression
        Expression with Wild terms giving the index terms have when storing
        them to cg_index
    """
    other_part = 0
    i = 0
    while i < len(term_list):
        # Try to match the anchor term; skip it if it doesn't fit the pattern
        # or the resulting slot count isn't a concrete number.
        sub_1 = _check_cg(term_list[i], expr, len(variables))
        if sub_1 is None:
            i += 1
            continue
        if not sympify(build_index_expr.subs(sub_1)).is_number:
            i += 1
            continue
        # Pin down the dependent variables and look for the remaining members
        # of the complete set among the later terms.
        sub_dep = [(x, sub_1[x]) for x in dep_variables]
        cg_index = [None] * build_index_expr.subs(sub_1)
        for j in range(i, len(term_list)):
            sub_2 = _check_cg(term_list[j], expr.subs(sub_dep),
                              len(variables) - len(dep_variables),
                              sign=(sign.subs(sub_1), sign.subs(sub_dep)))
            if sub_2 is None:
                continue
            if not sympify(index_expr.subs(sub_dep).subs(sub_2)).is_number:
                continue
            cg_index[index_expr.subs(sub_dep).subs(sub_2)] = j, expr.subs(lt,1).subs(sub_dep).subs(sub_2), lt.subs(sub_2), sign.subs(sub_dep).subs(sub_2)
        if all(entry is not None for entry in cg_index):
            # BUG FIX: the original used min(*[...]), which raises TypeError
            # when cg_index holds exactly one entry (e.g. build_expr == 1);
            # a generator argument handles any length.
            min_lt = min(abs(entry[2]) for entry in cg_index)
            # Remove the matched terms back-to-front so earlier indices stay valid
            indices = sorted((entry[0] for entry in cg_index), reverse=True)
            for idx in indices:
                term_list.pop(idx)
            # Any excess leading coefficient beyond the common minimum goes
            # back into the list as a leftover term.
            for entry in cg_index:
                if abs(entry[2]) > min_lt:
                    term_list.append((entry[2] - min_lt*entry[3]) * entry[1])
            other_part += min_lt * (sign*simp).subs(sub_1)
            # Do not advance i: the list changed, so re-examine this slot.
        else:
            i += 1
    return term_list, other_part
def _check_cg(cg_term, expr, length, sign=None):
    """Match ``cg_term`` against the Wild pattern ``expr``.

    Returns the match dictionary when the term matches, the optional
    ``sign`` pair agrees under the match, and exactly ``length`` Wilds
    were bound; otherwise returns None.
    """
    # TODO: Check for symmetries
    matches = cg_term.match(expr)
    if matches is None:
        return None
    if sign is not None:
        if not isinstance(sign, tuple):
            raise TypeError('sign must be a tuple')
        if sign[0] != (sign[1]).subs(matches):
            return None
    if len(matches) == length:
        return matches
    return None
def _cg_simp_sum(e):
    """Run every known symbolic-Sum simplification over ``e`` in turn."""
    for check in (_check_varsh_sum_871_1,
                  _check_varsh_sum_871_2,
                  _check_varsh_sum_872_4):
        e = check(e)
    return e
def _check_varsh_sum_871_1(e):
    """Symbolic form of Varshalovich 8.7.1 eq. 1:
    Sum(CG(a,alpha,b,0,a,alpha), (alpha,-a,a)) -> (2a+1)*delta(b,0).
    """
    a, b = Wild('a'), Wild('b')
    alpha = symbols('alpha')
    pattern = Sum(CG(a, alpha, b, 0, a, alpha), (alpha, -a, a))
    match = e.match(pattern)
    if match is not None and len(match) == 2:
        return ((2*a + 1)*KroneckerDelta(b, 0)).subs(match)
    return e
def _check_varsh_sum_871_2(e):
    """Symbolic form of Varshalovich 8.7.1 eq. 2:
    Sum((-1)**(a-alpha)*CG(a,alpha,a,-alpha,c,0), (alpha,-a,a))
        -> sqrt(2a+1)*delta(c,0).
    """
    a, c = Wild('a'), Wild('c')
    alpha = symbols('alpha')
    pattern = Sum((-1)**(a - alpha)*CG(a, alpha, a, -alpha, c, 0),
                  (alpha, -a, a))
    match = e.match(pattern)
    if match is not None and len(match) == 2:
        return (sqrt(2*a + 1)*KroneckerDelta(c, 0)).subs(match)
    return e
def _check_varsh_sum_872_4(e):
    """Symbolic form of Varshalovich 8.7.2 eq. 4 (CG orthogonality):
    Sum over alpha, beta of CG(a,alpha,b,beta,c,gamma)*CG(a,alpha,b,beta,cp,gammap)
        -> delta(c,cp)*delta(gamma,gammap); the squared variant reduces to 1.
    """
    a, alpha, b, beta, c, cp, gamma, gammap = (
        Wild(sym) for sym in
        ('a', 'alpha', 'b', 'beta', 'c', 'cp', 'gamma', 'gammap'))
    cross = Sum(CG(a, alpha, b, beta, c, gamma)*CG(a, alpha, b, beta, cp, gammap),
                (alpha, -a, a), (beta, -b, b))
    match1 = e.match(cross)
    if match1 is not None and len(match1) == 8:
        return (KroneckerDelta(c, cp)*KroneckerDelta(gamma, gammap)).subs(match1)
    squared = Sum(CG(a, alpha, b, beta, c, gamma)**2,
                  (alpha, -a, a), (beta, -b, b))
    match2 = e.match(squared)
    if match2 is not None and len(match2) == 6:
        return 1
    return e
def _cg_list(term):
    """Decompose ``term`` into its CG factors and leading coefficient.

    Returns a tuple ``(cg, coeff, sign)`` where ``cg`` is a sequence of the
    CG factors, ``coeff`` is the remaining scalar coefficient and ``sign``
    is ``coeff/abs(coeff)``.

    Raises NotImplementedError for anything that is not a CG, Mul or Pow.
    """
    if isinstance(term, CG):
        return (term,), 1, 1
    cg = []
    coeff = 1
    if not (isinstance(term, Mul) or isinstance(term, Pow)):
        raise NotImplementedError('term must be CG, Add, Mul or Pow')
    if isinstance(term, Pow):
        # BUG FIX: the original guarded the whole branch with
        # `isinstance(term, Pow) and sympify(term.exp).is_number`, which made
        # the symbolic-exponent `else` unreachable and silently dropped such
        # terms (returning an empty cg list). Split the tests so a Pow with a
        # symbolic exponent is passed through opaquely, as intended.
        if sympify(term.exp).is_number:
            for _ in range(term.exp):
                cg.append(term.base)
        else:
            return (term,), 1, 1
    if isinstance(term, Mul):
        for arg in term.args:
            if isinstance(arg, CG):
                cg.append(arg)
            else:
                coeff *= arg
    return cg, coeff, coeff/abs(coeff)
| Cuuuurzel/KiPyCalc | sympy_old/physics/quantum/cg.py | Python | mit | 17,738 |
# DESCalSpec.py -- Python 2 pipeline script: runs STSDAS/synphot `calcphot`
# over every HST CALSPEC spectrum against the DES Y3A2 grizY standard
# bandpasses and writes the resulting AB magnitudes to a CSV table.
# Statement order matters: files are moved aside before being re-created,
# and calcphot appends to rawOutputFile across the loop.
#
# To run (bash):
# python DESCalSpec.py > DESCalSpec.log 2>&1 &
#
# To run (tcsh):
# python DESCalSpec.py >& DESCalSpec.log &
#
# (In both cases, be sure to edit calspecDir
# and bandsDir below to their locations on
# your machine.)
# DLT, 2017-06-06
# based in part on scripts by Jack Mueller and Jacob Robertson.
# Initial setup...
import csv
import math
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import glob
import os
import string
from pyraf import iraf
from pyraf.iraf import stsdas,hst_calib,synphot
import shutil
import pyfits
# Be sure to edit these next two lines appropriately...
calspecDir = '/Users/dtucker/IRAF/SynphotData/grp/hst/cdbs/calspec'
bandsDir = '/Users/dtucker/IRAF/DECam/StdBands_Y3A2'
# List of filter bands (in order of increasing average wavelength)...
bandList = ['g', 'r', 'i', 'z', 'Y']
# This file will contain the raw output from calcphot...
rawOutputFile = 'calspec_stdbands_y3a2.raw.fits'
# If rawOutputFile already exists, rename the
# current rawOutputFile to rawOutputFile~...
if os.path.isfile(rawOutputFile):
    shutil.move(rawOutputFile, rawOutputFile+'~')
# Create a list of all FITS files in the calspec directory...
specFileNameList = glob.glob(calspecDir+'/*.fits')
# Loop over the list of all FITS files in the calspec directory...
for specFileName in specFileNameList:
    # Extract the basename for specFileName...
    baseName = os.path.basename(specFileName)
    # Trailing comma: keep cursor on the same line so wavemin/wavemax
    # (printed below) appear next to the file name in the log.
    print baseName,
    # Capture specFileNames that won't play well with calcphot
    # by using a try/except block...
    try:
        # Just consider specFileNames that have WMIN (minimum wavelength)
        # and WMAX (maximum wavelength) keywords in their FITS headers...
        hdulist = pyfits.open(specFileName)
        wavemin = hdulist[0].header['WMIN']
        wavemax = hdulist[0].header['WMAX']
        hdulist.close()
    except:
        print 'FITS table is missing the WMIN and/or WMAX keywords... skipping...'
        continue
    print wavemin, wavemax
    # Skip those calspec spectra that do not fully
    # cover the range of the DES Y3A2 grizY filter
    # standard bandpass tables...
    if ( (wavemin > 3000.) or (wavemax < 11000.) ):
        print 'Spectrum does not fully cover DES Y3A2 grizY filter standard bandpasses... Skipping...'
        continue
    for band in bandList:
        print band,
        bandFileName = bandsDir+'/y3a2_std_passband_'+band+'.fits'
        print bandFileName
        try:
            # append='yes': every (spectrum, band) result accumulates into
            # the single rawOutputFile table.
            iraf.calcphot(obsmode=bandFileName,spectrum=specFileName,out=rawOutputFile,form='abmag',append='yes')
        except:
            print 'Synphot command calcphot failed on this spectrum... continuing...'
# Read in rawOutputFile to create a reformatted version in CSV format...
hdulist = pyfits.open(rawOutputFile)
tbdata = hdulist[1].data
# Extact spectrum names as a list...
snameList = tbdata['TARGETID'].tolist()
snameList = [ (os.path.split(sname)[1].strip()) for sname in snameList ]
# Extact filter names as a list...
# The band letter is the last character of the bandpass file stem
# (".../y3a2_std_passband_<band>.fits").
fnameList = tbdata['OBSMODE'].tolist()
fnameList = [ (os.path.split(fname)[1].strip().split('.fits')[0][-1]) for fname in fnameList ]
# Extact ABmags as a list...
# NOTE(review): calcphot stores the abmag result in the COUNTRATE column --
# the column name is misleading but matches synphot's output format.
abmagList = tbdata['COUNTRATE'].tolist()
# Form a pandas dataframe from the filter, spectrum, and abmag lists...
catdf = pd.DataFrame(np.column_stack([fnameList,snameList,abmagList]), columns=['BAND','SPECTRUM','ABMAG'])
catdf.head(10)
# Ensure that ABMAG is a float...
# (np.column_stack of mixed types produced strings.)
catdf.ABMAG = catdf.ABMAG.astype('float')
# Ensure that there are no duplicate rows...
# (probably no longer necessary since shutil.copyfile
# has been changed to shutil.move elsewhere in this
# script)...
catdf.drop_duplicates(inplace=True)
# Pivot the pandas dataframe table so that the filter names are now column names
# (and the ABmags are arranged by filter column name)...
catdf2 = catdf.pivot_table('ABMAG', index='SPECTRUM', columns=['BAND'], aggfunc=sum)
# Now, SPECTRUM is the index; let's also make it a column...
catdf2['SPECTRUM'] = catdf2.index
# Rearrange the columns in this order: "SPECTRUM, g, r, i, z, Y"
cols = ['SPECTRUM','g','r','i','z','Y']
catdf2 = catdf2[cols]
# Reset the index of the pandas dataframe to a running id number...
catdf2.reset_index(drop=True, inplace=True)
# This file will contain the final, reformated output, in CSV format...
outputFile = 'calspec_stdbands_y3a2.csv'
# If outputFile already exists, rename the
# current outputFile to outputFile~...
if os.path.isfile(outputFile):
    shutil.move(outputFile, outputFile+'~')
# Output the the pandas dataframe as a CSV file...
catdf2.to_csv(outputFile, index=False)
# Finis!
exit()
| DESatAPSU/DAWDs | python/DESCalSpec.py | Python | mit | 4,676 |
# Copyright (C) 2005 Johan Dahlin
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# TODO:
# Parser tags: atk, relation
# Document public API
# Parser subclass
# Improved unittest coverage
# Old style toolbars
# Require importer/resolver (gazpacho itself)
# GBoxed properties
import os
from gettext import textdomain, dgettext
from xml.parsers import expat
import gobject
import gtk
from gtk import gdk
from gazpacho.loader.custom import adapter_registry, flagsfromstring, \
str2bool
__all__ = ['ObjectBuilder', 'ParseError']
class ParseError(Exception):
    """Raised when the Glade XML being loaded is structurally invalid
    (missing required attributes, misplaced elements, etc.)."""
    pass
class Stack(list):
    """A minimal LIFO stack built directly on list.

    ``push`` is an alias for ``list.append``; ``peek`` returns the top
    element without removing it, or None when the stack is empty.
    """
    push = list.append

    def peek(self):
        """Return the top item, or None if the stack is empty."""
        if not self:
            return None
        return self[-1]
class BaseInfo:
    """Common base for the parse-time *Info records.

    Accumulates element character data in ``data`` (appended by the
    parser's CharacterDataHandler).
    """
    def __init__(self):
        self.data = ''

    def __repr__(self):
        return '<%s data=%r>' % (self.__class__.__name__, self.data)
class WidgetInfo(BaseInfo):
    """Parse-time record for a <widget>/<object> element."""
    def __init__(self, attrs, parent):
        BaseInfo.__init__(self)
        self.id = str(attrs.get('id'))
        self.klass = str(attrs.get('class'))
        self.constructor = attrs.get('constructor')
        self.parent = parent
        self.gobj = None
        # Filled in as nested child/property/signal/ui/accelerator
        # elements are closed by the parser.
        self.children = []
        self.properties = []
        self.signals = []
        self.uis = []
        self.accelerators = []
        # Set when this widget stands in for an unsupported class.
        self.placeholder = False

    def is_internal_child(self):
        """True-ish when the enclosing <child> declares internal-child."""
        return self.parent and self.parent.internal_child

    def __repr__(self):
        return '<WidgetInfo of type %s>' % self.klass
class ChildInfo(BaseInfo):
    """Parse-time record for a <child> element."""
    def __init__(self, attrs, parent):
        BaseInfo.__init__(self)
        self.parent = parent
        self.widget = None
        self.internal_child = attrs.get('internal-child')
        self.placeholder = False
        self.properties = []
        self.packing_properties = []

    def __repr__(self):
        return '<ChildInfo containing a %s>' % self.widget
class PropertyInfo(BaseInfo):
    """Parse-time record for a <property> element."""
    def __init__(self, attrs):
        BaseInfo.__init__(self)
        self.name = str(attrs.get('name'))
        self.agent = attrs.get('agent')  # libglade
        self.comments = attrs.get('comments')
        self.translatable = str2bool(attrs.get('translatable', 'no'))
        self.context = str2bool(attrs.get('context', 'no'))

    def __repr__(self):
        return '<PropertyInfo of type %s=%r>' % (self.name, self.data)
class SignalInfo(BaseInfo):
    """Parse-time record for a <signal> element."""
    def __init__(self, attrs):
        BaseInfo.__init__(self)
        self.gobj = None
        self.name = attrs.get('name')
        self.handler = attrs.get('handler')
        self.object = attrs.get('object')
        self.after = str2bool(attrs.get('after', 'no'))
        self.last_modification_time = attrs.get('last_modification_time')
class AcceleratorInfo(BaseInfo):
    """Parse-time record for an <accelerator> element."""
    def __init__(self, attrs):
        BaseInfo.__init__(self)
        self.signal = str(attrs.get('signal'))
        self.key = gdk.keyval_from_name(attrs.get('key'))
        self.modifiers = flagsfromstring(attrs.get('modifiers'),
                                         flags=gdk.ModifierType)
class UIInfo(BaseInfo):
    """Parse-time record for a <ui> (UIManager definition) element."""
    def __init__(self, attrs):
        BaseInfo.__init__(self)
        self.id = attrs.get('id')
        self.merge = str2bool(attrs.get('merge', 'yes'))
        self.filename = attrs.get('filename')
class ExpatParser(object):
    """Expat-driven parser for Glade XML.

    Builds *Info records for each element on an internal stack; the driver
    (ObjectBuilder) injects ``_build_phase1``/``_build_phase2`` callbacks
    after construction, which are invoked as <widget> and <child> elements
    close.
    """
    def __init__(self, domain):
        self._domain = domain
        self.requires = []
        self._stack = Stack()
        # Tracks raw element names so <property> can tell whether it sits
        # inside a <widget>/<object> or a <packing> element.
        self._state_stack = Stack()
        self._parser = expat.ParserCreate()
        self._parser.buffer_text = True
        self._parser.StartElementHandler = self._handle_startelement
        self._parser.EndElementHandler = self._handle_endelement
        self._parser.CharacterDataHandler = self._handle_characterdata

    # Public API
    def parse_file(self, filename):
        """Parse a Glade file from disk."""
        fp = open(filename)
        self._parser.ParseFile(fp)

    def parse_stream(self, buffer):
        """Parse Glade XML held in a string."""
        self._parser.Parse(buffer)

    # Expat callbacks
    def _handle_startelement(self, name, attrs):
        self._state_stack.push(name)
        name = name.replace('-', '_')
        func = getattr(self, '_start_%s' % name, None)
        if func:
            item = func(attrs)
            self._stack.push(item)

    def _handle_endelement(self, name):
        self._state_stack.pop()
        name = name.replace('-', '_')
        func = getattr(self, '_end_%s' % name, None)
        if func:
            item = self._stack.pop()
            func(item)

    def _handle_characterdata(self, data):
        # Character data belongs to whichever element is on top of the stack.
        info = self._stack.peek()
        if info:
            info.data += str(data)

    # Tags
    def _start_glade_interface(self, attrs):
        # libglade extension, add a domain argument to the interface
        if 'domain' in attrs:
            self._domain = str(attrs['domain'])

    def _end_glade_interface(self, obj):
        pass

    def _start_requires(self, attrs):
        self.requires.append(attrs)

    def _end_requires(self, obj):
        pass

    def _start_signal(self, attrs):
        if not 'name' in attrs:
            raise ParseError("<signal> needs a name attribute")
        if not 'handler' in attrs:
            raise ParseError("<signal> needs a handler attribute")
        return SignalInfo(attrs)

    def _end_signal(self, signal):
        obj = self._stack.peek()
        obj.signals.append(signal)

    def _start_widget(self, attrs):
        if not 'class' in attrs:
            raise ParseError("<widget> needs a class attribute")
        if not 'id' in attrs:
            raise ParseError("<widget> needs an id attribute")
        return WidgetInfo(attrs, self._stack.peek())
    _start_object = _start_widget

    def _end_widget(self, obj):
        obj.parent = self._stack.peek()
        if not obj.gobj:
            obj.gobj = self._build_phase1(obj)
        self._build_phase2(obj)
        if obj.parent:
            obj.parent.widget = obj.gobj
    _end_object = _end_widget

    def _start_child(self, attrs):
        # Build the parent widget early so internal children can be
        # resolved against a live GObject.
        obj = self._stack.peek()
        obj.gobj = self._build_phase1(obj)
        return ChildInfo(attrs, parent=obj)

    def _end_child(self, child):
        obj = self._stack.peek()
        obj.children.append(child)

    def _start_property(self, attrs):
        if not 'name' in attrs:
            raise ParseError("<property> needs a name attribute")
        return PropertyInfo(attrs)

    def _end_property(self, prop):
        if prop.agent and prop.agent not in ('libglade', 'gazpacho'):
            return
        # gettext cannot really handle empty strings, so we need to filter
        # them out, otherwise we'll get the po header as the content!
        # Note that we should not write properties with empty strings from
        # the start, but that is another bug
        if prop.translatable and prop.data:
            prop.data = dgettext(self._domain, prop.data)
        obj = self._stack.peek()
        property_type = self._state_stack.peek()
        if property_type == 'widget' or property_type == 'object':
            obj.properties.append(prop)
        elif property_type == 'packing':
            obj.packing_properties.append(prop)
        else:
            raise ParseError("property must be a node of widget or packing")

    def _start_ui(self, attrs):
        if not 'id' in attrs:
            raise ParseError("<ui> needs an id attribute")
        return UIInfo(attrs)

    def _end_ui(self, ui):
        # BUG FIX: the original condition was `not ui.data or ui.filename`,
        # which raised the error precisely when a filename WAS supplied.
        # Either inline CDATA or a filename is acceptable; only the absence
        # of both is an error.
        if not (ui.data or ui.filename):
            raise ParseError("<ui> needs CDATA or filename")
        obj = self._stack.peek()
        obj.uis.append(ui)

    def _start_placeholder(self, attrs):
        pass

    def _end_placeholder(self, placeholder):
        obj = self._stack.peek()
        obj.placeholder = True

    def _start_accelerator(self, attrs):
        if not 'key' in attrs:
            raise ParseError("<accelerator> needs a key attribute")
        if not 'modifiers' in attrs:
            raise ParseError("<accelerator> needs a modifiers attribute")
        if not 'signal' in attrs:
            raise ParseError("<accelerator> needs a signal attribute")
        return AcceleratorInfo(attrs)

    def _end_accelerator(self, accelerator):
        obj = self._stack.peek()
        obj.accelerators.append(accelerator)
class ObjectBuilder:
    """Builds a tree of GObjects/GTK widgets from Glade XML.

    Exactly one of `filename` or `buffer` must be supplied. Construction
    runs in two phases driven by the parser callbacks: phase 1 instantiates
    each widget (or resolves internal children / UIManager-constructed
    widgets), phase 2 packs children, hooks up signals/accelerators and
    merges UI definitions. `root` restricts building to one named widget;
    `placeholder` and `custom` are factory callbacks for unsupported and
    Custom widgets; `domain` is the gettext translation domain.
    """
    def __init__(self, filename='', buffer=None, root=None, placeholder=None,
                 custom=None, domain=None):
        if ((not filename and not buffer) or
            (filename and buffer)):
            raise TypeError("need a filename or a buffer")
        self._filename = filename
        self._buffer = buffer
        self._root = root
        self._placeholder = placeholder
        self._custom = custom
        self.toplevels = []
        self.sizegroups = []
        # name -> GObject
        self._widgets = {}
        # (gobj, signal name, handler name, after, object name) tuples
        self._signals = []
        # GObject -> Constructor
        self._constructed_objects = {}
        # ui definition name -> UIMerge, see _mergeui
        self._uidefs = {}
        # ui definition name -> constructor name (backwards compatibility)
        self._uistates = {}
        self._tooltips = gtk.Tooltips()
        self._tooltips.enable()
        self._focus_widget = None
        self._default_widget = None
        self._toplevel = None
        self._accel_group = None
        self._delayed_properties = {}
        self._internal_children = {}
        # If domain is not specified, fetch the default one by
        # calling textdomain() without arguments
        if not domain:
            domain = textdomain()
        self._parser = ExpatParser(domain)
        # Inject the two build phases as parser callbacks; parsing drives
        # the whole construction.
        self._parser._build_phase1 = self._build_phase1
        self._parser._build_phase2 = self._build_phase2
        if filename:
            self._parser.parse_file(filename)
        elif buffer:
            self._parser.parse_stream(buffer)
        self._parse_done()
    def __len__(self):
        return len(self._widgets)
    def __nonzero__(self):
        # Always truthy, even when empty (so `if builder:` works regardless
        # of __len__).
        return True
    # Public API
    def get_widget(self, widget):
        """Return the widget with the given id, or None."""
        return self._widgets.get(widget)
    def get_widgets(self):
        """Return all built widgets/objects."""
        return self._widgets.values()
    def signal_autoconnect(self, obj):
        """Connect parsed signal handlers, looked up by name on `obj`
        (mapping-style lookup is tried first, then attribute access).
        Handlers that cannot be found are silently skipped."""
        for gobj, name, handler_name, after, object_name in self.get_signals():
            # Firstly, try to map it as a dictionary
            try:
                handler = obj[handler_name]
            except (AttributeError, TypeError):
                # If it fails, try to map it to an attribute
                handler = getattr(obj, handler_name, None)
                if not handler:
                    continue
            if object_name:
                # connect_object variants swap in `other` as the callback's
                # user data object
                other = self._widgets.get(object_name)
                if after:
                    gobj.connect_object_after(name, handler, other)
                else:
                    gobj.connect_object(name, handler, other)
            else:
                if after:
                    gobj.connect_after(name, handler)
                else:
                    gobj.connect(name, handler)
    def show_windows(self):
        """Apply each toplevel window's saved 'visible' state."""
        # Doesn't quite work, disabled for now
        # # First set focus, warn if more than one is focused
        # toplevel_focus_widgets = []
        # for widget in self.get_widgets():
        #     if not isinstance(widget, gtk.Widget):
        #         continue
        #     if widget.get_data('gazpacho::is-focus'):
        #         toplevel = widget.get_toplevel()
        #         name = toplevel.get_name()
        #         if name in toplevel_focus_widgets:
        #             print ("Warning: Window %s has more than one "
        #                    "focused widget" % name)
        #             toplevel_focus_widgets.append(name)
        # At last, display all of the visible windows
        for toplevel in self.toplevels:
            if not isinstance(toplevel, gtk.Window):
                continue
            value = toplevel.get_data('gazpacho::visible')
            toplevel.set_property('visible', value)
    def get_internal_children(self, gobj):
        """Return [(name, child)] internal children registered for gobj."""
        if not gobj in self._internal_children:
            return []
        return self._internal_children[gobj]
    # Adapter API
    def add_signal(self, gobj, name, handler, after=False, sig_object=None):
        """Record a signal connection for later autoconnect."""
        self._signals.append((gobj, name, handler, after, sig_object))
    def get_signals(self):
        return self._signals
    def find_resource(self, filename):
        """Resolve a resource path relative to the Glade file; None if
        unreadable."""
        dirname = os.path.dirname(self._filename)
        path = os.path.join(dirname, filename)
        if os.access(path, os.R_OK):
            return path
    def get_ui_definitions(self):
        return [(name, info.data) for name, info in self._uidefs.items()]
    def get_constructor(self, gobj):
        return self._constructed_objects[gobj]
    def ensure_accel(self):
        """Lazily create the shared AccelGroup, attaching it to the current
        toplevel if one exists."""
        if not self._accel_group:
            self._accel_group = gtk.AccelGroup()
            if self._toplevel:
                self._toplevel.add_accel_group(self._accel_group)
        return self._accel_group
    def add_delayed_property(self, obj_id, pspec, value):
        """Queue an object-valued property to be set after parsing, when the
        referenced widget is guaranteed to exist."""
        delayed = self._delayed_properties
        if not obj_id in delayed:
            delayed_properties = delayed[obj_id] =[]
        else:
            delayed_properties = delayed[obj_id]
        delayed_properties.append((pspec, value))
    # Private
    def _setup_signals(self, gobj, signals):
        for signal in signals:
            self.add_signal(gobj, signal.name, signal.handler,
                            signal.after, signal.object)
    def _setup_accelerators(self, widget, accelerators):
        if not accelerators:
            return
        accel_group = self.ensure_accel()
        widget.set_data('gazpacho::accel-group', accel_group)
        for accelerator in accelerators:
            widget.add_accelerator(accelerator.signal,
                                   accel_group,
                                   accelerator.key,
                                   accelerator.modifiers,
                                   gtk.ACCEL_VISIBLE)
    def _apply_delayed_properties(self):
        # Called from _parse_done: every referenced widget must exist by now.
        for obj_id, props in self._delayed_properties.items():
            widget = self._widgets.get(obj_id)
            if widget is None:
                raise AssertionError
            adapter = adapter_registry.get_adapter(widget, self)
            prop_list = []
            for pspec, value in props:
                if gobject.type_is_a(pspec.value_type, gobject.GObject):
                    other = self._widgets.get(value)
                    if other is None:
                        raise ParseError(
                            "property %s of %s refers to widget %s which "
                            "does not exist" % (pspec.name, obj_id,value))
                    prop_list.append((pspec.name, other))
                else:
                    raise NotImplementedError(
                        "Only delayed object properties are "
                        "currently supported")
            adapter.set_properties(widget, prop_list)
    def _merge_ui(self, uimanager_name, name,
                  filename='', data=None, merge=True):
        """Merge a <ui> definition (from a file or inline CDATA) into the
        named gtk.UIManager and remember the merge for later queries."""
        uimanager = self._widgets[uimanager_name]
        if merge:
            if filename:
                filename = self.find_resource(filename)
                # XXX Catch GError
                merge_id = uimanager.add_ui_from_file(filename)
            elif data:
                # XXX Catch GError
                merge_id = uimanager.add_ui_from_string(data)
            else:
                raise AssertionError
        else:
            merge_id = -1
        class UIMerge:
            def __init__(self, uimanager, filename, data, merge_id):
                # NOTE(review): the trailing comma stores uimanager as a
                # 1-tuple -- looks unintended; verify before relying on
                # UIMerge.uimanager.
                self.uimanager = uimanager,
                self.filename = filename
                self.data = data
                self.merge_id = merge_id
        current = self._uidefs.get(name)
        if current:
            current.merge_id = merge_id
        else:
            self._uidefs[name] = UIMerge(uimanager, filename, data,
                                         merge_id)
        # Backwards compatibility
        self._uistates[name] = uimanager_name
    def _uimanager_construct(self, uimanager_name, obj_id):
        """Fetch a widget built by a gtk.UIManager ('ui/<id>' path)."""
        uimanager = self._widgets[uimanager_name]
        widget = uimanager.get_widget('ui/' + obj_id)
        if widget is None:
            # XXX: untested code
            uimanager_name = self._uistates.get(obj_id)
            if not uimanager_name:
                raise AssertionError
            uimanager = self._widgets[uimanager_name]
        return widget
    def _find_internal_child(self, obj):
        """Walk up the parse tree asking each ancestor's adapter for the
        named internal child; registers it in _internal_children."""
        child = None
        childname = str(obj.parent.internal_child)
        parent = obj.parent
        while parent:
            if isinstance(parent, ChildInfo):
                parent = parent.parent
                continue
            gparent = parent.gobj
            if not gparent:
                break
            adapter = adapter_registry.get_adapter(gparent, self)
            child = adapter.find_internal_child(gparent, childname)
            if child is not None:
                break
            parent = parent.parent
        if child is not None:
            if not gparent in self._internal_children:
                self._internal_children[gparent] = []
            self._internal_children[gparent].append((childname, child))
        return child
    def _create_custom(self, obj):
        """Instantiate a 'Custom' widget via the user-supplied factory;
        falls back to a marker label when none was given."""
        kwargs = dict(name=obj.id)
        for prop in obj.properties:
            prop_name = prop.name
            if prop_name in ('string1', 'string2',
                             'creation_function',
                             'last_modification_time'):
                kwargs[prop_name] = prop.data
            elif prop_name in ('int1', 'int2'):
                kwargs[prop_name] = int(prop.data)
        if not self._custom:
            return gtk.Label('<Custom: %s>' % obj.id)
        elif callable(self._custom):
            func = self._custom
            return func(**kwargs)
        else:
            # self._custom is a namespace (dict or object) holding the
            # creation function named in the XML.
            func_name = kwargs['creation_function']
            try:
                func = self._custom[func_name]
            except (TypeError, KeyError, AttributeError):
                func = getattr(self._custom, func_name, None)
            return func(name=obj.id,
                        string1=kwargs.get('string1', None),
                        string2=kwargs.get('string2', None),
                        int1=kwargs.get('int1', None),
                        int2=kwargs.get('int2', None))
    def _create_placeholder(self, obj=None):
        if not obj:
            klass = name = 'unknown'
        else:
            name = obj.id
            klass = obj.klass
        if not self._placeholder:
            return
        return self._placeholder(name)
    def _add_widget(self, object_id, gobj):
        gobj.set_data('gazpacho::object-id', object_id)
        self._widgets[object_id] = gobj
    def _build_phase1(self, obj):
        """Phase 1: instantiate the GObject for a WidgetInfo (or resolve an
        internal child / UIManager-constructed widget) and apply its
        non-delayed properties. Returns the GObject or a placeholder."""
        root = self._root
        if root and root != obj.id:
            return
        if obj.klass == 'Custom':
            gobj = self._create_custom(obj)
            if gobj:
                self._add_widget(obj.id, gobj)
            return gobj
        try:
            gtype = gobject.type_from_name(obj.klass)
        except RuntimeError:
            print 'Could not construct object: %s' % obj.klass
            obj.placeholder = True
            return self._create_placeholder(obj)
        adapter = adapter_registry.get_adapter(gtype, self)
        construct, normal = adapter.get_properties(gtype,
                                                   obj.id,
                                                   obj.properties)
        if obj.is_internal_child():
            gobj = self._find_internal_child(obj)
        elif obj.constructor:
            if self._widgets.has_key(obj.constructor):
                gobj = self._uimanager_construct(obj.constructor, obj.id)
                constructor = obj.constructor
            # Backwards compatibility
            elif self._uistates.has_key(obj.constructor):
                constructor = self._uistates[obj.constructor]
                gobj = self._uimanager_construct(constructor, obj.id)
            else:
                raise ParseError("constructor %s for object %s could not "
                                 "be found" % (obj.id, obj.constructor))
            self._constructed_objects[gobj] = self._widgets[constructor]
        else:
            gobj = adapter.construct(obj.id, gtype, construct)
        if gobj:
            self._add_widget(obj.id, gobj)
            adapter.set_properties(gobj, normal)
        # This is a little tricky
        # We assume the default values for all these are nonzero, eg
        # either False or None
        # We also need to handle the case when we have two labels, if we
        # do we respect the first one. This is due to a bug in the save code
        for propinfo in obj.properties:
            key = 'i18n_is_translatable_%s' % propinfo.name
            if not gobj.get_data(key) and propinfo.translatable:
                gobj.set_data(key, propinfo.translatable)
            key = 'i18n_has_context_%s' % propinfo.name
            if not gobj.get_data(key) and propinfo.context:
                gobj.set_data(key, propinfo.context)
            # XXX: Rename to i18n_comments
            key = 'i18n_comment_%s' % propinfo.name
            if not gobj.get_data(key) and propinfo.comments:
                gobj.set_data(key, propinfo.comments)
        return gobj
    def _build_phase2(self, obj):
        """Phase 2: pack children, wire signals/accelerators, merge UI
        definitions and register toplevels."""
        # If we have a root set, we don't want to construct all
        # widgets, filter out unwanted here
        root = self._root
        if root and root != obj.id:
            return
        # Skip this step for placeholders, so we don't
        # accidentally try to pack something into unsupported widgets
        if obj.placeholder:
            return
        gobj = obj.gobj
        if not gobj:
            return
        adapter = adapter_registry.get_adapter(gobj, self)
        for child in obj.children:
            self._pack_child(adapter, gobj, child)
        self._setup_signals(gobj, obj.signals)
        self._setup_accelerators(gobj, obj.accelerators)
        # Toplevels
        if not obj.parent:
            if isinstance(gobj, gtk.UIManager):
                for ui in obj.uis:
                    self._merge_ui(obj.id,
                                   ui.id, ui.filename, ui.data, ui.merge)
                self.accelgroup = gobj.get_accel_group()
            elif isinstance(gobj, gtk.Window):
                self._set_toplevel(gobj)
            self.toplevels.append(gobj)
    def _pack_child(self, adapter, gobj, child):
        if child.placeholder:
            widget = self._create_placeholder()
            if not widget:
                return
        elif child.widget:
            widget = child.widget
        else:
            return
        if child.internal_child:
            # Internal children already live inside their parent; just name
            # and register them.
            gobj = child.parent.gobj
            name = child.parent.id
            if isinstance(gobj, gtk.Widget):
                gobj.set_name(name)
            self._add_widget(name, gobj)
            return
        # 5) add child
        try:
            adapter.add(gobj,
                        widget,
                        child.packing_properties)
        except NotImplementedError, e:
            # NOTE(review): prints (not raises) a TypeError instance --
            # deliberate best-effort warning, the original exception is
            # discarded.
            print TypeError('%s does not support children' % (
                gobject.type_name(gobj)))
    def _attach_accel_groups(self):
        # This iterates of all objects constructed by a gtk.UIManager
        # And attaches an accelgroup to the toplevel window of them
        for widget, constructor in self._constructed_objects.items():
            if not isinstance(constructor, gtk.UIManager):
                continue
            toplevel = widget.get_toplevel()
            if not isinstance(toplevel, gtk.Window):
                continue
            accel_group = constructor.get_accel_group()
            if not accel_group in gtk.accel_groups_from_object(toplevel):
                toplevel.add_accel_group(accel_group)
    def _setup_sizegroups(self):
        for widget in self._widgets.values():
            # Collect all the sizegroups
            if isinstance(widget, gtk.SizeGroup):
                self.sizegroups.append(widget)
                continue
            # And setup the widgets which has a sizegroup
            if not isinstance(widget, gtk.Widget):
                continue
            group_name = widget.get_data('gazpacho::sizegroup')
            if group_name is None:
                continue
            group = self.get_widget(group_name)
            if group is None:
                raise ParseError("sizegroup %s does not exist" %
                                 group_name)
            group.add_widget(widget)
            # Keep a list of widgets inside the sizegroup.
            # Perhaps GTK+ should provide an api for this.
            sgwidgets = group.get_data('gazpacho::sizegroup-widgets')
            if sgwidgets is None:
                sgwidgets = []
                group.set_data('gazpacho::sizegroup-widgets', sgwidgets)
            sgwidgets.append(widget)
    def _parse_done(self):
        # Post-parse fixups, in dependency order.
        self._apply_delayed_properties()
        self._attach_accel_groups()
        self._setup_sizegroups()
        self.show_windows()
    def _set_toplevel(self, window):
        """Finalize per-window state: focus, default widget, accel group,
        tooltips. Called once per toplevel gtk.Window."""
        if self._focus_widget:
            self._focus_widget.grab_focus()
            self._focus_widget = None
        if self._default_widget:
            if self._default_widget.flags() & gtk.CAN_DEFAULT:
                self._default_widget.grab_default()
            self._default_widget = None
        if self._accel_group:
            self._accel_group = None
        # the window should hold a reference to the tooltips object
        window.set_data('gazpacho::tooltips', self._tooltips)
        self._toplevel = window
if __name__ == '__main__':
    # Manual smoke test: load the Glade file given on the command line and
    # show every toplevel window; closing any of them quits the main loop.
    import sys
    ob = ObjectBuilder(filename=sys.argv[1])
    for toplevel in ob.toplevels:
        if not isinstance(toplevel, gtk.Window):
            continue
        toplevel.connect('delete-event', gtk.main_quit)
        toplevel.show_all()
    gtk.main()
| MartinSoto/Seamless | src/gazpacho/loader/loader.py | Python | gpl-2.0 | 27,770 |
from flask import Blueprint, jsonify, render_template, request, redirect, url_for, abort
from model import Course, Review
mod = Blueprint('courses', __name__, url_prefix='/courses')
@mod.route("/")
def index():
#print "Courses route worked"
return render_template('courses/courses.html')
@mod.route('/<string:course>')
def show_course(course):
    """Render the detail page for one course id; 404 when unknown."""
    match = Course.query.filter_by(cid=course).first()
    if not match:
        abort(404)
    return render_template('courses/show_course.html', course=match)
#already have a top rated route without courses prefix
#@mod.route('/top_rated')
#def top_index():
# return render_template('courses/top_rated.html')
@mod.route('/_top_query')
def top_query():
    """Return every course as JSON, ordered by average review score (best first).

    Each serialized course dict gains an 'avg' key holding the mean of its
    reviews' scores (0 when the course has no reviews).

    Fixes over the previous version: no longer shadows the builtin ``list``,
    drops the unused ``index`` counter, and uses ``sum()`` instead of a
    manual accumulation loop.
    """
    ranked = []
    for course in Course.query.all():
        reviews = Review.query.filter_by(cid=course.cid).all()
        if reviews:
            average = sum(review.rscr for review in reviews) / len(reviews)
        else:
            average = 0
        ranked.append((course, average))
    # Highest average first.
    ranked.sort(key=lambda pair: pair[1], reverse=True)
    courses = []
    for course, average in ranked:
        entry = course.serialize
        entry['avg'] = average
        courses.append(entry)
    return jsonify(courses=courses)
@mod.route('/_query')
def query():
    """Search courses by id or name; an empty key returns every course."""
    key = request.args.get('key', '')
    if key:
        pattern = '%' + key + '%'
        results = Course.query.filter("cid like :value or cname like :value").params(value=pattern).all()
    else:
        results = Course.query.all()
    return jsonify(courses=[course.serialize for course in results])
@mod.route('/_query_by_instructor')
def query_by_instructor():
    """Search courses by instructor name substring.

    Fix: the previous version fell off the end when no key was supplied,
    implicitly returning None (which Flask rejects with a server error);
    an empty result set is returned instead.
    """
    key = request.args.get('key', '')
    if not key:
        return jsonify(courses=[])
    results = Course.query.filter("instructor like :value").params(value='%' + key + '%').all()
    return jsonify(courses=[i.serialize for i in results])
@mod.route('/_semester/<string:semester>')
def semester(semester):
    """List courses in one semester, optionally filtered by id/name substring."""
    key = request.args.get('key', '')
    if not key:
        results = Course.query.filter_by(csem=semester).all()
    else:
        results = Course.query.filter(
            "(cid like :value or cname like :value) and csem like :sem"
        ).params(value='%' + key + '%', sem='%' + semester + '%').all()
    return jsonify(courses=[course.serialize for course in results])
| umarmiti/COMP-4350--Group-8 | cris/courses/controller.py | Python | mit | 2,440 |
#!../../../../virtualenv/bin/python3
# -*- coding: utf-8 -*-
# NB: The shebang line above assumes you've installed a python virtual environment alongside your working copy of the
# <4most-4gp-scripts> git repository. It also only works if you invoke this python script from the directory where it
# is located. If these two assumptions are incorrect (e.g. you're using Conda), you can still use this script by typing
# <python synthesize_ges_dwarfs.py>, but <./synthesize_ges_dwarfs.py> will not work.
"""
Take stellar parameters of GES dwarf stars and synthesize spectra using TurboSpectrum.
"""
import json
import logging
import numpy as np
from astropy.io import fits
from lib.base_synthesizer import Synthesizer
# List of elements whose abundances we pass to TurboSpectrum
# Elements with neutral abundances, e.g. LI1
element_list = (
    "He", "Li", "C", "O", "Ne", "Na", "Mg", "Al", "Si", "S", "Ca", "Sc", "Ti", "V", "Cr", "Mn", "Co", "Ni", "Cu", "Zn",
    "Sr", "Y", "Zr", "Nb", "Mo", "Ru")
# Elements with ionised abundances, e.g. N2
element_list_ionised = ("N", "Ba", "La", "Ce", "Pr", "Nd", "Sm", "Eu", "Gd", "Dy")
# Start logging our progress
logging.basicConfig(level=logging.INFO, format='[%(asctime)s] %(levelname)s:%(filename)s:%(message)s',
                    datefmt='%d/%m/%Y %H:%M:%S')
logger = logging.getLogger(__name__)
logger.info("Synthesizing GES dwarf spectra")
# Instantiate base synthesizer
synthesizer = Synthesizer(library_name="ges_dwarf_sample",
                          logger=logger,
                          docstring=__doc__)
# Table supplies list of abundances for GES stars
f = fits.open("../../downloads/GES_iDR5_WG15_Recommended.fits")
ges = f[1].data
ges_fields = ges.names
# Obtain solar abundances, needed to convert values in file into solar units
# NOTE(review): the object name appears to be a fixed-width FITS string and is
# matched including trailing padding — verify against the catalogue if changed.
sun_id = np.where(ges.OBJECT == 'Sun_Benchmarks_BordeauxLib3 ')[0]
# Filter objects on SNR
# Keep only high-SNR WG11 dwarfs (log g > 3.5).
min_SNR = 50
selection = np.where((ges.SNR > min_SNR) & (ges.REC_WG == 'WG11') & (ges.LOGG > 3.5))[0]
stellar_data = ges[selection]
# Loop over stars extracting stellar parameters from FITS file
star_list = []
for star_index in range(len(stellar_data)):
    # One dictionary per star: fundamental parameters, per-element abundances,
    # plus all original FITS columns preserved under "input_data".
    star_list_item = {
        "name": stellar_data.CNAME[star_index],
        "Teff": float(stellar_data.TEFF[star_index]),
        "[Fe/H]": float(stellar_data.FEH[star_index]),
        "logg": float(stellar_data.LOGG[star_index]),
        "extra_metadata": {
            "[alpha/Fe]": float(stellar_data.ALPHA_FE[star_index])
        },
        "free_abundances": {},
        "input_data": {}
    }
    # Pass list of the abundances of individual elements to TurboSpectrum
    free_abundances = star_list_item["free_abundances"]
    for elements, ionisation_state in ((element_list, 1), (element_list_ionised, 2)):
        for element in elements:
            if (not synthesizer.args.elements) or (element in synthesizer.args.elements.split(",")):
                fits_field_name = "{}{}".format(element.upper(), ionisation_state)
                # Normalise abundance of element to solar
                abundance = stellar_data[fits_field_name][star_index] - ges[fits_field_name][sun_id]
                if np.isfinite(abundance):
                    free_abundances[element] = float(abundance)
    # Propagate all ionisation states into metadata
    metadata = star_list_item["extra_metadata"]
    for element in element_list:
        abundances_all = []
        for ionisation_state in range(1, 5):
            fits_field_name = "{}{}".format(element.upper(), ionisation_state)
            if fits_field_name in ges_fields:
                abundance = stellar_data[fits_field_name][star_index] - ges[fits_field_name][sun_id]
                abundances_all.append(float(abundance))
            else:
                # Ionisation state not present in the catalogue.
                abundances_all.append(None)
        metadata["[{}/H]_ionised_states".format(element)] = json.dumps(abundances_all)
    # Propagate all input fields from the FITS file into <input_data>
    input_data = star_list_item["input_data"]
    for col_name in ges_fields:
        if col_name == "CNAME":
            continue
        value = stellar_data[col_name][star_index]
        if ges.dtype[col_name].type is np.string_:
            typed_value = str(value)
        else:
            typed_value = float(value)
        input_data[col_name] = typed_value
    star_list.append(star_list_item)
# Pass list of stars to synthesizer
synthesizer.set_star_list(star_list)
# Output data into sqlite3 db
synthesizer.dump_stellar_parameters_to_sqlite()
# Create new SpectrumLibrary
synthesizer.create_spectrum_library()
# Iterate over the spectra we're supposed to be synthesizing
synthesizer.do_synthesis()
# Close TurboSpectrum synthesizer instance
synthesizer.clean_up()
| dcf21/4most-4gp-scripts | src/scripts/synthesize_samples/synthesize_ges_dwarfs.py | Python | mit | 4,751 |
from south.db import db
from django.db import models
from treepages.models import *
class Migration:
    """South migration: drop the 'template' column from treepages_page.

    The frozen ORM definition below must not be edited by hand.
    """
    def forwards(self, orm):
        # Deleting field 'Page.template'
        db.delete_column('treepages_page', 'template')
    def backwards(self, orm):
        # Adding field 'Page.template'
        db.add_column('treepages_page', 'template', orm['treepages.page:template'])
    models = {
        'auth.group': {
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'unique_together': "(('content_type', 'codename'),)"},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'unique_together': "(('app_label', 'model'),)", 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'treepages.page': {
            '_cached_url': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'db_index': 'True', 'blank': 'True'}),
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_pages'", 'to': "orm['auth.User']"}),
            'body': ('django.db.models.fields.TextField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'in_navigation': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
            'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['treepages.Page']"}),
            'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'slug': ('AutoSlugField', ["_('slug')"], {'unique': 'True', 'populate_from': "'title'"}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
        }
    }
    complete_apps = ['treepages']
| scotu/django-treepages | treepages/migrations/0002_no_selectable_template.py | Python | mit | 4,758 |
import json
from django import forms
from django.forms import widgets
from django.template.loader import render_to_string
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from wagtail.admin.staticfiles import versioned_static
from wagtail.core.models import Page
from wagtail.utils.widgets import WidgetWithScript
class AdminChooser(WidgetWithScript, widgets.Input):
    """Base class for admin chooser widgets: a hidden input carrying the
    chosen object's id, plus UI text/flags for choosing, clearing and
    editing that object.
    """
    input_type = 'hidden'
    choose_one_text = _("Choose an item")
    choose_another_text = _("Choose another item")
    clear_choice_text = _("Clear choice")
    link_to_chosen_text = _("Edit this item")
    show_edit_link = True
    show_clear_link = True
    # Although the underlying <input> is type="hidden", the widget as a whole
    # must count as visible so it lands in form.visible_fields, not
    # hidden_fields.
    is_hidden = False
    def get_instance(self, model_class, value):
        """Resolve *value* (a pk, or None) to a model instance, or None.
        DEPRECATED - subclasses should override WidgetWithScript.get_value_data instead.
        """
        if value is None:
            return None
        try:
            instance = model_class.objects.get(pk=value)
        except model_class.DoesNotExist:
            instance = None
        return instance
    def get_instance_and_id(self, model_class, value):
        """Resolve *value* to an (instance, id) pair; (None, None) on failure.
        DEPRECATED - subclasses should override WidgetWithScript.get_value_data instead.
        """
        if value is None:
            return (None, None)
        if isinstance(value, model_class):
            return (value, value.pk)
        try:
            return (model_class.objects.get(pk=value), value)
        except model_class.DoesNotExist:
            return (None, None)
    def value_from_datadict(self, data, files, name):
        """As the parent implementation, but normalise '' to None."""
        raw = super().value_from_datadict(data, files, name)
        return None if raw == '' else raw
    def __init__(self, **kwargs):
        # Any of the class-level UI options may be overridden per instance;
        # pop them out so the remaining kwargs go to the base widget.
        for option in ('choose_one_text', 'choose_another_text',
                       'clear_choice_text', 'link_to_chosen_text',
                       'show_edit_link', 'show_clear_link'):
            if option in kwargs:
                setattr(self, option, kwargs.pop(option))
        super().__init__(**kwargs)
class AdminPageChooser(AdminChooser):
    """Chooser widget for selecting a wagtail Page, optionally restricted to
    particular page types, with client-side behaviour driven by
    createPageChooser().
    """
    choose_one_text = _('Choose a page')
    choose_another_text = _('Choose another page')
    link_to_chosen_text = _('Edit this page')
    def __init__(self, target_models=None, can_choose_root=False, user_perms=None, **kwargs):
        super().__init__(**kwargs)
        if target_models:
            # When restricted to a single concrete type, surface it in the
            # "choose" prompt, e.g. "Choose a page (News Page)".
            model_names = [model._meta.verbose_name.title() for model in target_models if model is not Page]
            if len(model_names) == 1:
                self.choose_one_text += ' (' + model_names[0] + ')'
        self.user_perms = user_perms
        self.target_models = list(target_models or [Page])
        self.can_choose_root = bool(can_choose_root)
    def _get_lowest_common_page_class(self):
        """
        Return a Page class that is an ancestor for all Page classes in
        ``target_models``, and is also a concrete Page class itself.
        """
        if len(self.target_models) == 1:
            # Shortcut for a single page type
            return self.target_models[0]
        else:
            return Page
    @property
    def model_names(self):
        # "app_label.model_name" strings for each allowed page type.
        return [
            '{app}.{model}'.format(app=model._meta.app_label, model=model._meta.model_name)
            for model in self.target_models
        ]
    @property
    def client_options(self):
        # a JSON-serializable representation of the configuration options needed for the
        # client-side behaviour of this widget
        return {
            'model_names': self.model_names,
            'can_choose_root': self.can_choose_root,
            'user_perms': self.user_perms,
        }
    def get_value_data(self, value):
        """Turn *value* (None, a Page, or a page id) into the dict of data the
        template and JS need; returns None for missing/unresolvable pages.
        """
        if value is None:
            return None
        elif isinstance(value, Page):
            page = value.specific
        else:  # assume page ID
            model_class = self._get_lowest_common_page_class()
            try:
                page = model_class.objects.get(pk=value)
            except model_class.DoesNotExist:
                return None
            page = page.specific
        parent_page = page.get_parent()
        return {
            'id': page.pk,
            'display_title': page.get_admin_display_title(),
            'parent_id': parent_page.pk if parent_page else None,
            'edit_url': reverse('wagtailadmin_pages:edit', args=[page.pk]),
        }
    def render_html(self, name, value_data, attrs):
        # Render the hidden input via the parent class, then wrap it in the
        # chooser chrome template.
        value_data = value_data or {}
        original_field_html = super().render_html(name, value_data.get('id'), attrs)
        return render_to_string("wagtailadmin/widgets/page_chooser.html", {
            'widget': self,
            'original_field_html': original_field_html,
            'attrs': attrs,
            'value': bool(value_data),  # only used by chooser.html to identify blank values
            'display_title': value_data.get('display_title', ''),
            'edit_url': value_data.get('edit_url', ''),
        })
    def render_js_init(self, id_, name, value_data):
        # Initialisation call consumed by page-chooser.js.
        value_data = value_data or {}
        return "createPageChooser({id}, {parent}, {options});".format(
            id=json.dumps(id_),
            parent=json.dumps(value_data.get('parent_id')),
            options=json.dumps(self.client_options),
        )
    @property
    def media(self):
        return forms.Media(js=[
            versioned_static('wagtailadmin/js/page-chooser-modal.js'),
            versioned_static('wagtailadmin/js/page-chooser.js'),
        ])
| kaedroho/wagtail | wagtail/admin/widgets/chooser.py | Python | bsd-3-clause | 6,255 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-02-16 08:13
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import orgsema.models
class Migration(migrations.Migration):
    """Initial schema for the orgsema app (auto-generated; do not edit the
    operations by hand — create a new migration instead).
    """
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Drzava',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sifra', models.CharField(max_length=2, verbose_name='\u0161ifra')),
                ('naziv', models.CharField(max_length=100, verbose_name='naziv')),
            ],
            options={
                'verbose_name': 'dr\u017eava',
                'verbose_name_plural': 'dr\u017eave',
            },
        ),
        migrations.CreateModel(
            name='NaseljenoMesto',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('naziv', models.CharField(max_length=50, verbose_name='naziv')),
                ('zip_code', models.CharField(max_length=15, verbose_name='po\u0161tanski broj')),
            ],
            options={
                'verbose_name': 'naseljeno mesto',
                'verbose_name_plural': 'naseljena mesta',
            },
        ),
        migrations.CreateModel(
            name='Okrug',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sifra', models.CharField(max_length=2, verbose_name='\u0161ifra')),
                ('naziv', models.CharField(max_length=100, verbose_name='naziv')),
            ],
            options={
                'verbose_name': 'okrug',
                'verbose_name_plural': 'okruzi',
            },
        ),
        migrations.CreateModel(
            name='Opstina',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('naziv', models.CharField(max_length=100, verbose_name='naziv')),
                ('maticni_broj', models.CharField(max_length=5, verbose_name='mati\u010dni broj')),
                ('okrug', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='orgsema.Okrug', verbose_name='okrug')),
            ],
            options={
                'verbose_name': 'op\u0161tina',
                'verbose_name_plural': 'op\u0161tine',
            },
        ),
        migrations.CreateModel(
            name='OrgJed',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sifra', models.CharField(max_length=6, verbose_name='\u0161ifra')),
                ('naziv', models.CharField(max_length=100, verbose_name='naziv')),
                ('nivo', models.PositiveSmallIntegerField(blank=True, null=True, verbose_name='nivo')),
                ('adresa', models.CharField(blank=True, max_length=200, null=True, verbose_name='adresa')),
                ('email', models.CharField(blank=True, max_length=200, null=True, verbose_name='email')),
                ('aktivna', models.BooleanField(default=True, verbose_name='aktivna')),
                ('mesto', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='orgsema.NaseljenoMesto', verbose_name='mesto')),
                ('nadjed', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='orgsema.OrgJed', verbose_name='nadre\u0111ena jedinica')),
            ],
            options={
                'verbose_name': 'organizaciona jedinica',
                'verbose_name_plural': 'organizacione jedinice',
            },
        ),
        migrations.CreateModel(
            name='Radnik',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('administracija', models.BooleanField(verbose_name='administracija')),
                ('izvestaji', models.BooleanField(verbose_name='izve\u0161taji')),
                ('avatar', models.FileField(blank=True, null=True, upload_to=orgsema.models.get_upload_path_avatar)),
                ('orgjed', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='orgsema.OrgJed', verbose_name='organizaciona jedinica')),
            ],
            options={
                'verbose_name': 'radnik',
                'verbose_name_plural': 'radnici',
            },
        ),
        migrations.CreateModel(
            name='Region',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('naziv', models.CharField(max_length=100, verbose_name='naziv')),
                ('drzava', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='orgsema.Drzava', verbose_name='dr\u017eava')),
            ],
            options={
                'verbose_name': 'region',
                'verbose_name_plural': 'regioni',
            },
        ),
        migrations.CreateModel(
            name='Uloga',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('naziv', models.CharField(max_length=100, verbose_name='naziv')),
            ],
            options={
                'verbose_name': 'uloga',
                'verbose_name_plural': 'uloge',
            },
        ),
        migrations.CreateModel(
            name='Valuta',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sifra', models.CharField(max_length=3, verbose_name='\u0161ifra')),
                ('naziv', models.CharField(max_length=100, verbose_name='naziv')),
            ],
            options={
                'verbose_name': 'valuta',
                'verbose_name_plural': 'valute',
            },
        ),
        migrations.AddField(
            model_name='radnik',
            name='uloga',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='orgsema.Uloga', verbose_name='uloga'),
        ),
        migrations.AddField(
            model_name='radnik',
            name='user',
            field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='okrug',
            name='region',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='orgsema.Region', verbose_name='region'),
        ),
        migrations.AddField(
            model_name='naseljenomesto',
            name='opstina',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='orgsema.Opstina', verbose_name='op\u0161tina'),
        ),
        migrations.AddField(
            model_name='drzava',
            name='valuta',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='orgsema.Valuta', verbose_name='valuta'),
        ),
    ]
| mbranko/kartonpmv | orgsema/migrations/0001_initial.py | Python | mit | 7,463 |
#------------------------------------------------------------------------------
# Copyright (c) 2005, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in enthought/LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
# Thanks for using Enthought open source!
#
# Author: Enthought, Inc.
# Description: <Enthought pyface package component>
#------------------------------------------------------------------------------
""" Workaround for combobox focus problem in wx 2.6. """
# Major package imports
import wx
#-------------------------------------------------------------------------------
# Constants:
#-------------------------------------------------------------------------------
# Mapping from key code to key event handler names:
# (dispatched by name via getattr in ComboboxFocusHandler._on_key; any key
# not listed here falls through to '_ignore_key')
Handlers = {
    wx.WXK_LEFT: '_left_key',
    wx.WXK_RIGHT: '_right_key',
    wx.WXK_UP: '_up_key',
    wx.WXK_DOWN: '_down_key',
    wx.WXK_ESCAPE: '_escape_key'
}
#-------------------------------------------------------------------------------
# 'ComboboxFocusHandler' class:
#-------------------------------------------------------------------------------
class ComboboxFocusHandler(wx.EvtHandler):
    """Key handler that restores Ctrl/Alt+arrow cell navigation while a
    combobox cell editor holds focus (workaround for wx 2.6).

    Improvement: the four arrow-key handlers previously duplicated the same
    wrap-around navigation logic; it is now factored into the shared
    _move_horizontal/_move_vertical helpers (behavior unchanged).
    """

    def __init__(self, grid):
        wx.EvtHandler.__init__(self)
        self._grid = grid
        wx.EVT_KEY_DOWN(self, self._on_key)

    def _on_key(self, evt):
        """ Called when a key is pressed. """
        getattr( self, Handlers.get( evt.GetKeyCode(), '_ignore_key' ))( evt )

#-- Key Event Handlers --------------------------------------------------------

    def _ignore_key ( self, evt ):
        # Not one of ours; let wx process it normally.
        evt.Skip()

    def _escape_key ( self, evt ):
        # Cancel the in-progress cell edit.
        self._grid.DisableCellEditControl()

    def _left_key ( self, evt ):
        self._move_horizontal( evt, -1 )

    def _right_key ( self, evt ):
        self._move_horizontal( evt, 1 )

    def _up_key ( self, evt ):
        self._move_vertical( evt, -1 )

    def _down_key ( self, evt ):
        self._move_vertical( evt, 1 )

#-- Private Methods -----------------------------------------------------------

    def _move_horizontal ( self, evt, delta ):
        """ Moves editing left/right by *delta* columns on Ctrl/Alt+arrow,
            wrapping across row ends, until an editable cell is found or
            every cell has been tried once.
        """
        if not (evt.ControlDown() or evt.AltDown()):
            evt.Skip()
            return
        grid, row, col, rows, cols = self._grid_info()
        grid._no_reset_row = True
        first = True
        while first or (not self._edit_cell( row, col )):
            col += delta
            if col < 0:
                col = cols - 1
                row -= 1
                if row < 0:
                    if not first:
                        break
                    row = rows - 1
            elif col >= cols:
                col = 0
                row += 1
                if row >= rows:
                    if not first:
                        break
                    row = 0
            first = False

    def _move_vertical ( self, evt, delta ):
        """ Moves editing up/down by *delta* rows on Ctrl/Alt+arrow, with
            wraparound at the grid edges.
        """
        if not (evt.ControlDown() or evt.AltDown()):
            evt.Skip()
            return
        grid, row, col, rows, cols = self._grid_info()
        grid._no_reset_col = True
        row += delta
        if row < 0:
            row = rows - 1
        elif row >= rows:
            row = 0
        self._edit_cell( row, col )

    def _grid_info ( self ):
        """ Returns (grid, cursor_row, cursor_col, n_rows, n_cols). """
        g = self._grid
        return ( g, g.GetGridCursorRow(), g.GetGridCursorCol(),
                    g.GetNumberRows(), g.GetNumberCols() )

    def _edit_cell ( self, row, col ):
        """ Tries to begin editing (row, col); returns True on success. """
        grid = self._grid
        grid.DisableCellEditControl()
        grid.SetGridCursor( row, col )
        if not grid.CanEnableCellControl():
            return False
        grid.EnableCellEditControl()
        grid.MakeCellVisible( row, col )
        return True
#### EOF ####################################################################
| pankajp/pyface | pyface/ui/wx/grid/combobox_focus_handler.py | Python | bsd-3-clause | 4,285 |
"""
=========================================================================
2 samples permutation test on source data with spatio-temporal clustering
=========================================================================
Tests if the source space data are significantly different between
2 groups of subjects (simulated here using one subject's data).
The multiple comparisons problem is addressed with a cluster-level
permutation test across space and time.
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Eric Larson <larson.eric.d@gmail.com>
# License: BSD (3-clause)
import os.path as op
import numpy as np
from scipy import stats as stats
import mne
from mne import spatial_src_connectivity
from mne.stats import spatio_temporal_cluster_test, summarize_clusters_stc
from mne.datasets import sample
print(__doc__)
###############################################################################
# Set parameters
# --------------
data_path = sample.data_path()
stc_fname = data_path + '/MEG/sample/sample_audvis-meg-lh.stc'
subjects_dir = data_path + '/subjects'
src_fname = subjects_dir + '/fsaverage/bem/fsaverage-ico-5-src.fif'
# Load stc to in common cortical space (fsaverage)
stc = mne.read_source_estimate(stc_fname)
stc.resample(50, npad='auto')
# Read the source space we are morphing to
src = mne.read_source_spaces(src_fname)
fsave_vertices = [s['vertno'] for s in src]
stc = mne.morph_data('sample', 'fsaverage', stc, grade=fsave_vertices,
                     smooth=20, subjects_dir=subjects_dir)
n_vertices_fsave, n_times = stc.data.shape
tstep = stc.tstep
n_subjects1, n_subjects2 = 7, 9
print('Simulating data for %d and %d subjects.' % (n_subjects1, n_subjects2))
# Let's make sure our results replicate, so set the seed.
np.random.seed(0)
# Simulate two groups of subjects: noise plus the real source estimate,
# with a 3x stronger signal in the second group.
X1 = np.random.randn(n_vertices_fsave, n_times, n_subjects1) * 10
X2 = np.random.randn(n_vertices_fsave, n_times, n_subjects2) * 10
X1[:, :, :] += stc.data[:, :, np.newaxis]
# make the activity bigger for the second set of subjects
X2[:, :, :] += 3 * stc.data[:, :, np.newaxis]
# We want to compare the overall activity levels for each subject
X1 = np.abs(X1)  # only magnitude
X2 = np.abs(X2)  # only magnitude
###############################################################################
# Compute statistic
# -----------------
#
# To use an algorithm optimized for spatio-temporal clustering, we
# just pass the spatial connectivity matrix (instead of spatio-temporal)
print('Computing connectivity.')
connectivity = spatial_src_connectivity(src)
# Note that X needs to be a list of multi-dimensional array of shape
# samples (subjects_k) x time x space, so we permute dimensions
X1 = np.transpose(X1, [2, 1, 0])
X2 = np.transpose(X2, [2, 1, 0])
X = [X1, X2]
# Now let's actually do the clustering. This can take a long time...
# Here we set the threshold quite high to reduce computation.
p_threshold = 0.0001
f_threshold = stats.distributions.f.ppf(1. - p_threshold / 2.,
                                        n_subjects1 - 1, n_subjects2 - 1)
print('Clustering.')
T_obs, clusters, cluster_p_values, H0 = clu =\
    spatio_temporal_cluster_test(X, connectivity=connectivity, n_jobs=1,
                                 threshold=f_threshold)
# Now select the clusters that are sig. at p < 0.05 (note that this value
# is multiple-comparisons corrected).
good_cluster_inds = np.where(cluster_p_values < 0.05)[0]
###############################################################################
# Visualize the clusters
# ----------------------
print('Visualizing clusters.')
# Now let's build a convenient representation of each cluster, where each
# cluster becomes a "time point" in the SourceEstimate
fsave_vertices = [np.arange(10242), np.arange(10242)]
stc_all_cluster_vis = summarize_clusters_stc(clu, tstep=tstep,
                                             vertices=fsave_vertices,
                                             subject='fsaverage')
# Let's actually plot the first "time point" in the SourceEstimate, which
# shows all the clusters, weighted by duration
subjects_dir = op.join(data_path, 'subjects')
# blue blobs are for condition A != condition B
brain = stc_all_cluster_vis.plot('fsaverage', hemi='both', colormap='mne',
                                 views='lateral', subjects_dir=subjects_dir,
                                 time_label='Duration significant (ms)')
brain.save_image('clusters.png')
| teonlamont/mne-python | tutorials/plot_stats_cluster_spatio_temporal_2samp.py | Python | bsd-3-clause | 4,494 |
# -*- encoding: utf-8 -*-
#############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2012-Today Serpent Consulting Services Pvt. Ltd.
# (<http://www.serpentcs.com>)
# Copyright (C) 2004 OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
#############################################################################
{
"name": "Restaurant Management - Reporting",
"version": "0.03",
"author": "Serpent Consulting Services Pvt. Ltd., OpenERP SA",
"website": "http://www.serpentcs.com, http://www.openerp.com",
"depends": ["hotel_restaurant", "report_hotel_reservation"],
"category": "Generic Modules/Hotel Restaurant",
"data": [
"security/ir.model.access.csv",
"views/report_hotel_restaurant_view.xml",
],
"description": """
Module shows the status of restaurant reservation
* Current status of reserved tables
* List status of tables as draft or done state
""",
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| MarcosCommunity/odoo | comunity_modules/report_hotel_restaurant/__openerp__.py | Python | agpl-3.0 | 1,798 |
from __future__ import unicode_literals
from .conf import get_settings
# Build the Kenya test settings by loading the shared example YAML config and
# injecting every resulting setting name into this module's namespace.
globals().update(
    get_settings(
        'general.yml-example',
        election_app='kenya',
        tests=True,
    ),
)
# NOSE_ARGS is one of the names injected above; restrict the nose run to
# tests tagged with the 'kenya' country attribute.
NOSE_ARGS += ['-a', 'country=kenya']
| mysociety/yournextrepresentative | mysite/settings/tests_kenya.py | Python | agpl-3.0 | 237 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Management class for live migration VM operations.
"""
import os
import sys
from nova import exception
from nova import flags
from nova.openstack.common import excutils
from nova.openstack.common import log as logging
from nova.virt.hyperv import baseops
from nova.virt.hyperv import constants
from nova.virt.hyperv import vmutils
# Check needed for unit testing on Unix
if sys.platform == 'win32':
import wmi
LOG = logging.getLogger(__name__)
FLAGS = flags.FLAGS
class LiveMigrationOps(baseops.BaseOps):
    """Management class for live migration VM operations on Hyper-V."""

    def __init__(self, volumeops):
        super(LiveMigrationOps, self).__init__()
        self._vmutils = vmutils.VMUtils()
        self._volumeops = volumeops

    def _check_live_migration_config(self):
        """Ensure this host supports and is configured for live migration.

        Raises HyperVException if the WMI v2 virtualization namespace is
        missing (Hyper-V version too old), if live migration is disabled,
        or if no migration networks are configured.
        """
        try:
            self._conn_v2
        except Exception:
            # Fix: the original message was corrupted by a line
            # continuation inside a single-quoted string and contained
            # literal stray double quotes.
            raise vmutils.HyperVException(
                _('Live migration is not supported by this version '
                  'of Hyper-V'))
        migration_svc = self._conn_v2.Msvm_VirtualSystemMigrationService()[0]
        vsmssd = migration_svc.associators(
            wmi_association_class='Msvm_ElementSettingData',
            wmi_result_class='Msvm_VirtualSystemMigrationServiceSettingData')[0]
        if not vsmssd.EnableVirtualSystemMigration:
            raise vmutils.HyperVException(
                _('Live migration is not enabled on this host'))
        if not migration_svc.MigrationServiceListenerIPAddressList:
            raise vmutils.HyperVException(
                _('Live migration networks are not configured on this host'))

    def live_migration(self, context, instance_ref, dest, post_method,
                       recover_method, block_migration=False):
        """Live migrate the instance to host *dest*.

        Calls *post_method* on success; on any failure calls
        *recover_method* and re-raises the original exception.
        """
        LOG.debug(_("live_migration called"), instance=instance_ref)
        instance_name = instance_ref["name"]
        try:
            self._check_live_migration_config()
            vm_name = self._vmutils.lookup(self._conn, instance_name)
            if vm_name is None:
                raise exception.InstanceNotFound(instance=instance_name)
            vm = self._conn_v2.Msvm_ComputerSystem(
                ElementName=instance_name)[0]
            vm_settings = vm.associators(
                wmi_association_class='Msvm_SettingsDefineState',
                wmi_result_class='Msvm_VirtualSystemSettingData')[0]
            # Collect the serialized settings of every virtual hard disk
            # resource (ResourceType 31) attached to the VM.
            new_resource_setting_data = []
            sasds = vm_settings.associators(
                wmi_association_class='Msvm_VirtualSystemSettingDataComponent',
                wmi_result_class='Msvm_StorageAllocationSettingData')
            for sasd in sasds:
                if sasd.ResourceType == 31 and \
                        sasd.ResourceSubType == \
                        "Microsoft:Hyper-V:Virtual Hard Disk":
                    # sasd.PoolId = ""
                    new_resource_setting_data.append(sasd.GetText_(1))
            LOG.debug(_("Getting live migration networks for remote "
                        "host: %s"), dest)
            _conn_v2_remote = wmi.WMI(
                moniker='//' + dest + '/root/virtualization/v2')
            migration_svc_remote = \
                _conn_v2_remote.Msvm_VirtualSystemMigrationService()[0]
            remote_ip_address_list = \
                migration_svc_remote.MigrationServiceListenerIPAddressList
            # VirtualSystemAndStorage
            vsmsd = self._conn_v2.query("select * from "
                                        "Msvm_VirtualSystemMigrationSettingData "
                                        "where MigrationType = 32771")[0]
            vsmsd.DestinationIPAddressList = remote_ip_address_list
            migration_setting_data = vsmsd.GetText_(1)
            migration_svc = \
                self._conn_v2.Msvm_VirtualSystemMigrationService()[0]
            LOG.debug(_("Starting live migration for instance: %s"),
                      instance_name)
            (job_path, ret_val) = migration_svc.MigrateVirtualSystemToHost(
                ComputerSystem=vm.path_(),
                DestinationHost=dest,
                MigrationSettingData=migration_setting_data,
                NewResourceSettingData=new_resource_setting_data)
            # An asynchronous job may be returned; otherwise 0 means success.
            if ret_val == constants.WMI_JOB_STATUS_STARTED:
                success = self._vmutils.check_job_status(job_path)
            else:
                success = (ret_val == 0)
            if not success:
                raise vmutils.HyperVException(
                    _('Failed to live migrate VM %s') % instance_name)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.debug(_("Calling live migration recover_method "
                            "for instance: %s"), instance_name)
                recover_method(context, instance_ref, dest, block_migration)
        LOG.debug(_("Calling live migration post_method for instance: %s"),
                  instance_name)
        post_method(context, instance_ref, dest, block_migration)

    def pre_live_migration(self, context, instance, block_device_info,
                           network_info):
        """Pre-cache the instance's base VHD on this (destination) host."""
        LOG.debug(_("pre_live_migration called"), instance=instance)
        self._check_live_migration_config()
        if FLAGS.use_cow_images:
            ebs_root = self._volumeops.volume_in_mapping(
                self._volumeops.get_default_root_device(),
                block_device_info)
            if not ebs_root:
                base_vhd_path = self._vmutils.get_base_vhd_path(
                    instance["image_ref"])
                if not os.path.exists(base_vhd_path):
                    self._vmutils.fetch_image(base_vhd_path, context,
                                              instance["image_ref"],
                                              instance["user_id"],
                                              instance["project_id"])

    def post_live_migration_at_destination(self, ctxt, instance_ref,
                                           network_info, block_migration):
        """Hook called on the destination host after a successful migration."""
        LOG.debug(_("post_live_migration_at_destination called"),
                  instance=instance_ref)

    def compare_cpu(self, cpu_info):
        """CPU compatibility is handled by Hyper-V itself; always accept."""
        LOG.debug(_("compare_cpu called %s"), cpu_info)
        return True
| tylertian/Openstack | openstack F/nova/nova/virt/hyperv/livemigrationops.py | Python | apache-2.0 | 6,665 |
"""Demonstrate pandas DataFrame/Series aggregation with agg()/aggregate().

Each call is followed by its printed output (observed under pandas 1.0.0)
in comments, so the script doubles as executable documentation.
"""
import pandas as pd
import numpy as np
print(pd.__version__)
# 1.0.0
print(pd.DataFrame.agg is pd.DataFrame.aggregate)
# True
df = pd.DataFrame({'A': [0, 1, 2], 'B': [3, 4, 5]})
print(df)
# A B
# 0 0 3
# 1 1 4
# 2 2 5
# A list of function names aggregates every column and returns a DataFrame,
# even when the list has a single element.
print(df.agg(['sum', 'mean', 'min', 'max']))
# A B
# sum 3.0 12.0
# mean 1.0 4.0
# min 0.0 3.0
# max 2.0 5.0
print(type(df.agg(['sum', 'mean', 'min', 'max'])))
# <class 'pandas.core.frame.DataFrame'>
print(df.agg(['sum']))
# A B
# sum 3 12
print(type(df.agg(['sum'])))
# <class 'pandas.core.frame.DataFrame'>
# A single function name (not in a list) returns a Series instead.
print(df.agg('sum'))
# A 3
# B 12
# dtype: int64
print(type(df.agg('sum')))
# <class 'pandas.core.series.Series'>
# A dict maps column names to per-column functions; missing combinations
# become NaN.
print(df.agg({'A': ['sum', 'min', 'max'],
              'B': ['mean', 'min', 'max']}))
# A B
# max 2.0 5.0
# mean NaN 4.0
# min 0.0 3.0
# sum 3.0 NaN
print(df.agg({'A': 'sum', 'B': 'mean'}))
# A 3.0
# B 4.0
# dtype: float64
print(df.agg({'A': ['sum'], 'B': ['mean']}))
# A B
# mean NaN 4.0
# sum 3.0 NaN
print(df.agg({'A': ['min', 'max'], 'B': 'mean'}))
# A B
# max 2.0 NaN
# mean NaN 4.0
# min 0.0 NaN
# axis=1 aggregates across columns (one result row per input row).
print(df.agg(['sum', 'mean', 'min', 'max'], axis=1))
# sum mean min max
# 0 3.0 1.5 0.0 3.0
# 1 5.0 2.5 1.0 4.0
# 2 7.0 3.5 2.0 5.0
s = df['A']
print(s)
# 0 0
# 1 1
# 2 2
# Name: A, dtype: int64
# Series.agg with a list returns a Series indexed by function name.
print(s.agg(['sum', 'mean', 'min', 'max']))
# sum 3.0
# mean 1.0
# min 0.0
# max 2.0
# Name: A, dtype: float64
print(type(s.agg(['sum', 'mean', 'min', 'max'])))
# <class 'pandas.core.series.Series'>
print(s.agg(['sum']))
# sum 3
# Name: A, dtype: int64
print(type(s.agg(['sum'])))
# <class 'pandas.core.series.Series'>
print(s.agg('sum'))
# 3
print(type(s.agg('sum')))
# <class 'numpy.int64'>
# For a Series a dict renames the results; nested renaming is an error.
print(s.agg({'Total': 'sum', 'Average': 'mean', 'Min': 'min', 'Max': 'max'}))
# Total 3.0
# Average 1.0
# Min 0.0
# Max 2.0
# Name: A, dtype: float64
# print(s.agg({'NewLabel_1': ['sum', 'max'], 'NewLabel_2': ['mean', 'min']}))
# SpecificationError: nested renamer is not supported
# Strings may also name Series methods, NumPy functions, or attributes.
print(df.agg(['mad', 'amax', 'dtype']))
# A B
# mad 0.666667 0.666667
# amax 2 5
# dtype int64 int64
print(df['A'].mad())
# 0.6666666666666666
print(np.amax(df['A']))
# 2
print(df['A'].dtype)
# int64
# print(df.agg(['xxx']))
# AttributeError: 'xxx' is not a valid function for 'Series' object
# print(df.agg('xxx'))
# AttributeError: 'xxx' is not a valid function for 'DataFrame' object
print(hasattr(pd.DataFrame, '__array__'))
# True
print(hasattr(pd.core.groupby.GroupBy, '__array__'))
# False
# Callables (builtins, NumPy ufuncs, user functions, lambdas) also work.
print(df.agg([np.sum, max]))
# A B
# sum 3 12
# max 2 5
print(np.sum(df['A']))
# 3
print(max(df['A']))
# 2
print(np.abs(df['A']))
# 0 0
# 1 1
# 2 2
# Name: A, dtype: int64
print(df.agg([np.abs]))
# A B
# absolute absolute
# 0 0 3
# 1 1 4
# 2 2 5
# print(df.agg([np.abs, max]))
# ValueError: cannot combine transform and aggregation operations
def my_func(x):
    return min(x) / max(x)
print(df.agg([my_func, lambda x: min(x) / max(x)]))
# A B
# my_func 0.0 0.6
# <lambda> 0.0 0.6
print(df['A'].std())
# 1.0
print(df['A'].std(ddof=0))
# 0.816496580927726
print(df.agg(['std', lambda x: x.std(ddof=0)]))
# A B
# std 1.000000 1.000000
# <lambda> 0.816497 0.816497
# Extra keyword arguments are forwarded to the aggregation function only
# when the function is given as a plain string (not inside a list).
print(df.agg('std', ddof=0))
# A 0.816497
# B 0.816497
# dtype: float64
print(df.agg(['std'], ddof=0))
# A B
# std 1.0 1.0
df_str = df.assign(C=['X', 'Y', 'Z'])
print(df_str)
# A B C
# 0 0 3 X
# 1 1 4 Y
# 2 2 5 Z
# df_str['C'].mean()
# TypeError: Could not convert XYZ to numeric
# Columns where a function fails are silently dropped (or NaN-filled).
print(df_str.agg(['sum', 'mean']))
# A B C
# sum 3.0 12.0 XYZ
# mean 1.0 4.0 NaN
print(df_str.agg(['mean', 'std']))
# A B
# mean 1.0 4.0
# std 1.0 1.0
print(df_str.agg(['sum', 'min', 'max']))
# A B C
# sum 3 12 XYZ
# min 0 3 X
# max 2 5 Z
print(df_str.select_dtypes(include='number').agg(['sum', 'mean']))
# A B
# sum 3.0 12.0
# mean 1.0 4.0
import astropy.io.fits
import matplotlib.pyplot as plt
import numpy as np
import os
REPO_DIR = '/Users/Jake/Research/code/m31flux'
def log10(val):
    """Replace non-positive entries of *val* with NaN.

    NOTE(review): despite its name this does NOT apply a base-10
    logarithm; it only masks values <= 0 to NaN (it is passed to
    plot_map() as a "stretch" callable). Confirm whether np.log10
    was intended here.
    """
    positive = val > 0
    return np.where(positive, val, np.nan)
def plot_map(data, outfile, label, limits, stretch, cmap):
    """Render *data* as an image with a horizontal colorbar and save it.

    The array is rotated to landscape, clipped to *limits*, passed
    through the *stretch* callable, drawn with *cmap*, and written to
    *outfile*.
    """
    width = 6.0
    cbar_height = 0.15

    # Rotate the array so the long axis runs horizontally.
    data = data[::-1].T
    ny, nx = data.shape

    plt.close()
    img_height = float(ny) / nx * width
    pad_bottom, pad_top = 0.5, 0.1
    lower_band = pad_bottom + cbar_height + pad_top
    height = lower_band + img_height
    fig = plt.figure(figsize=(width, height))
    ax_img = fig.add_axes((0, lower_band/height, 1, img_height/height))

    # Clip, then apply the requested stretch before display.
    data = stretch(np.clip(data, *limits))

    img = ax_img.imshow(data, origin='lower', interpolation='nearest',
                        cmap=cmap)
    ax_img.xaxis.set_visible(False)
    ax_img.yaxis.set_visible(False)
    for spine in ax_img.spines.values():
        spine.set_visible(False)

    ax_cbar = fig.add_axes((0.02, pad_bottom/height,
                            0.96, cbar_height/height))
    cb = fig.colorbar(img, cax=ax_cbar, orientation='horizontal')
    ax_cbar.tick_params(labelsize=10)
    ax_cbar.set_xlabel(label, size=10)

    fig.savefig(outfile)
    plt.close()
def plot_scatter(x, y, xlabel, ylabel, outfile):
    """Draw a square scatter plot of *y* versus *x* and save it."""
    size = 4.0
    fig = plt.figure(figsize=(size, size))
    ax = fig.add_axes((0.15, 0.15, 0.80, 0.80))
    ax.set_rasterization_zorder(0)

    # Horizontal reference line at y = 0.
    plt.axhline(0, color='0.6', ls='--')
    plt.plot(x, y, 'k.', mec='none', ms=3, alpha=0.2, zorder=-1)

    ax.set_xlabel(xlabel, size=10)
    ax.set_ylabel(ylabel, size=10)
    ax.tick_params(labelsize=10)

    fig.savefig(outfile, dpi=200)
    plt.close()
def main():
    """Generate observed/model flux maps, ratio maps and ratio scatter
    plots for both GALEX bands.

    The original function repeated a ~30-line stanza for FUV and NUV;
    the two passes are now driven by one helper.
    """
    limits = (1e-16, 7e-15)
    cmap = plt.cm.gist_heat_r

    weights_file = os.path.join(REPO_DIR, 'maps', 'weights.fits')
    weights = astropy.io.fits.getdata(weights_file)

    for band in ('fuv', 'nuv'):
        _plot_band(band, weights, limits, cmap)


def _plot_band(band, weights, limits, cmap):
    """Produce all figures for one GALEX band ('fuv' or 'nuv')."""
    bu = band.upper()
    flux_units = r'\mathrm{(erg \,s^{-1} \,cm^{-2} \,\AA^{-1})}'

    # Observed flux map (weighted by the coverage map).
    filename = os.path.join(REPO_DIR, 'maps', 'galex_%s.fits' % band)
    galex = astropy.io.fits.getdata(filename) * weights
    plotname = os.path.join(REPO_DIR, 'figs', 'galex_%s.pdf' % band)
    label = r'$f_\mathrm{%s,obs} \, ' % bu + flux_units + '$'
    plot_map(galex, plotname, label, limits, log10, cmap)

    # Modeled (reddened) flux map.
    filename = os.path.join(REPO_DIR, 'maps', 'mod_%s_red.fits' % band)
    mod_red = astropy.io.fits.getdata(filename)
    plotname = os.path.join(REPO_DIR, 'figs', 'mod_%s_red.pdf' % band)
    label = r'$f_\mathrm{%s,mod} \, ' % bu + flux_units + '$'
    plot_map(mod_red, plotname, label, limits, log10, cmap)

    # Model/observed ratio map on a diverging colormap.
    ratio = mod_red / galex
    plotname = os.path.join(REPO_DIR, 'figs', '%s_ratio.pdf' % band)
    label = r'$\log_{10}(f_\mathrm{%s,mod} / f_\mathrm{%s,obs})$' % (bu, bu)
    ratio_limits = (0.1, 10)
    ratio_cmap = plt.cm.RdBu_r
    plot_map(ratio, plotname, label, ratio_limits, np.log10, ratio_cmap)

    # Ratio versus observed flux scatter plot.
    xlabel = (r'$\log_{10}(f_\mathrm{%s,obs} / ' % bu + flux_units + r')$')
    ylabel = r'$\log_{10}(f_\mathrm{%s,mod} / f_\mathrm{%s,obs})$' % (bu, bu)
    plotname = os.path.join(REPO_DIR, 'figs', '%s_ratio_plot.pdf' % band)
    plot_scatter(np.log10(galex), np.log10(ratio),
                 xlabel, ylabel, plotname)
if __name__ == '__main__':
main()
| jesaerys/m31flux | scripts/figs.py | Python | mit | 5,305 |
"""
Test for using a configuration file
"""
import os
import unittest
import tempfile
import logging
import scitokens
import scitokens.utils.config
from six.moves import configparser
class TestConfig(unittest.TestCase):
    """
    Test the configuration parsing
    """
    def setUp(self):
        # Directory of this test file, used to locate test_config.ini.
        self.dir_path = os.path.dirname(os.path.realpath(__file__))
        # Reset the module-level parser so state does not leak between tests.
        scitokens.utils.config.configuration = configparser.ConfigParser(scitokens.utils.config.CONFIG_DEFAULTS)
    def tearDown(self):
        # Clear the config back to defaults each time
        scitokens.set_config()
    def test_config_file(self):
        """
        Test the configuration with a regular config file
        """
        # Get the current directory and pass it the path of test_config.ini
        scitokens.set_config(os.path.join(self.dir_path, "test_config.ini"))
        self.assertEqual(scitokens.utils.config.get("log_file"), "")
        self.assertEqual(scitokens.utils.config.get("log_level"), "DEBUG")
    def test_passing_config(self):
        """
        Test the passing of a configuration parser object
        """
        new_config = configparser.ConfigParser()
        new_config.add_section("scitokens")
        new_config.set("scitokens", "log_level", "WARNING")
        scitokens.set_config(new_config)
        self.assertEqual(scitokens.utils.config.get("log_level"), "WARNING")
    def test_passing_config_log(self):
        """
        Test the with log_file
        """
        new_config = configparser.ConfigParser()
        new_config.add_section("scitokens")
        new_config.set("scitokens", "log_level", "WARNING")
        with tempfile.TemporaryDirectory() as tmp_dir:
            # log_file must exist before set_config attaches the handler.
            tmp_file = os.path.join(tmp_dir, "tmp.log")
            new_config.set("scitokens", "log_file", tmp_file)
            scitokens.set_config(new_config)
            self.assertEqual(scitokens.utils.config.get("log_level"), "WARNING")
            self.assertEqual(scitokens.utils.config.get("log_file"), tmp_file)
            # Log a line
            logger = logging.getLogger("scitokens")
            logger.error("This is an error")
            self.assertTrue(os.path.getsize(tmp_file) > 0)
            # close the log files so that TemporaryDirectory can delete itself
            for handler in logger.handlers:
                handler.close()
    def test_no_config(self):
        """
        Test when there is no config
        """
        # This should throw an exception if there is an error
        self.assertEqual(scitokens.utils.config.get("cache_location"), "")
| scitokens/scitokens | tests/test_config.py | Python | apache-2.0 | 2,589 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-04-25 00:10
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import filer.fields.file
class Migration(migrations.Migration):
    # Auto-generated by Django makemigrations: adds the relational fields
    # (FK/O2O) of RevenueItem and ExpenseItem after their base tables were
    # created in 0001_initial. Do not hand-edit the operations; create a
    # new migration for any schema change.
    initial = True
    dependencies = [
        ('core', '0001_initial'),
        ('filer', '0007_auto_20161016_1055'),
        ('vouchers', '0001_initial'),
        ('financial', '0001_initial'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.AddField(
            model_name='revenueitem',
            name='purchasedVoucher',
            field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='vouchers.Voucher', verbose_name='Purchased voucher/gift certificate'),
        ),
        migrations.AddField(
            model_name='revenueitem',
            name='registration',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.Registration'),
        ),
        migrations.AddField(
            model_name='revenueitem',
            name='submissionUser',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='revenuessubmittedby', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='expenseitem',
            name='attachment',
            field=filer.fields.file.FilerFileField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='expense_attachment', to='filer.File', verbose_name='Attach File (optional)'),
        ),
        migrations.AddField(
            model_name='expenseitem',
            name='category',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='financial.ExpenseCategory'),
        ),
        migrations.AddField(
            model_name='expenseitem',
            name='event',
            field=models.ForeignKey(blank=True, help_text='If this item is associated with an Event, enter it here.', null=True, on_delete=django.db.models.deletion.CASCADE, to='core.Event'),
        ),
        migrations.AddField(
            model_name='expenseitem',
            name='eventstaffmember',
            field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.EventStaffMember'),
        ),
        migrations.AddField(
            model_name='expenseitem',
            name='eventvenue',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='venueexpense', to='core.Event'),
        ),
        migrations.AddField(
            model_name='expenseitem',
            name='payToLocation',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.Location'),
        ),
        migrations.AddField(
            model_name='expenseitem',
            name='payToUser',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='payToUser', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='expenseitem',
            name='submissionUser',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='expensessubmittedby', to=settings.AUTH_USER_MODEL),
        ),
    ]
| django-danceschool/django-danceschool | danceschool/financial/migrations/0002_auto_20170425_0010.py | Python | bsd-3-clause | 3,541 |
# Copyright 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Unit tests for sources/source.py."""
__author__ = 'jaq@google.com (Jamie Wilkinson)'
import unittest
from nss_cache.sources import source
from nss_cache.sources import source_factory
class TestSourceFactory(unittest.TestCase):
    """Unit tests for the source factory."""

    def testRegister(self):
        """RegisterImplementation adds a named source to the registry."""
        number_of_sources = len(source_factory._source_implementations)

        class DummySource(source.Source):
            name = 'dummy'

        source_factory.RegisterImplementation(DummySource)
        # assertEqual replaces the failUnlessEqual alias, which is
        # deprecated since Python 2.7 and removed in Python 3.12.
        self.assertEqual(number_of_sources + 1,
                         len(source_factory._source_implementations))
        self.assertEqual(DummySource,
                         source_factory._source_implementations['dummy'])

    def testRegisterWithoutName(self):
        """Implementations without a 'name' attribute are rejected."""

        class DummySource(source.Source):
            pass

        self.assertRaises(RuntimeError,
                          source_factory.RegisterImplementation, DummySource)

    def testCreateWithNoImplementations(self):
        """Create raises when no source implementations are registered."""
        source_factory._source_implementations = {}
        self.assertRaises(RuntimeError, source_factory.Create, {})

    def testCreate(self):
        """Create instantiates the implementation named in the config."""

        class DummySource(source.Source):
            name = 'dummy'

        source_factory.RegisterImplementation(DummySource)
        dummy_config = {'name': 'dummy'}
        dummy_source = source_factory.Create(dummy_config)
        self.assertEqual(DummySource, type(dummy_source))
if __name__ == '__main__':
unittest.main()
| UPPMAX/nsscache | nss_cache/sources/source_factory_test.py | Python | gpl-2.0 | 2,150 |
import os
import sys
from erosionbase import ErosionBase
from grass.script.core import run_command, parse_command
class ErosionUSLE(ErosionBase):
    """Compute a USLE soil-erosion (G factor) raster with GRASS GIS.

    The pipeline chains GRASS modules: slope from the DMT, flow
    accumulation, the LS factor via r.mapcalc, the KC factor from a
    BPEJ/LPIS vector overlay, and finally the G-factor raster.

    Fix: the original used bare ``except:`` clauses, which also swallow
    SystemExit/KeyboardInterrupt and would misreport them as GRASS
    errors; they are narrowed to ``except Exception:`` here.
    """
    def __init__(self, data, factors, epsg='5514', location_path=None,
                 computeStat=None, computeError=None):
        """USLE constructor.

        Two modes are available
        - creating temporal location, input data are imported
        - use existing location, in this case specified location must
        contain maps defined by self.maps directory

        :param data: paths to the (euc, dmt, bpej, lpis) input datasets
        :param factors: (R factor, P factor) values as strings
        :param epsg: EPSG code for creating new temporal location
        :param location_path: path to existing location
        :param computeStat: optional progress callback signal (percent, label)
        :param computeError: optional error callback signal (label)
        """
        ErosionBase.__init__(self, epsg, location_path)
        self._computeStat = computeStat
        self._computeError = computeError
        # overwrite existing maps/files by default
        os.environ['GRASS_OVERWRITE'] = '1'
        self.euc_name = os.path.splitext(os.path.basename(data[0]))[0]
        self.dmt_name = os.path.splitext(os.path.basename(data[1]))[0]
        self.bpej_name = os.path.splitext(os.path.basename(data[2]))[0]
        self.lpis_name = os.path.splitext(os.path.basename(data[3]))[0]
        self.r_factor = factors[0]
        self.p_factor = factors[1]
        # internal input map names
        self._input = { 'euc' : self.euc_name,
                        'dmt' : self.dmt_name,
                        'bpej' : self.bpej_name,
                        'lpis' : self.lpis_name
        }
        # output names
        self._output = { 'erosion' : 'usle_g',
        }
    def computeStat(self, perc, label):
        """Report progress via the optional signal and stderr."""
        if self._computeStat is not None:
            self._computeStat.emit(perc, label)
        sys.stderr.write('[pyerosion] {}: {}\n'.format(perc, label))
    def computeError(self, label):
        """Report an error via the optional signal and stderr."""
        if self._computeError is not None:
            self._computeError.emit(label)
        sys.stderr.write('[pyerosion ERROR]: {}\n'.format(label))
    def run(self, terraflow=False):
        """
        Erosion computing

        :param terraflow: True : computing direction by method terraflow
                          False : computing direction by method wattershed
        """
        # set computation region based on input DMT
        try:
            self.computeStat(10, u'Setting up computation region...')
            reg = parse_command('g.region',
                                raster=self._input['dmt'],
                                flags='g'
            )
        except Exception:
            self.computeError(u'Error in setting up computation region.')
            return
        # computing slope on input DMT
        try:
            self.computeStat(15, u'Computing slope...')
            slope = self._temp_map('raster')
            run_command('r.slope.aspect',
                        elevation=self._input['dmt'],
                        slope=slope
            )
        except Exception:
            self.computeError(u'Error in computing slope.')
            return
        # setting up mask
        try:
            self.computeStat(20, u'Setting up mask...')
            run_command('r.mask',
                        vector=self._input['euc']
            )
        except Exception:
            self.computeError(u'Error in setting up mask.')
            return
        # computing accumulation
        try:
            # TODO: discuss accumulation computation (which module, use
            # filled DMT?)
            self.computeStat(25, u'Computing accumulation...')
            accu = self._temp_map('raster')
            if terraflow:
                dmt_fill = self._temp_map('raster')
                direction = self._temp_map('raster')
                swatershed = self._temp_map('raster')
                tci = self._temp_map('raster')
                run_command('r.terraflow',
                            elevation=self._input['dmt'],
                            filled=dmt_fill,
                            direction=direction,
                            swatershed=swatershed,
                            accumulation=accu,
                            tci=tci
                )
            else:
                run_command('r.watershed',
                            flags='a',
                            elevation=self._input['dmt'],
                            accumulation=accu
                )
        except Exception:
            self.computeError(u'Error in computing accumulation.')
            return
        # computing LS Factor
        try:
            self.computeStat(40, u'Computing LS factor...')
            formula='ls = 1.6 * pow(' + accu + '* (' + reg['nsres'] +' / 22.13), 0.6) * pow(sin(' + \
                slope + '* (3.1415926/180)) / 0.09, 1.3)'
            run_command('r.mapcalc',
                        expr=formula
            )
        except Exception:
            self.computeError(u'Error in computing LS factor.')
            return
        # computing KC Factor
        try:
            self.computeStat(60, u'Computing KC factor...')
            # overlay layers: bpej and lpis
            bpej_lpis = self._temp_map('vector')
            run_command('v.overlay',
                        ainput=self._input['bpej'],
                        binput=self._input['lpis'],
                        operator='or',
                        output=bpej_lpis
            )
            # add column KC
            run_command('v.db.addcolumn',
                        map=bpej_lpis,
                        columns='KC double'
            )
            # compute KC value
            run_command('v.db.update',
                        map=bpej_lpis,
                        column='KC',
                        query_column='a_K * b_C')
        except Exception:
            self.computeError(u'Error in computing KC factor.')
            return
        # compute final G Factor (Erosion factor)
        try:
            self.computeStat(75, u'Computing Erosion factor...')
            bpej_lpis_raster=self._temp_map('raster')
            run_command('v.to.rast',
                        input=bpej_lpis,
                        output=bpej_lpis_raster,
                        use='attr',
                        attribute_column='KC',
                        where='KC IS NOT NULL'
            )
            usle=self._output['erosion'] + '=' + self.r_factor + '* ls *' + bpej_lpis_raster + '*' + self.p_factor
            run_command('r.mapcalc',
                        expr=usle
            )
            run_command('r.colors',
                        flags='ne',
                        map=self._output['erosion'],
                        color='corine'
            )
        except Exception:
            self.computeError(u'Error in computing Erosion factor.')
            return
    def test(self):
        """
        Run test.

        - prints output erosion map metadata
        """
        run_command('g.gisenv')
        run_command('r.univar', map=self._output['erosion'])
| ctu-geoforall-lab/qgis-soil-erosion-plugin | pyerosion/erosionusle.py | Python | gpl-3.0 | 6,920 |
"""Methods to handle times"""
import time
def now():
    """Return the current Unix timestamp.

    Thin wrapper around time.time() so callers of this module can avoid
    an extra import.
    """
    timestamp = time.time()
    return timestamp
def time_since(number_of_seconds):
    """Convert a past Unix timestamp to an English elapsed-time string.

    Retain only the two most significant units.

    Fix: the original assigned ``interval`` twice; the first (dead)
    assignment has been removed.

    >>> expected = '13 hours, 2 minutes'
    >>> actual = time_since(time.time() - (13*60*60 + 2*60 + 5))
    >>> assert actual == expected
    """
    interval = int(time.time() - float(number_of_seconds))
    minutes, seconds = divmod(interval, 60)
    if not minutes:
        return "%s seconds" % seconds
    hours, minutes = divmod(minutes, 60)
    if not hours:
        return "%s minutes, %s seconds" % (minutes, seconds)
    days, hours = divmod(hours, 24)
    if not days:
        return "%s hours, %s minutes" % (hours, minutes)
    years, days = divmod(days, 365)
    if not years:
        return "%s days, %s hours" % (days, hours)
    return "%s years, %s days" % (years, days)
| jalanb/kd | cde/timings.py | Python | mit | 1,061 |
"""Exceptions for the monitoring app."""
class MonitoringRegistryException(Exception):
    """Raised for errors involving the monitoring app's registry.

    The exact trigger conditions are defined by the registry code
    elsewhere in the app.
    """
    pass
| bitmazk/django-monitoring | monitoring/exceptions.py | Python | mit | 98 |
from sqlagg.columns import SimpleColumn
from corehq.apps.reports.datatables import DataTablesHeader, DataTablesColumnGroup
from corehq.apps.reports.generic import GenericTabularReport
from corehq.apps.reports.sqlreport import DatabaseColumn
from corehq.apps.reports.standard import DatespanMixin, CustomProjectReport
from corehq.apps.reports.util import format_datatables_data
from custom.up_nrhm.reports import LangMixin
from custom.up_nrhm.filters import HierarchySqlData
from custom.up_nrhm.reports.block_level_af_report import BlockLevelAFReport
from custom.up_nrhm.sql_data import ASHAFacilitatorsData
from django.utils.translation import ugettext as _, ugettext_noop
class DistrictFunctionalityReport(GenericTabularReport, DatespanMixin, CustomProjectReport, LangMixin):
    # Format-5 report: per-block ASHA functionality percentages and letter
    # grades for a district, built by running BlockLevelAFReport once per
    # block of the selected district.
    name = ugettext_noop("Format-5 Functionality of ASHAs in blocks")
    slug = "district_functionality_report"
    no_value = '--'
    def get_blocks_for_district(self):
        # Collect the distinct block names belonging to the configured district.
        blocks = []
        for location in HierarchySqlData(config={'domain': self.domain}).get_data():
            if location['district'] == self.report_config['district']:
                blocks.append(location['block'])
        return set(blocks)
    @property
    def headers(self):
        blocks = self.get_blocks_for_district()
        headers = [DataTablesColumnGroup('')]
        headers.extend([DataTablesColumnGroup(block) for block in self.get_blocks_for_district()])
        # NOTE(review): `columns` is never returned. Constructing
        # DatabaseColumn with header_group= presumably attaches each column
        # to its group as a side effect -- confirm against
        # corehq.apps.reports.sqlreport before treating this as dead code.
        columns = [DatabaseColumn(_("Percentage of ASHAs functional on "
                                    "(Number of functional ASHAs/total number of ASHAs) x 100"), SimpleColumn(''),
                                  header_group=headers[0])]
        for i, block in enumerate(blocks):
            columns.append(DatabaseColumn(_('%s of ASHAs') % '%',
                                          SimpleColumn(block), header_group=headers[i + 1]))
            columns.append(DatabaseColumn(_('Grade of Block'), SimpleColumn(block), header_group=headers[i + 1]))
        return DataTablesHeader(*headers)
    @property
    def report_config(self):
        # Filter values taken straight from the request's GET parameters.
        return {
            'domain': self.domain,
            'year': self.request.GET.get('year'),
            'month': self.request.GET.get('month'),
            'district': self.request.GET.get('hierarchy_district'),
        }
    @property
    def model(self):
        return ASHAFacilitatorsData(config=self.report_config)
    @property
    def rows(self):
        def percent(v1, v2):
            # Percentage of v1 over v2; 0 when the denominator is zero.
            try:
                return float(v1) * 100.0 / float(v2)
            except ZeroDivisionError:
                return 0
        def get_grade(v):
            # Letter grade bands: <25 D, <50 C, <75 B, else A.
            return 'D' if v < 25 else 'C' if v < 50 else 'B' if v < 75 else 'A'
        rows = [[column.header] for column in self.model.columns[2:]]
        # NOTE(review): this relies on iterating get_blocks_for_district()
        # (a set) in the same order as in `headers` -- consistent within one
        # process, but worth confirming.
        for block in self.get_blocks_for_district():
            # Mutate the request's GET parameters so the per-block
            # BlockLevelAFReport below picks up this block as its filter.
            self.request_params['hierarchy_block'] = block
            q = self.request.GET.copy()
            q['hierarchy_block'] = block
            self.request.GET = q
            rs, block_total = BlockLevelAFReport(self.request, domain=self.domain).rows
            for index, row in enumerate(rs[0:-2]):
                value = percent(row[-1]['sort_key'], block_total)
                grade = get_grade(value)
                # First 10 rows are percentage metrics; later rows carry
                # their own pre-formatted value.
                if index < 10:
                    rows[index].append(format_datatables_data('%.1f%%' % value, '%.1f%%' % value))
                    rows[index].append(format_datatables_data(grade, grade))
                else:
                    rows[index].append(row[-1])
                    val = row[-1]['sort_key']
                    grade = get_grade(val)
                    rows[index].append(format_datatables_data(grade, grade))
        return rows, 0
| qedsoftware/commcare-hq | custom/up_nrhm/reports/district_functionality_report.py | Python | bsd-3-clause | 3,729 |
from django.db import models
from django.contrib.auth.models import User
class Language(models.Model):
    # Programming language a snippet can be written in.
    name = models.CharField('Name', max_length=50, unique=True,
                            null=False, blank=False)
    def __unicode__(self):
        return self.name
class Tag(models.Model):
    # Short free-form label attachable to snippets (many-to-many).
    name = models.CharField('Name', max_length=15, unique=True,
                            null=False, blank=False)
    def __unicode__(self):
        return self.name
class Snippet(models.Model):
    # A saved piece of code owned by a user.
    name = models.CharField('Name', max_length=100, unique=True,
                            null=False, blank=False)
    created_by = models.ForeignKey(User)
    language = models.ForeignKey(Language)
    # Whether the snippet is visible to users other than its owner.
    public = models.BooleanField('Public', default=False)
    code = models.TextField('Code', blank=False, null=False)
    # auto_now_add: set once on creation; auto_now: refreshed on every save.
    created_at = models.DateTimeField('Created at', auto_now_add=True)
    modified_at = models.DateTimeField('Last edit at', auto_now=True)
    tags = models.ManyToManyField(Tag)
    def __unicode__(self):
        return self.name + " - " + self.created_by.email
| swones/swa | swa/web/models.py | Python | mit | 1,089 |
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 27 07:38:35 2016

@author: msmanski

Builds a model fitness surface of medium NK ruggedness as the sum of
three Gaussian-like peaks, plots it as a wireframe, and writes the Z
values to 'Z-values_medium.csv'.

NOTE(review): the original header said "10000 x 10000 grid" but the code
builds a 1000 x 1000 grid; the 1000-point grid is preserved here.

Performance fix: the original filled the coordinate grids and all three
1000x1000 surfaces with element-by-element Python while-loops (millions
of np.power calls); this version computes the same values with
vectorized NumPy expressions. The unused surface array Zd was removed.
"""
from mpl_toolkits.mplot3d import axes3d
import matplotlib.pyplot as plt
import numpy as np

GRID = 1000
coords = np.arange(GRID, dtype=float) - 500.0

# Plotting grids: X varies along columns, Y along rows, exactly as the
# original loop fill produced (X[:, i] = -500 + i, Y[i, :] = -500 + i).
X, Y = np.meshgrid(coords, coords)


def peak(amp, x_center, x_width, y_center, y_width):
    """Return amp ** (1 - quadratic form) over the grid.

    This reproduces the original element-wise formula (note it is
    amp**(1 - q), not a true Gaussian amp*exp(-q)). The original indexed
    Z[x, y] with X[0, x] and Y[y, 0], i.e. the value grid is transposed
    relative to the plotting grid; 'ij' indexing reproduces that.
    """
    xv, yv = np.meshgrid(coords, coords, indexing='ij')
    q = ((xv - x_center) ** 2 / (2.0 * x_width ** 2)
         + (yv - y_center) ** 2 / (2.0 * y_width ** 2))
    return np.power(float(amp), 1.0 - q)


Za = peak(50, 100, 400, 200, 200)
Zb = peak(35, -200, 150, -150, 200)
Zc = peak(30, 300, 600, -100, 200)
Z = Za + Zb + Zc

fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot_wireframe(X, Y, Z, rstride=50, cstride=50)
plt.show()

np.savetxt('Z-values_medium.csv', Z, delimiter=',')
# NOTE(review): PyDev refactoring-test fixture -- the "##|" tokens are
# selection markers consumed by the test harness, not Python code.
class A:
    def test(self):
        print "I##|nitializing A", "test"##|
        # "attribute" is a local here, not an instance attribute (intentional
        # for the fixture -- TODO confirm against the harness expectations).
        attribute = "hello"
    def my_method(self):
        # NOTE(review): reads self.attribute, which this fixture never sets.
        print self.attribute
# Exercise the fixture; the harness inspects the marked selection above.
a = A()
a.test()
##r Should expand to Full String "Initializing A"
# Invalid selection:
# nitializing A", "test" | aptana/Pydev | tests/org.python.pydev.refactoring.tests/src/python/visitor/selectionextension/testSelectionExtensionExprFail.py | Python | epl-1.0 | 286 |
import re
from cybox.objects.address_object import Address
from cybox.objects.uri_object import URI
from .text import StixTextTransform
class StixBroIntelTransform(StixTextTransform):
    """Generate observable details for the Bro Intelligence Framework.

    This class can be used to generate a list of indicators (observables)
    from a STIX package in a format suitable for importing into the Bro
    network-based intrusion detection system using its Intelligence
    Framework (see https://www.bro.org/sphinx-git/frameworks/intel.html).

    Args:
        package: the STIX package to process
        separator: a string separator used in the text output
        include_header: a boolean value that indicates whether or not header
            information should be included in the text output
        header_prefix: a string prepended to header lines in the output
        source: a value to include in the output metadata field 'meta.source'
        url: a value to include in the output field metadata 'meta.url'
        do_notice: a value to include in the output metadata field
            'meta.do_notice', if set to 'T' a Bro notice will be raised by Bro
            on a match of this indicator
    """

    # Dotted paths of the cybox fields harvested for each object type
    # (presumably flattened into observable['fields'] by the parent class --
    # TODO confirm against StixTextTransform).
    OBJECT_FIELDS = {
        'Address': ['address_value'],
        'DomainName': ['value'],
        'EmailMessage': [
            'header.from_.address_value',
            'header.to.address_value',
        ],
        'File': ['hashes.simple_hash_value'],
        'HTTPSession': ['http_request_response.http_client_request.' +
                        'http_request_header.parsed_header.user_agent'],
        'SocketAddress': ['ip_address.address_value'],
        'URI': ['value'],
    }

    # Only observables matching these attribute values are emitted
    # (e.g. only IPv4/IPv6 addresses, only URL-type URIs).
    OBJECT_CONSTRAINTS = {
        'Address': {
            'category': [Address.CAT_IPV4, Address.CAT_IPV6],
        },
        'URI': {
            'type_': [URI.TYPE_URL],
        },
    }

    STRING_CONDITION_CONSTRAINT = ['None', 'Equals']

    # Column labels for the optional header line.
    HEADER_LABELS = [
        'indicator', 'indicator_type', 'meta.source', 'meta.url',
        'meta.do_notice', 'meta.if_in', 'meta.whitelist',
    ]

    # Map Cybox object type to Bro Intel types.
    BIF_TYPE_MAPPING = {
        'Address': 'Intel::ADDR',
        'DomainName': 'Intel::DOMAIN',
        'EmailMessage': 'Intel::EMAIL',
        'File': 'Intel::FILE_HASH',
        'HTTPSession': 'Intel::SOFTWARE',
        'SocketAddress': 'Intel::ADDR',
        'URI': 'Intel::URL',
    }

    # Map observable id prefix to source and url.
    BIF_SOURCE_MAPPING = {
        'cert_au': {
            'source': 'CERT-AU',
            'url': 'https://www.cert.gov.au/',
        },
        'CCIRC-CCRIC': {
            'source': 'CCIRC',
            'url': ('https://www.publicsafety.gc.ca/' +
                    'cnt/ntnl-scrt/cbr-scrt/ccirc-ccric-eng.aspx'),
        },
        'NCCIC': {
            'source': 'NCCIC',
            'url': 'https://www.us-cert.gov/',
        },
    }

    def __init__(self, package, separator='\t',
                 include_header=False, header_prefix='#',
                 source='UNKNOWN', url='', do_notice='T'):
        """Initialise the transform and normalise URI observables."""
        super(StixBroIntelTransform, self).__init__(
            package, separator, include_header, header_prefix,
        )
        # Fallback metadata used when an observable's id prefix is not in
        # BIF_SOURCE_MAPPING.
        self._source = source
        self._url = url
        self._do_notice = do_notice

        # Make URIs suitable for the Bro format (remove protocol)
        self._fix_uris()

    def _fix_uris(self):
        """Strip the leading http(s)/ftp scheme from every URI observable,
        in place, as the Bro Intel Framework expects scheme-less URLs."""
        if 'URI' in self._observables:
            for observable in self._observables['URI']:
                if 'fields' in observable:
                    for field in observable['fields']:
                        if 'value' in field:
                            field['value'] = re.sub(
                                pattern=r'^(https?|ftp)://',
                                repl='',
                                string=field['value'],
                            )

    def text_for_object_type(self, object_type):
        """Return the Bro Intel lines (one per harvested field value) for all
        observables of *object_type*; empty string if there are none."""
        text = ''
        if object_type in self._observables:
            for observable in self._observables[object_type]:
                # Look up source and url from observable ID
                id_prefix = observable['id'].split(':')[0]
                if id_prefix in self.BIF_SOURCE_MAPPING:
                    source = self.BIF_SOURCE_MAPPING[id_prefix]['source']
                    url = self.BIF_SOURCE_MAPPING[id_prefix]['url']
                else:
                    source = self._source
                    url = self._url
                bif_type = self.BIF_TYPE_MAPPING[object_type]
                for fields in observable['fields']:
                    for field in self.OBJECT_FIELDS[object_type]:
                        if field in fields:
                            # Columns follow HEADER_LABELS; if_in and
                            # whitelist are always '-' (unset).
                            field_values = [
                                fields[field],
                                bif_type,
                                source,
                                url,
                                self._do_notice,
                                '-',
                                '-',
                            ]
                            text += self.join(field_values) + '\n'
        return text
| thisismyrobot/cti-toolkit | certau/transform/brointel.py | Python | bsd-3-clause | 5,192 |
#!/usr/bin/env python
import os, csv, sys, math, subprocess, psycopg2, viewshed
###
# Get the letters for an OS grid reference,
# Derived from http://www.movable-type.co.uk/scripts/latlong-gridref.html"
###
def getLetters(x, y):
    "Get the letters for an OS grid reference."
    # TODO: Validate coordinates!!
    # index of the 100km square containing the point
    east100k = math.floor(x / 100000)
    north100k = math.floor(y / 100000)
    # letter indices counted from 'a' (the OS grid is numbered north-to-south)
    first = int((19 - north100k) - (19 - north100k) % 5 + math.floor((east100k + 10) / 5))
    second = int((19 - north100k) * 5 % 25 + east100k % 5)
    # the OS grid omits the letter 'I', so step over it
    if first > 7:
        first += 1
    if second > 7:
        second += 1
    # turn the indices into the two-letter square name
    return chr(ord('a') + first) + chr(ord('a') + second)
###
# Get the tile numbers from an OS grid reference,
# Derived from http://www.movable-type.co.uk/scripts/latlong-gridref.html"
###
def getNumbers(x, y):
    "Get the tile numbers from an OS grid reference."
    # TODO: Validate coordinates!!
    # second significant digit of each coordinate within its 100km square
    tile_e = int(math.floor(x % 100000) / 10000)
    tile_n = int(math.floor(y % 100000) / 10000)
    return "%d%d" % (tile_e, tile_n)
###
# Get the tile path required for a given an OS grid reference,
###
def getPath(x, y, dd):
    "Get the tile path required for a given an OS grid reference."
    # two-letter square name plus the two tile digits
    square = getLetters(x, y)
    tile = getNumbers(x, y)
    # e.g. <dd>/sv/SV00.asc
    return '/'.join([dd, square, square.upper() + tile + ".asc"])
###
# Construct a blank data tile if it is missing
###
def makeBlankTile(x, y, resolution, t, dd):
    """Construct a blank (all-zero) ESRI ASCII grid tile.

    Args:
        x, y: any OS easting/northing inside the required tile
        resolution: cell size in metres
        t: tile size in metres (must be a multiple of resolution)
        dd: data directory root; the tile is written under <dd>/_blanks/

    Returns:
        the path of the tile file written.
    """
    # get path to write to
    path = dd + "/_blanks/"
    # get the components
    letters = getLetters(x, y)
    numbers = getNumbers(x, y)
    # make sure the directory exists...
    if not os.path.exists(path):
        os.makedirs(path)
    # get file name to write to
    filePath = path + letters + numbers + ".asc"
    # get coordinate for bottom left corner
    tx = math.floor(x / t) * t
    ty = math.floor(y / t) * t
    # tile size in cells (square); // keeps this an int on both Python 2
    # and 3 -- the original "/" broke range() under Python 3
    ncols = t // resolution
    # "with" guarantees the handle is closed even if a write fails
    # (the original leaked the handle on error)
    with open(filePath, "w") as fo:
        # write headers
        fo.write("ncols " + str(ncols) + "\n")
        fo.write("nrows " + str(ncols) + "\n")
        fo.write("xllcorner " + str(tx) + "\n")
        fo.write("yllcorner " + str(ty) + "\n")
        fo.write("cellsize " + str(resolution) + "\n")
        fo.write("NODATA_value -9999\n")
        # write data: one row of zeros per line (built once, reused --
        # the original rebuilt it per row with a shadowed loop variable)
        row = "0.0 " * ncols
        for _ in range(ncols):
            fo.write(row + "\n")
    # return new path
    return filePath
###
# Build a textfile list of paths required for the VRT, for use with gdalbuildvrt
# This version uses a single point and a radius to define the required area
###
def buildVRT1(x, y, r, t, wd, dd):
    "Build a textfile list of paths required for the VRT, for use with gdalbuildvrt. This version uses a single point and a radius to define the required area"
    # get top left coordinates required (r metres each way from the point)
    tlx = x - r
    tly = y + r
    # get bottom right coordinates required
    brx = x + r
    bry = y - r
    #print tlx, tly, brx, bry
    # prepare the file
    # NOTE(review): the list is written into wd, but gdalbuildvrt below reads
    # the *relative* path "in.txt" -- these only match when wd == ".".
    f = open("/".join([wd,'in.txt']), 'w')
    # get all tiles in question and write to file (1 tile buffer around all sides for edge cases)
    # NOTE(review): xrange makes this Python-2-only; the t-metre stepping
    # assumes coordinates roughly aligned to the tile grid -- TODO confirm.
    for xi in xrange(tlx - t, brx + t, t):
        for yi in xrange(bry - t, tly + t, t):
            # get the path of the tile containing the required point
            path = getPath(xi, yi, dd)
            # verify that the file exists... otherwise make one
            # (blank tiles are created at a hard-coded 50m resolution)
            if(os.path.exists(path)):
                f.write(path + '\n')
            else:
                path = makeBlankTile(xi, yi, 50, t, dd)
                f.write(path + '\n')
    # finish the file
    f.close()
    # build vrt
    subprocess.call(["gdalbuildvrt", "-q", "-overwrite", "-input_file_list", "in.txt", "out.vrt"])
###
# Build a textfile list of paths required for the VRT, for use with gdalbuildvrt
# This version uses two points to define the required area
###
def buildVRT2(x1, y1, x2, y2, t, wd, dd):
    "Build a textfile list of paths required for the VRT, for use with gdalbuildvrt. This version uses two points to define the required area."
    # get top left coordinates required (bounding box of the two points)
    tlx = min(x1, x2)
    tly = max(y1, y2)
    # get bottom right coordinates required
    brx = max(x1, x2)
    bry = min(y1, y2)
    # print tlx, tly, brx, bry
    # prepare the file
    # NOTE(review): written into wd but gdalbuildvrt reads relative "in.txt";
    # only consistent when wd == ".".
    f = open("/".join([wd,'in.txt']), 'w')
    # get all tiles in question and write to file (1 tile buffer around all sides for edge cases)
    for xi in xrange(tlx - t, brx + t, t):
        for yi in xrange(bry - t, tly + t, t):
            # get the path of the tile containing the required point
            path = getPath(xi, yi, dd)
            # verify that the file exists... otherwise make one
            if(os.path.exists(path)):
                f.write(path + '\n')
            else:
                path = makeBlankTile(xi, yi, 50, t, dd)
                f.write(path + '\n')
    # finish the file
    f.close()
    # build vrt
    subprocess.call(["gdalbuildvrt", "-q", "-overwrite", "-input_file_list", "in.txt", "out.vrt"])
#######################################################################
# Main script (Python 2): for every tower, test line-of-sight to every
# other tower within 30km and record visible pairs in i_visibility.
# path to working directory
wd = "."
# path to data directory
dd = "/Users/jonnyhuck/Dropbox/Maps/_OS_OPEN_DATA/terr50_gagg_gb/data"
# Connect to an existing database
conn = psycopg2.connect("dbname=viewshed user=jonnyhuck")
# Open a cursor to perform database operations
# (three cursors: origin scan, destination query, insert)
cur = conn.cursor()
cur2 = conn.cursor()
cur3 = conn.cursor()
# Query the database for all towers
cur.execute("SELECT * FROM towers;")
# loop through each origin tower
# (tower columns: presumably id, name, easting, northing -- TODO confirm schema)
for tower in cur:
    # print name of the origin tower
    print tower[1]
    # get all of the destination towers within 30km of the origin tower
    cur2.execute("select id_tower, tower_name, easting, northing from towers where id_tower != %s and st_dwithin(geom, ST_SetSRID(ST_POINT(%s, %s), 27700), %s);", (tower[0], tower[2], tower[3], 30000))
    # build the vrt data tile (this way only do it once per origin)
    buildVRT1(tower[2], tower[3], 30000, 10000, wd, dd)
    # verify that there were results
    if(cur2.rowcount > 0):
        # loop through each destination tower
        for row in cur2:
            # test for line of sight
            output = viewshed.doLoS(50, tower[2], tower[3], row[2], row[3], 40, 40, "out.vrt")
            # print the name of all the towers it can see
            if(output>0):
                # print " " + row[1]
                cur3.execute("insert into i_visibility (origin, destination) values (%s, %s);", (tower[0], row[0]))
    # make the changes to the database persistent
    conn.commit()
    # print "----------------------------"
# make the changes to the database persistent
# conn.commit()
# close communication with the database
cur.close()
cur2.close()
cur3.close()
conn.close() | jonnyhuck/Viewshed | pgLOS.py | Python | gpl-3.0 | 7,481 |
from __future__ import unicode_literals
import json
from moto.swf.exceptions import (
SWFClientError,
SWFUnknownResourceFault,
SWFDomainAlreadyExistsFault,
SWFDomainDeprecatedFault,
SWFSerializationException,
SWFTypeAlreadyExistsFault,
SWFTypeDeprecatedFault,
SWFWorkflowExecutionAlreadyStartedFault,
SWFDefaultUndefinedFault,
SWFValidationException,
SWFDecisionValidationException,
)
from moto.swf.models import (
WorkflowType,
)
def test_swf_client_error():
    """The generic client error serializes its type and message verbatim."""
    exc = SWFClientError("ASpecificType", "error message")
    exc.code.should.equal(400)
    body = json.loads(exc.get_body())
    body.should.equal({
        "__type": "ASpecificType",
        "message": "error message"
    })
def test_swf_unknown_resource_fault():
    """With two arguments the fault message is 'Unknown <type>: <detail>'."""
    exc = SWFUnknownResourceFault("type", "detail")
    exc.code.should.equal(400)
    body = json.loads(exc.get_body())
    body.should.equal({
        "__type": "com.amazonaws.swf.base.model#UnknownResourceFault",
        "message": "Unknown type: detail"
    })
def test_swf_unknown_resource_fault_with_only_one_parameter():
    """With one argument the fault message is just 'Unknown <detail>'."""
    exc = SWFUnknownResourceFault("foo bar baz")
    exc.code.should.equal(400)
    body = json.loads(exc.get_body())
    body.should.equal({
        "__type": "com.amazonaws.swf.base.model#UnknownResourceFault",
        "message": "Unknown foo bar baz"
    })
def test_swf_domain_already_exists_fault():
    """Duplicate-domain fault carries the domain name as its message."""
    exc = SWFDomainAlreadyExistsFault("domain-name")
    exc.code.should.equal(400)
    body = json.loads(exc.get_body())
    body.should.equal({
        "__type": "com.amazonaws.swf.base.model#DomainAlreadyExistsFault",
        "message": "domain-name"
    })
def test_swf_domain_deprecated_fault():
    """Deprecated-domain fault carries the domain name as its message."""
    exc = SWFDomainDeprecatedFault("domain-name")
    exc.code.should.equal(400)
    body = json.loads(exc.get_body())
    body.should.equal({
        "__type": "com.amazonaws.swf.base.model#DomainDeprecatedFault",
        "message": "domain-name"
    })
def test_swf_serialization_exception():
    """Serialization failures embed the offending value in a fixed message."""
    exc = SWFSerializationException("value")
    exc.code.should.equal(400)
    body = json.loads(exc.get_body())
    body.should.equal({
        "__type": "com.amazonaws.swf.base.model#SerializationException",
        "message": "class java.lang.Foo can not be converted to an String (not a real SWF exception ; happened on: value)"
    })
def test_swf_type_already_exists_fault():
    """Duplicate-type fault renders the workflow type's name and version."""
    workflow_type = WorkflowType("wf-name", "wf-version")
    exc = SWFTypeAlreadyExistsFault(workflow_type)
    exc.code.should.equal(400)
    body = json.loads(exc.get_body())
    body.should.equal({
        "__type": "com.amazonaws.swf.base.model#TypeAlreadyExistsFault",
        "message": "WorkflowType=[name=wf-name, version=wf-version]"
    })
def test_swf_type_deprecated_fault():
    """Deprecated-type fault renders the workflow type's name and version."""
    workflow_type = WorkflowType("wf-name", "wf-version")
    exc = SWFTypeDeprecatedFault(workflow_type)
    exc.code.should.equal(400)
    body = json.loads(exc.get_body())
    body.should.equal({
        "__type": "com.amazonaws.swf.base.model#TypeDeprecatedFault",
        "message": "WorkflowType=[name=wf-name, version=wf-version]"
    })
def test_swf_workflow_execution_already_started_fault():
    """Already-started fault takes no arguments and has a fixed message."""
    exc = SWFWorkflowExecutionAlreadyStartedFault()
    exc.code.should.equal(400)
    body = json.loads(exc.get_body())
    body.should.equal({
        "__type": "com.amazonaws.swf.base.model#WorkflowExecutionAlreadyStartedFault",
        'message': 'Already Started',
    })
def test_swf_default_undefined_fault():
    """The snake_case key is camelCased in the fault message."""
    exc = SWFDefaultUndefinedFault("execution_start_to_close_timeout")
    exc.code.should.equal(400)
    body = json.loads(exc.get_body())
    body.should.equal({
        "__type": "com.amazonaws.swf.base.model#DefaultUndefinedFault",
        "message": "executionStartToCloseTimeout",
    })
def test_swf_validation_exception():
    """Validation errors use the coral validate type with the given message."""
    exc = SWFValidationException("Invalid token")
    exc.code.should.equal(400)
    body = json.loads(exc.get_body())
    body.should.equal({
        "__type": "com.amazon.coral.validate#ValidationException",
        "message": "Invalid token",
    })
def test_swf_decision_validation_error():
    """Multiple decision validation errors are aggregated into one message."""
    exc = SWFDecisionValidationException([
        {"type": "null_value",
         "where": "decisions.1.member.startTimerDecisionAttributes.startToFireTimeout"},
        {"type": "bad_decision_type",
         "value": "FooBar",
         "where": "decisions.1.member.decisionType",
         "possible_values": "Foo, Bar, Baz"},
    ])
    exc.code.should.equal(400)
    exc.error_type.should.equal("com.amazon.coral.validate#ValidationException")
    body = exc.get_body()
    # the error count prefix, then one sentence per validation failure
    body.should.match(r"2 validation errors detected:")
    body.should.match(
        r"Value null at 'decisions.1.member.startTimerDecisionAttributes.startToFireTimeout' "
        r"failed to satisfy constraint: Member must not be null;"
    )
    body.should.match(
        r"Value 'FooBar' at 'decisions.1.member.decisionType' failed to satisfy constraint: "
        r"Member must satisfy enum value set: \[Foo, Bar, Baz\]"
    )
| silveregg/moto | tests/test_swf/test_exceptions.py | Python | apache-2.0 | 4,812 |
# mako/codegen.py
# Copyright (C) 2006-2012 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""provides functionality for rendering a parsetree constructing into module
source code."""
import time
import re
from mako.pygen import PythonPrinter
from mako import util, ast, parsetree, filters, exceptions
# emitted into each generated module as _magic_number -- presumably bumped
# whenever the generated-module format changes so stale compiled templates
# can be detected (TODO confirm against mako's module loader)
MAGIC_NUMBER = 8
# names which are hardwired into the
# template and are not accessed via the
# context itself
RESERVED_NAMES = set(['context', 'loop', 'UNDEFINED'])
def compile(node,
            uri,
            filename=None,
            default_filters=None,
            buffer_filters=None,
            imports=None,
            source_encoding=None,
            generate_magic_comment=True,
            disable_unicode=False,
            strict_undefined=False,
            enable_loop=True,
            reserved_names=()):
    """Generate module source code given a parsetree node,
    uri, and optional source filename.

    All remaining keyword arguments are compiler options that are bundled
    into a :class:`._CompileContext` and consumed by
    :class:`._GenerateRenderMethod`; the generated module source is
    returned as a string.
    """
    # if on Py2K, push the "source_encoding" string to be
    # a bytestring itself, as we will be embedding it into
    # the generated source and we don't want to coerce the
    # result into a unicode object, in "disable_unicode" mode
    if not util.py3k and isinstance(source_encoding, unicode):
        # NOTE: encodes the encoding *name* using itself as the codec
        # (e.g. u"utf-8".encode("utf-8")) -- safe for ASCII-named codecs
        source_encoding = source_encoding.encode(source_encoding)
    buf = util.FastEncodingBuffer()
    printer = PythonPrinter(buf)
    # the visitor writes the whole module into buf as a side effect
    _GenerateRenderMethod(printer,
                          _CompileContext(uri,
                                          filename,
                                          default_filters,
                                          buffer_filters,
                                          imports,
                                          source_encoding,
                                          generate_magic_comment,
                                          disable_unicode,
                                          strict_undefined,
                                          enable_loop,
                                          reserved_names),
                          node)
    return buf.getvalue()
class _CompileContext(object):
def __init__(self,
uri,
filename,
default_filters,
buffer_filters,
imports,
source_encoding,
generate_magic_comment,
disable_unicode,
strict_undefined,
enable_loop,
reserved_names):
self.uri = uri
self.filename = filename
self.default_filters = default_filters
self.buffer_filters = buffer_filters
self.imports = imports
self.source_encoding = source_encoding
self.generate_magic_comment = generate_magic_comment
self.disable_unicode = disable_unicode
self.strict_undefined = strict_undefined
self.enable_loop = enable_loop
self.reserved_names = reserved_names
class _GenerateRenderMethod(object):
    """A template visitor object which generates the
    full module source for a template.

    Writes the module prologue plus one ``render_*`` callable for the
    given node, recursing to emit a callable per top-level def/block.
    """
    def __init__(self, printer, compiler, node):
        """Generate the render callable for *node*.

        When *node* is a def/block tag, a ``render_<funcname>`` callable is
        produced.  Otherwise the module prologue is written first
        (:meth:`write_toplevel`), the top-level ``render_body`` is produced,
        and the visitor then recurses into each top-level def.
        """
        self.printer = printer
        self.last_source_line = -1
        self.compiler = compiler
        self.node = node
        # stack of _Identifiers scopes; top of stack is the active scope
        self.identifier_stack = [None]
        self.in_def = isinstance(node, (parsetree.DefTag, parsetree.BlockTag))
        if self.in_def:
            name = "render_%s" % node.funcname
            args = node.get_argument_expressions()
            filtered = len(node.filter_args.args) > 0
            # 'buffered'/'cached' tag attributes are Python expressions
            buffered = eval(node.attributes.get('buffered', 'False'))
            cached = eval(node.attributes.get('cached', 'False'))
            defs = None
            pagetag = None
            if node.is_block and not node.is_anonymous:
                args += ['**pageargs']
        else:
            # top-level template body: emit the module prologue first
            defs = self.write_toplevel()
            pagetag = self.compiler.pagetag
            name = "render_body"
            if pagetag is not None:
                args = pagetag.body_decl.get_argument_expressions()
                if not pagetag.body_decl.kwargs:
                    args += ['**pageargs']
                cached = eval(pagetag.attributes.get('cached', 'False'))
                self.compiler.enable_loop = self.compiler.enable_loop or eval(
                    pagetag.attributes.get(
                        'enable_loop', 'False')
                )
            else:
                args = ['**pageargs']
                cached = False
            buffered = filtered = False
        if args is None:
            args = ['context']
        else:
            # 'context' is always the first generated parameter
            args = [a for a in ['context'] + args]
        self.write_render_callable(
            pagetag or node,
            name, args,
            buffered, filtered, cached)
        # recurse for each top-level def collected by write_toplevel()
        if defs is not None:
            for node in defs:
                _GenerateRenderMethod(printer, compiler, node)
    @property
    def identifiers(self):
        # identifier scope currently in effect (top of the stack)
        return self.identifier_stack[-1]
    def write_toplevel(self):
        """Traverse a template structure for module-level directives and
        generate the start of module-level code.

        Returns the list of top-level def nodes, which the caller then
        renders recursively.
        """
        inherit = []
        namespaces = {}
        module_code = []
        # NOTE(review): 'encoding' is assigned but never used in this method
        encoding =[None]
        self.compiler.pagetag = None
        # one-shot visitor that collects the module-level directive tags
        class FindTopLevel(object):
            def visitInheritTag(s, node):
                inherit.append(node)
            def visitNamespaceTag(s, node):
                namespaces[node.name] = node
            def visitPageTag(s, node):
                self.compiler.pagetag = node
            def visitCode(s, node):
                if node.ismodule:
                    module_code.append(node)
        f = FindTopLevel()
        for n in self.node.nodes:
            n.accept_visitor(f)
        self.compiler.namespaces = namespaces
        # names declared by <%! %> module-level code blocks
        module_ident = set()
        for n in module_code:
            module_ident = module_ident.union(n.declared_identifiers())
        module_identifiers = _Identifiers(self.compiler)
        module_identifiers.declared = module_ident
        # module-level names, python code
        if self.compiler.generate_magic_comment and \
                self.compiler.source_encoding:
            self.printer.writeline("# -*- encoding:%s -*-" %
                                   self.compiler.source_encoding)
        self.printer.writeline("from mako import runtime, filters, cache")
        self.printer.writeline("UNDEFINED = runtime.UNDEFINED")
        self.printer.writeline("__M_dict_builtin = dict")
        self.printer.writeline("__M_locals_builtin = locals")
        self.printer.writeline("_magic_number = %r" % MAGIC_NUMBER)
        self.printer.writeline("_modified_time = %r" % time.time())
        self.printer.writeline("_enable_loop = %r" % self.compiler.enable_loop)
        self.printer.writeline(
            "_template_filename = %r" % self.compiler.filename)
        self.printer.writeline("_template_uri = %r" % self.compiler.uri)
        self.printer.writeline(
            "_source_encoding = %r" % self.compiler.source_encoding)
        # emit user-supplied imports both into the module and into an AST
        # so their declared names join the module identifier set
        if self.compiler.imports:
            buf = ''
            for imp in self.compiler.imports:
                buf += imp + "\n"
                self.printer.writeline(imp)
            impcode = ast.PythonCode(
                buf,
                source='', lineno=0,
                pos=0,
                filename='template defined imports')
        else:
            impcode = None
        main_identifiers = module_identifiers.branch(self.node)
        module_identifiers.topleveldefs = \
            module_identifiers.topleveldefs.\
            union(main_identifiers.topleveldefs)
        module_identifiers.declared.add("UNDEFINED")
        if impcode:
            module_identifiers.declared.update(impcode.declared_identifiers)
        self.compiler.identifiers = module_identifiers
        self.printer.writeline("_exports = %r" %
                               [n.name for n in
                                main_identifiers.topleveldefs.values()]
                               )
        self.printer.write("\n\n")
        if len(module_code):
            self.write_module_code(module_code)
        if len(inherit):
            self.write_namespaces(namespaces)
            self.write_inherit(inherit[-1])
        elif len(namespaces):
            self.write_namespaces(namespaces)
        return main_identifiers.topleveldefs.values()
    def write_render_callable(self, node, name, args, buffered, filtered,
                              cached):
        """write a top-level render callable.

        this could be the main render() method or that of a top-level def.
        ``buffered``/``filtered``/``cached`` switch on the corresponding
        wrapping behavior in the emitted function body.
        """
        if self.in_def:
            decorator = node.decorator
            if decorator:
                self.printer.writeline(
                    "@runtime._decorate_toplevel(%s)" % decorator)
        self.printer.writelines(
            "def %s(%s):" % (name, ','.join(args)),
            # push new frame, assign current frame to __M_caller
            "__M_caller = context.caller_stack._push_frame()",
            "try:"
        )
        if buffered or filtered or cached:
            self.printer.writeline("context._push_buffer()")
        # enter a fresh identifier scope for this callable
        self.identifier_stack.append(
            self.compiler.identifiers.branch(self.node))
        if (not self.in_def or self.node.is_block) and '**pageargs' in args:
            self.identifier_stack[-1].argument_declared.add('pageargs')
        # __M_locals tracks assigned names so nested defs can see them
        if not self.in_def and (
                len(self.identifiers.locally_assigned) > 0 or
                len(self.identifiers.argument_declared) > 0
        ):
            self.printer.writeline("__M_locals = __M_dict_builtin(%s)" %
                                   ','.join([
                                       "%s=%s" % (x, x) for x in
                                       self.identifiers.argument_declared
                                   ]))
        self.write_variable_declares(self.identifiers, toplevel=True)
        for n in self.node.nodes:
            n.accept_visitor(self)
        self.write_def_finish(self.node, buffered, filtered, cached)
        self.printer.writeline(None)
        self.printer.write("\n\n")
        if cached:
            self.write_cache_decorator(
                node, name,
                args, buffered,
                self.identifiers, toplevel=True)
    def write_module_code(self, module_code):
        """write module-level template code, i.e. that which
        is enclosed in <%! %> tags in the template.

        Each block is emitted verbatim, preceded by a source-line comment.
        """
        for n in module_code:
            self.write_source_comment(n)
            self.printer.write_indented_block(n.text)
    def write_inherit(self, node):
        """write the module-level inheritance-determination callable.

        Emits ``_mako_inherit`` which resolves the parent template from the
        ``<%inherit file=...>`` expression at render time.
        """
        self.printer.writelines(
            "def _mako_inherit(template, context):",
            "_mako_generate_namespaces(context)",
            "return runtime._inherit_from(context, %s, _template_uri)" %
            (node.parsed_attributes['file']),
            None
        )
    def write_namespaces(self, namespaces):
        """write the module-level namespace-generating callable.

        Emits ``_mako_get_namespace`` (cached lookup) and
        ``_mako_generate_namespaces`` (builds one runtime Namespace per
        ``<%namespace>`` tag -- file-, module- or inline-callable-based).
        """
        self.printer.writelines(
            "def _mako_get_namespace(context, name):",
            "try:",
            "return context.namespaces[(__name__, name)]",
            "except KeyError:",
            "_mako_generate_namespaces(context)",
            "return context.namespaces[(__name__, name)]",
            None,None
        )
        self.printer.writeline("def _mako_generate_namespaces(context):")
        for node in namespaces.values():
            # NOTE(review): has_key() is Python-2-only
            if node.attributes.has_key('import'):
                self.compiler.has_ns_imports = True
            self.write_source_comment(node)
            if len(node.nodes):
                # the namespace tag has a body: emit its defs as callables
                self.printer.writeline("def make_namespace():")
                export = []
                identifiers = self.compiler.identifiers.branch(node)
                self.in_def = True
                class NSDefVisitor(object):
                    def visitDefTag(s, node):
                        s.visitDefOrBase(node)
                    def visitBlockTag(s, node):
                        s.visitDefOrBase(node)
                    def visitDefOrBase(s, node):
                        if node.is_anonymous:
                            raise exceptions.CompileException(
                                "Can't put anonymous blocks inside "
                                "<%namespace>",
                                **node.exception_kwargs
                            )
                        self.write_inline_def(node, identifiers, nested=False)
                        export.append(node.funcname)
                vis = NSDefVisitor()
                for n in node.nodes:
                    n.accept_visitor(vis)
                self.printer.writeline("return [%s]" % (','.join(export)))
                self.printer.writeline(None)
                self.in_def = False
                callable_name = "make_namespace()"
            else:
                callable_name = "None"
            # choose the runtime namespace flavor by tag attribute
            if 'file' in node.parsed_attributes:
                self.printer.writeline(
                    "ns = runtime.TemplateNamespace(%r,"
                    " context._clean_inheritance_tokens(),"
                    " templateuri=%s, callables=%s, "
                    " calling_uri=_template_uri)" %
                    (
                        node.name,
                        node.parsed_attributes.get('file', 'None'),
                        callable_name,
                    )
                )
            elif 'module' in node.parsed_attributes:
                self.printer.writeline(
                    "ns = runtime.ModuleNamespace(%r,"
                    " context._clean_inheritance_tokens(),"
                    " callables=%s, calling_uri=_template_uri,"
                    " module=%s)" %
                    (
                        node.name,
                        callable_name,
                        node.parsed_attributes.get('module', 'None')
                    )
                )
            else:
                self.printer.writeline(
                    "ns = runtime.Namespace(%r,"
                    " context._clean_inheritance_tokens(),"
                    " callables=%s, calling_uri=_template_uri)" %
                    (
                        node.name,
                        callable_name,
                    )
                )
            if eval(node.attributes.get('inheritable', "False")):
                self.printer.writeline("context['self'].%s = ns" % (node.name))
            self.printer.writeline(
                "context.namespaces[(__name__, %s)] = ns" % repr(node.name))
            self.printer.write("\n")
        if not len(namespaces):
            self.printer.writeline("pass")
        self.printer.writeline(None)
    def write_variable_declares(self, identifiers, toplevel=False, limit=None):
        """write variable declarations at the top of a function.

        the variable declarations are in the form of callable
        definitions for defs and/or name lookup within the
        function's context argument. the names declared are based
        on the names that are referenced in the function body,
        which don't otherwise have any explicit assignment
        operation. names that are assigned within the body are
        assumed to be locally-scoped variables and are not
        separately declared.

        for def callable definitions, if the def is a top-level
        callable then a 'stub' callable is generated which wraps
        the current Context into a closure. if the def is not
        top-level, it is fully rendered as a local closure.

        """

        # collection of all defs available to us in this scope
        comp_idents = dict([(c.funcname, c) for c in identifiers.defs])
        to_write = set()

        # write "context.get()" for all variables we are going to
        # need that arent in the namespace yet
        to_write = to_write.union(identifiers.undeclared)

        # write closure functions for closures that we define
        # right here
        to_write = to_write.union(
            [c.funcname for c in identifiers.closuredefs.values()])

        # remove identifiers that are declared in the argument
        # signature of the callable
        to_write = to_write.difference(identifiers.argument_declared)

        # remove identifiers that we are going to assign to.
        # in this way we mimic Python's behavior,
        # i.e. assignment to a variable within a block
        # means that variable is now a "locally declared" var,
        # which cannot be referenced beforehand.
        to_write = to_write.difference(identifiers.locally_declared)

        # 'loop' is declared specially via the LoopStack runtime helper
        if self.compiler.enable_loop:
            has_loop = "loop" in to_write
            to_write.discard("loop")
        else:
            has_loop = False

        # if a limiting set was sent, constraint to those items in that list
        # (this is used for the caching decorator)
        if limit is not None:
            to_write = to_write.intersection(limit)

        if toplevel and getattr(self.compiler, 'has_ns_imports', False):
            self.printer.writeline("_import_ns = {}")
            self.compiler.has_imports = True
            # NOTE(review): iteritems()/has_key() are Python-2-only
            for ident, ns in self.compiler.namespaces.iteritems():
                if ns.attributes.has_key('import'):
                    self.printer.writeline(
                        "_mako_get_namespace(context, %r)."\
                        "_populate(_import_ns, %r)" %
                        (
                            ident,
                            re.split(r'\s*,\s*', ns.attributes['import'])
                        ))

        if has_loop:
            self.printer.writeline(
                'loop = __M_loop = runtime.LoopStack()'
            )

        for ident in to_write:
            if ident in comp_idents:
                # the name refers to a def: emit a stub or inline closure
                comp = comp_idents[ident]
                if comp.is_block:
                    if not comp.is_anonymous:
                        self.write_def_decl(comp, identifiers)
                    else:
                        self.write_inline_def(comp, identifiers, nested=True)
                else:
                    if comp.is_root():
                        self.write_def_decl(comp, identifiers)
                    else:
                        self.write_inline_def(comp, identifiers, nested=True)
            elif ident in self.compiler.namespaces:
                self.printer.writeline(
                    "%s = _mako_get_namespace(context, %r)" %
                    (ident, ident)
                )
            else:
                # plain name: look it up in namespace imports and/or context;
                # strict_undefined emits an eager NameError instead of UNDEFINED
                if getattr(self.compiler, 'has_ns_imports', False):
                    if self.compiler.strict_undefined:
                        self.printer.writelines(
                            "%s = _import_ns.get(%r, UNDEFINED)" %
                            (ident, ident),
                            "if %s is UNDEFINED:" % ident,
                            "try:",
                            "%s = context[%r]" % (ident, ident),
                            "except KeyError:",
                            "raise NameError(\"'%s' is not defined\")" %
                            ident,
                            None, None
                        )
                    else:
                        self.printer.writeline(
                            "%s = _import_ns.get(%r, context.get(%r, UNDEFINED))" %
                            (ident, ident, ident))
                else:
                    if self.compiler.strict_undefined:
                        self.printer.writelines(
                            "try:",
                            "%s = context[%r]" % (ident, ident),
                            "except KeyError:",
                            "raise NameError(\"'%s' is not defined\")" %
                            ident,
                            None
                        )
                    else:
                        self.printer.writeline(
                            "%s = context.get(%r, UNDEFINED)" % (ident, ident)
                        )

        self.printer.writeline("__M_writer = context.writer()")
    def write_source_comment(self, node):
        """write a source comment containing the line number of the
        corresponding template line.

        Skips the comment when the line number has not advanced, so runs of
        nodes on one template line share a single marker.
        """
        if self.last_source_line != node.lineno:
            self.printer.writeline("# SOURCE LINE %d" % node.lineno)
            self.last_source_line = node.lineno
    def write_def_decl(self, node, identifiers):
        """write a locally-available callable referencing a top-level def.

        The stub forwards to ``render_<funcname>``, injecting the current
        context (wrapped with the caller's locals when any exist).
        """
        funcname = node.funcname
        namedecls = node.get_argument_expressions()
        nameargs = node.get_argument_expressions(include_defaults=False)
        if not self.in_def and (
                len(self.identifiers.locally_assigned) > 0 or
                len(self.identifiers.argument_declared) > 0):
            nameargs.insert(0, 'context.locals_(__M_locals)')
        else:
            nameargs.insert(0, 'context')
        self.printer.writeline("def %s(%s):" % (funcname, ",".join(namedecls)))
        self.printer.writeline(
            "return render_%s(%s)" % (funcname, ",".join(nameargs)))
        self.printer.writeline(None)
    def write_inline_def(self, node, identifiers, nested):
        """write a locally-available def callable inside an enclosing def.

        Unlike :meth:`write_def_decl`, the full body is rendered here as a
        closure over the enclosing scope.
        """
        namedecls = node.get_argument_expressions()
        decorator = node.decorator
        if decorator:
            self.printer.writeline(
                "@runtime._decorate_inline(context, %s)" % decorator)
        self.printer.writeline(
            "def %s(%s):" % (node.funcname, ",".join(namedecls)))
        filtered = len(node.filter_args.args) > 0
        # 'buffered'/'cached' tag attributes are Python expressions
        buffered = eval(node.attributes.get('buffered', 'False'))
        cached = eval(node.attributes.get('cached', 'False'))
        self.printer.writelines(
            # push new frame, assign current frame to __M_caller
            "__M_caller = context.caller_stack._push_frame()",
            "try:"
        )
        if buffered or filtered or cached:
            self.printer.writelines(
                "context._push_buffer()",
            )
        identifiers = identifiers.branch(node, nested=nested)
        self.write_variable_declares(identifiers)
        self.identifier_stack.append(identifiers)
        for n in node.nodes:
            n.accept_visitor(self)
        self.identifier_stack.pop()
        self.write_def_finish(node, buffered, filtered, cached)
        self.printer.writeline(None)
        if cached:
            self.write_cache_decorator(node, node.funcname,
                                       namedecls, False, identifiers,
                                       inline=True, toplevel=False)
    def write_def_finish(self, node, buffered, filtered, cached,
                         callstack=True):
        """write the end section of a rendering function, either outermost or
        inline.

        this takes into account if the rendering function was filtered,
        buffered, etc. and closes the corresponding try: block if any, and
        writes code to retrieve captured content, apply filters, send proper
        return value."""
        if not buffered and not cached and not filtered:
            self.printer.writeline("return ''")
            if callstack:
                self.printer.writelines(
                    "finally:",
                    "context.caller_stack._pop_frame()",
                    None
                )
        if buffered or filtered or cached:
            if buffered or cached:
                # in a caching scenario, don't try to get a writer
                # from the context after popping; assume the caching
                # implemenation might be using a context with no
                # extra buffers
                self.printer.writelines(
                    "finally:",
                    "__M_buf = context._pop_buffer()"
                )
            else:
                self.printer.writelines(
                    "finally:",
                    "__M_buf, __M_writer = context._pop_buffer_and_writer()"
                )
            if callstack:
                self.printer.writeline("context.caller_stack._pop_frame()")
            # captured content, optionally run through def and buffer filters
            s = "__M_buf.getvalue()"
            if filtered:
                s = self.create_filter_callable(node.filter_args.args, s,
                                                False)
            self.printer.writeline(None)
            if buffered and not cached:
                s = self.create_filter_callable(self.compiler.buffer_filters,
                                                s, False)
            if buffered or cached:
                self.printer.writeline("return %s" % s)
            else:
                self.printer.writelines(
                    "__M_writer(%s)" % s,
                    "return ''"
                )
    def write_cache_decorator(self, node_or_pagetag, name,
                                args, buffered, identifiers,
                                inline=False, toplevel=False):
        """write a post-function decorator to replace a rendering
        callable with a cached version of itself."""

        # keep the original callable under a mangled name; the wrapper
        # defined below delegates to it through the cache
        self.printer.writeline("__M_%s = %s" % (name, name))
        cachekey = node_or_pagetag.parsed_attributes.get('cache_key',
                                                        repr(name))

        cache_args = {}
        # page-level cache_* attributes act as defaults, which the node's
        # own cache_* attributes then override; 'cache_key' is excluded
        # since it is handled separately above
        if self.compiler.pagetag is not None:
            cache_args.update(
                (
                    pa[6:],
                    self.compiler.pagetag.parsed_attributes[pa]
                )
                for pa in self.compiler.pagetag.parsed_attributes
                if pa.startswith('cache_') and pa != 'cache_key'
            )
        cache_args.update(
            (
                pa[6:],
                node_or_pagetag.parsed_attributes[pa]
            ) for pa in node_or_pagetag.parsed_attributes
            if pa.startswith('cache_') and pa != 'cache_key'
        )
        if 'timeout' in cache_args:
            # attribute values are source-code expressions; evaluate to int
            cache_args['timeout'] = int(eval(cache_args['timeout']))

        # the replacement function carries the original signature
        self.printer.writeline("def %s(%s):" % (name, ','.join(args)))

        # form "arg1, arg2, arg3=arg3, arg4=arg4", etc.
        pass_args = [
            '=' in a and "%s=%s" % ((a.split('=')[0],) * 2) or a
            for a in args
        ]

        self.write_variable_declares(
            identifiers,
            toplevel=toplevel,
            limit=node_or_pagetag.undeclared_identifiers()
        )
        if buffered:
            s = "context.get('local')."\
                "cache._ctx_get_or_create("\
                "%s, lambda:__M_%s(%s), context, %s__M_defname=%r)" % \
                (cachekey, name, ','.join(pass_args),
                 ''.join(["%s=%s, " % (k, v)
                          for k, v in cache_args.items()]),
                 name
                 )
            # apply buffer_filters
            s = self.create_filter_callable(self.compiler.buffer_filters, s,
                                            False)
            self.printer.writelines("return " + s, None)
        else:
            self.printer.writelines(
                "__M_writer(context.get('local')."
                "cache._ctx_get_or_create("
                "%s, lambda:__M_%s(%s), context, %s__M_defname=%r))" %
                (cachekey, name, ','.join(pass_args),
                 ''.join(["%s=%s, " % (k, v)
                          for k, v in cache_args.items()]),
                 name,
                 ),
                "return ''",
                None
            )
def create_filter_callable(self, args, target, is_expression):
"""write a filter-applying expression based on the filters
present in the given filter names, adjusting for the global
'default' filter aliases as needed."""
def locate_encode(name):
if re.match(r'decode\..+', name):
return "filters." + name
elif self.compiler.disable_unicode:
return filters.NON_UNICODE_ESCAPES.get(name, name)
else:
return filters.DEFAULT_ESCAPES.get(name, name)
if 'n' not in args:
if is_expression:
if self.compiler.pagetag:
args = self.compiler.pagetag.filter_args.args + args
if self.compiler.default_filters:
args = self.compiler.default_filters + args
for e in args:
# if filter given as a function, get just the identifier portion
if e == 'n':
continue
m = re.match(r'(.+?)(\(.*\))', e)
if m:
(ident, fargs) = m.group(1,2)
f = locate_encode(ident)
e = f + fargs
else:
x = e
e = locate_encode(e)
assert e is not None
target = "%s(%s)" % (e, target)
return target
    def visitExpression(self, node):
        """Render an expression substitution, applying its per-expression
        escapes plus any page-level and default filters."""
        self.write_source_comment(node)
        # only go through create_filter_callable() when some filter
        # actually applies; otherwise write the raw expression
        if len(node.escapes) or \
                (
                    self.compiler.pagetag is not None and
                    len(self.compiler.pagetag.filter_args.args)
                ) or \
                len(self.compiler.default_filters):

            s = self.create_filter_callable(node.escapes_code.args,
                                            "%s" % node.text, True)
            self.printer.writeline("__M_writer(%s)" % s)
        else:
            self.printer.writeline("__M_writer(%s)" % node.text)
    def visitControlLine(self, node):
        """Render a % control line (for/if/while/... and their end lines)."""
        if node.isend:
            # dedent; for a mangled `for` loop also close the try: opened
            # by mangle_mako_loop() and restore the enclosing loop context
            self.printer.writeline(None)
            if node.has_loop_context:
                self.printer.writeline('finally:')
                self.printer.writeline("loop = __M_loop._exit()")
                self.printer.writeline(None)
        else:
            self.write_source_comment(node)
            if self.compiler.enable_loop and node.keyword == 'for':
                text = mangle_mako_loop(node, self.printer)
            else:
                text = node.text
            self.printer.writeline(text)
            children = node.get_children()

            # this covers the three situations where we want to insert a pass:
            #    1) a ternary control line with no children,
            #    2) a primary control line with nothing but its own ternary
            #          and end control lines, and
            #    3) any control line with no content other than comments
            if not children or (
                    util.all(isinstance(c, (parsetree.Comment,
                                            parsetree.ControlLine))
                             for c in children) and
                    util.all((node.is_ternary(c.keyword) or c.isend)
                             for c in children
                             if isinstance(c, parsetree.ControlLine))):
                self.printer.writeline("pass")
    def visitText(self, node):
        """Render a literal text node as a write of its repr'd content."""
        self.write_source_comment(node)
        self.printer.writeline("__M_writer(%s)" % repr(node.content))
    def visitTextTag(self, node):
        """Render a text tag; children are emitted as-is, optionally
        captured and run through the tag's filter list."""
        filtered = len(node.filter_args.args) > 0
        if filtered:
            # capture the child output so the filters can be applied to
            # the whole buffered block at once
            self.printer.writelines(
                "__M_writer = context._push_writer()",
                "try:",
            )
        for n in node.nodes:
            n.accept_visitor(self)
        if filtered:
            self.printer.writelines(
                "finally:",
                "__M_buf, __M_writer = context._pop_buffer_and_writer()",
                "__M_writer(%s)" %
                self.create_filter_callable(
                    node.filter_args.args,
                    "__M_buf.getvalue()",
                    False),
                None
            )
    def visitCode(self, node):
        """Render an inline python code block; module-level blocks
        (node.ismodule) are skipped here."""
        if not node.ismodule:
            self.write_source_comment(node)
            self.printer.write_indented_block(node.text)

            if not self.in_def and len(self.identifiers.locally_assigned) > 0:
                # if we are the "template" def, fudge locally
                # declared/modified variables into the "__M_locals" dictionary,
                # which is used for def calls within the same template,
                # to simulate "enclosing scope"
                self.printer.writeline(
                    '__M_locals_builtin_stored = __M_locals_builtin()')
                self.printer.writeline(
                    '__M_locals.update(__M_dict_builtin([(__M_key,'
                    ' __M_locals_builtin_stored[__M_key]) for __M_key in'
                    ' [%s] if __M_key in __M_locals_builtin_stored]))' %
                    ','.join([repr(x) for x in node.declared_identifiers()]))
def visitIncludeTag(self, node):
self.write_source_comment(node)
args = node.attributes.get('args')
if args:
self.printer.writeline(
"runtime._include_file(context, %s, _template_uri, %s)" %
(node.parsed_attributes['file'], args))
else:
self.printer.writeline(
"runtime._include_file(context, %s, _template_uri)" %
(node.parsed_attributes['file']))
    def visitNamespaceTag(self, node):
        # namespaces produce no output where the tag appears; they are
        # handled elsewhere in the code generator (see the
        # write_namespaces() branch noted in _Identifiers)
        pass

    def visitDefTag(self, node):
        # defs produce no output at the point of definition; their
        # callables are written out separately (e.g. write_inline_def())
        pass
    def visitBlockTag(self, node):
        """Render the invocation of a block at the point it appears."""
        if node.is_anonymous:
            self.printer.writeline("%s()" % node.funcname)
        else:
            nameargs = node.get_argument_expressions(include_defaults=False)
            nameargs += ['**pageargs']
            # only invoke the block here when no 'parent' in the context
            # defines a same-named attribute (i.e. no override exists)
            self.printer.writeline("if 'parent' not in context._data or "
                                   "not hasattr(context._data['parent'], '%s'):"
                                   % node.funcname)
            self.printer.writeline(
                "context['self'].%s(%s)" % (node.funcname, ",".join(nameargs)))
            self.printer.writeline("\n")
    def visitCallNamespaceTag(self, node):
        """Render a namespace-call tag; shares the call-tag code path."""
        # TODO: we can put namespace-specific checks here, such
        # as ensure the given namespace will be imported,
        # pre-import the namespace, etc.
        self.visitCallTag(node)
    def visitCallTag(self, node):
        """Render a call tag: build a one-off 'ccall' closure exposing
        body() plus any defs nested in the tag as the callable namespace
        for the called def, then invoke the call expression."""
        self.printer.writeline("def ccall(caller):")
        export = ['body']
        callable_identifiers = self.identifiers.branch(node, nested=True)
        body_identifiers = callable_identifiers.branch(node, nested=False)
        # we want the 'caller' passed to ccall to be used
        # for the body() function, but for other non-body()
        # defs within the call tag we want the current caller
        # off the call stack (if any)
        body_identifiers.add_declared('caller')

        self.identifier_stack.append(body_identifiers)

        class DefVisitor(object):
            # writes out nested defs/blocks as inline callables of ccall
            def visitDefTag(s, node):
                s.visitDefOrBase(node)

            def visitBlockTag(s, node):
                s.visitDefOrBase(node)

            def visitDefOrBase(s, node):
                self.write_inline_def(node, callable_identifiers, nested=False)
                if not node.is_anonymous:
                    export.append(node.funcname)
                # remove defs that are within the call tag from the
                # "closuredefs" defined in the body, so they dont render twice
                if node.funcname in body_identifiers.closuredefs:
                    del body_identifiers.closuredefs[node.funcname]

        vis = DefVisitor()
        for n in node.nodes:
            n.accept_visitor(vis)
        self.identifier_stack.pop()

        bodyargs = node.body_decl.get_argument_expressions()
        self.printer.writeline("def body(%s):" % ','.join(bodyargs))

        # TODO: figure out best way to specify
        # buffering/nonbuffering (at call time would be better)
        buffered = False
        if buffered:
            self.printer.writelines(
                "context._push_buffer()",
                "try:"
            )
        self.write_variable_declares(body_identifiers)
        self.identifier_stack.append(body_identifiers)
        for n in node.nodes:
            n.accept_visitor(self)
        self.identifier_stack.pop()

        self.write_def_finish(node, buffered, False, False, callstack=False)
        self.printer.writelines(
            None,
            "return [%s]" % (','.join(export)),
            None
        )

        self.printer.writelines(
            # push on caller for nested call
            "context.caller_stack.nextcaller = "
            "runtime.Namespace('caller', context, "
            "callables=ccall(__M_caller))",
            "try:")
        self.write_source_comment(node)
        self.printer.writelines(
            "__M_writer(%s)" % self.create_filter_callable(
                [], node.expression, True),
            "finally:",
            "context.caller_stack.nextcaller = None",
            None
        )
class _Identifiers(object):
    """tracks the status of identifier names as template code is rendered."""

    def __init__(self, compiler, node=None, parent=None, nested=False):
        if parent is not None:
            # if we are the branch created in write_namespaces(),
            # we don't share any context from the main body().
            if isinstance(node, parsetree.NamespaceTag):
                self.declared = set()
                self.topleveldefs = util.SetLikeDict()
            else:
                # things that have already been declared
                # in an enclosing namespace (i.e. names we can just use)
                self.declared = set(parent.declared).\
                    union([c.name for c in parent.closuredefs.values()]).\
                    union(parent.locally_declared).\
                    union(parent.argument_declared)

                # if these identifiers correspond to a "nested"
                # scope, it means whatever the parent identifiers
                # had as undeclared will have been declared by that parent,
                # and therefore we have them in our scope.
                if nested:
                    self.declared = self.declared.union(parent.undeclared)

                # top level defs that are available
                self.topleveldefs = util.SetLikeDict(**parent.topleveldefs)
        else:
            self.declared = set()
            self.topleveldefs = util.SetLikeDict()

        self.compiler = compiler

        # things within this level that are referenced before they
        # are declared (e.g. assigned to)
        self.undeclared = set()

        # things that are declared locally.  some of these things
        # could be in the "undeclared" list as well if they are
        # referenced before declared
        self.locally_declared = set()

        # assignments made in explicit python blocks.
        # these will be propagated to
        # the context of local def calls.
        self.locally_assigned = set()

        # things that are declared in the argument
        # signature of the def callable
        self.argument_declared = set()

        # closure defs that are defined in this level
        self.closuredefs = util.SetLikeDict()

        self.node = node

        if node is not None:
            node.accept_visitor(self)

        illegal_names = self.compiler.reserved_names.intersection(
            self.locally_declared)
        if illegal_names:
            raise exceptions.NameConflictError(
                "Reserved words declared in template: %s" %
                ", ".join(illegal_names))

    def branch(self, node, **kwargs):
        """create a new Identifiers for a new Node, with
        this Identifiers as the parent."""

        return _Identifiers(self.compiler, node, self, **kwargs)

    @property
    def defs(self):
        # all defs visible at this level, top-level and closure alike
        return set(self.topleveldefs.union(self.closuredefs).values())

    def __repr__(self):
        return "Identifiers(declared=%r, locally_declared=%r, "\
            "undeclared=%r, topleveldefs=%r, closuredefs=%r, "\
            "argumentdeclared=%r)" %\
            (
                list(self.declared),
                list(self.locally_declared),
                list(self.undeclared),
                [c.name for c in self.topleveldefs.values()],
                [c.name for c in self.closuredefs.values()],
                self.argument_declared)

    def check_declared(self, node):
        """update the state of this Identifiers with the undeclared
        and declared identifiers of the given node."""

        for ident in node.undeclared_identifiers():
            if ident != 'context' and\
                    ident not in self.declared.union(self.locally_declared):
                self.undeclared.add(ident)
        for ident in node.declared_identifiers():
            self.locally_declared.add(ident)

    def add_declared(self, ident):
        # mark a name as declared, clearing any prior undeclared status
        self.declared.add(ident)
        if ident in self.undeclared:
            self.undeclared.remove(ident)

    def visitExpression(self, node):
        self.check_declared(node)

    def visitControlLine(self, node):
        self.check_declared(node)

    def visitCode(self, node):
        if not node.ismodule:
            self.check_declared(node)
            self.locally_assigned = self.locally_assigned.union(
                node.declared_identifiers())

    def visitNamespaceTag(self, node):
        # only traverse into the sub-elements of a
        # <%namespace> tag if we are the branch created in
        # write_namespaces()
        if self.node is node:
            for n in node.nodes:
                n.accept_visitor(self)

    def _check_name_exists(self, collection, node):
        # register the def/block in `collection`, rejecting duplicate
        # names when either occurrence is a block
        existing = collection.get(node.funcname)
        collection[node.funcname] = node
        if existing is not None and \
                existing is not node and \
                (node.is_block or existing.is_block):
            raise exceptions.CompileException(
                "%%def or %%block named '%s' already "
                "exists in this template." %
                node.funcname, **node.exception_kwargs)

    def visitDefTag(self, node):
        if node.is_root() and not node.is_anonymous:
            self._check_name_exists(self.topleveldefs, node)
        elif node is not self.node:
            self._check_name_exists(self.closuredefs, node)

        for ident in node.undeclared_identifiers():
            if ident != 'context' and\
                    ident not in self.declared.union(self.locally_declared):
                self.undeclared.add(ident)

        # visit defs only one level deep
        if node is self.node:
            for ident in node.declared_identifiers():
                self.argument_declared.add(ident)

            for n in node.nodes:
                n.accept_visitor(self)

    def visitBlockTag(self, node):
        if node is not self.node and \
                not node.is_anonymous:

            # named blocks may not be nested inside defs or call tags
            if isinstance(self.node, parsetree.DefTag):
                raise exceptions.CompileException(
                    "Named block '%s' not allowed inside of def '%s'"
                    % (node.name, self.node.name), **node.exception_kwargs)
            elif isinstance(self.node,
                            (parsetree.CallTag, parsetree.CallNamespaceTag)):
                raise exceptions.CompileException(
                    "Named block '%s' not allowed inside of <%%call> tag"
                    % (node.name, ), **node.exception_kwargs)

        for ident in node.undeclared_identifiers():
            if ident != 'context' and\
                    ident not in self.declared.union(self.locally_declared):
                self.undeclared.add(ident)

        if not node.is_anonymous:
            self._check_name_exists(self.topleveldefs, node)
            self.undeclared.add(node.funcname)
        elif node is not self.node:
            self._check_name_exists(self.closuredefs, node)

        for ident in node.declared_identifiers():
            self.argument_declared.add(ident)

        for n in node.nodes:
            n.accept_visitor(self)

    def visitIncludeTag(self, node):
        self.check_declared(node)

    def visitPageTag(self, node):
        for ident in node.declared_identifiers():
            self.argument_declared.add(ident)
        self.check_declared(node)

    def visitCallNamespaceTag(self, node):
        self.visitCallTag(node)

    def visitCallTag(self, node):
        # descend into the call tag's children only when it is the node
        # this _Identifiers branch was created for
        if node is self.node:
            for ident in node.undeclared_identifiers():
                if ident != 'context' and\
                        ident not in self.declared.union(self.locally_declared):
                    self.undeclared.add(ident)
            for ident in node.declared_identifiers():
                self.argument_declared.add(ident)
            for n in node.nodes:
                n.accept_visitor(self)
        else:
            for ident in node.undeclared_identifiers():
                if ident != 'context' and\
                        ident not in self.declared.union(self.locally_declared):
                    self.undeclared.add(ident)
# matches "for <targets> in <iterable>:" — group 1 is the target list
# (optionally parenthesized), group 2 the iterable expression
_FOR_LOOP = re.compile(
    r'^for\s+((?:\(?)\s*[A-Za-z_][A-Za-z_0-9]*'
    r'(?:\s*,\s*(?:[A-Za-z_][A-Za-z0-9_]*),??)*\s*(?:\)?))\s+in\s+(.*):'
)


def mangle_mako_loop(node, printer):
    """converts a for loop into a context manager wrapped around a for loop
    when access to the `loop` variable has been detected in the for loop body

    :param node: the ControlLine node of the ``for`` statement
    :param printer: printer receiving the generated setup lines
    :return: the (possibly rewritten) loop source text to emit
    """
    loop_variable = LoopVariable()
    node.accept_visitor(loop_variable)
    if loop_variable.detected:
        # mark the closing control line so visitControlLine() emits the
        # matching finally/_exit() block
        node.nodes[-1].has_loop_context = True
        match = _FOR_LOOP.match(node.text)
        if match:
            printer.writelines(
                'loop = __M_loop._enter(%s)' % match.group(2),
                'try:'
                # 'with __M_loop(%s) as loop:' % match.group(2)
            )
            text = 'for %s in loop:' % match.group(1)
        else:
            raise SyntaxError("Couldn't apply loop context: %s" % node.text)
    else:
        text = node.text
    return text
class LoopVariable(object):
    """A node visitor which looks for the name 'loop' within undeclared
    identifiers."""

    def __init__(self):
        # becomes True once any visited node references 'loop'
        self.detected = False

    def _loop_reference_detected(self, node):
        # once a reference is found, stop descending into children
        if 'loop' in node.undeclared_identifiers():
            self.detected = True
            return
        for child in node.get_children():
            child.accept_visitor(self)

    def visitControlLine(self, node):
        self._loop_reference_detected(node)

    def visitCode(self, node):
        self._loop_reference_detected(node)

    def visitExpression(self, node):
        self._loop_reference_detected(node)
| swangui/ggrid | mako/codegen.py | Python | mit | 48,628 |
import os
from snactor.loader import create_actor, load
from snactor.registry import get_registered_actors, must_get_actor
from snactor.utils import get_chan
class UnresolvedDependenciesError(Exception):
    """Signals that the channel-based dependency order between a set of
    actors could not be resolved automatically."""
    pass
def _order_by_channels(channels, actors):
result = []
while actors:
scheduled = []
for idx, actor in enumerate(actors):
for channel in actor[0].inputs:
if channels[channel['name']]['producers']:
break
else:
for channel in actor[0].outputs:
channels[channel['name']]['producers'].remove(actor[0].name)
scheduled.append(idx)
result.append(actor[0].name)
if not scheduled:
raise UnresolvedDependenciesError(
"Could not solve dependency order for '{}'".format(', '.join([a[0].name for a in actors])))
for idx in reversed(scheduled):
actors.pop(idx)
return result
def load_as_group(name, location, tags=()):
    """
    load_as_group creates a group actor with the given `name`, with actors loaded from `location` and is filtered
    by `tags`

    :param name: Name of the group actor to create
    :param location: Where to load the actors from
    :param tags: List of tags to filter by
    :return: A group actor ordered with resolved dependencies
    """
    # load() registers matching actors in the global registry; the group
    # is then assembled from everything currently registered
    load(location, tags)
    return _create_group_from_actors(name, get_registered_actors().values(), tags)
def create_group_from_names(name, names):
    """
    create_group_from_names creates a group actor named `name`, from a list of names specified in `names` (requires
    the actors to be loaded before)

    :param name: Name of the group actor to create
    :param names: List of actor names
    :return: A group actor ordered with resolved dependencies
    """
    # fix: build a real list rather than passing map() — on Python 3,
    # map() returns a one-shot iterator, and _create_group_from_actors()
    # iterates over the actors twice, so the second pass would silently
    # see an empty sequence. Also hoists the registry lookup out of the loop.
    registry = get_registered_actors()
    return _create_group_from_actors(name, [registry.get(n) for n in names], None)
def _create_group_from_actors(name, actors, tags):
    """Build and register a group actor named `name` from `actors`.

    :param name: Name of the group actor to create
    :param actors: Iterable of (definition, ...) tuples for candidate actors
    :param tags: Tags to attach to the generated group definition
    :return: The registered group actor
    """
    # robustness: materialize up front — `actors` is iterated twice below,
    # which would silently misbehave for one-shot iterators (e.g. a
    # Python 3 map object)
    actors = list(actors)
    channels = {}
    inputs, outputs = set(), set()

    # actors that are already members of an existing group are excluded,
    # so nested groups don't double-schedule their members
    skip_actors = set()
    for actor in actors:
        if actor[0].init.get('group'):
            skip_actors = skip_actors.union([a.definition.name for a in actor[0].actors])

    actor_list = []
    for actor in actors:
        if actor[0].name in skip_actors:
            continue
        actor_list.append(actor)
        for chan in actor[0].inputs:
            inputs.add(chan['name'])
            get_chan(channels, chan)['consumers'].append(actor[0].name)
        for chan in actor[0].outputs:
            outputs.add(chan['name'])
            get_chan(channels, chan)['producers'].append(actor[0].name)

    # channels consumed but never produced within the group are the
    # group's external inputs; everything else produced is an output
    initial = inputs.difference(outputs)
    create_actor(name, {
        '$location': os.getcwd(),
        'inputs': [channels[chan]['data'] for chan in initial],
        'outputs': [channels[chan]['data'] for chan in outputs.difference(initial)],
        'group': _order_by_channels(channels, actor_list),
        'description': 'Auto generated group actor',
        'tags': tags or (),
    })
    return must_get_actor(name)
| leapp-to/snactor | snactor/loader/group.py | Python | apache-2.0 | 3,242 |
# coding=utf-8
import datetime
import decimal
import uuid
from hazelcast import HazelcastClient
from hazelcast.config import IntType
from hazelcast.core import HazelcastJsonValue
from hazelcast.serialization import MAX_BYTE, MAX_SHORT, MAX_INT, MAX_LONG
from tests.base import SingleMemberTestCase
from tests.hzrc.ttypes import Lang
from tests.util import (
random_string,
skip_if_client_version_newer_than_or_equal,
skip_if_client_version_older_than,
skip_if_server_version_older_than,
)
class SerializersLiveTest(SingleMemberTestCase):
    # Round-trip serialization tests against a live member: values set by
    # the Python client are inspected server-side (and vice versa) through
    # remote-controller scripts.

    @classmethod
    def configure_client(cls, config):
        config["cluster_name"] = cls.cluster.id
        return config

    def setUp(self):
        # disposables collect cleanup callbacks registered during a test
        # (e.g. shutdown of extra clients created by create_new_map_with)
        self.disposables = []
        self.map = self.client.get_map(random_string()).blocking()

    def tearDown(self):
        self.map.clear()
        for disposable in self.disposables:
            disposable()

    def get_from_server(self):
        # Reads map["key"] on the member via a remote-controller script and
        # returns the server-side string representation of the value.
        script = (
            """
        function foo() {
            var map = instance_0.getMap("%s");
            var res = map.get("key");
            if (res.getClass().isArray()) {
                return Java.from(res);
            } else {
                return res;
            }
        }
        result = ""+foo();"""
            % self.map.name
        )
        response = self.rc.executeOnController(self.cluster.id, script, Lang.JAVASCRIPT)
        return response.result.decode("utf-8")

    def set_on_server(self, obj):
        # Sets map["key"] on the member, where `obj` is a Java expression
        # string evaluated by the remote-controller script engine.
        script = """
        var map = instance_0.getMap("%s");
        map.set("key", %s);""" % (
            self.map.name,
            obj,
        )
        response = self.rc.executeOnController(self.cluster.id, script, Lang.JAVASCRIPT)
        return response.success

    def create_new_map_with(self, default_int_type):
        # Starts a second client configured with the given default int type
        # and swaps self.map for a map obtained from it; shutdown is
        # deferred to tearDown via disposables.
        client = HazelcastClient(cluster_name=self.cluster.id, default_int_type=default_int_type)
        self.disposables.append(lambda: client.shutdown())
        self.map = client.get_map(random_string()).blocking()
    # --- client -> server round trips: each test writes a Python value,
    # reads it back through the client, and compares against the
    # server-side string representation.

    def test_bool(self):
        value = True
        self.map.set("key", value)
        self.assertEqual(value, self.map.get("key"))
        response = self.get_from_server() == "true"
        self.assertEqual(value, response)

    def test_byte(self):
        self.create_new_map_with(IntType.BYTE)
        value = (1 << 7) - 1  # max signed byte
        self.map.set("key", value)
        self.assertEqual(value, self.map.get("key"))
        response = int(self.get_from_server())
        self.assertEqual(value, response)

    def test_short(self):
        self.create_new_map_with(IntType.SHORT)
        value = -1 * (1 << 15)  # min signed short
        self.map.set("key", value)
        self.assertEqual(value, self.map.get("key"))
        response = int(self.get_from_server())
        self.assertEqual(value, response)

    def test_int(self):
        value = (1 << 31) - 1  # max signed int
        self.map.set("key", value)
        self.assertEqual(value, self.map.get("key"))
        response = int(self.get_from_server())
        self.assertEqual(value, response)

    def test_long(self):
        self.create_new_map_with(IntType.LONG)
        value = -1 * (1 << 63)  # min signed long
        self.map.set("key", value)
        self.assertEqual(value, self.map.get("key"))
        response = int(self.get_from_server())
        self.assertEqual(value, response)

    def test_double(self):
        value = 123.0
        self.map.set("key", value)
        self.assertEqual(value, self.map.get("key"))
        response = float(self.get_from_server())
        self.assertEqual(value, response)

    def test_string(self):
        value = "value"
        self.map.set("key", value)
        self.assertEqual(value, self.map.get("key"))
        response = self.get_from_server()
        self.assertEqual(value, response)

    def test_utf_string(self):
        value = "Iñtërnâtiônàlizætiøn"
        self.map.set("key", value)
        self.assertEqual(value, self.map.get("key"))
        response = self.get_from_server()
        self.assertEqual(value, response)

    def test_emoji(self):
        value = "1⚐中💦2😭🙆😔5"
        self.map.set("key", value)
        self.assertEqual(value, self.map.get("key"))
        response = self.get_from_server()
        self.assertEqual(value, response)

    def test_utf_chars(self):
        # NOTE(review): the trailing escape parses as U+1D30 followed by a
        # literal "6"; a supplementary-plane char would need the 8-digit
        # \U0001D306 form — confirm intent. Round trip is self-consistent
        # either way.
        value = "\u0040\u0041\u01DF\u06A0\u12E0\u1D306"
        self.map.set("key", value)
        self.assertEqual(value, self.map.get("key"))
        response = self.get_from_server()
        self.assertEqual(value, response)

    def test_uuid(self):
        value = uuid.uuid4()
        self.map.set("key", value)
        self.assertEqual(value, self.map.get("key"))
        response = uuid.UUID(hex=self.get_from_server())
        self.assertEqual(value, response)

    def test_hjv(self):
        value = HazelcastJsonValue({"a": 3})
        self.map.set("key", value)
        self.assertEqual(value, self.map.get("key"))
        response = HazelcastJsonValue(self.get_from_server())
        self.assertEqual(value, response)

    def test_bytearray(self):
        value = bytearray("value".encode())
        self.map.set("key", value)
        self.assertEqual(value, self.map.get("key"))
        # server renders the array as comma-separated byte values
        response = bytearray(map(int, self.get_from_server().split(",")))
        self.assertEqual(value, response)

    def test_datetime(self):
        skip_if_client_version_newer_than_or_equal(self, "5.0")
        value = datetime.datetime.now()
        self.map.set("key", value)
        self.assertEqual(value.timetuple(), self.map.get("key").timetuple())
        response = self.get_from_server()
        self.assertTrue(response.startswith(value.strftime("%a %b %d %H:%M:%S")))

    def test_big_integer(self):
        self.create_new_map_with(IntType.BIG_INT)
        value = 1 << 128
        self.map.set("key", value)
        self.assertEqual(value, self.map.get("key"))
        response = int(self.get_from_server())
        self.assertEqual(value, response)

    def test_variable_integer(self):
        # VAR picks the smallest fitting representation per value
        self.create_new_map_with(IntType.VAR)
        value = MAX_BYTE
        self.map.set("key", value)
        self.assertEqual(value, self.map.get("key"))
        response = int(self.get_from_server())
        self.assertEqual(value, response)

        value = MAX_SHORT
        self.map.set("key", value)
        self.assertEqual(value, self.map.get("key"))
        response = int(self.get_from_server())
        self.assertEqual(value, response)

        value = MAX_INT
        self.map.set("key", value)
        self.assertEqual(value, self.map.get("key"))
        response = int(self.get_from_server())
        self.assertEqual(value, response)

        value = MAX_LONG
        self.map.set("key", value)
        self.assertEqual(value, self.map.get("key"))
        response = int(self.get_from_server())
        self.assertEqual(value, response)

        value = 1234567890123456789012345678901234567890
        self.map.set("key", value)
        self.assertEqual(value, self.map.get("key"))
        response = int(self.get_from_server())
        self.assertEqual(value, response)

    def test_decimal(self):
        skip_if_client_version_older_than(self, "5.0")
        decimal_value = "1234567890123456789012345678901234567890.987654321"
        value = decimal.Decimal(decimal_value)
        self.map.set("key", value)
        self.assertEqual(value, self.map.get("key"))
        response = self.get_from_server()
        self.assertEqual(decimal_value, response)

    def test_list(self):
        value = [1, 2, 3]
        self.map.set("key", value)
        self.assertEqual(value, self.map.get("key"))
        response = self.get_from_server()
        # server renders the list as "[1, 2, 3]"
        self.assertEqual(value, list(map(int, response[1:-1].split(", "))))

    def test_datetime_date(self):
        skip_if_client_version_older_than(self, "5.0")
        skip_if_server_version_older_than(self, self.client, "5.0")
        value = datetime.datetime.now().date()
        self.map.set("key", value)
        self.assertEqual(value, self.map.get("key"))
        response = self.get_from_server()
        self.assertEqual(response, value.strftime("%Y-%m-%d"))

    def test_datetime_time(self):
        skip_if_client_version_older_than(self, "5.0")
        skip_if_server_version_older_than(self, self.client, "5.0")
        value = datetime.datetime.now()
        if value.microsecond % 1000 == 0:
            # A little hack for Windows. Time is precise to the
            # milliseconds there. If we send the microseconds
            # we have now, due to trailing zeros, get_from_server()
            # call below will return the string representation with
            # 3 digits for the microseconds. But, Python always expects
            # 6 digits. So, the assertion will fail. To fix that,
            # we add 1 microseconds to the value, so that in the Java
            # side, nanoseconds representation will only have 3 trailing
            # zeros, and will send the data as we want.
            value = value + datetime.timedelta(microseconds=1)

        value = value.time()
        self.map.set("key", value)
        self.assertEqual(value, self.map.get("key"))
        response = self.get_from_server()
        self.assertEqual(response, value.strftime("%H:%M:%S.%f"))

    def test_datetime_datetime(self):
        skip_if_client_version_older_than(self, "5.0")
        skip_if_server_version_older_than(self, self.client, "5.0")
        value = datetime.datetime.now(datetime.timezone(datetime.timedelta(seconds=1800)))
        if value.microsecond % 1000 == 0:
            # A little hack for Windows. Time is precise to the
            # milliseconds there. If we send the microseconds
            # we have now, due to trailing zeros, get_from_server()
            # call below will return the string representation with
            # 3 digits for the microseconds. But, Python always expects
            # 6 digits. So, the assertion will fail. To fix that,
            # we add 1 microseconds to the value, so that in the Java
            # side, nanoseconds representation will only have 3 trailing
            # zeros, and will send the data as we want.
            value = value + datetime.timedelta(microseconds=1)

        self.map.set("key", value)
        self.assertEqual(value, self.map.get("key"))
        response = self.get_from_server()
        expected = value.strftime("%Y-%m-%dT%H:%M:%S.%f%z")
        # Java sends offset string with a : in between hour and minute
        expected = "%s:%s" % (expected[:-2], expected[-2:])
        self.assertEqual(response, expected)
def test_bool_from_server(self):
self.assertTrue(self.set_on_server("true"))
self.assertEqual(True, self.map.get("key"))
def test_byte_from_server(self):
self.assertTrue(self.set_on_server("new java.lang.Byte(-23)"))
self.assertEqual(-23, self.map.get("key"))
def test_char_from_server(self):
self.assertTrue(self.set_on_server("new java.lang.Character('x')"))
self.assertEqual("x", self.map.get("key"))
def test_short_from_server(self):
self.assertTrue(self.set_on_server("new java.lang.Short(23)"))
self.assertEqual(23, self.map.get("key"))
def test_integer_from_server(self):
self.assertTrue(self.set_on_server("new java.lang.Integer(" + str(1 << 30) + ")"))
self.assertEqual(1 << 30, self.map.get("key"))
def test_long_from_server(self):
self.assertTrue(self.set_on_server("new java.lang.Long(-1 * " + str(1 << 63) + ")"))
self.assertEqual(-1 * (1 << 63), self.map.get("key"))
def test_float_from_server(self):
self.assertTrue(self.set_on_server("new java.lang.Float(32.0)"))
self.assertEqual(32.0, self.map.get("key"))
def test_double_from_server(self):
self.assertTrue(self.set_on_server("new java.lang.Double(-12332.0)"))
self.assertEqual(-12332.0, self.map.get("key"))
def test_string_from_server(self):
self.assertTrue(self.set_on_server('"1⚐中💦2😭🙆😔5"'))
self.assertEqual("1⚐中💦2😭🙆😔5", self.map.get("key"))
def test_uuid_from_server(self):
self.assertTrue(self.set_on_server("new java.util.UUID(0, 1)"))
self.assertEqual(uuid.UUID(int=1), self.map.get("key"))
def test_hjv_from_server(self):
self.assertTrue(
self.set_on_server('new com.hazelcast.core.HazelcastJsonValue("{\\"a\\": 3}")')
)
self.assertEqual(HazelcastJsonValue({"a": 3}), self.map.get("key"))
def test_bool_array_from_server(self):
self.assertTrue(self.set_on_server('Java.to([true, false], "boolean[]")'))
self.assertEqual([True, False], self.map.get("key"))
def test_byte_array_from_server(self):
self.assertTrue(self.set_on_server('Java.to([3, 123], "byte[]")'))
self.assertEqual(bytearray([3, 123]), self.map.get("key"))
def test_char_array_from_server(self):
self.assertTrue(self.set_on_server("Java.to(['x', 'y'], \"char[]\")"))
self.assertEqual(["x", "y"], self.map.get("key"))
def test_short_array_from_server(self):
self.assertTrue(self.set_on_server('Java.to([1323, -1232], "short[]")'))
self.assertEqual([1323, -1232], self.map.get("key"))
def test_int_array_from_server(self):
self.assertTrue(self.set_on_server('Java.to([2147483647, -2147483648], "int[]")'))
self.assertEqual([2147483647, -2147483648], self.map.get("key"))
def test_long_array_from_server(self):
self.assertTrue(
self.set_on_server('Java.to([1152921504606846976, -1152921504606846976], "long[]")')
)
self.assertEqual([1152921504606846976, -1152921504606846976], self.map.get("key"))
def test_float_array_from_server(self):
self.assertTrue(self.set_on_server('Java.to([3123.0, -123.0], "float[]")'))
self.assertEqual([3123.0, -123.0], self.map.get("key"))
def test_double_array_from_server(self):
self.assertTrue(self.set_on_server('Java.to([3123.0, -123.0], "double[]")'))
self.assertEqual([3123.0, -123.0], self.map.get("key"))
def test_string_array_from_server(self):
self.assertTrue(self.set_on_server('Java.to(["hey", "1⚐中💦2😭🙆😔5"], "java.lang.String[]")'))
self.assertEqual(["hey", "1⚐中💦2😭🙆😔5"], self.map.get("key"))
def test_date_from_server(self):
skip_if_client_version_newer_than_or_equal(self, "5.0")
self.assertTrue(self.set_on_server("new java.util.Date(100, 11, 15, 23, 59, 49)"))
# server adds 1900 to year. Also, month is 0-based for server and 1-based for the client
self.assertEqual(datetime.datetime(2000, 12, 15, 23, 59, 49), self.map.get("key"))
def test_big_integer_from_server(self):
self.assertTrue(self.set_on_server('new java.math.BigInteger("12", 10)'))
self.assertEqual(12, self.map.get("key"))
self.assertTrue(self.set_on_server('new java.math.BigInteger("-13", 10)'))
self.assertEqual(-13, self.map.get("key"))
self.assertTrue(
self.set_on_server(
'new java.math.BigInteger("1234567890123456789012345678901234567890", 10)'
)
)
self.assertEqual(1234567890123456789012345678901234567890, self.map.get("key"))
self.assertTrue(
self.set_on_server(
'new java.math.BigInteger("-1234567890123456789012345678901234567890", 10)'
)
)
self.assertEqual(-1234567890123456789012345678901234567890, self.map.get("key"))
    def test_big_decimal_from_server(self):
        # java.math.BigDecimal maps to decimal.Decimal; only supported from 5.0 on.
        skip_if_client_version_older_than(self, "5.0")
        self.assertTrue(self.set_on_server('new java.math.BigDecimal("12.12")'))
        self.assertEqual(decimal.Decimal("12.12"), self.map.get("key"))
        self.assertTrue(self.set_on_server('new java.math.BigDecimal("-13.13")'))
        self.assertEqual(decimal.Decimal("-13.13"), self.map.get("key"))
        self.assertTrue(
            self.set_on_server(
                'new java.math.BigDecimal("1234567890123456789012345678901234567890.123456789")'
            )
        )
        self.assertEqual(
            decimal.Decimal("1234567890123456789012345678901234567890.123456789"),
            self.map.get("key"),
        )
        self.assertTrue(
            self.set_on_server(
                'new java.math.BigDecimal("-1234567890123456789012345678901234567890.123456789")'
            )
        )
        self.assertEqual(
            decimal.Decimal("-1234567890123456789012345678901234567890.123456789"),
            self.map.get("key"),
        )
    def test_java_class_from_server(self):
        # A java.lang.Class value deserializes as its fully-qualified name string.
        self.assertTrue(self.set_on_server("java.lang.String.class"))
        self.assertEqual("java.lang.String", self.map.get("key"))
    def test_array_list_from_server(self):
        # A java.util.ArrayList built server-side deserializes as a Python list.
        script = (
            """
        var list = new java.util.ArrayList();
        list.add(1);
        list.add(2);
        list.add(3);
        var map = instance_0.getMap("%s");
        map.set("key", list);"""
            % self.map.name
        )
        response = self.rc.executeOnController(self.cluster.id, script, Lang.JAVASCRIPT)
        self.assertTrue(response.success)
        self.assertEqual([1, 2, 3], self.map.get("key"))
    def test_linked_list_from_server(self):
        # A java.util.LinkedList built server-side also deserializes as a Python list.
        script = (
            """
        var list = new java.util.LinkedList();
        list.add("a");
        list.add("b");
        list.add("c");
        var map = instance_0.getMap("%s");
        map.set("key", list);"""
            % self.map.name
        )
        response = self.rc.executeOnController(self.cluster.id, script, Lang.JAVASCRIPT)
        self.assertTrue(response.success)
        self.assertEqual(["a", "b", "c"], self.map.get("key"))
    def test_local_date_from_server(self):
        # java.time.LocalDate maps to datetime.date; both sides must be 5.0+.
        skip_if_client_version_older_than(self, "5.0")
        skip_if_server_version_older_than(self, self.client, "5.0")
        self.assertTrue(self.set_on_server("java.time.LocalDate.of(2000, 12, 15)"))
        self.assertEqual(datetime.date(2000, 12, 15), self.map.get("key"))
    def test_local_time_from_server(self):
        # java.time.LocalTime maps to datetime.time; both sides must be 5.0+.
        skip_if_client_version_older_than(self, "5.0")
        skip_if_server_version_older_than(self, self.client, "5.0")
        self.assertTrue(self.set_on_server("java.time.LocalTime.of(18, 3, 35)"))
        self.assertEqual(datetime.time(18, 3, 35), self.map.get("key"))
    def test_local_date_time_from_server(self):
        # java.time.LocalDateTime maps to naive datetime.datetime;
        # Java nanoseconds (987654000) become microseconds (987654).
        skip_if_client_version_older_than(self, "5.0")
        skip_if_server_version_older_than(self, self.client, "5.0")
        self.assertTrue(
            self.set_on_server("java.time.LocalDateTime.of(2021, 8, 24, 0, 59, 55, 987654000)")
        )
        self.assertEqual(datetime.datetime(2021, 8, 24, 0, 59, 55, 987654), self.map.get("key"))
    def test_offset_date_time_from_server(self):
        # java.time.OffsetDateTime maps to an aware datetime.datetime whose
        # tzinfo carries the same total UTC offset in seconds.
        skip_if_client_version_older_than(self, "5.0")
        skip_if_server_version_older_than(self, self.client, "5.0")
        self.assertTrue(
            self.set_on_server(
                "java.time.OffsetDateTime.of(2021, 8, 24, 0, 59, 55, 987654000, "
                "java.time.ZoneOffset.ofTotalSeconds(2400))"
            )
        )
        self.assertEqual(
            datetime.datetime(
                2021, 8, 24, 0, 59, 55, 987654, datetime.timezone(datetime.timedelta(seconds=2400))
            ),
            self.map.get("key"),
        )
| hazelcast/hazelcast-python-client | tests/integration/backward_compatible/serialization/serializers_test.py | Python | apache-2.0 | 19,548 |
from bluebottle.bluebottle_drf2.serializers import PrimaryKeyGenericRelatedField, TagSerializer, FileSerializer, TaggableSerializerMixin
from bluebottle.accounts.serializers import UserPreviewSerializer
from apps.projects.serializers import ProjectPreviewSerializer
from apps.tasks.models import Task, TaskMember, TaskFile, Skill
from apps.wallposts.serializers import TextWallPostSerializer, WallPostListSerializer
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.utils.encoding import smart_text
from rest_framework import serializers
class TaskPreviewSerializer(serializers.ModelSerializer):
    """Compact task representation used in list/preview endpoints."""
    author = UserPreviewSerializer()
    project = ProjectPreviewSerializer()
    skill = serializers.PrimaryKeyRelatedField()
    class Meta:
        model = Task
        fields = ('id', 'title', 'location', 'skill', 'status', 'created', 'project', 'deadline', 'time_needed')
class TaskMemberSerializer(serializers.ModelSerializer):
    """A user's application/membership on a task."""
    member = UserPreviewSerializer()
    task = serializers.PrimaryKeyRelatedField()
    # New memberships default to the 'applied' status when none is given.
    status = serializers.ChoiceField(choices=TaskMember.TaskMemberStatuses.choices, required=False, default=TaskMember.TaskMemberStatuses.applied)
    motivation = serializers.CharField(required=False)
    class Meta:
        model = TaskMember
        fields = ('id', 'member', 'task', 'status', 'created', 'motivation')
class TaskFileSerializer(serializers.ModelSerializer):
    """File attachment uploaded to a task."""
    author = UserPreviewSerializer()
    task = serializers.PrimaryKeyRelatedField()
    file = FileSerializer()
    class Meta:
        model = TaskFile
        fields = ('id', 'author', 'task', 'file', 'created', 'title')
class TaskSerializer(TaggableSerializerMixin, serializers.ModelSerializer):
    """Full task representation including members, files, tags and wallposts."""
    members = TaskMemberSerializer(many=True, source='taskmember_set', required=False)
    files = TaskFileSerializer(many=True, source='taskfile_set', required=False)
    # Projects are referenced by slug rather than by primary key.
    project = serializers.SlugRelatedField(slug_field='slug')
    skill = serializers.PrimaryKeyRelatedField()
    author = UserPreviewSerializer()
    tags = TagSerializer()
    wallpost_ids = WallPostListSerializer()
    class Meta:
        model = Task
        fields = ('id', 'title', 'project', 'description', 'end_goal', 'members', 'files', 'location', 'skill',
                  'time_needed', 'author', 'status', 'created', 'deadline', 'tags', 'wallpost_ids')
class SkillSerializer(serializers.ModelSerializer):
    """Flat id/name representation of a task skill."""
    class Meta:
        model = Skill
        fields = ('id', 'name')
# Task WallPost serializers
class TaskWallPostSerializer(TextWallPostSerializer):
    """ TextWallPostSerializer with task specific customizations. """
    url = serializers.HyperlinkedIdentityField(view_name='task-twallpost-detail')
    # Generic relation resolving the wallpost's content object to a Task.
    task = PrimaryKeyGenericRelatedField(Task)
    class Meta(TextWallPostSerializer.Meta):
        # Add the task reference to the inherited wallpost fields.
        fields = TextWallPostSerializer.Meta.fields + ('task', )
| gannetson/sportschooldeopenlucht | apps/tasks/serializers.py | Python | bsd-3-clause | 2,995 |
import json
import os
import logging
import logging.config
"""Order 15: Use logging with generate log files.
In this sample:
Logging config: dependency/logging.json
Info logging file: dependency/logs/info.log
Error logging file: dependency/logs/error.log
"""
class LoggingSystem(object):
    """Configure the :mod:`logging` package from a JSON dictConfig file.

    Falls back to ``logging.basicConfig(level=logging.INFO)`` when the
    configuration file does not exist.
    """

    # Default location of the JSON dictConfig file.
    logging_config_file = 'dependency/logging.json'

    def __init__(self, config_file=None):
        """Load logging configuration.

        :param config_file: Optional path to a JSON dictConfig file.
            Defaults to :attr:`logging_config_file`, preserving the
            original behaviour for existing callers.
        """
        path = config_file if config_file is not None else self.logging_config_file
        if os.path.exists(path):
            # Context manager ensures the file handle is closed even if
            # json.load raises.
            with open(path, 'rt') as f:
                config = json.load(f)
            logging.config.dictConfig(config)
        else:
            logging.basicConfig(level=logging.INFO)
# LoggingSystem()
# logger = logging.getLogger(__name__)
# logger.info('this is info')
# logger.error('this is error')
| flyingSprite/spinelle | task_inventory/order_1_to_30/order_15_logging_system.py | Python | mit | 760 |
"""
WSGI config for metropol project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings module unless the environment
# already specifies one.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "metropol.settings")
# The WSGI callable used by application servers (gunicorn, uWSGI, mod_wsgi).
application = get_wsgi_application()
| carborgar/metropol | metropol/wsgi.py | Python | mit | 393 |
#!/usr/bin/python
import os
import psycopg2
import sys
file = open("/home/" + os.getlogin() + "/.pgpass", "r")
pgpasses = []
for line in file:
pgpasses.append(line.rstrip("\n").split(":"))
file.close()
for pgpass in pgpasses:
#print str(pgpass)
if pgpass[0] == "54.236.235.110" and pgpass[3] == "geonode":
src_pgpass = pgpass
if pgpass[0] == "54.197.226.56" and pgpass[3] == "geonode":
dst_pgpass = pgpass
src = psycopg2.connect(host=src_pgpass[0], database="geonode2", user=src_pgpass[3], password=src_pgpass[4])
dst = psycopg2.connect(host=dst_pgpass[0], database="geonode", user=dst_pgpass[3], password=dst_pgpass[4])
src_cur = src.cursor()
dst_cur = dst.cursor()
src_cur.execute("select id, domain, name from django_site")
for src_row in src_cur:
assignments = []
#id
assignments.append(src_row[0])
#domain
assignments.append(src_row[1])
#name
assignments.append(src_row[2])
try:
dst_cur.execute("insert into django_site(id, domain, name) values (%s, %s, %s)", assignments)
dst.commit()
except Exception as error:
print
print type(error)
print str(error) + "select id, domain, name from django_site"
print str(src_row)
dst.rollback()
dst.commit()
src_cur.close()
dst_cur.close()
src.close()
dst.close()
| DOE-NEPA/geonode_2.0_to_2.4_migration | migrate_django_site.py | Python | gpl-2.0 | 1,290 |
# -*- coding: utf-8 -*-
"""Test for short_panel and panel sandwich
Created on Fri May 18 13:05:47 2012
Author: Josef Perktold
moved example from main of random_panel
"""
import numpy as np
from numpy.testing import assert_almost_equal
import numpy.testing as npt
import statsmodels.tools.eval_measures as em
from statsmodels.stats.moment_helpers import cov2corr, se_cov
from statsmodels.regression.linear_model import OLS
from statsmodels.sandbox.panel.panel_short import ShortPanelGLS, ShortPanelGLS2
from statsmodels.sandbox.panel.random_panel import PanelSample
import statsmodels.sandbox.panel.correlation_structures as cs
import statsmodels.stats.sandwich_covariance as sw
def assert_maxabs(actual, expected, value):
    """Assert that the maximum absolute difference between `actual` and
    `expected` is strictly below `value`."""
    deviation = em.maxabs(actual, expected, None)
    npt.assert_array_less(deviation, value)
def test_short_panel():
    """Statistical smoke test of ShortPanelGLS on a simulated AR(1)-error panel.

    Checks internal consistency (whitened residuals uncorrelated, residual
    correlation close to the DGP, one-iteration fit equal to OLS, agreement
    with the legacy ShortPanelGLS2) and sanity of several robust standard
    errors; results are not verified against other packages.
    """
    #this checks that some basic statistical properties are satisfied by the
    #results, not verified results against other packages
    #Note: the ranking of robust bse is different if within=True
    #I added within keyword to PanelSample to be able to use old example
    #if within is False, then there is no within group variation in exog.
    nobs = 100
    nobs_i = 5
    n_groups = nobs // nobs_i
    k_vars = 3
    # Simulate a balanced panel whose errors follow an ARMA correlation
    # structure (here AR(1) with rho=0.9) within each group.
    dgp = PanelSample(nobs, k_vars, n_groups, corr_structure=cs.corr_arma,
                      corr_args=([1], [1., -0.9],), seed=377769, within=False)
    #print 'seed', dgp.seed
    y = dgp.generate_panel()
    noise = y - dgp.y_true
    #test dgp
    # Expected within-group correlation matrix for rho=0.9 AR(1).
    dgp_cov_e = np.array(
        [[ 1.    ,  0.9   ,  0.81  ,  0.729 ,  0.6561],
         [ 0.9   ,  1.    ,  0.9   ,  0.81  ,  0.729 ],
         [ 0.81  ,  0.9   ,  1.    ,  0.9   ,  0.81  ],
         [ 0.729 ,  0.81  ,  0.9   ,  1.    ,  0.9   ],
         [ 0.6561,  0.729 ,  0.81  ,  0.9   ,  1.    ]])
    npt.assert_almost_equal(dgp.cov, dgp_cov_e, 13)
    cov_noise = np.cov(noise.reshape(-1,n_groups, order='F'))
    corr_noise = cov2corr(cov_noise)
    npt.assert_almost_equal(corr_noise, dgp.cov, 1)
    #estimate panel model
    mod2 = ShortPanelGLS(y, dgp.exog, dgp.groups)
    res2 = mod2.fit_iterative(2)
    #whitened residual should be uncorrelated
    corr_wresid = np.corrcoef(res2.wresid.reshape(-1,n_groups, order='F'))
    assert_maxabs(corr_wresid, np.eye(5), 0.1)
    #residual should have same correlation as dgp
    corr_resid = np.corrcoef(res2.resid.reshape(-1,n_groups, order='F'))
    assert_maxabs(corr_resid, dgp.cov, 0.1)
    assert_almost_equal(res2.resid.std(),1, decimal=0)
    y_pred = np.dot(mod2.exog, res2.params)
    assert_almost_equal(res2.fittedvalues, y_pred, 13)
    #compare with OLS
    res2_ols = mod2._fit_ols()
    npt.assert_(mod2.res_pooled is res2_ols)
    res2_ols = mod2.res_pooled #TODO: BUG: requires call to _fit_ols
    #fitting once is the same as OLS
    #note: I need to create new instance, otherwise it continuous fitting
    mod1 = ShortPanelGLS(y, dgp.exog, dgp.groups)
    res1 = mod1.fit_iterative(1)
    assert_almost_equal(res1.params, res2_ols.params, decimal=13)
    assert_almost_equal(res1.bse, res2_ols.bse, decimal=13)
    res_ols = OLS(y, dgp.exog).fit()
    assert_almost_equal(res1.params, res_ols.params, decimal=13)
    assert_almost_equal(res1.bse, res_ols.bse, decimal=13)
    #compare with old version
    mod_old = ShortPanelGLS2(y, dgp.exog, dgp.groups)
    res_old = mod_old.fit()
    assert_almost_equal(res2.params, res_old.params, decimal=13)
    assert_almost_equal(res2.bse, res_old.bse, decimal=13)
    mod5 = ShortPanelGLS(y, dgp.exog, dgp.groups)
    res5 = mod5.fit_iterative(5)
    #make sure it's different
    #npt.assert_array_less(0.009, em.maxabs(res5.bse, res2.bse))
    # Compare several robust covariance estimators against their mean.
    cov_clu = sw.cov_cluster(mod2.res_pooled, dgp.groups.astype(int))
    clubse = se_cov(cov_clu)
    pnwbse = se_cov(sw.cov_nw_panel(mod2.res_pooled, 4, mod2.group.groupidx))
    bser = np.vstack((res2.bse, res5.bse, clubse, pnwbse))
    bser_mean = np.mean(bser, axis=0)
    #cov_cluster close to robust and PanelGLS
    #is up to 24% larger than mean of bser
    #npt.assert_array_less(0, clubse / bser_mean - 1)
    npt.assert_array_less(clubse / bser_mean - 1, 0.25)
    #cov_nw_panel close to robust and PanelGLS
    npt.assert_array_less(pnwbse / bser_mean - 1, 0.1)
    #OLS underestimates bse, robust at least 60% larger
    npt.assert_array_less(0.6, bser_mean / res_ols.bse - 1)
    #cov_hac_panel with uniform_kernel is the same as cov_cluster for balanced
    #panel with full length kernel
    #I fixe default correction to be equal
    cov_uni = sw.cov_nw_panel(mod2.res_pooled, 4, mod2.group.groupidx,
                              weights_func=sw.weights_uniform,
                              use_correction='c')
    assert_almost_equal(cov_uni, cov_clu, decimal=13)
    #without correction
    cov_clu2 = sw.cov_cluster(mod2.res_pooled, dgp.groups.astype(int),
                              use_correction=False)
    cov_uni2 = sw.cov_nw_panel(mod2.res_pooled, 4, mod2.group.groupidx,
                               weights_func=sw.weights_uniform,
                               use_correction=False)
    assert_almost_equal(cov_uni2, cov_clu2, decimal=13)
    cov_white = sw.cov_white_simple(mod2.res_pooled)
    cov_pnw0 = sw.cov_nw_panel(mod2.res_pooled, 0, mod2.group.groupidx,
                               use_correction='hac')
    assert_almost_equal(cov_pnw0, cov_white, decimal=13)
| DonBeo/statsmodels | statsmodels/sandbox/panel/tests/test_random_panel.py | Python | bsd-3-clause | 5,437 |
# -*- coding: utf-8 -*-
"""Utility classes and values used for marshalling and unmarshalling objects to
and from primitive types.
.. warning::
This module is treated as private API.
Users should not need to use this module directly.
"""
from __future__ import unicode_literals
from marshmallow.utils import missing
from marshmallow.compat import text_type, iteritems
from marshmallow.exceptions import (
ValidationError,
)
__all__ = [
'Marshaller',
'Unmarshaller',
]
class ErrorStore(object):
    """Shared bookkeeping for validation errors collected while
    marshalling or unmarshalling."""

    def __init__(self):
        #: Dictionary of errors stored during serialization
        self.errors = {}
        #: List of field_names which have validation errors
        self.error_field_names = []
        #: List of `Field` objects which have validation errors
        self.error_fields = []
        #: True while (de)serializing a collection
        self._pending = False

    def reset_errors(self):
        """Forget every error recorded so far."""
        self.errors = {}
        self.error_field_names = []
        self.error_fields = []

    def get_errors(self, index=None):
        """Return the error dict, or the per-item error dict for ``index``
        when validating a collection (creating it on first access)."""
        if index is None:
            return self.errors
        return self.errors.setdefault(index, {})

    def call_and_store(self, getter_func, data, field_name, field_obj, index=None):
        """Call ``getter_func`` with ``data`` as its argument, and store any `ValidationErrors`.

        :param callable getter_func: Function for getting the serialized/deserialized
            value from ``data``.
        :param data: The data passed to ``getter_func``.
        :param str field_name: Field name.
        :param FieldABC field_obj: Field object that performs the
            serialization/deserialization behavior.
        :param int index: Index of the item being validated, if validating a collection,
            otherwise `None`.
        """
        try:
            return getter_func(data)
        except ValidationError as err:  # Store validation errors
            self.error_fields.append(field_obj)
            self.error_field_names.append(field_name)
            bucket = self.get_errors(index=index)
            # Warning: mutates the stored error dict in place.
            if isinstance(err.messages, dict):
                bucket[field_name] = err.messages
            else:
                bucket.setdefault(field_name, []).extend(err.messages)
            # When a Nested field fails validation, the marshalled data is
            # stored on the ValidationError's data attribute.
            return err.data or missing
class Marshaller(ErrorStore):
    """Callable class responsible for serializing data and storing errors.

    :param str prefix: Optional prefix that will be prepended to all the
        serialized field names.
    """
    def __init__(self, prefix=''):
        # Prefix is prepended to every output key in `serialize`.
        self.prefix = prefix
        ErrorStore.__init__(self)
    def serialize(self, obj, fields_dict, many=False, strict=False,
                  accessor=None, dict_class=dict, index_errors=True, index=None):
        """Takes raw data (a dict, list, or other object) and a dict of
        fields to output and serializes the data based on those fields.

        :param obj: The actual object(s) from which the fields are taken from
        :param dict fields_dict: Mapping of field names to :class:`Field` objects.
        :param bool many: Set to `True` if ``data`` should be serialized as
            a collection.
        :param bool strict: If `True`, raise errors if invalid data are passed in
            instead of failing silently and storing the errors.
        :param callable accessor: Function to use for getting values from ``obj``.
        :param type dict_class: Dictionary class used to construct the output.
        :param bool index_errors: Whether to store the index of invalid items in
            ``self.errors`` when ``many=True``.
        :param int index: Index of the item being serialized (for storing errors) if
            serializing a collection, otherwise `None`.
        :return: A dictionary of the marshalled data

        .. versionchanged:: 1.0.0
            Renamed from ``marshal``.
        """
        # Reset errors dict if not serializing a collection
        if not self._pending:
            self.reset_errors()
        if many and obj is not None:
            # _pending keeps per-item recursive calls from resetting the
            # accumulated errors.
            self._pending = True
            ret = [self.serialize(d, fields_dict, many=False, strict=strict,
                                  dict_class=dict_class, accessor=accessor,
                                  index=idx, index_errors=index_errors)
                   for idx, d in enumerate(obj)]
            self._pending = False
            return ret
        items = []
        for attr_name, field_obj in iteritems(fields_dict):
            # load_only fields are deserialization-only; skip them on dump.
            if getattr(field_obj, 'load_only', False):
                continue
            if not self.prefix:
                key = attr_name
            else:
                key = ''.join([self.prefix, attr_name])
            # Late binding of attr_name/field_obj is safe: the lambda is
            # consumed by call_and_store before the next loop iteration.
            getter = lambda d: field_obj.serialize(attr_name, d, accessor=accessor)
            value = self.call_and_store(
                getter_func=getter,
                data=obj,
                field_name=key,
                field_obj=field_obj,
                index=(index if index_errors else None)
            )
            # `missing` values are omitted from the output entirely.
            if value is missing:
                continue
            items.append((key, value))
        if self.errors and strict:
            raise ValidationError(
                self.errors,
                field_names=self.error_field_names,
                fields=self.error_fields
            )
        return dict_class(items)
    # Make an instance callable
    __call__ = serialize
# Key used for schema-level validation errors
SCHEMA = '_schema'
class Unmarshaller(ErrorStore):
    """Callable class responsible for deserializing data and storing errors.

    .. versionadded:: 1.0.0
    """
    # Message used when a schema-level validator returns False without
    # raising its own ValidationError.
    default_schema_validation_error = 'Invalid data.'
    def _run_validator(self, validator_func, output,
                       original_data, fields_dict, index=None,
                       strict=False, many=False, pass_original=False):
        # Run one schema-level validator against the deserialized `output`,
        # merging any resulting messages into the error store (or re-raising
        # when `strict`).
        try:
            if pass_original:  # Pass original, raw data (before unmarshalling)
                res = validator_func(output, original_data)
            else:
                res = validator_func(output)
            if res is False:
                raise ValidationError(self.default_schema_validation_error)
        except ValidationError as err:
            errors = self.get_errors(index=index)
            # Store or reraise errors
            if err.field_names:
                # The validator targeted specific fields; look up their
                # Field objects where possible.
                field_names = err.field_names
                field_objs = [fields_dict[each] if each in fields_dict else None
                              for each in field_names]
            else:
                # No field names: record under the schema-level key.
                field_names = [SCHEMA]
                field_objs = []
            for field_name in field_names:
                if isinstance(err.messages, (list, tuple)):
                    # self.errors[field_name] may be a dict if schemas are nested
                    if isinstance(errors.get(field_name), dict):
                        errors[field_name].setdefault(
                            SCHEMA, []
                        ).extend(err.messages)
                    else:
                        errors.setdefault(field_name, []).extend(err.messages)
                elif isinstance(err.messages, dict):
                    errors.setdefault(field_name, []).append(err.messages)
                else:
                    errors.setdefault(field_name, []).append(text_type(err))
            if strict:
                raise ValidationError(
                    self.errors,
                    fields=field_objs,
                    field_names=field_names
                )
    def deserialize(self, data, fields_dict, many=False, strict=False,
                    dict_class=dict, index_errors=True, index=None):
        """Deserialize ``data`` based on the schema defined by ``fields_dict``.

        :param dict data: The data to deserialize.
        :param dict fields_dict: Mapping of field names to :class:`Field` objects.
        :param bool many: Set to `True` if ``data`` should be deserialized as
            a collection.
        :param bool strict: If `True`, raise errors if invalid data are passed in
            instead of failing silently and storing the errors.
        :param type dict_class: Dictionary class used to construct the output.
        :param bool index_errors: Whether to store the index of invalid items in
            ``self.errors`` when ``many=True``.
        :param int index: Index of the item being serialized (for storing errors) if
            serializing a collection, otherwise `None`.
        :return: A dictionary of the deserialized data.
        """
        # Reset errors if not deserializing a collection
        if not self._pending:
            self.reset_errors()
        if many and data is not None:
            # _pending keeps per-item recursive calls from resetting the
            # accumulated errors.
            self._pending = True
            ret = [self.deserialize(d, fields_dict, many=False,
                                    strict=strict, dict_class=dict_class,
                                    index=idx, index_errors=index_errors)
                   for idx, d in enumerate(data)]
            self._pending = False
            return ret
        if data is not None:
            items = []
            for attr_name, field_obj in iteritems(fields_dict):
                # dump_only fields are serialization-only; skip them on load.
                if field_obj.dump_only:
                    continue
                try:
                    raw_value = data.get(attr_name, missing)
                except AttributeError:  # Input data is not a dict
                    errors = self.get_errors(index=index)
                    msg = field_obj.error_messages['type'].format(
                        input=data, input_type=data.__class__.__name__
                    )
                    if strict:
                        raise ValidationError(
                            msg,
                            field_names=[SCHEMA],
                            fields=[]
                        )
                    else:
                        errors = self.get_errors()
                        errors.setdefault(SCHEMA, []).append(msg)
                    # Input data type is incorrect, so we can bail out early
                    break
                field_name = attr_name
                # Fall back to the field's alternate input key (load_from)
                # when the attribute name itself is absent.
                if raw_value is missing and field_obj.load_from:
                    field_name = field_obj.load_from
                    raw_value = data.get(field_obj.load_from, missing)
                if raw_value is missing:
                    # `missing` may be a callable producing the default.
                    _miss = field_obj.missing
                    raw_value = _miss() if callable(_miss) else _miss
                if raw_value is missing and not field_obj.required:
                    continue
                # Late binding of field_obj/attr_name is safe: the lambda is
                # consumed by call_and_store before the next loop iteration.
                getter = lambda val: field_obj.deserialize(
                    val,
                    field_obj.load_from or attr_name,
                    data
                )
                value = self.call_and_store(
                    getter_func=getter,
                    data=raw_value,
                    field_name=field_name,
                    field_obj=field_obj,
                    index=(index if index_errors else None)
                )
                if value is not missing:
                    # Store under the field's attribute name when one is set.
                    key = fields_dict[attr_name].attribute or attr_name
                    items.append((key, value))
            ret = dict_class(items)
        else:
            ret = None
        if self.errors and strict:
            raise ValidationError(
                self.errors,
                field_names=self.error_field_names,
                fields=self.error_fields
            )
        return ret
    # Make an instance callable
    __call__ = deserialize
| Bachmann1234/marshmallow | marshmallow/marshalling.py | Python | mit | 11,826 |
from blinker import Namespace
namespace = Namespace()
#: Triggered when a site's metrics job is done.
on_site_metrics_computed = namespace.signal('on-site-metrics-computed')
| etalab/udata | udata/core/metrics/signals.py | Python | agpl-3.0 | 176 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from email.parser import Parser
from email.header import decode_header
from email.utils import parseaddr
import poplib
# Prompt for the mailbox address, password and POP3 server:
email = input('Email: ')
password = input('Password: ')
pop3_server = input('POP3 server: ')
def guess_charset(msg):
    """Best-effort detection of the charset of an email message part.

    Uses the message's own charset when available, otherwise parses the
    ``charset=`` parameter out of the ``Content-Type`` header.  Returns
    ``None`` when no charset can be determined.
    """
    charset = msg.get_charset()
    if charset is None:
        content_type = msg.get('Content-Type', '').lower()
        pos = content_type.find('charset=')
        if pos >= 0:
            # Bug fix: take only the parameter value.  The original kept
            # everything after "charset=", so trailing parameters such as
            # "; format=flowed" (and surrounding quotes) leaked into the
            # returned charset name.
            charset = content_type[pos + 8:].split(';')[0].strip().strip('"\'')
    return charset
def decode_str(s):
    """Decode a (possibly MIME-encoded) header fragment to a str."""
    # decode_header returns (value, charset) pairs; only the first part
    # is used here.  charset is None for plain ASCII fragments.
    fragment, encoding = decode_header(s)[0]
    return fragment.decode(encoding) if encoding else fragment
def print_info(msg, indent=0):
    """Recursively print the headers and bodies of a (possibly multipart) message.

    `indent` tracks the nesting depth; headers are printed only at the
    top level (indent == 0).
    """
    if indent == 0:
        for header in ['From', 'To', 'Subject']:
            value = msg.get(header, '')
            if value:
                if header == 'Subject':
                    # The subject may be MIME-encoded as a whole.
                    value = decode_str(value)
                else:
                    # From/To: decode the display name, keep the raw address.
                    hdr, addr = parseaddr(value)
                    name = decode_str(hdr)
                    value = u'%s <%s>' % (name, addr)
                print('%s%s: %s' % ('  ' * indent, header, value))
    if (msg.is_multipart()):
        # Multipart: recurse into each sub-part with increased indent.
        parts = msg.get_payload()
        for n, part in enumerate(parts):
            print('%spart %s' % ('  ' * indent, n))
            print('%s--------------------' % ('  ' * indent))
            print_info(part, indent + 1)
    else:
        content_type = msg.get_content_type()
        if content_type == 'text/plain' or content_type == 'text/html':
            content = msg.get_payload(decode=True)
            # Payload bytes are decoded with the detected charset, if any.
            charset = guess_charset(msg)
            if charset:
                content = content.decode(charset)
            print('%sText: %s' % ('  ' * indent, content + '...'))
        else:
            print('%sAttachment: %s' % ('  ' * indent, content_type))
# Connect to the POP3 server:
server = poplib.POP3(pop3_server)
# Debug output can be toggled on or off:
server.set_debuglevel(1)
# Optional: print the POP3 server's welcome banner:
print(server.getwelcome().decode('utf-8'))
# Authenticate:
server.user(email)
server.pass_(password)
# stat() returns the message count and mailbox size:
print('Messages: %s. Size: %s' % server.stat())
# list() returns the numbers of all messages:
resp, mails, octets = server.list()
# The returned list looks like [b'1 82923', b'2 2184', ...]
print(mails)
# Fetch the newest message; note that indexes start at 1:
index = len(mails)
resp, lines, octets = server.retr(index)
# lines holds each line of the message's raw text,
# so joining them yields the whole raw message:
msg_content = b'\r\n'.join(lines).decode('utf-8')
# Then parse the raw text into a message object:
msg = Parser().parsestr(msg_content)
print_info(msg)
# Messages can be deleted from the server directly by index:
# server.dele(index)
# Close the connection:
server.quit()
| whyDK37/py_bootstrap | samples/mail/fetch_mail.py | Python | apache-2.0 | 2,870 |
# coding: utf-8
import os
import json
from irc.client import NickMask
def on_module_loaded(self):
    """Initialise persistent storage and describe the .lewd command."""
    # Make sure the per-nick lewdness counters exist across module reloads.
    self.data.setdefault("lewd", {})
    return {
        "lewd": {
            "description": "Give people 'lewd' points or display yours.",
            "syntax": ".lewd [nick]"
        }
    }
def on_pubmsg( self, c, e ):
    # Delegate public channel messages to the shared command handler,
    # replying into the channel the message came from.
    do_command( self, e, e.target )
def do_command(self, e, target):
    """Handle the .lewd command.

    ".lewd <nick>" increments <nick>'s lewdness counter (with a special
    case for "obama"); ".lewd" alone, or ".lewd <own nick>", displays the
    sender's / nick's current lewdness.  Returns True when the message was
    a .lewd command, None otherwise.
    """
    arg = e.arguments[0]
    argSplit = arg.split()
    if argSplit[0] == ".lewd":
        # Bug fix: the original evaluated argSplit[1] before checking for
        # an empty argument and crashed with IndexError on a bare ".lewd";
        # fall back to the sender's own nick when no argument is given.
        nick = argSplit[1] if len(argSplit) > 1 else e.source.nick
        if self.hasPermission(e.source.nick, e.target, 10):
            if nick == e.source.nick:
                # Display the nick's current lewdness (or declare purity).
                if nick in self.data["lewd"]:
                    self.privmsg(target, "Lewdness for %s: %s" % (nick, self.data["lewd"][nick]))
                else:
                    self.privmsg(target, nick + " is 100% pure.")
            elif nick.lower() == "obama":
                self.connection.kick(target, e.source.nick, "THANKS OBAMA")
            else:
                # Increment and persist the target's lewdness counter.
                c_lewd = self.data["lewd"].get(nick, 0) + 1
                self.data["lewd"][nick] = c_lewd
                self.saveData()
                self.privmsg(target, "Lewdness for %s: %s" % (nick, c_lewd))
        return True
| Spacedude/Py_SpaceBotIRC | modules/lewd.py | Python | gpl-3.0 | 1,288 |
"""
Virtualization installation functions.
Copyright 2007-2008 Red Hat, Inc.
Michael DeHaan <mdehaan@redhat.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
"""
import os, sys, time, stat
import tempfile
import random
from optparse import OptionParser
import exceptions
import errno
import re
import virtinst
IMAGE_DIR = "/var/lib/vmware/images"
VMX_DIR = "/var/lib/vmware/vmx"
# FIXME: what to put for guestOS
# FIXME: are other settings ok?
TEMPLATE = """
#!/usr/bin/vmware
config.version = "8"
virtualHW.version = "4"
numvcpus = "2"
scsi0.present = "TRUE"
scsi0.virtualDev = "lsilogic"
scsi0:0.present = "TRUE"
scsi0:0.writeThrough = "TRUE"
ide1:0.present = "TRUE"
ide1:0.deviceType = "cdrom-image"
Ethernet0.present = "TRUE"
Ethernet0.AddressType = "static"
Ethernet0.Address = "%(MAC_ADDRESS)s"
Ethernet0.virtualDev = "e1000"
guestOS = "linux"
priority.grabbed = "normal"
priority.ungrabbed = "normal"
powerType.powerOff = "hard"
powerType.powerOn = "hard"
powerType.suspend = "hard"
powerType.reset = "hard"
floppy0.present = "FALSE"
scsi0:0.filename = "%(VMDK_IMAGE)s"
displayName = "%(IMAGE_NAME)s"
memsize = "%(MEMORY)s"
"""
#ide1:0.filename = "%(PATH_TO_ISO)s"
class VirtCreateException(exceptions.Exception):
    """Raised when any of the vmware-* helper commands fails."""
    pass
def random_mac():
    """Generate a random MAC address string in the Xensource OUI (00:16:3e).

    Adapted from xend/server/netif.py.  The first random octet is limited
    to 0x00-0x7f; the remaining two span the full byte range.

    FIXME: if VMware has their own range, adapt to that range

    return: MAC address string
    """
    octets = [0x00, 0x16, 0x3e,
              random.randint(0x00, 0x7f),
              random.randint(0x00, 0xff),
              random.randint(0x00, 0xff)]
    return ':'.join("%02x" % octet for octet in octets)
def make_disk(disksize,image):
    # Create a sparse, lsilogic-adapter VMDK of `disksize` GB at `image`
    # by shelling out to vmware-vdiskmanager; raises on non-zero exit.
    cmd = "vmware-vdiskmanager -c -a lsilogic -s %sGb -t 0 %s" % (disksize, image)
    print "- %s" % cmd
    rc = os.system(cmd)
    if rc != 0:
        raise VirtCreateException("command failed")
def make_vmx(path,vmdk_image,image_name,mac_address,memory):
    """Render the module-level VMX TEMPLATE with the guest's disk image,
    display name, (lower-cased) MAC address and memory size, and write it
    to `path`."""
    template_params = {
        "VMDK_IMAGE" : vmdk_image,
        "IMAGE_NAME" : image_name,
        "MAC_ADDRESS" : mac_address.lower(),
        "MEMORY" : memory
    }
    templated = TEMPLATE % template_params
    # Use a context manager so the file handle is closed even when the
    # write fails (the original leaked the handle on error).
    with open(path, "w+") as fd:
        fd.write(templated)
def register_vmx(vmx_file):
    # Register the guest's .vmx file with VMware via vmware-cmd so it is
    # known to the host; raises on non-zero exit.
    cmd = "vmware-cmd -s register %s" % vmx_file
    print "- %s" % cmd
    rc = os.system(cmd)
    if rc!=0:
        raise VirtCreateException("vmware registration failed")
def start_vm(vmx_file):
    # The vmx file must be executable for VMware to power the guest on.
    os.chmod(vmx_file,0755)
    cmd = "vmware-cmd %s start" % vmx_file
    print "- %s" % cmd
    rc = os.system(cmd)
    if rc != 0:
        raise VirtCreateException("vm start failed")
def start_install(name=None, ram=None, disks=None, mac=None,
uuid=None,
extra=None,
vcpus=None,
profile_data=None, bridge=None, arch=None, no_gfx=False, fullvirt=True, bridge=None):
if profile_data.has_key("file"):
raise koan.InfoException("vmware does not work with --image yet")
mac = None
if not profile_data.has_key("interfaces"):
print "- vmware installation requires a system, not a profile"
return 1
for iname in profile_data["interfaces"]:
intf = profile_data["interfaces"][iname]
mac = intf["mac_address"]
if mac is None:
print "- no MAC information available in this record, cannot install"
return 1
print "DEBUG: name=%s" % name
print "DEBUG: ram=%s" % ram
print "DEBUG: mac=%s" % mac
print "DEBUG: disks=%s" % disks
# starts vmware using PXE. disk/mem info come from Cobbler
# rest of the data comes from PXE which is also intended
# to be managed by Cobbler.
if not os.path.exists(IMAGE_DIR):
os.makedirs(IMAGE_DIR)
if not os.path.exists(VMX_DIR):
os.makedirs(VMX_DIR)
if len(disks) != 1:
raise VirtCreateException("vmware support is limited to 1 virtual disk")
diskname = disks[0][0]
disksize = disks[0][1]
image = "%s/%s" % (IMAGE_DIR, name)
print "- saving virt disk image as %s" % image
make_disk(disksize,image)
vmx = "%s/%s" % (VMX_DIR, name)
print "- saving vmx file as %s" % vmx
make_vmx(vmx,image,name,mac,ram)
register_vmx(vmx)
start_vm(vmx)
| charles-dyfis-net/koan | koan/vmwcreate.py | Python | gpl-2.0 | 4,966 |
# -*- coding: utf-8 -*-
import math
# import warnings
import logging
import collections
from ..patch.pint import ureg
from ..math import linop
from ..math import fit1d
from ..utils import units
from ..materials import compoundfromformula
from ..materials import compoundfromname
from ..materials import multilayer
from ..materials import element
from ..resources import resource_filename
from ..utils import constants
from ..math.utils import round_sig
from ..geometries import diode as diodegeometries
from ..optics import xray as xrayoptics
from ..sources import xray as xraysources
from ..simulation.classfactory import with_metaclass
from ..math import noisepropagation
from ..utils import instance
from . import base
from ..utils import lut
from ..materials.utils import reshape_spectrum_lines
import numpy as np
import silx.math.fit as fit
import scipy.interpolate
import matplotlib.pyplot as plt
import scipy.optimize
from pint import errors as pinterrors
from collections import Counter
logger = logging.getLogger(__name__)
#
#
# Unit of elementary charge: 1 e = 1.6e-19 C
# Electronvolt: 1 eV = 1.6e-19 J
# 1 A/W = 1 C/J = 1.6e-19 C/eV = 1 e/eV
#
#
# Oscillator: current (A) to counts/sec (Hz)
# I(Hz) = Fmax(Hz)/Vmax(V) . Vmax(V)/Imax(A) . I(A) + F0(Hz)
# SXM: Vmax/Imax set
# MICRODIFF: Imax set
#
# PNdiode: flux (ph/s) to current(A)
# I(A) = I(ph/s).E(eV/ph).1(e).(1-T)/Ehole(eV) + D(A)
# = P(W).1(e).(1-T)/Ehole(eV) + D(A)
# = P(W).R(e/eV) + D(A)
# T: transmission of the diode
# P: radiative power
# e: elementary charge
# R: spectral responsivity
#
# Sample flux:
# Is(ph/s) = I0(ph/s).Ts
# I(ph/s) = I0(ph/s).Y = Is(ph/s).Y/Ts
# I: flux seen by the diode
# I0: the flux of the source
# Is: flux seen by the sample
# Y: yield of secondary target (includes solid angle of detector but not attenuation)
# Ts: transmission of the source lines (optics,diode/target,filters)
#
# For multiple source lines (i) and multiple secondary lines (j):
# Iis(ph/s) = I0(ph/s).wi.Tis
# Is(ph/s) = sum_i[Iis] = I0(ph/s) . sum_i [wi.Tis]
# Iij(ph/s) = I0(ph/s).wi.Yij = Is(ph/s).wi.Yij/sum_i[wi.Tis]
#
# I(A) = SUM_ij [Iij(ph/s).Ej(eV/ph).1(e).(1-Tj)/Ehole(eV)] + D(A)
# = SUM_ij [Iij(ph/s).Cj] + D(A)
# = Is(ph/s) . SUM_i [SUM_j[wi.Yij.Cj]] / SUM_k [wk.Tks] + D(A)
# = Is(ph/s) . Cs + D(A)
#
# wi: source line fraction
# Cj (C/ph): charge per photon hitting the diode
# Cs (C/ph): charge per photon hitting the sample
#
# The source fraction at the sample position:
# wis = Iis/sum_i[Iis] = wi.Tis / sum_i[wi.Tis]
#
# An absolute diode measures both I(A) and P(W) so that
# another diode which only measures I(A) can be calibrated:
# I_abs(A) = P(W).R_abs(e/eV) + D_abs(A)
# I(A) = P(W).FACT.R_abs(e/eV) + D(A)
#
# FACT = R(e/eV)/R_abs(e/eV)
# = [I(A)-D(A)]/[I_abs(A)-D_abs(A)]
# = (1-T)/Ehole(eV) . Ehole_abs(eV)/(1-T_abs)
#
class GainRounder(object):
    """Round a gain quantity to the nearest discrete ammeter setting
    of the form ``m * base**k`` (either factor optional)."""

    def __init__(self, m=None, base=None):
        self.m = m
        self.base = base

    def __getstate__(self):
        return {"m": self.m, "base": self.base}

    def __setstate__(self, state):
        self.m = state["m"]
        self.base = state["base"]

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.m == other.m and self.base == other.base

    def __ne__(self, other):
        return not self.__eq__(other)

    def __call__(self, x):
        """Return ``x`` snapped to the closest allowed setting (same units)."""
        if self.m is None and self.base is None:
            return x
        value = x.magnitude
        original_units = x.units
        if self.m is not None:
            value = value / self.m
        if self.base is not None:
            value = math.log(value, self.base)
        value = int(round(value))
        if self.base is not None:
            value = self.base ** value
        if self.m is not None:
            value = self.m * value
        return units.Quantity(value, original_units)
class Oscillator(object):
    """Voltage-to-frequency convertor: F(Hz) = Fmax/Vmax * V + F0."""

    def __init__(self, Fmax=None, F0=None, Vmax=None):
        """
        Args:
            Fmax(num): maximal frequency (Hz)
            F0(num): frequency offset (Hz)
            Vmax(num): voltage corresponding to Fmax (V)
        """
        self.Fmax = Fmax
        self.F0 = F0
        self.Vmax = Vmax

    def __getstate__(self):
        return {"Fmax": self.Fmax, "F0": self.F0, "Vmax": self.Vmax}

    def __setstate__(self, state):
        self.Fmax = state["Fmax"]
        self.F0 = state["F0"]
        self.Vmax = state["Vmax"]

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return (
            self.Fmax == other.Fmax
            and self.F0 == other.F0
            and self.Vmax == other.Vmax
        )

    def __ne__(self, other):
        return not self.__eq__(other)

    @property
    def Fmax(self):
        return self._Fmax

    @Fmax.setter
    def Fmax(self, value):
        # Normalize to a pint quantity in hertz
        self._Fmax = units.Quantity(value, "hertz").to("hertz")

    @property
    def F0(self):
        return self._F0

    @F0.setter
    def F0(self, value):
        self._F0 = units.Quantity(value, "hertz").to("hertz")

    @property
    def Vmax(self):
        return self._Vmax

    @Vmax.setter
    def Vmax(self, value):
        self._Vmax = units.Quantity(value, "volt").to("volt")

    def tojson(self):
        return {
            "classname": self.__class__.__name__,
            "kwargs": {"Fmax": self.Fmax, "F0": self.F0, "Vmax": self.Vmax},
        }

    def __str__(self):
        return "y hertz = {:~e}/{:~} * x V + {:~}".format(self.Fmax, self.Vmax, self.F0)

    def op_voltagetocps(self):
        """Operator converting voltage (V) to counts-per-second (Hz).

        Returns:
            linop.LinearOperator: slope Fmax/Vmax (cps/V), intercept F0 (cps)
        """
        return linop.LinearOperator(self.Fmax / self.Vmax, self.F0)

    def op_cpstovoltage(self):
        """Operator converting counts-per-second (Hz) to voltage (V).

        Returns:
            linop.LinearOperator: slope (V/cps), intercept (V)
        """
        return self.op_voltagetocps().inverse
class PNdiode(with_metaclass(base.SolidState)):
ELCHARGE = ureg.Quantity(1, ureg.elementary_charge)
# ureg.Quantity(1.6e-19,ureg.coulomb) # approximation used in spec
    def __init__(
        self,
        gain=None,
        gainrounder=None,
        darkcurrent=None,
        oscillator=None,
        secondarytarget=None,
        simplecalibration=False,
        optics=None,
        Vmax=None,
        beforesample=None,
        **kwargs
    ):
        """
        Args:
            gain(num): amplifier gain, either a resistance (V/A, default)
                or a maximal current (A)
            gainrounder(callable): maps a requested gain to the nearest
                discrete setting of the ammeter
            darkcurrent(num): C/s
            oscillator(Oscillator): voltage-to-frequency convertor
            secondarytarget(multilayer): optional secondary target
            simplecalibration(bool): use the charge-per-sample-photon LUT
                instead of the full physical model
            optics: optical element(s) in the beam path (device or name)
            Vmax: maximal output voltage; defaults to the oscillator's Vmax
            beforesample(bool): diode is positioned upstream of the sample
        """
        if Vmax is None and oscillator is not None:
            Vmax = oscillator.Vmax
        self.Vmax = Vmax
        self.oscillator = oscillator
        # gainrounder must be assigned before gain: the gain setter
        # rounds through self.gainrounder
        self.gainrounder = gainrounder
        self.gain = gain
        self.darkcurrent = darkcurrent
        self.beforesample = beforesample
        self.optics = optics
        self.secondarytarget = secondarytarget
        self.simplecalibration = simplecalibration
        self._lut_chargepersamplephoton = lut.LUT()
        super(PNdiode, self).__init__(**kwargs)
def __getstate__(self):
state = super(PNdiode, self).__getstate__()
state["oscillator"] = self.oscillator
state["Vmax"] = self.Vmax
state["gainrounder"] = self.gainrounder
state["gain"] = self.gain
state["darkcurrent"] = self.darkcurrent
state["beforesample"] = self.beforesample
state["optics"] = self.optics
if self.secondarytarget is not None:
state["secondarytarget"] = self.secondarytarget
state["simplecalibration"] = self.simplecalibration
state["_lut_chargepersamplephoton"] = self._lut_chargepersamplephoton
return state
def __setstate__(self, state):
super(PNdiode, self).__setstate__(state)
self.oscillator = state["oscillator"]
self.Vmax = state["Vmax"]
self.gainrounder = state["gainrounder"]
self.gain = state["gain"]
self.darkcurrent = state["darkcurrent"]
self.beforesample = state["beforesample"]
self.optics = state["optics"]
self.secondarytarget = state.get("secondarytarget", None)
self.simplecalibration = state["simplecalibration"]
self._lut_chargepersamplephoton = state["_lut_chargepersamplephoton"]
def __eq__(self, other):
if isinstance(other, self.__class__):
if not (super(PNdiode, self).__eq__(other)):
return False
if not self._eq_secondarytarget(other):
return False
return (
self.oscillator == other.oscillator
and self.Vmax == other.Vmax
and self.gainrounder == other.gainrounder
and self.gain == other.gain
and self.darkcurrent == other.darkcurrent
and self.beforesample == other.beforesample
and self.optics == other.optics
and self.simplecalibration == other.simplecalibration
and self._lut_chargepersamplephoton == other._lut_chargepersamplephoton
)
else:
return False
    def _eq_secondarytarget(self, other):
        """Compare secondary targets, temporarily breaking the circular
        target.geometry.detector -> diode reference so comparison terminates.

        Returns:
            bool
        """
        # Exactly one of the two targets is None -> not equal
        if (self.secondarytarget is None) ^ (other.secondarytarget is None):
            return False
        if self.secondarytarget is not None:
            # Circular reference:
            gself = self.secondarytarget.geometry.detector
            gother = other.secondarytarget.geometry.detector
            self.secondarytarget.geometry.detector = None
            other.secondarytarget.geometry.detector = None
            eqtarget = self.secondarytarget == other.secondarytarget
            # NOTE(review): no try/finally — an exception inside the
            # comparison would leave both detector references cleared
            self.secondarytarget.geometry.detector = gself
            other.secondarytarget.geometry.detector = gother
            return eqtarget
        return True
@property
def optics(self):
return self._optics
@optics.setter
def optics(self, optics):
if optics:
if not instance.isarray(optics):
optics = [optics]
def proc(dev):
if instance.isstring(dev):
return xrayoptics.factory(dev)
else:
return dev
optics = [proc(dev) for dev in optics]
else:
optics = []
self._optics = optics
@property
def caliboptic(self):
for dev in self.optics:
if dev.uselut:
return dev
raise RuntimeError("No optics with calibratable transmission (with LUT)")
@property
def simplecalibration(self):
return self._simplecalibration and not self._lut_chargepersamplephoton.isempty()
@simplecalibration.setter
def simplecalibration(self, value):
self._simplecalibration = value
@property
def gainrounder(self):
return self._gainrounder
@gainrounder.setter
def gainrounder(self, value):
if value is None:
value = GainRounder()
self._gainrounder = value
    @property
    def Rout(self):
        """Output resistance of the ammeter (V/A)"""
        try:
            return self.gain.to("V/A")
        except pinterrors.DimensionalityError:
            # Gain is stored as a maximal current (A): Rout = Vmax/Imax
            return self.Vmax / self.Imax

    @Rout.setter
    def Rout(self, value):
        # Store in whatever unit the gain is currently expressed in
        # (resistance V/A or maximal current A)
        value = units.Quantity(value, "V/A")
        try:
            self.gain = value.to(self.gainunits)
        except pinterrors.DimensionalityError:
            self.gain = (self.Vmax / value).to(self.gainunits)

    @property
    def Imax(self):
        # Maximal measurable current (A)
        try:
            return self.gain.to("A")
        except pinterrors.DimensionalityError:
            # Gain is stored as a resistance (V/A): Imax = Vmax/Rout
            return self.Vmax / self.gain.to("V/A")

    @Imax.setter
    def Imax(self, value):
        value = units.Quantity(value, "A")
        try:
            self.gain = value.to(self.gainunits)
        except pinterrors.DimensionalityError:
            self.gain = (self.Vmax / value).to(self.gainunits)

    @property
    def gain(self):
        """Vmax/Imax or Imax"""
        return self._gain

    @gain.setter
    def gain(self, value):
        try:
            # AttributeError on the very first assignment (no _gain yet)
            u = self.gainunits
        except AttributeError:
            # No unit convention fixed yet: accept A or V/A as-is
            try:
                gain = value.to("A")
            except pinterrors.DimensionalityError:
                gain = value.to("V/A")
        else:
            gain = units.Quantity(value, u).to(u)
        # Snap to the nearest discrete ammeter setting
        self._gain = self.gainrounder(gain)

    @property
    def gainunits(self):
        # Units in which the gain is stored ("A" or "V/A")
        return self._gain.units

    @property
    def darkcurrent(self):
        # Dark current as a pint quantity (A)
        return self._darkcurrent

    @darkcurrent.setter
    def darkcurrent(self, value):
        self._darkcurrent = units.Quantity(value, "A")
    def __str__(self):
        """Multi-line human-readable summary of the diode configuration."""
        fmt = "PN-diode:\n{}\n"
        args = [super(PNdiode, self).__str__()]
        if self.simplecalibration:
            fmt += "LUT (e-/sample photon):\n {}\n"
            # NOTE(review): local name shadows the module-level 'lut' import
            lut = self._lut_chargepersamplephoton.zip("keV", "e")
            lut = "\n ".join("{:~}: {:~}".format(*xy) for xy in lut)
            if not lut:
                lut = None
            args.append(lut)
        else:
            fmt += "Secondary target:\n {}\n"
            target = str(self.secondarytarget)
            target = "\n ".join(target.split("\n"))
            args.append(target)
        fmt += "Optics:\n {}\n"
        optics = "\n".join(str(dev) for dev in self.optics)
        optics = "\n ".join(optics.split("\n"))
        if not optics:
            optics = None
        args.append(optics)
        fmt += "Before sample: {}\n"
        args.append(self.beforesample)
        fmt += "Ammeter:\n Gain = {:~e}\n "
        args.append(self.gain)
        fmt += "Dark current = {:~}\n"
        args.append(self.darkcurrent.to("e/s"))
        if self.Vmax is not None:
            fmt += " Output voltage = {:~}\n"
            args.append(self.Vmax)
        fmt += "Voltage-to-Frequency:\n {}"
        args.append(self.oscillator)
        return fmt.format(*args)
def _diode_absorbance(self, energy, thickness):
# list self.absorbance but with customn thickness
return self.material.mass_att_coeff(energy) * self.material.density * thickness
def _diode_transmission(self, energy, thickness):
# list self.transmission but with customn thickness
return np.exp(-self._diode_absorbance(energy, thickness))
def _diode_attenuation(self, energy, thickness):
# list self.absorbance but with customn thickness
return 1 - self._diode_transmission(energy, thickness)
def _spectral_responsivity_infthick(self):
"""Generated current per radiative power for an infinitely thick diode
Returns:
num: A/W or e/eV
"""
return (self.ELCHARGE / self.ehole.to("eV")).to("A/W")
def spectral_responsivity(self, energy):
"""Generated current per radiative power
Args:
energy(num or array): keV
Returns:
num or array: A/W or e/eV
"""
return self.attenuation(energy) * self._spectral_responsivity_infthick()
def model_spectral_responsivity(self, energy, thickness, ehole):
"""Function to fit measured spectral responsivity"""
return self._diode_attenuation(energy, thickness) / ehole
def fit_spectral_responsivity(self, energy, response):
"""Calculate d and Ehole by fitting:
I(A) = P(W) . 1(e) . (1-exp(-mu.rho.d))/Ehole(eV) + D(A)
response(A/W) = (I(A)-D(A))/P(W) = 1(e).(1-exp(-mu.rho.d))/Ehole(eV)
Args:
energy(array): keV
response(array): A/W
Returns:
None
"""
# with warnings.catch_warnings():
# warnings.simplefilter("ignore")
# ehole = 1/np.max(response)
# muL = self.material.mass_att_coeff(energy)*self.material.density
# thickness = -np.log(1-(response*ehole).magnitude)/muL
# thickness = np.median(thickness[np.isfinite(thickness)])
# ehole = ehole.magnitude
ehole = self.ehole.to("eV").magnitude
thickness = self.thickness
try:
p, cov_matrix = fit.leastsq(
self.model_spectral_responsivity, energy, response, [thickness, ehole]
)
except:
logger.debug(
"Could not fit spectral response of {}".format(self.__class__.__name__)
)
return
self.thickness = p[0] # "cm"
self.ehole = units.Quantity(p[1], "eV") # eV
def plot_spectral_responsivity(self, energy):
plt.plot(energy, self.spectral_responsivity(energy).to("A/W"))
ax = plt.gca()
ax.set_xlabel("Energy (keV)")
ax.set_ylabel("Spectral responsivity (A/W)")
ax.get_yaxis().get_major_formatter().set_useOffset(False)
plt.title(self.__class__.__name__)
    def plot_response(self, energy, flux, weights=None, current=False):
        """Plot the diode response (cps or current) versus energy.

        Args:
            energy(num or array): source energies (keV)
                shape = [nSource x] nSourceLines
            flux(num or array): fluxes (ph/s)
                shape = [nSource x] nFluxes
            weights(Optional(num or array): source line weights
                shape= [nSource x] nSourceLines
            current(bool): plot the diode current (A) instead of the
                counter rate (Hz)
        """
        energy, weights, singlesource, singleline = reshape_spectrum_lines(
            energy, weights=weights
        )
        if current:
            response = (
                self.fluxtocurrent(energy, flux, weights=weights).to("A").magnitude
            )
            unit = "A"
        else:
            response = self.fluxtocps(energy, flux, weights=weights).to("Hz").magnitude
            unit = "Hz"
        for i, (x, y) in enumerate(zip(energy, response)):
            plt.plot(x, y, label="Source{}".format(i))
        # plt.axhline(y=self.oscillator.Fmax.to("Hz").magnitude,label="max")
        ax = plt.gca()
        ax.set_xlabel("Energy (keV)")
        ax.set_ylabel("Response ({})".format(unit))
        ax.get_yaxis().get_major_formatter().set_useOffset(False)
        plt.title(
            "{} @ {:.02e} ph/s, {:~.0e}".format(
                self.__class__.__name__, flux, self.gain
            )
        )
        if not singlesource:
            plt.legend()
def _chargeperdiodephoton(self, energy):
"""
Charge generated per photon hitting the diode: spectral responsivity multiplied by the energy
Args:
energy(num or array): keV
Returns:
num or array: C/ph
"""
return self.spectral_responsivity(energy) * units.Quantity(energy, "keV")
def _transmission_optics(self, energy):
"""
Args:
energy(array): source energies in keV (shape: nSource)
Returns:
array:
"""
T = np.ones_like(energy)
for dev in self.optics:
T = T * dev.transmission(energy)
return T
    def _source_transmission(self, energy):
        """
        Transmission between sample and point-before-detection (i.e. before source filter)
        Args:
            energy(array): keV
        Returns:
            array: combined transmission of optics, filters and (for direct
                detection) the diode itself
        """
        # Before sample:
        #  Direct detection  : point-before-detection - filter(source) - filter(det) - diode - optics - sample
        #  Indirect detection: point-before-detection - filter(source) - target - optics - sample
        #
        # After sample:
        #  Direct detection  : sample - optics - point-before-detection - filter(source) - filter(det) - diode
        #  Indirect detection: sample - optics - point-before-detection - filter(source) - target
        T = self._transmission_optics(energy)
        if self.beforesample:
            # Upstream diode: the source filter sits between diode and sample
            T = T * self.filter_transmission(energy, source=True)
            if self.secondarytarget is None:
                # Direct detection: the beam also traverses the diode itself
                T = (
                    T
                    * self.transmission(energy)
                    * self.filter_transmission(energy, source=False)
                )
            else:
                # Indirect detection: the beam traverses the target
                T = T * self.secondarytarget.transmission(energy)
        return T
    def _detection_rates(self, energy, weights):
        """
        Rate of line j at point-before-diode (i.e. without diode attenuation)
        multiplied by (and due to) rate of line i at point-before-detection (i.e. before source filter)
        Args:
            energy(array): source energies in keV
                shape: [nSource x] nSourceLines
            weights(array): source rates
                shape: [nSource x] nSourceLines
        Returns:
            energy2(array): energies (keV) of lines detected by the diode
                shape: [nSource] x nSourceLines2
            rates(array): rates of lines detected by the diode
                shape: [nSource x] nSourceLines2
        """
        energy, weights, singlesource, singleline = reshape_spectrum_lines(
            energy, weights=weights
        )
        if self.secondarytarget is None:
            # Directly detect the source spectrum
            wY = (
                weights
                * self.filter_transmission(energy, source=True)
                * self.filter_transmission(energy, source=False)
            )
            wY = wY.sum(axis=-1, keepdims=True)
        else:
            # Target spectrum energies and rates (ph/phsource)
            # NOTE(review): hard-coded None makes the else-branch below dead
            # code; looks like a leftover hook for cached rate dictionaries
            secondarytarget_ratedict = None
            if secondarytarget_ratedict is None:
                # includes source and detector filters
                # but not detector attenuation (withdetectorresponse=False)
                spectra = self.secondarytarget.xrayspectrum(
                    energy, weights=weights, withdetectorresponse=False, method="mc"
                )
                spectra = [dict(spectrum.spectrum()) for spectrum in spectra]
            else:
                # Soure weights before the sample:
                weights = weights * self.filter_transmission(energy, source=True)
                spectra = []
                for en, we in zip(energy, weights):
                    spectrum = Counter()
                    for e, w in zip(en, we):
                        rdict = secondarytarget_ratedict[e]
                        senergies = np.array(list(rdict.keys()))
                        # Product of:
                        #   source weight before the sample
                        #   rate after sample interaction
                        #   attenuation before detector
                        srates = (
                            w
                            * np.array(list(rdict.values()))
                            * self.filter_transmission(senergies, source=False)
                        )
                        for k, v in zip(senergies, srates):
                            spectrum[k] += v
                    spectra.append(dict(spectrum))
            # Spectra as arrays: pad every spectrum with zero-rate entries so
            # all sources share the same set of energies
            energies = set()
            for spectrum in spectra:
                energies |= set(spectrum.keys())
            energy = []
            wY = []
            for spectrum in spectra:
                missing = energies - set(spectrum.keys())
                if missing:
                    spectrum.update(zip(missing, np.zeros(len(missing))))
                energy.append(list(spectrum.keys()))
                wY.append(list(spectrum.values()))
            energy = np.asarray(energy)
            wY = np.asarray(wY)
        if singlesource:
            return energy[0], wY[0]
        else:
            return energy, wY
    def _chargepersamplephoton(self, energy, weights=None, keepdims=False):
        """
        Charge generated per photon reaching the sample
        Args:
            energy(num or array): source energies in keV (shape: [nSource x] nSourceLines)
            weights(num or array): source line weights (shape: [nSource x] nSourceLines)
            keepdims(bool): keep the summed source-line axis (length 1)
        Returns:
            num: C/ph (num or array(nSource [x 1]))
        """
        # Parse input (nSource x nSourceLines)
        energy, weights, singlesource, singleline = reshape_spectrum_lines(
            energy, weights=weights
        )
        kwargs = {"axis": -1, "keepdims": keepdims}
        if self.simplecalibration:
            # I(A) = Is(ph/s) . Cs(C) + D(A)
            # Cs(C) = SUM_i [wi.LUTj]
            LUTj = self._lut_chargepersamplephoton(units.Quantity(energy, "keV"))
            # Sum over source lines:
            Cs = np.sum(LUTj * weights, **kwargs)  # nSource
        else:
            # I(A) = Is(ph/s) . Cs(C) + D(A)
            # Cs(C) = SUM_i [SUM_j[wi.Yij.Cj]] / SUM_k [wk.Tks]
            #
            # wi, wk: source line fraction
            # Cj (C/ph): charge per photon hitting the diode
            # Cs (C/ph): charge per photon hitting the sample
            # Yij: rate of line j due to line i (including solid angle but not diode attenuation)
            # Tks: transmission of line k (point-before-detection <-> sample)
            Ts = self._source_transmission(energy)  # nSource x nSourceLines
            energy2, wiYij = self._detection_rates(
                energy, weights
            )  # nSource x nSecondary
            Cj = self._chargeperdiodephoton(energy2)  # nSource x nSecondary
            # Sum over secondary source lines:
            Cs = np.sum(wiYij * Cj, **kwargs) / np.sum(
                weights * Ts, **kwargs
            )  # nSource
        if singlesource:
            # Strip the source axis (Cs has shape (1,) or (1, 1) here)
            return Cs[0]
        else:
            return Cs
    def _calibrate_chargepersamplephoton(
        self,
        energy,
        Cscalib,
        weights=None,
        caliboption="optics",
        bound=False,
        plot=False,
    ):
        """Adjust the model (LUT, solid angle, diode thickness or optics
        transmission) so that the calculated charge-per-sample-photon
        matches ``Cscalib``.

        Args:
            energy(num or array): source energies in keV (shape: nSourceLines)
            Cscalib(num): new charge-per-sample-photon (C or e)
            weights(num or array): source line weights (shape: nSourceLines)
            caliboption(str): "optics", "solidangle" or "thickness"
            bound(bool): use a bracketing solver instead of fsolve
            plot(bool): diagnostic output/plots of the solver
        Returns:
            Cscalc(num): charge-per-sample-photon after calibration
        """
        # Parse input (1 x nSourceLines)
        energy, weights, singlesource, singleline = reshape_spectrum_lines(
            energy, weights=weights
        )
        energy = energy[0]
        weights = weights[0]
        # I(A) = Is(ph/s).Cs + D(A)
        if self._simplecalibration:
            # Cs = SUM_i [SUM_j[wi.Yij.Cj]] / SUM_k [wk.Tks]
            #    = SUM_i [wi . Csi]
            self._lut_chargepersamplephoton.replace(
                units.Quantity(energy, "keV"), Cscalib / weights
            )
            Cscalc = self._chargepersamplephoton(energy, weights=weights)
        else:
            # Propagate correction to one of the components of Cs:
            # Cs = SUM_i [SUM_j[wi.Yij.Cj]] / SUM_k [wk.Tks]
            # Cj = (1-Tj).Cj_inf
            # Cj_inf = Ej(eV/ph).1(e)/Ehole(eV)
            # Yij = solidangle . ...
            # Tj = 1-exp(-rho.mu(Ej).thickness)
            #
            # Yij: rate of line j due to line i (including solid angle but not diode attenuation)
            # Cj (C/ph): charge per photon hitting the diode
            # Cs (C/ph): charge per photon hitting the sample
            if caliboption not in ["solidangle", "thickness", "optics"]:
                raise RuntimeError(
                    'caliboption must be "optics", "solidangle" or "thickness"'
                )
            if caliboption == "solidangle":
                Cs = self._chargepersamplephoton(energy, weights=weights)
                # solidangle' = solidangle*Cs'/Cs  (Cs scales linearly with it)
                sa = self.geometry.solidangle * units.magnitude(
                    Cscalib / Cs, "dimensionless"
                )
                if sa <= 0 or sa > (2 * np.pi):
                    logger.warning(
                        "Diode solid angle of 4*pi*{} srad is not valid but will be accepted anyway".format(
                            sa / (4 * np.pi)
                        )
                    )
                self.geometry.solidangle = sa
                Cscalc = self._chargepersamplephoton(energy, weights=weights)
            else:
                # No analytical solution (in general): diode thickness or optics transmission
                # Fluorescence does not change (expensive to calculate)
                energy2, wiYij = self._detection_rates(
                    energy, weights
                )  # nSource x nSecondary, nSource x nSecondary
                # Solve: func(x) = (Cscalib-Cs(x))^2 = 0
                Cs, xorg, x0 = self._calibrate_init(
                    energy, caliboption=caliboption, bound=bound
                )
                # Remark: calling function Cs changes self based on first argument
                Cscalibm = Cscalib.to("e").magnitude

                def func(x):
                    return (
                        Cscalibm
                        - Cs(x, energy, weights, energy2, wiYij).to("e").magnitude
                    ) ** 2

                if bound:
                    # NOTE(review): func >= 0 by construction; bisect needs a
                    # sign change over [x0[0], x0[-1]] — confirm this branch
                    # is exercised with a residual that actually crosses zero
                    x, info = scipy.optimize.bisect(
                        func, x0[0], x0[-1], full_output=True, disp=False
                    )
                    if not info.converged:
                        x = np.nan
                    if plot:
                        print(info)
                else:
                    x, infodict, ier, emsg = scipy.optimize.fsolve(
                        func, x0=x0, full_output=True
                    )
                    if plot:
                        print(infodict)
                        print(emsg, ier)
                    if ier != 1:
                        x = np.nan
                if plot:
                    plt.axvline(x0, linestyle="dashed")
                    plt.axvline(x)
                    plt.axhline(0)
                    xx = np.linspace(0.1, 1.5, 50)
                    yy = [func(xi) for xi in xx]
                    plt.plot(xx, yy)
                x = self._calibrate_apply(energy, x, xorg, caliboption=caliboption)
                Cscalc = Cs(x, energy, weights, energy2, wiYij)
                if plot:
                    print(bound, x0, x)
                    print(Cscalib, Cscalc)
                    plt.show()
        return Cscalc
    def _calibrate_init(self, energy, caliboption="optics", bound=False):
        """Select the unknown to optimize during calibration.

        Returns:
            tuple: (Cs, xorg, x0) where Cs is the model function of the
            unknown, xorg its current value and x0 the initial guess
            (or bracket when ``bound``)
        """
        if caliboption == "thickness":
            # Unknown x = diode thickness
            xorg = self.thickness
            if bound:
                attenuationmax = 0.999
                # use energy instead of energy2 because we need the maximal thickness
                thicknessmax = -np.log(1 - attenuationmax) / (
                    self.material.mass_att_coeff(energy) * self.material.density
                )
                x0 = (0.0, np.max(thicknessmax))
            else:
                x0 = xorg
            Cs = self._calibrate_thickness
        elif caliboption == "optics":
            # Unknown x = transmission
            xorg = self.caliboptic.transmission(energy)
            if len(energy) > 1:
                # A single transmission bracket cannot cover several energies
                bound = False
            if bound:
                x0 = 0.0, 1.0
            else:
                x0 = xorg
            Cs = self._calibrate_optics
        else:
            raise RuntimeError('caliboption must be "optics" or "thickness"')
        return Cs, xorg, x0
    def _calibrate_apply(self, energy, x, xorg, caliboption="optics"):
        """Store the calibrated unknown on self (with fallback and warnings).

        Returns:
            the value actually applied
        """
        # Fall back to the original value when the solver failed
        if np.isnan(x):
            logger.error("Diode calibration not succesfull")
            x = xorg
        if caliboption == "thickness":
            x = instance.asscalar(x)
            if x <= 0:
                # NOTE(review): cm -> um is *1e4; the *1e-4 below looks off
                # (only affects the log message) — verify
                logger.warning(
                    "Diode thickness of {} um is not valid but will be accepted anyway".format(
                        x * 1e-4
                    )
                )
            self.thickness = x
        elif caliboption == "optics":
            if x < 0 or x > 1:
                logger.warning(
                    "Transmission of {} % is not valid but will be accepted anyway".format(
                        x * 100
                    )
                )
            self.caliboptic.set_transmission(energy, x)
        else:
            raise RuntimeError('caliboption must be "optics" or "thickness"')
        return x
def _calibrate_thickness(self, x, energy, weights, energy2, wiYij):
self.thickness = instance.asscalar(x)
Ts = self._source_transmission(energy) # nSource
Cj = self._chargeperdiodephoton(energy2) # nSecondary
return np.sum(wiYij * Cj[np.newaxis, :]) / np.sum(weights * Ts)
def _calibrate_optics(self, x, energy, weights, energy2, wiYij):
self.caliboptic.set_transmission(energy, x)
Ts = self._source_transmission(energy) # nSource
Cj = self._chargeperdiodephoton(energy2) # nSecondary
return np.sum(wiYij * Cj[np.newaxis, :]) / np.sum(weights * Ts)
    def samplelineweights(self, energy, weights=None):
        """Source weights after transmission
        Args:
            energy(num or array): source energies in keV (shape: nSource)
            weights(num or array): source line weights (shape: nSource)
        Returns:
            weights(num or array): source line weights at the sample position
        """
        # Parse input
        energy, weights, singlesource, singleline = reshape_spectrum_lines(
            energy, weights=weights
        )
        if self.simplecalibration or not self.beforesample:
            # Downstream diode (or LUT calibration): weights are unchanged
            return weights
        else:
            # wis = Iis/sum_i[Iis] = wi.Tis / sum_i[wi.Tis]
            # NOTE(review): in-place ops assume reshape_spectrum_lines
            # returned a fresh array — confirm it never aliases the input
            weights *= self._source_transmission(energy)
            weights /= weights.sum(axis=-1, keepdims=True)
            return weights
def op_currenttovoltage(self):
"""Operator to convert current to voltage
Args:
None
Returns:
callable: slope (V/A), intercept = 0V
"""
return linop.LinearOperator(self.Rout, units.Quantity(0, "volt"))
def op_voltagetocurrent(self):
"""Operator to convert voltage to current
Args:
None
Returns:
callable: slope (A/V), intercept = 0A
"""
# return self.op_currenttovoltage().inverse
return linop.LinearOperator(1.0 / self.Rout, units.Quantity(0, "ampere"))
def op_fluxtocurrent(self, energy, weights=None):
"""Operator to convert flux to current
Args:
energy(num or array): source energies (keV)
shape = [nSource x] nSourceLines
weights(Optional(num or array): source line weights
shape= [nSource x] nSourceLines
Returns:
callable: slope (C/ph), intercept(C/s)
"""
Cs = self._chargepersamplephoton(energy, weights=weights, keepdims=True)
return linop.LinearOperator(Cs, self.darkcurrent)
def op_currenttoflux(self, energy, weights=None):
"""Operator to convert current to flux
Args:
energy(num or array): source energies (keV)
shape = [nSource x] nSourceLines
weights(Optional(num or array): source line weights
shape= [nSource x] nSourceLines
Returns:
callable: slope (ph/C), intercept(ph/s)
"""
return self.op_fluxtocurrent(energy, weights=weights).inverse
def op_currenttocps(self):
"""Operator to convert current to counts-per-second
Args:
None
Returns:
callable: slope (cps/C), intercept(cps)
"""
return self.oscillator.op_voltagetocps() * self.op_currenttovoltage()
def op_cpstocurrent(self):
"""Operator to convert counts-per-second to current
Args:
None
Returns:
callable: slope (C/cps), intercept(C/s)
"""
return self.op_voltagetocurrent() * self.oscillator.op_cpstovoltage()
def op_countstocurrent(self, time):
"""Operator to convert counts to current
Args:
time: sec
Returns:
callable: slope (C/cts), intercept(C/s)
"""
return self.op_cpstocurrent * self.op_countstocps(time)
def op_voltagetocps(self):
"""Operator to convert voltage to counts-per-second
Args:
None
Returns:
callable: slope (cps/V), intercept(cps)
"""
return self.oscillator.op_voltagetocps()
def op_cpstovoltage(self):
"""Operator to convert counts-per-second to voltage
Args:
None
Returns:
callable: slope (V/cps), intercept(V)
"""
return self.oscillator.op_cpstovoltage()
    def op_countstovoltage(self, time):
        """Operator to convert counts to voltage
        Args:
            time(num): integration time (sec)
        Returns:
            callable: slope (V/cts), intercept(V)
        """
        return self.op_cpstovoltage() * self.op_countstocps(time)
def op_cpstoflux(self, energy, weights=None):
"""Operator to convert counts-per-second to flux
Args:
energy(num or array): source energies (keV)
shape = [nSource x] nSourceLines
weights(Optional(num or array): source line weights
shape= [nSource x] nSourceLines
Returns:
callable: slope (ph/cps), intercept(ph/s)
"""
return self.op_currenttoflux(energy, weights=weights) * self.op_cpstocurrent()
def op_fluxtocps(self, energy, weights=None):
"""Operator to convert flux to counts-per-second
Args:
energy(num or array): source energies (keV)
shape = [nSource x] nSourceLines
weights(Optional(num or array): source line weights
shape= [nSource x] nSourceLines
Returns:
callable: slope (cps/ph), intercept(cps)
"""
return self.op_currenttocps() * self.op_fluxtocurrent(energy, weights=weights)
def op_countstocps(self, time):
return linop.LinearOperator(
units.Quantity(1.0 / time, "Hz"), units.Quantity(0, "Hz")
)
def op_countstoflux(self, energy, time, weights=None):
"""Operator to convert counts to flux
Args:
energy(num or array): source energies (keV)
shape = [nSource x] nSourceLines
time(num): sec
weights(Optional(num or array): source line weights
shape= [nSource x] nSourceLines
Returns:
callable: slope (ph/cts), intercept(ph/s)
"""
return self.op_cpstoflux(energy, weights=weights) * self.op_countstocps(time)
def op_voltagetoflux(self, energy, weights=None):
"""Operator to convert voltage to flux
Args:
energy(num or array): source energies (keV)
shape = [nSource x] nSourceLines
weights(Optional(num or array): source line weights
shape= [nSource x] nSourceLines
Returns:
callable: slope (ph/s/V), intercept(ph/s)
"""
return (
self.op_currenttoflux(energy, weights=weights) * self.op_voltagetocurrent()
)
def op_fluxtovoltage(self, energy, weights=None):
"""Operator to convert flux to voltage
Args:
energy(num or array): source energies (keV)
shape = [nSource x] nSourceLines
weights(Optional(num or array): source line weights
shape= [nSource x] nSourceLines
Returns:
callable: slope (V.s/ph), intercept(V)
"""
return self.op_currenttovoltage() * self.op_fluxtocurrent(
energy, weights=weights
)
def fluxtocurrent(self, energy, flux, weights=None):
"""
Args:
energy(num or array): source energies (keV)
shape = [nSource x] nSourceLines
flux(num or array): ph/s
shape = [nSource x] n
weights(Optional(num or array): source line weights
shape= [nSource x] nSourceLines
Returns:
num or array: current (A)
"""
op = self.op_fluxtocurrent(energy, weights=weights)
return op(units.Quantity(flux, "hertz")).to("ampere")
def currenttoflux(self, energy, current, weights=None):
"""
Args:
energy(num or array): source energies (keV)
shape = [nSource x] nSourceLines
current(num or array): A
shape = [nSource x] n
weights(Optional(num or array): source line weights
shape= [nSource x] nSourceLines
Returns:
num or array: flux (ph/s)
"""
op = self.op_currenttoflux(energy, weights=weights)
return op(units.Quantity(current, "ampere")).to("hertz")
def cpstoflux(self, energy, cps, weights=None):
"""
Args:
energy(num or array): source energies (keV)
shape = [nSource x] nSourceLines
cps(num or array): cts/s
shape = [nSource x] n
weights(Optional(num or array): source line weights
shape= [nSource x] nSourceLines
Returns:
num or array: flux (ph/s)
"""
op = self.op_cpstoflux(energy, weights=weights)
return op(units.Quantity(cps, "hertz")).to("hertz")
    def countstoflux(self, energy, time, counts, weights=None):
        """
        Args:
            energy(num or array): source energies (keV)
                shape = [nSource x] nSourceLines
            time(num): s
            counts(num or array): cts
                shape = [nSource x] n
            weights(Optional(num or array): source line weights
                shape= [nSource x] nSourceLines
        Returns:
            num or array: flux (ph/s)
        """
        op = self.op_countstoflux(energy, time, weights=weights)
        return op(units.Quantity(counts, "dimensionless")).to("hertz")
def fluxtocps(self, energy, flux, weights=None):
"""
Args:
energy(num or array): source energies (keV)
shape = [nSource x] nSourceLines
flux(num or array): ph/s
shape = [nSource x] n
weights(Optional(num or array): source line weights
shape= [nSource x] nSourceLines
Returns:
num or array: cps (cts/s)
"""
op = self.op_fluxtocps(energy, weights=weights)
return op(units.Quantity(flux, "hertz")).to("hertz")
def currenttocps(self, current):
"""
Args:
current(num or array): A
Returns:
num or array: cps (cts/s)
"""
op = self.op_currenttocps()
return op(units.Quantity(current, "ampere")).to("hertz")
def cpstocurrent(self, cps):
"""
Args:
cps(num or array): cts/s
Returns:
num or array: current (A)
"""
op = self.op_cpstocurrent()
return op(units.Quantity(cps, "hertz")).to("ampere")
    def countstocurrent(self, time, counts):
        """
        Args:
            time(num): integration time (s)
            counts(num or array): cts
        Returns:
            num or array: current (A)
        """
        op = self.op_countstocurrent(time)
        return op(units.Quantity(counts, "dimensionless")).to("ampere")
def voltagetocps(self, voltage):
"""
Args:
voltage(num or array): V
Returns:
num or array: cps (cts/s)
"""
op = self.op_voltagetocps()
return op(units.Quantity(voltage, "volt")).to("hertz")
def cpstovoltage(self, cps):
"""
Args:
cps(num or array): cts/s
Returns:
num or array: voltage (V)
"""
op = self.op_cpstovoltage()
return op(units.Quantity(cps, "hertz")).to("volt")
def countstocps(self, time, counts):
"""
Args:
counts(num or array): cts
Returns:
num or array: voltage (cps)
"""
op = self.op_countstocps(time)
return op(units.Quantity(counts, "dimensionless")).to("Hz")
    def countstovoltage(self, time, counts):
        """Convert integrated counts to the oscillator input voltage.

        Args:
            time(num): exposure time (s)
            counts(num or array): integrated counts (cts)

        Returns:
            num or array: voltage (V)
        """
        # NOTE(review): sibling converters use op_countsto* helpers for counts,
        # yet this calls op_cpstovoltage with `time` as an argument while
        # cpstovoltage() above calls it with no arguments. Confirm the helper
        # accepts an exposure time (possible copy/paste slip for
        # op_countstovoltage).
        op = self.op_cpstovoltage(time)
        return op(units.Quantity(counts, "dimensionless")).to("volt")
def voltagetoflux(self, energy, voltage, weights=None):
"""
Args:
energy(num or array): source energies (keV)
shape = [nSource x] nSourceLines
voltage(num or array): V
shape = [nSource x] n
weights(Optional(num or array): source line weights
shape= [nSource x] nSourceLines
Returns:
num or array: flux (ph/s)
"""
op = self.op_voltagetoflux(energy, weights=weights)
return op(units.Quantity(voltage, "volt")).to("hertz")
def fluxtovoltage(self, energy, flux, weights=None):
"""
Args:
energy(num or array): source energies (keV)
shape = [nSource x] nSourceLines
flux(num or array): ph/s
shape = [nSource x] n
weights(Optional(num or array): source line weights
shape= [nSource x] nSourceLines
Returns:
num or array: voltage (V)
"""
op = self.op_fluxtovoltage(energy, weights=weights)
return op(units.Quantity(flux, "hertz")).to("volt")
def currenttovoltage(self, current):
"""
Args:
cps(num or array): A
Returns:
num or array: voltage (V)
"""
op = self.op_currenttovoltage()
return op(units.Quantity(current, "A")).to("volt")
def fluxop(self, energy, response, weights=None, time=None):
"""Operator to convert diode response flux.
Args:
energy(num or array): source energies (keV)
shape = [nSource x] nSourceLines
response(num or array): shape = [nSource x] n
weights(Optional(num or array): source line weights
shape= [nSource x] nSourceLines
time(Optional(num)): sec
Returns:
op(linop): raw diode conversion operator
"""
if time is None:
default = "hertz"
else:
default = "dimensionless"
response = units.Quantity(response, default)
try:
response.to("Hz")
except pinterrors.DimensionalityError:
try:
response.to("dimensionless")
if time is None:
raise RuntimeError("Need exposure time to convert counts to flux.")
except pinterrors.DimensionalityError:
try:
response.to("A")
except:
response.to("V")
return self.op_voltagetoflux(energy, weights=weights)
else:
return self.op_currenttoflux(energy, weights=weights)
else:
return self.op_countstoflux(energy, time, weights=weights)
else:
return self.op_cpstoflux(energy, weights=weights)
def responsetoflux(self, energy, response, weights=None, time=None):
"""Convert diode response to flux
Args:
energy(num or array): source energies (keV)
shape = [nSource x] nSourceLines
response(num or array): cts (default when time given), cps (default when time not given), A or V
shape = [nSource x] n
weights(Optional(num or array): source line weights
shape= [nSource x] nSourceLines
time(Optional(num)): sec
Returns:
num or array: flux (ph/s)
"""
if time is None:
default = "hertz"
else:
default = "dimensionless"
response = units.Quantity(response, default)
try:
return self.cpstoflux(energy, response.to("hertz"), weights=weights)
except pinterrors.DimensionalityError:
try:
tmp = response.to("dimensionless")
if time is None:
raise RuntimeError("Need exposure time to convert counts to flux.")
return self.countstoflux(energy, time, tmp, weights=weights)
except pinterrors.DimensionalityError:
try:
return self.currenttoflux(energy, response.to("A"), weights=weights)
except:
return self.voltagetoflux(energy, response.to("V"), weights=weights)
def gainfromresponse(
self, response_after, response_before, energy=None, weights=None, time=None
):
"""Try to guess the diode gain
Args:
response_after(num): diode response after gain application (cts, cps)
response_before(num): diode response before gain application (A, V, ph/s)
energy(Optional(num or array)): keV
weights(Optional(num or array)): line fractions
time(Optional(num)): sec
Returns:
gain(num): V/A or A
"""
# Voltage from the signal on which the gain is already applied
if time is None:
default = "Hz"
else:
default = "dimensionless"
response_after = units.Quantity(response_after, default)
try:
V_meas = self.cpstovoltage(response_after.to("Hz"))
except pinterrors.DimensionalityError:
tmp = response.to("dimensionless")
if time is None:
raise RuntimeError("Need exposure time to convert counts to flux.")
V_meas = self.countstovoltage(time, tmp, weights=weights)
# Voltage from the signal before the gain is applied
response_before = units.Quantity(response_before, "Hz")
try:
V_calc = self.fluxtovoltage(
energy, response_before.to("Hz"), weights=weights
)
except pinterrors.DimensionalityError:
try:
V_calc = self.currenttovoltage(response_before.to("A"))
except pinterrors.DimensionalityError:
V_calc = response_before.to("V")
if self.gain.units == ureg.Unit("A"):
# V_meas: inverse proportional to the real gain
# V_calc: inverse proportional to the assumed gain
r = units.magnitude(V_calc / V_meas, "dimensionless")
else:
# V_meas: proportional to the real gain
# V_calc: proportional to the assumed gain
r = units.magnitude(V_meas / V_calc, "dimensionless")
r = np.median(r[np.isfinite(r)])
if np.isfinite(r and r > 0):
return self.gainrounder(self.gain * r)
else:
return None
    def xrfnormop(self, energy, expotime, reference, referencetime=None, weights=None):
        """Operator to convert the raw diode signal to a flux normalizing signal.

        Usage: Inorm = I/op(iodet)

        XRF flux normalization:

        Measured XRF:
        Ixrf(cts) = F(cps).t(s).cxrf

        Desired XRF signal:
        Ixrf(cts)/norm = Fref(cps).tref(s).cxrf

        Normalization function to be applied on the raw diode signal Idiode
        norm = F.t/(Fref.tref) = cpstoflux(Idiode/t).t/(Fref.tref) = op(Idiode)
        op: x-> cpstoflux(x/t)/Fref.t/tref

        In case reference in counts instead of photons/sec
        Fref = round_sig(cpstoflux(Idioderef/t),2)

        Args:
            energy(num or array): source energies (keV)
                               shape = [nSource x] nSourceLines
            expotime(num): sec
            reference(num): iodet (counts) or flux (photons/sec) to which the data should be normalized
            referencetime(Optional(num)): time to which the data should be normalized
            weights(Optional(num or array)): source line weights
                               shape= [nSource x] nSourceLines

        Returns:
            op(linop): raw diode conversion operator
            Fref(num): flux in photons/s to which the data is normalized after data/op(diode)
            tref(num): time in s to which the data is normalized after data/op(diode)
        """
        # Convert from counts to photons/sec
        # op: x-> cpstoflux(x/t)
        t = units.Quantity(expotime, "s")
        op = self.op_countstoflux(energy, t, weights=weights)
        # Reference flux to which the XRF signal should be normalized
        if reference.units == ureg.hertz:  # photons/sec
            Fref = reference
        elif reference.units == ureg.dimensionless:  # counts
            # Fref = op(Idioderef), rounded to 2 significant digits
            Fref = units.Quantity(
                round_sig(units.magnitude(op(reference), "hertz"), 2), "hertz"
            )
        else:
            raise RuntimeError(
                "Reference {} should be in photons/sec (flux) or counts (iodet).".format(
                    reference
                )
            )
        # Convert from counts to counts at reference flux "Fref" and reference time "tref"
        if referencetime is not None:
            # Normalize to an exposure time different from the measurement's
            tref = units.Quantity(referencetime, "s")
            op2 = linop.LinearOperator(
                units.Quantity(t / (Fref * tref), "s"),
                units.Quantity(0, "dimensionless"),
            )
        else:
            op2 = linop.LinearOperator(
                units.Quantity(1.0 / Fref, "s"), units.Quantity(0, "dimensionless")
            )
            tref = t
        # Compose: first counts -> flux, then flux -> normalization factor
        op = op2 * op
        op.m = units.magnitude(op.m, "dimensionless")
        op.b = units.magnitude(op.b, "dimensionless")
        return op, Fref.to("hertz").magnitude, tref.to("s").magnitude
    def calibratedark(self, darkresponse, time=None):
        """Calibrate the dark current from a dark measurement.

        Args:
            darkresponse(num or array): measured dark (cts, cps, A)
            time(Optional(num)): exposure time (s), required when
                                 `darkresponse` is given in counts

        Returns:
            None
        """
        if time is None:
            default = "Hz"
        else:
            default = "dimensionless"

        def func(x):
            # Robust central value of the dark signal, clipped to >= 0
            return np.clip(np.nanmedian(x.magnitude), 0, None)

        darkresponse = units.Quantity(darkresponse, default)
        try:
            self.darkcurrent = self.cpstocurrent(func(darkresponse.to("Hz")))
        except pinterrors.DimensionalityError:
            try:
                tmp = darkresponse.to("dimensionless")
                if time is None:
                    raise RuntimeError("Need exposure time to convert counts to flux.")
                self.darkcurrent = self.countstocurrent(time, func(tmp))
            except pinterrors.DimensionalityError:
                self.darkcurrent = func(darkresponse.to("A"))
        # NOTE(review): `.magnitude` strips the units here, and `.to("e/s")`
        # on the next line only works if the darkcurrent setter re-wraps bare
        # numbers as a current Quantity — confirm that assumption.
        self.darkcurrent = np.clip(self.darkcurrent.magnitude, 0, None)
        logger.info("Calibrated dark current: {:~e}".format(self.darkcurrent.to("e/s")))
    def calibrateF0(self, darkresponse, time=None):
        """Calibrate the oscillator offset F0 from a dark measurement.

        Args:
            darkresponse(num or array): measured dark (cts, cps)
            time(Optional(num)): exposure time (s), required when
                                 `darkresponse` is given in counts

        Returns:
            None
        """
        if time is None:
            default = "Hz"
        else:
            default = "dimensionless"

        def func(x):
            # Robust central value of the dark rate, clipped to >= 0
            return units.Quantity(np.clip(np.nanmedian(x.magnitude), 0, None), "Hz")

        darkresponse = units.Quantity(darkresponse, default)
        try:
            darkcps_meas = func(darkresponse.to("Hz"))
        except pinterrors.DimensionalityError:
            tmp = darkresponse.to("dimensionless")
            if time is None:
                raise RuntimeError("Need exposure time to convert counts to flux.")
            darkcps_meas = func(self.countstocps(time, tmp))
        # Shift F0 by the difference between the measured dark rate and the
        # rate explained by the current dark-current estimate.
        darkcps_calc = self.currenttocps(self.darkcurrent)
        self.oscillator.F0 += darkcps_meas - darkcps_calc
        # NOTE(review): `.magnitude` strips the Hz units; the format below
        # only works if the F0 setter re-wraps bare numbers — confirm.
        self.oscillator.F0 = np.clip(self.oscillator.F0.magnitude, 0, None)
        logger.info("Calibrated oscillator offset: {:~e}".format(self.oscillator.F0))
def fluxcpsinfo(self, energy, weights=None):
"""
Args:
energy(num or array): source energies in keV (shape: nSourceLines)
weights(num or array): source line weights (shape: nSourceLines)
Returns:
dict
"""
ret = collections.OrderedDict()
Cs = self._chargepersamplephoton(energy, weights=weights)
ret["Energy"] = "{} keV".format(energy)
ret["Charge/ph"] = "{:~f}".format(Cs.to("e"))
ret["Dark"] = "{:~e}".format(self.darkcurrent.to("e/s"))
return ret
def propagate(self, N, energy, tframe=None, nframe=None, weights=None):
"""Error propagation of a number of photons.
Args:
N(unumpy.uarray): incomming number of photons with uncertainties
energy(numpy.array): associated energies
tframe(num|numpy.array): time per frame (sec)
nframe(num|numpy.array): number of frames (sec)
Returns:
uncertainties.core.Variable or numpy.array(uncertainties.core.Variable): detector signal in DU
"""
if tframe is None:
ValueError("Frame exposure time not specified.")
if nframe is None:
ValueError("Number of frames not specified.")
# TODO:
Nout = None
return Nout # units: DU
class CalibratedPNdiode(PNdiode):
    """
    A pn-diode with a known spectral responsivity
    """

    def __init__(
        self, energy=None, response=None, model=True, fitresponse=True, **kwargs
    ):
        """
        Args:
            energy(array): keV
            response(array): spectral responsivity (A/W)
            model(Optional(bool)): use the fitted spectral responsivity or interpolate the given response
            fitresponse(Optional(bool)): modify thickness and Ehole to response
        """
        super(CalibratedPNdiode, self).__init__(**kwargs)
        self.model = model
        self.menergy = energy
        self.mresponse = response
        self.fitresponse = fitresponse
        self._init_response()

    def _init_response(self):
        """Optionally refit the model and build an interpolator for the
        measured responsivity curve."""
        energy = self.menergy
        response = self.mresponse.to("A/W")  # or e/eV
        if self.fitresponse:
            self.fit_spectral_responsivity(energy, response)
        # NaN outside the measured range; spectral_responsivity() falls back
        # to the fitted model there.
        self.finterpol = scipy.interpolate.interp1d(
            energy, response, bounds_error=False, fill_value=np.nan
        )

    def __getstate__(self):
        # Serialize the calibration data on top of the base-class state.
        state = super(CalibratedPNdiode, self).__getstate__()
        state["model"] = self.model
        state["menergy"] = self.menergy
        state["mresponse"] = self.mresponse
        state["fitresponse"] = self.fitresponse
        return state

    def __setstate__(self, state):
        super(CalibratedPNdiode, self).__setstate__(state)
        self.model = state["model"]
        self.menergy = state["menergy"]
        self.mresponse = state["mresponse"]
        self.fitresponse = state["fitresponse"]
        # The interpolator is not serialized; rebuild it.
        self._init_response()

    def __eq__(self, other):
        if isinstance(other, self.__class__):
            if not (super(CalibratedPNdiode, self).__eq__(other)):
                return False
            return (
                self.model == other.model
                and all(self.menergy == other.menergy)
                and all(self.mresponse == other.mresponse)
                and self.fitresponse == other.fitresponse
            )
        else:
            return False

    def spectral_responsivity(self, energy):
        """
        Args:
            energy(num or array): keV

        Returns:
            num or array: A/W
        """
        if self.model:
            r = super(CalibratedPNdiode, self).spectral_responsivity(energy)
        else:
            r = self.finterpol(energy)
        # Fill energies outside the measured range with the fitted model
        ind = np.isnan(r)
        try:
            if any(ind):
                r[ind] = super(CalibratedPNdiode, self).spectral_responsivity(
                    energy[ind]
                )
        except TypeError:
            # Scalar input: `ind` is a plain bool and `r` is not indexable
            if ind:
                r = super(CalibratedPNdiode, self).spectral_responsivity(energy)
        return units.Quantity(r, "A/W")
class NonCalibratedPNdiode(PNdiode):
    """
    A pn-diode with an unknown spectral responsivity
    """

    def __init__(self, **kwargs):
        super(NonCalibratedPNdiode, self).__init__(**kwargs)

    def calibrate(
        self,
        response,
        sampleflux,
        energy,
        weights=None,
        caliboption="optics",
        fixdark=False,
        fluxmin=0,
        fluxmax=np.inf,
    ):
        """
        Calibrate with another diode measuring the flux at the sample position

        Args:
            response(array): count rate (Hz) or current (A) measured by this diode (nResponse)
            sampleflux(array): flux measured at the sample position (Hz, nResponse)
            energy(num or array): source lines (keV, nSourceLines)
            weights(Optional(num or array)): source line weights (nSourceLines)
            caliboption(str): "optics", "solidangle" or "thickness"
            fixdark(Optional(num)): fix dark current
            fluxmin(Optional(num)): lower flux bound (Hz) of points used in the fit
            fluxmax(Optional(num)): upper flux bound (Hz) of points used in the fit

        Returns:
            dict: calibration summary (energy, charge/photon, dark, R^2)
        """
        # Linear model of the diode current vs. sample flux:
        # I(A) = Is(ph/s).slope + intercept
        #      = Is(ph/s).Cs + D(A)
        # Cs: charge per sample photon
        response = units.Quantity(response, "hertz")
        try:
            current = response.to("A")
        except pinterrors.DimensionalityError:
            current = self.cpstocurrent(response)
        x = units.umagnitude(sampleflux, "hertz")
        x = instance.asarray(x)
        # Restrict the fit to the requested flux interval
        indfit = (x >= units.umagnitude(fluxmin, "hertz")) & (
            x <= units.umagnitude(fluxmax, "hertz")
        )
        npts = sum(indfit)
        if npts < 1:
            raise RuntimeError(
                "Not enough data points with a flux in [{:e},{:e}] (data range [{:e},{:e}])".format(
                    fluxmin, fluxmax, np.min(x), np.max(x)
                )
            )
        # A single point cannot constrain both slope and intercept
        fixdark |= npts == 1
        x = x[indfit]
        if fixdark:
            # Fit dark-subtracted current vs flux (slope only)
            intercept = units.magnitude(self.darkcurrent, "ampere")
            y = units.magnitude(current, "ampere")
            y = instance.asarray(y)[indfit]
            if npts == 1:
                slope = (y[0] - intercept) / x[0]
            else:
                slope = fit1d.linfit_zerointercept(x, y - intercept)
        else:
            # Fit current vs flux (slope and intercept)
            y = units.magnitude(current, "ampere")
            y = y[indfit]
            slope, intercept = fit1d.linfit(x, y)
            if intercept < 0:
                # A negative dark current is unphysical: clip and refit
                intercept = 0
                slope = fit1d.linfit_zerointercept(x, y)
            # Set dark current:
            self.darkcurrent = intercept
        # Correlation coefficient
        ycalc = intercept + slope * x
        if npts == 1:
            R2 = np.nan
        else:
            R2 = 1 - sum((y - ycalc) ** 2) / sum((y - np.mean(y)) ** 2)
        # Set diode thickness, solid angle or transmission
        Cscalib = units.Quantity(slope, "coulomb")
        Cscalc = self._calibrate_chargepersamplephoton(
            energy, Cscalib, weights=weights, caliboption=caliboption
        )
        info = (
            "Diode calibration:\n Energy: {} keV"
            "\n Electron-hole pairs per photon hitting the sample: {:~} (experiment: {:~})"
            "\n Electron-hole pairs per second (dark): {:~e} "
            "\n R^2 = {}".format(
                energy, Cscalc.to("e"), Cscalib.to("e"), self.darkcurrent.to("e/s"), R2
            )
        )
        logger.info(info)
        ret = self.fluxcpsinfo(energy, weights=weights)
        ret["R$^2$"] = R2
        return ret
class SXM_PTB(CalibratedPNdiode):
    """Absolutely calibrated reference diode (PTB), used to calibrate the
    other diodes."""

    aliases = ["ptb"]

    def __init__(self, **kwargs):
        kwargs["attenuators"] = {
            "Detector": {"material": element.Element("Si"), "thickness": 30e-4}
        }
        kwargs["ehole"] = constants.eholepair_si()
        kwargs["model"] = kwargs.get("model", True)
        # Tabulated spectral responsivity: column 0 = energy (keV),
        # column 1 = responsivity (mA/W)
        data = np.loadtxt(resource_filename("id21/ptb.dat"))
        super(SXM_PTB, self).__init__(
            gain=ureg.Quantity(1e5, "volt/ampere"),
            gainrounder=GainRounder(base=10),
            darkcurrent=ureg.Quantity(0, "ampere"),
            energy=data[:, 0],
            response=ureg.Quantity(data[:, 1], "milliampere/watt"),
            fitresponse=True,
            beforesample=False,
            **kwargs
        )
class SXM_IDET(CalibratedPNdiode):
    """Centronic OSD 50-3T
    Keithley K428 (10V max analog output)
    NOVA N101VTF voltage-to-frequency converter (Fmax=1e6Hz, F0=0Hz, Vmax=10V)
    P201 counter board
    """

    aliases = ["idet"]

    def __init__(self, **kwargs):
        kwargs["attenuators"] = {}
        kwargs["attenuators"]["Detector"] = {
            "material": element.Element("Si"),
            "thickness": 300e-4,
        }
        kwargs["ehole"] = constants.eholepair_si()
        kwargs["model"] = kwargs.get("model", False)
        # Measured response ratio w.r.t. the PTB reference diode:
        # column 0 = energy (keV), column 1 = ratio
        ird = np.loadtxt(resource_filename("id21/ird.dat"))
        # Drop the last `npop` tabulated points (presumably unreliable at the
        # high-energy end — TODO confirm)
        npop = kwargs.pop("npop", None)
        if npop is None:
            npop = 4
        j = ird.shape[0] - npop
        energy = ird[:j, 0]  # keV
        responseratio = ird[:j, 1]
        # Extend the (trimmed) curve up to 8.4 keV when it stops short
        energyadd = 8.4
        if energy[-1] < energyadd:
            responseratio = np.append(responseratio, 3.22)
            energy = np.append(energy, energyadd)
        # Absolute responsivity = ratio x PTB reference responsivity
        absdiode = SXM_PTB(model=True)
        response = responseratio * absdiode.spectral_responsivity(energy)
        vtof = Oscillator(
            Fmax=ureg.Quantity(1e6, "Hz"),
            F0=ureg.Quantity(0, "Hz"),
            Vmax=ureg.Quantity(10, "volt"),
        )
        super(SXM_IDET, self).__init__(
            gain=ureg.Quantity(1e5, "volt/ampere"),
            gainrounder=GainRounder(base=10),
            darkcurrent=ureg.Quantity(0, "ampere"),
            energy=energy,
            response=response,
            fitresponse=False,
            beforesample=False,
            oscillator=vtof,
            **kwargs
        )
class SXM_FDET(NonCalibratedPNdiode):
    """
    Keithley K428 (10V max analog output)
    NOVA N101VTF voltage-to-frequency converter (Fmax=1e6Hz, F0=0Hz, Vmax=10V)
    P201 counter board
    """

    aliases = ["fdet"]

    def __init__(self, **kwargs):
        kwargs["attenuators"] = {
            "Detector": {"material": element.Element("Si"), "thickness": 0.1}
        }
        kwargs["ehole"] = constants.eholepair_si()
        voltage_to_frequency = Oscillator(
            Fmax=ureg.Quantity(1e6, "Hz"),
            F0=ureg.Quantity(0, "Hz"),
            Vmax=ureg.Quantity(10, "volt"),
        )
        super(SXM_FDET, self).__init__(
            gain=ureg.Quantity(1e5, "volt/ampere"),
            gainrounder=GainRounder(base=10),
            darkcurrent=ureg.Quantity(0, "ampere"),
            oscillator=voltage_to_frequency,
            beforesample=False,
            **kwargs
        )
class SXM_IODET1(NonCalibratedPNdiode):
    """International Radiation Detectors (IRD), AXUV-PS1-S
    Keithley K428 (10V max analog output)
    NOVA N101VTF voltage-to-frequency converter (Fmax=1e6Hz, F0=0Hz, Vmax=10V)
    P201 counter board
    """

    aliases = ["iodet1"]

    def __init__(self, **kwargs):
        # Build the source/detector geometry for the secondary target
        kwargs2 = {}
        if "source" in kwargs:
            kwargs2["source"] = kwargs.pop("source")
        else:
            kwargs2["source"] = "synchrotron"
        if instance.isstring(kwargs2["source"]):
            kwargs2["source"] = xraysources.factory(kwargs2["source"])
        kwargs2["detector"] = self
        geometry = diodegeometries.factory("sxm_iodet1", **kwargs2)
        kwargs["attenuators"] = {}
        kwargs["attenuators"]["Detector"] = {
            "material": element.Element("Si"),
            "thickness": 0.1,
        }
        kwargs["ehole"] = constants.eholepair_si()
        # The diode views a Ti-coated silicon-nitride window (secondary target)
        window = compoundfromname.compoundfromname("silicon nitride")
        coating = element.Element("Ti")
        secondarytarget = multilayer.Multilayer(
            material=[coating, window], thickness=[500e-7, 500e-7], geometry=geometry
        )
        vtof = Oscillator(
            Fmax=ureg.Quantity(1e6, "Hz"),
            F0=ureg.Quantity(0, "Hz"),
            Vmax=ureg.Quantity(10, "volt"),
        )
        super(SXM_IODET1, self).__init__(
            gain=ureg.Quantity(1e5, "volt/ampere"),
            gainrounder=GainRounder(base=10),
            darkcurrent=ureg.Quantity(0, "ampere"),
            oscillator=vtof,
            secondarytarget=secondarytarget,
            beforesample=True,
            **kwargs
        )
class SXM_IODET2(NonCalibratedPNdiode):
    """International Radiation Detectors (IRD), AXUV-PS1-S
    Keithley K428 (10V max analog output)
    NOVA N101VTF voltage-to-frequency converter (Fmax=1e6Hz, F0=0Hz, Vmax=10V)
    P201 counter board
    """

    aliases = ["iodet2"]

    def __init__(self, **kwargs):
        # Build the source/detector geometry for the secondary target
        kwargs2 = {}
        if "source" in kwargs:
            kwargs2["source"] = kwargs.pop("source")
        else:
            kwargs2["source"] = "synchrotron"
        if instance.isstring(kwargs2["source"]):
            kwargs2["source"] = xraysources.factory(kwargs2["source"])
        kwargs2["detector"] = self
        geometry = diodegeometries.factory("sxm_iodet2", **kwargs2)
        kwargs["attenuators"] = {}
        kwargs["attenuators"]["Detector"] = {
            "material": element.Element("Si"),
            "thickness": 0.1,
        }
        kwargs["ehole"] = constants.eholepair_si()
        # Uncoated silicon-nitride window as secondary target (cf. SXM_IODET1)
        window = compoundfromname.compoundfromname("silicon nitride")
        secondarytarget = multilayer.Multilayer(
            material=[window], thickness=[500e-7], geometry=geometry
        )
        vtof = Oscillator(
            Fmax=ureg.Quantity(1e6, "Hz"),
            F0=ureg.Quantity(0, "Hz"),
            Vmax=ureg.Quantity(10, "volt"),
        )
        super(SXM_IODET2, self).__init__(
            gain=ureg.Quantity(1e5, "volt/ampere"),
            gainrounder=GainRounder(base=10),
            darkcurrent=ureg.Quantity(0, "ampere"),
            oscillator=vtof,
            secondarytarget=secondarytarget,
            beforesample=True,
            **kwargs
        )
class XRD_IDET(NonCalibratedPNdiode):
    """Si pn-diode read out without a voltage-to-frequency oscillator."""

    aliases = ["microdiff_pico1"]

    def __init__(self, **kwargs):
        kwargs["attenuators"] = {
            "Detector": {"material": element.Element("Si"), "thickness": 0.1}
        }
        kwargs["ehole"] = constants.eholepair_si()
        super(XRD_IDET, self).__init__(
            gain=ureg.Quantity(2.1e-6, "ampere"),
            gainrounder=GainRounder(base=10, m=2.1),
            darkcurrent=ureg.Quantity(0, "ampere"),
            beforesample=False,
            **kwargs
        )
class ID16B_IT(NonCalibratedPNdiode):
    """
    Keithley K428 (2V max analog output)
    V2F100 voltage-to-frequency converter (Fmax=50e6Hz, F0=?Hz, Vmax=2.5V)
    P201 counter board
    """

    aliases = ["id16b_It"]

    def __init__(self, **kwargs):
        # Diode behind the sample, with an air path in front of it
        kwargs["attenuators"] = {
            self.ATMOSPHERELABEL: {
                "material": compoundfromname.compoundfromname("air"),
                "thickness": 80.0,
            },
            "Detector": {"material": element.Element("Si"), "thickness": 500e-4},
        }
        kwargs["ehole"] = constants.eholepair_si()
        voltage_to_frequency = Oscillator(
            Fmax=ureg.Quantity(50e6, "Hz"),
            F0=ureg.Quantity(0, "Hz"),
            Vmax=ureg.Quantity(2.5, "volt"),
        )
        super(ID16B_IT, self).__init__(
            gain=ureg.Quantity(2.1e-6, "ampere"),
            gainrounder=GainRounder(base=10, m=2.1),
            darkcurrent=ureg.Quantity(0, "ampere"),
            oscillator=voltage_to_frequency,
            Vmax=ureg.Quantity(2.1, "volt"),
            beforesample=False,
            **kwargs
        )
class ID16B_I0(NonCalibratedPNdiode):
    """
    Keithley K428 (2V max analog output)
    V2F100 voltage-to-frequency converter (Fmax=50e6Hz, F0=?Hz, Vmax=2.5V)
    P201 counter board
    """

    aliases = ["id16b_I0"]

    def __init__(self, **kwargs):
        # Monitor diode in front of the sample
        kwargs["attenuators"] = {
            "Detector": {"material": element.Element("Si"), "thickness": 500e-4}
        }
        kwargs["ehole"] = constants.eholepair_si()
        voltage_to_frequency = Oscillator(
            Fmax=ureg.Quantity(50e6, "Hz"),
            F0=ureg.Quantity(0, "Hz"),
            Vmax=ureg.Quantity(2.5, "volt"),
        )
        super(ID16B_I0, self).__init__(
            gain=ureg.Quantity(2.1e-6, "ampere"),
            gainrounder=GainRounder(base=10, m=2.1),
            darkcurrent=ureg.Quantity(0, "ampere"),
            oscillator=voltage_to_frequency,
            Vmax=ureg.Quantity(2.1, "volt"),
            beforesample=True,
            **kwargs
        )
class ID16B_IC(NonCalibratedPNdiode):
    """
    Keithley K428 (2V max analog output)
    V2F100 voltage-to-frequency converter (Fmax=50e6Hz, F0=?Hz, Vmax=2.5V)
    P201 counter board
    """

    aliases = ["id16b_IC"]

    def __init__(self, **kwargs):
        # Build the source/detector geometry for the secondary target
        kwargs2 = {}
        if "source" in kwargs:
            kwargs2["source"] = kwargs.pop("source")
        else:
            kwargs2["source"] = "synchrotron"
        if instance.isstring(kwargs2["source"]):
            kwargs2["source"] = xraysources.factory(kwargs2["source"])
        kwargs2["detector"] = self
        geometry = diodegeometries.factory("id16b_ic", **kwargs2)
        kwargs["attenuators"] = {}
        kwargs["attenuators"]["Detector"] = {
            "material": element.Element("Si"),
            "thickness": 500e-4,
        }
        kwargs["ehole"] = constants.eholepair_si()
        window = compoundfromname.compoundfromname(
            "silicon nitride"
        )  # TODO: mirror material + thickness???
        secondarytarget = multilayer.Multilayer(
            material=[window], thickness=[500e-7], geometry=geometry
        )
        vtof = Oscillator(
            Fmax=ureg.Quantity(50e6, "Hz"),
            F0=ureg.Quantity(0, "Hz"),
            Vmax=ureg.Quantity(2.5, "volt"),
        )
        super(ID16B_IC, self).__init__(
            gain=ureg.Quantity(2.1e-6, "ampere"),
            gainrounder=GainRounder(base=10, m=2.1),
            darkcurrent=ureg.Quantity(0, "ampere"),
            oscillator=vtof,
            Vmax=ureg.Quantity(2.1, "volt"),
            secondarytarget=secondarytarget,
            beforesample=True,
            **kwargs
        )
# Module-level shortcuts to the PNdiode class factory/registry so callers can
# instantiate diodes by alias without importing the class itself.
factory = PNdiode.factory
registry = PNdiode.clsregistry
| woutdenolf/spectrocrunch | spectrocrunch/detectors/diode.py | Python | mit | 76,917 |
"""
Tinman Test Application
"""
from datetime import date
import logging
from tornado import web
from tinman.handlers import SessionRequestHandler
from tinman import __version__
LOGGER = logging.getLogger(__name__)

# Default application configuration consumed by the tinman runner:
# Tornado Application/HTTPServer options, the logging setup and the
# URL routing table.
CONFIG = {'Application': {'debug': True,
                          'xsrf_cookies': False},
          'HTTPServer': {'no_keep_alive': False,
                         'ports': [8000],
                         'xheaders': False},
          'Logging': {'loggers': {'tinman': {'propagate': True,
                                             'level': 'DEBUG'}},
                      'formatters': {'verbose': ('%(levelname) -10s %(asctime)s'
                                                 ' %(name) -30s %(funcName) '
                                                 '-25s: %(message)s')},
                      'filters': {'tinman': 'tinman'},
                      'handlers': {'console': {'formatter': 'verbose',
                                               'filters': ['tinman'],
                                               'debug_only': True,
                                               'class': 'logging.StreamHandler',
                                               'level': 'DEBUG'},
                                   'file': {'delay': False,
                                            'mode': 'a',
                                            'encoding': 'UTF-8',
                                            'formatter': 'verbose',
                                            'filters': ['tinman'],
                                            'class': 'logging.FileHandler',
                                            'filename': '/tmp/tinman.log'}}},
          'Routes': [("/", "tinman.test.DefaultHandler")]}
class Handler(SessionRequestHandler):
    """Example session-aware handler returning a JSON request snapshot."""

    @web.asynchronous
    def get(self, *args, **kwargs):
        """Respond with a JSON document describing the request, the session
        and the running tinman version.

        :param args: positional args
        :param kwargs: keyword args
        """
        self.session.username = 'gmr'
        session = self.session.as_dict()
        last_request = session['last_request_at']
        if last_request:
            session['last_request_at'] = str(date.fromtimestamp(last_request))
        # Send a JSON string for our test
        response = {'message': 'Hello World',
                    'request': {'method': self.request.method,
                                'protocol': self.request.protocol,
                                'path': self.request.path,
                                'query': self.request.query,
                                'remote_ip': self.request.remote_ip,
                                'version': self.request.version},
                    'session': session,
                    'tinman': {'version': __version__}}
        self.write(response)
        self.finish()
| lucius-feng/tinman | tinman/example.py | Python | bsd-3-clause | 2,829 |
from betamax import Betamax
from currencycloud import Client, Config
from currencycloud.resources import *
class TestTransactions:
    """Integration tests for the Transactions API, replayed from Betamax
    cassettes."""

    def setup_method(self, method):
        # TODO: To run against real server please delete ../fixtures/vcr_cassettes/*
        # and replace login_id and api_key with valid credentials before
        # running the tests
        login_id = 'development@currencycloud.com'
        api_key = 'deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef'
        environment = Config.ENV_DEMO

        self.client = Client(login_id, api_key, environment)

    def test_transactions_can_find(self):
        with Betamax(self.client.config.session) as betamax:
            betamax.use_cassette('transactions/can_find')

            found = self.client.transactions.find(currency="GBP", per_page=1)
            assert len(found) == 1

            transaction = found[0]
            assert transaction is not None
            assert isinstance(transaction, Transaction)
            assert transaction.currency == "GBP"

    def test_transactions_can_retrieve(self):
        with Betamax(self.client.config.session) as betamax:
            betamax.use_cassette('transactions/can_retrieve')

            transaction = self.client.transactions.retrieve(
                "da45e164-620a-47e7-80a6-2e66d5919276")
            assert transaction is not None
            assert transaction.currency == "GBP"
| CurrencyCloud/currencycloud-python | tests/integration/test_transactions.py | Python | mit | 1,428 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Default to the project's settings module and the "Dev" configuration
    # class (django-configurations) unless already set in the environment.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")
    os.environ.setdefault('DJANGO_CONFIGURATION', 'Dev')
    # Imported late so the environment variables above are set first.
    from configurations.management import execute_from_command_line
    execute_from_command_line(sys.argv)
| DanielGabris/radius_restserver | src/manage.py | Python | mit | 302 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pyspark.serializers import NoOpSerializer
from pyspark.storagelevel import StorageLevel
from pyspark.streaming import DStream
__all__ = ['KinesisUtils', 'InitialPositionInStream', 'utf8_decoder']
def utf8_decoder(s):
    """Decode *s* from UTF-8, passing ``None`` through unchanged."""
    return None if s is None else s.decode('utf-8')
class KinesisUtils(object):
    """Factory for Kinesis-backed input DStreams."""

    @staticmethod
    def createStream(ssc, kinesisAppName, streamName, endpointUrl, regionName,
                     initialPositionInStream, checkpointInterval,
                     storageLevel=StorageLevel.MEMORY_AND_DISK_2,
                     awsAccessKeyId=None, awsSecretKey=None, decoder=utf8_decoder,
                     stsAssumeRoleArn=None, stsSessionName=None, stsExternalId=None):
        """
        Create an input stream that pulls messages from a Kinesis stream. This uses the
        Kinesis Client Library (KCL) to pull messages from Kinesis.

        .. note:: The given AWS credentials will get saved in DStream checkpoints if checkpointing
            is enabled. Make sure that your checkpoint directory is secure.

        :param ssc:  StreamingContext object
        :param kinesisAppName:  Kinesis application name used by the Kinesis Client Library (KCL) to
                                update DynamoDB
        :param streamName:  Kinesis stream name
        :param endpointUrl:  Url of Kinesis service (e.g., https://kinesis.us-east-1.amazonaws.com)
        :param regionName:  Name of region used by the Kinesis Client Library (KCL) to update
                            DynamoDB (lease coordination and checkpointing) and CloudWatch (metrics)
        :param initialPositionInStream:  In the absence of Kinesis checkpoint info, this is the
                                         worker's initial starting position in the stream. The
                                         values are either the beginning of the stream per Kinesis'
                                         limit of 24 hours (InitialPositionInStream.TRIM_HORIZON) or
                                         the tip of the stream (InitialPositionInStream.LATEST).
        :param checkpointInterval:  Checkpoint interval for Kinesis checkpointing. See the Kinesis
                                    Spark Streaming documentation for more details on the different
                                    types of checkpoints.
        :param storageLevel:  Storage level to use for storing the received objects (default is
                              StorageLevel.MEMORY_AND_DISK_2)
        :param awsAccessKeyId:  AWS AccessKeyId (default is None. If None, will use
                                DefaultAWSCredentialsProviderChain)
        :param awsSecretKey:  AWS SecretKey (default is None. If None, will use
                              DefaultAWSCredentialsProviderChain)
        :param decoder:  A function used to decode value (default is utf8_decoder)
        :param stsAssumeRoleArn: ARN of IAM role to assume when using STS sessions to read from
                                 the Kinesis stream (default is None).
        :param stsSessionName: Name to uniquely identify STS sessions used to read from Kinesis
                               stream, if STS is being used (default is None).
        :param stsExternalId: External ID that can be used to validate against the assumed IAM
                              role's trust policy, if STS is being used (default is None).
        :return: A DStream object
        """
        # Translate the Python-side storage level and duration into their
        # JVM counterparts before crossing the py4j bridge.
        jlevel = ssc._sc._getJavaStorageLevel(storageLevel)
        jduration = ssc._jduration(checkpointInterval)

        try:
            # Use KinesisUtilsPythonHelper to access Scala's KinesisUtils
            helper = ssc._jvm.org.apache.spark.streaming.kinesis.KinesisUtilsPythonHelper()
        except TypeError as e:
            # A 'JavaPackage' TypeError means the kinesis-asl jar is missing
            # from the classpath: print install instructions, then re-raise.
            if str(e) == "'JavaPackage' object is not callable":
                KinesisUtils._printErrorMsg(ssc.sparkContext)
            raise
        jstream = helper.createStream(ssc._jssc, kinesisAppName, streamName, endpointUrl,
                                      regionName, initialPositionInStream, jduration, jlevel,
                                      awsAccessKeyId, awsSecretKey, stsAssumeRoleArn,
                                      stsSessionName, stsExternalId)
        stream = DStream(jstream, ssc, NoOpSerializer())
        # Raw records are bytes; run them through the decoder (UTF-8 default)
        return stream.map(lambda v: decoder(v))

    @staticmethod
    def _printErrorMsg(sc):
        """Print instructions for adding the kinesis-asl artifact to the classpath."""
        print("""
________________________________________________________________________________________________

  Spark Streaming's Kinesis libraries not found in class path. Try one of the following.

  1. Include the Kinesis library and its dependencies with in the
     spark-submit command as

     $ bin/spark-submit --packages org.apache.spark:spark-streaming-kinesis-asl:%s ...

  2. Download the JAR of the artifact from Maven Central http://search.maven.org/,
     Group Id = org.apache.spark, Artifact Id = spark-streaming-kinesis-asl-assembly, Version = %s.
     Then, include the jar in the spark-submit command as

     $ bin/spark-submit --jars <spark-streaming-kinesis-asl-assembly.jar> ...

________________________________________________________________________________________________

""" % (sc.version, sc.version))
class InitialPositionInStream(object):
    """Initial positions in a Kinesis stream, mirroring the KCL constants."""
    LATEST = 0
    TRIM_HORIZON = 1
| WindCanDie/spark | python/pyspark/streaming/kinesis.py | Python | apache-2.0 | 6,165 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.