| repo_name (string, 5–100 chars) | path (string, 4–231 chars) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 6–947k) | score (float64, 0–0.34) | prefix (string, 0–8.16k chars) | middle (string, 3–512 chars) | suffix (string, 0–8.17k chars) |
|---|---|---|---|---|---|---|---|---|
| fert89/prueba-3-heroku-flask | fabfile.py | Python | bsd-3-clause | 534 | 0 |
from fabric.api import *
# start with the default server
def run():
""" Start with the default server """
local('python runserver.py')
# start with gunicorn server.
def grun():
""" Start with gunicorn server """
local('gunicorn -c gunicorn.conf runserver:app')
# run tests
def tests():
""" Run unittests """
local('python runtests.py --verbose')
# start interactive shell within the flask environment
def shell():
""" Start interactive shell within flask environment """
local('python shell.py')
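# Usage note (an assumption based on Fabric 1.x conventions, since this file
# imports fabric.api): each top-level function above is a Fabric task, invoked
# from a shell in the project directory as, for example:
#   fab run      # python runserver.py
#   fab grun     # gunicorn -c gunicorn.conf runserver:app
#   fab tests    # python runtests.py --verbose
#   fab shell    # python shell.py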
|
| Acehaidrey/incubator-airflow | airflow/providers/jdbc/hooks/jdbc.py | Python | apache-2.0 | 3,656 | 0.001915 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Any, Dict, Optional
import jaydebeapi
from airflow.hooks.dbapi import DbApiHook
from airflow.models.connection import Connection
class JdbcHook(DbApiHook):
"""
General hook for jdbc db access.
JDBC URL, username and password will be taken from the predefined connection.
Note that the whole JDBC URL must be specified in the "host" field in the DB.
Raises an airflow error if the given connection id doesn't exist.
"""
conn_name_attr = 'jdbc_conn_id'
default_conn_name = 'jdbc_default'
conn_type = 'jdbc'
hook_name = 'JDBC Connection'
supports_autocommit = True
@staticmethod
def get_connection_form_widgets() -> Dict[str, Any]:
"""Returns connection widgets to add to connection form"""
from flask_appbuilder.fieldwidgets import BS3TextFieldWidget
from flask_babel import lazy_gettext
from wtforms import StringField
return {
"extra__jdbc__drv_path": StringField(lazy_gettext('Driver Path'), widget=BS3TextFieldWidget()),
"extra__jdbc__drv_clsname": StringField(
lazy_gettext('Driver Class'), widget=BS3TextFieldWidget()
),
}
@staticmethod
def get_ui_field_behaviour() -> Dict[str, Any]:
"""Returns custom field behaviour"""
return {
"hidden_fields": ['port', 'schema', 'extra'],
"relabeling": {'host': 'Connection URL'},
}
def get_conn(self) -> jaydebeapi.Connection:
conn: Connection = self.get_connection(getattr(self, self.conn_name_attr))
host: str = conn.host
login: str = conn.login
psw: str = conn.password
jdbc_driver_loc: Optional[str] = conn.extra_dejson.get('extra__jdbc__drv_path')
jdbc_driver_name: Optional[str] = conn.extra_dejson.get('extra__jdbc__drv_clsname')
conn = jaydebeapi.connect(
jclassname=jdbc_driver_name,
url=str(host),
driver_args=[str(login), str(psw)],
jars=jdbc_driver_loc.split(",") if jdbc_driver_loc else None,
)
return conn
def set_autocommit(self, conn: jaydebeapi.Connection, autocommit: bool) -> None:
"""
Enable or disable autocommit for the given connection.
:param conn: The connection.
:param autocommit: The connection's autocommit setting.
"""
conn.jconn.setAutoCommit(autocommit)
def get_autocommit(self, conn: jaydebeapi.Connection) -> bool:
"""
Get autocommit setting for the provided connection.
Return True if conn.autocommit is set to True.
Return False if conn.autocommit is not set or set to False
:param conn: The connection.
:return: connection autocommit setting.
:rtype: bool
"""
return conn.jconn.getAutoCommit()
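# Hedged usage sketch (not part of the original module): because JdbcHook is a
# DbApiHook, the usual helpers work once an Airflow Connection with the default
# id "jdbc_default" defined above exists, with the full JDBC URL in its "host"
# field and the driver path/class in the extra__jdbc__drv_path /
# extra__jdbc__drv_clsname extras.
# hook = JdbcHook(jdbc_conn_id="jdbc_default")
# rows = hook.get_records("SELECT 1")  # DbApiHook helper that calls get_conn() internally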
|
| anhstudios/swganh | data/scripts/templates/object/mobile/shared_tanc_mite_hue.py | Python | mit | 582 | 0.041237 |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Creature()
result.template = "object/mobile/shared_tanc_mite_hue.iff"
result.attribute_template_id = 9
result.stfName("monster_name","tanc_mite")
#### BEGIN MODIFICATIONS ####
result.setStringAttribute("radial_filename", "radials/player_pet.py")
result.options_mask = 0x100
result.pvp_status = PVPSTATUS.PvPStatus_None
#### END MODIFICATIONS ####
return result
|
| stvstnfrd/edx-platform | openedx/features/enterprise_support/signals.py | Python | agpl-3.0 | 5,796 | 0.004141 |
"""
This module contains signals related to enterprise.
"""
import logging
import six
from django.conf import settings
from django.contrib.auth.models import User # lint-amnesty, pylint: disable=imported-auth-user
from django.db.models.signals import post_save, pre_save
from django.dispatch import receiver
from enterprise.models import EnterpriseCourseEnrollment, EnterpriseCustomer, EnterpriseCustomerUser
from integrated_channels.integrated_channel.tasks import (
transmit_single_learner_data,
transmit_single_subsection_learner_data
)
from slumber.exceptions import HttpClientError
from lms.djangoapps.email_marketing.tasks import update_user
from openedx.core.djangoapps.commerce.utils import ecommerce_api_client
from openedx.core.djangoapps.signals.signals import COURSE_GRADE_NOW_PASSED, COURSE_ASSESSMENT_GRADE_CHANGED
from openedx.features.enterprise_support.api import enterprise_enabled
from openedx.features.enterprise_support.tasks import clear_enterprise_customer_data_consent_share_cache
from openedx.features.enterprise_support.utils import clear_data_consent_share_cache, is_enterprise_learner
from common.djangoapps.student.signals import UNENROLL_DONE
log = logging.getLogger(__name__)
@receiver(post_save, sender=EnterpriseCustomerUser)
def update_email_marketing_user_with_enterprise_vars(sender, instance, **kwargs): # pylint: disable=unused-argument, invalid-name
"""
Update the SailThru user with enterprise-related vars.
"""
user = User.objects.get(id=instance.user_id)
# perform update asynchronously
update_user.delay(
sailthru_vars={
'is_enterprise_learner': True,
'enterprise_name': instance.enterprise_customer.name,
},
email=user.email
)
@receiver(post_save, sender=EnterpriseCourseEnrollment)
def update_dsc_cache_on_course_enrollment(sender, instance, **kwargs): # pylint: disable=unused-argument
"""
clears data_sharing_consent_needed cache after Enterprise Course Enrollment
"""
clear_data_consent_share_cache(
instance.enterprise_customer_user.user_id,
instance.course_id
)
@receiver(pre_save, sender=EnterpriseCustomer)
def update_dsc_cache_on_enterprise_customer_update(sender, instance, **kwargs):
"""
clears data_sharing_consent_needed cache after enable_data_sharing_consent flag is changed.
"""
old_instance = sender.objects.filter(pk=instance.uuid).first()
if old_instance: # instance already exists, so it's updating.
new_value = instance.enable_data_sharing_consent
old_value = old_instance.enable_data_sharing_consent
if new_value != old_value:
kwargs = {'enterprise_customer_uuid': six.text_type(instance.uuid)}
result = clear_enterprise_customer_data_consent_share_cache.apply_async(kwargs=kwargs)
log.info(u"DSC: Created {task_name}[{task_id}] with arguments {kwargs}".format(
task_name=clear_enterprise_customer_data_consent_share_cache.name,
task_id=result.task_id,
kwargs=kwargs,
))
@receiver(COURSE_GRADE_NOW_PASSED, dispatch_uid="new_passing_enterprise_learner")
def handle_enterprise_learner_passing_grade(sender, user, course_id, **kwargs): # pylint: disable=unused-argument
"""
Listen for a learner passing a course, transmit data to relevant integrated channel
"""
if enterprise_enabled() and is_enterprise_learner(user):
kwargs = {
'username': six.text_type(user.username),
'course_run_id': six.text_type(course_id)
}
transmit_single_learner_data.apply_async(kwargs=kwargs)
@receiver(COURSE_ASSESSMENT_GRADE_CHANGED)
def handle_enterprise_learner_subsection(sender, user, course_id, subsection_id, subsection_grade, **kwargs): # pylint: disable=unused-argument
"""
Listen for an enterprise learner completing a subsection, transmit data to relevant integrated channel.
"""
if enterprise_enabled() and is_enterprise_learner(user):
kwargs = {
'username': str(user.username),
'course_run_id': str(course_id),
'subsection_id': str(subsection_id),
'grade': str(subsection_grade),
}
transmit_single_subsection_learner_data.apply_async(kwargs=kwargs)
@receiver(UNENROLL_DONE)
def refund_order_voucher(sender, course_enrollment, skip_refund=False, **kwargs): # pylint: disable=unused-argument
"""
Call the /api/v2/enterprise/coupons/create_refunded_voucher/ API to create new voucher and assign it to user.
"""
if skip_refund:
return
if not course_enrollment.refundable():
return
if not EnterpriseCourseEnrollment.objects.filter(
enterprise_customer_user__user_id=course_enrollment.user_id,
course_id=str(course_enrollment.course.id)
).exists():
return
service_user = User.objects.get(username=settings.ECOMMERCE_SERVICE_WORKER_USERNAME)
client = ecommerce_api_client(service_user)
order_number = course_enrollment.get_order_attribute_value('order_number')
if order_number:
error_message = u"Encountered {} from ecommerce while creating refund voucher. Order={}, enrollment={}, user={}"
try:
client.enterprise.coupons.create_refunded_voucher.post({"order": order_number})
except HttpClientError as ex:
log.info(
error_message.format(type(ex).__name__, order_number, course_enrollment, course_enrollment.user)
)
except Exception as ex: # pylint: disable=broad-except
log.exception(
error_message.format(type(ex).__name__, order_number, course_enrollment, course_enrollment.user)
)
|
| lampwins/netbox | netbox/extras/tests/test_customfields.py | Python | apache-2.0 | 13,178 | 0.003035 |
from datetime import date
from django.contrib.contenttypes.models import ContentType
from django.test import TestCase
from django.urls import reverse
from rest_framework import status
from dcim.models import Site
from extras.constants import CF_TYPE_TEXT, CF_TYPE_INTEGER, CF_TYPE_BOOLEAN, CF_TYPE_DATE, CF_TYPE_SELECT, CF_TYPE_URL, CF_TYPE_SELECT
from extras.models import CustomField, CustomFieldValue, CustomFieldChoice
from utilities.testing import APITestCase
from virtualization.models import VirtualMachine
class CustomFieldTest(TestCase):
def setUp(self):
Site.objects.bulk_create([
Site(name='Site A', slug='site-a'),
Site(name='Site B', slug='site-b'),
Site(name='Site C', slug='site-c'),
])
def test_simple_fields(self):
DATA = (
{'field_type': CF_TYPE_TEXT, 'field_value': 'Foobar!', 'empty_value': ''},
{'field_type': CF_TYPE_INTEGER, 'field_value': 0, 'empty_value': None},
{'field_type': CF_TYPE_INTEGER, 'field_value': 42, 'empty_value': None},
{'field_type': CF_TYPE_BOOLEAN, 'field_value': True, 'empty_value': None},
{'field_type': CF_TYPE_BOOLEAN, 'field_value': False, 'empty_value': None},
{'field_type': CF_TYPE_DATE, 'field_value': date(2016, 6, 23), 'empty_value': None},
{'field_type': CF_TYPE_URL, 'field_value': 'http://example.com/', 'empty_value': ''},
)
obj_type = ContentType.objects.get_for_model(Site)
for data in DATA:
# Create a custom field
cf = CustomField(type=data['field_type'], name='my_field', required=False)
cf.save()
cf.obj_type.set([obj_type])
cf.save()
# Assign a value to the first Site
site = Site.objects.first()
cfv = CustomFieldValue(field=cf, obj_type=obj_type, obj_id=site.id)
cfv.value = data['field_value']
cfv.save()
# Retrieve the stored value
cfv = CustomFieldValue.objects.filter(obj_type=obj_type, obj_id=site.pk).first()
self.assertEqual(cfv.value, data['field_value'])
# Delete the stored value
cfv.value = data['empty_value']
cfv.save()
self.assertEqual(CustomFieldValue.objects.filter(obj_type=obj_type, obj_id=site.pk).count(), 0)
# Delete the custom field
cf.delete()
def test_select_field(self):
obj_type = ContentType.objects.get_for_model(Site)
# Create a custom field
cf = CustomField(type=CF_TYPE_SELECT, name='my_field', required=False)
cf.save()
cf.obj_type.set([obj_type])
cf.save()
# Create some choices for the field
CustomFieldChoice.objects.bulk_create([
CustomFieldChoice(field=cf, value='Option A'),
CustomFieldChoice(field=cf, value='Option B'),
CustomFieldChoice(field=cf, value='Option C'),
])
# Assign a value to the first Site
site = Site.objects.first()
cfv = CustomFieldValue(field=cf, obj_type=obj_type, obj_id=site.id)
cfv.value = cf.choices.first()
cfv.save()
# Retrieve the stored value
cfv = CustomFieldValue.objects.filter(obj_type=obj_type, obj_id=site.pk).first()
self.assertEqual(str(cfv.value), 'Option A')
# Delete the stored value
cfv.value = None
cfv.save()
self.assertEqual(CustomFieldValue.objects.filter(obj_type=obj_type, obj_id=site.pk).count(), 0)
# Delete the custom field
cf.delete()
class CustomFieldAPITest(APITestCase):
def setUp(self):
super().setUp()
content_type = ContentType.objects.get_for_model(Site)
# Text custom field
self.cf_text = CustomField(type=CF_TYPE_TEXT, name='magic_word')
self.cf_text.save()
self.cf_text.obj_type.set([content_type])
self.cf_text.save()
# Integer custom field
self.cf_integer = CustomField(type=CF_TYPE_INTEGER, name='magic_number')
self.cf_integer.save()
self.cf_integer.obj_type.set([content_type])
self.cf_integer.save()
# Boolean custom field
self.cf_boolean = CustomField(type=CF_TYPE_BOOLEAN, name='is_magic')
self.cf_boolean.save()
self.cf_boolean.obj_type.set([content_type])
self.cf_boolean.save()
# Date custom field
self.cf_date = CustomField(type=CF_TYPE_DATE, name='magic_date')
self.cf_date.save()
self.cf_date.obj_type.set([content_type])
self.cf_date.save()
# URL custom field
self.cf_url = CustomField(type=CF_TYPE_URL, name='magic_url')
self.cf_url.save()
self.cf_url.obj_type.set([content_type])
self.cf_url.save()
# Select custom field
self.cf_select = CustomField(type=CF_TYPE_SELECT, name='magic_choice')
self.cf_select.save()
self.cf_select.obj_type.set([content_type])
self.cf_select.save()
self.cf_select_choice1 = CustomFieldChoice(field=self.cf_select, value='Foo')
self.cf_select_choice1.save()
self.cf_select_choice2 = CustomFieldChoice(field=self.cf_select, value='Bar')
self.cf_select_choice2.save()
self.cf_select_choice3 = CustomFieldChoice(field=self.cf_select, value='Baz')
self.cf_select_choice3.save()
self.site = Site.objects.create(name='Test Site 1', slug='test-site-1')
def test_get_obj_without_custom_fields(self):
url = reverse('dcim-api:site-detail', kwargs={'pk': self.site.pk})
response = self.client.get(url, **self.header)
self.assertEqual(response.data['name'], self.site.name)
self.assertEqual(response.data['custom_fields'], {
'magic_word': None,
'magic_number': None,
'is_magic': None,
'magic_date': None,
'magic_url': None,
'magic_choice': None,
})
def test_get_obj_with_custom_fields(self):
CUSTOM_FIELD_VALUES = [
(self.cf_text, 'Test string'),
(self.cf_integer, 1234),
(self.cf_boolean, True),
(self.cf_date, date(2016, 6, 23)),
(self.cf_url, 'http://example.com/'),
(self.cf_select, self.cf_select_choice1.pk),
]
for field, value in CUSTOM_FIELD_VALUES:
cfv = CustomFieldValue(field=field, obj=self.site)
cfv.value = value
cfv.save()
url = reverse('dcim-api:site-detail', kwargs={'pk': self.site.pk})
response = self.client.get(url, **self.header)
self.assertEqual(response.data['name'], self.site.name)
self.assertEqual(response.data['custom_fields'].get('magic_word'), CUSTOM_FIELD_VALUES[0][1])
self.assertEqual(response.data['custom_fields'].get('magic_number'), CUSTOM_FIELD_VALUES[1][1])
self.assertEqual(response.data['custom_fields'].get('is_magic'), CUSTOM_FIELD_VALUES[2][1])
self.assertEqual(response.data['custom_fields'].get('magic_date'), CUSTOM_FIELD_VALUES[3][1])
self.assertEqual(response.data['custom_fields'].get('magic_url'), CUSTOM_FIELD_VALUES[4][1])
self.assertEqual(response.data['custom_fields'].get('magic_choice'), {
'value': self.cf_select_choice1.pk, 'label': 'Foo'
})
def test_set_custom_field_text(self):
data = {
'name': 'Test Site 1',
'slug': 'test-site-1',
'custom_fields': {
'magic_word': 'Foo bar baz',
}
}
url = reverse('dcim-api:site-detail', kwargs={'pk': self.site.pk})
response = self.client.put(url, data, format='json', **self.header)
self.assertHttpStatus(response, status.HTTP_200_OK)
self.assertEqual(response.data['custom_fields'].get('magic_word'), data['custom_fields']['magic_word'])
cfv = self.site.custom_field_values.get(field=self.cf_text)
self.assertEqual(cfv.value, data['custom_fields']['magic_word'])
def test_set_custom_field_integer(self):
|
| Remi-C/LOD_ordering_for_patches_of_points | script/loading benchmark/rforest_on_patch_lean.py | Python | lgpl-3.0 | 8,121 | 0.046915 |
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 6 17:01:05 2014
@author: remi
@TODO :
in the function train_RForest_with_kfold we should keep all result proba for each class, this could be very interesting.
"""
import numpy as np ; #efficient arrays
import pandas as pd; # data frame to do sql like operation
import sklearn
reload(sklearn)
from sklearn.ensemble import RandomForestClassifier ; #base lib
from sklearn import cross_validation, preprocessing ; #normalizing data, creating kfold validation
def create_test_data(feature_number, data_size, class_list):
"""simple function to emulate input, gid is a unique int, other are features"""
import random ; #used to chose a class randomly
#create test vector
feature = np.random.random_sample((data_size,feature_number)) * 10 ;
gid = np.arange(13,data_size+13) ;
#create ground truth class vector : a 1,N vector containing randomly one of the possible class
ground_truth_class = np.zeros(data_size);
for i,(not_used) in enumerate(ground_truth_class):
ground_truth_class[i] = np.random.choice(class_list) ;
return gid, feature, ground_truth_class ;
def create_label_equivalency(labels_name, labels_number):
"""we create an equivalency list between class name and class number"""
import numpy as np;
labels = np.zeros(len(labels_name), dtype={'names':['class_id', 'class_name']\
, 'formats':['i4','a10']}) ;
for i in np.arange(0,len(labels_name)):
labels['class_id'][i] = labels_number[i]
labels['class_name'][i] = labels_name[i]
return labels;
def preprocess_data(X):
from sklearn import preprocessing ;
scaler = preprocessing.StandardScaler(copy=False,with_std=False);
scaler.fit_transform(X) ;
#scaler.transform(Y);
#scaler.transform(X);
return scaler;
def train_RForest_with_kfold(i,train, test, gid,X,Y,weight,scaler,clf,result,feature_importances,learning_time,predicting_time ):
import datetime;
import time;
# creating data for train and test
X_train, X_test, Y_train, Y_test, Weight_train, Weight_test = X[train],X[test], Y[train], Y[test], weight[train], weight[test] ;
#learning
time_temp = time.clock();
print ' starting learning at \n\t\t\t\t%s' % datetime.datetime.now() ;
clf.fit(X_train,Y_train,Weight_train) ;
learning_time = learning_time+ time.clock() - time_temp;
#predicting
print ' learning finished, starting prediction at \n\t\t\t\t%s' % datetime.datetime.now() ;
time_temp = time.clock();
tmp_prob = clf.predict(X_test) ;
predicting_time += time.clock() - time_temp;
print ' prediction finished at \n\t\t\t\t%s' % datetime.datetime.now() ;
#grouping for score per class
proba_class_chosen = np.column_stack( \
(np.array(gid)[test],tmp_prob, Y_test,Weight_test ) ) ;
#constructing the result data frame
df = pd.DataFrame(proba_class_chosen, columns = ("gid","class_chosen","ground_truth_class" ,"weight")) ;
if (i==0):
result = result.append(df, ignore_index=True) ;
else:
#print 'entering here, df is : ', df
result = result.append( df,ignore_index=True) ;
#plpy.notice("feature used, by importcy");
#plpy.notice(clf.feature_importances_)
#storing how important was each feature to make the prediction
feature_importances.append(clf.feature_importances_) ;
return learning_time,predicting_time,result
def Rforest_learn_predict(gid, X, Y,weight, labels, k_folds, random_forest_trees ,plot_directory):
from sklearn.metrics import classification_report
import datetime;
scaler = preprocess_data(X);
#creating the random forest object
clf = RandomForestClassifier(random_forest_trees, criterion="entropy" ,min_samples_leaf=20) ;
#cutting the set into 10 pieces, then proposing 10 partitions of 9 (training) + 1 (test) data
kf_total = cross_validation.KFold(len(X), n_folds = k_folds, shuffle = True, random_state = 4) ;
result = pd.DataFrame() ;
feature_importances = [] ;
learning_time = 0.0 ;
predicting_time = 0.0 ;
for i ,(train, test) in enumerate(kf_total) :
print ' working on kfold %s , %s' % (i+1,datetime.datetime.now())
learning_time,predicting_time, result = train_RForest_with_kfold(i,train, test,gid,X,Y,weight,scaler,clf,result,feature_importances,learning_time,predicting_time) ;
report = classification_report( result['ground_truth_class'],result['class_chosen'],target_names = labels)#,sample_weight=result['weight']) ;
return np.column_stack((result['gid']
,result['ground_truth_class'].astype(int)
, result['class_chosen'].astype(int)
, np.zeros(len(result['ground_truth_class'])) )),report,feature_importances,learning_time,predicting_time;
def RForest_learn_predict_pg(gids,feature_iar,gt_classes,weight,labels_name,class_list, k_folds,random_forest_ntree, plot_directory):
"""Compute random forest classifiers using feature_iar and gt_classes ground trhuth. Divide the data set into kfolds to perform the operation K times
@param gids is a int[n]
@param feature_iar is a float[m x n], where m is the number of feature, and the matrix is wirtten row by row
@param gt_classes is a int[n] giving the ground truth class for each observation
@param k_folds is a int describing in how much part we should split the data set
@param random_forest_ntree how much tree in the frest?
@param plot_directory is a string like '/tmp', describing the directory where to write the figures generated
"""
#reshape input feature vector into feature matrix
feature_iar = np.array( feature_iar, dtype=np.float)
feature = np.reshape(feature_iar,( len(gids),len(feature_iar)/len(gids) ) ) ;
gids = np.array(gids);
gt_classes = np.array(gt_classes)
#plpy.notice('toto')
feature[np.isnan(feature)]=0 ;
labels = create_label_equivalency(labels_name,class_list )
weight_iar = np.array(weight)
return Rforest_learn_predict(gids
,feature
,gt_classes
,weight_iar
,labels
, k_folds
, random_forest_ntree
,plot_directory) ;
def RForest_learn_predict_pg_test():
#param
nfeature = 3
n_obs = 1000 ;
class_list = [1,2,3,4,5,6,7]
labels = ['FF1', 'FF2', 'FF3', 'FO2', 'FO3', 'LA6', 'NoC']
k_folds = 10
random_forest_ntree = 10;
plot_directory = '/media/sf_E_RemiCura/PROJETS/point_cloud/PC_in_DB/LOD_ordering_for_patches_of_points/result_rforest/vosges';
#creating input of function
gids = np.arange(13,n_obs+13);
feature_iar = np.random.rand(nfeature*n_obs)*10 ;
gt_classes = np.zeros(n_obs);
for i,(not_used) in enumerate(gt_classes):
gt_classes[i] = np.random.choice(class_list) ;
#
gids= [8736, 8737, 8738, 8739, 8742, 8743, 8744, 8746, 8748, 8749]
feature_iar = [0.0, 0.0, 0.0, 0.0, 1.0, 28.0, 2.0, 593.17, 0.0, 2.0, 4.0, 0.0, 0.0, 1.0, 36.511, 1.0, 592.176, 7.52, 0.0, 0.0, 0.0, 0.0, 1.0, 46.0, 1.0, 598.33, 0.0, 4.0, 23.0, 91.0, 347.0, 1.0, 33.2, 1.0, 585.271, 22.89, 6.0, 36.0, 189.0, 517.0, 1.0, 15.42, 2.0, 616.146, 39.41, 7.0, 37.0, 171.0, 497.0, 1.0, 13.532, 2.0, 607.817, 46.73, 6.0, 33.0, 155.0, 360.0, 1.0, 14.62, 2.0, 596.008, 42.09, 3.0, 29.0, 99.0, 255.0, 1.0, 11.295, 2.0, 572.784, 45.55, 3.0, 30.0, 118.0, 274.0, 1.0, 12.154, 2.0, 517.455, 49.62, 3.0, 28.0, 110.0, 278.0, 0.99, 11.016, 2.0, 495.071, 50.03] ;
gt_classes =[4, 4, 4, 4, 3, 3, 3, 2, 1, 1]
labels_name = ['FF1', 'FF2', 'FF3', 'NoC']
class_list =[1, 2, 3, 4]
weight = [0.25, 0.25, 0.25, 0.25, 0.3333, 0.3333, 0.3333, 1.0, 0.5, 0.5]
random_forest_ntree = 10 ;
#launching function
result = RForest_learn_predict_pg(gids,feature_iar,gt_classes,weight,labels_name,class_list,k_folds,random_forest_ntree, plot_directory)
return result ;
#print RForest_learn_predict_pg_test()
|
| GuillaumeDerval/INGInious | common/task_file_managers/manage.py | Python | agpl-3.0 | 1,952 | 0.004098 |
import os.path
from common.base import INGIniousConfiguration
from common.task_file_managers.yaml_manager import TaskYAMLFileManager
_task_file_managers = [TaskYAMLFileManager]
def get_readable_tasks(courseid):
""" Returns the list of all available tasks in a course """
tasks = [
task for task in os.listdir(os.path.join(INGIniousConfiguration["tasks_directory"], courseid))
if os.path.isdir(os.path.join(INGIniousConfiguration["tasks_directory"], courseid, task))
and _task_file_exists(os.path.join(INGIniousConfiguration["tasks_directory"], courseid, task))]
return tasks
def _task_file_exists(directory):
""" Returns true if a task file exists in this directory """
for filename in ["task.{}".format(ext) for e
|
xt in get_available_task_file_managers().keys()]:
if os.path.isfile(os.path.join(directory, filename)):
return True
return False
def get_task_file_manager(courseid, taskid):
""" Returns the appropriate task file manager for this task """
for ext, subclass in get_available_task_file_managers().iteritems():
if os.path.isfile(os.path.join(INGIniousConfiguration["tasks_directory"], courseid, taskid, "task.{}".format(ext))):
return subclass(courseid, taskid)
return None
def delete_all_possible_task_files(courseid, taskid):
""" Deletes all possibles task files in directory, to allow to change the format """
for ext in get_available_task_file_managers().keys():
try:
os.remove(os.path.join(INGIniousConfiguration["tasks_directory"], courseid, taskid, "task.{}".format(ext)))
except:
pass
def add_custom_task_file_manager(task_file_manager):
""" Add a custom task file manager """
_task_file_managers.append(task_file_manager)
def get_available_task_file_managers():
""" Get a dict with ext:class pairs """
return {f.get_ext(): f for f in _task_file_managers}
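# Illustrative sketch (hypothetical manager class, not part of INGInious): a
# custom format could be registered alongside the YAML manager above, after
# which get_available_task_file_managers() would expose both extensions.
# from common.task_file_managers.json_manager import TaskJSONFileManager  # hypothetical module
# add_custom_task_file_manager(TaskJSONFileManager)
# print(get_available_task_file_managers())  # e.g. {'yaml': TaskYAMLFileManager, 'json': TaskJSONFileManager}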
|
| jayme-github/CouchPotatoServer | couchpotato/core/downloaders/synology/main.py | Python | gpl-3.0 | 3,961 | 0.009089 |
from couchpotato.core.downloaders.base import Downloader
from couchpotato.core.helpers.encoding import isInt
from couchpotato.core.logger import CPLog
import httplib
import json
import urllib
import urllib2
log = CPLog(__name__)
class Synology(Downloader):
type = ['torrent_magnet']
log = CPLog(__name__)
def download(self, data, movie, manual = False, filedata = None):
if self.isDisabled(manual) or not self.isCorrectType(data.get('type')):
return
log.error('Sending "%s" (%s) to Synology.', (data.get('name'), data.get('type')))
# Load host from config and split out port.
host = self.conf('host').split(':')
if not isInt(host[1]):
log.error('Config properties are not filled in correctly, port is missing.')
return False
if data.get('type') == 'torrent':
log.error('Can\'t add binary torrent file')
return False
try:
# Send request to Transmission
srpc = SynologyRPC(host[0], host[1], self.conf('username'), self.conf('password'))
remote_torrent = srpc.add_torrent_uri(data.get('url'))
log.info('Response: %s', remote_torrent)
return remote_torrent['success']
except Exception, err:
log.error('Exception while adding torrent: %s', err)
return False
class SynologyRPC(object):
'''SynologyRPC lite library'''
def __init__(self, host = 'localhost', port = 5000, username = None, password = None):
super(SynologyRPC, self).__init__()
self.download_url = 'http://%s:%s/webapi/DownloadStation/task.cgi' % (host, port)
self.auth_url = 'http://%s:%s/webapi/auth.cgi' % (host, port)
self.username = username
self.password = password
self.session_name = 'DownloadStation'
def _login(self):
if self.username and self.password:
args = {'api': 'SYNO.API.Auth', 'account': self.username, 'passwd': self.password, 'version': 2,
'method': 'login', 'session': self.session_name, 'format': 'sid'}
response = self._req(self.auth_url, args)
if response['success'] == True:
self.sid = response['data']['sid']
log.debug('Sid=%s', self.sid)
return response
elif self.username or self.password:
log.error('User or password missing, not using authentication.')
return False
def _logout(self):
args = {'api':'SYNO.API.Auth', 'version':1, 'method':'logout', 'session':self.session_name, '_sid':self.sid}
return self._req(self.auth_url, args)
def _req(self, url, args):
req_url = url + '?' + urllib.urlencode(args)
try:
req_open = urllib2.urlopen(req_url)
response = json.loads(req_open.read())
if response['success'] == True:
log.info('Synology action successful')
return response
except httplib.InvalidURL, err:
log.error('Invalid Transmission host, check your config %s', err)
return False
except urllib2.HTTPError, err:
log.error('SynologyRPC HTTPError: %s', err)
return False
except urllib2.URLError, err:
log.error('Unable to connect to Synology %s', err)
return False
def add_torrent_uri(self, torrent):
log.info('Adding torrent URL %s', torrent)
response = {}
# login
login = self._login()
if len(login) > 0 and login['success'] == True:
log.info('Login success, adding torrent')
args = {'api':'SYNO.DownloadStation.Task', 'version':1, 'method':'create', 'uri':torrent, '_sid':self.sid}
response = self._req(self.download_url, args)
self._logout()
else:
log.error('Couldn\'t login to Synology, %s', login)
return response
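# Minimal usage sketch (hypothetical host and credentials, Python 2 syntax to
# match the snippet above): the lite RPC client can also be used on its own.
# srpc = SynologyRPC('192.168.1.10', 5000, 'admin', 'secret')
# result = srpc.add_torrent_uri('magnet:?xt=urn:btih:...')
# print result.get('success') if result else result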
|
| TeamGhostBuster/restful-api | app/api/article/controller.py | Python | apache-2.0 | 8,039 | 0.000373 |
from app.util import ElasticSearchUtil
from app.util import JsonUtil, RequestUtil, ResponseUtil
from app.util.AuthUtil import *
import os
@app.route('/user/article/<string:article_id>', methods=['GET'])
@authorized_required
def get_article(user, article_id):
"""
@api {get} /user/article/:id Get a article in personal list
@apiName Get a article in personal list
@apiGroup Article
@apiUse AuthorizationTokenHeader
@apiParam {String} id The Article id
@apiSuccess {String} id Article id.
@apiSuccess {String} title Article title.
@apiSuccess {Object[]} comments User comments.
@apiSuccess {String} comments.id The comment id.
@apiSuccess {String} comments.content The content.
@apiSuccess {String} comments.timestamp The timestamp of the comment.
@apiSuccess {Email} comments.author The author's email.
@apiSuccessExample {json} Response (Example):
{
"id": "aldkfjadls",
"title": "Process",
"description": "adlsfjdlask",
"url": "https://www.google.com/something",
"comments" : [{
"id": "afjlkdsfjafla",
"content": "i hate it",
"created_at": "2017-02-04-19-59-59",
"author": "tester@ualberta.ca"
}],
"tags": ["science", "computer"]
}
@apiUse UnauthorizedAccessError
"""
# Find article from the database
article = MongoUtil.find_article(article_id)
# If the article does not exist
if article is None:
return jsonify(msg='Articles does not exist'), 404
app.logger.info('User {} Get article {}'.format(user, article))
return jsonify(JsonUtil.serialize(article)), 200
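# Illustrative client-side sketch (assumed base URL and token format, using the
# requests library) for the GET /user/article/:id endpoint documented above.
# import requests
# resp = requests.get('http://localhost:5000/user/article/aldkfjadls',
#                     headers={'Authorization': '<token>'})
# print(resp.status_code, resp.json())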
@app.route('/user/list/<string:list_id>/article', methods=['POST'])
@authorized_required
def create_article(user, list_id):
"""
@api {post} /user/list/:id/article Create a article for user
@apiName Create a article for user
@apiGroup Article
@apiUse AuthorizationTokenHeader
@apiParam {String} title The article title.
@apiParam {String} description The description.
@apiParam {String} [url] The url to the article.
@apiParam {Json} [tags] The user custom tags.
@apiParamExample {json} Request (Example):
{
"title": "God know what it is",
"description": "I don't know",
"url": "https://www.gooel.com/something",
"tags": ["tag1", "tag2", "tag3"]
}
@apiSuccess {json} Article json representation.
@apiSuccessExample {json} Respond (Example)
{
"id": "adlkfdalfjk",
"title": "God know what it is",
"description": "I don't know",
"url": "https://www.gooel.com/something",
"tags": ["tag1", "tag2", "tag3"]
}
@apiUse UnauthorizedAccessError
@apiUse ResourceDoesNotExist
@apiUse BadRequest
"""
# Parse request into JSON dict
req = RequestUtil.get_request()
# Create article
result = MongoUtil.create_article(user, req, list_id)
# If error occurs
if isinstance(result, str):
return ResponseUtil.error_response(result)
if not os.getenv('FLASK_CONFIGURATION') == 'test':
# Save it to elasticsearch
ElasticSearchUtil.save_to_es(result)
app.logger.info('User {} Create article {} in List ID: {}'.format(user, result, list_id))
return jsonify(JsonUtil.serialize(result)), 200
@app.route('/article/<string:article_id>', methods=['PUT'])
@authorized_required
def update_article(user, article_id):
"""
@api {put} /article/:id Update a article
@apiName Update a article
@apiGroup Article
@apiUse AuthorizationTokenHeader
@apiParam {String} title The new article title.
@apiParam {String} description The new description.
@apiParam {String} [url] The new url to the article.
@apiParam {Json} [tags] The new user custom tags.
@apiParamExample {json} Request (Example):
{
"title": "God know what it is",
"description": "I don't know",
"url": "https://www.gooel.com/something",
"tags": ["tag1", "tag2", "tag3"]
}
@apiSuccess {json} The new article json data.
@apiSuccessExample {json} Respond (Example)
{
"id": "adlkfdalfjk",
"title": "God know what it is",
"description": "I don't know",
"url": "https://www.gooel.com/something",
"tags": ["tag1", "tag2", "tag3"]
}
@apiUse UnauthorizedAccessError
@apiUse ResourceDoesNotExist
@apiUse BadRequest
"""
# Parse request into JSON dict
print(request.get_json())
req = RequestUtil.get_request()
print(req)
# Update article
result = MongoUtil.update_article(req, article_id)
# If error occurs
if isinstance(result, str):
return ResponseUtil.error_response(result)
# Update in elasticsearch
if not os.getenv('FLASK_CONFIGURATION') == 'test':
# Save it to elasticsearch
ElasticSearchUtil.save_to_es(result)
app.logger.info('User {} Update article {}'.format(user, result))
return jsonify(JsonUtil.serialize(result)), 200
@app.route('/group/<string:group_id>/list/<string:list_id>/article', methods=['POST'])
@authorized_required
def create_article_in_group(user, group_id, list_id):
"""
@api {post} /group/:id/list/:id/article Create a article in group
@apiName Create a article in group
@apiGroup Article
@apiUse AuthorizationTokenHeader
@apiParam {String} title The article title.
@apiParam {String} description The description.
@apiParam {String} [url] The url to the article.
@apiParam {Json} [tags] The user custom tags.
@apiParamExample {json} Request (Example):
{
"title": "God know what it is",
"description": "I don't know",
"url": "https://www.gooel.com/something",
"tags": ["tag1", "tag2", "tag3"]
}
@apiSuccess {String} Message Success message.
@apiUse GroupAccessDenied
@apiUse ListDoesNotExist
"""
app.logger.info('User {} Access {}'.format(user, request.full_path))
# Parse request, parse empty string and
req = RequestUtil.get_request()
# Create new article
result = MongoUtil.create_article_in_group(user, req, list_id, group_id)
if isinstance(result, str):
return ResponseUtil.error_response(result)
if not os.getenv('FLASK_CONFIGURATION') == 'test':
# Save it to elasticsearch
ElasticSearchUtil.save_to_es(result)
app.logger.info('User {} Create article {} in List ID: {} in Group ID: {}'.format(
user, result, list_id, group_id))
return jsonify(JsonUtil.serialize(result)), 200
@app.route('/user/article/<string:article_id>/tag', methods=['POST'])
@authorized_required
def add_tags(user, article_id):
"""
@api {post} /user/article/:id/tag Add tag to article
@apiName Add tag to the article
@apiGroup Article
@apiUse AuthorizationTokenHeader
@apiParam {String} id The article id.
@apiParam {Json} tags The user custom tags.
@apiParamExample {json} Request (Example):
{
"tag": "science"
}
@apiSuccess {String} Message Success message.
@apiUse UnauthorizedAccessError
@apiUse ListDoesNotExist
"""
# Get tag from request
app.logger.info('User {} Access {}'.format(user, request.full_path))
req = RequestUtil.get_request()
tag = req.get('tag')
# Add tag
article = MongoUtil.add_tag(article_id, tag)
# If the article does not exist
if article is None:
return jsonify({
'msg': 'List does not exist'
}), 400
if not os.getenv('FLASK_CONFIGURATION') == 'test':
# Save it to elasticsearch
ElasticSearchUtil.save_to_es(article)
app.logger.info('User {} Add tag {} to {}'.format(user, tag, article))
return jsonify(JsonUtil.serialize(article)), 200
|
| eve-basil/common | tests/__init__.py | Python | apache-2.0 | 98 | 0 |
from __future__ import absolute_import
from hamcrest.library import *
from hamcrest.core import *
|
| lliss/python-omgeo | omgeo/preprocessors.py | Python | mit | 11,304 | 0.006723 |
from omgeo.processor import _Processor
import re
class _PreProcessor(_Processor):
"""Takes, processes, and returns a geocoding.places.PlaceQuery object."""
def process(self, pq):
raise NotImplementedError(
'PreProcessor subclasses must implement process().')
class ReplaceRangeWithNumber(_PreProcessor):
"""
Class to take only the first part of an address range
or hyphenated house number to use for geocoding.
This affects the query and address PlaceQuery attributes.
=============================== ========================================
Input Output
=============================== ========================================
``4109-4113 Main St`` ``4109 Main St``
``4109-13 Main St`` ``4109 Main St``
``322-1/2 Water Street`` ``322 Water Street``
``123-2 Maple Lane`` ``123 Maple Lane``
``272-B Greenough St, 19127`` ``272 Greenough St, 19127``
``272 Greenough St 19127-1112`` ``272 Greenough St 19127-1112``
``19127-1112`` ``19127-1112`` (not affected)
``76-20 34th Ave, Queens NY`` ``76 34th Ave, Queens NY`` (see warning)
=============================== ========================================
.. warning::
This may cause problems with addresses presented in the
hyphenated Queens-style format, where the part before the
hyphen indicates the cross street, and the part after
indicates the house number.
"""
#: Regular expression to represent ranges like:
#: * 789-791
#: * 789-91
#: * 201A-201B
#: * 201A-B
RE_STREET_NUMBER = re.compile('(^\d+\w*-\d*\w*)\s', re.IGNORECASE)
def replace_range(self, addr_str):
match = self.RE_STREET_NUMBER.match(addr_str)
if match is not None:
old = match.group(1)
new = old.split('-', 1)[0]
addr_str = addr_str.replace(old, new, 1)
return addr_str
def process(self, pq):
"""
:arg PlaceQuery pq: PlaceQuery instance
:returns: PlaceQuery instance with truncated address range / number
"""
pq.query = self.replace_range(pq.query)
pq.address = self.replace_range(pq.address)
return pq
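# Minimal usage sketch mirroring the input/output table in the docstring above
# (illustrative only; replace_range operates on plain strings, so no PlaceQuery
# instance is needed here).
# pre = ReplaceRangeWithNumber()
# pre.replace_range('4109-4113 Main St')   # -> '4109 Main St'
# pre.replace_range('19127-1112')          # -> '19127-1112' (no whitespace after the range, so unaffected)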
class ParseSingleLine(_PreProcessor):
"""
Adapted from `Cicero Live <http://azavea.com/packages/azavea_cicero/blocks/cicero_live/view.js>`_
"""
# Some Regexes:
re_unit_numbered = re.compile('(su?i?te|p\W*[om]\W*b(?:ox)?|(?:ap|dep)(?:ar)?t(?:me?nt)?|ro*m|flo*r?|uni?t|bu?i?ldi?n?g|ha?nga?r|lo?t|pier|slip|spa?ce?|stop|tra?i?le?r|bo?x|no\.?)\s+|#', re.IGNORECASE)
re_unit_not_numbered = re.compile('ba?se?me?n?t|fro?nt|lo?bby|lowe?r|off?i?ce?|pe?n?t?ho?u?s?e?|rear|side|uppe?r', re.IGNORECASE)
re_UK_postcode = re.compile('[A-Z]{1,2}[0-9R][0-9A-Z]? *[0-9][A-Z]{0,2}', re.IGNORECASE)
re_blank = re.compile('\s')
def _comma_join(self, left, right):
if left == '':
return right
else:
return '%s, %s' % (left, right)
def process(self, pq):
"""
:arg PlaceQuery pq: PlaceQuery instance
:returns: PlaceQuery instance with :py:attr:`query`
converted to individual elements
"""
if pq.query != '':
postcode = address = city = '' # define the vars we'll use
# global regex postcode search, pop off last result
postcode_matches = self.re_UK_postcode.findall(pq.query)
if len(postcode_matches) > 0:
postcode = postcode_matches[-1]
query_parts = [part.strip() for part in pq.query.split(',')]
if postcode is not '' and re.search(postcode, query_parts[0]):
# if postcode is in the first part of query_parts, there are probably no commas
# get just the part before the postcode
part_before_postcode = query_parts[0].split(postcode)[0].strip()
if self.re_blank.search(part_before_postcode) is None:
address = part_before_postcode
else:
address = query_parts[0] #perhaps it isn't really a postcode (apt num, etc)
else:
address = query_parts[0] # no postcode to worry about
for part in query_parts[1:]:
part = part.strip()
if postcode is not '' and re.search(postcode, part) is not None:
part = part.replace(postcode, '').strip() # if postcode is in part, remove it
if self.re_unit_numbered.search(part) is not None:
# test to see if part is secondary address, like "Ste 402"
address = self._comma_join(address, part)
elif self.re_unit_not_numbered.search(part) is not None:
# ! might cause problems if 'Lower' or 'Upper' is in the city name
# test to see if part is secondary address, like "Basement"
address = self._comma_join(address, part)
else:
city = self._comma_join(city, part)# it's probably a city (or "City, County")
# set pq parts if they aren't already set (we don't want to overwrite explicit params)
if pq.postal == '': pq.postal = postcode
if pq.address == '': pq.address = address
if pq.city == '': pq.city = city
return pq
class CountryPreProcessor(_PreProcessor):
"""
Used to filter acceptable countries
and standardize country names or codes.
"""
def __init__(self, acceptable_countries=None, country_map=None):
"""
:arg list acceptable_countries: A list of acceptable countries.
None is used to indicate that all countries are acceptable.
(default ``[]``)
An empty string is also an acceptable country. To require
a country, use the `RequireCountry` preprocessor.
:arg dict country_map: A map of the input PlaceQuery.country property
to the country value accepted by the geocoding service.
For example, suppose that the geocoding service recognizes
'GB', but not 'UK' -- and 'US', but not 'USA'::
country_map = {'UK':'GB', 'USA':'US'}
"""
self.acceptable_countries = acceptable_countries if acceptable_countries is not None else []
self.country_map = country_map if country_map is not None else {}
def process(self, pq):
"""
:arg PlaceQuery pq: PlaceQuery instance
:returns: modified PlaceQuery, or ``False`` if country is not acceptable.
"""
# Map country, but don't let map overwrite
if pq.country not in self.acceptable_countries and pq.country in self.country_map:
pq.country = self.country_map[pq.country]
if pq.country != '' and \
self.acceptable_countries != [] and \
pq.country not in self.acceptable_countries:
return False
return pq
def __repr__(self):
return '<%s: Accept %s mapped as %s>' % (self.__class__.__name__,
self.acceptable_countries, self.country_map)
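# Hedged usage sketch (the PlaceQuery import path and constructor arguments are
# assumed from the omgeo package layout and may differ): with a country_map of
# {'UK': 'GB'}, a query tagged 'UK' is rewritten to 'GB'; a country outside
# acceptable_countries makes process() return False.
# from omgeo.places import PlaceQuery  # assumed location of PlaceQuery
# pre = CountryPreProcessor(acceptable_countries=['US', 'GB'], country_map={'UK': 'GB', 'USA': 'US'})
# pre.process(PlaceQuery('10 Downing St, London', country='UK'))  # returns the PlaceQuery with country == 'GB'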
class CancelIfRegexInAttr(_PreProcessor):
"""
Return False if given regex is found in ANY of the given
PlaceQuery attributes, otherwise return original PlaceQuery instance.
In the event that a given attribute does not exist in the given
PlaceQuery, no exception will be raised.
"""
def __init__(self, regex, attrs, ignorecase=True):
"""
:arg str regex: a regex string to match (represents what you do *not* want)
:arg attrs: a list or tuple of strings of attribute names to look through
:arg bool ignorecase: set to ``False`` for a case-sensitive match (default ``True``)
|
| Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/tests/test_cli_mgmt_network_endpoint.py | Python | mit | 15,915 | 0.004147 |
# coding: utf-8
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
# TEST SCENARIO COVERAGE
# ----------------------
# Methods Total : 25
# Methods Covered : 25
# Examples Total : 27
# Examples Tested : 27
# Coverage % : 100
# ----------------------
# private_link_services: 13/13
# private_endpoints: 5/5
# private_dns_zone_groups: 4/4
# available_private_endpoint_types: 2/2
# available_endpoint_services: 1/1
import unittest
import pytest
import azure.mgmt.network as az_network
from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer, recorded_by_proxy
AZURE_LOCATION = 'eastus'
@pytest.mark.live_test_only
class TestMgmtNetwork(AzureMgmtRecordedTestCase):
def setup_method(self, method):
self.mgmt_client = self.create_mgmt_client(
az_network.NetworkManagementClient
)
if self.is_live:
import azure.mgmt.privatedns as az_privatedns
self.dns_client = self.create_mgmt_client(
az_privatedns.PrivateDnsManagementClient
)
def create_load_balancer(self, group_name, location, load_balancer_name, ip_config_name, subnet_id):
# Create load balancer
BODY = {
"location": location,
"sku": {
"name": "Standard"
},
"frontendIPConfigurations": [
{
"name": ip_config_name,
"subnet": {
# "id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.Network/virtualNetworks/" + VNET_NAME + "/subnets/" + SUB_NET
"id": subnet_id
}
}
]
}
result = self.mgmt_client.load_balancers.begin_create_or_update(group_name, load_balancer_name, BODY)
result.result()
def create_virtual_network(self, group_name, location, network_name, subnet_name1, subnet_name2):
result = self.mgmt_client.virtual_networks.begin_create_or_update(
group_name,
network_name,
{
'location': location,
'address_space': {
'address_prefixes': ['10.0.0.0/16']
}
},
)
result_create = result.result()
async_subnet_creation = self.mgmt_client.subnets.begin_create_or_update(
group_name,
network_name,
subnet_name1,
{
'address_prefix': '10.0.0.0/24',
'private_link_service_network_policies': 'disabled'
}
)
subnet_info_1 = async_subnet_creation.result()
async_subnet_creation = self.mgmt_client.subnets.begin_create_or_update(
group_name,
network_name,
subnet_name2,
{
'address_prefix': '10.0.1.0/24',
'private_endpoint_network_policies': 'disabled'
}
)
subnet_info_2 = async_subnet_creation.result()
return (subnet_info_1, subnet_info_2)
def create_private_dns_zone(self, group_name, zone_name):
if self.is_live:
# Zones are a 'global' resource.
zone = self.dns_client.private_zones.begin_create_or_update(
group_name,
zone_name,
{
'location': 'global'
}
)
return zone.result().id
else:
return "/subscriptions/" + "00000000-0000-0000-0000-000000000000" + "/resourceGroups/" + group_name + "/providers/Microsoft.Network/privateDnsZones/" + zone_name
@RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
@recorded_by_proxy
def test_network(self, resource_group):
SUBSCRIPTION_ID = self.get_settings_value("SUBSCRIPTION_ID")
RESOURCE_GROUP = resource_group.name
SERVICE_NAME = "myService"
PRIVATE_ENDPOINT_NAME = "myPrivateEndpoint"
PE_CONNECTION_NAME = "myPeConnection"
PRIVATE_DNS_ZONE_GROUP_NAME = "myPrivateDnsZoneGroup"
LOCATION = AZURE_LOCATION
IP_CONFIGURATION_NAME = "myIPConfiguration"
LOAD_BALANCER_NAME = "loadbalancer"
VIRTUAL_NETWORK_NAME = "virtualnetwork"
SUBNET_NAME_1 = "subnet1"
SUBNET_NAME_2 = "subnet2"
ZONE_NAME = "www.zone1.com"
PRIVATE_ZONE_NAME = "zone1"
subnet, _ = self.create_virtual_network(RESOURCE_GROUP, AZURE_LOCATION, VIRTUAL_NETWORK_NAME, SUBNET_NAME_1, SUBNET_NAME_2)
self.create_load_balancer(RESOURCE_GROUP, AZURE_LOCATION, LOAD_BALANCER_NAME, IP_CONFIGURATION_NAME, subnet.id)
# /PrivateLinkServices/put/Create private link service[put]
BODY = {
"location": "eastus",
"visibility": {
"subscriptions": [
SUBSCRIPTION_ID
]
},
"auto_approval": {
"subscriptions": [
SUBSCRIPTION_ID
]
},
"fqdns": [
"fqdn1",
"fqdn2",
"fqdn3"
],
"load_balancer_frontend_ip_configurations": [
{
"id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.Network/loadBalancers/" + LOAD_BALANCER_NAME + "/frontendIPConfigurations/" + IP_CONFIGURATION_NAME
}
],
"ip_configurations": [
{
"name": IP_CONFIGURATION_NAME,
"private_ip_address": "10.0.1.4",
"private_ipallocation_method": "Static",
"private_ip_address_version": "IPv4",
"subnet": {
"id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.Network/virtualNetworks/" + VIRTUAL_NETWORK_NAME + "/subnets/" + SUBNET_NAME_1
}
}
]
}
result = self.mgmt_client.private_link_services.begin_create_or_update(resource_group_name=RESOURCE_GROUP, service_name=SERVICE_NAME, parameters=BODY)
result = result.result()
# /PrivateEndpoints/put/Create private endpoint[put]
BODY = {
"location": AZURE_LOCATION,
"private_link_service_connections": [
{
"name": SERVICE_NAME, # TODO: This is needed, but was not showed in swagger.
"private_link_service_id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.Network/privateLinkServices/" + SERVICE_NAME,
# "group_ids": [
# "groupIdFromResource"
# ],
# "request_message": "Please approve my connection."
}
],
"subnet": {
"id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.Network/virtualNetworks/" + VIRTUAL_NETWORK_NAME + "/subnets/" + SUBNET_NAME_2
}
}
result = self.mgmt_client.private_endpoints.begin_create_or_update(resource_group_name=RESOURCE_GROUP, private_endpoint_name=PRIVATE_ENDPOINT_NAME, parameters=BODY)
result = result.result()
# # /PrivateEndpoints/put/Create private endpoint with manual approval connection[put]
# BODY = {
# "location": "eastus",
# "properties": {
# "manual_private_link_service_connections": [
# {
# "properties": {
# "private_link_service_id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.Network/privateLinkServicestestPls",
# "group_ids": [
# "groupIdFromResource"
# ],
# "request_message": "Please manually approve my connection."
# }
#
|
| antoinecarme/pyaf | tests/artificial/transf_Difference/trend_MovingAverage/cycle_12/ar_12/test_artificial_1024_Difference_MovingAverage_12_12_100.py | Python | bsd-3-clause | 273 | 0.084249 |
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "MovingAverage", cycle_length = 12, transform = "Difference", sigma = 0.0, exog_count = 100, ar_order = 12);
|
| kg-bot/SupyBot | plugins/Sshd/__init__.py | Python | gpl-3.0 | 2,534 | 0.000395 |
###
# Copyright (c) 2005, Ali Afshar
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
"""
Add a description of the plugin (to be presented to the user inside the wizard)
here. This should describe *what* the plugin does.
"""
import supybot
import supybot.world as world
# Use this for the version of this plugin. You may wish to put a CVS keyword
# in here if you're keeping the plugin in CVS or some similar system.
__version__ = "0.2.0"
# XXX Replace this with an appropriate author or supybot.Author instance.
__author__ = supybot.authors.unknown
# This is a dictionary mapping supybot.Author instances to lists of
# contributions.
__contributors__ = {}
import config
import plugin
reload(plugin) # In case we're being reloaded.
# Add more reloads here if you add third-party modules and want them to be
# reloaded when this plugin is reloaded. Don't forget to import them as well!
if world.testing:
import test
Class = plugin.Class
configure = config.configure
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
|
| cboling/SDNdbg | docs/old-stuff/pydzcvr/doc/neutronclient/v2_0/client.py | Python | apache-2.0 | 54,103 | 0 |
# Copyright 2012 OpenStack Foundation.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import logging
import time
import urllib
import requests
import six.moves.urllib.parse as urlparse
from neutronclient import client
from neutronclient.common import _
from neutronclient.common import constants
from neutronclient.common import exceptions
from neutronclient.common import serializer
from neutronclient.common import utils
_logger = logging.getLogger(__name__)
def exception_handler_v20(status_code, error_content):
"""Exception handler for API v2.0 client.
This routine generates the appropriate Neutron exception according to
the contents of the response body.
:param status_code: HTTP error status code
:param error_content: deserialized body of error response
"""
error_dict = None
if isinstance(error_content, dict):
error_dict = error_content.get('NeutronError')
# Find real error type
bad_neutron_error_flag = False
if error_dict:
# If Neutron key is found, it will definitely contain
# a 'message' and 'type' keys?
try:
error_type = error_dict['type']
error_message = error_dict['message']
if error_dict['detail']:
error_message += "\n" + error_dict['detail']
except Exception:
bad_neutron_error_flag = True
if not bad_neutron_error_flag:
# If corresponding exception is defined, use it.
client_exc = getattr(exceptions, '%sClient' % error_type, None)
# Otherwise look up per status-code client exception
if not client_exc:
client_exc = exceptions.HTTP_EXCEPTION_MAP.get(status_code)
if client_exc:
raise client_exc(message=error_message,
status_code=status_code)
else:
raise exceptions.NeutronClientException(
status_code=status_code, message=error_message)
else:
raise exceptions.NeutronClientException(status_code=status_code,
message=error_dict)
else:
message = None
if isinstance(error_content, dict):
message = error_content.get('message')
if message:
raise exceptions.NeutronClientException(status_code=status_code,
message=message)
# If we end up here the exception was not a neutron error
msg = "%s-%s" % (status_code, error_content)
raise exceptions.NeutronClientException(status_code=status_code,
message=msg)
class APIParamsCall(object):
"""A Decorator to add support for format and tenant overriding and filters.
"""
def __init__(self, function):
self.function = function
def __get__(self, instance, owner):
def with_params(*args, **kwargs):
_format = instance.format
if 'format' in kwargs:
instance.format = kwargs['format']
ret = self.function(instance, *args, **kwargs)
instance.format = _format
return ret
return with_params
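# Illustrative sketch (an assumption about how the decorator is applied further
# down in this module): API methods on Client are wrapped with APIParamsCall so
# that a per-call "format" keyword temporarily overrides the instance-wide format.
# @APIParamsCall
# def list_networks(self, retrieve_all=True, **_params):
#     return self.list('networks', self.networks_path, retrieve_all, **_params)
# neutron.list_networks(format='json')  # the previous format is restored after the call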
class Client(object):
"""Client for the OpenStack Neutron v2.0 API.
:param string username: Username for authentication. (optional)
:param string user_id: User ID for authentication. (optional)
:param string password: Password for authentication. (optional)
:param string token: Token for authentication. (optional)
:param string tenant_name: Tenant name. (optional)
:param string tenant_id: Tenant id. (optional)
:param string auth_url: Keystone service endpoint for authorization.
:param string service_type: Network service type to pull from the
keystone catalog (e.g. 'network') (optional)
:param string endpoint_type: Network service endpoint type to pull from the
keystone catalog (e.g. 'publicURL',
'internalURL', or 'adminURL') (optional)
:param string region_name: Name of a region to select when choosing an
endpoint from the service catalog.
:param string endpoint_url: A user-supplied endpoint URL for the neutron
service. Lazy-authentication is possible for API
service calls if endpoint is set at
instantiation.(optional)
:param integer timeout: Allows customization of the timeout for client
http requests. (optional)
:param bool insecure: SSL certificate validation. (optional)
:param string ca_cert: SSL CA bundle file to use. (optional)
:param integer retries: How many times idempotent (GET, PUT, DELETE)
requests to Neutron server should be retried if
they fail (default: 0).
:param bool raise_errors: If True then exceptions caused by connection
failure are propagated to the caller.
(default: True)
:param session: Keystone client auth session to use. (optional)
:param auth: Keystone auth plugin to use. (optional)
Example::
from neutronclient.v2_0 import client
neutron = client.Client(username=USER,
password=PASS,
tenant_name=TENANT_NAME,
auth_url=KEYSTONE_URL)
nets = neutron.list_networks()
...
"""
networks_path = "/networks"
network_path = "/networks/%s"
ports_path = "/ports"
port_path = "/ports/%s"
subnets_path = "/subnets"
subnet_path = "/subnets/%s"
quotas_path = "/quotas"
quota_path = "/quotas/%s"
extensions_path = "/extensions"
extension_path = "/extensions/%s"
routers_path = "/routers"
router_path = "/routers/%s"
floatingips_path = "/floatingips"
floatingip_path = "/floatingips/%s"
security_groups_path = "/security-groups"
    security_group_path = "/security-groups/%s"
security_group_rules_path = "/security-group-rules"
    security_group_rule_path = "/security-group-rules/%s"
vpnservices_path = "/vpn/vpnservices"
vpnservice_path = "/vpn/vpnservices/%s"
ipsecpolicies_path = "/vpn/ipsecpolicies"
ipsecpolicy_path = "/vpn/ipsecpolicies/%s"
ikepolicies_path = "/vpn/ikepolicies"
ikepolicy_path = "/vpn/ikepolicies/%s"
ipsec_site_connections_path = "/vpn/ipsec-site-connections"
ipsec_site_connection_path = "/vpn/ipsec-site-connections/%s"
vips_path = "/lb/vips"
vip_path = "/lb/vips/%s"
pools_path = "/lb/pools"
pool_path = "/lb/pools/%s"
pool_path_stats = "/lb/pools/%s/stats"
members_path = "/lb/members"
member_path = "/lb/members/%s"
health_monitors_path = "/lb/health_monitors"
health_monitor_path = "/lb/health_monitors/%s"
associate_pool_health_monitors_path = "/lb/pools/%s/health_monitors"
disassociate_pool_health_monitors_path = (
"/lb/pools/%(pool)s/health_monitors/%(health_monitor)s")
qos_queues_path = "/qos-queues"
qos_queue_path = "/qos-queues/%s"
agents_path = "/agents"
agent_path = "/agents/%s"
network_gateways_path = "/network-gateways"
network_gateway_path = "/network-gateways/%s"
gateway_devices_path = "/gateway-devices"
gateway_device_path = "/gateway-devices/%s"
service_providers_path = "/service-providers"
credentials_path = "/credentials"
|
arthurdejong/python-stdnum
|
stdnum/damm.py
|
Python
|
lgpl-2.1
| 3,297
| 0
|
# damm.py - functions for performing the Damm checksum algorithm
#
# Copyright (C) 2016-2021 Arthur de Jong
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
"""The Damm algorithm.
The Damm algorithm is a check digit algorithm that should detect all
single-digit errors and all adjacent transposition errors. Based on an
anti-symmetric quasigroup of order 10, it uses a substitution table.
This implementation uses the table from Wikipedia by default but a custom
table can be provided.
More information:
* https://en.wikipedia.org/wiki/Damm_algorithm
>>> validate('572')
Traceback (most recent call last):
...
InvalidChecksum: ...
>>> calc_check_digit('572')
'4'
>>> validate('5724')
'5724'
>>> table = (
... (0, 2, 3, 4, 5, 6, 7, 8, 9, 1),
... (2, 0, 4, 1, 7, 9, 5, 3, 8, 6),
... (3, 7, 0, 5, 2, 8, 1, 6, 4, 9),
... (4, 1, 8, 0, 6, 3, 9, 2, 7, 5),
... (5, 6, 2, 9, 0, 7, 4, 1, 3, 8),
... (6, 9, 7, 3, 1, 0, 8, 5, 2, 4),
... (7, 5, 1, 8, 4, 2, 0, 9, 6, 3),
... (8, 4, 6, 2, 9, 5, 3, 0, 1, 7),
... (9, 8, 5, 7, 3, 1, 6, 4, 0, 2),
... (1, 3, 9, 6, 8, 4, 2, 7, 5, 0))
>>> checksum('816', table=table)
9
"""
from stdnum.exceptions import *
_operation_table = (
(0, 3, 1, 7, 5, 9, 8, 6, 4, 2),
(7, 0, 9, 2, 1, 5, 4, 8, 6, 3),
(4, 2, 0, 6, 8, 7, 1, 3, 5, 9),
(1, 7, 5, 0, 9, 8, 3, 4, 2, 6),
(6, 1, 2, 3, 0, 4, 5, 9, 7, 8),
(3, 6, 7, 4, 2, 0, 9, 5, 8, 1),
(5, 8, 6, 9, 7, 2, 0, 1, 3, 4),
(8, 9, 4, 5, 3, 6, 2, 0, 1, 7),
(9, 4, 3, 8, 6, 1, 7, 2, 0, 5),
(2, 5, 8, 1, 4, 3, 6, 7, 9, 0))
def checksum(number, table=None):
"""Calculate the Damm checksum over the provided number. The checksum is
returned as an integer value and should be 0 when valid."""
table = table or _operation_table
i = 0
for n in str(number):
i = table[i][int(n)]
return i
def validate(number, table=None):
"""Check if the number provided passes the Damm algorithm."""
if not bool(number):
raise InvalidFormat()
try:
valid = checksum(number, table=table) == 0
except Exception: # noqa: B902
raise InvalidFormat()
if not valid:
raise InvalidChecksum()
return number
def is_valid(number, table=None):
"""Check if the number provided passes the Damm algorithm."""
try:
return bool(validate(number, table=table))
except ValidationError:
return False
def calc_check_digit(number, table=None):
"""Calculate the extra digit that should be appended to the number to
make it a valid number."""
return str(checksum(number, table=table))
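# Illustrative sketch added for this write-up (not part of the original
# module): appending the calculated check digit yields a valid number, and an
# adjacent transposition is caught, as the docstring above describes.
if __name__ == '__main__':
    digit = calc_check_digit('572')   # '4' with the default table
    assert is_valid('572' + digit)    # '5724' passes validation
    assert not is_valid('7524')       # swapping the adjacent '5' and '7' fails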
|
tlecomte/friture
|
friture/level_view_model.py
|
Python
|
gpl-3.0
| 2,390
| 0.00879
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2021 Timothée Lecomte
# This file is part of Friture.
#
# Friture is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as published by
# the Free Software Foundation.
#
# Friture is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Friture. If not, see <http://www.gnu.org/licenses/>.
from PyQt5 import QtCore
from PyQt5.QtCore import pyqtProperty
from friture.ballistic_peak import BallisticPeak
from friture.level_data import LevelData
class LevelViewModel(QtCore.QObject):
two_channels_changed = QtCore.pyqtSignal(bool)
def __init__(self, parent=None):
super().__init__(parent)
self._two_channels = False
self._level_data = LevelData(self)
self._level_data_2 = LevelData(self)
self._level_data_slow = LevelData(self)
self._level_data_slow_2 = LevelData(self)
self._level_data_ballistic = BallisticPeak(self)
self._level_data_ballistic_2 = BallisticPeak(self)
@pyqtProperty(bool, notify=two_channels_changed)
def two_channels(self):
return self._two_channels
@two_channels.setter
def two_channels(self, two_channels):
if self._two_channels != two_channels:
self._two_channels = two_channels
self.two_channels_changed.emit(two_channels)
@pyqtProperty(LevelData, constant = True)
def level_data(self):
return self._level_data
@pyqtProperty(LevelData, constant = True)
def level_data_2(self):
return self._level_data_2
@pyqtProperty(LevelData, constant = True)
    def level_data_slow(self):
return self._level_data_slow
@pyqtProperty(LevelData, constant = True)
def level_data_slow_2(self):
return self._level_data_slow_2
@pyqtProperty(LevelData, constant = True)
def level_data_ballistic(self):
return self._level_data_ballistic
@pyqtProperty(LevelData, constant = True)
    def level_data_ballistic_2(self):
return self._level_data_ballistic_2
|
asnorkin/shad_testing_script
|
conftest.py
|
Python
|
gpl-3.0
| 8,688
| 0.002532
|
import pytest
import sys
from inspect import isgeneratorfunction
from logging import getLogger
from subprocess import Popen, PIPE
from re import search
from input_generation import user_input_generator
DELIMITER = "; "
def get_input_generator_from_list(lst):
def _generator():
for line in lst:
yield line
return _generator
def get_input_generator(request, iterations):
input = request.config.getoption("--input")
if not input:
input = user_input_generator(iterations)
if not input:
raise ValueError("No input data expression or file or user defined function specified")
if isgeneratorfunction(input):
return input
elif isinstance(input, list):
return get_input_generator_from_list(input)
else:
raise TypeError("User defined functions has wrong return value type: {type}."
"Expected: generator or list".format(type=type(input)))
# File input
with open(input, "r") as fin:
input = fin.readlines()
return get_input_generator_from_list(input)
@pytest.fixture()
def log(request):
log_level = request.config.getoption("--log")
_log = getLogger()
_log.setLevel(log_level)
return _log
@pytest.fixture()
def iterations(request):
return int(request.config.getoption("--iterations"))
@pytest.fixture()
def input_generator(request, iterations):
return get_input_generator(request, iterations)
@pytest.fixture()
def solutions(request, log):
_solutions = request.config.getoption("--solutions").split()
for _idx, _sol in enumerate(_solutions):
if _sol.rpartition(".")[-1] == "cpp":
_solutions[_idx] = compile_solution(_sol, log)
compile_solution(_sol, log, sanitizer=False)
return _solutions
@pytest.fixture()
def estimator(request, log):
timeout = float(request.config.getoption("--timeout"))
memory_limit = float(request.config.getoption("--memory_limit"))
return get_estimator(timeout=timeout, memory_limit=memory_limit, log=log)
def get_estimator(timeout, memory_limit, log):
def _estimator(solution, input):
if "__naive" in solution:
solution_without_sanitizer = solution.rpartition(".")[0] + \
"_without_sanitizer" + "." + \
solution.rpartition(".")[-1]
output = run_process(solution_without_sanitizer, input, log)
return output.strip(), 0, 0
_time = run_process_with_time_check(solution, input, log)
if _time > timeout:
log.warning("Timout exceeded: {time}s > {timeout}s\n".format(time=_time, timeout=timeout))
log.warning("Solution: {solution}\n".format(solution=solution))
log.warning("Input: {input}\n".format(input=input))
log.warning("Trying to run without sanitizer..\n")
solution_without_sanitizer = solution.rpartition(".")[0] + \
"_without_sanitizer" + "." + \
solution.rpartition(".")[-1]
_time = run_process_with_time_check(solution_without_sanitizer, input, log)
if _time > timeout:
log.warning("Timeout exceeded without sanitizer: {time}s > {timeout}s\n"
.format(time=_time, timeout=timeout))
log.warning("Solution: {solution}\n".format(solution=solution))
log.warning("Input: {input}\n".format(input=input))
else:
log.warning("Without sanitizer time is ok: {time}s < {timeout}s\n"
.format(time=_time, timeout=timeout))
_mem = run_process_with_memory_check(solution, input, log)
if _mem > memory_limit:
log.warning("Memory limit exceeded: {mem}kb > {mem_limit}kb".format(mem=_mem, mem_limit=memory_limit))
log.warning("Solution: {solution}\n".format(solution=solution))
log.warning("Input: {input}\n".format(input=input))
log.warning("Trying to run without sanitizer..\n")
solution_without_sanitizer = solution.rpartition(".")[0] + \
"_without_sanitizer" + "." + \
solution.rpartition(".")[-1]
_mem = run_process_with_memory_check(solution_without_sanitizer, input, log)
if _mem > memory_limit:
log.warning("Memory limit exceeded without sanitizer: {mem}kb > {mem_limit}kb\n"
.format(mem=_mem, mem_limit=memory_limit))
log.warning("Solution: {solution}\n".format(solution=solution))
log.warning("Input: {input}\n".format(input=input))
else:
log.warning("Without sanitizer memory is ok: {mem}kb < {mem_limit}kb\n"
.format(mem=_mem, mem_limit=memory_limit))
output = run_process(solution, input, log)
return output.strip(), _time, _mem
return _estimator
def compile_solution(solution, log, sanitizer=True):
if solution.rpartition(".")[-1] == "cpp":
sanitizer_suffix = "" if sanitizer else "_without_sanitizer"
compiled_solution = solution.rpartition(".")[0] + sanitizer_suffix + ".out"
args = ["g++",
solution,
"-fsanitize=address,undefined",
"-x",
"c++",
"-std=c++14",
"-O2",
# "-Wall",
# "-Werror",
# "-Wsign-compare",
"-o",
compiled_solution]
if not sanitizer:
del args[2]
proc = Popen(args, stdin=PIPE, stdout=PIPE, stderr=PIPE)
compile_stdout, compile_stderr = proc.communicate()
if compile_stdout or compile_stderr:
err_msg = "Compilation error:\n" \
"Solution: {}\n" \
"STDOUT: {}\n" \
"STDERR: {}\n"\
.format(solution, str(compile_stdout.decode("utf-8")), str(compile_stderr.decode("utf-8")))
log.error(err_msg)
raise ValueError(err_msg)
else:
err_msg = "Trying to compile not .cpp file: {}".format(solution)
log.error(err_msg)
raise ValueError(err_msg)
return compiled_solution
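# Illustrative note added for this write-up (not part of the original file):
# for a file sol.cpp the call above runs, roughly,
#   g++ sol.cpp -fsanitize=address,undefined -x c++ -std=c++14 -O2 -o sol.out
# and, when sanitizer=False, the same command without the -fsanitize flag,
# writing sol_without_sanitizer.out instead.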
def run_process(solution, input, log):
proc = Popen("./" + solution, stdin=PIPE, stdout=PIPE, stderr=PIPE, shell=True)
stdout, stderr = proc.communicate(bytes(input + "\n", "utf-8"))
if stderr:
log.warning("Process' stderr is not empty after execution of {}:\n{}"
.format(solution, str(stderr.decode("utf-8"))))
return str(stdout.decode("utf-8"))
def run_process_with_time_check(solution, input, log):
if sys.platform == "linux":
proc = Popen('/usr/bin/time -f "\ntime: %E" ./' + solution, stdin=PIPE, stdout=PIPE, stderr=PIPE, shell=True)
elif sys.platform == "darwin":
proc = Popen('gtime -f "\ntime: %E" ./' + solution, stdin=PIPE, stdout=PIPE, stderr=PIPE, shell=True)
else:
log.error("Unsupported platform: {}.\n".format(sys.platform))
log.error("Expected linux or darwin")
raise ValueError
stdout, stderr = proc.communicate(bytes(input + "\n", "utf-8"))
minutes, seconds = search(r"time: ([\d]+):([\d]+.[\d]+)", str(stderr.decode("utf-8"))).groups()
return float(minutes) * 60 + float(seconds)
def run_process_with_memory_check(solution, input, log):
if sys.platform == "linux":
proc = Popen('/usr/bin/time -f "\nmem: %M" ./' + solution, stdin=PIPE, stdout=PIPE, stderr=PIPE, shell=True)
elif sys.platform == "darwin":
proc = Popen('gtime -f "\nmem: %M" ./' + solution, stdin=PIPE, stdout=PIPE, stderr=PIPE, shell=True)
else:
log.error("Unsupported platform: {}.\n".format(sys.platform))
log.error("Expected linux or darwin")
raise ValueError
stdout, stderr = proc.communicate(bytes(input + "\n", "utf-8"))
(mem, ) = search(r"mem: (\d+)",
|
hydroshare/hydroshare_temp
|
hs_core/tests/api/native/test_set_access_rules.py
|
Python
|
bsd-3-clause
| 5,046
| 0.003171
|
__author__ = 'lisa_stillwell'
from unittest import TestCase
from hs_core.hydroshare import resource
from hs_core.hydroshare import users
from hs_core.models import GenericResource
from django.contrib.auth.models import User
import datetime as dt
class TestSetAccessRules(TestCase):
def setUp(self):
# create an admin user
self.admin_user = users.create_account(
'adminuser@email.com',
username='adminuser',
first_name='Super',
last_name='User',
superuser=True,
groups=[])
# create a test user
self.test_user = users.create_account(
'testuser@email.com',
username='testuser',
first_name='Ima',
last_name='Testuser',
superuser=False,
groups=[])
self.new_res = resource.create_resource(
'GenericResource',
self.admin_user,
'My Test Resource'
)
# get the user's id
#self.userid = User.objects.get(username=self.user).pk
self.test_group = users.create_group(
'MyTestGroup',
members=[self.admin_user],
owners=[self.admin_user]
)
def tearDown(self):
self.admin_user.delete()
self.test_user.delete()
self.test_group.delete()
#self.new_res.delete()
def test_set_access_rules(self):
res_id = self.new_res.short_id
# test to see if everything is sane
result = users.set_access_rules(res_id, user=None, group=None, access=users.PUBLIC, allow=True)
self.assertEqual(
res_id,
result.short_id,
msg="Incorrect or no resource id returned."
)
# make sure public access was set correctly
self.assertTrue(
self.new_res.public,
msg="Access rule for PUBLIC = False, expected True"
)
self.new_res = users.set_access_rules(res_id, user=None, group=None, access=users.PUBLIC, allow=False)
self.assertFalse(
self.new_res.public,
msg="Access rule for PUBLIC = True, expected False"
)
# make sure donotdistribute access was set correctly
self.new_res = users.set_access_rules(res_id, user=None, group=None, access=users.DO_NOT_DISTRIBUTE, allow=True)
self.assertEqual(
self.new_res.do_not_distribute,
True,
msg="Access rule for DO_NOT_DISTRIBUTE = False, expected True"
)
self.new_res = users.set_access_rules(res_id, user=None, group=None, access=users.DO_NOT_DISTRIBUTE, allow=False)
self.assertEqual(
self.new_res.do_not_distribute,
False,
msg="Access rule for DO_NOT_DISTRIBUTE = True, expected False"
)
# test with fake resource id - expect an exception
# self.assertRaises(
# NotFound,
# lambda: users.set_access_rules(121212, user=None, group=None, access=users.VIEW, allow=True)
# )
# test EDIT access with user id provided
self.new_res = users.set_access_rules(res_id, user=self.test_user, group=None, access=users.EDIT, allow=True)
self.assertTrue(
self.new_res.edit_users.filter(pk=self.test_user.pk).exists(),
msg="Failure when trying to add EDIT access for user"
)
        self.new_res = users.set_access_rules(res_id, user=self.test_user, group=None, access=users.EDIT, allow=False)
self.assertFalse(
            self.new_res.edit_users.filter(pk=self.test_user.pk).exists(),
msg="Failure when trying to remove EDIT access for user"
)
# test EDIT access with no user id provided
self.assertRaises(
TypeError,
lambda: users.set_access_rules(self.new_res, user=None, group=None, access=users.EDIT, allow=True)
)
# test VIEW access with group id provided
self.new_res = users.set_access_rules(self.new_res, user=None, group=self.test_group, access=users.VIEW, allow=True)
self.assertTrue(
self.new_res.view_groups.filter(pk=self.test_group.pk).exists(),
msg="Failure when trying to add VIEW access for group"
)
self.new_res = users.set_access_rules(self.new_res, user=None, group=self.test_group, access=users.VIEW, allow=False)
self.assertFalse(
self.new_res.view_groups.filter(pk=self.test_group.pk).exists(),
msg="Failure when trying to remove VIEW access for group"
)
# test VIEW access with no group id provided
self.assertRaises(
TypeError,
lambda: users.set_access_rules(self.new_res, user=None, group=None, access=users.VIEW, allow=True)
)
# test with fake access rule
self.assertRaises(
TypeError,
lambda: users.set_access_rules(self.new_res, user=None, group=None, access="surprise_me", allow=True)
)
|
RoasterBot/roasterbot-analytics
|
python/threaded_temp.py
|
Python
|
cc0-1.0
| 7,664
| 0.014875
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
print 'Starting ...'
#Basic imports
from ctypes import *
import sys
import time
import threading
import logging
#Phidget specific imports
from Phidgets.PhidgetException import PhidgetErrorCodes, PhidgetException
from Phidgets.Events.Events import AttachEventArgs, DetachEventArgs, ErrorEventArgs, TemperatureChangeEventArgs
from Phidgets.Devices.TemperatureSensor import TemperatureSensor, ThermocoupleType
#import methods for sleeping thread
from time import sleep
import sqlite3
from Phidgets.Phidget import PhidgetLogLevel
#Information Display Function
def DisplayDeviceInfo():
inputCount = temperatureSensor.getTemperatureInputCount()
print("|------------|----------------------------------|--------------|------------|")
print("|- Attached -|- Type -|- Serial No. -|- Version -|")
print("|------------|----------------------------------|--------------|------------|")
print("|- %8s -|- %30s -|- %10d -|- %8d -|" % (temperatureSensor.isAttached(), temperatureSensor.getDeviceName(), temperatureSensor.getSerialNum(), temperatureSensor.getDeviceVersion()))
print("|------------|----------------------------------|--------------|------------|")
print("Number of Temperature Inputs: %i" % (inputCount))
    for i in range(inputCount):
print("Input %i Sensitivity: %f" % (i, temperatureSensor.getTemperatureChangeTrigger(i)))
#Event Handler Callback Functions
def TemperatureSensorAttached(e):
attached = e.device
print("TemperatureSensor %i Attached!" % (attached.getSerialNum()))
def TemperatureSensorDetached(e):
detached = e.device
print("TemperatureSensor %i Detached!" % (detached.getSerialNum()))
def TemperatureSensorError(e):
try:
source = e.device
if source.isAttached():
print("TemperatureSensor %i: Phidget Error %i: %s" % (source.getSerialNum(), e.eCode, e.description))
except PhidgetException as e:
print("Phidget Exception %i: %s" % (e.code, e.details))
# Event-based. Change to timer-based. Once per second.
def TemperatureSensorTemperatureChanged(e):
try:
ambient = temperatureSensor.getAmbientTemperature() #
except PhidgetException as e:
print("Phidget Exception %i: %s" % (e.code, e.details))
ambient = 0.00
source = e.device
#print("TemperatureSensor %i: Ambient Temp: %f -- Thermocouple %i temperature: %f -- Potential: %f" % (source.getSerialNum(), ambient, e.index, e.temperature, e.potential))
#print temperatureSensor.getTemperature(0)*2+30
print 'Time: %s. Device %s. Current temp (F): %f' % (time.time()-start_time, e.index, toFarenheight(e.temperature) )
# Format, date, thermocouple, time, thermocouple temp, ambient temp at device
logging.info( "%s,%s,%f" % (time.time()-start_time, e.index, toFarenheight(e.temperature) ))
def toFarenheight(c):
# from MAX3865 code
# and http://www.almanac.com/temperature-conversion
# temperature = (temperature * 9.0/5.0)+ 32;
#return c*2+30
return c * (9.0/5.0)+ 32;
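# Quick sanity check added for illustration (not in the original script):
# toFarenheight(100) == 212.0 and toFarenheight(0) == 32.0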
def log_temperatures():
print 'logging'
# Rethinking exception handling ... ?
try:
ambient_temp = temperatureSensor.getAmbientTemperature() #
except PhidgetException as e:
print("Phidget Exception %i: %s" % (e.code, e.details))
ambient_temp = 0.00
ambient_temp = toFarenheight( temperatureSensor.getAmbientTemperature() )
bean_temp = toFarenheight( temperatureSensor.getTemperature(0) )
air_temp = toFarenheight( temperatureSensor.getTemperature(1) )
# Not using these inputs yet.
tbd_1_temp = toFarenheight( temperatureSensor.getTemperature(2) )
tbd_2_temp = toFarenheight( temperatureSensor.getTemperature(3) )
c_time = time.time()-start_time
# Format
#logging.info("time,ambient_temp,thermocouple_0_temp,thermocouple_1_temp,thermocouple_2_temp,thermocouple_3_temp")
print ( "%d seconds, ambient_temp=%d, bean_temp=%d, air_temp=%d, tbd1=%d, tbd2=%d" % (c_time, ambient_temp, bean_temp, \
air_temp, tbd_1_temp, tbd_2_temp ))
logging.info( "%d,%d,%d,%d,%d,%d" % (c_time, ambient_temp, bean_temp, \
air_temp, tbd_1_temp, tbd_2_temp ))
## This is where we do simple insert statements
return 0
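# (comment added for clarity) start_threads below re-arms a one-second daemon
# Timer on every call, so log_temperatures() runs roughly once per second until
# the main thread exits.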
def start_threads():
t = threading.Timer(1.0, start_threads)
t.daemon = True
t.start()
log_temperatures()
# if __name__ == '__main__':
# This is where we should start the timer and simply use
# temperatureSensor.getTemperature(0..3)
if __name__ == '__main__':
print 'in main().'
coffee = raw_input('Which coffee are you roasting? ')
#The begining of the roast
# Will need multiple "time markers" or events: Roaster warm up, drop, 1c, etc.
start_time = time.time()
#need to generate unique file name for each roast ...?
#logging.basicConfig(format='%(asctime)s,%(message)s',filename='roast.log',filemode='w',level=logging.DEBUG)
logging.basicConfig(format='%(asctime)s,%(message)s',filename='roast.log',filemode='w',level=logging.DEBUG)
#logging.basicConfig(format='%(message)s',filename='roast.log',filemode='w',level=logging.DEBUG)
# Format, date, thermocouple, time, thermocouple temp, ambient temp at device
logging.info("coffee,datetime,ms,seconds,ambient_temp,thermocouple_0_temp,thermocouple_1_temp,thermocouple_2_temp,thermocouple_3_temp")
#Create an temperaturesensor object
try:
print 'creating temp sensor'
temperatureSensor = TemperatureSensor()
except RuntimeError as e:
print("Runtime Exception: %s" % e.details)
print("Exiting....")
exit(1)
#start_threads()
print("Opening phidget object....")
try:
temperatureSensor.openPhidget()
except PhidgetException as e:
print("Phidget Exception %i: %s" % (e.code, e.details))
print("Exiting....")
exit(1)
print("Waiting for attach....")
try:
temperatureSensor.waitForAttach(10000)
except PhidgetException as e:
print("Phidget Exception %i: %s" % (e.code, e.details))
try:
temperatureSensor.closePhidget()
except PhidgetException as e:
print("Phidget Exception %i: %s" % (e.code, e.details))
print("Exiting....")
exit(1)
print("Exiting....")
exit(1)
else:
DisplayDeviceInfo()
print("Setting Thermocouple type...")
temperatureSensor.setThermocoupleType(0, ThermocoupleType.PHIDGET_TEMPERATURE_SENSOR_K_TYPE)
print("Setting sensitivity of the thermocouple....")
temperatureSensor.setTemperatureChangeTrigger(0, 0.10)
sleep(2) #sleep for 2 seconds
print("Sensitivity of thermocouple index 0 is now %f" % (temperatureSensor.getTemperatureChangeTrigger(0)))
print("Press Enter to quit....")
start_threads()
chr = sys.stdin.read(1)
## Perhaps here we can grab other characters and log different data, such as 1c, 2c, drop, etc.
## Write the desired data to log file, then once that's defined, write to db.
print("Closing...")
try:
temperatureSensor.closePhidget()
except PhidgetException as e:
print("Phidget Exception %i: %s" % (e.code, e.details))
print("Exiting....")
exit(1)
print("Done.")
exit(0)
|
findgriffin/hactar
|
test/base.py
|
Python
|
gpl-2.0
| 6,991
| 0.004291
|
from hashlib import sha1
import shutil
import re
import json
import time
from dateutil.parser import parse
from datetime import datetime as dtime
from datetime import timedelta as tdelta
from flask.ext.testing import TestCase
from app import db, app
import hactar.models
class BaseTest(TestCase):
_multiprocess_can_split = False
def create_app(self):
import json
self.conf = json.load(open('config.json', 'rb'))['test']
app.config.update(self.conf)
self.idx = hactar.models.setup('test')
app.logger.setLevel(30)
app.celery_running = False
return app
def setUp(self):
"""Before each test, set up a blank database"""
try:
shutil.rmtree(self.conf['WHOOSH_BASE'])
except OSError as err:
pass
hactar.models.setup('test')
db.create_all()
def tearDown(self):
"""Get rid of the database again after each test."""
db.session.remove()
db.drop_all()
def login(self, username=None, password=None):
if not username:
username = app.config['USERNAME']
if not password:
password = app.config['PASSWORD']
return self.client.post('/login', data=dict(
username=username,
password=password
), follow_redirects=True)
def logout(self):
return self.client.get('/logout', follow_redirects=True)
class BaseMemeTest(BaseTest):
uri0 = 'http://foobar.com'
desc0 = 'a description of foobar'
uri1 = 'http://stuff.com/somewhere'
desc1 = 'a description of stuff'
uri2 = 'http://more.com/somewhere'
desc2 = 'a description of more'
title0 = 'A title not a URI'
desc4 = 'a description of this "not-URI"'
def check_meme(self, resp, uri, desc, new=True, flash=None, isuri=True,
logged_in=True):
if flash:
self.assertIn(flash, resp.data)
elif new:
msg = 'New meme was successfully added'
self.assertIn(msg, resp.data)
now = 'just now'
self.assertEqual(resp.status_code, 200)
meme_id = int(sha1(uri).hexdigest()[:15], 16)
if logged_in:
self.assertIn('<ahref="/memes/%s">(edit)</a>' % meme_id,
re.sub('\s+', '', resp.data))
else:
self.assertIn('<ahref="/memes/%s">(view)</a>' % meme_id,
re.sub('\s+', '', resp.data))
if isuri:
self.assertIn('<h4><a href="%s" target="_blank">%s</a>' % (uri, uri), resp.data)
else:
self.assertIn('<h4>%s' % uri, resp.data)
self.assertIn('<p>%s</p>' % desc, resp.data)
self.assertIn('%s</small></h4>' % now, resp.data)
return meme_id
def get_meme(self, rjson, meme_id):
for meme in rjson['memes']:
if int(meme['id']) == meme_id:
return meme
raise AssertionError('meme: %s not in response:%s' % (meme_id, rjson))
    def check_meme_json(self, resp, what, why, new=True, flash=None,
last=True):
rjson = json.loads(resp.data)
isuri = hactar.models.is_uri(what)
if flash:
self.assertEquals([flash], rjson['flashes'])
elif last and new:
msg = u'New meme was successfully added'
self.assertEquals([msg], rjson['flashes'])
self.assertEqual(resp.status_code, 200)
meme_id = int(sha1(what).hexdigest()[:15], 16)
if last:
meme = rjson['memes'][0]
self.assertEquals(meme_id, int(meme['id']))
else:
meme = self.get_meme(rjson, meme_id)
self.assertEquals(len(meme.keys()), 9)
self.assertEquals(what, meme['uri'] if isuri else meme['title'])
self.assertEquals(why, meme['text'])
now = int(time.time())
added = parse(meme['added'])
modified = parse(meme['modified'])
if new:
self.assertEquals(added, modified,
msg='created / modified times are not equal %s' % meme)
else:
self.assertTrue(modified > added,
msg='modified not later than added %s' % meme)
return meme_id
class BaseActionTest(BaseTest):
text0 = 'cool event'
text1 = 'another fruity event'
text2 = 'yet another event'
def get_action(self, rjson, action_id):
for action in rjson['actions']:
if int(action['id']) == action_id:
return action
raise AssertionError('action: %s not in response:%s' % (action_id, rjson))
def check_action_json(self, resp, text, new=True, flash=None,
last=True):
rjson = json.loads(resp.data)
if flash:
self.assertEquals([flash], rjson['flashes'])
elif last is True and new:
msg = u'New action was successfully added'
self.assertEquals([msg], rjson['flashes'])
self.assertEqual(resp.status_code, 200)
# 1 is True evaluates to False, do it this way so we can pass in an int
# in place of last=False when required.
if last is True: # this
action_id = len(rjson['actions'])
action = rjson['actions'][0]
self.assertIsInstance(action, dict, msg='no action found in: %s' %
rjson)
self.assertEqual(action_id, int(action['id']))
elif type(last) == int:
action_id = last
action = self.get_action(rjson, action_id)
else:
raise ValueError('need an action_id or last=True')
self.assertEquals(len(action.keys()), 9)
self.assertEquals(text, action['text'])
now = int(time.time())
added = parse(action['added'])
modified = parse(action['modified'])
if new:
self.assertEquals(added, modified,
msg='created / modified times are not equal %s' % action)
else:
self.assertTrue(modified > added,
msg='modified not later than added %s' % action)
return action_id
def check_action(self, resp, text, action_id, new=True,
flash=None, logged_in=True):
if flash:
self.assertIn(flash, resp.data)
elif new:
msg = 'New action was successfully added'
self.assertIn(msg, resp.data)
now = 'just now'
self.assertEqual(resp.status_code, 200)
if logged_in:
self.assertIn('<ahref="/actions/%s">(edit)</a>' % action_id,
re.sub('\s+', '', resp.data))
else:
self.assertIn('<ahref="/actions/%s">(view)</a>' % action_id,
re.sub('\s+', '', resp.data))
self.assertIn('<h4>%s' % text, resp.data)
self.assertIn('%s</small></h4>' % now, resp.data)
return action_id
def get_day(days=0, hours=0):
today = dtime.now()
newday = today+tdelta(days=days, hours=hours)
return newday
|
wdecoster/NanoPlot
|
nanoplotter/__init__.py
|
Python
|
gpl-3.0
| 32
| 0
|
from .nanoplotter_main import *
|
gdementen/larray
|
larray/inout/stata.py
|
Python
|
gpl-3.0
| 1,657
| 0.002414
|
import pandas as pd
from larray.core.array import Array
from larray.inout.pandas import from_frame
__all__ = ['read_stata']
def read_stata(filepath_or_buffer, index_col=None, sort_rows=False, sort_columns=False, **kwargs) -> Array:
r"""
Reads Stata .dta file and returns an Array with the contents
Parameters
----------
filepath_or_buffer : str or file-like object
Path to .dta file or a file handle.
index_col : str or None, optional
Name of column to set as index. Defaults to None.
sort_rows : bool, optional
Whether or not to sort the rows alphabetically (sorting is more efficient than not sorting).
This only makes sense in combination with index_col. Defaults to False.
sort_columns : bool, optional
Whether or not to sort the columns alphabetically (sorting is more efficient than not sorting).
Defaults to False.
Returns
-------
Array
See Also
--------
Array.to_stata
Notes
-----
    The round trip to Stata (Array.to_stata followed by read_stata) loses the name of the "column" axis.
Examples
--------
>>> read_stata('test.dta') # doctest: +SKIP
{0}\{1} row country sex
0 0 BE F
1 1 FR M
2 2 FR F
>>> read_stata('test.dta', index_col='row') # doctest: +SKIP
row\{1} country sex
0 BE F
1 FR M
2 FR F
"""
df = pd.read_stata(filepath_or_buffer, index_col=index_col, **kwargs)
return from_frame(df, sort_rows=sort_rows, sort_columns=sort_columns)
|
BrainPad/FindYourCandy
|
robot-arm/dobot/command.py
|
Python
|
apache-2.0
| 6,883
| 0.001453
|
# Copyright 2017 BrainPad Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import, division, print_function, unicode_literals
import struct
import logging
from collections import OrderedDict
from dobot.errors import PacketParseError
logger = logging.getLogger(__name__)
class Command(object):
def __init__(self, id_, rw, is_queued, param_format=None, param_names=None):
"""
Args:
id_(int): dobot protocol command id
rw(int): 0 or 1. dobot protocol rw
is_queued(int): 0 or 1. dobot protocol is_queued
param_format(unicode): param binary format
param_names(list of unicode): param names
"""
self.builder = PacketBuilder(id_, rw, is_queued)
self.parser = None
self.params = None
if param_format:
self.parser = PacketParser(param_format, param_names)
def build_packet(self):
return self.builder.build(self.params)
def parse_packet(self, return_packet):
if self.parser is None:
return None
if self.parser.param_names is None:
return self.parser.parse(return_packet)
return self.parser.parse_to_dict(return_packet)
class PacketBuilder(object):
def __init__(self, id_, rw, is_queued):
self.cmd_id = id_
self.ctrl = ctrl(rw, is_queued)
self.params = None
def build(self, params):
"""
Header(2bytes), Len(1byte), ID(1byte), ctrl(1byte), Params, Checksum(1byte)
"""
payload = self._payload(params)
payload_len = len(payload)
# header, payload, checksum
pkt = struct.pack('<HB', 0xAAAA, payload_len)
pkt += payload
pkt += struct.pack('<B', checksum(payload))
return pkt
def _payload(self, params):
pl = struct.pack('<BB', self.cmd_id, self.ctrl)
if params is not None:
pl += params
return pl
class PacketParser(object):
def __init__(self, param_format, param_names=None):
self.param_format = param_format
self.param_names = param_names
def parse(self, packet):
try:
parsed = struct.unpack("<HBBB" + self.param_format + "B", packet)
except struct.error as e:
raise PacketParseError(e.message)
return parsed
def parse_to_dict(self, packet):
d = OrderedDict()
names = ['header', 'len', 'id', 'ctrl'] + self.param_names + ['checksum']
for i, name in enumerate(names):
d[name] = self.parse(packet)[i]
return d
def checksum(payload):
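    # (comment added for clarity) low byte of the two's complement of the
    # payload byte sum, so (sum(ord(c) for c in payload) + result) & 0xFF == 0.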
return 0xFF & (0x100 - (sum([ord(c) for c in payload])))
def ctrl(rw, is_queued):
"""
rw(byte0), isQueued(byte1)
"""
c = 0
c |= rw & 0x01
c |= (is_queued << 1) & 0x02
return c
####################################################
# dobot command definition #
####################################################
class GetPose(Command):
def __init__(self):
super(GetPose, self).__init__(
            10, 0, 0, '8f', ['x', 'y', 'z', 'r', 'basement', 'rear', 'fore', 'end']
)
class GetAlarmsState(Command):
def __init__(self):
super(GetAlarmsState, self).__init__(20, 0, 0)
class ClearAllAlarmsState(Command):
def __init__(self):
super(ClearAllAlarmsState, self).__init__(21, 1, 0)
class SetHomeCmd(Command):
def __init__(self):
super(SetHomeCmd, self).__init__(31, 1, 1)
self.params = struct.pack('I', 0)
class SetPTPJointParams(Command):
def __init__(self, v_basement, v_rear, v_fore, v_end, a_basement, a_rear, a_fore, a_end, is_queued=True):
params_fmt = None
params_name = None
if is_queued:
params_fmt = 'Q'
params_name = ['queuedCmdIndex']
super(SetPTPJointParams, self).__init__(
80, 1, 1 if is_queued else 0, params_fmt, params_name
)
self.params = struct.pack('8f', v_basement, v_rear, v_fore, v_end, a_basement, a_rear, a_fore, a_end)
class GetPTPJointParams(Command):
def __init__(self):
super(GetPTPJointParams, self).__init__(
80, 0, 0, '8f', ['v_basement', 'v_rear', 'v_fore', 'v_end', 'a_basement', 'a_rear', 'a_fore', 'a_end']
)
class SetPTPCoordinateParams(Command):
def __init__(self, v_xyz, v_r, acc_xyz, acc_r, is_queued=True):
params_fmt = None
params_name = None
if is_queued:
params_fmt = 'Q'
params_name = ['queuedCmdIndex']
super(SetPTPCoordinateParams, self).__init__(
81, 1, 1 if is_queued else 0, params_fmt, params_name
)
self.params = struct.pack('ffff', v_xyz, v_r, acc_xyz, acc_r)
class GetPTPCoordinateParams(Command):
def __init__(self):
super(GetPTPCoordinateParams, self).__init__(
81, 0, 0, 'ffff', ['v_xyz', 'v_r', 'acc_xyz', 'acc_r']
)
class SetPTPCmd(Command):
def __init__(self, ptp_mode, x, y, z, r):
super(SetPTPCmd, self).__init__(
84, 1, 1, 'Q', ['queuedCmdIndex']
)
self.params = struct.pack('<Bffff', ptp_mode, x, y, z, r)
class SetEndEffectorSuctionCup(Command):
def __init__(self, is_ctrl_enabled, is_sucked):
super(SetEndEffectorSuctionCup, self).__init__(62, 1, 1)
self.params = struct.pack('<BB', is_ctrl_enabled, is_sucked)
class SetQueuedCmdStartExec(Command):
def __init__(self):
super(SetQueuedCmdStartExec, self).__init__(240, 1, 0)
class SetQueuedCmdStopExec(Command):
def __init__(self):
super(SetQueuedCmdStopExec, self).__init__(241, 1, 0)
class SetQueuedCmdForceStopExec(Command):
def __init__(self):
super(SetQueuedCmdForceStopExec, self).__init__(242, 1, 0)
class SetQueuedCmdClear(Command):
def __init__(self):
super(SetQueuedCmdClear, self).__init__(245, 1, 0)
class GetQueuedCmdCurrentIndex(Command):
def __init__(self):
super(GetQueuedCmdCurrentIndex, self).__init__(246, 0, 0, 'Q', ['queuedCmdCurrentIndex'])
class GetQueuedCmdLeftSpace(Command):
def __init__(self):
super(GetQueuedCmdLeftSpace, self).__init__(247, 0, 0, 'L', ['leftSpace'])
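# Illustrative sketch added for this write-up (not part of the original
# module); assumes Python 2 byte strings as used by checksum() above. A
# parameterless GetPose request follows the layout documented in
# PacketBuilder.build(): header AA AA, length 02, id 0A, ctrl 00, checksum F6.
if __name__ == '__main__':
    pkt = GetPose().build_packet()
    print(repr(pkt))  # expected: '\xaa\xaa\x02\n\x00\xf6'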
|
kball/ambry
|
ambry/database/csv.py
|
Python
|
bsd-2-clause
| 6,934
| 0.010816
|
"""
Copyright (c) 2013 Clarinova. This file is licensed under the terms of the
Revised BSD License, included in this distribution as LICENSE.txt
"""
from __future__ import absolute_import
import unicodecsv
from . import DatabaseInterface
import anydbm
import os
from ..partitions import Partitions
from .inserter import InserterInterface
class ValueInserter(InserterInterface):
'''Inserts arrays of values into database table'''
def __init__(self, path, bundle, partition, table=None, header=None, delimiter = '|',
escapechar='\\', encoding='utf-8',
write_header = False, buffer_size=2*1024*1024):
self.table = table
self.header = header
self.path = path
self.buffer_size = buffer_size
self.delimiter = delimiter
self.escapechar = escapechar
self.encoding = encoding
self.write_header = write_header
self.bundle = bundle
self.partition = partition
if self.header:
pass
elif self.table:
self.header = [c.name for c in self.table.columns]
else:
self.table = None
self.header = None
self._writer = None
self._inserter = None
self._f = None
def insert(self, values):
if self._writer is None:
self._init_writer(values)
try:
self._inserter(values)
except (KeyboardInterrupt, SystemExit):
self.close()
self.delete()
raise
except Exception as e:
self.close()
self.delete()
raise
return True
def _init_writer(self, row):
from sqlalchemy.engine.result import RowProxy
# Four cases:
# Write header, or don't
# Write list, or dict
row_is_dict = isinstance(row, dict) or isinstance(row, RowProxy)
row_is_list = isinstance(row, (list, tuple))
has_header = self.header is not None
if not os.path.exists(self.path):
if not os.path.exists(os.path.dirname(self.path)):
os.makedirs(os.path.dirname(self.path))
f = open(self.path, 'wb', buffering=self.buffer_size)
self._f = f
delimiter = self.delimiter
if row_is_dict and has_header:
self._writer = unicodecsv.DictWriter(f, self.header, delimiter=delimiter,
escapechar=self.escapechar, encoding=self.encoding)
if self.write_header:
self._writer.writeheader()
self._inserter = self._write_dict
        elif row_is_dict and not has_header:
self.header = row.keys()
self._writer = unicodecsv.DictWriter(f, self.header, delimiter=delimiter,
                                                 escapechar=self.escapechar, encoding=self.encoding)
if self.write_header:
self._writer.writeheader()
self._inserter = self._write_dict
elif row_is_list and has_header:
self._writer = unicodecsv.writer(f, delimiter=delimiter,
escapechar=self.escapechar, encoding=self.encoding)
if self.write_header:
self._writer.writerow(self.header)
self._inserter = self._write_list
elif row_is_list and not has_header:
self._writer = unicodecsv.writer(f, delimiter=delimiter,
escapechar=self.escapechar, encoding=self.encoding)
self._inserter = self._write_list
else:
raise Exception("Unexpected case for type {}".format(type(row)))
def _write_list(self, row):
self._writer.writerow(row)
def _write_dict(self, row):
self._writer.writerow(row)
def close(self):
if self._f and not self._f.closed:
self._f.flush()
self._f.close()
def delete(self):
import os
if os.path.exists(self.path):
os.remove(self.path)
@property
def linewriter(self):
'''Like writer, but does not write a header. '''
if self._writer is None:
import csv
self.close()
if self.exists:
mode = 'a+'
else:
mode = 'w'
self.file = open(self.path, mode, buffering=self.buffer_size)
self._writer = csv.writer(self.file)
return self._writer
def __enter__(self):
self.partition.set_state(Partitions.STATE.BUILDING)
return self
def __exit__(self, type_, value, traceback):
if type_ is not None:
self.bundle.error("Got Exception: " + str(value))
self.partition.set_state(Partitions.STATE.ERROR)
return False
self.partition.set_state(Partitions.STATE.BUILT)
self.close()
from . import DatabaseInterface
class CsvDb(DatabaseInterface):
EXTENSION = '.csv'
def __init__(self, bundle, partition, base_path, **kwargs):
''''''
self.bundle = bundle
self.partition = partition
self.delimiter = '|'
@property
def path(self):
return self.partition.path+self.EXTENSION
@property
def md5(self):
from ambry.util import md5_for_file
return md5_for_file(self.path)
def exists(self):
import os
return os.path.exists(self.path)
def is_empty(self):
if not self.exists():
return True
import os
statinfo = os.stat(self.path)
if statinfo.st_size == 0:
return True
else:
return False
def create(self):
pass # Created in the inserter
def delete(self):
import os
if os.path.exists(self.path):
os.remove(self.path)
def inserter(self, header=None, skip_header = False, **kwargs):
if not skip_header and header is None and self.partition.table is not None:
header = [c.name for c in self.partition.table.columns]
return ValueInserter(self.path, self.bundle, self.partition, header=header, **kwargs)
def reader(self, encoding='utf-8', *args, **kwargs):
f = open(self.path,'rb')
return unicodecsv.reader(f, *args, delimiter=self.delimiter, encoding='utf-8', **kwargs)
def dict_reader(self, encoding='utf-8', *args, **kwargs):
f = open(self.path,'rb')
return unicodecsv.DictReader(f,*args, delimiter=self.delimiter, encoding='utf-8', **kwargs)
def close(self):
pass
|
open-synergy/opnsynid-stock-logistics-warehouse
|
stock_inventory_by_moving_product/models/__init__.py
|
Python
|
agpl-3.0
| 167
| 0
|
# -*- coding: utf-8 -*-
# Copyright 2019 OpenSynergy Indonesia
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from . import (
stock_inventory,
)
|
edmorley/django
|
tests/admin_changelist/tests.py
|
Python
|
bsd-3-clause
| 38,622
| 0.001139
|
import datetime
from django.contrib import admin
from django.contrib.admin.models import LogEntry
from django.contrib.admin.options import IncorrectLookupParameters
from django.contrib.admin.templatetags.admin_list import pagination
from django.contrib.admin.tests import AdminSeleniumTestCase
from django.contrib.admin.views.main import ALL_VAR, SEARCH_VAR
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.template import Context, Template
from django.test import TestCase, override_settings
from django.test.client import RequestFactory
from django.urls import reverse
from django.utils import formats
from .admin import (
BandAdmin, ChildAdmin, ChordsBandAdmin, ConcertAdmin,
CustomPaginationAdmin, CustomPaginator, DynamicListDisplayChildAdmin,
DynamicListDisplayLinksChildAdmin, DynamicListFilterChildAdmin,
DynamicSearchFieldsChildAdmin, EmptyValueChildAdmin, EventAdmin,
FilteredChildAdmin, GroupAdmin, InvitationAdmin,
NoListDisplayLinksParentAdmin, ParentAdmin, QuartetAdmin, SwallowAdmin,
site as custom_site,
)
from .models import (
Band, Child, ChordsBand, ChordsMusician, Concert, CustomIdUser, Event,
Genre, Group, Invitation, Membership, Musician, OrderedObject, Parent,
Quartet, Swallow, SwallowOneToOne, UnorderedObject,
)
def build_tbody_html(pk, href, extra_fields):
return (
'<tbody><tr class="row1">'
'<td class="action-checkbox">'
'<input type="checkbox" name="_selected_action" value="{}" '
'class="action-select" /></td>'
'<th class="field-name"><a href="{}">name</a></th>'
'{}</tr></tbody>'
).format(pk, href, extra_fields)
@override_settings(ROOT_URLCONF="admin_changelist.urls")
class ChangeListTests(TestCase):
def setUp(self):
self.factory = RequestFactory()
def _create_superuser(self, username):
return User.objects.create_superuser(username=username, email='a@b.com', password='xxx')
def _mocked_authenticated_request(self, url, user):
request = self.factory.get(url)
request.user = user
return request
def test_select_related_preserved(self):
"""
Regression test for #10348: ChangeList.get_queryset() shouldn't
overwrite a custom select_related provided by ModelAdmin.get_queryset().
"""
m = ChildAdmin(Child, custom_site)
request = self.factory.get('/child/')
cl = m.get_changelist_instance(request)
self.assertEqual(cl.queryset.query.select_related, {'parent': {}})
def test_select_related_as_tuple(self):
ia = InvitationAdmin(Invitation, custom_site)
request = self.factory.get('/invitation/')
cl = ia.get_changelist_instance(request)
self.assertEqual(cl.queryset.query.select_related, {'player': {}})
def test_select_related_as_empty_tuple(self):
ia = InvitationAdmin(Invitation, custom_site)
ia.list_select_related = ()
request = self.factory.get('/invitation/')
cl = ia.get_changelist_instance(request)
self.assertIs(cl.queryset.query.select_related, False)
def test_get_select_related_custom_method(self):
class GetListSelectRelatedAdmin(admin.ModelAdmin):
list_display = ('band', 'player')
def get_list_select_related(self, request):
return ('band', 'player')
ia = GetListSelectRelatedAdmin(Invitation, custom_site)
request = self.factory.get('/invitation/')
cl = ia.get_changelist_instance(request)
self.assertEqual(cl.queryset.query.select_related, {'player': {}, 'band': {}})
def test_result_list_empty_changelist_value(self):
"""
Regression test for #14982: EMPTY_CHANGELIST_VALUE should be honored
for relationship fields
"""
new_child = Child.objects.create(name='name', parent=None)
request = self.factory.get('/child/')
m = ChildAdmin(Child, custom_site)
cl = m.get_changelist_instance(request)
cl.formset = None
template = Template('{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}')
context = Context({'cl': cl})
table_output = template.render(context)
link = reverse('admin:admin_changelist_child_change', args=(new_child.id,))
row_html = build_tbody_html(new_child.id, link, '<td class="field-parent nowrap">-</td>')
self.assertNotEqual(table_output.find(row_html), -1, 'Failed to find expected row element: %s' % table_output)
def test_result_list_set_empty_value_display_on_admin_site(self):
"""
Empty value display can be set on AdminSite.
"""
new_child = Child.objects.create(name='name', parent=None)
request = self.factory.get('/child/')
# Set a new empty display value on AdminSite.
admin.site.empty_value_display = '???'
m = ChildAdmin(Child, admin.site)
cl = m.get_changelist_instance(request)
cl.formset = None
template = Template('{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}')
context = Context({'cl': cl})
table_output = template.render(context)
        link = reverse('admin:admin_changelist_child_change', args=(new_child.id,))
row_html = build_tbody_html(new_child.id, link, '<td class="field-parent nowrap">???</td>')
self.assertNotEqual(table_output.find(row_html), -1, 'Failed to find expected row element: %s' % table_output)
def test_result_list_set_empty_value_display_in_model_admin(self):
"""
        Empty value display can be set in ModelAdmin or individual fields.
"""
new_child = Child.objects.create(name='name', parent=None)
request = self.factory.get('/child/')
m = EmptyValueChildAdmin(Child, admin.site)
cl = m.get_changelist_instance(request)
cl.formset = None
template = Template('{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}')
context = Context({'cl': cl})
table_output = template.render(context)
link = reverse('admin:admin_changelist_child_change', args=(new_child.id,))
row_html = build_tbody_html(
new_child.id,
link,
'<td class="field-age_display">&dagger;</td>'
'<td class="field-age">-empty-</td>'
)
self.assertNotEqual(table_output.find(row_html), -1, 'Failed to find expected row element: %s' % table_output)
def test_result_list_html(self):
"""
Inclusion tag result_list generates a table when with default
ModelAdmin settings.
"""
new_parent = Parent.objects.create(name='parent')
new_child = Child.objects.create(name='name', parent=new_parent)
request = self.factory.get('/child/')
m = ChildAdmin(Child, custom_site)
cl = m.get_changelist_instance(request)
cl.formset = None
template = Template('{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}')
context = Context({'cl': cl})
table_output = template.render(context)
link = reverse('admin:admin_changelist_child_change', args=(new_child.id,))
row_html = build_tbody_html(new_child.id, link, '<td class="field-parent nowrap">%s</td>' % new_parent)
self.assertNotEqual(table_output.find(row_html), -1, 'Failed to find expected row element: %s' % table_output)
def test_result_list_editable_html(self):
"""
Regression tests for #11791: Inclusion tag result_list generates a
table and this checks that the items are nested within the table
element tags.
Also a regression test for #13599, verifies that hidden fields
when list_editable is enabled are rendered in a div outside the
table.
"""
new_parent = Parent.objects.create(name='parent')
new_child = Child.objects.create(name='name', parent=new_parent)
request = self.factory.get('/child/')
m = ChildAdmin(Child, custom_site)
# Test with list_
|
hinance/www
|
hinance-www/weboob/modules/fakeshop/__init__.py
|
Python
|
mit
| 64
| 0
|
from .module import FakeShopModule
__all__ = ['FakeShopModule']
|
geowurster/Acronym
|
acronym/tests/test_vector_transform.py
|
Python
|
bsd-3-clause
| 4,230
| 0.003783
|
#!/usr/bin/env python
# This document is part of Acronym
# https://github.com/geowurster/Acronym
# =================================================================================== #
#
# New BSD License
#
# Copyright (c) 2014, Kevin D. Wurster
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * The names of its contributors may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# =================================================================================== #
"""Unittests for acronym.vector.transform"""
import sys
import unittest
from acronym import vector
try:
from osgeo import ogr
from osgeo import osr
except ImportError:
import ogr
import osr
ogr.UseExceptions()
osr.UseExceptions()
#/* ======================================================================= */#
#/* Define TestStandalone() class
#/* ======================================================================= */#
class TestStandalone(unittest.TestCase):
"""Test standalone functions that do not require any complex
setups or teardowns."""
def test_field_structure(self):
# Inherit the default structure
expected = {'field1': {'o_name': 'field1',
'type': ogr.OFTString,
'formatter': str,
'width': 254,
'precision': None}}
actual = vector.transform.field_structure('field1')
self.assertDictEqual(expected, actual)
# Only override a few of the default options
expected = {'field2': {'o_name': 'field2',
'type': ogr.OFTReal,
'formatter': str,
'width': 10,
'precision': 3}}
override = {'type': ogr.OFTReal,
'width': 10,
'precision': 3,
'formatter': str}
actual = vector.transform.field_structure('field2', **override)
self.assertDictEqual(expected, actual)
#/* ======================================================================= */#
#/* Define TestStandalone() class
#/* ======================================================================= */#
class TestDS2DS(unittest.TestCase):
def setUp(self):
driver = ogr.GetDriverByName('Memory')
srs = osr.SpatialReference()
srs.ImportFromEPSG(4326)
self.test_ds = driver.CreateDataSource('ogr_memory')
self.layer = self.test_ds.CreateLayer('ogr_layer', srs, ogr.wkbPoint)
pass
def tearDown(self):
pass
#/* ======================================================================= */#
#/* Commandline test execution
#/* ======================================================================= */#
if __name__ == '__main__':
sys.exit(unittest.main())
|
zrafa/ev
|
python+tk+opencv/ejemplo-funciona.py
|
Python
|
gpl-2.0
| 2,626
| 0.011805
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import cv2
from Tkinter import *
from PIL import Image, ImageTk
import tkFileDialog
appname = "example"
class App(object):
def __init__(self, root=None):
if not root:
root = Tk()
self.root = root
self.initUI()
def initUI(self):
self.root.title(appname)
menubar = Menu(self.root)
self.root.config(menu=menubar)
fileMenu = Menu(menubar, tearoff=0)
menubar.add_command(label="Tomar Foto", command=self.tomarFoto)
# Rafa
for i in range(3):
self.root.columnconfigure(i, weight=1)
for i in range(20):
self.root.rowconfigure(i, weight=1)
self.etiqueta = Label(self.root, text="Hola")
        # end Rafa
self.canvas = Canvas(self.root)
# self.canvas.pack(side=LEFT, fill=BOTH)
self.canvas.pack(side=BOTTOM, fill=X)
self.scrollbar_vert = Scrollbar(self.root)
self.scrollbar_vert.pack(side=RIGHT, fill=Y)
self.scrollbar_hor = Scrollbar(self.root)
self.scrollbar_hor.config(orient=HORIZONTAL)
self.scrollbar_hor.pack(side=BOTTOM, fill=X)
def onExit(self):
self.root.quit()
def tomarFoto(self):
        # Block: take the photo from the webcam and save it in PGM format
video_capture = cv2.VideoCapture(0)
ret, frame = video_capture.read()
cv2.imshow('Video', frame)
params = list()
params.append(cv2.cv.CV_IMWRITE_PXM_BINARY)
params.append(1)
print "hola"
frame2 = cv2.cvtColor(frame, cv2.cv.CV_BGR2GRAY) # convert to grayscale
cv2.imwrite('cara2.pgm', frame2, params)
cv2.imwrite('cara2.PGM', frame2, params)
video_capture.release()
cv2.destroyAllWindows()
        # End of taking the photo from the webcam and saving it in PGM format
filename = 'cara2.pgm'
self.img = Image.open(filename)
self.photo_image = ImageTk.PhotoImage(self.img)
self.canvas.pack_forget()
self.canvas = Canvas(self.root, width=self.img.size[0], height=self.img.size[1])
self.canvas.create_image(10, 10, anchor=NW, image=self.photo_image)
self.canvas.pack(side=LEFT, fill=BOTH)
self.canvas.config(yscrollcommand=self.scrollbar_vert.set)
self.canvas.config(xscrollcommand=self.scrollbar_hor.set)
self.canvas.config(scrollregion=self.canvas.bbox(ALL))
self.scrollbar_vert.config(command=self.canvas.yview)
self.scrollbar_hor.config(command=self.canvas.xview)
def run(self):
self.root.mainloop()
def main():
root = Tk()
root.geometry("2
|
50x150+300+300")
app = App(root)
app.run()
if __name__ == '__m
|
ain__':
main()
|
hyqneuron/pylearn2-maxsom
|
pylearn2/scripts/papers/LocateReLU/mytrain.py
|
Python
|
bsd-3-clause
| 8,524
| 0
|
#!/usr/bin/env python
"""
This script is copied from pylearn2/scripts/train.py, so that we can put this
LocateReLU folder on PYTHON_PATH automatically, so that we can import local
modules without extra work
Script implementing the logic for training pylearn2 models.
This is a "driver" that we recommend using for all but the most unusual
training experiments.
Basic usage:
.. code-block:: none
train.py yaml_file.yaml
The YAML file should contain a pylearn2 YAML description of a
`pylearn2.train.Train` object (or optionally, a list of Train objects to
run sequentially).
See `doc/yaml_tutorial` for a description of how to write the YAML syntax.
The following environment variables will be locally defined and available
for use within the YAML file:
- `PYLEARN2_TRAIN_BASE_NAME`: the name of the file within the directory
(`foo/bar.yaml` -> `bar.yaml`)
- `PYLEARN2_TRAIN_DIR`: the directory containing the YAML file
(`foo/bar.yaml` -> `foo`)
- `PYLEARN2_TRAIN_FILE_FULL_STEM`: the filepath with the file extension
stripped off.
      (`foo/bar.yaml` -> `foo/bar`)
- `PYLEARN2_TRAIN_FILE_STEM`: the stem of `PYLEARN2_TRAIN_BASE_NAME`
(`foo/bar.yaml` -> `bar`)
- `PYLEARN2_TRAIN_PHASE` : set to `phase0`, `phase1`, etc. during iteration
through a list of Train objects. Not defined for a single train object.
These environment variables are especially useful for setting the save
path. For example, to make sure that `foo/bar.yaml` saves to `foo/bar.pkl`,
use
.. code-block:: none
save_path: "${PYLEARN2_TRAIN_FILE_FULL_STEM}.pkl"
This way, if you copy `foo/bar.yaml` to `foo/bar2.yaml`, the output of
`foo/bar2.yaml` won't overwrite `foo/bar.pkl`, but will automatically save
to foo/bar2.pkl.
For example configuration files that are consumable by this script, see
- `pylearn2/scripts/tutorials/grbm_smd`
- `pylearn2/scripts/tutorials/dbm_demo`
- `pylearn2/scripts/papers/maxout`
Use `train.py -h` to see an auto-generated description of advanced options.
"""
__authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow", "David Warde-Farley"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"
# Standard library imports
import argparse
import gc
import logging
import os
# Third-party imports
import numpy as np
# Local imports
from pylearn2.utils import serial
from pylearn2.utils.logger import (
CustomStreamHandler, CustomFormatter, restore_defaults
)
class FeatureDump(object):
"""
.. todo::
WRITEME
Parameters
----------
encoder : WRITEME
dataset : WRITEME
path : WRITEME
batch_size : WRITEME
topo : WRITEME
"""
def __init__(self, encoder, dataset, path, batch_size=None, topo=False):
"""
.. todo::
WRITEME
"""
self.encoder = encoder
self.dataset = dataset
self.path = path
self.batch_size = batch_size
self.topo = topo
def main_loop(self, **kwargs):
"""
.. todo::
WRITEME
Parameters
----------
**kwargs : dict, optional
WRITEME
"""
if self.batch_size is None:
if self.topo:
data = self.dataset.get_topological_view()
else:
data = self.dataset.get_design_matrix()
output = self.encoder.perform(data)
else:
myiterator = self.dataset.iterator(mode='sequential',
batch_size=self.batch_size,
topo=self.topo)
chunks = []
for data in myiterator:
chunks.append(self.encoder.perform(data))
output = np.concatenate(chunks)
np.save(self.path, output)
def make_argument_parser():
"""
Creates an ArgumentParser to read the options for this script from
sys.argv
"""
parser = argparse.ArgumentParser(
description="Launch an experiment from a YAML configuration file.",
epilog='\n'.join(__doc__.strip().split('\n')[1:]).strip(),
formatter_class=argparse.RawTextHelpFormatter
)
parser.add_argument('--level-name', '-L',
action='
|
store_true',
help='Display the log level (e.g. DEBUG, INFO) '
'for each logged message')
parser.add_argument('--timestamp', '-T',
action='store_true',
help='Display human-readable timestamps for '
'each logged message')
parser.add_argument('--time-budget', '-t', type=int,
help='Time
|
budget in seconds. Stop training at '
'the end of an epoch if more than this '
'number of seconds has elapsed.')
parser.add_argument('--verbose-logging', '-V',
action='store_true',
help='Display timestamp, log level and source '
'logger for every logged message '
'(implies -T).')
parser.add_argument('--debug', '-D',
action='store_true',
help='Display any DEBUG-level log messages, '
'suppressed by default.')
parser.add_argument('config', action='store',
choices=None,
help='A YAML configuration file specifying the '
'training procedure')
return parser
def train(config, level_name=None, timestamp=None, time_budget=None,
verbose_logging=None, debug=None):
"""
Trains a given YAML file.
Parameters
----------
config : str
A YAML configuration file specifying the
training procedure.
level_name : bool, optional
Display the log level (e.g. DEBUG, INFO)
for each logged message.
timestamp : bool, optional
Display human-readable timestamps for
each logged message.
time_budget : int, optional
Time budget in seconds. Stop training at
the end of an epoch if more than this
number of seconds has elapsed.
verbose_logging : bool, optional
Display timestamp, log level and source
logger for every logged message
(implies timestamp and level_name are True).
debug : bool, optional
Display any DEBUG-level log messages,
False by default.
"""
train_obj = serial.load_train_file(config)
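    # A YAML file may describe either a single Train object or a list of them to run
    # sequentially (see the module docstring); probe with iter() to tell which case this is.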
try:
iter(train_obj)
iterable = True
except TypeError:
iterable = False
# Undo our custom logging setup.
restore_defaults()
# Set up the root logger with a custom handler that logs stdout for INFO
# and DEBUG and stderr for WARNING, ERROR, CRITICAL.
root_logger = logging.getLogger()
if verbose_logging:
formatter = logging.Formatter(fmt="%(asctime)s %(name)s %(levelname)s "
"%(message)s")
handler = CustomStreamHandler(formatter=formatter)
else:
if timestamp:
prefix = '%(asctime)s '
else:
prefix = ''
formatter = CustomFormatter(prefix=prefix, only_from='pylearn2')
handler = CustomStreamHandler(formatter=formatter)
root_logger.addHandler(handler)
# Set the root logger level.
if debug:
root_logger.setLevel(logging.DEBUG)
else:
root_logger.setLevel(logging.INFO)
if iterable:
for number, subobj in enumerate(iter(train_obj)):
# Publish a variable indicating the training phase.
phase_variable = 'PYLEARN2_TRAIN_PHASE'
phase_value = 'phase%d' % (number + 1)
os.environ[phase_variable] = phase_value
# Execute this training phase.
subobj.main_loop(time_budget=time_budget)
# Clean up, in case there's a lot of memory used that's
# necessary for the next phase.
del subobj
gc.collect()
else:
|
badp/ganeti
|
lib/client/gnt_node.py
|
Python
|
gpl-2.0
| 38,700
| 0.008191
|
#
#
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
"""Node related commands"""
# pylint: disable=W0401,W0613,W0614,C0103
# W0401: Wildcard import ganeti.cli
# W0613: Unused argument, since all functions follow the same API
# W0614: Unused import %s from wildcard import (since we need cli)
# C0103: Invalid name gnt-node
import itertools
import errno
from ganeti.cli import *
from ganeti import cli
from ganeti import bootstrap
from ganeti import opcodes
from ganeti import utils
from ganeti import constants
from ganeti import errors
from ganeti import netutils
from ganeti import pathutils
from ganeti import ssh
from ganeti import compat
from ganeti import confd
from ganeti.confd import client as confd_client
#: default list of field for L{ListNodes}
_LIST_DEF_FIELDS = [
"name", "dtotal", "dfree",
"mtotal", "mnode", "mfree",
"pinst_cnt", "sinst_cnt",
]
#: Default field list for L{ListVolumes}
_LIST_VOL_DEF_FIELDS = ["node", "phys", "vg", "name", "size", "instance"]
#: default list of field for L{ListStorage}
_LIST_STOR_DEF_FIELDS = [
constants.SF_NODE,
constants.SF_TYPE,
constants.SF_NAME,
constants.SF_SIZE,
constants.SF_USED,
constants.SF_FREE,
constants.SF_ALLOCATABLE,
]
#: default list of power commands
_LIST_POWER_COMMANDS = ["on", "off", "cycle", "status"]
#: headers (and full field list) for L{ListStorage}
_LIST_STOR_HEADERS = {
constants.SF_NODE: "Node",
constants.SF_TYPE: "Type",
constants.SF_NAME: "Name",
constants.SF_SIZE: "Size",
constants.SF_USED: "Used",
constants.SF_FREE: "Free",
constants.SF_ALLOCATABLE: "Allocatable",
}
#: User-facing storage unit types
_USER_STORAGE_TYPE = {
constants.ST_FILE: "file",
constants.ST_LVM_PV: "lvm-pv",
constants.ST_LVM_VG: "lvm-vg",
constants.ST_SHARED_FILE: "sharedfile",
}
_STORAGE_TYPE_OPT = \
cli_option("-t", "--storage-type",
dest="user_storage_type",
choices=_USER_STORAGE_TYPE.keys(),
default=None,
metavar="STORAGE_TYPE",
help=("Storage type (%s)" %
utils.CommaJoin(_USER_STORAGE_TYPE.keys())))
_REPAIRABLE_STORAGE_TYPES = \
[st for st, so in constants.VALID_STORAGE_OPERATIONS.iteritems()
if constants.SO_FIX_CONSISTENCY in so]
_MODIFIABLE_STORAGE_TYPES = constants.MODIFIABLE_STORAGE_FIELDS.keys()
_OOB_COMMAND_ASK = compat.UniqueFrozenset([
constants.OOB_POWER_OFF,
constants.OOB_POWER_CYCLE,
])
_ENV_OVERRIDE = compat.UniqueFrozenset(["list"])
NONODE_SETUP_OPT = cli_option("--no-node-setup", default=True,
action="store_false", dest="node_setup",
help=("Do not make initial SSH setup on remote"
" node (needs to be done manually)"))
IGNORE_STATUS_OPT = cli_option("--ignore-status", default=False,
action="store_true", dest="ignore_status",
help=("Ignore the Node(s) offline status"
" (potentially DANGEROUS)"))
def ConvertStorageType(user_storage_type):
"""Converts a user storage type to its internal name.
"""
try:
return _USER_STORAGE_TYPE[user_storage_type]
except KeyError:
raise errors.OpPrereqError("Unknown storage type: %s" % user_storage_type,
errors.ECODE_INVAL)
def _TryReadFile(path):
"""Tries to read a file.
If the file is not found, C{None} is returned.
@type path: string
@param path: Filename
@rtype: None or string
@todo: Consider adding a generic ENOENT wrapper
"""
try:
return utils.ReadFile(path)
except EnvironmentError, err:
if err.errno == errno.ENOENT:
return None
else:
raise
def _ReadSshKeys(keyfiles, _tostderr_fn=ToStderr):
"""Reads SSH keys according to C{keyfiles}.
@type keyfiles: dict
@param keyfiles: Dictionary with keys of L{constants.SSHK_ALL} and two-values
tuples (private and public key file)
@rtype: list
@return: List of three-values tuples (L{constants.SSHK_ALL}, private and
public key as strings)
"""
result = []
for (kind, (private_file, public_file)) in keyfiles.items():
private_key = _TryReadFile(private_file)
public_key = _TryReadFile(public_file)
if public_key and private_key:
result.append((kind, private_key, public_key))
elif public_key or private_key:
_tostderr_fn("Couldn't find a complete set of keys for kind '%s'; files"
" '%s' and '%s'", kind, private_file, public_file)
return result
def _SetupSSH(options, cluster_name, node, ssh_port):
"""Configures a destination node's SSH daemon.
@param options: Command line options
  @type cluster_name: string
@param cluster_name: Cluster name
@type node: string
@param node: Destination node name
@type ssh_port: int
@param ssh_port: Destination node ssh port
"""
if options.force_join:
ToStderr("The \"--force-join\" option is no longer supported and will be"
" ignored.")
host_keys = _ReadSshKeys(constants.SSH_DAEMON_KEYFILE
|
S)
(_, root_keyfiles) = \
ssh.GetAllUserFiles(constants.SSH_LOGIN_USER, mkdir=False, dircheck=False)
root_keys = _ReadSshKeys(root_keyfiles)
(_, cert_pem) = \
utils.ExtractX509Certificate(utils.ReadFile(pathutils.NODED_CERT_FILE))
data = {
constants.SS
|
HS_CLUSTER_NAME: cluster_name,
constants.SSHS_NODE_DAEMON_CERTIFICATE: cert_pem,
constants.SSHS_SSH_HOST_KEY: host_keys,
constants.SSHS_SSH_ROOT_KEY: root_keys,
}
bootstrap.RunNodeSetupCmd(cluster_name, node, pathutils.PREPARE_NODE_JOIN,
options.debug, options.verbose, False,
options.ssh_key_check, options.ssh_key_check,
ssh_port, data)
@UsesRPC
def AddNode(opts, args):
"""Add a node to the cluster.
@param opts: the command line options selected by the user
@type args: list
@param args: should contain only one element, the new node name
@rtype: int
@return: the desired exit code
"""
cl = GetClient()
query_cl = GetClient(query=True)
node = netutils.GetHostname(name=args[0]).name
readd = opts.readd
# Retrieve relevant parameters of the node group.
ssh_port = None
try:
# Passing [] to QueryGroups means query the default group:
node_groups = [opts.nodegroup] if opts.nodegroup is not None else []
output = query_cl.QueryGroups(names=node_groups, fields=["ndp/ssh_port"],
use_locking=False)
(ssh_port, ) = output[0]
except (errors.OpPrereqError, errors.OpExecError):
pass
try:
output = query_cl.QueryNodes(names=[node],
fields=["name", "sip", "master",
"ndp/ssh_port"],
use_locking=False)
node_exists, sip, is_master, ssh_port = output[0]
except (errors.OpPrereqError, errors.OpExecError):
node_exists = ""
sip = None
if readd:
if not node_exists:
ToStderr("Node %s not in the cluster"
" - please retry without '--readd'", node)
return 1
if is_master:
ToStderr("Node %s is the master, cannot readd", node)
return 1
else:
if node_exists:
ToStderr("Node %s already in the cluster (as %s)"
" - please retry with '--readd'", node, node_exists)
return 1
sip = opt
|
WillisXChen/django-oscar
|
oscar/lib/python2.7/site-packages/phonenumbers/data/region_BS.py
|
Python
|
bsd-3-clause
| 1,927
| 0.007265
|
"""Auto-generated file, do not edit by hand. BS metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_BS = PhoneMetadata(id='BS', country_code=1, international_prefix='011',
general_desc=PhoneNumberDesc(national_number_pattern='[2589]\\d{9}', possible_number_pattern='\\d{7}(?:\\d{3})?'),
fixed_line=PhoneNumber
|
Desc(national_number_pattern='242(?:3(?:02|[236][1-9]|4[0-24-9]|5[0-68]|7[3467]|8[0-4]|9[2-467])|461|502|6(?:0[12]|12|7[67]|8[78]|9[89])|702)\\d{4}', possible_number_pattern='\\d{7}(?:\\d{3})?', example_number='2423456789'),
mobile=PhoneNumberDesc(national_number_pattern='242(?:3(?:5[79]|[79]5)|4(?:[2-4][1-9]|5[1-8]|6[2-8]|7\\d|81)|5(?:2[45]|3[35]|44
|
|5[1-9]|65|77)|6[34]6|727)\\d{4}', possible_number_pattern='\\d{10}', example_number='2423591234'),
toll_free=PhoneNumberDesc(national_number_pattern='242300\\d{4}|8(?:00|44|55|66|77|88)[2-9]\\d{6}', possible_number_pattern='\\d{10}', example_number='8002123456'),
premium_rate=PhoneNumberDesc(national_number_pattern='900[2-9]\\d{6}', possible_number_pattern='\\d{10}', example_number='9002123456'),
shared_cost=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
personal_number=PhoneNumberDesc(national_number_pattern='5(?:00|33|44|66|77)[2-9]\\d{6}', possible_number_pattern='\\d{10}', example_number='5002345678'),
voip=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
pager=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
uan=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
voicemail=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
no_international_dialling=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
national_prefix='1',
national_prefix_for_parsing='1',
leading_digits='242')
|
LordCorellon/plugin.video.9anime
|
resources/lib/ui/control.py
|
Python
|
gpl-3.0
| 3,032
| 0.014512
|
import re
import sys
import xbmc
import xbmcaddon
import xbmcplugin
import xbmcgui
import http
try:
import StorageServer
except:
import storageserverdummy as StorageServer
HANDLE=int(sys.argv[1])
ADDON_NAME = re.findall('plugin:\/\/([\w\d\.]+)\/', sys.argv[0])[0]
__settings__ = xbmcaddon.Addon(ADDON_NAME)
__language__ = __settings__.getLocalizedString
CACHE = StorageServer.StorageServer("%s.animeinfo" % ADDON_NAME, 24)
def setContent(contentType):
xbmcplugin.setContent(HANDLE, contentType)
def settingsMenu():
return xbmcaddon.Addon().openSettings()
def getSetting(key):
return __settings__.getSetting(key)
def cache(funct, *args):
return CACHE.cacheFunction(funct, *args)
def lang(x):
return __language__(x).encode('utf-8')
def addon_url(url=''):
return "plugin://%s/%s" % (ADDON_NAME, url)
def get_plugin_ur
|
l():
addon_base = addon_url()
assert sys.argv[0].startswith(addon_base), "something bad happened in here"
return sys.argv[0][len(addon_base):]
def keyboard(text):
keyboard = xbmc.Keyboard("", text, False)
keyboard.doModal()
if keyboard.isConfirmed():
return keyboard.getText()
return None
def xbmc_add_player_item(name, url, iconimage=''):
ok=True
u=addon_url(url)
|
liz=xbmcgui.ListItem(name, iconImage="DefaultVideo.png", thumbnailImage=iconimage)
liz.setInfo('video', infoLabels={ "Title": name })
liz.setProperty("fanart_image", __settings__.getAddonInfo('path') + "/fanart.jpg")
liz.setProperty("Video", "true")
liz.setProperty("IsPlayable", "true")
liz.addContextMenuItems([], replaceItems=False)
ok=xbmcplugin.addDirectoryItem(handle=HANDLE,url=u,listitem=liz, isFolder=False)
return ok
def xbmc_add_dir(name, url, iconimage=''):
ok=True
u=addon_url(url)
liz=xbmcgui.ListItem(name, iconImage="DefaultFolder.png", thumbnailImage=iconimage)
liz.setInfo('video', infoLabels={ "Title": name })
liz.setProperty("fanart_image", iconimage)
ok=xbmcplugin.addDirectoryItem(handle=HANDLE,url=u,listitem=liz,isFolder=True)
return ok
def play_source(link):
if callable(link):
link = link()
if link:
linkInfo = http.head_request(link);
if linkInfo.status_code != 200:
raise Exception('could not resolve %s. status_code=%d' %
(link, linkInfo.status_code))
item = xbmcgui.ListItem(path=linkInfo.url)
if 'Content-Type' in linkInfo.headers:
item.setProperty('mimetype', linkInfo.headers['Content-Type'])
xbmcplugin.setResolvedUrl(HANDLE, True, item)
else:
xbmcplugin.setResolvedUrl(HANDLE, False, xbmcgui.ListItem())
def draw_items(video_data):
for vid in video_data:
if vid['is_dir']:
xbmc_add_dir(vid['name'], vid['url'], vid['image'])
else:
xbmc_add_player_item(vid['name'], vid['url'], vid['image'])
xbmcplugin.endOfDirectory(HANDLE, succeeded=True, updateListing=False, cacheToDisc=True)
return True
|
Fewbytes/cosmo-manager-rest-client
|
cosmo_manager_rest_client/swagger/DeploymentsApi.py
|
Python
|
apache-2.0
| 6,880
| 0
|
########
# Copyright (c) 2013 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
import requests
import json
class DeploymentsApi(object):
def __init__(self, api_client):
self.api_client = api_client
def list(self):
"""Returns a list existing deployments.
Args:
Returns: list[Deployment]
"""
resource_path = '/deployments'
url = self.api_client.resource_url(resource_path)
response = requests.get(url)
self.api_client.raise_if_not(200, response, url)
return self.api_client.deserialize(response.json(),
'list[Deployment]')
def listWorkflows(self, deployment_id):
"""Returns a list of the deployments workflows.
Args:
deployment_id : str
Returns: Workflows
"""
resource_path = '/deployments/{0}/workflows'.format(deployment_id)
url = self.api_client.resource_url(resource_path)
response = requests.get(url)
self.api_client.raise_if_not(200, response, url)
return self.api_client.deserialize(response.json(),
'Workflows')
def createDeployment(self, body, deployment_id):
"""Creates a new deployment
Args:
            body, DeploymentRequest: Deployment blueprint (required)
Returns: Deployment
"""
resource_path = '/deployments/{0}'.format(deployment_id)
url = self.api_client.resource_url(resource_path)
response = requests.put(url,
headers={'Content-type': 'application/json'},
data=json.dumps(body))
self.api_client.raise_if_not(201, response, url)
return self.api_client.deserialize(response.json(),
'Deployment')
def getById(self, deployment_id):
"""
Args:
deployment_id, : (optional)
Returns: BlueprintState
"""
resource_path = '/deployments/{0}'.format(deployment_id)
url = self.api_client.resource_url(resource_path)
response = requests.get(url)
self.api_client.raise_if_not(200, response, url)
return self.api_client.deserialize(response.json(),
'BlueprintState')
def execute(self, deployment_id, body):
"""Execute a workflow
Args:
deployment_id, : (required)
body, : Workflow execution request (required)
Returns: Execution
"""
resource_path = '/deployments/{0}/executions'.format(deployment_id)
url = self.api_client.resource_url(resource_path)
response = requests.post(url,
headers={'Content-type': 'application/json'},
data=json.dumps(body))
self.api_client.raise_if_not(201, response, url)
return self.api_client.deserialize(response.json(),
'Execution')
def listExecutions(self, deployment_id):
"""Returns deployment executions
Args:
deployment_id, : (required)
Returns: Execution
"""
resource_path = '/deployments/{0}/executions'.format(deployment_id)
url = self.api_client.resource_url(resource_path)
response = requests.get(url)
self.api_client.raise_if_not(200, response, url)
return self.api_clie
|
nt.deserialize(response.json(),
'list[Execution]')
def eventsHeaders(self, id, responseHeadersBuffers):
"""Get headers for events associated with t
|
he deployment
Args:
id, str: ID of deployment that needs to be fetched (required)
responseHeadersBuffers, dict: a buffer for the response headers
Returns:
"""
resource_path = '/deployments/{0}/events'.format(id)
url = self.api_client.resource_url(resource_path)
response = requests.head(url)
self.api_client.raise_if_not(200, response, url)
responseHeadersBuffers.update(response.headers)
def readEvents(self, id, responseHeadersBuffers=None, from_param=0,
count_param=500):
"""Returns deployments events.
Args:
id, str: ID of deployment that needs to be fetched (required)
from_param, int: Index of the first request event. (optional)
count_param, int: Maximum number of events to read. (optional)
responseHeadersBuffers, dict: a buffer for the response
headers (optional)
Returns: DeploymentEvents
"""
resource_path = '/deployments/{0}/events'.format(id)
url = self.api_client.resource_url(resource_path)
query_params = {
'from': str(from_param),
'count': str(count_param)
}
response = requests.get(url,
params=query_params)
self.api_client.raise_if_not(200, response, url)
if responseHeadersBuffers is not None:
responseHeadersBuffers.update(response.headers)
response_json = response.json()
events_json_str = map(lambda x: json.dumps(x),
response_json['events'])
response_json['events'] = events_json_str
return self.api_client.deserialize(response_json,
'DeploymentEvents')
def listNodes(self, deployment_id, get_reachable_state=False):
"""Returns a list of the deployments workflows.
Args:
deployment_id : str
get_reachable_state: bool (default: False)
Returns: DeploymentNodes
"""
resource_path = '/deployments/{0}/nodes'.format(deployment_id)
url = self.api_client.resource_url(resource_path)
query_params = {
'reachable': str(get_reachable_state).lower()
}
response = requests.get(url,
params=query_params)
self.api_client.raise_if_not(200, response, url)
return self.api_client.deserialize(response.json(),
'DeploymentNodes')
|
armstrong/armstrong.esi
|
armstrong/esi/utils.py
|
Python
|
bsd-3-clause
| 5,702
| 0.00228
|
from cStringIO import StringIO
from email.utils import parsedate
import gzip
import logging
import re
import time
from urlparse import urljoin
from django.conf import settings
from django.http import HttpResponse
from django.middleware.gzip import GZipMiddleware
from django.utils.cache import cc_delim_re
from django.utils.datastructures import MultiValueDict
from django.utils.http import http_date
from . import http_client
try:
from logging import NullHandler
except ImportError:
# Compatibility mode for 2.6.x
class NullHandler(logging.Handler):
def emit(self, record):
pass
log = logging.getLogger('armstrong.esi')
log.addHandler(NullHandler())
esi_tag_re = re.compile(r'<esi:include src="(?P<url>[^"]+?)"\s*/>
|
', re.I)
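# Matches fragment includes such as <esi:include src="/some/fragment/" /> (example URL only).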
def reduce_vary_headers(response, additional):
'''Merges the Vary header values so all headers are included.'''
original = response.get('Vary', None)
if original is not None:
additional.append(original)
# Keep track of
|
normalized, lowercase header names in seen_headers while
# maintaining the order and case of the header names in final_headers.
seen_headers = set()
final_headers = []
for vary_value in additional:
headers = cc_delim_re.split(vary_value)
for header in headers:
if header.lower() in seen_headers:
continue
seen_headers.add(header.lower())
final_headers.append(header)
response['Vary'] = ', '.join(final_headers)
def reduce_last_modified_headers(response, additional):
'''Sets Last-Modified to the latest of all of the header values.'''
dates = additional
if 'Last-Modified' in response:
dates.append(response['Last-Modified'])
dates = [time.mktime(parsedate(date_str)) for date_str in dates]
latest = max(dates)
response['Last-Modified'] = http_date(latest)
HEADERS_TO_MERGE = {
'Vary': reduce_vary_headers,
'Last-Modified': reduce_last_modified_headers,
}
def merge_fragment_headers(response, fragment_headers):
'''
Given fragment_headers, a MultiValueDict or other mapping of header names to
values, add the header values to the response as appropriate.
'''
for header, reduce_func in HEADERS_TO_MERGE.items():
if not header in fragment_headers:
continue
if hasattr(fragment_headers, 'getlist'):
values = fragment_headers.getlist(header)
else:
value = fragment_headers.get(header)
values = [value] if value is not None else []
reduce_func(response, values)
def merge_fragment_cookies(response, fragment_cookies):
'''
Merges the fragment and response cookies.
Set the fragment cookies in the order they occurred, then set the main
response cookie last.
'''
if not fragment_cookies:
return
cookies = fragment_cookies[0]
cookies_to_reduce = fragment_cookies[1:]
cookies_to_reduce.append(response.cookies)
for cookie_obj in cookies_to_reduce:
for key, morsel in cookie_obj.items():
# To set Morsels as cookie values directly, we need to bypass
# BaseCookie.__setitem__.
dict.__setitem__(cookies, key, morsel)
response.cookies = cookies
def gunzip_response_content(response):
'''
If the response has already been compressed, gunzip it so we can modify
the text.
'''
f = gzip.GzipFile(fileobj=StringIO(response.content))
response.content = f.read()
f.close()
del response['Content-Encoding']
def gzip_response_content(request, response):
GZipMiddleware().process_response(request, response)
def build_full_fragment_url(request, url):
if url.startswith('/'):
return url
else:
return urljoin(request.path, url)
# TODO: Test this independently of the middleware
# TODO: Reduce the lines of codes and varying functionality of this code so its
# tests can be reduced in complexity.
def replace_esi_tags(request, response):
process_errors = getattr(settings, 'ESI_PROCESS_ERRORS', False)
fragment_headers = MultiValueDict()
fragment_cookies = []
request_data = {
'cookies': request.COOKIES,
'HTTP_REFERER': request.build_absolute_uri(),
'HTTP_X_ESI_FRAGMENT': True,
}
replacement_offset = 0
for match in esi_tag_re.finditer(response.content):
url = build_full_fragment_url(request, match.group('url'))
if response.status_code == 200 or process_errors:
client = http_client.Client(**request_data)
fragment = client.get(url)
else:
fragment = HttpResponse()
if fragment.status_code != 200:
# Remove the error content so it isn't added to the page.
fragment.content = ''
extra = {'data': {
'fragment': fragment.__dict__,
'request': request.__dict__,
}}
log.error('ESI fragment %s returned status code %s' %
(url, fragment.status_code), extra=extra)
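        # The match positions refer to the original response content, so shift them by
        # the accumulated length difference from earlier replacements.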
start = match.start() + replacement_offset
end = match.end() + replacement_offset
response.content = '%s%s%s' % (response.content[:start],
fragment.content, response.content[end:])
replacement_offset += len(fragment.content) - len(match.group(0))
for header in HEADERS_TO_MERGE:
if header in fragment:
fragment_headers.appendlist(header, fragment[header])
if fragment.cookies:
fragment_cookies.append(fragment.cookies)
merge_fragment_headers(response, fragment_headers)
merge_fragment_cookies(response, fragment_cookies)
|
BunsenLabs/fluxbb-activity
|
fluxbbactivity/__init__.py
|
Python
|
gpl-3.0
| 6,198
| 0.01081
|
#!/usr/bin/env python3
from argparse import ArgumentParser
from bottle import abort, route, run, static_file
import MySQLdb
import calendar
import datetime
import dateutil.parser
import json
import logging
import os
import pathlib
import sqlite3
import sys
import threading
import time
APIVER = 0
BACKEND_URL = 'https://forums.bunsenlabs.org'
PUBLIC = {}
SQLDIR = None
WWWDIR = None
def subdirpath(subdir):
return "{}/{}".format(os.path.dirname(os.path.abspath(__file__)), subdir)
SQLDIR = subdirpath("sql")
WWWDIR = subdirpath("www")
def parse_cmdline():
ap = ArgumentParser()
ap.add_argument("--address", default="127.0.0.1")
ap.add_argument("--sql-db", required=True)
ap.add_argument("--sql-password")
ap.add_argument("--sql-socket", default="/var/run/mysqld/mysqld.sock")
ap.add_argument("--sql-user", required=True)
ap.add_argument("--port", type=int, default=10000)
ap.add_argument("--timeout", type=int, default=900)
ap.add_argument("--journal", required=True)
ap.add_argument("--backend-url", default="https://forums.bunsenlabs.org")
args = ap.parse_args()
if not args.sql_password:
try:
args.sql_password = os.environ["FXBA_SQL_PASSWORD"]
except:
raise BaseException("No SQL password supplied via command line or environment")
return args
class Journal:
def __init__(self, path):
self.path = path
def __enter__(self):
self.conn = sqlite3.connect(self.path)
cur = self.conn.cursor()
cur.execute(""" select name from sqlite_master where type='table' and name='journal'; """)
if not cur.fetchone():
cur.execute(""" create table journal (date TEXT, apiversion INTEGER, query TEXT, value TEXT ); """)
self.conn.commit()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.conn.close()
def commit(self, query, value):
cur = self.conn.cursor()
cur.execute(""" INSERT INTO journal VALUES ( ?, ?, ?, ? ) """,
(str(datetime.datetime.utcnow().isoformat()), APIVER, query, value,))
self.conn.commit()
def history(self, key):
cur = self.conn.cursor()
cur.execute(""" SELECT date,value
FROM journal
WHERE query = ? AND apiversion = ?""",
(key, APIVER,));
rows = list(map(lambda v: [ int(dateutil.parser.parse(v[0]).timestamp()), json.loads(v[1]) ], cur.fetchall()))
self.conn.commit()
return rows
def __history_views(self):
pass
class Fetcher(threading.Thread):
def __init__(self, cconf, queries, timeout, journalpath):
super
|
().__init__()
self.cconf = cconf
self.queries = queries
self.timeout = timeout
self.journal = journalpath
self.public = {}
def run(self):
self.event = threading.Event()
self.update()
while not self.event.wait(timeout=self.timeout):
self.update()
def update(self):
logging.info("Running Fetcher.update()")
|
global PUBLIC
PUBLIC = self.query()
logging.info("Fetcher.update() finished.")
def query(self):
t = { "history": dict() }
with MySQLdb.connect(**self.cconf) as cur:
with Journal(self.journal) as jur:
for cat in self.queries:
t[cat] = {}
t["history"][cat] = {}
for key in self.queries[cat]:
query_key = "{}/{}".format(cat, key)
logging.debug("Executing query {}".format(query_key))
cur.execute(self.queries[cat][key])
t[cat][key] = [ self.convtuple(tup) for tup in cur.fetchall() ]
jur.commit(query_key, json.dumps(t[cat][key]))
if query_key == "counts/all":
t["history"][cat][key] = jur.history(query_key)
t["ts"] = { "last_update": calendar.timegm(time.gmtime(time.time())),
"update_interval": self.timeout }
return t
def convtuple(self, tup):
return list(tup[:-1]) + [ float(tup[-1]) ]
def find_queries(query_dir):
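    # Walks query_dir for *.sql files; e.g. ".../sql/counts/all.sql" ends up as
    # t["counts"]["all"] (the "counts/all" key later used by Fetcher.query).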
t = {}
for root, dirs, files in os.walk(query_dir):
for f in files:
if f.endswith(".sql"):
cat, key = root.split("/")[-1], f[:-4]
with open("{}/{}".format(root, f), "r") as FILE:
data = FILE.read()
if not cat in t:
t[cat] = {}
t[cat][key] = data
return t
def install_routes():
@route("/api/{}/<cat>/<key>".format(APIVER))
def dataroute(cat, key):
if (cat in PUBLIC) and (key in PUBLIC[cat]):
return { "v":PUBLIC[cat][key] }
else:
return dict()
@route("/api/{}/last-update".format(APIVER))
def callback():
if "ts" in PUBLIC:
return { "v":PUBLIC["ts"] }
else:
return { "v": 0 }
@route("/api/{}/history/<cat>/<key>".format(APIVER))
def callback(cat, key):
print(cat,key)
try:
return { "v": PUBLIC["history"][cat][key] }
except BaseException as err:
return { "v": dict(), "error": err }
@route('/<path:path>')
def callback(path):
return static_file(path, root=WWWDIR)
@route("/api/{}/backend_url".format(APIVER))
def callback():
return { "v": { "backend_url": BACKEND_URL } }
def main():
logging.basicConfig(level=logging.DEBUG, format="%(asctime)s : %(name)s : %(levelname)s : %(message)s")
args = parse_cmdline()
BACKEND_URL = args.backend_url
queries = find_queries(SQLDIR)
cconf = { "db" : args.sql_db, "unix_socket" : args.sql_socket, "user" : args.sql_user, "passwd" : args.sql_password }
fetcher = Fetcher(cconf, queries, args.timeout, args.journal)
fetcher.start()
install_routes()
try:
run(host = args.address, port = args.port, server = "cherrypy")
except:
run(host = args.address, port = args.port)
return 0
|
PythonCharmers/orange3
|
Orange/tests/test_score_feature.py
|
Python
|
gpl-3.0
| 4,389
| 0.000456
|
import unittest
import numpy as np
from Orange.data import Table, Domain, DiscreteVariable
from Orange.preprocess import score
from Orange import preprocess
class FeatureScoringTest(unittest.TestCase):
def setUp(self):
self.zoo = Table("zoo") # disc. features, disc. class
self.housing = Table("housing") # cont. features, cont. class
self.monk = Table("monks-1")
self.adult = Table("adult_sample")
def test_info_gain(self):
scorer = score.InfoGain()
correct = [0.79067, 0.71795, 0.83014, 0.97432, 0.46970]
np.testing.assert_almost_equal([scorer(self.zoo, a) for a in range(5)],
correct, decimal=5)
def test_gain_ratio(self):
scorer = score.GainRatio()
correct = [0.80351, 1.00000, 0.84754, 1.00000, 0.59376]
np.testing.assert_almost_equal([scorer(self.zoo, a) for a in range(5)],
correct, decimal=5)
def test_gini(self):
scorer = score.Gini()
correct = [0.11893, 0.10427, 0.13117, 0.14650, 0.05973]
np.testing.assert_almost_equal([scorer(self.zoo, a) for a in range(5)],
correct, decimal=5)
def test_classless(self):
classless = Table(Domain(self.zoo.domain.attributes),
self.zoo[:, 0:-1])
scorers = [score.Gini(), score.InfoGain(), score.GainRatio()]
for scorer in scorers:
with self.assertRaises(ValueError):
scorer(classless, 0)
def test_wrong_class_type(self):
scorers = [score.Gini(), score.InfoGain(), score.GainRatio()]
for scorer in scorers:
with self.assertRaises(ValueError):
scorer(self.housing, 0)
with self.assertRaises(ValueError):
score.Chi2(self.housing, 0)
with self.assertRaises(ValueError):
score.ANOVA(self.housing, 2)
score.UnivariateLinearRegression(self.housing, 2)
def test_chi2(self):
nrows, ncols = 500, 5
X = np.random.randint(4, size=(nrows, ncols))
y = 10 + (-3*X[:, 1] + X[:, 3]) // 2
domain = Domain.from_numpy(X, y)
domain = Domain(domain.attributes,
DiscreteVariable('c', values=np.unique(y)))
table = Table(domain, X, y)
data = preprocess.Discretize()(table)
scorer = score.Chi2()
sc = [scorer(data, a) for a in range(ncols)]
self.assertTrue(np.argmax(sc) == 1)
def test_anova(self):
nrows, ncols = 500, 5
X = np.random.rand(nrows, ncols)
|
y = 4 + (-3*X[:, 1] + X[:, 3]) // 2
domain = Domain.from_numpy(X, y)
domain = Domain(domain.attributes,
DiscreteVariable('c', values=np.unique(y)))
|
data = Table(domain, X, y)
scorer = score.ANOVA()
sc = [scorer(data, a) for a in range(ncols)]
self.assertTrue(np.argmax(sc) == 1)
def test_regression(self):
nrows, ncols = 500, 5
X = np.random.rand(nrows, ncols)
y = (-3*X[:, 1] + X[:, 3]) / 2
data = Table(X, y)
scorer = score.UnivariateLinearRegression()
sc = [scorer(data, a) for a in range(ncols)]
self.assertTrue(np.argmax(sc) == 1)
def test_relieff(self):
old_monk = self.monk.copy()
weights = score.ReliefF()(self.monk, None)
found = [self.monk.domain[attr].name for attr in reversed(weights.argsort()[-3:])]
reference = ['a', 'b', 'e']
self.assertEqual(sorted(found), reference)
# Original data is unchanged
np.testing.assert_equal(old_monk.X, self.monk.X)
np.testing.assert_equal(old_monk.Y, self.monk.Y)
# Ensure it doesn't crash on adult dataset
weights = score.ReliefF()(self.adult, None)
found = sorted([self.adult.domain[attr].name for attr in weights.argsort()[-2:]])
reference = ['marital-status', 'relationship']
self.assertEqual(found, reference)
def test_rrelieff(self):
scorer = score.RReliefF()
score.RReliefF.__init__(scorer, n_iterations=100, k_nearest=70)
weights = scorer(self.housing, None)
best_five = [self.housing.domain[attr].name
for attr in reversed(weights.argsort()[-5:])]
self.assertTrue('AGE' in best_five)
|
NathanLawrence/lunchbox
|
app_config.py
|
Python
|
mit
| 2,936
| 0.003065
|
#!/usr/bin/env python
"""
Project-wide application configuration.
DO NOT STORE SECRETS, PASSWORDS, ETC. IN THIS FILE.
They will be exposed to users. Use environment variables instead.
See get_secrets() below for a fast way to access them.
"""
import os
"""
NAMES
"""
# Project name to be used in urls
# Use dashes, not underscores!
PROJECT_SLUG = 'lunchbox'
# Project name to be used in file paths
PROJECT_FILENAME = 'lunchbox'
# The name of the repository containing the source
REPOSITORY_NAME = 'lunchbox'
GITHUB_USERNAME = 'nprapps'
REPOSITORY_URL = 'git@github.com:%s/%s.git' % (GITHUB_USERNAME, REPOSITORY_NAME)
REPOSITORY_ALT_URL = None # 'git@bitbucket.org:nprapps/%s.git' % REPOSITORY_NAME'
DEV_CONTACT = 'Nathan Lawrence, KBIA Digital Content Director'
"""
DEPLOYMENT
"""
PRODUCTION_S3_BUCKET = 'lunchbox.kbia.org'
STAGING_S3_BUCKET = 'stage-lunchbox.kbia.org'
DEFAULT_MAX_AGE = 20
FILE_SERVER_USER = 'ubuntu'
FILE_SERVER = 'tools.apps.npr.org'
FILE_SERVER_PATH = '~/www'
# These va
|
riables will be set at runtime. See configure_targets() below
S3_BUCKET = None
S3_BASE_URL = None
S3_DEPLOY_URL = None
DEBUG = True
"""
Utilities
"""
def get_secrets():
"""
A method for accessing our secrets.
"""
secrets_dict = {}
for k,v in os.environ.items():
if k.startswith(PROJECT_SLUG):
k = k[len(PROJECT_SLUG) + 1:]
secrets_dict[k] = v
return se
|
crets_dict
def configure_targets(deployment_target):
"""
Configure deployment targets. Abstracted so this can be
overriden for rendering before deployment.
"""
global S3_BUCKET
global S3_BASE_URL
global S3_DEPLOY_URL
global DEBUG
global DEPLOYMENT_TARGET
global ASSETS_MAX_AGE
if deployment_target == 'electron':
S3_BUCKET = None
S3_BASE_URL = None
S3_DEPLOY_URL = None
DEBUG = False
ASSETS_MAX_AGE = 0
if deployment_target == 'fileserver':
S3_BUCKET = None
S3_BASE_URL = None
S3_DEPLOY_URL = None
DEBUG = False
ASSETS_MAX_AGE = 0
if deployment_target == 'production':
S3_BUCKET = PRODUCTION_S3_BUCKET
S3_BASE_URL = 'http://%s/%s' % (S3_BUCKET, PROJECT_SLUG)
S3_DEPLOY_URL = 's3://%s/%s' % (S3_BUCKET, PROJECT_SLUG)
DEBUG = False
ASSETS_MAX_AGE = 86400
elif deployment_target == 'staging':
S3_BUCKET = STAGING_S3_BUCKET
S3_BASE_URL = 'http://%s/%s' % (S3_BUCKET, PROJECT_SLUG)
S3_DEPLOY_URL = 's3://%s/%s' % (S3_BUCKET, PROJECT_SLUG)
DEBUG = True
ASSETS_MAX_AGE = 20
else:
S3_BUCKET = None
S3_BASE_URL = 'http://127.0.0.1:8000'
S3_DEPLOY_URL = None
DEBUG = True
ASSETS_MAX_AGE = 20
DEPLOYMENT_TARGET = deployment_target
"""
Run automated configuration
"""
DEPLOYMENT_TARGET = os.environ.get('DEPLOYMENT_TARGET', None)
configure_targets(DEPLOYMENT_TARGET)
|
beproud/bp-cron
|
src/handlers/github_reminder.py
|
Python
|
mit
| 4,990
| 0.000213
|
from collections import defaultdict
from datetime import date
from itertools import count, product
from string import ascii_uppercase
from urllib.parse import urlparse
from github import Github
from slacker import Error
from src import settings
from src.utils import slack
from src.utils.google_api import get_service
BOT_EMOJI = ":github:"
CHANNEL = "#bp-employees"
def add_alpha_prefix(length):
"""セルのアルファベット文字列を返すジェネレータ
A, B, ..., Y, Z, AA, AB, ..., AY, AZ, BA, ...
"""
repeat = count(1)
while True:
for strs in product(ascii_uppercase, repeat=next(repeat)):
if length <= 0:
return
length -= 1
yield "".join(strs)
def make_sheet(service, spreadsheet_id, sheet_name, row_count, column_count):
requests = []
requests.append(
{
"addSheet": {
"properties": {
"title": sheet_name,
"index": "0",
"gridProperties": {
"rowCount": row_count,
"columnCount": column_count,
},
}
}
}
)
body = {"requests": requests}
response = (
service.spreadsheets()
.batchUpdate(spreadsheetId=spreadsheet_id, body=body)
.execute()
)
return response
def write_sheet(
service,
spreadsheet_id,
sheet_name,
row_count,
column_count,
org_member_to_repos,
col_member_to_repos,
):
alpha_prefix = [p for p in add_alpha_prefix(row_count)]
start, end = alpha_prefix[0], alpha_prefix[-1]
_range = sheet_name + f"!{start}1:{end}{column_count}"
body = {}
body["range"] = _range
body["majorDimension"] = "ROWS"
values = []
values.append([f"組織メンバー:{len(org_member_to_repos)}"])
for m, repo in org_member_to_repos.items():
inner_list = []
inner_list.append(m)
inner_list.extend(repo)
values.append(inner_list)
    values.append([])  # leave one blank row
values.append([f"組織外メンバー:{len(col_member_to_repos)}"])
for m, repo in col_member_to_repos.items():
inner_list = []
inner_list.append(m)
inner_list.extend(repo)
values.append(inner_list)
body["values"] = values
value_input_option = "USER_ENTERED"
result = (
service.spreadsheets()
.values()
.update(
spreadsheetId=spreadsheet_id,
range=_range,
valueInputOption=value_input_option,
body=body,
)
.execute()
)
return result
def fetch_organization_repostiries_info():
g = Github(settings.GITHUB_API_TOKEN)
org = g.get_organization(settings.GITHUB_ORGANIZATION)
    # Private repositories
org_private_repos = list(org.get_repos(type="private"))
org_private_repos_number = len(org_private_repos)
    # Organization members
org_members = sorted(org.get_members(), key=lambda m: m.login)
    # Outside collaborators:
col_members = sorted(org.get_outside_collaborators(), key=lambda m: m.login)
cm = defaultdict(list)
for r in org_private_repos:
for m in r.get_collaborators():
cm[m.login].append(r.full_name)
org_member_to_repos = {m.login: cm[m.login] for m in org_members}
col_member_to_repos = {m.login: cm[m.login] for m in col_members}
return org_private_repos_number, org_member_to_repos, col_member_to_repos
def notify_member_check(event, context):
service = get_service("sheets", "v4")
result = urlparse(settings.GITHUB_SPREADSHEET_URL)
sheet_id = result.path.split("/")[3]
org_private_repos_number, org_member_to_repos, col_member_to_repos = (
fetch_organization_repostiries_info()
)
    # 3 -> member-name column plus extra padding rows
row_count = 3 + org_private_repos_number
    # 10 -> rows used to record the totals plus extra padding rows
    column_count = 10 + len(org_member_to_repos) + len(col_member_to_repos)
sheet_name = f"{date.today().month}月"
make_sheet(service, sheet_id, sheet_name, row_count, column_count)
write_sheet(
service,
sheet_id,
sheet_name,
row_count,
column_count,
org_member_to_repos,
col_member_to_repos,
)
try:
slack.post_message(
CHANNEL,
":octocat: < @here GitHubメンバー整理をしよう",
userna
|
me="GitHub Information Bot",
icon_emoji=BOT_EMOJI,
attachments=[{
"pretext": "案件のリポジトリを確認してGitHubを利用していないメンバーがいたら外しましょう。",
"color": "#e83a3a",
"text": f"<{setting
|
s.GITHUB_SPREADSHEET_URL}|メンバー一覧>"
}],
link_names=True,
)
except Error:
pass
|
alexholcombe/words-calculate
|
anagramsCalculate.py
|
Python
|
mit
| 3,348
| 0.020012
|
from __future__ import print_function
import pandas as pd
import os
#This program will find all the compound words in a long list of words, by
#taking the lexicon of all half-as-many-letters words and checking them against
#the long list.
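#For example, "FORECAST" splits into "FORE" + "CAST", both valid 4-letter words.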
print(os.getcwd())  # if running inside PsychoPy the working directory may be wrong; use os.chdir() to fix it
dir='words_from_databases/'
fname = '8letterWordsFromElexicon'
#Read in all the words (downloaded before from Elexicon
words=pd.read_csv(dir+fname+'.csv')
#Read in all the words with half as many letters
wordsHalfAsManyLtrs = pd.read_csv(dir+'4letterWordsFromElexicon.csv')
#words.to_pickle(fname+'.pickle')
#words = pd.read_pickle(fname+'.pickle')
if type(words['Word'].irow(-1)) is not str: #sometimes last row is bad, probably because carriage return at end
words = words[:-1]
wordsList = words['Word'].astype(str) #put them into a list
wordsList = list( wordsList )
wordsList = [ x.upper() for x in wordsList ] #change to upper case
numLtrs = words['Length'][0]
halfNumLtrs = int(numLtrs/2)
print('words=',words)
words['firstHalf'] = 'ZZZZZ'
words['secondHalf'] = 'ZZZZZ'
#Loop through all words, dividing them into first half and second half
#Add first and second half to same pandas dataframe
for i in range( len(words) ):
thisWord =
|
words['Word'][i]
#print('thisWord=',thisWord)
#print('thisWord[:halfNumLtrs]=',thisWord[:3])
try:
words['firstHalf'][i] = thisWord[:halfNumLtrs]
except Exception,e:
print(e)
print('i=',i)
words['secondHalf'][i] = thisWord[halfNumLtrs:]
print('words.head=',words.head())
#print('words.tail=',words.tail())
if type(wordsHalfAsManyLtrs['Word'].irow(-1)) is not str: #sometimes las
|
t row is bad, probably because carriage return at end
wordsHalfAsManyLtrs = wordsHalfAsManyLtrs[:-1] #remove last item
#For each word, find out whether both firsthalf and second half is a word
halfLengthWords = wordsHalfAsManyLtrs['Word'].astype(str)
halfLengthWords = list(halfLengthWords)
halfLengthWords = [ x.upper() for x in halfLengthWords ] #change to upper case
print('halfLengthWords=',halfLengthWords)
validWords = []
validWordsFreq = []
print('words with both halves legal:')
for i in range(len(words)):
firstHalf = words['firstHalf'][i].upper()
#print('firstHalf=',firstHalf)
secondHalf = words['secondHalf'][i].upper()
if i==0:
print('example firstHalf=',firstHalf,' secondHalf=',secondHalf)
if firstHalf in halfLengthWords and secondHalf in halfLengthWords:
print(words['Word'][i])
validWords.append(firstHalf+secondHalf)
#validWordFreq.append(words['frequency'][i])
print(len(validWords),' valid words')
#freqCriterion = 3
#validWordFreq > freqCriterion
#check whether the half words can be combined in either order to form a legal word
reverseWords = []
print('words whose half-word can be put in either order to form a legal word:')
for i in range(len(words)):
firstHalf = words['firstHalf'][i].upper()
#print('firstHalf=',firstHalf)
secondHalf = words['secondHalf'][i].upper()
reversed = secondHalf + firstHalf
if reversed in wordsList and (firstHalf != secondHalf):
reverseWords.append(reversed)
print(reversed)
if i==0:
print('example reversed=',reversed)
print(len(reverseWords),' valid reverse words')
|
istresearch/scrapy-cluster
|
crawler/tests/online.py
|
Python
|
mit
| 3,708
| 0.005394
|
'''
Online link spider test
'''
from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
from builtins import next
import unittest
from unittest import TestCase
import time
import sys
from os import path
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
import scrapy
import redis
from redis.exceptions import ConnectionError
import json
import threading, time
from crawling.spiders.link_spider import LinkSpider
from scrapy.utils.project import get_project_settings
from twisted.internet import reactor
from scrapy.crawler import CrawlerRunner
from kafka import KafkaConsumer
class CustomSpider(LinkSpider):
'''
Overridden link spider for testing
'''
name = "test-spider"
class TestLinkSpider(TestCase):
example_feed = "{\"allowed_domains\":null,\"allow_regex\":null,\""\
"crawlid\":\"abc12345\",\"url\":\"http://dmoztools.net/\",\"expires\":0,\""\
"ts\":1461549923.7956631184,\"priority\":1,\"deny_regex\":null,\""\
"cookie\":null,\"attrs\":null,\"appid\":\"test\",\"spiderid\":\""\
"test-link\",\"useragent\":null,\"deny_extensions\":null,\"maxdepth\":0}"
def setUp(self):
self.settings = get_project_settings()
self.settings.set('KAFKA_TOPIC_PREFIX', "demo_test")
# set up redis
self.redis_conn = redis.Redis(host=self.settings['REDIS_HOST'],
port=self.settings['REDIS_PORT'],
db=self.settings['REDIS_DB'])
try:
self.redis_conn.info()
except ConnectionError:
print("Could not connect to Redis")
# plugin is essential to functionality
sys.exit(1)
# clear out older test keys if any
keys = self.redis_conn.keys("test-spider:*")
for key in keys:
self.redis_conn.delete(key)
        # set up a kafka consumer to read the potential result
self.consumer = KafkaConsumer(
"demo_test.crawled_firehose",
bootstrap_servers=self.settings['KAFKA_HOSTS'],
group_id="demo-id",
auto_commit_interval_ms=10,
consumer_timeout_ms=5000,
auto_offset_reset='earliest'
)
time.sleep(1)
def test_crawler_process(self):
runner = CrawlerRunner(self.settings)
d = runner.crawl(CustomSpider)
d.addBoth(lambda _: reactor.stop())
# add crawl to redis
key = "test-spider:dmoztools.net:queue"
self.redis_conn.zadd(key, self.example_feed, -99)
# run the spider, give 20 seconds to see the url, crawl it,
# and send to kafka. Then we kill the reactor
|
def thread_func():
time.sleep(20)
reactor.stop()
thread = threading.Thread(target=thread_func)
thread.start()
reactor.run()
message_count = 0
|
m = next(self.consumer)
if m is None:
pass
else:
the_dict = json.loads(m.value)
if the_dict is not None and the_dict['appid'] == 'test' \
and the_dict['crawlid'] == 'abc12345':
message_count += 1
self.assertEquals(message_count, 1)
def tearDown(self):
keys = self.redis_conn.keys('stats:crawler:*:test-spider:*')
keys = keys + self.redis_conn.keys('test-spider:*')
for key in keys:
self.redis_conn.delete(key)
# if for some reason the tests fail, we end up falling behind on
# the consumer
for m in self.consumer:
pass
self.consumer.close()
if __name__ == '__main__':
unittest.main()
|
bam2332g/proj1part3
|
rahulCode_redo/project1Part3/nba/parseCurrentPlayers.py
|
Python
|
mit
| 422
| 0.037915
|
# parsing the dump to get all the keys for the current players
import json
dic={}
with open('currentPlayerDump.json','r') as f:
data=json.load(f)
print data["resultSets"][0]["headers"]
print len(data["resultSets"]
|
[0]["rowSet"])
for obj in data["resultSets"][0]["rowSet"]:
if obj[0] not in dic:
dic[obj[0]]=obj[1]
with open('playerKey','w') as f1:
for key in dic:
f1.write(s
|
tr(key)+" : "+ str(dic[key])+"\n")
|
Aptitudetech/ERPNext
|
erpnext/selling/doctype/quotation/test_quotation.py
|
Python
|
cc0-1.0
| 2,733
| 0.02232
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import flt
import unittest
test_dependencies = ["Product Bundle"]
class TestQuotation(unittest.TestCase):
def test_make_sales_order(self):
from erpnext.selling.doctype.quotation.quotation import make_sales_order
quotation = frappe.copy_doc(test_records[0])
quotation.insert()
self.assertRaises(frappe.ValidationError, make_sales_order, quotation.name)
quotation.submit()
sales_order = make_sales_order(quotation.name)
self.assertEquals(sales_order.doctype, "Sales Order")
self.assertEquals(len(sales_order.get("items")), 1)
self.assertEquals(sales_order.get("items")[0].doctype, "Sales Order Item")
self.assertEquals(sales_order.get("items")[0].prevdoc_docname, quotation.name)
self.assertEquals(sales_order.customer, "_Test Customer")
sales_order.delivery_date = "2014-01-01"
sales_order.naming_series = "_T-Quotation-"
sales_order.transaction_date = "2013-05-12"
sales_order.insert()
def test_create_quotation_with_margin(self):
from erpnext.selling.doctype.quotation.quotation import make_sales_order
from erpnext.selling.doctype.sales_order.sales_order \
import make_delivery_note, make_sales_invoice
rate_with_margin = flt((1500*18
|
.75)/100 + 1500)
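        # i.e. a price list rate of 1500 with an 18.75 % margin works out to 1781.25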
test_records[0]['items'][0]['price_list_rate'] = 1500
test_records[0]['items'][0]['margin_type'] = 'Percentage'
test_records[0]['items'][0]['margin_rate_or_amount'] = 18.75
quotation = frappe.copy_doc(test_records[0])
quotation.insert()
self.assertEquals(quotation.get("items")[0].rate, rate_with_margin)
self.assertRaises(frappe.ValidationError, make_sales_order, quotation.
|
name)
quotation.submit()
sales_order = make_sales_order(quotation.name)
sales_order.naming_series = "_T-Quotation-"
sales_order.transaction_date = "2016-01-01"
sales_order.delivery_date = "2016-01-02"
sales_order.insert()
self.assertEquals(quotation.get("items")[0].rate, rate_with_margin)
sales_order.submit()
dn = make_delivery_note(sales_order.name)
self.assertEquals(quotation.get("items")[0].rate, rate_with_margin)
dn.save()
si = make_sales_invoice(sales_order.name)
self.assertEquals(quotation.get("items")[0].rate, rate_with_margin)
si.save()
test_records = frappe.get_test_records('Quotation')
def get_quotation_dict(customer=None, item_code=None):
if not customer:
customer = '_Test Customer'
if not item_code:
item_code = '_Test Item'
return {
'doctype': 'Quotation',
'customer': customer,
'items': [
{
'item_code': item_code,
'qty': 1,
'rate': 100
}
]
}
|
CloverHealth/airflow
|
airflow/hooks/mysql_hook.py
|
Python
|
apache-2.0
| 4,635
| 0.000216
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import MySQLdb
import MySQLdb.cursors
from airflow.hooks.dbapi_hook import DbApiHook
class MySqlHook(DbApiHook):
"""
Interact with MySQL.
You can specify charset in the extra field of your connection
as ``{"charset": "utf8"}``. Also you can choose cursor as
``{"cursor": "SSCursor"}``. Refer to the MySQLdb.cursors for more details.
"""
conn_name_attr = 'mysql_conn_id'
default_conn_name = 'mysql_default'
supports_autocommit = True
def __init__(self, *args, **kwargs):
super(MySqlHook, self).__init__(*args, **kwargs)
self.schema = kwargs.pop("
|
schema", None)
def set_autocommit(self, conn, autocommit):
"""
MySql connection sets autocommit in a different way.
"""
conn.autocommit(autocommit)
def get_autocommit(self, conn):
"""
MySql connection gets autocommit in a different wa
|
y.
:param conn: connection to get autocommit setting from.
:type conn: connection object.
:return: connection autocommit setting
        :rtype: bool
"""
return conn.get_autocommit()
def get_conn(self):
"""
Returns a mysql connection object
"""
conn = self.get_connection(self.mysql_conn_id)
conn_config = {
"user": conn.login,
"passwd": conn.password or '',
"host": conn.host or 'localhost',
"db": self.schema or conn.schema or ''
}
if not conn.port:
conn_config["port"] = 3306
else:
conn_config["port"] = int(conn.port)
if conn.extra_dejson.get('charset', False):
conn_config["charset"] = conn.extra_dejson["charset"]
if (conn_config["charset"]).lower() == 'utf8' or\
(conn_config["charset"]).lower() == 'utf-8':
conn_config["use_unicode"] = True
if conn.extra_dejson.get('cursor', False):
if (conn.extra_dejson["cursor"]).lower() == 'sscursor':
conn_config["cursorclass"] = MySQLdb.cursors.SSCursor
elif (conn.extra_dejson["cursor"]).lower() == 'dictcursor':
conn_config["cursorclass"] = MySQLdb.cursors.DictCursor
elif (conn.extra_dejson["cursor"]).lower() == 'ssdictcursor':
conn_config["cursorclass"] = MySQLdb.cursors.SSDictCursor
local_infile = conn.extra_dejson.get('local_infile', False)
if conn.extra_dejson.get('ssl', False):
conn_config['ssl'] = conn.extra_dejson['ssl']
if local_infile:
conn_config["local_infile"] = 1
conn = MySQLdb.connect(**conn_config)
return conn
def bulk_load(self, table, tmp_file):
"""
Loads a tab-delimited file into a database table
"""
conn = self.get_conn()
cur = conn.cursor()
cur.execute("""
LOAD DATA LOCAL INFILE '{tmp_file}'
INTO TABLE {table}
""".format(**locals()))
conn.commit()
def bulk_dump(self, table, tmp_file):
"""
Dumps a database table into a tab-delimited file
"""
conn = self.get_conn()
cur = conn.cursor()
cur.execute("""
SELECT * INTO OUTFILE '{tmp_file}'
FROM {table}
""".format(**locals()))
conn.commit()
@staticmethod
def _serialize_cell(cell, conn):
"""
MySQLdb converts an argument to a literal
when passing those separately to execute. Hence, this method does nothing.
:param cell: The cell to insert into the table
:type cell: object
:param conn: The database connection
:type conn: connection object
:return: The same cell
:rtype: object
"""
return cell
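# --- Usage sketch (editorial addition, not part of the original module). ---
# It assumes an Airflow connection named 'mysql_default' already exists and,
# optionally, carries the extras described in the class docstring, e.g.
# {"charset": "utf8", "cursor": "SSCursor"}. The schema, table and file names
# below are placeholders.
def _example_mysql_hook_usage():
    hook = MySqlHook(mysql_conn_id='mysql_default', schema='example_db')
    rows = hook.get_records("SELECT 1")                         # inherited from DbApiHook
    hook.bulk_load("example_table", "/tmp/example_table.tsv")   # tab-delimited input
    return rows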
|
amandolo/ansible-modules-core
|
commands/raw.py
|
Python
|
gpl-3.0
| 1,845
| 0.001626
|
# this is a virtual module that is entirely implemented server side
DOCUMENTATION = '''
---
module: raw
version_added: historical
short_description: Executes a low-down and dirty SSH command
options:
free_form:
description:
- the raw module takes a free form command to run
required: true
executable:
description:
- change the shell used to execute the command. Should be an absolute path to the executable.
required: false
version_added: "1.0"
description:
- Executes a low-down and dirty SSH command, not going through the module
subsystem. This is useful and should only be done in two cases. The
first case is installing C(python-simplejson) on older (Python 2.4 and
before) hosts that need it as a dependency to run modules, since nearly
all core modules require it. Another is speaking to any devices such as
routers that do not have any Python installed. In any other case, using
the M(shell) or M(command) module is much more appropriate. Arguments
given to M(raw) are run directly through the configured remote shell.
Standard output, error output and return code are returned when
available. There is no change handler support for this module.
- This module does not require python on the remote system, much like
the M(script) module.
notes:
- If you want to execute a command securely and predictably, it may be
better to use the M(command) module instead. Best practices when writing
playbooks will follow the trend of using M(command) unless M(shell) is
explicitly required. When running ad-hoc commands, use your best
judgement.
author:
- Ansible Core Team
- Michael DeHaan
'''
EXAMPLES = '''
# Bootstrap a legacy python 2.4 host
- raw: yum -y install python-simplejson
'''
|
sebalander/sebaPhD
|
dev/intrinsicCalibFullPyMC3.py
|
Python
|
bsd-3-clause
| 80,011
| 0.005601
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Jan 2018
do metropolis sampling to estimate PDF of chessboard calibration. this involves
intrinsic and extrinsic parameters, so it's a very high dimensional search
space (before it was only intrinsic)
@author: sebalander
"""
# %%
# import glob
import os
import numpy as np
import scipy.stats as sts
import matplotlib.pyplot as plt
from copy import deepcopy as dc
from importlib import reload
import corner
import time
# %env THEANO_FLAGS='device=cuda, floatX=float32'
import theano
import theano.tensor as T
import pymc3 as pm
import scipy as sc
import seaborn as sns
import scipy.optimize as opt
import sys
sys.path.append("/home/sebalander/Code/sebaPhD")
from calibration import calibrator as cl
from dev import bayesLib as bl
import pickle
from calibration.calibrator import datafull, real, realdete, realbalk, realches
from calibration.calibrator import synt, syntextr, syntches, syntintr
print('libraries imported')
# %%
def stereoFromFisheye(distcoeffs):
'''
takes the 4 distcoeffs of the opencv fisheye model and returns the
corresponding k stereographic parameter such that on average they are equal
(integrating over the angle 0 to pi/2)
from wolfram site:
https://www.wolframalpha.com/input/?i=integral+of+K*tan(x%2F2)+-+(x%2Bk1*x%5E3%2Bk2*x%5E5%2Bk3*x%5E7%2Bk4*x%5E9)+from+0+to+pi%2F2
'''
piPow = np.pi**(np.arange(1,6)*2)
numAux = np.array([3840, 480, 80, 15, 3])
fisheyeIntegral = np.sum(piPow * numAux) / 30720
return fisheyeIntegral / np.log(2)
'''
to see how dispersed the step of each proposal is. since the proposals are
drawn from an n-dimensional gaussian pdf, much of the sample volume ends up
concentrated around a certain radius. there is a trade-off as the volume
grows with the radius
'''
def radiusStepsNdim(n):
'''
returns the mode, the mean and the std deviation of the step radius when
sampling from hyperdimensional gaussians of sigma 1
'''
# https://www.wolframalpha.com/input/?i=integrate+x%5E(n-1)+exp(-x%5E2%2F2)+from+0+to+infinity
# integral_0^∞ x^(n - 1) exp(-x^2/2) dx = 2^(n/2 - 1) Γ(n/2) for Re(n)>0
Inorm = 2**(n / 2 - 1) * sc.special.gamma(n / 2)
# https://www.wolframalpha.com/input/?i=integrate+x%5En+exp(-x%5E2%2F2)+from+0+to+infinity
# integral_0^∞ x^n exp(-x^2/2) dx = 2^((n - 1)/2) Γ((n + 1)/2) for Re(n)>-1
ExpectedR = 2**((n - 1) / 2) * sc.special.gamma((n + 1) / 2)
# https://www.wolframalpha.com/input/?i=integrate+x%5E(n%2B1)+exp(-x%5E2%2F2)+from+0+to+infinity
# integral_0^∞ x^(n + 1) exp(-x^2/2) dx = 2^(n/2) Γ(n/2 + 1) for Re(n)>-2
ExpectedR2 = 2**(n / 2) * sc.special.gamma(n / 2 + 1)
ModeR = np.sqrt(n - 1)
# normalize the integrals:
ExpectedR /= Inorm
ExpectedR2 /= Inorm
DesvEstR = np.sqrt(ExpectedR2 - ExpectedR**2)
return np.array([ModeR, ExpectedR, DesvEstR])
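# %% quick numerical check (editorial sketch, not part of the original script):
# the closed-form moments returned by radiusStepsNdim should match the radius
# statistics of samples drawn from an n-dimensional standard gaussian. n, the
# sample count and the seed below are arbitrary choices.
def _check_radius_moments(n=10, nSamples=200000, seed=0):
    rng = np.random.RandomState(seed)
    r = np.linalg.norm(rng.randn(nSamples, n), axis=1)
    mode, mean, std = radiusStepsNdim(n)
    print('analytic  mean=%.4f  std=%.4f' % (mean, std))
    print('empirical mean=%.4f  std=%.4f' % (r.mean(), r.std()))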
# %% LOAD DATA
# input
plotCorners = False
#import collections as clt
fullDataFile = "./resources/fullDataIntrExtr.npy"
dataFile = open(fullDataFile, "rb")
fullData = pickle.load(dataFile)
dataFile.close()
# cam can be one of ['vca', 'vcaWide', 'ptz'], these are the available data
camera = fullData.Synt.Intr.camera
#modelos = ['poly', 'rational', 'fisheye', 'stereographic']
model = fullData.Synt.Intr.model
Ns = [2, 3]
nPt = fullData.Synt.Ches.nPt  # points per image
nIm = fullData.Synt.Ches.nIm  # number of images
# ## load data
stdPix = 1.0
imagePoints = fullData.Synt.Ches.imgPt + stdPix * fullData.Synt.Ches.imgNse
chessboardModel = fullData.Synt.Ches.objPt
imgSize = fullData.Synt.Intr.s
# images = glob.glob(imagesFolder+'*.png')
# Input/output parameters of the calibration
objpoints2D = np.reshape([chessboardModel[:, :2]] * nIm, (nIm, nPt, 2))
fkVT = np.concatenate([fullData.Synt.Intr.uv, [fullData.Synt.Intr.k]]) #
# load model specific data
cameraMatrixT, distCoeffsT = bl.flat2int(fkVT, Ns, model)
rVecsT = fullData.Synt.Ches.rVecs
tVecsT = fullData.Synt.Ches.tVecs
print('raw data loaded')
# %%
# flatten the initial values
XintT, Ns = bl.int2flat(cameraMatrixT, distCoeffsT, model)
XextListT = np.array([bl.ext2flat(rVecsT[i], tVecsT[i]) for i in range(nIm)])
xAllT = np.concatenate([XintT, XextListT.reshape(-1)])
nIntr = XintT.shape[0]
nExtr = nIm * 6
nFree = nExtr + nIntr
nData = nIm * nPt
# 0.1pix as image std
# https://stackoverflow.com/questions/12102318/opencv-findcornersubpix-precision
# increase to 1pix because otherwise the posterior comes out too odd
Ci = np.repeat([stdPix**2 * np.eye(2)], nData, axis=0).reshape(nIm, nPt, 2, 2)
Crt = np.repeat([False], nIm) # no RT error
# output file
#intrinsicParamsOutFile = imagesFolder + camera + model + "intrinsicParamsML"
#intrinsicParamsOutFile = intrinsicParamsOutFile + str(stdPix) + ".npy"
## try with a couple of images
#for j in range(0, nIm, 3):
# xm, ym, Cm = cl.inverse(imagePoints[j, 0], rVecs[j], tVecs[j],
# cameraMatrix,
# distCoeffs, model, Cccd=Ci[j], Cf=False, Ck=False,
# Crt=False, Cfk=False)
# print(xm, ym, Cm)
# measured data, observed experimentally in the world and in the image
yObs = objpoints2D.reshape(-1)
print('data formatted')
# %% test the error computed as before
params = dict()
params["imagePoints"] = imagePoints
params["model"] = model
params["chessboardModel"] = chessboardModel
params["Cccd"] = Ci
params["Cf"] = False
params["Ck"] = False
params["Crt"] = [False] * nIm
params["Cfk"] = False
reload(bl)
reload(cl)
Eint = bl.errorCuadraticoInt(XintT, Ns, XextListT, params)
# %% define the function to minimize
def objective(xAll):
Xint = xAll[:Ns[1]]
XextList = xAll[Ns[1]:].reshape((-1, 6))
Eint = bl.errorCuadraticoInt(Xint, Ns, XextList, params)
return np.sum(Eint)
objective(xAllT)
# %% result of optimisation:
#xAllOpt = np.array([
# 8.16472244e+02, 4.72646126e+02, 7.96435717e+02, -1.77272668e-01,
# 7.67281179e-02, -9.03548594e-02, -3.33339378e+00, -5.54790259e+00,
# 5.99651705e+00, 9.30295123e-01, 6.21785570e-02, -6.96743493e-02,
# -3.13558219e+00, -4.25053069e+00, 3.87610434e+00, 9.19901975e-01,
# -3.59066370e-01, 9.74042501e-01, 9.47115482e+00, -6.02396770e+00,
# 3.96904837e+00, 1.36935625e-01, -5.42713201e-01, 6.43889150e-01,
# -6.55503634e+00, -4.13393364e+00, 3.64817246e+00, -2.13080979e-02,
# 8.53474025e-01, 1.11909981e-03, -3.06900646e-01, -2.53776109e+00,
# 7.57360018e+00, 8.52154654e-01, 5.11584688e-01, 8.97896445e-01,
# 7.68107905e+00, -6.95803581e+00, 5.21527990e+00, 6.29435124e-01,
# 9.12272513e-01, 1.83799323e+00, 1.85151971e+01, 6.08915024e+00,
# 7.66699884e+00, -7.78003463e-01, 6.73711760e-02, 1.10490676e+00,
# -5.06829679e+00, -3.74958656e+00, 1.11505467e+01, 2.54343546e-02,
# 4.05416594e-03, -1.58361597e+00, -2.48236025e+00, 4.84091669e+00,
# 5.20689047e+00, -4.43952330e-02, -7.01522701e-02, -3.09646881e+00,
# 3.89079512e+00, 4.85194798e+00, 5.49819233e+00, 1.69406316e-01,
# 5.40372043e-02, 3.00215179e-02, -3.90828973e+00, 1.99912411e+00,
# 5.13066284e+00, 6.62491305e-01, 1.92824964e-01, 2.29515447e-01,
# -9.10653837e-01, -2.04052824e+01, 1.42154590e+01, -6.37592106e-02,
# -2.53455685e-02, -3.58656047e-02, -5.00455800e+00, -8.12217250e-01,
# 1.95699162e+01, -1.32713680e+00, -1.01306097e-01, 2.89174569e+00,
# -1.77829953e+01, -3.26660847e+00, 1.20506102e+01, -1.55204173e-01,
# -1.05546726e+00, 1.68382535e+00, -1.88353255e+00, -5.31860869e+00,
# 7.82312257e+00, -4.37144137e-01, -5.52910372e-01, 1.37146393e+00,
# -5.16628075e+00, -5.62202475e+00, 1.23453259e+01, -2.16223315e-01,
# 8.37239606e-01, 1.69334147e+00, 9.75590372e-01, 1.57674791e+01,
# 2.01951559e+01, -1.99116907e-01, 1.10314769e+00, 5.88264305e-01,
# 1.61820088e+01, 6.60128144e+00, 1.72941117e+01, 5.40902817e-02,
# 7.62769730e-02, -1.49449032e-01, -2.63389072e+00, -1.06080724e+01,
# 1.82899752e+01, -8.56471354e-03, -2.45078027e
|
mwiebe/blaze
|
blaze/io/storage.py
|
Python
|
bsd-3-clause
| 7,420
| 0.000809
|
"""URI API
This file contains the part of the blaze API dealing with URIs. The
"URI API". In Blaze persistence is provided by the means of this URI
API, that allows specifying a "location" for an array as an URI.
The URI API allows:
- saving existing arrays to an URI.
- loading an array into memory from an URI.
- opening an URI as an array.
- dropping the contents of a given URI.
"""
from __future__ import absolute_import, division, print_function
import os
import warnings
from datashape import to_numpy, to_numpy_dtype
import blz
from ..py2help import urlparse
from ..datadescriptor import (BLZDataDescriptor, CSVDataDescriptor,
JSONDataDescriptor, HDF5DataDescriptor)
from ..objects.array import Array
# ----------------------------------------------------------------------
# Some helper functions to workaround quirks
# XXX A big hack for some quirks in current datashape. The next deals
# with the cases where the shape is not present like in 'float32'
def _to_numpy(ds):
res = to_numpy(ds)
res = res if type(res) is tuple else ((), to_numpy_dtype(ds))
return res
class Storage(object):
"""
Storage(uri, mode='a', permanent=True)
Class to host parameters for persistence properties.
Parameters
----------
uri : string
The URI where the data set will be stored.
mode : string ('r'ead, 'a'ppend)
The mode for creating/opening the storage.
permanent : bool
Whether this file should be permanent or not.
Examples
--------
>>> store = Storage('blz-store.blz')
"""
SUPPORTED_FORMATS = ('json', 'csv', 'blz', 'hdf5')
@property
def uri(self):
"""The URI for the data set."""
return self._uri
@property
def mode(self):
"""The mode for opening the storage."""
return self._mode
@property
def format(self):
"""The format used for storage."""
return self._format
@property
def permanent(self):
"""Whether this file should be permanent or not."""
return self._permanent
@property
def path(self):
"""Returns a blz path for a given uri."""
return self._path
def __init__(self, uri, mode='a', permanent=True, format=None):
if not isinstance(uri, str):
raise ValueError("`uri` must be a string.")
self._uri = uri
self._format = self._path = ""
self._set_format_and_path_from_uri(uri, format)
self._mode = mode
if not permanent:
raise ValueError(
"`permanent` set to False is not supported yet.")
self._permanent = permanent
def __repr__(self):
args = ["uri=%s" % self._uri, "mode=%s" % self._mode]
return '%s(%s)' % (self.__class__.__name__, ', '.join(args))
def _set_format_and_path_from_uri(self, uri, format=None):
"""Parse the uri into the format and path"""
up = urlparse.urlparse(self._uri)
if up.scheme in self.SUPPORTED_FORMATS:
warnings.warn("Blaze no longer uses file type in network protocol field of the uri. "
"Please use format kwarg.", DeprecationWarning)
self._path = up.netloc + up.path
if os.name == 'nt' and len(up.scheme) == 1:
# This is a workaround for raw windows paths like
# 'C:/x/y/z.csv', for which urlparse parses 'C' as
# the scheme and '/x/y/z.csv' as the path.
self._path = uri
if not self._path:
raise ValueError("Unable to extract path from uri: %s", uri)
_, extension = os.path.splitext(self._path)
extension = extension.strip('.')
# Support for deprecated format in url network scheme
format_from_up = None
if up.scheme in self.SUPPORTED_FORMATS:
format_from_up = up.scheme
if format and format_from_up and format_from_up != format:
raise ValueError("URI scheme and file format do not match. Given uri: %s, format: %s" %
(up.geturl(), format))
# find actual format
if format:
self._format = format
elif format_from_up:
self._format = format_from_up
elif extension:
self._format = extension
else:
raise ValueError("Cannot determine format from: %s" % uri)
if self._format not in self.SUPPORTED_FORMATS:
raise ValueError("`format` '%s' is not supported." % self._format)
def _persist_convert(persist):
if not isinstance(persist, Storage):
if isinstance(persist, str):
persist = Storage(persist)
else:
raise ValueError('persist argument must be either a'
' URI string or Storage object')
return persist
# ----------------------------------------------------------------------
# The actual API specific for persistence.
# Only BLZ, HDF5, CSV and JSON formats are supported currently.
def from_blz(persist, **kwargs):
"""Open an existing persistent BLZ array.
Parameters
----------
persist : a Storage instance
The Storage instance specifies, among other things, path of
where the array is stored.
kwargs : a dictionary
Put here different parameters depending on the format.
Returns
-------
out: a concrete blaze array.
"""
persist = _persist_convert(persist)
d = blz.barray(rootdir=persist.path, **kwargs)
dd = BLZDataDescriptor(d)
return Array(dd)
def from_csv(persist, **kwargs):
"""Open an existing persistent CSV array.
Parameters
----------
persist : a Storage instance
The Storage instance specifies, among other things, path of
where the array is stored.
kwargs : a dictionary
Put here different parameters depending on the format.
Returns
-------
out: a concrete blaze array.
"""
persist = _persist_convert(persist)
dd = CSVDataDescriptor(persist.path, **kwargs)
return Array(dd)
def from_json(persist, **kwargs):
"""Open an existing persistent JSON array.
Parameters
----------
persist : a Storage instance
The Storage instance specifies, among other things, path of
where the array is stored.
kwargs : a dictionary
Put here different parameters depending on the format.
Returns
-------
out: a concrete blaze array.
"""
persist = _persist_convert(persist)
dd = JSONDataDescriptor(persist.path, **kwargs)
return Array(dd)
def from_hdf5(persist, **kwargs):
"""Open an existing persistent HDF5 array.
Parameters
----------
persist : a Storage instance
The Storage instance specifies, among other things, path of
where the array is stored.
kwargs : a dictionary
Put here different parameters depending on the format.
Returns
-------
out: a concrete blaze array.
"""
persist = _persist_convert(persist)
dd = HDF5DataDescriptor(persist.path, **kwargs)
return Array(dd)
def drop(persist):
"""Remove a persistent storage."""
persist = _persist_convert(persist)
if persist.format == 'blz':
from shutil import rmtree
rmtree(persist.path)
elif persist.format in ('csv', 'json', 'hdf5'):
import os
os.unlink(persist.path)
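# ----------------------------------------------------------------------
# Usage sketch (editorial addition, not part of the original module).
# Illustrates the URI API described above; the path is a placeholder and the
# CSV file is assumed to exist already.
def _example_storage_usage():
    store = Storage('/tmp/example-data.csv', mode='r', format='csv')
    arr = from_csv(store)   # open the CSV as a concrete blaze array
    # drop(store) would remove the underlying file again afterwards
    return arr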
|
lavalamp-/ws-backend-community
|
wselasticsearch/query/all.py
|
Python
|
gpl-3.0
| 754
| 0.001326
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from .base import BaseElasticsearchQuery
class AllElasticsearchQuery(BaseElasticsearchQuery):
"""
This is an Elasticsearch query class that is meant to query all of the document types in
a given index.
"""
# Class Members
# Instantiation
# Static Methods
# Class Methods
@classmethod
def get_queried_class(cls):
return None
# Public Methods
# Protected Methods
def _validate_queryable_field(self, field):
pass
# Private Methods
# Properties
@property
def doc_type(self):
return None
@property
def queryable_fields(self):
return []
# Representation and Comparison
|
hall-lab/svtyper
|
tests/test_singlesample.py
|
Python
|
mit
| 3,138
| 0.001912
|
from .context import singlesample as s
import unittest, os, subprocess
HERE = os.path.dirname(__file__)
in_vcf = os.path.join(HERE, "data/example.vcf")
in_bam = os.path.join(HERE, "data/NA12878.target_loci.sorted.bam")
lib_info_json = os.path.join(HERE, "data/NA12878.bam.json")
out_vcf = os.path.join(HERE, "data/out.vcf")
expected_out_vcf = os.path.join(HERE, "data/example.gt.vcf")
class TestIntegration(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
if os.path.exists(out_vcf):
os.remove(out_vcf)
def test_serial_integration(self):
with open(in_vcf, "r") as inf, open(out_vcf, "w") as outf:
s.sso_genotype(bam_string=in_bam,
vcf_in=inf,
vcf_out=outf,
min_aligned=20,
split_weight=1,
disc_weight=1,
num_samp=1000000,
lib_info_path=lib_info_json,
debug=False,
ref_fasta=None,
sum_quals=False,
max_reads=1000,
max_ci_dist=1e10,
cores=None,
batch_size=1000)
fail_msg = "did not find output vcf '{}' after running sv_genotype".format(out_vcf)
self.assertTrue(os.path.exists(out_vcf), fail_msg)
fail_msg = ("output vcf '{}' "
"did not match expected "
"output vcf '{}'").format(out_vcf, expected_out_vcf)
self.assertTrue(self.diff(), fail_msg)
def test_parallel_integration(self):
with open(in_vcf, "r") as inf, open(out_vcf, "w") as outf:
s.sso_genotype(bam_string=in_bam,
vcf_in=inf,
vcf_out=outf,
min_aligned=20,
split_weight=1,
disc_weight=1,
num_samp=1000000,
lib_info_path=lib_info_json,
debug=False,
ref_fasta=None,
sum_quals=False,
max_reads=1000,
max_ci_dist=1e10,
cores=1,
batch_size=1000)
fail_msg = "did not find output vcf '{}' after running sv_genotype".format(out_vcf)
self.assertTrue(os.path.exists(out_vcf), fail_msg)
fail_msg = ("output vcf '{}' "
"did not match expected "
"output vcf '{}'").format(out_vcf, expected_out_vcf)
self.assertTrue(self.diff(), fail_msg)
def diff(self):
cmd = ['diff', "-I", "^##fileDate=", expected_out_vcf, out_vcf]
rv = None
with open(os.devnull, "w") as f:
# rv = subprocess.call(cmd, stdout=f)
rv = subprocess.call(cmd)
result = rv == 0
return result
if __name__ == '__main__':
unittest.main(verbosity=2)
|
niwinz/cobrascript
|
cobra/base.py
|
Python
|
bsd-3-clause
| 3,602
| 0.003054
|
# -*- coding: utf-8 -*-
import argparse
import ast
import functools
import io
import sys
from . import ast as ecma_ast
from . import compiler
from . import translator
from . import utils
def parse(data:str) -> object:
"""
Given a string with python source code,
returns a python ast tree.
"""
return ast.parse(data)
def translate(data:object, **kwargs) -> object:
"""
Given a python ast tree, translate it to
ecma ast.
"""
return translator.TranslateVisitor(**kwargs).translate(data)
def compile(data:str, translate_options=None, compile_options=None) -> str:
if translate_options is None:
translate_options = {}
if compile_options is None:
compile_options = {}
# Normalize
data = utils.normalize(data)
# Parse python to ast
python_tree = parse(data)
# Translate python ast to js ast
ecma_tree = translate(python_tree, **translate_options)
# Compile js ast to js string
return compiler.ECMAVisitor(**compile_options).visit(ecma_tree)
def _read_file(path:str):
with io.open(path, "rt") as f:
return f.read()
def _compile_files(paths:list, join=False, translate_options=None, compile_options=None) -> str:
_compile = functools.partial(compile, translate_options=translate_options,
compile_options=compile_options)
if join:
return _compile("\n".join(_read_file(path) for path in paths))
return "\n\n".join(_compile(_read_file(path)) for path in paths)
def main():
parser = argparse.ArgumentParser(prog="cobrascript",
description="Python to Javascript translator.")
parser.add_argument("files", metavar="input.py", type=str, nargs="+",
help="A list of python files for translate.")
parser.add_argument("-g", "--debug", action="store_true", default=False,
help="Activate debug mode (only for developers).")
parser.add_argument("-w", "--warnings", action="store_true", default=False,
help="Show static analizer warnings.")
parser.add_argument("-o", "--output", action="store", type=str, metavar="outputfile.js",
help="Set output file (by default is stdout).")
parser.add_argument("-b", "--bare", action="store_true", default=False,
help="Compile without a toplevel closure.")
parser.add_argument("-j", "--join", action="store_true", default=False,
help="Join python files before compile.")
parser.add_argument("--indent", action="store", type=int, default=4,
help="Set default output indentation level.")
parser.add_argument("--auto-camelcase", action="store_true", default=False,
dest="auto_camelcase", help="Convert all identifiers to camel case.")
parsed = parser.parse_args()
reader_join = True if parsed.join else False
translate_options = {"module_as_closure": not parsed.bare,
"debug": parsed.debug,
"auto_camelcase": parsed.auto_camelcase}
compile_options = {"indent_chars": int(parsed.indent/2)}
compiled_data = _compile_files(parsed.files, join=reader_join,
translate_options=translate_options,
compile_options=compile_options)
if parsed.output:
with io.open(parsed.output, "wt") as f:
print(compiled_data, file=f)
else:
print(compiled_data, file=sys.stdout)
return 0
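# Usage sketch (editorial addition, not part of the original module): compile()
# defined above turns a small Python snippet into JavaScript source. The option
# values are arbitrary examples of the keys used in main().
def _example_compile():
    js = compile("x = 1\ny = x + 1\n",
                 translate_options={"module_as_closure": False},
                 compile_options={"indent_chars": 2})
    return js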
|
RoozbehFarhoodi/McNeuron
|
train2.py
|
Python
|
mit
| 16,582
| 0.000422
|
"""Collection of functions to train the hierarchical model."""
from __future__ import print_function
import numpy as np
from keras.optimizers import RMSprop, Adagrad, Adam
import models2 as models
import batch_utils
import plot_utils
import matplotlib.pyplot as plt
def clip_weights(model, weight_constraint):
"""
Clip weights of a keras model to be bounded by given constraints.
Parameters
----------
model: keras model object
model for which weights need to be clipped
weight_constraint:
Returns
-------
model: keras model object
model with clipped weights
"""
for l in model.layers:
if True: # 'dense' in l.name:
weights = l.get_weights()
weights = \
[np.clip(w, weight_constraint[0],
weight_constraint[1]) for w in weights]
l.set_weights(weights)
return model
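# Usage sketch (editorial addition, not part of the original file): clipping is
# applied layer by layer; the tiny Keras model below only exists to make the
# call concrete and is not part of the training pipeline.
def _example_clip_weights():
    from keras.models import Sequential
    from keras.layers import Dense
    model = Sequential([Dense(4, input_dim=3), Dense(1)])
    return clip_weights(model, weight_constraint=[-0.01, 0.01])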
def save_model_weights():
"""
cool stuff.
"""
def train_model(training_data=None,
n_levels=3,
n_nodes=[10, 20, 40],
input_dim=100,
n_epochs=25,
batch_size=64,
n_batch_per_epoch=100,
d_iters=20,
lr_discriminator=0.005,
lr_generator=0.00005,
weight_constraint=[-0.01, 0.01],
rule='mgd',
train_one_by_one=False,
train_loss='wasserstein_loss',
verbose=True):
"""
Train the hierarchical model.
Progressively generate trees with
more and more nodes.
Parameters
----------
training_data: dict of dicts
each inner dict is an array
'geometry': 3-d arrays (locations)
n_samples x n_nodes - 1 x 3
'morphology': 2-d arrays
n_samples x n_nodes - 1 (parent sequences)
example: training_data['geometry']['n20'][0:10, :, :]
gives the geometry for the first 10 neurons
training_data['geometry']['n20'][0:10, :]
gives the parent sequences for the first 10 neurons
here, 'n20' indexes a key corresponding to
20-node downsampled neurons.
n_levels: int
number of levels in the hierarchy
n_nodes: list of length n_levels
specifies the number of nodes for each level.
should be consistent with training data.
input_dim: int
dimensionality of noise input
n_epochs:
number of epochs over training data
batch_size:
batch size
n_batch_per_epoch: int
number of batches per epoch
d_iters: int
number of iterations to train discriminator
lr_discriminator: float
learning rate for optimization of discriminator
lr_generator: float
learning rate for optimization of generator
weight_constraint: array
upper and lower bounds of weights (to clip)
verbose: bool
print relevant progress throughout training
Returns
-------
geom_model: list of keras model objects
geometry generators for each level
cond_geom_model: list of keras model objects
conditional geometry generators for each level
morph_model: list of keras model objects
morphology generators for each level
cond_morph_model: list of keras model objects
conditional morphology generators for each level
disc_model: list of keras model objects
discriminators for each level
gan_model: list of keras model objects
discriminators stacked on generators for each level
"""
# ###################################
# Initialize models at all levels
# ###################################
geom_model = list()
cond_geom_model = list()
morph_model = list()
cond_morph_model = list()
disc_model = list()
gan_model = list()
for level in range(n_levels):
# Discriminator
d_model = models.discriminator(n_nodes_in=n_nodes[level],
batch_size=batch_size,
train_loss=train_loss)
# Generators and GANs
# If we are in the first level, no context
if level == 0:
g_model, cg_model, m_model, cm_model = \
models.generator(use_context=False,
n_nodes_in=n_nodes[level-1],
n_nodes_out=n_nodes[level],
batch_size=batch_size)
stacked_model = \
models.discriminator_on_generators(g_model,
cg_model,
m_model,
cm_model,
d_model,
conditioning_rule=rule,
input_dim=input_dim,
n_nodes_in=n_nodes[level-1],
n_nodes_out=n_nodes[level],
use_context=False)
# In subsequent levels, we need context
else:
g_model, cg_model, m_model, cm_model = \
models.generator(use_context=True,
n_nodes_in=n_nodes[level-1],
n_nodes_out=n_nodes[level],
batch_size=batch_size)
stacked_model = \
models.discriminator_on_generators(g_model,
cg_model,
m_model,
cm_model,
d_model,
conditioning_rule=rule,
input_dim=input_dim,
n_nodes_in=n_nodes[level-1],
n_nodes_out=n_nodes[level],
use_context=True)
# Collect all models into a list
disc_model.append(d_model)
geom_model.append(g_model)
cond_geom_model.append(cg_model)
morph_model.append(m_model)
cond_morph_model.append(cm_model)
gan_model.append(stacked_model)
# ###############
# Optimizers
# ###############
optim_d = Adagrad() # RMSprop(lr=lr_discriminator)
optim_g = Adagrad() # RMSprop(lr=lr_generator)
# ##############
# Train
# ##############
for level in range(n_levels):
# ---------------
# Compile models
# ---------------
g_model = geom_model[level]
m_model = morph_model[level]
cg_model = cond_geom_model[level]
cm_model = cond_morph_model[level]
d_model = disc_model[level]
stacked_model = gan_model[level]
g_model.compile(loss='mse', optimizer=optim_g)
m_model.compile(loss='mse', optimizer=optim_g)
cg_model.compile(loss='mse', optimizer=optim_g)
cm_model.compile(loss='mse', optimizer=optim_g)
d_model.trainable = False
if train_loss == 'wasserstein_loss':
stacked_model.compile(loss=models.wasserstein_loss,
optimizer=optim_g)
else:
stacked_model.compile(loss='binary_crossentropy',
optimizer=optim_g)
d_model.trainable = True
if train_loss == 'wasserstein_loss':
d_model.compile(loss=models.wasserstein_loss,
optimizer=optim_d)
else:
d_model.compile(loss='binary_crossentropy',
optimizer=optim_d)
if verbose:
print("")
print(20*"=")
print("Level #{0}".format(level))
print(20*"=")
# ---------
|
RBT-itsec/TLS-SAK
|
lib/tls/tlsconnection.py
|
Python
|
gpl-3.0
| 5,605
| 0.002319
|
# TLS-SAK - TLS Swiss Army Knife
# https://github.com/RBT-itsec/TLS-SAK
# Copyright (C) 2016 by Mirko Hansen / ARGE Rundfunk-Betriebstechnik
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# TLS SAK imports
from lib.connection import Connection
from lib.tls import TLS_VERSIONS
from lib.tls.tlsparameter import TLS_CipherSuite
from lib.tls.tlsparameter import TLS_CompressionMethod
from lib.tls.tlspkg import TLS_pkg
from lib.tls.tlspkg import TLS_pkg_Alert
from lib.tls.tlspkg import TLS_pkg_Handshake
from lib.tls.tlspkg import TLS_Handshake_pkg_Certificate
from lib.tls.tlspkg import TLS_Handshake_pkg_ClientHello
from lib.tls.tlspkg import TLS_Handshake_pkg_ServerHello
from lib.tls.tlspkg import TLS_Handshake_pkg_ServerHelloDone
from lib.tls.tlspkg import TLS_Handshake_pkg_ServerKeyExchange
from lib.tls.tlsexceptions import TLS_Alert_Exception
from lib.tls.tlsexceptions import TLS_Exception
from lib.tls.tlsexceptions import TLS_Parser_Exception
from lib.tls.tlsexceptions import TLS_Protocol_Exception
class TLS_Connection:
def __init__(self, connection):
if not issubclass(type(connection), Connection):
raise TLS_Exception('connection has to be of type Connection for TLS connection')
self.connection = connection
self.buffer = b''
self.cipher_suites = []
self.compression_methods = []
self.state = None
# ---- connection property setters ----
def setAvailableCipherSuites(self, cipher_suites):
# validate parameter
if type(cipher_suites) is not list:
raise TLS_Exception('cipher_suites has to be a list of cipher suites')
for cs in cipher_suites:
if type(cs) is not TLS_CipherSuite:
raise TLS_Exception('cipher_suites has to be a list of cipher suites')
self.cipher_suites = cipher_suites
def setAvailableCompressionMethods(self, compression_methods):
# validate parameter
if type(compression_methods) is not list:
raise TLS_Exception('compression_methods has to be a list of compression methods')
for cm in compression_methods:
if type(cm) is not TLS_CompressionMethod:
raise TLS_Exception('compression_methods has to be a list of compression methods')
self.compression_methods = compression_methods
def setClientProtocolVersion(self, protocol_version):
# validate parameter
if type(protocol_version) is not str:
raise TLS_Exception('protocol_version has to be a string')
if protocol_version not in TLS_VERSIONS:
raise TLS_Exception('invalid protocol version in protocol_version')
self.client_protocol_version = protocol_version
# ---- connection property getters ----
def getChosenCipherSuite(self):
if hasattr(self, 'cipher_suite') and self.cipher_suite is not None:
return self.cipher_suite
return None
def getChosenCompressionMethod(self):
if hasattr(self, 'compression_method') and self.compression_method is not None:
return self.compression_method
return None
def getServerProtocolVersion(self):
if hasattr(self, 'server_protocol_version') and self.server_protocol_version is not None:
return self.server_protocol_version
return None
# ---- internal methods ----
def _readBuffer(self):
buffer = self.connection.recv()
self.buffer += buffer
def _readPackage(self):
while True:
try:
pkg = TLS_pkg.parser(self.buffer)
self.buffer = self.buffer[pkg.size():]
return pkg
except TLS_Parser_Exception as e:
self._readBuffer()
# ---- state machine ----
def connect(self):
client_hello = TLS_Handshake_pkg_ClientHello(version=self.client_protocol_version, cipher_suites=self.cipher_suites, compression_methods=self.compression_methods)
handshake_client_hello = TLS_pkg_Handshake(self.client_protocol_version, client_hello)
self.connection.send(handshake_client_hello.serialize())
serverHelloDoneReceived = False
while not serverHelloDoneReceived:
pkg = self._readPackage()
if type(pkg) is TLS_pkg_Alert:
raise TLS_Alert_Exception(pkg.getLevel(), pkg.getDescription())
elif type(pkg) is not TLS_pkg_Handshake:
raise TLS_Protocol_Exception('handshake package expected, but received other package')
# this is a handshake package
for hs in pkg.handshake:
if type(hs) is TLS_Handshake_pkg_ServerHello:
self.cipher_suite = hs.cipher_suite
self.compression_method = hs.compression_method
self.server_protocol_version = hs.version
elif type(hs) is TLS_Handshake_pkg_ServerHelloDone:
serverHelloDoneReceived = True
break
|
johnathanvidu/ppman
|
ppman/cli.py
|
Python
|
apache-2.0
| 181
| 0.005525
|
import sys
def main(args=None):
if args is None:
args = sys.argv[1:]
print('entry point - it worked')
if __name__ == "__main__":
sys.exit(main())
| |
mhuwiler/rootauto
|
bindings/pyroot/JupyROOT/__init__.py
|
Python
|
lgpl-2.1
| 170
| 0
|
from JupyROOT import cppcompleter, utils
if '__IPYTHON__' in __builtins__ and __IPYTHON__:
cppcompleter.load_ipython_extension(get_ipython())
utils.iPythonize()
|
mneedham91/PyPardot4
|
pypardot/objects/accounts.py
|
Python
|
mit
| 1,042
| 0.003839
|
class Accounts(object):
"""
A class to query and use Pardot accounts.
Account field reference: http://developer.pardot.com/kb/object-field-references/#account
"""
def __init__(self, client):
self.client = client
def read(self, **kwargs):
"""
Returns the data for the account of the currently logged in user.
"""
response = self._post(path='/do/read', params=kwargs)
return response
def _get(self, object_name='account', path=None, params=None):
"""GET
|
requests for the Account object."""
if params is None:
|
params = {}
response = self.client.get(object_name=object_name, path=path, params=params)
return response
def _post(self, object_name='account', path=None, params=None):
"""POST requests for the Account object."""
if params is None:
params = {}
response = self.client.post(object_name=object_name, path=path, params=params)
return response
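# Usage sketch (editorial addition, not part of the original module). Accounts
# only needs a client object exposing get()/post() in the shape used by
# _get/_post above; the PardotAPI client from this package is the usual choice.
def _example_accounts_usage(client):
    accounts = Accounts(client)
    return accounts.read()   # account data for the currently logged in user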
|
jhseu/tensorflow
|
tensorflow/python/ops/clip_ops.py
|
Python
|
apache-2.0
| 15,309
| 0.003789
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations for clipping (gradient, weight) tensors to min/max values."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.util import deprecation
from tensorflow.python.util import dispatch
from tensorflow.python.util.compat import collections_abc
from tensorflow.python.util.tf_export import tf_export
@tf_export("clip_by_value")
@dispatch.add_dispatch_support
def clip_by_value(t, clip_value_min, clip_value_max,
name=None):
"""Clips tensor values to a specified min and max.
Given a tensor `t`, this operation returns a tensor of the same type and
shape as `t` with its values clipped to `clip_value_min` and `clip_value_max`.
Any values less than `clip_value_min` are set to `clip_value_min`. Any values
greater than `clip_value_max` are set to `clip_value_max`.
Note: `clip_value_min` needs to be smaller or equal to `clip_value_max` for
correct results.
For example:
Basic usage passes a scalar as the min and max value.
>>> t = tf.constant([[-10., -1., 0.], [0., 2., 10.]])
>>> t2 = tf.clip_by_value(t, clip_value_min=-1, clip_value_max=1)
>>> t2.numpy()
array([[-1., -1., 0.],
[ 0., 1., 1.]], dtype=float32)
The min and max can be the same size as `t`, or broadcastable to that size.
>>> t = tf.constant([[-1, 0., 10.], [-1, 0, 10]])
>>> clip_min = [[2],[1]]
>>> t3 = tf.clip_by_value(t, clip_value_min=clip_min, clip_value_max=100)
>>> t3.numpy()
array([[ 2., 2., 10.],
[ 1., 1., 10.]], dtype=float32)
Broadcasting fails, intentionally, if you would expand the dimensions of `t`
>>> t = tf.constant([[-1, 0., 10.], [-1, 0, 10]])
>>> clip_min = [[[2, 1]]] # Has a third axis
>>> t4 = tf.clip_by_value(t, clip_value_min=clip_min, clip_value_max=100)
Traceback (most recent call last):
...
InvalidArgumentError: Incompatible shapes: [2,3] vs. [1,1,2]
It throws a `TypeError` if you try to clip an `int` to a `float` value
(`tf.cast` the input to `float` first).
>>> t = tf.constant([[1, 2], [3, 4]], dtype=tf.int32)
>>> t5 = tf.clip_by_value(t, clip_value_min=-3.1, clip_value_max=3.1)
Traceback (most recent call last):
...
TypeError: Cannot convert ...
Args:
t: A `Tensor` or `IndexedSlices`.
clip_value_min: The minimum value to clip to. A scalar `Tensor` or one that
is broadcastable to the shape of `t`.
clip_value_max: The maximum value to clip to. A scalar `Tensor` or one that
is broadcastable to the shape of `t`.
name: A name for the operation (optional).
Returns:
A clipped `Tensor` or `IndexedSlices`.
Raises:
`tf.errors.InvalidArgumentError`: If the clip tensors would trigger array
broadcasting that would make the returned tensor larger than the input.
TypeError: If dtype of the input is `int32` and dtype of
the `clip_value_min` or `clip_value_max` is `float32`
"""
with ops.name_scope(name, "clip_by_value",
[t, clip_value_min, clip_value_max]) as name:
values = ops.convert_to_tensor(
t.values if isinstance(t, ops.IndexedSlices) else t, name="t")
# Go through list of tensors, for each value in each tensor clip
t_min = math_ops.minimum(values, clip_value_max)
# Assert that the shape is compatible with the initial shape,
# to prevent unintentional broadcasting.
_ = values.shape.merge_with(t_min.shape)
t_max = math_ops.maximum(t_min, clip_value_min, name=name)
_ = values.shape.merge_with(t_max.shape)
if isinstance(t, ops.IndexedSlices):
t_max = ops.IndexedSlices(t_max, t.indices, t.dense_shape)
return t_max
# TODO(scottzhu): switch to use new implementation in 2 weeks.
# return gen_math_ops.clip_by_value(
# t, clip_value_min, clip_value_max, name=name)
# TODO(scottzhu): switch to use new implementation in 2 weeks.
# @ops.RegisterGradient("ClipByValue")
def _clip_by_value_grad(op, grad):
"""Returns grad of clip_by_value."""
x = op.inputs[0]
y = op.inputs[1]
z = op.inputs[2]
gdtype = grad.dtype
sx = array_ops.shape(x)
sy = array_ops.shape(y)
sz = array_ops.shape(z)
gradshape = array_ops.shape(grad)
zeros = array_ops.zeros(gradshape, gdtype)
xymask = math_ops.less(x, y)
xzmask = math_ops.greater(x, z)
rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
rx, rz = gen_array_ops.broadcast_gradient_args(sx, sz)
xgrad = array_ops.where(math_ops.logical_or(xymask, xzmask), zeros, grad)
ygrad = array_ops.where(xymask, grad, zeros)
zgrad = array_ops.where(xzmask, grad, zeros)
gx = array_ops.reshape(math_ops.reduce_sum(xgrad, rx), sx)
gy = array_ops.reshape(math_ops.reduce_sum(ygrad, ry), sy)
gz = array_ops.reshape(math_ops.reduce_sum(zgrad, rz), sz)
return (gx, gy, gz)
@tf_export("clip_by_norm")
def clip_by_norm(t, clip_norm, axes=None, name=None):
"""Clips tensor values to a ma
|
ximum L2-norm.
Given a tensor `t`, and a maximum clip value `clip_norm`, this operation
normalizes `t` so that its L2-norm is less than or equal to `clip_norm`,
|
along the dimensions given in `axes`. Specifically, in the default case
where all dimensions are used for calculation, if the L2-norm of `t` is
already less than or equal to `clip_norm`, then `t` is not modified. If
the L2-norm is greater than `clip_norm`, then this operation returns a
tensor of the same type and shape as `t` with its values set to:
`t * clip_norm / l2norm(t)`
In this case, the L2-norm of the output tensor is `clip_norm`.
As another example, if `t` is a matrix and `axes == [1]`, then each row
of the output will have L2-norm less than or equal to `clip_norm`. If
`axes == [0]` instead, each column of the output will be clipped.
This operation is typically used to clip gradients before applying them with
an optimizer.
Args:
t: A `Tensor` or `IndexedSlices`.
clip_norm: A 0-D (scalar) `Tensor` > 0. A maximum clipping value.
axes: A 1-D (vector) `Tensor` of type int32 containing the dimensions
to use for computing the L2-norm. If `None` (the default), uses all
dimensions.
name: A name for the operation (optional).
Returns:
A clipped `Tensor` or `IndexedSlices`.
Raises:
ValueError: If the clip_norm tensor is not a 0-D scalar tensor.
TypeError: If dtype of the input is not a floating point or
complex type.
"""
with ops.name_scope(name, "clip_by_norm", [t, clip_norm]) as name:
values = ops.convert_to_tensor(
t.values if isinstance(t, ops.IndexedSlices) else t, name="t")
# Calculate L2-norm, clip elements by ratio of clip_norm to L2-norm
l2sum = math_ops.reduce_sum(values * values, axes, keepdims=True)
pred = l2sum > 0
# Two-tap tf.where trick to bypass NaN gradients
l2sum_safe = array_ops.where(pred, l2sum, array_ops.ones_like(l2sum))
l2norm = array_ops.where(pred, math_ops.sqrt(l2sum_safe), l2sum)
intermediate = values * clip_norm
# Assert that the shape is compatible with the initial shape,
# to prevent unintentional broadcasting.
_ = values.shape.merge_with(intermediate.sha
|
patriczek/faf
|
src/pyfaf/actions/pull_components.py
|
Python
|
gpl-3.0
| 6,262
| 0.00016
|
# Copyright (C) 2013 ABRT Team
# Copyright (C) 2013 Red Hat, Inc.
#
# This file is part of faf.
#
# faf is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# faf is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with faf. If not, see <http://www.gnu.org/licenses/>.
from pyfaf.actions import Action
from pyfaf.common import FafError
from pyfaf.opsys import systems
from pyfaf.queries import (get_component_by_name,
get_opsys_by_name,
get_osrelease)
from pyfaf.storage import (OpSysComponent,
OpSysReleaseComponent)
class PullComponents(Action):
name = "pull-components"
def __init__(self):
super(PullComponents, self).__init__()
def _get_tasks(self, cmdline, db):
result = set()
# no arguments - pull everything for non-EOL releases
if len(cmdline.opsys) < 1:
for osplugin in systems.values():
db_opsys = get_opsys_by_name(db, osplugin.nice_name)
if db_opsys is None:
raise FafError("Operating system '{0}' is not defined in "
"storage".format(osplugin.nice_name))
for db_release in db_opsys.releases:
if db_release.status != "EOL":
result.add((osplugin, db_release))
# a single opsys - respect opsysrelease
elif len(cmdline.opsys) == 1:
if cmdline.opsys[0] not in systems:
raise FafError("Operating system '{0}' is not supported"
.format(cmdline.opsys[0]))
osplugin = systems[cmdline.opsys[0]]
db_opsys = get_opsys_by_name(db, osplugin.nice_name)
if db_opsys is None:
raise FafError("Operating system '{0}' is not defined in "
"storage".format(osplugin.nice_name))
if len(cmdline.opsys_release) < 1:
for db_release in db_opsys.releases:
result.add((osplugin, db_release))
else:
for release in cmdline.opsys_release:
db_release = get_osrelease(db, osplugin.nice_name, release)
if db_release is None:
self.log_warn("Operating system '{0} {1}' is not "
"supported".format(osplugin.nice_name,
release))
continue
result.add((osplugin, db_release))
# multiple opsys - pull all of their releases
else:
for opsys_name in cmdline.opsys:
if opsys_name not in systems:
self.log_warn("Operating system '{0}' is not supported"
.format(opsys_name))
continue
osplugin = systems[opsys_name]
db_opsys = get_opsys_by_name(db, osplugin.nice_name)
if db_opsys is None:
self.log_warn("Operating system '{0}' is not defined in "
"storage".format(osplugin.nice_name))
continue
for db_release in db_opsys.releases:
result.add((osplugin, db_release))
return sorted(result, key=lambda (p, r): (r.opsys.name, r.version))
def run(self, cmdline, db):
try:
tasks = self._get_tasks(cmdline, db)
except FafError as ex:
self.log_error("Unable to process command line arguments: {0}"
.format(str(ex)))
return 1
new_components = {}
i = 0
for osplugin, db_release in tasks:
i += 1
self.log_info("[{0} / {1}] Processing '{2} {3}'"
.format(i, len(tasks), osplugin.nice_name,
db_release.version))
db_components = [c.component.name for c in db_release.components]
remote_components = osplugin.get_components(db_release.version)
for remote_component in remote_components:
if remote_component in db_components:
continue
db_component = get_component_by_name(db, remote_component,
db_release.opsys.name)
if db_component is None:
key = (db_release.opsys, remote_component)
if key in new_components:
db_component = new_components[key]
else:
self.log_info("Creating new component '{0}' in "
"operating system '{1}'"
.format(remote_component,
osplugin.nice_name))
db_component = OpSysComponent()
db_component.name = remote_component
db_component.opsys = db_release.opsys
db.session.add(db_component)
new_components[key] = db_component
self.log_info("Creating new component '{0}' in {1} {2}"
.format(remote_component, osplugin.nice_name,
db_release.version))
db_release_component = OpSysReleaseComponent()
db_release_component.release = db_release
db_release_component.component = db_component
db.session.add(db_release_component)
db.session.flush()
def tweak_cmdline_parser(self, parser):
parser.add_opsys(multiple=True)
parser.add_opsys_release(multiple=True)
|
mcgonagle/ansible_f5
|
library/bigip_device_dns.py
|
Python
|
apache-2.0
| 11,184
| 0.000715
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: bigip_device_dns
short_description: Manage BIG-IP device DNS settings
description:
- Manage BIG-IP device DNS settings
version_added: "2.2"
options:
cache:
description:
- Specifies whether the system caches DNS lookups or performs the
operation each time a lookup is needed. Please note that this applies
only to Access Policy Manager features, such as ACLs, web application
rewrites, and authentication.
default: disable
choices:
- enabled
- disabled
name_servers:
description:
- A list of name servers that the system uses to validate DNS lookups
forwarders:
deprecated: Deprecated in 2.4. Use the GUI or edit named.conf.
description:
- A list of BIND servers that the system can use to perform DNS lookups
search:
description:
- A list of domains that the system searches for local domain lookups,
to resolve local host names.
ip_version:
description:
- Specifies whether the DNS specifies IP addresses using IPv4 or IPv6.
choices:
- 4
- 6
state:
description:
- The state of the variable on the system. When C(present), guarantees
that an existing variable is set to C(value).
default: present
choices:
- absent
- present
notes:
- Requires the f5-sdk Python package on the host. This is as easy as pip
install f5-sdk.
extends_documentation_fragment: f5
requirements:
- f5-sdk
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = r'''
- name: Set the DNS settings on the BIG-IP
bigip_device_dns:
name_servers:
- 208.67.222.222
- 208.67.220.220
search:
- localdomain
- lab.local
password: secret
server: lb.mydomain.com
user: admin
validate_certs: no
delegate_to: localhost
'''
RETURN = r'''
cache:
description: The new value of the DNS caching
returned: changed
type: string
sample: enabled
name_servers:
description: List of name servers that were set
returned: changed
type: list
sample: ['192.0.2.10', '172.17.12.10']
search:
description: List of search domains that were set
returned: changed
type: list
sample: ['192.0.2.10', '172.17.12.10']
ip_version:
description: IP version that was set that DNS will specify IP addresses in
returned: changed
type: int
sample: 4
warnings:
description: The list of warnings (if any) generated by module based on arguments
returned: always
type: list
sample: ['...', '...']
'''
from ansible.module_utils.f5_utils import AnsibleF5Client
from ansible.module_utils.f5_utils import AnsibleF5Parameters
from ansible.module_utils.f5_utils import HAS_F5SDK
from ansible.module_utils.f5_utils import F5ModuleError
try:
from ansible.module_utils.f5_utils import iControlUnexpectedHTTPError
except ImportError:
HAS_F5SDK = False
class Parameters(AnsibleF5Parameters):
api_map = {
'dhclient.mgmt': 'dhcp',
'dns.cache': 'cache',
'nameServers': 'name_servers',
'include': 'ip_version'
}
api_attributes = [
'nameServers', 'search', 'include'
]
updatables = [
'cache', 'name_servers', 'search', 'ip_version'
]
returnables = [
'cache', 'name_servers', 'search', 'ip_version'
]
absentables = [
'name_servers', 'search'
]
def to_return(self):
result = {}
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
return result
def api_params(self):
result = {}
for api_attribute in self.api_attributes:
if self.api_map is not None and api_attribute in self.api_map:
result[api_attribute] = getattr(self, self.api_map[api_attribute])
else:
result[api_attribute] = getattr(self, api_attribute)
result = self._filter_params(result)
return result
@property
def search(self):
result = []
if self._values['search'] is None:
return None
for server in self._values['search']:
result.append(str(server))
return result
@property
def name_servers(self):
result = []
if self._values['name_servers'] is None:
return None
for server in self._values['name_servers']:
result.append(str(server))
return result
@property
def cache(self):
if str(self._values['cache']) in ['enabled', 'enable']:
return 'enable'
else:
return 'disable'
@property
def dhcp(self):
valid = ['enable', 'enabled']
return True if self._values['dhcp'] in valid else False
@property
def forwarders(self):
if self._values['forwarders'] is None:
return None
else:
raise F5ModuleError(
"The modifying of forwarders is not supported."
)
@property
def ip_version(self):
if self._values['ip_version'] in [6, '6', 'options inet6']:
return "options inet6"
elif self._values['ip_version'] in [4, '4', '']:
return ""
else:
return None
class ModuleManager(object):
def __init__(self, client):
self.client = client
self.have = None
self.want = Parameters(self.client.module.params)
self.changes = Parameters()
def _update_changed_options(self):
changed = {}
for key in Parameters.updatables:
if getattr(self.want, key) is not None:
attr1 = getattr(self.want, key)
attr2 = getattr(self.have, key)
if attr1 != attr2:
changed[key] = attr1
if changed:
self.changes = Parameters(changed)
return True
return False
def exec_module(self):
changed = False
result = dict()
state = self.want.state
try:
if state == "present":
changed = self.update()
elif state == "absent":
changed = self.absent()
except iControlUnexpectedHTTPError as e:
raise F5ModuleError(str(e))
changes = self.changes.to_return()
result.update(**changes)
result.update(dict(changed=changed))
return result
def read_current_from_device(self):
want_keys = ['dns.cache']
result = dict()
dbs = self.client.api.tm.sys.dbs.get_collection()
for db in dbs:
if db.name in want_keys:
result[db.name] = db.value
dns = self.client.api.tm.sys.dns.load()
attrs = dns.attrs
if 'include' not in attrs:
attrs['include'] = 4
result.update(attrs)
return Parameters(result)
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.client.check_mode:
return True
self.update_on_device()
return True
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def update_on_device(self):
params = self.want.api_params()
tx = self.client.api.tm.transactions.transaction
with BigIpTxContext(tx) as api:
cache = api.tm.sys.dbs.db.load(name='dns.cache')
dns = api.tm.sys.dns.load()
# Empty values can be supplied, but you cannot supply the
# None value, so we check for that specifically
if self.want.cache is not None:
cache.update(value
|
android/android-test
|
tools/android/emulator/resources.py
|
Python
|
apache-2.0
| 2,873
| 0.004873
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Local implementation of resources.
"""
import os
import sys
_GOOGLE_STR = os.sep + 'android_test_support' + os.sep
def GetRunfilesDir():
starting_point = sys.argv[0]
return FindRunfilesDir(os.path.abspath(starting_point))
def GetResourceAsFile(file_path):
return open(GetResourceFilename(file_path))
def GetResourceFilename(file_path):
if os.path.isabs(file_path):
return file_path
else:
return os.path.join(GetRunfilesDir(), file_path)
def FindRunfilesDir(program_filename):
"""Look for a runfiles directory corresponding to the given program.
Args:
program_filename: absolute path to a Python program
Returns:
The path to the runfiles directory, or None if one wasn't found.
"""
def _GetBinaryDirectoryFilename(filename):
"""Find a match for the binary filename and its path.
If the binary directory isn't known, search the program's
filename for a binary directory.
Args:
filename: The name of the binary file.
Returns:
A tuple of the binary directory, and the filename relative to that
directory.
If the binary directory isn't known, search the program's
filename for a binary directory
"""
# first, see if filename begins with a bin directory
for bindir in ['bin', 'bazel-bin']:
bindir_sep = bindir + os.sep
if filename.startswith(bindir_sep):
filename = filename[len(bindir_sep):]
return bindir, filename
# if not, find the bin directory in the absolute programname
for elem in os.path.abspath(sys.argv[0]).split(os.sep):
if elem in ['bin', 'bazel-bin']:
return elem, filename
# shouldn't happen but will fail os.path.isdir below
return '', filename
google_idx = program_filename.rfind(_GOOGLE_STR)
if google_idx != -1:
root_dir = program_filename[:google_idx]
rel_filename = program_filename[google_idx + len(_GOOGLE_STR):]
bindir, rel_filename = _GetBinaryDirectoryFilename(rel_filename)
rel_filename_noext = os.path.splitext(rel_filename)[0]
runfiles = os.path.join(root_dir, 'android_test_support', bindir,
rel_filename_noext + '.runfiles')
if os.path.isdir(runfiles):
return runfiles
return root_dir
else:
return None
|
sangoma/switchy
|
switchy/apps/measure/sys.py
|
Python
|
mpl-2.0
| 3,923
| 0
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
Rudimentary system stats collection using ``psutil``.
"""
import time
from switchy import event_callback, utils
def sys_stats(df):
"""Reindex on the call index to allign with call metrics data
and interpolate.
|
"""
df.index = df.call_index
ci = df.pop('call_index')
    # interpolate all system stats since the arrays will be sparse
# compared to the associated call metrics data.
return df.reindex(range(int(ci.iloc[-1]) + 1)).interpolate()
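# Illustrative sketch (not part of the original module; the column values are
# invented): sys_stats() expects a frame with a 'call_index' column and returns
# it reindexed onto a dense call-index range, interpolating the gaps.
#
#   import pandas as pd
#   raw = pd.DataFrame({'call_index': [0, 2, 4],
#                       'total_cpu_percent': [10.0, 30.0, 50.0]})
#   dense = sys_stats(raw)   # rows 1 and 3 are filled in by interpolation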
class SysStats(object):
"""A switchy app for capturing system performance stats during load test
using the `psutil`_ module.
An instance of this app should be loaded if rate limited data gathering is
to be shared across multiple slaves (threads).
.. _psutil:
https://pythonhosted.org/psutil/
"""
operators = {
'sys_stats': sys_stats,
}
def __init__(self, psutil, rpyc=None):
self._psutil = psutil
self.rpyc = rpyc
self._conn = None
self.log = utils.get_logger(__name__)
# required to define the columns for the data frame storer
self.fields = [
'call_index',
'total_cpu_percent',
'percent_cpu_sys',
'percent_cpu_usr',
'percent_cpu_idle',
'percent_cpu_iow',
'phymem_percent_usage',
'load_avg',
]
# this call should ensure we have the correct type
self._times_tup_type = psutil.cpu_times().__class__
self.log = utils.get_logger(type(self).__name__)
# initial cpu usage
self._last_cpu_times = self.psutil.cpu_times()
@property
def psutil(self):
try:
return self._psutil
except (ReferenceError, EOFError): # rpyc and its weakrefs being flaky
if self.rpyc:
self.log.warn("resetting rypc connection...")
self._conn = conn = self.rpyc.classic_connect()
self._psutil = conn.modules.psutil
return self._psutil
raise
def prepost(self, collect_rate=2, storer=None):
self.storer = storer
self.count = 0
self._collect_period = 1. / collect_rate
self._last_collect_time = 0
@property
def collect_rate(self):
return 1. / self._collect_period
@collect_rate.setter
def collect_rate(self, rate):
self._collect_period = 1. / rate
@event_callback("CHANNEL_CREATE")
def on_create(self, sess):
now = time.time()
if sess.is_outbound():
# rate limiting
if (now - self._last_collect_time) >= self._collect_period:
# XXX important to keep this here for performance and
# avoiding thread racing
self._last_collect_time = now
psutil = self.psutil
self.log.debug("writing psutil row at time '{}'".format(now))
curr_times = self.psutil.cpu_times()
delta = self._times_tup_type(*tuple(
now - last for now, last in
zip(curr_times, self._last_cpu_times)
))
self._last_cpu_times = curr_times
tottime = sum(delta)
self.storer.append_row((
sess.call.vars['call_index'],
psutil.cpu_percent(interval=None),
delta.system / tottime * 100.,
delta.user / tottime * 100.,
delta.idle / tottime * 100.,
delta.iowait / tottime * 100.,
psutil.phymem_usage().percent,
psutil.os.getloadavg()[0],
))
|
cjlee112/socraticqs2
|
mysite/chat/migrations/0011_auto_20170614_0341.py
|
Python
|
apache-2.0
| 636
| 0.001572
|
from django.db import models, migrations
def update_last_modify_timestamp(apps, schema_editor):
Chat = apps.get_model('chat', 'Chat')
for chat in Chat.objects.all():
if not chat.last_modify_timestamp:
last_msg = chat.message_set.all().order_by('-timestamp').first()
            if last_msg:
                chat.last_modify_timestamp = last_msg.timestamp
chat.save()
class Migration(migrations.Migration):
dependencies = [
('chat', '0010_auto_20170613_0632'),
]
operations = [
migrations.RunPython(update_last_modify_timestamp, lambda apps, se: None),
]
|
gion86/awlsim
|
awlsim/gui/icons/outputs.py
|
Python
|
gpl-2.0
| 5,539
| 0
|
# AUTOMATICALLY GENERATED FILE
# DO NOT EDIT
# Generated from outputs.png
icon_outputs = b'iVBORw0KGgoAAAANSUhEUgAAADAAAAAwCAYAAABXAvmHAAAA'\
b'BHNCSVQICAgIfAhkiAAAAAlwSFlzAAAN1wAADdcBQiibeAAA'\
b'CuNJREFUaIHVmltsXcd1hr+Z2ftceZVoi4xFS6KdSlHkooGr'\
b'JgpyqWQEKFwkqNvaemrhINVDCxfpS4HW7YP8UD+5ShEbiuJL'\
b'0LhGW0Bx/ZDGdYwiiA2bVS624jqOqBspUbLEm07Ec9vXWdOH'\
b'fXgkUhR5KJkPXcAAm+SamfXPWutfa8+mYhX57ksvTez6xCe2'\
b'Gt+jq9xFIZ/H8zy01hhjaDabvPXWW+zbtw+tNSJCkqaEYUi9'\
b'XqNarXH6zJnxr331q/esttetiLeawq9/Xdm6cWCAOI4xxuD5'\
b'PlopcrkcuVyORqPB5OQk5XIZAGstJk1J0pT5apVfnTjBlUpl'\
b'ZD2MB9CrKVQqFcbPnmViYoKJ8XFOjo1Rq9dwOABEhCNHjuCc'\
b'wzmHOIdYi01T0tQizq2X7UAHHujt7WPrtq3EcUIulyNJEur1'\
b'Ohs2bGjr+L7fflaLZq+v8dCBB8QJIq59ws4JWpsV5zho6QLr'\
b'7IHVAYi0jadlj9aLp8Ub77zJbNcGs16yaghdO/lrQ6nFgdLY'\
b'vuvGedz88Dc9dPjOgip+1hil6mEyOvP9A9O3ZD1r9MDCWOoB'\
b'EzSvGe5cZnkWPzekweaH/7FYcubAx4e7vrNrpP/ZjWX9xJav'\
b'HP7UrQLoyAPipM06ywHo/uDdZec5R3vegjRrVb/buG0Pfu7+'\
b'/vs+OcybPzn7tXd/OXkfX/7mM82q/v7sG4/V1wKgcw9cl8h6'\
b'SQipOF5s/DJPbamADatGa0H5Pl/Y8xveV76067MPfGbLoTs3'\
b'qX/Y/OVn7l0LgM5yQDJ+X0hGpZfH7ZY8L0TRIh17WdmwbIKg'\
b'SdAMERyDg/383t5dg73dxT8/fmJqq/8Hh5+bODX9Or96YvHJ'\
b'3CqABS8sWLM0hNpAr/2wSH+ppFFVh0FAEEbXwkwrfvu3tvob'\
b'+sq//4ux6W3lrq6R2e0v/vv0K386s5J9q4YQLITRNRBLWQgg'\
b'jmNmpqex1q5YviSdVjap6yAICIKYMIwIg5gwiEmsZXhzv/ni'\
b'79x936fv23Tw7g25x0ceevFGirtOVvUAgBW7mFiW4UfnHKVy'\
b'mUYQ8/LrP+fyzDzVepWr81XCIOTjD3z9nyQNsHGUS+Lg/iiK'\
b'iOKk7SSHa4ern/P4ze0b+3tK3l+cmLj6KW//dw+dmr/jNV57'\
b'MLolACLSYsQWE4ncoFMoFMgXCpw4fZE33znN5uEhujYOUto4'\
b'hHOOLTvk6yIWsYITy+DQx4jj9DoAZF4WyfJNhM2DJd/3+EJ3'\
b'ybvLG6/srj78z9+6ePTRD9cMwFqL1jpjIlixQXNOEBHK3SVy'\
b'+QKe52N8D6M9jDEordFaoxQkseAU7ZxZaFXEZmuICN1lzY4t'\
b'pRHl7F9fqoTbcg9/54XxmfNv8sYTaecesAKoRR3oTQHgCMOI'\
b'ZjPCYRAB48AYhRHQBrQGpTQKAbVQ96RF1RkAay1WLNam2NSq'\
b'uwf9XLngHskpu924Lc8FDz939OLRA5UOc0BQWrcZQ8TeXNlB'\
b'GEU0myFK+4hoPDF4vuCcRjvXAtAiA7cQ/7RaccnacWtJbYqz'\
b'ljRNiOMIlTY9L67c74Jwq9HFe+7+wxeOdJwDGQvJTZM4Mz7L'\
b'8jAIqTdCtJfH4eGU4LQDBaalpvS1Oe33CHHYNDv9KIwJg4hm'\
b'EFCvN5ifrzJ35SqNep1mEG4UU/5Lv2dwW8c5YDzTrk4rhpA4'\
b'qtUa5yen2TAg9PQIpbIjX4BcDjzPobXAwuk7hxUhTYU0scRJ'\
b'QhjGBEFAFEUEzWb2HAYkqSAOcAKS+qRxV8cAsgR2CCsDAAjD'\
b'iLkrNVKXJ4g05QCKJSgUHJ5n8VrJrFSWV04yEGItSSokFsRp'\
b'HAqlNcYsJL4CpZ3yyxXtd79skc5CKKsDGbWtFEJZNXY4ScG5'\
b'bHPPYHwfz/fxPB/f9/A80zZoodIbEVKlsjwTh/UM2hq0NqhW'\
b'vCXWprFVp1LtH7GR/t7cf//t5VUB2DQliWNMuYtavYHYlOHh'\
b'YbTSy7YUOEFsgnVZomtt8IyH7xn8nEfO9zMAKgOw8Ma3UMFF'\
b'BCuZ4ZmOxlpLtVazjUbzB5G13557+9LrcNRCBzRqPA9rhTAM'\
b'6evrpau7m2Kx2ObpZRCglKVZvcK8Udi4SbN2hUI+R84zeEZj'\
b'jOKurdvp7t3QMljQrdMXIxgrmNbhNOpVNzN96VK1Ov9vYZR8'\
b'u3r8hTPX79ZRCDmxxHGMtNy7kgxs7OOhL32Gk2cnCaKAMJwn'\
b'asR8cH72ZSQFiY3YZPeDf3zgrg0DmxARtLXgwEhmuNaKNI25'\
b'NDkuM1Mf/rRarT1zdebKK1z+z+bS/TqjUSBXyEO7obv5fcPG'\
b'vh7+6sAfMTc3x8S5CSYvXKDeaHDwh//yCADdO/pzxdxhrfUj'\
b'uZyPtRalaAFROKAyN8X5MyevXLgw8UoQREeaY0ePt8y4QTpm'\
b'IVo8LU5WvGhY9M6gVLt1aBug8xatref75Hy/lbgOrS1pEnP+'\
b'zPv2zNj747NTlw/HNj0anP7BhzffbQ0AFqrwzUJobm4u43Rr'\
b'aTab1Gs1mvUGjUY22qI9h3LWmIyNRASlFLX5Cu+/M5qc+OXP'\
b'X6vNX30+OCevwhvparZ1XInlul5lORkYGEBECIKAsZMn6evv'\
b'p1AsUCwWl6Nd8TyDMQZrA86OvefePfbjS+fGT/5rHNSejy/9'\
b'7FQndnUMIEnSa53iKuHz7LPPEscxs3NzfHHv797IVMo4EAmC'\
b'kLmZKX729o/kg/eO/c/Uh+e/lVyeeRnOh50a3zGA1KaL7j2d'\
b'ZP1KmqakNuP7NE2xIpRKJQC6u
|
7qWL3jKc07Z+uiPf+h++vaP'\
b'Khcnz7waBbVvplPHjwMrdIm3AUCsJQpCnHOEYciFC5MMbhpk'\
b'5J57SJOE4TBsg9m/fz8nxsbIFwpcuDB5I4gro00Z/PSrk+On'\
b'Upz93yRJXqfy3oqJelsAlFIkSUI+n0dpTblUZnjzMIVCnjiO'\
b'SZKETzYaxEnS/jYwODREpVJBxKFQSyu2tVM/+S8Lr/ER3P6u'\
b'CkBrTZIkWLFEQZNCvkAcx1kz6bLQaXz+88RRlHWVSUIUhsRx'\
b'jBWL0mrZS4CPwviOABitCaOYixcv0tvXl3WMSYzWClD09vXx'\
b'99/4RhZCaUocx9nXmUaDOIpxIhiz8m32egLIO+emjVabrAjO'\
b'SqtHD7CpxZgI43nQ4v+09WUmCAKSNMXh8DyfJI6m1gvAsr5d'\
b'It3Xja7WKG3btu1ze/bsedzzPAYGBjh27Njp0dHRvwFCoNEa'\
b'daAK1FrjI5dOWGjZzffs2SN79+59vFAoMDIyQpIkU6Ojo//x'\
b'0Zu4snREo8tJsVhM+/v7KRQK3HHHHRSLxVXL/nrILQPwfd/2'\
b'9/dTLBYZGBigXC7//wJQLpfT/v5+fN+nVCqhtV7/L3rLSCdJ'\
b'zKFDh+5VSr0D9LR/qX2czoPOgUvBRigXL/quNDQ0dG7//v0j'\
b'Sql1A6ceffTRP/F9/2MrKe3cufPeffv2/dlaF+/q6uKpp576'\
b'uzRNl+1xlFJqYGDg4pNPPvnSWtdur3H8+PFzpVJpy0pK1Wr1'\
b'lhbv6elZVadarZ7cvXv3jlvagA5ywFpLvV6nXl/Tpysge4/o'\
b'6+tbVed2xBsaGmr/n8NSSZKEq1ev4nkeU1NrL6b5fJ4dO3Ys'\
b'f/3SEpFVbglWEe/w4cMv5nK5TUv/EEWRFwRBDkBrPdTd3f3A'\
b'Whfv7e3l6aeffp6bvJC31r6tNqMjFnrsscd2OOd+AeTXsvjO'\
b'nTtnZ2dnBw8ePHh7cbKC/B8rHMqx5Ahr7gAAAABJRU5ErkJg'\
b'gg=='
|
william-richard/moto
|
tests/test_kinesis/test_kinesis_cloudformation.py
|
Python
|
apache-2.0
| 5,255
| 0.000761
|
import boto3
import sure # noqa
from moto import mock_kinesis, mock_cloudformation
@mock_cloudformation
def test_kinesis_cloudformation_create_stream():
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
stack_name = "MyStack"
template = '{"Resources":{"MyStr
|
eam":{"Type":"AWS::Kinesis::Stream"}}}'
cf_conn.create_stack(StackName=stack_name, TemplateBody=template)
provisioned_resource = cf_conn.list_stack_resources(StackName=stack_name)[
"StackResourceSummaries"
][0]
provisioned_resource["LogicalResourceId"].should.equal("MyStream")
len(provisioned_resource["PhysicalResourceId"]).should.be.greater_than(0)
@mock_cloudformation
@mock_kinesis
def test_kinesis_cloudformation_get_attr():
cf_conn = boto3.client("cloudformation", region_name="us-east-1")
stack_name = "MyStack"
template = """
Resources:
TheStream:
Type: AWS::Kinesis::Stream
Outputs:
StreamName:
Value: !Ref TheStream
StreamArn:
Value: !GetAtt TheStream.Arn
""".strip()
cf_conn.create_stack(StackName=stack_name, TemplateBody=template)
stack_description = cf_conn.describe_stacks(StackName=stack_name)["Stacks"][0]
output_stream_name = [
output["OutputValue"]
for output in stack_description["Outputs"]
if output["OutputKey"] == "StreamName"
][0]
output_stream_arn = [
output["OutputValue"]
for output in stack_description["Outputs"]
if output["OutputKey"] == "StreamArn"
][0]
kinesis_conn = boto3.client("kinesis", region_name="us-east-1")
stream_description = kinesis_conn.describe_stream(StreamName=output_stream_name)[
"StreamDescription"
]
output_stream_arn.should.equal(stream_description["StreamARN"])
@mock_cloudformation
@mock_kinesis
def test_kinesis_cloudformation_update():
cf_conn = boto3.client("cloudformation", region_name="us-east-1")
stack_name = "MyStack"
template = """
Resources:
TheStream:
Type: AWS::Kinesis::Stream
Properties:
Name: MyStream
ShardCount: 4
RetentionPeriodHours: 48
Tags:
- Key: TagKey1
Value: TagValue1
- Key: TagKey2
Value: TagValue2
""".strip()
cf_conn.create_stack(StackName=stack_name, TemplateBody=template)
stack_description = cf_conn.describe_stacks(StackName=stack_name)["Stacks"][0]
stack_description["StackName"].should.equal(stack_name)
kinesis_conn = boto3.client("kinesis", region_name="us-east-1")
stream_description = kinesis_conn.describe_stream(StreamName="MyStream")[
"StreamDescription"
]
stream_description["RetentionPeriodHours"].should.equal(48)
tags = kinesis_conn.list_tags_for_stream(StreamName="MyStream")["Tags"]
tag1_value = [tag for tag in tags if tag["Key"] == "TagKey1"][0]["Value"]
tag2_value = [tag for tag in tags if tag["Key"] == "TagKey2"][0]["Value"]
tag1_value.should.equal("TagValue1")
tag2_value.should.equal("TagValue2")
shards_provisioned = len(
[
shard
for shard in stream_description["Shards"]
if "EndingSequenceNumber" not in shard["SequenceNumberRange"]
]
)
shards_provisioned.should.equal(4)
template = """
Resources:
TheStream:
Type: AWS::Kinesis::Stream
Properties:
ShardCount: 6
RetentionPeriodHours: 24
Tags:
- Key: TagKey1
Value: TagValue1a
- Key: TagKey2
Value: TagValue2a
""".strip()
cf_conn.update_stack(StackName=stack_name, TemplateBody=template)
stream_description = kinesis_conn.describe_stream(StreamName="MyStream")[
"StreamDescription"
]
stream_description["RetentionPeriodHours"].should.equal(24)
tags = kinesis_conn.list_tags_for_stream(StreamName="MyStream")["Tags"]
tag1_value = [tag for tag in tags if tag["Key"] == "TagKey1"][0]["Value"]
tag2_value = [tag for tag in tags if tag["Key"] == "TagKey2"][0]["Value"]
tag1_value.should.equal("TagValue1a")
tag2_value.should.equal("TagValue2a")
shards_provisioned = len(
[
shard
for shard in stream_description["Shards"]
if "EndingSequenceNumber" not in shard["SequenceNumberRange"]
]
)
shards_provisioned.should.equal(6)
@mock_cloudformation
@mock_kinesis
def test_kinesis_cloudformation_delete():
cf_conn = boto3.client("cloudformation", region_name="us-east-1")
stack_name = "MyStack"
template = """
Resources:
TheStream:
Type: AWS::Kinesis::Stream
Properties:
Name: MyStream
""".strip()
cf_conn.create_stack(StackName=stack_name, TemplateBody=template)
stack_description = cf_conn.describe_stacks(StackName=stack_name)["Stacks"][0]
stack_description["StackName"].should.equal(stack_name)
kinesis_conn = boto3.client("kinesis", region_name="us-east-1")
stream_description = kinesis_conn.describe_stream(StreamName="MyStream")[
"StreamDescription"
]
stream_description["StreamName"].should.equal("MyStream")
cf_conn.delete_stack(StackName=stack_name)
streams = kinesis_conn.list_streams()["StreamNames"]
len(streams).should.equal(0)
|
Jaesin/OctoPrint
|
src/octoprint/plugins/appkeys/__init__.py
|
Python
|
agpl-3.0
| 13,069
| 0.028469
|
# coding=utf-8
from __future__ import absolute_import
import flask
import threading
import os
import yaml
import codecs
import time
from binascii import hexlify
from collections import defaultdict
from flask_babel import gettext
import octoprint.plugin
from octoprint.settings import valid_boolean_trues
from octoprint.server.util.flask import restricted_access, no_firstrun_access
from octoprint.server import NO_CONTENT, current_user, admin_permission
from octoprint.users import DummyUser
from octoprint.util import atomic_write, monotonic_time, ResettableTimer
CUTOFF_TIME = 10 * 60 # 10min
POLL_TIMEOUT = 5 # 5 seconds
class AppAlreadyExists(Exception):
pass
class PendingDecision(object):
def __init__(self, app_id, app_token, user_id, user_token, timeout_callback=None):
self.app_id = app_id
self.app_token = app_token
self.user_id = user_id
self.user_token = user_token
self.created = monotonic_time()
if callable(timeout_callback):
			self.poll_timeout = ResettableTimer(POLL_TIMEOUT, timeout_callback, [user_token])
self.poll_timeout.start()
def external(self):
return dict(app_id=self.app_id,
user_id=self.user_id,
user_token=self.user_token)
def __repr__(self):
return u"PendingDecision({!r}, {!r}, {!r}, {!r}, timeout_callback=...)".format(self.app_id,
self.app_token,
self.user_id,
self.user_token)
class ReadyDecision(object):
def __init__(self, app_id, app_token, user_id):
self.app_id = app_id
self.app_token = app_token
self.user_id = user_id
@classmethod
def for_pending(cls, pending, user_id):
return cls(pending.app_id, pending.app_token, user_id)
def __repr__(self):
return u"ReadyDecision({!r}, {!r}, {!r})".format(self.app_id,
self.app_token,
self.user_id)
class ActiveKey(object):
def __init__(self, app_id, api_key, user_id):
self.app_id = app_id
self.api_key = api_key
self.user_id = user_id
def external(self):
return dict(app_id=self.app_id,
api_key=self.api_key,
user_id=self.user_id)
def internal(self):
return dict(app_id=self.app_id,
api_key=self.api_key)
@classmethod
def for_internal(cls, internal, user_id):
return cls(internal["app_id"], internal["api_key"], user_id)
def __repr__(self):
return u"ActiveKey({!r}, {!r}, {!r})".format(self.app_id,
self.api_key,
self.user_id)
class AppKeysPlugin(octoprint.plugin.AssetPlugin,
octoprint.plugin.BlueprintPlugin,
octoprint.plugin.SimpleApiPlugin,
octoprint.plugin.TemplatePlugin):
def __init__(self):
self._pending_decisions = []
self._pending_lock = threading.RLock()
self._ready_decisions = []
self._ready_lock = threading.RLock()
self._keys = defaultdict(list)
self._keys_lock = threading.RLock()
self._key_path = None
def initialize(self):
self._key_path = os.path.join(self.get_plugin_data_folder(), "keys.yaml")
self._load_keys()
##~~ TemplatePlugin
def get_template_configs(self):
return [dict(type="usersettings", name=gettext("Application Keys")),
dict(type="settings", name=gettext("Application Keys"))]
##~~ AssetPlugin
def get_assets(self):
return dict(js=["js/appkeys.js"],
clientjs=["clientjs/appkeys.js"],
less=["less/appkeys.less"],
css=["css/appkeys.css"])
##~~ BlueprintPlugin mixin
@octoprint.plugin.BlueprintPlugin.route("/probe", methods=["GET"])
@no_firstrun_access
def handle_probe(self):
return NO_CONTENT
@octoprint.plugin.BlueprintPlugin.route("/request", methods=["POST"])
@no_firstrun_access
def handle_request(self):
data = flask.request.json
if data is None:
return flask.make_response("Missing key request", 400)
if not "app" in data:
return flask.make_response("No app name provided", 400)
app_name = data["app"]
user_id = None
if "user" in data and data["user"]:
user_id = data["user"]
app_token, user_token = self._add_pending_decision(app_name, user_id=user_id)
self._plugin_manager.send_plugin_message(self._identifier, dict(type="request_access",
app_name=app_name,
user_token=user_token,
user_id=user_id))
response = flask.jsonify(app_token=app_token)
response.status_code = 201
response.headers["Location"] = flask.url_for(".handle_decision_poll", app_token=app_token, _external=True)
return response
@octoprint.plugin.BlueprintPlugin.route("/request/<app_token>")
@no_firstrun_access
def handle_decision_poll(self, app_token):
result = self._get_pending_by_app_token(app_token)
if result:
for pending_decision in result:
pending_decision.poll_timeout.reset()
response = flask.jsonify(message="Awaiting decision")
response.status_code = 202
return response
result = self._get_decision(app_token)
if result:
return flask.jsonify(api_key=result)
return flask.abort(404)
@octoprint.plugin.BlueprintPlugin.route("/decision/<user_token>", methods=["POST"])
@restricted_access
def handle_decision(self, user_token):
data = flask.request.json
if not "decision" in data:
return flask.make_response("No decision provided", 400)
decision = data["decision"] in valid_boolean_trues
user_id = current_user.get_name()
result = self._set_decision(user_token, decision, user_id)
if not result:
return flask.abort(404)
# Close access_request dialog for this request on all open OctoPrint connections
self._plugin_manager.send_plugin_message(self._identifier, dict(
type="end_request",
user_token=user_token
))
return NO_CONTENT
def is_blueprint_protected(self):
return False # No API key required to request API access
##~~ SimpleApiPlugin mixin
def get_api_commands(self):
return dict(generate=["app"],
revoke=["key"])
def on_api_get(self, request):
user_id = current_user.get_name()
if not user_id:
return flask.abort(403)
if request.values.get("all") in valid_boolean_trues and admin_permission.can():
keys = self._all_api_keys()
else:
keys = self._api_keys_for_user(user_id)
return flask.jsonify(keys=map(lambda x: x.external(), keys),
pending=dict((x.user_token, x.external()) for x in self._get_pending_by_user_id(user_id)))
def on_api_command(self, command, data):
user_id = current_user.get_name()
if not user_id:
return flask.abort(403)
if command == "revoke":
api_key = data.get("key")
if not api_key:
return flask.abort(400)
if not admin_permission.can():
user_for_key = self._user_for_api_key(api_key)
if user_for_key is None or user_for_key.user_id != user_id:
return flask.abort(403)
self._delete_api_key(api_key)
elif command == "generate":
# manual generateKey
app_name = data.get("app")
if not app_name:
return flask.abort(400)
self._add_api_key(user_id, app_name.strip())
return NO_CONTENT
##~~ key validator hook
def validate_api_key(self, api_key, *args, **kwargs):
return self._user_for_api_key(api_key)
##~~ Helpers
def _add_pending_decision(self, app_name, user_id=None):
app_token = self._generate_key()
user_token = self._generate_key()
with self._pending_lock:
self._remove_stale_pending()
self._pending_decisions.append(PendingDecision(app_name, app_token, user_id, user_token,
timeout_callback=self._expire_pending))
return app_token, user_token
def _get_pending_by_app_token(self, app_token):
result = []
with self._pending_lock:
self._remove_stale_pending()
for d
|
masschallenge/django-accelerator
|
accelerator/migrations/0058_memberprofile1.py
|
Python
|
mit
| 1,918
| 0
|
# Generated by Django 2.2.10 on 2021-06-08 14:37
import django.db.models.deletion
from django.db import (
migrations,
models,
)
from accelerator.utils import copy_m2m_fields
def migrate_member_profile_data(apps, schema_editor):
MemberProfile = apps.get_model('accelerator', 'MemberProfile')
MemberProfile1 = apps.get_model('accelerator', 'MemberProfile1')
CoreProfile = apps.get_model('accelerator', 'CoreProfile')
exclude = CoreProfile.objects.all().values_list('user_id', flat=True)
m2m_fields = ['gender_identity', 'interest_categories', 'program_families',
'ethno_racial_identification', 'additional_industries',
'functional_expertise', 'mentoring_specialties']
for profile in MemberProfile.objects.exclude(user_id__in=exclude):
profile_dict = profile.__dict__.copy()
profile_dict.pop("_state")
profile_dict.pop("id")
new_profile = MemberProfile1.objects.create(**profile_dict)
copy_m2m_fields(profile, new_profile, m2m_fields)
class Migration(migrations.Migration):
dependencies = [
('accelerator', '0057_update_polymorphic_ctype'),
]
operations = [
migrations.CreateModel(
name='MemberProfile1',
fields=[
('coreprofile_ptr',
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to='accelerator.CoreProfile')),
],
options={
                'db_table': 'accelerator_memberprofile1',
},
bases=('accelerator.coreprofile',),
),
        migrations.RunPython(migrate_member_profile_data,
migrations.RunPython.noop),
]
|
robotic-ultrasound-image-system/ur5
|
universal_robot-kinetic-devel/ur_modern_driver-master/planvios.py
|
Python
|
apache-2.0
| 3,473
| 0.031097
|
#!/usr/bin/env python
import sys
import copy
import rospy,sys
import moveit_commander
import actionlib
import roslib; roslib.load_manifest('ur_modern_driver')
import tf
from control_msgs.msg import *
from trajectory_msgs.msg import *
from sensor_msgs.msg import JointState
from geometry_msgs.msg import PoseStamped,Pose,PointStamped
import geometry_msgs.msg
wpoint = geometry_msgs.msg.PointStamped()
wpose = geometry_msgs.msg.PoseStamped()
def listener():
try:
rospy.Subscriber("/kinect2/click_point/left",PointStamped,move_joint)
except KeyboardInterrupt:
raise
except:
raise
def move_joint(wpoint):
try:
print "============got the point "
moveit_commander.roscpp_initialize(sys.argv)
arm=moveit_commander.MoveGroupCommander('manipulator')
end_effector_link=arm.get_end_effector_link()
blink=arm.get_planning_frame()
arm.set_goal_position_tolerance(0.01)
arm.set_goal_orientation_tolerance(0.1)
arm.set_max_velocity_scaling_factor(0.2)
        arm.set_max_acceleration_scaling_factor(0.05)
arm.set_named_target("reset")
arm.go()
rospy.sleep(1)
print "============ arrival reset "
print "============ start "
wpose.header=wpoint.header
        wpose.header.frame_id="base_link"
wpose.pose.position=wpoint.point
print wpose
waypoints=[]
#arm.set_pose_target(wpose)
waypoints.append(arm.get_current_pose().pose)
waypoints.append(copy.deepcopy(wpose.pose))
## We want the cartesian path to be interpolated at a resolution of 1 cm
## which is why we will specify 0.01 as the eef_step in cartesian
## translation. We will specify the jump threshold as 0.0, effectively
## disabling it.
(plan3, fraction) = arm.compute_cartesian_path(
waypoints, # waypoints to follow
0.01, # eef_step
0.0) # jump_threshold
rospy.sleep(2)
arm.execute(plan3)
t = tf.TransformerROS(True, rospy.Duration(10.0))
pose_target = geometry_msgs.msg.Pose()
pose_target=t.transformPose("kinect2_link",wpose)
print pose_target
#waypoints=[]
# start with the current pose
#waypoints.append(arm.get_current_pose().pose)
#waypoints.append(copy.deepcopy(wpose.pose))
## We want the cartesian path to be interpolated at a resolution of 1 cm
## which is why we will specify 0.01 as the eef_step in cartesian
## translation. We will specify the jump threshold as 0.0, effectively
## disabling it.
#(plan3, fraction) = arm.compute_cartesian_path(
# waypoints, # waypoints to follow
# 0.01, # eef_step
# 0.0) # jump_threshold
rospy.sleep(1)
print "============ arrival goal "
except KeyboardInterrupt:
raise
except:
raise
def main():
try:
rospy.init_node("test_move", anonymous=False)
inp = raw_input("Continue? y/n: ")[0]
if (inp == 'y'):
listener()
rospy.spin()
else:
print "Halting program"
except KeyboardInterrupt:
rospy.signal_shutdown("KeyboardInterrupt")
raise
if __name__ == '__main__': main()
|
larsks/cloud-init
|
tests/cloud_tests/platforms/lxd/instance.py
|
Python
|
gpl-3.0
| 9,690
| 0
|
# This file is part of cloud-init. See LICENSE file for license information.
"""Base LXD instance."""
import os
import shutil
import time
from tempfile import mkdtemp
from cloudinit.util import load_yaml, subp, ProcessExecutionError, which
from tests.cloud_tests import LOG
from tests.cloud_tests.util import PlatformError
from ..instances import Instance
from pylxd import exceptions as pylxd_exc
class LXDInstance(Instance):
"""LXD container backed instance."""
platform_name = "lxd"
_console_log_method = None
_console_log_file = None
def __init__(self, platform, name, properties, config, features,
pylxd_container):
"""Set up instance.
@param platform: platform object
@param name: hostname of instance
@param properties: image properties
@param config: image config
@param features: supported feature flags
"""
if not pylxd_container:
raise ValueError("Invalid value pylxd_container: %s" %
pylxd_container)
self._pylxd_container = pylxd_container
super(LXDInstance, self).__init__(
platform, name, properties, config, features)
self.tmpd = mkdtemp(prefix="%s-%s" % (type(self).__name__, name))
self.name = name
self._setup_console_log()
@property
def pylxd_container(self):
"""Property function."""
if self._pylxd_container is None:
raise RuntimeError(
"%s: Attempted use of pylxd_container after deletion." % self)
self._pylxd_container.sync()
return self._pylxd_container
def __str__(self):
return (
'%s(name=%s) status=%s' %
(self.__class__.__name__, self.name,
("deleted" if self._pylxd_container is None else
self.pylxd_container.status)))
def _execute(self, command, stdin=None, env=None):
if env is None:
env = {}
env_args = []
if env:
            env_args = ['env'] + ["%s=%s" % (k, v) for k, v in env.items()]
# ensure instance is running and execute the command
self.start()
# Use cmdline client due to https://github.com/lxc/pylxd/issues/268
exit_code = 0
try:
stdout, stderr = subp(
['lxc', 'exec', self.name, '--'] + env_args + list(command),
data=stdin, decode=False)
except ProcessExecutionError as e:
exit_code = e.exit_code
stdout = e.stdout
stderr = e.stderr
return stdout, stderr, exit_code
def read_data(self, remote_path, decode=False):
"""Read data from instance filesystem.
@param remote_path: path in instance
@param decode: decode data before returning.
@return_value: content of remote_path as bytes if 'decode' is False,
and as string if 'decode' is True.
"""
data = self.pylxd_container.files.get(remote_path)
return data.decode() if decode else data
def write_data(self, remote_path, data):
"""Write data to instance filesystem.
@param remote_path: path in instance
@param data: data to write in bytes
"""
self.pylxd_container.files.put(remote_path, data)
@property
def console_log_method(self):
if self._console_log_method is not None:
return self._console_log_method
client = which('lxc')
if not client:
raise PlatformError("No 'lxc' client.")
elif _has_proper_console_support():
self._console_log_method = 'show-log'
elif client.startswith("/snap"):
self._console_log_method = 'logfile-snap'
else:
self._console_log_method = 'logfile-tmp'
LOG.debug("Set console log method to %s", self._console_log_method)
return self._console_log_method
def _setup_console_log(self):
method = self.console_log_method
if not method.startswith("logfile-"):
return
if method == "logfile-snap":
log_dir = "/var/snap/lxd/common/consoles"
if not os.path.exists(log_dir):
raise PlatformError(
"Unable to log with snap lxc. Please run:\n"
" sudo mkdir --mode=1777 -p %s" % log_dir)
elif method == "logfile-tmp":
log_dir = "/tmp"
else:
raise PlatformError(
"Unexpected value for console method: %s" % method)
# doing this ensures we can read it. Otherwise it ends up root:root.
log_file = os.path.join(log_dir, self.name)
with open(log_file, "w") as fp:
fp.write("# %s\n" % self.name)
cfg = "lxc.console.logfile=%s" % log_file
orig = self._pylxd_container.config.get('raw.lxc', "")
if orig:
orig += "\n"
self._pylxd_container.config['raw.lxc'] = orig + cfg
self._pylxd_container.save()
self._console_log_file = log_file
def console_log(self):
"""Console log.
@return_value: bytes of this instance's console
"""
if self._console_log_file:
if not os.path.exists(self._console_log_file):
raise NotImplementedError(
"Console log '%s' does not exist. If this is a remote "
"lxc, then this is really NotImplementedError. If it is "
"A local lxc, then this is a RuntimeError."
"https://github.com/lxc/lxd/issues/1129")
with open(self._console_log_file, "rb") as fp:
return fp.read()
try:
return subp(['lxc', 'console', '--show-log', self.name],
decode=False)[0]
except ProcessExecutionError as e:
raise PlatformError(
"console log",
"Console log failed [%d]: stdout=%s stderr=%s" % (
e.exit_code, e.stdout, e.stderr))
def reboot(self, wait=True):
"""Reboot instance."""
self.shutdown(wait=wait)
self.start(wait=wait)
def shutdown(self, wait=True, retry=1):
"""Shutdown instance."""
if self.pylxd_container.status == 'Stopped':
return
try:
LOG.debug("%s: shutting down (wait=%s)", self, wait)
self.pylxd_container.stop(wait=wait)
except (pylxd_exc.LXDAPIException, pylxd_exc.NotFound) as e:
# An exception happens here sometimes (LP: #1783198)
# LOG it, and try again.
LOG.warning(
("%s: shutdown(retry=%d) caught %s in shutdown "
"(response=%s): %s"),
self, retry, e.__class__.__name__, e.response, e)
if isinstance(e, pylxd_exc.NotFound):
LOG.debug("container_exists(%s) == %s",
self.name, self.platform.container_exists(self.name))
if retry == 0:
raise e
return self.shutdown(wait=wait, retry=retry - 1)
def start(self, wait=True, wait_for_cloud_init=False):
"""Start instance."""
if self.pylxd_container.status != 'Running':
self.pylxd_container.start(wait=wait)
if wait:
self._wait_for_system(wait_for_cloud_init)
def freeze(self):
"""Freeze instance."""
if self.pylxd_container.status != 'Frozen':
self.pylxd_container.freeze(wait=True)
def unfreeze(self):
"""Unfreeze instance."""
if self.pylxd_container.status == 'Frozen':
            self.pylxd_container.unfreeze(wait=True)
def destroy(self):
"""Clean up instance."""
LOG.debug("%s: deleting container.", self)
self.unfreeze()
self.shutdown()
retries = [1] * 5
for attempt, wait in enumerate(retries):
try:
self.pylxd_container.delete(wait=True)
break
except Exception:
if attempt + 1 >= len(retries):
raise
|
hms-dbmi/clodius
|
scripts/exonU.py
|
Python
|
mit
| 4,383
| 0.001825
|
from __future__ import print_function
__author__ = "Alaleh Azhir,Peter Kerpedjiev"
import collections as col
import sys
import argparse
class GeneInfo:
def __init__(self):
pass
def merge_gene_info(gene_infos, gene_info):
"""
    Add a new gene_info. If its txStart and txEnd overlap with a previous entry for this
gene, combine them.
"""
merged = False
for existing_gene_info in gene_infos[gene_info.geneId]:
if (
existing_gene_info.chrName == gene_info.chrName
and existing_gene_info.txEnd > gene_info.txStart
and gene_info.txEnd > existing_gene_info.txStart
):
# overlapping genes, merge the exons of the second into the first
existing_gene_info.txStart = min(
existing_gene_info.txStart, gene_info.txStart
)
existing_gene_info.txEnd = max(existing_gene_info.txEnd, gene_info.txEnd)
for (exon_start, exon_end) in gene_info.exonUnions:
existing_gene_info.exonUnions.add((exon_start, exon_end))
merged = True
if not merged:
gene_infos[gene_info.geneId].append(gene_info)
return gene_infos
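# Illustrative sketch (not part of the original script; gene id, chromosome and
# coordinate values are invented). Two overlapping transcripts of the same gene
# are merged into a single entry:
#
#   a = GeneInfo(); a.geneId = 'G1'; a.chrName = 'chr1'
#   a.txStart, a.txEnd = '100', '200'; a.exonUnions = {(100, 150)}
#   b = GeneInfo(); b.geneId = 'G1'; b.chrName = 'chr1'
#   b.txStart, b.txEnd = '150', '300'; b.exonUnions = {(250, 300)}
#   infos = merge_gene_info(col.defaultdict(list), a)
#   infos = merge_gene_info(infos, b)
#   # infos['G1'] now holds one entry spanning '100'-'300' with both exon pairs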
def main():
parser = argparse.ArgumentParser(
description="""
python ExonUnion.py Calculate the union of the exons of a list
of transcript.
    chr10 27035524 27150016 ABI1 76 - NM_001178120 10006 protein-coding abl-interactor 1 27037498 27149792 10 27035524,27040526,27047990,27054146,27057780,27059173,27060003,27065993,27112066,27149675, 27037674,27040712,27048164,27054247,27057921,27059274,27060018,27066170,27112234,27150016,
"""
)
parser.add_argument("transcript_bed")
# parser.add_argument('-o', '--options', default='yo',
# help="Some option", type='str')
# parser.add_argument('-u', '--useless', action='store_true',
# help='Another useless option')
args = parser.parse_args()
inputFile = open(args.transcript_bed, "r")
gene_infos = col.defaultdict(list)
for line in inputFile:
        words = line.strip().split("\t")
gene_info = GeneInfo()
try:
gene_info.chrName = words[0]
gene_info.txStart = words[1]
gene_info.txEnd = words[2]
gene_info.geneName = words[3]
gene_info.score = words[4]
gene_info.strand = words[5]
gene_info.refseqId = words[6]
gene_info.geneId = words[7]
gene_info.geneType = words[8]
gene_info.geneDesc = words[9]
gene_info.cdsStart = words[10]
gene_info.cdsEnd = words[11]
gene_info.exonStarts = words[12]
gene_info.exonEnds = words[13]
except:
print("ERROR: line:", line, file=sys.stderr)
continue
# for some reason, exon starts and ends have trailing commas
gene_info.exonStartParts = gene_info.exonStarts.strip(",").split(",")
gene_info.exonEndParts = gene_info.exonEnds.strip(",").split(",")
gene_info.exonUnions = set(
[
(int(s), int(e))
for (s, e) in zip(gene_info.exonStartParts, gene_info.exonEndParts)
]
)
# add this gene info by checking whether it overlaps with any existing ones
gene_infos = merge_gene_info(gene_infos, gene_info)
for gene_id in gene_infos:
for contig in gene_infos[gene_id]:
output = "\t".join(
map(
str,
[
contig.chrName,
contig.txStart,
contig.txEnd,
contig.geneName,
contig.score,
contig.strand,
"union_" + gene_id,
gene_id,
contig.geneType,
contig.geneDesc,
contig.cdsStart,
contig.cdsEnd,
",".join([str(e[0]) for e in sorted(contig.exonUnions)]),
",".join([str(e[1]) for e in sorted(contig.exonUnions)]),
],
)
)
print(output)
if __name__ == "__main__":
main()
|
tilezen/tilequeue
|
tilequeue/queue/sqs.py
|
Python
|
mit
| 6,744
| 0
|
import threading
from datetime import datetime
from tilequeue.queue import MessageHandle
from tilequeue.utils import grouper
class VisibilityState(object):
def __init__(self, last, total):
# the datetime when the message was last extended
self.last = last
# the total amount of time currently extended
self.total = total
class VisibilityManager(object):
def __init__(self, extend_secs, max_extend_secs, timeout_secs):
self.extend_secs = extend_secs
self.max_extend_secs = max_extend_secs
self.timeout_secs = timeout_secs
self.handle_state_map = {}
self.lock = threading.Lock()
def should_extend(self, handle, now=None):
if now is None:
now = datetime.now()
with self.lock:
state = self.handle_state_map.get(handle)
if not state:
return True
if state.total + self.extend_secs > self.max_extend_secs:
return False
delta = now - state.last
return delta.seconds > self.extend_secs
def extend(self, handle, now=None):
if now is None:
now = datetime.now()
with self.lock:
state = self.handle_state_map.get(handle)
if state:
state.last = now
state.total += self.extend_secs
else:
state = VisibilityState(now, self.extend_secs)
self.handle_state_map[handle] = state
return state
def done(self, handle):
try:
with self.lock:
del self.handle_state_map[handle]
except KeyError:
pass
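# Illustrative sketch (not part of the original module; the handle string and
# timing values are invented): how a worker is expected to drive the visibility
# bookkeeping while it holds a message.
#
#   mgr = VisibilityManager(extend_secs=30, max_extend_secs=600, timeout_secs=60)
#   handle = 'example-receipt-handle'
#   if mgr.should_extend(handle):   # True the first time a handle is seen
#       mgr.extend(handle)          # records the extension time and running total
#   mgr.done(handle)                # forget the handle once the job completes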
class JobProgressException(Exception):
def __init__(self, msg, cause, err_details):
        super(JobProgressException, self).__init__(
msg + ', caused by ' + repr(cause))
self.err_details = err_details
class SqsQueue(object):
def __init__(self, sqs_client, queue_url, read_size,
recv_wait_time_seconds, visibility_mgr):
self.sqs_client = sqs_client
        self.queue_url = queue_url
self.read_size = read_size
self.recv_wait_time_seconds = recv_wait_time_seconds
self.visibility_mgr = visibility_mgr
def enqueue(self, payload):
        return self.sqs_client.send_message(
QueueUrl=self.queue_url,
MessageBody=payload,
)
def enqueue_batch(self, payloads):
# sqs can only send 10 messages at once
for payloads_chunk in grouper(payloads, 10):
msgs = []
for i, payload in enumerate(payloads_chunk):
msg_id = str(i)
msg = dict(
Id=msg_id,
MessageBody=payload,
)
msgs.append(msg)
resp = self.sqs_client.send_message_batch(
QueueUrl=self.queue_url,
Entries=msgs,
)
if resp['ResponseMetadata']['HTTPStatusCode'] != 200:
raise Exception('Invalid status code from sqs: %s' %
resp['ResponseMetadata']['HTTPStatusCode'])
failed_messages = resp.get('Failed')
if failed_messages:
# TODO maybe retry failed messages if not sender's fault? up to
# a certain maximum number of attempts?
# http://boto3.readthedocs.io/en/latest/reference/services/sqs.html#SQS.Client.send_message_batch
raise Exception('Messages failed to send to sqs: %s' %
len(failed_messages))
def read(self):
msg_handles = []
resp = self.sqs_client.receive_message(
QueueUrl=self.queue_url,
MaxNumberOfMessages=self.read_size,
AttributeNames=('SentTimestamp',),
WaitTimeSeconds=self.recv_wait_time_seconds,
VisibilityTimeout=self.visibility_mgr.timeout_secs,
)
if resp['ResponseMetadata']['HTTPStatusCode'] != 200:
raise Exception('Invalid status code from sqs: %s' %
resp['ResponseMetadata']['HTTPStatusCode'])
sqs_messages = resp.get('Messages')
if not sqs_messages:
return None
for sqs_message in sqs_messages:
payload = sqs_message['Body']
try:
timestamp = float(sqs_message['Attributes']['SentTimestamp'])
except (TypeError, ValueError):
timestamp = None
sqs_handle = sqs_message['ReceiptHandle']
metadata = dict(timestamp=timestamp)
msg_handle = MessageHandle(sqs_handle, payload, metadata)
msg_handles.append(msg_handle)
return msg_handles
def job_done(self, handle):
self.visibility_mgr.done(handle)
self.sqs_client.delete_message(
QueueUrl=self.queue_url,
ReceiptHandle=handle,
)
def job_progress(self, handle):
if self.visibility_mgr.should_extend(handle):
visibility_state = self.visibility_mgr.extend(handle)
try:
self.sqs_client.change_message_visibility(
QueueUrl=self.queue_url,
ReceiptHandle=handle,
VisibilityTimeout=self.visibility_mgr.extend_secs,
)
except Exception as e:
err_details = dict(
visibility=dict(
last=visibility_state.last.isoformat(),
total=visibility_state.total,
))
raise JobProgressException(
'update visibility timeout', e, err_details)
def clear(self):
n = 0
while True:
msgs = self.read()
if not msgs:
break
for msg in msgs:
self.job_done(msg.handle)
n += len(msgs)
return n
def close(self):
pass
def make_visibility_manager(extend_secs, max_extend_secs, timeout_secs):
visibility_mgr = VisibilityManager(extend_secs, max_extend_secs,
timeout_secs)
return visibility_mgr
def make_sqs_queue(name, region, visibility_mgr):
import boto3
sqs_client = boto3.client('sqs', region_name=region)
resp = sqs_client.get_queue_url(QueueName=name)
assert resp['ResponseMetadata']['HTTPStatusCode'] == 200, \
'Failed to get queue url for: %s' % name
queue_url = resp['QueueUrl']
read_size = 10
recv_wait_time_seconds = 20
return SqsQueue(sqs_client, queue_url, read_size, recv_wait_time_seconds,
visibility_mgr)
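# Illustrative sketch (not part of the original module; the queue name and
# region are placeholders): wiring the two factory helpers together.
#
#   vis_mgr = make_visibility_manager(extend_secs=30, max_extend_secs=600,
#                                     timeout_secs=60)
#   queue = make_sqs_queue('example-tile-jobs', 'us-east-1', vis_mgr)
#   queue.enqueue_batch(['payload-1', 'payload-2'])
#   for msg in queue.read() or []:
#       queue.job_done(msg.handle)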
|
Cadasta/ckanext-project
|
ckanext/project/logic/auth.py
|
Python
|
agpl-3.0
| 2,564
| 0
|
import ckan.plugins.toolkit as toolkit
import ckan.model as model
from ckanext.project.model import projectAdmin
import logging
log = logging.getLogger(__name__)
def _is_project_admin(context):
'''
Determines whether user in context is in the project admin list.
'''
user = context.get('user', '')
userobj = model.User.get(user)
return projectAdmin.is_user_project_admin(userobj)
def create(context, data_dict):
'''Create a project.
Only sysadmin or users listed as project Admins can create a project.
'''
return {'success': _is_project_admin(context)}
def delete(context, data_dict):
'''Delete a project.
Only sysadmin or users listed as project Admins can delete a project.
'''
return {'success': _is_project_admin(context)}
def update(context, data_dict):
'''Update a project.
Only sysadmin or users listed as project Admins can update a project.
'''
return {'success': _is_project_admin(context)}
@toolkit.auth_allow_anonymous_access
def show(context, data_dict):
'''All users can access a project show'''
return {'success': True}
@toolkit.auth_allow_anonymous_access
def list(context, data_dict):
    '''All users can access a project list'''
return {'success': True}
def package_association_create(context, data_dict):
'''Create a package project association.
Only sysadmins or user listed as project Admins can create a
package/project association.
    '''
return {'success': _is_project_admin(context)}
def package_association_delete(context, data_dict):
'''Delete a package project association.
Only sysadmins or user listed as project Admins can delete a
package/project association.
'''
return {'success': _is_project_admin(context)}
@toolkit.auth_allow_anonymous_access
def project_package_list(context, data_dict):
'''All users can access a project's package list'''
return {'success': True}
@toolkit.auth_allow_anonymous_access
def package_project_list(context, data_dict):
'''All users can access a packages's project list'''
return {'success': True}
def add_project_admin(context, data_dict):
'''Only sysadmins can add users to project admin list.'''
return {'success': False}
def remove_project_admin(context, data_dict):
'''Only sysadmins can remove users from project admin list.'''
return {'success': False}
def project_admin_list(context, data_dict):
'''Only sysadmins can list project admin users.'''
return {'success': False}
|
nion-software/nionswift
|
nion/swift/DataItemThumbnailWidget.py
|
Python
|
gpl-3.0
| 19,101
| 0.003508
|
from __future__ import annotations
# standard libraries
import asyncio
import typing
# third party libraries
import numpy.typing
# local libraries
from nion.data import Image
from nion.swift import MimeTypes
from nion.swift import Thumbnails
from nion.swift.model import DisplayItem
from nion.swift.model import DocumentModel
from nion.ui import CanvasItem
from nion.ui import UserInterface
from nion.ui import Widgets
from nion.utils import Geometry
if typing.TYPE_CHECKING:
from nion.swift.model import Persistence
from nion.ui import DrawingContext
from nion.ui import Window
from nion.utils import Binding
from nion.utils import Event
_ImageDataType = Image._ImageDataType
_NDArray = numpy.typing.NDArray[typing.Any]
class AbstractThumbnailSource:
def __init__(self) -> None:
self.on_thumbnail_data_changed: typing.Optional[typing.Callable[[typing.Optional[_NDArray]], None]] = None
self.__thumbnail_data: typing.Optional[_NDArray] = None
self.overlay_canvas_item: CanvasItem.AbstractCanvasItem = CanvasItem.EmptyCanvasItem()
def close(self) -> None:
self.on_thumbnail_data_changed = None
@property
def thumbnail_data(self) -> typing.Optional[_NDArray]:
return self.__thumbnail_data
def _set_thumbnail_data(self, thumbnail_data: typing.Optional[_NDArray]) -> None:
self.__thumbnail_data = thumbnail_data
def populate_mime_data_for_drag(self, mime_data: UserInterface.MimeData, size: Geometry.IntSize) -> typing.Tuple[bool, typing.Optional[_NDArray]]:
return False, None
class BitmapOverlayCanvasItem(CanvasItem.CanvasItemComposition):
def __init__(self) -> None:
super().__init__()
self.focusable = True
self.__dropping = False
self.__focused = False
self.wants_drag_events = True
self.wants_mouse_events = True
self.__drag_start: typing.Optional[Geometry.IntPoint] = None
self.on_drop_mime_data: typing.Optional[typing.Callable[[UserInterface.MimeData, int, int], str]] = None
self.on_delete: typing.Optional[typing.Callable[[], None]] = None
self.on_drag_pressed: typing.Optional[typing.Callable[[int, int, UserInterface.KeyboardModifiers], None]] = None
self.active = False
def close(self) -> None:
self.on_drop_mime_data = None
self.on_delete = None
self.on_drag_pressed = None
super().close()
@property
def focused(self) -> bool:
return self.__focused
def _set_focused(self, focused: bool) -> None:
if self.__focused != focused:
self.__focused = focused
self.update()
def _repaint(self, drawing_context: DrawingContext.DrawingContext) -> None:
super()._repaint(drawing_context)
# canvas size
canvas_size = self.canvas_size
if canvas_size:
focused_style = "#3876D6" # TODO: platform dependent
if self.active:
with drawing_context.saver():
drawing_context.begin_path()
drawing_context.round_rect(2, 2, 6, 6, 3)
drawing_context.fill_style = "rgba(0, 255, 0, 0.80)"
drawing_context.fill()
if self.__dropping:
with drawing_context.saver():
drawing_context.begin_path()
drawing_context.rect(0, 0, canvas_size.width, canvas_size.height)
drawing_context.fill_style = "rgba(255, 0, 0, 0.10)"
drawing_context.fill()
if self.focused:
stroke_style = focused_style
drawing_context.begin_path()
drawing_context.rect(2, 2, canvas_size.width - 4, canvas_size.height - 4)
drawing_context.line_join = "miter"
drawing_context.stroke_style = stroke_style
drawing_context.line_width = 4.0
drawing_context.stroke()
def drag_enter(self, mime_data: UserInterface.MimeData) -> str:
self.__dropping = True
self.update()
return "ignore"
def drag_leave(self) -> str:
self.__dropping = False
self.update()
return "ignore"
    def drop(self, mime_data: UserInterface.MimeData, x: int, y: int) -> str:
if callable(self.on_drop_mime_data):
result = self.on_drop_mime_data(mime_data, x, y)
if result:
return result
return super().drop(mime_data, x, y)
    def key_pressed(self, key: UserInterface.Key) -> bool:
if key.is_delete:
on_delete = self.on_delete
if callable(on_delete):
on_delete()
return True
return super().key_pressed(key)
def mouse_pressed(self, x: int, y: int, modifiers: UserInterface.KeyboardModifiers) -> bool:
self.__drag_start = Geometry.IntPoint(x=x, y=y)
return True
def mouse_released(self, x: int, y: int, modifiers: UserInterface.KeyboardModifiers) -> bool:
self.__drag_start = None
return True
def mouse_position_changed(self, x: int, y: int, modifiers: UserInterface.KeyboardModifiers) -> bool:
if self.__drag_start is not None and Geometry.distance(Geometry.FloatPoint(y, x), self.__drag_start.to_float_point()) > 2:
self.__drag_start = None
on_drag_pressed = self.on_drag_pressed
if on_drag_pressed:
on_drag_pressed(x, y, modifiers)
return True
return False
class ThumbnailCanvasItem(CanvasItem.CanvasItemComposition):
def __init__(self, ui: UserInterface.UserInterface, thumbnail_source: AbstractThumbnailSource, size: typing.Optional[Geometry.IntSize] = None) -> None:
super().__init__()
bitmap_overlay_canvas_item = BitmapOverlayCanvasItem()
bitmap_canvas_item = CanvasItem.BitmapCanvasItem(background_color="#CCC", border_color="#444")
bitmap_overlay_canvas_item.add_canvas_item(bitmap_canvas_item)
if size is not None:
bitmap_canvas_item.update_sizing(bitmap_canvas_item.sizing.with_fixed_size(size))
thumbnail_source.overlay_canvas_item.update_sizing(thumbnail_source.overlay_canvas_item.sizing.with_fixed_size(size))
bitmap_overlay_canvas_item.add_canvas_item(thumbnail_source.overlay_canvas_item)
self.__thumbnail_source = thumbnail_source
self.on_drag: typing.Optional[typing.Callable[[UserInterface.MimeData, typing.Optional[_ImageDataType], int, int], None]] = None
self.on_drop_mime_data: typing.Optional[typing.Callable[[UserInterface.MimeData, int, int], str]] = None
self.on_delete: typing.Optional[typing.Callable[[], None]] = None
def drag_pressed(x: int, y: int, modifiers: UserInterface.KeyboardModifiers) -> None:
on_drag = self.on_drag
if callable(on_drag):
mime_data = ui.create_mime_data()
valid, thumbnail = thumbnail_source.populate_mime_data_for_drag(mime_data, Geometry.IntSize(width=80, height=80))
if valid:
on_drag(mime_data, thumbnail, x, y)
def drop_mime_data(mime_data: UserInterface.MimeData, x: int, y: int) -> str:
if callable(self.on_drop_mime_data):
return self.on_drop_mime_data(mime_data, x, y)
return "ignore"
def delete() -> None:
on_delete = self.on_delete
if callable(on_delete):
on_delete()
bitmap_overlay_canvas_item.on_drag_pressed = drag_pressed
bitmap_overlay_canvas_item.on_drop_mime_data = drop_mime_data
bitmap_overlay_canvas_item.on_delete = delete
def thumbnail_data_changed(thumbnail_data: typing.Optional[_NDArray]) -> None:
bitmap_canvas_item.rgba_bitmap_data = thumbnail_data
self.__thumbnail_source.on_thumbnail_data_changed = thumbnail_data_changed
bitmap_canvas_item.rgba_bitmap_data = self.__thumbnail_source.thumbnail_data
self.add_canvas_item(bitmap_overlay_canvas_item)
def close(self) -> None:
self.__thumb
|
afaquejam/Grok-Python
|
MultiProcessing/multi_process.py
|
Python
|
gpl-2.0
| 947
| 0.004224
|
import random
import time
import multiprocessing
# This could be a consumer function, which is invoked when a new RabbitMQ message arrives.
def busy_process(id):
print("Processing message ID: " + str(id))
loop_count = 1000
for outer in range(0, loop_count):
        for inner in range(0, loop_count):
            random_sum = outer + inner
print("Finished processing the message bearing ID: " + str(id))
# This could be a callback function which the RabbitMQ invokes when a message arrives.
# However, we need to limit max. amount of concurrent threads that can execute.
def execute_busy_processes():
# Generating a new job id for the task.
job_id = random.randint(1, 100)
new_process = multiprocessing.Process(target=busy_process, args=(job_id,))
new_process.start()
if __name__ == "__main__":
# Simulate a new log message coming.
for new_message in range(0, 10000):
execute_busy_processes()
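# Illustrative sketch (not part of the original script; the pool size of 4 is an
# arbitrary example): the comment above asks to cap concurrency, which spawning
# one Process per message does not do; a fixed-size pool is one way to do it.
#
#   pool = multiprocessing.Pool(processes=4)
#   for new_message in range(0, 10000):
#       pool.apply_async(busy_process, (random.randint(1, 100),))
#   pool.close()
#   pool.join()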
|
william-richard/moto
|
moto/resourcegroupstaggingapi/urls.py
|
Python
|
apache-2.0
| 215
| 0
|
from __future__ import unicode_literals
from .responses import ResourceGroupsTaggingAPIResponse
url_bases = ["https?://tagging.(.+).amazonaws.com"]
url_paths = {"{0}/$": ResourceGroupsTaggingAPIResponse.dispatch}
|
cflq3/getcms
|
plugins/Webluker_cdn.py
|
Python
|
mit
| 127
| 0.007874
|
#!/usr/bin/env python
# encoding: utf-8
def run(whatweb, pluginname):
    whatweb.recog_from_header(pluginname, "Webluker")
|
YuxuanLing/trunk
|
trunk/code/study/python/core_python_appilication/ch13/stock.py
|
Python
|
gpl-3.0
| 485
| 0
|
#!/usr/bin/env python
from time import ctime
from urllib2 import urlopen
TICKs = ('yhoo', 'dell', 'cost', 'adbe', 'intc')
URL = 'http://quote.yahoo.com/d/quotes.csv?s=%s&f=sl1c1p2'
print '\nPrices quoted as of: %s\n' % ctime()
print 'TICKER', 'PRICE', 'CHANGE', '%AGE'
print '------', '-----', '------', '----'
u = urlopen(URL % ','.join(TICKs))
for row in u:
tick, price, chg, per = row.split(',')
print tick, '%.2f' % float(price), chg, per,
u.close()
|
wrshoemaker/ffpopsim
|
examples/genealogies_with_selection.py
|
Python
|
gpl-3.0
| 2,468
| 0.029173
|
import FFPopSim as h
import numpy as np
from matplotlib import pyplot as plt
import random as rd
from Bio import Phylo
print "This script is meant to illustrate and explore the effect of\n\
positive selection on genealogies in asexual and sexual populations. \n\n\
Simulations are performed using an infinite sites model with L segregating\n\
sites at which mutations with identical beneficial effect are injected.\n\n"
#suggested values
#neutral asexual:  N=100     s=0.00001   r=0.0
#selected asexual: N=10000 s=0.01 r=0.0
#selected sexual: N=1000 s=0.01 r=1.0
L = 1000 #number of segregating sites
s = 1e-2 #single site effect
N = 10000 #population size
r = 0.0 #outcrossing rate
sample_size=30 #number of individuals whose genealogy is looked at
nsamples = 3 #number of trees
burnin = 2000       #either ~5*N or 5/s, depending on whether coalescence is dominated by drift or draft
dt = 1000 #time between samples
#set up population, switch on infinite sites mode
pop=h.haploid_highd(L, all_polymorphic=True)
#set the population size via the carrying capacity
pop.carrying_capacity= N
#set the crossover rate, outcrossing_rate and recombination model
pop.outcrossing_rate = r
pop.recombination_model = h.CROSSOVERS
pop.crossover_rate = 1.0/pop.L
#set the effect sizes of the mutations that are injected (the same at each site in this case)
pop.set_fitness_additive(np.ones(L)*s)
#track the genealogy at a central locus L/2 (which one doesn't matter in the asexual case)
pop.track_locus_genealogy([L/2])
#initialize the populations
pop.set_wildtype(pop.carrying_capacity)
print "Population parameters:"
pop.status()
#burn in
print "\nEquilibrate:"
while pop.generation<burnin:
print "Burn in: at", pop.generation, "out of", burnin, "generations"
pop.evolve(100)
print "\nPlot coalescent trees:"
fig=plt.figure(figsize=(7,10))
fig.suptitle("".join(map(str,['N=',N,' r=',r,' L=',L, ' s=',s])), fontsize=18)
for si in xrange(nsamples):
print "sample",si,"out of",nsamples
#evolve a while before sampling the next tree
pop.evolve(dt)
#draw a sample from the population, convert its genealogy to a BioPython tree object and plot
tree = pop.genealogy.get_tree(L/2)
subtree = tree.create_subtree_from_keys(rd.sample(tree.leafs,sample_size)).to_Biopython_tree()
subtree.ladderize()
plt.subplot(3,1,si+1)
Phylo.draw(subtree,label_func=lambda x:"")
plt.draw()
plt.savefig("".join(map(str,['tree_', 'N=',N,'_r=',r,'_L=',L, '_s=',s,'.pdf'])))
|
henriquenogueira/aedes
|
aedes_server/core/migrations/0009_auto_20160329_2208.py
|
Python
|
mit
| 490
| 0.002041
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-03-30 01:08
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0008_report_photo'),
]
operations = [
migrations.AlterField(
model_name='report',
name='photo',
field=models.ImageField(blank=True, upload_to='/upload/%Y/%m/%d/', verbose_name='foto'),
),
]
|
manuelcortez/socializer
|
src/wxUI/dialogs/creation.py
|
Python
|
gpl-2.0
| 975
| 0.002051
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import wx
import widgetUtils
class audio_album(widgetUtils.BaseDialog):
def __init__(self, *args, **kwargs):
super(audio_album, self).__init__(title=_("Create a new album"), parent=None)
panel = wx.Panel(self)
sizer = wx.BoxSizer(wx.VERTICAL)
lbl = wx.StaticText(panel, wx.NewId(), _("Album title"))
self.title = wx.TextCtrl(panel, wx.NewId())
box = wx.BoxSizer(wx.HORIZONTAL)
box.Add(lbl, 1, wx.ALL, 5)
box.Add(self.title, 1, wx.ALL, 5)
sizer.Add(box, 1, wx.ALL, 5)
ok = wx.Button(panel, wx.ID_OK, _("&OK"))
ok.SetDefault()
cancel = wx.Button(panel, wx.ID_CANCEL, _("&Close"))
btnsizer = wx.BoxSizer()
btnsizer.Add(ok, 0, wx.ALL, 5)
btnsizer.Add(cancel, 0, wx.ALL, 5)
sizer.Add(btnsizer, 0, wx.ALL, 5)
panel.SetSizer(sizer)
  self.SetClientSize(sizer.CalcMin())
|
jnns/wagtail
|
wagtail/images/templatetags/wagtailimages_tags.py
|
Python
|
bsd-3-clause
| 4,585
| 0.002181
|
import re
from django import template
from django.core.exceptions import ImproperlyConfigured
from django.urls import NoReverseMatch
from django.utils.functional import cached_property
from wagtail.images.models import Filter
from wagtail.images.shortcuts import get_rendition_or_not_found
from wagtail.images.views.serve import generate_image_url
register = template.Library()
allowed_filter_pattern = re.compile(r"^[A-Za-z0-9_\-\.]+$")
@register.tag(name="image")
def image(parser, token):
bits = token.split_contents()[1:]
image_expr = parser.compile_filter(bits[0])
bits = bits[1:]
filter_specs = []
attrs = {}
output_var_name = None
as_context = False # if True, the next bit to be read is the output variable name
is_valid = True
for bit in bits:
if bit == 'as':
# token is of the form {% image self.photo max-320x200 as img %}
as_context = True
elif as_context:
if output_var_name is None:
output_var_name = bit
else:
# more than one item exists after 'as' - reject as invalid
is_valid = False
else:
try:
name, value = bit.split('=')
attrs[name] = parser.compile_filter(value) # setup to resolve context variables as value
except ValueError:
if allowed_filter_pattern.match(bit):
filter_specs.append(bit)
else:
raise template.TemplateSyntaxError(
"filter specs in 'image' tag may only contain A-Z, a-z, 0-9, dots, hyphens and underscores. "
"(given filter: {})".format(bit)
)
if as_context and output_var_name is None:
# context was introduced but no variable given ...
is_valid = False
if output_var_name and attrs:
# attributes are not valid when using the 'as img' form of the tag
is_valid = False
if len(filter_specs) == 0:
# there must always be at least one filter spec provided
is_valid = False
if len(bits) == 0:
# no resize rule provided eg. {% image page.image %}
raise template.TemplateSyntaxError(
"no resize rule provided. "
"'image' tag should be of the form {% image self.photo max-320x200 [ custom-attr=\"value\" ... ] %} "
"or {% image self.photo max-320x200 as img %}"
)
if is_valid:
return ImageNode(image_expr, '|'.join(filter_specs), attrs=attrs, output_var_name=output_var_name)
else:
raise template.TemplateSyntaxError(
"'image' tag should be of the form {% image self.photo max-320x200 [ custom-attr=\"value\" ... ] %} "
"or {% image self.photo max-320x200 as img %}"
)
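# Illustrative usage sketch (not part of the original module; the field name
# 'page.photo' and the filter specs are examples) of the two template forms
# that the parsing above accepts:
#
#   {% image page.photo max-320x200 class="cover" %}
#   {% image page.photo fill-80x80 as thumb %}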
class ImageNode(template.Node):
def __init__(self, image_expr, filter_spec, output_var_name=None, attrs={}):
self.image_expr = image_expr
self.output_var_name = output_var_name
self.attrs = attrs
self.filter_spec = filter_spec
@cached_property
def filter(self):
return Filter(spec=self.filter_spec)
def render(self, context):
try:
image = self.image_expr.resolve(context)
except template.VariableDoesNotExist:
return ''
if not image:
if self.output_var_name:
context[self.output_var_name] = None
return ''
if not hasattr(image, 'get_rendition'):
raise ValueError("image tag expected an Image object, got %r" % image)
rendition = get_rendition_or_not_found(image, self.filter)
if self.output_var_name:
# return the rendition object in the given variable
context[self.output_var_name] = rendition
return ''
else:
# render the rendition's image tag now
resolved_attrs = {}
for key in self.attrs:
resolved_attrs[key] = self.attrs[key].resolve(context)
return rendition.img_tag(resolved_attrs)
@register.simple_tag()
def image_url(image, filter_spec, viewname='wagtailimages_serve'):
try:
return generate_image_url(image, filter_spec, viewname)
except NoReverseMatch:
raise ImproperlyConfigured(
"'image_url' tag requires the " + viewname + " view to be configured. Please see "
"https://docs.wagtail.org/en/stable/advanced_topics/images/image_serve_view.html#setup for instructions."
)
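# Illustrative note (an addition, not part of the original module): the {% image %}
# tag above is roughly equivalent to the following Python, assuming ``page.photo``
# is a Wagtail Image instance and ``'max-320x200'`` is a valid filter spec:
#
#     rendition = get_rendition_or_not_found(page.photo, Filter(spec='max-320x200'))
#     html = rendition.img_tag({'class': 'cover'})  # same markup the rendered tag emits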
|
franklingu/leetcode-solutions
|
questions/random-pick-with-weight/Solution.py
|
Python
|
mit
| 1,988
| 0.003541
|
"""
You are given an array of positive integers w where w[i] describes the weight of ith index (0-indexed).
We need to call the function pickIndex() which randomly returns an integer in the range [0, w.length - 1]. pickIndex() should return the integer proportional to its weight in the w array. For example, for w = [1, 3], the probability of picking the index 0 is 1 / (1 + 3) = 0.25 (i.e. 25%) while the probability of picking the index 1 is 3 / (1 + 3) = 0.75 (i.e. 75%).
More formally, the probability of picking index i is w[i] / sum(w).
Example 1:
Input
["Solution","pickIndex"]
[[[1]],[]]
Output
[null,0]
Explanation
Solution solution = new Solution([1]);
solution.pickIndex(); // return 0. Since there is only one single element on the array the only option is to return the first element.
Example 2:
Input
["Solution","pickIndex","pickIndex","pickIndex","pickIndex","pickIndex"]
[[[1,3]],[],[],[],[],[]]
Output
[null,1,1,1,1,0]
Explanation
Solution solution = new Solution([1, 3]);
solution.pickIndex(); // return 1. It's returning the second element (index = 1) that has probability of 3/4.
solution.pickIndex(); // return 1
solution.pickIndex(); // return 1
solution.pickIndex(); // return 1
solution.pickIndex(); // return 0. It's returning the first element (index = 0) that has probability of 1/4.
Since this is a randomization problem, multiple answers are allowed so the following outputs can be considered correct :
[null,1,1,1,1,0]
[null,1,1,1,1,1]
[null,1,1,1,0,0]
[null,1,1,1,0,1]
[null,1,0,1,0,0]
......
and so on.
Constraints:
1 <= w.length <= 10000
1 <= w[i] <= 10^5
pickIndex will be called at most 10000 times.
"""
import bisect
import itertools
import random
from typing import List
class Solution:
def __init__(self, w: List[int]):
self.w = list(itertools.accumulate(w))
def pickIndex(self) -> int:
return bisect.bisect_left(self.w, random.randint(1, self.w[-1]))
# Your Solution object will be instantiated and called as such:
# obj = Solution(w)
# param_1 = obj.pickIndex()
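# Illustrative self-check of the prefix-sum approach (an addition, not part of the
# original solution): for w = [1, 3] the running sums are [1, 4]; a uniform draw r
# in 1..4 maps to index 0 only when r == 1 and to index 1 for r in {2, 3, 4},
# giving the required 1:3 ratio.
if __name__ == "__main__":
    from collections import Counter
    counts = Counter(Solution([1, 3]).pickIndex() for _ in range(10000))
    print(counts)  # expect roughly 2500 picks of index 0 and 7500 of index 1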
|
teeple/pns_server
|
work/install/Python-2.7.4/Misc/BeOS-setup.py
|
Python
|
gpl-2.0
| 23,664
| 0.008959
|
# Autodetecting setup.py script for building the Python extensions
#
# Modified for BeOS build. Donn Cave, March 27 2001.
__version__ = "special BeOS after 1.37"
import sys, os
from distutils import sysconfig
from distutils import text_file
from distutils.errors import *
from distutils.core import Extension, setup
from distutils.command.build_ext import build_ext
# This global variable is used to hold the list of modules to be disabled.
disabled_module_list = ['dbm', 'mmap', 'resource', 'nis']
def find_file(filename, std_dirs, paths):
"""Searches for the directory where a given file is located,
and returns a possibly-empty list of additional directories, or None
if the file couldn't be found at all.
'filename' is the name of a file, such as readline.h or libcrypto.a.
'std_dirs' is the list of standard system directories; if the
file is found in one of them, no additional directives are needed.
'paths' is a list of additional locations to check; if the file is
found in one of them, the resulting list will contain the directory.
"""
# Check the standard locations
for dir in std_dirs:
f = os.path.join(dir, filename)
if os.path.exists(f): return []
# Check the additional directories
for dir in paths:
f = os.path.join(dir, filename)
if os.path.exists(f):
return [dir]
# Not found anywhere
return None
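# For example (illustrative values): find_file('readline.h', ['/usr/include'], beincl)
# returns [] when the header lives in /usr/include, ['/boot/home/config/include'] when
# it is only found under the extra BeOS include path, and None when it is missing.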
def find_library_file(compiler, libname, std_dirs, paths):
filename = compiler.library_filename(libname, lib_type='shared')
result = find_file(filename, std_dirs, paths)
if result is not None: return result
filename = compiler.library_filename(libname, lib_type='static')
result = find_file(filename, std_dirs, paths)
return result
def module_enabled(extlist, modname):
"""Returns whether the module 'modname' is present in the list
of extensions 'extlist'."""
extlist = [ext for ext in extlist if ext.name == modname]
return len(extlist)
class PyBuildExt(build_ext):
def build_extensions(self):
# Detect which modules should be compiled
self.detect_modules()
# Remove modules that are present on the disabled list
self.extensions = [ext for ext in self.extensions
if ext.name not in disabled_module_list]
# Fix up the autodetected modules, prefixing all the source files
# with Modules/ and adding Python's include directory to the path.
(srcdir,) = sysconfig.get_config_vars('srcdir')
# Figure out the location of the source code for extension modules
moddir = os.path.join(os.getcwd(), srcdir, 'Modules')
moddir = os.path.normpath(moddir)
srcdir, tail = os.path.split(moddir)
srcdir = os.path.normpath(srcdir)
moddir = os.path.normpath(moddir)
# Fix up the paths for scripts, too
self.distribution.scripts = [os.path.join(srcdir, filename)
for filename in self.distribution.scripts]
for ext in self.extensions[:]:
ext.sources = [ os.path.join(moddir, filename)
for filename in ext.sources ]
ext.include_dirs.append( '.' ) # to get config.h
ext.include_dirs.append( os.path.join(srcdir, './Include') )
# If a module has already been built statically,
# don't build it here
if ext.name in sys.builtin_module_names:
self.extensions.remove(ext)
# Parse Modules/Setup to figure out which modules are turned
# on in the file.
input = text_file.TextFile('Modules/Setup', join_lines=1)
remove_modules = []
while 1:
line = input.readline()
if not line: break
line = line.split()
remove_modules.append( line[0] )
input.close()
for ext in self.extensions[:]:
if ext.name in remove_modules:
self.extensions.remove(ext)
# When you run "make CC=altcc" or something similar, you really want
# those environment variables passed into the setup.py phase. Here's
# a small set of useful ones.
compiler = os.environ.get('CC')
linker_so = os.environ.get('LDSHARED')
args = {}
# unfortunately, distutils doesn't let us provide separate C and C++
# compilers
if compiler is not None:
args['compiler_so'] = compiler
if linker_so is not None:
args['linker_so'] = linker_so + ' -shared'
self.compiler.set_executables(**args)
build_ext.build_extensions(self)
def build_extension(self, ext):
try:
build_ext.build_extension(self, ext)
except (CCompilerError, DistutilsError), why:
self.announce('WARNING: building of extension "%s" failed: %s' %
(ext.name, sys.exc_info()[1]))
def get_platform (self):
# Get value of sys.platform
platform = sys.platform
if platform[:6] =='cygwin':
platform = 'cygwin'
elif platform[:4] =='beos':
platform = 'beos'
return platform
def detect_modules(self):
try:
belibs = os.environ['BELIBRARIES'].split(';')
except KeyError:
belibs = ['/boot/beos/system/lib']
belibs.append('/boot/home/config/lib')
self.compiler.library_dirs.append('/boot/home/config/lib')
try:
beincl = os.environ['BEINCLUDES'].split(';')
except KeyError:
beincl = []
beincl.append('/boot/home/config/include')
self.compiler.include_dirs.append('/boot/home/config/include')
# lib_dirs and inc_dirs are used to search for files;
# if a file is found in one of those directories, it can
# be assumed that no additional -I,-L directives are needed.
lib_dirs = belibs
inc_dirs = beincl
exts = []
platform = self.get_platform()
# Check for MacOS X, which doesn't need libm.a at all
math_libs = ['m']
if platform in ['Darwin1.2', 'beos']:
math_libs = []
# XXX Omitted modules: gl, pure, dl, SGI-specific modules
#
# The following modules are all pretty straightforward, and compile
# on pretty much any POSIXish platform.
#
# Some modules that are normally always on:
exts.append( Extension('_weakref', ['_weakref.c']) )
exts.append( Extension('_symtable', ['symtablemodule.c']) )
# array objects
exts.append( Extension('array', ['arraymodule.c']) )
# complex math library functions
exts.append( Extension('cmath', ['cmathmodule.c'],
libraries=math_libs) )
# math library functions, e.g. sin()
exts.append( Extension('math', ['mathmodule.c'],
libraries=math_libs) )
# fast string operations implemented in C
exts.append( Extension('strop', ['stropmodule.c']) )
# time operations and variables
exts.append( Extension('time', ['timemodule.c'],
libraries=math_libs) )
# operator.add() and similar goodies
exts.append( Extension('operator', ['operator.c']) )
# access to the built-in codecs and codec registry
exts.append( Extension('_codecs', ['_codecsmodule.c']) )
# Python C API test module
exts.append( Extension('_testcapi', ['_testcapimodule.c']) )
# static Unicode character database
exts.append( Extension('unicodedata', ['unicodedata.c']) )
# access to ISO C locale support
exts.append( Extension('_locale', ['_localemodule.c']) )
# Modules with some UNIX dependencies -- on by default:
# (If you have a really backward UNIX, select and socket may not be
# supported...)
# fcntl(2) and ioctl(2)
exts.append( Extension('fcntl', ['fcntlmodule.c']) )
# pwd(3)
exts.append( Extension('pwd', ['pwdmodule.c']) )
|
kristinn/casanova
|
casanova/http.py
|
Python
|
mit
| 956
| 0.004184
|
import re
import BaseHTTPServer
class RequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def do_GET(self):
"""
Handle incoming GET requests.
We will only handle one request and an authorization_code GET parameter
must be present. We don't really care about the path being requested.
"""
try:
auth_code = re.findall("authorization_code=([\w\d]+)", self.path)[0]
except IndexError:
self.send_response(500)
return
self.send_response(200)
self.send_header("Content-Type", "text/plain")
self.end_headers()
self.wfile.write(auth_code)
class HTTPServer(object):
def __init__(self, host, port):
self.addr = (host, port)
def serve(self):
"""
Listen for the Bitcasa auth callback.
"""
httpd = BaseHTTPServer.HTTPServer(self.addr, RequestHandler)
httpd.handle_request()
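# Illustrative usage (host and port chosen here for the example): serve() blocks until
# the OAuth provider redirects the browser to something like
# http://localhost:8080/?authorization_code=abc123, at which point do_GET answers that
# single request with the extracted code and handle_request() returns.
#
#     HTTPServer("localhost", 8080).serve()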
|
sunyihuan326/DeltaLab
|
daily_learning/serving_learning/save_model.py
|
Python
|
mit
| 4,825
| 0.001658
|
# coding:utf-8
'''
created on 2018/8/22
@author:sunyihuan
'''
from __future__ import print_function
import os
import sys
# This is a placeholder for a Google-internal import.
import tensorflow as tf
import mnist_input_data
tf.app.flags.DEFINE_integer('training_iteration', 1000,
'number of training iterations.')
tf.app.flags.DEFINE_integer('model_version', 1, 'version number of the model.')
tf.app.flags.DEFINE_string('work_dir', '/tmp', 'Working directory.')
FLAGS = tf.app.flags.FLAGS
def main(_):
  if len(sys.argv) < 2 or sys.argv[-1].startswith('-'):
    print('Usage: mnist_saved_model.py [--training_iteration=x] '
'[--model_version=y] export_dir')
sys.exit(-1)
if FLAGS.training_iteration <= 0:
print('Please specify a positive value for training iteration.')
sys.exit(-1)
if FLAGS.model_version <= 0:
print('Please specify a positive value for version number.')
sys.exit(-1)
# Train model
print('Training model...')
mnist = mnist_input_data.read_data_sets(FLAGS.work_dir, one_hot=True)
sess = tf.InteractiveSession()
serialized_tf_example = tf.placeholder(tf.string, name='tf_example')
feature_configs = {'x': tf.FixedLenFeature(shape=[784], dtype=tf.float32), }
tf_example = tf.parse_example(serialized_tf_example, feature_configs)
x = tf.identity(tf_example['x'], name='x') # use tf.identity() to assign name
y_ = tf.placeholder('float', shape=[None, 10])
w = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
sess.run(tf.global_variables_initializer())
y = tf.nn.softmax(tf.matmul(x, w) + b, name='y')
cross_entropy = -tf.reduce_sum(y_ * tf.log(y))
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)
values, indices = tf.nn.top_k(y, 10)
table = tf.contrib.lookup.index_to_string_table_from_tensor(
tf.constant([str(i) for i in range(10)]))
prediction_classes = table.lookup(tf.to_int64(indices))
for _ in range(FLAGS.training_iteration):
batch = mnist.train.next_batch(50)
train_step.run(feed_dict={x: batch[0], y_: batch[1]})
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))
print('training accuracy %g' % sess.run(
accuracy, feed_dict={
x: mnist.test.images,
y_: mnist.test.labels
}))
print('Done training!')
# Export model
# WARNING(break-tutorial-inline-code): The following code snippet is
# in-lined in tutorials, please update tutorial documents accordingly
# whenever code changes.
export_path_base = sys.argv[-1]
export_path = os.path.join(
tf.compat.as_bytes(export_path_base),
tf.compat.as_bytes(str(FLAGS.model_version)))
print('Exporting trained model to', export_path)
builder = tf.saved_model.builder.SavedModelBuilder(export_path)
# Build the signature_def_map.
classification_inputs = tf.saved_model.utils.build_tensor_info(
serialized_tf_example)
classification_outputs_classes = tf.saved_model.utils.build_tensor_info(
prediction_classes)
classification_outputs_scores = tf.saved_model.utils.build_tensor_info(values)
classification_signature = (
tf.saved_model.signature_def_utils.build_signature_def(
inputs={
tf.saved_model.signature_constants.CLASSIFY_INPUTS:
classification_inputs
},
outputs={
tf.saved_model.signature_constants.CLASSIFY_OUTPUT_CLASSES:
classification_outputs_classes,
tf.saved_model.signature_constants.CLASSIFY_OUTPUT_SCORES:
classification_outputs_scores
},
method_name=tf.saved_model.signature_constants.CLASSIFY_METHOD_NAME))
tensor_info_x = tf.saved_model.utils.build_tensor_info(x)
tensor_info_y = tf.saved_model.utils.build_tensor_info(y)
prediction_signature = (
tf.saved_model.signature_def_utils.build_signature_def(
inputs={'images': tensor_info_x},
outputs={'scores': tensor_info_y},
method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME))
builder.add_meta_graph_and_variables(
sess, [tf.saved_model.tag_constants.SERVING],
signature_def_map={
'predict_images':
prediction_signature,
tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
classification_signature,
},
main_op=tf.tables_initializer(),
strip_default_attrs=True)
builder.save()
print('Done exporting!')
if __name__ == '__main__':
tf.app.run()
|
codeAshu/nn_ner
|
nn/interfaces.py
|
Python
|
mit
| 939
| 0.01065
|
# -*- coding: utf8 -*-
"""
@author: Matthias Feys (matthiasfeys@gmail.com)
@date: %(date)
"""
import theano
class Layer(object):
def __init__(self,name, params=None):
self.name=name
self.input = input
self.params = []
if params!=None:
self.setParams(params=params.__dict__.get(name))
else:
self.initParams()
def __getstate__(self):
params = {}
for param in self.params:
params[param.name] = param.get_value()
return params
def setParams(self,params):
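        # Saved parameters are keyed as "<attribute>_<layer name>" (see __getstate__);
        # strip that trailing "_<layer name>" suffix to recover the plain attribute name.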
for pname,param in params.__dict__.iteritems():
self.__dict__[pname[:-(len(self.name)+1)]] = theano.shared(param, name=pname[:-(len(self.name)+1)]+'_'+self.name, borrow=True)
    def initParams(self):
raise NotImplementedError
class Network():
def __getstate__(self):
return dict([(layer.name,layer) for layer in self.layers])
|
scopatz/regolith
|
tests/test_validators.py
|
Python
|
cc0-1.0
| 876
| 0
|
from collections.abc import Sequence
import pytest
from regolith.schemas import SCHEMAS, validate, EXEMPLARS
from pprint import pprint
@pytest.mark.parametrize("key", SCHEMAS.keys())
def test_validation(key):
if isinstance(EXEMPLARS[key], Sequence):
for e in EXEMPLARS[key]:
            validate(key, e, SCHEMAS)
else:
validate(key, EXEMPLARS[key], SCHEMAS)
@pytest.mark.parametrize("key", SCHEMAS.keys())
def test_exemplars(key):
if isinstance(EXEMPLARS[key], Sequence):
for e in EXEMPLARS[key]:
v = validate(key, e, SCHEMAS)
assert v[0]
else:
v = validate(key, EXEMPLARS[key], SCHEMAS)
if not v[0]:
for vv, reason in v[1].items():
print(vv, reason)
print(type(EXEMPLARS[key][vv]))
pprint(EXEMPLARS[key][vv])
assert v[0]
|
grlee77/nipype
|
nipype/interfaces/elastix/tests/test_auto_Registration.py
|
Python
|
bsd-3-clause
| 1,483
| 0.024275
|
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from nipype.testing import assert_equal
from nipype.interfaces.elastix.registration import Registration
def test_Registration_inputs():
input_map = dict(args=dict(argstr='%s',
),
environ=dict(nohash=True,
usedefault=True,
),
fixed_image=dict(argstr='-f %s',
mandatory=True,
),
fixed_mask=dict(argstr='-fMask %s',
),
    ignore_exception=dict(nohash=True,
usedefault=True,
),
initial_transform=dict(argstr='-t0 %s',
),
moving_image=dict(argstr='-m %s',
mandatory=True,
),
moving_mask=dict(argstr='-mMask %s',
),
num_threads=dict(argstr='-threads %01d',
),
output_path=dict(argstr='-out %s',
mandatory=True,
usedefault=True,
),
parameters=dict(argstr='-p %s...',
mandatory=True,
),
terminal_output=dict(nohash=True,
),
)
inputs = Registration.input_spec()
for key, metadata in input_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_Registration_outputs():
output_map = dict(transform=dict(),
warped_file=dict(),
warped_files=dict(),
warped_files_flags=dict(),
)
outputs = Registration.output_spec()
for key, metadata in output_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(outputs.traits()[key], metakey), value
|
danielfm/pydozeoff
|
ez_setup.py
|
Python
|
bsd-3-clause
| 10,286
| 0.004667
|
#!python
"""Bootstrap setuptools installation
If you want to use setuptools in your package's setup.py, just include this
file in the same directory with it, and add this to the top of your setup.py::
from ez_setup import use_setuptools
use_setuptools()
If you want to require a specific version of setuptools, set a download
mirror, or use an alternate download directory, you can do so by supplying
the appropriate options to ``use_setuptools()``.
This file can also be run as a script to install or upgrade setuptools.
"""
import sys
DEFAULT_VERSION = "0.6c11"
DEFAULT_URL = "http://pypi.python.org/packages/%s/s/setuptools/" % sys.version[:3]
md5_data = {
'setuptools-0.6b1-py2.3.egg': '8822caf901250d848b996b7f25c6e6ca',
'setuptools-0.6b1-py2.4.egg': 'b79a8a403e4502fbb85ee3f1941735cb',
'setuptools-0.6b2-py2.3.egg': '5657759d8a6d8fc44070a9d07272d99b',
'setuptools-0.6b2-py2.4.egg': '4996a8d169d2be661fa32a6e52e4f82a',
'setuptools-0.6b3-py2.3.egg': 'bb31c0fc7399a63579975cad9f5a0618',
'setuptools-0.6b3-py2.4.egg': '38a8c6b3d6ecd22247f179f7da669fac',
'setuptools-0.6b4-py2.3.egg': '62045a24ed4e1ebc77fe039aa4e6f7e5',
'setuptools-0.6b4-py2.4.egg': '4cb2a185d228dacffb2d17f103b3b1c4',
'setuptools-0.6c1-py2.3.egg': 'b3f2b5539d65cb7f74ad79127f1a908c',
'setuptools-0.6c1-py2.4.egg': 'b45adeda0667d2d2ffe14009364f2a4b',
'setuptools-0.6c10-py2.3.egg': 'ce1e2ab5d3a0256456d9fc13800a7090',
'setuptools-0.6c10-py2.4.egg': '57d6d9d6e9b80772c59a53a8433a5dd4',
'setuptools-0.6c10-py2.5.egg': 'de46ac8b1c97c895572e5e8596aeb8c7',
'setuptools-0.6c10-py2.6.egg': '58ea40aef06da02ce641495523a0b7f5',
'setuptools-0.6c11-py2.3.egg': '2baeac6e13d414a9d28e7ba5b5a596de',
'setuptools-0.6c11-py2.4.egg': 'bd639f9b0eac4c42497034dec2ec0c2b',
'setuptools-0.6c11-py2.5.egg': '64c94f3bf7a72a13ec83e0b24f2749b2',
'setuptools-0.6c11-py2.6.egg': 'bfa92100bd772d5a213eedd356d64086',
'setuptools-0.6c2-py2.3.egg': 'f0064bf6aa2b7d0f3ba0b43f20817c27',
'setuptools-0.6c2-py2.4.egg': '616192eec35f47e8ea16cd6a122b7277',
'setuptools-0.6c3-py2.3.egg': 'f181fa125dfe85a259c9cd6f1d7b78fa',
'setuptools-0.6c3-py2.4.egg': 'e0ed74682c998bfb73bf803a50e7b71e',
'setuptools-0.6c3-py2.5.egg': 'abef16fdd61955514841c7c6bd98965e',
'setuptools-0.6c4-py2.3.egg': 'b0b9131acab32022bfac7f44c5d7971f',
'setuptools-0.6c4-py2.4.egg': '2a1f9656d4fbf3c97bf946c0a124e6e2',
'setuptools-0.6c4-py2.5.egg': '8f5a052e32cdb9c72bcf4b5526f28afc',
'setuptools-0.6c5-py2.3.egg': 'ee9fd80965da04f2f3e6b3576e9d8167',
'setuptools-0.6c5-py2.4.egg': 'afe2adf1c01701ee841761f5bcd8aa64',
'setuptools-0.6c5-py2.5.egg': 'a8d3f61494ccaa8714dfed37bccd3d5d',
'setuptools-0.6c6-py2.3.egg': '35686b78116a668847237b69d549ec20',
'setuptools-0.6c6-py2.4.egg': '3c56af57be3225019260a644430065ab',
'setuptools-0.6c6-py2.5.egg': 'b2f8a7520709a5b34f80946de5f02f53',
'setuptools-0.6c7-py2.3.egg': '209fdf9adc3a615e5115b725658e13e2',
'setuptools-0.6c7-py2.4.egg': '5a8f954807d46a0fb67cf1f26c55a82e',
'setuptools-0.6c7-py2.5.egg': '45d2ad28f9750e7434111fde831e8372',
'setuptools-0.6c8-py2.3.egg': '50759d29b349db8cfd807ba8303f1902',
'setuptools-0.6c8-py2.4.egg': 'cba38d74f7d483c06e9daa6070cce6de',
'setuptools-0.6c8-py2.5.egg': '1721747ee329dc150590a58b3e1ac95b',
'setuptools-0.6c9-py2.3.egg': 'a83c4020414807b496e4cfbe08507c03',
'setuptools-0.6c9-py2.4.egg': '260a2be2e5388d66bdaee06abec6342a',
'setuptools-0.6c9-py2.5.egg': 'fe67c3e5a17b12c0e7c541b7ea43a8e6',
'setuptools-0.6c9-py2.6.egg': 'ca37b1ff16fa2ede6e19383e7b59245a',
}
import sys, os
try: from hashlib import md5
except ImportError: from md5 import md5
def _validate_md5(egg_name, data):
if egg_name in md5_data:
digest = md5(data).hexdigest()
if digest != md5_data[egg_name]:
print >>sys.stderr, (
"md5 validation of %s failed! (Possible download problem?)"
% egg_name
)
sys.exit(2)
return data
def use_setuptools(
version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
download_delay=15
):
"""Automatically find/download setuptools and make it available on sys.path
`version` should be a valid setuptools version number that is available
as an egg for download under the `download_base` URL (which should end with
a '/'). `to_dir` is the directory where setuptools will be downloaded, if
it is not already available. If `download_delay` is specified, it should
be the number of seconds that will be paused before initiating a download,
should one be required. If an older version of setuptools is installed,
this routine will print a message to ``sys.stderr`` and raise SystemExit in
an attempt to abort the calling script.
"""
was_imported = 'pkg_resources' in sys.modules or 'setuptools' in sys.modules
def do_download():
egg = download_setuptools(version, download_base, to_dir, download_delay)
sys.path.insert(0, egg)
import setuptools; setuptools.bootstrap_install_from = egg
try:
import pkg_resources
except ImportError:
return do_download()
try:
pkg_resources.require("setuptools>="+version); return
except pkg_resources.VersionConflict, e:
if was_imported:
print >>sys.stderr, (
"The required version of setuptools (>=%s) is not available, and\n"
"can't be installed while this script is running. Please install\n"
" a more recent version first, using 'easy_install -U setuptools'."
"\n\n(Currently using %r)"
) % (version, e.args[0])
sys.exit(2)
else:
del pkg_resources, sys.modules['pkg_resources'] # reload ok
return do_download()
except pkg_resources.DistributionNotFound:
return do_download()
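# Illustrative call, mirroring the module docstring: a setup.py that needs setuptools
# can simply do
#
#     from ez_setup import use_setuptools
#     use_setuptools(version="0.6c11")
#
# optionally pointing ``download_base`` at a local mirror of the eggs.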
def download_setuptools(
version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
delay = 15
):
"""Download setuptools from a specified location and return its filename
`version` should be a valid setuptools version number that is available
as an egg for download under the `download_base` URL (which should end
with a '/'). `to_dir` is the directory where the egg will be downloaded.
`delay` is the number of seconds to pause before an actual download attempt.
"""
import urllib2, shutil
egg_name = "setuptools-%s-py%s.egg" % (version,sys.version[:3])
url = download_base + egg_name
saveto = os.path.join(to_dir, egg_name)
src = dst = None
if not os.path.exists(saveto): # Avoid repeated downloads
        try:
from distutils import log
if delay:
log.warn("""
---------------------------------------------------------------------------
This script requires setuptools version %s to run (even to display
help). I will attempt to download it for you (from
%s), but
you may need to enable firewall access for this script first.
I will start the download in %d seconds.
(Note: if this machine does not have network access, please obtain the file
%s
and place it in this directory before rerunning this script.)
---------------------------------------------------------------------------""",
version, download_base, delay, url
); from time import sleep; sleep(delay)
log.warn("Downloading %s", url)
src = urllib2.urlopen(url)
# Read/write all in one block, so we don't create a corrupt file
# if the download is interrupted.
data = _validate_md5(egg_name, src.read())
dst = open(saveto,"wb"); dst.write(data)
finally:
if src: src.close()
if dst: dst.close()
return os.path.realpath(saveto)
def main(argv, version=DEFAULT_VERSION):
"""Install or upgrade setuptools and EasyInstall"""
try:
import setuptools
except ImportError:
egg = None
try:
egg = download_setuptools(version, delay=0)
|
unitpoint/oxygine-objectscript
|
tools/src2/xml_processor.py
|
Python
|
mit
| 9,313
| 0.01267
|
from xml.dom import minidom
import os
import shutil
import process_atlas
import process_font
import process_starling_atlas
import oxygine_helper
class XmlWalker:
def __init__(self, src, folder, scale_factor, node, meta_node, scale_quality):
self.base = folder
self.path = folder
self.scale_factor = scale_factor
self.root = node
self.last = None
self.root_meta = meta_node
self.last_meta = None
self.src = src
self.scale_quality = scale_quality
self.checkSetAttributes()
def getType(self):
return self.root
def getPath(self, attr):
path = self.root.getAttribute(attr)
if path.startswith("./") or path.startswith(".\\"):
path = path[2:len(path)]
return self.path + path
def setSrcFullPath(self, path):
return self.src + path
def checkSetAttributes(self):
self._checkSetAttributes(self.root)
def _checkSetAttributes(self, node):
path = node.getAttribute("path")
if path:
if 0:
path = ""
if path.startswith("./") or path.startswith(".\\"):
path = self.base + path[2:len(path)]
self.path = path + "/"
scale_factor = node.getAttribute("scale_factor")
if scale_factor:
self.scale_factor = float(scale_factor)
scale_quality = node.getAttribute("scale_quality")
if scale_quality:
self.scale_quality = float(scale_quality)
def next(self):
while True:
if not self.last:
if len(self.root.childNodes) == 0:
return None
self.last = self.root.childNodes[0]
else:
self.last = self.last.nextSibling
if not self.last:
return None
if self.last.nodeType == self.last.TEXT_NODE:
continue
if self.last.nodeType == self.last.COMMENT_NODE:
continue
meta = self.root_meta.ownerDocument.createElement(self.last.nodeName)
self.root_meta.appendChild(meta)
self.last_meta = meta
if self.last.nodeName == "set":
self._checkSetAttributes(self.last)
continue
break
return XmlWalker(self.src, self.path, self.scale_factor, self.last, self.last_meta, self.scale_quality)
class XmlProcessor:
def __init__(self, args):
self.src_data = args.src_data + "/"
self.dest_data = args.dest_data + "/"
self.compression = args.compression.lower()
#self.etc1tool = args.android_sdk + "\\tools\\etc1tool.exe "
#if self.compression == "etc1":
# if not os.path.exists(self.etc1tool):
# raise Exception("can't find etc1tool. please pass correct path to android_sdk")
self.path_xml = args.xml
self.xml_name = os.path.split(self.path_xml)[1]
self.atlas_group_id = 0
self.args = args
self.verbosity = args.verbosity
self.warnings = 0
self.errors = 0
#self.scale_factor = 1.0
#self.scale_quality = 1.0
self.scale = args.scale
self.debug = args.debug
self.processors = {}
#self.path_current = ""
self._meta_doc = None
#self._meta_element = None
self.helper = oxygine_helper.helper(os.path.split(__file__)[0] + "/../../")
self.register_processor(process_font.bmfc_font_Processor())
self.register_processor(process_font.font_Processor())
        self.register_processor(process_atlas.atlas_Processor())
self.register_processor(process_starling_atlas.starling_atlas_Processor())
self._current_processor = None
def register_processor(self, processor):
self.processors[processor.node_id] = processor
def get_apply_scale(self, applyScaleFactor, walker):
"""
        Returns the scale that should be applied to the image.
"""
v = self.scale * walker.scale_quality
if applyScaleFactor:
v *= walker.scale_factor
return v
"""
def add_meta(self, node_id = ""):
if not node_id:
node_id = self._current_processor.node_id
meta = self._meta_doc.createElement(node_id)
self._meta_element.appendChild(meta)
return meta
def get_meta_doc(self):
return self._meta_doc
"""
"""
def _process_set(self, el):
path = el.getAttribute("path")
if path:
if path.startswith(".\\") or path.startswith("./"):
path = self.path_current + path
path = os.path.normpath(path) + "/"
self.path_current = path
scale_factor = el.getAttribute("scale_factor")
if scale_factor:
self.scale_factor = float(scale_factor)
scale_quality = el.getAttribute("scale_quality")
if scale_quality:
self.scale_quality = float(scale_quality)
self.add_meta("set");
"""
def _open_xml(self, path):
with open(path, "r") as file:
font_doc = minidom.parse(file)
return font_doc.documentElement
def _get_src_path(self, local_path):
return self.src_data + local_path
def _get_dest_path(self, local_path):
return self.dest_data + local_path
def _get_meta_xml_path(self, local_path):
return self._get_dest_path(self.xml_name) + ".ox" + "/" + local_path
"""
def get_current_src_path(self, local = ""):
return self._get_src_path(self.path_current + local)
"""
def get_inner_dest(self, inner_local_path = ""):
return self._get_meta_xml_path(self._current_processor.node_id + "/" + inner_local_path)
def log(self, st):
print st
def warning(self, st):
if self.args.warnings:
print "warning: " + st
def error(self, st):
print "error: " + st
def process(self):
#print self.path_data
#print self.path_xml
#print self.path_atlasses
try:
nm = self._get_src_path(self.path_xml)
file = open(nm, "r")
except IOError:
print "can't open file: " + nm
return
doc = minidom.parse(file)
del file
self._meta_doc = minidom.Document()
meta_element = self._meta_doc.createElement("resources")
self._meta_doc.appendChild(meta_element)
totalAtlasses = 0
folder = self._get_meta_xml_path("")
shutil.rmtree(folder, True)
try:
os.makedirs(folder)
except OSError:
pass
xml_folder = os.path.split(self.path_xml)[0] + "/"
walker = XmlWalker(self.src_data, xml_folder, 1.0, doc.documentElement, meta_element, 1.0)
while True:
next = walker.next();
if not next:
break
name = next.root.nodeName
if name in self.processors:
proc = self.processors[name]
self._current_processor = proc
try:
if proc.create_folder:
os.makedirs(self.get_inner_dest(""))
except OSError:
pass
proc.process(self, next)
"""
for el in doc.documentElement.childNodes:
name = el.nodeName
if name in self.processors:
proc = self.proc
|
coringao/capitan
|
dependencies/alfont/freetype/src/autohint/mather.py
|
Python
|
gpl-3.0
| 1,699
| 0.021189
|
#!/usr/bin/env python
#
#
# autohint math table builder
#
# Copyright 1996-2000 by
# David Turner, Robert Wilhelm, and Werner Lemberg.
#
# This file is part of the FreeType project, and may only be used, modified,
# and distributed under the terms of the FreeType project license,
# LICENSE.TXT. By continuing to use, modify, or distribute this file you
# indicate that you have read the license and understand and accept it
# fully.
import math
ag_pi = 256
def print_arctan( atan_bits ):
atan_base = 1 << atan_bits
print " static AH_Angle ag_
|
arctan[1L << AG_ATAN_BITS] ="
print " {"
count = 0
line = " "
for n in range( atan_base ):
comma = ","
if ( n == atan_base - 1 ):
comma = ""
angle = math.atan( n * 1.0 / atan_base ) / math.pi * ag_pi
line = line + " " + repr( int( angle + 0.5 ) ) + comma
count = count + 1;
if ( count == 8 ):
count = 0
print line
line = " "
if ( count > 0 ):
print line
print " };"
# This routine is not used currently.
#
def print_sines():
print " static FT_Fixed ah_sines[AG_HALF_PI + 1] ="
print " {"
count = 0
line = " "
for n in range( ag_pi / 2 ):
sinus = math.sin( n * math.pi / ag_pi )
line = line + " " + repr( int( 65536.0 * sinus ) ) + ","
count = count + 1
if ( count == 8 ):
count = 0
print line
line = " "
if ( count > 0 ):
print line
print " 65536"
print " };"
print_arctan( 8 )
print
# END
|
myles/pyconfluence
|
src/pyconfluence/confluence.py
|
Python
|
bsd-3-clause
| 2,731
| 0.034053
|
import os
import logging
import ConfigParser
from optparse import OptionParser
from pyconfluence import __version__, Confluence, ConfluenceException
USAGE = """%prog [options]
get_server_info:\t Retrieve some basic information about the server connected to.
get_spaces:\t\t All Space Summaries that the current user can see.
get_space:\t\t Returns a single space."""
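# Example invocation (illustrative; the installed console-script name may differ):
#   python confluence.py -c https://wiki.example.com -u alice -p secret get_spaces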
SPACE_NOT_FOUND_OR_AUTH = "You are not allowed to view %(key)s space, or it doesn't exist."
def main():
parser = OptionParser(usage=USAGE, version=__version__)
parser.add_option('-c', '--confluence', action='store', dest='confluence_url',
help="the confluence url.")
parser.add_option('-u', '--username', action='store', dest='username',
help="the username to use for authentication.")
parser.add_option('-p', '--password', action='store', dest='password',
help="the password to use for authentication.")
parser.add_option('-v', '--verbosity', action='store', dest='verbosity',
default='1', type='choice', choices=['0', '1', '2'],
help='Verbosity level; 0=minimal output, 1=normal output, 2=all output')
parser.set_defaults(input='-')
options, args = parser.parse_args()
    level = {'0': logging.WARN, '1': logging.INFO, '2': logging.DEBUG}[options.verbosity]
logging.basicConfig(level=level, format="%(name)s: %(levelname)s: %(message)s")
log = logging.getLogger('pyconfluence')
if not args:
log.warn("You didn't tell me anything.") # FIXME
return
    config = ConfigParser.ConfigParser()
config.read(os.path.expanduser("~/.pyconfluence.ini"))
confluence_url = options.confluence_url
username = options.username
password = options.password
if config.get('pyconfluence', 'url'):
confluence_url = config.get('pyconfluence', 'url')
if config.get('pyconfluence', 'username'):
username = config.get('pyconfluence', 'username')
if config.get('pyconfluence', 'password'):
password = config.get('pyconfluence', 'password')
try:
c = Confluence(confluence_url, username, password)
except ConfluenceException, err:
log.warn("Their was an error connecting to the server %(url)s using the username %(username)s.\n%(err)s" % {'url': confluence_url, 'username': username, 'err': err})
if args[0] == 'get_server_info':
print c.get_server_info()
if args[0] == 'get_spaces':
for space in c.get_spaces():
print "%(key)s: %(name)s" % {
'key': space.key,
'name': space.name,
'url': space.url
}
if args[0] == 'get_space':
try:
space_key = args[1]
except IndexError:
space_key = None
if not space_key:
space_key = raw_input("> ")
try:
print c.get_space(space_key)
except ConfluenceException:
log.warn(SPACE_NOT_FOUND_OR_AUTH % {'key': space_key})
return c.logout()
|
karstenw/nodebox-pyobjc
|
setup_large.py
|
Python
|
mit
| 4,542
| 0.007266
|
"""
Script for building NodeBox
Usage:
python setup.py py2app
"""
from distutils.core import setup
from setuptools.extension import Extension
import py2app
import nodebox
NAME = 'NodeBox extended'
VERSION = nodebox.__version__
AUTHOR = "Frederik De Bleser",
AUTHOR_EMAIL = "frederik@pandora.be",
URL = "http://nodebox.net/",
CLASSIFIERS = (
"Development Status :: 5 - Production/Stable",
"Environment :: MacOS X :: Cocoa",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: End Users/Desktop",
"License :: OSI Approved :: MIT License",
"Operating System :: MacOS :: MacOS X",
"Programming Language :: Python",
"Topic :: Artistic Software",
"Topic :: Multimedia :: Graphics",
"Topic :: Multimedia :: Graphics :: Editors :: Vector-Based",
"Topic :: Multimedia :: Video",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Software Development :: User Interfaces",
"Topic :: Text Editors :: Integrated Development Environments (IDE)",
)
DESCRIPTION = (u"Simple application for creating 2-dimensional graphics "
u"and animation using Python code")
LONG_DESCRIPTION = u"""NodeBox is a Mac OS X application that allows you to create visual output
with programming code. The application targets an audience of designers, with an easy set of state
commands that is both intuitive and creative. It is essentially a learning environment and an automation tool.
The current version features:
* State-based graphics context
* Extensive reference documentation and tutorials
* PDF export for graphics
* QuickTime export for animations
* Manipulate every numeric variable in a script by command-dragging it, even during animation
* Creating simple user interfaces using text fields, sliders, and buttons
* Stop a running script by typing command-period
* Universal Binary
* Integrated bezier mathematics and boolean operations
* Command-line interface
* Zooming
"""
creator = 'NdBx'
bundleID = "net.nodebox.NodeBox"
setup(
name = NAME,
version = VERSION,
description = DESCRIPTION,
long_description = LONG_DESCRIPTION,
author = AUTHOR,
author_email = AUTHOR_EMAIL,
url = URL,
classifiers = CLASSIFIERS,
app=[{
'script': "macboot.py",
"plist": {
"NSPrincipalClass": 'NSApplication',
"CFBundleIdentifier": bundleID,
"CFBundleName": NAME,
"CFBundleSignature": creator,
"CFBundleShortVersionString": VERSION,
"CFBundleGetInfoString": DESCRIPTION,
"NSHumanReadableCopyright": "Copyright (c) 2015 Frederik De Bleser",
'CFBundleDocumentTypes': [
{
'CFBundleTypeExtensions': [ 'py', 'bot' ],
'CFBundleTypeIconFile': 'NodeBoxFile.icns',
'CFBundleTypeName': "Python File",
'CFBundleTypeOSTypes': [ '????', '****', 'utxt'],
'CFBundleTypeRole': 'Editor',
'NSDocumentClass': u'NodeBoxDocument',
}
]
}
}],
data_files=[
"Resources/English.lproj/AskString.xib",
"Resources/English.lproj/Credits.rtf",
"Resources/English.lproj/ExportImageAccessory.xib",
"Resources/English.lproj/ExportMovieAccessory.xib",
"Resources/English.lproj/MainMenu.xib",
"Resources/English.lproj/NodeBoxDocument.xib",
"Resources/English.lproj/NodeBoxPreferences.xib",
"Resources/English.lproj/ProgressBarSheet.xib",
"Resources/NodeBox.icns",
"Resources/NodeBoxFile.icns",
"Resources/zoombig.png",
"Resources/zoomsmall.png"
],
ext_modules=[
Extension('bwdithering', ['libs/bwdithering/bwdithering.c']),
Extension('fractal', ['libs/fractal/fractal.c']),
Extension('cGeo', ['libs/cGeo/cGeo.c']),
Extension('cPathmatics', ['libs/pathmatics/pathmatics.c']),
Extension('cPolymagic', ['libs/polymagic/gpc.c', 'libs/polymagic/polymagic.m'],
extra_link_args=['-framework', 'AppKit', '-framework', 'Foundation'])
],
options={
"py2app": {
"iconfile": "Resources/NodeBoxExtended.icns",
"packages": [ "numpy", "scipy", "matplotlib",
"mpl_toolkits", "sklearn", "sympy", "pandas",
"cv2", "dlib", "skimage"],
"excludes": ["TkInter",],
}
} )
|
kubernetes-client/python
|
kubernetes/client/models/v1beta1_limited_priority_level_configuration.py
|
Python
|
apache-2.0
| 6,329
| 0
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.23
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1beta1LimitedPriorityLevelConfiguration(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'assured_concurrency_shares': 'int',
'limit_response': 'V1beta1LimitResponse'
}
attribute_map = {
'assured_concurrency_shares': 'assuredConcurrencyShares',
'limit_response': 'limitResponse'
}
def __init__(self, assured_concurrency_shares=None, limit_response=None, local_vars_configuration=None): # noqa: E501
"""V1beta1LimitedPriorityLevelConfiguration - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._assured_concurrency_shares = None
self._limit_response = None
self.discriminator = None
if assured_concurrency_shares is not None:
self.assured_concurrency_shares = assured_concurrency_shares
if limit_response is not None:
self.limit_response = limit_response
@property
def assured_concurrency_shares(self):
"""Gets the assured_concurrency_shares of this V1beta1LimitedPriorityLevelConfiguration. # noqa: E501
        `assuredConcurrencyShares` (ACS) configures the execution limit, which is a limit on the number of requests of this priority level that may be executing at a given time. ACS must be a positive number. The server's concurrency limit (SCL) is divided among the concurrency-controlled priority levels in proportion to their assured concurrency shares. This produces the assured concurrency value (ACV) --- the number of requests that may be executing at a time --- for each such priority level: ACV(l) = ceil( SCL * ACS(l) / ( sum[priority levels k] ACS(k) ) ) bigger numbers of ACS mean more reserved concurrent requests (at the expense of every other PL). This field has a default value of 30. # noqa: E501
:return: The assured_concurrency_shares of this V1beta1LimitedPriorityLevelConfiguration. # noqa: E501
:rtype: int
"""
return self._assured_concurrency_shares
@assured_concurrency_shares.setter
def assured_concurrency_shares(self, assured_concurrency_shares):
"""Sets the assured_concurrency_shares of this V1beta1LimitedPriorityLevelConfiguration.
        `assuredConcurrencyShares` (ACS) configures the execution limit, which is a limit on the number of requests of this priority level that may be executing at a given time. ACS must be a positive number. The server's concurrency limit (SCL) is divided among the concurrency-controlled priority levels in proportion to their assured concurrency shares. This produces the assured concurrency value (ACV) --- the number of requests that may be executing at a time --- for each such priority level: ACV(l) = ceil( SCL * ACS(l) / ( sum[priority levels k] ACS(k) ) ) bigger numbers of ACS mean more reserved concurrent requests (at the expense of every other PL). This field has a default value of 30. # noqa: E501
        :param assured_concurrency_shares: The assured_concurrency_shares of this V1beta1LimitedPriorityLevelConfiguration. # noqa: E501
:type: int
"""
self._assured_concurrency_shares = assured_concurrency_shares
@property
def limit_response(self):
"""Gets the limit_response of this V1beta1LimitedPriorityLevelConfiguration. # noqa: E501
:return: The limit_response of this V1beta1LimitedPriorityLevelConfiguration. # noqa: E501
:rtype: V1beta1LimitResponse
"""
return self._limit_response
@limit_response.setter
def limit_response(self, limit_response):
"""Sets the limit_response of this V1beta1LimitedPriorityLevelConfiguration.
:param limit_response: The limit_response of this V1beta1LimitedPriorityLevelConfiguration. # noqa: E501
:type: V1beta1LimitResponse
"""
self._limit_response = limit_response
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1beta1LimitedPriorityLevelConfiguration):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1beta1LimitedPriorityLevelConfiguration):
return True
return self.to_dict() != other.to_dict()
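# Worked example of the assuredConcurrencyShares formula quoted in the docstrings above
# (illustrative numbers, not part of the generated client): with a server concurrency
# limit SCL = 600 and three limited priority levels whose ACS values are 30, 30 and 40,
# each level's assured concurrency value is
#     ACV = ceil(600 * 30 / (30 + 30 + 40)) = 180, 180 and ceil(600 * 40 / 100) = 240.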
|
pybquillast/xkAddonIDE
|
toRecycle/baseUI.py
|
Python
|
gpl-3.0
| 32,423
| 0.018444
|
'''
Created on 5/03/2014
@author: Alex Montes Barrios
'''
import sys
import os
import Tkinter as tk
import tkMessageBox
import tkFileDialog
import tkSimpleDialog
import ttk
import tkFont
import keyword
import pickle
import re
NORM_PROMPT = '>>> '
CELL_PROMPT = '... '
class PythonEditor(tk.Frame):
def __init__(self, master):
tk.Frame.__init__(self, master)
self.customFont = tkFont.Font(family = 'Consolas', size = 18)
self.prompt =''
self.cellInput = ''
self.textw = tk.Text(self, font = self.customFont, tabs=('1.5c'))
textw = self.textw
textw.pack(side = tk.LEFT, fill = tk.BOTH, expand = 1)
textw.see('end')
textw.event_add('<<CursorlineOff>>','<Up>','<Down>','<Next>','<Prior>','<Button-1>')
textw.event_add('<<CursorlineOn>>','<KeyRelease-Up>','<KeyRelease-Down>','<KeyRelease-Next>','<KeyRelease-Prior>','<ButtonRelease-1>')
textw.tag_configure('pythonKwd', foreground = 'blue')
textw.tag_configure('pythonString', foreground = 'lime green')
textw.tag_configure('pythonComment', foreground = 'grey')
textw.tag_configure('cursorLine', background = 'alice blue')
self.dispPrompt()
textw.bind('<Key>', self.keyHandler)
textw.bind('<Control-C>', self.selCopy)
textw.bind('<Control-V>', self.selPaste)
textw.bind('<Control-X>', self.selCut)
textw.bind('<Control-A>',self.selAll)
textw.bind('<<CursorlineOff>>', self.onUpPress)
textw.bind('<<CursorlineOn>>', self.onUpRelease)
scrollbar = tk.Scrollbar(self)
scrollbar.pack(side = tk.RIGHT, fill = tk.Y)
scrollbar.config(command=textw.yview)
textw.config(yscrollcommand=scrollbar.set)
def onUpPress(self, event = None):
textw = self.textw
textw.tag_remove('cursorLine', '1.0', 'end')
def onUpRelease(self, event = None):
textw = self.textw
if textw.tag_ranges('sel'): return
textw.tag_add('cursorLine', 'insert linestart', 'insert lineend + 1 chars')
def getSelRange(self):
textw = self.textw
try:
return textw.tag_ranges('sel')
except tk.TclError:
return None
def formatContent(self,index1 = '1.0', index2 = 'end'):
textw = self.textw
content = textw.get(index1, index2)
toColor = []
toColor.append(('pythonKwd',r'\b(' + '|'.join(keyword.kwlist) + r')\b'))
toColor.append(('pythonString', r'([\'\"]{3}|[\'\"]).*?\1'))
toColor.append(('pythonComment', '#.*'))
baseIndex = textw.index(index1)
for tagToColor, rePattern in toColor:
if tagToColor != 'pythonString':
reg = re.compile(rePattern)
else:
reg = re.compile(rePattern, re.DOTALL)
pos = 0
while 1:
match = reg.search(content, pos)
if not match: break
tagIni = baseIndex + ' + %d chars'%match.start(0)
tagFin = baseIndex + ' + %d chars'%match.end(0)
textw.tag_add(tagToColor, tagIni, tagFin)
pos = match.end(0)
def getContent(self):
textw = self.textw
return textw.get('1.0','end')
def setContent(self,text):
self.textw.delete('1.0','end')
self.textw.insert('1.0',text)
self.formatContent()
def selDel(self, event = None):
textw = self.textw
selRange = self.getSelRange()
if selRange: textw.delete(*selRange)
def selPaste(self, event = None):
textw = self.textw
try:
text = textw.selection_get(selection = 'CLIPBOARD')
textw.insert('insert', text)
except tk.TclError:
pass
def selCopy(self, event = None):
textw = self.textw
selRange = self.getSelRange()
if selRange:
text = textw.get(*selRange)
textw.clipboard_clear()
textw.clipboard_append(text)
return selRange
def selCut(self, event = None):
textw = self.textw
selRange = self.selCopy()
if selRange: textw.delete(*selRange)
def selAll(self, event = None):
textw = self.textw
textw.tag_add('sel', '1.0', 'end')
def setCustomFont(self, tFamily = "Consolas", tSize = 18):
self.customFont.configure(family = tFamily, size = tSize)
def dispPrompt(self):
self.textw.insert('insert', self.prompt)
self.textw.insert('insert', self.cellInput)
    def isIndentModeOn(self):
return len(self.cellInput) > 0
def setNextIndentation(self,expr):
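        # Carry the current line's leading tabs over to the next prompt; if the line
        # opens a new block (ends with ':'), indent one extra tab.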
if len(expr):
nTabs = len(expr) - len(expr.lstrip('\t'))
if expr[-1] == ':': nTabs += 1
self.cellInput = nTabs * '\t'
else:
self.cellInput = ''
def keyHandler(self,event):
textw = event.widget
if event.keysym == 'Return':
strInst = textw.get('insert linestart', 'insert lineend')
self.setNextIndentation(strInst)
textw.insert('insert', '\n')
self.dispPrompt()
return "break"
if event.keysym in 'abcdefghijklmnopqrstuvwxyz':
textw.insert('insert',event.keysym)
word = textw.get('insert -1 chars wordstart', 'insert -1 chars wordend')
textw.tag_remove('pythonKwd', 'insert -1 chars wordstart', 'insert -1 chars wordend')
if keyword.iskeyword(word):
textw.tag_add('pythonKwd', 'insert -1 chars wordstart', 'insert -1 chars wordend')
return "break"
class PythonFrontEnd(tk.Frame):
def __init__(self, master, theGlobals=None):
tk.Frame.__init__(self, master)
frontEndComm = dict(cls = self._clear, changeFont=self.setCustomFont)
if theGlobals == None: theGlobals = {}
theGlobals.update(frontEndComm)
self._globals = theGlobals
self.outputBuffer = []
self.inCommands = []
self.inCommandsPos = 0
self.prompt = NORM_PROMPT
self.cellInput = ''
self.customFont = tkFont.Font(family = 'Consolas', size = 18)
self.textw = tk.Text(self, font = self.customFont)
text = self.textw
text.pack(side = tk.LEFT, fill = tk.BOTH, expand = 1)
text.bind('<Key>', self.keyHandler)
text.tag_configure('error', foreground = 'red')
text.tag_configure('output', foreground = 'blue')
text.tag_configure('prompt', foreground ='green')
self.dispPrompt()
scrollbar = tk.Scrollbar(self)
scrollbar.pack(side = tk.RIGHT, fill = tk.Y)
scrollbar.config(command=text.yview)
text.config(yscrollcommand=scrollbar.set)
def setCustomFont(self, tFamily = "Consolas", tSize = 18):
self.customFont.configure(family = tFamily, size = tSize)
def dispPrompt(self):
self.textw.insert(tk.END, self.prompt, ('prompt',))
self.textw.insert(tk.END, self.cellInput)
def _clear(self):
self.textw.delete('1.0', 'end')
def clear(self):
self._clear()
self.dispPrompt()
def keyHandler(self,event):
if not (event.keysym in ['Return', 'Up', 'Down', 'Escape', 'Home']):return
textw = event.widget
if event.keysym == 'Return':
textw.mark_set(tk.INSERT, 'end -1 chars')
if event.keysym == 'Home':
strLine = textw.get('insert linestart', 'insert lineend')
homePos = textw.index('insert linestart +%s chars'%len(self.prompt))
if strLine.find(self.prompt) == 0:
homePos = textw.index('insert linestart +%s chars'%len(self.prompt))
textw.mark_set('insert', homePos)
return 'break'
if(textw.index('insert lineend') == textw.index('end -1 chars')):
if event.keysym == 'Return':
strInst = event.widget.g
|
skosukhin/spack
|
var/spack/repos/builtin/packages/fastphase/package.py
|
Python
|
lgpl-2.1
| 1,682
| 0.000595
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Fastphase(Package):
"""Software for haplotype reconstruction, and estimating missing genotypes
from population data."""
homepage = "http://stephenslab.uchicago.edu/software.html"
url = "http://scheet.org/code/Linuxfp.tar.gz"
version('2016-03-30', 'b48731eed9b8d0a5a321f970c5c20d8c')
def install(self, spec, prefix):
mkdirp(prefix.bin)
install('fastPHASE', prefix.bin)
|
duyet-website/api.duyet.net
|
lib/gensim/test/test_word2vec.py
|
Python
|
mit
| 34,696
| 0.003603
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Automated tests for checking transformation algorithms (the models package).
"""
import logging
import unittest
import os
import tempfile
import itertools
import bz2
import sys
import numpy as np
from gensim import utils, matutils
from gensim.utils import check_output
from subprocess import PIPE
from gensim.models import word2vec, keyedvectors
from testfixtures import log_capture
try:
from pyemd import emd
PYEMD_EXT = True
except ImportError:
PYEMD_EXT = False
module_path = os.path.dirname(__file__) # needed because sample data files are located in the same folder
datapath = lambda fname: os.path.join(module_path, 'test_data', fname)
class LeeCorpus(object):
def __iter__(self):
with open(datapath('lee_background.cor')) as f:
for line in f:
yield utils.simple_preprocess(line)
list_corpus = list(LeeCorpus())
sentences = [
['human', 'interface', 'computer'],
['survey', 'user', 'computer', 'system', 'response', 'time'],
['eps', 'user', 'interface', 'system'],
['system', 'human', 'system', 'eps'],
['user', 'response', 'time'],
['trees'],
['graph', 'trees'],
['graph', 'minors', 'trees'],
['graph', 'minors', 'survey']
]
new_sentences = [
['computer', 'artificial', 'intelligence'],
['artificial', 'trees'],
['human', 'intelligence'],
['artificial', 'graph'],
['intelligence'],
['artificial', 'intelligence', 'system']
]
def testfile():
# temporary data will be stored to this file
return os.path.join(tempfile.gettempdir(), 'gensim_word2vec.tst')
def _rule(word, count, min_count):
if word == "human":
return utils.RULE_DISCARD # throw out
else:
return utils.RULE_DEFAULT # apply default rule, i.e. min_count
def load_on_instance():
# Save and load a Word2Vec Model on instance for test
model = word2vec.Word2Vec(sentences, min_count=1)
model.save(testfile())
model = word2vec.Word2Vec() # should fail at this point
return model.load(testfile())
class TestWord2VecModel(unittest.TestCase):
def testOnlineLearning(self):
"""Test that the algorithm is able to add new words to the
vocabulary and to a trained model when using a sorted vocabulary"""
model_hs = word2vec.Word2Vec(sentences, size=10, min_count=0, seed=42, hs=1, negative=0)
model_neg = word2vec.Word2Vec(sentences, size=10, min_count=0, seed=42, hs=0, negative=5)
self.assertTrue(len(model_hs.wv.vocab), 12)
self.assertTrue(model_hs.wv.vocab['graph'].count, 3)
model_hs.build_vocab(new_sentences, update=True)
model_neg.build_vocab(new_sentences, update=True)
self.assertTrue(model_hs.wv.vocab['graph'].count, 4)
self.assertTrue(model_hs.wv.vocab['artificial'].count, 4)
self.assertEqual(len(model_hs.wv.vocab), 14)
self.assertEqual(len(model_neg.wv.vocab), 14)
def testOnlineLearningAfterSave(self):
"""Test that the algorithm is able to add new words to the
vocabulary and to a trained model when using a sorted vocabulary"""
model_neg = word2vec.Word2Vec(sentences, size=10, min_count=0, seed=42, hs=0, negative=5)
model_neg.save(testfile())
model_neg = word2vec.Word2Vec.load(testfile())
self.assertTrue(len(model_neg.wv.vocab), 12)
model_neg.build_vocab(new_sentences, update=True)
model_neg.train(new_sentences)
self.assertEqual(len(model_neg.wv.vocab), 14)
def onlineSanity(self, model):
terro, others = [], []
for l in list_corpus:
if 'terrorism' in l:
terro.append(l)
else:
others.append(l)
self.assertTrue(all(['terrorism' not in l for l in others]))
model.build_vocab(others)
model.train(others)
self.assertFalse('terrorism' in model.wv.vocab)
model.build_vocab(terro, update=True)
self.assertTrue('terrorism' in model.wv.vocab)
orig0 = np.copy(model.wv.syn0)
model.train(terro)
self.assertFalse(np.allclose(model.wv.syn0, orig0))
sim = model.n_similarity(['war'], ['terrorism'])
self.assertLess(0., sim)
def test_sg_hs_online(self):
"""Test skipgram w/ hierarchical softmax"""
model = word2vec.Word2Vec(sg=1, window=5, hs=1, negative=0, min_count=3, iter=10, seed=42, workers=2)
self.onlineSanity(model)
def test_sg_neg_online(self):
"""Test skipgram w/ negative sampling"""
model = word2vec.Word2Vec(sg=1, window=4, hs=0, negative=15, min_count=3, iter=10, seed=42, workers=2)
self.onlineSanity(model)
def test_cbow_hs_online(self):
"""Test CBOW w/ hierarchical softmax"""
model = word2vec.Word2Vec(sg=0, cbow_mean=1, alpha=0.05, window=5, hs=1, negative=0,
min_count=3, iter=10, seed=42, workers=2)
self.onlineSanity(model)
def test_cbow_neg_online(self):
"""Test CBOW w/ negative sampling"""
model = word2vec.Word2Vec(sg=0, cbow_mean=1, alpha=0.05, window=5, hs=0, negative=15,
min_count=5, iter=10, seed=42, workers=2, sample=0)
self.onlineSanity(model)
def testPersistence(self):
"""Test storing/loading the entire model."""
model = word2vec.Word2Vec(sentences, min_count=1)
model.save(testfile())
self.models_equal(model, word2vec.Word2Vec.load(testfile()))
# test persistence of the KeyedVectors of a model
wv = model.wv
wv.save(testfile())
loaded_wv = keyedvectors.KeyedVectors.load(testfile())
self.assertTrue(np.allclose(wv.syn0, loaded_wv.syn0))
        self.assertEqual(len(wv.vocab), len(loaded_wv.vocab))
def testPersistenceWithConstructorRule(self):
"""Test storing/loading the entire model with a vocab trimming rule passed in the constructor."""
model = word2vec.Word2Vec(sentences, min_count=1, trim_rule=_rule)
model.save(testfile())
self.models_equal(model, word2vec.Word2Vec.load(testfile()))
def testRuleWithMinCount(self):
"""Test that returning RULE_DEFAULT from
|
trim_rule triggers min_count."""
model = word2vec.Word2Vec(sentences + [["occurs_only_once"]], min_count=2, trim_rule=_rule)
self.assertTrue("human" not in model.wv.vocab)
self.assertTrue("occurs_only_once" not in model.wv.vocab)
self.assertTrue("interface" in model.wv.vocab)
def testRule(self):
"""Test applying vocab trim_rule to build_vocab instead of constructor."""
model = word2vec.Word2Vec(min_count=1)
model.build_vocab(sentences, trim_rule=_rule)
self.assertTrue("human" not in model.wv.vocab)
def testLambdaRule(self):
"""Test that lambda trim_rule works."""
rule = lambda word, count, min_count: utils.RULE_DISCARD if word == "human" else utils.RULE_DEFAULT
model = word2vec.Word2Vec(sentences, min_count=1, trim_rule=rule)
self.assertTrue("human" not in model.wv.vocab)
def testSyn0NormNotSaved(self):
"""Test syn0norm isn't saved in model file"""
model = word2vec.Word2Vec(sentences, min_count=1)
model.init_sims()
model.save(testfile())
loaded_model = word2vec.Word2Vec.load(testfile())
self.assertTrue(loaded_model.wv.syn0norm is None)
wv = model.wv
wv.save(testfile())
loaded_kv = keyedvectors.KeyedVectors.load(testfile())
self.assertTrue(loaded_kv.syn0norm is None)
def testLoadPreKeyedVectorModel(self):
"""Test loading pre-KeyedVectors word2vec model"""
if sys.version_info[:2] == (3,4):
model_file_suffix = '_py3_4'
elif sys.version_info < (3,):
model_file_suffix = '_py2'
else:
model_file_suffix = '_py3'
# Model stored in one file
        model_file = 'word2vec_pre_kv%s' % model_file_suffix
|
postatum/nefertari-sqla
|
nefertari_sqla/documents.py
|
Python
|
apache-2.0
| 28,898
| 0.000069
|
import copy
import logging
from datetime import datetime
import six
from sqlalchemy.orm import (
class_mapper, object_session, properties, attributes)
from sqlalchemy.orm.collections import InstrumentedList
from sqlalchemy.exc import InvalidRequestError, IntegrityError
from sqlalchemy.orm.exc import MultipleResultsFound, NoResultFound
from sqlalchemy.orm.query import Query
from sqlalchemy.orm.properties import RelationshipProperty
from pyramid_sqlalchemy import Session, BaseObject
from nefertari.json_httpexceptions import (
JHTTPBadRequest, JHTTPNotFound, JHTTPConflict)
from nefertari.utils import (
process_fields, process_limit, _split, dictset,
DataProxy)
from .signals import ESMetaclass, on_bulk_delete
from .fields import ListField, DictField, DateTimeField, IntegerField
from . import types
log = logging.getLogger(__name__)
def get_document_cls(name):
try:
return BaseObject._decl_class_registry[name]
except KeyError:
raise ValueError('SQLAlchemy model `{}` does not exist'.format(name))
def get_document_classes():
""" Get all defined not abstract document classes
Class is assumed to be non-abstract if it has `__table__` or
`__tablename__` attributes defined.
"""
document_classes = {}
    registry = BaseObject._decl_class_registry
for model_name, model_cls in registry.items():
tablename = (getattr(model_cls, '__table__', None) is not None or
getattr(model_cls, '__tablename__', None) is not None)
if tablename:
document_classes[model_name] = model_cls
return document_classes
def process_lists(_dict):
for k in _dict:
new_k, _, _t = k.partition('__')
if _t == 'in' or _t == 'all':
_dict[k] = _dict.aslist(k)
return _dict
def process_bools(_dict):
for k in _dict:
new_k, _, _t = k.partition('__')
if _t == 'bool':
_dict[new_k] = _dict.pop_bool_param(k)
return _dict
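# --- Illustrative sketch (not part of the original module) ----------------
# How the double-underscore query suffixes handled above are typically fed
# through these helpers. The exact coercion behaviour of dictset.aslist and
# dictset.pop_bool_param (splitting a comma-separated string, parsing
# 'true'/'false') is assumed here, not guaranteed by this file.
def _example_process_query_params():
    params = dictset({'id__in': '1,2,3', 'private__bool': 'true'})
    params = process_lists(params)   # 'id__in' should become a list of ids
    params = process_bools(params)   # 'private' should become a boolean key
    return params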
TYPES_MAP = {
types.LimitedString: {'type': 'string'},
types.LimitedText: {'type': 'string'},
types.LimitedUnicode: {'type': 'string'},
types.LimitedUnicodeText: {'type': 'string'},
types.Choice: {'type': 'string'},
types.Boolean: {'type': 'boolean'},
types.LargeBinary: {'type': 'object'},
types.Dict: {'type': 'object'},
types.LimitedNumeric: {'type': 'double'},
types.LimitedFloat: {'type': 'double'},
types.LimitedInteger: {'type': 'long'},
types.LimitedBigInteger: {'type': 'long'},
types.LimitedSmallInteger: {'type': 'long'},
types.Interval: {'type': 'long'},
types.DateTime: {'type': 'date', 'format': 'dateOptionalTime'},
types.Date: {'type': 'date', 'format': 'dateOptionalTime'},
types.Time: {'type': 'date', 'format': 'HH:mm:ss'},
}
class BaseMixin(object):
""" Represents mixin class for models.
Attributes:
_auth_fields: String names of fields meant to be displayed to
authenticated users.
_public_fields: String names of fields meant to be displayed to
non-authenticated users.
_nested_relationships: String names of relationship fields
that should be included in JSON data of an object as full
included documents. If relationship field is not
present in this list, this field's value in JSON will be an
object's ID or list of IDs.
"""
_public_fields = None
_auth_fields = None
_nested_relationships = ()
_type = property(lambda self: self.__class__.__name__)
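    # Illustrative configuration sketch (not part of the original module):
    # a concrete model would typically fill these hooks in, e.g.
    #     class Story(BaseMixin, BaseObject):
    #         _public_fields = ['id', 'title']
    #         _auth_fields = ['id', 'title', 'owner']
    #         _nested_relationships = ('owner',)
    # so that anonymous users only see 'id' and 'title', while the 'owner'
    # relationship is rendered as a full nested document instead of an ID.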
@classmethod
def get_es_mapping(cls):
""" Generate ES mapping from model schema. """
from nefertari.elasticsearch import ES
properties = {}
mapping = {
ES.src2type(cls.__name__): {
'properties': properties
}
}
mapper = class_mapper(cls)
columns = {c.name: c for c in mapper.columns}
relationships = {r.key: r for r in mapper.relationships}
# Replace field 'id' with primary key field
columns['id'] = columns.get(cls.pk_field())
for name, column in columns.items():
column_type = column.type
if isinstance(column_type, types.ChoiceArray):
column_type = column_type.impl.item_type
column_type = type(column_type)
if column_type not in TYPES_MAP:
continue
properties[name] = TYPES_MAP[column_type]
for name, column in relationships.items():
if name in cls._nested_relationships:
column_type = {'type': 'object'}
else:
rel_pk_field = column.mapper.class_.pk_field_type()
column_type = TYPES_MAP[rel_pk_field]
properties[name] = column_type
properties['_type'] = {'type': 'string'}
return mapping
@classmethod
def autogenerate_for(cls, model, set_to):
""" Setup `after_insert` event handler.
Event handler is registered for class :model: and creates a new
instance of :cls: with a field :set_to: set to an instance on
        which the event occurred.
"""
from sqlalchemy import event
def generate(mapper, connection, target):
cls(**{set_to: target})
event.listen(model, 'after_insert', generate)
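    # Illustrative usage sketch (not part of the original module): with two
    # hypothetical models User and Profile, calling
    #     Profile.autogenerate_for(User, 'user')
    # once at setup time makes every newly inserted User trigger creation of
    # a Profile whose 'user' field points back at that User instance.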
@classmethod
def pk_field(cls):
""" Get a primary key field name. """
return class_mapper(cls).primary_key[0].name
@classmethod
def pk_field_type(cls):
return class_mapper(cls).primary_key[0].type.__class__
@classmethod
def check_fields_allowed(cls, fields):
""" Check if `fields` are allowed to be used on this model. """
fields = [f.split('__')[0] for f in fields]
fields_to_query = set(cls.fields_to_query())
if not set(fields).issubset(fields_to_query):
not_allowed = set(fields) - fields_to_query
raise JHTTPBadRequest(
"'%s' object does not have fields: %s" % (
cls.__name__, ', '.join(not_allowed)))
@classmethod
def filter_fields(cls, params):
""" Filter out fields with invalid names. """
fields = cls.fields_to_query()
return dictset({
name: val for name, val in params.items()
if name.split('__')[0] in fields
})
@classmethod
def apply_fields(cls, query_set, _fields):
""" Apply fields' restrictions to `query_set`.
First, fields are split to fields that should only be included and
fields that should be excluded. Then excluded fields are removed
from included fields.
"""
fields_only, fields_exclude = process_fields(_fields)
if not (fields_only or fields_exclude):
return query_set
try:
fields_only = fields_only or cls.native_fields()
fields_exclude = fields_exclude or []
if fields_exclude:
# Remove fields_exclude from fields_only
fields_only = [
f for f in fields_only if f not in fields_exclude]
if fields_only:
fields_only = [
getattr(cls, f) for f in sorted(set(fields_only))]
query_set = query_set.with_entities(*fields_only)
except InvalidRequestError as e:
raise JHTTPBadRequest('Bad _fields param: %s ' % e)
return query_set
@classmethod
def apply_sort(cls, query_set, _sort):
if not _sort:
return query_set
sorting_fields = []
for field in _sort:
if field.startswith('-'):
sorting_fields.append(getattr(cls, field[1:]).desc())
else:
sorting_fields.append(getattr(cls, field))
return query_set.order_by(*sorting_fields)
@classmethod
def count(cls, query_set):
return query_set.count()
@classmethod
def filter_objects(cls, objects, first=False, **params):
""" Perform query with :params: on instances sequence :objects:
Arguments:
:object
|
morpheby/levelup-by
|
common/lib/xmodule/xmodule/tests/__init__.py
|
Python
|
agpl-3.0
| 3,708
| 0.002157
|
"""
unittests for xmodule
Run like this:
rake test_common/lib/xmodule
"""
import json
import os
import unittest
import fs
import fs.osfs
import numpy
from mock import Mock
from path import path
import calc
from xblock.field_data import DictFieldData
from xmodule.x_module import ModuleSystem, XModuleDescriptor, DescriptorSystem
from xmodule.modulestore.inheritance import InheritanceMixin
from xmodule.mako_module import MakoDescriptorSystem
# Location of common test DATA directory
# '../../../../edx-platform/common/test/data/'
MODULE_DIR = path(__file__).dirname()
DATA_DIR = path.joinpath(*MODULE_DIR.splitall()[:-4]) / 'test/data/'
open_ended_grading_interface = {
'url': 'blah/',
'username': 'incorrect_user',
|
'password': 'incorrect_pass',
'staff_grading' : 'staff_grading',
'peer_grading' : 'peer_grading',
'grading_controller' : 'grading_controller'
}
def get_test_system(course_id=''):
"""
Construct a test ModuleSystem instance.
    By default, the render_template() method simply returns the repr of the
context it is passed. You can override this behavior by monkey patching::
system = get_test_system()
system.render_template = my_render_func
where `my_render_func` is a function of the form my_render_func(template, context).
"""
return ModuleSystem(
ajax_url='courses/course_id/modx/a_location',
track_function=Mock(),
get_module=Mock(),
render_template=lambda template, context: repr(context),
replace_urls=lambda html: str(html),
user=Mock(is_staff=False),
filestore=Mock(),
debug=True,
hostname="edx.org",
xqueue={'interface': None, 'callback_url': '/', 'default_queuename': 'testqueue', 'waittime': 10, 'construct_callback' : Mock(side_effect="/")},
node_path=os.environ.get("NODE_PATH", "/usr/local/lib/node_modules"),
xmodule_field_data=lambda descriptor: descriptor._field_data,
anonymous_student_id='student',
open_ended_grading_interface=open_ended_grading_interface,
course_id=course_id,
)
def get_test_descriptor_system():
"""
Construct a test DescriptorSystem instance.
"""
return MakoDescriptorSystem(
load_item=Mock(),
resources_fs=Mock(),
error_tracker=Mock(),
render_template=lambda template, context: repr(context),
mixins=(InheritanceMixin,),
)
class ModelsTest(unittest.TestCase):
def setUp(self):
pass
def test_load_class(self):
vc = XModuleDescriptor.load_class('video')
vc_str = "<class 'xmodule.video_module.VideoDescriptor'>"
self.assertEqual(str(vc), vc_str)
class PostData(object):
"""Class which emulate postdata."""
def __init__(self, dict_data):
self.dict_data = dict_data
def getlist(self, key):
"""Get data by key from `self.dict_data`."""
return self.dict_data.get(key)
class LogicTest(unittest.TestCase):
"""Base class for testing xmodule logic."""
descriptor_class = None
raw_field_data = {}
def setUp(self):
class EmptyClass:
"""Empty object."""
url_name = ''
category = 'test'
self.system = get_test_system()
self.descriptor = EmptyClass()
self.xmodule_class = self.descriptor_class.module_class
self.xmodule = self.xmodule_class(
self.descriptor, self.system, DictFieldData(self.raw_field_data), Mock()
)
def ajax_request(self, dispatch, data):
"""Call Xmodule.handle_ajax."""
return json.loads(self.xmodule.handle_ajax(dispatch, data))
|
dmsurti/mayavi
|
integrationtests/mayavi/common.py
|
Python
|
bsd-3-clause
| 25,780
| 0.001241
|
"""MayaVi test related utilities.
"""
# Author: Prabhu Ramachandran <prabhu@aero.iitb.ac.in>
# Copyright (c) 2005-2015, Enthought, Inc.
# License: BSD Style.
from __future__ import print_function
# Standard library imports
import gc
import os
import os.path
import sys
import logging
import traceback
from optparse import OptionParser
# Enthought library imports
from traits.etsconfig.api import ETSConfig
from traits.api import Any, Bool, Instance
from pyface.api import GUI
from tvtk.api import tvtk
from tvtk.common import configure_input
from mayavi.plugins.app import Mayavi, setup_logger
# The TVTK window.
from tvtk.pyface.tvtk_scene import TVTKWindow
# Global variables.
VERBOSE = False
logger = logging.getLogger()
def off_screen_viewer():
"""A factory that creates an offscreen viewer."""
win = TVTKWindow(off_screen_rendering=True)
# Need to set some non-zero size for the off screen window. If
# not we get VTK errors on Linux.
win.scene.set_size((300,300))
return win
class MayaviTestError(Exception):
pass
###########################################################################
# `MemoryAssistant` class.
###########################################################################
class MemoryAssistant(object):
""" Assistant methods to assert memory usage and memory leaks.
"""
def assertMemoryUsage(self, process, usage, slack=0, msg=None):
""" Assert that the memory usage does not exceed the provided limit.
Parameters
----------
process : psutil.Process
The process to check.
usage : float
The target memory usage. This is used as a soft-limit.
msg : str
The message to show on AssertionError.
slack : float
The percentage (relative to `usage`) that we allow the
process memory usage to exceed the soft limit. The default is 0.0
Raises
------
AssertionError :
if the current memory usage of the process is higher than
:math:`usage * (1 + slack)`.
"""
current_usage = self._memory_usage(process)
hard_limit = usage * (1 + slack)
if hard_limit < current_usage:
if msg is None:
                difference = (current_usage - usage) / usage
msg = "Memory leak of {:.2%}".format(difference)
raise AssertionError(msg)
def assertReturnsMemory(self, function, args=None, iterations=100,
                            slack=0.0, msg=None):
""" Assert that the function does not retain memory over a number of
runs.
Parameters
----------
func : callable
The function to check. The function should take no arguments.
args : tuple
The tuple of arguments to pass to the callable.
iterations : int
The number of times to run the function. Default is 100.
msg : str
The message to show on AssertionError.
slack : float
The percentage (relative to the first run) that we allow the
process memory usage to exceed the expected. The default is 0.0
Note
----
The function is executed in-process thus any memory leaks will be
there to cause problems to other tests that are part of the currently
running test suite.
"""
try:
import psutil
except ImportError:
msg = "Please install psutil to check memory usage"
raise ImportError(msg)
process = psutil.Process(os.getpid())
def test_function():
if args is None:
function()
else:
function(*args)
gc.collect()
baseline = self._memory_usage(process)
samples_msg = "Samples : {}"
mem_usage_msg = "Memory growth (MB): {:5.1f} to {:5.1f}"
mem_leak_msg = "Memory leak (%) : {:5.1f}"
try:
print('Profiling', end=' ')
sys.stdout.flush()
for index in range(iterations):
test_function()
print('.', end=' ')
sys.stdout.flush()
gc.collect()
self.assertMemoryUsage(process, baseline, slack=slack)
##########################################
# If we have come this far, we are golden!
##########################################
final = self._memory_usage(process)
leak = (final - baseline) / baseline
print()
print(samples_msg.format(index + 1))
print(mem_usage_msg.format(baseline, final))
print(mem_leak_msg.format(leak * 100.0, index + 1))
except AssertionError:
final = self._memory_usage(process)
leak = (final - baseline) / baseline
if msg is None:
msg = 'Memory Leak!!!\n'
msg += samples_msg.format(index + 1)
msg += '\n'
msg += mem_usage_msg.format(baseline, final)
msg += '\n'
msg += mem_leak_msg.format(leak * 100.0, index + 1)
raise AssertionError(msg)
else:
raise AssertionError(msg)
def _memory_usage(self, process):
return float(process.get_memory_info().rss) / (1024 ** 2)
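# --- Illustrative sketch (not part of the original module) ----------------
# Typical way MemoryAssistant is driven from a test: pass it a callable that
# builds and tears down a scene, and let it fail when the process keeps
# growing. `build_and_destroy_scene` is a hypothetical stand-in callable.
def _example_memory_check(build_and_destroy_scene):
    assistant = MemoryAssistant()
    # 50 runs; allow resident memory to exceed the garbage-collected baseline
    # by at most 5% before an AssertionError is raised.
    assistant.assertReturnsMemory(build_and_destroy_scene,
                                  iterations=50, slack=0.05)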
######################################################################
# Image comparison utility functions.
######################################################################
# Much of this code is translated from `vtk.test.Testing`.
def _print_image_error(img_err, err_index, img_base):
"""Prints out image related error information."""
msg = """Failed image test with error: %(img_err)f
Baseline image, error index: %(img_base)s, %(err_index)s
Test image: %(img_base)s.test.small.jpg
Difference image: %(img_base)s.diff.small.jpg
Valid image: %(img_base)s.small.jpg"""%locals()
logger.error(msg)
if VERBOSE:
print(msg)
def _print_image_success(img_err, err_index):
"Prints XML data for Dart when image test succeeded."
msg = "Image Error, image_index: %s, %s"%(img_err, err_index)
logger.debug(msg)
if VERBOSE:
print(msg)
def _handle_failed_image(idiff, src_img, pngr, img_fname):
"""Writes all the necessary images when an image comparison
failed."""
f_base, f_ext = os.path.splitext(img_fname)
# write out the difference file in full.
pngw = tvtk.PNGWriter(file_name=f_base + ".diff.png")
configure_input(pngw, idiff)
pngw.write()
# write the difference image scaled and gamma adjusted for the
# dashboard.
sz = pngr.output.dimensions
if sz[1] <= 250.0:
mag = 1.0
else:
mag = 250.0/sz[1]
shrink = tvtk.ImageResample(interpolate=1)
configure_input(shrink, idiff.output)
shrink.set_axis_magnification_factor(0, mag)
shrink.set_axis_magnification_factor(1, mag)
gamma = tvtk.ImageShiftScale(shift=0, scale=10)
configure_input(gamma, shrink)
jpegw = tvtk.JPEGWriter(file_name=f_base + ".diff.small.jpg",
quality=85)
configure_input(jpegw, gamma)
jpegw.write()
# write out the image that was generated.
pngw.set(file_name=f_base + ".test.png")
configure_input(pngw, src_img)
pngw.write()
# write out a smaller version of the image that was generated.
configure_input(shrink, idiff.input)
jpegw.set(file_name=f_base + ".test.small.jpg")
configure_input(jpegw, shrink)
jpegw.write()
# write out the valid image that matched.
configure_input(shrink, idiff.image)
jpegw.set(file_name=f_base + ".small.jpg")
configure_input(jpegw, shrink)
jpegw.write()
def _set_scale(r1, r2):
"""Given two instances of tvtk.ImageResample, this sets the scale
    of the two such that their outputs are of the same size. The final
    size is chosen as the minimum of the height and width of each
image.
"""
img1, img2 = r1.input, r2.input
if hasattr(img1, '
|
rcbops/glance-buildpackage
|
glance/api/policy.py
|
Python
|
apache-2.0
| 3,392
| 0
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 OpenStack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Policy Engine For Glance"""
import json
import os.path
from glance.common import cfg
from glance.common import exception
from glance.common import policy
class Enforcer(object):
"""Responsible for loading and enforcing rules"""
policy_opts = (
cfg.StrOpt('policy_file', default=None),
cfg.StrOpt('policy_default_rule', default='default'),
)
def __init__(self, conf):
for opt in self.policy_opts:
conf.register_opt(opt)
self.default_rule = conf.policy_default_rule
self.policy_path = self._find_policy_file(conf)
self.policy_file_mtime = None
self.policy_file_contents = None
def set_rules(self, rules):
"""Create a new Brain based on the provided dict of rules"""
brain = policy.Brain(rules, self.default_rule)
policy.set_brain(brain)
def load_rules(self):
"""Set the rules found in the json file on disk"""
rules = self._read_policy_file()
self.set_rules(rules)
@staticmethod
def _find_policy_file(conf):
"""Locate the policy json data file"""
if conf.policy_file:
return conf.policy_file
matches = cfg.find_config_files('glance', 'policy', 'json')
try:
return matches[0]
except IndexError:
raise cfg.ConfigFilesNotFoundError(('policy.json',))
def _read_policy_file(self):
"""Read contents of the policy file
This re-caches policy data if the file has been changed.
"""
mtime = os.path.getmtime(self.policy_path)
if not self.policy_file_contents or mtime != self.policy_file_mtime:
with open(self.policy_path) as fap:
|
raw_contents = fap.read()
self.policy_file_contents = json.loads(raw_contents)
self.policy_file_mtime = mtime
return self.policy_file_contents
def enforce(self, context, action, target):
"""Verifies that the action is valid on the target in this context.
:param context: Glance request context
        :param action: String representing the action to be checked
        :param target: Dictionary representing the object of the action.
:raises: `glance.common.exception.NotAuthorized`
:returns: None
"""
self.load_rules()
match_list = ('rule:%s' % action,)
credentials = {
'roles': context.roles,
'user': context.user,
'tenant': context.tenant,
}
try:
policy.enforce(match_list, target, credentials)
except policy.NotAuthorized:
raise exception.NotAuthorized(action=action)
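# --- Illustrative sketch (not part of the original module) ----------------
# How a caller (e.g. an API controller) would typically drive the Enforcer.
# The `conf` object is assumed to be a glance.common.cfg config holder and
# `context` a request context exposing `roles`, `user` and `tenant`; the
# 'get_image' action name is purely an example.
def _example_is_action_allowed(conf, context, target):
    enforcer = Enforcer(conf)
    try:
        enforcer.enforce(context, 'get_image', target)
    except exception.NotAuthorized:
        return False
    return True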
|