| repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (string, 1 class) | license (string, 15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, 0-8.16k chars) | middle (string, 3-512 chars) | suffix (string, 0-8.17k chars) |
|---|---|---|---|---|---|---|---|---|
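A minimal sketch of loading and iterating a table with this schema using the Hugging Face datasets library; the dataset identifier below is hypothetical and stands in for wherever this dump actually lives.

from datasets import load_dataset

# Hypothetical identifier; substitute the real dataset name or a local path.
ds = load_dataset("example/python-fim-snippets", split="train")
for row in ds.select(range(3)):
    # Each row is one fill-in-the-middle example: code before the gap, the span to
    # predict, and code after the gap, plus repository metadata and a score.
    print(row["repo_name"], row["path"], row["license"], row["size"], row["score"])
    reconstructed = row["prefix"] + row["middle"] + row["suffix"]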
mmottahedi/neuralnilm_prototype
|
scripts/e572.py
|
Python
|
mit
| 15,490
| 0.003938
|
from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource)
from neuralnilm.source import (standardise, discretize, fdiff, power_and_fdiff,
RandomSegments, RandomSegmentsInMemory,
SameLocation, MultiSource)
from neuralnilm.experiment import (run_experiment, init_experiment,
change_dir, configure_logger)
from neuralnilm.net import TrainingError
from neuralnilm.layers import (MixtureDensityLayer, DeConv1DLayer,
SharedWeightsDenseLayer, BLSTMLayer)
from neuralnilm.objectives import (scaled_cost, mdn_nll,
scaled_cost_ignore_inactive, ignore_inactive,
scaled_cost3)
from neuralnilm.plot import MDNPlotter, CentralOutputPlotter, Plotter, RectangularOutputPlotter, StartEndMeanPlotter
from neuralnilm.updates import clipped_nesterov_momentum
from neuralnilm.rectangulariser import rectangularise
from lasagne.nonlinearities import (sigmoid, rectify, tanh, identity, softmax)
from lasagne.objectives import squared_error, binary_crossentropy
from lasagne.init import Uniform, Normal
from lasagne.layers import (DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer,
DimshuffleLayer, DropoutLayer, ConcatLayer, PadLayer)
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
import gc
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
#PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
PATH = "/data/dk3810/figures"
# PATH = "/home/jack/experiments/neuralnilm/figures"
UKDALE_FILENAME = '/data/dk3810/ukdale.h5'
SKIP_PROBABILITY_FOR_TARGET = 0.5
INDEPENDENTLY_CENTER_INPUTS = True
WINDOW_PER_BUILDING = {
1: ("2013-04-12", "2014-12-15"),
2: ("2013-05-22", "2013-10-03 06:16:00"),
3: ("2013-02-27", "2013-04-01 06:15:05"),
4: ("2013-03-09", "2013-09-24 06:15:14"),
5: ("2014-06-29", "2014-09-01")
}
INPUT_STATS = {
'mean': np.array([297.87216187], dtype=np.float32),
'std': np.array([374.43884277], dtype=np.float32)
}
def get_source(appliance, logger, target_is_start_and_end_and_mean=False,
is_rnn=False, window_per_building=WINDOW_PER_BUILDING,
source_type='multisource',
filename=UKDALE_FILENAME):
"""
Parameters
----------
source_type : {'multisource', 'real_appliance_source'}
Returns
-------
Source
"""
N_SEQ_PER_BATCH = 64
TRAIN_BUILDINGS_REAL = None
if appliance == 'microwave':
SEQ_LENGTH = 288
TRAIN_BUILDINGS = [1, 2]
VALIDATION_BUILDINGS = [5]
APPLIANCES = [
'microwave',
['fridge freezer', 'fridge', 'freezer'],
'dish washer',
'kettle',
['washer dryer', 'washing machine']
]
MAX_APPLIANCE_POWERS = [3000, 300, 2500, 3100, 2500]
ON_POWER_THRESHOLDS = [ 200, 50, 10, 2000, 20]
MIN_ON_DURATIONS = [ 12, 60, 1800, 12, 1800]
MIN_OFF_DURATIONS = [ 30, 12, 1800, 0, 160]
elif appliance == 'washing machine':
SEQ_LENGTH = 1024
TRAIN_BUILDINGS = [1, 5]
VALIDATION_BUILDINGS = [2]
APPLIANCES = [
['washer dryer', 'washing machine'],
['fridge freezer', 'fridge', 'freezer'],
'dish washer',
'kettle',
'microwave'
]
MAX_APPLIANCE_POWERS = [2500, 300, 2500, 3100, 3000]
ON_POWER_THRESHOLDS = [ 20, 50, 10, 2000, 200]
MIN_ON_DURATIONS = [1800, 60, 1800, 12, 12]
MIN_OFF_DURATIONS = [ 160, 12, 1800, 0, 30]
if is_rnn:
N_SEQ_PER_BATCH = 16
elif appliance == 'fridge':
SEQ_LENGTH = 512
TRAIN_BUILDINGS = [1, 2, 4]
VALIDATION_BUILDINGS = [5]
APPLIANCES = [
['fridge freezer', 'fridge', 'freezer'],
['washer dryer', 'washing machine'],
'dish washer',
'kettle',
'microwave'
]
MAX_APPLIANCE_POWERS = [ 300, 2500, 2500, 3100, 3000]
ON_POWER_THRESHOLDS = [ 50, 20, 10, 2000, 200]
MIN_ON_DURATIONS = [ 60, 1800, 1800, 12, 12]
MIN_OFF_DURATIONS = [ 12, 160, 1800, 0, 30]
if is_rnn:
N_SEQ_PER_BATCH = 16
elif appliance == 'kettle':
SEQ_LENGTH = 128
TRAIN_BUILDINGS = [1, 2, 3, 4]
# House 3's mains often doesn't include kettle!
TRAIN_BUILDINGS_REAL = [1, 2, 4]
VALIDATION_BUILDINGS = [5]
APPLIANCES = [
'kettle',
['fridge freezer', 'fridge', 'freezer'],
['washer dryer', 'washing machine'],
'dish washer',
'microwave'
]
MAX_APPLIANCE_POWERS = [3100, 300, 2500, 2500, 3000]
ON_POWER_THRESHOLDS = [2000, 50, 20, 10, 200]
MIN_ON_DURATIONS = [ 12, 60, 1800, 1800, 12]
MIN_OFF_DURATIONS = [ 0, 12, 160, 1800, 30]
elif appliance == 'dish washer':
SEQ_LENGTH = 1024 + 512
TRAIN_BUILDINGS = [1, 2]
VALIDATION_BUILDINGS = [5]
APPLIANCES = [
'dish washer',
['fridge freezer', 'fridge', 'freezer'],
['washer dryer', 'washing machine'],
'kettle',
'microwave'
]
MAX_APPLIANCE_POWERS = [2500, 300, 2500, 3100, 3000]
ON_POWER_THRESHOLDS = [ 10, 50, 20, 2000, 200]
MIN_ON_DURATIONS = [1800, 60, 1800, 12, 12]
MIN_OFF_DURATIONS = [1800, 12, 160, 0, 30]
if is_rnn:
N_SEQ_PER_BATCH = 16
TARGET_APPLIANCE = APPLIANCES[0]
MAX_TARGET_POWER = MAX_APPLIANCE_POWERS[0]
ON_POWER_THRESHOLD = ON_POWER_THRESHOLDS[0]
MIN_ON_DURATION = MIN_ON_DURATIONS[0]
MIN_OFF_DURATION = MIN_OFF_DURATIONS[0]
if TRAIN_BUILDINGS_REAL is None:
TRAIN_BUILDINGS_REAL = TRAIN_BUILDINGS
real_appliance_source1 = RealApplianceSource(
logger=logger,
filename=filename,
appliances=APPLIANCES,
max_appliance_powers=MAX_APPLIANCE_POWERS,
on_power_thresholds=ON_POWER_THRESHOLDS,
min_on_durations=MIN_ON_DURATIONS,
min_off_durations=MIN_OFF_DURATIONS,
divide_input_by_max_input_power=False,
window_per_building=window_per_building,
seq_length=SEQ_LENGTH,
output_one_appliance=True,
train_buildings=TRAIN_BUILDINGS,
validation_buildings=VALIDATION_BUILDINGS,
n_seq_per_batch=N_SEQ_PER_BATCH,
skip_probability=0.75,
skip_probability_for_first_appliance=SKIP_PROBABILITY_FOR_TARGET,
standardise_input=True,
input_stats=INPUT_STATS,
independently_center_inputs=INDEPENDENTLY_CENTER_INPUTS,
target_is_start_and_end_and_mean=target_is_start_and_end_and_mean
)
if source_type != 'multisource':
return real_appliance_source1
same_location_source1 = SameLocation(
logger=logger,
filename=filename,
target_appliance=TARGET_APPLIANCE,
window_per_building=window_per_building,
seq_length=SEQ_LENGTH,
train_buildings=TRAIN_BUILDINGS_REAL,
validation_buildings=VALIDATION_BUILDINGS,
n_seq_per_batch=N_SEQ_PER_BATCH,
skip_probability=SKIP_PROBABILITY_FOR_TARGET,
standardise_input=True,
offset_probability=1,
divide_target_by=MAX_TARGET_POWER,
input_stats=INPUT_STATS,
independently_center_inputs=INDEPENDENTLY_CENTER_INPUTS,
on_power_threshold=ON_POWER_THRESHOLD,
min_on_duration=MIN_ON_DURATION,
min_off_duration=MIN_OFF_DURATION,
include_all=False,
all
|
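A hedged usage sketch for the get_source() helper defined in the e572.py snippet above; it assumes the neuralnilm package is installed and that the UK-DALE HDF5 file pointed to by UKDALE_FILENAME exists.

import logging

logging.basicConfig()
logger = logging.getLogger('e572')
# 'real_appliance_source' makes get_source() return the RealApplianceSource directly
# instead of the SameLocation/MultiSource combination built for 'multisource'.
source = get_source('kettle', logger, source_type='real_appliance_source')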
kangsanChang/mjutt
|
timetable/models.py
|
Python
|
apache-2.0
| 1,169
| 0.005133
|
from django.db import models
from .switcher import switch_to_deptname
# Create your models here.
class Classitem(models.Model):
grade = models.CharField(max_length=10, blank=True, null=True)
classname = models.CharField(max_length=50, blank=True, null=True)
krcode = models.CharField(max_length=10, blank=True, null=True)
|
credit = models.CharField(max_length=5, blank=True, null=True)
timeperweek = models.CharField(max_length=5, blank=True, null=True)
prof = models.CharField(max_length=30, blank=True, null=True)
classcode = models.CharField(max_length=10, blank=True, null=True)
limitstud = models.CharField(max_length=10, blank=True, null=True)
time = models.CharField(max_length=200, blank=True, null=True)
note = models.CharField(max_length=200, blank=True, null=True)
dept = models.CharField(max_length=100, blank=True, null=True)
class Meta:
managed = False
db_table = 'classitem'
def get_deptname(self):
return switch_to_deptname(self.dept)
def __str__(self):
return str(self.id)+" / "+ self.dept + " / " + self.classname + " / " + self.prof + " / " + self.classcode
|
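A hedged usage sketch for the unmanaged Classitem model above; the filter values are illustrative, and managed = False means Django expects the classitem table to already exist in the database.

items = Classitem.objects.filter(grade='1', dept='01')  # hypothetical grade/dept codes
for item in items:
    print(item.get_deptname(), item.classname, item.prof)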
Team395/headlights
|
VisionScripts/TestPallate.py
|
Python
|
mit
| 314
| 0.019108
|
import cv2
import numpy as np
low = np.array([[[20,40,30]]*100]*100, np.uint8)
high = np.array([[[20,255,255]]*100]*100, np.uint8)
#low = cv2.cvtColor(low, cv2.COLOR_HSV2BGR)
#high = cv2.cvtColor(high, cv2.COLOR_HSV2BGR)
cv2.imshow('low', low)
cv2.imshow('high', high)
cv2.waitKey(10000)
cv2.destroyAllWindows()
|
brokendata/bigmler
|
bigmler/tests/test_05_evaluation.py
|
Python
|
apache-2.0
| 14,252
| 0.00407
|
# -*- coding: utf-8 -*-
#!/usr/bin/env python
#
# Copyright 2015 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Testing evaluations' creation
"""
from __future__ import absolute_import
from bigmler.tests.world import (world, common_setup_module,
common_teardown_module, teardown_class)
import bigmler.tests.basic_tst_prediction_steps as test_pred
import bigmler.tests.evaluation_steps as evaluation
def setup_module():
"""Setup for the module
"""
common_setup_module()
test = TestEvaluation()
test.setup_scenario1()
def teardown_module():
"""Teardown for the module
"""
common_teardown_module()
class TestEvaluation(object):
def teardown(self):
"""Calling generic teardown for every method
"""
print "\nEnd of tests in: %s\n-------------------\n" % __name__
teardown_class()
def setup(self):
"""
Debug information
"""
print "\n-------------------\nTests in: %s\n" % __name__
def setup_scenario1(self):
"""
Scenario: Successfully building evaluations from start:
Given I create BigML resources uploading train "<data>" file to evaluate and log evaluation in "<output>"
And I check that the source has been created
            And I check that the dataset has been created
And I check that the model has been created
And I check that the evaluation has been created
Then the evaluation file is like "<json_evaluation_file>"
Examples:
            | data | output | json_evaluation_file |
| ../data/iris.csv | ./scenario_e1/evaluation | ./check_files/evaluation_iris.json |
"""
print self.setup_scenario1.__doc__
examples = [
['data/iris.csv', 'scenario_e1/evaluation', 'check_files/evaluation_iris.json']]
for example in examples:
print "\nTesting with:\n", example
test_pred.i_create_all_resources_to_evaluate(self, data=example[0], output=example[1])
test_pred.i_check_create_source(self)
test_pred.i_check_create_dataset(self, suffix=None)
test_pred.i_check_create_model(self)
test_pred.i_check_create_evaluation(self)
evaluation.then_the_evaluation_file_is_like(self, example[2])
def test_scenario2(self):
"""
Scenario: Successfully building evaluations from source
Given I have previously executed "<scenario>" or reproduce it with arguments <kwargs>
And I create BigML resources using source to evaluate and log evaluation in "<output>"
And I check that the dataset has been created
And I check that the model has been created
And I check that the evaluation has been created
Then the evaluation file is like "<json_evaluation_file>"
Examples:
|scenario | kwargs | output | json_evaluation_file |
| scenario_e1| {"data": "../data/iris.csv", "output": "./scenario_e1/predictions.csv"} |./scenario_e2/evaluation | ./check_files/evaluation_iris.json |
"""
print self.test_scenario2.__doc__
examples = [
['scenario_e1', '{"data": "data/iris.csv", "output": "scenario_e1/predictions.csv"}', 'scenario_e2/evaluation', 'check_files/evaluation_iris.json']]
for example in examples:
print "\nTesting with:\n", example
test_pred.i_have_previous_scenario_or_reproduce_it(self, example[0], example[1])
evaluation.given_i_create_bigml_resources_using_source_to_evaluate(self, output=example[2])
test_pred.i_check_create_dataset(self, suffix=None)
test_pred.i_check_create_model(self)
test_pred.i_check_create_evaluation(self)
evaluation.then_the_evaluation_file_is_like(self, example[3])
def test_scenario3(self):
"""
Scenario: Successfully building evaluations from dataset
Given I have previously executed "<scenario>" or reproduce it with arguments <kwargs>
And I create BigML resources using dataset to evaluate and log evaluation in "<output>"
And I check that the model has been created
And I check that the evaluation has been created
Then the evaluation file is like "<json_evaluation_file>"
Examples:
|scenario | kwargs |output | json_evaluation_file |
| scenario_e1| {"data": "../data/iris.csv", "output": "./scenario_e1/predictions.csv"} |./scenario_e3/evaluation | ./check_files/evaluation_iris.json |
"""
print self.test_scenario3.__doc__
examples = [
['scenario_e1', '{"data": "data/iris.csv", "output": "scenario_e1/predictions.csv"}', 'scenario_e3/evaluation', 'check_files/evaluation_iris.json']]
for example in examples:
print "\nTesting with:\n", example
test_pred.i_have_previous_scenario_or_reproduce_it(self, example[0], example[1])
evaluation.given_i_create_bigml_resources_using_dataset_to_evaluate(self, output=example[2])
test_pred.i_check_create_model(self)
test_pred.i_check_create_evaluation(self)
evaluation.then_the_evaluation_file_is_like(self, example[3])
def test_scenario4(self):
"""
Scenario: Successfully building evaluation from model and test file
Given I have previously executed "<scenario>" or reproduce it with arguments <kwargs>
And I create BigML resources using test file "<test>" to evaluate a model and log evaluation in "<output>"
And I check that the source has been created
And I check that the dataset has been created
And I check that the evaluation has been created
Then the evaluation file is like "<json_evaluation_file>"
Examples:
|scenario | kwargs | test | output | json_evaluation_file |
| scenario_e1| {"data": "../data/iris.csv", "output": "./scenario_e1/predictions.csv"} | ../data/iris.csv | ./scenario_e4/evaluation | ./check_files/evaluation_iris2.json |
"""
print self.test_scenario4.__doc__
examples = [
['scenario_e1', '{"data": "data/iris.csv", "output": "scenario_e1/predictions.csv"}', 'data/iris.csv', 'scenario_e4/evaluation', 'check_files/evaluation_iris2.json']]
for example in examples:
print "\nTesting with:\n", example
test_pred.i_have_previous_scenario_or_reproduce_it(self, example[0], example[1])
evaluation.i_create_all_resources_to_evaluate_with_model(self, data=example[2], output=example[3])
test_pred.i_check_create_source(self)
test_pred.i_check_create_dataset(self, suffix=None)
test_pred.i_check_create_evaluation(self)
evaluation.then_the_evaluation_file_is_like(self, example[4])
def test_scenario5(self):
"""
Scenario: Successfully building evaluation from model and test file with data map
Given I have previously executed "<scenario>" or reproduce it wit
|
dremdem/maykor_python_learn
|
Lesson16/mysite/flask_app.py
|
Python
|
mit
| 843
| 0.006173
|
#-*- coding: utf-8 -*-
# A very simple Flask Hello World app for you to get started with...
from flask import Flask, render_template, redirect, url_for
from forms import TestForm
app = Flask(__name__, template_folder='/home/dremdem/mysite/templates')
app.secret_key = 's3cr3t'
class NobodyCare(object):
def __init__(self):
self.just_a = 'AAAAAAAAAA'
        self.just_b = 'BBBBBBBBB'
self.just_123 = '123'
self.people = [u'Иванов', u'Петров', u'Сидоров']
@app.route('/test', methods=['GET', 'POST'])
def test():
t1 = u'Еще не запускали!'
form = TestForm()
if form.validate_on_submit():
t1 = form.test1.data
|
return render_template('test.html', form=form, t1=t1)
@app.route('/')
def hello_world():
return render_template('hello.html', a='100')
|
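A hedged reconstruction of the forms.TestForm imported by the Flask app above (the forms module itself is not shown); it assumes Flask-WTF, and the field name test1 is taken from the view's use of form.test1.data.

from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField

class TestForm(FlaskForm):
    test1 = StringField('test1')    # read back as form.test1.data in the /test view
    submit = SubmitField('Submit')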
nemesisdesign/django
|
tests/middleware/tests.py
|
Python
|
bsd-3-clause
| 42,600
| 0.001855
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import gzip
import random
import re
from io import BytesIO
from unittest import skipIf
from django.conf import settings
from django.core import mail
from django.core.exceptions import PermissionDenied
from django.http import (
FileResponse, HttpRequest, HttpResponse, HttpResponseNotFound,
HttpResponsePermanentRedirect, HttpResponseRedirect, StreamingHttpResponse,
)
from django.middleware.clickjacking import XFrameOptionsMiddleware
from django.middleware.common import (
BrokenLinkEmailsMiddleware, CommonMiddleware,
)
from django.middleware.gzip import GZipMiddleware
from django.middleware.http import ConditionalGetMiddleware
from django.test import (
    RequestFactory, SimpleTestCase, ignore_warnings, override_settings,
)
from django.utils import six
from django.utils.deprecation import RemovedInDjango21Warning
from django.utils.encoding import force_str
from django.utils.six.moves import range
from django.utils.six.moves.urllib.parse import quote
@override_settings(ROOT_URLCONF='middleware.urls')
class CommonMiddlewareTest(SimpleTestCase):
rf = RequestFactory()
@override_settings(APPEND_SLASH=True)
def test_append_slash_have_slash(self):
"""
URLs with slashes should go unmolested.
"""
request = self.rf.get('/slash/')
self.assertIsNone(CommonMiddleware().process_request(request))
response = HttpResponseNotFound()
self.assertEqual(CommonMiddleware().process_response(request, response), response)
@override_settings(APPEND_SLASH=True)
def test_append_slash_slashless_resource(self):
"""
Matches to explicit slashless URLs should go unmolested.
"""
request = self.rf.get('/noslash')
self.assertIsNone(CommonMiddleware().process_request(request))
response = HttpResponse("Here's the text of the Web page.")
self.assertEqual(CommonMiddleware().process_response(request, response), response)
@override_settings(APPEND_SLASH=True)
def test_append_slash_slashless_unknown(self):
"""
APPEND_SLASH should not redirect to unknown resources.
"""
request = self.rf.get('/unknown')
response = HttpResponseNotFound()
self.assertEqual(CommonMiddleware().process_response(request, response), response)
@override_settings(APPEND_SLASH=True)
def test_append_slash_redirect(self):
"""
APPEND_SLASH should redirect slashless URLs to a valid pattern.
"""
request = self.rf.get('/slash')
r = CommonMiddleware().process_request(request)
self.assertEqual(r.status_code, 301)
@override_settings(APPEND_SLASH=True)
def test_append_slash_redirect_querystring(self):
"""
APPEND_SLASH should preserve querystrings when redirecting.
"""
request = self.rf.get('/slash?test=1')
response = HttpResponseNotFound()
r = CommonMiddleware().process_response(request, response)
self.assertEqual(r.url, '/slash/?test=1')
@override_settings(APPEND_SLASH=True, DEBUG=True)
def test_append_slash_no_redirect_on_POST_in_DEBUG(self):
"""
Tests that while in debug mode, an exception is raised with a warning
when a failed attempt is made to POST, PUT, or PATCH to an URL which
would normally be redirected to a slashed version.
"""
msg = "maintaining %s data. Change your form to point to testserver/slash/"
request = self.rf.get('/slash')
request.method = 'POST'
response = HttpResponseNotFound()
with self.assertRaisesMessage(RuntimeError, msg % request.method):
CommonMiddleware().process_response(request, response)
request = self.rf.get('/slash')
request.method = 'PUT'
with self.assertRaisesMessage(RuntimeError, msg % request.method):
CommonMiddleware().process_response(request, response)
request = self.rf.get('/slash')
request.method = 'PATCH'
with self.assertRaisesMessage(RuntimeError, msg % request.method):
CommonMiddleware().process_response(request, response)
@override_settings(APPEND_SLASH=False)
def test_append_slash_disabled(self):
"""
Disabling append slash functionality should leave slashless URLs alone.
"""
request = self.rf.get('/slash')
response = HttpResponseNotFound()
self.assertEqual(CommonMiddleware().process_response(request, response), response)
@override_settings(APPEND_SLASH=True)
def test_append_slash_quoted(self):
"""
URLs which require quoting should be redirected to their slash version.
"""
request = self.rf.get(quote('/needsquoting#'))
response = HttpResponseNotFound()
r = CommonMiddleware().process_response(request, response)
self.assertEqual(r.status_code, 301)
self.assertEqual(r.url, '/needsquoting%23/')
@override_settings(APPEND_SLASH=False, PREPEND_WWW=True)
def test_prepend_www(self):
request = self.rf.get('/path/')
r = CommonMiddleware().process_request(request)
self.assertEqual(r.status_code, 301)
self.assertEqual(r.url, 'http://www.testserver/path/')
@override_settings(APPEND_SLASH=True, PREPEND_WWW=True)
def test_prepend_www_append_slash_have_slash(self):
request = self.rf.get('/slash/')
r = CommonMiddleware().process_request(request)
self.assertEqual(r.status_code, 301)
self.assertEqual(r.url, 'http://www.testserver/slash/')
@override_settings(APPEND_SLASH=True, PREPEND_WWW=True)
def test_prepend_www_append_slash_slashless(self):
request = self.rf.get('/slash')
r = CommonMiddleware().process_request(request)
self.assertEqual(r.status_code, 301)
self.assertEqual(r.url, 'http://www.testserver/slash/')
# The following tests examine expected behavior given a custom URLconf that
# overrides the default one through the request object.
@override_settings(APPEND_SLASH=True)
def test_append_slash_have_slash_custom_urlconf(self):
"""
URLs with slashes should go unmolested.
"""
request = self.rf.get('/customurlconf/slash/')
request.urlconf = 'middleware.extra_urls'
self.assertIsNone(CommonMiddleware().process_request(request))
response = HttpResponseNotFound()
self.assertEqual(CommonMiddleware().process_response(request, response), response)
@override_settings(APPEND_SLASH=True)
def test_append_slash_slashless_resource_custom_urlconf(self):
"""
Matches to explicit slashless URLs should go unmolested.
"""
request = self.rf.get('/customurlconf/noslash')
request.urlconf = 'middleware.extra_urls'
self.assertIsNone(CommonMiddleware().process_request(request))
response = HttpResponse("Here's the text of the Web page.")
self.assertEqual(CommonMiddleware().process_response(request, response), response)
@override_settings(APPEND_SLASH=True)
def test_append_slash_slashless_unknown_custom_urlconf(self):
"""
APPEND_SLASH should not redirect to unknown resources.
"""
request = self.rf.get('/customurlconf/unknown')
request.urlconf = 'middleware.extra_urls'
self.assertIsNone(CommonMiddleware().process_request(request))
response = HttpResponseNotFound()
self.assertEqual(CommonMiddleware().process_response(request, response), response)
@override_settings(APPEND_SLASH=True)
def test_append_slash_redirect_custom_urlconf(self):
"""
APPEND_SLASH should redirect slashless URLs to a valid pattern.
"""
request = self.rf.get('/customurlconf/slash')
request.urlconf = 'middleware.extra_urls'
response = HttpResponseNotFound()
r = CommonMiddleware().process_response(request, response)
self.assertIsNotNone(r, "CommonMiddleware failed to return APPEND_SLASH redirect using request.urlconf")
self.assert
|
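A hedged settings sketch for the behaviour these tests exercise; in a running project CommonMiddleware is enabled in settings and APPEND_SLASH / PREPEND_WWW drive the 301 redirects (the MIDDLEWARE setting name follows newer Django releases, older ones use MIDDLEWARE_CLASSES).

MIDDLEWARE = [
    'django.middleware.common.CommonMiddleware',
]
APPEND_SLASH = True    # redirect /slash to /slash/ when only the slashed pattern matches
PREPEND_WWW = False    # when True, redirect to the www. host as in test_prepend_www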
django-leonardo/horizon
|
horizon/workflows/views.py
|
Python
|
apache-2.0
| 9,305
| 0
|
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import json
from django import forms
from django import http
from django import shortcuts
from django.views import generic
import six
from horizon import exceptions
from horizon.forms import views as hz_views
from horizon.forms.views import ADD_TO_FIELD_HEADER # noqa
from horizon import messages
class WorkflowView(hz_views.ModalBackdropMixin, generic.TemplateView):
"""A generic class-based view which handles the intricacies of workflow
processing with minimal user configuration.
.. attribute:: workflow_class
The :class:`~horizon.workflows.Workflow` class which this view handles.
Required.
.. attribute:: template_name
The template to use when rendering this view via standard HTTP
requests. Required.
.. attribute:: ajax_template_name
The template to use when rendering the workflow for AJAX requests.
In general the default common template should be used. Defaults to
``"horizon/common/_workflow.html"``.
.. attribute:: context_object_name
The key which should be used for the workflow object in the template
context. Defaults to ``"workflow"``.
"""
workflow_class = None
template_name = 'horizon/common/_workflow_base.html'
context_object_name = "workflow"
ajax_template_name = 'horizon/common/_workflow.html'
step_errors = {}
def __init__(self):
super(WorkflowView, self).__init__()
if not self.workflow_class:
raise AttributeError("You must set the workflow_class attribute "
"on %s." % self.__class__.__name__)
def get_initial(self):
"""Returns initial data for the workflow. Defaults to using the GET
parameters to allow pre-seeding of the workflow context values.
"""
return copy.copy(self.request.GET)
def get_workflow_class(self):
"""Returns the workflow class"""
return self.workflow_class
def get_workflow(self):
"""Returns the instantiated workflow class."""
extra_context = self.get_initial()
entry_point = self.request.GET.get("step", None)
workflow = self.get_workflow_class()(self.request,
context_seed=extra_context,
entry_point=entry_point)
return workflow
def get_context_data(self, **kwargs):
"""Returns the template context, including the workflow class.
This method should be overridden in subclasses to provide additional
context data to the template.
"""
context = super(WorkflowView, self).get_context_data(**kwargs)
workflow = self.get_workflow()
context[self.context_object_name] = workflow
next = self.request.GET.get(workflow.redirect_param_name)
context['REDIRECT_URL'] = next
context['layout'] = self.get_layout()
# For consistency with Workflow class
context['modal'] = 'modal' in context['layout']
if ADD_TO_FIELD_HEADER in self.request.META:
context['add_to_field'] = self.request.META[ADD_TO_FIELD_HEADER]
return context
def get_layout(self):
"""returns classes for the workflow element in template based on
the workflow characteristics
"""
if self.request.is_ajax():
layout = ['modal', ]
if self.workflow_class.fullscreen:
layout += ['fullscreen', ]
else:
layout = ['static_page', ]
if self.workflow_class.wizard:
layout += ['wizard', ]
return layout
def get_template_names(self):
"""Returns the template name to use for this request."""
if self.request.is_ajax():
template = self.ajax_template_name
else:
template = self.template_name
return template
def get_object_id(self, obj):
return getattr(obj, "id", None)
def get_object_display(self, obj):
return getattr(obj, "name", None)
def add_error_to_step(self, error_msg, step):
self.step_errors[step] = error_msg
def set_workflow_step_errors(self, context):
workflow = context['workflow']
for step in self.step_errors:
error_msg = self.step_errors[step]
workflow.add_error_to_step(error_msg, step)
def get(self, request, *args, **kwargs):
"""Handler for HTTP GET requests."""
context = self.get_context_data(**kwargs)
self.set_workflow_step_errors(context)
return self.render_to_response(context)
def validate_steps(self, request, workflow, start, end):
"""Validates the workflow steps from ``start`` to ``end``, inclusive.
Returns a dict describing the validation state of the workflow.
"""
errors = {}
for step in workflow.steps[start:end + 1]:
if not step.action.is_valid():
errors[step.slug] = dict(
(field, [six.text_type(error) for error in errors])
for (field, errors) in six.iteritems(step.action.errors))
return {
'has_errors': bool(errors),
'workflow_slug': workflow.slug,
'errors': errors,
}
def render_next_steps(self, request, workflow, start, end):
"""render next steps
this allows change form content on the fly
"""
rendered = {}
request = copy.copy(self.request)
# patch request method, because we want render new form without
# validation
request.method = "GET"
new_workflow = self.get_workflow_class()(
request,
context_seed=workflow.context,
entry_point=workflow.entry_point)
for step in new_workflow.steps[end:]:
rendered[step.get_id()] = step.render()
return rendered
def post(self, request, *args, **kwargs):
"""Handler for HTTP POST requests."""
context = self.get_context_data(**kwargs)
workflow = context[self.context_object_name]
try:
# Check for the VALIDATE_STEP* headers, if they are present
# and valid integers, return validation results as JSON,
# otherwise proceed normally.
validate_step_start = int(self.request.META.get(
'HTTP_X_HORIZON_VALIDATE_STEP_START', ''))
validate_step_end = int(self.request.META.get(
'HTTP_X_HORIZON_VALIDATE_STEP_END', ''))
except ValueError:
# No VALIDATE_STEP* headers, or invalid values. Just proceed
# with normal workflow handling for POSTs.
pass
else:
# There are valid VALIDATE_STEP* headers, so only do validation
# for the specified steps and return results.
data = self.validate_steps(request, workflow,
validate_step_start,
validate_step_end)
next_steps = self.render_next_steps(request, workflow,
validate_step_start,
validate_step_end)
# append rendered next steps
data["rendered"] = next_steps
return http.HttpResponse(json.dumps(data),
content_type="application/json")
if not workflow.is_valid():
return self.render_to_response(context)
|
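A hedged sketch of the subclassing pattern the WorkflowView docstring above describes; MyWorkflow is a hypothetical horizon.workflows.Workflow subclass and the template path is illustrative.

class MyWorkflowView(WorkflowView):
    workflow_class = MyWorkflow                 # required, per the AttributeError check in __init__
    template_name = 'myapp/_my_workflow.html'   # used for standard (non-AJAX) requests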
eudemonia-research/mincoin
|
rpc.py
|
Python
|
gpl-3.0
| 712
| 0.002809
|
import requests
import json
call_id = 0
class RPC:
def __getattr__(self, name):
def func(*args):
global call_id
url = "http://localhost:12344/jsonrpc"
headers = {'content-type': 'application/json'}
|
payload = {
"method": name,
"params": args,
"jsonrpc": "2.0",
"id": call_id,
}
response = requests.post(url, data=json.dumps(payload), headers=headers).json()
assert response['id'] == call_id
call_id += 1
if 'error' in response:
print(response['error'])
            return response['result']
return func
|
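A hedged usage sketch for the RPC proxy above: __getattr__ turns any attribute access into a JSON-RPC 2.0 call against localhost:12344, so the method name below is only illustrative and depends on what the server actually exposes.

rpc = RPC()
count = rpc.getblockcount()  # posts {"method": "getblockcount", "params": [], "jsonrpc": "2.0", "id": 0}
print(count)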
wenhulove333/ScutServer
|
Sample/Doudizhu/Server/src/ZyGames.Doudizhu.HostServer/bin/Debug/Script/PyScript/Action/Action2005.py
|
Python
|
mit
| 2,064
| 0.004873
|
"""2005_叫地主接口"""
import clr, sys
from action import *
from lang import Lang
clr.AddReference('ZyGames.Framework.Game')
clr.AddReference('ZyGames.Doudizhu.Lang')
clr.AddReference('ZyGames.Doudizhu.Model')
clr.AddReference('ZyGames.Doudizhu.Bll')
from ZyGames.Framework.Game.Service import *
from ZyGames.Doudizhu.Lang import *
from ZyGames.Doudizhu.Model import *
from ZyGames.Doudizhu.Bll.Logic import *
class UrlParam(HttpParam):
def __init__(self):
HttpParam.__init__(self)
self.op = 0
class ActionResult(DataResult):
def __init__(self):
DataResult.__init__(self)
def getUrlElement(httpGet, parent):
urlParam = UrlParam()
if httpGet.Contains("op"):
urlParam.op = httpGet.GetIntValue("op")
else:
urlParam.Result = False
return urlParam
def takeAction(urlParam, parent):
actionResult = ActionResult()
user = parent.Current.User
table = GameRoom.Current.GetTableData(user)
if not table or not user:
parent.ErrorCode = Lang.getLang("ErrorCode")
parent.ErrorInfo = Lang.getLang("LoadError")
actionResult.Result = False
return actionResult
if table.IsCallEnd:
parent.ErrorCode = Lang.getLang("ErrorCode")
parent.ErrorInfo = Lang.getLang("St2005_CalledIsEnd")
actionResult.Result = False
return actionResult
    position = GameTable.Current.GetUserPosition(user, table)
if not position:
parent.ErrorCode = Lang.getLang("ErrorCode")
parent.ErrorInfo = Lang.getLang("LoadError")
actionResult.Result = False
return actionResult
if position.IsAI:
position.IsAI = False
        GameTable.Current.NotifyAutoAiUser(user.UserId, False)
isCall = urlParam.op == 1 and True or False
GameTable.Current.CallCard(user.Property.PositionId, table, isCall)
GameTable.Current.ReStarTableTimer(table)
return actionResult
def buildPacket(writer, urlParam, actionResult):
return True
|
schlos/OIPA-V2.1
|
OIPA/api/v3/resources/activity_list_resources.py
|
Python
|
agpl-3.0
| 4,609
| 0.007377
|
# Django specific
from django.db.models import Q
# Tastypie specific
from tastypie import fields
from tastypie.constants import ALL
from tastypie.resources import ModelResource
from tastypie.serializers import Serializer
# Data specific
from iati.models import Activity
from api.v3.resources.helper_resources import TitleResource, DescriptionResource, FinanceTypeResource
from api.cache import NoTransformCache
from api.v3.resources.advanced_resources import OnlyCountryResource, OnlyRegionResource
from api.v3.resources.activity_view_resources import ActivityViewTiedStatusResource, ActivityViewAidTypeResource, ActivityViewOrganisationResource, ActivityViewActivityStatusResource, ActivityViewSectorResource, ActivityViewCollaborationTypeResource, ActivityViewFlowTypeResource, ActivityViewCurrencyResource
#cache specific
from django.http import HttpResponse
from cache.validator import Validator
class ActivityListResource(ModelResource):
reporting_organisation = fields.ForeignKey(ActivityViewOrganisationResource, 'reporting_organisation', full=True, null=True)
participating_organisations = fields.ToManyField(ActivityViewOrganisationResource, 'participating_organisation', full=True, null=True)
activity_status = fields.ForeignKey(ActivityViewActivityStatusResource, 'activity_status', full=True, null=True)
countries = fields.ToManyField(OnlyCountryResource, 'recipient_country', full=True, null=True)
regions = fields.ToManyField(OnlyRegionResource, 'recipient_region', full=True, null=True)
sectors = fields.ToManyField(ActivityViewSectorResource, 'sector', full=True, null=True)
titles = fields.ToManyField(TitleResource, 'title_set', full=True, null=True)
descriptions = fields.ToManyField(DescriptionResource, 'description_set', full=True, null=True)
collaboration_type = fields.ForeignKey(ActivityViewCollaborationTypeResource, attribute='collaboration_type', full=True, null=True)
default_flow_type = fields.ForeignKey(ActivityViewFlowTypeResource, attribute='default_flow_type', full=True, null=True)
default_finance_type = fields.ForeignKey(FinanceTypeResource, attribute='default_finance_type', full=True, null=True)
default_aid_type = fields.ForeignKey(ActivityViewAidTypeResource, attribute='default_aid_type', full=True, null=True)
default_tied_status = fields.ForeignKey(ActivityViewTiedStatusResource, attribute='default_tied_status', full=True, null=True)
    default_currency = fields.ForeignKey(ActivityViewCurrencyResource, attribute='default_currency', full=True, null=True)
class Meta:
queryset = Activity.objects.all()
resource_name = 'activity-list'
max_limit = 100
serializer = Serializer(formats=['xml', 'json'])
excludes = ['date_created']
ordering = ['start_actual', 'start_planned', 'end_actual', 'end_planned', 'sectors', 'total_budget']
filtering = {
'iati_identifier': 'exact',
            'start_planned': ALL,
'start_actual': ALL,
'end_planned' : ALL,
'end_actual' : ALL,
'total_budget': ALL,
'sectors' : ('exact', 'in'),
'regions': ('exact', 'in'),
'countries': ('exact', 'in'),
'reporting_organisation': ('exact', 'in')
}
cache = NoTransformCache()
def apply_filters(self, request, applicable_filters):
base_object_list = super(ActivityListResource, self).apply_filters(request, applicable_filters)
query = request.GET.get('query', None)
filters = {}
if query:
qset = (
Q(id__in=query, **filters) |
Q(activity_recipient_country__country__name__in=query, **filters) |
Q(title__title__icontains=query, **filters) #|
# Q(description__description__icontains=query, **filters)
)
return base_object_list.filter(qset).distinct()
return base_object_list.filter(**filters).distinct()
def get_list(self, request, **kwargs):
# check if call is cached using validator.is_cached
# check if call contains flush, if it does the call comes from the cache updater and shouldn't return cached results
validator = Validator()
cururl = request.META['PATH_INFO'] + "?" + request.META['QUERY_STRING']
if not 'flush' in cururl and validator.is_cached(cururl):
return HttpResponse(validator.get_cached_call(cururl), mimetype='application/json')
else:
return super(ActivityListResource, self).get_list(request, **kwargs)
|
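A hedged sketch of querying the activity-list resource defined above from a client; the host, port and URL prefix are assumptions, while the resource name, the json format and the free-text query parameter come from the Meta options and apply_filters.

import requests

resp = requests.get('http://localhost:8000/api/v3/activity-list/',
                    params={'format': 'json', 'query': 'water'})
activities = resp.json()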
mbohlool/client-python
|
kubernetes/client/models/v1_storage_class_list.py
|
Python
|
apache-2.0
| 6,537
| 0.001836
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.8.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1StorageClassList(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'api_version': 'str',
'items': 'list[V1StorageClass]',
'kind': 'str',
'metadata': 'V1ListMeta'
}
attribute_map = {
'api_version': 'apiVersion',
'items': 'items',
'kind': 'kind',
'metadata': 'metadata'
}
def __init__(self, api_version=None, items=None, kind=None, metadata=None):
"""
V1StorageClassList - a model defined in Swagger
"""
self._api_version = None
self._items = None
self._kind = None
self._metadata = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
self.items = items
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
@property
def api_version(self):
"""
Gets the api_version of this V1StorageClassList.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources
:return: The api_version of this V1StorageClassList.
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""
Sets the api_version of this V1StorageClassList.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources
:param api_version: The api_version of this V1StorageClassList.
:type: str
"""
self._api_version = api_version
@property
def items(self):
"""
Gets the items of this V1StorageClassList.
Items is the list of StorageClasses
:return: The items of this V1StorageClassList.
:rtype: list[V1StorageClass]
"""
return self._items
@items.setter
def items(self, items):
"""
Sets the items of this V1StorageClassList.
Items is the list of StorageClasses
:param items: The items of this V1StorageClassList.
:type: list[V1StorageClass]
"""
if items is None:
raise ValueError("Invalid value for `items`, must not be `None`")
self._items = items
@property
def kind(self):
"""
Gets the kind of this V1StorageClassList.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
:return: The kind of this V1StorageClassList.
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""
Sets the kind of this V1StorageClassList.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
:param kind: The kind of this V1StorageClassList.
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""
Gets the metadata of this V1StorageClassList.
Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
:return: The metadata of this V1StorageClassList.
:rtype: V1ListMeta
"""
return self._metadata
@metadata.setter
|
def metadata(self, metadata):
"""
Sets the metadata of this V1StorageClassList.
Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
:param metadata: The metadata of this V1StorageClassList.
:type: V1ListMeta
|
"""
self._metadata = metadata
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1StorageClassList):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
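A hedged usage sketch for the generated V1StorageClassList model above; the api_version and kind strings follow Kubernetes conventions but are only illustrative, and items is left empty.

scl = V1StorageClassList(api_version='storage.k8s.io/v1', kind='StorageClassList', items=[])
print(scl.to_dict())                         # nested objects would be expanded via their own to_dict()
print(scl == V1StorageClassList(items=[]))   # False: __eq__ compares the full attribute dict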
PowerOfJusam/JusamMud
|
mudserver.py
|
Python
|
mit
| 15,734
| 0.008262
|
#testing
"""
Basic MUD server module for creating text-based Multi-User Dungeon (MUD) games.
Contains one class, MudServer, which can be instantiated to start a server running
then used to send and receive messages from players.
author: Mark Frimston - mfrimston@gmail.com
"""
import socket
import select
import time
import sys
class MudServer(object):
"""
A basic server for text-based Multi-User Dungeon (MUD) games.
Once created, the server will listen for players connecting using Telnet.
Messages can then be sent to and from multiple connected players.
The 'update' method should be called in a loop to keep the server running.
"""
# An inner class which is instantiated for each connected client to store
# info about them
class _Client(object):
"Holds information about a connected player"
socket = None # the socket object used to communicate with this client
address = "" # the ip address of this client
buffer = "" # holds data send from the client until a full message is received
lastcheck = 0 # the last time we checked if the client was still connected
def __init__(self,socket,address,buffer,lastcheck):
self.socket = socket
self.address = address
self.buffer = buffer
self.lastcheck = lastcheck
# Used to store different types of occurences
_EVENT_NEW_PLAYER = 1
_EVENT_PLAYER_LEFT = 2
_EVENT_COMMAND = 3
# Different states we can be in while reading data from client
# See _process_sent_data function
_READ_STATE_NORMAL = 1
_READ_STATE_COMMAND = 2
_READ_STATE_SUBNEG = 3
# Command codes used by Telnet protocol
# See _process_sent_data function
_TN_INTERPRET_AS_COMMAND = 255
_TN_ARE_YOU_THERE = 246
_TN_WILL = 251
_TN_WONT = 252
_TN_DO = 253
_TN_DONT = 254
_TN_SUBNEGOTIATION_START = 250
_TN_SUBNEGOTIATION_END = 240
_listen_socket = None # socket used to listen for new clients
_clients = {} # holds info on clients. Maps client id to _Client object
_nextid = 0 # counter for assigning each client a new id
_events = [] # list of occurences waiting to be handled by the code
_new_events = [] # list of newly-added occurences
def __init__(self):
"""
Constructs the MudServer object and starts listening for new players.
"""
self._clients = {}
self._nextid = 0
self._events = []
self._new_events = []
# create a new tcp socket which will be used to listen for new clients
self._listen_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # set a special option on the socket which allows the port to be reused
        # immediately without having to wait for a previous socket on it to time out
self._listen_socket.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR, 1)
# bind the socket to an ip address and port. Port 23 is the standard telnet port
# which telnet clients will use. Address 0.0.0.0 means that we will bind to all
# of the available network interfaces
self._listen_socket.bind(("0.0.0.0",23))
# set to non-blocking mode. This means that when we call 'accept', it will
# return immediately without waiting for a connection
self._listen_socket.setblocking(False)
# start listening for connections on the socket
self._listen_socket.listen(1)
def update(self):
"""
Checks for new players, disconnected players, and new messages sent from players.
This method must be called before up-to-date info can be obtained from the
'get_new_players', 'get_disconnected_players' and 'get_commands' methods.
It should be called in a loop to keep the game running.
"""
# check for new stuff
self._check_for_new_connections()
self._check_for_disconnected()
self._check_for_messages()
# move the new events into the main events list so that they can be obtained
# with 'get_new_players', 'get_disconnected_players' and 'get_commands'. The
# previous events are discarded
self._events = list(self._new_events)
self._new_events = []
def get_new_players(self):
"""
Returns a list containing info on any new players that have entered the game
since the last call to 'update'. Each item in the list is a player id number.
"""
retval = []
# go through all the events in the main list
for ev in self._events:
# if the event is a new player occurence, add the info to the list
if ev[0] == self._EVENT_NEW_PLAYER: retval.append(ev[1])
# return the info list
return retval
def get_disconnected_players(self):
"""
Returns a list containing info on any players that have left the game since
the last call to 'update'. Each item in the list is a player id number.
"""
retval = []
# go through all the events in the main list
for ev in self._events:
# if the event is a player disconnect occurence, add the info to the list
if ev[0] == self._EVENT_PLAYER_LEFT: retval.append(ev[1])
# return the info list
return retval
def get_commands(self):
"""
Returns a list containing any commands sent from players since the last call
to 'update'. Each item in the list is a 3-tuple containing the id number of
the sending player, a string containing the command (i.e. the first word of
what they typed), and another string containing the text after the command
"""
retval = []
# go through all the events in the main list
for ev in self._events:
# if the event is a command occurence, add the info to the list
if ev[0] == self._EVENT_COMMAND: retval.append((ev[1],ev[2],ev[3]))
# return the info list
return retval
def send_message(self,to,message):
"""
Sends the text in the 'message' parameter to the player with the id number
given in the 'to' parameter. The text will be printed out in the player's
terminal.
"""
# we make sure to put a newline on the end so the client receives the
# message on its own line
self._attempt_send(to,message+"\n\r")
def shutdown(self):
"""
Closes down the server, disconnecting all clients and closing the
listen socket.
"""
# for each client
for cl in self._clients.values():
            # close the socket, disconnecting the client
cl.socket.shutdown()
cl.socket.close()
# stop listening for new clients
self._listen_socket.close()
def _attempt_send(self,clid,data):
# python 2/3 compatability fix - convert non-unicode string to unicode
|
if sys.version < '3' and type(data)!=unicode: data = unicode(data,"latin1")
try:
# look up the client in the client map and use 'sendall' to send
# the message string on the socket. 'sendall' ensures that all of
# the data is sent in one go
self._clients[clid].socket.sendall(bytearray(data,"latin1"))
# KeyError will be raised if there is no client with the given id in
# the map
except KeyError: pass
# If there is a connection problem with the client (e.g. they have
# disconnected) a socket error will be raised
except socket.error:
self._handle_disconnect(clid)
def _check_for_new_connections(self):
# 'select' is used to check whether there is data waiting to be read
# from the socket. We pass in 3 lists of sockets, the first being those
# to check for readability. It returns 3 lists, the first being
# the sockets that are readable. The last parameter is how long to wait -
# we pass in 0 so that it returns immediately without waiting
rlist,wlist,xlist = select.select([sel
|
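A hedged usage sketch for the MudServer class above, following its docstrings: update() is polled in a loop, get_new_players() and get_commands() report what happened since the last poll, and send_message() writes back to a player id. It binds the hard-coded telnet port 23, so it may need elevated privileges.

mud = MudServer()
try:
    while True:
        mud.update()
        for pid in mud.get_new_players():
            mud.send_message(pid, "Welcome to the MUD!")
        for pid, command, params in mud.get_commands():
            if command == "say":
                mud.send_message(pid, "You said: " + params)
finally:
    mud.shutdown()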
kdart/bpython
|
doc/sphinx/source/conf.py
|
Python
|
mit
| 7,072
| 0.00608
|
# -*- coding: utf-8 -*-
#
# bpython documentation build configuration file, created by
# sphinx-quickstart on Mon Jun 8 11:58:16 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'bpython'
copyright = u'2008-2015 Bob Farrell, Andreas Stuehrk et al.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version_file = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'../../../bpython/_version.py')
with open(version_file) as vf:
version = vf.read().strip().split('=')[-1].replace('\'', '')
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
unused_docs = ['configuration-options']
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'nature'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'logo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'bpythondoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
#latex_documents = [
# ('index', 'bpython.tex', u'bpython Documentation',
# u'Robert Farrell', 'manual'),
#]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('man-bpython', 'bpython',
u'a fancy {curses, GTK+, urwid} interface to the Python interactive interpreter',
[], 1),
('man-bpython-config', 'bpython-config',
u'user configuration file for bpython',
[], 5)
]
# If true, show URL addresses after external links.
#man_show_urls = False
|
naparuba/shinken
|
test/test_end_parsing_types.py
|
Python
|
agpl-3.0
| 6,105
| 0.002293
|
#!/usr/bin/env python
# Copyright (C) 2009-2015:
# Coavoux Sebastien <s.coavoux@free.fr>
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
import unittest
import string
from shinken_test import time_hacker
from shinken.log import logger
from shinken.objects.config import Config
from shinken.brok import Brok
from shinken.external_command import ExternalCommand
from shinken.property import UnusedProp, StringProp, IntegerProp, \
BoolProp, CharProp, DictProp, FloatProp, ListProp, AddrProp, ToGuessProp
class TestEndParsingType(unittest.TestCase):
def map_type(self, obj):
# TODO: Replace all str with unicode when done in property.default attribute
# TODO: Fix ToGuessProp as it may be a list.
if isinstance(obj, ListProp):
return list
if isinstance(obj, StringProp):
return str
if isinstance(obj, UnusedProp):
return str
if isinstance(obj, BoolProp):
return bool
if isinstance(obj, IntegerProp):
return int
if isinstance(obj, FloatProp):
return float
if isinstance(obj, CharProp):
return str
if isinstance(obj, DictProp):
return dict
if isinstance(obj, AddrProp):
return str
if isinstance(obj, ToGuessProp):
return str
def print_header(self):
print "\n" + "#" * 80 + "\n" + "#" + " " * 78 + "#"
print "#" + string.center(self.id(), 78) + "#"
print "#" + " " * 78 + "#\n" + "#" * 80 + "\n"
def add(self, b):
if isinstance(b, Brok):
self.broks[b.id] = b
return
if isinstance(b, ExternalCommand):
self.sched.run_external_command(b.cmd_line)
def test_types(self):
path = 'etc/shinken_1r_1h_1s.cfg'
time_hacker.set_my_time()
self.print_header()
# i am arbiter-like
self.broks = {}
self.me = None
self.log = logger
self.log.setLevel("INFO")
self.log.load_obj(self)
self.config_files = [path]
self.conf = Config()
buf = self.conf.read_config(self.config_files)
raw_objects = self.conf.read_config_buf(buf)
self.conf.create_objects_for_type(raw_objects, 'arbiter')
self.conf.create_objects_for_type(raw_objects, 'module')
self.conf.early_arbiter_linking()
self.conf.create_objects(raw_objects)
self.conf.instance_id = 0
self.conf.instance_name = 'test'
# Hack push_flavor, that is set by the dispatcher
self.conf.push_flavor = 0
self.conf.load_triggers()
self.conf.linkify_templates()
self.conf.apply_inheritance()
self.conf.explode()
self.conf.apply_implicit_inheritance()
self.conf.fill_default()
self.conf.remove_templates()
self.conf.override_properties()
self.conf.linkify()
self.conf.apply_dependencies()
self.conf.explode_global_conf()
self.conf.propagate_timezone_option()
self.conf.create_business_rules()
self.conf.create_business_rules_dependencies()
self.conf.is_correct()
# Cannot do it for all obj for now. We have to ensure unicode everywhere first
for objs in [self.conf.arbiters]:
for obj in objs:
#print "=== obj : %s ===" % obj.__class__
for prop in obj.properties:
if hasattr(obj, prop):
value = getattr(obj, prop)
# We should get rid of None, maybe use the "neutral" value for type
if value is not None:
#print("TESTING %s with value %s" % (prop, value))
self.assertIsInstance(value, self.map_type(obj.properties[prop]))
else:
print("Skipping %s " % prop)
#print "==="
# Manual check of several attr for self.conf.contacts
# because contacts contains unicode attr
for contact in self.conf.contacts:
for prop in ["notificationways", "host_notification_commands", "service_notification_commands"]:
if hasattr(contact, prop):
value = getattr(contact, prop)
# We should get rid of None, maybe use the "neutral" value for type
if value is not None:
print("TESTING %s with value %s" % (prop, value))
self.assertIsInstance(value, self.map_type(contact.properties[prop]))
else:
print("Skipping %s " % prop)
# Same here
for notifway in self.conf.notificationways:
for prop in ["host_notification_commands", "service_notification_commands"]:
if hasattr(notifway, prop):
value = getattr(notifway, prop)
# We should get rid of None, maybe use the "neutral" value for type
if value is not None:
print("TESTING %s with value %s" % (prop, value))
self.assertIsInstance(value, self.map_type(notifway.properties[prop]))
else:
print("Skipping %s " % prop)
if __name__ == '__main__':
unittest.main()
|
stalker314314/MissPopularStackOverflow
|
load_to_mongo_from_dump.py
|
Python
|
gpl-3.0
| 2,043
| 0.008321
|
import logging.handlers
import time
import dateutil.parser
from pymongo.mongo_client import MongoClient
import xml.etree.ElementTree as etree
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
ch = logging.handlers.TimedRotatingFileHandler(filename='load_to_mongo_from_dump.log', when='midnight', interval=1)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
logger.addHandler(ch)
INTEGER_KEYS = ('Id', 'ParentId', 'LastEditorUserId', 'OwnerUserId', 'PostTypeId', 'ViewCount', 'Score', 'AcceptedAnswerId', 'AnswerCount', 'CommentCount', 'FavoriteCount')
STRING_KEYS = ('Title', 'LastEditorDisplayName', 'Body', 'OwnerDisplayName')
DATE_KEYS = ('CommunityOwnedDate', 'LastActivityDate', 'LastEditDate', 'CreationDate', 'ClosedDate')
LIST_KEYS = ('Tags')
def warning_nonexistant_key(key, value):
logger.warning('Unknown key %s with value %s', key, value)
return value
PREPROCESSOR = {
INTEGER_KEYS: lambda k,v: int(v),
STRING_KEYS: lambda k,v: v,
DATE_KEYS: lambda k,v: dateutil.parser.parse(v + 'Z'),
LIST_KEYS: lambda k,v: v[1:-1].split('><'),
'': warning_nonexistant_key
}
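# Illustrative sketch (not part of the original script): how the PREPROCESSOR
# mapping dispatches on an attribute name -- the first key tuple containing the
# name selects the converter, otherwise warning_nonexistant_key is used.
# The sample attribute values below are hypothetical.
#   PREPROCESSOR[next((kt for kt in PREPROCESSOR if 'Score' in kt), '')]('Score', '42')
#   # -> 42 (int, via the INTEGER_KEYS lambda)
#   PREPROCESSOR[next((kt for kt in PREPROCESSOR if 'ClosedDate' in kt), '')]('ClosedDate', '2009-07-15T00:00:00')
#   # -> a datetime.datetime (via dateutil, DATE_KEYS)
#   PREPROCESSOR[next((kt for kt in PREPROCESSOR if 'Nonexistent' in kt), '')]('Nonexistent', 'x')
#   # -> logs a warning and returns 'x' unchanged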
if __name__ == '__main__':
db = MongoClient('localhost', 27017)['so']
xml = r'c:\users\branko\Desktop\Posts.xml'
i = 0
benchmark_start_time = time.time()
for event, elem in etree.iterparse(xml, events=('end',)):
if elem.tag != 'row': continue
entry = dict([key, PREPROCESSOR[next((key_type for key_type in PREPROCESSOR if key in key_type), '')](key, value)] for key,value in elem.attrib.items())
db.entries.insert(entry)
elem.clear()
i = i + 1
if i % 10000 == 0:
logger.info('Processing row %d, speed %f rows/sec', i, 10000 / (time.time() - benchmark_start_time))
benchmark_start_time = time.time()
|
ph4m/eand
|
eand/demo/monodiff_demo.py
|
Python
|
gpl-3.0
| 3,605
| 0.006657
|
'''
eand package (Easy Algebraic Numerical Differentiation)
Copyright (C) 2013 Tu-Hoa Pham
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import numpy as np
from math import tanh, exp, cos, sin, pi, cosh, sqrt
import matplotlib.pyplot as plt
from eand.kmand.monodiff import MonoDiff
'''
Demonstration for monodimensional derivative estimation with algebraic numerical differentiation
See 'Numerical differentiation with annihilators in noisy environments' (Mboup et al., 2009)
'''
# Observation parameters
T0 = 0.; # initial observation time
T1 = 5.; # final observation time
Ts = 1./200.; # sampling period
SNR = 25.; # signal to noise ratio
t = np.arange(T0,T1+Ts,Ts); # observation time
Ns = len(t); # total number of samples
# Noise-free signal
x = np.array([0.]*Ns)
xp = np.array([0.]*Ns)
xpp = np.array([0.]*Ns)
for i in range(Ns):
x[i] = tanh(t[i]-1.) + exp(-t[i]/1.2)*sin(6.*t[i]+pi)
xp[i] = -6.*exp(-5./6.*t[i])*cos(6.*t[i]) + 1./(cosh(1.-t[i])**2.) + 5./6.*exp(-5./6.*t[i])*sin(6.*t[i]);
xpp[i] = 10.*exp(-5.*t[i]/6.)*cos(6.*t[i]) + 1271./36.*exp(-5.*t[i]/6.)*sin(6.*t[i]) + 2.*(1./cosh(1.-t[i])**2.)*tanh(1.-t[i]);
signalRefSeq = [x,xp,xpp]
# Noisy signal
noise_mean = 0;
noise_var = (1./(10.**(SNR/10.)-1.))*(1./Ns)*sum(x**2.);
noise_sd = sqrt(noise_var);
noise = np.random.normal(noise_mean,noise_sd,Ns)
signalNoisy = signalRefSeq[0] + noise
# Numerical differentiation parameters
monoDiffCfg = {'n': 1, # derivative order to estimate
'N': 1, # Taylor expansion order
'kappa': 0, # differentiator parameter
'mu': 0, # differentiator parameter
'M': 40, # estimation samples
'Ts': Ts, # sampling period
'xi': 0.5, # xi parameter for real lambda
'lambdaOptType': 'noisyenv', # 'mismodel' or 'noisyenv'
'causality': 'causal', # 'causal' or 'anticausal'
'flagCompleteTime': 'none'} # complete tPost into t: 'none', 'zero', 'findiff'
# Construction of the (kappa,mu)-algebraic numerical differentiator
monoDiff = MonoDiff(monoDiffCfg)
# Differentiation of the noisy signal
(tPost,dPost) = monoDiff.differentiate(t,signalNoisy)
# Plot initial noisy signal
plt.figure(-1)
plt.plot(t, signalRefSeq[0], 'b', label='reference')
plt.plot(t, signalNoisy, 'r', label='noisy signal')
plt.grid()
plt.axhline(color='k')
plt.axvline(color='k')
plt.legend()
plt.title('Initial noisy signal')
# Plot derivative estimate
plt.figure(monoDiffCfg['n'])
plt.plot(t, signalRefSeq[monoDiffCfg['n']], 'b', label='reference')
plt.plot(tPost, dPost, 'r', label='estimate')
plt.grid()
plt.axhline(color='k')
plt.axvline(color='k')
plt.legend()
plt.title('Order %d derivative estimate' % (monoDiffCfg['n']))
plt.show()
|
buguelos/odoo
|
addons/web_notification/__init__.py
|
Python
|
agpl-3.0
| 64
| 0.015625
|
#flake8: noqa
#
import notifications
import base
import setting
|
matthewayne/evernote-sdk-python
|
sample/all_methods/listSearches.py
|
Python
|
bsd-3-clause
| 670
| 0.008955
|
# Import the Evernote client
from evernote.api.client import EvernoteClient
# Define access token either:
# Developer Tokens (https://dev.evernote.com/doc/articles/dev_tokens.php)
# or OAuth (https://dev.evernote.com/doc/articles/authentication.php)
access_token = "insert dev or oauth token here"
# Setup the client
client = EvernoteClient(token = access_token, sandbox = True)
# Get note store object
note_store = client.get_note_store()
# Returns the current state of the search with the provided GUID.
saved_searches = note_store.listSearches()
print "Found %s saved searches:" % len(saved_searches)
for search in saved_searches:
print " '%s'" % search.query
|
DistrictDataLabs/minimum-entropy
|
fugato/serializers.py
|
Python
|
apache-2.0
| 4,013
| 0.004236
|
# fugato.serializers
# JSON Serializers for the Fugato app
#
# Author: Benjamin Bengfort <bbengfort@districtdatalabs.com>
# Created: Thu Oct 23 15:03:36 2014 -0400
#
# Copyright (C) 2016 District Data Labs
# For license information, see LICENSE.txt
#
# ID: serializers.py [8eae6c4] benjamin@bengfort.com $
"""
JSON Serializers for the Fugato app
"""
##########################################################################
## Imports
##########################################################################
from fugato.models import *
from fugato.exceptions import *
from users.serializers import *
from minent.utils import signature
from rest_framework import serializers
##########################################################################
## Question Serializers
##########################################################################
class QuestionSerializer(serializers.HyperlinkedModelSerializer):
"""
Serializes the Question object for use in the API.
"""
author = UserSerializer(
default=serializers.CurrentUserDefault(),
read_only=True,
)
page_url = serializers.SerializerMethodField(read_only=True)
tags = serializers.StringRelatedField(many=True, read_only=True)
class Meta:
model = Question
fields = (
'url', 'text', 'author', 'page_url',
'details', 'details_rendered', 'tags'
)
extra_kwargs = {
'url': {'view_name': 'api:question-detail',},
'details_rendered': {'read_only': True},
}
######################################################################
## Serializer Methods
######################################################################
def get_page_url(self, obj):
"""
Returns the models' detail absolute url.
"""
return obj.get_absolute_url()
#####################################################################
## Override create and update for API
######################################################################
def create(self, validated_data):
"""
Override the create method to deal with duplicate questions and
other API-specific errors that can happen on Question creation.
"""
## Check to make sure there is no duplicate
qsig = signature(validated_data['text'])
if Question.objects.filter(signature=qsig).exists():
raise DuplicateQuestion()
## Create the model as before
return super(QuestionSerializer, self).create(validated_data)
def update(self, instance, validated_data):
"""
Override the update method to perform non-duplication checks that
aren't instance-specific and to determine if other fields should
be updated like the parse or the concepts.
Currently this is simply the default behavior.
TODO:
- Check if reparsing needs to be performed
- Check if concepts need to be dealt with
- Check if the question text has changed and what to do
"""
return super(QuestionSerializer, self).update(instance, validated_data)
##########################################################################
## Answer Serializers
##########################################################################
class AnswerSerializer(serializers.HyperlinkedModelSerializer):
"""
Serializes the Answer object for use in the API.
"""
author = UserSerializer(
default=serializers.CurrentUserDefault(),
read_only=True,
)
class Meta:
model = Answer
fields = ('url', 'text', 'text_rendered', 'author', 'question', 'created', 'modified')
read_only_fields = ('text_rendered', 'author')
extra_kwargs = {
'url': {'view_name': 'api:answer-detail',},
'question': {'view_name': 'api:question-detail',}
}
|
NikolaYolov/invenio_backup
|
modules/bibauthorid/lib/bibauthorid_config.py
|
Python
|
gpl-2.0
| 10,357
| 0.003862
|
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
bibauthorid_config
Part of the framework responsible for supplying configuration options used
by different parts of the framework. Note, however, that it's best to
declare any configuration options for the modules within themselves.
"""
import logging.handlers
import sys
import os.path as osp
from invenio.access_control_config import SUPERADMINROLE
GLOBAL_CONFIG = True
try:
from invenio.config import CFG_BIBAUTHORID_PERSONID_SQL_MAX_THREADS, \
CFG_BIBAUTHORID_MAX_PROCESSES, \
CFG_BIBAUTHORID_PERSONID_MIN_P_FROM_BCTKD_RA, \
CFG_BIBAUTHORID_PERSONID_MIN_P_FROM_NEW_RA, \
CFG_BIBAUTHORID_PERSONID_MAX_COMP_LIST_MIN_TRSH, \
CFG_BIBAUTHORID_PERSONID_MAX_COMP_LIST_MIN_TRSH_P_N, \
CFG_BIBAUTHORID_EXTERNAL_CLAIMED_RECORDS_KEY, \
CFG_BIBAUTHORID_ATTACH_VA_TO_MULTIPLE_RAS , \
CFG_BIBAUTHORID_ENABLED, \
CFG_BIBAUTHORID_ON_AUTHORPAGES
except ImportError:
GLOBAL_CONFIG = False
# Current version of the framework
VERSION = '1.1.0'
# make sure current directory is importable
FILE_PATH = osp.dirname(osp.abspath(__file__))
if FILE_PATH not in sys.path:
sys.path.insert(0, FILE_PATH)
# Permission definitions as in actions defined in roles
CLAIMPAPER_ADMIN_ROLE = "claimpaperoperators"
CLAIMPAPER_USER_ROLE = "claimpaperusers"
CMP_ENABLED_ROLE = "paperclaimviewers"
CHP_ENABLED_ROLE = "paperattributionviewers"
AID_LINKS_ROLE = "paperattributionlinkviewers"
CLAIMPAPER_VIEW_PID_UNIVERSE = 'claimpaper_view_pid_universe'
CLAIMPAPER_CHANGE_OWN_DATA = 'claimpaper_change_own_data'
CLAIMPAPER_CHANGE_OTHERS_DATA = 'claimpaper_change_others_data'
CLAIMPAPER_CLAIM_OWN_PAPERS = 'claimpaper_claim_own_papers'
CLAIMPAPER_CLAIM_OTHERS_PAPERS = 'claimpaper_claim_others_papers'
#Number of persons in a search result for which the first five papers will be shown
PERSON_SEARCH_RESULTS_SHOW_PAPERS_PERSON_LIMIT = 10
CMPROLESLCUL = {'guest': 0,
CLAIMPAPER_USER_ROLE: 25,
CLAIMPAPER_ADMIN_ROLE: 50,
SUPERADMINROLE: 50}
# Globally enable AuthorID Interfaces.
# If False: No guest, user or operator will have access to the system.
if GLOBAL_CONFIG:
AID_ENABLED = CFG_BIBAUTHORID_ENABLED
else:
AID_ENABLED = True
# Enable AuthorID information on the author pages.
if GLOBAL_CONFIG:
AID_ON_AUTHORPAGES = CFG_BIBAUTHORID_ON_AUTHORPAGES
else:
AID_ON_AUTHORPAGES = True
# Limit the disambiguation to a specific collections. Leave empty for all
# Collections are to be defined as a list of strings
LIMIT_TO_COLLECTIONS = []
# Exclude documents that are visible in a collection mentioned here:
EXCLUDE_COLLECTIONS = ["HEPNAMES", "INST"]
# User info keys for externally claimed records
# e.g. for arXiv SSO: ["external_arxivids"]
if GLOBAL_CONFIG and CFG_BIBAUTHORID_EXTERNAL_CLAIMED_RECORDS_KEY:
EXTERNAL_CLAIMED_RECORDS_KEY = CFG_BIBAUTHORID_EXTERNAL_CLAIMED_RECORDS_KEY
else:
EXTERNAL_CLAIMED_RECORDS_KEY = []
# Lists all filters that are valid to filter the export by.
# An example is 'arxiv' to return only papers with a 037 entry named arxiv
VALID_EXPORT_FILTERS = ["arxiv"]
# Max number of threads to parallelize sql queries in table_utils updates
if GLOBAL_CONFIG and CFG_BIBAUTHORID_PERSONID_SQL_MAX_THREADS:
PERSONID_SQL_MAX_THREADS = CFG_BIBAUTHORID_PERSONID_SQL_MAX_THREADS
else:
PERSONID_SQL_MAX_THREADS = 4
# Max number of processes spawned by the disambiguation algorithm
if GLOBAL_CONFIG and CFG_BIBAUTHORID_MAX_PROCESSES:
BIBAUTHORID_MAX_PROCESSES = CFG_BIBAUTHORID_MAX_PROCESSES
else:
BIBAUTHORID_MAX_PROCESSES = 4
# Threshold for connecting a paper to a person: BCTKD are the papers from the
# backtracked RAs found searching back for the papers already connected to the
# persons; NEW is for the newly found ones
if GLOBAL_CONFIG and CFG_BIBAUTHORID_PERSONID_MIN_P_FROM_BCTKD_RA:
PERSONID_MIN_P_FROM_BCTKD_RA = CFG_BIBAUTHORID_PERSONID_MIN_P_FROM_BCTKD_RA
else:
PERSONID_MIN_P_FROM_BCTKD_RA = 0.5
if GLOBAL_CONFIG and CFG_BIBAUTHORID_PERSONID_MIN_P_FROM_NEW_RA:
PERSONID_MIN_P_FROM_NEW_RA = CFG_BIBAUTHORID_PERSONID_MIN_P_FROM_NEW_RA
else:
PERSONID_MIN_P_FROM_NEW_RA = 0.5
# Minimum threshold for the compatibility list of persons to an RA: if no RA
# is more compatible than that, it will create a new person
if GLOBAL_CONFIG and CFG_BIBAUTHORID_PERSONID_MAX_COMP_LIST_MIN_TRSH:
PERSONID_MAX_COMP_LIST_MIN_TRSH = CFG_BIBAUTHORID_PERSONID_MAX_COMP_LIST_MIN_TRSH
else:
PERSONID_MAX_COMP_LIST_MIN_TRSH = 0.5
if GLOBAL_CONFIG and CFG_BIBAUTHORID_PERSONID_MAX_COMP_LIST_MIN_TRSH_P_N:
PERSONID_MAX_COMP_LIST_MIN_TRSH_P_N = CFG_BIBAUTHORID_PERSONID_MAX_COMP_LIST_MIN_TRSH_P_N
else:
PERSONID_MAX_COMP_LIST_MIN_TRSH_P_N = 0.5
#Create_new_person flags thresholds
PERSONID_CNP_FLAG_1 = 0.75
PERSONID_CNP_FLAG_MINUS1 = 0.5
# update_personid_from_algorithm person_paper_list for get_person_ra call
# minimum flag
PERSONID_UPFA_PPLMF = -1
#Tables Utils debug output
TABLES_UTILS_DEBUG = False
# Is the authorid algorithm allowed to attach a virtual author to multiple
# real authors in the last run of the orphan processing?
if GLOBAL_CONFIG and CFG_BIBAUTHORID_ATTACH_VA_TO_MULTIPLE_RAS:
ATTACH_VA_TO_MULTIPLE_RAS = CFG_BIBAUTHORID_ATTACH_VA_TO_MULTIPLE_RAS
else:
ATTACH_VA_TO_MULTIPLE_RAS = False
# Shall we send from locally defined eMail address or from the users one
# when we send out a ticket? Default is True -> send with user's email
TICKET_SENDING_FROM_USER_EMAIL = True
# Log Level for the message output.
# Log Levels are defined in the Python logging system
# 0 - 50 (log everything - log exceptions)
LOG_LEVEL = 30
# Default logging file name
LOG_FILENAME = "job.log"
# tables_utils_config
TABLE_POPULATION_BUNCH_SIZE = 6000
# Max number of authors on a paper to be considered while creating jobs
MAX_AUTHORS_PER_DOCUMENT = 15
# Set limit_authors to true, if papers that are written by collaborations
# or by more than MAX_AUTHORS_PER_DOCUMENT authors shall be excluded
# The default is False.
LIMIT_AUTHORS_PER_DOCUMENT = False
# Regexp for the names separation
NAMES_SEPARATOR_CHARACTER_LIST = ",;.=\-\(\)"
SURNAMES_SEPARATOR_CHARACTER_LIST = ",;"
# Path where all the modules live and which prefix the have.
MODULE_PATH = ("%s/bibauthorid_comparison_functions/aid_cmp_*.py"
% (FILE_PATH,))
## threshold for adding a va to more than one real authors for
## the add_new_virtualauthor function
REALAUTHOR_VA_ADD_THERSHOLD = 0.449
## parameters for the 'compute real author name' function
CONFIDENCE_THRESHOLD = 0.46
P_THRESHOLD = 0.46
INVERSE_THRESHOLD_DELTA = 0.1
## parameters for the comparison function chain
CONSIDERATION_THRESHOLD = 0.04
## Set up complex logging system:
## - Setup Default logger, which logs to console on critical events only
## - on init call, set up a three-way logging system:
## - 1. Log to console anything ERROR or higher.
## - 2. Log everything LOG_LEVEL or higher to memory and
## - 3. Flush to file in the specified path.
LOGGERS = []
HANDLERS = {}
## Default logger and handler
DEFAULT_HANDLER = logging.StreamHandler()
DEFAULT_LOG_FORMAT = logging.Formatter('%(levelname)-8s %(message)s')
DEFAULT_HANDLER.setFormatter(DEFAULT_LOG_FORMAT)
DEFAULT_HANDLER.setLevel(logging.CRITICAL)
## workaround for the classes to detect that LOGGER is actually an instance
## of type loggi
|
jongha/thunderfs
|
src/web/application/libs/shorten.py
|
Python
|
agpl-3.0
| 301
| 0.0299
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import codecs
import json
from application.libs import bitly
def get(url):
api = bitly.Api(login='o_1gc6gttus8', apikey='R_a6ce0adb432e4e42b48c7635cc62571a')
shorten_url = api.shorten(url,{'history':1})
return shorten_url
|
NikolaiT/proxy.py
|
setup.py
|
Python
|
bsd-3-clause
| 1,527
| 0.024885
|
# -*- coding: utf-8 -*-
"""
proxy.py
~~~~~~~~
HTTP Proxy Server in Python with explicit WebSocket support.
:copyright: (c) 2013 by Abhinav Singh.
:license: BSD, see LICENSE for more details.
Added WebSocket support to modify and change WebSocket messages on Summer 2015.
The original source code may be found on:
https://github.com/abhinavsingh/proxy.py
"""
from setuptools import setup
import proxy
classifiers = [
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: BSD License',
'Operating System :: MacOS',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: Microsoft',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Topic :: Internet :: Proxy Servers',
'Topic :: Internet :: WWW/HTTP :: HTTP Servers',
]
setup(
name = 'proxy_websockets',
version = proxy.__version__,
description = proxy.__description__,
long_description = open('README.md').read().strip(),
author = proxy.__author__,
author_email = proxy.__author_email__,
url = proxy.__homepage__,
license = proxy.__license__,
py_modules = ['proxy'],
scripts = ['proxy.py'],
install_requires = [],
classifiers = classifiers
)
|
bmmalone/as-auto-sklearn
|
as_auto_sklearn/train_as_auto_sklearn.py
|
Python
|
mit
| 10,979
| 0.005192
|
#! /usr/bin/env python3
import argparse
import ctypes
import itertools
import joblib
import numpy as np
import os
import pandas as pd
import shlex
import string
import sys
import yaml
import sklearn.pipeline
import sklearn.preprocessing
from aslib_scenario.aslib_scenario import ASlibScenario
import misc.automl_utils as automl_utils
import misc.math_utils as math_utils
import misc.parallel as parallel
import misc.shell_utils as shell_utils
import misc.utils as utils
from misc.column_selector import ColumnSelector
from misc.column_selector import ColumnTransformer
import logging
import misc.logging_utils as logging_utils
logger = logging.getLogger(__name__)
default_num_cpus = 1
default_num_blas_cpus = 1
def _get_pipeline(args, config, scenario):
""" Create the pipeline which will later be trained to predict runtimes.
Parameters
----------
args: argparse.Namespace
The options for training the autosklearn regressor
config: dict-like
Other configuration options, such as whether to preprocess the data
scenario: ASlibScenario
The scenario. *N.B.* This is only used to get the feature names and
grouping. No information is "leaked" to the pipeline.
"""
pipeline_steps = []
# find the allowed features
allowed_features = scenario.features
allowed_feature_groups = config.get('allowed_feature_groups', [])
if len(allowed_feature_groups) > 0:
allowed_features = [
scenario.feature_group_dict[feature_group]['provides']
for feature_group in allowed_feature_groups
]
allowed_features = utils.flatten_lists(allowed_features)
feature_set_selector = ColumnSelector(allowed_features)
fs = ('feature_set', feature_set_selector)
pipeline_steps.append(fs)
# check for taking the log
if 'fields_to_log' in config:
# then make sure all of the fields are in the scenario
fields_to_log = config['fields_to_log']
missing_fields = [f for f in fields_to_log if f not in scenario.features]
if len(missing_fields) > 0:
missing_fields_str = ",".join(missing_fields)
msg = ("[train-auto-sklearn]: Could not find the following fields "
"to log: {}".format(missing_fields_str))
raise ValueError(msg)
log_transformer = ColumnTransformer(fields_to_log, np.log1p)
log_transformer = ('log_transformer', log_transformer)
pipeline_steps.append(log_transformer)
# handling missing values
imputer_strategy = config.get('imputer_strategy', 'mean')
automl_utils.check_imputer_strategy(
imputer_strategy,
raise_error=True,
error_prefix="[train-auto-sklearn]: "
)
msg = ("[train-auto-sklearn]: Using imputation strategy: {}".format(
imputer_strategy))
logger.debug(msg)
imputer = sklearn.preprocessing.Imputer(strategy=imputer_strategy)
imputer = ('imputer', imputer)
pipeline_steps.append(imputer)
# optionally, check if we want to preprocess
preprocessing_strategy = config.get('preprocessing_strategy', None)
if preprocessing_strategy == 'scale':
msg = ("[train-auto-sklearn]: Adding standard scaler (zero mean, unit "
"variance) for preprocessing")
logger.debug(msg)
s = ('scaler', sklearn.preprocessing.StandardScaler())
pipeline_steps.append(s)
elif preprocessing_strategy is None:
# no preprocessing is fine
pass
else:
msg = ("[train-auto-sklearn]: The preprocessing strategy is not "
"recognized. given: {}".format(preprocessing_strategy))
raise ValueError(msg)
# last, we need to convert it to a "contiguous" array
ct = sklearn.preprocessing.FunctionTransformer(np.ascontiguousarray)
ct = ("contiguous_transformer", ct)
pipeline_steps.append(ct)
# then we use the auto-sklearn options
# in order to match with AutoFolio, check if we have wallclock_limit in the
# config file
args.total_training_time = config.get("wallclock_limit",
args.total_training_time)
regressor = automl_utils.AutoSklearnWrapper()
regressor.create_regressor(args)
r = ('auto_sklearn', regressor)
pipeline_steps.append(r)
# and create the pipeline
pipeline = sklearn.pipeline.Pipeline(pipeline_steps)
return pipeline
def _outer_cv(solver_fold, args, config):
solver, fold = solver_fold
# there are problems serializing the aslib scenario, so just read it again
scenario = ASlibScenario()
scenario.read_scenario(args.scenario)
msg = "Solver: {}, Fold: {}".format(solver, fold)
logger.info(msg)
msg = "Constructing template pipeline"
logger.info(msg)
pipeline = _get_pipeline(args, config, scenario)
msg = "Extracting solver and fold performance data"
logger.info(msg)
testing, training = scenario.get_split(fold)
X_train = training.feature_data
y_train = training.performance_data[solver].values
if 'log_performance_data' in config:
y_train = np.log1p(y_train)
msg = "Fitting the pipeline"
logger.info(msg)
pipeline = pipeline.fit(X_train, y_train)
out = string.Template(args.out)
out = out.substitute(solver=solver, fold=fold)
msg = "Writing fit pipeline to disk: {}".format(out)
logger.info(msg)
joblib.dump(pipeline, out)
return pipeline
def main():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="This script trains a model to predict the runtime for a "
"solver from an ASlib scenario using autosklearn. It assumes an "
"\"outer\" cross-validation strategy, and it only trains a model for "
"the indicated folds and solvers. It then writes the learned model to "
"disk. It *does not* collect any statistics, make predictions ,etc.")
parser.add_argument('scenario', help="The ASlib scenario")
parser.add_argument('out', help="A template string for the filenames for "
"the learned models. They are written with joblib.dump, so they need "
"to be read back in with joblib.load. ${solver} and ${fold} are the "
"template part of the string. It is probably necessary to surround "
"this argument with single quotes in order to prevent shell "
"replacement of the template parts.")
parser.add_argument('--config', help="A (yaml) config file which specifies "
"options controlling the learner behavior")
parser.add_argument('--solvers', help="The solvers for which models will "
"be learned. By default, models for all solvers are learned",
nargs='*', default=[])
parser.add_argument('--folds', help="The outer-cv folds for which a model "
"will be learned. By default, models for all folds are learned",
type=int, nargs='*', default=[])
parser.add_argument('-p', '--num-cpus', help="The number of CPUs to use "
"for parallel solver/fold training", type=int,
default=default_num_cpus)
parser.add_argument('--num-blas-threads', help="The number of threads to "
"use for parallelizing BLAS. The total number of CPUs will be "
"\"num_cpus * num_blas_cpus\". Currently, this flag only affects "
"OpenBLAS and MKL.", type=int, default=default_num_blas_cpus)
parser.add_argument('--do-not-update-env', help="By default, num-blas-threads "
"requires that relevant environment variables are updated. Likewise, "
"if num-cpus is greater than one, it is necessary to turn off python "
"assertions due to an issue with multiprocessing. If this flag is "
"present, then the script assumes those updates are already handled. "
"Otherwise, the relevant environment variables are set, and a new "
"processes is spawned with this flag and otherwise the same "
"arguments. This flag is not inended for external users.",
action='store_true')
automl_utils.add_automl_options(parser)
logging_utils.add_logging_options(parser)
ar
|
fernandezcuesta/ansible
|
lib/ansible/modules/identity/ipa/ipa_group.py
|
Python
|
gpl-3.0
| 9,300
| 0.001935
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ipa_group
author: Thomas Krahn (@Nosmoht)
short_description: Manage FreeIPA group
description:
- Add, modify and delete group within IPA server
options:
cn:
description:
- Canonical name.
- Can not be changed as it is the unique identifier.
required: true
aliases: ['name']
external:
description:
- Allow adding external non-IPA members from trusted domains.
required: false
gidnumber:
description:
- GID (use this option to set it manually).
required: false
group:
description:
- List of group names assigned to this group.
- If an empty list is passed all groups will be removed from this group.
- If option is omitted assigned groups will not be checked or changed.
- Groups that are already assigned but not passed will be removed.
nonposix:
description:
- Create as a non-POSIX group.
required: false
user:
description:
- List of user names assigned to this group.
- If an empty list is passed all users will be removed from this group.
- If option is omitted assigned users will not be checked or changed.
- Users that are already assigned but not passed will be removed.
state:
description:
- State to ensure
required: false
default: "present"
choices: ["present", "absent"]
ipa_port:
description: Port of IPA server
required: false
default: 443
ipa_host:
description: IP or hostname of IPA server
required: false
default: "ipa.example.com"
ipa_user:
description: Administrative account used on IPA server
required: false
default: "admin"
ipa_pass:
description: Password of administrative user
required: true
ipa_prot:
description: Protocol used by IPA server
required: false
default: "https"
choices: ["http", "https"]
validate_certs:
description:
- This only applies if C(ipa_prot) is I(https).
- If set to C(no), the SSL certificates will not be validated.
- This should only be set to C(no) when used on personally controlled sites using self-signed certificates.
required: false
default: true
version_added: "2.3"
'''
EXAMPLES = '''
# Ensure group is present
- ipa_group:
name: oinstall
gidnumber: 54321
state: present
ipa_host: ipa.example.com
ipa_user: admin
ipa_pass: topsecret
# Ensure that groups sysops and appops are assigned to ops but no other group
- ipa_group:
name: ops
group:
- sysops
- appops
ipa_host: ipa.example.com
ipa_user: admin
ipa_pass: topsecret
# Ensure that users linus and larry are assigned to the group, but no other user
- ipa_group:
name: sysops
user:
- linus
- larry
ipa_host: ipa.example.com
ipa_user: admin
ipa_pass: topsecret
# Ensure group is absent
- ipa_group:
name: sysops
state: absent
ipa_host: ipa.example.com
ipa_user: admin
ipa_pass: topsecret
'''
RETURN = '''
group:
description: Group as returned by IPA API
returned: always
type: dict
'''
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ipa import IPAClient
from ansible.module_utils._text import to_native
class GroupIPAClient(IPAClient):
def __init__(self, module, host, port, protocol):
super(GroupIPAClient, self).__init__(module, host, port, protocol)
def group_find(self, name):
return self._post_json(method='group_find', name=None, item={'all': True, 'cn': name})
def group_add(self, name, item):
return self._post_json(method='group_add', name=name, item=item)
def group_mod(self, name, item):
return self._post_json(method='group_mod', name=name, item=item)
def group_del(self, name):
return self._post_json(method='group_del', name=name)
def group_add_member(self, name, item):
return self._post_json(method='group_add_member', name=name, item=item)
def group_add_member_group(self, name, item):
return self.group_add_member(name=name, item={'group': item})
def group_add_member_user(self, name, item):
return self.group_add_member(name=name, item={'user': item})
def group_remove_member(self, name, item):
return self._post_json(method='group_remove_member', name=name, item=item)
def group_remove_member_group(self, name, item):
return self.group_remove_member(name=name, item={'group': item})
def group_remove_member_user(self, name, item):
return self.group_remove_member(name=name, item={'user': item})
def get_group_dict(description=None, external=None, gid=None, nonposix=None):
group = {}
if description is not None:
group['description'] = description
if external is not None:
group['external'] = external
if gid is not None:
group['gidnumber'] = gid
if nonposix is not None:
group['nonposix'] = nonposix
return group
def get_group_diff(client, ipa_group, module_group):
data = []
# With group_add attribute nonposix is passed, whereas with group_mod only posix can be passed.
if 'nonposix' in module_group:
# Only non-posix groups can be changed to posix
if not module_group['nonposix'] and ipa_group.get('nonposix'):
module_group['posix'] = True
del module_group['nonposix']
return client.get_diff(ipa_data=ipa_group, module_data=module_group)
def ensure(module, client):
state = module.params['state']
name = module.params['name']
group = module.params['group']
user = module.params['user']
module_group = get_group_dict(description=module.params['description'], external=module.params['external'],
gid=module.params['gidnumber'], nonposix=module.params['nonposix'])
ipa_group = client.group_find(name=name)
changed = False
if state == 'present':
if not ipa_group:
changed = True
if not module.check_mode:
ipa_group = client.group_add(name, item=module_group)
else:
diff = get_group_diff(client, ipa_group, module_group)
if len(diff) > 0:
changed = True
if not module.check_mode:
data = {}
for key in diff:
data[key] = module_group.get(key)
client.group_mod(name=name, item=data)
if group is not None:
changed = client.modify_if_diff(name, ipa_group.get('member_group', []), group,
client.group_add_member_group,
client.group_remove_member_group) or changed
if user is not None:
changed = client.modify_if_diff(name, ipa_group.get('member_user', []), user,
client.group_add_member_user,
client.group_remove_member_user) or changed
else:
if ipa_group:
changed = True
if not module.check_mode:
client.group_del(name)
return changed, client.group_find(name=name)
def main():
module = AnsibleModule(
argument_spec=dict(
cn=dict(type='str', required=True, aliases=['name']),
description=dict(type='str', required=False),
external=dict(type='bool', required=False),
gidnumber=dict(type='str', required=False, aliases=['gid']),
group=dict(type='list', required=False),
nonposix=dict(type='bool', required=False),
state=dict(type='str', required=False, default='present', choices=['present', 'absent']),
user=dict(type='list', required=F
|
sejimhp/BulletReflector
|
BulletManager.py
|
Python
|
mit
| 999
| 0.003083
|
from Common import *
class BulletManager:
def __init__(self):
self.bullets = []
def add(self, x, y, r, rad, bullet_type):
if bullet_type == 1:
self.bullets.append(MyBullet(x, y, r, rad))
elif bullet_type == 2:
self.bullets.append(EnemyBullet(x, y, r + random.randint(1, 3), rad))
elif bullet_type == 3:
self.bullets.append(Arrow(x, y, r, rad))
elif bullet_type == 4:
self.bullets.append(EnemyLaser(x, y, r, rad))
elif bullet_type == 5:
self.bullets.append(PlayerLaser(x, y, r, rad))
def update(self, stage, player):
# Remove bullets that have gone off-screen
for bullet in self.bullets:
bullet.update(player)
if bullet.valid(stage) == False:
self.bullets.remove(bullet)
def draw(self, screen, player, stage):
for bullet in self.bullets:
bullet.draw(screen, player, stage)
|
rsalmei/clearly
|
clearly/server/__init__.py
|
Python
|
mit
| 34
| 0
|
from .server import ClearlyServer
|
rallylee/gem5
|
src/mem/ruby/structures/DirectoryMemory.py
|
Python
|
bsd-3-clause
| 2,558
| 0.000391
|
# Copyright (c) 2017 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2009 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Steve Reinhardt
# Brad Beckmann
from m5.params import *
from m5.proxy import *
from m5.SimObject import SimObject
class RubyDirectoryMemory(SimObject):
type = 'RubyDirectoryMemory'
cxx_class = 'DirectoryMemory'
cxx_header = "mem/ruby/structures/DirectoryMemory.hh"
addr_ranges = VectorParam.AddrRange(
Parent.addr_ranges, "Address range this directory responds to")
|
sprockets/sprockets.clients.postgresql
|
sprockets/clients/postgresql/__init__.py
|
Python
|
bsd-3-clause
| 5,585
| 0.003581
|
"""
PostgreSQL Session API
======================
The Session classes wrap the Queries :py:class:`Session <queries.Session>` and
:py:class:`TornadoSession <queries.tornado_session.TornadoSession>` classes
providing environment variable based configuration.
Environment variables should be set using the ``PGSQL[_DBNAME]`` format
where the value is a PostgreSQL URI.
For PostgreSQL URI format, see:
http://www.postgresql.org/docs/9.3/static/libpq-connect.html#LIBPQ-CONNSTRING
As an example, given the environment variable:
.. code:: python
PGSQL_FOO = 'postgresql://bar:baz@foohost:6000/foo'
and code for creating a :py:class:`Session` instance for the database name
``foo``:
.. code:: python
session = sprockets.postgresql.Session('foo')
A :py:class:`queries.Session` object will be created that connects to Postgres
running on ``foohost``, port ``6000`` using the username ``bar`` and the
password ``baz``, connecting to the ``foo`` database.
"""
version_info = (2, 0, 1)
__version__ = '.'.join(str(v) for v in version_info)
import logging
import os
from queries import pool
import queries
from queries import tornado_session
_ARGUMENTS = ['host', 'port', 'dbname', 'user', 'password']
LOGGER = logging.getLogger(__name__)
# For ease of access to different cursor types
from queries import DictCursor
from queries import NamedTupleCursor
from queries import RealDictCursor
from queries import LoggingCursor
from queries import MinTimeLoggingCursor
# Expose exceptions so clients do not need to import queries as well
from queries import DataError
from queries import DatabaseError
from queries import IntegrityError
from queries import InterfaceError
from queries import InternalError
from queries import NotSupportedError
from queries import OperationalError
from queries import ProgrammingError
from queries import QueryCanceledError
from queries import TransactionRollbackError
def _get_uri(dbname):
"""Return the URI for the specified database name from an environment
variable. If dbname is blank, the ``PGSQL`` environment variable is used,
otherwise the database name is cast to upper case and concatenated to
``PGSQL_`` and the URI is retrieved from ``PGSQL_DBNAME``. For example,
if the value ``foo`` is passed in, the environment variable used would be
``PGSQL_FOO``.
:param str dbname: The database name to construct the URI for
:return: str
:raises: KeyError
"""
if not dbname:
return os.environ['PGSQL']
return os.environ['PGSQL_{0}'.format(dbname).upper()]
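# A minimal usage sketch (not part of the original module); the environment
# variable value below is the hypothetical one from the module docstring:
#   os.environ['PGSQL_FOO'] = 'postgresql://bar:baz@foohost:6000/foo'
#   _get_uri('foo')   # -> 'postgresql://bar:baz@foohost:6000/foo'
#   _get_uri('')      # -> os.environ['PGSQL'] (raises KeyError if it is unset)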
class Session(queries.Session):
"""Extends queries.Session using configuration data that is stored
in environment variables.
Utilizes connection pooling to ensure that multiple concurrent asynchronous
queries do not block each other. Heavily trafficked services will require
a higher ``max_pool_size`` to allow for greater connection concurrency.
:param str dbname: PostgreSQL database name
:param queries.cursor: The cursor type to use
:param int pool_idle_ttl: How long idle pools keep connections open
:param int pool_max_size: The maximum size of the pool to use
:param str db_url: Optional database connection URL. Use this when
you need to connect to a database that is only known at runtime.
"""
def __init__(self, dbname,
cursor_factory=queries.RealDictCursor,
pool_idle_ttl=pool.DEFAULT_IDLE_TTL,
pool_max_size=pool.DEFAULT_MAX_SIZE,
db_url=None):
if db_url is None:
db_url = _get_uri(dbname)
super(Session, self).__init__(db_url,
cursor_factory,
pool_idle_ttl,
pool_max_size)
class TornadoSession(tornado_session.TornadoSession):
"""Extends queries.TornadoSession using configuration data that is stored
in environment variables.
Utilizes connection pooling to ensure that multiple concurrent asynchronous
queries do not block each other. Heavily trafficked services will require
a higher ``max_pool_size`` to allow for greater connection concurrency.
:py:meth:`query <queries.tornado_session.TornadoSession.query>` and
:py:meth:`callproc <queries.tornado_session.TornadoSession.callproc>` must
call :py:meth:`Results.free <queries.tornado_session.Results.free>`
:param str dbname: PostgreSQL database name
:param queries.cursor: The cursor type to use
:param int pool_idle_ttl: How long idle pools keep connections open
:param int pool_max_size: The maximum size of the pool to use
:param tornado.ioloop.IOLoop ioloop: Pass in the instance of the tornado
IOLoop you would like to use. Defaults to the global instance.
:param str db_url: Optional database connection URL. Use this when
you need to connect to a database that is only known at runtime.
"""
def __init__(self, dbname,
cursor_factory=queries.RealDictCursor,
pool_idle_ttl=pool.DEFAULT_IDLE_TTL,
pool_max_size=tornado_session.DEFAULT_MAX_POOL_SIZE,
io_loop=None, db_url=None):
if db_url is None:
db_url = _get_uri(dbname)
super(TornadoSession, self).__init__(db_url,
cursor_factory,
pool_idle_ttl,
pool_max_size,
io_loop)
|
efce/voltPy
|
manager/helpers/html.py
|
Python
|
gpl-3.0
| 458
| 0.008734
|
from django.utils.safestring import mark_safe
from django.contrib.staticfiles.templatetags.staticfiles import static
def locked():
return mark_safe('<img src="%s" alt="locked" style="border:0px; margin: 0px; padding: 0px"/>' % (
static('manager/padlock_close.png')
)
)
def unlocked():
return mark_safe('<img src="%s" alt="unlocked" style="border:0px; margin: 0px; padding: 0px"/>' % (
static('manager/padlock_open.png')
))
|
google/mipnerf
|
internal/vis.py
|
Python
|
apache-2.0
| 5,130
| 0.009747
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Helper functions for visualizing things."""
import jax
import jax.numpy as jnp
import jax.scipy as jsp
import matplotlib.cm as cm
def sinebow(h):
"""A cyclic and uniform colormap, see http://basecase.org/env/on-rainbows."""
f = lambda x: jnp.sin(jnp.pi * x)**2
return jnp.stack([f(3 / 6 - h), f(5 / 6 - h), f(7 / 6 - h)], -1)
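# Illustrative values (not from the original source): sinebow maps a hue in
# [0, 1) to an RGB triple, e.g.
#   sinebow(jnp.array(0.0))  # ~ [1.0, 0.25, 0.25] (reddish)
#   sinebow(jnp.array(0.5))  # ~ [0.0, 0.75, 0.75] (cyan-ish)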
def convolve2d(z, f):
return jsp.signal.convolve2d(
z, f, mode='same', precision=jax.lax.Precision.HIGHEST)
def depth_to_normals(depth):
"""Assuming `depth` is orthographic, linearize it to a set of normals."""
f_blur = jnp.array([1, 2, 1]) / 4
f_edge = jnp.array([-1, 0, 1]) / 2
dy = convolve2d(depth, f_blur[None, :] * f_edge[:, None])
dx = convolve2d(depth, f_blur[:, None] * f_edge[None, :])
inv_denom = 1 / jnp.sqrt(1 + dx**2 + dy**2)
normals = jnp.stack([dx * inv_denom, dy * inv_denom, inv_denom], -1)
return normals
def visualize_depth(depth,
acc=None,
near=None,
far=None,
ignore_frac=0,
curve_fn=lambda x: -jnp.log(x + jnp.finfo(jnp.float32).eps),
modulus=0,
colormap=None):
"""Visualize a depth map.
Args:
depth: A depth map.
acc: An accumulation map, in [0, 1].
near: The depth of the near plane, if None then just use the min().
far: The depth of the far plane, if None then just use the max().
ignore_frac: What fraction of the depth map to ignore when automatically
generating `near` and `far`. Depends on `acc` as well as `depth`.
curve_fn: A curve function that gets applied to `depth`, `near`, and `far`
before the rest of visualization. Good choices: x, 1/(x+eps), log(x+eps).
Note that the default choice will flip the sign of depths, so that the
default colormap (turbo) renders "near" as red and "far" as blue.
modulus: If > 0, mod the normalized depth by `modulus`. Use (0, 1].
colormap: A colormap function. If None (default), will be set to
matplotlib's turbo if modulus==0, sinebow otherwise.
Returns:
An RGB visualization of `depth`.
"""
if acc is None:
acc = jnp.ones_like(depth)
acc = jnp.where(jnp.isnan(depth), jnp.zeros_like(acc), acc)
# Sort `depth` and `acc` according to `depth`, then identify the depth values
# that span the middle of `acc`, ignoring `ignore_frac` fraction of `acc`.
sortidx = jnp.argsort(depth.reshape([-1]))
depth_sorted = depth.reshape([-1])[sortidx]
acc_sorted = acc.reshape([-1])[sortidx]
cum_acc_sorted = jnp.cumsum(acc_sorted)
mask = ((cum_acc_sorted >= cum_acc_sorted[-1] * ignore_frac) &
(cum_acc_sorted <= cum_acc_sorted[-1] * (1 - ignore_frac)))
depth_keep = depth_sorted[mask]
# If `near` or `far` are None, use the highest and lowest non-NaN values in
# `depth_keep` as automatic near/far planes.
eps = jnp.finfo(jnp.float32).eps
near = near or depth_keep[0] - eps
far = far or depth_keep[-1] + eps
# Curve all values.
depth, near, far = [curve_fn(x) for x in [depth, near, far]]
# Wrap the values around if requested.
if modulus > 0:
value = jnp.mod(depth, modulus) / modulus
colormap = colormap or sinebow
else:
# Scale to [0, 1].
value = jnp.nan_to_num(
jnp.clip((depth - jnp.minimum(near, far)) / jnp.abs(far - near), 0, 1))
colormap = colormap or cm.get_cmap('turbo')
vis = colormap(value)[:, :, :3]
# Set non-accumulated pixels to white.
vis = vis * acc[:, :, None] + (1 - acc)[:, :, None]
return vis
def visualize_normals(depth, acc, scaling=None):
"""Visualize fake normals of `depth` (optionally scaled to be isotropic)."""
if scaling is None:
mask = ~jnp.isnan(depth)
x, y = jnp.meshgrid(
jnp.arange(depth.shape[1]), jnp.arange(depth.shape[0]), indexing='xy')
xy_var = (jnp.var(x[mask]) + jnp.var(y[mask])) / 2
z_var = jnp.var(depth[mask])
scaling = jnp.sqrt(xy_var / z_var)
scaled_depth = scaling * depth
normals = depth_to_normals(scaled_depth)
vis = jnp.isnan(normals) + jnp.nan_to_num((normals + 1) / 2, 0)
# Set non-accumulated pixels to white.
if acc is not None:
vis = vis * acc[:, :, None] + (1 - acc)[:, :, None]
return vis
def visualize_suite(depth, acc):
"""A wrapper around other visualizations for easy integration."""
vis = {
'depth': visualize_depth(depth, acc),
'depth_mod': visualize_depth(depth, acc, modulus=0.1),
'depth_normals': visualize_normals(depth, acc)
}
return vis
|
astorfi/speech_feature_extraction
|
example/test_local.py
|
Python
|
apache-2.0
| 2,213
| 0.009489
|
"""
This example is provided to test the package locally.
There is no need to install the package using pip.
Only forking the project repository is required.
"""
import scipy.io.wavfile as wav
import numpy as np
import os
import sys
lib_path = os.path.abspath(os.path.join('..'))
print(lib_path)
sys.path.append(lib_path)
from speechpy import processing
from speechpy import feature
import os
file_name = os.path.join(os.path.dirname(os.path.abspath(__file__)),'Alesis-Sanctuary-QCard-AcoustcBas-C2.wav')
fs, signal = wav.read(file_name)
signal = signal[:,0]
# Pre-emphasizing.
signal_preemphasized = processing.preemphasis(signal, cof=0.98)
# Stacking frames
frames = processing.stack_frames(signal, sampling_frequency=fs,
frame_length=0.020,
frame_stride=0.01,
filter=lambda x: np.ones((x,)),
zero_padding=True)
# Extracting power spectrum
power_spectrum = processing.power_spectrum(frames, fft_points=512)
print('power spectrum shape=', power_spectrum.shape)
############# Extract MFCC features #############
mfcc = feature.mfcc(signal, sampling_frequency=fs,
frame_length=0.020, frame_stride=0.01,
num_filters=40, fft_length=512, low_frequency=0,
high_frequency=None)
# Cepstral mean variance normalization.
mfcc_cmvn = processing.cmvn(mfcc,variance_normalization=True)
print('mfcc(mean + variance normalized) feature shape=', mfcc_cmvn.shape)
# Extracting derivative features
mfcc_feature_cube = feature.extract_derivative_feature(mfcc)
print('mfcc feature cube shape=', mfcc_feature_cube.shape)
############# Extract logenergy features #############
logenergy = feature.lmfe(signal, sampling_frequency=fs,
frame_length=0.020, frame_stride=0.01,
num_filters=40, fft_length=512,
low_frequency=0, high_frequency=None)
logenergy_feature_cube = feature.extract_derivative_feature(logenergy)
print('logenergy features=', logenergy.shape)
|
dana-i2cat/felix
|
optin_manager/src/python/openflow/optin_manager/sfa/drivers/OFShell.py
|
Python
|
apache-2.0
| 8,320
| 0.023918
|
import threading
import time
import re
from openflow.optin_manager.sfa.openflow_utils.CreateOFSliver import CreateOFSliver
from openflow.optin_manager.sfa.openflow_utils.sliver_status import get_sliver_status
from openflow.optin_manager.sfa.openflow_utils.delete_slice import delete_slice
from openflow.optin_manager.sfa.openflow_utils.rspec3_to_expedient import get_fs_from_group
from openflow.optin_manager.sfa.util.xrn import Xrn
from openflow.optin_manager.opts.models import Experiment, ExperimentFLowSpace
from openflow.optin_manager.xmlrpc_server.models import CallBackServerProxy, FVServerProxy
#TODO: Uncomment when merge
#from expedient.common.utils.mail import send_mail
from django.conf import settings
from openflow.optin_manager.sfa.openflow_utils.ServiceThread import ServiceThread
from openflow.optin_manager.sfa.models import ExpiringComponents
from openflow.optin_manager.sfa.openflow_utils.federationlinkmanager import FederationLinkManager
#XXX TEST
from openflow.optin_manager.sfa.tests.data_example import test_switches, test_links
class OFShell:
def __init__(self):
pass
@staticmethod
def get_switches(used_switches=[]):
complete_list = []
switches = OFShell().get_raw_switches()
for switch in switches:
if len(used_switches)>0:
if not switch[0] in used_switches:
continue
if int(switch[1]['nPorts']) == 0:
#TODO: Uncomment when merge with ofelia.development
#send_mail('SFA OptinManager Error', 'There are some errors related with switches: GetSwitches() returned 0 ports.',settings.ROOT_EMAIL, [settings.ROOT_EMAIL])
raise Exception("The switch with dpid:%s has a connection problem and the OCF Island Manager has already been informed. Please try again later." % str(switch[0]))
#TODO: Send Mail to the Island Manager Here.
port_list = switch[1]['portNames'].split(',')
ports = list()
for port in port_list:
match = re.match(r'[\s]*(.*)\((.*)\)', port)
ports.append({'port_name':match.group(1), 'port_num':match.group(2)})
complete_list.append({'dpid':switch[0], 'ports':ports})
return complete_list
@staticmethod
def get_links():
links = OFShell().get_raw_links()
link_list = list()
for link in links:
link_list.append({'src':{ 'dpid':link[0],'port':link[1]}, 'dst':{'dpid':link[2], 'port':link[3]}})
#for link in FederationLinkManager.get_federated_links():
# link_list.append({'src':{'dpid':link['src_id'], 'port':link['src_port']}, 'dst':{'dpid':link['dst_id'],'port':link['dst_port']}})
return link_list
@staticmethod
def get_federation_links():
link_list = list()
for link in FederationLinkManager.get_federated_links():
link_list.append({'src':{'dpid':link['src_id'], 'port':link['src_port']}, 'dst':{'dpid':link['dst_id'],'port':link['dst_port']}})
return link_list
def GetNodes(self,slice_urn=None,authority=None):
if not slice_urn:
switch_list = self.get_switches()
link_list = self.get_links()
federated_links = self.get_federation_links()
return {'switches':switch_list, 'links':link_list, 'federation_links':federated_links}
else:
nodes = list()
experiments = Experiment.objects.filter(slice_id=slice_urn)
for experiment in experiments:
expfss = ExperimentFLowSpace.objects.filter(exp = experiment.id)
for expfs in expfss:
if not expfs.dpid in nodes:
nodes.append(expfs.dpid)
switches = self.get_switches(nodes)
return {'switches':switches, 'links':[]}
#def GetSlice(self,slicename,authority):
#
# name = slicename
# nodes = self.GetNodes()
# slices = dict()
# List = list()
# return slices
def StartSlice(self, slice_urn):
#Look if the slice exists and return True or RecordNotFound
experiments = Experiment.objects.filter(slice_id=str(slice_urn))
if len(experiments) > 0:
return True
else:
raise ""
def StopSlice(self, slice_urn):
#Look if the slice exists and return True or RecordNotFound
experiments = Experiment.objects.filter(slice_id=slice_urn)
if len(experiments) > 0:
return True
else:
raise ""
def RebootSlice(self, slice_urn):
return self.StartSlice(slice_urn)
def DeleteSlice(self, slice_urn):
try:
delete_slice(slice_urn)
return 1
except Exception as e:
print e
raise ""
def CreateSliver(self, requested_attributes, slice_urn, authority,expiration):
project_description = 'SFA Project from %s' %authority
slice_id = slice_urn
for rspec_attrs in requested_attributes:
switch_slivers = get_fs_from_group(rspec_attrs['match'], rspec_attrs['group'])
controller = rspec_attrs['controller'][0]['url']
email = rspec_attrs['email']
email_pass = ''
slice_description = rspec_attrs['description']
if not self.check_req_switches(switch_slivers):
raise Exception("The Requested OF Switches on the RSpec do not match with the available OF switches of this island. Please check the datapath IDs of your Request RSpec.")
CreateOFSliver(slice_id, authority, project_description, slice_urn, slice_description, controller, email, email_pass, switch_slivers)
if expiration:
#Since there is a synchronous connection, expiring_components table is easier to fill than VTAM
#ExpiringComponents.objects.create(slice=slice_urn, authority=authority, expires=expiration)
pass
return 1
def SliverStatus(self, slice_urn):
try:
print "-----------------------------------------------------------SliverStatus"
sliver_status = get_sliver_status(slice_urn)
print sliver_status
if len(sliver_status) == 0:
xrn = Xrn(slice_urn, 'slice')
slice_leaf = xrn.get_leaf()
sliver_status = ['The requested flowspace for slice %s is still pending for approval' %slice_leaf]
granted_fs = {'granted_flowspaces':get_sliver_status(slice_urn)}
return [granted_fs]
except Exception as e:
import traceback
print traceback.print_exc()
raise e
def check_req_switches(self, switch_slivers):
available_switches = self.get_raw_switches()
for sliver in switch_slivers:
found = False
for switch in available_switches:
if str(sliver['datapath_id']) == str(switch[0]): #Avoiding Unicodes
found = True
break
if found == False:
return False
return True
def get_raw_switches(self):
try:
#raise Exception("")
fv = FVServerProxy.objects.all()[0]
switches = fv.get_switches()
except Exception as e:
switches = test_switches
#raise e
return switches
def get_raw_links(self):
try:
#raise Exception("")
fv = FVServerProxy.objects.all()[0]
links =
|
nicolacimmino/LoP-RAN
|
LoPAccessPoint/icmp_packet.py
|
Python
|
gpl-3.0
| 6,510
| 0.008295
|
"""
A pure Python "ping" implementation, based on a rewrite by Johannes Meyer,
of a script originally by Matthew Dixon Cowles, which in turn was derived
from "ping.c", distributed in Linux's netkit. The version this was forked
out of can be found here: https://gist.github.com/pklaus/856268
I've rewritten nearly everything for enhanced performance and readability,
and removed unnecessary functions (the asynchronous PingQuery and related).
Those of the original comments that still applied to this script were kept.
A lot was changed in my rewrite, and as far as my tests went it is working
quite beautifully. In any case, bug reports are very much welcome.
Please note that ICMP messages can only be sent by processes run as root.
Since this was originally based on "ping.c", which was released into the
public domain a long, long time ago, I will follow the Open Source mindset
and waive all rights over this script. Do whatever you want with it, just
don't hold me liable for any losses or damages that could somehow come
out of a freaking "ping" script.
Cheers and enjoy!
"""
#
# Changes by Nicola Cimmino 2014
# Added function create_response_packet
import socket
import struct
import select
import random
from time import time
ICMP_ECHO_REQUEST = 8
ICMP_ECHO_RESPONSE = 0
ICMP_CODE = socket.getprotobyname('icmp')
ERROR_DESCR = {
1: 'ERROR: ICMP messages can only be sent from processes running as root.',
10013: 'ERROR: ICMP messages can only be sent by users or processes with administrator rights.'
}
__all__ = ['create_packet', 'echo', 'recursive']
def checksum(source_string):
sum = 0
count_to = (len(source_string) / 2) * 2
count = 0
while count < count_to:
this_val = ord(source_string[count + 1])*256+ord(source_string[count])
sum = sum + this_val
sum = sum & 0xffffffff # Necessary?
count = count + 2
if count_to < len(source_string):
sum = sum + ord(source_string[len(source_string) - 1])
sum = sum & 0xffffffff # Necessary?
sum = (sum >> 16) + (sum & 0xffff)
sum = sum + (sum >> 16)
answer = ~sum
answer = answer & 0xffff
answer = answer >> 8 | (answer << 8 & 0xff00)
return answer
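# The loop above implements the standard Internet (RFC 1071) checksum: a
# one's-complement sum of 16-bit little-endian words, folded to 16 bits and
# byte-swapped into network order before being returned.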
def create_packet(id):
"""Creates a new echo request packet based on the given "id"."""
# Builds Dummy Header
# Header is type (8), code (8), checksum (16), id (16), sequence (16)
header = struct.pack('bbHHh', ICMP_ECHO_REQUEST, 0, 0, id, 1)
data = 192 * 'Q'
# Builds Real Header
header = struct.pack('bbHHh', ICMP_ECHO_REQUEST, 0, socket.htons(checksum(header + data)), id, 1)
return header + data
def create_response_packet(id, seq, data):
"""Create a new echo response packet based on the given "id".
"""
# Build Dummy Header
# Header is type (8), code (8), checksum (16), id (16), sequence (16)
header = struct.pack('bbHHH', ICMP_ECHO_RESPONSE, 0, 0, id, seq)
# Build Real Header
header = struct.pack(
'bbHHH', ICMP_ECHO_RESPONSE, 0, socket.htons(checksum(header + data)), id, seq)
return header + data
def response_handler(sock, packet_id, time_sent, timeout):
"""Handles packet response, returning either the delay or timing out (returns "None")."""
while True:
ready = select.select([sock], [], [], timeout)
if ready[0] == []: # Timeout
return
time_received = time()
rec_packet, addr = sock.recvfrom(1024)
icmp_header = rec_packet[20:28]
type, code, checksum, rec_id, sequence = struct.unpack('bbHHh', icmp_header)
if rec_id == packet_id:
return time_received - time_sent
timeout -= time_received - time_sent
if timeout <= 0:
return
def echo(dest_addr, timeout=1):
"""
Sends one ICMP packet to the given destination address (dest_addr)
which can be either an ip or a hostname.
"timeout" can be any integer or float except for negatives and zero.
Returns either the delay (in seconds), or "None" on timeout or an
invalid address, respectively.
"""
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_RAW, ICMP_CODE)
except socket.error, (error_number, msg):
if error_number in ERROR_DESCR:
# Operation not permitted
raise socket.error(''.join((msg, ERROR_DESCR[error_number])))
raise # Raises the original error
try:
socket.gethostbyname(dest_addr)
except socket.gaierror:
return
packet_id = int((id(timeout) * random.random()) % 65535)
packet = create_packet(packet_id)
while packet:
# The icmp protocol does not use a port, but the function
# below expects it, so we just give it a dummy port.
sent = sock.sendto(packet, (dest_addr, 1))
packet = packet[sent:]
delay = response_handler(sock, packet_id, time(), timeout)
sock.close()
return delay
def recursive(dest_addr, count=4, timeout=1, verbose=False):
"""
Pings "dest_addr" "count" times and returns a list of replies. If
"verbose" is True prints live feedback.
"count" can be any integer larger than 0.
"timeout" can be any integer or float except for negatives and zero.
Returns a list of delay times for the response (in seconds). If no
response is recorded "None" is stored.
"""
if verbose:
print("PING {} ; SEQUENCE {} ; TIMEOUT {}s".format(dest_addr, count, timeout))
nrc = 0
log = []
for i in xrange(count):
log.append(echo(dest_addr, timeout))
if verbose:
if log[-1] is None:
print("Echo Request Failed...")
nrc += 1
else:
print("Echo Received: sequence_id={} delay={} ms").format(i, round(log[-1]*1000, 3))
# Code block below is malfunctioning, it's late and I'm too damn tired to fix it. Maybe tomorrow.
# if verbose:
# print("PACKET STATISTICS: sent={} received={} ratio={}%".format(count, count-nrc, (count-nrc * 100)/count))
# print("max/min/avg in ms {}/{}/{}".format(max(log), min(log), round(sum([x*1000 for x in log if x is not None])/len(log), 3)))
return log
# Testing
if __name__ == '__main__':
recursive('www.heise.de', 4, 2, True)
print("")
recursive('google.com', 4, 2, True)
print("")
recursive('invalid-test-url.com', 4, 2, True)
print("")
recursive('127.0.0.1', 4, 2, True)
|
bluetiki/pylab
|
telnet.py
|
Python
|
bsd-2-clause
| 1,063
| 0.007526
|
#!/usr/bin/env python
import telnetlib
import time
import sys
import socket
TEL_PORT = 23
TEL_TO = 3
def write_cmd(cmd, conn):
cmd = cmd.rstrip()
conn.write(cmd + '\n')
time.sleep(1)
return conn.read_very_eager()
def telnet_conn(ip, port, timeout):
try:
conn = telnetlib.Telnet(ip, port, timeout)
except socket.timeout:
sys.exit("connection timed out")
return conn
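# The truncated prompts used below ("sername:", "assword:") match both the
# capitalised and lower-case "Username:" / "Password:" variants.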
def login(user, passwd, conn):
output = conn.read_until("sername:", TEL_TO)
conn.write(user + '\n')
output += conn.read_until("assword:", TEL_TO)
conn.write(passwd + '\n')
return output
def main():
ip = '50.76.53.27'
user = 'pyclass'
passwd = '88newclass'
conn = telnet_conn(ip, TEL_PORT, TEL_TO)
login(user, passwd, conn)
hostname = write_cmd('show run | i hostname', conn)
hostname = hostname.lstrip('hostname ')
write_cmd('terminal length 0', conn)
out = write_cmd('show ver ', conn)
print out.rstrip('\n' + hostname + '#')
conn.close()
if __name__ == "__main__":
main()
|
danialbehzadi/Nokia-RM-1013-2.0.0.11
|
webkit/Tools/wx/build/build_utils.py
|
Python
|
gpl-3.0
| 6,778
| 0.006639
|
# Copyright (C) 2009 Kevin Ollivier All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Helper functions for the WebKit build.
import commands
import glob
import os
import platform
import re
import shutil
import sys
import urllib
import urlparse
def get_output(command):
"""
Windows-compatible function for getting output from a command.
"""
if sys.platform.startswith('win'):
f = os.popen(command)
return f.read().strip()
else:
return commands.getoutput(command)
def get_excludes(root, patterns):
"""
Get a list of exclude patterns going down several dirs.
TODO: Make this fully recursive.
"""
excludes = []
for pattern in patterns:
subdir_pattern = os.sep + '*'
for subdir in [subdir_pattern, subdir_pattern*2, subdir_pattern*3]:
adir = root + subdir + os.sep + pattern
files = glob.glob(adir)
for afile in files:
excludes.append(os.path.basename(afile))
return excludes
def get_dirs_for_features(root, features, dirs):
"""
Find which directories to include in the list of build dirs based upon the
enabled port(s) and features.
"""
outdirs = dirs
for adir in dirs:
for feature in features:
relpath = os.path.join(adir, feature)
featuredir = os.path.join(root, relpath)
if os.path.exists(featuredir) and not relpath in outdirs:
outdirs.append(relpath)
return outdirs
def download_if_newer(url, destdir):
"""
Checks if the file on the server is newer than the one in the user's tree,
and if so, downloads it.
Returns the filename of the downloaded file if downloaded, or None if
the existing file matches the one on the server.
"""
obj = urlparse.urlparse(url)
filename = os.path.basename(obj.path)
destfile = os.path.join(destdir, filename)
urlobj = urllib.urlopen(url)
size = long(urlobj.info().getheader('Content-Length'))
def download_callback(downloaded, block_size, total_size):
downloaded = block_size * downloaded
if downloaded > total_size:
downloaded = total_size
sys.stdout.write('%s %d of %d bytes downloaded\r' % (filename, downloaded, total_size))
# NB: We don't check modified time as Python doesn't yet handle timezone conversion
# properly when converting strings to time objects.
if not os.path.exists(destfile) or os.path.getsize(destfile) != size:
urllib.urlretrieve(url, destfile, download_callback)
print ''
return destfile
return None
def update_wx_deps(conf, wk_root, msvc_version='msvc2008'):
"""
Download and update tools needed to build the wx port.
"""
import Logs
Logs.info('Ensuring wxWebKit dependencies are up-to-date.')
wklibs_dir = os.path.join(wk_root, 'WebKitLibraries')
waf = download_if_newer('http://wxwebkit.wxcommunity.com/downloads/deps/waf', os.path.join(wk_root, 'Tools', 'wx'))
if waf:
# TODO: Make the build restart itself after an update.
Logs.warn('Build system updated, please restart build.')
sys.exit(1)
# since this module is still experimental
wxpy_dir = os.path.join(wk_root, 'Source', 'WebKit', 'wx', 'bindings', 'python')
swig_module = download_if_newer('http://wxwebkit.wxcommunity.com/downloads/deps/swig.py.txt', wxpy_dir)
if swig_module:
shutil.copy(os.path.join(wxpy_dir, 'swig.py.txt'), os.path.join(wxpy_dir, 'swig.py'))
if sys.platform.startswith('win'):
Logs.info('downloading deps package')
archive = download_if_newer('http://wxwebkit.wxcommunity.com/downloads/deps/wxWebKitDeps-%s.zip' % msvc_version, wklibs_dir)
if archive and os.path.exists(archive):
os.system('unzip -o %s -d %s' % (archive, os.path.join(wklibs_dir, msvc_version)))
elif sys.platform.startswith('darwin'):
# export the right compiler for building the dependencies
if platform.release().startswith('10'): # Snow Leopard
os.environ['CC'] = conf.env['CC'][0]
os.environ['CXX'] = conf.env['CXX'][0]
os.system('%s/Tools/wx/install-unix-extras' % wk_root)
def includeDirsForSources(sources):
include_dirs = []
for group in sources:
for source in group:
dirname = os.path.dirname(source)
if not dirname in include_dirs:
include_dirs.append(dirname)
return include_dirs
def flattenSources(sources):
flat_sources = []
for group in sources:
flat_sources.extend(group)
return flat_sources
def git_branch_name():
try:
branches = commands.getoutput("git branch --no-color")
match = re.search('^\* (.*)', branches, re.MULTILINE)
if match:
return ".%s" % match.group(1)
except:
pass
return ""
def get_config(wk_root):
config_file = os.path.join(wk_root, 'WebKitBuild', 'Configuration')
config = 'Debug'
if os.path.exists(config_file):
config = open(config_file).read()
return config
def svn_revision():
if os.system("git-svn info") == 0:
info = commands.getoutput("git-svn info ../..")
else:
info = commands.getoutput("svn info")
for line in info.split("\n"):
if line.startswith("Revision: "):
return line.replace("Revision: ", "").strip()
return ""
|
PMEAL/OpenPNM
|
openpnm/models/network/__init__.py
|
Python
|
mit
| 158
| 0
|
r"""
Network
=======
This submodule contains models for calculating topological properties of
networks
"""
from ._topology import *
from ._health import *
|
truedays/sandbox
|
python/pretty.py
|
Python
|
gpl-3.0
| 3,498
| 0.023156
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
# CTA API CLI YAH
import requests
#from bs4 import BeautifulSoup
from xmltodict import parse
from time import gmtime, strftime
# enable debugging
import cgitb
cgitb.enable()
# get API key from file
f = open('./.cta-api.key', 'r')
APIKEY = "?key=" + f.read(25)
f.close()
URL = "http://www.ctabustracker.com/bustime/api/v1/"
apicmd = "getpredictions"
#showResponse = ["stpnm","stpid","vid","rt","rtdir","prdtm"]
showResponse = ["tmstmp","typ","stpnm","stpid","vid","dstp","rt","rtdir","des","prdtm"]
counter = 0
def getPred(localurl):
if "error" in out['bustime-response']:
errorMsg = out['bustime-response']['error']['msg']
return
# Lame safety check:
if "prdtm" in out['bustime-response']['prd']:
#print "tmpstmp: " + out['bustime-response']['prd']['tmstmp']
for x in showResponse:
print x + ": " + out['bustime-response']['prd'][x]
#out['bustime-response']['prd']:
#print key
#print x
# true == multiple predictions returned
if isinstance(out['bustime-response']['prd'], list):
for x in range(0,len(out['bustime-response']['prd'])):
if out['bustime-response']['prd'][x]:
hourNow=int(out['bustime-response']['prd'][x]['tmstmp'][9:11])
minNow=int(out['bustime-response']['prd'][x]['tmstmp'][12:14])
hourPred=int(out['bustime-response']['prd'][x]['prdtm'][9:11])
minPred=int(out['bustime-response']['prd'][x]['prdtm'][12:14])
timeRemain = ((hourPred*60)+minPred) - ((hourNow*60)+minNow)
for response in showResponse:
print response + "[" + str(x) + "]" + ": " + out['bustime-response']['prd'][x][response]
print "Minutes remaining: " + str(timeRemain)
print "___"
else:
if "tmstmp" in out['bustime-response']['prd']:
# print out['bustime-response']['prd']['tmstmp'][9:11]
# print out['bustime-response']['prd']['tmstmp'][12:14]
# print out['bustime-response']['prd']['prdtm'][9:11]
# print out['bustime-response']['prd']['prdtm'][12:14]
hourNow=int(out['bustime-response']['prd']['tmstmp'][9:11])
minNow=int(out['bustime-response']['prd']['tmstmp'][12:14])
hourPred=int(out['bustime-response']['prd']['prdtm'][9:11])
minPred=int(out['bustime-response']['prd']['prdtm'][12:14])
timeRemain = ((hourPred*60)+minPred) - ((hourNow*60)+minNow)
print "Minutes remaining: " + str(timeRemain)
print "___"
# Determine direction based on time of day
if int(strftime("%H", gmtime())) > 18 or int(strftime("%H", gmtime())) < 6 :
# Heading home
heading = "home"
stops = ["&rt=78&stpid=11401", "&rt=56&stpid=14101"]
else:
# heading to work
heading = "work"
stops = ["&rt=78&stpid=11321", "&rt=56&stpid=5586"]
print """Content-type: text/html
"""
print "<html><title> Bus times - Heading to " + heading + "</title>"
print """<head>
<link rel="stylesheet" type="text/css" href="mystyle.css">
</head><body>
<center>
"""
print "Heading: " + heading + " time: " + strftime("%h:%m", gmtime())
exit()
for stop in stops:
fullurl = URL + apicmd + APIKEY + stop
getPred(fullurl)
print "</pre>"
print """
<FORM>
<INPUT TYPE="button" onClick="history.go(0)" VALUE="Refresh">
</FORM>
"""
if nothing > 1:
r = requests.get(localurl)
out = parse(r.text)
print '<table style="width:relative">'
print "<tr>"
print "<th>Route "
route = "no route"
if isinstance(out['bustime-response']['prd'], list):
route = str(out['bustime-response']['prd'][0]['rt'])
else:
route = str(out['bustime-response']['prd']['rt'])
print route
print "</th>"
print "</tr>"
|
alexcc4/flask_restful_backend
|
app/libs/error.py
|
Python
|
mit
| 412
| 0
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from webargs.flaskparser import (parser, abort)
@parser.error_handler
def handler_request_parsing_error(e):
error({}, 400, errors=e.messages)
def error(params=None, code=422, message='不合法的请求', errors=None):
params['message'] = message
params['errcode'] = code
if errors:
params['errors'] = errors
abort(code, **params)
|
cyli/volatility
|
volatility/plugins/mac/adiummsgs.py
|
Python
|
gpl-2.0
| 5,115
| 0.011144
|
# Volatility
# Copyright (C) 2007-2013 Volatility Foundation
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
"""
@author: Andrew Case
@license: GNU General Public License 2.0
@contact: atcuno@gmail.com
@organization:
"""
import os
import volatility.obj as obj
import volatility.plugins.mac.pstasks as pstasks
import volatility.plugins.mac.common as common
from volatility.renderers import TreeGrid
from volatility.renderers.basic import Address
class mac_adium(pstasks.mac_tasks):
""" Lists Adium messages """
def __init__(self, config, *args, **kwargs):
pstasks.mac_tasks.__init__(self, config, *args, **kwargs)
self._config.add_option('DUMP-DIR', short_option = 'D', default = None, help = 'Output directory', action = 'store', type = 'str')
self._config.add_option('WIDE', short_option = 'W', default = False, help = 'Wide character search', action = 'store_true')
def _make_uni(self, msg):
if self._config.WIDE:
return "\x00".join([m for m in msg])
else:
return msg
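# With the -W/--wide option, _make_uni() interleaves NUL bytes between the
# characters, approximating a UTF-16LE encoding of the search markers so they
# can be found inside wide-character strings in memory.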
def calculate(self):
common.set_plugin_members(self)
procs = pstasks.mac_tasks.calculate(self)
for proc in procs:
if proc.p_comm.lower().find("adium") == -1:
continue
proc_as = proc.get_process_address_space()
for map in proc.get_proc_maps():
if map.get_perms() != "rw-" or map.get_path() != "":
continue
buffer = proc_as.zread(map.start.v(), map.end.v() - map.start.v())
if not buffer:
continue
msg_search = self._make_uni('<span class="x-message"')
time_search = self._make_uni('<span class="x-ltime"')
send_search = self._make_uni('<span class="x-sender"')
end_search = self._make_uni('</span>')
idx = 0
msg_idx = buffer.find(msg_search)
while msg_idx != -1:
idx = idx + msg_idx
msg_end_idx = buffer[idx:].find(end_search)
if msg_end_idx == -1:
break
msg = buffer[idx: idx + msg_end_idx + 14]
# to look for time and send
search_idx = idx - 200
time_idx = buffer[search_idx : search_idx + 200].find(time_search)
msg_time = ""
if time_idx != -1:
time_end_idx = buffer[search_idx + time_idx: search_idx + time_idx + 130].find(end_search)
if time_end_idx != -1:
msg_time = buffer[search_idx + time_idx: search_idx + time_idx + time_end_idx + 14]
msg_sender = ""
send_idx = buffer[idx + search_idx: idx + search_idx + 200].find(send_search)
if send_idx != -1:
send_end_idx = buffer[search_idx + send_idx: search_idx + send_idx + 60].find(end_search)
if send_end_idx != -1:
msg_sender = buffer[search_idx + send_idx: search_idx + send_idx + send_end_idx + 14]
yield proc, map.start + idx, msg_time + msg_sender + msg
idx = idx + 5
msg_idx = buffer[idx:].find(msg_search)
def unified_output(self, data):
return TreeGrid([("Pid", int),
("Name", str),
("Start", Address),
("Size", str),
("Path", str),
],
self.generator(data))
def generator(self, data):
for (proc, start, msg) in data:
fname = "Adium.{0}.{1:x}.txt".format(proc.p_pid, start)
file_path = os.path.join(self._config.DUMP_DIR, fname)
fd = open(file_path, "wb+")
fd.write(msg)
fd.close()
yield(0, [
str(proc.p_pid),
str(proc.p_comm),
Address(start),
str(len(msg)),
str(file_path),
])
|
clips/pattern
|
pattern/text/en/inflect.py
|
Python
|
bsd-3-clause
| 36,230
| 0.024924
|
#### PATTERN | EN | INFLECT ########################################################################
# -*- coding: utf-8 -*-
# Copyright (c) 2010 University of Antwerp, Belgium
# Author: Tom De Smedt <tom@organisms.be>
# License: BSD (see LICENSE.txt for details).
####################################################################################################
# Regular expressions-based rules for English word inflection:
# - pluralization and singularization of nouns and adjectives,
# - conjugation of verbs,
# - comparative and superlative of adjectives.
# Accuracy (measured on CELEX English morphology word forms):
# 95% for pluralize()
# 96% for singularize()
# 95% for Verbs.find_lemma() (for regular verbs)
# 96% for Verbs.find_lexeme() (for regular verbs)
from __future__ import unicode_literals
from __future__ import division
from builtins import str, bytes, dict, int
from builtins import map, zip, filter
from builtins import object, range
import os
import sys
import re
try:
MODULE = os.path.dirname(os.path.realpath(__file__))
except:
MODULE = ""
sys.path.insert(0, os.path.join(MODULE, "..", "..", "..", ".."))
from pattern.text import Verbs as _Verbs
from pattern.text import (
INFINITIVE, PRESENT, PAST, FUTURE,
FIRST, SECOND, THIRD,
SINGULAR, PLURAL, SG, PL,
PROGRESSIVE,
PARTICIPLE
)
sys.path.pop(0)
VERB, NOUN, ADJECTIVE, ADVERB = "VB", "NN", "JJ", "RB"
VOWELS = "aeiouy"
re_vowel = re.compile(r"a|e|i|o|u|y", re.I)
is_vowel = lambda ch: ch in VOWELS
#### ARTICLE #######################################################################################
# Based on the Ruby Linguistics module by Michael Granger:
# http://www.deveiate.org/projects/Linguistics/wiki/English
RE_ARTICLE = list(map(lambda x: (re.compile(x[0]), x[1]), (
(r"euler|hour(?!i)|heir|honest|hono", "an"), # exceptions: an hour, an honor
# Abbreviations:
# strings of capitals starting with a vowel-sound consonant followed by another consonant,
# which are not likely to be real words.
(r"(?!FJO|[HLMNS]Y.|RY[EO]|SQU|(F[LR]?|[HL]|MN?|N|RH?|S[CHKLMNPTVW]?|X(YL)?)[AEIOU])[FHLMNRSX][A-Z]", "an"),
(r"^[aefhilmnorsx][.-]" , "an"), # hyphenated: an f-16, an e-mail
(r"^[a-z][.-]" , "a" ), # hyphenated: a b-52
(r"^[^aeiouy]" , "a" ), # consonants: a bear
(r"^e[uw]" , "a" ), # -eu like "you": a european
(r"^onc?e" , "a" ), # -o like "wa" : a one-liner
(r"uni([^nmd]|mo)" , "a" ), # -u like "you": a university
(r"^u[bcfhjkqrst][aeiou]", "a" ), # -u like "you": a uterus
(r"^[aeiou]" , "an"), # vowels: an owl
(r"y(b[lor]|cl[ea]|fere|gg|p[ios]|rou|tt)", "an"), # y like "i": an yclept, a year
(r"" , "a" ) # guess "a"
)))
def definite_article(word):
return "the"
def indefinite_article(word):
""" Returns the indefinite article for a given word.
For example: indefinite_article("university") => "a" university.
"""
word = word.split(" ")[0]
for rule, article in RE_ARTICLE:
if rule.search(word) is not None:
return article
DEFINITE, INDEFINITE = \
"definite", "indefinite"
def article(word, function=INDEFINITE):
""" Returns the indefinite (a or an) or definite (the) article for the given word.
"""
return function == DEFINITE and definite_article(word) or indefinite_article(word)
_article = article
def referenced(word, article=INDEFINITE):
""" Returns a string with the article + the word.
"""
return "%s %s" % (_article(word, article), word)
#print referenced("hour")
#print referenced("FBI")
#print referenced("bear")
#print referenced("one-liner")
#print referenced("european")
#print referenced("university")
#print referenced("uterus")
#print referenced("owl")
#print referenced("yclept")
#print referenced("year")
#### PLURALIZE #####################################################################################
# Based on "An Algorithmic Approach to English Pluralization" by Damian Conway:
# http://www.csse.monash.edu.au/~damian/papers/HTML/Plurals.html
# Prepositions are used in forms like "mother-in-law" and "man at arms".
plural_prepositions = set((
"about" , "before" , "during", "of" , "till" ,
"above" , "behind" , "except", "off" , "to" ,
"across" , "below" , "for" , "on" , "under",
"after" , "beneath", "from" , "onto" , "until",
"among" , "beside" , "in" , "out" , "unto" ,
"around" , "besides", "into" , "over" , "upon" ,
"at" , "between", "near" , "since", "with" ,
"athwart", "betwixt",
"beyond",
"but",
"by"))
# Inflection rules that are either:
# - general,
# - apply to a certain category of words,
# - apply to a certain category of words only in classical mode,
# - apply only in classical mode.
# Each rule is a (suffix, inflection, category, classic)-tuple.
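# For example, the entry ( r"child$", "children", None, False) below maps any
# word ending in "child" to "children", for any category, without requiring
# classical mode.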
plural_rules = [
# 0) Indefinite articles and demonstratives.
(( r"^a$|^an$", "some" , None, False),
( r"^this$", "these" , None, False),
( r"^that$", "those" , None, False),
( r"^any$", "all" , None, False)
), # 1) Possessive adjectives.
(( r"^my$", "our" , None, False),
( r"^your$", "your" , None, False),
( r"^thy$", "your" , None, False),
(r"^her$|^his$", "their" , None, False),
( r"^its$", "their" , None, False),
( r"^their$", "their" , None, False)
), # 2) Possessive pronouns.
(( r"^mine$", "ours" , None, False),
( r"^yours$", "yours" , None, False),
( r"^thine$", "yours" , None, False),
(r"^her$|^his$", "theirs" , None, False),
( r"^its$", "theirs" , None, False),
( r"^their$", "theirs" , None, False)
), # 3) Personal pronouns.
(( r"^I$", "we" , None, False),
( r"^me$", "us" , None, False),
( r"^myself$", "ourselves" , None, False),
( r"^you$", "you" , None, False),
(r"^thou$|^thee$", "ye" , None, False),
( r"^yourself$", "yourself" , None, False),
( r"^thyself$", "yourself" , None, False),
( r"^she$|^he$", "they" , None, False),
(r"^it$|^they$", "they" , None, False),
(r"^her$|^him$", "them" , None, False),
(r"^it$|^them$", "them" , None, False),
( r"^herself$", "themselves" , None, False),
( r"^himself$", "themselves" , None, False),
( r"^itself$", "themselves" , None, False),
( r"^themself$", "themselves" , None, False),
( r"^oneself$", "oneselves" , None, False)
), # 4) Words that do not inflect.
(( r"$", "" , "uninflected", False),
( r"$", "" , "uncountable", False),
( r"s$", "s" , "s-singular" , False),
( r"fish$", "fish" , None, False),
(r"([- ])bass$", "\\1bass" , None, False),
( r"ois$", "ois" , None, False),
( r"sheep$", "sheep" , None, False),
( r"deer$", "deer" , None, False),
( r"pox$", "pox" , None, False),
(r"([A-Z].*)ese$", "\\1ese" , None, False),
( r"itis$", "itis" , None, False),
(r"(fruct|gluc|galact|lact|ket|malt|rib|sacchar|cellul)ose$", "\\1ose", None, False)
), # 5) Irregular plural forms (e.g., mongoose, oxen).
(( r"atlas$", "atlantes" , None, True ),
( r"atlas$", "atlases" , None, False),
( r"beef$", "beeves" , None, True ),
( r"brother$", "brethren" , None, True ),
( r"child$", "children" , None, False),
( r"corpus$", "corpora" , None, True ),
( r"corpus$", "corpuses" , None, False),
( r"^cow$", "kine" , None, True ),
( r"ephemeris$", "ephemerides", None, False),
( r"ganglion$", "ganglia" , None, True ),
( r"genie$", "genii" , None, True ),
( r"genus$", "genera" , None, False),
( r"graffito$", "graffiti" , None, False
|
nuclear-wizard/moose
|
python/chigger/tests/geometric/line_source/line_source_data_tube.py
|
Python
|
lgpl-2.1
| 710
| 0.021127
|
#!/usr/bin/env python3
#pylint: disable=missing-docstring
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import chigger
tube = chigger.filters.TubeFilter(radius=1)
cyl0 = chigger.geometric.LineSource(point1=[0,0,0], point2=[0,1,0], data=[1, 2, 4, 8, 16], cmap='viridis')
cyls = chigger.base.ChiggerResult(cyl0, filters=[tube])
window = chigger.RenderWindow(cyls, size=[300,300], test=True)
window.write('line_source_data_tube.png')
window.start()
|
oukiar/qreader
|
parsetest.py
|
Python
|
mit
| 443
| 0.006772
|
from parse_rest.connection import register
from parse_rest.datatypes import Object
class GameScore(Object):
pass
register("XEPryFHrd5Tztu45du5Z3kpqxDsweaP1Q0lt8JOb", "PE8FNw0hDdlvcHYYgxEnbUyxPkP9TAsPqKvdB4L0")
myClassName = "GameScore"
myClass = Object.factory(myClassName)
print myClass
gameScore = GameScore(score=1337, player_name='John Doe', cheat_mode=False)
gameScore.cheat_mode = True
gameScore.level = 2
gameScore.save()
|
cpcloud/arrow
|
python/pyarrow/util.py
|
Python
|
apache-2.0
| 5,001
| 0
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Miscellaneous utility code
import contextlib
import functools
import gc
import pathlib
import socket
import sys
import types
import warnings
_DEPR_MSG = (
"pyarrow.{} is deprecated as of {}, please use pyarrow.{} instead."
)
def implements(f):
def decorator(g):
g.__doc__ = f.__doc__
return g
return decorator
def _deprecate_api(old_name, new_name, api, next_version):
msg = _DEPR_MSG.format(old_name, next_version, new_name)
def wrapper(*args, **kwargs):
warnings.warn(msg, FutureWarning)
return api(*args, **kwargs)
return wrapper
def _deprecate_class(old_name, new_class, next_version,
instancecheck=True):
"""
Raise warning if a deprecated class is used in an isinstance check.
"""
class _DeprecatedMeta(type):
def __instancecheck__(self, other):
warnings.warn(
_DEPR_MSG.format(old_name, next_version, new_class.__name__),
FutureWarning,
stacklevel=2
)
return isinstance(other, new_class)
return _DeprecatedMeta(old_name, (new_class,), {})
def _is_iterable(obj):
try:
iter(obj)
return True
except TypeError:
return False
def _is_path_like(path):
# PEP519 filesystem path protocol is available from python 3.6, so pathlib
# doesn't implement __fspath__ for earlier versions
return (isinstance(path, str) or
hasattr(path, '__fspath__') or
isinstance(path, pathlib.Path))
def _stringify_path(path):
"""
Convert *path* to a string or unicode path if possible.
"""
if isinstance(path, str):
return path
# checking whether path implements the filesystem protocol
try:
return path.__fspath__() # new in python 3.6
except AttributeError:
# fallback pathlib check for earlier python versions than 3.6
if isinstance(path, pathlib.Path):
return str(path)
raise TypeError("not a path-like object")
def product(seq):
"""
Return a product of sequence items.
"""
return functools.reduce(lambda a, b: a*b, seq, 1)
def get_contiguous_span(shape, strides, itemsize):
"""
Return a contiguous span of N-D array data.
Parameters
----------
shape : tuple
strides : tuple
itemsize : int
Specify array shape data
Returns
-------
start, end : int
The span end points.
"""
if not strides:
start = 0
end = itemsize * product(shape)
else:
start = 0
end = itemsize
for i, dim in enumerate(shape):
if dim == 0:
start = end = 0
break
stride = strides[i]
if stride > 0:
end += stride * (dim - 1)
elif stride < 0:
start += stride * (dim - 1)
if end - start != itemsize * product(shape):
raise ValueError('array data is non-contiguous')
return start, end
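# Illustrative example (assumed values): a C-contiguous 2x3 float64 array has
# shape=(2, 3), strides=(24, 8) and itemsize=8, so
# get_contiguous_span((2, 3), (24, 8), 8) == (0, 48)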
def find_free_port():
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
with contextlib.closing(sock) as sock:
sock.bind(('', 0))
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
return sock.getsockname()[1]
def guid():
from uuid import uuid4
return uuid4().hex
def _break_traceback_cycle_from_frame(frame):
# Clear local variables in all inner frames, so as to break the
# reference cycle.
this_frame = sys._getframe(0)
refs = gc.get_referrers(frame)
while refs:
for frame in refs:
if frame is not this_frame and isinstance(frame, types.FrameType):
break
else:
# No frame found in referrers (finished?)
break
refs = None
# Clear the frame locals, to try and break the cycle (it is
# somewhere along the chain of execution frames).
frame.clear()
# To visit the inner frame, we need to find it among the
# referers of this frame (while `frame.f_back` would let
# us visit the outer frame).
refs = gc.get_referrers(frame)
refs = frame = this_frame = None
|
crepererum/invenio
|
invenio/ext/legacy/layout.py
|
Python
|
gpl-2.0
| 9,951
| 0.002311
|
# -*- coding: utf-8 -*-
# This file is part of Invenio.
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2014 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
Global organisation of the application's URLs.
This module binds together Invenio's modules and maps them to
their corresponding URLs (ie, /search to the websearch modules,...)
"""
from invenio.ext.legacy.handler import create_handler
from invenio.ext.logging import register_exception
from invenio.ext.legacy.handler import WebInterfaceDirectory
from invenio.utils import apache
from invenio.config import CFG_DEVEL_SITE, CFG_ACCESS_CONTROL_LEVEL_SITE
from invenio.legacy.registry import webinterface_proxy, webinterfaces
class WebInterfaceDeprecatedPages(WebInterfaceDirectory):
"""Implement dumb interface for deprecated pages."""
_exports = ['']
def __call__(self, req, form):
"""Return deprecation warning."""
try:
from invenio.legacy.webpage import page
except ImportError:
register_exception()
page = lambda * args: args[1]
req.status = apache.HTTP_SERVICE_UNAVAILABLE
msg = "<p>This functionality will be soon deprecated.</p>"
try:
from invenio.config import CFG_SITE_ADMIN_EMAIL
msg += """<p>If you would still like to use it, please ask your
Invenio administrator <code>%s</code> to consider enabling it.
</p>""" % CFG_SITE_ADMIN_EMAIL
except ImportError:
pass
try:
return page('Service disabled', msg, req=req)
except:
return msg
def _lookup(self, component, path):
"""Return current interface for given path."""
return WebInterfaceDeprecatedPages(), path
index = __call__
class WebInterfaceDisabledPages(WebInterfaceDirectory):
"""This class implements a dumb interface to use as a fallback in case the
site is switched to read only mode, i.e. CFG_ACCESS_CONTROL_LEVEL_SITE > 0"""
_exports = ['']
def __call__(self, req, form):
try:
from invenio.legacy.webpage import page
except ImportError:
register_exception()
page = lambda * args: args[1]
req.status = apache.HTTP_SERVICE_UNAVAILABLE
msg = "<p>This functionality is currently unavailable due to a service maintenance.</p>"
try:
from invenio.config import CFG_SITE_ADMIN_EMAIL
msg += """<p>You can contact <code>%s</code>
in case of questions.</p>""" % \
CFG_SITE_ADMIN_EMAIL
except ImportError:
pass
msg += """<p>We are going to restore the service soon.</p>
<p>Sorry for the inconvenience.</p>"""
try:
return page('Service unavailable', msg, req=req)
except:
return msg
def _lookup(self, component, path):
return WebInterfaceDisabledPages(), path
index = __call__
class WebInterfaceDumbPages(WebInterfaceDirectory):
"""This class implements a dumb interface to use as a fallback in case of
errors importing particular module pages."""
_exports = ['']
def __call__(self, req, form):
try:
from invenio.legacy.webpage import page
except ImportError:
page = lambda * args: args[1]
req.status = apache.HTTP_INTERNAL_SERVER_ERROR
msg = "<p>This functionality is experiencing a temporary failure.</p>"
msg += "<p>The administrator has been informed about the problem.</p>"
try:
from invenio.config import CFG_SITE_ADMIN_EMAIL
msg += """<p>You can contact <code>%s</code>
in case of questions.</p>""" % \
CFG_SITE_ADMIN_EMAIL
except ImportError:
pass
msg += """<p>We hope to restore the service soon.</p>
<p>Sorry for the inconvenience.</p>"""
try:
return page('Service failure', msg, req=req)
except:
return msg
def _lookup(self, component, path):
return WebInterfaceDumbPages(), path
index = __call__
try:
from invenio.legacy.bibdocfile.webinterface import bibdocfile_legacy_getfile
except:
register_exception(alert_admin=True, subject='EMERGENCY')
bibdocfile_legacy_getfile = WebInterfaceDumbPages
try:
from invenio.legacy.websearch.webinterface import WebInterfaceSearchInterfacePages
except:
register_exception(alert_admin=True, subject='EMERGENCY')
WebInterfaceSearchInterfacePages = WebInterfaceDumbPages
try:
from invenio.legacy.bibcirculation.admin_webinterface import \
WebInterfaceBibCirculationAdminPages
except:
register_exception(alert_admin=True, subject='EMERGENCY')
WebInterfaceBibCirculationAdminPages = WebInterfaceDumbPages
try:
from invenio.legacy.bibsched.webinterface import \
WebInterfaceBibSchedPages
except:
register_exception(alert_admin=True, subject='EMERGENCY')
WebInterfaceBibSchedPages = WebInterfaceDumbPages
if CFG_DEVEL_SITE:
test_exports = ['httptest']
else:
test_exports = []
class WebInterfaceAdminPages(WebInterfaceDirectory):
"""This class implements /admin2 admin pages."""
_exports = ['index', 'bibcirculation', 'bibsched']
def index(self, req, form):
return "FIXME: return /help/admin content"
bibcirculation = WebInterfaceBibCirculationAdminPages()
bibsched = WebInterfaceBibSchedPages()
class WebInterfaceInvenio(WebInterfaceSearchInterfacePages):
""" The global URL layout is composed of the search API plus all
the other modules."""
_exports = WebInterfaceSearchInterfacePages._exports + \
[
'youraccount',
'youralerts',
'yourbaskets',
'yourmessages',
'yourloans',
'yourcomments',
'ill',
'yourgroups',
'yourtickets',
'comments',
'error',
'oai2d', ('oai2d.py', 'oai2d'),
('getfile.py', 'getfile'),
'submit',
'rss',
'stats',
'journal',
'help',
'unapi',
'exporter',
'kb',
'batchuploader',
'bibsword',
'ping',
'admin2',
'linkbacks',
'textmining',
'goto',
'info',
'authorlist',
] + test_exports
def __init__(self):
self.getfile = bibdocfile_legacy_getfile
if CFG_DEVEL_SITE:
self.httptest = webinterfaces.get('WebInterfaceHTTPTestPages',
WebInterfaceDisabledPages)()
_mapping = dict(
submit='WebInterfaceSubmitPages',
youraccount='WebInterfaceYourAccountPages',
youralerts='WebInterfaceYourAlertsPages',
yourbaskets='WebInterfaceYourBasketsPages',
yourmessages='WebInterfaceYourMessagesPages',
yourloans='WebInterfaceYourLoansPages',
ill='WebInterfaceILLPages',
yourgroups='WebInterfaceYourGroupsPages',
yourtickets='WebInterfaceYourTicketsPages',
comments='WebInterfaceCommentsPages',
e
|
Iconoclasteinc/tgit
|
cute/rect.py
|
Python
|
gpl-3.0
| 473
| 0.002114
|
from PyQt5.QtCore import QPoint, QMargins
def center_right(rect):
return QPoint(rect.right(), rect.center().y())
def center_left(rect):
return QPoint(rect.left(), rect.center().y())
def top_center(rect):
return QPoint(rect.center().x(), rect.top())
def bottom_center(rect):
return QPoint(rect.center().x(), rect.bottom())
def inside_bounds(rect, left=0, top=0, right=0, bottom=0):
return rect.marginsRemoved(QMargins(left, top, right, bottom))
|
shiny-fortnight/auto_id
|
whitepages_api.py
|
Python
|
apache-2.0
| 1,774
| 0.003382
|
#!/usr/bin/env python
import requests as r
import sys, json, re
import StringIO as StringIO
API_KEY = 'ea04b1f16b56a1b1a21b0159b8b1990e'
def _getPhoneNumber(inPhone):
inPhone = re.sub(r"\D", "", inPhone)
inPhone = inPhone.replace('-', '')
if inPhone[0] == '1':
inPhone = inPhone[1:]
assert len(inPhone) == 10
return inPhone
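# Illustrative example: _getPhoneNumber('1-415-555-0100') -> '4155550100'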
def makeAPIRequest(inPhone):
"""
Returns the results of the whitepages requests as a list:
name, streetaddr, carrier, phoneType, cleaned
"""
try:
cleaned = _getPhoneNumber(inPhone)
except AssertionError:
raise ValueError('Bad Input Phone')
req = 'https://proapi.whitepages.com/2.1/phone.json?api_key=%s&phone_number=%s' % (API_KEY, cleaned)
result = r.get(req)
asDict = json.loads(result.text)
nameValues = asDict['results'][0]['belongs_to']
if len(nameValues) > 0:
nameValues = nameValues[0]
if nameValues['best_name']:
fname = nameValues['best_name']
elif nameValues['best_name']['first_name'] or nameValues['best_name']['last_name']:
nameKeys = ['first_name', 'middle_name', 'last_name']
fname = ''
for name in nameKeys:
fname += nameValues['names'][name] if nameValues['names'][name] else ''
else:
fname = None
locationValues = asDict['results'][0]['best_location']
carrier = asDict['results'][0]['carrier']
phoneType = asDict['results'][0]['line_type']
locKeys = ['standard_address_line1', 'standard_address_line2', 'city', 'standard_address_location']
streetAddr = ''
for name in locKeys:
streetAddr += locationValues[name]+' ' if locationValues[name] else ''
return [fname, streetAddr, carrier, phoneType, cleaned]
|
dileep-kishore/microbial-ai
|
tests/regulation/test_memory.py
|
Python
|
mit
| 1,304
| 0
|
# @Author: dileep
# @Last Modified by: dileep
import random
import pytest
from microbial_ai.regulation import Event, Action, Memory
@pytest.fixture
def random_action():
return Action(type='fixed', phi={'rxn1': (random.random(), '+')})
@pytest.fixture
def random_event(random_action):
return Event(state=random.randint(0, 100), action=random_action,
next_state=random.randint(0, 100), reward=random.random())
@pytest.mark.usefixtures("random_event")
class TestMemory:
"""
Tests for the Memory class
"""
def test_initialization(self):
memory = Memory(1000)
assert memory.capacity == 1000
assert memory.idx == 0
def test_add_event(self, random_event):
memory = Memory(1000)
memory.add_event(random_event)
assert len(memory.memory) == 1
assert memory.idx == 1
for _ in range(1500):
memory.add_event(random_event)
assert len(memory.memory) == memory.capacity
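# 1 + 1500 events have gone into a 1000-slot buffer, so the write index has
# presumably wrapped to 501 (1501 % 1000), which the expression below spells out.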
assert memory.idx == (1000 - 500 + 1)
def test_sample(self, random_event):
memory = Memory(1000)
with pytest.raises(ValueError):
memory.sample(100)
for _ in range(400):
memory.add_event(random_event)
assert len(memory.sample(200)) == 200
|
Kromey/roglick
|
roglick/components/attributes.py
|
Python
|
mit
| 157
| 0.006369
|
from roglick.engine.ecs import ComponentBase
class AttributesComponent(ComponentBase):
_properties = (('st', 10), ('dx', 10), ('iq', 10), ('pe', 10))
| |
h4/fuit-webdev
|
examples/lesson5/pandora_data.py
|
Python
|
mit
| 335
| 0
|
from box.models import *
gold = ThingsType(title="Золото", kind=2, color="Жёлтый")
gold.save()
ferrum = ThingsType(title="Железо", kind=2, color="Чёрный")
ferrum.save()
ring = Thing(title="Кольцо", things_type=gold)
ring.save()
hammer = Thing(title="Молоток", things_type=ferrum)
hammer.save()
|
|
lamby/buildinfo.debian.net
|
bidb/keys/models.py
|
Python
|
agpl-3.0
| 744
| 0
|
import datetime
from django.db import models, transaction
class Key(models.Model):
uid = models.CharField(max_length=255, unique=True)
name = models.TextField()
created = models.DateTimeField(default=datetime.datetime.utcnow)
class Meta:
ordering = ('-created',)
get_latest_by = 'created'
def __unicode__(self):
return u"pk=%d uid=%r name=%r" % (
self.pk,
self.uid,
self.name,
)
def save(self, *args, **kwargs):
created = not self.pk
super(Key, self).save(*args, **kwargs)
if created:
from .tasks import update_or_create_key
transaction.on_commit(lambda: update_or_create_key.delay(self.uid))
|
cneill/designate-testing
|
designate/exceptions.py
|
Python
|
apache-2.0
| 8,130
| 0
|
# Copyright 2012 Managed I.T.
#
# Author: Kiall Mac Innes <kiall@managedit.ie>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
class Base(Exception):
error_code = 500
error_type = None
error_message = None
errors = None
def __init__(self, *args, **kwargs):
self.errors = kwargs.pop('errors', None)
self.object = kwargs.pop('object', None)
super(Base, self).__init__(*args, **kwargs)
if len(args) > 0 and isinstance(args[0], six.string_types):
self.error_message = args[0]
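# Illustrative (assumed) usage: subclasses only override the class attributes,
# e.g. raising DomainNotFound("example.org. not found") carries error_code=404
# and error_type='domain_not_found'.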
class Backend(Exception):
pass
class RelationNotLoaded(Base):
error_code = 500
error_type = 'relation_not_loaded'
def __init__(self, *args, **kwargs):
self.relation = kwargs.pop('relation', None)
super(RelationNotLoaded, self).__init__(*args, **kwargs)
self.error_message = "%(relation)s is not loaded on %(object)s" % \
{"relation": self.relation, "object": self.object.obj_name()}
def __str__(self):
return self.error_message
class AdapterNotFound(Base):
error_code = 500
error_type = 'adapter_not_found'
class NSD4SlaveBackendError(Backend):
pass
class NotImplemented(Base, NotImplementedError):
pass
class XFRFailure(Base):
pass
class ConfigurationError(Base):
error_type = 'configuration_error'
class UnknownFailure(Base):
error_code = 500
error_type = 'unknown_failure'
class CommunicationFailure(Base):
error_code = 504
error_type = 'communication_failure'
class NeutronCommunicationFailure(CommunicationFailure):
"""
Raised in case one of the alleged Neutron endpoints fails.
"""
error_type = 'neutron_communication_failure'
class NoServersConfigured(ConfigurationError):
error_code = 500
error_type = 'no_servers_configured'
class NoPoolTargetsConfigured(ConfigurationError):
error_code = 500
error_type = 'no_pool_targets_configured'
class OverQuota(Base):
error_code = 413
error_type = 'over_quota'
class QuotaResourceUnknown(Base):
error_type = 'quota_resource_unknown'
class InvalidObject(Base):
error_code = 400
error_type = 'invalid_object'
expected = True
class BadRequest(Base):
error_code = 400
error_type = 'bad_request'
expected = True
class EmptyRequestBody(BadRequest):
error_type = 'empty_request_body'
expected = True
class InvalidUUID(BadRequest):
error_type = 'invalid_uuid'
class NetworkEndpointNotFound(BadRequest):
error_type = 'no_endpoint'
error_code = 403
class MarkerNotFound(BadRequest):
error_type = 'marker_not_found'
class ValueError(BadRequest):
error_type = 'value_error'
class InvalidMarker(BadRequest):
error_type = 'invalid_marker'
class InvalidSortDir(BadRequest):
error_type = 'invalid_sort_dir'
class InvalidLimit(BadRequest):
error_type = 'invalid_limit'
class InvalidSortKey(BadRequest):
error_type = 'invalid_sort_key'
class InvalidJson(BadRequest):
error_type = 'invalid_json'
class InvalidOperation(BadRequest):
error_code = 400
error_type = 'invalid_operation'
class UnsupportedAccept(BadRequest):
error_code = 406
error_type = 'unsupported_accept'
class UnsupportedContentType(BadRequest):
error_code = 415
error_type = 'unsupported_content_type'
class InvalidDomainName(Base):
error_code = 400
error_type = 'invalid_domain_name'
expected = True
class InvalidRecordSetName(Base):
error_code = 400
error_type = 'invalid_recordset_name'
class InvalidRecordSetLocation(Base):
error_code = 400
error_type = 'invalid_recordset_location'
class InvaildZoneTransfer(Base):
error_code = 400
error_type = 'invalid_zone_transfer_request'
class InvalidTTL(Base):
error_code = 400
error_type = 'invalid_ttl'
class DomainHasSubdomain(Base):
error_code = 400
error_type = 'domain_has_subdomain'
class Forbidden(Base):
error_code = 403
error_type = 'forbidden'
expected = True
class IllegalChildDomain(Forbidden):
error_type = 'illegal_child'
class IllegalParentDomain(Forbidden):
error_type = 'illegal_parent'
class IncorrectZoneTransferKey(Forbidden):
error_type = 'invalid_key'
class Duplicate(Base):
expected = True
error_code = 409
error_type = 'duplicate'
class DuplicateQuota(Duplicate):
error_type = 'duplicate_quota'
class DuplicateServer(Duplicate):
error_type = 'duplicate_server'
class DuplicateTsigKey(Duplicate):
error_type = 'duplicate_tsigkey'
class DuplicateDomain(Duplicate):
error_type = 'duplicate_domain'
class DuplicateTld(Duplicate):
error_type = 'duplicate_tld'
class DuplicateRecordSet(Duplicate):
error_type = 'duplicate_recordset'
class DuplicateRecord(Duplicate):
error_type = 'duplicate_record'
class DuplicateBlacklist(Duplicate):
error_type = 'duplicate_blacklist'
class DuplicatePoolManagerStatus(Duplicate):
error_type = 'duplication_pool_manager_status'
class DuplicatePool(Duplicate):
error_type = 'duplicate_pool'
class DuplicatePoolAttribute(Duplicate):
error_type = 'duplicate_pool_attribute'
class DuplicateDomainAttribute(Duplicate):
error_type = 'duplicate_domain_attribute'
class DuplicatePoolNsRecord(Duplicate):
error_type = 'duplicate_pool_ns_record'
class DuplicateZoneImport(Duplicate):
error_type = 'duplicate_zone_import'
class DuplicateZoneExport(Duplicate):
error_type = 'duplicate_zone_export'
class MethodNotAllowed(Base):
expected = True
error_code = 405
error_type = 'method_not_allowed'
class DuplicateZoneTransferRequest(Duplicate):
error_type = 'duplicate_zone_transfer_request'
class DuplicateZoneTransferAccept(Duplicate):
error_type = 'duplicate_zone_transfer_accept'
class NotFound(Base):
expected = True
error_code = 404
error_type = 'not_found'
class QuotaNotFound(NotFound):
error_type = 'quota_not_found'
class ServerNotFound(NotFound):
error_type = 'server_not_found'
class TsigKeyNotFound(NotFound):
error_type = 'tsigkey_not_found'
class BlacklistNotFound(NotFound):
error_type = 'blacklist_not_found'
class DomainNotFound(NotFound):
error_type = 'domain_not_found'
class DomainMasterNotFound(NotFound):
error_type = 'domain_master_not_found'
class DomainAttributeNotFound(NotFound):
error_type = 'domain_attribute_not_found'
class TldNotFound(NotFound):
error_type = 'tld_not_found'
class RecordSetNotFound(NotFound):
error_type = 'recordset_not_found'
class RecordNotFound(NotFound):
error_type = 'record_not_found'
class ReportNotFound(NotFound):
error_type = 'report_not_found'
class PoolManagerStatusNotFound(NotFound):
error_type = 'pool_manager_status_not_found'
class PoolNotFound(NotFound):
error_type = 'pool_not_found'
class PoolAttributeNotFound(NotFound):
error_type = 'pool_attribute_not_found'
class PoolNsRecordNotFound(NotFound):
error_type = 'pool_ns_record_not_found'
class ZoneTransferRequestNotFound(NotFound):
error_type = 'zone_transfer_request_not_found'
class ZoneTransferAcceptNotFound(NotFound):
error_type = 'zone_transfer_accept_not_found'
class ZoneImportNotFound(NotFound):
error_type = 'zone_import_not_found'
class ZoneExportNotFound(NotFound):
error_type = 'zone_export_not_found'
class LastServerDeleteNotAllowed(BadRequest):
error_type = 'last_server_delete_not_allowed'
class ResourceNotFound(NotFound):
# TODO(kiall): Should this be extending NotFound??
pass
|
sanjuro/RCJK
|
vendor/django/db/backends/__init__.py
|
Python
|
apache-2.0
| 20,370
| 0.001669
|
try:
# Only exists in Python 2.4+
from threading import local
except ImportError:
# Import copy of _thread_local.py from Python 2.4
from django.utils._threading_local import local
try:
set
except NameError:
# Python 2.3 compat
from sets import Set as set
try:
import decimal
except ImportError:
# Python 2.3 fallback
from django.utils import _decimal as decimal
from django.db.backends import util
from django.utils import datetime_safe
class BaseDatabaseWrapper(local):
"""
Represents a database connection.
"""
ops = None
def __init__(self, settings_dict):
# `settings_dict` should be a dictionary containing keys such as
# DATABASE_NAME, DATABASE_USER, etc. It's called `settings_dict`
# instead of `settings` to disambiguate it from Django settings
# modules.
self.connection = None
self.queries = []
self.settings_dict = settings_dict
def _commit(self):
if self.connection is not None:
return self.connection.commit()
def _rollback(self):
if self.connection is not None:
return self.connection.rollback()
def _enter_transaction_management(self, managed):
"""
A hook for backend-specific changes required when entering manual
transaction handling.
"""
pass
def _leave_transaction_management(self, managed):
"""
A hook for backend-specific changes required when leaving manual
transaction handling. Will usually be implemented only when
_enter_transaction_management() is also required.
"""
pass
def _savepoint(self, sid):
if not self.features.uses_savepoints:
return
self.cursor().execute(self.ops.savepoint_create_sql(sid))
def _savepoint_rollback(self, sid):
if not self.features.uses_savepoints:
return
self.cursor().execute(self.ops.savepoint_rollback_sql(sid))
def _savepoint_commit(self, sid):
if not self.features.uses_savepoints:
return
self.cursor().execute(self.ops.savepoint_commit_sql(sid))
def close(self):
if self.connection is not None:
self.connection.close()
self.connection = None
def cursor(self):
from django.conf import settings
cursor = self._cursor()
if settings.DEBUG:
return self.make_debug_cursor(cursor)
return cursor
def make_debug_cursor(self, cursor):
return util.CursorDebugWrapper(cursor, self)
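# --- Illustrative sketch (not part of the original module) ---
# A minimal, hypothetical backend wrapper built on BaseDatabaseWrapper, showing
# how _cursor() is meant to lazily open self.connection.  The sqlite3 usage and
# the class name are assumptions for demonstration only; real backends live in
# their own django.db.backends.* packages.
class _ExampleSQLiteWrapper(BaseDatabaseWrapper):
    def _cursor(self):
        import sqlite3
        if self.connection is None:
            # settings_dict is the dictionary passed to __init__ above.
            self.connection = sqlite3.connect(
                self.settings_dict.get('DATABASE_NAME', ':memory:'))
        return self.connection.cursor()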
class BaseDatabaseFeatures(object):
allows_group_by_pk = False
# True if django.db.backend.utils.typecast_timestamp is used on values
# returned from dates() calls.
needs_datetime_string_cast = True
uses_custom_query_class = False
empty_fetchmany_value = []
update_can_self_select = True
    interprets_empty_strings_as_nulls = False
can_use_chunked_reads = True
can_return_id_from_insert = False
uses_autocommit = False
uses_savepoints = False
# If True, don't use integer foreign keys referring to, e.g., positive
# integer primary keys.
related_fields_match_type = False
allow_sliced_subqueries = True
class BaseDatabaseOperations(object):
"""
This class encapsulates all backend-specific differences, such as the way
    a backend performs ordering or calculates the ID of a recently-inserted
row.
"""
def autoinc_sql(self, table, column):
"""
Returns any SQL needed to support auto-incrementing primary keys, or
None if no SQL is necessary.
This SQL is executed when a table is created.
"""
return None
def date_extract_sql(self, lookup_type, field_name):
"""
Given a lookup_type of 'year', 'month' or 'day', returns the SQL that
extracts a value from the given date field field_name.
"""
raise NotImplementedError()
def date_trunc_sql(self, lookup_type, field_name):
"""
Given a lookup_type of 'year', 'month' or 'day', returns the SQL that
truncates the given date field field_name to a DATE object with only
the given specificity.
"""
raise NotImplementedError()
def datetime_cast_sql(self):
"""
Returns the SQL necessary to cast a datetime value so that it will be
retrieved as a Python datetime object instead of a string.
This SQL should include a '%s' in place of the field's name.
"""
return "%s"
def deferrable_sql(self):
"""
Returns the SQL necessary to make a constraint "initially deferred"
during a CREATE TABLE statement.
"""
return ''
def drop_foreignkey_sql(self):
"""
Returns the SQL command that drops a foreign key.
"""
return "DROP CONSTRAINT"
def drop_sequence_sql(self, table):
"""
Returns any SQL necessary to drop the sequence for the given table.
Returns None if no SQL is necessary.
"""
return None
def fetch_returned_insert_id(self, cursor):
"""
Given a cursor object that has just performed an INSERT...RETURNING
statement into a table that has an auto-incrementing ID, returns the
newly created ID.
"""
return cursor.fetchone()[0]
def field_cast_sql(self, db_type):
"""
Given a column type (e.g. 'BLOB', 'VARCHAR'), returns the SQL necessary
to cast it before using it in a WHERE statement. Note that the
resulting string should contain a '%s' placeholder for the column being
searched against.
"""
return '%s'
def force_no_ordering(self):
"""
Returns a list used in the "ORDER BY" clause to force no ordering at
all. Returning an empty list means that nothing will be included in the
ordering.
"""
return []
def fulltext_search_sql(self, field_name):
"""
Returns the SQL WHERE clause to use in order to perform a full-text
search of the given field_name. Note that the resulting string should
contain a '%s' placeholder for the value being searched against.
"""
raise NotImplementedError('Full-text search is not implemented for this database backend')
def last_executed_query(self, cursor, sql, params):
"""
Returns a string of the query last executed by the given cursor, with
placeholders replaced with actual values.
`sql` is the raw query containing placeholders, and `params` is the
sequence of parameters. These are used by default, but this method
exists for database backends to provide a better implementation
according to their own quoting schemes.
"""
from django.utils.encoding import smart_unicode, force_unicode
# Convert params to contain Unicode values.
to_unicode = lambda s: force_unicode(s, strings_only=True, errors='replace')
if isinstance(params, (list, tuple)):
u_params = tuple([to_unicode(val) for val in params])
else:
u_params = dict([(to_unicode(k), to_unicode(v)) for k, v in params.items()])
return smart_unicode(sql) % u_params
def last_insert_id(self, cursor, table_name, pk_name):
"""
Given a cursor object that has just performed an INSERT statement into
a table that has an auto-incrementing ID, returns the newly created ID.
This method also receives the table name and the name of the primary-key
column.
"""
return cursor.lastrowid
def lookup_cast(self, lookup_type):
"""
Returns the string to use in a query when performing lookups
("contains", "like", etc). The resulting string should contain a '%s'
placeholder for the column being searched against.
"""
return "%s"
def max_name_length(self):
"""
Returns the maximum length of table and column names, or None if there
is no limit.
|
botswana-harvard/edc-calendar
|
edc_calendar/models/update_or_create_event_model_mixin.py
|
Python
|
gpl-2.0
| 2,108
| 0.002846
|
from django.apps import apps as django_apps
from django.db import models
from .event import Event
class UpdatesOrCreatesCalenderEventModelError(Exception):
pass
class UpdatesOrCreatesCalenderEventModelMixin(models.Model):
"""A model mixin that creates or updates a calendar event
on post_save signal.
"""
def create_or_update_calendar_event(self):
"""Creates or Updates the event model with attributes
        from this instance.
Called from the signal
"""
if not getattr(self, self.identifier_field) and not getattr(self, self.title_field):
raise UpdatesOrCreatesCalenderEventModelError(
f'Cannot update or create Calender Event. '
f'Field value for \'{self.identifier_field}\' is None.')
event_value = getattr(self, self.identifier_field)
title_value = getattr(self, self.title_field)
if getattr(self, self.second_title_field):
title_value += str(getattr(self, self.second_title_field))
try:
Event.objects.get(**{
self.identifier_field: event_value,
'title': title_value})
except Event.DoesNotExist:
Event.objects.create(
**{self.identifier_field: event_value, 'title': title_value},
**self.event_options)
@property
def identifier_field(self):
"""Returns the field attr on YOUR model that will update
`identifier_field`.
"""
return 'subject_identifier'
@property
def title_field(self):
"""Returns the field attr on YOUR model that will update
`title`.
"""
return 'visit_code'
@property
def second_title_field(self):
"""Returns the field attr on YOUR model that will update
`title`.
"""
return 'visit_code_sequence'
@property
def event_options(self):
"""Returns the dict of the following attrs
`description` `start_time` `end_time`.
"""
return {}
class Meta:
abstract = True
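# --- Illustrative sketch (not part of the original module) ---
# A hypothetical model using the mixin: it only needs to expose the field names
# returned by identifier_field / title_field / second_title_field above, and to
# call create_or_update_calendar_event() from its post_save handler.  The model
# name and its fields are assumptions for demonstration only.
class ExampleVisitModel(UpdatesOrCreatesCalenderEventModelMixin, models.Model):

    subject_identifier = models.CharField(max_length=50)
    visit_code = models.CharField(max_length=25)
    visit_code_sequence = models.IntegerField(default=0)

    class Meta:
        app_label = 'edc_calendar'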
|
trungnt13/BAY2-uef17
|
utils.py
|
Python
|
gpl-3.0
| 7,061
| 0.000142
|
# ===========================================================================
# This file contain some utilities for the course
# ===========================================================================
from __future__ import print_function, division, absolute_import
import os
import sys
import time
import shutil
from six.moves.urllib.request import urlopen
from six.moves.urllib.error import URLError, HTTPError
import tarfile
import platform
import numpy as np
# Under Python 2, 'urlretrieve' relies on FancyURLopener from legacy
# urllib module, known to have issues with proxy management
if sys.version_info[0] == 2:
def urlretrieve(url, filename, reporthook=None, data=None):
'''
        This function is adapted from: https://github.com/fchollet/keras
Original work Copyright (c) 2014-2015 keras contributors
'''
def chunk_read(response, chunk_size=8192, reporthook=None):
total_size = response.info().get('Content-Length').strip()
total_size = int(total_size)
count = 0
while 1:
chunk = response.read(chunk_size)
if not chunk:
break
count += 1
if reporthook:
reporthook(count, chunk_size, total_size)
yield chunk
response = urlopen(url, data)
with open(filename, 'wb') as fd:
for chunk in chunk_read(response, reporthook=reporthook):
fd.write(chunk)
else:
from six.moves.urllib.request import urlretrieve
class Progbar(object):
'''
    This function is adapted from: https://github.com/fchollet/keras
Original work Copyright (c) 2014-2015 keras contributors
Modified work Copyright 2016-2017 TrungNT
'''
def __init__(self, target, title=''):
'''
@param target: total number of steps expected
'''
self.width = 39
self.target = target
self.sum_values = {}
self.unique_values = []
self.start = time.time()
self.total_width = 0
self.seen_so_far = 0
self.title = title
def update(self, current, values=[]):
'''
@param current: index of current step
@param values: list of tuples (name, value_for_last_step).
The progress bar will display averages for these values.
'''
for k, v in values:
if k not in self.sum_values:
self.sum_values[k] = [v * (current - self.seen_so_far), current - self.seen_so_far]
self.unique_values.append(k)
else:
self.sum_values[k][0] += v * (current - self.seen_so_far)
self.sum_values[k][1] += (current - self.seen_so_far)
self.seen_so_far = current
now = time.time()
prev_total_width = self.total_width
sys.stdout.write("\b" * prev_total_width)
sys.stdout.write("\r")
numdigits = int(np.floor(np.log10(self.target))) + 1
barstr = '%s %%%dd/%%%dd [' % (self.title, numdigits, numdigits)
bar = barstr % (current, self.target)
prog = float(current) / self.target
prog_width = int(self.width * prog)
if prog_width > 0:
bar += ('=' * (prog_width - 1))
if current < self.target:
bar += '>'
else:
bar += '='
bar += ('.' * (self.width - prog_width))
bar += ']'
sys.stdout.write(bar)
self.total_width = len(bar)
if current:
time_per_unit = (now - self.start) / current
else:
time_per_unit = 0
eta = time_per_unit * (self.target - current)
info = ''
if current < self.target:
info += ' - ETA: %ds' % eta
else:
info += ' - %ds' % (now - self.start)
for k in self.unique_values:
info += ' - %s:' % k
if type(self.sum_values[k]) is list:
avg = self.sum_values[k][0] / max(1, self.sum_values[k][1])
if abs(avg) > 1e-3:
info += ' %.4f' % avg
else:
info += ' %.4e' % avg
else:
info += ' %s' % self.sum_values[k]
self.total_width += len(info)
if prev_total_width > self.total_width:
info += ((prev_total_width - self.total_width) * " ")
sys.stdout.write(info)
if current >= self.target:
if "Linux" in platform.platform():
sys.stdout.write("\n\n")
else:
sys.stdout.write("\n")
sys.stdout.flush()
def add(self, n, values=[]):
        self.update(self.seen_so_far + n, values)
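# --- Illustrative usage sketch (not part of the original module) ---
# Progbar is driven by calling update()/add() with the number of processed
# steps; the demo function below is hypothetical and is never called here.
def _progbar_demo(n_steps=100):
    bar = Progbar(n_steps, title='Demo')
    for step in range(1, n_steps + 1):
        time.sleep(0.01)  # stand-in for real work
        bar.update(step, values=[('loss', 1.0 / step)])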
def get_file(fname, origin, untar=False, datadir=None):
'''
    This function is adapted from: https://github.com/fchollet/keras
    Original work Copyright (c) 2014-2015 keras contributors
Modified work Copyright 2016-2017 TrungNT
Return
------
file path of the downloaded file
'''
# ====== check valid datadir ====== #
if datadir is None:
datadir = os.path.join(os.path.expanduser('~'), '.bay2')
if not os.path.exists(datadir):
os.mkdir(datadir)
elif not os.path.exists(datadir):
raise ValueError('Cannot find folder at path:' + str(datadir))
# ====== download the file ====== #
if untar:
untar_fpath = os.path.join(datadir, fname)
fpath = untar_fpath + '.tar.gz'
else:
fpath = os.path.join(datadir, fname)
if not os.path.exists(fpath):
print('Downloading data from', origin)
global _progbar
_progbar = None
def dl_progress(count, block_size, total_size):
global _progbar
if _progbar is None:
_progbar = Progbar(total_size)
else:
_progbar.update(count * block_size)
error_msg = 'URL fetch failure on {}: {} -- {}'
try:
try:
urlretrieve(origin, fpath, dl_progress)
except URLError as e:
raise Exception(error_msg.format(origin, e.errno, e.reason))
except HTTPError as e:
raise Exception(error_msg.format(origin, e.code, e.msg))
except (Exception, KeyboardInterrupt) as e:
if os.path.exists(fpath):
os.remove(fpath)
raise
_progbar = None
if untar:
if not os.path.exists(untar_fpath):
print('Untaring file...')
tfile = tarfile.open(fpath, 'r:gz')
try:
tfile.extractall(path=datadir)
except (Exception, KeyboardInterrupt) as e:
if os.path.exists(untar_fpath):
if os.path.isfile(untar_fpath):
os.remove(untar_fpath)
else:
shutil.rmtree(untar_fpath)
raise
tfile.close()
return untar_fpath
return fpath
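# --- Illustrative usage sketch (not part of the original module) ---
# How get_file() is typically called; the URL and file name below are
# placeholders, not files that are known to exist.
def _get_file_demo():
    path = get_file('mnist.pkl.gz',
                    origin='https://example.com/data/mnist.pkl.gz',
                    untar=False)
    print('Downloaded to:', path)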
|
vlegoff/tsunami
|
src/primaires/salle/types/combustible.py
|
Python
|
bsd-3-clause
| 3,785
| 0.003975
|
# -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 NOEL-BARON Léo
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
""
|
"Fichier contenant le type combustible."""
from primaires.interpreteur.editeur.entier import Entier
from primaires.interpreteur.editeur.selection import Selection
from primaires.objet.types.base import BaseType
class Combustible(BaseType):
"""Type d'objet: combustible.
|
"""
nom_type = "combustible"
def __init__(self, cle=""):
"""Constructeur de l'objet"""
BaseType.__init__(self, cle)
self.terrains = []
self.rarete = 1
self.qualite = 2
        # Editors
self.etendre_editeur("t", "terrains", Selection, self, "terrains",
list(importeur.salle.terrains.keys()))
self.etendre_editeur("r", "rareté", Entier, self, "rarete", 1, 10)
self.etendre_editeur("a", "qualité", Entier, self, "qualite", 1, 10)
@property
def aff_terrains(self):
return ", ".join(self.terrains) if self.terrains else "aucun"
def travailler_enveloppes(self, enveloppes):
"""Travail sur les enveloppes"""
l_terrains = sorted(type(self).importeur.salle.terrains.keys())
terrains = enveloppes["t"]
terrains.apercu = "{objet.aff_terrains}"
terrains.prompt = "Entrez un terrain : "
terrains.aide_courte = \
"Entrez les |ent|terrains|ff| où l'on peut trouver ce " \
"combustible.\n\nTerrains disponibles : {}.\n\n" \
"Terrains actuels : {{objet.aff_terrains}}".format(
", ".join(l_terrains))
rarete = enveloppes["r"]
rarete.apercu = "{objet.rarete}"
rarete.prompt = "Rareté du combustible : "
rarete.aide_courte = \
"Entrez la |ent|rareté|ff| du combustible, entre |cmd|1|ff| " \
"(courant) et |cmd|10|ff| (rare).\n\n" \
"Rareté actuelle : {objet.rarete}"
qualite = enveloppes["a"]
qualite.apercu = "{objet.qualite}"
qualite.prompt = "Qualité du combustible : "
qualite.aide_courte = \
"Entrez la |ent|qualité|ff| du combustible, entre |cmd|1|ff| " \
"(mauvais) et |cmd|10|ff| (très bon).\n\n" \
"Qualité actuelle : {objet.qualite}"
|
pmisik/buildbot
|
master/buildbot/test/unit/reporters/test_generators_utils.py
|
Python
|
gpl-2.0
| 13,510
| 0.002665
|
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import copy
from parameterized import parameterized
from twisted.internet import defer
from twisted.trial import unittest
from buildbot.process.results import CANCELLED
from buildbot.process.results import EXCEPTION
from buildbot.process.results import FAILURE
from buildbot.process.results import SUCCESS
from buildbot.process.results import WARNINGS
from buildbot.reporters import utils
from buildbot.reporters.generators.utils import BuildStatusGeneratorMixin
from buildbot.test.fake import fakemaster
from buildbot.test.reactor import TestReactorMixin
from buildbot.test.util.config import ConfigErrorsMixin
from buildbot.test.util.reporter import ReporterTestMixin
class TestBuildGenerator(ConfigErrorsMixin, TestReactorMixin,
unittest.TestCase, ReporterTestMixin):
def setUp(self):
self.setup_test_reactor()
self.setup_reporter_test()
self.master = fakemaster.make_master(self, wantData=True, wantDb=True,
wantMq=True)
@defer.inlineCallbacks
    def insert_build_finished_get_props(self, results, **kwargs):
build = yield self.insert_build_finished(results, **kwargs)
yield utils.getDetailsForBuild(self.master, build, want_properties=True)
return build
def create_generator(self, mode=("failing", "passing", "warnings"),
tags=None, builders=None, schedulers=None, branches=None,
subject="Some subject", add_logs=False, add_patch=False):
return BuildStatusGeneratorMixin(mode, tags, builders, schedulers, branches, subject,
add_logs, add_patch)
def test_generate_name(self):
g = self.create_generator(tags=['tag1', 'tag2'], builders=['b1', 'b2'],
schedulers=['s1', 's2'], branches=['b1', 'b2'])
self.assertEqual(g.generate_name(),
'BuildStatusGeneratorMixin_tags_tag1+tag2_builders_b1+b2_' +
'schedulers_s1+s2_branches_b1+b2failing_passing_warnings')
@parameterized.expand([
('tags', 'tag'),
('tags', 1),
('builders', 'builder'),
('builders', 1),
('schedulers', 'scheduler'),
('schedulers', 1),
('branches', 'branch'),
('branches', 1),
])
def test_list_params_check_raises(self, arg_name, arg_value):
kwargs = {arg_name: arg_value}
g = self.create_generator(**kwargs)
with self.assertRaisesConfigError('must be a list or None'):
g.check()
@parameterized.expand([
('unknown_str', 'unknown', 'not a valid mode'),
('unknown_list', ['unknown'], 'not a valid mode'),
('unknown_list_two', ['unknown', 'failing'], 'not a valid mode'),
('all_in_list', ['all', 'failing'], 'must be passed in as a separate string'),
])
def test_tag_check_raises(self, name, mode, expected_exception):
g = self.create_generator(mode=mode)
with self.assertRaisesConfigError(expected_exception):
g.check()
def test_subject_newlines_not_allowed(self):
g = self.create_generator(subject='subject\nwith\nnewline')
with self.assertRaisesConfigError('Newlines are not allowed'):
g.check()
@defer.inlineCallbacks
def test_is_message_needed_ignores_unspecified_tags(self):
build = yield self.insert_build_finished_get_props(SUCCESS)
# force tags
build['builder']['tags'] = ['slow']
g = self.create_generator(tags=["fast"])
self.assertFalse(g.is_message_needed_by_props(build))
@defer.inlineCallbacks
def test_is_message_needed_tags(self):
build = yield self.insert_build_finished_get_props(SUCCESS)
# force tags
build['builder']['tags'] = ['fast']
g = self.create_generator(tags=["fast"])
self.assertTrue(g.is_message_needed_by_props(build))
@defer.inlineCallbacks
def test_is_message_needed_schedulers_sends_mail(self):
build = yield self.insert_build_finished_get_props(SUCCESS)
g = self.create_generator(schedulers=['checkin'])
self.assertTrue(g.is_message_needed_by_props(build))
@defer.inlineCallbacks
def test_is_message_needed_schedulers_doesnt_send_mail(self):
build = yield self.insert_build_finished_get_props(SUCCESS)
g = self.create_generator(schedulers=['some-random-scheduler'])
self.assertFalse(g.is_message_needed_by_props(build))
@defer.inlineCallbacks
def test_is_message_needed_branches_sends_mail(self):
build = yield self.insert_build_finished_get_props(SUCCESS)
g = self.create_generator(branches=['refs/pull/34/merge'])
self.assertTrue(g.is_message_needed_by_props(build))
@defer.inlineCallbacks
def test_is_message_needed_branches_doesnt_send_mail(self):
build = yield self.insert_build_finished_get_props(SUCCESS)
g = self.create_generator(branches=['some-random-branch'])
self.assertFalse(g.is_message_needed_by_props(build))
@defer.inlineCallbacks
def run_simple_test_sends_message_for_mode(self, mode, result, should_send=True):
build = yield self.insert_build_finished_get_props(result)
g = self.create_generator(mode=mode)
self.assertEqual(g.is_message_needed_by_results(build), should_send)
def run_simple_test_ignores_message_for_mode(self, mode, result):
return self.run_simple_test_sends_message_for_mode(mode, result, False)
def test_is_message_needed_mode_all_for_success(self):
return self.run_simple_test_sends_message_for_mode("all", SUCCESS)
def test_is_message_needed_mode_all_for_failure(self):
return self.run_simple_test_sends_message_for_mode("all", FAILURE)
def test_is_message_needed_mode_all_for_warnings(self):
return self.run_simple_test_sends_message_for_mode("all", WARNINGS)
def test_is_message_needed_mode_all_for_exception(self):
return self.run_simple_test_sends_message_for_mode("all", EXCEPTION)
def test_is_message_needed_mode_all_for_cancelled(self):
return self.run_simple_test_sends_message_for_mode("all", CANCELLED)
def test_is_message_needed_mode_failing_for_success(self):
return self.run_simple_test_ignores_message_for_mode("failing", SUCCESS)
def test_is_message_needed_mode_failing_for_failure(self):
return self.run_simple_test_sends_message_for_mode("failing", FAILURE)
def test_is_message_needed_mode_failing_for_warnings(self):
return self.run_simple_test_ignores_message_for_mode("failing", WARNINGS)
def test_is_message_needed_mode_failing_for_exception(self):
return self.run_simple_test_ignores_message_for_mode("failing", EXCEPTION)
def test_is_message_needed_mode_exception_for_success(self):
return self.run_simple_test_ignores_message_for_mode("exception", SUCCESS)
def test_is_message_needed_mode_exception_for_failure(self):
return self.run_simple_test_ignores_message_for_mode("exception", FAILURE)
def test_is_message_needed_mode_exception_for_warnings(self):
return self.run_simple_test_ignores_message_for_mode("exception", WARNINGS)
def test_is_message_needed_mode_exception_for_exception(self):
return self.run_simple_test_sends_message_for_mode("exception", EXCEPTION)
def test_is_message_needed_m
|
chrisdickinson/tweezers
|
projects/views/private.py
|
Python
|
mit
| 9,684
| 0.002891
|
import simplejson
import os
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.contrib.markup.templatetags.markup import restructuredtext
from django.core.urlresolvers import reverse
from django.conf import settings
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import get_object_or_404, render_to_response, redirect
from django.template import RequestContext
from django.template.defaultfilters import linebreaks
from django.template.loader import render_to_string
from django.views.generic.list_detail import object_list
from projects import constants
from projects.forms import FileForm, CreateProjectForm, ImportProjectForm, FileRevisionForm
from projects.models import Project, File
from bookmarks.models import Bookmark
@login_required
def project_dashboard(request):
"""
A dashboard! If you aint know what that means you aint need to.
Essentially we show you an overview of your content.
"""
marks = Bookmark.objects.filter(user=request.user)[:5]
return object_list(
request,
queryset=request.user.projects.live(),
page=int(request.GET.get('page', 1)),
template_object_name='project',
extra_context={'bookmark_list': marks },
template_name='projects/project_dashboard.html',
)
@login_required
def project_manage(request, project_slug):
"""
The management view for a project, where you will have links to edit
    the project's configuration, edit the files associated with that
project, etc.
"""
project = get_object_or_404(request.user.projects.live(), slug=project_slug)
return object_list(
request,
queryset=project.files.live(),
extra_context={'project': project},
page=int(request.GET.get('page', 1)),
template_object_name='file',
template_name='projects/project_manage.html',
)
@login_required
def project_create(request):
"""
The view for creating a new project where the docs will be hosted
as objects and edited through the site
"""
form = CreateProjectForm(request.POST or None)
if request.method == 'POST' and form.is_valid():
form.instance.user = request.user
project = form.save()
project_manage = reverse('projects_manage', args=[project.slug])
        return HttpResponseRedirect(project_manage)
return render_to_response(
'projects/project_create.html',
{'form': form},
context_instance=RequestContext(request)
)
@login_required
def project_edit(request, project_slug):
"""
Edit an existing project - depending on what type of project is being
edited (created or imported) a different form will be displayed
"""
project = get_object_or_404(request.user.projects.live(), slug=project_slug)
if project.is_imported:
form_class = ImportProjectForm
else:
form_class = CreateProjectForm
form = form_class(instance=project, data=request.POST or None)
if request.method == 'POST' and form.is_valid():
form.save()
project_dashboard = reverse('projects_manage', args=[project.slug])
return HttpResponseRedirect(project_dashboard)
return render_to_response(
'projects/project_edit.html',
{'form': form, 'project': project},
context_instance=RequestContext(request)
)
@login_required
def project_delete(request, project_slug):
"""
    Mark a project as deleted on POST, otherwise show a form asking for
confirmation of delete.
"""
project = get_object_or_404(request.user.projects.live(), slug=project_slug)
if request.method == 'POST':
project.status = constants.DELETED_STATUS
project.save()
project_dashboard = reverse('projects_dashboard')
return HttpResponseRedirect(project_dashboard)
return render_to_response(
'projects/project_delete.html',
{'project': project},
context_instance=RequestContext(request)
)
@login_required
def project_import(request):
"""
    Import docs from a repo
"""
form = ImportProjectForm(request.POST or None)
if request.method == 'POST' and form.is_valid():
form.instance.user = request.user
project = form.save()
project_manage = reverse('projects_manage', args=[project.slug])
return HttpResponseRedirect(project_manage + '?docs_not_built=True')
return render_to_response(
'projects/project_import.html',
{'form': form},
context_instance=RequestContext(request)
)
@login_required
def file_add(request, project_slug):
"""
Add a file to a project, redirecting on success to the projects mgmt page
"""
project = get_object_or_404(request.user.projects.live(), slug=project_slug)
file = File(project=project)
form = FileForm(instance=file, data=request.POST or None)
if request.method == 'POST' and form.is_valid():
form.instance.project = project
file = form.save()
project_manage = reverse('projects_manage', args=[project.slug])
return HttpResponseRedirect(project_manage)
return render_to_response(
'projects/file_add.html',
{'form': form, 'project': project},
context_instance=RequestContext(request)
)
@login_required
def file_edit(request, project_slug, file_id):
"""
Edit an existing file
"""
project = get_object_or_404(request.user.projects.live(), slug=project_slug)
file = get_object_or_404(project.files.live(), pk=file_id)
form = FileForm(instance=file, data=request.POST or None)
if request.method == 'POST' and form.is_valid():
form.save()
project_manage = reverse('projects_manage', args=[project.slug])
return HttpResponseRedirect(project_manage)
return render_to_response(
'projects/file_edit.html',
{'form': form, 'project': project, 'file': file},
context_instance=RequestContext(request)
)
@login_required
def file_delete(request, project_slug, file_id):
"""
Mark a given file as deleted on POST, otherwise ask for confirmation
"""
project = get_object_or_404(request.user.projects.live(), slug=project_slug)
file = get_object_or_404(project.files.live(), pk=file_id)
if request.method == 'POST':
file.status = constants.DELETED_STATUS
file.save()
project_manage = reverse('projects_manage', args=[project.slug])
return HttpResponseRedirect(project_manage)
return render_to_response(
'projects/file_delete.html',
{'project': project, 'file': file},
context_instance=RequestContext(request)
)
@login_required
def file_history(request, project_slug, file_id):
"""
A view that provides diffing from current to any revision, and when
posted to allows you to revert
"""
project = get_object_or_404(request.user.projects.live(), slug=project_slug)
file = get_object_or_404(project.files.live(), pk=file_id)
form = FileRevisionForm(file, request.POST or None)
if request.method == 'POST' and form.is_valid():
form.cleaned_data['revision'].apply()
history = reverse('projects_file_history', args=[project.slug, file.pk])
return HttpResponseRedirect(history)
return object_list(
request,
queryset=file.revisions.all(),
extra_context={'project': project, 'file': file, 'form': form},
page=int(request.GET.get('page', 1)),
template_object_name='revision',
template_name='projects/file_history.html',
)
@login_required
def file_diff(request, project_slug, file_id, from_id, to_id):
"""
Return the contents of a given revision.
"""
project = get_object_or_404(request.user.projects.live(), slug=project_slug)
file = get_object_or_404(project.files.live(), pk=file_id)
# grab the requested revisions
from_rev = get_object_or_404(file.revisions.all(), pk=from_id)
to_rev = get_object_or_404(file.revisions.all(), pk=to_id)
# generate a pretty html diff
diff = file.get
|
plotly/python-api
|
packages/python/plotly/plotly/validators/layout/yaxis/_showticklabels.py
|
Python
|
mit
| 484
| 0.002066
|
import _plotly_utils.basevalidators
class ShowticklabelsValidator(_plotly_utils.basevalidators.BooleanValidator):
def __init__(
self, plotly_name="showticklabels", parent_name="layout.yaxis", **kwargs
):
super(ShowticklabelsValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "ticks"),
role=kwargs.pop("role", "style"),
**kwargs
)
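# --- Illustrative usage sketch (not part of the generated module) ---
# Plotly instantiates these validators internally; validate_coerce() is assumed
# here from the _plotly_utils.basevalidators.BooleanValidator API.
def _example_validate_showticklabels(value=True):
    validator = ShowticklabelsValidator()
    return validator.validate_coerce(value)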
| |
kanafghan/fiziq-backend
|
src/models/factories.py
|
Python
|
gpl-2.0
| 2,302
| 0.002606
|
'''
Created on 22/02/2015
@author: Ismail Faizi
'''
import models
class ModelFactory(object):
"""
Factory for creating entities of models
"""
@classmethod
def create_user(cls, name, email, training_journal):
"""
Factory method for creating User entity.
NOTE: you must explicitly call the put() method
"""
user = models.User(parent=models.USER_KEY)
user.name = name
user.email = email
user.training_journal = training_journal.key
return user
@classmethod
def create_training_journal(cls):
"""
Factory method for creating TrainingJournal entity.
NOTE: you must explicitly call the put() method
"""
return models.TrainingJournal(parent=models.TRAINING_JOURNAL_KEY)
@classmethod
def create_workout_session(cls, started_at, ended_at, training_journal):
"""
Factory method for creating WorkoutSession entity.
NOTE: you must explicitly call the put() method
"""
workout_session = models.WorkoutSession(parent=models.WORKOUT_SESSION_KEY)
workout_session.started_at = started_at
workout_session.ended_at = ended_at
workout_session.training_journal = training_journal.key
return workout_session
@classmethod
def create_workout_set(cls, repetitions, weight, workout_session, workout):
"""
Factory method for creating WorkoutSet entity.
NOTE: you must explicitly call the put() method
"""
workout_set = models.WorkoutSet(parent=models.WORKOUT_SET_KEY)
workout_set.repetitions = repetitions
workout_set.weight = weight
workout_set.workout_session = workout_session.key
        workout_set.workout = workout.key
return workout_set
@classmethod
def create_workout(cls, muscle_group, names=[], description='', images=[]):
"""
        Factory method for creating Workout entity.
NOTE: you must explicitly call the put() method
"""
workout = models.Workout(parent=models.WORKOUT_KEY)
workout.names = names
workout.muscle_group = muscle_group
workout.description = description
workout.images = images
return workout
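# --- Illustrative usage sketch (not part of the original module) ---
# The factory methods only build entities; as the docstrings note, put() must
# be called explicitly to persist them.  The values below are placeholders.
def _example_create_user():
    journal = ModelFactory.create_training_journal()
    journal.put()
    user = ModelFactory.create_user('Jane Doe', 'jane@example.com', journal)
    user.put()
    return user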
|
mtlchun/edx
|
lms/djangoapps/django_comment_client/tests/mock_cs_server/test_mock_cs_server.py
|
Python
|
agpl-3.0
| 2,252
| 0.000888
|
import unittest
import threading
import json
import urllib2
from mock_cs_server import MockCommentServiceServer
from nose.plugins.skip import SkipTest
class MockCommentServiceServerTest(unittest.TestCase):
'''
A mock version of the Comment Service server that listens on a local
    port and responds with pre-defined messages.
'''
def setUp(self):
super(MockCommentServiceServerTest, self).setUp()
# This is a test of the test setup,
# so it does not need to run as part of the unit test suite
# You can re-enable it by commenting out the line below
raise SkipTest
# Create the server
server_port = 4567
self.server_url = 'http://127.0.0.1:%d' % server_port
# Start up the server and tell it that by default it should
# return this as its json response
self.expected_response = {'username': 'user100', 'external_id': '4'}
self.server = MockCommentServiceServer(port_num=server_port,
response=self.expected_response)
# Start the server in a separate daemon thread
server_thread = threading.Thread(target=self.server.serve_forever)
server_thread.daemon = True
server_thread.start()
def tearDown(self):
# Stop the server, freeing up the port
self.server.shutdown()
def test_new_user_request(self):
"""
Test the mock comment service using an example
of how you would create a new user
"""
# Send a request
values = {'username': u'user100',
'external_id': '4', 'email': u'user100@edx.org'}
data = json.dumps(values)
headers = {'Content-Type': 'application/json', 'Content-Length': len(data), 'X-Edx-Api-Key': 'TEST_API_KEY'}
req = urllib2.Request(self.server_url + '/api/v1/users/4', data, headers)
# Send the request to the mock cs server
response = urllib2.urlopen(req)
# Receive the reply from the mock cs server
response_dict = json.loads(response.read())
# You should have received the response specified in the setup above
self.assertEqual(response_dict, self.expected_response)
|
relekang/photos
|
photos/gallery/views.py
|
Python
|
mit
| 2,650
| 0.001132
|
# -*- coding: utf-8 -*-
import logging
from django.core.urlresolvers import reverse
from django.http.response import Http404, JsonResponse
from django.shortcuts import get_object_or_404, redirect
from django.views.generic.detail import DetailView
from django.views.generic.list import ListView
from photos.gallery.serializers import PhotoSerializer
from photos.users.models import User
from .models import Photo
logger = logging.getLogger(__name__)
class PhotoViewMixin(object):
model = Photo
def custom_domain(self):
host = self.request.get_host()
return host != 'photos.mocco.no' and host != '127.0.0.1:8000'
def dispatch(self, *args, **kwargs):
username = self.kwargs.get('username', None)
if username and self.custom_domain():
raise Http404()
if username:
user = User.objects.get(username=username)
if user.domain:
return redirect("http://{domain}{path}".format(
domain=user.domain,
path=self.request.get_full_path().replace(r'^/u/{0}'.format(username), '')
))
return super().dispatch(*args, **kwargs)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
username = self.kwargs.get('username', None)
if self.custom_domain():
context['photographer'] = User.objects.get(domain=self.request.get_host())
context['archive_url'] = reverse('gallery:archive')
elif username:
context['photographer'] = User.objects.get(username=username)
context['archive_url'] = reverse('user_archive', args=[username])
return context
def get_queryset(self):
queryset = self.model.objects.all()
username = self.kwargs.get('username', None)
if self.custom_domain():
            queryset = queryset.filter(user__domain=self.request.get_host())
elif username is not None:
queryset = queryset.filter(user__username=username)
return queryset
class PhotoDetailView(PhotoViewMixin, DetailView):
def get_object(self, queryset=None, **kwargs):
queryset = queryset or self.get_queryset()
slug = self.kwargs.get(self.slug_url_kwarg, None)
if slug is None:
return queryset.last()
return get_object_or_404(queryset, slug=slug)
class ArchiveView(PhotoViewMixin, ListView):
pass
class ArchiveApiView(PhotoViewMixin, ListView):
def get(self, request, *args, **kwargs):
return JsonResponse(PhotoSerializer(self.get_queryset(), many=True).data, safe=False)
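# --- Illustrative sketch (not part of the original module) ---
# One way these class-based views could be wired into a URLconf (Django < 2.0,
# matching the django.core.urlresolvers import above); the URL patterns and
# names below are assumptions for demonstration only.
def _example_urlpatterns():
    from django.conf.urls import url
    return [
        url(r'^$', PhotoDetailView.as_view(), name='latest'),
        url(r'^archive/$', ArchiveView.as_view(), name='archive'),
        url(r'^api/photos/$', ArchiveApiView.as_view(), name='archive-api'),
        url(r'^(?P<slug>[\w-]+)/$', PhotoDetailView.as_view(), name='photo'),
    ]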
|
melviso/phycpp
|
beatle/app/cxxtypes.py
|
Python
|
gpl-2.0
| 96
| 0
|
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 17 00:25:59 2013
@author: mel
"""
_types = [
]
|
Parallel-in-Time/pySDC
|
pySDC/projects/Hamiltonian/fput.py
|
Python
|
bsd-2-clause
| 8,363
| 0.002511
|
import os
from collections import defaultdict
import dill
import numpy as np
import pySDC.helpers.plot_helper as plt_helper
from pySDC.helpers.stats_helper import filter_stats, sort_stats
from pySDC.implementations.collocation_classes.gauss_lobatto import CollGaussLobatto
from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI
from pySDC.implementations.problem_classes.FermiPastaUlamTsingou import fermi_pasta_ulam_tsingou
from pySDC.implementations.sweeper_classes.verlet import verlet
from pySDC.implementations.transfer_classes.TransferParticles_NoCoarse import particles_to_particles
from pySDC.projects.Hamiltonian.hamiltonian_and_energy_output import hamiltonian_and_energy_output
def setup_fput():
"""
Helper routine for setting up everything for the Fermi-Pasta-Ulam-Tsingou problem
Returns:
description (dict): description of the controller
controller_params (dict): controller parameters
"""
# initialize level parameters
level_params = dict()
level_params['restol'] = 1E-12
level_params['dt'] = 2.0
# initialize sweeper parameters
sweeper_params = dict()
sweeper_params['collocation_class'] = CollGaussLobatto
sweeper_params['num_nodes'] = [5, 3]
sweeper_params['initial_guess'] = 'zero'
    # initialize problem parameters for the Fermi-Pasta-Ulam-Tsingou problem
problem_params = dict()
problem_params['npart'] = 2048
problem_params['alpha'] = 0.25
problem_params['k'] = 1.0
problem_params['energy_modes'] = [[1, 2, 3, 4]]
# initialize step parameters
step_params = dict()
step_params['maxiter'] = 50
# initialize controller parameters
controller_params = dict()
controller_params['hook_class'] = hamiltonian_and_energy_output
controller_params['logger_level'] = 30
# Fill description dictionary for easy hierarchy creation
description = dict()
description['problem_class'] = fermi_pasta_ulam_tsingou
description['problem_params'] = problem_params
description['sweeper_class'] = verlet
description['sweeper_params'] = sweeper_params
description['level_params'] = level_params
description['step_params'] = step_params
description['space_transfer_class'] = particles_to_particles
return description, controller_params
def run_simulation():
"""
Routine to run the simulation of a second order problem
"""
description, controller_params = setup_fput()
# set time parameters
t0 = 0.0
# set this to 10000 to reproduce the picture in
# http://www.scholarpedia.org/article/Fermi-Pasta-Ulam_nonlinear_lattice_oscillations
Tend = 1000.0
num_procs = 1
f = open('fput_out.txt', 'w')
out = 'Running fput problem with %s processors...' % num_procs
f.write(out + '\n')
print(out)
# instantiate the controller
controller = controller_nonMPI(num_procs=num_procs, controller_params=controller_params,
description=description)
# get initial values on finest level
P = controller.MS[0].levels[0].prob
uinit = P.u_exact(t=t0)
# call main function to get things done...
uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)
# filter statistics by type (number of iterations)
filtered_stats = filter_stats(stats, type='niter')
# convert filtered statistics to list of iterations count, sorted by process
iter_counts = sort_stats(filtered_stats, sortby='time')
# compute and print statistics
# for item in iter_counts:
# out = 'Number of iterations for time %4.2f: %2i' % item
# f.write(out + '\n')
# print(out)
niters = np.array([item[1] for item in iter_counts])
out = ' Mean number of iterations: %4.2f' % np.mean(niters)
f.write(out + '\n')
print(out)
out = ' Range of values for number of iterations: %2i ' % np.ptp(niters)
f.write(out + '\n')
print(out)
out = ' Position of max/min number of iterations: %2i -- %2i' % \
(int(np.argmax(niters)), int(np.argmin(niters)))
f.write(out + '\n')
print(out)
out = ' Std and var for number of iterations: %4.2f -- %4.2f' % (float(np.std(niters)), float(np.var(niters)))
f.write(out + '\n')
print(out)
# get runtime
timing_run = sort_stats(filter_stats(stats, type='timing_run'), sortby='time')[0][1]
out = '... took %6.4f seconds to run this.' % timing_run
f.write(out + '\n')
print(out)
f.close()
# assert np.mean(niters) <= 3.46, 'Mean number of iterations is too high, got %s' % np.mean(niters)
fname = 'data/fput.dat'
f = open(fname, 'wb')
dill.dump(stats, f)
f.close()
    assert os.path.isfile(fname), 'Run for %s did not create stats file' % fname
def show_results(cwd=''):
"""
Helper function to plot the error of the Hamiltonian
Args:
cwd (str): current working directory
"""
# read in the dill data
f = open(cwd + 'data/fput.dat', 'rb')
stats = dill.load(f)
f.close()
plt_helper.mpl.style.use('classic')
plt_helper.setup_mpl()
# HAMILTONIAN PLOTTING #
# extract error in hamiltonian and prepare for plotting
extract_stats = filter_stats(stats, type='err_hamiltonian')
result = defaultdict(list)
for k, v in extract_stats.items():
result[k.iter].append((k.time, v))
for k, _ in result.items():
result[k] = sorted(result[k], key=lambda x: x[0])
plt_helper.newfig(textwidth=238.96, scale=0.89)
# Rearrange data for easy plotting
err_ham = 1
for k, v in result.items():
time = [item[0] for item in v]
ham = [item[1] for item in v]
err_ham = ham[-1]
plt_helper.plt.semilogy(time, ham, '-', lw=1, label='Iter ' + str(k))
print(err_ham)
# assert err_ham < 6E-10, 'Error in the Hamiltonian is too large, got %s' % err_ham
plt_helper.plt.xlabel('Time')
plt_helper.plt.ylabel('Error in Hamiltonian')
plt_helper.plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
fname = 'data/fput_hamiltonian'
plt_helper.savefig(fname)
assert os.path.isfile(fname + '.pdf'), 'ERROR: plotting did not create PDF file'
# assert os.path.isfile(fname + '.pgf'), 'ERROR: plotting did not create PGF file'
assert os.path.isfile(fname + '.png'), 'ERROR: plotting did not create PNG file'
# ENERGY PLOTTING #
# extract error in hamiltonian and prepare for plotting
extract_stats = filter_stats(stats, type='energy_step')
result = sort_stats(extract_stats, sortby='time')
plt_helper.newfig(textwidth=238.96, scale=0.89)
# Rearrange data for easy plotting
for mode in result[0][1].keys():
time = [item[0] for item in result]
energy = [item[1][mode] for item in result]
plt_helper.plt.plot(time, energy, label=str(mode) + 'th mode')
plt_helper.plt.xlabel('Time')
plt_helper.plt.ylabel('Energy')
plt_helper.plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
fname = 'data/fput_energy'
plt_helper.savefig(fname)
assert os.path.isfile(fname + '.pdf'), 'ERROR: plotting did not create PDF file'
# assert os.path.isfile(fname + '.pgf'), 'ERROR: plotting did not create PGF file'
assert os.path.isfile(fname + '.png'), 'ERROR: plotting did not create PNG file'
# POSITION PLOTTING #
# extract positions and prepare for plotting
extract_stats = filter_stats(stats, type='position')
result = sort_stats(extract_stats, sortby='time')
    plt_helper.newfig(textwidth=238.96, scale=0.89)
# Rearrange data for easy plotting
nparts = len(result[0][1])
nsteps = len(result)
pos = np.zeros((nparts, nsteps))
time = np.zeros(nsteps)
    for idx, item in enumerate(result):
time[idx] = item[0]
for n in range(nparts):
pos[n, idx] = item[1][n]
for n in range(min(nparts, 16)):
plt_helper.plt.plot(time, pos[n, :])
fname = 'data/fput_positions'
plt_helper.savefig(fname)
assert os.path.isfile(fname + '.pdf'), 'ERROR: plotting did not create PDF file'
# assert os.path.isfile(fname + '.pgf'), 'ERROR: plotting did not create PGF fi
|
zuun77/givemegoogletshirts
|
leetcode/python/792_number-of-matching-subsequences.py
|
Python
|
apache-2.0
| 716
| 0.006983
|
import bisect
class Solution:
def numMatchingSubseq(self, S: str, words):
d = {}
        for i, x in enumerate(S):
if x not in d: d[x] = [i]
else: d[x].append(i)
ans = []
for w in words:
i = -1
result = True
for x in w:
                if x not in d:
                    result = False
                    break
                # Find the first index of x in S that is strictly after position i.
                idx = bisect.bisect_left(d[x], i+1)
if idx >= len(d[x]):
result = False
break
i = d[x][idx]
if result: ans.append(w)
return len(ans)
print(Solution().numMatchingSubseq("abcde", ["a", "bb", "acd", "ace"]))
|
lvuotto/so-tps
|
tp3/bin/csv_loader.py
|
Python
|
mit
| 1,092
| 0.020147
|
#!/usr/bin/env python
from pymongo.connection import MongoClient
import csv
import json
""" Script para re-importar los posts a la base de datos """
reader = csv.DictReader(open('data/redit.csv'),
fieldnames=('image_id','unixtime','rawtime','title','total_votes',
'reddit_id','number_of_upvotes','subreddit','number_of_downvotes',
'localtime','score','number_of_comments','username'))
conn = MongoClient()
db = conn.reddit
print "Cleaning DB collections %s.%s" % ("reddit", "posts")
db.posts.remove()
for it in reader:
try:
it['total_votes'] = int(it['total_votes'] )
it['number_of_upvotes'] = int(it['number_of_upvotes'] )
        it['number_of_downvotes'] = int(it['number_of_downvotes'] )
it['score'] = int(it['score'] )
it['number_of_comments'] = int(it['number_of_comments'] )
db.posts.insert(it)
except Exception as e:
print e, "while inserting", it
print "Inserted %d records" % d
|
b.posts.count()
assert db.posts.count() == 269689, "Missing records on DB, inserted: %d" % db.posts.count()
|
ismailakbudak/election-algorithm-on-graph
|
test/node_test.py
|
Python
|
mit
| 1,091
| 0.03758
|
# -*- coding: utf-8 -*-
# Test file for node class
# Developer
# Ismail AKBUDAK
# ismailakbudak.com
import os,sys,inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
from graph import Node
def log(message):
print("TEST:: %s"%(message))
def log_neighbours(node):
log("%s neighbours : %s "%(node, str(node.neighbours)) )
n1=Node(1,1)
n2=Node(2,2)
n3=Node(3,3)
n4=Node(4,4)
n5=Node(5,5)
n6=Node(6,6)
n1.addNeighbour(n2)
n1.addNeighbour(n3)
n1.addNeighbour(n3)
n1.addNeighbour(n5)
n1.addNeighbour(n6)
n3.addNeighbour(n2)
n4.addNeighbour(n2)
log_neighbours(n1)
log_neighbours(n2)
log_neighbours(n3)
log_neighbours(n4)
log_neighbours(n5)
log_neighbours(n6)
n1.remove()
log_neighbours(n1)
log_neighbours(n2)
log_neighbours(n3)
log_neighbours(n4)
log_neighbours(n5)
log_neighbours(n6)
if len(n1.neighbours) == 0 and len(n3.neighbours) == 1 and len(n4.neighbours) == 1 :
log("completed successfully..")
else:
log("There is something wrong..")
|
gawbul/ReproPhyloVagrant
|
reprophylo/reprophylo.py
|
Python
|
mit
| 281,160
| 0.011737
|
reprophyloversion="99c16963"
############################################################################################
if False:
"""
ReproPhylo version 1
General purpose phylogenetics package for reproducible and experimental analysis
    Amir Szitenberg
A.Szitenberg@Hull.ac.uk
Szitenberg@gmail.com
David H Lunt
D.H.Lunt@Hull.ac.uk
EvoHull.org
University of Hull
Developed with:
CPython 2.7.6
IPython 1.2.1
ete2 2.2rev1056
biopython 1.64
dendropy 3.12.0
cloud 2.8.5
numpy 1.8.2
matplotlib 1.3.1
pandas
RAxML 8
Phylobayes 3
Trimal 1
Muscle
Mafft 7
Pal2nal 14
"""
##############################################################################################
from Bio import SeqIO
import os, csv, sys, dendropy, re, time, random, glob, platform, warnings, rpgit, ast, gb_syn,css
import HTML, inspect, shutil
import subprocess as sub
from Bio.Seq import Seq
import numpy as np
import matplotlib.pyplot as plt
from Bio.Alphabet import IUPAC
from Bio.SeqRecord import SeqRecord
from Bio.SeqFeature import SeqFeature, FeatureLocation, CompoundLocation
from Bio.Align.Applications import MafftCommandline, MuscleCommandline
from StringIO import StringIO
from Bio import AlignIO
from Bio.Phylo.Applications import RaxmlCommandline
from Bio.Align import MultipleSeqAlignment
from Bio.SeqUtils import GC
from ete2 import *
from collections import Counter
#import pandas as pd
import math
import __builtin__
##############################################################################################
class Locus:
##############################################################################################
""" Configure the loci stored in the ReproPhylo Project.
>>> locus = Locus('dna', 'CDS', 'coi', ['cox1','COX1','coi','COI','CoI'])
>>> print(locus)
Locus(char_type=dna, feature_type=CDS, name=coi, aliases=cox1; COX1; coi; COI; CoI)
"""
char_type = 'NotSet'
feature_type = 'NotSet'
name = 'NotSet'
aliases = []
def __init__(self, char_type=char_type, feature_type=feature_type,
name=name, aliases=aliases):
self.char_type = char_type
self.feature_type = feature_type
self.name = name
self.aliases = aliases
valid = ['dna','prot']
if not self.char_type in valid:
raise ValueError('self.char_type should be \'dna\' or \'prot\'')
if not type(self.feature_type) is str:
raise ValueError('self.feature_type should be a string')
if not type(self.name) is str:
raise ValueError('self.name should be a string')
if not type(self.aliases) is list:
raise ValueError('self.aliases should be a list')
else:
for a in self.aliases:
if not type(a) is str:
raise ValueError('aliases in self.aliases have to be strings')
def __str__(self):
aliases_str = ('; ').join(self.aliases)
return ('Locus(char_type='+self.char_type+', feature_type='+self.feature_type+
', name='+self.name+', aliases='+aliases_str+')')
##############################################################################################
class Concatenation:
##############################################################################################
"""This class is used to configure concatenations given loci and rules.
>>> coi = Locus('dna', 'CDS', 'coi', ['cox1','COX1','coi','COI','CoI'])
>>> ssu = Locus('dna', 'rRNA', '18S', ['18S rRNA','SSU rRNA'])
>>> bssu = Locus('dna', 'rRNA', '16S', ['16S rRNA'])
    >>> lsu = Locus('dna', 'rRNA', '28S', ['28S rRNA', 'LSU rRNA'])
>>> alg11 = Locus('dna', 'CDS', 'ALG11', ['ALG11'])
>>> loci = [coi, ssu, bssu, lsu, alg11]
>>> concatenation = Concatenation(name='combined', loci=loci,
... otu_meta='OTU_name',
... otu_must_have_all_of=['coi'],
... otu_must_have_one_of =[['16S','28S'],['ALG11','18S']],
... define_trimmed_alns=["MuscleDefaults@dummyTrimMethod"])
>>> print(str(concatenation))
Concatenation named combined, with loci coi,18S,16S,28S,ALG11,
of which coi must exist for all species
and at least one of each group of [ 16S 28S ][ ALG11 18S ] is represented.
    Alignments with the following names: MuscleDefaults@dummyTrimMethod are preferred
"""
otu_must_have_all_of = []
otu_must_have_one_of = 'any'
    define_trimmed_alns = [] #should be Locus_name@Alignment_method_name@Trimming_method_name
feature_id_dict = {}
def __init__(self,
name,
loci,
otu_meta,
otu_must_have_all_of = otu_must_have_all_of,
otu_must_have_one_of = otu_must_have_one_of,
define_trimmed_alns = define_trimmed_alns):
self.name = name
self.loci = loci
self.otu_meta = otu_meta
self.otu_must_have_all_of = otu_must_have_all_of
self.otu_must_have_one_of = otu_must_have_one_of
if isinstance(otu_must_have_all_of,str):
raise IOError('The keyword \'otu_must_have_all_of\' has to be a list')
if isinstance(otu_must_have_one_of[0],str) and not otu_must_have_one_of == 'any':
raise IOError('The keyword \'otu_must_have_one_of\' has to be a list of lists')
if self.otu_must_have_one_of == 'any':
self.otu_must_have_one_of = [[l.name for l in self.loci]]
self.feature_id_dict = {} # Will hold the feature_id list for each otu
self.define_trimmed_alns = define_trimmed_alns # To choose between alternative
# alignments of the same locus
self.used_trimmed_alns = {} #To hold the alignment chosen for each locus
# Validate loci list
seen = []
for locus in loci:
if not isinstance(locus, Locus):
raise TypeError("Expecting Locus object in loci list")
if locus.name in seen:
raise NameError('Locus ' + locus.name + ' appears more than once in self.loci')
else:
seen.append(locus.name)
def __str__(self):
loci_names = [i.name for i in self.loci]
loci_string = ''
for l in loci_names:
loci_string += l+','
loci_string = loci_string[:-1]
must_have = ''
for i in self.otu_must_have_all_of:
must_have += i+','
must_have = must_have[:-1]
trimmed_alignmnets_spec = ''
one_of = ''
for i in self.otu_must_have_one_of:
one_of += '[ '
for j in i:
one_of += j+' '
one_of += ']'
        if len(self.define_trimmed_alns) > 0:
for i in self.define_trimmed_alns:
trimmed_alignmnets_spec += i
return ("Concatenation named %s, with loci %s,\n"
"of which %s must exist for all species\n"
"and at least one of each group of %s is represented.\n"
"Alignments with the following names: %s are prefered"
% (self.name, loci_string, must_have, one_of, trimmed_alignmnets_spec))
##############################################################################################
if False:
"""
Reprophylo Project Utilities
Used in the Project class but are not in the classe's methods
"""
##############################################################################################
## Git management
__builtin__.git = False
# git log template
gitline = "<<<<\n%s\nSTDOUT:\n%s\nSTDERR:%s\n>>>>\n"
def undate_git_log(pj, out, err):
if not err:
err = 'None'
if not out:
out = 'None'
pj.git_log += gitline%(str(time.asctime()),str(out), str(err))
def start_git(pj):
__builtin__.git = True # flag it on
|
thaim/ansible
|
lib/ansible/modules/cloud/ovirt/ovirt_storage_template_info.py
|
Python
|
mit
| 5,147
| 0.003109
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_storage_template_info
short_description: Retrieve information about one or more oVirt/RHV templates related to a storage domain.
author: "Maor Lipchuk (@machacekondra)"
version_added: "2.4"
description:
- "Retrieve information about one or more oVirt/RHV templates relate to a storage domain."
- This module was called C(ovirt_storage_template_facts) before Ansible 2.9, returning C(ansible_facts).
Note that the M(ovirt_storage_template_info) module no longer returns C(ansible_facts)!
notes:
- "This module returns a variable C(ovirt_storage_templates), which
contains a list of templates. You need to register the result with
the I(register) keyword to use it."
options:
unregistered:
description:
- "Flag which indicates whether to get unregistered templates which contain one or more
disks which reside on a storage domain or diskless templates."
type: bool
default: false
max:
description:
- "Sets the maximum number of templates to return. If not specified all the templates are returned."
storage_domain:
description:
- "The storage domain name where the templates should be listed."
extends_documentation_fragment: ovirt_info
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Gather information about all Templates which relate to a storage domain and
# are unregistered:
- ovirt_storage_template_info:
unregistered=True
register: result
- debug:
msg: "{{ result.ovirt_storage_templates }}"
'''
RETURN = '''
ovirt_storage_templates:
description: "List of dictionaries describing the Templates. Template attributes are mapped to dictionary keys,
all Templates attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/template."
returned: On success.
type: list
'''
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
check_sdk,
create_connection,
get_dict_of_struct,
ovirt_info_full_argument_spec,
get_id_by_name
)
def main():
argument_spec = ovirt_info_full_argument_spec(
storage_domain=dict(default=None),
max=dict(default=None, type='int'),
unregistered=dict(default=False, type='bool'),
)
module = AnsibleModule(argument_spec)
is_old_facts = module._name == 'ovirt_storage_template_facts'
if is_old_facts:
module.deprecate("The 'ovirt_storage_template_facts' module has been renamed to 'ovirt_storage_template_info', "
"and the renamed one no longer returns ansible_facts", version='2.13')
check_sdk(module)
try:
auth = module.params.pop('auth')
connection = create_connection(auth)
storage_domains_service = connection.system_service().storage_domains_service()
sd_id = get_id_by_name(storage_domains_service, module.params['storage_domain'])
storage_domain_service = storage_domains_service.storage_domain_service(sd_id)
templates_service = storage_domain_service.templates_service()
# Find the unregistered Template we want to register:
if module.params.get('unregistered'):
templates = templates_service.list(unregistered=True)
else:
templates = templates_service.list(max=module.params['max'])
result = dict(
ovirt_storage_templates=[
get_dict_of_struct(
struct=c,
connection=connection,
fetch_nested=module.params.get('fetch_nested'),
attributes=module.params.get('nested_attributes'),
) for c in templates
],
)
if is_old_facts:
module.exit_json(changed=False, ansible_facts=result)
else:
module.exit_json(changed=False, **result)
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
finally:
connection.close(logout=auth.get('token') is None)
if __name__ == '__main__':
main()
|
joakim-hove/ert
|
ert_gui/plottery/plots/history.py
|
Python
|
gpl-3.0
| 771
| 0
|
def plotHistory(plot_context, axes):
"""
@type axes: matplotlib.axes.Axes
@type plot_config: PlotConfig
"""
plot_config = plot_context.plotConfig()
if (
not plot_config.isHistoryEnabled()
or plot_context.history_data is None
or plot_context.history_data.empty
):
return
data = plot_context.history_data
style = plot_config.historyStyle()
lines = axes.plot_date(
x=data.index.values,
y=data,
color=style.color,
alpha=style.alpha,
marker=style.marker,
linestyle=style.line_style,
linewidth=style.width,
markersize=style.size,
)
    if len(lines) > 0 and style.isVisible():
        plot_config.addLegendItem("History", lines[0])
|
mathLab/RBniCS
|
rbnics/backends/online/numpy/linear_solver.py
|
Python
|
lgpl-3.0
| 1,491
| 0.004024
|
# Copyright (C) 2015-2022 by the RBniCS authors
#
# This file is part of RBniCS.
#
# SPDX-License-Identifier: LGPL-3.0-or-later
from numpy.linalg import solve
from rbnics.backends.abstract import LinearProblemWrapper
from rbnics.backends.online.basic import LinearSolver as BasicLinearSolver
from rbnics.backends.online.numpy.function import Function
from rbnics.backends.online.numpy.matrix import Matrix
from rbnics.backends.online.numpy.transpose import DelayedTransposeWithArithmetic
from rbnics.backends.online.numpy.vector import Vector
from rbnics.utils.decorators import BackendFor, DictOfThetaType, ModuleWrapper, ThetaType
backend = ModuleWrapper(Function, Matrix, Vector)
wrapping = ModuleWrapper(DelayedTransposeWithArithmetic=DelayedTransposeWithArithmetic)
LinearSolver_Base = BasicLinearSolver(backend, wrapping)
@BackendFor("numpy", inputs=((Matrix.Type(), DelayedTransposeWithArithmetic, LinearProblemWrapper),
Function.Type(),
(Vector.Type(), DelayedTransposeWithArithmetic, None),
ThetaType + DictOfThetaType + (None,)))
class LinearSolver(LinearSolver_Base):
def set_parameters(self, parameters):
assert len(parameters) == 0, "NumPy linear solver does not accept parameters yet"
def solve(self):
solution = solve(self.lhs, self.rhs)
self.solution.vector()[:] = solution
if self.monitor is not None:
self.monitor(self.solution)
|
jongiddy/jute
|
compare/python_abc.py
|
Python
|
mit
| 1,481
| 0
|
from time import time
import abc
# Create an interface IFoo
# Create a sub-interface IFooBar
# Create a class FooBarBaz implementing IFooBar
# Repeatedly create an instance of FooBarBaz and pass it to a function
# that wants an IFoo
# Call foo
# Time all the above
# Create a single instance of FooBarBaz. Confirm whether a function
# wanting an IFoo succeeds or fails calling foo, bar, baz
# Check whether a Foo object (implementing only foo) can be passed to a
# function wanting an IFooBar, and if it can, whether it succeeds or
# fails calling foo, bar, baz
class Increments(metaclass=abc.ABCMeta):
@abc.abstractmethod
def increment():
"""Increment something"""
class IncrementsBar(Increments, metaclass=abc.ABCMeta):
"""Provide bar as an attribute""
|
"
@property
@abc.abstractmethod
def bar():
"""An attrib
|
ute"""
class IncrementingInteger(IncrementsBar):
"""Increment an integer when increment is called."""
bar = 2
def increment(self):
self.bar += 1
def f(incrementer):
incrementer.increment()
def test_time():
start = time()
for i in range(1000000):
inc = IncrementingInteger()
f(inc)
stop = time()
print(stop - start)
def test_time1():
start = time()
inc = IncrementingInteger()
for i in range(1000000):
f(inc)
stop = time()
print(stop - start)
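# Hedged sketch (not in the original benchmark): the header comments also ask
# whether an instance can be handed to code expecting the interfaces above and
# which attribute accesses succeed. With abc, an IncrementingInteger satisfies
# both ABCs via ordinary isinstance checks; this check is illustrative only and
# is not wired into main().
def test_interface_checks():
    inc = IncrementingInteger()
    assert isinstance(inc, Increments)     # base interface recognised
    assert isinstance(inc, IncrementsBar)  # sub-interface recognised
    inc.increment()
    assert inc.bar == 3                    # bar starts at 2; increment() adds 1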
def main():
test_time()
test_time1()
if __name__ == '__main__':
main()
|
jamescarignan/Flask-User
|
example_apps/invite_app.py
|
Python
|
bsd-2-clause
| 6,755
| 0.004737
|
import os
from flask import Flask, redirect, render_template_string, request, url_for
from flask_babel import Babel
from flask_mail import Mail
from flask_sqlalchemy import SQLAlchemy
from flask_user import confirm_email_required, current_user, login_required, \
UserManager, UserMixin, SQLAlchemyAdapter
from flask_user.signals import user_sent_invitation, user_registered
# Use a Class-based config to avoid needing a 2nd file
# os.getenv() enables configuration through OS environment variables
class ConfigClass(object):
# Flask settings
SECRET_KEY = os.getenv('SECRET_KEY', 'THIS IS AN INSECURE SECRET')
SQLALCHEMY_DATABASE_URI = os.getenv('DATABASE_URL', 'sqlite:///test_app.sqlite')
CSRF_ENABLED = True
# Flask-Mail settings
MAIL_USERNAME = os.getenv('MAIL_USERNAME', 'email@example.com')
MAIL_PASSWORD = os.getenv('MAIL_PASSWORD', 'password')
MAIL_DEFAULT_SENDER = os.getenv('MAIL_DEFAULT_SENDER', '"MyApp" <noreply@example.com>')
MAIL_SERVER = os.getenv('MAIL_SERVER', 'smtp.gmail.com')
MAIL_PORT = int(os.getenv('MAIL_PORT', '465'))
MAIL_USE_SSL = os.getenv('MAIL_USE_SSL', True)
# Flask-User settings
USER_APP_NAME = "AppName" # Used by email templates
USER_ENABLE_INVITATION = True
USER_REQUIRE_INVITATION = True
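# Hedged usage note (not in the original example): most settings above fall back
# to os.getenv(), so a deployment can override them without editing this file,
# e.g. `export MAIL_SERVER=smtp.example.org` before starting the app.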
def create_app(test_config=None): # For automated tests
# Setup Flask and read config from ConfigClass defined above
app = Flask(__name__)
app.config.from_object(__name__+'.ConfigClass')
# Load local_settings.py if file exists # For automated tests
try: app.config.from_object('local_settings')
except: pass
# Load optional test_config # For automated tests
if test_config:
app.config.update(test_config)
# Initialize Flask extensions
db = SQLAlchemy(app) # Initialize Flask-SQLAlchemy
mail = Mail(app) # Initialize Flask-Mail
babel = Babel(app) # Initialize Flask-Babel
@babel.localeselector
def get_locale():
translations = [str(translation) for translation in babel.list_translations()]
language = request.accept_languages.best_match(translations)
print('translations=',repr(translations), 'language=', repr(language))
return language
# Define the User data model. Make sure to add flask.ext.user UserMixin !!!
class User(db.Model, UserMixin):
__tablename__ = 'user'
id = db.Column(db.Integer, primary_key=True)
# User authentication information
username = db.Column(db.String(50), nullable=True, unique=True)
password = db.Column(db.String(255), nullable=False, server_default='')
reset_password_token = db.Column(db.String(100), nullable=False, server_default='')
# User email information
email = db.Column(db.String(255), nullable=False, unique=True)
confirmed_at = db.Column(db.DateTime(), nullable=True)
# User information
is_enabled = db.Column(db.Boolean(), nullable=False, server_default='0')
first_name = db.Column(db.String(100), nullable=False, server_default='')
last_name = db.Column(db.String(100), nullable=False, server_default='')
class UserInvitation(db.Model):
__tablename__ = 'user_invite'
id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.String(255), nullable=False)
        # save the id of the user who sent the invitation
invited_by_user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
# token used for registration page to identify user registering
token = db.Column(db.String(100), nullable=False, server_default='')
# Create all database tables
db.create_all()
# Setup Flask-User
db_adapter = SQLAlchemyAdapter(db, User, UserInvitationClass=UserInvitation) # Select database adapter
user_manager = UserManager(db_adapter, app) # Init Flask-User and bind to app
@user_registered.connect_via(app)
def after_registered_hook(sender, user, user_invite):
sender.logger.info("USER REGISTERED")
@user_sent_invitation.connect_via(app)
def after_invitation_hook(sender, **extra):
sender.logger.info("USER SENT INVITATION")
# The Home page is accessible to anyone
@app.route('/')
def home_page():
return render_template_string("""
{% extends "base.html" %}
{% block content %}
                <h2>{%trans%}Home Page{%endtrans%}</h2>
{% if current_user.is_authenticated %}
<p> <a href="{{ url_for('user_profile_page') }}">
{%trans%}Profile Page{%endtrans%}</a></p>
<p> <a href="{{ url_for('user.logout') }}">
{%trans%}Sign out{%endtrans%}</a></p>
{% else %}
<p> <a href="{{ url_for('user.login') }}">
{%trans%}Sign in or Register{%endtrans%}</a></p>
{% endif %}
{% endblock %}
""")
if current_user.is_authenticated:
return redirect(url_for('user_profile_page'))
else:
return redirect(url_for('user.login'))
# The Profile page requires a logged-in user
@app.route('/user/profiles')
@login_required # Use of @login_required decorator
@confirm_email_required
def user_profile_page():
return render_template_string("""
{% extends "base.html" %}
{% block content %}
<h2>{%trans%}Profile Page{%endtrans%}</h2>
<p> {%trans%}Hello{%endtrans%}
{{ current_user.username or current_user.email }},</p>
            <p> <a href="{{ url_for('home_page') }}">
                {%trans%}Home Page{%endtrans%}</a></p>
            <p> <a href="{{ url_for('user.change_username') }}">
                {%trans%}Change username{%endtrans%}</a></p>
            <p> <a href="{{ url_for('user.change_password') }}">
{%trans%}Change password{%endtrans%}</a></p>
<p> <a href="{{ url_for('user.invite') }}">
{%trans%}Invite User{%endtrans%}</a></p>
<p> <a href="{{ url_for('user.logout') }}">
{%trans%}Sign out{%endtrans%}</a></p>
{% endblock %}
""")
return app
# Start development web server
if __name__=='__main__':
app = create_app()
app.run(host='0.0.0.0', port=5000, debug=True)
|
UManPychron/pychron
|
pychron/extraction_line/switch_manager.py
|
Python
|
apache-2.0
| 40,032
| 0.001274
|
# ===============================================================================
# Copyright 2011 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# =============enthought library imports=======================
import binascii
import os
import pickle
import time
from operator import itemgetter
from pickle import PickleError
from string import digits
import yaml
from traits.api import Any, Dict, List, Bool, Event, Str
from pychron.core.helpers.iterfuncs import groupby_key
from pychron.core.helpers.strtools import to_bool
from pychron.core.yaml import yload
from pychron.extraction_line import VERBOSE_DEBUG, VERBOSE
from pychron.extraction_line.pipettes.tracking import PipetteTracker
from pychron.globals import globalv
from pychron.hardware.core.checksum_helper import computeCRC
from pychron.hardware.core.i_core_device import ICoreDevice
from pychron.hardware.switch import Switch, ManualSwitch
from pychron.hardware.valve import HardwareValve
from pychron.managers.manager import Manager
from pychron.paths import paths
from pychron.pychron_constants import NULL_STR
from .switch_parser import SwitchParser
def parse_interlocks(vobj, tag):
vs = []
if tag in vobj:
vs = vobj[tag]
elif '{}s'.format(tag) in vobj:
vs = vobj.get('{}s'.format(tag))
if isinstance(vs, (tuple, list)):
interlocks = [i.strip() for i in vs]
else:
interlocks = [vs.strip()]
return interlocks
def add_checksum(func):
def wrapper(*args, **kw):
d = func(*args, **kw)
return '{}{}'.format(d, computeCRC(d))
return wrapper
class ValveGroup(object):
owner = None
valves = None
class SwitchManager(Manager):
"""
Manager to interface with the UHV and HV pneumatic valves
"""
switches = Dict
explanable_items = List
extraction_line_manager = Any
pipette_trackers = List(PipetteTracker)
valves_path = Str
actuators = List
query_valve_state = Bool(True)
use_explanation = True
refresh_explanation = Event
refresh_state = Event
refresh_lock_state = Event
refresh_canvas_needed = Event
refresh_owned_state = Event
console_message = Event
mode = None
setup_name = Str('valves')
_prev_keys = None
def set_logger_level_hook(self, level):
for v in self.switches.values():
v.logger.setLevel(level)
for act in self.actuators:
act.logger.setLevel(level)
def actuate_children(self, name, action, mode):
"""
actuate all switches that have ``name`` defined as their parent
"""
for v in self.switches.values():
if v.parent == name:
self.debug('actuating child, {}, {}'.format(v.display_name, action))
if v.parent_inverted:
func = self.open_by_name if action == 'close' else self.close_by_name
else:
func = self.open_by_name if action == 'open' else self.close_by_name
func(v.display_name, mode)
def show_valve_properties(self, name):
v = self.get_switch_by_name(name)
if v is not None:
v.edit_traits()
def kill(self):
super(SwitchManager, self).kill()
self._save_states()
def create_device(self, name, *args, **kw):
"""
"""
dev = super(SwitchManager, self).create_device(name, *args, **kw)
dev.configuration_dir_name = self.configuration_dir_name
if 'actuator' in name or 'controller' in name:
if dev is not None:
self.actuators.append(dev)
return dev
def finish_loading(self, update=False):
"""
"""
if self.actuators:
for a in self.actuators:
self.info('setting actuator {}'.format(a.name))
self.info('comm. device = {} '.format(a.com_device_name))
# open config file
# setup_file = os.path.join(paths.extraction_line_dir, add_extension(self.setup_name, '.xml'))
self._load_valves_from_file(self.valves_path)
if globalv.load_valve_states:
self._load_states()
if globalv.load_soft_locks:
self._load_soft_lock_states()
if globalv.load_manual_states:
self._load_manual_states()
def set_child_state(self, name, state):
self.debug('set states for children of {}. state={}'.format(name, state))
# elm = self.extraction_line_manager
for k, v in self.switches.items():
if v.parent == name:
v.set_state(state)
self.refresh_state = (k, state)
# elm.update_valve_state(k, state)
def calculate_checksum(self, vkeys):
vs = self.switches
val = b''.join((vs[k].state_str().encode('utf-8') for k in vkeys if k in vs))
return binascii.crc32(val)
def get_valve_names(self):
return list(self.switches.keys())
def refresh_network(self):
self.debug('refresh network')
for k, v in self.switches.items():
self.refresh_state = (k, v.state, False)
self.refresh_canvas_needed = True
def get_indicator_state(self, name):
v = self.get_switch_by_name(name)
if v is not None:
return v.get_hardware_indicator_state()
@add_checksum
def get_owners(self):
"""
eg.
1. 129.128.12.141-A,B,C:D,E,F
A,B,C owned by 141
D,E,F free
2. A,B,C,D,E,F
All free
3. 129.128.12.141-A,B,C:129.138.12.150-D,E:F
A,B,C owned by 141,
D,E owned by 150
F free
"""
vs = [(v.name.split('-')[1], v.owner) for v in self.switches.values()]
owners = []
for owner, valves in groupby_key(vs, itemgetter(1)):
valves, _ = list(zip(*valves))
v = ','.join(valves)
if owner:
t = '{}-{}'.format(owner, v)
else:
t = v
owners.append(t)
return ':'.join(owners)
def get_locked(self):
return [v.name for v in self.switches.values() if v.software_lock and not v.ignore_lock_warning]
@add_checksum
def get_software_locks(self, version=0):
return self._make_word('software_lock', version=version)
# return ','.join(['{}{}'.format(k, int(v.software_lock)) for k, v in self.switches.items()])
def _make_word(self, attr, timeout=0.25, version=0):
word = 0x00
keys = []
if timeout:
prev_keys = []
st = time.time()
clear_prev_keys = False
if self._prev_keys:
clear_prev_keys = True
prev_keys = self._prev_keys
for k, v in self.switches.items():
'''
                querying a lot of valves can add up, hence the timeout.
                Most valves are not queried by default, which also helps shorten
                execution time for get_states.
'''
if timeout and k in prev_keys:
continue
s = bool(getattr(v, attr))
if version:
keys.append(k)
word = word << 0x01 | s
else:
keys.append('{}{}'.format(k, int(s)))
if timeout and time.time() - st > timeout:
self.debug('get states timeout. timeout={}'.format(timeout))
break
else:
|
pikulak/pywdbms
|
api/app.py
|
Python
|
apache-2.0
| 16,191
| 0.003768
|
import sys
import os
from flask import render_template, make_response, request, Blueprint, redirect, url_for, flash
from sqlalchemy import select
from collections import defaultdict
from sqlalchemy.exc import OperationalError
from pywdbms.db.file import load_databases_from_file as load
from pywdbms.db.file import update_databases_to_file as update
from pywdbms.db.containers import DatabaseContainer, BindContainer
from pywdbms.utils.decorators import require_database_connection, require_host_or_404
from pywdbms.utils.checks import check_connection
from pywdbms.api.forms import DatabaseAddForm, DatabaseEditForm, SqlForm
from pywdbms.api.settings import DEFAULT_OFFSET, SUPPORTED_DRIVERS, COMMANDS
from pywdbms.db.statements import StatementsChooser
blueprint = Blueprint('blueprint', __name__, template_folder="../templates")
load()
@blueprint.route('/')
def dashboard():
resp = make_response(render_template('dashboard/main.html'), 200)
return resp
##############################
########SERVER ROUTE##########
##############################
@blueprint.route('/servers/<string:host>/')
@blueprint.route('/servers/<string:host>/info/')
@require_host_or_404
def server_view_info(host):
return make_response(render_template(
'server/info.html',
host=host), 200)
@blueprint.route('/servers/<string:host>/databases/')
@require_host_or_404
def server_view_databases(host):
sorted_by_drivers = {}
versions = {}
for _driver in SUPPORTED_DRIVERS:
sorted_by_drivers[_driver] = (DatabaseContainer.get_databases(host=host,
drivername=_driver))
return make_response(render_template(
'server/databases.html',
sorted_by_drivers=sorted_by_drivers,
host=host), 200)
@blueprint.route('/servers/<string:host>/users/')
@require_host_or_404
def server_view_users(host):
sorted_by_drivers = {}
users = {}
headers = {}
_BINDS = BindContainer.get_all()
for _driver in SUPPORTED_DRIVERS:
        sorted_by_drivers[_driver] = (DatabaseContainer.get_databases(host=host,
                                                                       drivername=_driver))
for drivername, databases in sorted_by_drivers.items():
for database in databases:
for shortname, db_properties in database.items():
if shortname in _BINDS:
connection = _BINDS[shortname][0] #connection
stmt = StatementsChooser.for_[drivername].get_server_users()
result = connection.execute(stmt)
headers[drivername] = result.keys()
users[drivername] = result.fetchall()
break
else:
continue
break
return make_response(render_template(
'server/users.html',
host=host,
headers=headers,
users=users), 200)
@blueprint.route('/servers/<string:host>/export/')
@require_host_or_404
def server_view_export(host):
return make_response(render_template(
'server/export.html',
host=host), 200)
@blueprint.route('/servers/<string:host>/import/')
@require_host_or_404
def server_view_import(host):
return make_response(render_template(
'server/import.html',
host=host), 200)
@blueprint.route('/servers/<string:host>/operations/')
@require_host_or_404
def server_view_operations(host):
c = request.args.get("c")
if c is not None:
if COMMANDS[c](host):
return redirect('/')
return make_response(render_template(
'server/operations.html',
host=host), 200)
##############################
########DATABASE ROUTE########
##############################
@blueprint.route('/servers/<string:host>/databases/<string:shortname>/')
@blueprint.route('/servers/<string:host>/databases/<string:shortname>/structure/')
@require_host_or_404
@require_database_connection
def database_view_structure(host, shortname):
connection, meta, _ = BindContainer.get(shortname)
return make_response(render_template(
'database/structure.html',
host=host), 200)
@blueprint.route('/servers/<string:host>/databases/<string:shortname>/sql/', methods=["POST", "GET"])
@require_database_connection
@require_host_or_404
def database_view_sql(host, shortname):
connection, meta, _ = BindContainer.get(shortname)
form = SqlForm(request.form)
result = False
error = False
if request.method == 'POST':
if form.validate():
stmt = form.data["stmt"]
try:
result_ = connection.execute(stmt)
result = {}
result["labels"] = result_.keys()
result["query_result"] = result_.fetchall()
except Exception as e:
error = e
else:
error = "Can't validate form."
return make_response(render_template(
'database/sql.html',
host=host,
result=result,
form=form,
error=error), 200)
@blueprint.route('/servers/<string:host>/databases/<string:shortname>/search/')
@require_host_or_404
@require_database_connection
def database_view_search(host, shortname):
connection, meta, _ = BindContainer.get(shortname)
return make_response(render_template(
'database/search.html',
host=host), 200)
@blueprint.route('/servers/<string:host>/databases/<string:shortname>/import/')
@require_host_or_404
@require_database_connection
def database_view_import(host, shortname):
connection, meta, _ = BindContainer.get(shortname)
return make_response(render_template(
'database/import.html',
host=host), 200)
@blueprint.route('/servers/<string:host>/databases/<string:shortname>/export/')
@require_host_or_404
@require_database_connection
def database_view_export(host, shortname):
connection, meta, _ = BindContainer.get(shortname)
return make_response(render_template(
'database/export.html',
host=host), 200)
@blueprint.route('/servers/<string:host>/databases/<string:shortname>/operations/', methods=["POST", "GET"])
@require_host_or_404
@require_database_connection
def database_view_operations(host, shortname):
error = False
c = request.args.get("c")
act_db_properties = DatabaseContainer.get(shortname)
form = DatabaseEditForm(request.form)
if c is not None:
if COMMANDS[c](host=host, shortname=shortname):
return redirect(url_for("blueprint.server_view_databases", host=host))
else:
return redirect('/')
if request.method == 'POST':
if form.validate():
if check_connection(form.data):
DatabaseContainer.add(form.data)
DatabaseContainer.delete([shortname])
BindContainer.delete([shortname])
BindContainer.add(form.shortname.data)
update()
return redirect(url_for(
"blueprint.database_view_operations",
host=host,
shortname=form.shortname.data))
else:
error = "Unable connect to database."
else:
if len(form.shortname.errors) > 0:
error = "Shortname already exists. Please specify another one."
if len(form.database.errors) > 0:
error = "Specifed database already exists."
else:
error = "Please provide correct data."
return make_response(render_template(
'database/operations.html',
|
GbalsaC/bitnamiP
|
venv/lib/python2.7/site-packages/numpy/oldnumeric/arrayfns.py
|
Python
|
agpl-3.0
| 2,532
| 0.011848
|
"""Backward compatible with arrayfns from Numeric
"""
__all__ = ['array_set', 'construct3', 'digitize', 'error', 'find_mask',
'histogram', 'index_sort', 'interp', 'nz', 'reverse', 'span',
'to_corners', 'zmin_zmax']
import numpy as np
from numpy import asarray
class error(Exception):
pass
def array_set(vals1, indices, vals2):
indices = asarray(indices)
if indices.ndim != 1:
raise ValueError, "index array must be 1-d"
if not isinstance(vals1, np.ndarray):
        raise TypeError, "vals1 must be an ndarray"
vals1 = asarray(vals1)
vals2 = asarray(vals2)
if vals1.ndim != vals2.ndim or vals1.ndim < 1:
raise error, "vals1 and vals2 must have same number of dimensions (>=1)"
vals1[indices] = vals2
from numpy import digitize
from numpy import bincount as histogram
def index_sort(arr):
return asarray(arr).argsort(kind='heap')
def interp(y, x, z, typ=None):
"""y(z) interpolated by treating y(x) as piecewise function
"""
res = np.interp(z, x, y)
if typ is None or typ == 'd':
return res
if typ == 'f':
return res.astype('f')
raise error, "incompatible typecode"
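# Hedged example (not in the original module): with y = [0., 1., 4.] sampled at
# x = [0., 1., 2.], interp(y, x, [0.5, 1.5]) evaluates the piecewise-linear
# function at the new points and returns roughly [0.5, 2.5]; passing typ='f'
# casts the result to float32.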
def nz(x):
x = asarray(x,dtype=np.ubyte)
if x.ndim != 1:
raise TypeError, "intput must have 1 dimension."
indxs = np.flatnonzero(x != 0)
return indxs[-1].item()+1
def reverse(x, n):
x = asarray(x,dtype='d')
if x.ndim != 2:
raise ValueError, "input must be 2-d"
y = np.empty_like(x)
if n == 0:
y[...] = x[::-1,:]
elif n == 1:
y[...] = x[:,::-1]
return y
def span(lo, hi, num, d2=0):
x = np.linspace(lo, hi, num)
if d2 <= 0:
return x
else:
ret = np.empty((d2,num),x.dtype)
ret[...] = x
return ret
def zmin_zmax(z, ireg):
z = asarray(z, dtype=float)
ireg = asarray(ireg, dtype=int)
if z.shape != ireg.shape or z.ndim != 2:
raise ValueError, "z and ireg must be the same shape and 2-d"
ix, iy = np.nonzero(ireg)
# Now, add more indices
x1m = ix - 1
y1m = iy-1
i1 = x1m>=0
i2 = y1m>=0
i3 = i1 & i2
nix = np.r_[ix, x1m[i1], x1m[i1], ix[i2] ]
niy = np.r_[iy, iy[i1], y1m[i3], y1m[i2]]
# remove any negative indices
zres = z[nix,niy]
return zres.min().item(), zres.max().item()
def find_mask(fs, node_edges):
raise NotImplementedError
def to_corners(arr, nv, nvsum):
raise NotImplementedError
def construct3(mask, itype):
raise NotImplementedError
|
mfraezz/osf.io
|
osf/utils/migrations.py
|
Python
|
apache-2.0
| 23,271
| 0.003051
|
from past.builtins import basestring
import os
import itertools
import builtins
import json
import logging
import warnings
from math import ceil
from contextlib import contextmanager
from django.apps import apps
from django.db import connection
from django.db.migrations.operations.base import Operation
from osf.models.base import generate_object_id
from osf.utils.sanitize import strip_html, unescape_entities
from website import settings
from website.project.metadata.schemas import get_osf_meta_schemas
logger = logging.getLogger(__file__)
increment = 100000
# Dict to map original schema formats to schema block types
FORMAT_TYPE_TO_TYPE_MAP = {
('multiselect', 'choose'): 'multi-select-input',
(None, 'multiselect'): 'multi-select-input',
(None, 'choose'): 'single-select-input',
('osf-upload-open', 'osf-upload'): 'file-input',
('osf-upload-toggle', 'osf-upload'): 'file-input',
    ('singleselect', 'choose'): 'single-select-input',
('text', 'string'): 'short-text-input',
('textarea', 'osf-author-import'): 'contributors-input',
('textarea', None): 'long-text-input',
('textarea', 'string'): 'long-text-input',
('textarea-lg', None): 'long-text-input',
('textarea-lg', 'string'): 'long-text-input',
('textarea-xl', 'string'): 'long-text-input',
}
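# Illustrative lookup (added, not in the original file): an old-style schema
# question whose format is 'textarea' and whose type is 'string' maps to the
# 'long-text-input' block type, i.e.
# FORMAT_TYPE_TO_TYPE_MAP[('textarea', 'string')] == 'long-text-input'.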
def get_osf_models():
"""
Helper function to retrieve all osf related models.
Example usage:
with disable_auto_now_fields(models=get_osf_models()):
...
"""
return list(itertools.chain(*[app.get_models() for app in apps.get_app_configs() if app.label.startswith('addons_') or app.label.startswith('osf')]))
@contextmanager
def disable_auto_now_fields(models=None):
"""
Context manager to disable auto_now field updates.
If models=None, updates for all auto_now fields on *all* models will be disabled.
:param list models: Optional list of models for which auto_now field updates should be disabled.
"""
if not models:
models = apps.get_models()
changed = []
for model in models:
for field in model._meta.get_fields():
if hasattr(field, 'auto_now') and field.auto_now:
field.auto_now = False
changed.append(field)
try:
yield
finally:
for field in changed:
if hasattr(field, 'auto_now') and not field.auto_now:
field.auto_now = True
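# Hedged usage sketch (not in the original module): wrap bulk back-fills in a
# data migration so `modified` timestamps are not bumped; the model name below
# is illustrative.
#
#     with disable_auto_now_fields(models=get_osf_models()):
#         for node in AbstractNode.objects.all():
#             node.save()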
@contextmanager
def disable_auto_now_add_fields(models=None):
"""
Context manager to disable auto_now_add field updates.
If models=None, updates for all auto_now_add fields on *all* models will be disabled.
:param list models: Optional list of models for which auto_now_add field updates should be disabled.
"""
if not models:
models = apps.get_models()
changed = []
for model in models:
for field in model._meta.get_fields():
if hasattr(field, 'auto_now_add') and field.auto_now_add:
field.auto_now_add = False
changed.append(field)
try:
yield
finally:
for field in changed:
if hasattr(field, 'auto_now_add') and not field.auto_now_add:
field.auto_now_add = True
def ensure_licenses(*args, **kwargs):
"""Upsert the licenses in our database based on a JSON file.
:return tuple: (number inserted, number updated)
Moved from website/project/licenses/__init__.py
"""
ninserted = 0
nupdated = 0
try:
NodeLicense = args[0].get_model('osf', 'nodelicense')
except Exception:
# Working outside a migration
from osf.models import NodeLicense
with builtins.open(
os.path.join(
settings.APP_PATH,
'node_modules', '@centerforopenscience', 'list-of-licenses', 'dist', 'list-of-licenses.json'
)
) as fp:
licenses = json.loads(fp.read())
for id, info in licenses.items():
name = info['name']
text = info['text']
properties = info.get('properties', [])
url = info.get('url', '')
node_license, created = NodeLicense.objects.get_or_create(license_id=id)
node_license.name = name
node_license.text = text
node_license.properties = properties
node_license.url = url
node_license.save()
if created:
ninserted += 1
else:
nupdated += 1
logger.info('License {name} ({id}) added to the database.'.format(name=name, id=id))
logger.info('{} licenses inserted into the database, {} licenses updated in the database.'.format(
ninserted, nupdated
))
return ninserted, nupdated
def remove_licenses(*args):
from osf.models import NodeLicense
pre_count = NodeLicense.objects.all().count()
NodeLicense.objects.all().delete()
logger.info('{} licenses removed from the database.'.format(pre_count))
def ensure_schemas(*args):
"""Import meta-data schemas from JSON to database if not already loaded
"""
state = args[0] if args else apps
schema_count = 0
try:
schema_model = state.get_model('osf', 'registrationschema')
except LookupError:
# Use MetaSchema model if migrating from a version before RegistrationSchema existed
schema_model = state.get_model('osf', 'metaschema')
for schema in get_osf_meta_schemas():
schema_obj, created = schema_model.objects.update_or_create(
name=schema['name'],
schema_version=schema.get('version', 1),
defaults={
'schema': schema,
}
)
schema_count += 1
if created:
logger.info('Added schema {} to the database'.format(schema['name']))
logger.info('Ensured {} schemas are in the database'.format(schema_count))
def remove_schemas(*args):
from osf.models import RegistrationSchema
pre_count = RegistrationSchema.objects.all().count()
RegistrationSchema.objects.all().delete()
logger.info('Removed {} schemas from the database'.format(pre_count))
def create_schema_block(state, schema_id, block_type, display_text='', required=False, help_text='',
registration_response_key=None, schema_block_group_key='', example_text=''):
"""
For mapping schemas to schema blocks: creates a given block from the specified parameters
"""
state = state or apps
schema_block_model = state.get_model('osf', 'registrationschemablock')
return schema_block_model.objects.create(
schema_id=schema_id,
block_type=block_type,
required=required,
display_text=unescape_entities(
display_text,
safe={
'<': '<',
'>': '>'
}
),
help_text=unescape_entities(
help_text,
safe={
'<': '<',
'>': '>'
}
),
registration_response_key=registration_response_key,
schema_block_group_key=schema_block_group_key,
example_text=unescape_entities(
example_text,
safe={
'<': '<',
'>': '>'
}
)
)
# Split question multiple choice options into their own blocks
def split_options_into_blocks(state, rs, question, schema_block_group_key):
"""
For mapping schemas to schema blocks: splits individual multiple choice
options into their own schema blocks
"""
for option in question.get('options', []):
answer_text = option if isinstance(option, basestring) else option.get('text')
help_text = '' if isinstance(option, basestring) else option.get('tooltip', '')
create_schema_block(
state,
rs.id,
'select-input-option',
display_text=answer_text,
help_text=help_text,
schema_block_group_key=schema_block_group_key,
)
def get_registration_response_key(question):
"""
For mapping schemas to schema blocks:
Answer ids w
|
Somsubhra/Simplify
|
src/enrich/__init__.py
|
Python
|
mit
| 63
| 0.015873
|
__author__ = 's7a'
# All imports
from enricher import Enricher
|
jorik041/plaso
|
tests/parsers/winreg_plugins/outlook.py
|
Python
|
apache-2.0
| 3,175
| 0.00126
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for the Outlook Windows Registry plugins."""
import unittest
# pylint: disable=unused-import
from plaso.formatters import winreg as winreg_formatter
from plaso.parsers.winreg_plugins import outlook
from tests.parsers.winreg_plugins import test_lib
from tests.winregistry import test_lib as winreg_test_lib
class MSOutlook2013SearchMRUPluginTest(test_lib.RegistryPluginTestCase):
"""Tests for the Outlook Search MRU Windows Registry plugin."""
def setUp(self):
"""Sets up the needed objects used throughout the test."""
self._plugin = outlook.OutlookSearchMRUPlugin()
def testProcess(self):
"""Tests the Process function."""
    key_path = u'\\Software\\Microsoft\\Office\\15.0\\Outlook\\Search'
values = []
values.append(winreg_test_lib.TestRegValue(
        (u'C:\\Users\\username\\AppData\\Local\\Microsoft\\Outlook\\'
u'username@example.com.ost'), b'\xcf\x2b\x37\x00',
winreg_test_lib.TestRegValue.REG_DWORD, offset=1892))
winreg_key = winreg_test_lib.TestRegKey(
key_path, 1346145829002031, values, 1456)
event_queue_consumer = self._ParseKeyWithPlugin(self._plugin, winreg_key)
event_objects = self._GetEventObjectsFromQueue(event_queue_consumer)
expected_msg = (
u'[{0:s}] '
u'C:\\Users\\username\\AppData\\Local\\Microsoft\\Outlook\\'
u'username@example.com.ost: 0x00372bcf').format(key_path)
expected_msg_short = u'[{0:s}] C:\\Users\\username\\AppData\\Lo...'.format(
key_path)
self.assertEqual(len(event_objects), 1)
event_object = event_objects[0]
# This should just be the plugin name, as we're invoking it directly,
# and not through the parser.
self.assertEqual(event_object.parser, self._plugin.plugin_name)
self.assertEqual(event_object.timestamp, 1346145829002031)
self._TestGetMessageStrings(event_object, expected_msg, expected_msg_short)
# TODO: The catalog for Office 2013 (15.0) contains binary values not
# dword values. Check if Office 2007 and 2010 have the same. Re-enable the
# plug-ins once confirmed and OutlookSearchMRUPlugin has been extended to
# handle the binary data or create a OutlookSearchCatalogMRUPlugin.
# class MSOutlook2013SearchCatalogMRUPluginTest(unittest.TestCase):
# """Tests for the Outlook Search Catalog MRU Windows Registry plugin."""
#
# def setUp(self):
# """Sets up the needed objects used throughout the test."""
# self._plugin = outlook.MSOutlook2013SearchCatalogMRUPlugin()
#
# def testProcess(self):
# """Tests the Process function."""
# key_path = (
# u'\\Software\\Microsoft\\Office\\15.0\\Outlook\\Search\\Catalog')
# values = []
#
# values.append(winreg_test_lib.TestRegValue(
# (u'C:\\Users\\username\\AppData\\Local\\Microsoft\\Outlook\\'
# u'username@example.com.ost'), b'\x94\x01\x00\x00\x00\x00',
# winreg_test_lib.TestRegValue.REG_BINARY, offset=827))
#
# winreg_key = winreg_test_lib.TestRegKey(
# key_path, 1346145829002031, values, 3421)
#
# # TODO: add test for Catalog key.
if __name__ == '__main__':
unittest.main()
|
kumoru/torment
|
test_torment/test_unit/test_fixtures/__init__.py
|
Python
|
apache-2.0
| 15,613
| 0.005519
|
# Copyright 2015 Alex Brandt
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import inspect
import logging
import typing # noqa (use mypy typing)
import unittest
import uuid
from torment import fixtures
from torment import contexts
logger = logging.getLogger(__name__)
class FixturesCreateUnitTest(unittest.TestCase):
def test_fixture_create_without_context(self) -> None:
'''torment.fixtures.Fixture() → TypeError'''
self.assertRaises(TypeError, fixtures.Fixture)
def test_fixture_create_with_context(self) -> None:
'''torment.fixtures.Fixture(context).context == context'''
c = unittest.TestCase()
f = fixtures.Fixture(c)
self.assertEqual(f.context, c)
class FixturesPropertyUnitTest(unittest.TestCase):
def setUp(self) -> None:
self.c = unittest.TestCase()
self.f = fixtures.Fixture(self.c)
def test_fixture_category(self) -> None:
'''torment.fixtures.Fixture(context).category == 'fixtures' '''
self.f.__module__ = unittest.mock.MagicMock(__name__ = 'test_torment.test_unit.test_fixtures.fixture_a44bc6dda6654b1395a8c2cbd55d964d')
self.assertEqual(self.f.category, 'fixtures')
def test_fixture_description(self) -> None:
'''torment.fixtures.Fixture(context).description == '94d7c58f6ee44683936c21cb84d1e458—torment.fixtures' '''
self.f.context.module = 'fixtures'
self.f.uuid = uuid.UUID('94d7c58f6ee44683936c21cb84d1e458')
self.assertEqual(self.f.description, '94d7c58f6ee44683936c21cb84d1e458—fixtures')
def test_fixture_name(self) -> None:
'''torment.fixtures.Fixture(context).name == 'test_94d7c58f6ee44683936c21cb84d1e458' '''
self.f.__class__.__name__ = '94d7c58f6ee44683936c21cb84d1e458'
self.assertEqual(self.f.name, 'test_94d7c58f6ee44683936c21cb84d1e458')
class ErrorFixturesPropertyUnitTest(unittest.TestCase):
def test_error_fixture_description(self) -> None:
'''torment.fixtures.ErrorFixture(context).description == 'expected → failure' '''
class fixture(fixtures.Fixture):
@property
def description(self) -> str:
return 'expected'
class error_fixture(fixtures.ErrorFixture, fixture):
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.error = RuntimeError('failure')
c = unittest.TestCase()
e = error_fixture(c)
self.assertEqual(e.description, 'expected → failure')
class ErrorFixturesRunTest(unittest.TestCase):
def test_error_fixture_run(self) -> None:
'''torment.fixtures.ErrorFixture(context).run()'''
class fixture(fixtures.Fixture):
def run(self):
raise RuntimeError('failure')
class error_fixture(fixtures.ErrorFixture, fixture):
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.error = RuntimeError('failure')
c = unittest.TestCase()
e = error_fixture(c)
e.run()
self.assertIsInstance(e.exception, RuntimeError)
self.assertEqual(e.exception.args, ( 'failure', ))
class OfUnitTest(unittest.TestCase):
def test_of_zero(self) -> None:
'''torment.fixtures.of(()) == []'''
self.assertEqual(len(fixtures.of(())), 0)
def test_of_many_without_subclasses(self) -> None:
'''torment.fixtures.of(( FixtureA, )) == []'''
class FixtureA(object):
def __init__(self, context) -> None:
pass
self.assertEqual(len(fixtures.of(( FixtureA, ))), 0)
def test_of_many_with_subclasses(self) -> None:
'''torment.fixtures.of(( FixtureA, )) == [ fixture_a, ]'''
class FixtureA(object):
def __init__(self, context) -> None:
pass
class FixtureB(FixtureA):
pass
result = fixtures.of(( FixtureA, ))
self.assertEqual(len(result), 1)
self.assertIsInstance(result[0], FixtureB)
class RegisterUnitTest(unittest.TestCase):
def setUp(self) -> None:
_ = unittest.mock.patch('torment.fixtures.inspect')
mocked_inspect = _.start()
self.addCleanup(_.stop)
mocked_inspect.configure_mock(**{ 'isclass': inspect.isclass, 'isfunction': inspect.isfunction, })
mocked_inspect.stack.return_value = ( None, ( None, 'test_unit/test_d43830e2e9624dd19c438b15250c5818.py', ), )
class ContextStub(object):
pass
self.context = ContextStub()
self.context.module = mocked_inspect.getmodule.return_value = 'stack'
self.ns = {} # type: Dict[str, Any]
self.class_name = 'f_d43830e2e9624dd19c438b15250c5818'
def test_zero_properties(self) -> None:
'''torment.fixtures.register({}, (), {})'''
fixtures.register(self.ns, ( fixtures.Fixture, ), {})
_ = self.ns[self.class_name](self.context)
self.assertEqual(_.uuid, uuid.UUID('d43830e2e9624dd19c438b15250c5818'))
def test_one_literal_properties(self) -> None:
'''torment.fixtures.register({}, (), { 'a': 'a', })'''
fixtures.register(self.ns, ( fixtures.Fixture, ), { 'a': 'a', })
_ = self.ns[self.class_name](self.context)
self.assertEqual(_.a, 'a')
def test_one_class_properties(self) -> None:
'''torment.fixtures.register({}, (), { 'a': class, })'''
class A(object):
pass
fixtures.register(self.ns, ( fixtures.Fixture, ), { 'a': A, })
_ = self.ns[self.class_name](self.context)
self.assertIsInstance(_.a, A)
def test_one_fixture_class_properties(self) -> None:
'''torment.fixtures.register({}, (), { 'a': fixture_class, })'''
class A(fixtures.Fixture):
pass
fixtures.register(self.ns, ( fixtures.Fixture, ), { 'a': A, })
_ = self.ns[self.class_name](self.context)
self.assertIsInstance(_.a, A)
self.assertEqual(_.a.context, self.context)
def test_one_function_properties(self) -> None:
'''torment.fixtures.register({}, (), { 'a': self → None, })'''
def a(self) -> None:
pass
fixtures.register(self.ns, ( fixtures.Fixture, ), { 'a': a, })
_ = self.ns[self.class_name](self.context)
self.assertIsNone(_.a)
def test_description_property(self) -> None:
'''torment.fixtures.register({}, (), { 'description': 'needle', })'''
fixtures.register(self.ns, ( fixtures.Fixture, ), { 'description': 'needle', })
_ = self.ns[self.class_name](self.context)
self.assertEqual(_.description, 'd43830e2e9624dd19c438b15250c5818—stack—needle')
def test_error_property(self) -> None:
'''torment.fixtures.register({}, (), { 'error': …, })'''
fixtures.register(self.ns, ( fixtures.Fixture, ), { 'error': { 'class': RuntimeError, }, })
_ = self.ns[self.class_name](self.context)
self.assertIsInstance(_.error, RuntimeError)
def test_mocks_mock_property(self) -> None:
'''torment.fixtures.register({}, (), { 'mocks': { 'symbol': …, }, }).setup()'''
_ = unittest.mock.patch('torment.fixtures._find_mocker')
mocked_fixtures_find_mocker = _.start()
self.addCleanup(_.stop)
mocked_fixtures_find_mocker.return_value = lambda: True
_ = unittest.mock.patch('torment.fixtures._prepare_mock')
mocked_fixtures_prepare_mock = _.start()
self.addCleanup(_.stop)
fixtures.register(self.ns, ( fixtures.Fixture, ), { 'mocks': { 'symbol':
|
jsteilen/FlopPyImg
|
funktionen.py
|
Python
|
gpl-3.0
| 6,553
| 0.03145
|
import platform
import os
import shutil
import time
import hashlib
import xml.dom.minidom
import xml.etree.ElementTree
import re
def nameCount(name, targetPath):
pathXmlFile = targetPath + os.sep + 'info.xml'
if os.path.exists(pathXmlFile):
f = open(pathXmlFile, 'r')
xmlFileData = f.read()
f.close()
dom4 = xml.dom.minidom.parseString(xmlFileData)
volumeIdPosition = dom4.getElementsByTagName('count')
volumeId = str(int(volumeIdPosition[0].firstChild.nodeValue) + 1)
		fileName = 'disk' + volumeId + name
return fileName
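# Hedged example (not in the original file): if info.xml currently stores
# <count>2</count>, nameCount('.img', targetPath) returns 'disk3.img', i.e. the
# stored count plus one decides the next volume's file name.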
## List of files and folders
def contentList(path, listFileName, targetPath):
	content = os.listdir(path) # pass the desired path here
	content.sort() # sorts the list alphabetically!
openLstFile = targetPath + os.sep + listFileName
f = open(openLstFile, 'a')
ueberschrift = path + ':'
f.write(ueberschrift + '\n') # '\n' Zeilenumbruch
path_size = os.stat(path)
path_size_int = path_size.st_size
	path_size_str = str(path_size_int) # important: cast to a string
f.write(path_size_str + '\n')
for i in range(0,len(content)):
file = path + os.sep + content[i]
einstellungen = os.stat(file)
speicher_int = einstellungen.st_size
		speicher_str = str(speicher_int) # pad with zeros / spaces below!
while len(speicher_str) <= 10:
speicher_str = '0' + speicher_str
rechte_int = einstellungen.st_mode
rechte_oc = oct(rechte_int)
rechte_oc = rechte_oc[-3:]
rechte_str = str(rechte_oc)
daten = rechte_str + " " + speicher_str + " " + time.ctime(os.path.getmtime(file)) + " " + content[i]
print(daten)
f.write(daten + '\n')
f.close()
return content
## xml-parser
def volumeMetadata(basicPath, name, media, label, description, remarks):
path = basicPath + os.sep + 'info.xml'
print(path)
f = open(path, 'r')
data = f.read()
f.close()
dom1 = xml.dom.minidom.parseString(data)
vol = dom1.getElementsByTagName('volume')
# Attribute finden und in liste speichern
attributeList = []
for element in vol:
for elem in element.attributes.values():
attributeList.append(elem.firstChild.data)
newAttribute = len(attributeList) + 1
print(newAttribute)
count = dom1.getElementsByTagName('count')
count[0].firstChild.nodeValue = str(newAttribute)
def checkWrittenFile(file, path):
fileNames = 'disk' + str(len(attributeList) + 1) + file
filePath = path + os.sep + fileNames
if os.path.exists(filePath):
return 'written', fileNames
else:
return 'write error', fileNames
statusImg, fileImg = checkWrittenFile('.img', basicPath)
statusMd5, fileMd5 = checkWrittenFile('.md5', basicPath)
statusLst, fileLst = checkWrittenFile('.lst', basicPath)
dom2 = xml.dom.minidom.Document()
# create new vol
newVol = dom2.createElement('volume') # create new tag element 'volume'
newVol.setAttribute('n', str(newAttribute))
newVolName = dom2.createElement('name')
newVolNameText = dom2.createTextNode(name)
newVolMedia = dom2.createElement('media')
newVolMediaText = dom2.createTextNode(media)
newVolLabel = dom2.createElement('label')
newVolLabelText = dom2.createTextNode(label)
newVolDescription = dom2.createElement('description')
newVolDescriptionText = dom2.createTextNode(description)
newVolRemarks = dom2.createElement('remarks')
newVolRemarksText = dom2.createTextNode(remarks)
newVolImgfile = dom2.createElement('imgfile')
newVolImgfile.setAttribute('status', statusImg)
newVolImgfile.setAttribute('md5', md5Image)
newVolImgfileText = dom2.createTextNode(fileImg)
newVolMd5file = dom2.createElement('md5file')
newVolMd5file.setAttribute('status', statusMd5)
newVolMd5fileText = dom2.createTextNode(fileMd5)
newVolLstfile = dom2.createElement('lstfile')
newVolLstfile.setAttribute('status', statusLst)
newVolLstfileText = dom2.createTextNode(fileLst)
newVolName.appendChild(newVolNameText) # append textnode to element newVolName
newVol.appendChild(newVolName) # append element newVolName to element newVol
newVolMedia.appendChild(newVolMediaText)
newVol.appendChild(newVolMedia)
newVolLabel.appendChild(newVolLabelText)
newVol.appendChild(newVolLabel)
newVolDescription.appendChild(newVolDescriptionText)
newVol.appendChild(newVolDescription)
newVolRemarks.appendChild(newVolRemarksText)
newVol.appendChild(newVolRemarks)
newVolImgfile.appendChild(newVolImgfileText)
newVol.appendChild(newVolImgfile)
newVolMd5file.appendChild(newVolMd5fileText)
newVol.appendChild(newVolMd5file)
newVolLstfile.appendChild(newVolLstfileText)
newVol.appendChild(newVolLstfile)
root = dom2._get_documentElement()
dom2.appendChild(newVol)
dataVol = dom2.toprettyxml()
dom2 = xml.dom.minidom.parseString(dataVol)
x = dom1.importNode(dom2.childNodes[0], True)
dom1.childNodes[0].appendChild(x)
print(dom1.toxml())
	# increment the count
#volumeIdPosition = dom1.getElementsByTagName('count')
#volumeId = str(int(volumeIdPosition[0].firstChild.nodeValue) + 1)
#volumeIdTag = '<count>' + volumeId + '</count>'
# volumeIdTag = '<count>' + str(newAttribute) + '</count>'
# print('volumeIdTag: ', volumeIdTag)
data2 = dom1.toxml()
# data3 = re.sub("<count>(.)</count>", volumeIdTag,data2)
# print('regex done?')
path = basicPath + os.sep + 'info.xml'
f = open(path, 'w')
f.write(data2)
f.close()
## md5 checksum
def checkSum(imgPath):
global md5Image
md5FileName = nameCount('.md5', imgPath)
imgFileList = []
for file in os.listdir(imgPath):
if file.endswith('.img'):
imgFileList.append(file)
if len(imgFileList) == 0:
imgFileName = 'disk1.img'
else:
imgFileName = 'disk' + str(len(imgFileList)) + '.img'
md5LoadPath = imgPath + os.sep + imgFileName
md5SavePath = imgPath + os.sep + md5FileName
f = open(md5LoadPath, 'rb')
daten = f.read()
f.close()
md5Image = hashlib.md5(daten).hexdigest()
md5Image1 = md5Image + ' *' + md5FileName
f = open(md5SavePath, 'w')
f.write(md5Image1)
f.close()
return md5Image
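# Descriptive note (added): checkSum() hashes the newest disk<N>.img in imgPath,
# writes the digest to a matching .md5 file named via nameCount(), and stores it
# in the module-level md5Image global, which volumeMetadata() later records as
# the imgfile's md5 attribute.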
## a simple image tool
def imageStandard(sourcePath, targetPath):
checkOS = platform.platform()
if checkOS[0:5] == 'Linux':
logicalPath = sourcePath
elif checkOS[0:7] == 'Windows':
pathName = r'\\.\F:'
logicalPath = pathName[:-2] + sourcePath[:-1]
else:
print('unkown os')
imgFileName = nameCount('.img', targetPath)
targetName = targetPath + os.sep + imgFileName
shutil.copyfile(logicalPath, targetName)
|
tony/libvcs
|
libvcs/__about__.py
|
Python
|
mit
| 440
| 0
|
__title__ = 'libvcs'
__package_name__ = 'libvcs'
__description__ = 'vcs abstraction layer'
__version__ = '0.10.1'
__author__ = 'Tony Narlock'
__github__ = 'https://github.com/vcs-python/libvcs'
__docs__ = 'https://libvcs.git-pull.com'
__tracker__ = 'https://github.com/vcs-python/libvcs/issues'
__pypi__ = 'https://pypi.org/project/libvcs/'
__email__ = 'tony@git-pull.com'
__license__ = 'MIT'
__copyright__ = 'Copyright 2016- Tony Narlock'
|
rocity/dj-instagram
|
djinstagram/instaapp/apps.py
|
Python
|
apache-2.0
| 91
| 0
|
from django.apps import AppConfig
class InstaappConfig(AppConfig):
    name = 'instaapp'
|
bthirion/nipy
|
nipy/algorithms/statistics/tests/test_mixed_effects.py
|
Python
|
bsd-3-clause
| 5,555
| 0.00486
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
""" Testing the glm module
"""
import numpy as np
from numpy.testing import (assert_almost_equal,
assert_array_almost_equal,
assert_raises)
from nose.tools import assert_true
import numpy.random as nr
from ..mixed_effects_stat import (
    one_sample_ttest, one_sample_ftest, two_sample_ttest, two_sample_ftest,
generate_data, t_stat, mfx_stat)
from ..bayesian_mixed_effects import two_level_glm
def test_mfx():
""" Test the generic mixed-effects model"""
n_samples, n_tests = 20, 100
np.random.seed(1)
# generate some data
    V1 = np.random.rand(n_samples, n_tests)
Y = generate_data(np.ones((n_samples, 1)), 0, 1, V1)
X = np.random.randn(20, 3)
# compute the test statistics
t1, = mfx_stat(Y, V1, X, 1,return_t=True,
return_f=False, return_effect=False,
return_var=False)
assert_true(t1.shape == (n_tests,))
assert_true(t1.mean() < 5 / np.sqrt(n_tests))
assert_true((t1.var() < 2) and (t1.var() > .5))
t2, = mfx_stat(Y, V1, X * np.random.rand(3), 1)
assert_almost_equal(t1, t2)
f, = mfx_stat(Y, V1, X, 1, return_t=False, return_f=True)
assert_almost_equal(t1 ** 2, f)
v2, = mfx_stat(Y, V1, X, 1, return_t=False, return_var=True)
assert_true((v2 > 0).all())
fx, = mfx_stat(Y, V1, X, 1, return_t=False, return_effect=True)
assert_true(fx.shape == (n_tests,))
def test_t_test():
""" test that the t test run
"""
n_samples, n_tests = 15, 100
data = nr.randn(n_samples, n_tests)
t = t_stat(data)
assert_true(t.shape == (n_tests,))
    assert_true(np.abs(t.mean()) < 5 / np.sqrt(n_tests))
assert_true(t.var() < 2)
assert_true( t.var() > .5)
def test_two_sample_ttest():
""" test that the mfx ttest indeed runs
"""
n_samples, n_tests = 15, 4
np.random.seed(1)
# generate some data
vardata = np.random.rand(n_samples, n_tests)
data = generate_data(np.ones(n_samples), 0, 1, vardata)
# compute the test statistics
u = np.concatenate((np.ones(5), np.zeros(10)))
t2 = two_sample_ttest(data, vardata, u, n_iter=5)
assert t2.shape == (n_tests,)
    assert np.abs(t2.mean()) < 5 / np.sqrt(n_tests)
assert t2.var() < 2
assert t2.var() > .5
# try verbose mode
t3 = two_sample_ttest(data, vardata, u, n_iter=5, verbose=1)
assert_almost_equal(t2, t3)
def test_two_sample_ftest():
""" test that the mfx ttest indeed runs
"""
n_samples, n_tests = 15, 4
np.random.seed(1)
# generate some data
vardata = np.random.rand(n_samples, n_tests)
data = generate_data(np.ones((n_samples, 1)), 0, 1, vardata)
# compute the test statistics
u = np.concatenate((np.ones(5), np.zeros(10)))
t2 = two_sample_ftest(data, vardata, u, n_iter=5)
assert t2.shape == (n_tests,)
    assert np.abs(t2.mean()) < 5 / np.sqrt(n_tests)
assert t2.var() < 2
assert t2.var() > .5
# try verbose mode
t3 = two_sample_ftest(data, vardata, u, n_iter=5, verbose=1)
assert_almost_equal(t2, t3)
def test_mfx_ttest():
""" test that the mfx ttest indeed runs
"""
n_samples, n_tests = 15, 100
np.random.seed(1)
# generate some data
vardata = np.random.rand(n_samples, n_tests)
data = generate_data(np.ones((n_samples, 1)), 0, 1, vardata)
# compute the test statistics
t2 = one_sample_ttest(data, vardata, n_iter=5)
assert t2.shape == (n_tests,)
    assert np.abs(t2.mean()) < 5 / np.sqrt(n_tests)
assert t2.var() < 2
assert t2.var() > .5
# try verbose mode
t3 = one_sample_ttest(data, vardata, n_iter=5, verbose=1)
assert_almost_equal(t2, t3)
def test_mfx_ftest():
""" test that the mfx ftest indeed runs
"""
n_samples, n_tests = 15, 100
np.random.seed(1)
# generate some data
vardata = np.random.rand(n_samples, n_tests)
data = generate_data(np.ones((n_samples, 1)), 0, 1, vardata)
# compute the test statistics
f = one_sample_ftest(data, vardata, n_iter=5)
assert f.shape == (n_tests,)
assert (np.abs(f.mean() - 1) < 1)
assert f.var() < 10
assert f.var() > .2
def test_two_level_glm():
nsub = 10
npts = 100
reg1 = np.ones(nsub)
reg2 = np.random.random(nsub)
X = np.array((reg1, reg2)).T
y = np.repeat(np.reshape(reg1 + reg2, (nsub, 1)), npts, axis=1)
vy = np.zeros((nsub, npts))
beta, s2, dof = two_level_glm(y, vy, X)
assert_array_almost_equal(beta, 1)
assert_array_almost_equal(s2, 0)
def test_two_level_glm_novar():
X = np.random.normal(0, 1, size=(100, 10))
y = np.random.normal(0, 1, size=(100, 50))
vy = np.zeros((100, 50))
beta, s2, dof = two_level_glm(y, vy, X)
beta_error = np.mean(beta ** 2)
s2_error = np.abs(np.mean(s2) - 1)
print('Errors: %f (beta), %f (s2)' % (beta_error, s2_error))
assert beta_error < 0.1
assert s2_error < 0.1
def test_two_level_glm_error():
# this tests whether two_level_glm raises a value error if the
# design matrix has more regressors than the number of
# observations
X = np.random.normal(0, 1, size=(10, 11))
y = np.random.normal(0, 1, size=(10, 5))
vy = np.zeros((10, 5))
assert_raises(ValueError, two_level_glm, y, vy, X)
if __name__ == "__main__":
import nose
nose.run(argv=['', __file__])
|
google/gif-for-cli
|
gif_for_cli/__main__.py
|
Python
|
apache-2.0
| 755
| 0
|
"""
Copyright 2018 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import sys
from .execute import execute
def main(): # pragma: no cover
execute(os.environ, sys.argv[1:], sys.stdout)
if __name__ == '__main__': # pragma: no cover
main()
|
ThomasTheSpaceFox/SBTCVM-Mark-2
|
SBTCVM-asm2.py
|
Python
|
gpl-3.0
| 33,216
| 0.048862
|
#!/usr/bin/env python
import VMSYSTEM.libSBTCVM as libSBTCVM
import VMSYSTEM.libbaltcalc as libbaltcalc
import sys
import os
assmoverrun=19683
instcnt=0
txtblk=0
VMSYSROMS=os.path.join("VMSYSTEM", "ROMS")
critcomperr=0
compvers="v2.2.0"
outfile="assmout.trom"
#define IOmaps
IOmapread={"random": "--0------"}
IOmapwrite={}
#populate IOmaps with memory pointers
scratchmap={}
scratchstart="---------"
shortsccnt=1
scratchstop="---++++++"
IOgen=scratchstart
while IOgen!=scratchstop:
#scratchmap[("mem" + str(shortsccnt))] = IOgen
IOmapread[("mem" + str(shortsccnt))] = IOgen
IOmapwrite[("mem" + str(shortsccnt))] = IOgen
IOgen=libSBTCVM.trunkto6(libbaltcalc.btadd(IOgen, "+"))
shortsccnt += 1
#scratchmap[("mem" + str(shortsccnt))] = scratchstop
IOmapread[("mem" + str(shortsccnt))] = scratchstop
IOmapwrite[("mem" + str(shortsccnt))] = scratchstop
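#convert a decimal line number into a 6-trit balanced ternary address (the offset of 9841 centres the 19683-line address space on zero)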
def getlinetern(line):
line=(line-9841)
tline=libSBTCVM.trunkto6(libbaltcalc.DECTOBT(line))
return tline
tracecomp=0
#used to write to the compiler log if the compiler is in tracelog mode
def complog(textis):
if tracecomp==1:
compilerlog.write(textis)
#class used by the goto reference system
class gotoref:
def __init__(self, line, gtname):
self.line=line
self.tline=getlinetern(line)
self.gtname=gtname
#begin by reading command line arguments
try:
cmd=sys.argv[1]
except:
cmd=None
if "GLOBASMFLG" in globals():
cmd=GLOBASMFLG
if cmd=="-h" or cmd=="--help" or cmd=="help":
print '''This is SBTCVM-asm2.py, SBTCVM Mark 2's assembler.
commands:
SBTCVM-asm2.py -h (--help) (help): this text
SBTCVM-asm2.py -v (--version)
SBTCVM-asm2.py -a (--about): about SBTCVM-asm2.py
SBTCVM-asm2.py -c (--compile) [sourcefile]: build a tasm source into a trom
SBTCVM-asm2.py -t (--tracecompile) [sourcefile]: same as -c but logs the compiling process in detail in the CAP directory.
SBTCVM-asm2.py [sourcefile]: build a tasm source into a trom
'''
elif cmd=="-v" or cmd=="--version":
	print ("SBTCVM Assembler " + compvers)
elif cmd=="-a" or cmd=="--about":
print '''SBTCVM Assembler 2
''' + compvers + '''
(c)2016-2017 Thomas Leathers and Contributors
SBTCVM Assembler 2 is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
SBTCVM Assembler 2 is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with SBTCVM Assembler 2. If not, see <http://www.gnu.org/licenses/>
'''
elif cmd==None:
print "tip: use SBTCVM-asm2.py -h for help."
elif cmd=="-c" or cmd=="--compile" or cmd[0]!="-" or cmd=="-t" or cmd=="--tracecompile":
print("SBTCVM-asm " + compvers + " starting")
if "GLOBASMFLG" in globals():
arg=GLOBASMFLG
else:
if cmd[0]!="-":
arg=sys.argv[1]
else:
arg=sys.argv[2]
print arg
lowarg=arg.lower()
argisfile=0
argistasm=0
for extq in ["", ".tasm", ".TASM"]:
qarg=(arg + extq)
qlowarg=(lowarg + extq.lower())
print "searching for: \"" + qarg + "\"..."
argisfile
if os.path.isfile(qarg):
argisfile=1
print "found: " + qarg
elif os.path.isfile(os.path.join("VMSYSTEM", qarg)):
qarg=os.path.join("VMSYSTEM", qarg)
print "found: " + qarg
argisfile=1
elif os.path.isfile(os.path.join(VMSYSROMS, qarg)):
qarg=os.path.join(VMSYSROMS, qarg)
print "found: " + qarg
argisfile=1
elif os.path.isfile(os.path.join("VMUSER", qarg)):
qarg=os.path.join("VMUSER", qarg)
print "found: " + qarg
argisfile=1
elif os.path.isfile(os.path.join("ROMS", qarg)):
qarg=os.path.join("ROMS", qarg)
			print "found: " + qarg
argisfile=1
if argisfile==1:
			if qlowarg.endswith(".tasm") and os.path.isfile(qarg):
print "tasm source found."
arg=qarg
argistasm=1
break
else:
print "Not valid."
argisfile=0
if argisfile==0 or argistasm==0:
#print "ERROR: file not found, or is not a tasm file STOP"
sys.exit("ERROR: SBTCVM assembler was unable to load the specified filename. STOP")
	#generate a name for logs in case it's needed
#logsub=arg.replace("/", "-")
#logsub=logsub.replace("~", "")
#logsub=logsub.split(".")
logsub=libSBTCVM.namecrunch(arg, "-tasm-comp.log")
#detect if command line options specify tracelog compile mode:
if cmd=="-t" or cmd=="--tracecompile":
tracecomp=1
compilerlog=open(os.path.join('CAP', logsub), "w")
else:
tracecomp=0
#arg=arg.replace("./", "")
#print arg
complog("starting up compiler...\n")
complog("TASM VERSION: SBTCVM-asm " + compvers + "\n")
complog("source: " + arg + "\n")
complog("---------\n\n")
#open 2 instances of source. one per pass.
sourcefile=open(arg, 'r')
sourcefileB=open(arg, 'r')
#open(arg, 'r') as sourcefile
gotoreflist=list()
	print "performing prescan & prep pass"
	complog("performing prescan & prep pass\n")
srcline=0
for linen in sourcefile:
srcline += 1
lined=linen
linen=linen.replace("\n", "")
linen=linen.replace(" ", "")
linenraw=linen
linen=(linen.split("#"))[0]
linelist=linen.split("|")
if (len(linelist))==2:
instword=(linelist[0])
instdat=(linelist[1])
else:
instword=(linelist[0])
instdat="000000000"
if instword=="textstop":
txtblk=0
complog("TEXTBLOCK END\n")
gtflag=1
if txtblk==1:
for f in lined:
instcnt += 1
elif instword=="textstart":
txtblk=1
complog("TEXTBLOCK START\n")
#raw class
elif instword=="romread1":
instcnt += 1
elif instword=="romread2":
instcnt += 1
elif instword=="IOread1":
instcnt += 1
elif instword=="IOread2":
instcnt += 1
elif instword=="IOwrite1":
instcnt += 1
elif instword=="IOwrite2":
instcnt += 1
elif instword=="regswap":
instcnt += 1
elif instword=="copy1to2":
instcnt += 1
elif instword=="copy2to1":
instcnt += 1
elif instword=="invert1":
instcnt += 1
elif instword=="invert2":
instcnt += 1
elif instword=="add":
instcnt += 1
elif instword=="subtract":
instcnt += 1
elif instword=="multiply":
instcnt += 1
elif instword=="divide":
instcnt += 1
elif instword=="setreg1":
instcnt += 1
elif instword=="setreg2":
instcnt += 1
elif instword=="setinst":
instcnt += 1
elif instword=="setdata":
instcnt += 1
	#----gap in used opcodes----
#color drawing
elif instword=="continue":
instcnt += 1
elif instword=="colorpixel":
instcnt += 1
elif instword=="setcolorreg":
instcnt += 1
elif instword=="colorfill":
instcnt += 1
elif instword=="setcolorvect":
instcnt += 1
elif instword=="colorline":
instcnt += 1
elif instword=="colorrect":
instcnt += 1
#mono drawing
elif instword=="monopixel":
instcnt += 1
elif instword=="monofill":
instcnt += 1
elif instword=="setmonovect":
instcnt += 1
elif instword=="monoline":
instcnt += 1
elif instword=="monorect":
instcnt += 1
#----opcode --00-+ unused----
elif instword=="stop":
instcnt += 1
elif instword=="null":
instcnt += 1
elif instword=="gotodata":
instcnt += 1
elif instword=="gotoreg1":
instcnt += 1
elif instword=="gotodataif":
instcnt += 1
elif instword=="wait":
instcnt += 1
elif instword=="YNgoto":
instcnt += 1
elif instword=="userwait":
instcnt += 1
elif instword=="TTYclear":
instcnt += 1
#----gap in used opcodes----
elif instword=="gotoA":
instcnt += 1
autostpflg=1
elif instword=="gotoAif":
instcnt += 1
elif instword=="gotoB":
instcnt += 1
autostpflg=1
elif instword=="gotoBif":
instcnt += 1
elif instword=="gotoC":
instcnt += 1
elif instword=="gotoCif":
instcnt += 1
elif instword=="gotoD":
instcnt += 1
elif instword=="gotoDif":
instcnt += 1
elif instword=="gotoE":
instcnt += 1
elif instword=="gotoEif":
instcnt += 1
elif instword=="gotoF":
instcnt += 1
elif instword=="gotoFif":
instcnt += 1
#----gap in used opcodes----
elif instword=="dumpreg1":
instcnt += 1
e
|
NiklasRosenstein/localimport
|
tests/test_localimport.py
|
Python
|
mit
| 1,885
| 0.013793
|
from nose.tools import *
from localimport import localimport
import os
import sys
modules_dir = os.path.join(os.path.dirname(__file__), 'modules')
def test_localimport_with_autodisable():
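    # With autodisable (the default), the copy of another_module already imported from
    # sys.path is swapped out inside the context and restored on exit.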
sys.path.append(modules_dir)
import another_module as mod_a
try:
with localimport('modules') as _imp:
import some_module
import another_module as mod_b
assert 'some_module' in sys.modules
assert sys.modules['another_module'] is mod_b
assert 'some_module' not in sys.modules
assert sys.modules['another_module'] is mod_a
assert mod_a is not mod_b
finally:
sys.path.remove(modules_dir)
del sys.modules['another_module']
def test_localimport_without_autodisable():
sys.path.append(modules_dir)
import another_module as mod_a
try:
with localimport('modules', do_autodisable=False) as _imp:
import some_module
import another_module as mod_b
assert 'some_module' in sys.modules
assert sys.modules['another_module'] is mod_b
assert mod_a is mod_b
assert 'some_module' not in sys.modules
assert sys.modules['another_module'] is mod_a
finally:
sys.path.remove(modules_dir)
del sys.modules['another_module']
def test_localimport_parent_dir():
with localimport('.', parent_dir=modules_dir) as _imp:
import some_module
assert 'some_module' not in sys.modules
assert 'another_module' not in sys.modules
def test_localimport_curdir():
with localimport('.') as _imp:
import some_module
assert 'some_module' not in sys.modules
assert 'another_module' not in sys.modules
def test_discover():
    with localimport('.') as _imp:
        assert_equals(sorted(x.name for x in _imp.discover()), ['another_module', 'some_module', 'test_localimport'])
with localimport('modules') as _imp:
assert_equals(sorted(x.name for x in _imp.discover()), ['another_module', 'some_module'])
|
coUrbanize/rest_framework_ember
|
example/tests/test_model_viewsets.py
|
Python
|
bsd-2-clause
| 5,527
| 0.001628
|
import json
from example.tests import TestBase
from django.contrib.auth import get_user_model
from django.core.urlresolvers import reverse
from django.conf import settings
class ModelViewSetTests(TestBase):
"""
Test usage with ModelViewSets, also tests pluralization, camelization,
and underscore.
[<RegexURLPattern user-list ^identities/$>,
<RegexURLPattern user-detail ^identities/(?P<pk>[^/]+)/$>]
"""
list_url = reverse('user-list')
def setUp(self):
super(ModelViewSetTests, self).setUp()
self.detail_url = reverse('user-detail', kwargs={'pk': self.miles.pk})
def test_key_in_list_result(self):
"""
Ensure the result has a 'user' key since that is the name of the model
"""
response = self.client.get(self.list_url)
self.assertEqual(response.status_code, 200)
user = get_user_model().objects.all()[0]
expected = {
u'user': [{
u'id': user.pk,
u'first_name': user.first_name,
u'last_name': user.last_name,
u'email': user.email
}]
}
json_content = json.loads(response.content.decode('utf8'))
meta = json_content.get('meta')
self.assertEquals(expected.get('user'), json_content.get('user'))
        self.assertEquals(meta.get('total'), 2)
        self.assertEquals(meta.get('count', 0),
get_user_model().objects.count())
self.assertEquals(meta.get('next'), 2)
self.assertEqual(u'http://testserver/identities?page=2',
meta.get('next_link'))
self.assertEqual(meta.get('page'), 1)
def test_page_two_in_list_result(self):
"""
Ensure that the second page is reachable and is the correct data.
"""
response = self.client.get(self.list_url, {'page': 2})
self.assertEqual(response.status_code, 200)
user = get_user_model().objects.all()[1]
expected = {
u'user': [{
u'id': user.pk,
u'first_name': user.first_name,
u'last_name': user.last_name,
u'email': user.email
}]
}
json_content = json.loads(response.content.decode('utf8'))
meta = json_content.get('meta')
self.assertEquals(expected.get('user'), json_content.get('user'))
self.assertEquals(meta.get('count', 0),
get_user_model().objects.count())
self.assertIsNone(meta.get('next'))
self.assertIsNone(meta.get('next_link'))
self.assertEqual(meta.get('previous'), 1)
self.assertEqual(meta.get('page'), 2)
        # Older versions of DRF add page=1 for the first page; later versions trim it to the root URL
try:
self.assertEqual(u'http://testserver/identities',
meta.get('previous_link'))
except AssertionError:
self.assertEqual(u'http://testserver/identities?page=1',
meta.get('previous_link'))
def test_page_range_in_list_result(self):
"""
Ensure that the range of a page can be changed from the client,
tests pluralization as two objects means it converts ``user`` to
``users``.
"""
response = self.client.get(self.list_url, {'page_size': 2})
self.assertEqual(response.status_code, 200)
users = get_user_model().objects.all()
expected = {
u'users': [{
u'id': users[0].pk,
u'first_name': users[0].first_name,
u'last_name': users[0].last_name,
u'email': users[0].email
},{
u'id': users[1].pk,
u'first_name': users[1].first_name,
u'last_name': users[1].last_name,
u'email': users[1].email
}]
}
json_content = json.loads(response.content.decode('utf8'))
meta = json_content.get('meta')
self.assertEquals(expected.get('users'), json_content.get('user'))
self.assertEquals(meta.get('count', 0),
get_user_model().objects.count())
def test_key_in_detail_result(self):
"""
Ensure the result has a 'user' key.
"""
response = self.client.get(self.detail_url)
self.assertEqual(response.status_code, 200)
result = json.loads(response.content.decode('utf8'))
expected = {
u'user': {
u'id': self.miles.pk,
u'first_name': self.miles.first_name,
u'last_name': self.miles.last_name,
u'email': self.miles.email
}
}
self.assertEqual(result, expected)
def test_key_in_post(self):
"""
Ensure a key is in the post.
"""
self.client.login(username='miles', password='pw')
data = {
u'user': {
u'id': self.miles.pk,
u'first_name': self.miles.first_name,
u'last_name': self.miles.last_name,
u'email': 'miles@trumpet.org'
}
}
response = self.client.put(self.detail_url, data=data, format='json')
result = json.loads(response.content.decode('utf8'))
self.assertIn('user', result.keys())
self.assertEqual(result['user']['email'], 'miles@trumpet.org')
# is it updated?
self.assertEqual(
get_user_model().objects.get(pk=self.miles.pk).email,
'miles@trumpet.org')
|
philipforget/django-oauth-plus
|
oauth_provider/runtests/runtests.py
|
Python
|
bsd-3-clause
| 1,167
| 0.005141
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# https://github.com/swistakm/django-rest-framework/blob/master/rest_framework/runtests/runtests.py
import os
import sys
# fix sys path so we don't need to setup PYTHONPATH
sys.path.append(os.path.join(os.path.dirname(__file__), "../.."))
os.environ['DJANGO_SETTINGS_MODULE'] = 'oauth_provider.runtests.settings'
from django.conf import settings
from django.test.utils import get_runner
from south.management.commands import patch_for_test_db_setup
def usage():
return """
Usage: python runtests.py [UnitTestClass].[method]
You can pass the Class name of the `UnitTestClass` you want to test.
Append a method name if you only want to test a specific method of that class.
"""
def main():
TestRunner = get_runner(settings)
test_runner = TestRunner(verbosity=2)
if len(sys.argv) == 2:
test_case = '.' + sys.argv[1]
elif len(sys.argv) == 1:
test_case = ''
else:
print(usage())
sys.exit(1)
patch_for_test_db_setup()
failures = test_runner.run_tests(['tests' + test_case])
sys.exit(failures)
if __name__ == '__main__':
main()
|
fake-name/ReadableWebProxy
|
WebMirror/management/rss_parser_funcs/feed_parse_extractWinterTranslates.py
|
Python
|
bsd-3-clause
| 598
| 0.036789
|
def extractWinterTranslates(item):
"""
	'Winter Translates'
"""
	vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol or frag) or 'preview' in item['title'].lower():
return None
if 'Summaries' in item['tags']:
return None
tagmap = [
('Villain Rehab Plan', 'Transmigrating into a Mob Character to Rehabilitate the Villain Plan', 'translated'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
|
cnsoft/kbengine-cocos2dx
|
kbe/src/lib/python/Lib/test/test_generators.py
|
Python
|
lgpl-3.0
| 50,722
| 0.000434
|
tutorial_tests = """
Let's try a simple generator:
>>> def f():
... yield 1
... yield 2
>>> for i in f():
... print(i)
1
2
>>> g = f()
>>> next(g)
1
>>> next(g)
2
"Falling off the end" stops the generator:
>>> next(g)
Traceback (most recent call last):
File "<stdin>", line 1, in ?
File "<stdin>", line 2, in g
StopIteration
"return" also stops the generator:
>>> def f():
... yield 1
... return
... yield 2 # never reached
...
>>> g = f()
>>> next(g)
1
>>> next(g)
Traceback (most recent call last):
File "<stdin>", line 1, in ?
File "<stdin>", line 3, in f
StopIteration
>>> next(g) # once stopped, can't be resumed
Traceback (most recent call last):
  File "<stdin>", line 1, in ?
StopIteration
"raise StopIteration" stops the generator too:
>>> def f():
... yield 1
... raise StopIteration
... yield 2 # never reached
...
>>> g = f()
>>> next(g)
1
>>> next(g)
Traceback (most recent call last):
File "<stdin>", line 1, in ?
StopIteration
>>> next(g)
Traceback (most recent call last):
File "<stdin>", line 1, in ?
StopIteration
However, they are not exactly equivalent:
>>> def g1():
... try:
... return
... except:
... yield 1
...
>>> list(g1())
[]
>>> def g2():
... try:
... raise StopIteration
... except:
... yield 42
>>> print(list(g2()))
[42]
This may be surprising at first:
>>> def g3():
... try:
... return
... finally:
... yield 1
...
>>> list(g3())
[1]
Let's create an alternate range() function implemented as a generator:
>>> def yrange(n):
... for i in range(n):
... yield i
...
>>> list(yrange(5))
[0, 1, 2, 3, 4]
Generators always return to the most recent caller:
>>> def creator():
... r = yrange(5)
... print("creator", next(r))
... return r
...
>>> def caller():
... r = creator()
... for i in r:
... print("caller", i)
...
>>> caller()
creator 0
caller 1
caller 2
caller 3
caller 4
Generators can call other generators:
>>> def zrange(n):
... for i in yrange(n):
... yield i
...
>>> list(zrange(5))
[0, 1, 2, 3, 4]
"""
# The examples from PEP 255.
pep_tests = """
Specification: Yield
Restriction: A generator cannot be resumed while it is actively
running:
>>> def g():
... i = next(me)
... yield i
>>> me = g()
>>> next(me)
Traceback (most recent call last):
...
File "<string>", line 2, in g
ValueError: generator already executing
Specification: Return
Note that return isn't always equivalent to raising StopIteration: the
difference lies in how enclosing try/except constructs are treated.
For example,
>>> def f1():
... try:
... return
... except:
... yield 1
>>> print(list(f1()))
[]
because, as in any function, return simply exits, but
>>> def f2():
... try:
... raise StopIteration
... except:
... yield 42
>>> print(list(f2()))
[42]
because StopIteration is captured by a bare "except", as is any
exception.
Specification: Generators and Exception Propagation
>>> def f():
... return 1//0
>>> def g():
... yield f() # the zero division exception propagates
... yield 42 # and we'll never get here
>>> k = g()
>>> next(k)
Traceback (most recent call last):
File "<stdin>", line 1, in ?
File "<stdin>", line 2, in g
File "<stdin>", line 2, in f
ZeroDivisionError: integer division or modulo by zero
>>> next(k) # and the generator cannot be resumed
Traceback (most recent call last):
File "<stdin>", line 1, in ?
StopIteration
>>>
Specification: Try/Except/Finally
>>> def f():
... try:
... yield 1
... try:
... yield 2
... 1//0
... yield 3 # never get here
... except ZeroDivisionError:
... yield 4
... yield 5
... raise
... except:
... yield 6
... yield 7 # the "raise" above stops this
... except:
... yield 8
... yield 9
... try:
... x = 12
... finally:
... yield 10
... yield 11
>>> print(list(f()))
[1, 2, 4, 5, 8, 9, 10, 11]
>>>
Guido's binary tree example.
>>> # A binary tree class.
>>> class Tree:
...
... def __init__(self, label, left=None, right=None):
... self.label = label
... self.left = left
... self.right = right
...
... def __repr__(self, level=0, indent=" "):
... s = level*indent + repr(self.label)
... if self.left:
... s = s + "\\n" + self.left.__repr__(level+1, indent)
... if self.right:
... s = s + "\\n" + self.right.__repr__(level+1, indent)
... return s
...
... def __iter__(self):
... return inorder(self)
>>> # Create a Tree from a list.
>>> def tree(list):
... n = len(list)
... if n == 0:
... return []
... i = n // 2
... return Tree(list[i], tree(list[:i]), tree(list[i+1:]))
>>> # Show it off: create a tree.
>>> t = tree("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
>>> # A recursive generator that generates Tree labels in in-order.
>>> def inorder(t):
... if t:
... for x in inorder(t.left):
... yield x
... yield t.label
... for x in inorder(t.right):
... yield x
>>> # Show it off: create a tree.
>>> t = tree("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
>>> # Print the nodes of the tree in in-order.
>>> for x in t:
... print(' '+x, end='')
A B C D E F G H I J K L M N O P Q R S T U V W X Y Z
>>> # A non-recursive generator.
>>> def inorder(node):
... stack = []
... while node:
... while node.left:
... stack.append(node)
... node = node.left
... yield node.label
... while not node.right:
... try:
... node = stack.pop()
... except IndexError:
... return
... yield node.label
... node = node.right
>>> # Exercise the non-recursive generator.
>>> for x in t:
... print(' '+x, end='')
A B C D E F G H I J K L M N O P Q R S T U V W X Y Z
"""
# Examples from Iterator-List and Python-Dev and c.l.py.
email_tests = """
The difference between yielding None and returning it.
>>> def g():
... for i in range(3):
... yield None
... yield None
... return
>>> list(g())
[None, None, None, None]
Ensure that explicitly raising StopIteration acts like any other exception
in try/except, not like a return.
>>> def g():
... yield 1
... try:
... raise StopIteration
... except:
... yield 2
... yield 3
>>> list(g())
[1, 2, 3]
Next one was posted to c.l.py.
>>> def gcomb(x, k):
... "Generate all combinations of k elements from list x."
...
... if k > len(x):
... return
... if k == 0:
... yield []
... else:
... first, rest = x[0], x[1:]
... # A combination does or doesn't contain first.
... # If it does, the remainder is a k-1 comb of rest.
... for c in gcomb(rest, k-1):
... c.insert(0, first)
... yield c
... # If it doesn't contain first, it's a k comb of rest.
... for c in gco
|
shoopio/shoop
|
shuup/campaigns/migrations/0013_hourconfition_fix.py
|
Python
|
agpl-3.0
| 1,527
| 0.00131
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-06-11 22:27
from __future__ import unicode_literals
import django
import django.core.validators
import re
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('campaigns', '0012_basket_campaign_undiscounted'),
]
operations = [
migrations.AlterField(
model_name='hourbasketcondition',
name='days',
field=models.CharField(
verbose_name='days',
max_length=255,
validators=[
django.core.validators.RegexValidator(
re.compile('^[\\d,]+\\Z' if django.VERSION < (1, 9) else '^\\d+(?:\\,\\d+)*\\Z', 32),
code='invalid',
message='Enter only digits separated by commas.'
)
]
),
),
migrations.AlterField(
model_name='hourcondition',
name='days',
            field=models.CharField(
verbose_name='days',
max_length=255,
validators=[
django.core.validators.RegexValidator(
re.compile('^[\\d,]+\\Z' if django.VERSION < (1, 9) else '^\\d+(?:\\,\\d+)*\\Z', 32),
code='invalid',
                        message='Enter only digits separated by commas.'
)
]
),
),
]
|
FilipeMaia/afnumpy
|
afnumpy/indexing.py
|
Python
|
bsd-2-clause
| 10,029
| 0.00349
|
import arrayfire
import sys
import afnumpy
from . import private_utils as pu
import numbers
import numpy
import math
def __slice_len__(idx, shape, axis):
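    # Number of elements a slice `idx` selects along `axis` of an array with the given shape.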
maxlen = shape[axis]
# Out of bounds slices should be converted to None
if(idx.stop is not None and idx.stop >= maxlen):
idx.stop = None
if(idx.start is not None and idx.start >= maxlen):
idx.start = None
if idx.step is None:
step = 1
else:
step = idx.step
if idx.start is None:
if step < 0:
start = maxlen-1
else:
start = 0
else:
start = idx.start
if(start < 0):
start += maxlen
if idx.stop is None:
if step < 0:
end = -1
else:
end = maxlen
else:
end = idx.stop
if(end < 0 and step > 0):
end += maxlen
if(start == end):
return 0
if((start-end > 0 and step > 0) or
(start-end < 0 and step < 0)):
return 0
return int(math.ceil(float(end-start)/step))
def __slice_to_seq__(shape, idx, axis):
maxlen = shape[axis]
    if(isinstance(idx, numbers.Number)):
if idx < 0:
idx = maxlen + idx
if(idx >= maxlen):
raise IndexError('index %d is out of bounds for axis %d with size %d' % (idx, axis, maxlen))
return idx
    if(isinstance(idx, afnumpy.ndarray)):
return idx.d_array
if not isinstance(idx, slice):
return afnumpy.array(idx).d_array
if idx.step is None:
step = 1
else:
step = idx.step
if idx.start is None:
if step < 0:
start = maxlen-1
else:
start = 0
else:
start = idx.start
if(start < 0):
start += maxlen
if idx.stop is None:
if step < 0:
end = 0
else:
end = maxlen-1
else:
end = idx.stop
if(end < 0):
end += maxlen
if step < 0:
end += 1
else:
end -= 1
# arrayfire doesn't like other steps in this case
if(start == end):
step = 1
if((start-end > 0 and step > 0) or
(start-end < 0 and step < 0)):
return None
return arrayfire.seq(float(start),
float(end),
float(step))
def __npidx_to_afidx__(idx, dim_len):
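    # Convert one numpy-style index (number, slice or array) into an arrayfire-friendly index
    # for a dimension of length dim_len.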
if(isinstance(idx, numbers.Number)):
return idx
if(isinstance(idx, slice)):
start = idx.start
stop = idx.stop
step = idx.step
# Out of bounds slices should be converted to None
if(stop is not None and stop >= dim_len):
stop = None
if(start is not None and start >= dim_len):
start = None
if(start is not None and start < 0):
start += dim_len
if(stop is not None and stop < 0):
stop += dim_len
if idx.step is not None and idx.step < 0:
if idx.start is None:
start = dim_len-1
if idx.stop is None:
stop = -1
ret = slice(start,stop,step)
if __slice_len__(ret, [dim_len], 0) <= 0:
return None
return ret
if(not isinstance(idx, afnumpy.ndarray)):
idx = afnumpy.array(idx)
if(afnumpy.safe_indexing):
# Check if we're going out of bounds
max_index = afnumpy.arrayfire.max(idx.d_array)
min_index = afnumpy.arrayfire.min(idx.d_array)
if max_index >= dim_len:
raise IndexError('index %d is out of bounds for axis with size %d' % (max_index, dim_len))
if min_index < 0:
# Transform negative indices in positive ones
idx.d_array[idx.d_array < 0] += dim_len
return idx.d_array
def __convert_dim__(shape, idx):
# Convert numpy style indexing arguments to arrayfire style
# Always returns a list
# Should also return the shape of the result
# Also returns the shape that the input should be reshaped to
input_shape = list(shape)
if not isinstance(idx, tuple):
idx = (idx,)
idx = list(idx)
# According to http://docs.scipy.org/doc/numpy/reference/arrays.indexing.html
# newaxis is an alias for 'None', and 'None' can be used in place of this with the same result.
newaxis = None
    # Check for Ellipsis. Expand it to ':' such that idx shape matches array shape, ignoring any newaxes
# We have to do this because we don't want to trigger comparisons
if any(e is Ellipsis for e in idx):
for axis in range(0, len(idx)):
if(idx[axis] is Ellipsis):
i = axis
break
idx.pop(i)
if any(e is Ellipsis for e in idx):
raise IndexError('Only a single Ellipsis allowed')
while __idx_ndims__(idx)-idx.count(newaxis) < len(shape):
idx.insert(i, slice(None,None,None))
# Check and remove newaxis. Store their location for final reshape
newaxes = []
while newaxis in idx:
newaxes.append(idx.index(newaxis))
idx.remove(newaxis)
    # Append enough ':' to match the dimension of the array
while __idx_ndims__(idx) < len(shape):
idx.append(slice(None,None,None))
# ret = [0]*len(idx)
ret = []
# Check for the number of ndarrays. Raise error if there are multiple
arrays_in_idx = []
for axis in range(0,len(idx)):
if isinstance(idx[axis], afnumpy.ndarray):
arrays_in_idx.append(axis)
if isinstance(idx[axis], numpy.ndarray):
idx[axis] = afnumpy.array(idx[axis])
arrays_in_idx.append(axis)
if len(arrays_in_idx) > 1:
# This will fail because while multiple arrays
# as indices in numpy treat the values given by
# the arrays as the coordinates of the hyperslabs
# to keep, arrayfire does things differently.
# In arrayfire each entry of each array gets combined
# with all entries of all other arrays to define the coordinate
# In numpy each entry only gets combined with the corresponding
# entry in the other arrays.
# For example if one has [0,1],[0,1] as the two arrays for numpy
# this would mean that the coordinates retrieved would be [0,0],
# [1,1] while for arrayfire it would be [0,0], [0,1], [1,0], [1,1].
raise NotImplementedError('Fancy indexing with multiple arrays is not implemented')
# bcast_arrays = afnumpy.broadcast_arrays(*[idx[axis] for axis in arrays_in_idx])
# for axis,bcast_array in zip(arrays_in_idx, bcast_arrays):
# idx[axis] = bcast_array
for axis in range(0,len(idx)):
# Handle boolean arrays indexes which require a reshape
# of the input array
if(isinstance(idx[axis], afnumpy.ndarray) and
idx[axis].ndim > 1):
# Flatten the extra dimensions
extra_dims = 1
for i in range(1,idx[axis].ndim):
extra_dims *= input_shape.pop(axis+1)
input_shape[axis] *= extra_dims
af_idx = __npidx_to_afidx__(idx[axis], shape[axis])
ret.insert(0,af_idx)
# ret[pu.c2f(shape,axis)] = af_idx
ret_shape = __index_shape__(shape, ret)
    # Insert new dimensions starting from the end so we don't perturb other insertions
for n in newaxes[::-1]:
ret_shape.insert(n,1)
return ret, tuple(ret_shape), tuple(input_shape)
def __index_shape__(A_shape, idx, del_singleton=True):
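    # Work out the shape of the result of indexing an array of shape A_shape with the index list idx.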
shape = []
for i in range(0,len(idx)):
if(idx[i] is None):
shape.append(0)
elif(isinstance(idx[i],numbers.Number)):
if del_singleton:
# Remove dimensions indexed with a scalar
continue
else:
shape.append(1)
elif(isinstance(idx[i],arrayfire.index.Seq)):
if(idx[i].s == arrayfire.af_span):
shape.append(A_shape[i])
else:
shape.append(idx[i].size)
elif(isinstance(idx[i],slice)):
shape.append(__slice_len__(idx[i], pu.c2f(A_shape), i))
elif(isinstance(
|
soulmachine/scikit-learn
|
sklearn/linear_model/logistic.py
|
Python
|
bsd-3-clause
| 38,054
| 0.000053
|
"""
Logistic Regression
"""
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>
# Fabian Pedregosa <f@bianp.net>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Manoj Kumar <manojkumarsivaraj334@gmail.com>
import numbers
import warnings
import numpy as np
from scipy import optimize, sparse
from .base import LinearClassifierMixin, SparseCoefMixin, BaseEstimator
from ..feature_selection.from_model import _LearntSelectorMixin
from ..preprocessing import LabelEncoder
from ..svm.base import BaseLibLinear
from ..utils import check_array, check_consistent_length, compute_class_weight
from ..utils.extmath import log_logistic, safe_sparse_dot
from ..utils.optimize import newton_cg
from ..utils.validation import as_float_array, DataConversionWarning
from ..utils.fixes import expit
from ..externals.joblib import Parallel, delayed
from ..cross_validation import _check_cv
from ..externals import six
from ..metrics import SCORERS
# .. some helper functions for logistic_regression_path ..
def _intercept_dot(w, X, y):
"""Computes y * np.dot(X, w).
It takes into consideration if the intercept should be fit or not.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
"""
c = 0.
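    # If w carries one extra entry, the trailing element is the intercept.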
if w.size == X.shape[1] + 1:
c = w[-1]
w = w[:-1]
z = safe_sparse_dot(X, w) + c
return w, c, y * z
def _logistic_loss_and_grad(w, X, y, alpha, sample_weight=None):
"""Computes the logistic loss and gradient.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : ndarray, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
out : float
Logistic loss.
grad : ndarray, shape (n_features,) or (n_features + 1,)
Logistic gradient.
"""
_, n_features = X.shape
grad = np.empty_like(w)
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
# Logistic loss is the negative of the log of the logistic function.
out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w)
z = expit(yz)
z0 = sample_weight * (z - 1) * y
grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w
# Case where we fit the intercept.
if grad.shape[0] > n_features:
grad[-1] = z0.sum()
return out, grad
def _logistic_loss(w, X, y, alpha, sample_weight=None):
"""Computes the logistic loss.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : ndarray, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
out : float
Logistic loss.
"""
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
# Logistic loss is the negative of the log of the logistic function.
out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w)
return out
def _logistic_loss_grad_hess(w, X, y, alpha, sample_weight=None):
"""Computes the logistic loss, gradient and the Hessian.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : ndarray, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
out : float
Logistic loss.
grad : ndarray, shape (n_features,) or (n_features + 1,)
Logistic gradient.
Hs : callable
Function that takes the gradient as a parameter and returns the
matrix product of the Hessian and gradient.
"""
n_samples, n_features = X.shape
grad = np.empty_like(w)
fit_intercept = grad.shape[0] > n_features
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
# Logistic loss is the negative of the log of the logistic function.
out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w)
z = expit(yz)
z0 = sample_weight * (z - 1) * y
grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w
# Case where we fit the intercept.
if fit_intercept:
grad[-1] = z0.sum()
# The mat-vec product of the Hessian
d = sample_weight * z * (1 - z)
if sparse.issparse(X):
dX = safe_sparse_dot(sparse.dia_matrix((d, 0),
shape=(n_samples, n_samples)), X)
else:
# Precompute as much as possible
dX = d[:, np.newaxis] * X
if fit_intercept:
# Calculate the double derivative with respect to intercept
# In the case of sparse matrices this returns a matrix object.
dd_intercept = np.squeeze(np.array(dX.sum(axis=0)))
def Hs(s):
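        # Hessian-vector product: X.T.dot(dX.dot(s)) + alpha * s, plus the intercept terms when it is fitted.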
ret = np.empty_like(s)
ret[:n_features] = X.T.dot(dX.dot(s[:n_features]))
ret[:n_features] += alpha * s[:n_features]
# For the fit intercept case.
if fit_intercept:
ret[:n_features] += s[-1] * dd_intercept
ret[-1] = dd_intercept.dot(s[:n_features])
ret[-1] += d.sum() * s[-1]
return ret
return out, grad, Hs
def logistic_regression_path(X, y, pos_class=None, Cs=10, fit_intercept=True,
max_iter=100, tol=1e-4, verbose=0,
solver='lbfgs', coef=None, copy=True,
class_weight=None, dual=False, penalty='l2',
intercept_scaling=1.):
"""Compute a Logistic Regression model for a list of regularization
parameters.
This is an implementation that uses the result of the previous model
to speed up computations along the set of solutions, making it faster
than sequentially calling LogisticRegression for the different parameters.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,)
Input data, target values.
Cs : int | array-like, shape (n_cs,)
List of values for the regularization parameter or integer specifying
the number of regularization parameters that should be used. In this
case, the parameters will be chosen in a logarithmic scale between
1e-4 and 1e4.
pos_class : int, None
        The class with respect to which we perform a one-vs-all fit.
        If None, then it is assumed that the given problem is binary.
fit_intercept : bool
Whether to fit an intercept for the model. In this case the shape of
the returned array is (n_cs, n_features + 1).
max_iter : int
        Maximum number of iterations for the solver.
tol : float
Stopping criterion. For the newton-cg and lbfgs solvers, the iteration
will stop when ``max{|g_i | i = 1, ..., n} <= tol``
where ``g_i`` is the i-t
|
dbishai/DeepRGB
|
python/caffe/draw.py
|
Python
|
mit
| 7,604
| 0.000395
|
"""
Caffe network visualization: draw the NetParameter protobuffer.
.. note::
This requires pydot>=1.0.2, which is not included in requirements.txt since
it requires graphviz and other prerequisites outside the scope of the
Caffe.
"""
from caffe.proto import caffe_pb2
"""
pydot is not supported under python 3 and pydot2 doesn't work properly.
pydotplus works nicely (pip install pydotplus)
"""
try:
# Try to load pydotplus
import pydotplus as pydot
except ImportError:
import pydot
# Internal layer and blob styles.
LAYER_STYLE_DEFAULT = {'shape': 'record',
'fillcolor': '#6495ED',
'style': 'filled'}
NEURON_LAYER_STYLE = {'shape': 'record',
'fillcolor': '#90EE90',
'style': 'filled'}
BLOB_STYLE = {'shape': 'octagon',
'fillcolor': '#E0E0E0',
'style': 'filled'}
def get_pooling_types_dict():
"""Get dictionary mapping pooling type number to type name
"""
desc = caffe_pb2.PoolingParameter.PoolMethod.DESCRIPTOR
d = {}
for k, v in desc.values_by_name.items():
d[v.number] = k
return d
def get_edge_label(layer):
"""Define edge label based on layer type.
"""
if layer.type == 'Data':
edge_label = 'Batch ' + str(layer.data_param.batch_size)
elif layer.type == 'Convolution' or layer.type == 'Deconvolution':
edge_label = str(layer.convolution_param.num_output)
elif layer.type == 'InnerProduct':
edge_label = str(layer.inner_product_param.num_output)
else:
edge_label = '""'
return edge_label
def get_layer_label(layer, rankdir):
"""Define node label based on layer type.
Parameters
----------
layer : ?
rankdir : {'LR', 'TB', 'BT'}
Direction of graph layout.
Returns
-------
string :
A label for the current layer
"""
if rankdir in ('TB', 'BT'):
# If graph orientation is vertical, horizontal space is free and
# vertical space is not; separate words with spaces
separator = ' '
else:
# If graph orientation is horizontal, vertical space is free and
# horizontal space is not; separate words with newlines
separator = '\\n'
if layer.type == 'Convolution' or layer.type == 'Deconvolution':
# Outer double quotes needed or else colon characters don't parse
# properly
node_label = '"%s%s(%s)%skernel size: %d%sstride: %d%spad: %d"' %\
(layer.name,
separator,
layer.type,
separator,
layer.convolution_param.kernel_size[0] if len(layer.convolution_param.kernel_size._values) else 1,
separator,
layer.convolution_param.stride[0] if len(layer.convolution_param.stride._values) else 1,
separator,
layer.convolution_param.pad[0] if len(layer.convolution_param.pad._values) else 0)
elif layer.type == 'Pooling':
pooling_types_dict = get_pooling_types_dict()
node_label = '"%s%s(%s %s)%skernel size: %d%sstride: %d%spad: %d"' %\
(layer.name,
separator,
pooling_types_dict[layer.pooling_param.pool],
layer.type,
separator,
layer.pooling_param.kernel_size,
separator,
layer.pooling_param.stride,
separator,
layer.pooling_param.pad)
else:
node_label = '"%s%s(%s)"' % (layer.name, separator, layer.type)
return node_label
def choose_color_by_layertype(layertype):
"""Define colors for nodes based on the layer type.
"""
color = '#6495ED' # Default
if layertype == 'Convolution' or layertype == 'Deconvolution':
        color = '#FF5050'
elif layertype == 'Pooling':
color = '#FF9900'
elif layertype == 'InnerProduct':
color = '#CC33FF'
return color
def get_pydot_graph(caffe_net, rankdir, label_edges=True):
"""Create a data structure which represents the `caffe_net`.
Parameters
----------
caffe_net : object
rankdir : {'LR', 'TB', 'BT'}
Direction of graph layout.
label_edges : boolean, optional
Label the edges (default is True).
Returns
-------
pydot graph object
"""
pydot_graph = pydot.Dot(caffe_net.name,
graph_type='digraph',
rankdir=rankdir)
pydot_nodes = {}
pydot_edges = []
for layer in caffe_net.layer:
node_label = get_layer_label(layer, rankdir)
node_name = "%s_%s" % (layer.name, layer.type)
if (len(layer.bottom) == 1 and len(layer.top) == 1 and
layer.bottom[0] == layer.top[0]):
# We have an in-place neuron layer.
pydot_nodes[node_name] = pydot.Node(node_label,
**NEURON_LAYER_STYLE)
else:
layer_style = LAYER_STYLE_DEFAULT
layer_style['fillcolor'] = choose_color_by_layertype(layer.type)
pydot_nodes[node_name] = pydot.Node(node_label, **layer_style)
for bottom_blob in layer.bottom:
pydot_nodes[bottom_blob + '_blob'] = pydot.Node('%s' % bottom_blob,
**BLOB_STYLE)
edge_label = '""'
pydot_edges.append({'src': bottom_blob + '_blob',
'dst': node_name,
'label': edge_label})
for top_blob in layer.top:
pydot_nodes[top_blob + '_blob'] = pydot.Node('%s' % (top_blob))
if label_edges:
edge_label = get_edge_label(layer)
else:
edge_label = '""'
pydot_edges.append({'src': node_name,
'dst': top_blob + '_blob',
'label': edge_label})
# Now, add the nodes and edges to the graph.
for node in pydot_nodes.values():
pydot_graph.add_node(node)
for edge in pydot_edges:
pydot_graph.add_edge(
pydot.Edge(pydot_nodes[edge['src']],
pydot_nodes[edge['dst']],
label=edge['label']))
return pydot_graph
def draw_net(caffe_net, rankdir, ext='png'):
"""Draws a caffe net and returns the image string encoded using the given
extension.
Parameters
----------
caffe_net : a caffe.proto.caffe_pb2.NetParameter protocol buffer.
ext : string, optional
The image extension (the default is 'png').
Returns
-------
string :
Postscript representation of the graph.
"""
return get_pydot_graph(caffe_net, rankdir).create(format=ext)
def draw_net_to_file(caffe_net, filename, rankdir='LR'):
"""Draws a caffe net, and saves it to file using the format given as the
file extension. Use '.raw' to output raw text that you can manually feed
to graphviz to draw graphs.
Parameters
----------
caffe_net : a caffe.proto.caffe_pb2.NetParameter protocol buffer.
filename : string
The path to a file where the networks visualization will be stored.
rankdir : {'LR', 'TB', 'BT'}
Direction of graph layout.
"""
ext = filename[filename.rfind('.')+1:]
with open(filename, 'wb') as fid:
fid.write(draw_net(caffe_net, rankdir, ext))
|
yongshengwang/builthue
|
desktop/libs/hadoop/src/hadoop/fs/__init__.py
|
Python
|
apache-2.0
| 8,724
| 0.010087
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Interfaces and abstractions for filesystem access.
We should be agnostic whether we're using a "temporary" file
system, rooted in a local tmp dir, or whether we're using
a true HDFS. This file defines the interface.
Note that PEP 355 (Path - object oriented filesystem paths) did
not pass. Many file system methods are in __builtin__, os, or
os.path, and take strings representing filenames as arguments.
We maintain this usage of paths as arguments.
When possible, the interfaces here have fidelity to the
native python interfaces.
"""
import __builtin__
import errno
import grp
import logging
import os
import posixpath
import pwd
import re
import shutil
import stat
import sys
# SEEK_SET and family is found in posixfile or os, depending on the python version
if sys.version_info[:2] < (2, 5):
import posixfile
_tmp_mod = posixfile
else:
_tmp_mod = os
SEEK_SET, SEEK_CUR, SEEK_END = _tmp_mod.SEEK_SET, _tmp_mod.SEEK_CUR, _tmp_mod.SEEK_END
del _tmp_mod
# The web (and POSIX) always uses forward slash as a separator
LEADING_DOUBLE_SEPARATORS = re.compile("^" + posixpath.sep*2)
def normpath(path):
"""
Eliminates double-slashes.
Oddly, posixpath.normpath doesn't eliminate leading double slashes,
but it does clean-up triple-slashes.
"""
p = posixpath.normpath(path)
return LEADING_DOUBLE_SEPARATORS.sub(posixpath.sep, p)
class IllegalPathException(Exception):
pass
class LocalSubFileSystem(object):
"""
Facade around normal python filesystem calls, for a temporary/local
file system rooted in a root directory. This is intended for testing,
and is not a secure chroot alternative.
So far, this doesn't have a notion of current working dir, so all
paths are "absolute". I dislike the state that having cwd's implies,
but it may be convenient.
TODO(philip):
* chown: want to implement with names, not uids.
* chmod
* stat: perhaps implement "stats" which returns a dictionary;
Hadoop and posix have different stats
* set_replication: no equivalent
* file-system level stats
I think this covers all the functionality in "src/contrib/thriftfs/if/hadoopfs.thrift",
but there may be some bits missing. The implementation of the file-like object
for HDFS will be a bit tricky: open(f, "w") is generally the equivalent
of createFile, but it has to handle the case where f already
exists (in which case the best we can do is append, if that).
"""
def __init__(self, root):
"""
A file system rooted in root.
"""
self.root = root
self.name = "file://%s" % self.root
if not os.path.isdir(root):
logging.fatal("Root(%s) not found." % root +
" Perhaps you need to run manage.py create_test_fs")
def _resolve_path(self, path):
"""
Returns path to use in native file system.
"""
# Strip leading "/"
if not path.startswith("/"):
raise IllegalPathException("Path %s must start with leading /." % path)
path = path.lstrip("/")
joined = os.path.join(self.root, path)
absolute = os.path.abspath(joined)
normalized = os.path.normpath(absolute)
prefix = os.path.commonprefix([self.root, normalized])
if prefix != self.root:
raise IllegalPathException("Path %s is not valid." % path)
return joined
def _unresolve_path(self, path):
"""
Given an absolute path within the wrapped filesystem,
return the path that the user of this class sees.
"""
    # Resolve it to make it really absolute
assert path.startswith(self.root)
return path[len(self.root):]
def _wrap(f, paths=None, users=None, groups=None):
"""
Wraps an existing function f, and transforms
path arguments to "resolved paths" and
user arguments to uids.
By default transforms the first (zeroth) argument as
a path, but can be customized.
This lets us write:
def open(self, name, mode="r"):
return open(self._resolve_path(name), mode)
as
open = _wrap(__builtin__.open)
NOTE: No transformation is done on the keyword args;
they are not accepted. (The alternative would be to
require the names of the keyword transformations.)
"""
if users is None:
users = []
if groups is None:
groups = []
if paths is None and 0 not in users and 0 not in groups:
paths = [0]
# complicated way of taking the intersection of three lists.
assert not reduce(set.intersection, map(set, [paths, users, groups]))
def wrapped(*args):
self = args[0]
newargs = list(args[1:])
for i in paths:
newargs[i] = self._resolve_path(newargs[i])
for i in users:
newargs[i] = pwd.getpwnam(newargs[i]).pw_uid
for i in groups:
newargs[i] = grp.getgrnam(newargs[i]).gr_gid
return f(*newargs)
return wrapped
# These follow their namesakes.
open = _wrap(__builtin__.open)
remove = _wrap(os.remove)
mkdir = _wrap(os.mkdir)
rmdir = _wrap(os.rmdir)
listdir = _wrap(os.listdir)
rename = _wrap(os.rename, paths=[0,1])
exists = _wrap(os.path.exists)
isfile = _wrap(os.path.isfile)
isdir = _wrap(os.path.isdir)
chmod = _wrap(os.chmod)
# This could be provided with an error_handler
rmtree = _wrap(shutil.rmtree)
chown = _wrap(os.chown, paths=[0], users=[1], groups=[2])
@property
def uri(self):
return self.name
def stats(self, path, raise_on_fnf=True):
path = self._resolve_path(path)
try:
statobj = os.stat(path)
except OSError, ose:
if ose.errno == errno.ENOENT and not raise_on_fnf:
return None
raise
ret = dict()
ret["path"] = self._unresolve_path(path)
ret["size"] = statobj[stat.ST_SIZE]
ret["mtime"] = statobj[stat.ST_MTIME]
ret["mode"] = statobj[stat.ST_MODE]
ret["user"] = pwd.getpwuid(statobj[stat.ST_UID]).pw_name
ret["group"] = grp.getgrgid(statobj[stat.ST_GID]).gr_name
return ret
def setuser(self, user, groups=None):
pass
def status(self):
return FakeStatus()
def listdir_stats(self, path):
"""
This is an equivalent of listdir that, instead of returning file names,
returns a list of stats instead.
"""
listdir_files = self.listdir(path)
paths = [posixpath.join(path, f) for f in listdir_files]
return [self.stats(path) for path in paths]
def __repr__(self):
return "LocalFileSystem(%s)" % repr(self.root)
class FakeStatus(object):
"""
A fake implementation of HDFS health RPCs.
These follow the thrift naming conventions,
but return dicts or arrays of dicts,
because they will be encoded as JSON.
"""
def get_messages(self):
"""Warnings/lint checks."""
return [
dict(type="WARNING",message="All your base belong to us."),
dict(type="INFO", message="Hamster Dance!")
]
def get_health(self):
o = dict()
GB = 1024*1024*1024
o["bytesTotal"] = 5*GB
o["bytesUsed"] = 5*GB/2
o["bytesRemaining"] = 2*GB
o["bytesNonDfs"] = GB/2
o["liveDataNodes"] = 13
o["deadDataNodes"] = 2
o["upgradeStatus"] = dict(version=13, percentComplete=100, finalized=True)
return o
def get_datanode_report(self):
r = []
for i in range(0, 13):
dinfo = dict()
dinfo["name"] = "fake-%d" % i
dinfo["storageID"] = "fake-id-%d" % i
dinfo["host"] = "fake-host-%d" % i
dinfo["capacity"] = 123456789
dinfo["dfsUsed"] = 234
|
Andrew-McNab-UK/DIRAC
|
AccountingSystem/Agent/test/Test_NetworkAgent.py
|
Python
|
gpl-3.0
| 3,707
| 0.021581
|
""" Contains unit tests of NetworkAgent module
"""
import DIRAC.AccountingSystem.Agent.NetworkAgent as module
import unittest
from mock.mock import MagicMock
__RCSID__ = "$Id$"
MQURI1 = 'mq.dirac.net::Topic::perfsonar.summary.packet-loss-rate'
MQURI2 = 'mq.dirac.net::Queue::perfsonar.summary.histogram-owdelay'
ROOT_PATH = '/Resources/Sites'
SITE1 = 'LCG.Dirac.net'
SITE2 = 'LCG.DiracToRemove.net'
SITE3 = 'VAC.DiracToAdd.org'
SITE1_HOST1 = 'perfsonar.diracold.net'
SITE1_HOST2 = 'perfsonar-to-disable.diracold.net'
SITE2_HOST1 = 'perfsonar.diractoremove.net'
SITE3_HOST1 = 'perfsonar.diractoadd.org'
INITIAL_CONFIG = \
{
'%s/LCG/%s/Network/%s/Enabled' % ( ROOT_PATH, SITE1, SITE1_HOST1 ): 'True',
'%s/LCG/%s/Network/%s/Enabled' % ( ROOT_PATH, SITE1, SITE1_HOST2 ): 'True',
'%s/LCG/%s/Network/%s/Enabled' % ( ROOT_PATH, SITE2, SITE2_HOST1 ): 'True'
}
UPDATED_CONFIG = \
{
'%s/LCG/%s/Network/%s/Enabled' % ( ROOT_PATH, SITE1, SITE1_HOST1 ): 'True',
'%s/LCG/%s/Network/%s/Enabled' % ( ROOT_PATH, SITE1, SITE1_HOST2 ): 'False',
'%s/LCG/%s/Network/%s/Enabled' % ( ROOT_PATH, SITE3, SITE3_HOST1 ): 'True'
}
class NetworkAgentSuccessTestCase( unittest.TestCase ):
""" Test class to check success scenarios.
"""
def setUp( self ):
# external dependencies
module.datetime = MagicMock()
# internal dependencies
module.S_ERROR = MagicMock()
module.S_OK = MagicMock()
module.gLogger = MagicMock()
module.AgentModule = MagicMock()
module.Network = MagicMock()
module.gConfig = MagicMock()
module.CSAPI = MagicMock()
module.createConsumer = MagicMock()
# prepare test object
module.NetworkAgent.__init__ = MagicMock( return_value = None )
module.NetworkAgent.am_getOption = MagicMock( return_value = 100 ) # buffer timeout
self.agent = module.NetworkAgent()
self.agent.initialize()
def test_updateNameDictionary( self ):
module.gConfig.getConfigurationTree.side_effect = [
{'OK': True, 'Value': INITIAL_CONFIG },
{'OK': True, 'Value': UPDATED_CONFIG },
]
# check if name dictionary is empty
self.assertFalse( self.agent.nameDictionary )
self.agent.updateNameDictionary()
self.assertEqual( self.agent.nameDictionary[SITE1_HOST1], SITE1 )
self.assertEqual( self.agent.nameDictionary[SITE1_HOST2], SITE1 )
    self.assertEqual( self.agent.nameDictionary[SITE2_HOST1], SITE2 )
self.agent.updateNameDictionary()
self.assertEqual( self.agent.nameDictionary[SITE1_HOST1], SITE1 )
self.assertEqual( self.agent.nameDictionary[SITE3_HOST1], SITE3 )
    # check if hosts were removed from the dictionary
self.assertRaises( KeyError, lambda: self.agent.nameDictionary[SITE1_HOST2] )
self.assertRaises( KeyError, lambda: self.agent.nameDictionary[SITE2_HOST1] )
def test_agentExecute( self ):
module.NetworkAgent.am_getOption.return_value = '%s, %s' % ( MQURI1, MQURI2 )
module.gConfig.getConfigurationTree.return_value = {'OK': True, 'Value': INITIAL_CONFIG }
# first run
result = self.agent.execute()
self.assertTrue( result['OK'] )
# second run (simulate new messages)
self.agent.messagesCount += 10
result = self.agent.execute()
self.assertTrue( result['OK'] )
# third run (no new messages - restart consumers)
result = self.agent.execute()
self.assertTrue( result['OK'] )
if __name__ == '__main__':
suite = unittest.defaultTestLoader.loadTestsFromTestCase( NetworkAgentSuccessTestCase )
testResult = unittest.TextTestRunner( verbosity = 2 ).run( suite )
|
LLNL/spack
|
var/spack/repos/builtin/packages/folly/package.py
|
Python
|
lgpl-2.1
| 1,952
| 0.001537
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Folly(CMakePackage):
"""Folly (acronymed loosely after Facebook Open Source Library) is a
library of C++11 components designed with practicality and efficiency
in mind.
Folly contains a variety of core library components used extensively at
Facebook. In particular, it's often a dependency of Facebook's other open
source C++ efforts and place where those projects can share code.
"""
homepage = "https://github.com/facebook/folly"
url = "https://github.com/facebook/folly/releases/download/v2021.05.24.00/folly-v2021.05.24.00.tar.gz"
version('2021.05.24.00', sha256='9d308adefe4670637f5c7d96309b3b394ac3fa129bc954f5dfbdd8b741c02aad')
# CMakePackage Dependency
depends_on('pkgconfig', type='build')
# folly requires gcc 4.9+ and a version of boost compiled with >= C++14
# TODO: Specify the boost components
    variant('cxxstd', default='14', values=('14', '17'), multi=False, description='Use the specified C++ standard when building.')
depends_on('boost+context+container cxxstd=14', when='cxxstd=14')
depends_on('boost+context+container cxxstd=17', when='cxxstd=17')
    # required dependencies
depends_on('gflags')
depends_on('glog')
depends_on('double-conversion')
depends_on('libevent')
depends_on('fmt')
# optional dependencies
variant('libdwarf', default=False, description="Optional Dependency")
variant('elfutils', default=False, description="Optional Dependency")
variant('libunwind', default=False, description="Optional Dependency")
depends_on('libdwarf', when='+libdwarf')
depends_on('elfutils', when='+elfutils')
depends_on('libunwind', when='+libunwind')
configure_directory = 'folly'
|
ebressert/ScipyNumpy_book_examples
|
python_examples/scipy_36_ex2.py
|
Python
|
mit
| 1,073
| 0.001864
|
import numpy as np
from scipy.misc import imread, imsave
from glob import glob
# This function allows us to place in the
# brightest pixels per x and y position between
# two images. It is similar to PIL's
# ImageChop.Lighter function.
def chop_lighter(image1, image2):
s1 = np.sum(image1, axis=2)
s2 = np.sum(image2, axis=2)
index = s1 < s2
image1[index, 0] = image2[index, 0]
image1[index, 1] = image2[index, 1]
image1[index, 2] = image2[index, 2]
return image1
# Getting the list of files in the directory
files = glob('space/*.JPG')
# Opening up the first image for looping
im1 = imread(files[0]).astype(np.float32)
im2 = np.copy(im1)
# Starting loop
for i in xrange(1, len(files)):
print i
    im = imread(files[i]).astype(np.float32)
    # Same as before
im1 += im
    # im2 image shows star trails better
im2 = chop_lighter(im2, im)
# Saving image with slight tweaking on the combination
# of the two images to show star trails with the
# co-added image.
imsave('scipy_36_ex2.jpg', im1 / im1.max() + im2 / im2.max() * 0.2)
|
jesopo/bitbot
|
src/IRCBuffer.py
|
Python
|
gpl-2.0
| 3,891
| 0.005911
|
import collections, dataclasses, datetime, re, typing, uuid
from src import IRCBot, IRCServer, utils
MAX_LINES = 2**10
@dataclasses.dataclass
class BufferLine(object):
sender: str
message: str
action: bool
tags: dict
from_self: bool
method: str
deleted: bool=False
notes: typing.Dict[str, str] = dataclasses.field(
default_factory=dict)
id: str = dataclasses.field(
default_factory=lambda: str(uuid.uuid4()))
timestamp: datetime.datetime = dataclasses.field(
default_factory=utils.datetime.utcnow)
def format(self):
if self.action:
format = "* %s %s"
else:
format = "<%s> %s"
return format % (self.sender, self.message)
class BufferLineMatch(object):
def __init__(self, line: BufferLine, match: str):
self.line = line
self.match = match
class Buffer(object):
def __init__(self, bot: "IRCBot.Bot", server: "IRCServer.Server"):
self.bot = bot
self.server = server
        self._lines: typing.Deque[BufferLine] = collections.deque(
            maxlen=MAX_LINES)
def __len__(self) -> int:
        return len(self._lines)
def add(self, line: BufferLine):
self._lines.appendleft(line)
def get(self, index: int=0, from_self=True, deleted=False
) -> typing.Optional[BufferLine]:
for line in self._lines:
if line.from_self and not from_self:
continue
if line.deleted and not deleted:
continue
return line
return None
def get_all(self, for_user: typing.Optional[str]=None):
        if for_user is not None:
for line in self._lines:
if self.server.irc_lower(line.sender) == for_user:
yield line
else:
for line in self._lines:
yield line
def find_all(self, pattern: typing.Union[str, typing.Pattern[str]],
not_pattern: typing.Union[str, typing.Pattern[str]]=None,
from_self=True, for_user: str=None, deleted=False
) -> typing.Generator[BufferLineMatch, None, None]:
if for_user:
for_user = self.server.irc_lower(for_user)
for line in self._lines:
if line.from_self and not from_self:
continue
else:
match = re.search(pattern, line.message)
if match:
if not_pattern and re.search(not_pattern, line.message):
continue
if for_user and not self.server.irc_lower(line.sender
) == for_user:
continue
if line.deleted and not deleted:
continue
yield BufferLineMatch(line, match.group(0))
return None
def find(self, pattern: typing.Union[str, typing.Pattern[str]]
) -> typing.Optional[BufferLineMatch]:
return next(self.find_all(pattern), None)
def find_id(self, id: str) -> typing.Optional[BufferLine]:
for line in self._lines:
if line.id == id:
return line
return None
def find_from(self, nickname: str) -> typing.Optional[BufferLine]:
lines = self.find_many_from(nickname, 1)
if lines:
return lines[0]
else:
return None
def find_many_from(self, nickname: str, max: int
) -> typing.List[BufferLine]:
nickname_lower = self.server.irc_lower(nickname)
found_lines = []
for line in self._lines:
if (not line.from_self
and self.server.irc_lower(line.sender) == nickname_lower):
found_lines.append(line)
if len(found_lines) == max:
break
return found_lines
|
telefonicaid/murano
|
murano/openstack/common/policy.py
|
Python
|
apache-2.0
| 29,785
| 0.000034
|
# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Common Policy Engine Implementation
Policies can be expressed in one of two forms: A list of lists, or a
string written in the new policy language.
In the list-of-lists representation, each check inside the innermost
list is combined as with an "and" conjunction--for that check to pass,
all the specified checks must pass. These innermost lists are then
combined as with an "or" conjunction. This is the original way of
expressing policies, but there now exists a new way: the policy
language.
In the policy language, each check is specified the same way as in the
list-of-lists representation: a simple "a:b" pair that is matched to
the correct code to perform that check. However, conjunction
operators are available, allowing for more expressiveness in crafting
policies.
As an example, take the following rule, expressed in the list-of-lists
representation::
[["role:admin"], ["project_id:%(project_id)s", "role:projectadmin"]]
In the policy language, this becomes::
role:admin or (project_id:%(project_id)s and role:projectadmin)
The policy language also has the "not" operator, allowing a richer
policy rule::
project_id:%(project_id)s and not role:dunce
It is possible to perform policy checks on the following user
attributes (obtained through the token): user_id, domain_id or
project_id::
domain_id:<some_value>
Attributes sent along with API calls can be used by the policy engine
(on the right side of the expression), by using the following syntax::
<some_value>:user.id
Contextual attributes of objects identified by their IDs are loaded
from the database. They are also available to the policy engine and
can be checked through the `target` keyword::
<some_value>:target.role.name
All these attributes (related to users, API calls, and context) can be
checked against each other or against constants, be it literals (True,
<a_number>) or strings.
Finally, two special policy checks should be mentioned; the policy
check "@" will always accept an access, and the policy check "!" will
always reject an access. (Note that if a rule is either the empty
list ("[]") or the empty string, this is equivalent to the "@" policy
check.) Of these, the "!" policy check is probably the most useful,
as it allows particular rules to be explicitly disabled.
"""
import abc
import ast
import copy
import os
import re
from oslo.config import cfg
from oslo.serialization import jsonutils
import six
import six.moves.urllib.parse as urlparse
import six.moves.urllib.request as urlrequest
from murano.openstack.common import fileutils
from murano.openstack.common._i18n import _, _LE, _LI
from murano.openstack.common import log as logging
policy_opts = [
cfg.StrOpt('policy_file',
default='policy.json',
help=_('The JSON file that defines policies.')),
cfg.StrOpt('policy_default_rule',
default='default',
help=_('Default rule. Enforced when a requested rule is not '
'found.')),
cfg.MultiStrOpt('policy_dirs',
default=['policy.d'],
help=_('Directories where policy configuration files are '
'stored. They can be relative to any directory '
'in the search path defined by the config_dir '
'option, or absolute paths. The file defined by '
'policy_file must exist for these directories to '
'be searched.')),
]
CONF = cfg.CONF
CONF.register_opts(policy_opts)
LOG = logging.getLogger(__name__)
_checks = {}
def list_opts():
"""Entry point for oslo.config-generator."""
return [(None, copy.deepcopy(policy_opts))]
class PolicyNotAuthorized(Exception):
def __init__(self, rule):
msg = _("Policy doesn't allow %s to be performed.") % rule
super(PolicyNotAuthorized, self).__init__(msg)
class Rules(dict):
"""A store for rules. Handles the default_rule setting directly."""
@classmethod
def load_json(cls, data, default_rule=None):
"""Allow loading of JSON rule data."""
# Suck in the JSON data and parse the rules
rules = dict((k, parse_rule(v)) for k, v in
jsonutils.loads(data).items())
return cls(rules, default_rule)
def __init__(self, rules=None, default_rule=None):
"""Initialize the Rules store."""
super(Rules, self).__init__(rules or {})
self.default_rule = default_rule
def __missing__(self, key):
"""Implements the default rule handling."""
if isinstance(self.default_rule, dict):
raise KeyError(key)
# If the default rule isn't actually defined, do something
# reasonably intelligent
if not self.default_rule:
raise KeyError(key)
if isinstance(self.default_rule, BaseCheck):
return self.default_rule
# We need to check this or we can get infinite recursion
if self.default_rule not in self:
raise KeyError(key)
elif isinstance(self.default_rule, six.string_types):
return self[self.default_rule]
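    # Illustrative note (editor's sketch, not part of the original class): with
    # Rules({"default": TrueCheck()}, default_rule="default"), a lookup of a
    # missing key such as rules["compute:reboot"] falls into __missing__ above
    # and returns the TrueCheck stored under "default" rather than raising
    # KeyError. (TrueCheck is one of the check classes defined elsewhere in
    # this module.)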
def __str__(self):
"""Dumps a string representation of the rules."""
# Start by building the canonical strings for the rules
out_rules = {}
for key, value in self.items():
# Use empty string for singleton TrueCheck instances
if isinstance(value, TrueCheck):
out_rules[key] = ''
else:
out_rules[key] = str(value)
# Dump a pretty-printed JSON representation
return jsonutils.dumps(out_rules, indent=4)
class Enforcer(object):
"""Responsible for loading and enforcing rules.
:param policy_file: Custom policy file to use, if none is
specified, `CONF.policy_file` will be
used.
    :param rules: Default dictionary / Rules to use. It will be
considered just in the first instantiation. If
`load_rules(True)`, `clear()` or `set_rules(True)`
is called this will be overwritten.
:param default_rule: Default rule to use, CONF.default_rule will
be used if none is specified.
    :param use_conf: Whether to load rules from cache or config file.
:param overwrite: Whether to overwrite existing rules when reload rules
from config file.
"""
def __init__(self, policy_file=None, rules=None,
default_rule=None, use_conf=True, overwrite=True):
self.default_rule = default_rule or CONF.policy_default_rule
self.rules = Rules(rules, self.default_rule)
self.policy_path = None
self.policy_file = policy_file or CONF.policy_file
self.use_conf = use_conf
self.overwrite = overwrite
def set_rules(self, rules, overwrite=True, use_conf=False):
"""Create a new Rules object based on the provided dict of rules.
:param rules: New rules to use. It should be an instance of dict.
:param overwrite: Whether to overwrite current rules or update them
with the new rules.
:param use_conf: Whether to reload rules from cache or config file.
"""
if not isinstance(rules, dict):
raise TypeError(_("Rules must be an instance of dict or Rules, "
"got %s instead") % type(rules))
|